hip_filename    string (length 5-84)
hip_content     string (length 79-9.69M)
cuda_filename   string (length 4-83)
cuda_content    string (length 19-9.69M)
a2120250711c71a7bcf87446006952b8f5093514.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "mesh_belonging.h" #include <mirheo/core/celllist.h> #include <mirheo/core/pvs/object_vector.h> #include <mirheo/core/pvs/views/ov.h> #include <mirheo/core/rigid/utils.h> #include <mirheo/core/utils/kernel_launch.h> #include <mirheo/core/utils/quaternion.h> namespace mirheo { namespace mesh_belonging_kernels { const real tolerance = 1e-6_r; /// https://en.wikipedia.org/wiki/M%C3%B6ller%E2%80%93Trumbore_intersection_algorithm __device__ static inline bool doesRayIntersectTriangle( real3 rayOrigin, real3 rayVector, real3 v0, real3 v1, real3 v2) { real3 edge1, edge2, h, s, q; real a,f,u,v; edge1 = v1 - v0; edge2 = v2 - v0; h = cross(rayVector, edge2); a = dot(edge1, h); if (math::abs(a) < tolerance) return false; f = 1.0_r / a; s = rayOrigin - v0; u = f * (dot(s, h)); if (u < 0.0_r || u > 1.0_r) return false; q = cross(s, edge1); v = f * dot(rayVector, q); if (v < 0.0_r || u + v > 1.0_r) return false; // At this stage we can compute t to find out where the intersection point is on the line. real t = f * dot(edge2, q); if (t > tolerance) // ray intersection return true; else return false; // This means that there is a line intersection but not a ray intersection. } __device__ static inline real3 fetchPosition(const real4 *vertices, int i) { auto v = vertices[i]; return {v.x, v.y, v.z}; } /** * One warp works on one particle */ __device__ static inline BelongingTags oneParticleInsideMesh(int pid, real3 r, int objId, const real3 com, const MeshView mesh, const real4* vertices) { // Work in obj reference frame for simplicity r = r - com; // shoot 3 rays in different directions, count intersections constexpr int nRays = 3; constexpr real3 rays[nRays] = { {0,1,0}, {0,1,0}, {0,1,0} }; int counters[nRays] = {0, 0, 0}; for (int i = laneId(); i < mesh.ntriangles; i += warpSize) { int3 trid = mesh.triangles[i]; real3 v0 = fetchPosition(vertices, objId*mesh.nvertices + trid.x) - com; real3 v1 = fetchPosition(vertices, objId*mesh.nvertices + trid.y) - com; real3 v2 = fetchPosition(vertices, objId*mesh.nvertices + trid.z) - com; for (int c = 0; c < nRays; c++) if (doesRayIntersectTriangle(r, rays[c], v0, v1, v2)) counters[c]++; } // counter is odd if the particle is inside // however, realing-point precision sometimes yields in errors // so we choose what the majority(!) 
of the rays say int intersecting = 0; for (int c = 0; c < nRays; c++) { counters[c] = warpReduce(counters[c], [] (int a, int b) { return a+b; }); if ( (counters[c] % 2) != 0 ) intersecting++; } if (intersecting > (nRays/2)) return BelongingTags::Inside; else return BelongingTags::Outside; } /** * OVview view is only used to provide # of objects and extent information * Actual data is in \p vertices * @param cinfo is the cell-list sync'd with the target ParticleVector data */ template<int WARPS_PER_OBJ> __global__ void insideMesh(const OVview ovView, const MeshView mesh, const real4 *vertices, CellListInfo cinfo, PVview pvView, BelongingTags* tags) { const int gid = blockIdx.x*blockDim.x + threadIdx.x; const int wid = gid / warpSize; const int objId = wid / WARPS_PER_OBJ; const int locWid = wid % WARPS_PER_OBJ; if (objId >= ovView.nObjects) return; const int3 cidLow = cinfo.getCellIdAlongAxes(ovView.comAndExtents[objId].low - 0.5_r); const int3 cidHigh = cinfo.getCellIdAlongAxes(ovView.comAndExtents[objId].high + 0.5_r); const int3 span = cidHigh - cidLow + make_int3(1,1,1); const int totCells = span.x * span.y * span.z; for (int i = locWid; i < totCells; i += WARPS_PER_OBJ) { const int3 cid3 = make_int3( i % span.x, (i/span.x) % span.y, i / (span.x*span.y) ) + cidLow; const int cid = cinfo.encode(cid3); if (cid < 0 || cid >= cinfo.totcells) continue; int pstart = cinfo.cellStarts[cid]; int pend = cinfo.cellStarts[cid+1]; #pragma unroll 3 for (int pid = pstart; pid < pend; pid++) { const Particle p(pvView.readParticle(pid)); auto tag = oneParticleInsideMesh(pid, p.r, objId, ovView.comAndExtents[objId].com, mesh, vertices); // Only tag particles inside, default is outside anyways if (laneId() == 0 && tag != BelongingTags::Outside) tags[pid] = tag; } } } } // namespace mesh_belonging_kernels void MeshBelongingChecker::_tagInner(ParticleVector *pv, CellList *cl, hipStream_t stream) { tags_.resize_anew(pv->local()->size()); tags_.clearDevice(stream); auto computeTags = [&](ParticleVectorLocality locality) { ov_->findExtentAndCOM(stream, locality); auto lov = ov_->get(locality); auto view = OVview(ov_, lov); auto vertices = lov->getMeshVertices(stream); auto meshView = MeshView(ov_->mesh.get()); debug("Computing inside/outside tags (against mesh) for %d %s objects '%s' and %d '%s' particles", view.nObjects, getParticleVectorLocalityStr(locality).c_str(), ov_->getCName(), pv->local()->size(), pv->getCName()); constexpr int nthreads = 128; constexpr int warpsPerObject = 1024; SAFE_KERNEL_LAUNCH( mesh_belonging_kernels::insideMesh<warpsPerObject>, getNblocks(warpsPerObject*32*view.nObjects, nthreads), nthreads, 0, stream, view, meshView, reinterpret_cast<real4*>(vertices->devPtr()), cl->cellInfo(), cl->getView<PVview>(), tags_.devPtr()); }; computeTags(ParticleVectorLocality::Local); computeTags(ParticleVectorLocality::Halo); } } // namespace mirheo
a2120250711c71a7bcf87446006952b8f5093514.cu
#include "mesh_belonging.h" #include <mirheo/core/celllist.h> #include <mirheo/core/pvs/object_vector.h> #include <mirheo/core/pvs/views/ov.h> #include <mirheo/core/rigid/utils.h> #include <mirheo/core/utils/kernel_launch.h> #include <mirheo/core/utils/quaternion.h> namespace mirheo { namespace mesh_belonging_kernels { const real tolerance = 1e-6_r; /// https://en.wikipedia.org/wiki/M%C3%B6ller%E2%80%93Trumbore_intersection_algorithm __device__ static inline bool doesRayIntersectTriangle( real3 rayOrigin, real3 rayVector, real3 v0, real3 v1, real3 v2) { real3 edge1, edge2, h, s, q; real a,f,u,v; edge1 = v1 - v0; edge2 = v2 - v0; h = cross(rayVector, edge2); a = dot(edge1, h); if (math::abs(a) < tolerance) return false; f = 1.0_r / a; s = rayOrigin - v0; u = f * (dot(s, h)); if (u < 0.0_r || u > 1.0_r) return false; q = cross(s, edge1); v = f * dot(rayVector, q); if (v < 0.0_r || u + v > 1.0_r) return false; // At this stage we can compute t to find out where the intersection point is on the line. real t = f * dot(edge2, q); if (t > tolerance) // ray intersection return true; else return false; // This means that there is a line intersection but not a ray intersection. } __device__ static inline real3 fetchPosition(const real4 *vertices, int i) { auto v = vertices[i]; return {v.x, v.y, v.z}; } /** * One warp works on one particle */ __device__ static inline BelongingTags oneParticleInsideMesh(int pid, real3 r, int objId, const real3 com, const MeshView mesh, const real4* vertices) { // Work in obj reference frame for simplicity r = r - com; // shoot 3 rays in different directions, count intersections constexpr int nRays = 3; constexpr real3 rays[nRays] = { {0,1,0}, {0,1,0}, {0,1,0} }; int counters[nRays] = {0, 0, 0}; for (int i = laneId(); i < mesh.ntriangles; i += warpSize) { int3 trid = mesh.triangles[i]; real3 v0 = fetchPosition(vertices, objId*mesh.nvertices + trid.x) - com; real3 v1 = fetchPosition(vertices, objId*mesh.nvertices + trid.y) - com; real3 v2 = fetchPosition(vertices, objId*mesh.nvertices + trid.z) - com; for (int c = 0; c < nRays; c++) if (doesRayIntersectTriangle(r, rays[c], v0, v1, v2)) counters[c]++; } // counter is odd if the particle is inside // however, realing-point precision sometimes yields in errors // so we choose what the majority(!) 
of the rays say int intersecting = 0; for (int c = 0; c < nRays; c++) { counters[c] = warpReduce(counters[c], [] (int a, int b) { return a+b; }); if ( (counters[c] % 2) != 0 ) intersecting++; } if (intersecting > (nRays/2)) return BelongingTags::Inside; else return BelongingTags::Outside; } /** * OVview view is only used to provide # of objects and extent information * Actual data is in \p vertices * @param cinfo is the cell-list sync'd with the target ParticleVector data */ template<int WARPS_PER_OBJ> __global__ void insideMesh(const OVview ovView, const MeshView mesh, const real4 *vertices, CellListInfo cinfo, PVview pvView, BelongingTags* tags) { const int gid = blockIdx.x*blockDim.x + threadIdx.x; const int wid = gid / warpSize; const int objId = wid / WARPS_PER_OBJ; const int locWid = wid % WARPS_PER_OBJ; if (objId >= ovView.nObjects) return; const int3 cidLow = cinfo.getCellIdAlongAxes(ovView.comAndExtents[objId].low - 0.5_r); const int3 cidHigh = cinfo.getCellIdAlongAxes(ovView.comAndExtents[objId].high + 0.5_r); const int3 span = cidHigh - cidLow + make_int3(1,1,1); const int totCells = span.x * span.y * span.z; for (int i = locWid; i < totCells; i += WARPS_PER_OBJ) { const int3 cid3 = make_int3( i % span.x, (i/span.x) % span.y, i / (span.x*span.y) ) + cidLow; const int cid = cinfo.encode(cid3); if (cid < 0 || cid >= cinfo.totcells) continue; int pstart = cinfo.cellStarts[cid]; int pend = cinfo.cellStarts[cid+1]; #pragma unroll 3 for (int pid = pstart; pid < pend; pid++) { const Particle p(pvView.readParticle(pid)); auto tag = oneParticleInsideMesh(pid, p.r, objId, ovView.comAndExtents[objId].com, mesh, vertices); // Only tag particles inside, default is outside anyways if (laneId() == 0 && tag != BelongingTags::Outside) tags[pid] = tag; } } } } // namespace mesh_belonging_kernels void MeshBelongingChecker::_tagInner(ParticleVector *pv, CellList *cl, cudaStream_t stream) { tags_.resize_anew(pv->local()->size()); tags_.clearDevice(stream); auto computeTags = [&](ParticleVectorLocality locality) { ov_->findExtentAndCOM(stream, locality); auto lov = ov_->get(locality); auto view = OVview(ov_, lov); auto vertices = lov->getMeshVertices(stream); auto meshView = MeshView(ov_->mesh.get()); debug("Computing inside/outside tags (against mesh) for %d %s objects '%s' and %d '%s' particles", view.nObjects, getParticleVectorLocalityStr(locality).c_str(), ov_->getCName(), pv->local()->size(), pv->getCName()); constexpr int nthreads = 128; constexpr int warpsPerObject = 1024; SAFE_KERNEL_LAUNCH( mesh_belonging_kernels::insideMesh<warpsPerObject>, getNblocks(warpsPerObject*32*view.nObjects, nthreads), nthreads, 0, stream, view, meshView, reinterpret_cast<real4*>(vertices->devPtr()), cl->cellInfo(), cl->getView<PVview>(), tags_.devPtr()); }; computeTags(ParticleVectorLocality::Local); computeTags(ParticleVectorLocality::Halo); } } // namespace mirheo
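The pair above differs only in the hipify banner, the prepended hip/hip_runtime.h include, and the stream type in the host function signature (cudaStream_t in the .cu file, hipStream_t in the .hip file). The following minimal sketch is not taken from the dataset; it isolates that pattern with hypothetical names (clearTags, clearOnStream), with comments marking what hipify would rewrite.

#include <cuda_runtime.h>
#include <cstdio>

// Kernel body is untouched by hipify.
__global__ void clearTags(int *tags, int n)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) tags[i] = 0;
}

// hipify: cudaStream_t -> hipStream_t; "#include \"hip/hip_runtime.h\"" is prepended to the file.
void clearOnStream(int *tags, int n, cudaStream_t stream)
{
    const int nthreads = 128;
    const int nblocks  = (n + nthreads - 1) / nthreads;
    clearTags<<<nblocks, nthreads, 0, stream>>>(tags, n);
}

int main()
{
    const int n = 1024;
    int *tags = nullptr;
    cudaMalloc((void **)&tags, n * sizeof(int));   // hipify: cudaMalloc -> hipMalloc
    cudaStream_t stream;
    cudaStreamCreate(&stream);                     // hipify: -> hipStreamCreate
    clearOnStream(tags, n, stream);
    cudaStreamSynchronize(stream);                 // hipify: -> hipStreamSynchronize
    cudaStreamDestroy(stream);                     // hipify: -> hipStreamDestroy
    cudaFree(tags);                                // hipify: cudaFree -> hipFree
    printf("done\n");
    return 0;
}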
d75c4f6b63f794c7c98b37811ccfa4e8f4ff45f4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date June 2018 @generated from sparse/blas/zmergeblockkrylov.cu, normal z -> c, Mon Jun 25 18:24:26 2018 @author Hartwig Anzt */ #include "magmasparse_internal.h" #define BLOCK_SIZE 16 #define PRECISION_c // These routines merge multiple kernels from qmr into one. /* -------------------------------------------------------------------------- */ __global__ void magma_cmergeblockkrylov_kernel( int num_rows, int num_cols, magmaFloatComplex *alpha, magmaFloatComplex *p, magmaFloatComplex *x ) { int num_vecs = num_cols; int row = blockIdx.x * blockDim.x + threadIdx.x; int vec = blockIdx.y; if ( row<num_rows ) { magmaFloatComplex val = x[ row + vec * num_rows ]; for( int j=0; j<num_vecs; j++ ){ magmaFloatComplex lalpha = alpha[ j * num_vecs + vec ]; magmaFloatComplex xval = p[ row + j * num_rows ]; val += lalpha * xval; } x[ row + vec * num_rows ] = val; } } /** Purpose ------- Mergels multiple operations into one kernel: v = y / rho y = y / rho w = wt / psi z = z / psi @param[in] num_rows magma_int_t dimension m @param[in] num_cols magma_int_t dimension n @param[in] alpha magmaFloatComplex_ptr matrix containing all SKP @param[in] p magmaFloatComplex_ptr search directions @param[in,out] x magmaFloatComplex_ptr approximation vector @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_cgegpuk ********************************************************************/ extern "C" magma_int_t magma_cmergeblockkrylov( magma_int_t num_rows, magma_int_t num_cols, magmaFloatComplex_ptr alpha, magmaFloatComplex_ptr p, magmaFloatComplex_ptr x, magma_queue_t queue ) { dim3 Bs( BLOCK_SIZE, num_cols ); dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) ); hipLaunchKernelGGL(( magma_cmergeblockkrylov_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, alpha, p, x ); return MAGMA_SUCCESS; }
d75c4f6b63f794c7c98b37811ccfa4e8f4ff45f4.cu
/* -- MAGMA (version 2.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date June 2018 @generated from sparse/blas/zmergeblockkrylov.cu, normal z -> c, Mon Jun 25 18:24:26 2018 @author Hartwig Anzt */ #include "magmasparse_internal.h" #define BLOCK_SIZE 16 #define PRECISION_c // These routines merge multiple kernels from qmr into one. /* -------------------------------------------------------------------------- */ __global__ void magma_cmergeblockkrylov_kernel( int num_rows, int num_cols, magmaFloatComplex *alpha, magmaFloatComplex *p, magmaFloatComplex *x ) { int num_vecs = num_cols; int row = blockIdx.x * blockDim.x + threadIdx.x; int vec = blockIdx.y; if ( row<num_rows ) { magmaFloatComplex val = x[ row + vec * num_rows ]; for( int j=0; j<num_vecs; j++ ){ magmaFloatComplex lalpha = alpha[ j * num_vecs + vec ]; magmaFloatComplex xval = p[ row + j * num_rows ]; val += lalpha * xval; } x[ row + vec * num_rows ] = val; } } /** Purpose ------- Mergels multiple operations into one kernel: v = y / rho y = y / rho w = wt / psi z = z / psi @param[in] num_rows magma_int_t dimension m @param[in] num_cols magma_int_t dimension n @param[in] alpha magmaFloatComplex_ptr matrix containing all SKP @param[in] p magmaFloatComplex_ptr search directions @param[in,out] x magmaFloatComplex_ptr approximation vector @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_cgegpuk ********************************************************************/ extern "C" magma_int_t magma_cmergeblockkrylov( magma_int_t num_rows, magma_int_t num_cols, magmaFloatComplex_ptr alpha, magmaFloatComplex_ptr p, magmaFloatComplex_ptr x, magma_queue_t queue ) { dim3 Bs( BLOCK_SIZE, num_cols ); dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) ); magma_cmergeblockkrylov_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>> ( num_rows, num_cols, alpha, p, x ); return MAGMA_SUCCESS; }
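The MAGMA pair above isolates the launch-syntax rewrite: the triple-chevron launch on queue->cuda_stream() in the .cu file becomes a hipLaunchKernelGGL call with explicit dim3 wrappers in the .hip file. A minimal sketch of that mapping, not taken from the dataset and using hypothetical names (axpyKernel, axpy):

#include <cuda_runtime.h>

__global__ void axpyKernel(int n, float a, const float *x, float *y)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) y[i] += a * x[i];
}

void axpy(int n, float a, const float *x, float *y, cudaStream_t stream)
{
    dim3 Bs(256);
    dim3 Gs((n + 255) / 256);
    // CUDA form, as written in the .cu file:
    axpyKernel<<<Gs, Bs, 0, stream>>>(n, a, x, y);
    // hipify rewrites the launch above in the .hip file as:
    //   hipLaunchKernelGGL(( axpyKernel), dim3(Gs), dim3(Bs), 0, stream, n, a, x, y);
}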
60e5f44a0bf6c0f1060c706bbd9f7037c44633bd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // ., . // . : // . . - .: , 1982. // 583 . #define _CRT_SECURE_NO_WARNINGS #define _SCL_SECURE_NO_WARNINGS #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/version.h> #include <cstdlib> #include <algorithm> #include <cstdio> #include <iostream> #include <sstream> #include <string> #include <assert.h> #include <time.h> #include <fstream> using namespace std; using namespace thrust; // Thrust is a C++ template library for CUDA based on the Standard Template Library (STL). // Thrust allows you to implement high performance parallel applications with minimal programming effort through a high-level interface that is fully interoperable with CUDA C. // Thrust provides a rich collection of data parallel primitives such as scan, sort, and reduce, which can be composed together to implement complex algorithms with concise, readable source code. // By describing your computation in terms of these high-level abstractions you provide Thrust with the freedom to select the most efficient implementation automatically. // As a result, Thrust can be utilized in rapid prototyping of CUDA applications, where programmer productivity matters most, as well as in production, where robustness and absolute performance are crucial. // Read more at: http://docs.nvidia.com/cuda/thrust/index.html#ixzz3hymTnQwX double module(thrust::device_vector<double>& x); double delta(thrust::device_vector<double>& x, thrust::device_vector<double>& y); double distance(thrust::device_vector<double>& x, thrust::device_vector<double>& y); unsigned long total_of(thrust::device_vector<size_t>& m); template <typename T> struct inc_functor { __host__ __device__ T operator()(const T& value) const { return value + 1; } }; template <typename T> struct square_functor { __host__ __device__ T operator()(const T& value) const { return value * value; } }; template <typename T> struct add_functor { __host__ __device__ T operator()(const T& value1, const T& value2) const { return value1 + value2; } }; template <typename T> struct sub_functor { __host__ __device__ T operator()(const T& value1, const T& value2) const { return value1 - value2; } }; template <typename T> struct mul_functor { __host__ __device__ T operator()(const T& value1, const T& value2) const { return value1 * value2; } }; template <typename T> struct diff_functor { __host__ __device__ T operator()(const T& value1, const T& value2) const { return thrust::max(value1 - value2, value2 - value1); } }; template <typename T> struct abs_functor { __host__ __device__ T operator()(const T& value) const { return thrust::max(value, -value); } }; template <typename T> struct max_functor { __host__ __device__ T operator()(const T& value1, const T& value2) const { return thrust::max(value1, value2); } }; ///////////////////////////////////////////////////////// // // m - unsigned long total_of(thrust::device_vector<size_t>& m) { return thrust::transform_reduce(m.begin(), m.end(), inc_functor<size_t>(), 1UL, mul_functor<unsigned long>()); } ///////////////////////////////////////////////////////// // double module(thrust::device_vector<double>& x) { return thrust::transform_reduce(x.begin(), x.end(), abs_functor<double>(), 0.0, max_functor<double>()); } ///////////////////////////////////////////////////////// // double delta(thrust::device_vector<double>& x, thrust::device_vector<double>& y) { size_t i = thrust::min(x.size(), y.size()); thrust::device_vector<double> 
diff(thrust::max(x.size(), y.size())); thrust::transform(x.begin(), x.begin() + i, y.begin(), diff.begin(), diff_functor<double>()); thrust::transform(x.begin() + i, x.end(), diff.begin() + i, abs_functor<double>()); thrust::transform(y.begin() + i, y.end(), diff.begin() + i, abs_functor<double>()); return thrust::reduce(diff.begin(), diff.end(), 0.0, max_functor<double>()); } ///////////////////////////////////////////////////////// // double distance(thrust::device_vector<double>& x, thrust::device_vector<double>& y) { size_t i = thrust::min(x.size(), y.size()); thrust::device_vector<double> sub(::max(x.size(), y.size())); thrust::transform(x.begin(), x.end(), y.begin(), sub.begin(), sub_functor<double>()); thrust::copy(x.begin() + i, x.end(), sub.begin() + i); thrust::copy(y.begin() + i, y.end(), sub.begin() + i); return std::sqrt(thrust::transform_reduce(sub.begin(), sub.end(), square_functor<double>(), 0.0, add_functor<double>())); } enum t_ask_mode { NOASK = 0, ASK = 1 }; enum t_trace_mode { NOTRACE = 0, TRACE = 1 }; t_ask_mode ask_mode = NOASK; t_trace_mode trace_mode = NOTRACE; ///////////////////////////////////////////////////////// // static const unsigned _count = 1; static const size_t _n = 2; static const size_t _md = 20; static const size_t _m[] = {20, 20}; static const double _a[] = {0, 0}; static const double _b[] = {1000, 1000}; static const double _f1[] = {0, 0, 500}; static const double _f2[] = {100, 100, 500}; static const double* _f[] = {_f1, _f2}; static const double _w1[] = {0, 0, 3040}; static const double _w2[] = {150, 180, 1800}; static const double _w3[] = {240, 200, 800}; static const double _w4[] = {260, 90, 1200}; static const double* _w[] = {_w1, _w2, _w3, _w4}; static const double _e = 1e-8; ///////////////////////////////////////////////////////// // // index - __device__ void vector_of(unsigned* vector, unsigned long index, size_t* m, size_t n) { for (size_t i = 0; i < n; i++) { unsigned long m1 = 1ul + m[i]; vector[i] = index % m1; index /= m1; } } ///////////////////////////////////////////////////////// // // // vector - // m - // a - // b - __device__ void point_of(double* point, unsigned* vector, size_t* m, double* a, double* b, size_t n) { for (size_t i = 0; i < n; i++) point[i] = (a[i] * (m[i] - vector[i]) + b[i] * vector[i]) / m[i]; } ///////////////////////////////////////////////////////// // , // x - // f - // a - // b - __device__ bool check(double* x, double* f, double* a, double* b, size_t n, size_t m) { for (int j = 0; j < m; j++) { double s1 = 0; for (int i = 0; i < n; i++) { double y = x[i] - f[j * (n + 1) + i]; s1 += y * y; } if (sqrt(s1) > f[j * (n + 1) + n]) return false; } return true; } ///////////////////////////////////////////////////////// // __device__ double target(double* x, double* w, size_t n, size_t m) { double s = 0; for (int j = 0; j < m; j++) { double s1 = 0; for (int i = 0; i < n; i++) { double y = x[i] - w[j * (n + 1) + i]; s1 += y * y; } s += sqrt(s1) * w[j * (n + 1) + n]; } return s; } __device__ void copy(double* x, double* y, size_t n) { for (size_t i = 0; i < n; i++) x[i] = y[i]; } __global__ void kernel0( unsigned* vPtr, double* tPtr, double* xPtr, double* yPtr, bool* ePtr, size_t* m, double* a, double* b, double* f, double* w, unsigned long total, size_t n, size_t mf, size_t mw) { // int id = blockDim.x * blockIdx.x + threadIdx.x; unsigned* v = &vPtr[id * n]; double* t = &tPtr[id * n]; double* x = &xPtr[id * n]; double* y = &yPtr[id]; bool* e = &ePtr[id]; *e = false; for (unsigned long index = blockDim.x * 
blockIdx.x + threadIdx.x; index < total; index += blockDim.x * gridDim.x) { vector_of(v, index, m, n); point_of(t, v, m, a, b, n); if (!check(t, f, a, b, n, mf)) continue; if (!*e) { ::copy(x, t, n); *y = target(t, w, n, mw); *e = true; continue; } double y1 = target(t, w, n, mw); if (y1 < *y) { ::copy(x, t, n); *y = y1; } } } __global__ void kernel1( double* x0, double* tPtr, double* xPtr, double* yPtr, bool* ePtr, double* a, double* b, double* f, double* w, double ak, double bk, size_t k, size_t mk, size_t n, size_t mf, size_t mw) { // int id = blockDim.x * blockIdx.x + threadIdx.x; double* t = &tPtr[id * n]; double* x = &xPtr[id]; double* y = &yPtr[id]; bool* e = &ePtr[id]; ::copy(t, x0, n); *e = false; for (size_t index = blockDim.x * blockIdx.x + threadIdx.x; index <= mk; index += blockDim.x * gridDim.x) { t[k] = (ak * (mk - index) + bk * index) / mk; if (!check(t, f, a, b, n, mf)) continue; if (!*e) { *x = t[k]; *y = target(t, w, n, mw); *e = true; continue; } double y1 = target(t, w, n, mw); if (y1 < *y) { *x = t[k]; *y = y1; } } } __global__ void kernel2( double* tPtr, double* xPtr, double* yPtr, double* pPtr, bool* ePtr, double* x1, double* x2, double* a, double* b, double* f, double* w, double l, double h, size_t md, size_t n, size_t mf, size_t mw) { // int id = blockDim.x * blockIdx.x + threadIdx.x; double* t = &tPtr[id * n]; double* x = &xPtr[id * n]; double* y = &yPtr[id]; double* p = &pPtr[id]; bool* e = &ePtr[id]; *e = false; for (size_t index = blockDim.x * blockIdx.x + threadIdx.x; index <= md + md; index += blockDim.x * gridDim.x) { double pt = (l * (md + md - index) + h * index) / (md + md); for (size_t i = 0; i < n; i++) t[i] = x2[i] * (1.0 - pt) + x1[i] * pt; if (!check(t, f, a, b, n, mf)) continue; if (!*e) { ::copy(x, t, n); *y = target(t, w, n, mw); *p = pt; *e = true; continue; } double y1 = target(t, w, n, mw); if (y1 < *y) { ::copy(x, t, n); *y = y1; *p = pt; } } } int main(int argc, char* argv[]) { // http://stackoverflow.com/questions/2236197/what-is-the-easiest-way-to-initialize-a-stdvector-with-hardcoded-elements unsigned count = _count; size_t n = _n; double e = _e; size_t md = _md; thrust::host_vector<size_t> hm(_m, _m + sizeof(_m) / sizeof(_m[0])); thrust::host_vector<double> ha(_a, _a + sizeof(_a) / sizeof(_a[0])); thrust::host_vector<double> hb(_b, _b + sizeof(_b) / sizeof(_b[0])); thrust::host_vector<double> hf; thrust::host_vector<double> hw; for (size_t i = 0; i < sizeof(_f) / sizeof(_f[0]); i++) for (size_t j = 0; j <= n; j++) hf.push_back(_f[i][j]); for (size_t i = 0; i < sizeof(_w) / sizeof(_w[0]); i++) for (size_t j = 0; j <= n; j++) hw.push_back(_w[i][j]); char* input_file_name = NULL; char* output_file_name = NULL; char* options_file_name = NULL; int gridSize = 0; int blockSize = 0; // Windows // setlocale() , - , LC_TYPE - , . // "Russian", , . setlocale(LC_ALL, ""); for (int i = 1; i < argc; i++) { if (strcmp(argv[i], "-help") == 0) { std::cout << "Usage :\t" << argv[0] << " [...] 
[g <gridSize>] [b <blockSize>] [-input <inputfile>] [-output <outputfile>]" << std::endl; std::cout << " " << std::endl; std::cout << " " << std::endl; std::cout << "( )" << std::endl; // std::cout << "\t-n < >" << std::endl; std::cout << "\t-c < >" << std::endl; std::cout << "\t-m < >" << std::endl; std::cout << "\t-md < >" << std::endl; std::cout << "\t-a < >" << std::endl; std::cout << "\t-b < >" << std::endl; std::cout << "\t-e < >" << std::endl; std::cout << "\t-ask/noask" << std::endl; std::cout << "\t-trace/notrace" << std::endl; } else if (strcmp(argv[i], "-ask") == 0) ask_mode = ASK; else if (strcmp(argv[i], "-noask") == 0) ask_mode = NOASK; else if (strcmp(argv[i], "-trace") == 0) trace_mode = TRACE; else if (strcmp(argv[i], "-notrace") == 0) trace_mode = NOTRACE; // else if(strcmp(argv[i],"-n")==0) n = atoi(argv[++i]); else if (strcmp(argv[i], "-e") == 0) e = atof(argv[++i]); else if (strcmp(argv[i], "-c") == 0) count = atoi(argv[++i]); else if (strcmp(argv[i], "-md") == 0) md = atoi(argv[++i]); else if (strcmp(argv[i], "-m") == 0) { std::istringstream ss(argv[++i]); hm.clear(); for (size_t i = 0; i < n; i++) hm.push_back(atoi(argv[++i])); } else if (strcmp(argv[i], "-a") == 0) { ha.clear(); for (size_t i = 0; i < n; i++) ha.push_back(atof(argv[++i])); } else if (strcmp(argv[i], "-b") == 0) { hb.clear(); for (size_t i = 0; i < n; i++) hb.push_back(atof(argv[++i])); } else if (strcmp(argv[i], "-input") == 0) input_file_name = argv[++i]; else if (strcmp(argv[i], "-output") == 0) output_file_name = argv[++i]; else if (strcmp(argv[i], "-options") == 0) options_file_name = argv[++i]; else if (strcmp(argv[i], "g") == 0) gridSize = atoi(argv[++i]); else if (strcmp(argv[i], "b") == 0) blockSize = atoi(argv[++i]); } if (input_file_name != NULL) freopen(input_file_name, "r",stdin); if (output_file_name != NULL) freopen(output_file_name, "w",stdout); if (options_file_name != NULL) { hf.clear(); hw.clear(); std::ifstream options(options_file_name); if (!options.is_open()) throw "Error opening file"; std::string line; while (std::getline(options, line)) { std::cout << line << std::endl; std::stringstream lineStream(line); std::string id; std::string cell; thrust::host_vector<double> x; thrust::host_vector<size_t> y; std::getline(lineStream, id, ' '); while (std::getline(lineStream, cell, ' ')) { x.push_back(stod(cell)); y.push_back(stoi(cell)); } if (id[0] == 'N') n = stoi(cell); if (id[0] == 'E') e = stod(cell); if (id[0] == 'M') hm = y; if (id[0] == 'A') ha = x; if (id[0] == 'B') hb = x; if (id[0] == 'F') for (size_t i = 0; i < x.size(); i++) hf.push_back(x[i]); if (id[0] == 'W') for (size_t i = 0; i < x.size(); i++) hw.push_back(x[i]); } } if (ask_mode == ASK) { // std::cout << " :"<< std::endl; std::cin >> n; std::cout << " m[" << n << "]:" << std::endl; hm.clear(); for (size_t i = 0; i < n; i++) { int x; std::cin >> x; hm.push_back(x); } std::cout << " :" << std::endl; std::cin >> md; std::cout << " a[" << n << "]:" << std::endl; ha.clear(); for (size_t i = 0; i < n; i++) { double x; std::cin >> x; ha.push_back(x); } std::cout << " b[" << n << "]:" << std::endl; hb.clear(); for (size_t i = 0; i < n; i++) { double x; std::cin >> x; hb.push_back(x); } std::cout << " :" << std::endl; std::cin >> e; std::cout << " :" << std::endl; std::cin >> count; } // Find/set the device. 
int device_count = 0; hipGetDeviceCount(&device_count); for (int i = 0; i < device_count; ++i) { hipDeviceProp_t properties; hipGetDeviceProperties(&properties, i); std::cout << "Running on GPU " << i << " (" << properties.name << ")" << std::endl; } int major = THRUST_MAJOR_VERSION; int minor = THRUST_MINOR_VERSION; std::cout << "Thrust v" << major << "." << minor << std::endl; thrust::device_vector<size_t> m(hm); thrust::device_vector<double> a(ha); thrust::device_vector<double> b(hb); thrust::device_vector<double> f(hf); thrust::device_vector<double> w(hw); for (size_t i = 0; i < m.size(); i++) assert(m[i]>2); // clock_t time = clock(); size_t mMax = thrust::max(md, thrust::reduce(m.begin(), m.end(), (size_t)0, max_functor<size_t>())); // , int blocks = (gridSize > 0) ? gridSize : thrust::min(15, (int)pow(mMax + 1, 0.333333)); int threads = (blockSize > 0) ? blockSize : thrust::min(15, (int)pow(mMax + 1, 0.333333)); // thrust::host_vector<double> hyArray(blocks * threads); thrust::host_vector<double> hpArray(blocks * threads); thrust::device_vector<unsigned> vArray(blocks * threads * n); thrust::device_vector<double> tArray(blocks * threads * n); thrust::device_vector<double> xArray(blocks * threads * n); thrust::device_vector<double> yArray(blocks * threads); thrust::device_vector<double> pArray(blocks * threads); thrust::device_vector<bool> eArray(blocks * threads); unsigned* vPtr = thrust::raw_pointer_cast(&vArray[0]); double* tPtr = thrust::raw_pointer_cast(&tArray[0]); double* xPtr = thrust::raw_pointer_cast(&xArray[0]); double* yPtr = thrust::raw_pointer_cast(&yArray[0]); double* pPtr = thrust::raw_pointer_cast(&pArray[0]); bool* ePtr = thrust::raw_pointer_cast(&eArray[0]); size_t* mPtr = thrust::raw_pointer_cast(&m[0]); double* aPtr = thrust::raw_pointer_cast(&a[0]); double* bPtr = thrust::raw_pointer_cast(&b[0]); double* fPtr = thrust::raw_pointer_cast(&f[0]); double* wPtr = thrust::raw_pointer_cast(&w[0]); // thrust::host_vector<double> ht(n); thrust::host_vector<double> hx(n); thrust::device_vector<double> x(n); thrust::device_vector<double> x1(n); thrust::device_vector<double> x2(n); double y; double diameter = ::distance(a, b); double* xPtr0 = thrust::raw_pointer_cast(&x[0]); double* xPtr1 = thrust::raw_pointer_cast(&x1[0]); double* xPtr2 = thrust::raw_pointer_cast(&x2[0]); if (trace_mode == TRACE && count == 1) std::cout << "for #1" << std::endl; for (unsigned s = 0; s < count; s++) { if (trace_mode == TRACE && count == 1) std::cout << "while #1" << std::endl; while (true) { // , unsigned long total = total_of(m); if (trace_mode == TRACE && count == 1) std::cout << "kernel0" << std::endl; hipLaunchKernelGGL(( kernel0) , dim3(blocks) , dim3(threads) , 0, 0, vPtr , tPtr , xPtr , yPtr , ePtr , mPtr , aPtr , bPtr , fPtr , wPtr , total , n , f.size() / (n + 1) , w.size() / (n + 1)); thrust::copy(yArray.begin(), yArray.end(), hyArray.begin()); auto it = thrust::find(eArray.begin(), eArray.end(), true); if (it >= eArray.end()) { for (size_t i = 0; i < n; i++) m[i] <<= 1u; continue; } size_t index = thrust::distance(eArray.begin(), it); y = hyArray[index]; thrust::copy(&xArray[index * n], &xArray[index * n + n], x.begin()); if (trace_mode == TRACE && count == 1) { thrust::copy(x.begin(), x.end(), hx.begin()); for (size_t i = 0; i < hx.size(); i++) std::cout << hx[i] << " "; } if (trace_mode == TRACE && count == 1) std::cout << "-> " << y << std::endl; while ((it = thrust::find(it, eArray.end(), true)) < eArray.end()) { size_t index = thrust::distance(eArray.begin(), it++); double 
y1 = hyArray[index]; if (y < y1) continue; y = y1; thrust::copy(&xArray[index * n], &xArray[index * n + n], x.begin()); if (trace_mode == TRACE && count == 1) { thrust::copy(x.begin(), x.end(), hx.begin()); for (size_t i = 0; i < hx.size(); i++) std::cout << hx[i] << " "; } if (trace_mode == TRACE && count == 1) std::cout << "-> " << y << std::endl; } break; } if (trace_mode == TRACE && count == 1) std::cout << "while #2" << std::endl; while (true) { // , // thrust::copy(x.begin(), x.end(), x1.begin()); // // if (trace_mode == TRACE && count == 1) std::cout << "for #2" << std::endl; for (size_t k = 0; k < n; k++) { // double ak = thrust::min(a[k], b[k]); double bk = thrust::max(a[k], b[k]); size_t mk = m[k]; while (true) { if (trace_mode == TRACE && count == 1) std::cout << "kernel1" << std::endl; hipLaunchKernelGGL(( kernel1) , dim3(blocks) , dim3(threads) , 0, 0, xPtr0 , tPtr , xPtr , yPtr , ePtr , aPtr , bPtr , fPtr , wPtr , ak , bk , k , mk , n , f.size() / (n + 1) , w.size() / (n + 1)); thrust::copy(yArray.begin(), yArray.end(), hyArray.begin()); // , auto it = thrust::find(eArray.begin(), eArray.end(), true); assert(it<eArray.end()); size_t index = thrust::distance(eArray.begin(), it++); y = hyArray[index]; thrust::copy(&xArray[index], &xArray[index + 1], &x[k]); if (trace_mode == TRACE && count == 1) { thrust::copy(x.begin(), x.end(), hx.begin()); for (size_t i = 0; i < hx.size(); i++) std::cout << hx[i] << " "; } if (trace_mode == TRACE && count == 1) std::cout << "-> " << y << std::endl; while ((it = thrust::find(it, eArray.end(), true)) < eArray.end()) { size_t index = thrust::distance(eArray.begin(), it++); if (index > mk) break; double y1 = hyArray[index]; if (y < y1) continue; y = y1; thrust::copy(&xArray[index], &xArray[index + 1], &x[k]); if (trace_mode == TRACE && count == 1) { thrust::copy(x.begin(), x.end(), hx.begin()); for (size_t i = 0; i < hx.size(); i++) std::cout << hx[i] << " "; } if (trace_mode == TRACE && count == 1) std::cout << "-> " << y << std::endl; } double dd = thrust::max(ak - bk, bk - ak); double cc = thrust::max(thrust::max(ak, -ak), thrust::max(-bk, bk)); if (dd <= cc * e) break; double xk = x[k]; ak = thrust::max(ak, xk - dd / mk); bk = thrust::min(bk, xk + dd / mk); } } thrust::copy(x.begin(), x.end(), x2.begin()); // double l = -diameter; double h = diameter; // , // x2->x1 if (trace_mode == TRACE && count == 1) std::cout << "while #3" << std::endl; while (true) { if (trace_mode == TRACE && count == 1) std::cout << "kernel2" << std::endl; hipLaunchKernelGGL(( kernel2) , dim3(blocks) , dim3(threads) , 0, 0, tPtr , xPtr , yPtr , pPtr , ePtr , xPtr1 , xPtr2 , aPtr , bPtr , fPtr , wPtr , l , h , md , n , f.size() / (n + 1) , w.size() / (n + 1)); thrust::copy(yArray.begin(), yArray.end(), hyArray.begin()); thrust::copy(pArray.begin(), pArray.end(), hpArray.begin()); // , auto it = thrust::find(eArray.begin(), eArray.end(), true); assert(it<eArray.end()); size_t index = thrust::distance(eArray.begin(), it++); y = hyArray[index]; double p = hpArray[index]; thrust::copy(&xArray[index * n], &xArray[index * n + n], x.begin()); if (trace_mode == TRACE && count == 1) { thrust::copy(x.begin(), x.end(), hx.begin()); for (size_t i = 0; i < hx.size(); i++) std::cout << hx[i] << " "; } if (trace_mode == TRACE && count == 1) std::cout << "-> " << y << std::endl; while ((it = thrust::find(it, eArray.end(), true)) < eArray.end()) { size_t index = thrust::distance(eArray.begin(), it++); double y1 = hyArray[index]; if (y < y1) continue; y = y1; p = hpArray[index]; 
thrust::copy(&xArray[index * n], &xArray[index * n + n], x.begin()); if (trace_mode == TRACE && count == 1) { thrust::copy(x.begin(), x.end(), hx.begin()); for (size_t i = 0; i < hx.size(); i++) std::cout << hx[i] << " "; } if (trace_mode == TRACE && count == 1) std::cout << "-> " << y << std::endl; } double dd = thrust::max(h - l, l - h); double cc = thrust::max(thrust::max(h, -h), thrust::max(-l, l)); if (dd <= cc * e) break; double ll = l; double hh = h; l = thrust::max(ll, p - dd / md); h = thrust::min(hh, p + dd / md); } double dd = delta(x, x1); double cc = thrust::max(module(x), module(x1)); if (dd <= cc * e) break; } } time = clock() - time; double seconds = ((double)time) / CLOCKS_PER_SEC / count; thrust::copy(x.begin(), x.end(), hx.begin()); std::cout << " : " << argv[0] << std::endl; std::cout << " : " << n << std::endl; std::cout << " : "; for (size_t i = 0; i < hm.size(); i++) std::cout << hm[i] << " "; std::cout << "+ " << md; std::cout << std::endl; std::cout << " : "; for (size_t i = 0; i < ha.size(); i++) std::cout << ha[i] << " "; std::cout << std::endl; std::cout << " : "; for (size_t i = 0; i < hb.size(); i++) std::cout << hb[i] << " "; std::cout << std::endl; std::cout << " : " << e << std::endl; std::cout << " : "; for (size_t i = 0; i < hx.size(); i++) std::cout << hx[i] << " "; std::cout << std::endl; std::cout << " : " << y << std::endl; std::cout << " (.) : " << seconds << std::endl; getchar(); getchar(); return 0; }
60e5f44a0bf6c0f1060c706bbd9f7037c44633bd.cu
// Алгоритм Хука и Дживса с использованием одномерной минимизации // Базара М., Шетти К. // Нелинейное программирование. Теория и алгоритмы: // Пер. с англ. - М.: Мир, 1982. // 583 с. #define _CRT_SECURE_NO_WARNINGS #define _SCL_SECURE_NO_WARNINGS #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/version.h> #include <cstdlib> #include <algorithm> #include <cstdio> #include <iostream> #include <sstream> #include <string> #include <assert.h> #include <time.h> #include <fstream> using namespace std; using namespace thrust; // Thrust is a C++ template library for CUDA based on the Standard Template Library (STL). // Thrust allows you to implement high performance parallel applications with minimal programming effort through a high-level interface that is fully interoperable with CUDA C. // Thrust provides a rich collection of data parallel primitives such as scan, sort, and reduce, which can be composed together to implement complex algorithms with concise, readable source code. // By describing your computation in terms of these high-level abstractions you provide Thrust with the freedom to select the most efficient implementation automatically. // As a result, Thrust can be utilized in rapid prototyping of CUDA applications, where programmer productivity matters most, as well as in production, where robustness and absolute performance are crucial. // Read more at: http://docs.nvidia.com/cuda/thrust/index.html#ixzz3hymTnQwX double module(thrust::device_vector<double>& x); double delta(thrust::device_vector<double>& x, thrust::device_vector<double>& y); double distance(thrust::device_vector<double>& x, thrust::device_vector<double>& y); unsigned long total_of(thrust::device_vector<size_t>& m); template <typename T> struct inc_functor { __host__ __device__ T operator()(const T& value) const { return value + 1; } }; template <typename T> struct square_functor { __host__ __device__ T operator()(const T& value) const { return value * value; } }; template <typename T> struct add_functor { __host__ __device__ T operator()(const T& value1, const T& value2) const { return value1 + value2; } }; template <typename T> struct sub_functor { __host__ __device__ T operator()(const T& value1, const T& value2) const { return value1 - value2; } }; template <typename T> struct mul_functor { __host__ __device__ T operator()(const T& value1, const T& value2) const { return value1 * value2; } }; template <typename T> struct diff_functor { __host__ __device__ T operator()(const T& value1, const T& value2) const { return thrust::max(value1 - value2, value2 - value1); } }; template <typename T> struct abs_functor { __host__ __device__ T operator()(const T& value) const { return thrust::max(value, -value); } }; template <typename T> struct max_functor { __host__ __device__ T operator()(const T& value1, const T& value2) const { return thrust::max(value1, value2); } }; ///////////////////////////////////////////////////////// // Вычисление числа узлов решётки // m - число сегментов по каждому из измерений unsigned long total_of(thrust::device_vector<size_t>& m) { return thrust::transform_reduce(m.begin(), m.end(), inc_functor<size_t>(), 1UL, mul_functor<unsigned long>()); } ///////////////////////////////////////////////////////// // Вычисление модуля вектора double module(thrust::device_vector<double>& x) { return thrust::transform_reduce(x.begin(), x.end(), abs_functor<double>(), 0.0, max_functor<double>()); } ///////////////////////////////////////////////////////// // Вычисление 
растояния между двумя векторами координат double delta(thrust::device_vector<double>& x, thrust::device_vector<double>& y) { size_t i = thrust::min(x.size(), y.size()); thrust::device_vector<double> diff(thrust::max(x.size(), y.size())); thrust::transform(x.begin(), x.begin() + i, y.begin(), diff.begin(), diff_functor<double>()); thrust::transform(x.begin() + i, x.end(), diff.begin() + i, abs_functor<double>()); thrust::transform(y.begin() + i, y.end(), diff.begin() + i, abs_functor<double>()); return thrust::reduce(diff.begin(), diff.end(), 0.0, max_functor<double>()); } ///////////////////////////////////////////////////////// // Вычисление растояния между двумя векторами координат double distance(thrust::device_vector<double>& x, thrust::device_vector<double>& y) { size_t i = thrust::min(x.size(), y.size()); thrust::device_vector<double> sub(std::max(x.size(), y.size())); thrust::transform(x.begin(), x.end(), y.begin(), sub.begin(), sub_functor<double>()); thrust::copy(x.begin() + i, x.end(), sub.begin() + i); thrust::copy(y.begin() + i, y.end(), sub.begin() + i); return std::sqrt(thrust::transform_reduce(sub.begin(), sub.end(), square_functor<double>(), 0.0, add_functor<double>())); } enum t_ask_mode { NOASK = 0, ASK = 1 }; enum t_trace_mode { NOTRACE = 0, TRACE = 1 }; t_ask_mode ask_mode = NOASK; t_trace_mode trace_mode = NOTRACE; ///////////////////////////////////////////////////////// // Дефолтные значения static const unsigned _count = 1; static const size_t _n = 2; static const size_t _md = 20; static const size_t _m[] = {20, 20}; static const double _a[] = {0, 0}; static const double _b[] = {1000, 1000}; static const double _f1[] = {0, 0, 500}; static const double _f2[] = {100, 100, 500}; static const double* _f[] = {_f1, _f2}; static const double _w1[] = {0, 0, 3040}; static const double _w2[] = {150, 180, 1800}; static const double _w3[] = {240, 200, 800}; static const double _w4[] = {260, 90, 1200}; static const double* _w[] = {_w1, _w2, _w3, _w4}; static const double _e = 1e-8; ///////////////////////////////////////////////////////// // Вычисление вектора индексов координат решётки по номеру узла // index - номер узла решётки __device__ void vector_of(unsigned* vector, unsigned long index, size_t* m, size_t n) { for (size_t i = 0; i < n; i++) { unsigned long m1 = 1ul + m[i]; vector[i] = index % m1; index /= m1; } } ///////////////////////////////////////////////////////// // Преобразование вектора индексов координат решётки // в вектор координат точки // vector - вектор индексов координат решётки // m - число сегментов по каждому из измерений // a - вектор минимальных координат точек // b - вектор максимальных координат точек __device__ void point_of(double* point, unsigned* vector, size_t* m, double* a, double* b, size_t n) { for (size_t i = 0; i < n; i++) point[i] = (a[i] * (m[i] - vector[i]) + b[i] * vector[i]) / m[i]; } ///////////////////////////////////////////////////////// // Проверка принадлежности точки области, заданной ограничениями // x - координаты точки // f - набор проверочных функций // a - вектор минимальных координат точек // b - вектор максимальных координат точек __device__ bool check(double* x, double* f, double* a, double* b, size_t n, size_t m) { for (int j = 0; j < m; j++) { double s1 = 0; for (int i = 0; i < n; i++) { double y = x[i] - f[j * (n + 1) + i]; s1 += y * y; } if (sqrt(s1) > f[j * (n + 1) + n]) return false; } return true; } ///////////////////////////////////////////////////////// // Искомая функция __device__ double target(double* x, 
double* w, size_t n, size_t m) { double s = 0; for (int j = 0; j < m; j++) { double s1 = 0; for (int i = 0; i < n; i++) { double y = x[i] - w[j * (n + 1) + i]; s1 += y * y; } s += sqrt(s1) * w[j * (n + 1) + n]; } return s; } __device__ void copy(double* x, double* y, size_t n) { for (size_t i = 0; i < n; i++) x[i] = y[i]; } __global__ void kernel0( unsigned* vPtr, double* tPtr, double* xPtr, double* yPtr, bool* ePtr, size_t* m, double* a, double* b, double* f, double* w, unsigned long total, size_t n, size_t mf, size_t mw) { // Получаем идентификатор нити int id = blockDim.x * blockIdx.x + threadIdx.x; unsigned* v = &vPtr[id * n]; double* t = &tPtr[id * n]; double* x = &xPtr[id * n]; double* y = &yPtr[id]; bool* e = &ePtr[id]; *e = false; for (unsigned long index = blockDim.x * blockIdx.x + threadIdx.x; index < total; index += blockDim.x * gridDim.x) { vector_of(v, index, m, n); point_of(t, v, m, a, b, n); if (!check(t, f, a, b, n, mf)) continue; if (!*e) { ::copy(x, t, n); *y = target(t, w, n, mw); *e = true; continue; } double y1 = target(t, w, n, mw); if (y1 < *y) { ::copy(x, t, n); *y = y1; } } } __global__ void kernel1( double* x0, double* tPtr, double* xPtr, double* yPtr, bool* ePtr, double* a, double* b, double* f, double* w, double ak, double bk, size_t k, size_t mk, size_t n, size_t mf, size_t mw) { // Получаем идентификатор нити int id = blockDim.x * blockIdx.x + threadIdx.x; double* t = &tPtr[id * n]; double* x = &xPtr[id]; double* y = &yPtr[id]; bool* e = &ePtr[id]; ::copy(t, x0, n); *e = false; for (size_t index = blockDim.x * blockIdx.x + threadIdx.x; index <= mk; index += blockDim.x * gridDim.x) { t[k] = (ak * (mk - index) + bk * index) / mk; if (!check(t, f, a, b, n, mf)) continue; if (!*e) { *x = t[k]; *y = target(t, w, n, mw); *e = true; continue; } double y1 = target(t, w, n, mw); if (y1 < *y) { *x = t[k]; *y = y1; } } } __global__ void kernel2( double* tPtr, double* xPtr, double* yPtr, double* pPtr, bool* ePtr, double* x1, double* x2, double* a, double* b, double* f, double* w, double l, double h, size_t md, size_t n, size_t mf, size_t mw) { // Получаем идентификатор нити int id = blockDim.x * blockIdx.x + threadIdx.x; double* t = &tPtr[id * n]; double* x = &xPtr[id * n]; double* y = &yPtr[id]; double* p = &pPtr[id]; bool* e = &ePtr[id]; *e = false; for (size_t index = blockDim.x * blockIdx.x + threadIdx.x; index <= md + md; index += blockDim.x * gridDim.x) { double pt = (l * (md + md - index) + h * index) / (md + md); for (size_t i = 0; i < n; i++) t[i] = x2[i] * (1.0 - pt) + x1[i] * pt; if (!check(t, f, a, b, n, mf)) continue; if (!*e) { ::copy(x, t, n); *y = target(t, w, n, mw); *p = pt; *e = true; continue; } double y1 = target(t, w, n, mw); if (y1 < *y) { ::copy(x, t, n); *y = y1; *p = pt; } } } int main(int argc, char* argv[]) { // http://stackoverflow.com/questions/2236197/what-is-the-easiest-way-to-initialize-a-stdvector-with-hardcoded-elements unsigned count = _count; size_t n = _n; double e = _e; size_t md = _md; thrust::host_vector<size_t> hm(_m, _m + sizeof(_m) / sizeof(_m[0])); thrust::host_vector<double> ha(_a, _a + sizeof(_a) / sizeof(_a[0])); thrust::host_vector<double> hb(_b, _b + sizeof(_b) / sizeof(_b[0])); thrust::host_vector<double> hf; thrust::host_vector<double> hw; for (size_t i = 0; i < sizeof(_f) / sizeof(_f[0]); i++) for (size_t j = 0; j <= n; j++) hf.push_back(_f[i][j]); for (size_t i = 0; i < sizeof(_w) / sizeof(_w[0]); i++) for (size_t j = 0; j <= n; j++) hw.push_back(_w[i][j]); char* input_file_name = NULL; char* output_file_name = NULL; 
char* options_file_name = NULL; int gridSize = 0; int blockSize = 0; // Поддержка кириллицы в консоли Windows // Функция setlocale() имеет два параметра, первый параметр - тип категории локали, в нашем случае LC_TYPE - набор символов, второй параметр — значение локали. // Вместо второго аргумента можно писать "Russian", или оставлять пустые двойные кавычки, тогда набор символов будет такой же как и в ОС. setlocale(LC_ALL, ""); for (int i = 1; i < argc; i++) { if (strcmp(argv[i], "-help") == 0) { std::cout << "Usage :\t" << argv[0] << " [...] [g <gridSize>] [b <blockSize>] [-input <inputfile>] [-output <outputfile>]" << std::endl; std::cout << "Алгоритм циклического покоординатного спуска" << std::endl; std::cout << "Используя алгоритм одномерной оптимизации по направлению" << std::endl; std::cout << "(Алгоритм деления значений аргумента функции)" << std::endl; // std::cout << "\t-n <размерность пространства>" << std::endl; std::cout << "\t-c <количество повторений алгоритма для замера времени>" << std::endl; std::cout << "\t-m <число сегментов по каждому из измерений>" << std::endl; std::cout << "\t-md <число сегментов по вычисленному направлению>" << std::endl; std::cout << "\t-a <минимальные координаты по каждому из измерений>" << std::endl; std::cout << "\t-b <максимальные координаты по каждому из измерений>" << std::endl; std::cout << "\t-e <точность вычислений>" << std::endl; std::cout << "\t-ask/noask" << std::endl; std::cout << "\t-trace/notrace" << std::endl; } else if (strcmp(argv[i], "-ask") == 0) ask_mode = ASK; else if (strcmp(argv[i], "-noask") == 0) ask_mode = NOASK; else if (strcmp(argv[i], "-trace") == 0) trace_mode = TRACE; else if (strcmp(argv[i], "-notrace") == 0) trace_mode = NOTRACE; // else if(strcmp(argv[i],"-n")==0) n = atoi(argv[++i]); else if (strcmp(argv[i], "-e") == 0) e = atof(argv[++i]); else if (strcmp(argv[i], "-c") == 0) count = atoi(argv[++i]); else if (strcmp(argv[i], "-md") == 0) md = atoi(argv[++i]); else if (strcmp(argv[i], "-m") == 0) { std::istringstream ss(argv[++i]); hm.clear(); for (size_t i = 0; i < n; i++) hm.push_back(atoi(argv[++i])); } else if (strcmp(argv[i], "-a") == 0) { ha.clear(); for (size_t i = 0; i < n; i++) ha.push_back(atof(argv[++i])); } else if (strcmp(argv[i], "-b") == 0) { hb.clear(); for (size_t i = 0; i < n; i++) hb.push_back(atof(argv[++i])); } else if (strcmp(argv[i], "-input") == 0) input_file_name = argv[++i]; else if (strcmp(argv[i], "-output") == 0) output_file_name = argv[++i]; else if (strcmp(argv[i], "-options") == 0) options_file_name = argv[++i]; else if (strcmp(argv[i], "g") == 0) gridSize = atoi(argv[++i]); else if (strcmp(argv[i], "b") == 0) blockSize = atoi(argv[++i]); } if (input_file_name != NULL) freopen(input_file_name, "r",stdin); if (output_file_name != NULL) freopen(output_file_name, "w",stdout); if (options_file_name != NULL) { hf.clear(); hw.clear(); std::ifstream options(options_file_name); if (!options.is_open()) throw "Error opening file"; std::string line; while (std::getline(options, line)) { std::cout << line << std::endl; std::stringstream lineStream(line); std::string id; std::string cell; thrust::host_vector<double> x; thrust::host_vector<size_t> y; std::getline(lineStream, id, ' '); while (std::getline(lineStream, cell, ' ')) { x.push_back(stod(cell)); y.push_back(stoi(cell)); } if (id[0] == 'N') n = stoi(cell); if (id[0] == 'E') e = stod(cell); if (id[0] == 'M') hm = y; if (id[0] == 'A') ha = x; if (id[0] == 'B') hb = x; if (id[0] == 'F') for (size_t i = 0; i < x.size(); i++) 
hf.push_back(x[i]); if (id[0] == 'W') for (size_t i = 0; i < x.size(); i++) hw.push_back(x[i]); } } if (ask_mode == ASK) { // std::cout << "Введите размерность пространства:"<< std::endl; std::cin >> n; std::cout << "Введите число сегментов по каждому из измерений m[" << n << "]:" << std::endl; hm.clear(); for (size_t i = 0; i < n; i++) { int x; std::cin >> x; hm.push_back(x); } std::cout << "Введите число сегментов по вычисленному направлению:" << std::endl; std::cin >> md; std::cout << "Введите минимальные координаты по каждому из измерений a[" << n << "]:" << std::endl; ha.clear(); for (size_t i = 0; i < n; i++) { double x; std::cin >> x; ha.push_back(x); } std::cout << "Введите максимальные координаты по каждому из измерений b[" << n << "]:" << std::endl; hb.clear(); for (size_t i = 0; i < n; i++) { double x; std::cin >> x; hb.push_back(x); } std::cout << "Введите точность вычислений:" << std::endl; std::cin >> e; std::cout << "Введите количество повторений алгоритма для замера времени:" << std::endl; std::cin >> count; } // Find/set the device. int device_count = 0; cudaGetDeviceCount(&device_count); for (int i = 0; i < device_count; ++i) { cudaDeviceProp properties; cudaGetDeviceProperties(&properties, i); std::cout << "Running on GPU " << i << " (" << properties.name << ")" << std::endl; } int major = THRUST_MAJOR_VERSION; int minor = THRUST_MINOR_VERSION; std::cout << "Thrust v" << major << "." << minor << std::endl; thrust::device_vector<size_t> m(hm); thrust::device_vector<double> a(ha); thrust::device_vector<double> b(hb); thrust::device_vector<double> f(hf); thrust::device_vector<double> w(hw); for (size_t i = 0; i < m.size(); i++) assert(m[i]>2); // Алгоритм clock_t time = clock(); size_t mMax = thrust::max(md, thrust::reduce(m.begin(), m.end(), (size_t)0, max_functor<size_t>())); // Определим оптимальное разбиения на процессы, нити int blocks = (gridSize > 0) ? gridSize : thrust::min(15, (int)pow(mMax + 1, 0.333333)); int threads = (blockSize > 0) ? 
blockSize : thrust::min(15, (int)pow(mMax + 1, 0.333333)); // Аллокируем память для параллельных вычислений thrust::host_vector<double> hyArray(blocks * threads); thrust::host_vector<double> hpArray(blocks * threads); thrust::device_vector<unsigned> vArray(blocks * threads * n); thrust::device_vector<double> tArray(blocks * threads * n); thrust::device_vector<double> xArray(blocks * threads * n); thrust::device_vector<double> yArray(blocks * threads); thrust::device_vector<double> pArray(blocks * threads); thrust::device_vector<bool> eArray(blocks * threads); unsigned* vPtr = thrust::raw_pointer_cast(&vArray[0]); double* tPtr = thrust::raw_pointer_cast(&tArray[0]); double* xPtr = thrust::raw_pointer_cast(&xArray[0]); double* yPtr = thrust::raw_pointer_cast(&yArray[0]); double* pPtr = thrust::raw_pointer_cast(&pArray[0]); bool* ePtr = thrust::raw_pointer_cast(&eArray[0]); size_t* mPtr = thrust::raw_pointer_cast(&m[0]); double* aPtr = thrust::raw_pointer_cast(&a[0]); double* bPtr = thrust::raw_pointer_cast(&b[0]); double* fPtr = thrust::raw_pointer_cast(&f[0]); double* wPtr = thrust::raw_pointer_cast(&w[0]); // Алгоритм thrust::host_vector<double> ht(n); thrust::host_vector<double> hx(n); thrust::device_vector<double> x(n); thrust::device_vector<double> x1(n); thrust::device_vector<double> x2(n); double y; double diameter = ::distance(a, b); double* xPtr0 = thrust::raw_pointer_cast(&x[0]); double* xPtr1 = thrust::raw_pointer_cast(&x1[0]); double* xPtr2 = thrust::raw_pointer_cast(&x2[0]); if (trace_mode == TRACE && count == 1) std::cout << "for #1" << std::endl; for (unsigned s = 0; s < count; s++) { if (trace_mode == TRACE && count == 1) std::cout << "while #1" << std::endl; while (true) { // Находим первую точку в области, заданной ограничениями unsigned long total = total_of(m); if (trace_mode == TRACE && count == 1) std::cout << "kernel0" << std::endl; kernel0 <<< blocks , threads >>> (vPtr , tPtr , xPtr , yPtr , ePtr , mPtr , aPtr , bPtr , fPtr , wPtr , total , n , f.size() / (n + 1) , w.size() / (n + 1)); thrust::copy(yArray.begin(), yArray.end(), hyArray.begin()); auto it = thrust::find(eArray.begin(), eArray.end(), true); if (it >= eArray.end()) { for (size_t i = 0; i < n; i++) m[i] <<= 1u; continue; } size_t index = thrust::distance(eArray.begin(), it); y = hyArray[index]; thrust::copy(&xArray[index * n], &xArray[index * n + n], x.begin()); if (trace_mode == TRACE && count == 1) { thrust::copy(x.begin(), x.end(), hx.begin()); for (size_t i = 0; i < hx.size(); i++) std::cout << hx[i] << " "; } if (trace_mode == TRACE && count == 1) std::cout << "-> " << y << std::endl; while ((it = thrust::find(it, eArray.end(), true)) < eArray.end()) { size_t index = thrust::distance(eArray.begin(), it++); double y1 = hyArray[index]; if (y < y1) continue; y = y1; thrust::copy(&xArray[index * n], &xArray[index * n + n], x.begin()); if (trace_mode == TRACE && count == 1) { thrust::copy(x.begin(), x.end(), hx.begin()); for (size_t i = 0; i < hx.size(); i++) std::cout << hx[i] << " "; } if (trace_mode == TRACE && count == 1) std::cout << "-> " << y << std::endl; } break; } if (trace_mode == TRACE && count == 1) std::cout << "while #2" << std::endl; while (true) { // Находим следующую точку в области, заданной ограничениями // Используя алгоритм одномерной оптимизации по направлению thrust::copy(x.begin(), x.end(), x1.begin()); // Сохранение значения последней точки // Цикл по измерениям if (trace_mode == TRACE && count == 1) std::cout << "for #2" << std::endl; for (size_t k = 0; k < n; k++) { // Алгоритм 
one-dimensional optimization along the direction double ak = thrust::min(a[k], b[k]); double bk = thrust::max(a[k], b[k]); size_t mk = m[k]; while (true) { if (trace_mode == TRACE && count == 1) std::cout << "kernel1" << std::endl; kernel1 <<< blocks , threads >>> (xPtr0 , tPtr , xPtr , yPtr , ePtr , aPtr , bPtr , fPtr , wPtr , ak , bk , k , mk , n , f.size() / (n + 1) , w.size() / (n + 1)); thrust::copy(yArray.begin(), yArray.end(), hyArray.begin()); // Find the first point in the region defined by the constraints auto it = thrust::find(eArray.begin(), eArray.end(), true); assert(it<eArray.end()); size_t index = thrust::distance(eArray.begin(), it++); y = hyArray[index]; thrust::copy(&xArray[index], &xArray[index + 1], &x[k]); if (trace_mode == TRACE && count == 1) { thrust::copy(x.begin(), x.end(), hx.begin()); for (size_t i = 0; i < hx.size(); i++) std::cout << hx[i] << " "; } if (trace_mode == TRACE && count == 1) std::cout << "-> " << y << std::endl; while ((it = thrust::find(it, eArray.end(), true)) < eArray.end()) { size_t index = thrust::distance(eArray.begin(), it++); if (index > mk) break; double y1 = hyArray[index]; if (y < y1) continue; y = y1; thrust::copy(&xArray[index], &xArray[index + 1], &x[k]); if (trace_mode == TRACE && count == 1) { thrust::copy(x.begin(), x.end(), hx.begin()); for (size_t i = 0; i < hx.size(); i++) std::cout << hx[i] << " "; } if (trace_mode == TRACE && count == 1) std::cout << "-> " << y << std::endl; } double dd = thrust::max(ak - bk, bk - ak); double cc = thrust::max(thrust::max(ak, -ak), thrust::max(-bk, bk)); if (dd <= cc * e) break; double xk = x[k]; ak = thrust::max(ak, xk - dd / mk); bk = thrust::min(bk, xk + dd / mk); } } thrust::copy(x.begin(), x.end(), x2.begin()); // Save the value of the last point double l = -diameter; double h = diameter; // Find the next point in the region defined by the constraints // Using the algorithm of one-dimensional optimization along the direction x2->x1 if (trace_mode == TRACE && count == 1) std::cout << "while #3" << std::endl; while (true) { if (trace_mode == TRACE && count == 1) std::cout << "kernel2" << std::endl; kernel2 <<< blocks , threads >>> (tPtr , xPtr , yPtr , pPtr , ePtr , xPtr1 , xPtr2 , aPtr , bPtr , fPtr , wPtr , l , h , md , n , f.size() / (n + 1) , w.size() / (n + 1)); thrust::copy(yArray.begin(), yArray.end(), hyArray.begin()); thrust::copy(pArray.begin(), pArray.end(), hpArray.begin()); // Find the first point in the region defined by the constraints auto it = thrust::find(eArray.begin(), eArray.end(), true); assert(it<eArray.end()); size_t index = thrust::distance(eArray.begin(), it++); y = hyArray[index]; double p = hpArray[index]; thrust::copy(&xArray[index * n], &xArray[index * n + n], x.begin()); if (trace_mode == TRACE && count == 1) { thrust::copy(x.begin(), x.end(), hx.begin()); for (size_t i = 0; i < hx.size(); i++) std::cout << hx[i] << " "; } if (trace_mode == TRACE && count == 1) std::cout << "-> " << y << std::endl; while ((it = thrust::find(it, eArray.end(), true)) < eArray.end()) { size_t index = thrust::distance(eArray.begin(), it++); double y1 = hyArray[index]; if (y < y1) continue; y = y1; p = hpArray[index]; thrust::copy(&xArray[index * n], &xArray[index * n + n], x.begin()); if (trace_mode == TRACE && count == 1) { thrust::copy(x.begin(), x.end(), hx.begin()); for (size_t i = 0; i < hx.size(); i++) std::cout << hx[i] << " "; } if (trace_mode == TRACE && count == 1) std::cout << "-> " << y << std::endl; } double dd = thrust::max(h - l, l - h); double cc = thrust::max(thrust::max(h, -h), thrust::max(-l, l)); if (dd <= cc * e) break;
double ll = l; double hh = h; l = thrust::max(ll, p - dd / md); h = thrust::min(hh, p + dd / md); } double dd = delta(x, x1); double cc = thrust::max(module(x), module(x1)); if (dd <= cc * e) break; } } time = clock() - time; double seconds = ((double)time) / CLOCKS_PER_SEC / count; thrust::copy(x.begin(), x.end(), hx.begin()); std::cout << "Executable file : " << argv[0] << std::endl; std::cout << "Space dimension : " << n << std::endl; std::cout << "Number of segments : "; for (size_t i = 0; i < hm.size(); i++) std::cout << hm[i] << " "; std::cout << "+ " << md; std::cout << std::endl; std::cout << "Minimum coordinates : "; for (size_t i = 0; i < ha.size(); i++) std::cout << ha[i] << " "; std::cout << std::endl; std::cout << "Maximum coordinates : "; for (size_t i = 0; i < hb.size(); i++) std::cout << hb[i] << " "; std::cout << std::endl; std::cout << "Computation accuracy : " << e << std::endl; std::cout << "Minimum point : "; for (size_t i = 0; i < hx.size(); i++) std::cout << hx[i] << " "; std::cout << std::endl; std::cout << "Minimum value : " << y << std::endl; std::cout << "Computation time (sec.) : " << seconds << std::endl; getchar(); getchar(); return 0; }
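The loops above repeatedly scan a boolean feasibility mask with thrust::find and keep the smallest objective value among the feasible candidates. A minimal host-side sketch of that scan pattern, with hypothetical vectors e (feasibility flags) and y (objective values) standing in for eArray/hyArray:

#include <thrust/host_vector.h>
#include <thrust/find.h>
#include <thrust/distance.h>
#include <cstdio>

int main() {
    // Hypothetical stand-ins for eArray (constraint flags) and hyArray (objective values).
    thrust::host_vector<bool>   e(5, false);
    thrust::host_vector<double> y(5);
    e[1] = true; e[3] = true; e[4] = true;
    y[0] = 9.0; y[1] = 4.0; y[2] = 1.0; y[3] = 3.0; y[4] = 5.0;

    bool   found = false;
    double best  = 0.0;
    // Walk the feasible candidates only, keeping the smallest objective value.
    for (auto it = thrust::find(e.begin(), e.end(), true);
         it != e.end();
         it = thrust::find(it + 1, e.end(), true)) {
        size_t idx = thrust::distance(e.begin(), it);
        if (!found || y[idx] < best) { best = y[idx]; found = true; }
    }
    if (found) std::printf("best feasible value: %f\n", best);  // expects 3.0 (index 3)
    return 0;
}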
16cced8b745a83daea20078ba7ad070e3e587188.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Aurora Renderer * Copyright (c) 2013 Michal Siejak * Licensed under MIT open-source license, see COPYING.txt file for details. */ #include <stdafx.h> #include <kernels/kernels.h> using namespace Aurora; #include <kernels/lib/common.cuh> #include <kernels/lib/intersect.cuh> #include <kernels/lib/light.cuh> __global__ static void cudaRaycastKernel(const Geometry geometry, const ShadersArray shaders, const LightsArray lights, const unsigned int numRays, Ray* rays, HitPoint* hitpoints) { unsigned int threadId = blockDim.x * blockIdx.x + threadIdx.x; if(threadId >= numRays) return; Ray& ray = rays[threadId]; HitPoint& hit = hitpoints[threadId]; if(intersect(geometry, ray, hit)) { float3 N, T, S; getBasisVectors(geometry, hit.triangleID, hit.u, hit.v, N, S, T); const float3 P = ray.point(); const unsigned int shaderID = getSafeID(geometry.shaders[hit.triangleID]); const Shader shader = shaders[shaderID]; hit.color = make_float3(0.0f); for(unsigned int i=0; i<lights.size; i++) { const float3 gL = normalize(lights[i].position - P); const float3 L = worldToLocal(gL, N, S, T); const float dotNL = cosTheta(L); if(dotNL > 0.0f) hit.color = hit.color + dotNL * shader.diffuse * lights[i].intensity * shader.color * lights[i].color; } } } void cudaRaycast(const Geometry& geometry, const ShadersArray& shaders, const LightsArray& lights, const unsigned int numRays, Ray* rays, HitPoint* hitpoints) { dim3 blockSize(256); dim3 gridSize = make_grid(blockSize, dim3(numRays)); hipLaunchKernelGGL(( cudaRaycastKernel), dim3(gridSize), dim3(blockSize), 0, 0, geometry, shaders, lights, numRays, rays, hitpoints); }
16cced8b745a83daea20078ba7ad070e3e587188.cu
/* Aurora Renderer * Copyright (c) 2013 Michal Siejak * Licensed under MIT open-source license, see COPYING.txt file for details. */ #include <stdafx.h> #include <kernels/kernels.h> using namespace Aurora; #include <kernels/lib/common.cuh> #include <kernels/lib/intersect.cuh> #include <kernels/lib/light.cuh> __global__ static void cudaRaycastKernel(const Geometry geometry, const ShadersArray shaders, const LightsArray lights, const unsigned int numRays, Ray* rays, HitPoint* hitpoints) { unsigned int threadId = blockDim.x * blockIdx.x + threadIdx.x; if(threadId >= numRays) return; Ray& ray = rays[threadId]; HitPoint& hit = hitpoints[threadId]; if(intersect(geometry, ray, hit)) { float3 N, T, S; getBasisVectors(geometry, hit.triangleID, hit.u, hit.v, N, S, T); const float3 P = ray.point(); const unsigned int shaderID = getSafeID(geometry.shaders[hit.triangleID]); const Shader shader = shaders[shaderID]; hit.color = make_float3(0.0f); for(unsigned int i=0; i<lights.size; i++) { const float3 gL = normalize(lights[i].position - P); const float3 L = worldToLocal(gL, N, S, T); const float dotNL = cosTheta(L); if(dotNL > 0.0f) hit.color = hit.color + dotNL * shader.diffuse * lights[i].intensity * shader.color * lights[i].color; } } } void cudaRaycast(const Geometry& geometry, const ShadersArray& shaders, const LightsArray& lights, const unsigned int numRays, Ray* rays, HitPoint* hitpoints) { dim3 blockSize(256); dim3 gridSize = make_grid(blockSize, dim3(numRays)); cudaRaycastKernel<<<gridSize, blockSize>>>(geometry, shaders, lights, numRays, rays, hitpoints); }
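The .hip/.cu pair above differs essentially only in launch syntax: hipify rewrites the CUDA triple-chevron launch into hipLaunchKernelGGL with explicit grid, block, shared-memory, and stream arguments. A minimal sketch with a hypothetical scaleKernel, showing the CUDA form and, in a comment, the HIP form hipify would be expected to emit:

#include <cuda_runtime.h>

__global__ void scaleKernel(float* data, float s, unsigned int n) {
    unsigned int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n) data[i] *= s;  // element-wise scale
}

void scale(float* d_data, float s, unsigned int n) {
    dim3 block(256);
    dim3 grid((n + block.x - 1) / block.x);
    // CUDA source form:
    scaleKernel<<<grid, block, 0, 0>>>(d_data, s, n);
    // hipify would be expected to rewrite the line above as:
    //   hipLaunchKernelGGL(scaleKernel, dim3(grid), dim3(block), 0, 0, d_data, s, n);
}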
b6cdbe1db9a3fc344ec683d19fb1238df7144a35.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2019-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <common/cudart_utils.h> #include <gtest/gtest.h> #include <stdio.h> #include <stdlib.h> #include <cuda_utils.cuh> #include <limits> #include <random/rng.cuh> #include <stats/minmax.cuh> #include "test_utils.h" namespace MLCommon { namespace Stats { ///@todo: need to add tests for verifying the column subsampling feature template <typename T> struct MinMaxInputs { T tolerance; int rows, cols; unsigned long long int seed; }; template <typename T> ::std::ostream& operator<<(::std::ostream& os, const MinMaxInputs<T>& dims) { return os; } template <typename T> __global__ void naiveMinMaxInitKernel(int ncols, T* globalmin, T* globalmax, T init_val) { int tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid >= ncols) return; globalmin[tid] = init_val; globalmax[tid] = -init_val; } template <typename T> __global__ void naiveMinMaxKernel(const T* data, int nrows, int ncols, T* globalmin, T* globalmax) { int tid = threadIdx.x + blockIdx.x * blockDim.x; int col = tid / nrows; if (col < ncols) { T val = data[tid]; if (!isnan(val)) { raft::myAtomicMin(&globalmin[col], val); raft::myAtomicMax(&globalmax[col], val); } } } template <typename T> void naiveMinMax(const T* data, int nrows, int ncols, T* globalmin, T* globalmax, hipStream_t stream) { const int TPB = 128; int nblks = raft::ceildiv(ncols, TPB); T init_val = std::numeric_limits<T>::max(); hipLaunchKernelGGL(( naiveMinMaxInitKernel), dim3(nblks), dim3(TPB), 0, stream, ncols, globalmin, globalmax, init_val); CUDA_CHECK(hipGetLastError()); nblks = raft::ceildiv(nrows * ncols, TPB); hipLaunchKernelGGL(( naiveMinMaxKernel), dim3(nblks), dim3(TPB), 0, stream, data, nrows, ncols, globalmin, globalmax); CUDA_CHECK(hipGetLastError()); } template <typename T> __global__ void nanKernel(T* data, const bool* mask, int len, T nan) { int tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid >= len) return; if (!mask[tid]) data[tid] = nan; } template <typename T> class MinMaxTest : public ::testing::TestWithParam<MinMaxInputs<T>> { protected: void SetUp() override { params = ::testing::TestWithParam<MinMaxInputs<T>>::GetParam(); raft::random::Rng r(params.seed); int len = params.rows * params.cols; CUDA_CHECK(hipStreamCreate(&stream)); raft::allocate(data, len); raft::allocate(mask, len); raft::allocate(minmax_act, 2 * params.cols); raft::allocate(minmax_ref, 2 * params.cols); r.normal(data, len, (T)0.0, (T)1.0, stream); T nan_prob = 0.01; r.bernoulli(mask, len, nan_prob, stream); const int TPB = 256; hipLaunchKernelGGL(( nanKernel), dim3(raft::ceildiv(len, TPB)), dim3(TPB), 0, stream, data, mask, len, std::numeric_limits<T>::quiet_NaN()); CUDA_CHECK(hipPeekAtLastError()); naiveMinMax(data, params.rows, params.cols, minmax_ref, minmax_ref + params.cols, stream); minmax<T, 512>(data, nullptr, nullptr, params.rows, params.cols, params.rows, minmax_act, minmax_act + params.cols, 
nullptr, stream); } void TearDown() override { CUDA_CHECK(hipFree(data)); CUDA_CHECK(hipFree(mask)); CUDA_CHECK(hipFree(minmax_act)); CUDA_CHECK(hipFree(minmax_ref)); } protected: MinMaxInputs<T> params; T *data, *minmax_act, *minmax_ref; bool* mask; hipStream_t stream; }; const std::vector<MinMaxInputs<float>> inputsf = { {0.00001f, 1024, 32, 1234ULL}, {0.00001f, 1024, 64, 1234ULL}, {0.00001f, 1024, 128, 1234ULL}, {0.00001f, 1024, 256, 1234ULL}, {0.00001f, 1024, 512, 1234ULL}, {0.00001f, 1024, 1024, 1234ULL}, {0.00001f, 4096, 32, 1234ULL}, {0.00001f, 4096, 64, 1234ULL}, {0.00001f, 4096, 128, 1234ULL}, {0.00001f, 4096, 256, 1234ULL}, {0.00001f, 4096, 512, 1234ULL}, {0.00001f, 4096, 1024, 1234ULL}, {0.00001f, 8192, 32, 1234ULL}, {0.00001f, 8192, 64, 1234ULL}, {0.00001f, 8192, 128, 1234ULL}, {0.00001f, 8192, 256, 1234ULL}, {0.00001f, 8192, 512, 1234ULL}, {0.00001f, 8192, 1024, 1234ULL}, {0.00001f, 1024, 8192, 1234ULL}}; const std::vector<MinMaxInputs<double>> inputsd = { {0.0000001, 1024, 32, 1234ULL}, {0.0000001, 1024, 64, 1234ULL}, {0.0000001, 1024, 128, 1234ULL}, {0.0000001, 1024, 256, 1234ULL}, {0.0000001, 1024, 512, 1234ULL}, {0.0000001, 1024, 1024, 1234ULL}, {0.0000001, 4096, 32, 1234ULL}, {0.0000001, 4096, 64, 1234ULL}, {0.0000001, 4096, 128, 1234ULL}, {0.0000001, 4096, 256, 1234ULL}, {0.0000001, 4096, 512, 1234ULL}, {0.0000001, 4096, 1024, 1234ULL}, {0.0000001, 8192, 32, 1234ULL}, {0.0000001, 8192, 64, 1234ULL}, {0.0000001, 8192, 128, 1234ULL}, {0.0000001, 8192, 256, 1234ULL}, {0.0000001, 8192, 512, 1234ULL}, {0.0000001, 8192, 1024, 1234ULL}, {0.0000001, 1024, 8192, 1234ULL}}; typedef MinMaxTest<float> MinMaxTestF; TEST_P(MinMaxTestF, Result) { ASSERT_TRUE(raft::devArrMatch(minmax_ref, minmax_act, 2 * params.cols, raft::CompareApprox<float>(params.tolerance))); } typedef MinMaxTest<double> MinMaxTestD; TEST_P(MinMaxTestD, Result) { ASSERT_TRUE(raft::devArrMatch(minmax_ref, minmax_act, 2 * params.cols, raft::CompareApprox<double>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(MinMaxTests, MinMaxTestF, ::testing::ValuesIn(inputsf)); INSTANTIATE_TEST_CASE_P(MinMaxTests, MinMaxTestD, ::testing::ValuesIn(inputsd)); } // end namespace Stats } // end namespace MLCommon
b6cdbe1db9a3fc344ec683d19fb1238df7144a35.cu
/* * Copyright (c) 2019-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <common/cudart_utils.h> #include <gtest/gtest.h> #include <stdio.h> #include <stdlib.h> #include <cuda_utils.cuh> #include <limits> #include <random/rng.cuh> #include <stats/minmax.cuh> #include "test_utils.h" namespace MLCommon { namespace Stats { ///@todo: need to add tests for verifying the column subsampling feature template <typename T> struct MinMaxInputs { T tolerance; int rows, cols; unsigned long long int seed; }; template <typename T> ::std::ostream& operator<<(::std::ostream& os, const MinMaxInputs<T>& dims) { return os; } template <typename T> __global__ void naiveMinMaxInitKernel(int ncols, T* globalmin, T* globalmax, T init_val) { int tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid >= ncols) return; globalmin[tid] = init_val; globalmax[tid] = -init_val; } template <typename T> __global__ void naiveMinMaxKernel(const T* data, int nrows, int ncols, T* globalmin, T* globalmax) { int tid = threadIdx.x + blockIdx.x * blockDim.x; int col = tid / nrows; if (col < ncols) { T val = data[tid]; if (!isnan(val)) { raft::myAtomicMin(&globalmin[col], val); raft::myAtomicMax(&globalmax[col], val); } } } template <typename T> void naiveMinMax(const T* data, int nrows, int ncols, T* globalmin, T* globalmax, cudaStream_t stream) { const int TPB = 128; int nblks = raft::ceildiv(ncols, TPB); T init_val = std::numeric_limits<T>::max(); naiveMinMaxInitKernel<<<nblks, TPB, 0, stream>>>(ncols, globalmin, globalmax, init_val); CUDA_CHECK(cudaGetLastError()); nblks = raft::ceildiv(nrows * ncols, TPB); naiveMinMaxKernel<<<nblks, TPB, 0, stream>>>(data, nrows, ncols, globalmin, globalmax); CUDA_CHECK(cudaGetLastError()); } template <typename T> __global__ void nanKernel(T* data, const bool* mask, int len, T nan) { int tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid >= len) return; if (!mask[tid]) data[tid] = nan; } template <typename T> class MinMaxTest : public ::testing::TestWithParam<MinMaxInputs<T>> { protected: void SetUp() override { params = ::testing::TestWithParam<MinMaxInputs<T>>::GetParam(); raft::random::Rng r(params.seed); int len = params.rows * params.cols; CUDA_CHECK(cudaStreamCreate(&stream)); raft::allocate(data, len); raft::allocate(mask, len); raft::allocate(minmax_act, 2 * params.cols); raft::allocate(minmax_ref, 2 * params.cols); r.normal(data, len, (T)0.0, (T)1.0, stream); T nan_prob = 0.01; r.bernoulli(mask, len, nan_prob, stream); const int TPB = 256; nanKernel<<<raft::ceildiv(len, TPB), TPB, 0, stream>>>( data, mask, len, std::numeric_limits<T>::quiet_NaN()); CUDA_CHECK(cudaPeekAtLastError()); naiveMinMax(data, params.rows, params.cols, minmax_ref, minmax_ref + params.cols, stream); minmax<T, 512>(data, nullptr, nullptr, params.rows, params.cols, params.rows, minmax_act, minmax_act + params.cols, nullptr, stream); } void TearDown() override { CUDA_CHECK(cudaFree(data)); CUDA_CHECK(cudaFree(mask)); CUDA_CHECK(cudaFree(minmax_act)); CUDA_CHECK(cudaFree(minmax_ref)); } 
protected: MinMaxInputs<T> params; T *data, *minmax_act, *minmax_ref; bool* mask; cudaStream_t stream; }; const std::vector<MinMaxInputs<float>> inputsf = { {0.00001f, 1024, 32, 1234ULL}, {0.00001f, 1024, 64, 1234ULL}, {0.00001f, 1024, 128, 1234ULL}, {0.00001f, 1024, 256, 1234ULL}, {0.00001f, 1024, 512, 1234ULL}, {0.00001f, 1024, 1024, 1234ULL}, {0.00001f, 4096, 32, 1234ULL}, {0.00001f, 4096, 64, 1234ULL}, {0.00001f, 4096, 128, 1234ULL}, {0.00001f, 4096, 256, 1234ULL}, {0.00001f, 4096, 512, 1234ULL}, {0.00001f, 4096, 1024, 1234ULL}, {0.00001f, 8192, 32, 1234ULL}, {0.00001f, 8192, 64, 1234ULL}, {0.00001f, 8192, 128, 1234ULL}, {0.00001f, 8192, 256, 1234ULL}, {0.00001f, 8192, 512, 1234ULL}, {0.00001f, 8192, 1024, 1234ULL}, {0.00001f, 1024, 8192, 1234ULL}}; const std::vector<MinMaxInputs<double>> inputsd = { {0.0000001, 1024, 32, 1234ULL}, {0.0000001, 1024, 64, 1234ULL}, {0.0000001, 1024, 128, 1234ULL}, {0.0000001, 1024, 256, 1234ULL}, {0.0000001, 1024, 512, 1234ULL}, {0.0000001, 1024, 1024, 1234ULL}, {0.0000001, 4096, 32, 1234ULL}, {0.0000001, 4096, 64, 1234ULL}, {0.0000001, 4096, 128, 1234ULL}, {0.0000001, 4096, 256, 1234ULL}, {0.0000001, 4096, 512, 1234ULL}, {0.0000001, 4096, 1024, 1234ULL}, {0.0000001, 8192, 32, 1234ULL}, {0.0000001, 8192, 64, 1234ULL}, {0.0000001, 8192, 128, 1234ULL}, {0.0000001, 8192, 256, 1234ULL}, {0.0000001, 8192, 512, 1234ULL}, {0.0000001, 8192, 1024, 1234ULL}, {0.0000001, 1024, 8192, 1234ULL}}; typedef MinMaxTest<float> MinMaxTestF; TEST_P(MinMaxTestF, Result) { ASSERT_TRUE(raft::devArrMatch(minmax_ref, minmax_act, 2 * params.cols, raft::CompareApprox<float>(params.tolerance))); } typedef MinMaxTest<double> MinMaxTestD; TEST_P(MinMaxTestD, Result) { ASSERT_TRUE(raft::devArrMatch(minmax_ref, minmax_act, 2 * params.cols, raft::CompareApprox<double>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(MinMaxTests, MinMaxTestF, ::testing::ValuesIn(inputsf)); INSTANTIATE_TEST_CASE_P(MinMaxTests, MinMaxTestD, ::testing::ValuesIn(inputsd)); } // end namespace Stats } // end namespace MLCommon
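The test above checks a tiled GPU min/max against a naive atomic baseline that skips NaNs on column-major data. A host-side reference with the same layout and NaN handling (a sketch with hypothetical names, not part of the test) can be handy when debugging mismatches:

#include <cmath>
#include <limits>
#include <vector>

// Column-major data of shape (nrows, ncols); NaN entries are ignored,
// mirroring the naiveMinMax kernel in the test above.
void hostMinMax(const std::vector<float>& data, int nrows, int ncols,
                std::vector<float>& mins, std::vector<float>& maxs) {
    mins.assign(ncols,  std::numeric_limits<float>::max());
    maxs.assign(ncols, -std::numeric_limits<float>::max());
    for (int c = 0; c < ncols; ++c) {
        for (int r = 0; r < nrows; ++r) {
            float v = data[c * nrows + r];
            if (std::isnan(v)) continue;
            if (v < mins[c]) mins[c] = v;
            if (v > maxs[c]) maxs[c] = v;
        }
    }
}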
79976d905fc9e680beb3699af67f6c71b36ec18c.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <math.h> #include <stdlib.h> #include <time.h> #include <hip/hip_runtime.h> #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> #include <hip/hip_runtime.h> // ____ Experiment inputs _____ #define N 10 // Number of variables #define F_LINE pow(my_var,2) // For Sphere function x_i^2 #define INIT_VALUE 0 #define POPULATION 128 // Population #define UB 10 // Upper bound #define LB -10 // Lower bound #define N_ITERS 1000 #define N_EXPERIMENTS 1 // _____________________________ #if ((N % 32) == 0) #define THREADS N #define WARP_GAP 0 #else #define THREADS (N/32 + 1)*32 #define WARP_GAP (N/32 + 1)*32 - N #endif #define W 0.6 #define PHI_P 1.6 #define PHI_G 1.6 #define N_WARPS N/WARP_SIZE #define F_KEY FUNCT #define WARP_SIZE 32 #define N_WARPS N/WARP_SIZE __device__ int iMin = 0; __device__ int fitMin = 9999999; // Prints an array of floats from Device void fcudaPrint(float * array, int elements, int n_jump){ float * aux; aux = (float *) malloc(elements * sizeof(float)); hipMemcpy(aux, array, elements * sizeof(float), hipMemcpyDeviceToHost); int i; for (i = 0; i < elements; i++){ if ((i % n_jump) == 0 ) {printf("\n");} printf("%.5f ", aux[i]); } free(aux); aux = NULL; } // Prints an array of ints from Device void icudaPrint(int * array, int elements, int n_jump){ int * aux; aux = (int *) malloc(elements * sizeof(int)); hipMemcpy(aux, array, elements * sizeof(int), hipMemcpyDeviceToHost); int i; for (i = 0; i < elements; i++){ if ((i % n_jump) == 0 ) {printf("\n");} printf("%d ", aux[i]); } free(aux); aux = NULL; } __global__ void init_states(unsigned int seed, hiprandState_t * states) { /* we have to initialize the state */ hiprand_init(seed, blockIdx.x, 0, &states[blockIdx.x]); } __global__ void U_01(float * arr, hiprandState_t * states){ int id = blockIdx.x * blockDim.x + threadIdx.x; arr[id] = hiprand_uniform(&states[id]); } __global__ void N_01(float * arr, hiprandState_t * states){ int id = blockIdx.x * blockDim.x + threadIdx.x; arr[id] = hiprand_normal(&states[id])/1; } // Computes random integers between 0 and n __global__ void irand(int n, int * arr, hiprandState_t * states){ int id = blockIdx.x * blockDim.x + threadIdx.x; arr[id] = hiprand(&states[id]) % n; } __global__ void FUNCT(float * x, float * evals){ // Launch 1 block of N threads for each element fo the population: <<<POPULATION, N>>> // variable sum is shared by all threads in each block __shared__ float sum; float value; int phantom_id = blockIdx.x * blockDim.x + threadIdx.x; // Id with gaps included int real_id = phantom_id - blockIdx.x * ((int)WARP_GAP); // Id withouth gaps included if (threadIdx.x == 0) { sum = INIT_VALUE; // } // The main operation if (threadIdx.x < N){ float my_var = x[real_id]; value = F_LINE; } else { value = 0; } // A little warp reduction in order to reduce the amount of atomic operations int offset; for (offset = WARP_SIZE/2; offset>0; offset >>= 1){ value += __shfl_down(value, offset); } // The first thread of each warp adds its value if ((threadIdx.x & 31) == 0){ atomicAdd(&sum, value); } // Thread synchronization, because this is not a warp operation __syncthreads(); // Only one thread writes the result of this block-bee if (threadIdx.x == 0){ evals[blockIdx.x] = sum; } } // Sets up the initial population __global__ void init_pos(float * pos, float * rand_uniform){ int id = blockIdx.x * blockDim.x + threadIdx.x; pos[id] = LB + rand_uniform[id] * (UB - LB); } // Sets up the initial velocity 
__global__ void init_vel(float * vel, float * rand_A, float * rand_B){ int id = blockIdx.x * blockDim.x + threadIdx.x; vel[id] = LB + (rand_B[id] - rand_A[id]) * (UB - LB); } // Updates the min array with the actual position of each particle __global__ void update_mins(float * pos, float * min, float * evals, float * min_evals){ // Needed to lauch as many blocks as particles, with N threads per block //int id = blockIdx.x * blockDim.x + threadIdx.x; int phantom_id = blockIdx.x * blockDim.x + threadIdx.x; // Id with gaps included int real_id = phantom_id - blockIdx.x * ((int)WARP_GAP); // Id withouth gaps included int j = blockIdx.x; // We're using more threads than needed, but we maximize the GPU usage if (threadIdx.x < N){ if (evals[j] < min_evals[j]){ if (threadIdx.x == 0){min_evals[j] = evals[j];} min[real_id] = pos[real_id]; } } } // Operacin atmica que escribe el mnimo de un ndice __device__ float atomicMinIndex(float * array, int * address, int val){ int lo_que_tengo, lo_que_tenia; lo_que_tengo = * address; while (array[val] < array[lo_que_tengo]){ lo_que_tenia = lo_que_tengo; lo_que_tengo = atomicCAS(address, lo_que_tenia, val); } return lo_que_tengo; } __global__ void arrayReduction(float * array){ int id = blockDim.x * blockIdx.x + threadIdx.x; int thisThreadId = id; float value = array[id]; int gap, id2; float value2; for (gap = WARP_SIZE/2; gap > 0; gap >>= 1){ id2 = __shfl_down(id, gap); value2 = __shfl_down(value, gap); if (value2 < value){ value = value2; id = id2; } } if (((thisThreadId & (WARP_SIZE - 1)) == 0)){ atomicMinIndex(array, &iMin, id); } } // Updates the velocity & pos __global__ void update_vel_pos(float * vel, float * pos, float * min, float * ru01, float * ru02){ // It's necessary launch as many blocks of N threads as particles //int real_id = blockIdx.x * blockDim.x + threadIdx.x; int i = threadIdx.x; int phantom_id = blockIdx.x * blockDim.x + threadIdx.x; // Id with gaps included int real_id = phantom_id - blockIdx.x * ((int)WARP_GAP); // Id withouth gaps included if (threadIdx.x < N){ // Update speed //vel[id] = W * vel[id] + PHI_P * ru01[id] * (min[id] - pos[id]) + PHI_G * ru02[id] * (min[iMin*N + i] - pos[id]); vel[real_id] = __fmaf_rd(W, vel[real_id], __fmul_rd(__fmul_rd(PHI_P, ru01[real_id]), __fsub_rd(min[real_id], pos[real_id])) + __fmul_rd(__fmul_rd(PHI_G, ru02[real_id]), __fsub_rd(min[iMin*N + i], pos[real_id]))); // Update position //pos[id] = pos[id] + vel[id]; pos[real_id] = __fadd_rd(pos[real_id], vel[real_id]); } } // Updates the velocity & pos __global__ void PSO_step(float * vel, float * pos, float * min, float * evals, float * min_evals, float * ru01){ // It's necessary launch as many blocks of N threads as particles //int real_id = blockIdx.x * blockDim.x + threadIdx.x; __shared__ float sum; int phantom_id = blockIdx.x * blockDim.x + threadIdx.x; // Id with gaps included int real_id = phantom_id - blockIdx.x * ((int)WARP_GAP); // Id withouth gaps included float my_var, my_vel; my_var = pos[real_id]; my_vel = vel[real_id]; if (threadIdx.x == 0) {sum = INIT_VALUE;} int i = threadIdx.x; float value; // --------- Updating pos and speed if (threadIdx.x < N){ // Update speed my_vel = __fmaf_rd(W, my_vel, __fmul_rd(__fmul_rd(PHI_P, ru01[real_id]), __fsub_rd(min[real_id], my_var)) + __fmul_rd(__fmul_rd(PHI_G, ru01[N*POPULATION - real_id]), __fsub_rd(min[iMin*N + i], my_var))); vel[real_id] = my_vel; // Saving it in global mem... // Update position my_var = __fadd_rd(my_var, my_vel); pos[real_id] = my_var; // Saving it in global mem... 
} __syncthreads(); // --------- Evaluating the function if (threadIdx.x < N){ value = F_LINE; } else { value = 0; } // A little warp reduction in order to reduce the amount of atomic operations int offset; for (offset = WARP_SIZE/2; offset>0; offset >>= 1){ value += __shfl_down(value, offset); } // The first thread of each warp adds its value if ((threadIdx.x & 31) == 0){ atomicAdd(&sum, value); //printf("-->> Thread %d sums %f to %d bee\n", threadIdx.x, value, blockIdx.x); } // Thread synchronization, because this is not a warp operation __syncthreads(); // Only one thread writes the result of this block-bee if (threadIdx.x == 0){ //evals[blockIdx.x] = fitness(sum); evals[blockIdx.x] = sum; // From here, sum is the complete evaluation //printf("--%f, bee: %d\n", sum, blockIdx.x); } __syncthreads(); int j = blockIdx.x; // ------------- Updating mins if (threadIdx.x < N){ //if (evals[j] < min_evals[j]){ if (sum < min_evals[j]){ if (threadIdx.x == 0){min_evals[j] = sum;} min[real_id] = my_var; //printf("done %d \n", i); } } } __global__ void print_best_pos(float * pos){ int i; for (i = 0; i < N; i++){ printf("%f ", pos[iMin * N + i]); } printf("\n"); } __global__ void print_best_val(float * part_min_evals){ printf("[Solution] Value: %f\n", part_min_evals[iMin]); } // Return the floating mean of the n first elements from arr float fmean(float * arr, int n){ float ssum = 0; int i; for (i = 0; i < n; i++){ ssum += arr[i]; } return ssum/n; } // Prints an array of floats void fMatrixPrint(float * array, int elements, int n_jump){ int i; for (i = 0; i < elements; i++){ if ((i % n_jump) == 0 ) {printf("\n");} printf("%f ", array[i]); } } int main(void){ // States hiprandState_t * states; hipMalloc((void**) &states, N * POPULATION * sizeof(hiprandState_t)); hipLaunchKernelGGL(( init_states), dim3(N*POPULATION),dim3(1), 0, 0, time(0), states); // The random things float * rand_float_A, * rand_float_B; hipMalloc((void **) &rand_float_A, N * POPULATION * sizeof(float)); hipMalloc((void **) &rand_float_B, N * POPULATION * sizeof(float)); // The data structures float * particles, * evals; float * part_min, * part_min_evals; float * vel; hipMalloc((void **) &particles, N * POPULATION * sizeof(float)); hipMalloc((void **) &evals, POPULATION * sizeof(float)); hipMalloc((void **) &part_min, N * POPULATION * sizeof(float)); hipMalloc((void **) &part_min_evals, POPULATION * sizeof(float)); hipMalloc((void **) &vel, N * POPULATION * sizeof(float)); hipLaunchKernelGGL(( U_01), dim3(N), dim3(POPULATION), 0, 0, rand_float_A, states); hipLaunchKernelGGL(( U_01), dim3(N), dim3(POPULATION), 0, 0, rand_float_B, states); hipLaunchKernelGGL(( init_pos), dim3(POPULATION), dim3(N), 0, 0, particles, rand_float_A); hipLaunchKernelGGL(( init_pos), dim3(POPULATION), dim3(N), 0, 0, part_min, rand_float_B); hipLaunchKernelGGL(( init_vel), dim3(POPULATION), dim3(N), 0, 0, vel, rand_float_A, rand_float_B); hipLaunchKernelGGL(( F_KEY), dim3(POPULATION),dim3(THREADS), 0, 0, particles, evals); hipLaunchKernelGGL(( F_KEY), dim3(POPULATION),dim3(THREADS), 0, 0, part_min, part_min_evals); hipDeviceSynchronize(); clock_t start, end; double cpu_time_used; float timings[N_EXPERIMENTS]; int experiment; for (experiment = 0; experiment < N_EXPERIMENTS; experiment ++){ start = clock(); int iter; for (iter = 0; iter < N_ITERS; iter++){ hipLaunchKernelGGL(( U_01), dim3(N), dim3(POPULATION), 0, 0, rand_float_A, states); hipLaunchKernelGGL(( PSO_step), dim3(POPULATION),dim3(THREADS), 0, 0, vel, particles, part_min, evals, part_min_evals, 
rand_float_A); // Look for global best hipLaunchKernelGGL(( arrayReduction), dim3(1),dim3(POPULATION), 0, 0, part_min_evals); } hipDeviceSynchronize(); end = clock(); cpu_time_used = ((double) (end - start))/ CLOCKS_PER_SEC; timings[experiment] = (float) cpu_time_used; } float time_mean = fmean(timings, N_EXPERIMENTS); printf("\n[Info] Iterations : %d\n[Info] Experiments: %d\n[Info] Variables : %d\n[Info] Mean time : %f\n", N_ITERS, N_EXPERIMENTS, N, time_mean); hipLaunchKernelGGL(( print_best_val), dim3(1),dim3(1), 0, 0, part_min_evals); hipDeviceSynchronize(); printf("[Solution] Location: "); hipLaunchKernelGGL(( print_best_pos), dim3(1),dim3(1), 0, 0, part_min); hipDeviceSynchronize(); printf("\n"); return 0; }
79976d905fc9e680beb3699af67f6c71b36ec18c.cu
#include <stdio.h> #include <math.h> #include <stdlib.h> #include <time.h> #include <cuda_runtime.h> #include <curand.h> #include <curand_kernel.h> #include <cuda_runtime.h> // ____ Experiment inputs _____ #define N 10 // Number of variables #define F_LINE pow(my_var,2) // For Sphere function x_i^2 #define INIT_VALUE 0 #define POPULATION 128 // Population #define UB 10 // Upper bound #define LB -10 // Lower bound #define N_ITERS 1000 #define N_EXPERIMENTS 1 // _____________________________ #if ((N % 32) == 0) #define THREADS N #define WARP_GAP 0 #else #define THREADS (N/32 + 1)*32 #define WARP_GAP (N/32 + 1)*32 - N #endif #define W 0.6 #define PHI_P 1.6 #define PHI_G 1.6 #define N_WARPS N/WARP_SIZE #define F_KEY FUNCT #define WARP_SIZE 32 #define N_WARPS N/WARP_SIZE __device__ int iMin = 0; __device__ int fitMin = 9999999; // Prints an array of floats from Device void fcudaPrint(float * array, int elements, int n_jump){ float * aux; aux = (float *) malloc(elements * sizeof(float)); cudaMemcpy(aux, array, elements * sizeof(float), cudaMemcpyDeviceToHost); int i; for (i = 0; i < elements; i++){ if ((i % n_jump) == 0 ) {printf("\n");} printf("%.5f ", aux[i]); } free(aux); aux = NULL; } // Prints an array of ints from Device void icudaPrint(int * array, int elements, int n_jump){ int * aux; aux = (int *) malloc(elements * sizeof(int)); cudaMemcpy(aux, array, elements * sizeof(int), cudaMemcpyDeviceToHost); int i; for (i = 0; i < elements; i++){ if ((i % n_jump) == 0 ) {printf("\n");} printf("%d ", aux[i]); } free(aux); aux = NULL; } __global__ void init_states(unsigned int seed, curandState_t * states) { /* we have to initialize the state */ curand_init(seed, blockIdx.x, 0, &states[blockIdx.x]); } __global__ void U_01(float * arr, curandState_t * states){ int id = blockIdx.x * blockDim.x + threadIdx.x; arr[id] = curand_uniform(&states[id]); } __global__ void N_01(float * arr, curandState_t * states){ int id = blockIdx.x * blockDim.x + threadIdx.x; arr[id] = curand_normal(&states[id])/1; } // Computes random integers between 0 and n __global__ void irand(int n, int * arr, curandState_t * states){ int id = blockIdx.x * blockDim.x + threadIdx.x; arr[id] = curand(&states[id]) % n; } __global__ void FUNCT(float * x, float * evals){ // Launch 1 block of N threads for each element fo the population: <<<POPULATION, N>>> // variable sum is shared by all threads in each block __shared__ float sum; float value; int phantom_id = blockIdx.x * blockDim.x + threadIdx.x; // Id with gaps included int real_id = phantom_id - blockIdx.x * ((int)WARP_GAP); // Id withouth gaps included if (threadIdx.x == 0) { sum = INIT_VALUE; // } // The main operation if (threadIdx.x < N){ float my_var = x[real_id]; value = F_LINE; } else { value = 0; } // A little warp reduction in order to reduce the amount of atomic operations int offset; for (offset = WARP_SIZE/2; offset>0; offset >>= 1){ value += __shfl_down(value, offset); } // The first thread of each warp adds its value if ((threadIdx.x & 31) == 0){ atomicAdd(&sum, value); } // Thread synchronization, because this is not a warp operation __syncthreads(); // Only one thread writes the result of this block-bee if (threadIdx.x == 0){ evals[blockIdx.x] = sum; } } // Sets up the initial population __global__ void init_pos(float * pos, float * rand_uniform){ int id = blockIdx.x * blockDim.x + threadIdx.x; pos[id] = LB + rand_uniform[id] * (UB - LB); } // Sets up the initial velocity __global__ void init_vel(float * vel, float * rand_A, float * rand_B){ int id = blockIdx.x * 
blockDim.x + threadIdx.x; vel[id] = LB + (rand_B[id] - rand_A[id]) * (UB - LB); } // Updates the min array with the actual position of each particle __global__ void update_mins(float * pos, float * min, float * evals, float * min_evals){ // Needed to lauch as many blocks as particles, with N threads per block //int id = blockIdx.x * blockDim.x + threadIdx.x; int phantom_id = blockIdx.x * blockDim.x + threadIdx.x; // Id with gaps included int real_id = phantom_id - blockIdx.x * ((int)WARP_GAP); // Id withouth gaps included int j = blockIdx.x; // We're using more threads than needed, but we maximize the GPU usage if (threadIdx.x < N){ if (evals[j] < min_evals[j]){ if (threadIdx.x == 0){min_evals[j] = evals[j];} min[real_id] = pos[real_id]; } } } // Operación atómica que escribe el mínimo de un índice __device__ float atomicMinIndex(float * array, int * address, int val){ int lo_que_tengo, lo_que_tenia; lo_que_tengo = * address; while (array[val] < array[lo_que_tengo]){ lo_que_tenia = lo_que_tengo; lo_que_tengo = atomicCAS(address, lo_que_tenia, val); } return lo_que_tengo; } __global__ void arrayReduction(float * array){ int id = blockDim.x * blockIdx.x + threadIdx.x; int thisThreadId = id; float value = array[id]; int gap, id2; float value2; for (gap = WARP_SIZE/2; gap > 0; gap >>= 1){ id2 = __shfl_down(id, gap); value2 = __shfl_down(value, gap); if (value2 < value){ value = value2; id = id2; } } if (((thisThreadId & (WARP_SIZE - 1)) == 0)){ atomicMinIndex(array, &iMin, id); } } // Updates the velocity & pos __global__ void update_vel_pos(float * vel, float * pos, float * min, float * ru01, float * ru02){ // It's necessary launch as many blocks of N threads as particles //int real_id = blockIdx.x * blockDim.x + threadIdx.x; int i = threadIdx.x; int phantom_id = blockIdx.x * blockDim.x + threadIdx.x; // Id with gaps included int real_id = phantom_id - blockIdx.x * ((int)WARP_GAP); // Id withouth gaps included if (threadIdx.x < N){ // Update speed //vel[id] = W * vel[id] + PHI_P * ru01[id] * (min[id] - pos[id]) + PHI_G * ru02[id] * (min[iMin*N + i] - pos[id]); vel[real_id] = __fmaf_rd(W, vel[real_id], __fmul_rd(__fmul_rd(PHI_P, ru01[real_id]), __fsub_rd(min[real_id], pos[real_id])) + __fmul_rd(__fmul_rd(PHI_G, ru02[real_id]), __fsub_rd(min[iMin*N + i], pos[real_id]))); // Update position //pos[id] = pos[id] + vel[id]; pos[real_id] = __fadd_rd(pos[real_id], vel[real_id]); } } // Updates the velocity & pos __global__ void PSO_step(float * vel, float * pos, float * min, float * evals, float * min_evals, float * ru01){ // It's necessary launch as many blocks of N threads as particles //int real_id = blockIdx.x * blockDim.x + threadIdx.x; __shared__ float sum; int phantom_id = blockIdx.x * blockDim.x + threadIdx.x; // Id with gaps included int real_id = phantom_id - blockIdx.x * ((int)WARP_GAP); // Id withouth gaps included float my_var, my_vel; my_var = pos[real_id]; my_vel = vel[real_id]; if (threadIdx.x == 0) {sum = INIT_VALUE;} int i = threadIdx.x; float value; // --------- Updating pos and speed if (threadIdx.x < N){ // Update speed my_vel = __fmaf_rd(W, my_vel, __fmul_rd(__fmul_rd(PHI_P, ru01[real_id]), __fsub_rd(min[real_id], my_var)) + __fmul_rd(__fmul_rd(PHI_G, ru01[N*POPULATION - real_id]), __fsub_rd(min[iMin*N + i], my_var))); vel[real_id] = my_vel; // Saving it in global mem... // Update position my_var = __fadd_rd(my_var, my_vel); pos[real_id] = my_var; // Saving it in global mem... 
} __syncthreads(); // --------- Evaluating the function if (threadIdx.x < N){ value = F_LINE; } else { value = 0; } // A little warp reduction in order to reduce the amount of atomic operations int offset; for (offset = WARP_SIZE/2; offset>0; offset >>= 1){ value += __shfl_down(value, offset); } // The first thread of each warp adds its value if ((threadIdx.x & 31) == 0){ atomicAdd(&sum, value); //printf("-->> Thread %d sums %f to %d bee\n", threadIdx.x, value, blockIdx.x); } // Thread synchronization, because this is not a warp operation __syncthreads(); // Only one thread writes the result of this block-bee if (threadIdx.x == 0){ //evals[blockIdx.x] = fitness(sum); evals[blockIdx.x] = sum; // From here, sum is the complete evaluation //printf("--%f, bee: %d\n", sum, blockIdx.x); } __syncthreads(); int j = blockIdx.x; // ------------- Updating mins if (threadIdx.x < N){ //if (evals[j] < min_evals[j]){ if (sum < min_evals[j]){ if (threadIdx.x == 0){min_evals[j] = sum;} min[real_id] = my_var; //printf("done %d \n", i); } } } __global__ void print_best_pos(float * pos){ int i; for (i = 0; i < N; i++){ printf("%f ", pos[iMin * N + i]); } printf("\n"); } __global__ void print_best_val(float * part_min_evals){ printf("[Solution] Value: %f\n", part_min_evals[iMin]); } // Return the floating mean of the n first elements from arr float fmean(float * arr, int n){ float ssum = 0; int i; for (i = 0; i < n; i++){ ssum += arr[i]; } return ssum/n; } // Prints an array of floats void fMatrixPrint(float * array, int elements, int n_jump){ int i; for (i = 0; i < elements; i++){ if ((i % n_jump) == 0 ) {printf("\n");} printf("%f ", array[i]); } } int main(void){ // States curandState_t * states; cudaMalloc((void**) &states, N * POPULATION * sizeof(curandState_t)); init_states<<<N*POPULATION,1>>>(time(0), states); // The random things float * rand_float_A, * rand_float_B; cudaMalloc((void **) &rand_float_A, N * POPULATION * sizeof(float)); cudaMalloc((void **) &rand_float_B, N * POPULATION * sizeof(float)); // The data structures float * particles, * evals; float * part_min, * part_min_evals; float * vel; cudaMalloc((void **) &particles, N * POPULATION * sizeof(float)); cudaMalloc((void **) &evals, POPULATION * sizeof(float)); cudaMalloc((void **) &part_min, N * POPULATION * sizeof(float)); cudaMalloc((void **) &part_min_evals, POPULATION * sizeof(float)); cudaMalloc((void **) &vel, N * POPULATION * sizeof(float)); U_01<<<N, POPULATION>>>(rand_float_A, states); U_01<<<N, POPULATION>>>(rand_float_B, states); init_pos<<<POPULATION, N>>>(particles, rand_float_A); init_pos<<<POPULATION, N>>>(part_min, rand_float_B); init_vel<<<POPULATION, N>>>(vel, rand_float_A, rand_float_B); F_KEY<<<POPULATION,THREADS>>>(particles, evals); F_KEY<<<POPULATION,THREADS>>>(part_min, part_min_evals); cudaDeviceSynchronize(); clock_t start, end; double cpu_time_used; float timings[N_EXPERIMENTS]; int experiment; for (experiment = 0; experiment < N_EXPERIMENTS; experiment ++){ start = clock(); int iter; for (iter = 0; iter < N_ITERS; iter++){ U_01<<<N, POPULATION>>>(rand_float_A, states); PSO_step<<<POPULATION,THREADS>>>(vel, particles, part_min, evals, part_min_evals, rand_float_A); // Look for global best arrayReduction<<<1,POPULATION>>>(part_min_evals); } cudaDeviceSynchronize(); end = clock(); cpu_time_used = ((double) (end - start))/ CLOCKS_PER_SEC; timings[experiment] = (float) cpu_time_used; } float time_mean = fmean(timings, N_EXPERIMENTS); printf("\n[Info] Iterations : %d\n[Info] Experiments: %d\n[Info] Variables : 
%d\n[Info] Mean time : %f\n", N_ITERS, N_EXPERIMENTS, N, time_mean); print_best_val<<<1,1>>>(part_min_evals); cudaDeviceSynchronize(); printf("[Solution] Location: "); print_best_pos<<<1,1>>>(part_min); cudaDeviceSynchronize(); printf("\n"); return 0; }
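The PSO kernels above fold per-thread partial sums with __shfl_down, the pre-CUDA-9 unsynchronized shuffle. A minimal single-warp sum using the current __shfl_down_sync intrinsic (full-warp mask assumed, hypothetical buffers; error checking omitted for brevity):

#include <cuda_runtime.h>
#include <cstdio>

__global__ void warpSumKernel(const float* in, float* out) {
    // One warp: each lane loads one element, then a shuffle tree folds the sum into lane 0.
    float v = in[threadIdx.x];
    for (int offset = warpSize / 2; offset > 0; offset >>= 1)
        v += __shfl_down_sync(0xffffffffu, v, offset);
    if (threadIdx.x == 0) *out = v;
}

int main() {
    float h_in[32], *d_in, *d_out, h_out = 0.0f;
    for (int i = 0; i < 32; ++i) h_in[i] = 1.0f;           // expected sum: 32
    cudaMalloc(&d_in, 32 * sizeof(float));
    cudaMalloc(&d_out, sizeof(float));
    cudaMemcpy(d_in, h_in, 32 * sizeof(float), cudaMemcpyHostToDevice);
    warpSumKernel<<<1, 32>>>(d_in, d_out);
    cudaMemcpy(&h_out, d_out, sizeof(float), cudaMemcpyDeviceToHost);
    std::printf("warp sum = %f\n", h_out);
    cudaFree(d_in); cudaFree(d_out);
    return 0;
}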
493b94e0f6dfa7727088f7f8b10c5bbf9e0304cc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "caffe2/core/context_gpu.h" #include "caffe2/operators/accuracy_op.h" #include "caffe2/utils/math.h" #include <hipcub/hipcub.hpp> namespace caffe2 { namespace { __global__ void AccuracyKernel( const int N, const int D, const int top_k, const float* Xdata, const int* labelData, float* accuracy) { typedef hipcub::BlockReduce<int, CAFFE_CUDA_NUM_THREADS> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; int correct = 0; for (int row = blockIdx.x; row < N; row += gridDim.x) { const int label = labelData[row]; const float label_pred = Xdata[row * D + label]; int ngt = 0; for (int col = threadIdx.x; col < D; col += blockDim.x) { const float pred = Xdata[row * D + col]; if (pred > label_pred || (pred == label_pred && col <= label)) { ++ngt; } } ngt = BlockReduce(temp_storage).Sum(ngt); if (ngt <= top_k) { ++correct; } __syncthreads(); } if (threadIdx.x == 0) { atomicAdd(accuracy, static_cast<float>(correct)); } } __global__ void AccuracyDivideKernel(const int N, float* accuracy) { *accuracy /= N; } } // namespace template <> bool AccuracyOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(PREDICTION); auto& label = Input(LABEL); auto* Y = Output(0); CAFFE_ENFORCE_EQ(X.ndim(), 2); int N = X.dim32(0); int D = X.dim32(1); CAFFE_ENFORCE_EQ(label.ndim(), 1); CAFFE_ENFORCE_EQ(label.dim32(0), N); Y->Resize(vector<int64_t>()); float* Ydata = Y->template mutable_data<float>(); math::Set<float, CUDAContext>(1, 0, Ydata, &context_); hipLaunchKernelGGL(( AccuracyKernel), dim3(::min(CAFFE_MAXIMUM_NUM_BLOCKS, N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, D, top_k_, X.data<float>(), label.data<int>(), Ydata); // This is going to be executed only in one single kernel. Not very beautiful, // but probably we have to do this? hipLaunchKernelGGL(( AccuracyDivideKernel), dim3(1), dim3(1), 0, context_.cuda_stream(), N, Ydata); return true; } REGISTER_CUDA_OPERATOR(Accuracy, AccuracyOp<float, CUDAContext>); } // namespace caffe2
493b94e0f6dfa7727088f7f8b10c5bbf9e0304cc.cu
#include "caffe2/core/context_gpu.h" #include "caffe2/operators/accuracy_op.h" #include "caffe2/utils/math.h" #include <cub/block/block_reduce.cuh> namespace caffe2 { namespace { __global__ void AccuracyKernel( const int N, const int D, const int top_k, const float* Xdata, const int* labelData, float* accuracy) { typedef cub::BlockReduce<int, CAFFE_CUDA_NUM_THREADS> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; int correct = 0; for (int row = blockIdx.x; row < N; row += gridDim.x) { const int label = labelData[row]; const float label_pred = Xdata[row * D + label]; int ngt = 0; for (int col = threadIdx.x; col < D; col += blockDim.x) { const float pred = Xdata[row * D + col]; if (pred > label_pred || (pred == label_pred && col <= label)) { ++ngt; } } ngt = BlockReduce(temp_storage).Sum(ngt); if (ngt <= top_k) { ++correct; } __syncthreads(); } if (threadIdx.x == 0) { atomicAdd(accuracy, static_cast<float>(correct)); } } __global__ void AccuracyDivideKernel(const int N, float* accuracy) { *accuracy /= N; } } // namespace template <> bool AccuracyOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(PREDICTION); auto& label = Input(LABEL); auto* Y = Output(0); CAFFE_ENFORCE_EQ(X.ndim(), 2); int N = X.dim32(0); int D = X.dim32(1); CAFFE_ENFORCE_EQ(label.ndim(), 1); CAFFE_ENFORCE_EQ(label.dim32(0), N); Y->Resize(vector<int64_t>()); float* Ydata = Y->template mutable_data<float>(); math::Set<float, CUDAContext>(1, 0, Ydata, &context_); AccuracyKernel<<< std::min(CAFFE_MAXIMUM_NUM_BLOCKS, N), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( N, D, top_k_, X.data<float>(), label.data<int>(), Ydata); // This is going to be executed only in one single kernel. Not very beautiful, // but probably we have to do this? AccuracyDivideKernel<<<1, 1, 0, context_.cuda_stream()>>>( N, Ydata); return true; } REGISTER_CUDA_OPERATOR(Accuracy, AccuracyOp<float, CUDAContext>); } // namespace caffe2
3a93d138802b626e60d9bcd590a180288a0ecd47.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "common/device_intrinsics.h" #include "common/global_configs.h" #include "core/warp_solver/PenaltyConstants.h" #include "core/warp_solver/geometry_icp_jacobian.cuh" #include "core/warp_solver/apply_jt_dot.cuh" #include "core/warp_solver/PreconditionerRhsBuilder.h" namespace surfelwarp { namespace device { enum { warp_size = 32, num_warps = 8, thread_blk_size = num_warps * warp_size, }; __device__ __forceinline__ void computeScalarJtResidual( const ScalarCostTerm2Jacobian& term2jacobian, unsigned node_idx, unsigned typed_term_idx, float jt_residual[jt_dot_blk_size] ) { const float* residual = term2jacobian.residual_array.RawPtr() + typed_term_idx; computeScalarJacobianTransposeDot(term2jacobian, residual, node_idx, typed_term_idx, jt_residual); } __device__ __forceinline__ void computeSmoothJtResidual( const NodeGraphSmoothTerm2Jacobian& term2jacobian, unsigned node_idx, unsigned typed_term, float jt_residual[jt_dot_blk_size] ) { const ushort2 node_ij = term2jacobian.node_graph[typed_term]; const auto Ti_xj = term2jacobian.Ti_xj[typed_term]; const auto Tj_xj = term2jacobian.Tj_xj[typed_term]; const auto validity = term2jacobian.validity_indicator[typed_term]; const bool is_node_i = (node_idx == node_ij.x); if(validity == 0) { #pragma unroll for(auto i = 0; i < jt_dot_blk_size; i++) jt_residual[i] = 0.0f; return; } computeSmoothTermJtResidual(Ti_xj, Tj_xj, is_node_i, jt_residual); } __device__ __forceinline__ void computeSmoothJtResidualOnline( const NodeGraphSmoothTerm2Jacobian& term2jacobian, unsigned node_idx, unsigned typed_term, float jt_residual[jt_dot_blk_size] ) { const ushort2 node_ij = term2jacobian.node_graph[typed_term]; const auto xi = term2jacobian.reference_node_coords[node_ij.x]; const auto xj = term2jacobian.reference_node_coords[node_ij.y]; DualQuaternion dq_i = term2jacobian.node_se3[node_ij.x]; DualQuaternion dq_j = term2jacobian.node_se3[node_ij.y]; const auto validity = term2jacobian.validity_indicator[typed_term]; const mat34 Ti = dq_i.se3_matrix(); const mat34 Tj = dq_j.se3_matrix(); const bool is_node_i = (node_idx == node_ij.x); if(validity == 0) { #pragma unroll for(auto i = 0; i < jt_dot_blk_size; i++) jt_residual[i] = 0.0f; return; } computeSmoothTermJtResidual(xj, Ti, Tj, is_node_i, jt_residual); } __device__ __forceinline__ void computePoint2PointJtResidual( const Point2PointICPTerm2Jacobian& term2jacobian, unsigned node_idx, unsigned typed_term_idx, float jt_residual[jt_dot_blk_size] ) { const float4 target_vertex = term2jacobian.target_vertex[typed_term_idx]; //const float4 reference_vertex = term2jacobian.reference_vertex[typed_term_idx]; const ushort4 knn = term2jacobian.knn[typed_term_idx]; const float4 knn_weight = term2jacobian.knn_weight[typed_term_idx]; const float4 warped_vertex = term2jacobian.warped_vertex[typed_term_idx]; computePoint2PointJtResidual(target_vertex, warped_vertex, jt_residual); //Multiple with the weight const auto offset = 0 * (node_idx == knn.x) + 1 * (node_idx == knn.y) + 2 * (node_idx == knn.z) + 3 * (node_idx == knn.w); const float this_weight = ((const float*)(&knn_weight))[offset]; for(auto i = 0; i < jt_dot_blk_size; i++) { //Note that the residual do not need to be augment, only jacobian should be multiplied with weight jt_residual[i] *= this_weight; } } __device__ __forceinline__ void fillScalarJtResidualToSharedBlock( const float jt_redisual[jt_dot_blk_size], float shared_blks[jt_dot_blk_size][thread_blk_size], const float 
weight_square = 1.0f ) { #pragma unroll for(auto i = 0; i < jt_dot_blk_size; i++) { shared_blks[i][threadIdx.x] = - weight_square * jt_redisual[i]; } } __device__ __forceinline__ void incrementScalarJtResidualToSharedBlock( const float jt_redisual[jt_dot_blk_size], float shared_blks[jt_dot_blk_size][thread_blk_size], const float weight_square = 1.0f ) { #pragma unroll for (auto i = 0; i < jt_dot_blk_size; i++) { shared_blks[i][threadIdx.x] += (-weight_square * jt_redisual[i]); } } __global__ void computeJtResidualWithIndexKernel( const Node2TermsIndex::Node2TermMap node2term, const Term2JacobianMaps term2jacobian, float* jt_residual, const PenaltyConstants constants = PenaltyConstants() ) { const auto node_idx = blockIdx.x; const auto term_begin = node2term.offset[node_idx]; const auto term_end = node2term.offset[node_idx + 1]; const auto term_size = term_end - term_begin; const auto padded_term_size = thread_blk_size * ((term_size + thread_blk_size - 1) / thread_blk_size); const auto warp_id = threadIdx.x >> 5; const auto lane_id = threadIdx.x & 31; //The memory for store the JtResidual result of each threads __shared__ float shared_blks[jt_dot_blk_size][thread_blk_size]; __shared__ float shared_warp_tmp[num_warps]; //The memory to perform the reduction __shared__ float reduced_blks[jt_dot_blk_size]; #pragma unroll for (auto iter = threadIdx.x; iter < jt_dot_blk_size; iter += thread_blk_size) { reduced_blks[iter] = 0.0f; } // __syncthreads(); //The warp compute terms in the multiple of 32 (the warp size) for (auto iter = threadIdx.x; iter < padded_term_size; iter += thread_blk_size) { //The global term index bool term_valid = true; //Do computation when the term is inside if(iter < term_size) { //Query the term type const auto term_idx = node2term.term_index[term_begin + iter]; unsigned typed_term_idx; TermType term_type; query_typed_index(term_idx, node2term.term_offset, term_type, typed_term_idx); //Do computation given term_type switch (term_type) { case TermType::DenseImage: { float term_jt_residual[6] = {0}; computeScalarJtResidual(term2jacobian.dense_depth_term, node_idx, typed_term_idx, term_jt_residual); fillScalarJtResidualToSharedBlock(term_jt_residual, shared_blks, constants.DenseDepthSquared()); #if defined(USE_DENSE_IMAGE_DENSITY_TERM) computeScalarJtResidual(term2jacobian.density_map_term, node_idx, typed_term_idx, term_jt_residual); incrementScalarJtResidualToSharedBlock(term_jt_residual, shared_blks, constants.DensitySquared()); #endif } break; case TermType::Smooth: { float term_jt_residual[6] = {0}; computeSmoothJtResidual(term2jacobian.smooth_term, node_idx, typed_term_idx, term_jt_residual); fillScalarJtResidualToSharedBlock(term_jt_residual, shared_blks, constants.SmoothSquared()); } break; case TermType::Foreground: { float term_jt_residual[6] = {0}; computeScalarJtResidual(term2jacobian.foreground_mask_term, node_idx, typed_term_idx, term_jt_residual); fillScalarJtResidualToSharedBlock(term_jt_residual, shared_blks, constants.ForegroundSquared()); } break; case TermType::Feature: { float term_jt_residual[6] = {0}; computePoint2PointJtResidual(term2jacobian.sparse_feature_term, node_idx, typed_term_idx, term_jt_residual); fillScalarJtResidualToSharedBlock(term_jt_residual, shared_blks, constants.SparseFeatureSquared()); } break; default: term_valid = false; break; } } //Do a reduction to reduced_men for (int i = 0; i < jt_dot_blk_size; i++) { float data = (iter < term_size && term_valid) ? 
shared_blks[i][threadIdx.x] : 0.0f; data = warp_scan(data); if (lane_id == warpSize - 1) { shared_warp_tmp[warp_id] = data; } __syncthreads(); data = threadIdx.x < num_warps ? shared_warp_tmp[threadIdx.x] : 0.0f; data = warp_scan(data); if(threadIdx.x == warpSize - 1) { reduced_blks[i] += data; } //Sync again? // __syncthreads(); } } //All the terms that contribute to this value is done, store to global memory if(threadIdx.x < jt_dot_blk_size) jt_residual[jt_dot_blk_size * node_idx + threadIdx.x] = reduced_blks[threadIdx.x]; } __global__ void computeJtResidualWithIndexGlobalIterationKernel( const Node2TermsIndex::Node2TermMap node2term, const Term2JacobianMaps term2jacobian, float* jt_residual, const PenaltyConstants constants = PenaltyConstants() ) { const auto node_idx = blockIdx.x; const auto term_begin = node2term.offset[node_idx]; const auto term_end = node2term.offset[node_idx + 1]; const auto term_size = term_end - term_begin; const auto padded_term_size = thread_blk_size * ((term_size + thread_blk_size - 1) / thread_blk_size); const auto warp_id = threadIdx.x >> 5; const auto lane_id = threadIdx.x & 31; //The memory for store the JtResidual result of each threads __shared__ float shared_blks[jt_dot_blk_size][thread_blk_size]; __shared__ float shared_warp_tmp[num_warps]; //The memory to perform the reduction __shared__ float reduced_blks[jt_dot_blk_size]; #pragma unroll for (auto iter = threadIdx.x; iter < jt_dot_blk_size; iter += thread_blk_size) { reduced_blks[iter] = 0.0f; } // __syncthreads(); //The warp compute terms in the multiple of 32 (the warp size) for (auto iter = threadIdx.x; iter < padded_term_size; iter += thread_blk_size) { //The global term index bool term_valid = true; //Do computation when the term is inside if (iter < term_size) { //Query the term type const auto term_idx = node2term.term_index[term_begin + iter]; unsigned typed_term_idx; TermType term_type; query_typed_index(term_idx, node2term.term_offset, term_type, typed_term_idx); //Do computation given term_type switch (term_type) { case TermType::DenseImage: { float term_jt_residual[6] = { 0 }; computeScalarJtResidual(term2jacobian.dense_depth_term, node_idx, typed_term_idx, term_jt_residual); fillScalarJtResidualToSharedBlock(term_jt_residual, shared_blks, constants.DenseDepthSquared()); } break; case TermType::Smooth: { float term_jt_residual[6] = { 0 }; computeSmoothJtResidual(term2jacobian.smooth_term, node_idx, typed_term_idx, term_jt_residual); fillScalarJtResidualToSharedBlock(term_jt_residual, shared_blks, constants.SmoothSquared()); } break; case TermType::Foreground: { float term_jt_residual[6] = { 0 }; computeScalarJtResidual(term2jacobian.foreground_mask_term, node_idx, typed_term_idx, term_jt_residual); fillScalarJtResidualToSharedBlock(term_jt_residual, shared_blks, constants.ForegroundSquared()); } break; case TermType::Feature: { float term_jt_residual[6] = { 0 }; computePoint2PointJtResidual(term2jacobian.sparse_feature_term, node_idx, typed_term_idx, term_jt_residual); fillScalarJtResidualToSharedBlock(term_jt_residual, shared_blks, constants.SparseFeatureSquared()); } break; default: term_valid = false; break; } } //Do a reduction to reduced_men // __syncthreads(); for (int i = 0; i < jt_dot_blk_size; i++) { float data = (iter < term_size && term_valid) ? shared_blks[i][threadIdx.x] : 0.0f; data = warp_scan(data); if (lane_id == warpSize - 1) { shared_warp_tmp[warp_id] = data; } __syncthreads(); data = threadIdx.x < num_warps ? 
shared_warp_tmp[threadIdx.x] : 0.0f; data = warp_scan(data); if (threadIdx.x == warpSize - 1) { reduced_blks[i] += data; } //Sync again? // __syncthreads(); } } //All the terms that contribute to this value is done, store to global memory if (threadIdx.x < jt_dot_blk_size) jt_residual[jt_dot_blk_size * node_idx + threadIdx.x] = reduced_blks[threadIdx.x]; } } // namespace device } // namespace surfelwarp //Compute the Jt.dot(residual) using the index from node to term void surfelwarp::PreconditionerRhsBuilder::ComputeJtResidualIndexed(hipStream_t stream) { const auto num_nodes = m_node2term_map.offset.Size() - 1; m_jt_residual.ResizeArrayOrException(num_nodes * device::jt_dot_blk_size); dim3 blk(device::thread_blk_size); dim3 grid(num_nodes); hipLaunchKernelGGL(( device::computeJtResidualWithIndexKernel), dim3(grid), dim3(blk), 0, stream, m_node2term_map, m_term2jacobian_map, m_jt_residual.Ptr(), m_penalty_constants ); } //The interface distingish between the use of local and global interface void surfelwarp::PreconditionerRhsBuilder::ComputeJtResidualGlobalIteration(hipStream_t stream) { const auto num_nodes = m_node2term_map.offset.Size() - 1; m_jt_residual.ResizeArrayOrException(num_nodes * device::jt_dot_blk_size); dim3 blk(device::thread_blk_size); dim3 grid(num_nodes); hipLaunchKernelGGL(( device::computeJtResidualWithIndexGlobalIterationKernel), dim3(grid), dim3(blk), 0, stream, m_node2term_map, m_term2jacobian_map, m_jt_residual.Ptr(), m_penalty_constants ); //Sync and check error #if defined(CUDA_DEBUG_SYNC_CHECK) cudaSafeCall(hipStreamSynchronize(stream)); cudaSafeCall(hipGetLastError()); #endif } void surfelwarp::PreconditionerRhsBuilder::ComputeJtResidualLocalIteration(hipStream_t stream) { ComputeJtResidualIndexed(stream); //Sync and check error #if defined(CUDA_DEBUG_SYNC_CHECK) cudaSafeCall(hipStreamSynchronize(stream)); cudaSafeCall(hipGetLastError()); #endif }
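The solver code above optionally synchronizes the stream and queries the last error after each launch (the CUDA_DEBUG_SYNC_CHECK blocks). A minimal sketch of that style of check against the plain CUDA runtime, with a hypothetical helper:

#include <cuda_runtime.h>
#include <cstdio>
#include <cstdlib>

// Hypothetical helper: synchronize a stream, then abort on any pending error.
inline void checkKernel(cudaStream_t stream, const char* what) {
    cudaError_t err = cudaStreamSynchronize(stream);
    if (err == cudaSuccess) err = cudaGetLastError();
    if (err != cudaSuccess) {
        std::fprintf(stderr, "%s failed: %s\n", what, cudaGetErrorString(err));
        std::abort();
    }
}

// Usage after a launch on `stream`:
//   myKernel<<<grid, block, 0, stream>>>(...);
//   checkKernel(stream, "myKernel");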
3a93d138802b626e60d9bcd590a180288a0ecd47.cu
#include "common/device_intrinsics.h" #include "common/global_configs.h" #include "core/warp_solver/PenaltyConstants.h" #include "core/warp_solver/geometry_icp_jacobian.cuh" #include "core/warp_solver/apply_jt_dot.cuh" #include "core/warp_solver/PreconditionerRhsBuilder.h" namespace surfelwarp { namespace device { enum { warp_size = 32, num_warps = 8, thread_blk_size = num_warps * warp_size, }; __device__ __forceinline__ void computeScalarJtResidual( const ScalarCostTerm2Jacobian& term2jacobian, unsigned node_idx, unsigned typed_term_idx, float jt_residual[jt_dot_blk_size] ) { const float* residual = term2jacobian.residual_array.RawPtr() + typed_term_idx; computeScalarJacobianTransposeDot(term2jacobian, residual, node_idx, typed_term_idx, jt_residual); } __device__ __forceinline__ void computeSmoothJtResidual( const NodeGraphSmoothTerm2Jacobian& term2jacobian, unsigned node_idx, unsigned typed_term, float jt_residual[jt_dot_blk_size] ) { const ushort2 node_ij = term2jacobian.node_graph[typed_term]; const auto Ti_xj = term2jacobian.Ti_xj[typed_term]; const auto Tj_xj = term2jacobian.Tj_xj[typed_term]; const auto validity = term2jacobian.validity_indicator[typed_term]; const bool is_node_i = (node_idx == node_ij.x); if(validity == 0) { #pragma unroll for(auto i = 0; i < jt_dot_blk_size; i++) jt_residual[i] = 0.0f; return; } computeSmoothTermJtResidual(Ti_xj, Tj_xj, is_node_i, jt_residual); } __device__ __forceinline__ void computeSmoothJtResidualOnline( const NodeGraphSmoothTerm2Jacobian& term2jacobian, unsigned node_idx, unsigned typed_term, float jt_residual[jt_dot_blk_size] ) { const ushort2 node_ij = term2jacobian.node_graph[typed_term]; const auto xi = term2jacobian.reference_node_coords[node_ij.x]; const auto xj = term2jacobian.reference_node_coords[node_ij.y]; DualQuaternion dq_i = term2jacobian.node_se3[node_ij.x]; DualQuaternion dq_j = term2jacobian.node_se3[node_ij.y]; const auto validity = term2jacobian.validity_indicator[typed_term]; const mat34 Ti = dq_i.se3_matrix(); const mat34 Tj = dq_j.se3_matrix(); const bool is_node_i = (node_idx == node_ij.x); if(validity == 0) { #pragma unroll for(auto i = 0; i < jt_dot_blk_size; i++) jt_residual[i] = 0.0f; return; } computeSmoothTermJtResidual(xj, Ti, Tj, is_node_i, jt_residual); } __device__ __forceinline__ void computePoint2PointJtResidual( const Point2PointICPTerm2Jacobian& term2jacobian, unsigned node_idx, unsigned typed_term_idx, float jt_residual[jt_dot_blk_size] ) { const float4 target_vertex = term2jacobian.target_vertex[typed_term_idx]; //const float4 reference_vertex = term2jacobian.reference_vertex[typed_term_idx]; const ushort4 knn = term2jacobian.knn[typed_term_idx]; const float4 knn_weight = term2jacobian.knn_weight[typed_term_idx]; const float4 warped_vertex = term2jacobian.warped_vertex[typed_term_idx]; computePoint2PointJtResidual(target_vertex, warped_vertex, jt_residual); //Multiple with the weight const auto offset = 0 * (node_idx == knn.x) + 1 * (node_idx == knn.y) + 2 * (node_idx == knn.z) + 3 * (node_idx == knn.w); const float this_weight = ((const float*)(&knn_weight))[offset]; for(auto i = 0; i < jt_dot_blk_size; i++) { //Note that the residual do not need to be augment, only jacobian should be multiplied with weight jt_residual[i] *= this_weight; } } __device__ __forceinline__ void fillScalarJtResidualToSharedBlock( const float jt_redisual[jt_dot_blk_size], float shared_blks[jt_dot_blk_size][thread_blk_size], const float weight_square = 1.0f ) { #pragma unroll for(auto i = 0; i < jt_dot_blk_size; i++) { 
shared_blks[i][threadIdx.x] = - weight_square * jt_redisual[i]; } } __device__ __forceinline__ void incrementScalarJtResidualToSharedBlock( const float jt_redisual[jt_dot_blk_size], float shared_blks[jt_dot_blk_size][thread_blk_size], const float weight_square = 1.0f ) { #pragma unroll for (auto i = 0; i < jt_dot_blk_size; i++) { shared_blks[i][threadIdx.x] += (-weight_square * jt_redisual[i]); } } __global__ void computeJtResidualWithIndexKernel( const Node2TermsIndex::Node2TermMap node2term, const Term2JacobianMaps term2jacobian, float* jt_residual, const PenaltyConstants constants = PenaltyConstants() ) { const auto node_idx = blockIdx.x; const auto term_begin = node2term.offset[node_idx]; const auto term_end = node2term.offset[node_idx + 1]; const auto term_size = term_end - term_begin; const auto padded_term_size = thread_blk_size * ((term_size + thread_blk_size - 1) / thread_blk_size); const auto warp_id = threadIdx.x >> 5; const auto lane_id = threadIdx.x & 31; //The memory for store the JtResidual result of each threads __shared__ float shared_blks[jt_dot_blk_size][thread_blk_size]; __shared__ float shared_warp_tmp[num_warps]; //The memory to perform the reduction __shared__ float reduced_blks[jt_dot_blk_size]; #pragma unroll for (auto iter = threadIdx.x; iter < jt_dot_blk_size; iter += thread_blk_size) { reduced_blks[iter] = 0.0f; } // __syncthreads(); //The warp compute terms in the multiple of 32 (the warp size) for (auto iter = threadIdx.x; iter < padded_term_size; iter += thread_blk_size) { //The global term index bool term_valid = true; //Do computation when the term is inside if(iter < term_size) { //Query the term type const auto term_idx = node2term.term_index[term_begin + iter]; unsigned typed_term_idx; TermType term_type; query_typed_index(term_idx, node2term.term_offset, term_type, typed_term_idx); //Do computation given term_type switch (term_type) { case TermType::DenseImage: { float term_jt_residual[6] = {0}; computeScalarJtResidual(term2jacobian.dense_depth_term, node_idx, typed_term_idx, term_jt_residual); fillScalarJtResidualToSharedBlock(term_jt_residual, shared_blks, constants.DenseDepthSquared()); #if defined(USE_DENSE_IMAGE_DENSITY_TERM) computeScalarJtResidual(term2jacobian.density_map_term, node_idx, typed_term_idx, term_jt_residual); incrementScalarJtResidualToSharedBlock(term_jt_residual, shared_blks, constants.DensitySquared()); #endif } break; case TermType::Smooth: { float term_jt_residual[6] = {0}; computeSmoothJtResidual(term2jacobian.smooth_term, node_idx, typed_term_idx, term_jt_residual); fillScalarJtResidualToSharedBlock(term_jt_residual, shared_blks, constants.SmoothSquared()); } break; case TermType::Foreground: { float term_jt_residual[6] = {0}; computeScalarJtResidual(term2jacobian.foreground_mask_term, node_idx, typed_term_idx, term_jt_residual); fillScalarJtResidualToSharedBlock(term_jt_residual, shared_blks, constants.ForegroundSquared()); } break; case TermType::Feature: { float term_jt_residual[6] = {0}; computePoint2PointJtResidual(term2jacobian.sparse_feature_term, node_idx, typed_term_idx, term_jt_residual); fillScalarJtResidualToSharedBlock(term_jt_residual, shared_blks, constants.SparseFeatureSquared()); } break; default: term_valid = false; break; } } //Do a reduction to reduced_men for (int i = 0; i < jt_dot_blk_size; i++) { float data = (iter < term_size && term_valid) ? 
shared_blks[i][threadIdx.x] : 0.0f; data = warp_scan(data); if (lane_id == warpSize - 1) { shared_warp_tmp[warp_id] = data; } __syncthreads(); data = threadIdx.x < num_warps ? shared_warp_tmp[threadIdx.x] : 0.0f; data = warp_scan(data); if(threadIdx.x == warpSize - 1) { reduced_blks[i] += data; } //Sync again? // __syncthreads(); } } //All the terms that contribute to this value is done, store to global memory if(threadIdx.x < jt_dot_blk_size) jt_residual[jt_dot_blk_size * node_idx + threadIdx.x] = reduced_blks[threadIdx.x]; } __global__ void computeJtResidualWithIndexGlobalIterationKernel( const Node2TermsIndex::Node2TermMap node2term, const Term2JacobianMaps term2jacobian, float* jt_residual, const PenaltyConstants constants = PenaltyConstants() ) { const auto node_idx = blockIdx.x; const auto term_begin = node2term.offset[node_idx]; const auto term_end = node2term.offset[node_idx + 1]; const auto term_size = term_end - term_begin; const auto padded_term_size = thread_blk_size * ((term_size + thread_blk_size - 1) / thread_blk_size); const auto warp_id = threadIdx.x >> 5; const auto lane_id = threadIdx.x & 31; //The memory for store the JtResidual result of each threads __shared__ float shared_blks[jt_dot_blk_size][thread_blk_size]; __shared__ float shared_warp_tmp[num_warps]; //The memory to perform the reduction __shared__ float reduced_blks[jt_dot_blk_size]; #pragma unroll for (auto iter = threadIdx.x; iter < jt_dot_blk_size; iter += thread_blk_size) { reduced_blks[iter] = 0.0f; } // __syncthreads(); //The warp compute terms in the multiple of 32 (the warp size) for (auto iter = threadIdx.x; iter < padded_term_size; iter += thread_blk_size) { //The global term index bool term_valid = true; //Do computation when the term is inside if (iter < term_size) { //Query the term type const auto term_idx = node2term.term_index[term_begin + iter]; unsigned typed_term_idx; TermType term_type; query_typed_index(term_idx, node2term.term_offset, term_type, typed_term_idx); //Do computation given term_type switch (term_type) { case TermType::DenseImage: { float term_jt_residual[6] = { 0 }; computeScalarJtResidual(term2jacobian.dense_depth_term, node_idx, typed_term_idx, term_jt_residual); fillScalarJtResidualToSharedBlock(term_jt_residual, shared_blks, constants.DenseDepthSquared()); } break; case TermType::Smooth: { float term_jt_residual[6] = { 0 }; computeSmoothJtResidual(term2jacobian.smooth_term, node_idx, typed_term_idx, term_jt_residual); fillScalarJtResidualToSharedBlock(term_jt_residual, shared_blks, constants.SmoothSquared()); } break; case TermType::Foreground: { float term_jt_residual[6] = { 0 }; computeScalarJtResidual(term2jacobian.foreground_mask_term, node_idx, typed_term_idx, term_jt_residual); fillScalarJtResidualToSharedBlock(term_jt_residual, shared_blks, constants.ForegroundSquared()); } break; case TermType::Feature: { float term_jt_residual[6] = { 0 }; computePoint2PointJtResidual(term2jacobian.sparse_feature_term, node_idx, typed_term_idx, term_jt_residual); fillScalarJtResidualToSharedBlock(term_jt_residual, shared_blks, constants.SparseFeatureSquared()); } break; default: term_valid = false; break; } } //Do a reduction to reduced_men // __syncthreads(); for (int i = 0; i < jt_dot_blk_size; i++) { float data = (iter < term_size && term_valid) ? shared_blks[i][threadIdx.x] : 0.0f; data = warp_scan(data); if (lane_id == warpSize - 1) { shared_warp_tmp[warp_id] = data; } __syncthreads(); data = threadIdx.x < num_warps ? 
shared_warp_tmp[threadIdx.x] : 0.0f; data = warp_scan(data); if (threadIdx.x == warpSize - 1) { reduced_blks[i] += data; } //Sync again? // __syncthreads(); } } //All the terms that contribute to this value is done, store to global memory if (threadIdx.x < jt_dot_blk_size) jt_residual[jt_dot_blk_size * node_idx + threadIdx.x] = reduced_blks[threadIdx.x]; } } // namespace device } // namespace surfelwarp //Compute the Jt.dot(residual) using the index from node to term void surfelwarp::PreconditionerRhsBuilder::ComputeJtResidualIndexed(cudaStream_t stream) { const auto num_nodes = m_node2term_map.offset.Size() - 1; m_jt_residual.ResizeArrayOrException(num_nodes * device::jt_dot_blk_size); dim3 blk(device::thread_blk_size); dim3 grid(num_nodes); device::computeJtResidualWithIndexKernel<<<grid, blk, 0, stream>>>( m_node2term_map, m_term2jacobian_map, m_jt_residual.Ptr(), m_penalty_constants ); } //The interface distingish between the use of local and global interface void surfelwarp::PreconditionerRhsBuilder::ComputeJtResidualGlobalIteration(cudaStream_t stream) { const auto num_nodes = m_node2term_map.offset.Size() - 1; m_jt_residual.ResizeArrayOrException(num_nodes * device::jt_dot_blk_size); dim3 blk(device::thread_blk_size); dim3 grid(num_nodes); device::computeJtResidualWithIndexGlobalIterationKernel<<<grid, blk, 0, stream>>>( m_node2term_map, m_term2jacobian_map, m_jt_residual.Ptr(), m_penalty_constants ); //Sync and check error #if defined(CUDA_DEBUG_SYNC_CHECK) cudaSafeCall(cudaStreamSynchronize(stream)); cudaSafeCall(cudaGetLastError()); #endif } void surfelwarp::PreconditionerRhsBuilder::ComputeJtResidualLocalIteration(cudaStream_t stream) { ComputeJtResidualIndexed(stream); //Sync and check error #if defined(CUDA_DEBUG_SYNC_CHECK) cudaSafeCall(cudaStreamSynchronize(stream)); cudaSafeCall(cudaGetLastError()); #endif }
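The two kernels above accumulate each term's Jt·r contribution per thread and then collapse the per-thread values with warp_scan plus a small per-warp staging buffer in shared memory. Below is a minimal standalone sketch of that block-level reduction pattern; warp_sum/block_sum/block_sums are illustrative names (not surfelwarp's actual warp_scan), and a block of exactly NUM_WARPS * 32 threads is assumed.

#include <cuda_runtime.h>

// Illustrative warp-shuffle sum: after the loop, lane 0 of each warp holds the warp total.
__device__ float warp_sum(float v)
{
    for (int offset = 16; offset > 0; offset >>= 1)
        v += __shfl_down_sync(0xffffffffu, v, offset);
    return v;
}

// Illustrative block-wide sum built the same way as the kernels above:
// per-warp partials are staged in shared memory, then the first warp reduces them.
template<int NUM_WARPS>
__device__ float block_sum(float v)
{
    __shared__ float warp_partials[NUM_WARPS];
    const int lane = threadIdx.x & 31;
    const int warp = threadIdx.x >> 5;

    v = warp_sum(v);
    if (lane == 0)
        warp_partials[warp] = v;
    __syncthreads();

    v = (threadIdx.x < NUM_WARPS) ? warp_partials[threadIdx.x] : 0.0f;
    if (warp == 0)
        v = warp_sum(v);
    return v; // the block total is valid in thread 0
}

// Example use: each block sums its slice of x into block_totals[blockIdx.x].
template<int NUM_WARPS>
__global__ void block_sums(const float* x, float* block_totals, int n)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    const float v = (i < n) ? x[i] : 0.0f;
    const float total = block_sum<NUM_WARPS>(v);
    if (threadIdx.x == 0)
        block_totals[blockIdx.x] = total;
}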
092702dd4d8cbaf54b7ed4e3462bbe39fbf01ab4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2009-2019 The Regents of the University of Michigan // This file is part of the HOOMD-blue project, released under the BSD 3-Clause License. // Maintainer: mphoward /*! * \file mpcd/CellThermoComputeGPU.cu * \brief Explicitly instantiates reduction operators and declares kernel drivers * for mpcd::CellThermoComputeGPU. */ #include "CellThermoComputeGPU.cuh" #include "CellThermoTypes.h" #include "CellCommunicator.cuh" #include "ReductionOperators.h" #include "hoomd/WarpTools.cuh" namespace mpcd { namespace gpu { namespace kernel { //! Begins the cell thermo compute by summing cell quantities on outer cells /*! * \param d_cell_vel Velocity and mass per cell (output) * \param d_cell_energy Energy, temperature, number of particles per cell (output) * \param d_cells Cell indexes to compute * \param d_cell_np Number of particles per cell * \param d_cell_list MPCD cell list * \param cli Indexer into the cell list * \param d_vel MPCD particle velocities * \param N_mpcd Number of MPCD particles * \param mpcd_mass Mass of MPCD particle * \param d_embed_vel Embedded particle velocity * \param d_embed_idx Embedded particle indexes * \param num_cells Number of cells to compute for * * \tparam need_energy If true, compute the cell-level energy properties * \tparam tpp Number of threads to use per cell * * \b Implementation details: * Using \a tpp threads per cell, the cell properties are accumulated into \a d_cell_vel * and \a d_cell_energy. Shuffle-based intrinsics are used to reduce the accumulated * properties per-cell, and the first thread for each cell writes the result into * global memory. */ template<bool need_energy, unsigned int tpp> __global__ void begin_cell_thermo(double4 *d_cell_vel, double3 *d_cell_energy, const unsigned int *d_cells, const unsigned int *d_cell_np, const unsigned int *d_cell_list, const Index2D cli, const Scalar4 *d_vel, const unsigned int N_mpcd, const Scalar mpcd_mass, const Scalar4 *d_embed_vel, const unsigned int *d_embed_idx, const unsigned int num_cells) { // tpp threads per cell unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= tpp * num_cells) return; const unsigned int cell_id = d_cells[idx / tpp]; const unsigned int np = d_cell_np[cell_id]; double4 momentum = make_double4(0.0, 0.0, 0.0, 0.0); double ke(0.0); for (unsigned int offset = (idx % tpp); offset < np; offset += tpp) { // Load particle data const unsigned int cur_p = d_cell_list[cli(offset, cell_id)]; double3 vel_i; double mass_i; if (cur_p < N_mpcd) { Scalar4 vel_cell = d_vel[cur_p]; vel_i = make_double3(vel_cell.x, vel_cell.y, vel_cell.z); mass_i = mpcd_mass; } else { Scalar4 vel_m = d_embed_vel[d_embed_idx[cur_p - N_mpcd]]; vel_i = make_double3(vel_m.x, vel_m.y, vel_m.z); mass_i = vel_m.w; } // add momentum momentum.x += mass_i * vel_i.x; momentum.y += mass_i * vel_i.y; momentum.z += mass_i * vel_i.z; momentum.w += mass_i; // also compute ke of the particle if (need_energy) ke += (double)(0.5) * mass_i * (vel_i.x * vel_i.x + vel_i.y * vel_i.y + vel_i.z * vel_i.z); } // reduce quantities down into the 0-th lane per logical warp if (tpp > 1) { hoomd::detail::WarpReduce<Scalar, tpp> reducer; momentum.x = reducer.Sum(momentum.x); momentum.y = reducer.Sum(momentum.y); momentum.z = reducer.Sum(momentum.z); momentum.w = reducer.Sum(momentum.w); if (need_energy) ke = reducer.Sum(ke); } // 0-th lane in each warp writes the result if (idx % tpp == 0) { d_cell_vel[cell_id] = 
make_double4(momentum.x, momentum.y, momentum.z, momentum.w); if (need_energy) d_cell_energy[cell_id] = make_double3(ke, 0.0, __int_as_double(np)); } } //! Finalizes the cell thermo compute by properly averaging cell quantities /*! * \param d_cell_vel Cell velocity and masses * \param d_cell_energy Cell energy and temperature * \param d_cells Cells to compute for * \param Ncell Number of cells * \param n_dimensions Number of dimensions in system * * \tparam need_energy If true, compute the cell-level energy properties. * * \b Implementation details: * Using one thread per cell, the properties are averaged by mass, number of particles, * etc. The temperature is computed from the cell kinetic energy. */ template<bool need_energy> __global__ void end_cell_thermo(double4 *d_cell_vel, double3 *d_cell_energy, const unsigned int *d_cells, const unsigned int Ncell, const unsigned int n_dimensions) { // one thread per cell unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= Ncell) return; const unsigned int cell_id = d_cells[idx]; // average cell properties if the cell has mass const double4 cell_vel = d_cell_vel[cell_id]; double3 vel_cm = make_double3(cell_vel.x, cell_vel.y, cell_vel.z); const double mass = cell_vel.w; if (mass > 0.) { // average velocity is only defined when there is some mass in the cell vel_cm.x /= mass; vel_cm.y /= mass; vel_cm.z /= mass; } d_cell_vel[cell_id] = make_double4(vel_cm.x, vel_cm.y, vel_cm.z, mass); if (need_energy) { const double3 cell_energy = d_cell_energy[cell_id]; const double ke = cell_energy.x; double temp(0.0); const unsigned int np = __double_as_int(cell_energy.z); // temperature is only defined for 2 or more particles if (np > 1) { const double ke_cm = 0.5 * mass * (vel_cm.x*vel_cm.x + vel_cm.y*vel_cm.y + vel_cm.z*vel_cm.z); temp = 2. * (ke - ke_cm) / (n_dimensions * (np-1)); } d_cell_energy[cell_id] = make_double3(ke, temp, __int_as_double(np)); } } //! Computes the cell thermo for inner cells /*! * \param d_cell_vel Velocity and mass per cell (output) * \param d_cell_energy Energy, temperature, number of particles per cell (output) * \param ci Cell indexer * \param inner_ci Cell indexer for the inner cells * \param offset Offset of \a inner_ci from \a ci * \param d_cell_np Number of particles per cell * \param d_cell_list MPCD cell list * \param cli Indexer into the cell list * \param d_vel MPCD particle velocities * \param N_mpcd Number of MPCD particles * \param mpcd_mass Mass of MPCD particle * \param d_embed_vel Embedded particle velocity * \param d_embed_idx Embedded particle indexes * \param n_dimensions System dimensionality * * \tparam need_energy If true, compute the cell-level energy properties. * \tparam tpp Number of threads to use per cell * * \b Implementation details: * Using \a tpp threads per cell, the cell properties are accumulated into \a d_cell_vel * and \a d_cell_energy. Shuffle-based intrinsics are used to reduce the accumulated * properties per-cell, and the first thread for each cell writes the result into * global memory. The properties are properly normalized * * See mpcd::gpu::kernel::begin_cell_thermo for an almost identical implementation * without the normalization at the end, which is used for the outer cells. 
*/ template<bool need_energy, unsigned int tpp> __global__ void inner_cell_thermo(double4 *d_cell_vel, double3 *d_cell_energy, const Index3D ci, const Index3D inner_ci, const uint3 offset, const unsigned int *d_cell_np, const unsigned int *d_cell_list, const Index2D cli, const Scalar4 *d_vel, const unsigned int N_mpcd, const Scalar mpcd_mass, const Scalar4 *d_embed_vel, const unsigned int *d_embed_idx, const unsigned int n_dimensions) { // tpp threads per cell unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= tpp * inner_ci.getNumElements()) return; // reinterpret the thread id as a cell by first mapping the thread into the inner indexer, // shifting by the offset of the inner indexer from the full indexer, and then compressing // back into a 1D cell id const uint3 inner_cell = inner_ci.getTriple(idx/tpp); const uint3 cell = make_uint3(inner_cell.x + offset.x, inner_cell.y + offset.y, inner_cell.z + offset.z); const unsigned int cell_id = ci(cell.x, cell.y, cell.z); const unsigned int np = d_cell_np[cell_id]; double4 momentum = make_double4(0.0, 0.0, 0.0, 0.0); double ke(0.0); for (unsigned int offset = (idx % tpp); offset < np; offset += tpp) { // Load particle data const unsigned int cur_p = d_cell_list[cli(offset, cell_id)]; double3 vel_i; double mass_i; if (cur_p < N_mpcd) { Scalar4 vel_cell = d_vel[cur_p]; vel_i = make_double3(vel_cell.x, vel_cell.y, vel_cell.z); mass_i = mpcd_mass; } else { Scalar4 vel_m = d_embed_vel[d_embed_idx[cur_p - N_mpcd]]; vel_i = make_double3(vel_m.x, vel_m.y, vel_m.z); mass_i = vel_m.w; } // add momentum momentum.x += mass_i * vel_i.x; momentum.y += mass_i * vel_i.y; momentum.z += mass_i * vel_i.z; momentum.w += mass_i; // also compute ke of the particle if (need_energy) ke += 0.5 * mass_i * (vel_i.x * vel_i.x + vel_i.y * vel_i.y + vel_i.z * vel_i.z); } // reduce quantities down into the 0-th lane per logical warp if (tpp > 1) { hoomd::detail::WarpReduce<Scalar, tpp> reducer; momentum.x = reducer.Sum(momentum.x); momentum.y = reducer.Sum(momentum.y); momentum.z = reducer.Sum(momentum.z); momentum.w = reducer.Sum(momentum.w); if (need_energy) ke = reducer.Sum(ke); } // 0-th lane in each warp writes the result if (idx % tpp == 0) { const double mass = momentum.w; double3 vel_cm = make_double3(0.0,0.0,0.0); if (mass > 0.) { vel_cm.x = momentum.x / mass; vel_cm.y = momentum.y / mass; vel_cm.z = momentum.z / mass; } d_cell_vel[cell_id] = make_double4(vel_cm.x, vel_cm.y, vel_cm.z, mass); if (need_energy) { double temp(0.0); if (np > 1) { const double ke_cm = 0.5 * mass * (vel_cm.x*vel_cm.x + vel_cm.y*vel_cm.y + vel_cm.z*vel_cm.z); temp = 2. * (ke - ke_cm) / (n_dimensions * (np-1)); } d_cell_energy[cell_id] = make_double3(ke, temp, __int_as_double(np)); } } } /*! * \param d_tmp_thermo Temporary cell packed thermo element * \param d_cell_vel Cell velocity to reduce * \param d_cell_energy Cell energy to reduce * \param tmp_ci Temporary cell indexer for cells undergoing reduction * \param ci Cell indexer Regular cell list indexer * * \tparam need_energy If true, compute the cell-level energy properties. * * \b Implementation details: * Using one thread per \a temporary cell, the cell properties are normalized * in a way suitable for reduction of net properties, e.g. the cell velocities * are converted to momentum. The temperature is set to the cell energy, and a * flag is set to 1 or 0 to indicate whether this cell has an energy that should * be used in averaging the total temperature. 
*/ template<bool need_energy> __global__ void stage_net_cell_thermo(mpcd::detail::cell_thermo_element *d_tmp_thermo, const double4 *d_cell_vel, const double3 *d_cell_energy, const Index3D tmp_ci, const Index3D ci) { // one thread per cell unsigned int tmp_idx = blockIdx.x * blockDim.x + threadIdx.x; if (tmp_idx >= tmp_ci.getNumElements()) return; // use the temporary cell indexer to map to a cell, then use the real cell indexer to // get the read index uint3 cell = tmp_ci.getTriple(tmp_idx); const unsigned int idx = ci(cell.x, cell.y, cell.z); const double4 vel_mass = d_cell_vel[idx]; const double3 vel = make_double3(vel_mass.x, vel_mass.y, vel_mass.z); const double mass = vel_mass.w; mpcd::detail::cell_thermo_element thermo; thermo.momentum = make_double3(mass * vel.x, mass * vel.y, mass * vel.z); if (need_energy) { const double3 cell_energy = d_cell_energy[idx]; thermo.energy = cell_energy.x; if (__double_as_int(cell_energy.z) > 1) { thermo.temperature = cell_energy.y; thermo.flag = 1; } else { thermo.temperature = 0.0; thermo.flag = 0; } } else { thermo.energy = 0.; thermo.temperature = 0.; thermo.flag = 0; } d_tmp_thermo[tmp_idx] = thermo; } } // end namespace kernel //! Templated launcher for multiple threads-per-cell kernel for outer cells /* * \param args Common arguments to thermo kernels * \param d_cells Cell indexes to compute * \param num_cells Number of cells to compute for * \param block_size Number of threads per block * \param tpp Number of threads to use per-cell * * \tparam cur_tpp Number of threads-per-cell for this template instantiation * * Launchers are recursively instantiated at compile-time in order to match the * correct number of threads at runtime. If the templated number of threads matches * the runtime number of threads, then the kernel is launched. Otherwise, the * next template (with threads reduced by a factor of 2) is launched. This * recursion is broken by a specialized template for 0 threads, which does no * work. 
*/ template<unsigned int cur_tpp> inline void launch_begin_cell_thermo(const mpcd::detail::thermo_args_t& args, const unsigned int *d_cells, const unsigned int num_cells, const unsigned int block_size, const unsigned int tpp) { if (cur_tpp == tpp) { if (args.need_energy) { static unsigned int max_block_size_energy = UINT_MAX; if (max_block_size_energy == UINT_MAX) { hipFuncAttributes attr; hipFuncGetAttributes(&attr, (const void*)mpcd::gpu::kernel::begin_cell_thermo<true,cur_tpp>); max_block_size_energy = attr.maxThreadsPerBlock; } unsigned int run_block_size = min(block_size, max_block_size_energy); dim3 grid(cur_tpp*num_cells / run_block_size + 1); hipLaunchKernelGGL(( mpcd::gpu::kernel::begin_cell_thermo<true,cur_tpp>), dim3(grid), dim3(run_block_size), 0, 0, args.cell_vel, args.cell_energy, d_cells, args.cell_np, args.cell_list, args.cli, args.vel, args.N_mpcd, args.mass, args.embed_vel, args.embed_idx, num_cells); } else { static unsigned int max_block_size_noenergy = UINT_MAX; if (max_block_size_noenergy == UINT_MAX) { hipFuncAttributes attr; hipFuncGetAttributes(&attr, (const void*)mpcd::gpu::kernel::begin_cell_thermo<false,cur_tpp>); max_block_size_noenergy = attr.maxThreadsPerBlock; } unsigned int run_block_size = min(block_size, max_block_size_noenergy); dim3 grid(cur_tpp*num_cells / run_block_size + 1); hipLaunchKernelGGL(( mpcd::gpu::kernel::begin_cell_thermo<false,cur_tpp>), dim3(grid), dim3(run_block_size), 0, 0, args.cell_vel, args.cell_energy, d_cells, args.cell_np, args.cell_list, args.cli, args.vel, args.N_mpcd, args.mass, args.embed_vel, args.embed_idx, num_cells); } } else { launch_begin_cell_thermo<cur_tpp/2>(args, d_cells, num_cells, block_size, tpp); } } //! Template specialization to break recursion template<> inline void launch_begin_cell_thermo<0>(const mpcd::detail::thermo_args_t& args, const unsigned int *d_cells, const unsigned int num_cells, const unsigned int block_size, const unsigned int tpp) { } /* * \param args Common arguments to thermo kernels * \param d_cells Cell indexes to compute * \param num_cells Number of cells to compute for * \param block_size Number of threads per block * \param tpp Number of threads per cell * * \returns hipSuccess on completion * * \sa mpcd::gpu::launch_begin_cell_thermo * \sa mpcd::gpu::kernel::begin_cell_thermo */ hipError_t begin_cell_thermo(const mpcd::detail::thermo_args_t& args, const unsigned int *d_cells, const unsigned int num_cells, const unsigned int block_size, const unsigned int tpp) { if (num_cells == 0) return hipSuccess; launch_begin_cell_thermo<32>(args, d_cells, num_cells, block_size, tpp); return hipSuccess; } /*! 
* \param d_cell_vel Cell velocity and masses * \param d_cell_energy Cell energy and temperature * \param d_cells Cells to compute for * \param Ncell Number of cells * \param n_dimensions Number of dimensions in system * \param need_energy If true, compute the cell-level energy properties * * \returns hipSuccess on completion * * \sa mpcd::gpu::kernel::end_cell_thermo */ hipError_t end_cell_thermo(double4 *d_cell_vel, double3 *d_cell_energy, const unsigned int *d_cells, const unsigned int Ncell, const unsigned int n_dimensions, const bool need_energy, const unsigned int block_size) { if (Ncell == 0) return hipSuccess; if (need_energy) { static unsigned int max_block_size_energy = UINT_MAX; if (max_block_size_energy == UINT_MAX) { hipFuncAttributes attr; hipFuncGetAttributes(&attr, (const void*)mpcd::gpu::kernel::end_cell_thermo<true>); max_block_size_energy = attr.maxThreadsPerBlock; } unsigned int run_block_size = min(block_size, max_block_size_energy); dim3 grid(Ncell / run_block_size + 1); hipLaunchKernelGGL(( mpcd::gpu::kernel::end_cell_thermo<true>), dim3(grid), dim3(run_block_size), 0, 0, d_cell_vel, d_cell_energy, d_cells, Ncell, n_dimensions); } else { static unsigned int max_block_size_noenergy = UINT_MAX; if (max_block_size_noenergy == UINT_MAX) { hipFuncAttributes attr; hipFuncGetAttributes(&attr, (const void*)mpcd::gpu::kernel::end_cell_thermo<true>); max_block_size_noenergy = attr.maxThreadsPerBlock; } unsigned int run_block_size = min(block_size, max_block_size_noenergy); dim3 grid(Ncell / run_block_size + 1); hipLaunchKernelGGL(( mpcd::gpu::kernel::end_cell_thermo<false>), dim3(grid), dim3(run_block_size), 0, 0, d_cell_vel, d_cell_energy, d_cells, Ncell, n_dimensions); } return hipSuccess; } //! Templated launcher for multiple threads-per-cell kernel for inner cells /* * \param args Common arguments to thermo kernels * \param ci Cell indexer * \param inner_ci Cell indexer for the inner cells * \param offset Offset of \a inner_ci from \a ci * \param n_dimensions System dimensionality * \param block_size Number of threads per block * \param tpp Number of threads per cell * * \tparam cur_tpp Number of threads-per-cell for this template instantiation * * Launchers are recursively instantiated at compile-time in order to match the * correct number of threads at runtime. If the templated number of threads matches * the runtime number of threads, then the kernel is launched. Otherwise, the * next template (with threads reduced by a factor of 2) is launched. This * recursion is broken by a specialized template for 0 threads, which does no * work. 
*/ template<unsigned int cur_tpp> inline void launch_inner_cell_thermo(const mpcd::detail::thermo_args_t& args, const Index3D& ci, const Index3D& inner_ci, const uint3& offset, const unsigned int n_dimensions, const unsigned int block_size, const unsigned int tpp) { if (cur_tpp == tpp) { if (args.need_energy) { static unsigned int max_block_size_energy = UINT_MAX; if (max_block_size_energy == UINT_MAX) { hipFuncAttributes attr; hipFuncGetAttributes(&attr, (const void*)mpcd::gpu::kernel::inner_cell_thermo<true,cur_tpp>); max_block_size_energy = attr.maxThreadsPerBlock; } unsigned int run_block_size = min(block_size, max_block_size_energy); dim3 grid(cur_tpp*ci.getNumElements() / run_block_size + 1); hipLaunchKernelGGL(( mpcd::gpu::kernel::inner_cell_thermo<true,cur_tpp>), dim3(grid), dim3(run_block_size), 0, 0, args.cell_vel, args.cell_energy, ci, inner_ci, offset, args.cell_np, args.cell_list, args.cli, args.vel, args.N_mpcd, args.mass, args.embed_vel, args.embed_idx, n_dimensions); } else { static unsigned int max_block_size_noenergy = UINT_MAX; if (max_block_size_noenergy == UINT_MAX) { hipFuncAttributes attr; hipFuncGetAttributes(&attr, (const void*)mpcd::gpu::kernel::inner_cell_thermo<false,cur_tpp>); max_block_size_noenergy = attr.maxThreadsPerBlock; } unsigned int run_block_size = min(block_size, max_block_size_noenergy); dim3 grid(cur_tpp*ci.getNumElements() / run_block_size + 1); hipLaunchKernelGGL(( mpcd::gpu::kernel::inner_cell_thermo<false,cur_tpp>), dim3(grid), dim3(run_block_size), 0, 0, args.cell_vel, args.cell_energy, ci, inner_ci, offset, args.cell_np, args.cell_list, args.cli, args.vel, args.N_mpcd, args.mass, args.embed_vel, args.embed_idx, n_dimensions); } } else { launch_inner_cell_thermo<cur_tpp/2>(args, ci, inner_ci, offset, n_dimensions, block_size, tpp); } } //! Template specialization to break recursion template<> inline void launch_inner_cell_thermo<0>(const mpcd::detail::thermo_args_t& args, const Index3D& ci, const Index3D& inner_ci, const uint3& offset, const unsigned int n_dimensions, const unsigned int block_size, const unsigned int tpp) { } /*! * \param args Common arguments for cell thermo compute * \param ci Cell indexer * \param inner_ci Cell indexer for the inner cells * \param offset Offset of \a inner_ci from \a ci * \param n_dimensions System dimensionality * \param block_size Number of threads per block * \param tpp Number of threads per cell * * \returns hipSuccess on completion * * \sa mpcd::gpu::launch_inner_cell_thermo * \sa mpcd::gpu::kernel::inner_cell_thermo */ hipError_t inner_cell_thermo(const mpcd::detail::thermo_args_t& args, const Index3D& ci, const Index3D& inner_ci, const uint3& offset, const unsigned int n_dimensions, const unsigned int block_size, const unsigned int tpp) { if (inner_ci.getNumElements() == 0) return hipSuccess; launch_inner_cell_thermo<32>(args, ci, inner_ci, offset, n_dimensions, block_size, tpp); return hipSuccess; } /*! 
* \param d_tmp_thermo Temporary cell packed thermo element * \param d_cell_vel Cell velocity to reduce * \param d_cell_energy Cell energy to reduce * \param tmp_ci Temporary cell indexer for cells undergoing reduction * \param ci Cell indexer Regular cell list indexer * \param need_energy If true, compute the cell-level energy properties * \param block_size Number of threads per block * * \returns hipSuccess on completion * * \sa mpcd::gpu::kernel::stage_net_cell_thermo */ hipError_t stage_net_cell_thermo(mpcd::detail::cell_thermo_element *d_tmp_thermo, const double4 *d_cell_vel, const double3 *d_cell_energy, const Index3D& tmp_ci, const Index3D& ci, bool need_energy, const unsigned int block_size) { if (need_energy) { static unsigned int max_block_size_energy = UINT_MAX; if (max_block_size_energy == UINT_MAX) { hipFuncAttributes attr; hipFuncGetAttributes(&attr, (const void*)mpcd::gpu::kernel::stage_net_cell_thermo<true>); max_block_size_energy = attr.maxThreadsPerBlock; } unsigned int run_block_size = min(block_size, max_block_size_energy); dim3 grid(tmp_ci.getNumElements() / run_block_size + 1); hipLaunchKernelGGL(( mpcd::gpu::kernel::stage_net_cell_thermo<true>), dim3(grid), dim3(run_block_size), 0, 0, d_tmp_thermo, d_cell_vel, d_cell_energy, tmp_ci, ci); } else { static unsigned int max_block_size_noenergy = UINT_MAX; if (max_block_size_noenergy == UINT_MAX) { hipFuncAttributes attr; hipFuncGetAttributes(&attr, (const void*)mpcd::gpu::kernel::stage_net_cell_thermo<false>); max_block_size_noenergy = attr.maxThreadsPerBlock; } unsigned int run_block_size = min(block_size, max_block_size_noenergy); dim3 grid(tmp_ci.getNumElements() / run_block_size + 1); hipLaunchKernelGGL(( mpcd::gpu::kernel::stage_net_cell_thermo<false>), dim3(grid), dim3(run_block_size), 0, 0, d_tmp_thermo, d_cell_vel, d_cell_energy, tmp_ci, ci); } return hipSuccess; } /*! * \param d_reduced Cell thermo properties reduced across all cells (output on second call) * \param d_tmp Temporary storage for reduction (output on first call) * \param tmp_bytes Number of bytes allocated for temporary storage (output on first call) * \param d_tmp_thermo Cell thermo properties to reduce * \param Ncell The number of cells to reduce across * * \returns hipSuccess on completion * * \b Implementation details: * CUB DeviceReduce is used to perform the reduction. Hence, this function requires * two calls to perform the reduction. The first call sizes the temporary storage, * which is returned in \a d_tmp and \a tmp_bytes. The caller must then allocate * the required bytes, and call the function a second time. This performs the * reduction and returns the result in \a d_reduced. */ hipError_t reduce_net_cell_thermo(mpcd::detail::cell_thermo_element *d_reduced, void *d_tmp, size_t& tmp_bytes, const mpcd::detail::cell_thermo_element *d_tmp_thermo, const unsigned int Ncell) { HOOMD_CUB::DeviceReduce::Sum(d_tmp, tmp_bytes, d_tmp_thermo, d_reduced, Ncell); return hipSuccess; } //! Explicit template instantiation of pack for cell velocity template hipError_t pack_cell_buffer(typename mpcd::detail::CellVelocityPackOp::element *d_send_buf, const double4 *d_props, const unsigned int *d_send_idx, const mpcd::detail::CellVelocityPackOp op, const unsigned int num_send, unsigned int block_size); //! 
Explicit template instantiation of pack for cell energy template hipError_t pack_cell_buffer(typename mpcd::detail::CellEnergyPackOp::element *d_send_buf, const double3 *d_props, const unsigned int *d_send_idx, const mpcd::detail::CellEnergyPackOp op, const unsigned int num_send, unsigned int block_size); //! Explicit template instantiation of unpack for cell velocity template hipError_t unpack_cell_buffer(double4 *d_props, const unsigned int *d_cells, const unsigned int *d_recv, const unsigned int *d_recv_begin, const unsigned int *d_recv_end, const typename mpcd::detail::CellVelocityPackOp::element *d_recv_buf, const mpcd::detail::CellVelocityPackOp op, const unsigned int num_cells, const unsigned int block_size); //! Explicit template instantiation of unpack for cell energy template hipError_t unpack_cell_buffer(double3 *d_props, const unsigned int *d_cells, const unsigned int *d_recv, const unsigned int *d_recv_begin, const unsigned int *d_recv_end, const typename mpcd::detail::CellEnergyPackOp::element *d_recv_buf, const mpcd::detail::CellEnergyPackOp op, const unsigned int num_cells, const unsigned int block_size); } // end namespace gpu } // end namespace mpcd
092702dd4d8cbaf54b7ed4e3462bbe39fbf01ab4.cu
// Copyright (c) 2009-2019 The Regents of the University of Michigan // This file is part of the HOOMD-blue project, released under the BSD 3-Clause License. // Maintainer: mphoward /*! * \file mpcd/CellThermoComputeGPU.cu * \brief Explicitly instantiates reduction operators and declares kernel drivers * for mpcd::CellThermoComputeGPU. */ #include "CellThermoComputeGPU.cuh" #include "CellThermoTypes.h" #include "CellCommunicator.cuh" #include "ReductionOperators.h" #include "hoomd/WarpTools.cuh" namespace mpcd { namespace gpu { namespace kernel { //! Begins the cell thermo compute by summing cell quantities on outer cells /*! * \param d_cell_vel Velocity and mass per cell (output) * \param d_cell_energy Energy, temperature, number of particles per cell (output) * \param d_cells Cell indexes to compute * \param d_cell_np Number of particles per cell * \param d_cell_list MPCD cell list * \param cli Indexer into the cell list * \param d_vel MPCD particle velocities * \param N_mpcd Number of MPCD particles * \param mpcd_mass Mass of MPCD particle * \param d_embed_vel Embedded particle velocity * \param d_embed_idx Embedded particle indexes * \param num_cells Number of cells to compute for * * \tparam need_energy If true, compute the cell-level energy properties * \tparam tpp Number of threads to use per cell * * \b Implementation details: * Using \a tpp threads per cell, the cell properties are accumulated into \a d_cell_vel * and \a d_cell_energy. Shuffle-based intrinsics are used to reduce the accumulated * properties per-cell, and the first thread for each cell writes the result into * global memory. */ template<bool need_energy, unsigned int tpp> __global__ void begin_cell_thermo(double4 *d_cell_vel, double3 *d_cell_energy, const unsigned int *d_cells, const unsigned int *d_cell_np, const unsigned int *d_cell_list, const Index2D cli, const Scalar4 *d_vel, const unsigned int N_mpcd, const Scalar mpcd_mass, const Scalar4 *d_embed_vel, const unsigned int *d_embed_idx, const unsigned int num_cells) { // tpp threads per cell unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= tpp * num_cells) return; const unsigned int cell_id = d_cells[idx / tpp]; const unsigned int np = d_cell_np[cell_id]; double4 momentum = make_double4(0.0, 0.0, 0.0, 0.0); double ke(0.0); for (unsigned int offset = (idx % tpp); offset < np; offset += tpp) { // Load particle data const unsigned int cur_p = d_cell_list[cli(offset, cell_id)]; double3 vel_i; double mass_i; if (cur_p < N_mpcd) { Scalar4 vel_cell = d_vel[cur_p]; vel_i = make_double3(vel_cell.x, vel_cell.y, vel_cell.z); mass_i = mpcd_mass; } else { Scalar4 vel_m = d_embed_vel[d_embed_idx[cur_p - N_mpcd]]; vel_i = make_double3(vel_m.x, vel_m.y, vel_m.z); mass_i = vel_m.w; } // add momentum momentum.x += mass_i * vel_i.x; momentum.y += mass_i * vel_i.y; momentum.z += mass_i * vel_i.z; momentum.w += mass_i; // also compute ke of the particle if (need_energy) ke += (double)(0.5) * mass_i * (vel_i.x * vel_i.x + vel_i.y * vel_i.y + vel_i.z * vel_i.z); } // reduce quantities down into the 0-th lane per logical warp if (tpp > 1) { hoomd::detail::WarpReduce<Scalar, tpp> reducer; momentum.x = reducer.Sum(momentum.x); momentum.y = reducer.Sum(momentum.y); momentum.z = reducer.Sum(momentum.z); momentum.w = reducer.Sum(momentum.w); if (need_energy) ke = reducer.Sum(ke); } // 0-th lane in each warp writes the result if (idx % tpp == 0) { d_cell_vel[cell_id] = make_double4(momentum.x, momentum.y, momentum.z, momentum.w); if (need_energy) d_cell_energy[cell_id] = 
make_double3(ke, 0.0, __int_as_double(np)); } } //! Finalizes the cell thermo compute by properly averaging cell quantities /*! * \param d_cell_vel Cell velocity and masses * \param d_cell_energy Cell energy and temperature * \param d_cells Cells to compute for * \param Ncell Number of cells * \param n_dimensions Number of dimensions in system * * \tparam need_energy If true, compute the cell-level energy properties. * * \b Implementation details: * Using one thread per cell, the properties are averaged by mass, number of particles, * etc. The temperature is computed from the cell kinetic energy. */ template<bool need_energy> __global__ void end_cell_thermo(double4 *d_cell_vel, double3 *d_cell_energy, const unsigned int *d_cells, const unsigned int Ncell, const unsigned int n_dimensions) { // one thread per cell unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= Ncell) return; const unsigned int cell_id = d_cells[idx]; // average cell properties if the cell has mass const double4 cell_vel = d_cell_vel[cell_id]; double3 vel_cm = make_double3(cell_vel.x, cell_vel.y, cell_vel.z); const double mass = cell_vel.w; if (mass > 0.) { // average velocity is only defined when there is some mass in the cell vel_cm.x /= mass; vel_cm.y /= mass; vel_cm.z /= mass; } d_cell_vel[cell_id] = make_double4(vel_cm.x, vel_cm.y, vel_cm.z, mass); if (need_energy) { const double3 cell_energy = d_cell_energy[cell_id]; const double ke = cell_energy.x; double temp(0.0); const unsigned int np = __double_as_int(cell_energy.z); // temperature is only defined for 2 or more particles if (np > 1) { const double ke_cm = 0.5 * mass * (vel_cm.x*vel_cm.x + vel_cm.y*vel_cm.y + vel_cm.z*vel_cm.z); temp = 2. * (ke - ke_cm) / (n_dimensions * (np-1)); } d_cell_energy[cell_id] = make_double3(ke, temp, __int_as_double(np)); } } //! Computes the cell thermo for inner cells /*! * \param d_cell_vel Velocity and mass per cell (output) * \param d_cell_energy Energy, temperature, number of particles per cell (output) * \param ci Cell indexer * \param inner_ci Cell indexer for the inner cells * \param offset Offset of \a inner_ci from \a ci * \param d_cell_np Number of particles per cell * \param d_cell_list MPCD cell list * \param cli Indexer into the cell list * \param d_vel MPCD particle velocities * \param N_mpcd Number of MPCD particles * \param mpcd_mass Mass of MPCD particle * \param d_embed_vel Embedded particle velocity * \param d_embed_idx Embedded particle indexes * \param n_dimensions System dimensionality * * \tparam need_energy If true, compute the cell-level energy properties. * \tparam tpp Number of threads to use per cell * * \b Implementation details: * Using \a tpp threads per cell, the cell properties are accumulated into \a d_cell_vel * and \a d_cell_energy. Shuffle-based intrinsics are used to reduce the accumulated * properties per-cell, and the first thread for each cell writes the result into * global memory. The properties are properly normalized * * See mpcd::gpu::kernel::begin_cell_thermo for an almost identical implementation * without the normalization at the end, which is used for the outer cells. 
*/ template<bool need_energy, unsigned int tpp> __global__ void inner_cell_thermo(double4 *d_cell_vel, double3 *d_cell_energy, const Index3D ci, const Index3D inner_ci, const uint3 offset, const unsigned int *d_cell_np, const unsigned int *d_cell_list, const Index2D cli, const Scalar4 *d_vel, const unsigned int N_mpcd, const Scalar mpcd_mass, const Scalar4 *d_embed_vel, const unsigned int *d_embed_idx, const unsigned int n_dimensions) { // tpp threads per cell unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= tpp * inner_ci.getNumElements()) return; // reinterpret the thread id as a cell by first mapping the thread into the inner indexer, // shifting by the offset of the inner indexer from the full indexer, and then compressing // back into a 1D cell id const uint3 inner_cell = inner_ci.getTriple(idx/tpp); const uint3 cell = make_uint3(inner_cell.x + offset.x, inner_cell.y + offset.y, inner_cell.z + offset.z); const unsigned int cell_id = ci(cell.x, cell.y, cell.z); const unsigned int np = d_cell_np[cell_id]; double4 momentum = make_double4(0.0, 0.0, 0.0, 0.0); double ke(0.0); for (unsigned int offset = (idx % tpp); offset < np; offset += tpp) { // Load particle data const unsigned int cur_p = d_cell_list[cli(offset, cell_id)]; double3 vel_i; double mass_i; if (cur_p < N_mpcd) { Scalar4 vel_cell = d_vel[cur_p]; vel_i = make_double3(vel_cell.x, vel_cell.y, vel_cell.z); mass_i = mpcd_mass; } else { Scalar4 vel_m = d_embed_vel[d_embed_idx[cur_p - N_mpcd]]; vel_i = make_double3(vel_m.x, vel_m.y, vel_m.z); mass_i = vel_m.w; } // add momentum momentum.x += mass_i * vel_i.x; momentum.y += mass_i * vel_i.y; momentum.z += mass_i * vel_i.z; momentum.w += mass_i; // also compute ke of the particle if (need_energy) ke += 0.5 * mass_i * (vel_i.x * vel_i.x + vel_i.y * vel_i.y + vel_i.z * vel_i.z); } // reduce quantities down into the 0-th lane per logical warp if (tpp > 1) { hoomd::detail::WarpReduce<Scalar, tpp> reducer; momentum.x = reducer.Sum(momentum.x); momentum.y = reducer.Sum(momentum.y); momentum.z = reducer.Sum(momentum.z); momentum.w = reducer.Sum(momentum.w); if (need_energy) ke = reducer.Sum(ke); } // 0-th lane in each warp writes the result if (idx % tpp == 0) { const double mass = momentum.w; double3 vel_cm = make_double3(0.0,0.0,0.0); if (mass > 0.) { vel_cm.x = momentum.x / mass; vel_cm.y = momentum.y / mass; vel_cm.z = momentum.z / mass; } d_cell_vel[cell_id] = make_double4(vel_cm.x, vel_cm.y, vel_cm.z, mass); if (need_energy) { double temp(0.0); if (np > 1) { const double ke_cm = 0.5 * mass * (vel_cm.x*vel_cm.x + vel_cm.y*vel_cm.y + vel_cm.z*vel_cm.z); temp = 2. * (ke - ke_cm) / (n_dimensions * (np-1)); } d_cell_energy[cell_id] = make_double3(ke, temp, __int_as_double(np)); } } } /*! * \param d_tmp_thermo Temporary cell packed thermo element * \param d_cell_vel Cell velocity to reduce * \param d_cell_energy Cell energy to reduce * \param tmp_ci Temporary cell indexer for cells undergoing reduction * \param ci Cell indexer Regular cell list indexer * * \tparam need_energy If true, compute the cell-level energy properties. * * \b Implementation details: * Using one thread per \a temporary cell, the cell properties are normalized * in a way suitable for reduction of net properties, e.g. the cell velocities * are converted to momentum. The temperature is set to the cell energy, and a * flag is set to 1 or 0 to indicate whether this cell has an energy that should * be used in averaging the total temperature. 
*/ template<bool need_energy> __global__ void stage_net_cell_thermo(mpcd::detail::cell_thermo_element *d_tmp_thermo, const double4 *d_cell_vel, const double3 *d_cell_energy, const Index3D tmp_ci, const Index3D ci) { // one thread per cell unsigned int tmp_idx = blockIdx.x * blockDim.x + threadIdx.x; if (tmp_idx >= tmp_ci.getNumElements()) return; // use the temporary cell indexer to map to a cell, then use the real cell indexer to // get the read index uint3 cell = tmp_ci.getTriple(tmp_idx); const unsigned int idx = ci(cell.x, cell.y, cell.z); const double4 vel_mass = d_cell_vel[idx]; const double3 vel = make_double3(vel_mass.x, vel_mass.y, vel_mass.z); const double mass = vel_mass.w; mpcd::detail::cell_thermo_element thermo; thermo.momentum = make_double3(mass * vel.x, mass * vel.y, mass * vel.z); if (need_energy) { const double3 cell_energy = d_cell_energy[idx]; thermo.energy = cell_energy.x; if (__double_as_int(cell_energy.z) > 1) { thermo.temperature = cell_energy.y; thermo.flag = 1; } else { thermo.temperature = 0.0; thermo.flag = 0; } } else { thermo.energy = 0.; thermo.temperature = 0.; thermo.flag = 0; } d_tmp_thermo[tmp_idx] = thermo; } } // end namespace kernel //! Templated launcher for multiple threads-per-cell kernel for outer cells /* * \param args Common arguments to thermo kernels * \param d_cells Cell indexes to compute * \param num_cells Number of cells to compute for * \param block_size Number of threads per block * \param tpp Number of threads to use per-cell * * \tparam cur_tpp Number of threads-per-cell for this template instantiation * * Launchers are recursively instantiated at compile-time in order to match the * correct number of threads at runtime. If the templated number of threads matches * the runtime number of threads, then the kernel is launched. Otherwise, the * next template (with threads reduced by a factor of 2) is launched. This * recursion is broken by a specialized template for 0 threads, which does no * work. 
*/ template<unsigned int cur_tpp> inline void launch_begin_cell_thermo(const mpcd::detail::thermo_args_t& args, const unsigned int *d_cells, const unsigned int num_cells, const unsigned int block_size, const unsigned int tpp) { if (cur_tpp == tpp) { if (args.need_energy) { static unsigned int max_block_size_energy = UINT_MAX; if (max_block_size_energy == UINT_MAX) { cudaFuncAttributes attr; cudaFuncGetAttributes(&attr, (const void*)mpcd::gpu::kernel::begin_cell_thermo<true,cur_tpp>); max_block_size_energy = attr.maxThreadsPerBlock; } unsigned int run_block_size = min(block_size, max_block_size_energy); dim3 grid(cur_tpp*num_cells / run_block_size + 1); mpcd::gpu::kernel::begin_cell_thermo<true,cur_tpp><<<grid, run_block_size>>>(args.cell_vel, args.cell_energy, d_cells, args.cell_np, args.cell_list, args.cli, args.vel, args.N_mpcd, args.mass, args.embed_vel, args.embed_idx, num_cells); } else { static unsigned int max_block_size_noenergy = UINT_MAX; if (max_block_size_noenergy == UINT_MAX) { cudaFuncAttributes attr; cudaFuncGetAttributes(&attr, (const void*)mpcd::gpu::kernel::begin_cell_thermo<false,cur_tpp>); max_block_size_noenergy = attr.maxThreadsPerBlock; } unsigned int run_block_size = min(block_size, max_block_size_noenergy); dim3 grid(cur_tpp*num_cells / run_block_size + 1); mpcd::gpu::kernel::begin_cell_thermo<false,cur_tpp><<<grid, run_block_size>>>(args.cell_vel, args.cell_energy, d_cells, args.cell_np, args.cell_list, args.cli, args.vel, args.N_mpcd, args.mass, args.embed_vel, args.embed_idx, num_cells); } } else { launch_begin_cell_thermo<cur_tpp/2>(args, d_cells, num_cells, block_size, tpp); } } //! Template specialization to break recursion template<> inline void launch_begin_cell_thermo<0>(const mpcd::detail::thermo_args_t& args, const unsigned int *d_cells, const unsigned int num_cells, const unsigned int block_size, const unsigned int tpp) { } /* * \param args Common arguments to thermo kernels * \param d_cells Cell indexes to compute * \param num_cells Number of cells to compute for * \param block_size Number of threads per block * \param tpp Number of threads per cell * * \returns cudaSuccess on completion * * \sa mpcd::gpu::launch_begin_cell_thermo * \sa mpcd::gpu::kernel::begin_cell_thermo */ cudaError_t begin_cell_thermo(const mpcd::detail::thermo_args_t& args, const unsigned int *d_cells, const unsigned int num_cells, const unsigned int block_size, const unsigned int tpp) { if (num_cells == 0) return cudaSuccess; launch_begin_cell_thermo<32>(args, d_cells, num_cells, block_size, tpp); return cudaSuccess; } /*! 
* \param d_cell_vel Cell velocity and masses * \param d_cell_energy Cell energy and temperature * \param d_cells Cells to compute for * \param Ncell Number of cells * \param n_dimensions Number of dimensions in system * \param need_energy If true, compute the cell-level energy properties * * \returns cudaSuccess on completion * * \sa mpcd::gpu::kernel::end_cell_thermo */ cudaError_t end_cell_thermo(double4 *d_cell_vel, double3 *d_cell_energy, const unsigned int *d_cells, const unsigned int Ncell, const unsigned int n_dimensions, const bool need_energy, const unsigned int block_size) { if (Ncell == 0) return cudaSuccess; if (need_energy) { static unsigned int max_block_size_energy = UINT_MAX; if (max_block_size_energy == UINT_MAX) { cudaFuncAttributes attr; cudaFuncGetAttributes(&attr, (const void*)mpcd::gpu::kernel::end_cell_thermo<true>); max_block_size_energy = attr.maxThreadsPerBlock; } unsigned int run_block_size = min(block_size, max_block_size_energy); dim3 grid(Ncell / run_block_size + 1); mpcd::gpu::kernel::end_cell_thermo<true><<<grid, run_block_size>>>(d_cell_vel, d_cell_energy, d_cells, Ncell, n_dimensions); } else { static unsigned int max_block_size_noenergy = UINT_MAX; if (max_block_size_noenergy == UINT_MAX) { cudaFuncAttributes attr; cudaFuncGetAttributes(&attr, (const void*)mpcd::gpu::kernel::end_cell_thermo<true>); max_block_size_noenergy = attr.maxThreadsPerBlock; } unsigned int run_block_size = min(block_size, max_block_size_noenergy); dim3 grid(Ncell / run_block_size + 1); mpcd::gpu::kernel::end_cell_thermo<false><<<grid, run_block_size>>>(d_cell_vel, d_cell_energy, d_cells, Ncell, n_dimensions); } return cudaSuccess; } //! Templated launcher for multiple threads-per-cell kernel for inner cells /* * \param args Common arguments to thermo kernels * \param ci Cell indexer * \param inner_ci Cell indexer for the inner cells * \param offset Offset of \a inner_ci from \a ci * \param n_dimensions System dimensionality * \param block_size Number of threads per block * \param tpp Number of threads per cell * * \tparam cur_tpp Number of threads-per-cell for this template instantiation * * Launchers are recursively instantiated at compile-time in order to match the * correct number of threads at runtime. If the templated number of threads matches * the runtime number of threads, then the kernel is launched. Otherwise, the * next template (with threads reduced by a factor of 2) is launched. This * recursion is broken by a specialized template for 0 threads, which does no * work. 
*/ template<unsigned int cur_tpp> inline void launch_inner_cell_thermo(const mpcd::detail::thermo_args_t& args, const Index3D& ci, const Index3D& inner_ci, const uint3& offset, const unsigned int n_dimensions, const unsigned int block_size, const unsigned int tpp) { if (cur_tpp == tpp) { if (args.need_energy) { static unsigned int max_block_size_energy = UINT_MAX; if (max_block_size_energy == UINT_MAX) { cudaFuncAttributes attr; cudaFuncGetAttributes(&attr, (const void*)mpcd::gpu::kernel::inner_cell_thermo<true,cur_tpp>); max_block_size_energy = attr.maxThreadsPerBlock; } unsigned int run_block_size = min(block_size, max_block_size_energy); dim3 grid(cur_tpp*ci.getNumElements() / run_block_size + 1); mpcd::gpu::kernel::inner_cell_thermo<true,cur_tpp><<<grid, run_block_size>>>(args.cell_vel, args.cell_energy, ci, inner_ci, offset, args.cell_np, args.cell_list, args.cli, args.vel, args.N_mpcd, args.mass, args.embed_vel, args.embed_idx, n_dimensions); } else { static unsigned int max_block_size_noenergy = UINT_MAX; if (max_block_size_noenergy == UINT_MAX) { cudaFuncAttributes attr; cudaFuncGetAttributes(&attr, (const void*)mpcd::gpu::kernel::inner_cell_thermo<false,cur_tpp>); max_block_size_noenergy = attr.maxThreadsPerBlock; } unsigned int run_block_size = min(block_size, max_block_size_noenergy); dim3 grid(cur_tpp*ci.getNumElements() / run_block_size + 1); mpcd::gpu::kernel::inner_cell_thermo<false,cur_tpp><<<grid, run_block_size>>>(args.cell_vel, args.cell_energy, ci, inner_ci, offset, args.cell_np, args.cell_list, args.cli, args.vel, args.N_mpcd, args.mass, args.embed_vel, args.embed_idx, n_dimensions); } } else { launch_inner_cell_thermo<cur_tpp/2>(args, ci, inner_ci, offset, n_dimensions, block_size, tpp); } } //! Template specialization to break recursion template<> inline void launch_inner_cell_thermo<0>(const mpcd::detail::thermo_args_t& args, const Index3D& ci, const Index3D& inner_ci, const uint3& offset, const unsigned int n_dimensions, const unsigned int block_size, const unsigned int tpp) { } /*! * \param args Common arguments for cell thermo compute * \param ci Cell indexer * \param inner_ci Cell indexer for the inner cells * \param offset Offset of \a inner_ci from \a ci * \param n_dimensions System dimensionality * \param block_size Number of threads per block * \param tpp Number of threads per cell * * \returns cudaSuccess on completion * * \sa mpcd::gpu::launch_inner_cell_thermo * \sa mpcd::gpu::kernel::inner_cell_thermo */ cudaError_t inner_cell_thermo(const mpcd::detail::thermo_args_t& args, const Index3D& ci, const Index3D& inner_ci, const uint3& offset, const unsigned int n_dimensions, const unsigned int block_size, const unsigned int tpp) { if (inner_ci.getNumElements() == 0) return cudaSuccess; launch_inner_cell_thermo<32>(args, ci, inner_ci, offset, n_dimensions, block_size, tpp); return cudaSuccess; } /*! 
* \param d_tmp_thermo Temporary cell packed thermo element * \param d_cell_vel Cell velocity to reduce * \param d_cell_energy Cell energy to reduce * \param tmp_ci Temporary cell indexer for cells undergoing reduction * \param ci Cell indexer Regular cell list indexer * \param need_energy If true, compute the cell-level energy properties * \param block_size Number of threads per block * * \returns cudaSuccess on completion * * \sa mpcd::gpu::kernel::stage_net_cell_thermo */ cudaError_t stage_net_cell_thermo(mpcd::detail::cell_thermo_element *d_tmp_thermo, const double4 *d_cell_vel, const double3 *d_cell_energy, const Index3D& tmp_ci, const Index3D& ci, bool need_energy, const unsigned int block_size) { if (need_energy) { static unsigned int max_block_size_energy = UINT_MAX; if (max_block_size_energy == UINT_MAX) { cudaFuncAttributes attr; cudaFuncGetAttributes(&attr, (const void*)mpcd::gpu::kernel::stage_net_cell_thermo<true>); max_block_size_energy = attr.maxThreadsPerBlock; } unsigned int run_block_size = min(block_size, max_block_size_energy); dim3 grid(tmp_ci.getNumElements() / run_block_size + 1); mpcd::gpu::kernel::stage_net_cell_thermo<true><<<grid, run_block_size>>>(d_tmp_thermo, d_cell_vel, d_cell_energy, tmp_ci, ci); } else { static unsigned int max_block_size_noenergy = UINT_MAX; if (max_block_size_noenergy == UINT_MAX) { cudaFuncAttributes attr; cudaFuncGetAttributes(&attr, (const void*)mpcd::gpu::kernel::stage_net_cell_thermo<false>); max_block_size_noenergy = attr.maxThreadsPerBlock; } unsigned int run_block_size = min(block_size, max_block_size_noenergy); dim3 grid(tmp_ci.getNumElements() / run_block_size + 1); mpcd::gpu::kernel::stage_net_cell_thermo<false><<<grid, run_block_size>>>(d_tmp_thermo, d_cell_vel, d_cell_energy, tmp_ci, ci); } return cudaSuccess; } /*! * \param d_reduced Cell thermo properties reduced across all cells (output on second call) * \param d_tmp Temporary storage for reduction (output on first call) * \param tmp_bytes Number of bytes allocated for temporary storage (output on first call) * \param d_tmp_thermo Cell thermo properties to reduce * \param Ncell The number of cells to reduce across * * \returns cudaSuccess on completion * * \b Implementation details: * CUB DeviceReduce is used to perform the reduction. Hence, this function requires * two calls to perform the reduction. The first call sizes the temporary storage, * which is returned in \a d_tmp and \a tmp_bytes. The caller must then allocate * the required bytes, and call the function a second time. This performs the * reduction and returns the result in \a d_reduced. */ cudaError_t reduce_net_cell_thermo(mpcd::detail::cell_thermo_element *d_reduced, void *d_tmp, size_t& tmp_bytes, const mpcd::detail::cell_thermo_element *d_tmp_thermo, const unsigned int Ncell) { HOOMD_CUB::DeviceReduce::Sum(d_tmp, tmp_bytes, d_tmp_thermo, d_reduced, Ncell); return cudaSuccess; } //! Explicit template instantiation of pack for cell velocity template cudaError_t pack_cell_buffer(typename mpcd::detail::CellVelocityPackOp::element *d_send_buf, const double4 *d_props, const unsigned int *d_send_idx, const mpcd::detail::CellVelocityPackOp op, const unsigned int num_send, unsigned int block_size); //! Explicit template instantiation of pack for cell energy template cudaError_t pack_cell_buffer(typename mpcd::detail::CellEnergyPackOp::element *d_send_buf, const double3 *d_props, const unsigned int *d_send_idx, const mpcd::detail::CellEnergyPackOp op, const unsigned int num_send, unsigned int block_size); //! 
Explicit template instantiation of unpack for cell velocity template cudaError_t unpack_cell_buffer(double4 *d_props, const unsigned int *d_cells, const unsigned int *d_recv, const unsigned int *d_recv_begin, const unsigned int *d_recv_end, const typename mpcd::detail::CellVelocityPackOp::element *d_recv_buf, const mpcd::detail::CellVelocityPackOp op, const unsigned int num_cells, const unsigned int block_size); //! Explicit template instantiation of unpack for cell energy template cudaError_t unpack_cell_buffer(double3 *d_props, const unsigned int *d_cells, const unsigned int *d_recv, const unsigned int *d_recv_begin, const unsigned int *d_recv_end, const typename mpcd::detail::CellEnergyPackOp::element *d_recv_buf, const mpcd::detail::CellEnergyPackOp op, const unsigned int num_cells, const unsigned int block_size); } // end namespace gpu } // end namespace mpcd
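// A minimal sketch (not part of the original file) of the two-call CUB reduction idiom that the
// doc comment for reduce_net_cell_thermo() describes, shown with plain floats instead of
// mpcd::detail::cell_thermo_element so it stays self-contained. sum_on_device is a made-up name;
// cub:: stands in for the HOOMD_CUB alias used above.
#include <cub/cub.cuh>
#include <cuda_runtime.h>

static void sum_on_device(const float* d_in, float* d_out, int num_items)
{
    // First call: with d_temp_storage == nullptr, CUB only reports the scratch bytes it needs.
    void* d_temp_storage = nullptr;
    size_t temp_storage_bytes = 0;
    cub::DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items);

    // The caller allocates the requested scratch space...
    cudaMalloc(&d_temp_storage, temp_storage_bytes);

    // ...and the second call performs the actual reduction into d_out.
    cub::DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items);

    cudaFree(d_temp_storage);
}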
6b1096952d650dd10ffbf89785b771398616ada1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" using namespace std; /*** Definitions ***/ // Block width for CUDA kernels #define BW 128 #define RANDOM_SEED -1 #ifdef USE_GFLAGS #ifndef _WIN32 #define gflags google #endif #else // Constant versions of gflags #define DEFINE_int32(flag, default_value, description) const int FLAGS_##flag = (default_value) #define DEFINE_uint64(flag, default_value, description) const unsigned long long FLAGS_##flag = (default_value) #define DEFINE_bool(flag, default_value, description) const bool FLAGS_##flag = (default_value) #define DEFINE_double(flag, default_value, description) const double FLAGS_##flag = (default_value) #define DEFINE_string(flag, default_value, description) const std::string FLAGS_##flag ((default_value)) #endif __global__ void MSELossBackprop(float *grad_data, float *output, float *target, float *mask, int batch_size) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= batch_size) return; // Weight the MSE gradient by the per-element mask: entries with mask == -1.0 are scaled by 0.05, entries with mask == 1.0 by 5.0, and all other entries get zero gradient. if(mask[idx] == -1.0) grad_data[idx] = 0.05 * (output[idx] - target[idx]); else if(mask[idx] == 1.0) grad_data[idx] = 5.0 * (output[idx] - target[idx]); else grad_data[idx] = 0.0; }
6b1096952d650dd10ffbf89785b771398616ada1.cu
#include "includes.h" using namespace std; /*** Definitions ***/ // Block width for CUDA kernels #define BW 128 #define RANDOM_SEED -1 #ifdef USE_GFLAGS #ifndef _WIN32 #define gflags google #endif #else // Constant versions of gflags #define DEFINE_int32(flag, default_value, description) const int FLAGS_##flag = (default_value) #define DEFINE_uint64(flag, default_value, description) const unsigned long long FLAGS_##flag = (default_value) #define DEFINE_bool(flag, default_value, description) const bool FLAGS_##flag = (default_value) #define DEFINE_double(flag, default_value, description) const double FLAGS_##flag = (default_value) #define DEFINE_string(flag, default_value, description) const std::string FLAGS_##flag ((default_value)) #endif __global__ void MSELossBackprop(float *grad_data, float *output, float *target, float *mask, int batch_size) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= batch_size) return; // Weight the MSE gradient by the per-element mask: entries with mask == -1.0 are scaled by 0.05, entries with mask == 1.0 by 5.0, and all other entries get zero gradient. if(mask[idx] == -1.0) grad_data[idx] = 0.05 * (output[idx] - target[idx]); else if(mask[idx] == 1.0) grad_data[idx] = 5.0 * (output[idx] - target[idx]); else grad_data[idx] = 0.0; }
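// A hedged usage sketch (not in the original pair): how MSELossBackprop above might be launched
// from the host, assuming it is appended to the same translation unit. The device pointers
// d_grad, d_output, d_target and d_mask are assumed to be allocated and filled elsewhere; only
// the grid sizing against BW and the launch itself are illustrated.
void backprop_mse(float* d_grad, float* d_output, float* d_target, float* d_mask,
                  int batch_size, cudaStream_t stream)
{
    // One thread per batch element; round up so batch_size need not be a multiple of BW.
    const int grid = (batch_size + BW - 1) / BW;
    MSELossBackprop<<<grid, BW, 0, stream>>>(d_grad, d_output, d_target, d_mask, batch_size);
}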
456ba40c0146993f732652b522b0465eb63562f7.hip
// !!! This is a file automatically generated by hipify!!! #include "common.h" #include "sbs/solver/CUDASoftBodySolver.h" #include "sbs/solver/CUDAVector.h" #include "sbs/solver/Math.h" #include "sbs/solver/CUDASoftBodySolverKernel.h" #include <cstring> #include <set> #include <queue> using namespace std; #include <hip/hip_runtime.h> #include <cuda_gl_interop.h> #define DEFAULT_SOLVER_STEPS 10 class CUDAContext { public: typedef std::vector<SoftBodyDescriptor> descriptorArray_t; //cudaContextCreate(softbodyList_t*); CUDAContext(softbodyList_t *list); //void cudaContextShutdown(SolverPrivate*); ~CUDAContext(void); bool InitDevice(); bool ShutdownDevice(); void UpdateVertexBuffers(bool async); void ProjectSystem(glm::float_t dt, CUDASoftBodySolver::SoftBodyWorldParameters &parms); bool InitSoftBody(SoftBody *body); private: void UpdateConstraintStiffness(SoftBodyDescriptor &descr, int mSolverSteps); void CreateDescriptor(SoftBody *body); void CreateShapeDescriptor(SoftBody *body); bool RegisterVertexBuffers(SoftBodyDescriptor &descr); cudaGraphicsResource *RegisterGLGraphicsResource(const VertexBuffer *vb); int mDeviceId; hipDeviceProp_t mDevProp; hipStream_t mStream; int mSolverSteps; descriptorArray_t mDescriptors; CUDAVector<SoftBodyDescriptor> mDescriptorsDev; CUDAVector<ShapeDescriptor> mShapeDescriptors; // shape matching CUDAVector<ShapeRegionStaticInfo> mRegions; CUDAVector<ShapeRegionDynamicInfo> mRegionsDynamicInfo; CUDAVector<glm::uint_t> mRegionsMembersOffsets; CUDAVector<glm::uint_t> mMembersRegionsOffsets; CUDAVector<glm::vec3> mShapeInitialPositions; // initial particle locations (x0i); CUDAVector<glm::float_t> mPartials; CUDAVector<ParticleInfo> mParticlesInfo; CUDAVector<glm::vec3> mPositions; CUDAVector<glm::vec3> mProjections; CUDAVector<glm::vec3> mVelocities; CUDAVector<glm::float_t> mInvMasses; CUDAVector<glm::vec3> mForces; CUDAVector<LinkConstraint> mLinks; CUDAVector<glm::uint_t> mMapping; CUDAVector<glm::uvec3> mTriangles; CUDAVector<glm::vec3> mTrianglesNormals; CUDAVector<ParticleTrianglesInfo> mParticleTriangleInfo; CUDAVector<glm::uint_t> mParticleTriangleIndexes; vector<cudaGraphicsResource*> mResArray; /* helper array to map all resources in one call */ }; bool CUDAContext::InitDevice() { hipError_t err; hipDeviceProp_t prop; memset(&prop, 0x0, sizeof(prop)); prop.major = 3; prop.minor = 5; // choose device for us. 
Prefer with compute capabilities ~ 3.5 err = hipChooseDevice(&mDeviceId, &prop); if (err != hipSuccess) goto on_error; err = hipSetDevice(mDeviceId); if (err != hipSuccess) goto on_error; err = hipGetDeviceProperties(&mDevProp, mDeviceId); if (err != hipSuccess) goto on_error; err = hipStreamCreate(&mStream); if (err != hipSuccess) goto on_error; DBG("Choosen CUDA Device: %s", mDevProp.name); return true; on_error: ERR("Device initialization error: %s", hipGetErrorString(hipGetLastError())); return false; } bool CUDAContext::ShutdownDevice() { hipError_t err; err = hipDeviceSynchronize(); if (err != hipSuccess) return false; err = hipDeviceReset(); if (err != hipSuccess) return false; return true; } struct Node { Node(int i, int d) : idx(i), distance(d) {} int idx; int distance; }; void GetRegion(int idx, const MeshData::neighboursArray_t &nei, int max, indexArray_t &out) { std::queue<Node> toprocess; std::set<int> processed; toprocess.push(Node(idx, 0)); while (!toprocess.empty()) { Node n = toprocess.front(); if (processed.find(n.idx) == processed.end()) { out.push_back(n.idx); processed.insert(n.idx); } toprocess.pop(); if (n.distance >= max) continue; FOREACH_R(it, nei[n.idx]) toprocess.push(Node(*it, n.distance + 1)); } } void CUDAContext::CreateShapeDescriptor(SoftBody *obj) { ShapeDescriptor d; vec3Array_t initQ; long len = 0; unsigned int smin = 999999; unsigned int smax = 0; int region_size = 2; d.mc0 = calculateMassCenter( &(obj->mParticles[0]), &(obj->mMassInv[0]), obj->mParticles.size()); d.initPosBaseIdx = mShapeInitialPositions.size(); // shuld depend on mesh mShapeInitialPositions.push_back(&(obj->mParticles[0]), obj->mParticles.size()); d.radius = 0; const MeshData::neighboursArray_t &na = obj->mMesh->GetNeighboursArray(); int regions_base_id = mRegions.size(); std::vector< std::vector<glm::uint_t> > particlesInRegions; particlesInRegions.resize(obj->mParticles.size()); REP(i, obj->mParticles.size()) { indexArray_t indexes; GetRegion(i, na, region_size, indexes); REP(p, indexes.size()) { particlesInRegions[indexes[p]].push_back(i); } } // triangles info std::vector< std::set<glm::uint_t> > particlesInTriangles; std::vector< std::vector<glm::uint_t> > particlesInTriangles2; particlesInTriangles.resize(obj->mParticles.size()); particlesInTriangles2.resize(obj->mParticles.size()); REP(i, obj->mTriangles.size()) { glm::uvec3 idxs = obj->mTriangles[i]; particlesInTriangles[idxs[0]].insert(i); particlesInTriangles[idxs[1]].insert(i); particlesInTriangles[idxs[2]].insert(i); } REP(i, particlesInTriangles.size()) { FOREACH_R(it, particlesInTriangles[i]) particlesInTriangles2[i].push_back(*it); } #if 0 eEP(i, particlesInRegions.size()) { printf("%d:", particlesInRegions[i].size()); REP(j, particlesInRegions[i].size()) { printf("%d,", particlesInRegions[i][j]); } printf("\n"); } #endif // create shape regions REP(i, obj->mParticles.size()) { ShapeRegionStaticInfo reg; ParticleInfo info; info.region_id = mRegions.size(); info.body_info_id = mShapeDescriptors.size(); info.body_offset = mPositions.size(); indexArray_t indexes; float_t mass = 0.0f; glm::vec3 mc(0,0,0); GetRegion(i, na, region_size, indexes); len += indexes.size(); if (smin > indexes.size()) smin = indexes.size(); if (smax < indexes.size()) smax = indexes.size(); FOREACH_R(it, indexes) { mass += obj->mMassInv[*it]; mc += obj->mParticles[*it] * obj->mMassInv[*it]; } reg.mass = mass; reg.mc0 = mc / mass; reg.n_particles = indexes.size(); reg.members_offsets_offset = mRegionsMembersOffsets.size(); 
reg.shapes_init_positions_offset = d.initPosBaseIdx; reg.regions_offsets_offset = mMembersRegionsOffsets.size(); reg.n_regions = particlesInRegions[i].size(); ParticleTrianglesInfo pti; pti.triangle_id_offset = mParticleTriangleIndexes.size(); pti.n_triangles = particlesInTriangles2[i].size(); mRegions.push_back(reg); mRegionsMembersOffsets.push_back(&indexes[0], indexes.size()); mParticlesInfo.push_back(info); mMembersRegionsOffsets.push_back(&(particlesInRegions[i][0]), particlesInRegions[i].size()); mParticleTriangleInfo.push_back(pti); mParticleTriangleIndexes.push_back(&(particlesInTriangles2[i][0]), particlesInTriangles2[i].size()); } DBG("==MODEL INFORMATION=="); DBG("Particles total: %ld", obj->mParticles.size()); DBG("Vertexes total: %ld", obj->mMesh->GetVertexes().size()); DBG("Triangles total: %ld", obj->mMesh->GetFaces().size()); DBG("Rest Volume :%f", d.volume); DBG("Regions total: %ld", mRegions.size()); DBG("Average region size: %f", (float)len / mRegions.size()); DBG("Max region size: %d", smax); DBG("Min region size: %d", smin); DBG("ParticeInfo size: %d", mParticlesInfo.size()); mShapeDescriptors.push_back(d); } void CUDAContext::CreateDescriptor(SoftBody *body) { SoftBodyDescriptor descr; descr.body = body; descr.graphics = NULL; descr.baseIdx = mPositions.size(); descr.nParticles = body->mParticles.size(); descr.linkIdx = mLinks.size(); descr.nLinks = body->mLinks.size(); descr.mappingIdx = mMapping.size(); descr.nMapping = body->mMeshVertexParticleMapping.size(); descr.trianglesIdx = mTriangles.size(); descr.nTriangles = body->mTriangles.size(); descr.volume0 = calculateVolume(&(body->mParticles[0]), &(body->mTriangles[0]), NULL, NULL, body->mTriangles.size()); bool res = RegisterVertexBuffers(descr); if (!res) { ERR("Error occured registering SoftBody vertex buffers."); return; } mDescriptors.push_back(descr); mDescriptorsDev.push_back(descr); mPartials.resize(body->mTriangles.size() / 128 + 1); mResArray.push_back(descr.graphics); } cudaGraphicsResource *CUDAContext::RegisterGLGraphicsResource(const VertexBuffer *vb) { hipError_t err; cudaGraphicsResource *ret = NULL; GLuint id = vb->GetVBO(); err = hipGraphicsGLRegisterBuffer(&ret, id, hipGraphicsRegisterFlagsNone); if (err != hipSuccess) { ERR("Unable to register GL buffer object %d", id); return NULL; } return ret; } bool CUDAContext::RegisterVertexBuffers(SoftBodyDescriptor &descr) { if (!descr.body) { ERR("No SoftBody reference in descriptor!"); return false; } const VertexBuffer *buf = descr.body->GetVertexes(); if (buf) descr.graphics = RegisterGLGraphicsResource(buf); return true; } void CUDAContext::UpdateConstraintStiffness(SoftBodyDescriptor &descr, int mSolverSteps) { //int blocks = descr.nLinks / 128; //calculateLinkStiffness<<<blocks, 128>>>(mSolverSteps, mLinks.data(), // descr.linkIdx, descr.nLinks); } bool CUDAContext::InitSoftBody(SoftBody *body) { CreateDescriptor(body); CreateShapeDescriptor(body); long nParticles = body->mParticles.size(); mPositions.push_back(&(body->mParticles[0]), nParticles); mProjections.push_back(&(body->mParticles[0]), nParticles); mVelocities.resize(mVelocities.size() + nParticles); mInvMasses.push_back(&(body->mMassInv[0]), nParticles); mForces.resize(mForces.size() + nParticles); mLinks.push_back(&(body->mLinks[0]), body->mLinks.size()); mMapping.push_back(&(body->mMeshVertexParticleMapping[0]), body->mMeshVertexParticleMapping.size()); mRegionsDynamicInfo.resize(mPositions.size()); mTriangles.push_back(&(body->mTriangles[0]), body->mTriangles.size()); 
mTrianglesNormals.resize(body->mTriangles.size()); return true; } #if 0 bool CUDAContext::InitDymmyBodyCollisionConstraint() { long int total = 0, bytes = 0; vector<PointTriangleConstraint> constraints; PointTriangleConstraint con; // constant collision handling // create m * x collsion constraints - to be optimized later. FOREACH(it, &mDescriptors) { constraints.clear(); FOREACH(vx, &it->body->mPositions) { int idx = std::distance(it->body->mPositions.begin(), vx); con.pointObjectId = std::distance(mDescriptors.begin(), it); con.pointIdx = idx; /* FOREACH(tr, &it->body->mTriangles) { if (idx == (*tr)[0] || idx == (*tr)[1] || idx == (*tr)[2]) continue; con.triangleObjectId = std::distance(mDescriptors.begin(), it); con.triangleId = std::distance(it->body->mTriangles.begin(), tr); constraints.push_back(con); } */ FOREACH(it2, &mDescriptors) { if (it == it2) continue; FOREACH(tr, &it->body->mTriangles) { con.triangleObjectId = std::distance( mDescriptors.begin(), it2); con.triangleId = std::distance(it->body->mTriangles.begin(), tr); constraints.push_back(con); total++; } } } if (it->collisions) hipFree(it->collisions); it->collisions = (PointTriangleConstraint*)allocateCUDABuffer(sizeof(PointTriangleConstraint) * constraints.size()); it->nCollisions = constraints.size(); hipMemcpy(it->collisions, &constraints[0], sizeof(PointTriangleConstraint) * constraints.size(), hipMemcpyHostToDevice); bytes += sizeof(PointTriangleConstraint) * constraints.size(); } DBG("allocated constraints %d, bytes %d", total, bytes); return true; } #endif CUDAContext::CUDAContext(softbodyList_t *bodies) { mSolverSteps = DEFAULT_SOLVER_STEPS; if (!InitDevice()) { ERR("CUDA Device initialization failed!"); return; } FOREACH(it, bodies) { if (!*it) continue; if (!InitSoftBody(*it)) { ShutdownDevice(); return; } } #if 0 if (!InitDymmyBodyCollisionConstraint()) { ERR("Unable to allocate collision constraints on device!"); ShutdownDevice(); return; } #endif } CUDAContext::~CUDAContext() { ShutdownDevice(); } void CUDAContext::UpdateVertexBuffers(bool async) { hipError_t err; glm::vec3 *ptr; int threadsPerBlock = 128; // map all in one call err = hipGraphicsMapResources(mResArray.size(), &mResArray[0]); if (err != hipSuccess) return; FOREACH(it, &mDescriptors) { size_t size; err = hipGraphicsResourceGetMappedPointer((void**)&ptr, &size, it->graphics); if (err != hipSuccess) { ERR("Unable to map VBO pointer"); return; } int blockCount = it->nMapping / threadsPerBlock + 1; hipLaunchKernelGGL(( cudaUpdateVertexBufferKernel), dim3(blockCount), dim3(threadsPerBlock) , 0, 0, ptr, mPositions.data(), mMapping.data(), it->baseIdx, it->mappingIdx, it->nMapping); } hipGraphicsUnmapResources(mResArray.size(), &mResArray[0]); } CUDASoftBodySolver::CUDASoftBodySolver(void) : mContext(0), mInitialized(false) { } CUDASoftBodySolver::~CUDASoftBodySolver(void) { if (mContext) delete mContext; } bool CUDASoftBodySolver::Initialize(void) { if (mInitialized) return true; mContext = new CUDAContext(&mBodies); if (!mContext) { ERR("Unable to create CUDA context."); return false; } mInitialized = true; return true; } void CUDASoftBodySolver::Shutdown(void) { if (!mInitialized) return; if (mContext) { delete mContext; mContext = NULL; } mInitialized = false; SoftBodySolver::Shutdown(); } void CUDASoftBodySolver::UpdateVertexBuffers(void) { if (mInitialized) mContext->UpdateVertexBuffers(false); } void CUDAContext::ProjectSystem(glm::float_t dt, CUDASoftBodySolver::SoftBodyWorldParameters &world) { int threadsPerBlock = 128; int blockCount; if 
(!mPositions.size()) return; // predict motion blockCount = mPositions.size() / threadsPerBlock + 1; hipLaunchKernelGGL(( cudaProjectPositionsAndVelocitiesKernel), dim3(blockCount), dim3( threadsPerBlock), 0, 0, world.gravity, mPositions.data(), mProjections.data(), mVelocities.data(), NULL, mInvMasses.data(), dt, mPositions.size()); // solver blockCount = mPositions.size() / threadsPerBlock + 1; hipLaunchKernelGGL(( solveShapeMatchingConstraints1), dim3(blockCount), dim3(threadsPerBlock), 0, 0, mParticlesInfo.data(), mRegions.data(), mRegionsMembersOffsets.data(), mShapeInitialPositions.data(), mProjections.data(), mInvMasses.data(), mRegionsDynamicInfo.data(), mPositions.size() ); hipLaunchKernelGGL(( solveShapeMatchingConstraints2), dim3(blockCount), dim3(threadsPerBlock), 0, 0, mParticlesInfo.data(), mRegions.data(), mRegionsDynamicInfo.data(), mRegionsMembersOffsets.data(), mShapeInitialPositions.data(), mMembersRegionsOffsets.data(), mProjections.data(), mPositions.size() ); blockCount = mTriangles.size() / threadsPerBlock + 1; hipLaunchKernelGGL(( solveVolumePreservationConstraint1), dim3(blockCount), dim3(threadsPerBlock), 0, 0, mParticlesInfo.data(), mPartials.data(), mProjections.data(), mTriangles.data(), mTrianglesNormals.data(), mTriangles.size() ); blockCount = mPartials.size() / threadsPerBlock + 1; hipLaunchKernelGGL(( solveVolumePreservationConstraint2), dim3(blockCount), dim3(threadsPerBlock), 0, 0, mDescriptorsDev.data(), mPartials.data(), mPartials.size() ); blockCount = mPositions.size() / threadsPerBlock + 1; hipLaunchKernelGGL(( solveVolumePreservationConstraint3), dim3(blockCount), dim3(threadsPerBlock), 0, 0, mParticleTriangleInfo.data(), mDescriptorsDev.data(), mTrianglesNormals.data(), mProjections.data(), mParticleTriangleIndexes.data(), mPositions.size()); hipLaunchKernelGGL(( solveGroundWallCollisionConstraints), dim3(blockCount), dim3(threadsPerBlock), 0, 0, mProjections.data(), mInvMasses.data(), world.groundLevel, world.leftWall, world.rightWall, world.frontWall, world.backWall, mPositions.size()); #if 0 vector<SoftBodyDescriptor> info; info.resize(1); hipMemcpy(&info[0], mDescriptorsDev.data(), sizeof(SoftBodyDescriptor), hipMemcpyDeviceToHost); ERR("%f", info[0].volume); #endif #if 0 vector<glm::float_t> info; info.resize(mPartials.size()); hipMemcpy(&info[0], mPartials.data(), sizeof(glm::float_t) * mPartials.size(), hipMemcpyDeviceToHost); float sum = 0; FOREACH_R(it, info) sum += *it; ERR("%f", sum); #endif #if 0 vector<glm::mat3> info; info.resize(mTest.size()); hipMemcpy(&info[0], mTest.data(), sizeof(glm::mat3) * mTest.size(), hipMemcpyDeviceToHost); vector<glm::vec3> info2; info2.resize(mTest2.size()); hipMemcpy(&info2[0], mTest2.data(), sizeof(glm::vec3) * mTest2.size(), hipMemcpyDeviceToHost); REP(k, info.size()) { glm::mat3 R = info[k]; glm::vec3 mc = info2[k]; ERR("[%f %f %f]", mc[0], mc[1], mc[2]); ERR("[%f %f %f %f %f %f %f %f %f]", R[0][0], R[1][0], R[2][0], R[0][1], R[1][1], R[2][1], R[0][2], R[1][2], R[2][2]); } #endif #if 0 vector<glm::uint> info; info.resize(mMembersRegionsOffsets.size()); hipMemcpy(&info[0], mMembersRegionsOffsets.data(), sizeof(glm::uint) * mMembersRegionsOffsets.size(), hipMemcpyDeviceToHost); FOREACH_R(i, info) { ERR("%d,", *i); } #endif // integrate motion threadsPerBlock = 128; blockCount = mPositions.size() / threadsPerBlock + 1; hipLaunchKernelGGL(( integrateMotionKernel), dim3(blockCount), dim3(threadsPerBlock), 0, 0, dt, mPositions.data(), mProjections.data(), mVelocities.data(), mPositions.size()); 
hipDeviceSynchronize(); } void CUDASoftBodySolver::ProjectSystem(glm::float_t dt) { if (mInitialized) mContext->ProjectSystem(dt, mWorldParams); } void CUDASoftBodySolver::AddSoftBody(SoftBody *body) { mBodies.push_back(body); if (!mInitialized || !mContext->InitSoftBody(body)) ERR("Failed to add SoftBody!"); }
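// A self-contained sketch (not from the original file) of what GetRegion() above computes: a
// breadth-first walk over the particle neighbour graph that stops max_hops away from the seed.
// region_of is a made-up name and std::vector<std::vector<int> > stands in for
// MeshData::neighboursArray_t; the traversal logic mirrors the solver's.
#include <queue>
#include <set>
#include <utility>
#include <vector>

std::vector<int> region_of(int seed, const std::vector<std::vector<int> >& neighbours, int max_hops)
{
    std::vector<int> out;
    std::set<int> seen;
    std::queue<std::pair<int, int> > frontier; // (particle index, distance from seed)
    frontier.push(std::make_pair(seed, 0));
    while (!frontier.empty()) {
        std::pair<int, int> n = frontier.front();
        frontier.pop();
        if (seen.insert(n.first).second)
            out.push_back(n.first);      // record each particle the first time it is reached
        if (n.second >= max_hops)
            continue;                    // do not expand past the requested graph distance
        for (size_t i = 0; i < neighbours[n.first].size(); ++i)
            frontier.push(std::make_pair(neighbours[n.first][i], n.second + 1));
    }
    return out;
}
// For a chain 0-1-2-3-4 with max_hops = 2, region_of(0, ...) yields {0, 1, 2}: the same
// breadth-limited neighbourhood the solver gathers as a shape-matching region (region_size = 2).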
456ba40c0146993f732652b522b0465eb63562f7.cu
#include "common.h" #include "sbs/solver/CUDASoftBodySolver.h" #include "sbs/solver/CUDAVector.h" #include "sbs/solver/Math.h" #include "sbs/solver/CUDASoftBodySolverKernel.h" #include <cstring> #include <set> #include <queue> using namespace std; #include <cuda_runtime.h> #include <cuda_gl_interop.h> #define DEFAULT_SOLVER_STEPS 10 class CUDAContext { public: typedef std::vector<SoftBodyDescriptor> descriptorArray_t; //cudaContextCreate(softbodyList_t*); CUDAContext(softbodyList_t *list); //void cudaContextShutdown(SolverPrivate*); ~CUDAContext(void); bool InitDevice(); bool ShutdownDevice(); void UpdateVertexBuffers(bool async); void ProjectSystem(glm::float_t dt, CUDASoftBodySolver::SoftBodyWorldParameters &parms); bool InitSoftBody(SoftBody *body); private: void UpdateConstraintStiffness(SoftBodyDescriptor &descr, int mSolverSteps); void CreateDescriptor(SoftBody *body); void CreateShapeDescriptor(SoftBody *body); bool RegisterVertexBuffers(SoftBodyDescriptor &descr); cudaGraphicsResource *RegisterGLGraphicsResource(const VertexBuffer *vb); int mDeviceId; cudaDeviceProp mDevProp; cudaStream_t mStream; int mSolverSteps; descriptorArray_t mDescriptors; CUDAVector<SoftBodyDescriptor> mDescriptorsDev; CUDAVector<ShapeDescriptor> mShapeDescriptors; // shape matching CUDAVector<ShapeRegionStaticInfo> mRegions; CUDAVector<ShapeRegionDynamicInfo> mRegionsDynamicInfo; CUDAVector<glm::uint_t> mRegionsMembersOffsets; CUDAVector<glm::uint_t> mMembersRegionsOffsets; CUDAVector<glm::vec3> mShapeInitialPositions; // initial particle locations (x0i); CUDAVector<glm::float_t> mPartials; CUDAVector<ParticleInfo> mParticlesInfo; CUDAVector<glm::vec3> mPositions; CUDAVector<glm::vec3> mProjections; CUDAVector<glm::vec3> mVelocities; CUDAVector<glm::float_t> mInvMasses; CUDAVector<glm::vec3> mForces; CUDAVector<LinkConstraint> mLinks; CUDAVector<glm::uint_t> mMapping; CUDAVector<glm::uvec3> mTriangles; CUDAVector<glm::vec3> mTrianglesNormals; CUDAVector<ParticleTrianglesInfo> mParticleTriangleInfo; CUDAVector<glm::uint_t> mParticleTriangleIndexes; vector<cudaGraphicsResource*> mResArray; /* helper array to map all resources in one call */ }; bool CUDAContext::InitDevice() { cudaError_t err; cudaDeviceProp prop; memset(&prop, 0x0, sizeof(prop)); prop.major = 3; prop.minor = 5; // choose device for us. 
Prefer with compute capabilities ~ 3.5 err = cudaChooseDevice(&mDeviceId, &prop); if (err != cudaSuccess) goto on_error; err = cudaSetDevice(mDeviceId); if (err != cudaSuccess) goto on_error; err = cudaGetDeviceProperties(&mDevProp, mDeviceId); if (err != cudaSuccess) goto on_error; err = cudaStreamCreate(&mStream); if (err != cudaSuccess) goto on_error; DBG("Choosen CUDA Device: %s", mDevProp.name); return true; on_error: ERR("Device initialization error: %s", cudaGetErrorString(cudaGetLastError())); return false; } bool CUDAContext::ShutdownDevice() { cudaError_t err; err = cudaDeviceSynchronize(); if (err != cudaSuccess) return false; err = cudaDeviceReset(); if (err != cudaSuccess) return false; return true; } struct Node { Node(int i, int d) : idx(i), distance(d) {} int idx; int distance; }; void GetRegion(int idx, const MeshData::neighboursArray_t &nei, int max, indexArray_t &out) { std::queue<Node> toprocess; std::set<int> processed; toprocess.push(Node(idx, 0)); while (!toprocess.empty()) { Node n = toprocess.front(); if (processed.find(n.idx) == processed.end()) { out.push_back(n.idx); processed.insert(n.idx); } toprocess.pop(); if (n.distance >= max) continue; FOREACH_R(it, nei[n.idx]) toprocess.push(Node(*it, n.distance + 1)); } } void CUDAContext::CreateShapeDescriptor(SoftBody *obj) { ShapeDescriptor d; vec3Array_t initQ; long len = 0; unsigned int smin = 999999; unsigned int smax = 0; int region_size = 2; d.mc0 = calculateMassCenter( &(obj->mParticles[0]), &(obj->mMassInv[0]), obj->mParticles.size()); d.initPosBaseIdx = mShapeInitialPositions.size(); // shuld depend on mesh mShapeInitialPositions.push_back(&(obj->mParticles[0]), obj->mParticles.size()); d.radius = 0; const MeshData::neighboursArray_t &na = obj->mMesh->GetNeighboursArray(); int regions_base_id = mRegions.size(); std::vector< std::vector<glm::uint_t> > particlesInRegions; particlesInRegions.resize(obj->mParticles.size()); REP(i, obj->mParticles.size()) { indexArray_t indexes; GetRegion(i, na, region_size, indexes); REP(p, indexes.size()) { particlesInRegions[indexes[p]].push_back(i); } } // triangles info std::vector< std::set<glm::uint_t> > particlesInTriangles; std::vector< std::vector<glm::uint_t> > particlesInTriangles2; particlesInTriangles.resize(obj->mParticles.size()); particlesInTriangles2.resize(obj->mParticles.size()); REP(i, obj->mTriangles.size()) { glm::uvec3 idxs = obj->mTriangles[i]; particlesInTriangles[idxs[0]].insert(i); particlesInTriangles[idxs[1]].insert(i); particlesInTriangles[idxs[2]].insert(i); } REP(i, particlesInTriangles.size()) { FOREACH_R(it, particlesInTriangles[i]) particlesInTriangles2[i].push_back(*it); } #if 0 eEP(i, particlesInRegions.size()) { printf("%d:", particlesInRegions[i].size()); REP(j, particlesInRegions[i].size()) { printf("%d,", particlesInRegions[i][j]); } printf("\n"); } #endif // create shape regions REP(i, obj->mParticles.size()) { ShapeRegionStaticInfo reg; ParticleInfo info; info.region_id = mRegions.size(); info.body_info_id = mShapeDescriptors.size(); info.body_offset = mPositions.size(); indexArray_t indexes; float_t mass = 0.0f; glm::vec3 mc(0,0,0); GetRegion(i, na, region_size, indexes); len += indexes.size(); if (smin > indexes.size()) smin = indexes.size(); if (smax < indexes.size()) smax = indexes.size(); FOREACH_R(it, indexes) { mass += obj->mMassInv[*it]; mc += obj->mParticles[*it] * obj->mMassInv[*it]; } reg.mass = mass; reg.mc0 = mc / mass; reg.n_particles = indexes.size(); reg.members_offsets_offset = mRegionsMembersOffsets.size(); 
reg.shapes_init_positions_offset = d.initPosBaseIdx; reg.regions_offsets_offset = mMembersRegionsOffsets.size(); reg.n_regions = particlesInRegions[i].size(); ParticleTrianglesInfo pti; pti.triangle_id_offset = mParticleTriangleIndexes.size(); pti.n_triangles = particlesInTriangles2[i].size(); mRegions.push_back(reg); mRegionsMembersOffsets.push_back(&indexes[0], indexes.size()); mParticlesInfo.push_back(info); mMembersRegionsOffsets.push_back(&(particlesInRegions[i][0]), particlesInRegions[i].size()); mParticleTriangleInfo.push_back(pti); mParticleTriangleIndexes.push_back(&(particlesInTriangles2[i][0]), particlesInTriangles2[i].size()); } DBG("==MODEL INFORMATION=="); DBG("Particles total: %ld", obj->mParticles.size()); DBG("Vertexes total: %ld", obj->mMesh->GetVertexes().size()); DBG("Triangles total: %ld", obj->mMesh->GetFaces().size()); DBG("Rest Volume :%f", d.volume); DBG("Regions total: %ld", mRegions.size()); DBG("Average region size: %f", (float)len / mRegions.size()); DBG("Max region size: %d", smax); DBG("Min region size: %d", smin); DBG("ParticeInfo size: %d", mParticlesInfo.size()); mShapeDescriptors.push_back(d); } void CUDAContext::CreateDescriptor(SoftBody *body) { SoftBodyDescriptor descr; descr.body = body; descr.graphics = NULL; descr.baseIdx = mPositions.size(); descr.nParticles = body->mParticles.size(); descr.linkIdx = mLinks.size(); descr.nLinks = body->mLinks.size(); descr.mappingIdx = mMapping.size(); descr.nMapping = body->mMeshVertexParticleMapping.size(); descr.trianglesIdx = mTriangles.size(); descr.nTriangles = body->mTriangles.size(); descr.volume0 = calculateVolume(&(body->mParticles[0]), &(body->mTriangles[0]), NULL, NULL, body->mTriangles.size()); bool res = RegisterVertexBuffers(descr); if (!res) { ERR("Error occured registering SoftBody vertex buffers."); return; } mDescriptors.push_back(descr); mDescriptorsDev.push_back(descr); mPartials.resize(body->mTriangles.size() / 128 + 1); mResArray.push_back(descr.graphics); } cudaGraphicsResource *CUDAContext::RegisterGLGraphicsResource(const VertexBuffer *vb) { cudaError_t err; cudaGraphicsResource *ret = NULL; GLuint id = vb->GetVBO(); err = cudaGraphicsGLRegisterBuffer(&ret, id, cudaGraphicsRegisterFlagsNone); if (err != cudaSuccess) { ERR("Unable to register GL buffer object %d", id); return NULL; } return ret; } bool CUDAContext::RegisterVertexBuffers(SoftBodyDescriptor &descr) { if (!descr.body) { ERR("No SoftBody reference in descriptor!"); return false; } const VertexBuffer *buf = descr.body->GetVertexes(); if (buf) descr.graphics = RegisterGLGraphicsResource(buf); return true; } void CUDAContext::UpdateConstraintStiffness(SoftBodyDescriptor &descr, int mSolverSteps) { //int blocks = descr.nLinks / 128; //calculateLinkStiffness<<<blocks, 128>>>(mSolverSteps, mLinks.data(), // descr.linkIdx, descr.nLinks); } bool CUDAContext::InitSoftBody(SoftBody *body) { CreateDescriptor(body); CreateShapeDescriptor(body); long nParticles = body->mParticles.size(); mPositions.push_back(&(body->mParticles[0]), nParticles); mProjections.push_back(&(body->mParticles[0]), nParticles); mVelocities.resize(mVelocities.size() + nParticles); mInvMasses.push_back(&(body->mMassInv[0]), nParticles); mForces.resize(mForces.size() + nParticles); mLinks.push_back(&(body->mLinks[0]), body->mLinks.size()); mMapping.push_back(&(body->mMeshVertexParticleMapping[0]), body->mMeshVertexParticleMapping.size()); mRegionsDynamicInfo.resize(mPositions.size()); mTriangles.push_back(&(body->mTriangles[0]), body->mTriangles.size()); 
mTrianglesNormals.resize(body->mTriangles.size()); return true; } #if 0 bool CUDAContext::InitDymmyBodyCollisionConstraint() { long int total = 0, bytes = 0; vector<PointTriangleConstraint> constraints; PointTriangleConstraint con; // constant collision handling // create m * x collsion constraints - to be optimized later. FOREACH(it, &mDescriptors) { constraints.clear(); FOREACH(vx, &it->body->mPositions) { int idx = std::distance(it->body->mPositions.begin(), vx); con.pointObjectId = std::distance(mDescriptors.begin(), it); con.pointIdx = idx; /* FOREACH(tr, &it->body->mTriangles) { if (idx == (*tr)[0] || idx == (*tr)[1] || idx == (*tr)[2]) continue; con.triangleObjectId = std::distance(mDescriptors.begin(), it); con.triangleId = std::distance(it->body->mTriangles.begin(), tr); constraints.push_back(con); } */ FOREACH(it2, &mDescriptors) { if (it == it2) continue; FOREACH(tr, &it->body->mTriangles) { con.triangleObjectId = std::distance( mDescriptors.begin(), it2); con.triangleId = std::distance(it->body->mTriangles.begin(), tr); constraints.push_back(con); total++; } } } if (it->collisions) cudaFree(it->collisions); it->collisions = (PointTriangleConstraint*)allocateCUDABuffer(sizeof(PointTriangleConstraint) * constraints.size()); it->nCollisions = constraints.size(); cudaMemcpy(it->collisions, &constraints[0], sizeof(PointTriangleConstraint) * constraints.size(), cudaMemcpyHostToDevice); bytes += sizeof(PointTriangleConstraint) * constraints.size(); } DBG("allocated constraints %d, bytes %d", total, bytes); return true; } #endif CUDAContext::CUDAContext(softbodyList_t *bodies) { mSolverSteps = DEFAULT_SOLVER_STEPS; if (!InitDevice()) { ERR("CUDA Device initialization failed!"); return; } FOREACH(it, bodies) { if (!*it) continue; if (!InitSoftBody(*it)) { ShutdownDevice(); return; } } #if 0 if (!InitDymmyBodyCollisionConstraint()) { ERR("Unable to allocate collision constraints on device!"); ShutdownDevice(); return; } #endif } CUDAContext::~CUDAContext() { ShutdownDevice(); } void CUDAContext::UpdateVertexBuffers(bool async) { cudaError_t err; glm::vec3 *ptr; int threadsPerBlock = 128; // map all in one call err = cudaGraphicsMapResources(mResArray.size(), &mResArray[0]); if (err != cudaSuccess) return; FOREACH(it, &mDescriptors) { size_t size; err = cudaGraphicsResourceGetMappedPointer((void**)&ptr, &size, it->graphics); if (err != cudaSuccess) { ERR("Unable to map VBO pointer"); return; } int blockCount = it->nMapping / threadsPerBlock + 1; cudaUpdateVertexBufferKernel<<<blockCount, threadsPerBlock >>>( ptr, mPositions.data(), mMapping.data(), it->baseIdx, it->mappingIdx, it->nMapping); } cudaGraphicsUnmapResources(mResArray.size(), &mResArray[0]); } CUDASoftBodySolver::CUDASoftBodySolver(void) : mContext(0), mInitialized(false) { } CUDASoftBodySolver::~CUDASoftBodySolver(void) { if (mContext) delete mContext; } bool CUDASoftBodySolver::Initialize(void) { if (mInitialized) return true; mContext = new CUDAContext(&mBodies); if (!mContext) { ERR("Unable to create CUDA context."); return false; } mInitialized = true; return true; } void CUDASoftBodySolver::Shutdown(void) { if (!mInitialized) return; if (mContext) { delete mContext; mContext = NULL; } mInitialized = false; SoftBodySolver::Shutdown(); } void CUDASoftBodySolver::UpdateVertexBuffers(void) { if (mInitialized) mContext->UpdateVertexBuffers(false); } void CUDAContext::ProjectSystem(glm::float_t dt, CUDASoftBodySolver::SoftBodyWorldParameters &world) { int threadsPerBlock = 128; int blockCount; if (!mPositions.size()) return; 
// predict motion blockCount = mPositions.size() / threadsPerBlock + 1; cudaProjectPositionsAndVelocitiesKernel<<<blockCount, threadsPerBlock>>>(world.gravity, mPositions.data(), mProjections.data(), mVelocities.data(), NULL, mInvMasses.data(), dt, mPositions.size()); // solver blockCount = mPositions.size() / threadsPerBlock + 1; solveShapeMatchingConstraints1<<<blockCount, threadsPerBlock>>>( mParticlesInfo.data(), mRegions.data(), mRegionsMembersOffsets.data(), mShapeInitialPositions.data(), mProjections.data(), mInvMasses.data(), mRegionsDynamicInfo.data(), mPositions.size() ); solveShapeMatchingConstraints2<<<blockCount, threadsPerBlock>>>( mParticlesInfo.data(), mRegions.data(), mRegionsDynamicInfo.data(), mRegionsMembersOffsets.data(), mShapeInitialPositions.data(), mMembersRegionsOffsets.data(), mProjections.data(), mPositions.size() ); blockCount = mTriangles.size() / threadsPerBlock + 1; solveVolumePreservationConstraint1<<<blockCount, threadsPerBlock>>>( mParticlesInfo.data(), mPartials.data(), mProjections.data(), mTriangles.data(), mTrianglesNormals.data(), mTriangles.size() ); blockCount = mPartials.size() / threadsPerBlock + 1; solveVolumePreservationConstraint2<<<blockCount, threadsPerBlock>>>( mDescriptorsDev.data(), mPartials.data(), mPartials.size() ); blockCount = mPositions.size() / threadsPerBlock + 1; solveVolumePreservationConstraint3<<<blockCount, threadsPerBlock>>>( mParticleTriangleInfo.data(), mDescriptorsDev.data(), mTrianglesNormals.data(), mProjections.data(), mParticleTriangleIndexes.data(), mPositions.size()); solveGroundWallCollisionConstraints<<<blockCount, threadsPerBlock>>>( mProjections.data(), mInvMasses.data(), world.groundLevel, world.leftWall, world.rightWall, world.frontWall, world.backWall, mPositions.size()); #if 0 vector<SoftBodyDescriptor> info; info.resize(1); cudaMemcpy(&info[0], mDescriptorsDev.data(), sizeof(SoftBodyDescriptor), cudaMemcpyDeviceToHost); ERR("%f", info[0].volume); #endif #if 0 vector<glm::float_t> info; info.resize(mPartials.size()); cudaMemcpy(&info[0], mPartials.data(), sizeof(glm::float_t) * mPartials.size(), cudaMemcpyDeviceToHost); float sum = 0; FOREACH_R(it, info) sum += *it; ERR("%f", sum); #endif #if 0 vector<glm::mat3> info; info.resize(mTest.size()); cudaMemcpy(&info[0], mTest.data(), sizeof(glm::mat3) * mTest.size(), cudaMemcpyDeviceToHost); vector<glm::vec3> info2; info2.resize(mTest2.size()); cudaMemcpy(&info2[0], mTest2.data(), sizeof(glm::vec3) * mTest2.size(), cudaMemcpyDeviceToHost); REP(k, info.size()) { glm::mat3 R = info[k]; glm::vec3 mc = info2[k]; ERR("[%f %f %f]", mc[0], mc[1], mc[2]); ERR("[%f %f %f %f %f %f %f %f %f]", R[0][0], R[1][0], R[2][0], R[0][1], R[1][1], R[2][1], R[0][2], R[1][2], R[2][2]); } #endif #if 0 vector<glm::uint> info; info.resize(mMembersRegionsOffsets.size()); cudaMemcpy(&info[0], mMembersRegionsOffsets.data(), sizeof(glm::uint) * mMembersRegionsOffsets.size(), cudaMemcpyDeviceToHost); FOREACH_R(i, info) { ERR("%d,", *i); } #endif // integrate motion threadsPerBlock = 128; blockCount = mPositions.size() / threadsPerBlock + 1; integrateMotionKernel<<<blockCount, threadsPerBlock>>>( dt, mPositions.data(), mProjections.data(), mVelocities.data(), mPositions.size()); cudaDeviceSynchronize(); } void CUDASoftBodySolver::ProjectSystem(glm::float_t dt) { if (mInitialized) mContext->ProjectSystem(dt, mWorldParams); } void CUDASoftBodySolver::AddSoftBody(SoftBody *body) { mBodies.push_back(body); if (!mInitialized || !mContext->InitSoftBody(body)) ERR("Failed to add SoftBody!"); }
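// A hedged sketch (not from either file) of the launch-syntax rewrite hipify applies throughout
// this pair: the CUDA triple-chevron launch and hipLaunchKernelGGL carry the same grid, block,
// shared-memory and stream arguments, with the kernel name and launch configuration folded into
// the macro's leading parameters. scaleKernel and launch_scale are invented names for illustration.
__global__ void scaleKernel(float* data, float factor, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        data[i] *= factor;
}

void launch_scale(float* d_data, float factor, int n)
{
    const int threadsPerBlock = 128;
    const int blockCount = n / threadsPerBlock + 1; // same rounding convention as ProjectSystem()

    // CUDA form, as in the .cu version of this solver:
    scaleKernel<<<blockCount, threadsPerBlock, 0, 0>>>(d_data, factor, n);

    // HIP form emitted by hipify, as in the .hip version (needs <hip/hip_runtime.h>):
    // hipLaunchKernelGGL(scaleKernel, dim3(blockCount), dim3(threadsPerBlock), 0, 0,
    //                    d_data, factor, n);
}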
714e4db982cfa5b1f931623437febc9490ee2bcf.hip
// !!! This is a file automatically generated by hipify!!! //pass //--blockDim=1024 --gridDim=1 --no-inline #include <hip/hip_runtime.h> #include <stdio.h> #define N 2 //1024 __global__ void definitions (int* A, unsigned int* B, unsigned long long int* C) { atomicMin(A,10); atomicMin(B,1); atomicMin(C,5); }
714e4db982cfa5b1f931623437febc9490ee2bcf.cu
//pass //--blockDim=1024 --gridDim=1 --no-inline #include <cuda.h> #include <stdio.h> #define N 2 //1024 __global__ void definitions (int* A, unsigned int* B, unsigned long long int* C) { atomicMin(A,10); atomicMin(B,1); atomicMin(C,5); }
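// A minimal host-side driver sketch (not part of the GPUVerify test above): atomicMin atomically
// replaces its destination with min(old, operand), so after the launch the three values can be at
// most 10, 1 and 5. run_definitions is an invented helper; note the unsigned long long overload
// requires a device of compute capability 3.5 or higher.
#include <cuda_runtime.h>

int run_definitions()
{
    int hA = 42;                 int* dA;
    unsigned int hB = 7;         unsigned int* dB;
    unsigned long long hC = 99;  unsigned long long* dC;

    cudaMalloc(&dA, sizeof(hA)); cudaMemcpy(dA, &hA, sizeof(hA), cudaMemcpyHostToDevice);
    cudaMalloc(&dB, sizeof(hB)); cudaMemcpy(dB, &hB, sizeof(hB), cudaMemcpyHostToDevice);
    cudaMalloc(&dC, sizeof(hC)); cudaMemcpy(dC, &hC, sizeof(hC), cudaMemcpyHostToDevice);

    definitions<<<1, N>>>(dA, dB, dC); // N comes from the #define above; the harness pins gridDim=1

    cudaMemcpy(&hA, dA, sizeof(hA), cudaMemcpyDeviceToHost);
    cudaMemcpy(&hB, dB, sizeof(hB), cudaMemcpyDeviceToHost);
    cudaMemcpy(&hC, dC, sizeof(hC), cudaMemcpyDeviceToHost);
    printf("%d %u %llu\n", hA, hB, hC); // expected: 10 1 5 for these starting values

    cudaFree(dA); cudaFree(dB); cudaFree(dC);
    return 0;
}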
8281c63e8c24b5b3d0e806f2d453c7ae5968d508.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <common/cudart_utils.h> #include <gtest/gtest.h> #include <linalg/transpose.h> #include <test_utils.h> #include <cuml/linear_model/glm.hpp> #include <glm/qn/glm_linear.cuh> #include <glm/qn/glm_logistic.cuh> #include <glm/qn/glm_softmax.cuh> #include <glm/qn/qn.cuh> #include <vector> namespace ML { namespace GLM { using namespace MLCommon; struct QuasiNewtonTest : ::testing::Test { static constexpr int N = 10; static constexpr int D = 2; const static double *nobptr; const static double tol; const static double X[N][D]; cumlHandle cuml_handle; const cumlHandle_impl &handle; hipStream_t stream; std::shared_ptr<SimpleMatOwning<double>> Xdev; std::shared_ptr<SimpleVecOwning<double>> ydev; std::shared_ptr<deviceAllocator> allocator; QuasiNewtonTest() : handle(cuml_handle.getImpl()) {} void SetUp() { stream = cuml_handle.getStream(); Xdev.reset(new SimpleMatOwning<double>(handle.getDeviceAllocator(), N, D, stream, ROW_MAJOR)); updateDevice(Xdev->data, &X[0][0], Xdev->len, stream); ydev.reset( new SimpleVecOwning<double>(handle.getDeviceAllocator(), N, stream)); CUDA_CHECK(hipStreamSynchronize(stream)); allocator = handle.getDeviceAllocator(); } void TearDown() {} }; const double *QuasiNewtonTest::nobptr = 0; const double QuasiNewtonTest::tol = 5e-6; const double QuasiNewtonTest::X[QuasiNewtonTest::N][QuasiNewtonTest::D] = { {-0.2047076594847130, 0.4789433380575482}, {-0.5194387150567381, -0.5557303043474900}, {1.9657805725027142, 1.3934058329729904}, {0.0929078767437177, 0.2817461528302025}, {0.7690225676118387, 1.2464347363862822}, {1.0071893575830049, -1.2962211091122635}, {0.2749916334321240, 0.2289128789353159}, {1.3529168351654497, 0.8864293405915888}, {-2.0016373096603974, -0.3718425371402544}, {1.6690253095248706, -0.4385697358355719}}; template <typename T, class Comp> ::testing::AssertionResult checkParamsEqual(const cumlHandle_impl &handle, const T *host_weights, const T *host_bias, const T *w, const GLMDims &dims, Comp &comp, hipStream_t stream) { int C = dims.C; int D = dims.D; bool fit_intercept = dims.fit_intercept; std::vector<T> w_ref_cm(C * D); int idx = 0; for (int d = 0; d < D; d++) for (int c = 0; c < C; c++) { w_ref_cm[idx++] = host_weights[c * D + d]; } SimpleVecOwning<T> w_ref(handle.getDeviceAllocator(), dims.n_param, stream); updateDevice(w_ref.data, &w_ref_cm[0], C * D, stream); if (fit_intercept) { updateDevice(&w_ref.data[C * D], host_bias, C, stream); } CUDA_CHECK(hipStreamSynchronize(stream)); return devArrMatch(w_ref.data, w, w_ref.len, comp); } template <typename T, class LossFunction> T run(const cumlHandle_impl &handle, LossFunction &loss, const SimpleMat<T> &X, const SimpleVec<T> &y, T l1, T l2, T *w, SimpleMat<T> &z, int verbosity, hipStream_t stream) { int max_iter = 100; T grad_tol = 1e-16; int linesearch_max_iter = 50; int lbfgs_memory = 5; int num_iters = 0; T fx; SimpleVec<T> w0(w, loss.n_param); qn_fit<T, 
LossFunction>(handle, loss, X.data, y.data, z.data, X.m, l1, l2, max_iter, grad_tol, linesearch_max_iter, lbfgs_memory, verbosity, w0.data, &fx, &num_iters, X.ord, stream); return fx; } template <typename T> T run_api(const cumlHandle &cuml_handle, int loss_type, int C, bool fit_intercept, const SimpleMat<T> &X, const SimpleVec<T> &y, T l1, T l2, T *w, SimpleMat<T> &z, int verbosity, hipStream_t stream) { int max_iter = 100; T grad_tol = 1e-8; int linesearch_max_iter = 50; int lbfgs_memory = 5; int num_iters = 0; SimpleVec<T> w0(w, X.n + fit_intercept); w0.fill(T(0), stream); T fx; qnFit(cuml_handle, X.data, y.data, X.m, X.n, C, fit_intercept, l1, l2, max_iter, grad_tol, linesearch_max_iter, lbfgs_memory, verbosity, w, &fx, &num_iters, false, loss_type); return fx; } TEST_F(QuasiNewtonTest, binary_logistic_vs_sklearn) { CompareApprox<double> compApprox(tol); // Test case generated in python and solved with sklearn double y[N] = {1, 1, 1, 0, 1, 0, 1, 0, 1, 0}; updateDevice(ydev->data, &y[0], ydev->len, stream); CUDA_CHECK(hipStreamSynchronize(stream)); double alpha = 0.01 * N; LogisticLoss<double> loss_b(handle, D, true); LogisticLoss<double> loss_no_b(handle, D, false); SimpleVecOwning<double> w0(allocator, D + 1, stream); SimpleVecOwning<double> z(allocator, N, stream); double l1, l2, fx; double w_l1_b[2] = {-1.6899370396155091, 1.9021577534928300}; double b_l1_b = 0.8057670813749118; double obj_l1_b = 0.44295941481024703; l1 = alpha; l2 = 0.0; fx = run(handle, loss_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream); ASSERT_TRUE(compApprox(obj_l1_b, fx)); ASSERT_TRUE(checkParamsEqual(handle, &w_l1_b[0], &b_l1_b, w0.data, loss_b, compApprox, stream)); fx = run_api(cuml_handle, 0, 2, loss_b.fit_intercept, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream); ASSERT_TRUE(compApprox(obj_l1_b, fx)); double w_l2_b[2] = {-1.5339880402781370, 1.6788639581350926}; double b_l2_b = 0.806087868102401; double obj_l2_b = 0.4378085369889721; l1 = 0; l2 = alpha; fx = run(handle, loss_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream); ASSERT_TRUE(compApprox(obj_l2_b, fx)); ASSERT_TRUE(checkParamsEqual(handle, &w_l2_b[0], &b_l2_b, w0.data, loss_b, compApprox, stream)); fx = run_api(cuml_handle, 0, 2, loss_b.fit_intercept, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream); ASSERT_TRUE(compApprox(obj_l2_b, fx)); double w_l1_no_b[2] = {-1.6215035298864591, 2.3650868394981086}; double obj_l1_no_b = 0.4769896009200278; l1 = alpha; l2 = 0.0; fx = run(handle, loss_no_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream); ASSERT_TRUE(compApprox(obj_l1_no_b, fx)); ASSERT_TRUE(checkParamsEqual(handle, &w_l1_no_b[0], nobptr, w0.data, loss_no_b, compApprox, stream)); fx = run_api(cuml_handle, 0, 2, loss_no_b.fit_intercept, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream); ASSERT_TRUE(compApprox(obj_l1_no_b, fx)); double w_l2_no_b[2] = {-1.3931049893764620, 2.0140103094119621}; double obj_l2_no_b = 0.47502098062114273; l1 = 0; l2 = alpha; fx = run(handle, loss_no_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream); ASSERT_TRUE(compApprox(obj_l2_no_b, fx)); ASSERT_TRUE(checkParamsEqual(handle, &w_l2_no_b[0], nobptr, w0.data, loss_no_b, compApprox, stream)); fx = run_api(cuml_handle, 0, 2, loss_no_b.fit_intercept, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream); ASSERT_TRUE(compApprox(obj_l2_no_b, fx)); } TEST_F(QuasiNewtonTest, multiclass_logistic_vs_sklearn) { // The data seems to small for the objective to be strongly convex // leaving out exact param checks CompareApprox<double> compApprox(tol); double y[N] = {2, 2, 0, 3, 3, 0, 0, 0, 1, 0}; 
updateDevice(ydev->data, &y[0], ydev->len, stream); CUDA_CHECK(hipStreamSynchronize(stream)); double fx, l1, l2; int C = 4; double alpha = 0.016 * N; SimpleMatOwning<double> z(allocator, C, N, stream); SimpleVecOwning<double> w0(allocator, C * (D + 1), stream); Softmax<double> loss_b(handle, D, C, true); Softmax<double> loss_no_b(handle, D, C, false); l1 = alpha; l2 = 0.0; double obj_l1_b = 0.5407911382311313; fx = run(handle, loss_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream); ASSERT_TRUE(compApprox(obj_l1_b, fx)); fx = run_api(cuml_handle, 2, C, loss_b.fit_intercept, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream); ASSERT_TRUE(compApprox(obj_l1_b, fx)); l1 = 0.0; l2 = alpha; double obj_l2_b = 0.5721784062720949; fx = run(handle, loss_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream); ASSERT_TRUE(compApprox(obj_l2_b, fx)); fx = run_api(cuml_handle, 2, C, loss_b.fit_intercept, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream); ASSERT_TRUE(compApprox(obj_l2_b, fx)); l1 = alpha; l2 = 0.0; double obj_l1_no_b = 0.6606929813245878; fx = run(handle, loss_no_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream); ASSERT_TRUE(compApprox(obj_l1_no_b, fx)); fx = run_api(cuml_handle, 2, C, loss_no_b.fit_intercept, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream); ASSERT_TRUE(compApprox(obj_l1_no_b, fx)); l1 = 0.0; l2 = alpha; double obj_l2_no_b = 0.6597171282106854; fx = run(handle, loss_no_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream); ASSERT_TRUE(compApprox(obj_l2_no_b, fx)); fx = run_api(cuml_handle, 2, C, loss_no_b.fit_intercept, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream); ASSERT_TRUE(compApprox(obj_l2_no_b, fx)); } TEST_F(QuasiNewtonTest, linear_regression_vs_sklearn) { CompareApprox<double> compApprox(tol); double y[N] = {0.2675836026202781, -0.0678277759663704, -0.6334027174275105, -0.1018336189077367, 0.0933815935886932, -1.1058853496996381, -0.1658298189619160, -0.2954290675648911, 0.7966520536712608, -1.0767450516284769}; updateDevice(ydev->data, &y[0], ydev->len, stream); CUDA_CHECK(hipStreamSynchronize(stream)); double fx, l1, l2; double alpha = 0.01 * N; SimpleVecOwning<double> w0(allocator, D + 1, stream); SimpleVecOwning<double> z(allocator, N, stream); SquaredLoss<double> loss_b(handle, D, true); SquaredLoss<double> loss_no_b(handle, D, false); l1 = alpha; l2 = 0.0; double w_l1_b[2] = {-0.4952397281519840, 0.3813315300180231}; double b_l1_b = -0.08140861819001188; double obj_l1_b = 0.011136986298775138; fx = run(handle, loss_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream); ASSERT_TRUE(compApprox(obj_l1_b, fx)); ASSERT_TRUE(checkParamsEqual(handle, &w_l1_b[0], &b_l1_b, w0.data, loss_b, compApprox, stream)); fx = run_api(cuml_handle, 1, 1, loss_b.fit_intercept, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream); ASSERT_TRUE(compApprox(obj_l1_b, fx)); l1 = 0.0; l2 = alpha; double w_l2_b[2] = {-0.5022384743587150, 0.3937352417485087}; double b_l2_b = -0.08062397391797513; double obj_l2_b = 0.004268621967866347; fx = run(handle, loss_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream); ASSERT_TRUE(compApprox(obj_l2_b, fx)); ASSERT_TRUE(checkParamsEqual(handle, &w_l2_b[0], &b_l2_b, w0.data, loss_b, compApprox, stream)); fx = run_api(cuml_handle, 1, 1, loss_b.fit_intercept, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream); ASSERT_TRUE(compApprox(obj_l2_b, fx)); l1 = alpha; l2 = 0.0; double w_l1_no_b[2] = {-0.5175178128147135, 0.3720844589831813}; double obj_l1_no_b = 0.013981355746112447; fx = run(handle, loss_no_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream); ASSERT_TRUE(compApprox(obj_l1_no_b, fx)); 
ASSERT_TRUE(checkParamsEqual(handle, &w_l1_no_b[0], nobptr, w0.data, loss_no_b, compApprox, stream)); fx = run_api(cuml_handle, 1, 1, loss_no_b.fit_intercept, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream); ASSERT_TRUE(compApprox(obj_l1_no_b, fx)); l1 = 0.0; l2 = alpha; double w_l2_no_b[2] = {-0.5241651041233270, 0.3846317886627560}; double obj_l2_no_b = 0.007061261366969662; fx = run(handle, loss_no_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream); ASSERT_TRUE(compApprox(obj_l2_no_b, fx)); ASSERT_TRUE(checkParamsEqual(handle, &w_l2_no_b[0], nobptr, w0.data, loss_no_b, compApprox, stream)); fx = run_api(cuml_handle, 1, 1, loss_no_b.fit_intercept, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream); ASSERT_TRUE(compApprox(obj_l2_no_b, fx)); } TEST_F(QuasiNewtonTest, predict) { CompareApprox<double> compApprox(1e-8); std::vector<double> w_host(D); w_host[0] = 1; std::vector<double> preds_host(N); SimpleVecOwning<double> w(allocator, D, stream); SimpleVecOwning<double> preds(allocator, N, stream); updateDevice(w.data, &w_host[0], w.len, stream); qnPredict(handle, Xdev->data, N, D, 2, false, w.data, false, 0, preds.data, stream); updateHost(&preds_host[0], preds.data, preds.len, stream); CUDA_CHECK(hipStreamSynchronize(stream)); for (int it = 0; it < N; it++) { ASSERT_TRUE(X[it][0] > 0 ? compApprox(preds_host[it], 1) : compApprox(preds_host[it], 0)); } qnPredict(handle, Xdev->data, N, D, 1, false, w.data, false, 1, preds.data, stream); updateHost(&preds_host[0], preds.data, preds.len, stream); CUDA_CHECK(hipStreamSynchronize(stream)); for (int it = 0; it < N; it++) { ASSERT_TRUE(compApprox(X[it][0], preds_host[it])); } } TEST_F(QuasiNewtonTest, predict_softmax) { CompareApprox<double> compApprox(1e-8); int C = 4; std::vector<double> w_host(C * D); w_host[0] = 1; w_host[D * C - 1] = 1; std::vector<double> preds_host(N); SimpleVecOwning<double> w(allocator, w_host.size(), stream); SimpleVecOwning<double> preds(allocator, N, stream); updateDevice(w.data, &w_host[0], w.len, stream); qnPredict(handle, Xdev->data, N, D, C, false, w.data, false, 2, preds.data, stream); updateHost(&preds_host[0], preds.data, preds.len, stream); CUDA_CHECK(hipStreamSynchronize(stream)); for (int it = 0; it < N; it++) { if (X[it][0] < 0 && X[it][1] < 0) { ASSERT_TRUE(compApprox(1, preds_host[it])); } else if (X[it][0] > X[it][1]) { ASSERT_TRUE(compApprox(0, preds_host[it])); } else { ASSERT_TRUE(compApprox(C - 1, preds_host[it])); } } } } // namespace GLM } // end namespace ML
8281c63e8c24b5b3d0e806f2d453c7ae5968d508.cu
/* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <common/cudart_utils.h> #include <gtest/gtest.h> #include <linalg/transpose.h> #include <test_utils.h> #include <cuml/linear_model/glm.hpp> #include <glm/qn/glm_linear.cuh> #include <glm/qn/glm_logistic.cuh> #include <glm/qn/glm_softmax.cuh> #include <glm/qn/qn.cuh> #include <vector> namespace ML { namespace GLM { using namespace MLCommon; struct QuasiNewtonTest : ::testing::Test { static constexpr int N = 10; static constexpr int D = 2; const static double *nobptr; const static double tol; const static double X[N][D]; cumlHandle cuml_handle; const cumlHandle_impl &handle; cudaStream_t stream; std::shared_ptr<SimpleMatOwning<double>> Xdev; std::shared_ptr<SimpleVecOwning<double>> ydev; std::shared_ptr<deviceAllocator> allocator; QuasiNewtonTest() : handle(cuml_handle.getImpl()) {} void SetUp() { stream = cuml_handle.getStream(); Xdev.reset(new SimpleMatOwning<double>(handle.getDeviceAllocator(), N, D, stream, ROW_MAJOR)); updateDevice(Xdev->data, &X[0][0], Xdev->len, stream); ydev.reset( new SimpleVecOwning<double>(handle.getDeviceAllocator(), N, stream)); CUDA_CHECK(cudaStreamSynchronize(stream)); allocator = handle.getDeviceAllocator(); } void TearDown() {} }; const double *QuasiNewtonTest::nobptr = 0; const double QuasiNewtonTest::tol = 5e-6; const double QuasiNewtonTest::X[QuasiNewtonTest::N][QuasiNewtonTest::D] = { {-0.2047076594847130, 0.4789433380575482}, {-0.5194387150567381, -0.5557303043474900}, {1.9657805725027142, 1.3934058329729904}, {0.0929078767437177, 0.2817461528302025}, {0.7690225676118387, 1.2464347363862822}, {1.0071893575830049, -1.2962211091122635}, {0.2749916334321240, 0.2289128789353159}, {1.3529168351654497, 0.8864293405915888}, {-2.0016373096603974, -0.3718425371402544}, {1.6690253095248706, -0.4385697358355719}}; template <typename T, class Comp> ::testing::AssertionResult checkParamsEqual(const cumlHandle_impl &handle, const T *host_weights, const T *host_bias, const T *w, const GLMDims &dims, Comp &comp, cudaStream_t stream) { int C = dims.C; int D = dims.D; bool fit_intercept = dims.fit_intercept; std::vector<T> w_ref_cm(C * D); int idx = 0; for (int d = 0; d < D; d++) for (int c = 0; c < C; c++) { w_ref_cm[idx++] = host_weights[c * D + d]; } SimpleVecOwning<T> w_ref(handle.getDeviceAllocator(), dims.n_param, stream); updateDevice(w_ref.data, &w_ref_cm[0], C * D, stream); if (fit_intercept) { updateDevice(&w_ref.data[C * D], host_bias, C, stream); } CUDA_CHECK(cudaStreamSynchronize(stream)); return devArrMatch(w_ref.data, w, w_ref.len, comp); } template <typename T, class LossFunction> T run(const cumlHandle_impl &handle, LossFunction &loss, const SimpleMat<T> &X, const SimpleVec<T> &y, T l1, T l2, T *w, SimpleMat<T> &z, int verbosity, cudaStream_t stream) { int max_iter = 100; T grad_tol = 1e-16; int linesearch_max_iter = 50; int lbfgs_memory = 5; int num_iters = 0; T fx; SimpleVec<T> w0(w, loss.n_param); qn_fit<T, LossFunction>(handle, loss, X.data, y.data, z.data, X.m, 
l1, l2, max_iter, grad_tol, linesearch_max_iter, lbfgs_memory, verbosity, w0.data, &fx, &num_iters, X.ord, stream); return fx; } template <typename T> T run_api(const cumlHandle &cuml_handle, int loss_type, int C, bool fit_intercept, const SimpleMat<T> &X, const SimpleVec<T> &y, T l1, T l2, T *w, SimpleMat<T> &z, int verbosity, cudaStream_t stream) { int max_iter = 100; T grad_tol = 1e-8; int linesearch_max_iter = 50; int lbfgs_memory = 5; int num_iters = 0; SimpleVec<T> w0(w, X.n + fit_intercept); w0.fill(T(0), stream); T fx; qnFit(cuml_handle, X.data, y.data, X.m, X.n, C, fit_intercept, l1, l2, max_iter, grad_tol, linesearch_max_iter, lbfgs_memory, verbosity, w, &fx, &num_iters, false, loss_type); return fx; } TEST_F(QuasiNewtonTest, binary_logistic_vs_sklearn) { CompareApprox<double> compApprox(tol); // Test case generated in python and solved with sklearn double y[N] = {1, 1, 1, 0, 1, 0, 1, 0, 1, 0}; updateDevice(ydev->data, &y[0], ydev->len, stream); CUDA_CHECK(cudaStreamSynchronize(stream)); double alpha = 0.01 * N; LogisticLoss<double> loss_b(handle, D, true); LogisticLoss<double> loss_no_b(handle, D, false); SimpleVecOwning<double> w0(allocator, D + 1, stream); SimpleVecOwning<double> z(allocator, N, stream); double l1, l2, fx; double w_l1_b[2] = {-1.6899370396155091, 1.9021577534928300}; double b_l1_b = 0.8057670813749118; double obj_l1_b = 0.44295941481024703; l1 = alpha; l2 = 0.0; fx = run(handle, loss_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream); ASSERT_TRUE(compApprox(obj_l1_b, fx)); ASSERT_TRUE(checkParamsEqual(handle, &w_l1_b[0], &b_l1_b, w0.data, loss_b, compApprox, stream)); fx = run_api(cuml_handle, 0, 2, loss_b.fit_intercept, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream); ASSERT_TRUE(compApprox(obj_l1_b, fx)); double w_l2_b[2] = {-1.5339880402781370, 1.6788639581350926}; double b_l2_b = 0.806087868102401; double obj_l2_b = 0.4378085369889721; l1 = 0; l2 = alpha; fx = run(handle, loss_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream); ASSERT_TRUE(compApprox(obj_l2_b, fx)); ASSERT_TRUE(checkParamsEqual(handle, &w_l2_b[0], &b_l2_b, w0.data, loss_b, compApprox, stream)); fx = run_api(cuml_handle, 0, 2, loss_b.fit_intercept, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream); ASSERT_TRUE(compApprox(obj_l2_b, fx)); double w_l1_no_b[2] = {-1.6215035298864591, 2.3650868394981086}; double obj_l1_no_b = 0.4769896009200278; l1 = alpha; l2 = 0.0; fx = run(handle, loss_no_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream); ASSERT_TRUE(compApprox(obj_l1_no_b, fx)); ASSERT_TRUE(checkParamsEqual(handle, &w_l1_no_b[0], nobptr, w0.data, loss_no_b, compApprox, stream)); fx = run_api(cuml_handle, 0, 2, loss_no_b.fit_intercept, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream); ASSERT_TRUE(compApprox(obj_l1_no_b, fx)); double w_l2_no_b[2] = {-1.3931049893764620, 2.0140103094119621}; double obj_l2_no_b = 0.47502098062114273; l1 = 0; l2 = alpha; fx = run(handle, loss_no_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream); ASSERT_TRUE(compApprox(obj_l2_no_b, fx)); ASSERT_TRUE(checkParamsEqual(handle, &w_l2_no_b[0], nobptr, w0.data, loss_no_b, compApprox, stream)); fx = run_api(cuml_handle, 0, 2, loss_no_b.fit_intercept, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream); ASSERT_TRUE(compApprox(obj_l2_no_b, fx)); } TEST_F(QuasiNewtonTest, multiclass_logistic_vs_sklearn) { // The data seems to small for the objective to be strongly convex // leaving out exact param checks CompareApprox<double> compApprox(tol); double y[N] = {2, 2, 0, 3, 3, 0, 0, 0, 1, 0}; updateDevice(ydev->data, &y[0], ydev->len, stream); 
CUDA_CHECK(cudaStreamSynchronize(stream)); double fx, l1, l2; int C = 4; double alpha = 0.016 * N; SimpleMatOwning<double> z(allocator, C, N, stream); SimpleVecOwning<double> w0(allocator, C * (D + 1), stream); Softmax<double> loss_b(handle, D, C, true); Softmax<double> loss_no_b(handle, D, C, false); l1 = alpha; l2 = 0.0; double obj_l1_b = 0.5407911382311313; fx = run(handle, loss_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream); ASSERT_TRUE(compApprox(obj_l1_b, fx)); fx = run_api(cuml_handle, 2, C, loss_b.fit_intercept, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream); ASSERT_TRUE(compApprox(obj_l1_b, fx)); l1 = 0.0; l2 = alpha; double obj_l2_b = 0.5721784062720949; fx = run(handle, loss_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream); ASSERT_TRUE(compApprox(obj_l2_b, fx)); fx = run_api(cuml_handle, 2, C, loss_b.fit_intercept, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream); ASSERT_TRUE(compApprox(obj_l2_b, fx)); l1 = alpha; l2 = 0.0; double obj_l1_no_b = 0.6606929813245878; fx = run(handle, loss_no_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream); ASSERT_TRUE(compApprox(obj_l1_no_b, fx)); fx = run_api(cuml_handle, 2, C, loss_no_b.fit_intercept, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream); ASSERT_TRUE(compApprox(obj_l1_no_b, fx)); l1 = 0.0; l2 = alpha; double obj_l2_no_b = 0.6597171282106854; fx = run(handle, loss_no_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream); ASSERT_TRUE(compApprox(obj_l2_no_b, fx)); fx = run_api(cuml_handle, 2, C, loss_no_b.fit_intercept, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream); ASSERT_TRUE(compApprox(obj_l2_no_b, fx)); } TEST_F(QuasiNewtonTest, linear_regression_vs_sklearn) { CompareApprox<double> compApprox(tol); double y[N] = {0.2675836026202781, -0.0678277759663704, -0.6334027174275105, -0.1018336189077367, 0.0933815935886932, -1.1058853496996381, -0.1658298189619160, -0.2954290675648911, 0.7966520536712608, -1.0767450516284769}; updateDevice(ydev->data, &y[0], ydev->len, stream); CUDA_CHECK(cudaStreamSynchronize(stream)); double fx, l1, l2; double alpha = 0.01 * N; SimpleVecOwning<double> w0(allocator, D + 1, stream); SimpleVecOwning<double> z(allocator, N, stream); SquaredLoss<double> loss_b(handle, D, true); SquaredLoss<double> loss_no_b(handle, D, false); l1 = alpha; l2 = 0.0; double w_l1_b[2] = {-0.4952397281519840, 0.3813315300180231}; double b_l1_b = -0.08140861819001188; double obj_l1_b = 0.011136986298775138; fx = run(handle, loss_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream); ASSERT_TRUE(compApprox(obj_l1_b, fx)); ASSERT_TRUE(checkParamsEqual(handle, &w_l1_b[0], &b_l1_b, w0.data, loss_b, compApprox, stream)); fx = run_api(cuml_handle, 1, 1, loss_b.fit_intercept, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream); ASSERT_TRUE(compApprox(obj_l1_b, fx)); l1 = 0.0; l2 = alpha; double w_l2_b[2] = {-0.5022384743587150, 0.3937352417485087}; double b_l2_b = -0.08062397391797513; double obj_l2_b = 0.004268621967866347; fx = run(handle, loss_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream); ASSERT_TRUE(compApprox(obj_l2_b, fx)); ASSERT_TRUE(checkParamsEqual(handle, &w_l2_b[0], &b_l2_b, w0.data, loss_b, compApprox, stream)); fx = run_api(cuml_handle, 1, 1, loss_b.fit_intercept, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream); ASSERT_TRUE(compApprox(obj_l2_b, fx)); l1 = alpha; l2 = 0.0; double w_l1_no_b[2] = {-0.5175178128147135, 0.3720844589831813}; double obj_l1_no_b = 0.013981355746112447; fx = run(handle, loss_no_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream); ASSERT_TRUE(compApprox(obj_l1_no_b, fx)); ASSERT_TRUE(checkParamsEqual(handle, &w_l1_no_b[0], nobptr, 
w0.data, loss_no_b, compApprox, stream)); fx = run_api(cuml_handle, 1, 1, loss_no_b.fit_intercept, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream); ASSERT_TRUE(compApprox(obj_l1_no_b, fx)); l1 = 0.0; l2 = alpha; double w_l2_no_b[2] = {-0.5241651041233270, 0.3846317886627560}; double obj_l2_no_b = 0.007061261366969662; fx = run(handle, loss_no_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream); ASSERT_TRUE(compApprox(obj_l2_no_b, fx)); ASSERT_TRUE(checkParamsEqual(handle, &w_l2_no_b[0], nobptr, w0.data, loss_no_b, compApprox, stream)); fx = run_api(cuml_handle, 1, 1, loss_no_b.fit_intercept, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream); ASSERT_TRUE(compApprox(obj_l2_no_b, fx)); } TEST_F(QuasiNewtonTest, predict) { CompareApprox<double> compApprox(1e-8); std::vector<double> w_host(D); w_host[0] = 1; std::vector<double> preds_host(N); SimpleVecOwning<double> w(allocator, D, stream); SimpleVecOwning<double> preds(allocator, N, stream); updateDevice(w.data, &w_host[0], w.len, stream); qnPredict(handle, Xdev->data, N, D, 2, false, w.data, false, 0, preds.data, stream); updateHost(&preds_host[0], preds.data, preds.len, stream); CUDA_CHECK(cudaStreamSynchronize(stream)); for (int it = 0; it < N; it++) { ASSERT_TRUE(X[it][0] > 0 ? compApprox(preds_host[it], 1) : compApprox(preds_host[it], 0)); } qnPredict(handle, Xdev->data, N, D, 1, false, w.data, false, 1, preds.data, stream); updateHost(&preds_host[0], preds.data, preds.len, stream); CUDA_CHECK(cudaStreamSynchronize(stream)); for (int it = 0; it < N; it++) { ASSERT_TRUE(compApprox(X[it][0], preds_host[it])); } } TEST_F(QuasiNewtonTest, predict_softmax) { CompareApprox<double> compApprox(1e-8); int C = 4; std::vector<double> w_host(C * D); w_host[0] = 1; w_host[D * C - 1] = 1; std::vector<double> preds_host(N); SimpleVecOwning<double> w(allocator, w_host.size(), stream); SimpleVecOwning<double> preds(allocator, N, stream); updateDevice(w.data, &w_host[0], w.len, stream); qnPredict(handle, Xdev->data, N, D, C, false, w.data, false, 2, preds.data, stream); updateHost(&preds_host[0], preds.data, preds.len, stream); CUDA_CHECK(cudaStreamSynchronize(stream)); for (int it = 0; it < N; it++) { if (X[it][0] < 0 && X[it][1] < 0) { ASSERT_TRUE(compApprox(1, preds_host[it])); } else if (X[it][0] > X[it][1]) { ASSERT_TRUE(compApprox(0, preds_host[it])); } else { ASSERT_TRUE(compApprox(C - 1, preds_host[it])); } } } } // namespace GLM } // end namespace ML
2021ade8040efd6379221447f974afd5badd7ceb.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

__global__ void silly_kernel(int n, int* in, int* out)
{
  if (threadIdx.x == 0) {
    int acc = 0;
    for (int i = 0; i < n; ++i) {
      acc += in[i];
      out[i] = acc;
    }
  }
}

#define SIZE 10

int main(int argc, char **argv)
{
  int *in = NULL;
  int *out = NULL;

  in = (int*)malloc(SIZE*sizeof(int));
  out = (int*)malloc(SIZE*sizeof(int));

  int *din = NULL;
  int *dout = NULL;
  hipMalloc((void**)&din, SIZE*sizeof(int));
  hipMalloc((void**)&dout, SIZE*sizeof(int));

  // Generate some data
  for (int i = 0; i < SIZE; i++) {
    in[i] = i+1;
  }

  hipMemcpy(din, in, SIZE*sizeof(int), hipMemcpyHostToDevice);

  hipLaunchKernelGGL((silly_kernel), dim3(1), dim3(1), 0, 0, SIZE, din, dout);

  hipMemcpy(out, dout, SIZE*sizeof(int), hipMemcpyDeviceToHost);

  hipFree(din);
  hipFree(dout);

  for (int i = 0; i < SIZE; ++i) {
    printf("%d ", out[i]);
  }
  printf("\n");

  return 0;
}
2021ade8040efd6379221447f974afd5badd7ceb.cu
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

__global__ void silly_kernel(int n, int* in, int* out)
{
  if (threadIdx.x == 0) {
    int acc = 0;
    for (int i = 0; i < n; ++i) {
      acc += in[i];
      out[i] = acc;
    }
  }
}

#define SIZE 10

int main(int argc, char **argv)
{
  int *in = NULL;
  int *out = NULL;

  in = (int*)malloc(SIZE*sizeof(int));
  out = (int*)malloc(SIZE*sizeof(int));

  int *din = NULL;
  int *dout = NULL;
  cudaMalloc((void**)&din, SIZE*sizeof(int));
  cudaMalloc((void**)&dout, SIZE*sizeof(int));

  // Generate some data
  for (int i = 0; i < SIZE; i++) {
    in[i] = i+1;
  }

  cudaMemcpy(din, in, SIZE*sizeof(int), cudaMemcpyHostToDevice);

  silly_kernel<<<1,1,0>>>(SIZE, din, dout);

  cudaMemcpy(out, dout, SIZE*sizeof(int), cudaMemcpyDeviceToHost);

  cudaFree(din);
  cudaFree(dout);

  for (int i = 0; i < SIZE; ++i) {
    printf("%d ", out[i]);
  }
  printf("\n");

  return 0;
}
ce47ac10f9c916ef3fbd601779bdc49e2fb98be3.hip
// !!! This is a file automatically generated by hipify!!! #include <torch/extension.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <vector> namespace { template <typename scalar_t> __device__ __forceinline__ scalar_t sigmoid(scalar_t z) { return 1.0 / (1.0 + exp(-z)); } template <typename scalar_t> __device__ __forceinline__ scalar_t d_sigmoid(scalar_t z) { const auto s = sigmoid(z); return (1.0 - s) * s; } template <typename scalar_t> __device__ __forceinline__ scalar_t d_tanh(scalar_t z) { const auto t = tanh(z); return 1 - (t * t); } template <typename scalar_t> __device__ __forceinline__ scalar_t elu(scalar_t z, scalar_t alpha = 1.0) { return fmaxf(0.0, z) + fminf(0.0, alpha * (exp(z) - 1.0)); } template <typename scalar_t> __device__ __forceinline__ scalar_t d_elu(scalar_t z, scalar_t alpha = 1.0) { const auto e = exp(z); const auto d_relu = z < 0.0 ? 0.0 : 1.0; return d_relu + (((alpha * (e - 1.0)) < 0.0) ? (alpha * e) : 0.0); } template <typename scalar_t> __global__ void lltm_cuda_forward_kernel( const torch::PackedTensorAccessor<scalar_t,3,torch::RestrictPtrTraits,int32_t> gates, const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,int32_t> old_cell, torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,int32_t> new_h, torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,int32_t> new_cell, torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,int32_t> input_gate, torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,int32_t> output_gate, torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,int32_t> candidate_cell) { //batch index const int n = blockIdx.y; // column index const int c = blockIdx.x * blockDim.x + threadIdx.x; if (c < gates.size(2)){ input_gate[n][c] = sigmoid(gates[n][0][c]); output_gate[n][c] = sigmoid(gates[n][1][c]); candidate_cell[n][c] = elu(gates[n][2][c]); new_cell[n][c] = old_cell[n][c] + candidate_cell[n][c] * input_gate[n][c]; new_h[n][c] = tanh(new_cell[n][c]) * output_gate[n][c]; } } template <typename scalar_t> __global__ void lltm_cuda_backward_kernel( torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,int32_t> d_old_cell, torch::PackedTensorAccessor<scalar_t,3,torch::RestrictPtrTraits,int32_t> d_gates, const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,int32_t> grad_h, const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,int32_t> grad_cell, const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,int32_t> new_cell, const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,int32_t> input_gate, const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,int32_t> output_gate, const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,int32_t> candidate_cell, const torch::PackedTensorAccessor<scalar_t,3,torch::RestrictPtrTraits,int32_t> gate_weights) { //batch index const int n = blockIdx.y; // column index const int c = blockIdx.x * blockDim.x + threadIdx.x; if (c < d_gates.size(2)){ const auto d_output_gate = tanh(new_cell[n][c]) * grad_h[n][c]; const auto d_tanh_new_cell = output_gate[n][c] * grad_h[n][c]; const auto d_new_cell = d_tanh(new_cell[n][c]) * d_tanh_new_cell + grad_cell[n][c]; d_old_cell[n][c] = d_new_cell; const auto d_candidate_cell = input_gate[n][c] * d_new_cell; const auto d_input_gate = candidate_cell[n][c] * d_new_cell; d_gates[n][0][c] = d_input_gate * d_sigmoid(gate_weights[n][0][c]); d_gates[n][1][c] = d_output_gate * 
d_sigmoid(gate_weights[n][1][c]); d_gates[n][2][c] = d_candidate_cell * d_elu(gate_weights[n][2][c]); } } } // namespace std::vector<torch::Tensor> lltm_cuda_forward( torch::Tensor input, torch::Tensor weights, torch::Tensor bias, torch::Tensor old_h, torch::Tensor old_cell) { auto X = torch::cat({old_h, input}, /*dim=*/1); auto gate_weights = torch::addmm(bias, X, weights.transpose(0, 1)); const auto batch_size = old_cell.size(0); const auto state_size = old_cell.size(1); auto gates = gate_weights.reshape({batch_size, 3, state_size}); auto new_h = torch::zeros_like(old_cell); auto new_cell = torch::zeros_like(old_cell); auto input_gate = torch::zeros_like(old_cell); auto output_gate = torch::zeros_like(old_cell); auto candidate_cell = torch::zeros_like(old_cell); const int threads = 1024; const dim3 blocks((state_size + threads - 1) / threads, batch_size); AT_DISPATCH_FLOATING_TYPES(gates.type(), "lltm_forward_cuda", ([&] { hipLaunchKernelGGL(( lltm_cuda_forward_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0, gates.packed_accessor<scalar_t,3,torch::RestrictPtrTraits,int32_t>(), old_cell.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,int32_t>(), new_h.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,int32_t>(), new_cell.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,int32_t>(), input_gate.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,int32_t>(), output_gate.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,int32_t>(), candidate_cell.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,int32_t>()); })); return {new_h, new_cell, input_gate, output_gate, candidate_cell, X, gates}; } std::vector<torch::Tensor> lltm_cuda_backward( torch::Tensor grad_h, torch::Tensor grad_cell, torch::Tensor new_cell, torch::Tensor input_gate, torch::Tensor output_gate, torch::Tensor candidate_cell, torch::Tensor X, torch::Tensor gates, torch::Tensor weights) { auto d_old_cell = torch::zeros_like(new_cell); auto d_gates = torch::zeros_like(gates); const auto batch_size = new_cell.size(0); const auto state_size = new_cell.size(1); const int threads = 1024; const dim3 blocks((state_size + threads - 1) / threads, batch_size); AT_DISPATCH_FLOATING_TYPES(X.type(), "lltm_forward_cuda", ([&] { hipLaunchKernelGGL(( lltm_cuda_backward_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0, d_old_cell.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,int32_t>(), d_gates.packed_accessor<scalar_t,3,torch::RestrictPtrTraits,int32_t>(), grad_h.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,int32_t>(), grad_cell.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,int32_t>(), new_cell.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,int32_t>(), input_gate.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,int32_t>(), output_gate.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,int32_t>(), candidate_cell.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,int32_t>(), gates.packed_accessor<scalar_t,3,torch::RestrictPtrTraits,int32_t>()); })); auto d_gate_weights = d_gates.flatten(1, 2); auto d_weights = d_gate_weights.t().mm(X); auto d_bias = d_gate_weights.sum(/*dim=*/0, /*keepdim=*/true); auto d_X = d_gate_weights.mm(weights); auto d_old_h = d_X.slice(/*dim=*/1, 0, state_size); auto d_input = d_X.slice(/*dim=*/1, state_size); return {d_old_h, d_input, d_weights, d_bias, d_old_cell, d_gates}; }
ce47ac10f9c916ef3fbd601779bdc49e2fb98be3.cu
#include <torch/extension.h> #include <cuda.h> #include <cuda_runtime.h> #include <vector> namespace { template <typename scalar_t> __device__ __forceinline__ scalar_t sigmoid(scalar_t z) { return 1.0 / (1.0 + exp(-z)); } template <typename scalar_t> __device__ __forceinline__ scalar_t d_sigmoid(scalar_t z) { const auto s = sigmoid(z); return (1.0 - s) * s; } template <typename scalar_t> __device__ __forceinline__ scalar_t d_tanh(scalar_t z) { const auto t = tanh(z); return 1 - (t * t); } template <typename scalar_t> __device__ __forceinline__ scalar_t elu(scalar_t z, scalar_t alpha = 1.0) { return fmaxf(0.0, z) + fminf(0.0, alpha * (exp(z) - 1.0)); } template <typename scalar_t> __device__ __forceinline__ scalar_t d_elu(scalar_t z, scalar_t alpha = 1.0) { const auto e = exp(z); const auto d_relu = z < 0.0 ? 0.0 : 1.0; return d_relu + (((alpha * (e - 1.0)) < 0.0) ? (alpha * e) : 0.0); } template <typename scalar_t> __global__ void lltm_cuda_forward_kernel( const torch::PackedTensorAccessor<scalar_t,3,torch::RestrictPtrTraits,int32_t> gates, const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,int32_t> old_cell, torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,int32_t> new_h, torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,int32_t> new_cell, torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,int32_t> input_gate, torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,int32_t> output_gate, torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,int32_t> candidate_cell) { //batch index const int n = blockIdx.y; // column index const int c = blockIdx.x * blockDim.x + threadIdx.x; if (c < gates.size(2)){ input_gate[n][c] = sigmoid(gates[n][0][c]); output_gate[n][c] = sigmoid(gates[n][1][c]); candidate_cell[n][c] = elu(gates[n][2][c]); new_cell[n][c] = old_cell[n][c] + candidate_cell[n][c] * input_gate[n][c]; new_h[n][c] = tanh(new_cell[n][c]) * output_gate[n][c]; } } template <typename scalar_t> __global__ void lltm_cuda_backward_kernel( torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,int32_t> d_old_cell, torch::PackedTensorAccessor<scalar_t,3,torch::RestrictPtrTraits,int32_t> d_gates, const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,int32_t> grad_h, const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,int32_t> grad_cell, const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,int32_t> new_cell, const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,int32_t> input_gate, const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,int32_t> output_gate, const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,int32_t> candidate_cell, const torch::PackedTensorAccessor<scalar_t,3,torch::RestrictPtrTraits,int32_t> gate_weights) { //batch index const int n = blockIdx.y; // column index const int c = blockIdx.x * blockDim.x + threadIdx.x; if (c < d_gates.size(2)){ const auto d_output_gate = tanh(new_cell[n][c]) * grad_h[n][c]; const auto d_tanh_new_cell = output_gate[n][c] * grad_h[n][c]; const auto d_new_cell = d_tanh(new_cell[n][c]) * d_tanh_new_cell + grad_cell[n][c]; d_old_cell[n][c] = d_new_cell; const auto d_candidate_cell = input_gate[n][c] * d_new_cell; const auto d_input_gate = candidate_cell[n][c] * d_new_cell; d_gates[n][0][c] = d_input_gate * d_sigmoid(gate_weights[n][0][c]); d_gates[n][1][c] = d_output_gate * d_sigmoid(gate_weights[n][1][c]); d_gates[n][2][c] = d_candidate_cell * d_elu(gate_weights[n][2][c]); 
} } } // namespace std::vector<torch::Tensor> lltm_cuda_forward( torch::Tensor input, torch::Tensor weights, torch::Tensor bias, torch::Tensor old_h, torch::Tensor old_cell) { auto X = torch::cat({old_h, input}, /*dim=*/1); auto gate_weights = torch::addmm(bias, X, weights.transpose(0, 1)); const auto batch_size = old_cell.size(0); const auto state_size = old_cell.size(1); auto gates = gate_weights.reshape({batch_size, 3, state_size}); auto new_h = torch::zeros_like(old_cell); auto new_cell = torch::zeros_like(old_cell); auto input_gate = torch::zeros_like(old_cell); auto output_gate = torch::zeros_like(old_cell); auto candidate_cell = torch::zeros_like(old_cell); const int threads = 1024; const dim3 blocks((state_size + threads - 1) / threads, batch_size); AT_DISPATCH_FLOATING_TYPES(gates.type(), "lltm_forward_cuda", ([&] { lltm_cuda_forward_kernel<scalar_t><<<blocks, threads>>>( gates.packed_accessor<scalar_t,3,torch::RestrictPtrTraits,int32_t>(), old_cell.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,int32_t>(), new_h.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,int32_t>(), new_cell.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,int32_t>(), input_gate.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,int32_t>(), output_gate.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,int32_t>(), candidate_cell.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,int32_t>()); })); return {new_h, new_cell, input_gate, output_gate, candidate_cell, X, gates}; } std::vector<torch::Tensor> lltm_cuda_backward( torch::Tensor grad_h, torch::Tensor grad_cell, torch::Tensor new_cell, torch::Tensor input_gate, torch::Tensor output_gate, torch::Tensor candidate_cell, torch::Tensor X, torch::Tensor gates, torch::Tensor weights) { auto d_old_cell = torch::zeros_like(new_cell); auto d_gates = torch::zeros_like(gates); const auto batch_size = new_cell.size(0); const auto state_size = new_cell.size(1); const int threads = 1024; const dim3 blocks((state_size + threads - 1) / threads, batch_size); AT_DISPATCH_FLOATING_TYPES(X.type(), "lltm_forward_cuda", ([&] { lltm_cuda_backward_kernel<scalar_t><<<blocks, threads>>>( d_old_cell.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,int32_t>(), d_gates.packed_accessor<scalar_t,3,torch::RestrictPtrTraits,int32_t>(), grad_h.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,int32_t>(), grad_cell.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,int32_t>(), new_cell.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,int32_t>(), input_gate.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,int32_t>(), output_gate.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,int32_t>(), candidate_cell.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,int32_t>(), gates.packed_accessor<scalar_t,3,torch::RestrictPtrTraits,int32_t>()); })); auto d_gate_weights = d_gates.flatten(1, 2); auto d_weights = d_gate_weights.t().mm(X); auto d_bias = d_gate_weights.sum(/*dim=*/0, /*keepdim=*/true); auto d_X = d_gate_weights.mm(weights); auto d_old_h = d_X.slice(/*dim=*/1, 0, state_size); auto d_input = d_X.slice(/*dim=*/1, state_size); return {d_old_h, d_input, d_weights, d_bias, d_old_cell, d_gates}; }
cce61ceb74a6c9731e606343be0923df4de31ad8.hip
// !!! This is a file automatically generated by hipify!!! // GPU VERSION #include <hip/hip_runtime.h> #include <device_launch_parameters.h> #include <hip/hip_runtime.h> #include <sm_20_atomic_functions.h> #include <iostream> #include <algorithm> #include <vector> #include <cmath> #include <ctime> #include <opencv2/core/core.hpp> #include <opencv2/highgui/highgui.hpp> #include "gaussian.h" #define BLUR_RADIUS 3 #define PATHS_PER_SCAN 4 #define MAX_SHORT 65535 #define SMALL_PENALTY 3 #define LARGE_PENALTY 20 struct path { short rowDiff; short colDiff; short index; }; void CUDA_CHECK_RETURN(hipError_t status, int i = 0) { if (status != hipSuccess) { std::cout << hipGetErrorString(status) << i << std::endl; exit(1); } } __global__ void calculatePixelCostBT(int *param, uchar *im_left, uchar *im_right, unsigned short *tmp_C) { int rows = param[0], cols = param[1], disps = param[2]; int id = threadIdx.x + blockDim.x * blockIdx.x; if (id >= rows * cols * disps) return; int row = (id / (disps * cols)) % rows; int leftCol = (id / disps) % cols; int d = id % disps; int rightCol = leftCol - d; char leftValue, rightValue, beforeRightValue, afterRightValue, rightValueMinus, rightValuePlus, rightValueMin, rightValueMax; // Interpolation on the right image. int col1 = leftCol, col2 = rightCol; if (col1 < 0) leftValue = 0; else leftValue = im_left[row * cols + col1]; if (col2 < 0) rightValue = 0; else rightValue = im_right[row * cols + col2]; if (col2 > 0) { beforeRightValue = im_right[row * cols + col2 - 1]; } else { beforeRightValue = rightValue; } if (col2 + 1 < cols && col2>0) { afterRightValue = im_right[row * cols + col2 + 1]; } else { afterRightValue = rightValue; } // Use the median value to interpolate rightValueMinus = round((rightValue + beforeRightValue) / 2.f); rightValuePlus = round((rightValue + afterRightValue) / 2.f); char tmp; rightValueMin = rightValue < (tmp = (rightValueMinus < rightValuePlus ? rightValueMinus : rightValuePlus)) ? rightValue : tmp; rightValueMax = rightValue >(tmp = (rightValueMinus > rightValuePlus ? rightValueMinus : rightValuePlus)) ? rightValue : tmp; unsigned short firstVal = (0 > ((leftValue - rightValueMax) > (rightValueMin - leftValue) ? (leftValue - rightValueMax) : (rightValueMin - leftValue)) ? 0 : ((leftValue - rightValueMax) > (rightValueMin - leftValue) ? (leftValue - rightValueMax) : (rightValueMin - leftValue))); // Interpolation on the left image col1 = rightCol; col2 = leftCol; if (col1 < 0) leftValue = 0; else leftValue = im_right[row * cols + col1]; if (col2 < 0) rightValue = 0; else rightValue = im_left[row * cols + col2]; if (col2 > 0) { beforeRightValue = im_left[row * cols + col2 - 1]; } else { beforeRightValue = rightValue; } if (col2 + 1 < cols && col2>0) { afterRightValue = im_left[row * cols + col2 + 1]; } else { afterRightValue = rightValue; } rightValueMinus = round((rightValue + beforeRightValue) / 2.f); rightValuePlus = round((rightValue + afterRightValue) / 2.f); rightValueMin = rightValue < (tmp = (rightValueMinus < rightValuePlus ? rightValueMinus : rightValuePlus)) ? rightValue : tmp; rightValueMax = rightValue >(tmp = (rightValueMinus > rightValuePlus ? rightValueMinus : rightValuePlus)) ? rightValue : tmp; unsigned short secondVal = 0 > ((leftValue - rightValueMax) > (rightValueMin - leftValue) ? (leftValue - rightValueMax) : (rightValueMin - leftValue)) ? 0 : ((leftValue - rightValueMax) > (rightValueMin - leftValue) ? (leftValue - rightValueMax) : (rightValueMin - leftValue)); tmp_C[id] = (firstVal < secondVal ? 
firstVal : secondVal); } void calculatePixelCost(cv::Mat &firstImage, cv::Mat &secondImage, int disparityRange, unsigned short ***C) { int row = firstImage.rows; int col = firstImage.cols; int size = row * col; uchar *im_left, *im_right; unsigned short *Cc; Cc = (unsigned short *)malloc(sizeof(unsigned short) * size * disparityRange); unsigned short *tmp_C; CUDA_CHECK_RETURN(hipMalloc((void**)&tmp_C, sizeof(unsigned short) * size * disparityRange), -1); // Allocate GPU memory for left image and right image. CUDA_CHECK_RETURN(hipMalloc((void**)&im_left, sizeof(uchar) * size), 1); CUDA_CHECK_RETURN(hipMalloc((void**)&im_right, sizeof(uchar) * size), 2); CUDA_CHECK_RETURN(hipMemcpy(im_left, firstImage.ptr<uchar>(), size * sizeof(uchar), hipMemcpyHostToDevice), 3); CUDA_CHECK_RETURN(hipMemcpy(im_right, secondImage.ptr<uchar>(), size * sizeof(uchar), hipMemcpyHostToDevice), 4); // Because a warp in CUDA is 32, block is supposed to be 32 * a dim3 block_size; block_size.x = 3; // (Total + perblock - 1) / perblock dim3 grid_size; grid_size.x = (size * disparityRange + block_size.x - 1) / block_size.x; // basic params int *passed_para; int param[3] = {row, col, disparityRange}; CUDA_CHECK_RETURN(hipMalloc((void**)&passed_para, sizeof(int) * 3), 5); CUDA_CHECK_RETURN(hipMemcpy(passed_para, param, 3 * sizeof(int), hipMemcpyHostToDevice), 6); calculatePixelCostBT << <grid_size, block_size >> >(passed_para, im_left, im_right, tmp_C); // synchronize CUDA_CHECK_RETURN(hipDeviceSynchronize(), 7); // copy fron GPU to CPU CUDA_CHECK_RETURN(hipMemcpy(C[0][0], tmp_C, sizeof(unsigned short) * size * disparityRange, hipMemcpyDeviceToHost), 8); hipFree(im_left); hipFree(im_right); hipFree(passed_para); hipFree(tmp_C); } // pathCount can be 1, 2, 4, or 8 void initializeFirstScanPaths(std::vector<path> &paths, unsigned short pathCount) { for (unsigned short i = 0; i < pathCount; ++i) { paths.push_back(path()); } if (paths.size() >= 1) { paths[0].rowDiff = 0; paths[0].colDiff = -1; paths[0].index = 1; } if (paths.size() >= 2) { paths[1].rowDiff = -1; paths[1].colDiff = 0; paths[1].index = 2; } if (paths.size() >= 4) { paths[2].rowDiff = -1; paths[2].colDiff = 1; paths[2].index = 4; paths[3].rowDiff = -1; paths[3].colDiff = -1; paths[3].index = 7; } if (paths.size() >= 8) { paths[4].rowDiff = -2; paths[4].colDiff = 1; paths[4].index = 8; paths[5].rowDiff = -2; paths[5].colDiff = -1; paths[5].index = 9; paths[6].rowDiff = -1; paths[6].colDiff = -2; paths[6].index = 13; paths[7].rowDiff = -1; paths[7].colDiff = 2; paths[7].index = 15; } } // pathCount can be 1, 2, 4, or 8 void initializeSecondScanPaths(std::vector<path> &paths, unsigned short pathCount) { for (unsigned short i = 0; i < pathCount; ++i) { paths.push_back(path()); } if (paths.size() >= 1) { paths[0].rowDiff = 0; paths[0].colDiff = 1; paths[0].index = 0; } if (paths.size() >= 2) { paths[1].rowDiff = 1; paths[1].colDiff = 0; paths[1].index = 3; } if (paths.size() >= 4) { paths[2].rowDiff = 1; paths[2].colDiff = 1; paths[2].index = 5; paths[3].rowDiff = 1; paths[3].colDiff = -1; paths[3].index = 6; } if (paths.size() >= 8) { paths[4].rowDiff = 2; paths[4].colDiff = 1; paths[4].index = 10; paths[5].rowDiff = 2; paths[5].colDiff = -1; paths[5].index = 11; paths[6].rowDiff = 1; paths[6].colDiff = -2; paths[6].index = 12; paths[7].rowDiff = 1; paths[7].colDiff = 2; paths[7].index = 14; } } __global__ void aggregate(int row, int col, int cols, int rows, int disparityRange, int rowDiff, int colDiff, unsigned short *C, unsigned short *temp, unsigned short *temp1, 
int *S) { int d = threadIdx.x; unsigned aggregatedCost = 0; aggregatedCost += C[row * cols * disparityRange + col * disparityRange + d]; if (row + rowDiff < 0 || row + rowDiff >= rows || col + colDiff < 0 || col + colDiff >= cols) { // border temp[d] = aggregatedCost; atomicAdd(S + d, (int)temp[d]); return; } unsigned short minPrev, minPrevOther, prev, prevPlus, prevMinus, tmp; prev = minPrev = minPrevOther = prevPlus = prevMinus = MAX_SHORT; // traverse all disparity for (int disp = 0; disp < disparityRange; ++disp) { tmp = temp1[disp]; if (minPrev > tmp) { minPrev = tmp; } if (disp == d) { prev = tmp; } else if (disp == d + 1) { prevPlus = tmp; } else if (disp == d - 1) { prevMinus = tmp; } else { if (minPrevOther > tmp) { minPrevOther = tmp; } } } // Caculate Lr int tmp1 = (int)prevPlus + SMALL_PENALTY < (int)prevMinus + SMALL_PENALTY ? (int)prevPlus + SMALL_PENALTY : (int)prevMinus + SMALL_PENALTY; int tmp2 = (int)prev < (int)minPrevOther + LARGE_PENALTY ? (int)prev : (int)minPrevOther + LARGE_PENALTY; int s = tmp1 < tmp2 ? tmp1 : tmp2; aggregatedCost = aggregatedCost + s; aggregatedCost -= minPrev; // record for DP temp[d] = aggregatedCost; // atomic operation atomicAdd(S + d, (int)temp[d]); } __global__ void kernel_aggregateCosts(int *cuda_rowDiff, int *cuda_colDiff, int *params, unsigned short *C, int *S) { // get basic params int rows = params[0]; int cols = params[1]; int disparityRange = params[2]; int id = blockIdx.x; int rowDiff = cuda_rowDiff[id]; int colDiff = cuda_colDiff[id]; // the array used for DP unsigned short *temp, *temp1; temp = (unsigned short *)malloc(sizeof(unsigned short) * disparityRange); temp1 = (unsigned short *)malloc(sizeof(unsigned short) * disparityRange); int row, col, i, j, k; // DP in different path would rely on different "last" value. 
if (id == 0) { //printf("%d\n", id); for (i = 0; i < rows; i++) { for (j = 0; j < cols; j++) { row = i; col = j; aggregate << <1, disparityRange >> > (row, col, cols, rows, disparityRange, rowDiff, colDiff, C, temp, temp1, S + row * cols*disparityRange + col * disparityRange); hipError_t e = hipDeviceSynchronize(); if (e != hipSuccess) { printf("%s\n", hipGetErrorString(e)); } memcpy(temp1, temp, sizeof(unsigned short) * disparityRange); } } //printf("%d\n", id); } else if (id == 1) { //printf("%d\n", id); for (i = 0; i < cols; i++) { for (j = 0; j < rows; j++) { row = j; col = i; aggregate << <1, disparityRange >> > (row, col, cols, rows, disparityRange, rowDiff, colDiff, C, temp, temp1, S + row * cols*disparityRange + col * disparityRange); hipError_t e = hipDeviceSynchronize(); if (e != hipSuccess) { printf("%s\n", hipGetErrorString(e)); } memcpy(temp1, temp, sizeof(unsigned short) * disparityRange); } } //printf("%d\n", id); } else if (id == 3) { //printf("%d\n", id); for (i = cols - 1; i >= 1 - rows; i--) { for (j = 0; i + j < cols && j < rows; j++) { if (i + j < 0) continue; row = j; col = i + j; aggregate << <1, disparityRange >> > (row, col, cols, rows, disparityRange, rowDiff, colDiff, C, temp, temp1, S + row * cols*disparityRange + col * disparityRange); hipError_t e = hipDeviceSynchronize(); if (e != hipSuccess) { printf("%s\n", hipGetErrorString(e)); } memcpy(temp1, temp, sizeof(unsigned short) * disparityRange); } } //printf("%d\n", id); } else if (id == 2) { //printf("%d\n", id); for (i = 0; i < rows + cols - 1; i++) { for (j = 0; i - j >= 0 && j < rows; j++) { if (i - j >= cols) continue; row = j; col = i - j; aggregate << <1, disparityRange >> > (row, col, cols, rows, disparityRange, rowDiff, colDiff, C, temp, temp1, S + row * cols*disparityRange + col * disparityRange); hipError_t e = hipDeviceSynchronize(); if (e != hipSuccess) { printf("%s\n", hipGetErrorString(e)); } memcpy(temp1, temp, sizeof(unsigned short) * disparityRange); } } //printf("%d\n", id); } else if (id == 4) { //printf("%d\n", id); for (i = 0; i < rows; i++) { for (j = cols - 1; j >= 0; j--) { row = i; col = j; aggregate << <1, disparityRange >> > (row, col, cols, rows, disparityRange, rowDiff, colDiff, C, temp, temp1, S + row * cols*disparityRange + col * disparityRange); hipError_t e = hipDeviceSynchronize(); if (e != hipSuccess) { printf("%s\n", hipGetErrorString(e)); } memcpy(temp1, temp, sizeof(unsigned short) * disparityRange); } } //printf("%d\n", id); } else if (id == 5) { //printf("%d\n", id); for (i = 0; i < cols; i++) { for (j = rows - 1; j >= 0; j--) { row = j; col = i; aggregate << <1, disparityRange >> > (row, col, cols, rows, disparityRange, rowDiff, colDiff, C, temp, temp1, S + row * cols*disparityRange + col * disparityRange); hipError_t e = hipDeviceSynchronize(); if (e != hipSuccess) { printf("%s\n", hipGetErrorString(e)); } memcpy(temp1, temp, sizeof(unsigned short) * disparityRange); } } //printf("%d\n", id); } else if (id == 6) { //printf("%d\n", id); for (i = 1 - cols; i <= rows - 1; i++) { for (j = cols - 1; i + j >= 0 && j >= 0; j--) { if (i + j >= cols) continue; row = i + j; col = j; aggregate << <1, disparityRange >> > (row, col, cols, rows, disparityRange, rowDiff, colDiff, C, temp, temp1, S + row * cols*disparityRange + col * disparityRange); hipError_t e = hipDeviceSynchronize(); if (e != hipSuccess) { printf("%s\n", hipGetErrorString(e)); } memcpy(temp1, temp, sizeof(unsigned short) * disparityRange); } } //printf("%d\n", id); } else if (id == 7) { //printf("%d\n", id); for 
(i = 0; i < rows + cols - 1; i++) { for (j = 0; i - j >= 0 && j < cols; j++) { if (i - j >= rows) continue; row = i - j; col = j; aggregate << <1, disparityRange >> > (row, col, cols, rows, disparityRange, rowDiff, colDiff, C, temp, temp1, S + row * cols*disparityRange + col * disparityRange); hipError_t e = hipDeviceSynchronize(); if (e != hipSuccess) { printf("%s\n", hipGetErrorString(e)); } memcpy(temp1, temp, sizeof(unsigned short) * disparityRange); } } //printf("%d\n", id); } } void aggregateCosts(int rows, int cols, int disparityRange, unsigned short ***C, unsigned short ***S) { std::vector<path> firstScanPaths; std::vector<path> secondScanPaths; // the scan path to aggregate initializeFirstScanPaths(firstScanPaths, PATHS_PER_SCAN); initializeSecondScanPaths(secondScanPaths, PATHS_PER_SCAN); int rowDiff[PATHS_PER_SCAN * 2], colDiff[PATHS_PER_SCAN * 2]; for (int i = 0; i<firstScanPaths.size(); i++) { rowDiff[i] = firstScanPaths[i].rowDiff; colDiff[i] = firstScanPaths[i].colDiff; } for (int i = 0; i<secondScanPaths.size(); i++) { rowDiff[i + PATHS_PER_SCAN] = secondScanPaths[i].rowDiff; colDiff[i + PATHS_PER_SCAN] = secondScanPaths[i].colDiff; } // the paths used in GPU int *cuda_rowDiff, *cuda_colDiff; CUDA_CHECK_RETURN(hipMalloc((void**)&cuda_colDiff, sizeof(int) * PATHS_PER_SCAN * 2)); CUDA_CHECK_RETURN(hipMalloc((void**)&cuda_rowDiff, sizeof(int) * PATHS_PER_SCAN * 2)); CUDA_CHECK_RETURN(hipMemcpy(cuda_rowDiff, rowDiff, sizeof(int) * PATHS_PER_SCAN * 2, hipMemcpyHostToDevice)); CUDA_CHECK_RETURN(hipMemcpy(cuda_colDiff, colDiff, sizeof(int) * PATHS_PER_SCAN * 2, hipMemcpyHostToDevice)); // the pixel cost transferred to GPU unsigned short *tmp_C; CUDA_CHECK_RETURN(hipMalloc((void**)&tmp_C, sizeof(unsigned short) * rows * cols * disparityRange)); CUDA_CHECK_RETURN(hipMemcpy(tmp_C, C[0][0], sizeof(unsigned short) * rows * cols * disparityRange, hipMemcpyHostToDevice)); // the aggregate cost calculated in parallel int *tmp_S; CUDA_CHECK_RETURN(hipMalloc((void**)&tmp_S, sizeof(int) * rows * cols * disparityRange)); CUDA_CHECK_RETURN(hipMemset(tmp_S, 0, sizeof(int) * rows * cols * disparityRange)); int *Ss; Ss = (int*)malloc(sizeof(int)*rows * cols * disparityRange); // basic params int param[3] = {rows, cols, disparityRange}; int *passed_para; CUDA_CHECK_RETURN(hipMalloc((void**)&passed_para, sizeof(int) * 3), 5); CUDA_CHECK_RETURN(hipMemcpy(passed_para, param, 3 * sizeof(int), hipMemcpyHostToDevice), 6); kernel_aggregateCosts << <PATHS_PER_SCAN * 2, 1 >> > (cuda_rowDiff, cuda_colDiff, passed_para, tmp_C, tmp_S); // synchronize CUDA_CHECK_RETURN(hipDeviceSynchronize()); // copy from GPU to CPU CUDA_CHECK_RETURN(hipMemcpy(Ss, tmp_S, sizeof(int)*rows * cols * disparityRange, hipMemcpyDeviceToHost)); // convert unsigned short to int for (int i = 0; i < rows; i++) { for (int j = 0; j < cols; j++) { for (int k = 0; k < disparityRange; k++) { S[i][j][k] = (unsigned short)Ss[i*cols*disparityRange + j * disparityRange + k]; } } } hipFree(cuda_rowDiff); hipFree(cuda_colDiff); hipFree(tmp_C); hipFree(passed_para); hipFree(tmp_S); } void computeDisparity(unsigned short ***S, int rows, int cols, int disparityRange, cv::Mat &disparityMap) { unsigned int disparity = 0, minCost; for (int row = 0; row < rows; ++row) { for (int col = 0; col < cols; ++col) { minCost = MAX_SHORT; // S for (int d = disparityRange - 1; d >= 0; --d) { if (minCost > S[row][col][d]) { minCost = S[row][col][d]; disparity = d; } } disparityMap.at<uchar>(row, col) = disparity; } } } void saveDisparityMap(cv::Mat &disparityMap, 
int disparityRange, char* outputFile) { double factor = 256.0 / disparityRange; for (int row = 0; row < disparityMap.rows; ++row) { for (int col = 0; col < disparityMap.cols; ++col) { disparityMap.at<uchar>(row, col) *= factor; } } cv::imwrite(outputFile, disparityMap); } int main(int argc, char** argv) { // left image. right image. output image. char *firstFileName = "left.png"; char *secondFileName = "right.png"; char *outFileName = "out.png"; cv::Mat firstImage; cv::Mat secondImage; // read the grayscale image firstImage = cv::imread(firstFileName, CV_LOAD_IMAGE_GRAYSCALE); secondImage = cv::imread(secondFileName, CV_LOAD_IMAGE_GRAYSCALE); if (!firstImage.data || !secondImage.data) { std::cerr << "Could not open or find one of the images!" << std::endl; return -1; } // the range of disparity unsigned int disparityRange = 20; unsigned short ***C; // pixel cost array W x H x D unsigned short ***S; // aggregated cost array W x H x D clock_t begin = clock(); std::cout << "Allocating space..." << std::endl; C = (unsigned short ***)malloc(sizeof(unsigned short **) * firstImage.rows); C[0] = (unsigned short **)malloc(sizeof(unsigned short *) * firstImage.rows * firstImage.cols); C[0][0] = (unsigned short *)malloc(sizeof(unsigned short) * firstImage.rows * firstImage.cols * disparityRange); S = (unsigned short ***)malloc(sizeof(unsigned short **) * firstImage.rows); S[0] = (unsigned short **)malloc(sizeof(unsigned short *) * firstImage.rows * firstImage.cols); S[0][0] = (unsigned short *)malloc(sizeof(unsigned short) * firstImage.rows * firstImage.cols * disparityRange); // allocate cost arrays make sure the memory is continuous for (int row = 1; row<firstImage.rows; row++) { C[row] = C[row - 1] + firstImage.cols; S[row] = S[row - 1] + firstImage.cols; } for (int row = 0; row < firstImage.rows; ++row) { if (row != 0) { C[row][0] = C[row - 1][firstImage.cols - 1] + disparityRange; S[row][0] = S[row - 1][firstImage.cols - 1] + disparityRange; } for (int col = 0; col < firstImage.cols; ++col) { if (col > 0) { C[row][col] = C[row][col - 1] + disparityRange; S[row][col] = S[row][col - 1] + disparityRange; } } } std::cout << "Smoothing images..." << std::endl; grayscaleGaussianBlur(firstImage, firstImage, BLUR_RADIUS); grayscaleGaussianBlur(secondImage, secondImage, BLUR_RADIUS); std::cout << "Calculating pixel cost for the image..." << std::endl; calculatePixelCost(firstImage, secondImage, disparityRange, C); std::cout << "Aggregating costs..." << std::endl; aggregateCosts(firstImage.rows, firstImage.cols, disparityRange, C, S); /* used to print to file for comparision with CPU output. FILE *fp = fopen("gpu_output.txt", "w+"); for (int i=0; i<firstImage.rows; i++) for(int j=0; j<firstImage.cols; j++) for (int k=0; k<disparityRange; k++) fprintf(fp, "%d\n", S[i][j][k]); fclose(fp); */ cv::Mat disparityMap = cv::Mat(cv::Size(firstImage.cols, firstImage.rows), CV_8UC1, cv::Scalar::all(0)); std::cout << "Computing disparity..." << std::endl; computeDisparity(S, firstImage.rows, firstImage.cols, disparityRange, disparityMap); clock_t end = clock(); double elapsed_secs = double(end - begin) / CLOCKS_PER_SEC; printf("Done in %.2lf seconds.\n", elapsed_secs); saveDisparityMap(disparityMap, disparityRange, outFileName); return 0; }
cce61ceb74a6c9731e606343be0923df4de31ad8.cu
// GPU VERSION #include <cuda_runtime.h> #include <device_launch_parameters.h> #include <cuda.h> #include <sm_20_atomic_functions.h> #include <iostream> #include <algorithm> #include <vector> #include <cmath> #include <ctime> #include <opencv2/core/core.hpp> #include <opencv2/highgui/highgui.hpp> #include "gaussian.h" #define BLUR_RADIUS 3 #define PATHS_PER_SCAN 4 #define MAX_SHORT 65535 #define SMALL_PENALTY 3 #define LARGE_PENALTY 20 struct path { short rowDiff; short colDiff; short index; }; void CUDA_CHECK_RETURN(cudaError_t status, int i = 0) { if (status != cudaSuccess) { std::cout << cudaGetErrorString(status) << i << std::endl; exit(1); } } __global__ void calculatePixelCostBT(int *param, uchar *im_left, uchar *im_right, unsigned short *tmp_C) { int rows = param[0], cols = param[1], disps = param[2]; int id = threadIdx.x + blockDim.x * blockIdx.x; if (id >= rows * cols * disps) return; int row = (id / (disps * cols)) % rows; int leftCol = (id / disps) % cols; int d = id % disps; int rightCol = leftCol - d; char leftValue, rightValue, beforeRightValue, afterRightValue, rightValueMinus, rightValuePlus, rightValueMin, rightValueMax; // Interpolation on the right image. int col1 = leftCol, col2 = rightCol; if (col1 < 0) leftValue = 0; else leftValue = im_left[row * cols + col1]; if (col2 < 0) rightValue = 0; else rightValue = im_right[row * cols + col2]; if (col2 > 0) { beforeRightValue = im_right[row * cols + col2 - 1]; } else { beforeRightValue = rightValue; } if (col2 + 1 < cols && col2>0) { afterRightValue = im_right[row * cols + col2 + 1]; } else { afterRightValue = rightValue; } // Use the median value to interpolate rightValueMinus = round((rightValue + beforeRightValue) / 2.f); rightValuePlus = round((rightValue + afterRightValue) / 2.f); char tmp; rightValueMin = rightValue < (tmp = (rightValueMinus < rightValuePlus ? rightValueMinus : rightValuePlus)) ? rightValue : tmp; rightValueMax = rightValue >(tmp = (rightValueMinus > rightValuePlus ? rightValueMinus : rightValuePlus)) ? rightValue : tmp; unsigned short firstVal = (0 > ((leftValue - rightValueMax) > (rightValueMin - leftValue) ? (leftValue - rightValueMax) : (rightValueMin - leftValue)) ? 0 : ((leftValue - rightValueMax) > (rightValueMin - leftValue) ? (leftValue - rightValueMax) : (rightValueMin - leftValue))); // Interpolation on the left image col1 = rightCol; col2 = leftCol; if (col1 < 0) leftValue = 0; else leftValue = im_right[row * cols + col1]; if (col2 < 0) rightValue = 0; else rightValue = im_left[row * cols + col2]; if (col2 > 0) { beforeRightValue = im_left[row * cols + col2 - 1]; } else { beforeRightValue = rightValue; } if (col2 + 1 < cols && col2>0) { afterRightValue = im_left[row * cols + col2 + 1]; } else { afterRightValue = rightValue; } rightValueMinus = round((rightValue + beforeRightValue) / 2.f); rightValuePlus = round((rightValue + afterRightValue) / 2.f); rightValueMin = rightValue < (tmp = (rightValueMinus < rightValuePlus ? rightValueMinus : rightValuePlus)) ? rightValue : tmp; rightValueMax = rightValue >(tmp = (rightValueMinus > rightValuePlus ? rightValueMinus : rightValuePlus)) ? rightValue : tmp; unsigned short secondVal = 0 > ((leftValue - rightValueMax) > (rightValueMin - leftValue) ? (leftValue - rightValueMax) : (rightValueMin - leftValue)) ? 0 : ((leftValue - rightValueMax) > (rightValueMin - leftValue) ? (leftValue - rightValueMax) : (rightValueMin - leftValue)); tmp_C[id] = (firstVal < secondVal ? 
firstVal : secondVal); } void calculatePixelCost(cv::Mat &firstImage, cv::Mat &secondImage, int disparityRange, unsigned short ***C) { int row = firstImage.rows; int col = firstImage.cols; int size = row * col; uchar *im_left, *im_right; unsigned short *Cc; Cc = (unsigned short *)malloc(sizeof(unsigned short) * size * disparityRange); unsigned short *tmp_C; CUDA_CHECK_RETURN(cudaMalloc((void**)&tmp_C, sizeof(unsigned short) * size * disparityRange), -1); // Allocate GPU memory for left image and right image. CUDA_CHECK_RETURN(cudaMalloc((void**)&im_left, sizeof(uchar) * size), 1); CUDA_CHECK_RETURN(cudaMalloc((void**)&im_right, sizeof(uchar) * size), 2); CUDA_CHECK_RETURN(cudaMemcpy(im_left, firstImage.ptr<uchar>(), size * sizeof(uchar), cudaMemcpyHostToDevice), 3); CUDA_CHECK_RETURN(cudaMemcpy(im_right, secondImage.ptr<uchar>(), size * sizeof(uchar), cudaMemcpyHostToDevice), 4); // Because a warp in CUDA is 32, block is supposed to be 32 * a dim3 block_size; block_size.x = 3; // (Total + perblock - 1) / perblock dim3 grid_size; grid_size.x = (size * disparityRange + block_size.x - 1) / block_size.x; // basic params int *passed_para; int param[3] = {row, col, disparityRange}; CUDA_CHECK_RETURN(cudaMalloc((void**)&passed_para, sizeof(int) * 3), 5); CUDA_CHECK_RETURN(cudaMemcpy(passed_para, param, 3 * sizeof(int), cudaMemcpyHostToDevice), 6); calculatePixelCostBT << <grid_size, block_size >> >(passed_para, im_left, im_right, tmp_C); // synchronize CUDA_CHECK_RETURN(cudaDeviceSynchronize(), 7); // copy fron GPU to CPU CUDA_CHECK_RETURN(cudaMemcpy(C[0][0], tmp_C, sizeof(unsigned short) * size * disparityRange, cudaMemcpyDeviceToHost), 8); cudaFree(im_left); cudaFree(im_right); cudaFree(passed_para); cudaFree(tmp_C); } // pathCount can be 1, 2, 4, or 8 void initializeFirstScanPaths(std::vector<path> &paths, unsigned short pathCount) { for (unsigned short i = 0; i < pathCount; ++i) { paths.push_back(path()); } if (paths.size() >= 1) { paths[0].rowDiff = 0; paths[0].colDiff = -1; paths[0].index = 1; } if (paths.size() >= 2) { paths[1].rowDiff = -1; paths[1].colDiff = 0; paths[1].index = 2; } if (paths.size() >= 4) { paths[2].rowDiff = -1; paths[2].colDiff = 1; paths[2].index = 4; paths[3].rowDiff = -1; paths[3].colDiff = -1; paths[3].index = 7; } if (paths.size() >= 8) { paths[4].rowDiff = -2; paths[4].colDiff = 1; paths[4].index = 8; paths[5].rowDiff = -2; paths[5].colDiff = -1; paths[5].index = 9; paths[6].rowDiff = -1; paths[6].colDiff = -2; paths[6].index = 13; paths[7].rowDiff = -1; paths[7].colDiff = 2; paths[7].index = 15; } } // pathCount can be 1, 2, 4, or 8 void initializeSecondScanPaths(std::vector<path> &paths, unsigned short pathCount) { for (unsigned short i = 0; i < pathCount; ++i) { paths.push_back(path()); } if (paths.size() >= 1) { paths[0].rowDiff = 0; paths[0].colDiff = 1; paths[0].index = 0; } if (paths.size() >= 2) { paths[1].rowDiff = 1; paths[1].colDiff = 0; paths[1].index = 3; } if (paths.size() >= 4) { paths[2].rowDiff = 1; paths[2].colDiff = 1; paths[2].index = 5; paths[3].rowDiff = 1; paths[3].colDiff = -1; paths[3].index = 6; } if (paths.size() >= 8) { paths[4].rowDiff = 2; paths[4].colDiff = 1; paths[4].index = 10; paths[5].rowDiff = 2; paths[5].colDiff = -1; paths[5].index = 11; paths[6].rowDiff = 1; paths[6].colDiff = -2; paths[6].index = 12; paths[7].rowDiff = 1; paths[7].colDiff = 2; paths[7].index = 14; } } __global__ void aggregate(int row, int col, int cols, int rows, int disparityRange, int rowDiff, int colDiff, unsigned short *C, unsigned short *temp, 
unsigned short *temp1, int *S) { int d = threadIdx.x; unsigned aggregatedCost = 0; aggregatedCost += C[row * cols * disparityRange + col * disparityRange + d]; if (row + rowDiff < 0 || row + rowDiff >= rows || col + colDiff < 0 || col + colDiff >= cols) { // border temp[d] = aggregatedCost; atomicAdd(S + d, (int)temp[d]); return; } unsigned short minPrev, minPrevOther, prev, prevPlus, prevMinus, tmp; prev = minPrev = minPrevOther = prevPlus = prevMinus = MAX_SHORT; // traverse all disparity for (int disp = 0; disp < disparityRange; ++disp) { tmp = temp1[disp]; if (minPrev > tmp) { minPrev = tmp; } if (disp == d) { prev = tmp; } else if (disp == d + 1) { prevPlus = tmp; } else if (disp == d - 1) { prevMinus = tmp; } else { if (minPrevOther > tmp) { minPrevOther = tmp; } } } // Caculate Lr int tmp1 = (int)prevPlus + SMALL_PENALTY < (int)prevMinus + SMALL_PENALTY ? (int)prevPlus + SMALL_PENALTY : (int)prevMinus + SMALL_PENALTY; int tmp2 = (int)prev < (int)minPrevOther + LARGE_PENALTY ? (int)prev : (int)minPrevOther + LARGE_PENALTY; int s = tmp1 < tmp2 ? tmp1 : tmp2; aggregatedCost = aggregatedCost + s; aggregatedCost -= minPrev; // record for DP temp[d] = aggregatedCost; // atomic operation atomicAdd(S + d, (int)temp[d]); } __global__ void kernel_aggregateCosts(int *cuda_rowDiff, int *cuda_colDiff, int *params, unsigned short *C, int *S) { // get basic params int rows = params[0]; int cols = params[1]; int disparityRange = params[2]; int id = blockIdx.x; int rowDiff = cuda_rowDiff[id]; int colDiff = cuda_colDiff[id]; // the array used for DP unsigned short *temp, *temp1; temp = (unsigned short *)malloc(sizeof(unsigned short) * disparityRange); temp1 = (unsigned short *)malloc(sizeof(unsigned short) * disparityRange); int row, col, i, j, k; // DP in different path would rely on different "last" value. 
if (id == 0) { //printf("%d\n", id); for (i = 0; i < rows; i++) { for (j = 0; j < cols; j++) { row = i; col = j; aggregate << <1, disparityRange >> > (row, col, cols, rows, disparityRange, rowDiff, colDiff, C, temp, temp1, S + row * cols*disparityRange + col * disparityRange); cudaError_t e = cudaDeviceSynchronize(); if (e != cudaSuccess) { printf("%s\n", cudaGetErrorString(e)); } memcpy(temp1, temp, sizeof(unsigned short) * disparityRange); } } //printf("%d\n", id); } else if (id == 1) { //printf("%d\n", id); for (i = 0; i < cols; i++) { for (j = 0; j < rows; j++) { row = j; col = i; aggregate << <1, disparityRange >> > (row, col, cols, rows, disparityRange, rowDiff, colDiff, C, temp, temp1, S + row * cols*disparityRange + col * disparityRange); cudaError_t e = cudaDeviceSynchronize(); if (e != cudaSuccess) { printf("%s\n", cudaGetErrorString(e)); } memcpy(temp1, temp, sizeof(unsigned short) * disparityRange); } } //printf("%d\n", id); } else if (id == 3) { //printf("%d\n", id); for (i = cols - 1; i >= 1 - rows; i--) { for (j = 0; i + j < cols && j < rows; j++) { if (i + j < 0) continue; row = j; col = i + j; aggregate << <1, disparityRange >> > (row, col, cols, rows, disparityRange, rowDiff, colDiff, C, temp, temp1, S + row * cols*disparityRange + col * disparityRange); cudaError_t e = cudaDeviceSynchronize(); if (e != cudaSuccess) { printf("%s\n", cudaGetErrorString(e)); } memcpy(temp1, temp, sizeof(unsigned short) * disparityRange); } } //printf("%d\n", id); } else if (id == 2) { //printf("%d\n", id); for (i = 0; i < rows + cols - 1; i++) { for (j = 0; i - j >= 0 && j < rows; j++) { if (i - j >= cols) continue; row = j; col = i - j; aggregate << <1, disparityRange >> > (row, col, cols, rows, disparityRange, rowDiff, colDiff, C, temp, temp1, S + row * cols*disparityRange + col * disparityRange); cudaError_t e = cudaDeviceSynchronize(); if (e != cudaSuccess) { printf("%s\n", cudaGetErrorString(e)); } memcpy(temp1, temp, sizeof(unsigned short) * disparityRange); } } //printf("%d\n", id); } else if (id == 4) { //printf("%d\n", id); for (i = 0; i < rows; i++) { for (j = cols - 1; j >= 0; j--) { row = i; col = j; aggregate << <1, disparityRange >> > (row, col, cols, rows, disparityRange, rowDiff, colDiff, C, temp, temp1, S + row * cols*disparityRange + col * disparityRange); cudaError_t e = cudaDeviceSynchronize(); if (e != cudaSuccess) { printf("%s\n", cudaGetErrorString(e)); } memcpy(temp1, temp, sizeof(unsigned short) * disparityRange); } } //printf("%d\n", id); } else if (id == 5) { //printf("%d\n", id); for (i = 0; i < cols; i++) { for (j = rows - 1; j >= 0; j--) { row = j; col = i; aggregate << <1, disparityRange >> > (row, col, cols, rows, disparityRange, rowDiff, colDiff, C, temp, temp1, S + row * cols*disparityRange + col * disparityRange); cudaError_t e = cudaDeviceSynchronize(); if (e != cudaSuccess) { printf("%s\n", cudaGetErrorString(e)); } memcpy(temp1, temp, sizeof(unsigned short) * disparityRange); } } //printf("%d\n", id); } else if (id == 6) { //printf("%d\n", id); for (i = 1 - cols; i <= rows - 1; i++) { for (j = cols - 1; i + j >= 0 && j >= 0; j--) { if (i + j >= cols) continue; row = i + j; col = j; aggregate << <1, disparityRange >> > (row, col, cols, rows, disparityRange, rowDiff, colDiff, C, temp, temp1, S + row * cols*disparityRange + col * disparityRange); cudaError_t e = cudaDeviceSynchronize(); if (e != cudaSuccess) { printf("%s\n", cudaGetErrorString(e)); } memcpy(temp1, temp, sizeof(unsigned short) * disparityRange); } } //printf("%d\n", id); } else if (id == 7) 
{ //printf("%d\n", id); for (i = 0; i < rows + cols - 1; i++) { for (j = 0; i - j >= 0 && j < cols; j++) { if (i - j >= rows) continue; row = i - j; col = j; aggregate << <1, disparityRange >> > (row, col, cols, rows, disparityRange, rowDiff, colDiff, C, temp, temp1, S + row * cols*disparityRange + col * disparityRange); cudaError_t e = cudaDeviceSynchronize(); if (e != cudaSuccess) { printf("%s\n", cudaGetErrorString(e)); } memcpy(temp1, temp, sizeof(unsigned short) * disparityRange); } } //printf("%d\n", id); } } void aggregateCosts(int rows, int cols, int disparityRange, unsigned short ***C, unsigned short ***S) { std::vector<path> firstScanPaths; std::vector<path> secondScanPaths; // the scan path to aggregate initializeFirstScanPaths(firstScanPaths, PATHS_PER_SCAN); initializeSecondScanPaths(secondScanPaths, PATHS_PER_SCAN); int rowDiff[PATHS_PER_SCAN * 2], colDiff[PATHS_PER_SCAN * 2]; for (int i = 0; i<firstScanPaths.size(); i++) { rowDiff[i] = firstScanPaths[i].rowDiff; colDiff[i] = firstScanPaths[i].colDiff; } for (int i = 0; i<secondScanPaths.size(); i++) { rowDiff[i + PATHS_PER_SCAN] = secondScanPaths[i].rowDiff; colDiff[i + PATHS_PER_SCAN] = secondScanPaths[i].colDiff; } // the paths used in GPU int *cuda_rowDiff, *cuda_colDiff; CUDA_CHECK_RETURN(cudaMalloc((void**)&cuda_colDiff, sizeof(int) * PATHS_PER_SCAN * 2)); CUDA_CHECK_RETURN(cudaMalloc((void**)&cuda_rowDiff, sizeof(int) * PATHS_PER_SCAN * 2)); CUDA_CHECK_RETURN(cudaMemcpy(cuda_rowDiff, rowDiff, sizeof(int) * PATHS_PER_SCAN * 2, cudaMemcpyHostToDevice)); CUDA_CHECK_RETURN(cudaMemcpy(cuda_colDiff, colDiff, sizeof(int) * PATHS_PER_SCAN * 2, cudaMemcpyHostToDevice)); // the pixel cost transferred to GPU unsigned short *tmp_C; CUDA_CHECK_RETURN(cudaMalloc((void**)&tmp_C, sizeof(unsigned short) * rows * cols * disparityRange)); CUDA_CHECK_RETURN(cudaMemcpy(tmp_C, C[0][0], sizeof(unsigned short) * rows * cols * disparityRange, cudaMemcpyHostToDevice)); // the aggregate cost calculated in parallel int *tmp_S; CUDA_CHECK_RETURN(cudaMalloc((void**)&tmp_S, sizeof(int) * rows * cols * disparityRange)); CUDA_CHECK_RETURN(cudaMemset(tmp_S, 0, sizeof(int) * rows * cols * disparityRange)); int *Ss; Ss = (int*)malloc(sizeof(int)*rows * cols * disparityRange); // basic params int param[3] = {rows, cols, disparityRange}; int *passed_para; CUDA_CHECK_RETURN(cudaMalloc((void**)&passed_para, sizeof(int) * 3), 5); CUDA_CHECK_RETURN(cudaMemcpy(passed_para, param, 3 * sizeof(int), cudaMemcpyHostToDevice), 6); kernel_aggregateCosts << <PATHS_PER_SCAN * 2, 1 >> > (cuda_rowDiff, cuda_colDiff, passed_para, tmp_C, tmp_S); // synchronize CUDA_CHECK_RETURN(cudaDeviceSynchronize()); // copy from GPU to CPU CUDA_CHECK_RETURN(cudaMemcpy(Ss, tmp_S, sizeof(int)*rows * cols * disparityRange, cudaMemcpyDeviceToHost)); // convert unsigned short to int for (int i = 0; i < rows; i++) { for (int j = 0; j < cols; j++) { for (int k = 0; k < disparityRange; k++) { S[i][j][k] = (unsigned short)Ss[i*cols*disparityRange + j * disparityRange + k]; } } } cudaFree(cuda_rowDiff); cudaFree(cuda_colDiff); cudaFree(tmp_C); cudaFree(passed_para); cudaFree(tmp_S); } void computeDisparity(unsigned short ***S, int rows, int cols, int disparityRange, cv::Mat &disparityMap) { unsigned int disparity = 0, minCost; for (int row = 0; row < rows; ++row) { for (int col = 0; col < cols; ++col) { minCost = MAX_SHORT; // Ñ¡ÔñSÖµ×îСµÄÉî¶È for (int d = disparityRange - 1; d >= 0; --d) { if (minCost > S[row][col][d]) { minCost = S[row][col][d]; disparity = d; } } disparityMap.at<uchar>(row, 
col) = disparity; } } } void saveDisparityMap(cv::Mat &disparityMap, int disparityRange, char* outputFile) { double factor = 256.0 / disparityRange; for (int row = 0; row < disparityMap.rows; ++row) { for (int col = 0; col < disparityMap.cols; ++col) { disparityMap.at<uchar>(row, col) *= factor; } } cv::imwrite(outputFile, disparityMap); } int main(int argc, char** argv) { // left image. right image. output image. char *firstFileName = "left.png"; char *secondFileName = "right.png"; char *outFileName = "out.png"; cv::Mat firstImage; cv::Mat secondImage; // read the grayscale image firstImage = cv::imread(firstFileName, CV_LOAD_IMAGE_GRAYSCALE); secondImage = cv::imread(secondFileName, CV_LOAD_IMAGE_GRAYSCALE); if (!firstImage.data || !secondImage.data) { std::cerr << "Could not open or find one of the images!" << std::endl; return -1; } // the range of disparity unsigned int disparityRange = 20; unsigned short ***C; // pixel cost array W x H x D unsigned short ***S; // aggregated cost array W x H x D clock_t begin = clock(); std::cout << "Allocating space..." << std::endl; C = (unsigned short ***)malloc(sizeof(unsigned short **) * firstImage.rows); C[0] = (unsigned short **)malloc(sizeof(unsigned short *) * firstImage.rows * firstImage.cols); C[0][0] = (unsigned short *)malloc(sizeof(unsigned short) * firstImage.rows * firstImage.cols * disparityRange); S = (unsigned short ***)malloc(sizeof(unsigned short **) * firstImage.rows); S[0] = (unsigned short **)malloc(sizeof(unsigned short *) * firstImage.rows * firstImage.cols); S[0][0] = (unsigned short *)malloc(sizeof(unsigned short) * firstImage.rows * firstImage.cols * disparityRange); // allocate cost arrays make sure the memory is continuous for (int row = 1; row<firstImage.rows; row++) { C[row] = C[row - 1] + firstImage.cols; S[row] = S[row - 1] + firstImage.cols; } for (int row = 0; row < firstImage.rows; ++row) { if (row != 0) { C[row][0] = C[row - 1][firstImage.cols - 1] + disparityRange; S[row][0] = S[row - 1][firstImage.cols - 1] + disparityRange; } for (int col = 0; col < firstImage.cols; ++col) { if (col > 0) { C[row][col] = C[row][col - 1] + disparityRange; S[row][col] = S[row][col - 1] + disparityRange; } } } std::cout << "Smoothing images..." << std::endl; grayscaleGaussianBlur(firstImage, firstImage, BLUR_RADIUS); grayscaleGaussianBlur(secondImage, secondImage, BLUR_RADIUS); std::cout << "Calculating pixel cost for the image..." << std::endl; calculatePixelCost(firstImage, secondImage, disparityRange, C); std::cout << "Aggregating costs..." << std::endl; aggregateCosts(firstImage.rows, firstImage.cols, disparityRange, C, S); /* used to print to file for comparision with CPU output. FILE *fp = fopen("gpu_output.txt", "w+"); for (int i=0; i<firstImage.rows; i++) for(int j=0; j<firstImage.cols; j++) for (int k=0; k<disparityRange; k++) fprintf(fp, "%d\n", S[i][j][k]); fclose(fp); */ cv::Mat disparityMap = cv::Mat(cv::Size(firstImage.cols, firstImage.rows), CV_8UC1, cv::Scalar::all(0)); std::cout << "Computing disparity..." << std::endl; computeDisparity(S, firstImage.rows, firstImage.cols, disparityRange, disparityMap); clock_t end = clock(); double elapsed_secs = double(end - begin) / CLOCKS_PER_SEC; printf("Done in %.2lf seconds.\n", elapsed_secs); saveDisparityMap(disparityMap, disparityRange, outFileName); return 0; }
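The host dispatcher above synchronizes and checks the error string after every single aggregate<<<1, disparityRange>>> launch. A minimal sketch of that same check factored into a helper; the name checkLastLaunch is hypothetical and not part of the original source:

#include <cstdio>
#include <cuda_runtime.h>

// Hypothetical helper mirroring the inline check done after each aggregate launch:
// wait for the kernel, then print the error string if anything went wrong.
static void checkLastLaunch(const char *what)
{
    cudaError_t e = cudaDeviceSynchronize();   // blocks until the launched kernel finishes
    if (e != cudaSuccess)
        printf("%s: %s\n", what, cudaGetErrorString(e));
}

// Usage sketch: aggregate<<<1, disparityRange>>>(...); checkLastLaunch("aggregate");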
409f2653266771d098baf97f94d8d807a6407cdf.hip
// !!! This is a file automatically generated by hipify!!! /* ============================================================================ Filename : algorithm.c Author : Vincent Rinaldi SCIPER : 239759 ============================================================================ */ #include <iostream> #include <iomanip> #include <sys/time.h> #include <hip/hip_runtime.h> using namespace std; // CPU Baseline void array_process(double *input, double *output, int length, int iterations) { double *temp; for(int n=0; n<(int) iterations; n++) { for(int i=1; i<length-1; i++) { for(int j=1; j<length-1; j++) { output[(i)*(length)+(j)] = (input[(i-1)*(length)+(j-1)] + input[(i-1)*(length)+(j)] + input[(i-1)*(length)+(j+1)] + input[(i)*(length)+(j-1)] + input[(i)*(length)+(j)] + input[(i)*(length)+(j+1)] + input[(i+1)*(length)+(j-1)] + input[(i+1)*(length)+(j)] + input[(i+1)*(length)+(j+1)] ) / 9; } } output[(length/2-1)*length+(length/2-1)] = 1000; output[(length/2)*length+(length/2-1)] = 1000; output[(length/2-1)*length+(length/2)] = 1000; output[(length/2)*length+(length/2)] = 1000; temp = input; input = output; output = temp; } } // CUDA Kernel function __global__ void kernel(double* input, double* output, int length) { int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; int midSquare = (i == length/2 - 1 && j == length/2 - 1) || (i == length/2 && j == length/2 - 1) || (i == length/2 - 1 && j == length/2) || (i == length/2 && j == length/2); if ((i > 0) && (i < length-1) && (j > 0) && (j < length-1) && (!midSquare)) { output[(i)*(length)+(j)] = (input[(i-1)*(length)+(j-1)] + input[(i-1)*(length)+(j)] + input[(i-1)*(length)+(j+1)] + input[(i)*(length)+(j-1)] + input[(i)*(length)+(j)] + input[(i)*(length)+(j+1)] + input[(i+1)*(length)+(j-1)] + input[(i+1)*(length)+(j)] + input[(i+1)*(length)+(j+1)] ) / 9; } } // GPU Optimized function void GPU_array_process(double *input, double *output, int length, int iterations) { //Cuda events for calculating elapsed time hipEvent_t cpy_H2D_start, cpy_H2D_end, comp_start, comp_end, cpy_D2H_start, cpy_D2H_end; hipEventCreate(&cpy_H2D_start); hipEventCreate(&cpy_H2D_end); hipEventCreate(&cpy_D2H_start); hipEventCreate(&cpy_D2H_end); hipEventCreate(&comp_start); hipEventCreate(&comp_end); /* Preprocessing goes here */ // declare device arrays double *input_d; double *output_d; // set device to be used for GPU executions hipSetDevice(0); // set number of threads per block and number of blocks in the grid used in a kernel invocation int numThreadsBlock = 8; int numBlocksGrid = (length % numThreadsBlock != 0) ? 
(length / numThreadsBlock + 1) : (length / numThreadsBlock); // makes coordinates of blocks and threads indexes to work in 2 dimensions dim3 numThreadsPerBlock(numThreadsBlock, numThreadsBlock); dim3 numBlocksInGrid(numBlocksGrid, numBlocksGrid); // allocate arrays on device if (hipMalloc((void **) &input_d, length*length*sizeof(double)) != hipSuccess) cout << "error in hipMalloc" << endl; if (hipMalloc((void **) &output_d, length*length*sizeof(double)) != hipSuccess) cout << "error in hipMalloc" << endl; // copy from host to device step hipEventRecord(cpy_H2D_start); /* Copying array from host to device goes here */ if (hipMemcpy(input_d, input, length*length*sizeof(double), hipMemcpyHostToDevice) != hipSuccess) cout << "error in hipMemcpy" << endl; if (hipMemcpy(output_d, output, length*length*sizeof(double), hipMemcpyHostToDevice) != hipSuccess) cout << "error in hipMemcpy" << endl; hipEventRecord(cpy_H2D_end); hipEventSynchronize(cpy_H2D_end); // GPU calculation step hipEventRecord(comp_start); /* GPU calculation goes here */ double *temp_d; for (int i = 0; i < iterations; i++) { hipLaunchKernelGGL(( kernel), dim3(numBlocksInGrid), dim3(numThreadsPerBlock), 0, 0, input_d, output_d, length); if (i != iterations-1) { temp_d = input_d; input_d = output_d; output_d = temp_d; } } hipDeviceSynchronize(); hipEventRecord(comp_end); hipEventSynchronize(comp_end); // copy from device to host step hipEventRecord(cpy_D2H_start); /* Copying array from device to host goes here */ if (hipMemcpy(output, output_d, length*length*sizeof(double), hipMemcpyDeviceToHost) != hipSuccess) cout << "error in hipMemcpy" << endl; hipEventRecord(cpy_D2H_end); hipEventSynchronize(cpy_D2H_end); /* Postprocessing goes here */ // cleanup hipFree(input_d); hipFree(output_d); float time; hipEventElapsedTime(&time, cpy_H2D_start, cpy_H2D_end); cout<<"Host to Device MemCpy takes "<<setprecision(4)<<time/1000<<"s"<<endl; hipEventElapsedTime(&time, comp_start, comp_end); cout<<"Computation takes "<<setprecision(4)<<time/1000<<"s"<<endl; hipEventElapsedTime(&time, cpy_D2H_start, cpy_D2H_end); cout<<"Device to Host MemCpy takes "<<setprecision(4)<<time/1000<<"s"<<endl; }
409f2653266771d098baf97f94d8d807a6407cdf.cu
/* ============================================================================ Filename : algorithm.c Author : Vincent Rinaldi SCIPER : 239759 ============================================================================ */ #include <iostream> #include <iomanip> #include <sys/time.h> #include <cuda_runtime.h> using namespace std; // CPU Baseline void array_process(double *input, double *output, int length, int iterations) { double *temp; for(int n=0; n<(int) iterations; n++) { for(int i=1; i<length-1; i++) { for(int j=1; j<length-1; j++) { output[(i)*(length)+(j)] = (input[(i-1)*(length)+(j-1)] + input[(i-1)*(length)+(j)] + input[(i-1)*(length)+(j+1)] + input[(i)*(length)+(j-1)] + input[(i)*(length)+(j)] + input[(i)*(length)+(j+1)] + input[(i+1)*(length)+(j-1)] + input[(i+1)*(length)+(j)] + input[(i+1)*(length)+(j+1)] ) / 9; } } output[(length/2-1)*length+(length/2-1)] = 1000; output[(length/2)*length+(length/2-1)] = 1000; output[(length/2-1)*length+(length/2)] = 1000; output[(length/2)*length+(length/2)] = 1000; temp = input; input = output; output = temp; } } // CUDA Kernel function __global__ void kernel(double* input, double* output, int length) { int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; int midSquare = (i == length/2 - 1 && j == length/2 - 1) || (i == length/2 && j == length/2 - 1) || (i == length/2 - 1 && j == length/2) || (i == length/2 && j == length/2); if ((i > 0) && (i < length-1) && (j > 0) && (j < length-1) && (!midSquare)) { output[(i)*(length)+(j)] = (input[(i-1)*(length)+(j-1)] + input[(i-1)*(length)+(j)] + input[(i-1)*(length)+(j+1)] + input[(i)*(length)+(j-1)] + input[(i)*(length)+(j)] + input[(i)*(length)+(j+1)] + input[(i+1)*(length)+(j-1)] + input[(i+1)*(length)+(j)] + input[(i+1)*(length)+(j+1)] ) / 9; } } // GPU Optimized function void GPU_array_process(double *input, double *output, int length, int iterations) { //Cuda events for calculating elapsed time cudaEvent_t cpy_H2D_start, cpy_H2D_end, comp_start, comp_end, cpy_D2H_start, cpy_D2H_end; cudaEventCreate(&cpy_H2D_start); cudaEventCreate(&cpy_H2D_end); cudaEventCreate(&cpy_D2H_start); cudaEventCreate(&cpy_D2H_end); cudaEventCreate(&comp_start); cudaEventCreate(&comp_end); /* Preprocessing goes here */ // declare device arrays double *input_d; double *output_d; // set device to be used for GPU executions cudaSetDevice(0); // set number of threads per block and number of blocks in the grid used in a kernel invocation int numThreadsBlock = 8; int numBlocksGrid = (length % numThreadsBlock != 0) ? 
(length / numThreadsBlock + 1) : (length / numThreadsBlock); // makes coordinates of blocks and threads indexes to work in 2 dimensions dim3 numThreadsPerBlock(numThreadsBlock, numThreadsBlock); dim3 numBlocksInGrid(numBlocksGrid, numBlocksGrid); // allocate arrays on device if (cudaMalloc((void **) &input_d, length*length*sizeof(double)) != cudaSuccess) cout << "error in cudaMalloc" << endl; if (cudaMalloc((void **) &output_d, length*length*sizeof(double)) != cudaSuccess) cout << "error in cudaMalloc" << endl; // copy from host to device step cudaEventRecord(cpy_H2D_start); /* Copying array from host to device goes here */ if (cudaMemcpy(input_d, input, length*length*sizeof(double), cudaMemcpyHostToDevice) != cudaSuccess) cout << "error in cudaMemcpy" << endl; if (cudaMemcpy(output_d, output, length*length*sizeof(double), cudaMemcpyHostToDevice) != cudaSuccess) cout << "error in cudaMemcpy" << endl; cudaEventRecord(cpy_H2D_end); cudaEventSynchronize(cpy_H2D_end); // GPU calculation step cudaEventRecord(comp_start); /* GPU calculation goes here */ double *temp_d; for (int i = 0; i < iterations; i++) { kernel<<<numBlocksInGrid, numThreadsPerBlock>>>(input_d, output_d, length); if (i != iterations-1) { temp_d = input_d; input_d = output_d; output_d = temp_d; } } cudaThreadSynchronize(); cudaEventRecord(comp_end); cudaEventSynchronize(comp_end); // copy from device to host step cudaEventRecord(cpy_D2H_start); /* Copying array from device to host goes here */ if (cudaMemcpy(output, output_d, length*length*sizeof(double), cudaMemcpyDeviceToHost) != cudaSuccess) cout << "error in cudaMemcpy" << endl; cudaEventRecord(cpy_D2H_end); cudaEventSynchronize(cpy_D2H_end); /* Postprocessing goes here */ // cleanup cudaFree(input_d); cudaFree(output_d); float time; cudaEventElapsedTime(&time, cpy_H2D_start, cpy_H2D_end); cout<<"Host to Device MemCpy takes "<<setprecision(4)<<time/1000<<"s"<<endl; cudaEventElapsedTime(&time, comp_start, comp_end); cout<<"Computation takes "<<setprecision(4)<<time/1000<<"s"<<endl; cudaEventElapsedTime(&time, cpy_D2H_start, cpy_D2H_end); cout<<"Device to Host MemCpy takes "<<setprecision(4)<<time/1000<<"s"<<endl; }
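Both versions of GPU_array_process size the grid with a ternary on length % numThreadsBlock. A small sketch of the equivalent ceiling-division idiom, assuming the same 8x8 block shape used above; the function name launch_geometry is illustrative only:

#include <cuda_runtime.h>

// Sketch: same launch geometry as GPU_array_process, written as ceiling division.
void launch_geometry(int length)
{
    const int numThreadsBlock = 8;  // threads per block side, as in the original
    // ceil(length / 8): identical to the (length % 8 != 0) ? length/8 + 1 : length/8 form above
    const int numBlocksGrid = (length + numThreadsBlock - 1) / numThreadsBlock;

    dim3 numThreadsPerBlock(numThreadsBlock, numThreadsBlock);
    dim3 numBlocksInGrid(numBlocksGrid, numBlocksGrid);
    // kernel<<<numBlocksInGrid, numThreadsPerBlock>>>(input_d, output_d, length);
}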
9b38d67a9dcb0839f14ab65f161b4718bed5c39e.hip
// !!! This is a file automatically generated by hipify!!!
#include <cstddef>
#include <stdexcept>

namespace impala {

void* allocatePinnedMemory(std::size_t size) {
  void* ptr;
  auto ret = hipHostMalloc(&ptr, size);
  if (ret != hipSuccess) {
    throw std::runtime_error("hipHostMalloc failed");
  }
  return ptr;
}

void freePinnedMemory(void* ptr) {
  auto ret = hipHostFree(ptr);
  if (ret != hipSuccess) {
    throw std::runtime_error("hipHostFree failed");
  }
}

} // namespace impala
9b38d67a9dcb0839f14ab65f161b4718bed5c39e.cu
#include <cstddef>
#include <stdexcept>

namespace impala {

void* allocatePinnedMemory(std::size_t size) {
  void* ptr;
  auto ret = cudaMallocHost(&ptr, size);
  if (ret != cudaSuccess) {
    throw std::runtime_error("cudaMallocHost failed");
  }
  return ptr;
}

void freePinnedMemory(void* ptr) {
  auto ret = cudaFreeHost(ptr);
  if (ret != cudaSuccess) {
    throw std::runtime_error("cudaFreeHost failed");
  }
}

} // namespace impala
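A short usage sketch for the pair of helpers above. Only allocatePinnedMemory and freePinnedMemory come from the file; the surrounding main is hypothetical. Pinned (page-locked) host memory is what allows the runtime to perform genuinely asynchronous host-device copies, which is the usual reason for wrappers like these:

#include <cstddef>
#include <cstring>

namespace impala {
void* allocatePinnedMemory(std::size_t size);
void  freePinnedMemory(void* ptr);
}

int main()
{
    const std::size_t bytes = 1 << 20;
    // Page-locked staging buffer; the helper throws std::runtime_error on failure.
    void* staging = impala::allocatePinnedMemory(bytes);
    std::memset(staging, 0, bytes);     // behaves like ordinary host memory
    impala::freePinnedMemory(staging);
    return 0;
}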
8ca35372c5b2167b72d9451cacac88920c3fd67d.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "activate_array_normalize_channels_kernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *x = NULL; hipMalloc(&x, XSIZE*YSIZE); int size = XSIZE*YSIZE; int batch = 2; int channels = 1; int wh_step = 1; float *output_gpu = NULL; hipMalloc(&output_gpu, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( activate_array_normalize_channels_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, x,size,batch,channels,wh_step,output_gpu); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( activate_array_normalize_channels_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, x,size,batch,channels,wh_step,output_gpu); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( activate_array_normalize_channels_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, x,size,batch,channels,wh_step,output_gpu); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
8ca35372c5b2167b72d9451cacac88920c3fd67d.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "activate_array_normalize_channels_kernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *x = NULL; cudaMalloc(&x, XSIZE*YSIZE); int size = XSIZE*YSIZE; int batch = 2; int channels = 1; int wh_step = 1; float *output_gpu = NULL; cudaMalloc(&output_gpu, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); activate_array_normalize_channels_kernel<<<gridBlock,threadBlock>>>(x,size,batch,channels,wh_step,output_gpu); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { activate_array_normalize_channels_kernel<<<gridBlock,threadBlock>>>(x,size,batch,channels,wh_step,output_gpu); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { activate_array_normalize_channels_kernel<<<gridBlock,threadBlock>>>(x,size,batch,channels,wh_step,output_gpu); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
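The generated benchmark above follows a fixed pattern: allocate, a few warm-up launches, then a timed loop of 1000 launches measured with std::chrono. A condensed sketch of that harness for a stand-in kernel (dummy_kernel is hypothetical); unlike the generated code, this sketch multiplies the element count by sizeof(float) when allocating and synchronizes before stopping the clock so device time, not just launch overhead, is included:

#include <chrono>
#include <iostream>
#include <cuda_runtime.h>

__global__ void dummy_kernel(float *x, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] = 0.0f;
}

int main()
{
    using namespace std::chrono;
    const int n = 1 << 20;
    float *x = nullptr;
    cudaMalloc(&x, n * sizeof(float));      // element count times element size

    dim3 block(256), grid((n + 255) / 256);
    for (int i = 0; i < 10; ++i)            // warm-up, excluded from timing
        dummy_kernel<<<grid, block>>>(x, n);
    cudaDeviceSynchronize();

    auto start = steady_clock::now();
    for (int i = 0; i < 1000; ++i)
        dummy_kernel<<<grid, block>>>(x, n);
    cudaDeviceSynchronize();                // ensure all launches finished before reading the clock
    auto end = steady_clock::now();

    std::cout << duration_cast<microseconds>(end - start).count() / 1000.0
              << " us per launch (average)\n";
    cudaFree(x);
    return 0;
}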
52245a39940b8ce41a622125fcb5eba09c521894.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Program by Arthur Alves Araujo Ferreira - All rights reserved // ITESM ID: A01022593 #include <iostream> #include <chrono> const bool CPU_AND_COMPARE = true; // Function that multiplies 2 matrixes with cuda __global__ void matrixMultiplyGPU(int *A, int *B, int *C, const int n) { unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x; if (ix < n) { for (int iy = 0; iy < n; iy++) { for(int k = 0; k < n; k++) { C[iy * n + ix] += A[iy * n + k] * B[k * n + ix]; } } } } // Function that multiplies 2 matrixes with cpu void matrixMultiply(int *A, int *B, int *C, const int n) { for(int i = 0; i < n; i++) { for(int j = 0; j < n; j++) { for(int k = 0; k < n; k++) { C[i * n + j] += A[i * n + k] * B[j + k * n]; } } } } // Compares two matrices bool checkEquals(int *hostRef,int *gpuRef, const int n) { double ep = 1.0E-8; bool same = true; for (int i = 0; i < n*n; i++) { if (abs(hostRef[i] - gpuRef[i]) > ep) { same = false; printf("[%d] host %d gpu %d\n", i, hostRef[i], gpuRef[i]); return same; } } return same; } int main(int argc, char* argv[]) { // Device setup int dev = 0; hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, dev); printf("Using Device %d: %s\n", dev, deviceProp.name); hipSetDevice(dev); // Code configuration int repetitions = 20; int n = 50; int nBytes = n*n * sizeof(int*); // Input matrix initialization and fill int *h_A = (int*)malloc(nBytes); int *h_B = (int*)malloc(nBytes); for(int i = 0; i < n*n; i++) { h_A[i] = i+1; h_B[i] = i+1; } // Result matrixes initialization and zero fill int *gpuRef = (int*)malloc(nBytes); int *hostRef = (int*)malloc(nBytes); memset(hostRef, 0, nBytes); memset(gpuRef, 0, nBytes); // Device matrix global memory int *d_A, *d_B, *d_C; hipMalloc((void**)&d_A, nBytes); hipMalloc((void**)&d_B, nBytes); hipMalloc((void**)&d_C, nBytes); // Transfer data from host to device hipMemcpy(d_A, h_A, nBytes, hipMemcpyHostToDevice); hipMemcpy(d_B, h_B, nBytes, hipMemcpyHostToDevice); hipMemset(d_C, 0, nBytes); // Initialize matrix with 0s // Kernel execution configuration int dimx = 128; dim3 block(dimx, 1); dim3 grid((n + block.x - 1) / block.x, 1); printf("grid.x %d grid.y %d block.x %d block.y %d\n", grid.x, grid.y, block.x, block.y); // Variable initialization for repetitions double totalTimeGPU = 0; double totalTimeCPU = 0; std::chrono::duration<float, std::milli> duration_ms; // Repeat however may times was configured for (int i = 0; i < repetitions; i++) { // Multiply on GPU auto start = std::chrono::high_resolution_clock::now(); hipLaunchKernelGGL(( matrixMultiplyGPU), dim3(grid), dim3(block), 0, 0, d_A, d_B, d_C, n); hipDeviceSynchronize(); auto end = std::chrono::high_resolution_clock::now(); duration_ms = end - start; totalTimeGPU += duration_ms.count(); if (CPU_AND_COMPARE) { // Multiply on CPU start = std::chrono::high_resolution_clock::now(); matrixMultiply(h_A, h_B, hostRef, n); end = std::chrono::high_resolution_clock::now(); duration_ms = end - start; totalTimeCPU += duration_ms.count(); } // Copy result from device to host hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost); // Check if equals if (CPU_AND_COMPARE) { if (checkEquals(hostRef, gpuRef, n)) { printf("Matrix equal %d\n", i); } else { printf("Matrixes not equal %d\n", i); break; } } } // Print results printf("GPU matrix multiplication done in %f ms\n", totalTimeGPU / repetitions); if (CPU_AND_COMPARE) printf("CPU matrix multiplication done in %f ms\n", totalTimeCPU / repetitions); // Free 
memory hipFree(d_A); hipFree(d_B); hipFree(d_C); free(h_A); free(h_B); free(hostRef); free(gpuRef); hipDeviceReset(); return 0; }
52245a39940b8ce41a622125fcb5eba09c521894.cu
// Program by Arthur Alves Araujo Ferreira - All rights reserved // ITESM ID: A01022593 #include <iostream> #include <chrono> const bool CPU_AND_COMPARE = true; // Function that multiplies 2 matrixes with cuda __global__ void matrixMultiplyGPU(int *A, int *B, int *C, const int n) { unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x; if (ix < n) { for (int iy = 0; iy < n; iy++) { for(int k = 0; k < n; k++) { C[iy * n + ix] += A[iy * n + k] * B[k * n + ix]; } } } } // Function that multiplies 2 matrixes with cpu void matrixMultiply(int *A, int *B, int *C, const int n) { for(int i = 0; i < n; i++) { for(int j = 0; j < n; j++) { for(int k = 0; k < n; k++) { C[i * n + j] += A[i * n + k] * B[j + k * n]; } } } } // Compares two matrices bool checkEquals(int *hostRef,int *gpuRef, const int n) { double ep = 1.0E-8; bool same = true; for (int i = 0; i < n*n; i++) { if (abs(hostRef[i] - gpuRef[i]) > ep) { same = false; printf("[%d] host %d gpu %d\n", i, hostRef[i], gpuRef[i]); return same; } } return same; } int main(int argc, char* argv[]) { // Device setup int dev = 0; cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, dev); printf("Using Device %d: %s\n", dev, deviceProp.name); cudaSetDevice(dev); // Code configuration int repetitions = 20; int n = 50; int nBytes = n*n * sizeof(int*); // Input matrix initialization and fill int *h_A = (int*)malloc(nBytes); int *h_B = (int*)malloc(nBytes); for(int i = 0; i < n*n; i++) { h_A[i] = i+1; h_B[i] = i+1; } // Result matrixes initialization and zero fill int *gpuRef = (int*)malloc(nBytes); int *hostRef = (int*)malloc(nBytes); memset(hostRef, 0, nBytes); memset(gpuRef, 0, nBytes); // Device matrix global memory int *d_A, *d_B, *d_C; cudaMalloc((void**)&d_A, nBytes); cudaMalloc((void**)&d_B, nBytes); cudaMalloc((void**)&d_C, nBytes); // Transfer data from host to device cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice); cudaMemcpy(d_B, h_B, nBytes, cudaMemcpyHostToDevice); cudaMemset(d_C, 0, nBytes); // Initialize matrix with 0s // Kernel execution configuration int dimx = 128; dim3 block(dimx, 1); dim3 grid((n + block.x - 1) / block.x, 1); printf("grid.x %d grid.y %d block.x %d block.y %d\n", grid.x, grid.y, block.x, block.y); // Variable initialization for repetitions double totalTimeGPU = 0; double totalTimeCPU = 0; std::chrono::duration<float, std::milli> duration_ms; // Repeat however may times was configured for (int i = 0; i < repetitions; i++) { // Multiply on GPU auto start = std::chrono::high_resolution_clock::now(); matrixMultiplyGPU<<<grid, block>>>(d_A, d_B, d_C, n); cudaDeviceSynchronize(); auto end = std::chrono::high_resolution_clock::now(); duration_ms = end - start; totalTimeGPU += duration_ms.count(); if (CPU_AND_COMPARE) { // Multiply on CPU start = std::chrono::high_resolution_clock::now(); matrixMultiply(h_A, h_B, hostRef, n); end = std::chrono::high_resolution_clock::now(); duration_ms = end - start; totalTimeCPU += duration_ms.count(); } // Copy result from device to host cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost); // Check if equals if (CPU_AND_COMPARE) { if (checkEquals(hostRef, gpuRef, n)) { printf("Matrix equal %d\n", i); } else { printf("Matrixes not equal %d\n", i); break; } } } // Print results printf("GPU matrix multiplication done in %f ms\n", totalTimeGPU / repetitions); if (CPU_AND_COMPARE) printf("CPU matrix multiplication done in %f ms\n", totalTimeCPU / repetitions); // Free memory cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); free(h_A); free(h_B); free(hostRef); free(gpuRef); 
cudaDeviceReset(); return 0; }
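matrixMultiplyGPU above parallelizes only over columns (ix) and loops over rows inside each thread. As an alternative sketch, not the author's kernel, the same product can use one thread per output element with a 2D grid; indexing keeps the row-major C[iy*n+ix] layout and the += accumulation of the original:

// Alternative sketch: one thread per output element instead of one thread per column.
__global__ void matrixMultiplyGPU2D(const int *A, const int *B, int *C, const int n)
{
    unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x;  // column
    unsigned int iy = threadIdx.y + blockIdx.y * blockDim.y;  // row
    if (ix < n && iy < n) {
        int acc = 0;
        for (int k = 0; k < n; ++k)
            acc += A[iy * n + k] * B[k * n + ix];
        C[iy * n + ix] += acc;   // += so repeated launches accumulate like the original
    }
}

// Launch sketch:
//   dim3 block(16, 16);
//   dim3 grid((n + 15) / 16, (n + 15) / 16);
//   matrixMultiplyGPU2D<<<grid, block>>>(d_A, d_B, d_C, n);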
b9be3a0e172eb5bba28f8c8ea15adc1425861a7e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "iostream" #include "string" #include "fstream" #include <thread> #include <sstream> #include <iomanip> #include <mutex> #include <thrust/host_vector.h> #include <thrust/device_vector.h> using namespace std; using namespace thrust; const char data_filename[] = "../../L4data/IFF-6-10_BurakauskasM_L4.csv"; const char results_filename[] = "../../L4data/IFF-6-10_BurakauskasM_L4b_rez.txt"; const int max_string_length = 47; const int group_count = 5; const int n = 30; // Structure of Car type data struct car { char manufacturer[max_string_length]; char model[max_string_length]; int year; double price; // Default constructor // Called from either CPU or GPU __host__ __device__ car() {}; // Constructor that assigns passed values to properties // Called from either CPU or GPU __host__ __device__ car(char manufacturerNew[], char modelNew[], int yearNew, double priceNew) { strcpy(manufacturer, manufacturerNew); strcpy(model, modelNew); year = yearNew; price = priceNew; } }; // Main data array car Cars_data[group_count][n]; // Main results array car Car_results[n]; // Main data array's elements' length int Car_data_sizes[group_count]; // Clears results file // Run on CPU void clear_results_file() { ofstream file; file.open(results_filename, ofstream::out | ofstream::trunc); file.close(); } // Reads from file to Car structure // Run on CPU void read_data() { ifstream data_file(data_filename); for (auto i = 0; i < group_count; i++) { data_file >> Car_data_sizes[i]; for (auto j = 0; j < Car_data_sizes[i]; j++) { char manufacturer[100], model[100]; data_file >> manufacturer >> model >> Cars_data[i][j].year >> Cars_data[i][j].price; strcpy_s(Cars_data[i][j].manufacturer, manufacturer); strcpy_s(Cars_data[i][j].model, model); } } data_file.close(); } // Writes Car type data to file // Run on CPU void write_data() { stringstream buffer; buffer << left << setw(4) << "Nr." << setw(15) << "Gamintojas" << setw(20) << "Modelis" << right << setw(5) << "Metai" << setw(10) << fixed << setprecision(2) << "Kaina" << endl; buffer << string(54, '-') << "\n"; for (auto i = 0; i < group_count; i++) { auto line_index = 0; for (auto j = 0; j < Car_data_sizes[i]; j++) { buffer << setw(3) << ++line_index << " " << left << setw(15) << Cars_data[i][j].manufacturer << setw(20) << Cars_data[i][j].model << right << setw(5) << Cars_data[i][j].year << setw(10) << fixed << setprecision(2) << Cars_data[i][j].price << "\n"; } buffer << string(54, '-') << "\n"; } ofstream results_file(results_filename, ios::app); results_file << buffer.str(); cout << buffer.str(); } // Appends results data of type Car to file // Run on CPU void write_result() { stringstream buffer; buffer << left << setw(4) << "Nr." 
<< setw(max_string_length) << "Gamintojas" << setw(max_string_length) << "Modelis" << right << setw(7) << "Metai" << setw(12) << fixed << setprecision(2) << "Kaina" << endl; buffer << string(23 + max_string_length * 2, '-') << "\n"; auto line_index = 0; for (auto i = 0; i < n; i++) { if (Car_results[i].year != 0) { buffer << setw(3) << ++line_index << " " << left << setw(max_string_length) << Car_results[i].manufacturer << setw(max_string_length) << Car_results[i].model << right << setw(7) << Car_results[i].year << setw(12) << fixed << setprecision(2) << Car_results[i].price << "\n"; } } buffer << string(23 + max_string_length * 2, '-') << "\n"; ofstream results_file(results_filename, ios::app); results_file << buffer.str(); cout << buffer.str(); } // strcpy function replacement that is called from GPU and run on GPU __device__ char * custom_strcpy(char *destination, const char *source) { auto i = 0; do { destination[i] = source[i]; } while (source[i++] != 0); return destination; } // strcat function replacement that is called from GPU and run on GPU __device__ char * custom_strcat(char *destination, const char *source) { auto i = 0; while (destination[i] != 0) i++; custom_strcpy(destination + i, source); return destination; } struct functor { // Add Car type objects' elements to accumulator object by concatenating strings and summing numeric values // Called and run on GPU __device__ car operator()(car accumulator, car item) { const auto manufacturer = accumulator.manufacturer; custom_strcat(manufacturer, item.manufacturer); const auto model = accumulator.model; custom_strcat(model, item.model); accumulator.year += item.year; accumulator.price += item.price; return accumulator; } }; int main() { clear_results_file(); // Read data from and write it to file read_data(); write_data(); // Create vector on CPU memory to store data host_vector<car> host_car[n]; // Create vector on GPU memory to store data device_vector<car> device_car[n]; // Add data from main data array to host vector and copy it to GPU memory by assigning it's value to device vector for (auto i = 0; i < n; i++) { for (auto o = 0; o < group_count; o++) { host_car[i].push_back(Cars_data[o][i]); } device_car[i] = host_car[i]; } // Call reduce function for each vector in device vector array. // Reduce function uses operatot() override in functor struct for (auto i = 0; i < n; i++) { Car_results[i] = reduce(device_car[i].begin(), device_car[i].end(), car("", "", 0, 0), functor()); } // Write results to file write_result(); // Pause console window std::system("pause"); return 0; }
b9be3a0e172eb5bba28f8c8ea15adc1425861a7e.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include "iostream" #include "string" #include "fstream" #include <thread> #include <sstream> #include <iomanip> #include <mutex> #include <thrust/host_vector.h> #include <thrust/device_vector.h> using namespace std; using namespace thrust; const char data_filename[] = "../../L4data/IFF-6-10_BurakauskasM_L4.csv"; const char results_filename[] = "../../L4data/IFF-6-10_BurakauskasM_L4b_rez.txt"; const int max_string_length = 47; const int group_count = 5; const int n = 30; // Structure of Car type data struct car { char manufacturer[max_string_length]; char model[max_string_length]; int year; double price; // Default constructor // Called from either CPU or GPU __host__ __device__ car() {}; // Constructor that assigns passed values to properties // Called from either CPU or GPU __host__ __device__ car(char manufacturerNew[], char modelNew[], int yearNew, double priceNew) { strcpy(manufacturer, manufacturerNew); strcpy(model, modelNew); year = yearNew; price = priceNew; } }; // Main data array car Cars_data[group_count][n]; // Main results array car Car_results[n]; // Main data array's elements' length int Car_data_sizes[group_count]; // Clears results file // Run on CPU void clear_results_file() { ofstream file; file.open(results_filename, ofstream::out | ofstream::trunc); file.close(); } // Reads from file to Car structure // Run on CPU void read_data() { ifstream data_file(data_filename); for (auto i = 0; i < group_count; i++) { data_file >> Car_data_sizes[i]; for (auto j = 0; j < Car_data_sizes[i]; j++) { char manufacturer[100], model[100]; data_file >> manufacturer >> model >> Cars_data[i][j].year >> Cars_data[i][j].price; strcpy_s(Cars_data[i][j].manufacturer, manufacturer); strcpy_s(Cars_data[i][j].model, model); } } data_file.close(); } // Writes Car type data to file // Run on CPU void write_data() { stringstream buffer; buffer << left << setw(4) << "Nr." << setw(15) << "Gamintojas" << setw(20) << "Modelis" << right << setw(5) << "Metai" << setw(10) << fixed << setprecision(2) << "Kaina" << endl; buffer << string(54, '-') << "\n"; for (auto i = 0; i < group_count; i++) { auto line_index = 0; for (auto j = 0; j < Car_data_sizes[i]; j++) { buffer << setw(3) << ++line_index << " " << left << setw(15) << Cars_data[i][j].manufacturer << setw(20) << Cars_data[i][j].model << right << setw(5) << Cars_data[i][j].year << setw(10) << fixed << setprecision(2) << Cars_data[i][j].price << "\n"; } buffer << string(54, '-') << "\n"; } ofstream results_file(results_filename, ios::app); results_file << buffer.str(); cout << buffer.str(); } // Appends results data of type Car to file // Run on CPU void write_result() { stringstream buffer; buffer << left << setw(4) << "Nr." 
<< setw(max_string_length) << "Gamintojas" << setw(max_string_length) << "Modelis" << right << setw(7) << "Metai" << setw(12) << fixed << setprecision(2) << "Kaina" << endl; buffer << string(23 + max_string_length * 2, '-') << "\n"; auto line_index = 0; for (auto i = 0; i < n; i++) { if (Car_results[i].year != 0) { buffer << setw(3) << ++line_index << " " << left << setw(max_string_length) << Car_results[i].manufacturer << setw(max_string_length) << Car_results[i].model << right << setw(7) << Car_results[i].year << setw(12) << fixed << setprecision(2) << Car_results[i].price << "\n"; } } buffer << string(23 + max_string_length * 2, '-') << "\n"; ofstream results_file(results_filename, ios::app); results_file << buffer.str(); cout << buffer.str(); } // strcpy function replacement that is called from GPU and run on GPU __device__ char * custom_strcpy(char *destination, const char *source) { auto i = 0; do { destination[i] = source[i]; } while (source[i++] != 0); return destination; } // strcat function replacement that is called from GPU and run on GPU __device__ char * custom_strcat(char *destination, const char *source) { auto i = 0; while (destination[i] != 0) i++; custom_strcpy(destination + i, source); return destination; } struct functor { // Add Car type objects' elements to accumulator object by concatenating strings and summing numeric values // Called and run on GPU __device__ car operator()(car accumulator, car item) { const auto manufacturer = accumulator.manufacturer; custom_strcat(manufacturer, item.manufacturer); const auto model = accumulator.model; custom_strcat(model, item.model); accumulator.year += item.year; accumulator.price += item.price; return accumulator; } }; int main() { clear_results_file(); // Read data from and write it to file read_data(); write_data(); // Create vector on CPU memory to store data host_vector<car> host_car[n]; // Create vector on GPU memory to store data device_vector<car> device_car[n]; // Add data from main data array to host vector and copy it to GPU memory by assigning it's value to device vector for (auto i = 0; i < n; i++) { for (auto o = 0; o < group_count; o++) { host_car[i].push_back(Cars_data[o][i]); } device_car[i] = host_car[i]; } // Call reduce function for each vector in device vector array. // Reduce function uses operatot() override in functor struct for (auto i = 0; i < n; i++) { Car_results[i] = reduce(device_car[i].begin(), device_car[i].end(), car("", "", 0, 0), functor()); } // Write results to file write_result(); // Pause console window std::system("pause"); return 0; }
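The reduce call above folds whole car records together through a custom functor, starting from an empty car as the initial value. A minimal standalone sketch of the same thrust::reduce pattern; the item struct and sum_fields functor are simplified stand-ins for the original car and functor, summing only numeric fields:

#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/reduce.h>
#include <iostream>

struct item { int year; double price; };

// Binary operation passed to thrust::reduce: fold one record into the accumulator.
struct sum_fields {
    __host__ __device__ item operator()(item acc, item x) const {
        acc.year  += x.year;
        acc.price += x.price;
        return acc;
    }
};

int main()
{
    thrust::host_vector<item> h;
    h.push_back(item{2010, 1000.0});
    h.push_back(item{2015, 2500.0});
    h.push_back(item{2020, 4000.0});
    thrust::device_vector<item> d = h;   // copy records to GPU memory

    item init{0, 0.0};
    item total = thrust::reduce(d.begin(), d.end(), init, sum_fields());
    std::cout << total.year << " " << total.price << "\n";   // prints 6045 7500
    return 0;
}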
449a35552df2a6215c58d608109e349e872105d9.hip
// !!! This is a file automatically generated by hipify!!! #include "srad.h" #include "graphics.c" #include "resize.c" #include <thrust/window_transform.h> // includes, kernels #include "srad_kernel.hip" void runTest( int argc, char** argv); void usage(int argc, char **argv) { fprintf(stderr, "Usage: %s <rows> <cols> <lamda> <no. of iter>\n", argv[0]); fprintf(stderr, "\t<rows> - number of rows\n"); fprintf(stderr, "\t<cols> - number of cols\n"); fprintf(stderr, "\t<lamda> - lambda (0,1)\n"); fprintf(stderr, "\t<no. of iter> - number of iterations\n"); fprintf(stderr, "\t<input_file> - input file\n"); fprintf(stderr, "\t<output file> - output file\n"); exit(1); } //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main( int argc, char** argv) { time_t t; srand((unsigned) time(&t)); runTest( argc, argv); return EXIT_SUCCESS; } void runTest( int argc, char** argv) { int rows, cols, size_I, size_R, niter = 10, iter; float *J,lambda, q0sqr, sum, sum2,meanROI,varROI ; int r1, r2, c1, c2; if (argc == 5) { rows = atoi(argv[3]); //number of rows in the domain cols = atoi(argv[4]); //number of cols in the domain lambda = atof(argv[2]); //Lambda value niter = atoi(argv[1]); //number of iterations } else { usage(argc, argv); } r1 = 0; r2 = rows - 1; c1 = 0; c2 = cols - 1; size_R = (r2-r1+1)*(c2-c1+1); long image_ori_rows = 502; long image_ori_cols = 458; long image_ori_elem = image_ori_rows * image_ori_cols; float * image_ori = (float*)malloc(sizeof(float) * image_ori_elem); read_graphics( (char *)"./image.pgm", image_ori, image_ori_rows, image_ori_cols, 1); size_I = cols * rows; J = (float*) malloc(sizeof(float) * size_I); resize( image_ori,image_ori_rows,image_ori_cols,J,rows,cols,0); thrust::block_2d<float> J_cuda (cols,rows); // printf("%d %d\n", cols,rows); // printf("%d %d\n", J_cuda.dim_x,J_cuda.dim_y); thrust::block_2d<float> J_square(cols,rows); thrust::block_2d<float> d_c(cols,rows,0.0f); // thrust::fill(d_c.begin(),d_c.end(),0); J_cuda.upload(J); thrust::for_each(J_cuda.begin(),J_cuda.end(),extractFunctor()); // printf("Start the SRAD main loop\n"); for (iter=0; iter< niter; iter++) { thrust::transform(J_cuda.begin(),J_cuda.end(),J_square.begin(),square()); // printf("%d %d\n",J_cuda.end().position ,J_cuda.begin().position ); sum = thrust::reduce(thrust::hip::shared,J_cuda.begin(),J_cuda.end(),0.0f,thrust::plus<float>()); sum2 = thrust::reduce(thrust::hip::shared,J_square.begin(),J_square.end(),0.0f,thrust::plus<float>()); // printf("%f %f\n", sum,sum2); meanROI = sum / size_R; varROI = (sum2 / size_R) - meanROI*meanROI; q0sqr = varROI / (meanROI*meanROI); SRADFunctor1 functor1(cols,rows,q0sqr); SRADFunctor2 functor2(cols,rows,lambda,q0sqr); thrust::window_vector<float> wv(&(J_cuda),3,3,1,1); thrust::window_vector<float> d_cwv(&(d_c),3,3,1,1); thrust::transform(thrust::hip::shared,wv.begin(),wv.end(),d_cwv.begin(),functor1); // thrust::for_each(J_cuda.begin(),J_cuda.end(),printFunctor()); // thrust::for_each(d_c.begin(),d_c.end(),printFunctor()); thrust::transform(thrust::hip::shared,d_cwv.begin(),d_cwv.end(),wv.begin(),functor2); } // printf("Computation Done\n"); thrust::for_each(J_cuda.begin(),J_cuda.end(),compressFunctor()); J_cuda.download(&J); write_graphics((char *)"./image_out.pgm",J,rows,cols,0,255); free(J); }
449a35552df2a6215c58d608109e349e872105d9.cu
#include "srad.h" #include "graphics.c" #include "resize.c" #include <thrust/window_transform.h> // includes, kernels #include "srad_kernel.cu" void runTest( int argc, char** argv); void usage(int argc, char **argv) { fprintf(stderr, "Usage: %s <rows> <cols> <lamda> <no. of iter>\n", argv[0]); fprintf(stderr, "\t<rows> - number of rows\n"); fprintf(stderr, "\t<cols> - number of cols\n"); fprintf(stderr, "\t<lamda> - lambda (0,1)\n"); fprintf(stderr, "\t<no. of iter> - number of iterations\n"); fprintf(stderr, "\t<input_file> - input file\n"); fprintf(stderr, "\t<output file> - output file\n"); exit(1); } //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main( int argc, char** argv) { time_t t; srand((unsigned) time(&t)); runTest( argc, argv); return EXIT_SUCCESS; } void runTest( int argc, char** argv) { int rows, cols, size_I, size_R, niter = 10, iter; float *J,lambda, q0sqr, sum, sum2,meanROI,varROI ; int r1, r2, c1, c2; if (argc == 5) { rows = atoi(argv[3]); //number of rows in the domain cols = atoi(argv[4]); //number of cols in the domain lambda = atof(argv[2]); //Lambda value niter = atoi(argv[1]); //number of iterations } else { usage(argc, argv); } r1 = 0; r2 = rows - 1; c1 = 0; c2 = cols - 1; size_R = (r2-r1+1)*(c2-c1+1); long image_ori_rows = 502; long image_ori_cols = 458; long image_ori_elem = image_ori_rows * image_ori_cols; float * image_ori = (float*)malloc(sizeof(float) * image_ori_elem); read_graphics( (char *)"./image.pgm", image_ori, image_ori_rows, image_ori_cols, 1); size_I = cols * rows; J = (float*) malloc(sizeof(float) * size_I); resize( image_ori,image_ori_rows,image_ori_cols,J,rows,cols,0); thrust::block_2d<float> J_cuda (cols,rows); // printf("%d %d\n", cols,rows); // printf("%d %d\n", J_cuda.dim_x,J_cuda.dim_y); thrust::block_2d<float> J_square(cols,rows); thrust::block_2d<float> d_c(cols,rows,0.0f); // thrust::fill(d_c.begin(),d_c.end(),0); J_cuda.upload(J); thrust::for_each(J_cuda.begin(),J_cuda.end(),extractFunctor()); // printf("Start the SRAD main loop\n"); for (iter=0; iter< niter; iter++) { thrust::transform(J_cuda.begin(),J_cuda.end(),J_square.begin(),square()); // printf("%d %d\n",J_cuda.end().position ,J_cuda.begin().position ); sum = thrust::reduce(thrust::cuda::shared,J_cuda.begin(),J_cuda.end(),0.0f,thrust::plus<float>()); sum2 = thrust::reduce(thrust::cuda::shared,J_square.begin(),J_square.end(),0.0f,thrust::plus<float>()); // printf("%f %f\n", sum,sum2); meanROI = sum / size_R; varROI = (sum2 / size_R) - meanROI*meanROI; q0sqr = varROI / (meanROI*meanROI); SRADFunctor1 functor1(cols,rows,q0sqr); SRADFunctor2 functor2(cols,rows,lambda,q0sqr); thrust::window_vector<float> wv(&(J_cuda),3,3,1,1); thrust::window_vector<float> d_cwv(&(d_c),3,3,1,1); thrust::transform(thrust::cuda::shared,wv.begin(),wv.end(),d_cwv.begin(),functor1); // thrust::for_each(J_cuda.begin(),J_cuda.end(),printFunctor()); // thrust::for_each(d_c.begin(),d_c.end(),printFunctor()); thrust::transform(thrust::cuda::shared,d_cwv.begin(),d_cwv.end(),wv.begin(),functor2); } // printf("Computation Done\n"); thrust::for_each(J_cuda.begin(),J_cuda.end(),compressFunctor()); J_cuda.download(&J); write_graphics((char *)"./image_out.pgm",J,rows,cols,0,255); free(J); }
731c5f4690636b70585360b005af12204b92b45e.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "displayAttributeValues.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( displayAttributeValues), dim3(gridBlock),dim3(threadBlock), 0, 0, ); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( displayAttributeValues), dim3(gridBlock),dim3(threadBlock), 0, 0, ); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( displayAttributeValues), dim3(gridBlock),dim3(threadBlock), 0, 0, ); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
731c5f4690636b70585360b005af12204b92b45e.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "displayAttributeValues.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); displayAttributeValues<<<gridBlock,threadBlock>>>(); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { displayAttributeValues<<<gridBlock,threadBlock>>>(); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { displayAttributeValues<<<gridBlock,threadBlock>>>(); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
071b3ba487ac0699f9923fcaea737c7dfe525056.hip
// !!! This is a file automatically generated by hipify!!! /* * utils.cu - this file is part of CuPoisson * * Copyright 2011-2013, Folkert Bleichrodt * * CuPoisson is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * CuPoisson is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with CuPoisson. If not, see <http://www.gnu.org/licenses/>. */ /* * These functions are not necessary for running the code. However, they * can be used to simplify error checking and safeguard CUDA kernels. */ #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <rocblas.h> #include <string.h> #include "utils.cuh" #define BLOCK_SIZE 16 #define THREADSPB (BLOCK_SIZE * BLOCK_SIZE) #define IDX(i,j,ld) ((i-1)*ld+(j-1)) /* initialize a vector to a constant value */ __global__ void cuFillArray(unsigned int n, real *dest, const real value) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= n) return; dest[idx] = value; } /* error checking */ void checkCudaError(const char *msg) { hipError_t err = hipGetLastError(); if(hipSuccess != err) { fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString(err)); exit(EXIT_FAILURE); } } void writeFile(unsigned int gridSizeX, unsigned int gridSizeY, const real *x, const char *filename) { unsigned int n = (gridSizeX-2) * (gridSizeY-2); real *temp = (real *)malloc(n*sizeof(real)); if (hipblasGetVector(n, sizeof(real), x, 1, temp, 1) != HIPBLAS_STATUS_SUCCESS) { fprintf(stderr, "Error: cublas failed!\n"); exit(EXIT_FAILURE); } FILE *fp = fopen(filename, "w"); // leading dimensions gridsize int ld = gridSizeY - 2; int i, j; for (j = 0; j < gridSizeY; j++) { for (i = 0; i < gridSizeX; i++) { if (i == 0 || j == 0 || i == gridSizeX - 1 || j == gridSizeY - 1) fprintf(fp, "0 "); else fprintf(fp, "%f ", temp[IDX(i,j,ld)]); } fprintf(fp, "\n"); } fclose(fp); free(temp); } void writeBinaryFile(unsigned int gridSizeX, unsigned int gridSizeY, const real *x, const char *filename) { size_t FSIZE = sizeof(real); unsigned int n = (gridSizeX-2) * (gridSizeY-2); real *temp = (real*)malloc(n*FSIZE); if (hipblasGetVector(n, FSIZE, x, 1, temp, 1) != HIPBLAS_STATUS_SUCCESS) { fprintf(stderr, "Error: cublas failed!\n"); exit(EXIT_FAILURE); } FILE *fp = fopen(filename, "wb"); // header int dimensions[2] = {gridSizeX, gridSizeY}; fwrite(dimensions, sizeof(int), 2, fp); real *zeros; // boundary conditions zeros = (real*)malloc(gridSizeY*FSIZE); int i; for (i = 0; i < gridSizeY; i++) { zeros[i] = 0.0; } fwrite(zeros, FSIZE, gridSizeY, fp); for (i = 0; i < gridSizeX-2; i++) { // boundary condition fwrite(zeros, FSIZE, 1, fp); // one column fwrite(temp+i*(gridSizeY-2), FSIZE, gridSizeY-2, fp); // boundary condition fwrite(zeros, FSIZE, 1, fp); } fwrite(zeros, FSIZE, gridSizeY, fp); // clean up fclose(fp); free(temp); free(zeros); } /** * This function is a wrapper for the CUDA kernel * cuFillArray, to use in C code without loading CUDA specific * libraries. 
* * @param dest, device pointer for destination * @param value, the value to set for each element * @param count, number of elements to fill */ void fillArray(real *dest, const real value, unsigned int count) { // determine number of blocks needed int nBlocks = (count + THREADSPB-1)/THREADSPB; // call CUDA kernel hipLaunchKernelGGL(( cuFillArray), dim3(nBlocks), dim3(THREADSPB), 0, 0, count, dest, value); }
071b3ba487ac0699f9923fcaea737c7dfe525056.cu
/* * utils.cu - this file is part of CuPoisson * * Copyright © 2011-2013, Folkert Bleichrodt * * CuPoisson is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * CuPoisson is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with CuPoisson. If not, see <http://www.gnu.org/licenses/>. */ /* * These functions are not necessary for running the code. However, they * can be used to simplify error checking and safeguard CUDA kernels. */ #include <stdio.h> #include <stdlib.h> #include <cuda.h> #include <cublas.h> #include <string.h> #include "utils.cuh" #define BLOCK_SIZE 16 #define THREADSPB (BLOCK_SIZE * BLOCK_SIZE) #define IDX(i,j,ld) ((i-1)*ld+(j-1)) /* initialize a vector to a constant value */ __global__ void cuFillArray(unsigned int n, real *dest, const real value) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= n) return; dest[idx] = value; } /* error checking */ void checkCudaError(const char *msg) { cudaError_t err = cudaGetLastError(); if(cudaSuccess != err) { fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(err)); exit(EXIT_FAILURE); } } void writeFile(unsigned int gridSizeX, unsigned int gridSizeY, const real *x, const char *filename) { unsigned int n = (gridSizeX-2) * (gridSizeY-2); real *temp = (real *)malloc(n*sizeof(real)); if (cublasGetVector(n, sizeof(real), x, 1, temp, 1) != CUBLAS_STATUS_SUCCESS) { fprintf(stderr, "Error: cublas failed!\n"); exit(EXIT_FAILURE); } FILE *fp = fopen(filename, "w"); // leading dimensions gridsize int ld = gridSizeY - 2; int i, j; for (j = 0; j < gridSizeY; j++) { for (i = 0; i < gridSizeX; i++) { if (i == 0 || j == 0 || i == gridSizeX - 1 || j == gridSizeY - 1) fprintf(fp, "0 "); else fprintf(fp, "%f ", temp[IDX(i,j,ld)]); } fprintf(fp, "\n"); } fclose(fp); free(temp); } void writeBinaryFile(unsigned int gridSizeX, unsigned int gridSizeY, const real *x, const char *filename) { size_t FSIZE = sizeof(real); unsigned int n = (gridSizeX-2) * (gridSizeY-2); real *temp = (real*)malloc(n*FSIZE); if (cublasGetVector(n, FSIZE, x, 1, temp, 1) != CUBLAS_STATUS_SUCCESS) { fprintf(stderr, "Error: cublas failed!\n"); exit(EXIT_FAILURE); } FILE *fp = fopen(filename, "wb"); // header int dimensions[2] = {gridSizeX, gridSizeY}; fwrite(dimensions, sizeof(int), 2, fp); real *zeros; // boundary conditions zeros = (real*)malloc(gridSizeY*FSIZE); int i; for (i = 0; i < gridSizeY; i++) { zeros[i] = 0.0; } fwrite(zeros, FSIZE, gridSizeY, fp); for (i = 0; i < gridSizeX-2; i++) { // boundary condition fwrite(zeros, FSIZE, 1, fp); // one column fwrite(temp+i*(gridSizeY-2), FSIZE, gridSizeY-2, fp); // boundary condition fwrite(zeros, FSIZE, 1, fp); } fwrite(zeros, FSIZE, gridSizeY, fp); // clean up fclose(fp); free(temp); free(zeros); } /** * This function is a wrapper for the CUDA kernel * cuFillArray, to use in C code without loading CUDA specific * libraries. 
* * @param dest, device pointer for destination * @param value, the value to set for each element * @param count, number of elements to fill */ void fillArray(real *dest, const real value, unsigned int count) { // determine number of blocks needed int nBlocks = (count + THREADSPB-1)/THREADSPB; // call CUDA kernel cuFillArray<<<nBlocks, THREADSPB>>>(count, dest, value); }
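A short usage sketch for the two helpers above: fillArray launches cuFillArray with (count + THREADSPB - 1)/THREADSPB blocks, and checkCudaError turns the last CUDA error into a fatal message. The surrounding main is hypothetical, and `real` is assumed to be the float/double typedef from utils.cuh:

#include <cuda_runtime.h>
#include "utils.cuh"   // declares real, fillArray, checkCudaError

int main()
{
    const unsigned int n = 1u << 20;
    real *d_x = NULL;
    cudaMalloc((void **)&d_x, n * sizeof(real));
    checkCudaError("cudaMalloc d_x");

    fillArray(d_x, (real)1.0, n);   // set every element to 1.0 on the device
    checkCudaError("fillArray");

    cudaFree(d_x);
    return 0;
}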
a226899f97b61a0af43e6b489fb63f7b93c0430a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel5_plus_2_a; int xdim0_update_halo_kernel5_plus_2_a_h = -1; __constant__ int ydim0_update_halo_kernel5_plus_2_a; int ydim0_update_halo_kernel5_plus_2_a_h = -1; __constant__ int xdim1_update_halo_kernel5_plus_2_a; int xdim1_update_halo_kernel5_plus_2_a_h = -1; __constant__ int ydim1_update_halo_kernel5_plus_2_a; int ydim1_update_halo_kernel5_plus_2_a_h = -1; #define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel5_plus_2_a*(y)+xdim0_update_halo_kernel5_plus_2_a*ydim0_update_halo_kernel5_plus_2_a*(z)) #define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel5_plus_2_a*(y)+xdim1_update_halo_kernel5_plus_2_a*ydim1_update_halo_kernel5_plus_2_a*(z)) //user function __device__ inline void update_halo_kernel5_plus_2_a(double *vol_flux_z, double *mass_flux_z, const int* fields) { if(fields[FIELD_VOL_FLUX_Z] == 1) vol_flux_z[OPS_ACC0(0,0,0)] = vol_flux_z[OPS_ACC0(0,2,0)]; if(fields[FIELD_MASS_FLUX_Z] == 1) mass_flux_z[OPS_ACC1(0,0,0)] = mass_flux_z[OPS_ACC1(0,2,0)]; } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_update_halo_kernel5_plus_2_a( double* __restrict arg0, double* __restrict arg1, const int* __restrict arg2, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 + idx_y * 1 * xdim0_update_halo_kernel5_plus_2_a + idx_z * 1 * xdim0_update_halo_kernel5_plus_2_a * ydim0_update_halo_kernel5_plus_2_a; arg1 += idx_x * 1 + idx_y * 1 * xdim1_update_halo_kernel5_plus_2_a + idx_z * 1 * xdim1_update_halo_kernel5_plus_2_a * ydim1_update_halo_kernel5_plus_2_a; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { update_halo_kernel5_plus_2_a(arg0, arg1, arg2); } } // host stub function void ops_par_loop_update_halo_kernel5_plus_2_a(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { ops_arg args[3] = { arg0, arg1, arg2}; ops_timing_realloc(114,"update_halo_kernel5_plus_2_a"); OPS_kernels[114].count++; //compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<3; n++ ){ start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else //OPS_MPI for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif //OPS_MPI int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); int xdim0 = args[0].dat->size[0]*args[0].dat->dim; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]*args[1].dat->dim; int ydim1 = args[1].dat->size[1]; //Timing double t1,t2,c1,c2; ops_timers_core(&c2,&t2); if (xdim0 != xdim0_update_halo_kernel5_plus_2_a_h || ydim0 != ydim0_update_halo_kernel5_plus_2_a_h || xdim1 != xdim1_update_halo_kernel5_plus_2_a_h || ydim1 != ydim1_update_halo_kernel5_plus_2_a_h) { hipMemcpyToSymbol( 
xdim0_update_halo_kernel5_plus_2_a, &xdim0, sizeof(int) ); xdim0_update_halo_kernel5_plus_2_a_h = xdim0; hipMemcpyToSymbol( ydim0_update_halo_kernel5_plus_2_a, &ydim0, sizeof(int) ); ydim0_update_halo_kernel5_plus_2_a_h = ydim0; hipMemcpyToSymbol( xdim1_update_halo_kernel5_plus_2_a, &xdim1, sizeof(int) ); xdim1_update_halo_kernel5_plus_2_a_h = xdim1; hipMemcpyToSymbol( ydim1_update_halo_kernel5_plus_2_a, &ydim1, sizeof(int) ); ydim1_update_halo_kernel5_plus_2_a_h = ydim1; } int *arg2h = (int *)arg2.data; dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x,OPS_block_size_y,1); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; char *p_a[3]; //set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif //OPS_MPI int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif //OPS_MPI int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args,3,range); ops_timers_core(&c1,&t1); OPS_kernels[114].mpi_time += t1-t2; //call kernel wrapper function, passing in pointers to data hipLaunchKernelGGL(( ops_update_halo_kernel5_plus_2_a), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d,x_size, y_size, z_size); if (OPS_diags>1) { cutilSafeCall(hipDeviceSynchronize()); } ops_timers_core(&c2,&t2); OPS_kernels[114].time += t2-t1; ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[1],range); //Update kernel record OPS_kernels[114].transfer += ops_compute_transfer(dim, range, &arg0); OPS_kernels[114].transfer += ops_compute_transfer(dim, range, &arg1); }
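Both stubs in this pair use the same dimension-caching idiom: each dataset extent is mirrored in a __constant__ symbol on the device (read by the OPS_ACC macros), and a host-side shadow variable with the _h suffix records the last value copied, so hipMemcpyToSymbol / cudaMemcpyToSymbol is only re-issued when an extent actually changes between calls. A minimal sketch of that idiom with illustrative names (xdim0_example and update_dims are not part of the generated file):

__constant__ int xdim0_example;     // device-side copy read by the kernel's index macros
static int xdim0_example_h = -1;    // host-side cache; -1 means "never copied"

static void update_dims(int xdim0) {
  if (xdim0 != xdim0_example_h) {                           // copy only when it changes
    hipMemcpyToSymbol(xdim0_example, &xdim0, sizeof(int));  // same call the stub issues
    xdim0_example_h = xdim0;
  }
}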
a226899f97b61a0af43e6b489fb63f7b93c0430a.cu
// // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel5_plus_2_a; int xdim0_update_halo_kernel5_plus_2_a_h = -1; __constant__ int ydim0_update_halo_kernel5_plus_2_a; int ydim0_update_halo_kernel5_plus_2_a_h = -1; __constant__ int xdim1_update_halo_kernel5_plus_2_a; int xdim1_update_halo_kernel5_plus_2_a_h = -1; __constant__ int ydim1_update_halo_kernel5_plus_2_a; int ydim1_update_halo_kernel5_plus_2_a_h = -1; #define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel5_plus_2_a*(y)+xdim0_update_halo_kernel5_plus_2_a*ydim0_update_halo_kernel5_plus_2_a*(z)) #define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel5_plus_2_a*(y)+xdim1_update_halo_kernel5_plus_2_a*ydim1_update_halo_kernel5_plus_2_a*(z)) //user function __device__ inline void update_halo_kernel5_plus_2_a(double *vol_flux_z, double *mass_flux_z, const int* fields) { if(fields[FIELD_VOL_FLUX_Z] == 1) vol_flux_z[OPS_ACC0(0,0,0)] = vol_flux_z[OPS_ACC0(0,2,0)]; if(fields[FIELD_MASS_FLUX_Z] == 1) mass_flux_z[OPS_ACC1(0,0,0)] = mass_flux_z[OPS_ACC1(0,2,0)]; } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_update_halo_kernel5_plus_2_a( double* __restrict arg0, double* __restrict arg1, const int* __restrict arg2, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 + idx_y * 1 * xdim0_update_halo_kernel5_plus_2_a + idx_z * 1 * xdim0_update_halo_kernel5_plus_2_a * ydim0_update_halo_kernel5_plus_2_a; arg1 += idx_x * 1 + idx_y * 1 * xdim1_update_halo_kernel5_plus_2_a + idx_z * 1 * xdim1_update_halo_kernel5_plus_2_a * ydim1_update_halo_kernel5_plus_2_a; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { update_halo_kernel5_plus_2_a(arg0, arg1, arg2); } } // host stub function void ops_par_loop_update_halo_kernel5_plus_2_a(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { ops_arg args[3] = { arg0, arg1, arg2}; ops_timing_realloc(114,"update_halo_kernel5_plus_2_a"); OPS_kernels[114].count++; //compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<3; n++ ){ start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else //OPS_MPI for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif //OPS_MPI int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); int xdim0 = args[0].dat->size[0]*args[0].dat->dim; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]*args[1].dat->dim; int ydim1 = args[1].dat->size[1]; //Timing double t1,t2,c1,c2; ops_timers_core(&c2,&t2); if (xdim0 != xdim0_update_halo_kernel5_plus_2_a_h || ydim0 != ydim0_update_halo_kernel5_plus_2_a_h || xdim1 != xdim1_update_halo_kernel5_plus_2_a_h || ydim1 != ydim1_update_halo_kernel5_plus_2_a_h) { cudaMemcpyToSymbol( xdim0_update_halo_kernel5_plus_2_a, &xdim0, sizeof(int) ); 
xdim0_update_halo_kernel5_plus_2_a_h = xdim0; cudaMemcpyToSymbol( ydim0_update_halo_kernel5_plus_2_a, &ydim0, sizeof(int) ); ydim0_update_halo_kernel5_plus_2_a_h = ydim0; cudaMemcpyToSymbol( xdim1_update_halo_kernel5_plus_2_a, &xdim1, sizeof(int) ); xdim1_update_halo_kernel5_plus_2_a_h = xdim1; cudaMemcpyToSymbol( ydim1_update_halo_kernel5_plus_2_a, &ydim1, sizeof(int) ); ydim1_update_halo_kernel5_plus_2_a_h = ydim1; } int *arg2h = (int *)arg2.data; dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x,OPS_block_size_y,1); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; char *p_a[3]; //set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif //OPS_MPI int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif //OPS_MPI int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args,3,range); ops_timers_core(&c1,&t1); OPS_kernels[114].mpi_time += t1-t2; //call kernel wrapper function, passing in pointers to data ops_update_halo_kernel5_plus_2_a<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d,x_size, y_size, z_size); if (OPS_diags>1) { cutilSafeCall(cudaDeviceSynchronize()); } ops_timers_core(&c2,&t2); OPS_kernels[114].time += t2-t1; ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[1],range); //Update kernel record OPS_kernels[114].transfer += ops_compute_transfer(dim, range, &arg0); OPS_kernels[114].transfer += ops_compute_transfer(dim, range, &arg1); }
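Comparing the two launch sites in this pair shows the mechanical rewrite hipify applies to kernel launches: the CUDA triple-chevron configuration becomes a hipLaunchKernelGGL call in which the dynamic shared-memory size and the stream, both left implicit in <<<...>>> here, appear as explicit 0, 0 arguments. A hedged sketch with a hypothetical kernel k, not taken from the file:

__global__ void k(const double* a, double* b, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) b[i] = a[i];
}

void launch(const double* a, double* b, int n, dim3 grid, dim3 tblock) {
  // CUDA spelling (as in the .cu stub):  k<<<grid, tblock>>>(a, b, n);
  // HIP spelling produced by hipify: shared-memory bytes and stream become explicit 0, 0
  hipLaunchKernelGGL(k, grid, tblock, 0, 0, a, b, n);
}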
09e724a467aa8154bf3bfcd799bce95afb4fd353.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <wb.h> #define wbCheck(stmt) \ do { \ hipError_t err = stmt; \ if (err != hipSuccess) { \ wbLog(ERROR, "CUDA error: ", hipGetErrorString(err)); \ wbLog(ERROR, "Failed to run stmt ", #stmt); \ return -1; \ } \ } while (0) //@@ Define any useful program-wide constants here #define MASK_WIDTH 3 #define TILE_WIDTH 4 //@@ Define constant memory for device kernel here __constant__ float Mc[MASK_WIDTH][MASK_WIDTH][MASK_WIDTH]; __global__ void conv3d(float *input, float *output, const int z_size, const int y_size, const int x_size) { //@@ Insert kernel code here const int N_width = TILE_WIDTH + MASK_WIDTH - 1; __shared__ float N[N_width][N_width][N_width]; int tx = threadIdx.x; int ty = threadIdx.y; int tz = threadIdx.z; int bx = blockIdx.x; int by = blockIdx.y; int bz = blockIdx.z; int col_o = bx * TILE_WIDTH + tx; int row_o = by * TILE_WIDTH + ty; int hei_o = bz * TILE_WIDTH + tz; int col_i = col_o - (MASK_WIDTH-1)/2; int row_i = row_o - (MASK_WIDTH-1)/2; int hei_i = hei_o - (MASK_WIDTH-1)/2; if (col_i >= 0 && col_i < x_size && row_i >= 0 && row_i < y_size && hei_i >= 0 && hei_i < z_size) { int intput_index = hei_i*x_size*y_size + row_i*x_size + col_i; N[tz][ty][tx] = input[intput_index]; } else { N[tz][ty][tx] = 0.0f; } __syncthreads(); float sum = 0.0f; if (tz < TILE_WIDTH && ty < TILE_WIDTH && tx < TILE_WIDTH) { for (int i = 0; i < MASK_WIDTH; i++) { for (int j = 0; j < MASK_WIDTH; j++) { for (int k = 0; k < MASK_WIDTH; k++) { sum += Mc[i][j][k] * N[i+tz][j+ty][tx+k]; } } } if (hei_o < z_size && row_o < y_size && col_o < x_size) { output[hei_o*y_size*x_size + row_o*x_size + col_o] = sum; } } } int main(int argc, char *argv[]) { wbArg_t args; int z_size; int y_size; int x_size; int inputLength, kernelLength; float *hostInput; float *hostKernel; float *hostOutput; float *deviceInput; float *deviceOutput; args = wbArg_read(argc, argv); // Import data hostInput = (float *)wbImport(wbArg_getInputFile(args, 0), &inputLength); hostKernel = (float *)wbImport(wbArg_getInputFile(args, 1), &kernelLength); hostOutput = (float *)malloc(inputLength * sizeof(float)); // First three elements are the input dimensions z_size = hostInput[0]; y_size = hostInput[1]; x_size = hostInput[2]; wbLog(TRACE, "The input size is ", z_size, "x", y_size, "x", x_size); assert(z_size * y_size * x_size == inputLength - 3); assert(kernelLength == 27); wbTime_start(GPU, "Doing GPU Computation (memory + compute)"); wbTime_start(GPU, "Doing GPU memory allocation"); //@@ Allocate GPU memory here int size = (inputLength - 3) * sizeof(float); hipMalloc((void **) &deviceInput, size); hipMalloc((void **) &deviceOutput, size); // Recall that inputLength is 3 elements longer than the input data // because the first three elements were the dimensions wbTime_stop(GPU, "Doing GPU memory allocation"); wbTime_start(Copy, "Copying data to the GPU"); //@@ Copy input and kernel to GPU here hipMemcpy(deviceInput, &hostInput[3], size, hipMemcpyHostToDevice); hipMemcpyToSymbol(Mc, hostKernel, kernelLength*sizeof(float)); // Recall that the first three elements of hostInput are dimensions and // do // not need to be copied to the gpu wbTime_stop(Copy, "Copying data to the GPU"); wbTime_start(Compute, "Doing the computation on the GPU"); //@@ Initialize grid and block dimensions here dim3 DimBlock(TILE_WIDTH+MASK_WIDTH-1, TILE_WIDTH+MASK_WIDTH-1, TILE_WIDTH+MASK_WIDTH-1); dim3 DimGrid( ceil(x_size/(1.0 * TILE_WIDTH)), ceil(y_size/(1.0 * TILE_WIDTH)), 
ceil(z_size/(1.0 * TILE_WIDTH))); //@@ Launch the GPU kernel here hipLaunchKernelGGL(( conv3d), dim3(DimGrid), dim3(DimBlock), 0, 0, deviceInput, deviceOutput, z_size, y_size, x_size); hipDeviceSynchronize(); wbTime_stop(Compute, "Doing the computation on the GPU"); wbTime_start(Copy, "Copying data from the GPU"); //@@ Copy the device memory back to the host here hipMemcpy(&hostOutput[3], deviceOutput, size, hipMemcpyDeviceToHost); // Recall that the first three elements of the output are the dimensions // and should not be set here (they are set below) wbTime_stop(Copy, "Copying data from the GPU"); wbTime_stop(GPU, "Doing GPU Computation (memory + compute)"); // Set the output dimensions for correctness checking hostOutput[0] = z_size; hostOutput[1] = y_size; hostOutput[2] = x_size; wbSolution(args, hostOutput, inputLength); // Free device memory hipFree(deviceInput); hipFree(deviceOutput); // Free host memory free(hostInput); free(hostOutput); return 0; }
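With the constants defined in this file (MASK_WIDTH = 3, TILE_WIDTH = 4), each block is launched with (4 + 3 - 1)^3 = 216 threads so that every element of the haloed shared tile has a loader thread, while the grid only has to cover the output volume in 4-wide tiles; the 6*6*6 float tile costs 864 bytes of shared memory per block. A small helper showing the equivalent integer grid sizing (convGrid is illustrative, not from the file):

static inline dim3 convGrid(int x_size, int y_size, int z_size, int tile) {
  // integer ceiling division, same result as the ceil(x_size/(1.0*TILE_WIDTH)) calls above
  return dim3((x_size + tile - 1) / tile,
              (y_size + tile - 1) / tile,
              (z_size + tile - 1) / tile);
}
// Usage sketch:
//   dim3 grid = convGrid(x_size, y_size, z_size, TILE_WIDTH);
//   dim3 block(TILE_WIDTH + MASK_WIDTH - 1,   // 6 x 6 x 6 = 216 threads
//              TILE_WIDTH + MASK_WIDTH - 1,
//              TILE_WIDTH + MASK_WIDTH - 1);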
09e724a467aa8154bf3bfcd799bce95afb4fd353.cu
#include <wb.h> #define wbCheck(stmt) \ do { \ cudaError_t err = stmt; \ if (err != cudaSuccess) { \ wbLog(ERROR, "CUDA error: ", cudaGetErrorString(err)); \ wbLog(ERROR, "Failed to run stmt ", #stmt); \ return -1; \ } \ } while (0) //@@ Define any useful program-wide constants here #define MASK_WIDTH 3 #define TILE_WIDTH 4 //@@ Define constant memory for device kernel here __constant__ float Mc[MASK_WIDTH][MASK_WIDTH][MASK_WIDTH]; __global__ void conv3d(float *input, float *output, const int z_size, const int y_size, const int x_size) { //@@ Insert kernel code here const int N_width = TILE_WIDTH + MASK_WIDTH - 1; __shared__ float N[N_width][N_width][N_width]; int tx = threadIdx.x; int ty = threadIdx.y; int tz = threadIdx.z; int bx = blockIdx.x; int by = blockIdx.y; int bz = blockIdx.z; int col_o = bx * TILE_WIDTH + tx; int row_o = by * TILE_WIDTH + ty; int hei_o = bz * TILE_WIDTH + tz; int col_i = col_o - (MASK_WIDTH-1)/2; int row_i = row_o - (MASK_WIDTH-1)/2; int hei_i = hei_o - (MASK_WIDTH-1)/2; if (col_i >= 0 && col_i < x_size && row_i >= 0 && row_i < y_size && hei_i >= 0 && hei_i < z_size) { int intput_index = hei_i*x_size*y_size + row_i*x_size + col_i; N[tz][ty][tx] = input[intput_index]; } else { N[tz][ty][tx] = 0.0f; } __syncthreads(); float sum = 0.0f; if (tz < TILE_WIDTH && ty < TILE_WIDTH && tx < TILE_WIDTH) { for (int i = 0; i < MASK_WIDTH; i++) { for (int j = 0; j < MASK_WIDTH; j++) { for (int k = 0; k < MASK_WIDTH; k++) { sum += Mc[i][j][k] * N[i+tz][j+ty][tx+k]; } } } if (hei_o < z_size && row_o < y_size && col_o < x_size) { output[hei_o*y_size*x_size + row_o*x_size + col_o] = sum; } } } int main(int argc, char *argv[]) { wbArg_t args; int z_size; int y_size; int x_size; int inputLength, kernelLength; float *hostInput; float *hostKernel; float *hostOutput; float *deviceInput; float *deviceOutput; args = wbArg_read(argc, argv); // Import data hostInput = (float *)wbImport(wbArg_getInputFile(args, 0), &inputLength); hostKernel = (float *)wbImport(wbArg_getInputFile(args, 1), &kernelLength); hostOutput = (float *)malloc(inputLength * sizeof(float)); // First three elements are the input dimensions z_size = hostInput[0]; y_size = hostInput[1]; x_size = hostInput[2]; wbLog(TRACE, "The input size is ", z_size, "x", y_size, "x", x_size); assert(z_size * y_size * x_size == inputLength - 3); assert(kernelLength == 27); wbTime_start(GPU, "Doing GPU Computation (memory + compute)"); wbTime_start(GPU, "Doing GPU memory allocation"); //@@ Allocate GPU memory here int size = (inputLength - 3) * sizeof(float); cudaMalloc((void **) &deviceInput, size); cudaMalloc((void **) &deviceOutput, size); // Recall that inputLength is 3 elements longer than the input data // because the first three elements were the dimensions wbTime_stop(GPU, "Doing GPU memory allocation"); wbTime_start(Copy, "Copying data to the GPU"); //@@ Copy input and kernel to GPU here cudaMemcpy(deviceInput, &hostInput[3], size, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(Mc, hostKernel, kernelLength*sizeof(float)); // Recall that the first three elements of hostInput are dimensions and // do // not need to be copied to the gpu wbTime_stop(Copy, "Copying data to the GPU"); wbTime_start(Compute, "Doing the computation on the GPU"); //@@ Initialize grid and block dimensions here dim3 DimBlock(TILE_WIDTH+MASK_WIDTH-1, TILE_WIDTH+MASK_WIDTH-1, TILE_WIDTH+MASK_WIDTH-1); dim3 DimGrid( ceil(x_size/(1.0 * TILE_WIDTH)), ceil(y_size/(1.0 * TILE_WIDTH)), ceil(z_size/(1.0 * TILE_WIDTH))); //@@ Launch the GPU kernel here conv3d<<<DimGrid, 
DimBlock>>>(deviceInput, deviceOutput, z_size, y_size, x_size); cudaDeviceSynchronize(); wbTime_stop(Compute, "Doing the computation on the GPU"); wbTime_start(Copy, "Copying data from the GPU"); //@@ Copy the device memory back to the host here cudaMemcpy(&hostOutput[3], deviceOutput, size, cudaMemcpyDeviceToHost); // Recall that the first three elements of the output are the dimensions // and should not be set here (they are set below) wbTime_stop(Copy, "Copying data from the GPU"); wbTime_stop(GPU, "Doing GPU Computation (memory + compute)"); // Set the output dimensions for correctness checking hostOutput[0] = z_size; hostOutput[1] = y_size; hostOutput[2] = x_size; wbSolution(args, hostOutput, inputLength); // Free device memory cudaFree(deviceInput); cudaFree(deviceOutput); // Free host memory free(hostInput); free(hostOutput); return 0; }
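The conv3d kernel in this pair is the standard tiled convolution: the whole block stages a haloed input tile in shared memory, synchronizes, and then only the inner TILE_WIDTH^3 threads compute outputs from the tile, reading the mask from constant memory. The same structure reduced to one dimension, as a self-contained sketch (conv1d_tiled is hypothetical and not part of the file):

#define MASK1 3
#define TILE1 4
__constant__ float Mc1[MASK1];

__global__ void conv1d_tiled(const float* in, float* out, int n) {
  __shared__ float tile[TILE1 + MASK1 - 1];
  int out_i = blockIdx.x * TILE1 + threadIdx.x;     // element this thread would write
  int in_i  = out_i - (MASK1 - 1) / 2;              // shifted left by the halo radius
  tile[threadIdx.x] = (in_i >= 0 && in_i < n) ? in[in_i] : 0.0f;  // zero-pad out of range
  __syncthreads();
  if (threadIdx.x < TILE1 && out_i < n) {           // only the inner threads produce output
    float s = 0.0f;
    for (int k = 0; k < MASK1; ++k) s += Mc1[k] * tile[threadIdx.x + k];
    out[out_i] = s;
  }
}
// Launch with blockDim.x = TILE1 + MASK1 - 1 and gridDim.x = ceil(n / (float)TILE1).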
a5f84dd50d2794a2cdcd2dfd98b99883691a9226.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <string.h> #include <time.h> void sumArraysOnHost(float *A, float *B, float *C, const int N) { for(int idx = 0; idx < N; idx++) { C[idx] = A[idx] + B[idx]; } } __global__ void sumArraysOnDevice(float *a, float *b, float *c, const int N) { int tID = blockIdx.x; printf("blockIdx.x = %d\n", blockIdx.x); // if (tID < N) { c[tID] = a[tID] + b[tID]; printf("tID = %d\n", tID); // } } void initialData(float *ip, int size) { time_t t; srand((unsigned int) time(&t)); for(int i = 0; i < size; i++) { ip[i] = (float) (rand() & 0xFF) / 10.0f; } } int main(int argc, char **argv) { int nElem = 1024; size_t nBytes = nElem * sizeof(float); float *h_A, *h_B, *h_C; h_A = (float *)malloc(nBytes); h_B = (float *)malloc(nBytes); h_C = (float *)malloc(nBytes); float *d_A, *d_B, *d_C; hipMalloc((float **) &d_A, nBytes); hipMalloc((float **) &d_B, nBytes); hipMalloc((float **) &d_C, nBytes); initialData(h_A, nElem); initialData(h_B, nElem); hipMemcpy(d_A, h_A, nBytes, hipMemcpyHostToDevice); hipMemcpy(d_B, h_B, nBytes, hipMemcpyHostToDevice); // sumArraysOnHost(h_A, h_B, h_C, nElem); hipLaunchKernelGGL(( sumArraysOnDevice), dim3(32),dim3(32), 0, 0, d_A, d_B, d_C, nElem); hipMemcpy(h_C, d_C, nBytes, hipMemcpyDeviceToHost); free(h_A); free(h_B); free(h_C); hipFree(d_A); hipFree(d_B); hipFree(d_C); return 0; }
a5f84dd50d2794a2cdcd2dfd98b99883691a9226.cu
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <time.h> void sumArraysOnHost(float *A, float *B, float *C, const int N) { for(int idx = 0; idx < N; idx++) { C[idx] = A[idx] + B[idx]; } } __global__ void sumArraysOnDevice(float *a, float *b, float *c, const int N) { int tID = blockIdx.x; printf("blockIdx.x = %d\n", blockIdx.x); // if (tID < N) { c[tID] = a[tID] + b[tID]; printf("tID = %d\n", tID); // } } void initialData(float *ip, int size) { time_t t; srand((unsigned int) time(&t)); for(int i = 0; i < size; i++) { ip[i] = (float) (rand() & 0xFF) / 10.0f; } } int main(int argc, char **argv) { int nElem = 1024; size_t nBytes = nElem * sizeof(float); float *h_A, *h_B, *h_C; h_A = (float *)malloc(nBytes); h_B = (float *)malloc(nBytes); h_C = (float *)malloc(nBytes); float *d_A, *d_B, *d_C; cudaMalloc((float **) &d_A, nBytes); cudaMalloc((float **) &d_B, nBytes); cudaMalloc((float **) &d_C, nBytes); initialData(h_A, nElem); initialData(h_B, nElem); cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice); cudaMemcpy(d_B, h_B, nBytes, cudaMemcpyHostToDevice); // sumArraysOnHost(h_A, h_B, h_C, nElem); sumArraysOnDevice<<<32,32>>>(d_A, d_B, d_C, nElem); cudaMemcpy(h_C, d_C, nBytes, cudaMemcpyDeviceToHost); free(h_A); free(h_B); free(h_C); cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); return 0; }
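sumArraysOnDevice in this pair indexes purely by blockIdx.x, so with the <<<32,32>>> launch every thread of block b writes c[b]: only elements 0..31 of the 1024-element arrays are summed, each redundantly by 32 threads, and the bounds check is commented out. The conventional one-element-per-thread form, as a sketch (sumArraysIndexed is illustrative, not from the file):

__global__ void sumArraysIndexed(const float* a, const float* b, float* c, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;  // unique global element index
  if (i < n) c[i] = a[i] + b[i];                  // guard the partially filled last block
}
// Cover all nElem elements, e.g. with 256-thread blocks:
//   CUDA: sumArraysIndexed<<<(nElem + 255) / 256, 256>>>(d_A, d_B, d_C, nElem);
//   HIP : hipLaunchKernelGGL(sumArraysIndexed, dim3((nElem + 255) / 256), dim3(256),
//                            0, 0, d_A, d_B, d_C, nElem);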
5455c7ae464879b17ae094c09f5751512614bc47.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * @File nbody.cu * * Implementation of the N-Body problem * * Paraleln programovn na GPU (PCG 2020) * Projekt c. 1 (cuda) * Login: xpavel34 */ #include <cmath> #include <cfloat> #include "nbody.h" /** * CUDA kernel to calculate gravitation velocity * @param p - particles * @param tmp_vel - temp array for velocities * @param N - Number of particles * @param dt - Size of the time step */ __global__ void calculate_gravitation_velocity(t_particles p, t_velocities tmp_vel, int N, float dt) { for (unsigned int gID = blockIdx.x * blockDim.x + threadIdx.x; gID < N; gID += blockDim.x * gridDim.x) { float dx, dy, dz; float accVelocityX = 0; float accVelocityY = 0; float accVelocityZ = 0; float p1_x = p.positionsX[gID]; float p1_y = p.positionsY[gID]; float p1_z = p.positionsZ[gID]; float p1_weight = p.weights[gID]; for (int particleIdx = 0; particleIdx < N; particleIdx++) { dx = p1_x - p.positionsX[particleIdx]; dy = p1_y - p.positionsY[particleIdx]; dz = p1_z - p.positionsZ[particleIdx]; float rr = dx * dx + dy * dy + dz * dz; float r = sqrt(rr); float F = -G * p1_weight * p.weights[particleIdx] / (rr + FLT_MIN); float dtw = dt / p1_weight; bool notColliding = r > COLLISION_DISTANCE; accVelocityX += notColliding ? (F * dx / (r + FLT_MIN)) * dtw : 0.0f; accVelocityY += notColliding ? (F * dy / (r + FLT_MIN)) * dtw : 0.0f; accVelocityZ += notColliding ? (F * dz / (r + FLT_MIN)) * dtw : 0.0f; } tmp_vel.directionX[gID] += accVelocityX; tmp_vel.directionY[gID] += accVelocityY; tmp_vel.directionZ[gID] += accVelocityZ; } }// end of calculate_gravitation_velocity //---------------------------------------------------------------------------------------------------------------------- /** * CUDA kernel to calculate collision velocity * @param p - particles * @param tmp_vel - temp array for velocities * @param N - Number of particles * @param dt - Size of the time step */ __global__ void calculate_collision_velocity(t_particles p, t_velocities tmp_vel, int N, float dt) { for (unsigned int gID = blockIdx.x * blockDim.x + threadIdx.x; gID < N; gID += blockDim.x * gridDim.x) { // Use accumulators to avoid unnecessary global memory access between iterations float accVelocityX = 0; float accVelocityY = 0; float accVelocityZ = 0; float p1_x = p.positionsX[gID]; float p1_y = p.positionsY[gID]; float p1_z = p.positionsZ[gID]; float p1_vel_x = p.velocitiesX[gID]; float p1_vel_y = p.velocitiesY[gID]; float p1_vel_z = p.velocitiesZ[gID]; float p1_weight = p.weights[gID]; for (int particleIdx = 0; particleIdx < N; particleIdx++) { float p2_vel_x = p.velocitiesX[particleIdx]; float p2_vel_y = p.velocitiesY[particleIdx]; float p2_vel_z = p.velocitiesZ[particleIdx]; float p2_weight = p.weights[particleIdx]; float dx = p1_x - p.positionsX[particleIdx]; float dy = p1_y - p.positionsY[particleIdx]; float dz = p1_z - p.positionsZ[particleIdx]; float rr = dx*dx + dy*dy + dz*dz; float r = sqrtf(rr); // Use temp variables to reduce redundant computations float weightSum = p1_weight + p2_weight; float weightDiff = p1_weight - p2_weight; float p2_w2 = 2 * p2_weight; // p1_weight * p1_vel_x - p2_weight * p1_vel_x --> p1_vel_x * (p1_weight - p2_weight) // --> p1_vel_x * (weightDiff) bool colliding = r > 0.0f && r < COLLISION_DISTANCE; accVelocityX += colliding ? ((p1_vel_x * weightDiff + p2_w2 * p2_vel_x) / weightSum) - p1_vel_x : 0.0f; accVelocityY += colliding ? 
((p1_vel_y * weightDiff + p2_w2 * p2_vel_y) / weightSum) - p1_vel_y : 0.0f; accVelocityZ += colliding ? ((p1_vel_z * weightDiff + p2_w2 * p2_vel_z) / weightSum) - p1_vel_z : 0.0f; } tmp_vel.directionX[gID] += accVelocityX; tmp_vel.directionY[gID] += accVelocityY; tmp_vel.directionZ[gID] += accVelocityZ; } }// end of calculate_collision_velocity //---------------------------------------------------------------------------------------------------------------------- /** * CUDA kernel to update particles * @param p - particles * @param tmp_vel - temp array for velocities * @param N - Number of particles * @param dt - Size of the time step */ __global__ void update_particle(t_particles p, t_velocities tmp_vel, int N, float dt) { for (unsigned int gID = blockIdx.x * blockDim.x + threadIdx.x; gID < N; gID += blockDim.x * gridDim.x) { p.velocitiesX[gID] += tmp_vel.directionX[gID]; p.positionsX[gID] += p.velocitiesX[gID] * dt; p.velocitiesY[gID] += tmp_vel.directionY[gID]; p.positionsY[gID] += p.velocitiesY[gID] * dt; p.velocitiesZ[gID] += tmp_vel.directionZ[gID]; p.positionsZ[gID] += p.velocitiesZ[gID] * dt; } }// end of update_particle //---------------------------------------------------------------------------------------------------------------------- /** * CUDA kernel to update particles * @param p - particles * @param comX - pointer to a center of mass position in X * @param comY - pointer to a center of mass position in Y * @param comZ - pointer to a center of mass position in Z * @param comW - pointer to a center of mass weight * @param lock - pointer to a user-implemented lock * @param N - Number of particles */ __global__ void centerOfMass(t_particles p, float *comX, float *comY, float *comZ, float *comW, int *lock, const int N) { }// end of centerOfMass //---------------------------------------------------------------------------------------------------------------------- /** * CPU implementation of the Center of Mass calculation * @param particles - All particles in the system * @param N - Number of particles */ __host__ float4 centerOfMassCPU(MemDesc &memDesc) { float4 com = {0, 0, 0, 0}; for (int i = 0; i < memDesc.getDataSize(); i++) { // Calculate the vector on the line connecting points and most recent position of center-of-mass const float dx = memDesc.getPosX(i) - com.x; const float dy = memDesc.getPosY(i) - com.y; const float dz = memDesc.getPosZ(i) - com.z; // Calculate weight ratio only if at least one particle isn't massless const float dw = ((memDesc.getWeight(i) + com.w) > 0.0f) ? (memDesc.getWeight(i) / (memDesc.getWeight(i) + com.w)) : 0.0f; // Update position and weight of the center-of-mass according to the weight ration and vector com.x += dx * dw; com.y += dy * dw; com.z += dz * dw; com.w += memDesc.getWeight(i); } return com; }// enf of centerOfMassCPU //----------------------------------------------------------------------------------------------------------------------
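All three kernels in this file share the same loop header, the grid-stride idiom: each thread starts at its global index and advances by the total number of launched threads, so a grid of any size covers any N. A stand-alone sketch of just that pattern (the scale kernel is hypothetical):

__global__ void scale(float* x, unsigned int n, float s) {
  for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
       i < n;
       i += blockDim.x * gridDim.x) {   // stride = total threads in the grid
    x[i] *= s;                          // each thread may process several elements
  }
}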
5455c7ae464879b17ae094c09f5751512614bc47.cu
/** * @File nbody.cu * * Implementation of the N-Body problem * * Paralelní programování na GPU (PCG 2020) * Projekt c. 1 (cuda) * Login: xpavel34 */ #include <cmath> #include <cfloat> #include "nbody.h" /** * CUDA kernel to calculate gravitation velocity * @param p - particles * @param tmp_vel - temp array for velocities * @param N - Number of particles * @param dt - Size of the time step */ __global__ void calculate_gravitation_velocity(t_particles p, t_velocities tmp_vel, int N, float dt) { for (unsigned int gID = blockIdx.x * blockDim.x + threadIdx.x; gID < N; gID += blockDim.x * gridDim.x) { float dx, dy, dz; float accVelocityX = 0; float accVelocityY = 0; float accVelocityZ = 0; float p1_x = p.positionsX[gID]; float p1_y = p.positionsY[gID]; float p1_z = p.positionsZ[gID]; float p1_weight = p.weights[gID]; for (int particleIdx = 0; particleIdx < N; particleIdx++) { dx = p1_x - p.positionsX[particleIdx]; dy = p1_y - p.positionsY[particleIdx]; dz = p1_z - p.positionsZ[particleIdx]; float rr = dx * dx + dy * dy + dz * dz; float r = sqrt(rr); float F = -G * p1_weight * p.weights[particleIdx] / (rr + FLT_MIN); float dtw = dt / p1_weight; bool notColliding = r > COLLISION_DISTANCE; accVelocityX += notColliding ? (F * dx / (r + FLT_MIN)) * dtw : 0.0f; accVelocityY += notColliding ? (F * dy / (r + FLT_MIN)) * dtw : 0.0f; accVelocityZ += notColliding ? (F * dz / (r + FLT_MIN)) * dtw : 0.0f; } tmp_vel.directionX[gID] += accVelocityX; tmp_vel.directionY[gID] += accVelocityY; tmp_vel.directionZ[gID] += accVelocityZ; } }// end of calculate_gravitation_velocity //---------------------------------------------------------------------------------------------------------------------- /** * CUDA kernel to calculate collision velocity * @param p - particles * @param tmp_vel - temp array for velocities * @param N - Number of particles * @param dt - Size of the time step */ __global__ void calculate_collision_velocity(t_particles p, t_velocities tmp_vel, int N, float dt) { for (unsigned int gID = blockIdx.x * blockDim.x + threadIdx.x; gID < N; gID += blockDim.x * gridDim.x) { // Use accumulators to avoid unnecessary global memory access between iterations float accVelocityX = 0; float accVelocityY = 0; float accVelocityZ = 0; float p1_x = p.positionsX[gID]; float p1_y = p.positionsY[gID]; float p1_z = p.positionsZ[gID]; float p1_vel_x = p.velocitiesX[gID]; float p1_vel_y = p.velocitiesY[gID]; float p1_vel_z = p.velocitiesZ[gID]; float p1_weight = p.weights[gID]; for (int particleIdx = 0; particleIdx < N; particleIdx++) { float p2_vel_x = p.velocitiesX[particleIdx]; float p2_vel_y = p.velocitiesY[particleIdx]; float p2_vel_z = p.velocitiesZ[particleIdx]; float p2_weight = p.weights[particleIdx]; float dx = p1_x - p.positionsX[particleIdx]; float dy = p1_y - p.positionsY[particleIdx]; float dz = p1_z - p.positionsZ[particleIdx]; float rr = dx*dx + dy*dy + dz*dz; float r = sqrtf(rr); // Use temp variables to reduce redundant computations float weightSum = p1_weight + p2_weight; float weightDiff = p1_weight - p2_weight; float p2_w2 = 2 * p2_weight; // p1_weight * p1_vel_x - p2_weight * p1_vel_x --> p1_vel_x * (p1_weight - p2_weight) // --> p1_vel_x * (weightDiff) bool colliding = r > 0.0f && r < COLLISION_DISTANCE; accVelocityX += colliding ? ((p1_vel_x * weightDiff + p2_w2 * p2_vel_x) / weightSum) - p1_vel_x : 0.0f; accVelocityY += colliding ? ((p1_vel_y * weightDiff + p2_w2 * p2_vel_y) / weightSum) - p1_vel_y : 0.0f; accVelocityZ += colliding ? 
((p1_vel_z * weightDiff + p2_w2 * p2_vel_z) / weightSum) - p1_vel_z : 0.0f; } tmp_vel.directionX[gID] += accVelocityX; tmp_vel.directionY[gID] += accVelocityY; tmp_vel.directionZ[gID] += accVelocityZ; } }// end of calculate_collision_velocity //---------------------------------------------------------------------------------------------------------------------- /** * CUDA kernel to update particles * @param p - particles * @param tmp_vel - temp array for velocities * @param N - Number of particles * @param dt - Size of the time step */ __global__ void update_particle(t_particles p, t_velocities tmp_vel, int N, float dt) { for (unsigned int gID = blockIdx.x * blockDim.x + threadIdx.x; gID < N; gID += blockDim.x * gridDim.x) { p.velocitiesX[gID] += tmp_vel.directionX[gID]; p.positionsX[gID] += p.velocitiesX[gID] * dt; p.velocitiesY[gID] += tmp_vel.directionY[gID]; p.positionsY[gID] += p.velocitiesY[gID] * dt; p.velocitiesZ[gID] += tmp_vel.directionZ[gID]; p.positionsZ[gID] += p.velocitiesZ[gID] * dt; } }// end of update_particle //---------------------------------------------------------------------------------------------------------------------- /** * CUDA kernel to update particles * @param p - particles * @param comX - pointer to a center of mass position in X * @param comY - pointer to a center of mass position in Y * @param comZ - pointer to a center of mass position in Z * @param comW - pointer to a center of mass weight * @param lock - pointer to a user-implemented lock * @param N - Number of particles */ __global__ void centerOfMass(t_particles p, float *comX, float *comY, float *comZ, float *comW, int *lock, const int N) { }// end of centerOfMass //---------------------------------------------------------------------------------------------------------------------- /** * CPU implementation of the Center of Mass calculation * @param particles - All particles in the system * @param N - Number of particles */ __host__ float4 centerOfMassCPU(MemDesc &memDesc) { float4 com = {0, 0, 0, 0}; for (int i = 0; i < memDesc.getDataSize(); i++) { // Calculate the vector on the line connecting points and most recent position of center-of-mass const float dx = memDesc.getPosX(i) - com.x; const float dy = memDesc.getPosY(i) - com.y; const float dz = memDesc.getPosZ(i) - com.z; // Calculate weight ratio only if at least one particle isn't massless const float dw = ((memDesc.getWeight(i) + com.w) > 0.0f) ? (memDesc.getWeight(i) / (memDesc.getWeight(i) + com.w)) : 0.0f; // Update position and weight of the center-of-mass according to the weight ration and vector com.x += dx * dw; com.y += dy * dw; com.z += dz * dw; com.w += memDesc.getWeight(i); } return com; }// enf of centerOfMassCPU //----------------------------------------------------------------------------------------------------------------------
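The device centerOfMass kernel is left empty in both versions; its lock parameter suggests the intended solution serializes a running update like centerOfMassCPU. A different and simpler way to reach the same result is to accumulate weighted sums with atomicAdd and normalize afterwards; the following is only a sketch of that alternative, not the assignment's lock-based approach, and sumX/sumY/sumZ/sumW are hypothetical zero-initialized device scalars:

__global__ void centerOfMassSums(t_particles p, float* sumX, float* sumY,
                                 float* sumZ, float* sumW, const int N) {
  for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
       i < N; i += blockDim.x * gridDim.x) {
    const float w = p.weights[i];
    atomicAdd(sumX, p.positionsX[i] * w);  // sum of w * x
    atomicAdd(sumY, p.positionsY[i] * w);
    atomicAdd(sumZ, p.positionsZ[i] * w);
    atomicAdd(sumW, w);                    // total mass
  }
}
// Host side: com = { sumX/sumW, sumY/sumW, sumZ/sumW, sumW } (guarding sumW == 0),
// which equals the incremental weighted mean computed by centerOfMassCPU.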
f7c1d535557575545bcea1472c16ea5db2e0c314.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <cfloat> #include <vector> #include "caffe/layers/pooling_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void MaxPoolForward(const int nthreads, const Dtype* const bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* const top_data, int* mask, Dtype* top_mask) { CUDA_KERNEL_LOOP(index, nthreads) { const int pw = index % pooled_width; const int ph = (index / pooled_width) % pooled_height; const int c = (index / pooled_width / pooled_height) % channels; const int n = index / pooled_width / pooled_height / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; const int hend = min(hstart + kernel_h, height); const int wend = min(wstart + kernel_w, width); hstart = max(hstart, 0); wstart = max(wstart, 0); Dtype maxval = -FLT_MAX; int maxidx = -1; const Dtype* const bottom_slice = bottom_data + (n * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { if (bottom_slice[h * width + w] > maxval) { maxidx = h * width + w; maxval = bottom_slice[maxidx]; } } } top_data[index] = maxval; if (mask) { mask[index] = maxidx; } else { top_mask[index] = maxidx; } } } template <typename Dtype> __global__ void SamplePoolForward(const int nthreads, const Dtype* const bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* const top_data, int* mask, Dtype* top_mask) { CUDA_KERNEL_LOOP(index, nthreads) { const int pw = index % pooled_width; const int ph = (index / pooled_width) % pooled_height; const int c = (index / pooled_width / pooled_height) % channels; const int n = index / pooled_width / pooled_height / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; hstart = max(hstart, 0); wstart = max(wstart, 0); const Dtype* const bottom_slice = bottom_data + (n * channels + c) * height * width; int sampleidx = hstart * width + wstart; Dtype sampleval = bottom_slice[sampleidx]; top_data[index] = sampleval; if (mask) { mask[index] = sampleidx; } else { top_mask[index] = sampleidx; } } } template <typename Dtype> __global__ void AvePoolForward(const int nthreads, const Dtype* const bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* const top_data) { CUDA_KERNEL_LOOP(index, nthreads) { const int pw = index % pooled_width; const int ph = (index / pooled_width) % pooled_height; const int c = (index / pooled_width / pooled_height) % channels; const int n = index / pooled_width / pooled_height / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + kernel_h, height + pad_h); int wend = min(wstart + kernel_w, width + pad_w); const int pool_size = (hend - hstart) * (wend - wstart); hstart = max(hstart, 0); wstart = max(wstart, 0); hend = min(hend, height); wend = min(wend, width); Dtype aveval = 0; 
const Dtype* const bottom_slice = bottom_data + (n * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { aveval += bottom_slice[h * width + w]; } } top_data[index] = aveval / pool_size; } } template <typename Dtype> __global__ void StoPoolForwardTrain(const int nthreads, const Dtype* const bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, Dtype* const rand_idx, Dtype* const top_data) { CUDA_KERNEL_LOOP(index, nthreads) { const int pw = index % pooled_width; const int ph = (index / pooled_width) % pooled_height; const int c = (index / pooled_width / pooled_height) % channels; const int n = index / pooled_width / pooled_height / channels; const int hstart = ph * stride_h; const int hend = min(hstart + kernel_h, height); const int wstart = pw * stride_w; const int wend = min(wstart + kernel_w, width); Dtype cumsum = 0.; const Dtype* const bottom_slice = bottom_data + (n * channels + c) * height * width; // First pass: get sum for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { cumsum += bottom_slice[h * width + w]; } } const float thres = rand_idx[index] * cumsum; // Second pass: get value, and set index. cumsum = 0; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { cumsum += bottom_slice[h * width + w]; if (cumsum >= thres) { rand_idx[index] = ((n * channels + c) * height + h) * width + w; top_data[index] = bottom_slice[h * width + w]; return; } } } } } template <typename Dtype> __global__ void StoPoolForwardTest(const int nthreads, const Dtype* const bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, Dtype* const top_data) { CUDA_KERNEL_LOOP(index, nthreads) { const int pw = index % pooled_width; const int ph = (index / pooled_width) % pooled_height; const int c = (index / pooled_width / pooled_height) % channels; const int n = index / pooled_width / pooled_height / channels; const int hstart = ph * stride_h; const int hend = min(hstart + kernel_h, height); const int wstart = pw * stride_w; const int wend = min(wstart + kernel_w, width); // We set cumsum to be 0 to avoid divide-by-zero problems Dtype cumsum = 0.; Dtype cumvalues = 0.; const Dtype* const bottom_slice = bottom_data + (n * channels + c) * height * width; // First pass: get sum for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { cumsum += bottom_slice[h * width + w]; cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w]; } } top_data[index] = (cumsum > 0.) ? cumvalues / cumsum : 0.; } } template <typename Dtype> void PoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); int count = top[0]->count(); // We'll output the mask to top[1] if it's of size >1. 
const bool use_top_mask = top.size() > 1; int* mask = NULL; Dtype* top_mask = NULL; switch (this->layer_param_.pooling_param().pool()) { case PoolingParameter_PoolMethod_MAX: if (use_top_mask) { top_mask = top[1]->mutable_gpu_data(); } else { mask = max_idx_.mutable_gpu_data(); } if (this->layer_param_.pooling_param().sample()){ hipLaunchKernelGGL(( SamplePoolForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom_data, bottom[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data, mask, top_mask); break; } else { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( MaxPoolForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom_data, bottom[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data, mask, top_mask); break; } case PoolingParameter_PoolMethod_AVE: // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( AvePoolForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom_data, bottom[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data); break; case PoolingParameter_PoolMethod_STOCHASTIC: if (this->phase_ == TRAIN) { // We need to create the random index as well. caffe_gpu_rng_uniform(count, Dtype(0), Dtype(1), rand_idx_.mutable_gpu_data()); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( StoPoolForwardTrain<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom_data, bottom[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, rand_idx_.mutable_gpu_data(), top_data); } else { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( StoPoolForwardTest<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom_data, bottom[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, top_data); } break; default: LOG(FATAL) << "Unknown pooling method."; } CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void MaxPoolBackward(const int nthreads, const Dtype* const top_diff, const int* const mask, const Dtype* const top_mask, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* const bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int w = index % width; const int h = (index / width) % height; const int c = (index / width / height) % channels; const int n = index / width / height / channels; const int phstart = (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1; const int phend = min((h + pad_h) / stride_h + 1, pooled_height); const int pwstart = (w + pad_w < kernel_w) ? 
0 : (w + pad_w - kernel_w) / stride_w + 1; const int pwend = min((w + pad_w) / stride_w + 1, pooled_width); Dtype gradient = 0; const int offset = (n * channels + c) * pooled_height * pooled_width; const Dtype* const top_diff_slice = top_diff + offset; if (mask) { const int* const mask_slice = mask + offset; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { if (mask_slice[ph * pooled_width + pw] == h * width + w) { gradient += top_diff_slice[ph * pooled_width + pw]; } } } } else { const Dtype* const top_mask_slice = top_mask + offset; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { if (top_mask_slice[ph * pooled_width + pw] == h * width + w) { gradient += top_diff_slice[ph * pooled_width + pw]; } } } } bottom_diff[index] = gradient; } } template <typename Dtype> __global__ void AvePoolBackward(const int nthreads, const Dtype* const top_diff, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* const bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int w = index % width + pad_w; const int h = (index / width) % height + pad_h; const int c = (index / width / height) % channels; const int n = index / width / height / channels; const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; const int pwend = min(w / stride_w + 1, pooled_width); Dtype gradient = 0; const Dtype* const top_diff_slice = top_diff + (n * channels + c) * pooled_height * pooled_width; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { // figure out the pooling size int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + kernel_h, height + pad_h); int wend = min(wstart + kernel_w, width + pad_w); int pool_size = (hend - hstart) * (wend - wstart); gradient += top_diff_slice[ph * pooled_width + pw] / pool_size; } } bottom_diff[index] = gradient; } } template <typename Dtype> __global__ void StoPoolBackward(const int nthreads, const Dtype* const rand_idx, const Dtype* const top_diff, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, Dtype* const bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int w = index % width; const int h = (index / width) % height; const int c = (index / width / height) % channels; const int n = index / width / height / channels; const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); const int pwstart = (w < kernel_w) ? 
0 : (w - kernel_w) / stride_w + 1; const int pwend = min(w / stride_w + 1, pooled_width); Dtype gradient = 0; const Dtype* const rand_idx_slice = rand_idx + (n * channels + c) * pooled_height * pooled_width; const Dtype* const top_diff_slice = top_diff + (n * channels + c) * pooled_height * pooled_width; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { gradient += top_diff_slice[ph * pooled_width + pw] * (index == static_cast<int>(rand_idx_slice[ph * pooled_width + pw])); } } bottom_diff[index] = gradient; } } template <typename Dtype> void PoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (!propagate_down[0]) { return; } const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int count = bottom[0]->count(); caffe_gpu_set(count, Dtype(0.), bottom_diff); // We'll output the mask to top[1] if it's of size >1. const bool use_top_mask = top.size() > 1; const int* mask = NULL; const Dtype* top_mask = NULL; switch (this->layer_param_.pooling_param().pool()) { case PoolingParameter_PoolMethod_MAX: if (use_top_mask) { top_mask = top[1]->gpu_data(); } else { mask = max_idx_.gpu_data(); } // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( MaxPoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, top_diff, mask, top_mask, top[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, bottom_diff); break; case PoolingParameter_PoolMethod_AVE: // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( AvePoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, top_diff, top[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, bottom_diff); break; case PoolingParameter_PoolMethod_STOCHASTIC: // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( StoPoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, rand_idx_.gpu_data(), top_diff, top[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, bottom_diff); break; default: LOG(FATAL) << "Unknown pooling method."; } CUDA_POST_KERNEL_CHECK; } INSTANTIATE_LAYER_GPU_FUNCS(PoolingLayer); } // namespace caffe
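Every pooling kernel in this file recovers the (n, c, ph, pw) coordinates from the flat output index with the same divide/modulo chain; spelled out as a helper it reads as follows (decode_nchw is illustrative only, the kernels inline this arithmetic):

__device__ inline void decode_nchw(int index, int pooled_width, int pooled_height,
                                   int channels, int& n, int& c, int& ph, int& pw) {
  pw = index % pooled_width;                               // fastest-varying: x
  ph = (index / pooled_width) % pooled_height;             // then y
  c  = (index / pooled_width / pooled_height) % channels;  // then channel
  n  = index / pooled_width / pooled_height / channels;    // slowest: image in the batch
}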
f7c1d535557575545bcea1472c16ea5db2e0c314.cu
#include <algorithm> #include <cfloat> #include <vector> #include "caffe/layers/pooling_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void MaxPoolForward(const int nthreads, const Dtype* const bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* const top_data, int* mask, Dtype* top_mask) { CUDA_KERNEL_LOOP(index, nthreads) { const int pw = index % pooled_width; const int ph = (index / pooled_width) % pooled_height; const int c = (index / pooled_width / pooled_height) % channels; const int n = index / pooled_width / pooled_height / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; const int hend = min(hstart + kernel_h, height); const int wend = min(wstart + kernel_w, width); hstart = max(hstart, 0); wstart = max(wstart, 0); Dtype maxval = -FLT_MAX; int maxidx = -1; const Dtype* const bottom_slice = bottom_data + (n * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { if (bottom_slice[h * width + w] > maxval) { maxidx = h * width + w; maxval = bottom_slice[maxidx]; } } } top_data[index] = maxval; if (mask) { mask[index] = maxidx; } else { top_mask[index] = maxidx; } } } template <typename Dtype> __global__ void SamplePoolForward(const int nthreads, const Dtype* const bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* const top_data, int* mask, Dtype* top_mask) { CUDA_KERNEL_LOOP(index, nthreads) { const int pw = index % pooled_width; const int ph = (index / pooled_width) % pooled_height; const int c = (index / pooled_width / pooled_height) % channels; const int n = index / pooled_width / pooled_height / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; hstart = max(hstart, 0); wstart = max(wstart, 0); const Dtype* const bottom_slice = bottom_data + (n * channels + c) * height * width; int sampleidx = hstart * width + wstart; Dtype sampleval = bottom_slice[sampleidx]; top_data[index] = sampleval; if (mask) { mask[index] = sampleidx; } else { top_mask[index] = sampleidx; } } } template <typename Dtype> __global__ void AvePoolForward(const int nthreads, const Dtype* const bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* const top_data) { CUDA_KERNEL_LOOP(index, nthreads) { const int pw = index % pooled_width; const int ph = (index / pooled_width) % pooled_height; const int c = (index / pooled_width / pooled_height) % channels; const int n = index / pooled_width / pooled_height / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + kernel_h, height + pad_h); int wend = min(wstart + kernel_w, width + pad_w); const int pool_size = (hend - hstart) * (wend - wstart); hstart = max(hstart, 0); wstart = max(wstart, 0); hend = min(hend, height); wend = min(wend, width); Dtype aveval = 0; const Dtype* const bottom_slice = bottom_data + (n * channels + c) * height * width; for 
(int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { aveval += bottom_slice[h * width + w]; } } top_data[index] = aveval / pool_size; } } template <typename Dtype> __global__ void StoPoolForwardTrain(const int nthreads, const Dtype* const bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, Dtype* const rand_idx, Dtype* const top_data) { CUDA_KERNEL_LOOP(index, nthreads) { const int pw = index % pooled_width; const int ph = (index / pooled_width) % pooled_height; const int c = (index / pooled_width / pooled_height) % channels; const int n = index / pooled_width / pooled_height / channels; const int hstart = ph * stride_h; const int hend = min(hstart + kernel_h, height); const int wstart = pw * stride_w; const int wend = min(wstart + kernel_w, width); Dtype cumsum = 0.; const Dtype* const bottom_slice = bottom_data + (n * channels + c) * height * width; // First pass: get sum for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { cumsum += bottom_slice[h * width + w]; } } const float thres = rand_idx[index] * cumsum; // Second pass: get value, and set index. cumsum = 0; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { cumsum += bottom_slice[h * width + w]; if (cumsum >= thres) { rand_idx[index] = ((n * channels + c) * height + h) * width + w; top_data[index] = bottom_slice[h * width + w]; return; } } } } } template <typename Dtype> __global__ void StoPoolForwardTest(const int nthreads, const Dtype* const bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, Dtype* const top_data) { CUDA_KERNEL_LOOP(index, nthreads) { const int pw = index % pooled_width; const int ph = (index / pooled_width) % pooled_height; const int c = (index / pooled_width / pooled_height) % channels; const int n = index / pooled_width / pooled_height / channels; const int hstart = ph * stride_h; const int hend = min(hstart + kernel_h, height); const int wstart = pw * stride_w; const int wend = min(wstart + kernel_w, width); // We set cumsum to be 0 to avoid divide-by-zero problems Dtype cumsum = 0.; Dtype cumvalues = 0.; const Dtype* const bottom_slice = bottom_data + (n * channels + c) * height * width; // First pass: get sum for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { cumsum += bottom_slice[h * width + w]; cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w]; } } top_data[index] = (cumsum > 0.) ? cumvalues / cumsum : 0.; } } template <typename Dtype> void PoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); int count = top[0]->count(); // We'll output the mask to top[1] if it's of size >1. 
const bool use_top_mask = top.size() > 1; int* mask = NULL; Dtype* top_mask = NULL; switch (this->layer_param_.pooling_param().pool()) { case PoolingParameter_PoolMethod_MAX: if (use_top_mask) { top_mask = top[1]->mutable_gpu_data(); } else { mask = max_idx_.mutable_gpu_data(); } if (this->layer_param_.pooling_param().sample()){ SamplePoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, bottom_data, bottom[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data, mask, top_mask); break; } else { // NOLINT_NEXT_LINE(whitespace/operators) MaxPoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, bottom_data, bottom[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data, mask, top_mask); break; } case PoolingParameter_PoolMethod_AVE: // NOLINT_NEXT_LINE(whitespace/operators) AvePoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, bottom_data, bottom[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data); break; case PoolingParameter_PoolMethod_STOCHASTIC: if (this->phase_ == TRAIN) { // We need to create the random index as well. caffe_gpu_rng_uniform(count, Dtype(0), Dtype(1), rand_idx_.mutable_gpu_data()); // NOLINT_NEXT_LINE(whitespace/operators) StoPoolForwardTrain<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, bottom_data, bottom[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, rand_idx_.mutable_gpu_data(), top_data); } else { // NOLINT_NEXT_LINE(whitespace/operators) StoPoolForwardTest<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, bottom_data, bottom[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, top_data); } break; default: LOG(FATAL) << "Unknown pooling method."; } CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void MaxPoolBackward(const int nthreads, const Dtype* const top_diff, const int* const mask, const Dtype* const top_mask, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* const bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int w = index % width; const int h = (index / width) % height; const int c = (index / width / height) % channels; const int n = index / width / height / channels; const int phstart = (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1; const int phend = min((h + pad_h) / stride_h + 1, pooled_height); const int pwstart = (w + pad_w < kernel_w) ? 
0 : (w + pad_w - kernel_w) / stride_w + 1; const int pwend = min((w + pad_w) / stride_w + 1, pooled_width); Dtype gradient = 0; const int offset = (n * channels + c) * pooled_height * pooled_width; const Dtype* const top_diff_slice = top_diff + offset; if (mask) { const int* const mask_slice = mask + offset; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { if (mask_slice[ph * pooled_width + pw] == h * width + w) { gradient += top_diff_slice[ph * pooled_width + pw]; } } } } else { const Dtype* const top_mask_slice = top_mask + offset; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { if (top_mask_slice[ph * pooled_width + pw] == h * width + w) { gradient += top_diff_slice[ph * pooled_width + pw]; } } } } bottom_diff[index] = gradient; } } template <typename Dtype> __global__ void AvePoolBackward(const int nthreads, const Dtype* const top_diff, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* const bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int w = index % width + pad_w; const int h = (index / width) % height + pad_h; const int c = (index / width / height) % channels; const int n = index / width / height / channels; const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; const int pwend = min(w / stride_w + 1, pooled_width); Dtype gradient = 0; const Dtype* const top_diff_slice = top_diff + (n * channels + c) * pooled_height * pooled_width; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { // figure out the pooling size int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + kernel_h, height + pad_h); int wend = min(wstart + kernel_w, width + pad_w); int pool_size = (hend - hstart) * (wend - wstart); gradient += top_diff_slice[ph * pooled_width + pw] / pool_size; } } bottom_diff[index] = gradient; } } template <typename Dtype> __global__ void StoPoolBackward(const int nthreads, const Dtype* const rand_idx, const Dtype* const top_diff, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, Dtype* const bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int w = index % width; const int h = (index / width) % height; const int c = (index / width / height) % channels; const int n = index / width / height / channels; const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); const int pwstart = (w < kernel_w) ? 
0 : (w - kernel_w) / stride_w + 1; const int pwend = min(w / stride_w + 1, pooled_width); Dtype gradient = 0; const Dtype* const rand_idx_slice = rand_idx + (n * channels + c) * pooled_height * pooled_width; const Dtype* const top_diff_slice = top_diff + (n * channels + c) * pooled_height * pooled_width; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { gradient += top_diff_slice[ph * pooled_width + pw] * (index == static_cast<int>(rand_idx_slice[ph * pooled_width + pw])); } } bottom_diff[index] = gradient; } } template <typename Dtype> void PoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (!propagate_down[0]) { return; } const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int count = bottom[0]->count(); caffe_gpu_set(count, Dtype(0.), bottom_diff); // We'll output the mask to top[1] if it's of size >1. const bool use_top_mask = top.size() > 1; const int* mask = NULL; const Dtype* top_mask = NULL; switch (this->layer_param_.pooling_param().pool()) { case PoolingParameter_PoolMethod_MAX: if (use_top_mask) { top_mask = top[1]->gpu_data(); } else { mask = max_idx_.gpu_data(); } // NOLINT_NEXT_LINE(whitespace/operators) MaxPoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, top_diff, mask, top_mask, top[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, bottom_diff); break; case PoolingParameter_PoolMethod_AVE: // NOLINT_NEXT_LINE(whitespace/operators) AvePoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, top_diff, top[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, bottom_diff); break; case PoolingParameter_PoolMethod_STOCHASTIC: // NOLINT_NEXT_LINE(whitespace/operators) StoPoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, rand_idx_.gpu_data(), top_diff, top[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, bottom_diff); break; default: LOG(FATAL) << "Unknown pooling method."; } CUDA_POST_KERNEL_CHECK; } INSTANTIATE_LAYER_GPU_FUNCS(PoolingLayer); } // namespace caffe
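The backward kernels above recover, for each input pixel, the range of pooled windows that contain it through the phstart/phend arithmetic. A standalone host-side sketch of that same index computation, useful for checking it by hand (not part of the Caffe sources):

#include <algorithm>
#include <cstdio>

// Range of pooled rows whose windows cover input row h, mirroring the
// phstart/phend computation in MaxPoolBackward above (phend is exclusive).
static void coveringPooledRows(int h, int kernel_h, int stride_h, int pad_h,
                               int pooled_height, int* phstart, int* phend) {
  *phstart = (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;
  *phend = std::min((h + pad_h) / stride_h + 1, pooled_height);
}

int main() {
  // 3x3 pooling, stride 2, pad 1, pooled_height 4: input row 3 falls inside
  // the windows of pooled rows 1 and 2.
  int phstart, phend;
  coveringPooledRows(3, 3, 2, 1, 4, &phstart, &phend);
  std::printf("phstart=%d phend=%d\n", phstart, phend);  // prints phstart=1 phend=3
  return 0;
}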
696fb8d7e943b76a59faf4cc468a0eba18cf70b9.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <builtin_types.h>
#include <stdlib.h>
#include "kernels/disarium_number.cuh"
#include "cudaUtils.cuh"

const unsigned int NUMBERS_COUNT = 100000;
const unsigned int THREADS_NUM = 128;

void runKernel(unsigned int *generatedNumbersGPU, bool *resultGPU);
void generateNumbers(unsigned int *table, unsigned int n);
void printResult(const unsigned int *generatedNumbers, const bool *result);

int main(int argc, char **argv) {
    unsigned int *generatedNumbersCPU = new unsigned int[NUMBERS_COUNT];
    generateNumbers(generatedNumbersCPU, NUMBERS_COUNT);

    unsigned int *generatedNumbersGPU = allocateArrayOnGPU<unsigned int>(NUMBERS_COUNT);
    transferDataToGPU(generatedNumbersGPU, generatedNumbersCPU, NUMBERS_COUNT);

    bool *resultGPU = allocateArrayOnGPU<bool>(NUMBERS_COUNT);

    runKernel(generatedNumbersGPU, resultGPU);

    bool *resultCPU = new bool[NUMBERS_COUNT];
    transferDataFromGPU<bool>(resultCPU, resultGPU, NUMBERS_COUNT);

    freeArrayGPU(resultGPU);
    freeArrayGPU(generatedNumbersGPU);

    printResult(generatedNumbersCPU, resultCPU);

    delete[] generatedNumbersCPU;
    delete[] resultCPU;

    return 0;
}

void generateNumbers(unsigned int *table, unsigned int n) {
    srand(time(NULL));
    for (unsigned int i = 0; i < n; i++)
        table[i] = rand();
}

void runKernel(unsigned int *generatedNumbersGPU, bool *resultGPU) {
    unsigned int blocksNumber = getBlocksNumber(THREADS_NUM, NUMBERS_COUNT);
    std::cout << "block num: " << blocksNumber << std::endl << "threads count: " << THREADS_NUM << std::endl;

    float time;
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipEventRecord(start, 0);

    hipLaunchKernelGGL((generateDisariumNumbers), dim3(blocksNumber), dim3(THREADS_NUM), 0, 0, generatedNumbersGPU, resultGPU, NUMBERS_COUNT);

    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);
    hipEventElapsedTime(&time, start, stop);
    printf("Time to generate: %f ms \n", time);

    hipEventDestroy(start);
    hipEventDestroy(stop);
}

void printResult(const unsigned int *generatedNumbers, const bool *result) {
    std::cout << "Results: " << std::endl;
    for (unsigned int i = 0; i < NUMBERS_COUNT; i++)
        if (result[i])
            std::cout << generatedNumbers[i] << std::endl;
    std::cout << "----------" << std::endl;
}
696fb8d7e943b76a59faf4cc468a0eba18cf70b9.cu
#include <iostream>
#include <builtin_types.h>
#include <stdlib.h>
#include "kernels/disarium_number.cuh"
#include "cudaUtils.cuh"

const unsigned int NUMBERS_COUNT = 100000;
const unsigned int THREADS_NUM = 128;

void runKernel(unsigned int *generatedNumbersGPU, bool *resultGPU);
void generateNumbers(unsigned int *table, unsigned int n);
void printResult(const unsigned int *generatedNumbers, const bool *result);

int main(int argc, char **argv) {
    unsigned int *generatedNumbersCPU = new unsigned int[NUMBERS_COUNT];
    generateNumbers(generatedNumbersCPU, NUMBERS_COUNT);

    unsigned int *generatedNumbersGPU = allocateArrayOnGPU<unsigned int>(NUMBERS_COUNT);
    transferDataToGPU(generatedNumbersGPU, generatedNumbersCPU, NUMBERS_COUNT);

    bool *resultGPU = allocateArrayOnGPU<bool>(NUMBERS_COUNT);

    runKernel(generatedNumbersGPU, resultGPU);

    bool *resultCPU = new bool[NUMBERS_COUNT];
    transferDataFromGPU<bool>(resultCPU, resultGPU, NUMBERS_COUNT);

    freeArrayGPU(resultGPU);
    freeArrayGPU(generatedNumbersGPU);

    printResult(generatedNumbersCPU, resultCPU);

    delete[] generatedNumbersCPU;
    delete[] resultCPU;

    return 0;
}

void generateNumbers(unsigned int *table, unsigned int n) {
    srand(time(NULL));
    for (unsigned int i = 0; i < n; i++)
        table[i] = rand();
}

void runKernel(unsigned int *generatedNumbersGPU, bool *resultGPU) {
    unsigned int blocksNumber = getBlocksNumber(THREADS_NUM, NUMBERS_COUNT);
    std::cout << "block num: " << blocksNumber << std::endl << "threads count: " << THREADS_NUM << std::endl;

    float time;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);

    generateDisariumNumbers <<< blocksNumber, THREADS_NUM >>>(generatedNumbersGPU, resultGPU, NUMBERS_COUNT);

    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time, start, stop);
    printf("Time to generate: %f ms \n", time);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
}

void printResult(const unsigned int *generatedNumbers, const bool *result) {
    std::cout << "Results: " << std::endl;
    for (unsigned int i = 0; i < NUMBERS_COUNT; i++)
        if (result[i])
            std::cout << generatedNumbers[i] << std::endl;
    std::cout << "----------" << std::endl;
}
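The .hip and .cu versions of runKernel above differ only in the event API prefix (hipEvent*/cudaEvent*) and in the launch syntax that hipify rewrites to hipLaunchKernelGGL. A minimal standalone CUDA sketch of the same event-timing pattern; the dummy kernel is hypothetical and not taken from these files:

#include <cstdio>
#include <cuda_runtime.h>

// Hypothetical stand-in for generateDisariumNumbers, only so the sketch runs.
__global__ void dummyKernel(int *out, unsigned int n) {
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) out[i] = (int)i;
}

int main() {
    const unsigned int n = 1 << 20;
    int *d_out;
    cudaMalloc((void**) &d_out, n * sizeof(int));

    float time;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);

    // hipify rewrites this launch as hipLaunchKernelGGL(dummyKernel, dim3(...), dim3(...), 0, 0, ...).
    dummyKernel<<<(n + 255) / 256, 256>>>(d_out, n);

    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time, start, stop);
    printf("Time to generate: %f ms \n", time);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_out);
    return 0;
}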
45776dfa9fdf4d02b6029500159589e45eb51a0e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date June 2018 @author Mark Gates @author Azzam Haidar @generated from magmablas/zlaset.cu, normal z -> c, Mon Jun 25 18:24:13 2018 */ #include "magma_internal.h" #include "batched_kernel_param.h" // To deal with really large matrices, this launchs multiple super blocks, // each with up to 64K-1 x 64K-1 thread blocks, which is up to 4194240 x 4194240 matrix with BLK=64. // CUDA architecture 2.0 limits each grid dimension to 64K-1. // Instances arose for vectors used by sparse matrices with M > 4194240, though N is small. const magma_int_t max_blocks = 65535; // BLK_X and BLK_Y need to be equal for claset_q to deal with diag & offdiag // when looping over super blocks. // Formerly, BLK_X and BLK_Y could be different. #define BLK_X 64 #define BLK_Y BLK_X /******************************************************************************/ /* Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks. Each block has BLK_X threads. Each thread loops across one row, updating BLK_Y entries. Code similar to claset, clacpy, clag2z, clag2z, cgeadd. */ static __device__ void claset_full_device( int m, int n, magmaFloatComplex offdiag, magmaFloatComplex diag, magmaFloatComplex *A, int lda ) { int ind = blockIdx.x*BLK_X + threadIdx.x; int iby = blockIdx.y*BLK_Y; /* check if full block-column && (below diag || above diag || offdiag == diag) */ bool full = (iby + BLK_Y <= n && (ind >= iby + BLK_Y || ind + BLK_X <= iby || MAGMA_C_EQUAL( offdiag, diag ))); /* do only rows inside matrix */ if ( ind < m ) { A += ind + iby*lda; if ( full ) { // full block-column, off-diagonal block or offdiag == diag #pragma unroll for( int j=0; j < BLK_Y; ++j ) { A[j*lda] = offdiag; } } else { // either partial block-column or diagonal block for( int j=0; j < BLK_Y && iby+j < n; ++j ) { if ( iby+j == ind ) A[j*lda] = diag; else A[j*lda] = offdiag; } } } } /******************************************************************************/ /* Similar to claset_full, but updates only the diagonal and below. Blocks that are fully above the diagonal exit immediately. Code similar to claset, clacpy, zlat2c, clat2z. */ static __device__ void claset_lower_device( int m, int n, magmaFloatComplex offdiag, magmaFloatComplex diag, magmaFloatComplex *A, int lda ) { int ind = blockIdx.x*BLK_X + threadIdx.x; int iby = blockIdx.y*BLK_Y; /* check if full block-column && (below diag) */ bool full = (iby + BLK_Y <= n && (ind >= iby + BLK_Y)); /* do only rows inside matrix, and blocks not above diag */ if ( ind < m && ind + BLK_X > iby ) { A += ind + iby*lda; if ( full ) { // full block-column, off-diagonal block #pragma unroll for( int j=0; j < BLK_Y; ++j ) { A[j*lda] = offdiag; } } else { // either partial block-column or diagonal block for( int j=0; j < BLK_Y && iby+j < n; ++j ) { if ( iby+j == ind ) A[j*lda] = diag; else if ( ind > iby+j ) A[j*lda] = offdiag; } } } } /******************************************************************************/ /* Similar to claset_full, but updates only the diagonal and above. Blocks that are fully below the diagonal exit immediately. Code similar to claset, clacpy, zlat2c, clat2z. 
*/ static __device__ void claset_upper_device( int m, int n, magmaFloatComplex offdiag, magmaFloatComplex diag, magmaFloatComplex *A, int lda ) { int ind = blockIdx.x*BLK_X + threadIdx.x; int iby = blockIdx.y*BLK_Y; /* check if full block-column && (above diag) */ bool full = (iby + BLK_Y <= n && (ind + BLK_X <= iby)); /* do only rows inside matrix, and blocks not below diag */ if ( ind < m && ind < iby + BLK_Y ) { A += ind + iby*lda; if ( full ) { // full block-column, off-diagonal block #pragma unroll for( int j=0; j < BLK_Y; ++j ) { A[j*lda] = offdiag; } } else { // either partial block-column or diagonal block for( int j=0; j < BLK_Y && iby+j < n; ++j ) { if ( iby+j == ind ) A[j*lda] = diag; else if ( ind < iby+j ) A[j*lda] = offdiag; } } } } /******************************************************************************/ /* kernel wrappers to call the device functions. */ __global__ void claset_full_kernel( int m, int n, magmaFloatComplex offdiag, magmaFloatComplex diag, magmaFloatComplex *dA, int ldda ) { claset_full_device(m, n, offdiag, diag, dA, ldda); } __global__ void claset_lower_kernel( int m, int n, magmaFloatComplex offdiag, magmaFloatComplex diag, magmaFloatComplex *dA, int ldda ) { claset_lower_device(m, n, offdiag, diag, dA, ldda); } __global__ void claset_upper_kernel( int m, int n, magmaFloatComplex offdiag, magmaFloatComplex diag, magmaFloatComplex *dA, int ldda ) { claset_upper_device(m, n, offdiag, diag, dA, ldda); } /******************************************************************************/ /* kernel wrappers to call the device functions for the batched routine. */ __global__ void claset_full_kernel_batched( int m, int n, magmaFloatComplex offdiag, magmaFloatComplex diag, magmaFloatComplex **dAarray, int ldda ) { int batchid = blockIdx.z; claset_full_device(m, n, offdiag, diag, dAarray[batchid], ldda); } __global__ void claset_lower_kernel_batched( int m, int n, magmaFloatComplex offdiag, magmaFloatComplex diag, magmaFloatComplex **dAarray, int ldda ) { int batchid = blockIdx.z; claset_lower_device(m, n, offdiag, diag, dAarray[batchid], ldda); } __global__ void claset_upper_kernel_batched( int m, int n, magmaFloatComplex offdiag, magmaFloatComplex diag, magmaFloatComplex **dAarray, int ldda ) { int batchid = blockIdx.z; claset_upper_device(m, n, offdiag, diag, dAarray[batchid], ldda); } /******************************************************************************/ /* kernel wrappers to call the device functions for the vbatched routine. 
*/ __global__ void claset_full_kernel_vbatched( magma_int_t* m, magma_int_t* n, magmaFloatComplex offdiag, magmaFloatComplex diag, magmaFloatComplex **dAarray, magma_int_t* ldda ) { const int batchid = blockIdx.z; const int my_m = (int)m[batchid]; const int my_n = (int)n[batchid]; if( blockIdx.x >= (my_m+BLK_X-1)/BLK_X ) return; if( blockIdx.y >= (my_n+BLK_Y-1)/BLK_Y ) return; claset_full_device(my_m, my_n, offdiag, diag, dAarray[batchid], (int)ldda[batchid]); } __global__ void claset_lower_kernel_vbatched( magma_int_t* m, magma_int_t* n, magmaFloatComplex offdiag, magmaFloatComplex diag, magmaFloatComplex **dAarray, magma_int_t* ldda ) { const int batchid = blockIdx.z; const int my_m = (int)m[batchid]; const int my_n = (int)n[batchid]; if( blockIdx.x >= (my_m+BLK_X-1)/BLK_X ) return; if( blockIdx.y >= (my_n+BLK_Y-1)/BLK_Y ) return; claset_lower_device(my_m, my_n, offdiag, diag, dAarray[batchid], (int)ldda[batchid]); } __global__ void claset_upper_kernel_vbatched( magma_int_t* m, magma_int_t* n, magmaFloatComplex offdiag, magmaFloatComplex diag, magmaFloatComplex **dAarray, magma_int_t* ldda ) { const int batchid = blockIdx.z; const int my_m = (int)m[batchid]; const int my_n = (int)n[batchid]; if( blockIdx.x >= (my_m+BLK_X-1)/BLK_X ) return; if( blockIdx.y >= (my_n+BLK_Y-1)/BLK_Y ) return; claset_upper_device(my_m, my_n, offdiag, diag, dAarray[batchid], (int)ldda[batchid]); } /***************************************************************************//** Purpose ------- CLASET initializes a 2-D array A to DIAG on the diagonal and OFFDIAG on the off-diagonals. Arguments --------- @param[in] uplo magma_uplo_t Specifies the part of the matrix dA to be set. - = MagmaUpper: Upper triangular part - = MagmaLower: Lower triangular part - = MagmaFull: All of the matrix dA @param[in] m INTEGER The number of rows of the matrix dA. M >= 0. @param[in] n INTEGER The number of columns of the matrix dA. N >= 0. @param[in] offdiag COMPLEX The scalar OFFDIAG. (In LAPACK this is called ALPHA.) @param[in] diag COMPLEX The scalar DIAG. (In LAPACK this is called BETA.) @param[in] dA COMPLEX array, dimension (LDDA,N) The M-by-N matrix dA. If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed; if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed. On exit, A(i,j) = OFFDIAG, 1 <= i <= m, 1 <= j <= n, i != j; and A(i,i) = DIAG, 1 <= i <= min(m,n) @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= max(1,M). @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_laset *******************************************************************************/ extern "C" void magmablas_claset( magma_uplo_t uplo, magma_int_t m, magma_int_t n, magmaFloatComplex offdiag, magmaFloatComplex diag, magmaFloatComplex_ptr dA, magma_int_t ldda, magma_queue_t queue) { #define dA(i_, j_) (dA + (i_) + (j_)*ldda) magma_int_t info = 0; if ( uplo != MagmaLower && uplo != MagmaUpper && uplo != MagmaFull ) info = -1; else if ( m < 0 ) info = -2; else if ( n < 0 ) info = -3; else if ( ldda < max(1,m) ) info = -7; if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info; } if ( m == 0 || n == 0 ) { return; } assert( BLK_X == BLK_Y ); const magma_int_t super_NB = max_blocks*BLK_X; dim3 super_grid( magma_ceildiv( m, super_NB ), magma_ceildiv( n, super_NB ) ); dim3 threads( BLK_X, 1 ); dim3 grid; magma_int_t mm, nn; if (uplo == MagmaLower) { for( unsigned int i=0; i < super_grid.x; ++i ) { mm = (i == super_grid.x-1 ? 
m % super_NB : super_NB); grid.x = magma_ceildiv( mm, BLK_X ); for( unsigned int j=0; j < super_grid.y && j <= i; ++j ) { // from left to diagonal nn = (j == super_grid.y-1 ? n % super_NB : super_NB); grid.y = magma_ceildiv( nn, BLK_Y ); if ( i == j ) { // diagonal super block hipLaunchKernelGGL(( claset_lower_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , mm, nn, offdiag, diag, dA(i*super_NB, j*super_NB), ldda ); } else { // off diagonal super block hipLaunchKernelGGL(( claset_full_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , mm, nn, offdiag, offdiag, dA(i*super_NB, j*super_NB), ldda ); } } } } else if (uplo == MagmaUpper) { for( unsigned int i=0; i < super_grid.x; ++i ) { mm = (i == super_grid.x-1 ? m % super_NB : super_NB); grid.x = magma_ceildiv( mm, BLK_X ); for( unsigned int j=i; j < super_grid.y; ++j ) { // from diagonal to right nn = (j == super_grid.y-1 ? n % super_NB : super_NB); grid.y = magma_ceildiv( nn, BLK_Y ); if ( i == j ) { // diagonal super block hipLaunchKernelGGL(( claset_upper_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , mm, nn, offdiag, diag, dA(i*super_NB, j*super_NB), ldda ); } else { // off diagonal super block hipLaunchKernelGGL(( claset_full_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , mm, nn, offdiag, offdiag, dA(i*super_NB, j*super_NB), ldda ); } } } } else { // if continuous in memory & set to zero, hipMemset is faster. // TODO: use hipMemset2D ? if ( m == ldda && MAGMA_C_EQUAL( offdiag, MAGMA_C_ZERO ) && MAGMA_C_EQUAL( diag, MAGMA_C_ZERO ) ) { size_t size = m*n; hipError_t err = hipMemsetAsync( dA, 0, size*sizeof(magmaFloatComplex), queue->cuda_stream() ); assert( err == hipSuccess ); MAGMA_UNUSED( err ); } else { for( unsigned int i=0; i < super_grid.x; ++i ) { mm = (i == super_grid.x-1 ? m % super_NB : super_NB); grid.x = magma_ceildiv( mm, BLK_X ); for( unsigned int j=0; j < super_grid.y; ++j ) { // full row nn = (j == super_grid.y-1 ? 
n % super_NB : super_NB); grid.y = magma_ceildiv( nn, BLK_Y ); if ( i == j ) { // diagonal super block hipLaunchKernelGGL(( claset_full_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , mm, nn, offdiag, diag, dA(i*super_NB, j*super_NB), ldda ); } else { // off diagonal super block hipLaunchKernelGGL(( claset_full_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , mm, nn, offdiag, offdiag, dA(i*super_NB, j*super_NB), ldda ); } } } } } } /******************************************************************************/ extern "C" void magmablas_claset_batched( magma_uplo_t uplo, magma_int_t m, magma_int_t n, magmaFloatComplex offdiag, magmaFloatComplex diag, magmaFloatComplex_ptr dAarray[], magma_int_t ldda, magma_int_t batchCount, magma_queue_t queue) { magma_int_t info = 0; if ( uplo != MagmaLower && uplo != MagmaUpper && uplo != MagmaFull ) info = -1; else if ( m < 0 ) info = -2; else if ( n < 0 ) info = -3; else if ( ldda < max(1,m) ) info = -7; if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info; } if ( m == 0 || n == 0 ) { return; } dim3 threads( BLK_X, 1, 1 ); dim3 grid( magma_ceildiv( m, BLK_X ), magma_ceildiv( n, BLK_Y ), batchCount ); if (uplo == MagmaLower) { hipLaunchKernelGGL(( claset_lower_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, offdiag, diag, dAarray, ldda); } else if (uplo == MagmaUpper) { hipLaunchKernelGGL(( claset_upper_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, offdiag, diag, dAarray, ldda); } else { hipLaunchKernelGGL(( claset_full_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, offdiag, diag, dAarray, ldda); } } /******************************************************************************/ extern "C" void magmablas_claset_vbatched( magma_uplo_t uplo, magma_int_t max_m, magma_int_t max_n, magma_int_t* m, magma_int_t* n, magmaFloatComplex offdiag, magmaFloatComplex diag, magmaFloatComplex_ptr dAarray[], magma_int_t* ldda, magma_int_t batchCount, magma_queue_t queue) { magma_int_t info = 0; if ( uplo != MagmaLower && uplo != MagmaUpper && uplo != MagmaFull ) info = -1; else if ( max_m < 0 ) info = -2; else if ( max_n < 0 ) info = -3; //else if ( ldda < max(1,m) ) // info = -7; if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info; } if ( max_m == 0 || max_n == 0 ) { return; } dim3 threads( BLK_X, 1, 1 ); dim3 grid( magma_ceildiv( max_m, BLK_X ), magma_ceildiv( max_n, BLK_Y ), batchCount ); if (uplo == MagmaLower) { hipLaunchKernelGGL(( claset_lower_kernel_vbatched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, offdiag, diag, dAarray, ldda); } else if (uplo == MagmaUpper) { hipLaunchKernelGGL(( claset_upper_kernel_vbatched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, offdiag, diag, dAarray, ldda); } else { hipLaunchKernelGGL(( claset_full_kernel_vbatched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, offdiag, diag, dAarray, ldda); } }
45776dfa9fdf4d02b6029500159589e45eb51a0e.cu
/* -- MAGMA (version 2.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date June 2018 @author Mark Gates @author Azzam Haidar @generated from magmablas/zlaset.cu, normal z -> c, Mon Jun 25 18:24:13 2018 */ #include "magma_internal.h" #include "batched_kernel_param.h" // To deal with really large matrices, this launchs multiple super blocks, // each with up to 64K-1 x 64K-1 thread blocks, which is up to 4194240 x 4194240 matrix with BLK=64. // CUDA architecture 2.0 limits each grid dimension to 64K-1. // Instances arose for vectors used by sparse matrices with M > 4194240, though N is small. const magma_int_t max_blocks = 65535; // BLK_X and BLK_Y need to be equal for claset_q to deal with diag & offdiag // when looping over super blocks. // Formerly, BLK_X and BLK_Y could be different. #define BLK_X 64 #define BLK_Y BLK_X /******************************************************************************/ /* Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks. Each block has BLK_X threads. Each thread loops across one row, updating BLK_Y entries. Code similar to claset, clacpy, clag2z, clag2z, cgeadd. */ static __device__ void claset_full_device( int m, int n, magmaFloatComplex offdiag, magmaFloatComplex diag, magmaFloatComplex *A, int lda ) { int ind = blockIdx.x*BLK_X + threadIdx.x; int iby = blockIdx.y*BLK_Y; /* check if full block-column && (below diag || above diag || offdiag == diag) */ bool full = (iby + BLK_Y <= n && (ind >= iby + BLK_Y || ind + BLK_X <= iby || MAGMA_C_EQUAL( offdiag, diag ))); /* do only rows inside matrix */ if ( ind < m ) { A += ind + iby*lda; if ( full ) { // full block-column, off-diagonal block or offdiag == diag #pragma unroll for( int j=0; j < BLK_Y; ++j ) { A[j*lda] = offdiag; } } else { // either partial block-column or diagonal block for( int j=0; j < BLK_Y && iby+j < n; ++j ) { if ( iby+j == ind ) A[j*lda] = diag; else A[j*lda] = offdiag; } } } } /******************************************************************************/ /* Similar to claset_full, but updates only the diagonal and below. Blocks that are fully above the diagonal exit immediately. Code similar to claset, clacpy, zlat2c, clat2z. */ static __device__ void claset_lower_device( int m, int n, magmaFloatComplex offdiag, magmaFloatComplex diag, magmaFloatComplex *A, int lda ) { int ind = blockIdx.x*BLK_X + threadIdx.x; int iby = blockIdx.y*BLK_Y; /* check if full block-column && (below diag) */ bool full = (iby + BLK_Y <= n && (ind >= iby + BLK_Y)); /* do only rows inside matrix, and blocks not above diag */ if ( ind < m && ind + BLK_X > iby ) { A += ind + iby*lda; if ( full ) { // full block-column, off-diagonal block #pragma unroll for( int j=0; j < BLK_Y; ++j ) { A[j*lda] = offdiag; } } else { // either partial block-column or diagonal block for( int j=0; j < BLK_Y && iby+j < n; ++j ) { if ( iby+j == ind ) A[j*lda] = diag; else if ( ind > iby+j ) A[j*lda] = offdiag; } } } } /******************************************************************************/ /* Similar to claset_full, but updates only the diagonal and above. Blocks that are fully below the diagonal exit immediately. Code similar to claset, clacpy, zlat2c, clat2z. 
*/ static __device__ void claset_upper_device( int m, int n, magmaFloatComplex offdiag, magmaFloatComplex diag, magmaFloatComplex *A, int lda ) { int ind = blockIdx.x*BLK_X + threadIdx.x; int iby = blockIdx.y*BLK_Y; /* check if full block-column && (above diag) */ bool full = (iby + BLK_Y <= n && (ind + BLK_X <= iby)); /* do only rows inside matrix, and blocks not below diag */ if ( ind < m && ind < iby + BLK_Y ) { A += ind + iby*lda; if ( full ) { // full block-column, off-diagonal block #pragma unroll for( int j=0; j < BLK_Y; ++j ) { A[j*lda] = offdiag; } } else { // either partial block-column or diagonal block for( int j=0; j < BLK_Y && iby+j < n; ++j ) { if ( iby+j == ind ) A[j*lda] = diag; else if ( ind < iby+j ) A[j*lda] = offdiag; } } } } /******************************************************************************/ /* kernel wrappers to call the device functions. */ __global__ void claset_full_kernel( int m, int n, magmaFloatComplex offdiag, magmaFloatComplex diag, magmaFloatComplex *dA, int ldda ) { claset_full_device(m, n, offdiag, diag, dA, ldda); } __global__ void claset_lower_kernel( int m, int n, magmaFloatComplex offdiag, magmaFloatComplex diag, magmaFloatComplex *dA, int ldda ) { claset_lower_device(m, n, offdiag, diag, dA, ldda); } __global__ void claset_upper_kernel( int m, int n, magmaFloatComplex offdiag, magmaFloatComplex diag, magmaFloatComplex *dA, int ldda ) { claset_upper_device(m, n, offdiag, diag, dA, ldda); } /******************************************************************************/ /* kernel wrappers to call the device functions for the batched routine. */ __global__ void claset_full_kernel_batched( int m, int n, magmaFloatComplex offdiag, magmaFloatComplex diag, magmaFloatComplex **dAarray, int ldda ) { int batchid = blockIdx.z; claset_full_device(m, n, offdiag, diag, dAarray[batchid], ldda); } __global__ void claset_lower_kernel_batched( int m, int n, magmaFloatComplex offdiag, magmaFloatComplex diag, magmaFloatComplex **dAarray, int ldda ) { int batchid = blockIdx.z; claset_lower_device(m, n, offdiag, diag, dAarray[batchid], ldda); } __global__ void claset_upper_kernel_batched( int m, int n, magmaFloatComplex offdiag, magmaFloatComplex diag, magmaFloatComplex **dAarray, int ldda ) { int batchid = blockIdx.z; claset_upper_device(m, n, offdiag, diag, dAarray[batchid], ldda); } /******************************************************************************/ /* kernel wrappers to call the device functions for the vbatched routine. 
*/ __global__ void claset_full_kernel_vbatched( magma_int_t* m, magma_int_t* n, magmaFloatComplex offdiag, magmaFloatComplex diag, magmaFloatComplex **dAarray, magma_int_t* ldda ) { const int batchid = blockIdx.z; const int my_m = (int)m[batchid]; const int my_n = (int)n[batchid]; if( blockIdx.x >= (my_m+BLK_X-1)/BLK_X ) return; if( blockIdx.y >= (my_n+BLK_Y-1)/BLK_Y ) return; claset_full_device(my_m, my_n, offdiag, diag, dAarray[batchid], (int)ldda[batchid]); } __global__ void claset_lower_kernel_vbatched( magma_int_t* m, magma_int_t* n, magmaFloatComplex offdiag, magmaFloatComplex diag, magmaFloatComplex **dAarray, magma_int_t* ldda ) { const int batchid = blockIdx.z; const int my_m = (int)m[batchid]; const int my_n = (int)n[batchid]; if( blockIdx.x >= (my_m+BLK_X-1)/BLK_X ) return; if( blockIdx.y >= (my_n+BLK_Y-1)/BLK_Y ) return; claset_lower_device(my_m, my_n, offdiag, diag, dAarray[batchid], (int)ldda[batchid]); } __global__ void claset_upper_kernel_vbatched( magma_int_t* m, magma_int_t* n, magmaFloatComplex offdiag, magmaFloatComplex diag, magmaFloatComplex **dAarray, magma_int_t* ldda ) { const int batchid = blockIdx.z; const int my_m = (int)m[batchid]; const int my_n = (int)n[batchid]; if( blockIdx.x >= (my_m+BLK_X-1)/BLK_X ) return; if( blockIdx.y >= (my_n+BLK_Y-1)/BLK_Y ) return; claset_upper_device(my_m, my_n, offdiag, diag, dAarray[batchid], (int)ldda[batchid]); } /***************************************************************************//** Purpose ------- CLASET initializes a 2-D array A to DIAG on the diagonal and OFFDIAG on the off-diagonals. Arguments --------- @param[in] uplo magma_uplo_t Specifies the part of the matrix dA to be set. - = MagmaUpper: Upper triangular part - = MagmaLower: Lower triangular part - = MagmaFull: All of the matrix dA @param[in] m INTEGER The number of rows of the matrix dA. M >= 0. @param[in] n INTEGER The number of columns of the matrix dA. N >= 0. @param[in] offdiag COMPLEX The scalar OFFDIAG. (In LAPACK this is called ALPHA.) @param[in] diag COMPLEX The scalar DIAG. (In LAPACK this is called BETA.) @param[in] dA COMPLEX array, dimension (LDDA,N) The M-by-N matrix dA. If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed; if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed. On exit, A(i,j) = OFFDIAG, 1 <= i <= m, 1 <= j <= n, i != j; and A(i,i) = DIAG, 1 <= i <= min(m,n) @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= max(1,M). @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_laset *******************************************************************************/ extern "C" void magmablas_claset( magma_uplo_t uplo, magma_int_t m, magma_int_t n, magmaFloatComplex offdiag, magmaFloatComplex diag, magmaFloatComplex_ptr dA, magma_int_t ldda, magma_queue_t queue) { #define dA(i_, j_) (dA + (i_) + (j_)*ldda) magma_int_t info = 0; if ( uplo != MagmaLower && uplo != MagmaUpper && uplo != MagmaFull ) info = -1; else if ( m < 0 ) info = -2; else if ( n < 0 ) info = -3; else if ( ldda < max(1,m) ) info = -7; if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info; } if ( m == 0 || n == 0 ) { return; } assert( BLK_X == BLK_Y ); const magma_int_t super_NB = max_blocks*BLK_X; dim3 super_grid( magma_ceildiv( m, super_NB ), magma_ceildiv( n, super_NB ) ); dim3 threads( BLK_X, 1 ); dim3 grid; magma_int_t mm, nn; if (uplo == MagmaLower) { for( unsigned int i=0; i < super_grid.x; ++i ) { mm = (i == super_grid.x-1 ? 
m % super_NB : super_NB); grid.x = magma_ceildiv( mm, BLK_X ); for( unsigned int j=0; j < super_grid.y && j <= i; ++j ) { // from left to diagonal nn = (j == super_grid.y-1 ? n % super_NB : super_NB); grid.y = magma_ceildiv( nn, BLK_Y ); if ( i == j ) { // diagonal super block claset_lower_kernel<<< grid, threads, 0, queue->cuda_stream() >>> ( mm, nn, offdiag, diag, dA(i*super_NB, j*super_NB), ldda ); } else { // off diagonal super block claset_full_kernel<<< grid, threads, 0, queue->cuda_stream() >>> ( mm, nn, offdiag, offdiag, dA(i*super_NB, j*super_NB), ldda ); } } } } else if (uplo == MagmaUpper) { for( unsigned int i=0; i < super_grid.x; ++i ) { mm = (i == super_grid.x-1 ? m % super_NB : super_NB); grid.x = magma_ceildiv( mm, BLK_X ); for( unsigned int j=i; j < super_grid.y; ++j ) { // from diagonal to right nn = (j == super_grid.y-1 ? n % super_NB : super_NB); grid.y = magma_ceildiv( nn, BLK_Y ); if ( i == j ) { // diagonal super block claset_upper_kernel<<< grid, threads, 0, queue->cuda_stream() >>> ( mm, nn, offdiag, diag, dA(i*super_NB, j*super_NB), ldda ); } else { // off diagonal super block claset_full_kernel<<< grid, threads, 0, queue->cuda_stream() >>> ( mm, nn, offdiag, offdiag, dA(i*super_NB, j*super_NB), ldda ); } } } } else { // if continuous in memory & set to zero, cudaMemset is faster. // TODO: use cudaMemset2D ? if ( m == ldda && MAGMA_C_EQUAL( offdiag, MAGMA_C_ZERO ) && MAGMA_C_EQUAL( diag, MAGMA_C_ZERO ) ) { size_t size = m*n; cudaError_t err = cudaMemsetAsync( dA, 0, size*sizeof(magmaFloatComplex), queue->cuda_stream() ); assert( err == cudaSuccess ); MAGMA_UNUSED( err ); } else { for( unsigned int i=0; i < super_grid.x; ++i ) { mm = (i == super_grid.x-1 ? m % super_NB : super_NB); grid.x = magma_ceildiv( mm, BLK_X ); for( unsigned int j=0; j < super_grid.y; ++j ) { // full row nn = (j == super_grid.y-1 ? 
n % super_NB : super_NB); grid.y = magma_ceildiv( nn, BLK_Y ); if ( i == j ) { // diagonal super block claset_full_kernel<<< grid, threads, 0, queue->cuda_stream() >>> ( mm, nn, offdiag, diag, dA(i*super_NB, j*super_NB), ldda ); } else { // off diagonal super block claset_full_kernel<<< grid, threads, 0, queue->cuda_stream() >>> ( mm, nn, offdiag, offdiag, dA(i*super_NB, j*super_NB), ldda ); } } } } } } /******************************************************************************/ extern "C" void magmablas_claset_batched( magma_uplo_t uplo, magma_int_t m, magma_int_t n, magmaFloatComplex offdiag, magmaFloatComplex diag, magmaFloatComplex_ptr dAarray[], magma_int_t ldda, magma_int_t batchCount, magma_queue_t queue) { magma_int_t info = 0; if ( uplo != MagmaLower && uplo != MagmaUpper && uplo != MagmaFull ) info = -1; else if ( m < 0 ) info = -2; else if ( n < 0 ) info = -3; else if ( ldda < max(1,m) ) info = -7; if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info; } if ( m == 0 || n == 0 ) { return; } dim3 threads( BLK_X, 1, 1 ); dim3 grid( magma_ceildiv( m, BLK_X ), magma_ceildiv( n, BLK_Y ), batchCount ); if (uplo == MagmaLower) { claset_lower_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>> (m, n, offdiag, diag, dAarray, ldda); } else if (uplo == MagmaUpper) { claset_upper_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>> (m, n, offdiag, diag, dAarray, ldda); } else { claset_full_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>> (m, n, offdiag, diag, dAarray, ldda); } } /******************************************************************************/ extern "C" void magmablas_claset_vbatched( magma_uplo_t uplo, magma_int_t max_m, magma_int_t max_n, magma_int_t* m, magma_int_t* n, magmaFloatComplex offdiag, magmaFloatComplex diag, magmaFloatComplex_ptr dAarray[], magma_int_t* ldda, magma_int_t batchCount, magma_queue_t queue) { magma_int_t info = 0; if ( uplo != MagmaLower && uplo != MagmaUpper && uplo != MagmaFull ) info = -1; else if ( max_m < 0 ) info = -2; else if ( max_n < 0 ) info = -3; //else if ( ldda < max(1,m) ) // info = -7; if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info; } if ( max_m == 0 || max_n == 0 ) { return; } dim3 threads( BLK_X, 1, 1 ); dim3 grid( magma_ceildiv( max_m, BLK_X ), magma_ceildiv( max_n, BLK_Y ), batchCount ); if (uplo == MagmaLower) { claset_lower_kernel_vbatched<<< grid, threads, 0, queue->cuda_stream() >>> (m, n, offdiag, diag, dAarray, ldda); } else if (uplo == MagmaUpper) { claset_upper_kernel_vbatched<<< grid, threads, 0, queue->cuda_stream() >>> (m, n, offdiag, diag, dAarray, ldda); } else { claset_full_kernel_vbatched<<< grid, threads, 0, queue->cuda_stream() >>> (m, n, offdiag, diag, dAarray, ldda); } }
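The loop over super blocks in magmablas_claset above exists because each grid dimension is capped at max_blocks = 65535 thread blocks. A standalone sketch of the sizing arithmetic (the matrix height is chosen only for illustration):

#include <cstdio>

static long long ceildiv(long long a, long long b) { return (a + b - 1) / b; }

int main() {
    // Same constants as above: BLK_X = 64, max_blocks = 65535.
    const long long BLK_X = 64, max_blocks = 65535;
    const long long super_NB = max_blocks * BLK_X;   // 4194240 rows per super block
    const long long m = 10000000;                    // illustrative matrix height
    std::printf("super_NB = %lld, super blocks along m = %lld\n",
                super_NB, ceildiv(m, super_NB));     // prints 4194240 and 3
    return 0;
}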
4177a40b1ead90b31ad7899cd6a489bbcaab6b05.hip
// !!! This is a file automatically generated by hipify!!! /** * Copyright 1993-2012 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. */ #include <stdio.h> #include <stdlib.h> #include <tiffio.h> #include <math.h> #include <iostream> #include <string.h> #include <string> #include <fstream> #include <sstream> #include <map> #include <vector> #include <algorithm> #include <time.h> #include <chrono> #include <queue> #include <set> #include <hip/hip_runtime.h> static const int WORK_SIZE = 256; const int AGP = 14, PAS = 15, AGR = 18, CAP = 19, CSP = 20, MAP = 21; #define CHECK_CUDA_RESULT(N) { \ hipError_t result = N; \ if (result != 0) { \ printf("CUDA call on line %d returned error %d\n", __LINE__, \ result); \ exit(1); \ } } void setup(TIFF* new_tif, int width, int length, int bitsPerSample, int sampleFormat){ TIFFSetField(new_tif, TIFFTAG_IMAGEWIDTH , width); TIFFSetField(new_tif, TIFFTAG_IMAGELENGTH , length); TIFFSetField(new_tif, TIFFTAG_BITSPERSAMPLE , bitsPerSample); TIFFSetField(new_tif, TIFFTAG_SAMPLEFORMAT , sampleFormat); TIFFSetField(new_tif, TIFFTAG_COMPRESSION , 1); TIFFSetField(new_tif, TIFFTAG_PHOTOMETRIC , 1); TIFFSetField(new_tif, TIFFTAG_SAMPLESPERPIXEL, 1); TIFFSetField(new_tif, TIFFTAG_ROWSPERSTRIP , 1); TIFFSetField(new_tif, TIFFTAG_RESOLUTIONUNIT , 1); TIFFSetField(new_tif, TIFFTAG_XRESOLUTION , 1); TIFFSetField(new_tif, TIFFTAG_YRESOLUTION , 1); TIFFSetField(new_tif, TIFFTAG_PLANARCONFIG , PLANARCONFIG_CONTIG); }; void write_line_tiff(TIFF* tif, double tif_line[], int line){ if (TIFFWriteScanline(tif, tif_line, line) < 0){ std::cerr << "Write problem!" << std::endl; exit(4); } }; void write_line_tiff(TIFF* tif, int tif_line[], int line){ if (TIFFWriteScanline(tif, tif_line, line) < 0){ std::cerr << "Write problem!" 
<< std::endl; exit(4); } }; void read_line_tiff(TIFF* tif, double tif_line[], int line){ if(TIFFReadScanline(tif, tif_line, line) < 0){ std::cerr << "Read problem" << std::endl; exit(3); } }; void read_line_tiff(TIFF* tif, int tif_line[], int line){ if(TIFFReadScanline(tif, tif_line, line) < 0){ std::cerr << "Read problem" << std::endl; exit(3); } }; __host__ __device__ bool checkLandCode(int value){ return (value == AGP) || (value == PAS) || (value == AGR) || (value == CAP) || (value == CSP) || (value == MAP); } __global__ void landCoverHomogeneity(double* inputBuffer, int* output, int line, int numCol, int numLine){ int column = threadIdx.x + blockIdx.x * blockDim.x; double pixel_value; int aux; while (column < numCol) { aux = line % 7; pixel_value = inputBuffer[aux * numCol + column]; output[column] = false; if(checkLandCode(pixel_value)) { //Verify if the pixel is an AGR pixel output[column] = true; for(int i = -3; i <= 3 && output[column]; i++){ for(int j = -3; j <= 3 && output[column]; j++){ // Check if the neighbor is AGR too if (column + i >= 0 && column + i < numCol && line + j >= 0 && line + j < numLine) { aux = (line + j) % 7; pixel_value = inputBuffer[aux * numCol + column]; if(!isnan(pixel_value)) if(!checkLandCode(pixel_value)) output[column] = false; } } } } column += blockDim.x * gridDim.x; } } void testLandCoverHomogeneity(TIFF* landCover, TIFF* mask){ uint32 height_band, width_band; TIFFGetField(landCover, TIFFTAG_IMAGELENGTH, &height_band); TIFFGetField(landCover, TIFFTAG_IMAGEWIDTH, &width_band); double* buffer = (double *) malloc(7 * width_band * sizeof(double)); int relation[7] = {-1, -1, -1, -1, -1, -1, -1}, aux; for(int line = 0; line < height_band; line++) { // Create the respective line of the binary map of eligibles pixels int mask_line[width_band]; for(int column = 0; column < width_band; column++) { int pixel_value; aux = line % 7; if(relation[aux] != line) { read_line_tiff(landCover, buffer + aux * width_band, line); relation[aux] = line; } pixel_value = buffer[aux * width_band + column]; mask_line[column] = false; if(checkLandCode(pixel_value)) { //Verify if the pixel is an AGR pixel mask_line[column] = true; for(int i = -3; i <= 3 && mask_line[column]; i++){ for(int j = -3; j <= 3 && mask_line[column]; j++){ // Check if the neighbor is AGR too if (column + i >= 0 && column + i < width_band && line + j >= 0 && line + j < height_band) { aux = (line + j) % 7; if(relation[aux] != (line + j)) { read_line_tiff(landCover, buffer + aux * width_band, line + j); relation[aux] = (line + j); } pixel_value = buffer[aux * width_band + column]; if(!std::isnan(pixel_value)) if(!checkLandCode(pixel_value)) mask_line[column] = false; } } } } } write_line_tiff(mask, mask_line, line); } // for(int i = 0; i < 7; i++){ // free(buffer[i]); // } free(buffer); } int main(int argc, char **argv) { std::string landCoverPath = argv[1]; std::string outputPath = argv[2]; std::string outputCPU = outputPath + "/CPU.tif"; std::string outputGPU = outputPath + "/GPU.tif"; TIFF* landCover = TIFFOpen(landCoverPath.c_str(), "rm"); uint32 height_band, width_band; TIFFGetField(landCover, TIFFTAG_IMAGEWIDTH, &width_band); TIFFGetField(landCover, TIFFTAG_IMAGELENGTH, &height_band); TIFF* CPU = TIFFOpen(outputCPU.c_str(), "w8m"); setup(CPU, width_band, height_band, 32, 2); testLandCoverHomogeneity(landCover, CPU); TIFFClose(CPU); TIFF* GPU = TIFFOpen(outputGPU.c_str(), "w8m"); setup(GPU, width_band, height_band, 32, 2); double* buffer = (double *) malloc(7 * width_band * sizeof(double)); int* 
output_line = (int*) malloc(width_band * sizeof(int)); double* buffer_dev; hipMalloc((void**) &buffer_dev, 7 * width_band * sizeof(double*)); int* output_dev; hipMalloc((void**) &output_dev, width_band * sizeof(int*)); int relation[7] = {-1, -1, -1, -1, -1, -1, -1}; for(int line = 0; line < height_band; line++) { for(int i = -3; i < 4; i++) { if(line + i >= 0 && line + i < height_band){ if(relation[(line + i) % 7] != (line + i)) { read_line_tiff(landCover, buffer + ((line + i) % 7) * width_band, line + i); relation[(line + i) % 7] = line + i; } } } hipMemcpy(buffer_dev, buffer, 7 * width_band * sizeof(double), hipMemcpyHostToDevice); hipLaunchKernelGGL(( landCoverHomogeneity), dim3((width_band + 1) / WORK_SIZE) , dim3(WORK_SIZE), 0, 0, buffer_dev, output_dev, line, width_band, height_band); hipMemcpy(output_line, output_dev, width_band * sizeof(int), hipMemcpyDeviceToHost); write_line_tiff(GPU, output_line, line); } free(buffer); free(output_line); hipFree(buffer_dev); hipFree(output_dev); TIFFClose(landCover); TIFFClose(GPU); return 0; }
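Both the CPU and GPU paths above keep only a seven-row window of the land-cover raster resident, addressing it with line % 7 and tracking which raster row occupies each slot in relation[]. A standalone sketch of that ring-buffer bookkeeping (the read_line_tiff call appears only as a comment):

#include <cstdio>

int main() {
    const int WINDOW = 7;                            // matches the 7-row buffer above
    int relation[WINDOW] = {-1, -1, -1, -1, -1, -1, -1};
    for (int line = 0; line < 10; line++) {
        int slot = line % WINDOW;
        if (relation[slot] != line) {
            // real code: read_line_tiff(landCover, buffer + slot * width_band, line);
            relation[slot] = line;
            std::printf("raster row %d -> buffer slot %d\n", line, slot);
        }
    }
    return 0;
}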
4177a40b1ead90b31ad7899cd6a489bbcaab6b05.cu
/** * Copyright 1993-2012 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. */ #include <stdio.h> #include <stdlib.h> #include <tiffio.h> #include <math.h> #include <iostream> #include <string.h> #include <string> #include <fstream> #include <sstream> #include <map> #include <vector> #include <algorithm> #include <time.h> #include <chrono> #include <queue> #include <set> #include <cuda.h> static const int WORK_SIZE = 256; const int AGP = 14, PAS = 15, AGR = 18, CAP = 19, CSP = 20, MAP = 21; #define CHECK_CUDA_RESULT(N) { \ CUresult result = N; \ if (result != 0) { \ printf("CUDA call on line %d returned error %d\n", __LINE__, \ result); \ exit(1); \ } } void setup(TIFF* new_tif, int width, int length, int bitsPerSample, int sampleFormat){ TIFFSetField(new_tif, TIFFTAG_IMAGEWIDTH , width); TIFFSetField(new_tif, TIFFTAG_IMAGELENGTH , length); TIFFSetField(new_tif, TIFFTAG_BITSPERSAMPLE , bitsPerSample); TIFFSetField(new_tif, TIFFTAG_SAMPLEFORMAT , sampleFormat); TIFFSetField(new_tif, TIFFTAG_COMPRESSION , 1); TIFFSetField(new_tif, TIFFTAG_PHOTOMETRIC , 1); TIFFSetField(new_tif, TIFFTAG_SAMPLESPERPIXEL, 1); TIFFSetField(new_tif, TIFFTAG_ROWSPERSTRIP , 1); TIFFSetField(new_tif, TIFFTAG_RESOLUTIONUNIT , 1); TIFFSetField(new_tif, TIFFTAG_XRESOLUTION , 1); TIFFSetField(new_tif, TIFFTAG_YRESOLUTION , 1); TIFFSetField(new_tif, TIFFTAG_PLANARCONFIG , PLANARCONFIG_CONTIG); }; void write_line_tiff(TIFF* tif, double tif_line[], int line){ if (TIFFWriteScanline(tif, tif_line, line) < 0){ std::cerr << "Write problem!" << std::endl; exit(4); } }; void write_line_tiff(TIFF* tif, int tif_line[], int line){ if (TIFFWriteScanline(tif, tif_line, line) < 0){ std::cerr << "Write problem!" 
<< std::endl; exit(4); } }; void read_line_tiff(TIFF* tif, double tif_line[], int line){ if(TIFFReadScanline(tif, tif_line, line) < 0){ std::cerr << "Read problem" << std::endl; exit(3); } }; void read_line_tiff(TIFF* tif, int tif_line[], int line){ if(TIFFReadScanline(tif, tif_line, line) < 0){ std::cerr << "Read problem" << std::endl; exit(3); } }; __host__ __device__ bool checkLandCode(int value){ return (value == AGP) || (value == PAS) || (value == AGR) || (value == CAP) || (value == CSP) || (value == MAP); } __global__ void landCoverHomogeneity(double* inputBuffer, int* output, int line, int numCol, int numLine){ int column = threadIdx.x + blockIdx.x * blockDim.x; double pixel_value; int aux; while (column < numCol) { aux = line % 7; pixel_value = inputBuffer[aux * numCol + column]; output[column] = false; if(checkLandCode(pixel_value)) { //Verify if the pixel is an AGR pixel output[column] = true; for(int i = -3; i <= 3 && output[column]; i++){ for(int j = -3; j <= 3 && output[column]; j++){ // Check if the neighbor is AGR too if (column + i >= 0 && column + i < numCol && line + j >= 0 && line + j < numLine) { aux = (line + j) % 7; pixel_value = inputBuffer[aux * numCol + column]; if(!isnan(pixel_value)) if(!checkLandCode(pixel_value)) output[column] = false; } } } } column += blockDim.x * gridDim.x; } } void testLandCoverHomogeneity(TIFF* landCover, TIFF* mask){ uint32 height_band, width_band; TIFFGetField(landCover, TIFFTAG_IMAGELENGTH, &height_band); TIFFGetField(landCover, TIFFTAG_IMAGEWIDTH, &width_band); double* buffer = (double *) malloc(7 * width_band * sizeof(double)); int relation[7] = {-1, -1, -1, -1, -1, -1, -1}, aux; for(int line = 0; line < height_band; line++) { // Create the respective line of the binary map of eligibles pixels int mask_line[width_band]; for(int column = 0; column < width_band; column++) { int pixel_value; aux = line % 7; if(relation[aux] != line) { read_line_tiff(landCover, buffer + aux * width_band, line); relation[aux] = line; } pixel_value = buffer[aux * width_band + column]; mask_line[column] = false; if(checkLandCode(pixel_value)) { //Verify if the pixel is an AGR pixel mask_line[column] = true; for(int i = -3; i <= 3 && mask_line[column]; i++){ for(int j = -3; j <= 3 && mask_line[column]; j++){ // Check if the neighbor is AGR too if (column + i >= 0 && column + i < width_band && line + j >= 0 && line + j < height_band) { aux = (line + j) % 7; if(relation[aux] != (line + j)) { read_line_tiff(landCover, buffer + aux * width_band, line + j); relation[aux] = (line + j); } pixel_value = buffer[aux * width_band + column]; if(!std::isnan(pixel_value)) if(!checkLandCode(pixel_value)) mask_line[column] = false; } } } } } write_line_tiff(mask, mask_line, line); } // for(int i = 0; i < 7; i++){ // free(buffer[i]); // } free(buffer); } int main(int argc, char **argv) { std::string landCoverPath = argv[1]; std::string outputPath = argv[2]; std::string outputCPU = outputPath + "/CPU.tif"; std::string outputGPU = outputPath + "/GPU.tif"; TIFF* landCover = TIFFOpen(landCoverPath.c_str(), "rm"); uint32 height_band, width_band; TIFFGetField(landCover, TIFFTAG_IMAGEWIDTH, &width_band); TIFFGetField(landCover, TIFFTAG_IMAGELENGTH, &height_band); TIFF* CPU = TIFFOpen(outputCPU.c_str(), "w8m"); setup(CPU, width_band, height_band, 32, 2); testLandCoverHomogeneity(landCover, CPU); TIFFClose(CPU); TIFF* GPU = TIFFOpen(outputGPU.c_str(), "w8m"); setup(GPU, width_band, height_band, 32, 2); double* buffer = (double *) malloc(7 * width_band * sizeof(double)); int* 
output_line = (int*) malloc(width_band * sizeof(int)); double* buffer_dev; cudaMalloc((void**) &buffer_dev, 7 * width_band * sizeof(double*)); int* output_dev; cudaMalloc((void**) &output_dev, width_band * sizeof(int*)); int relation[7] = {-1, -1, -1, -1, -1, -1, -1}; for(int line = 0; line < height_band; line++) { for(int i = -3; i < 4; i++) { if(line + i >= 0 && line + i < height_band){ if(relation[(line + i) % 7] != (line + i)) { read_line_tiff(landCover, buffer + ((line + i) % 7) * width_band, line + i); relation[(line + i) % 7] = line + i; } } } cudaMemcpy(buffer_dev, buffer, 7 * width_band * sizeof(double), cudaMemcpyHostToDevice); landCoverHomogeneity<<< (width_band + 1) / WORK_SIZE , WORK_SIZE>>>(buffer_dev, output_dev, line, width_band, height_band); cudaMemcpy(output_line, output_dev, width_band * sizeof(int), cudaMemcpyDeviceToHost); write_line_tiff(GPU, output_line, line); } free(buffer); free(output_line); cudaFree(buffer_dev); cudaFree(output_dev); TIFFClose(landCover); TIFFClose(GPU); return 0; }
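The launch above uses (width_band + 1) / WORK_SIZE blocks and relies on the kernel's stride loop (column += blockDim.x * gridDim.x) to cover the remaining columns; for rasters narrower than WORK_SIZE that integer division rounds down to zero blocks and nothing runs. A standalone sketch of the usual ceiling-division sizing, shown for comparison rather than as a change to the file above:

#include <cstdio>

int main() {
    const unsigned int WORK_SIZE = 256;              // as in the file above
    unsigned int width_band = 100;                   // illustrative narrow raster
    unsigned int blocks_as_launched = (width_band + 1) / WORK_SIZE;          // 0, so nothing runs
    unsigned int blocks_ceil = (width_band + WORK_SIZE - 1) / WORK_SIZE;     // 1
    std::printf("as launched: %u blocks, ceiling division: %u blocks\n",
                blocks_as_launched, blocks_ceil);
    return 0;
}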
03f70667fd6ebf019fc7062cbbf2f6de8fd2ddc2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or bpied warranties, including, but not limited to, the bpied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. 
// //M*/ #if !defined CUDA_DISABLER #include "opencv2/gpu/device/common.hpp" #include "opencv2/gpu/device/vec_traits.hpp" #include "opencv2/gpu/device/limits.hpp" namespace cv { namespace gpu { namespace device { namespace bgfg_gmg { __constant__ int c_width; __constant__ int c_height; __constant__ float c_minVal; __constant__ float c_maxVal; __constant__ int c_quantizationLevels; __constant__ float c_backgroundPrior; __constant__ float c_decisionThreshold; __constant__ int c_maxFeatures; __constant__ int c_numInitializationFrames; void loadConstants(int width, int height, float minVal, float maxVal, int quantizationLevels, float backgroundPrior, float decisionThreshold, int maxFeatures, int numInitializationFrames) { cudaSafeCall( hipMemcpyToSymbol(c_width, &width, sizeof(width)) ); cudaSafeCall( hipMemcpyToSymbol(c_height, &height, sizeof(height)) ); cudaSafeCall( hipMemcpyToSymbol(c_minVal, &minVal, sizeof(minVal)) ); cudaSafeCall( hipMemcpyToSymbol(c_maxVal, &maxVal, sizeof(maxVal)) ); cudaSafeCall( hipMemcpyToSymbol(c_quantizationLevels, &quantizationLevels, sizeof(quantizationLevels)) ); cudaSafeCall( hipMemcpyToSymbol(c_backgroundPrior, &backgroundPrior, sizeof(backgroundPrior)) ); cudaSafeCall( hipMemcpyToSymbol(c_decisionThreshold, &decisionThreshold, sizeof(decisionThreshold)) ); cudaSafeCall( hipMemcpyToSymbol(c_maxFeatures, &maxFeatures, sizeof(maxFeatures)) ); cudaSafeCall( hipMemcpyToSymbol(c_numInitializationFrames, &numInitializationFrames, sizeof(numInitializationFrames)) ); } __device__ float findFeature(const int color, const PtrStepi& colors, const PtrStepf& weights, const int x, const int y, const int nfeatures) { for (int i = 0, fy = y; i < nfeatures; ++i, fy += c_height) { if (color == colors(fy, x)) return weights(fy, x); } // not in histogram, so return 0. 
return 0.0f; } __device__ void normalizeHistogram(PtrStepf weights, const int x, const int y, const int nfeatures) { float total = 0.0f; for (int i = 0, fy = y; i < nfeatures; ++i, fy += c_height) total += weights(fy, x); if (total != 0.0f) { for (int i = 0, fy = y; i < nfeatures; ++i, fy += c_height) weights(fy, x) /= total; } } __device__ bool insertFeature(const int color, const float weight, PtrStepi colors, PtrStepf weights, const int x, const int y, int& nfeatures) { for (int i = 0, fy = y; i < nfeatures; ++i, fy += c_height) { if (color == colors(fy, x)) { // feature in histogram weights(fy, x) += weight; return false; } } if (nfeatures == c_maxFeatures) { // discard oldest feature int idx = -1; float minVal = numeric_limits<float>::max(); for (int i = 0, fy = y; i < nfeatures; ++i, fy += c_height) { const float w = weights(fy, x); if (w < minVal) { minVal = w; idx = fy; } } colors(idx, x) = color; weights(idx, x) = weight; return false; } colors(nfeatures * c_height + y, x) = color; weights(nfeatures * c_height + y, x) = weight; ++nfeatures; return true; } namespace detail { template <int cn> struct Quantization { template <typename T> __device__ static int apply(const T& val) { int res = 0; res |= static_cast<int>((val.x - c_minVal) * c_quantizationLevels / (c_maxVal - c_minVal)); res |= static_cast<int>((val.y - c_minVal) * c_quantizationLevels / (c_maxVal - c_minVal)) << 8; res |= static_cast<int>((val.z - c_minVal) * c_quantizationLevels / (c_maxVal - c_minVal)) << 16; return res; } }; template <> struct Quantization<1> { template <typename T> __device__ static int apply(T val) { return static_cast<int>((val - c_minVal) * c_quantizationLevels / (c_maxVal - c_minVal)); } }; } template <typename T> struct Quantization : detail::Quantization<VecTraits<T>::cn> {}; template <typename SrcT> __global__ void update(const PtrStep<SrcT> frame, PtrStepb fgmask, PtrStepi colors_, PtrStepf weights_, PtrStepi nfeatures_, const int frameNum, const float learningRate, const bool updateBackgroundModel) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= c_width || y >= c_height) return; const SrcT pix = frame(y, x); const int newFeatureColor = Quantization<SrcT>::apply(pix); int nfeatures = nfeatures_(y, x); if (frameNum >= c_numInitializationFrames) { // typical operation const float weight = findFeature(newFeatureColor, colors_, weights_, x, y, nfeatures); // see Godbehere, Matsukawa, Goldberg (2012) for reasoning behind this implementation of Bayes rule const float posterior = (weight * c_backgroundPrior) / (weight * c_backgroundPrior + (1.0f - weight) * (1.0f - c_backgroundPrior)); const bool isForeground = ((1.0f - posterior) > c_decisionThreshold); fgmask(y, x) = (uchar)(-isForeground); // update histogram. 
if (updateBackgroundModel) { for (int i = 0, fy = y; i < nfeatures; ++i, fy += c_height) weights_(fy, x) *= 1.0f - learningRate; bool inserted = insertFeature(newFeatureColor, learningRate, colors_, weights_, x, y, nfeatures); if (inserted) { normalizeHistogram(weights_, x, y, nfeatures); nfeatures_(y, x) = nfeatures; } } } else if (updateBackgroundModel) { // training-mode update insertFeature(newFeatureColor, 1.0f, colors_, weights_, x, y, nfeatures); if (frameNum == c_numInitializationFrames - 1) normalizeHistogram(weights_, x, y, nfeatures); } } template <typename SrcT> void update_gpu(PtrStepSzb frame, PtrStepb fgmask, PtrStepSzi colors, PtrStepf weights, PtrStepi nfeatures, int frameNum, float learningRate, bool updateBackgroundModel, hipStream_t stream) { const dim3 block(32, 8); const dim3 grid(divUp(frame.cols, block.x), divUp(frame.rows, block.y)); cudaSafeCall( hipFuncSetCacheConfig(update<SrcT>, hipFuncCachePreferL1) ); hipLaunchKernelGGL(( update<SrcT>), dim3(grid), dim3(block), 0, stream, (PtrStepSz<SrcT>) frame, fgmask, colors, weights, nfeatures, frameNum, learningRate, updateBackgroundModel); cudaSafeCall( hipGetLastError() ); if (stream == 0) cudaSafeCall( hipDeviceSynchronize() ); } template void update_gpu<uchar >(PtrStepSzb frame, PtrStepb fgmask, PtrStepSzi colors, PtrStepf weights, PtrStepi nfeatures, int frameNum, float learningRate, bool updateBackgroundModel, hipStream_t stream); template void update_gpu<uchar3 >(PtrStepSzb frame, PtrStepb fgmask, PtrStepSzi colors, PtrStepf weights, PtrStepi nfeatures, int frameNum, float learningRate, bool updateBackgroundModel, hipStream_t stream); template void update_gpu<uchar4 >(PtrStepSzb frame, PtrStepb fgmask, PtrStepSzi colors, PtrStepf weights, PtrStepi nfeatures, int frameNum, float learningRate, bool updateBackgroundModel, hipStream_t stream); template void update_gpu<ushort >(PtrStepSzb frame, PtrStepb fgmask, PtrStepSzi colors, PtrStepf weights, PtrStepi nfeatures, int frameNum, float learningRate, bool updateBackgroundModel, hipStream_t stream); template void update_gpu<ushort3>(PtrStepSzb frame, PtrStepb fgmask, PtrStepSzi colors, PtrStepf weights, PtrStepi nfeatures, int frameNum, float learningRate, bool updateBackgroundModel, hipStream_t stream); template void update_gpu<ushort4>(PtrStepSzb frame, PtrStepb fgmask, PtrStepSzi colors, PtrStepf weights, PtrStepi nfeatures, int frameNum, float learningRate, bool updateBackgroundModel, hipStream_t stream); template void update_gpu<float >(PtrStepSzb frame, PtrStepb fgmask, PtrStepSzi colors, PtrStepf weights, PtrStepi nfeatures, int frameNum, float learningRate, bool updateBackgroundModel, hipStream_t stream); template void update_gpu<float3 >(PtrStepSzb frame, PtrStepb fgmask, PtrStepSzi colors, PtrStepf weights, PtrStepi nfeatures, int frameNum, float learningRate, bool updateBackgroundModel, hipStream_t stream); template void update_gpu<float4 >(PtrStepSzb frame, PtrStepb fgmask, PtrStepSzi colors, PtrStepf weights, PtrStepi nfeatures, int frameNum, float learningRate, bool updateBackgroundModel, hipStream_t stream); } }}} #endif /* CUDA_DISABLER */
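Aside (illustration only, not part of the file pair above): the detail::Quantization helpers in this file turn a pixel into a single int histogram key by quantizing each channel against [c_minVal, c_maxVal] and packing the per-channel levels into separate bytes. A minimal host-side sketch of that packing, using made-up stand-ins for the __constant__ parameters that loadConstants() would normally set:

#include <cstdio>

// Hypothetical stand-ins for the __constant__ values set by loadConstants().
static const float kMinVal = 0.0f;
static const float kMaxVal = 255.0f;
static const int   kQuantizationLevels = 16;

// Mirrors detail::Quantization<3>::apply: each channel is quantized against
// [kMinVal, kMaxVal] into an integer level and packed into its own byte.
static int quantizeColor(float c0, float c1, float c2)
{
    int res = 0;
    res |= static_cast<int>((c0 - kMinVal) * kQuantizationLevels / (kMaxVal - kMinVal));
    res |= static_cast<int>((c1 - kMinVal) * kQuantizationLevels / (kMaxVal - kMinVal)) << 8;
    res |= static_cast<int>((c2 - kMinVal) * kQuantizationLevels / (kMaxVal - kMinVal)) << 16;
    return res;
}

int main()
{
    // Two visually similar pixels collapse onto the same key, which is what
    // keeps the per-pixel feature histogram small (at most c_maxFeatures entries).
    printf("%d %d\n", quantizeColor(10.f, 130.f, 200.f), quantizeColor(12.f, 135.f, 205.f));
    return 0;
}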
03f70667fd6ebf019fc7062cbbf2f6de8fd2ddc2.cu
/*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. 
// //M*/ #if !defined CUDA_DISABLER #include "opencv2/gpu/device/common.hpp" #include "opencv2/gpu/device/vec_traits.hpp" #include "opencv2/gpu/device/limits.hpp" namespace cv { namespace gpu { namespace device { namespace bgfg_gmg { __constant__ int c_width; __constant__ int c_height; __constant__ float c_minVal; __constant__ float c_maxVal; __constant__ int c_quantizationLevels; __constant__ float c_backgroundPrior; __constant__ float c_decisionThreshold; __constant__ int c_maxFeatures; __constant__ int c_numInitializationFrames; void loadConstants(int width, int height, float minVal, float maxVal, int quantizationLevels, float backgroundPrior, float decisionThreshold, int maxFeatures, int numInitializationFrames) { cudaSafeCall( cudaMemcpyToSymbol(c_width, &width, sizeof(width)) ); cudaSafeCall( cudaMemcpyToSymbol(c_height, &height, sizeof(height)) ); cudaSafeCall( cudaMemcpyToSymbol(c_minVal, &minVal, sizeof(minVal)) ); cudaSafeCall( cudaMemcpyToSymbol(c_maxVal, &maxVal, sizeof(maxVal)) ); cudaSafeCall( cudaMemcpyToSymbol(c_quantizationLevels, &quantizationLevels, sizeof(quantizationLevels)) ); cudaSafeCall( cudaMemcpyToSymbol(c_backgroundPrior, &backgroundPrior, sizeof(backgroundPrior)) ); cudaSafeCall( cudaMemcpyToSymbol(c_decisionThreshold, &decisionThreshold, sizeof(decisionThreshold)) ); cudaSafeCall( cudaMemcpyToSymbol(c_maxFeatures, &maxFeatures, sizeof(maxFeatures)) ); cudaSafeCall( cudaMemcpyToSymbol(c_numInitializationFrames, &numInitializationFrames, sizeof(numInitializationFrames)) ); } __device__ float findFeature(const int color, const PtrStepi& colors, const PtrStepf& weights, const int x, const int y, const int nfeatures) { for (int i = 0, fy = y; i < nfeatures; ++i, fy += c_height) { if (color == colors(fy, x)) return weights(fy, x); } // not in histogram, so return 0. 
return 0.0f; } __device__ void normalizeHistogram(PtrStepf weights, const int x, const int y, const int nfeatures) { float total = 0.0f; for (int i = 0, fy = y; i < nfeatures; ++i, fy += c_height) total += weights(fy, x); if (total != 0.0f) { for (int i = 0, fy = y; i < nfeatures; ++i, fy += c_height) weights(fy, x) /= total; } } __device__ bool insertFeature(const int color, const float weight, PtrStepi colors, PtrStepf weights, const int x, const int y, int& nfeatures) { for (int i = 0, fy = y; i < nfeatures; ++i, fy += c_height) { if (color == colors(fy, x)) { // feature in histogram weights(fy, x) += weight; return false; } } if (nfeatures == c_maxFeatures) { // discard oldest feature int idx = -1; float minVal = numeric_limits<float>::max(); for (int i = 0, fy = y; i < nfeatures; ++i, fy += c_height) { const float w = weights(fy, x); if (w < minVal) { minVal = w; idx = fy; } } colors(idx, x) = color; weights(idx, x) = weight; return false; } colors(nfeatures * c_height + y, x) = color; weights(nfeatures * c_height + y, x) = weight; ++nfeatures; return true; } namespace detail { template <int cn> struct Quantization { template <typename T> __device__ static int apply(const T& val) { int res = 0; res |= static_cast<int>((val.x - c_minVal) * c_quantizationLevels / (c_maxVal - c_minVal)); res |= static_cast<int>((val.y - c_minVal) * c_quantizationLevels / (c_maxVal - c_minVal)) << 8; res |= static_cast<int>((val.z - c_minVal) * c_quantizationLevels / (c_maxVal - c_minVal)) << 16; return res; } }; template <> struct Quantization<1> { template <typename T> __device__ static int apply(T val) { return static_cast<int>((val - c_minVal) * c_quantizationLevels / (c_maxVal - c_minVal)); } }; } template <typename T> struct Quantization : detail::Quantization<VecTraits<T>::cn> {}; template <typename SrcT> __global__ void update(const PtrStep<SrcT> frame, PtrStepb fgmask, PtrStepi colors_, PtrStepf weights_, PtrStepi nfeatures_, const int frameNum, const float learningRate, const bool updateBackgroundModel) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= c_width || y >= c_height) return; const SrcT pix = frame(y, x); const int newFeatureColor = Quantization<SrcT>::apply(pix); int nfeatures = nfeatures_(y, x); if (frameNum >= c_numInitializationFrames) { // typical operation const float weight = findFeature(newFeatureColor, colors_, weights_, x, y, nfeatures); // see Godbehere, Matsukawa, Goldberg (2012) for reasoning behind this implementation of Bayes rule const float posterior = (weight * c_backgroundPrior) / (weight * c_backgroundPrior + (1.0f - weight) * (1.0f - c_backgroundPrior)); const bool isForeground = ((1.0f - posterior) > c_decisionThreshold); fgmask(y, x) = (uchar)(-isForeground); // update histogram. 
if (updateBackgroundModel) { for (int i = 0, fy = y; i < nfeatures; ++i, fy += c_height) weights_(fy, x) *= 1.0f - learningRate; bool inserted = insertFeature(newFeatureColor, learningRate, colors_, weights_, x, y, nfeatures); if (inserted) { normalizeHistogram(weights_, x, y, nfeatures); nfeatures_(y, x) = nfeatures; } } } else if (updateBackgroundModel) { // training-mode update insertFeature(newFeatureColor, 1.0f, colors_, weights_, x, y, nfeatures); if (frameNum == c_numInitializationFrames - 1) normalizeHistogram(weights_, x, y, nfeatures); } } template <typename SrcT> void update_gpu(PtrStepSzb frame, PtrStepb fgmask, PtrStepSzi colors, PtrStepf weights, PtrStepi nfeatures, int frameNum, float learningRate, bool updateBackgroundModel, cudaStream_t stream) { const dim3 block(32, 8); const dim3 grid(divUp(frame.cols, block.x), divUp(frame.rows, block.y)); cudaSafeCall( cudaFuncSetCacheConfig(update<SrcT>, cudaFuncCachePreferL1) ); update<SrcT><<<grid, block, 0, stream>>>((PtrStepSz<SrcT>) frame, fgmask, colors, weights, nfeatures, frameNum, learningRate, updateBackgroundModel); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } template void update_gpu<uchar >(PtrStepSzb frame, PtrStepb fgmask, PtrStepSzi colors, PtrStepf weights, PtrStepi nfeatures, int frameNum, float learningRate, bool updateBackgroundModel, cudaStream_t stream); template void update_gpu<uchar3 >(PtrStepSzb frame, PtrStepb fgmask, PtrStepSzi colors, PtrStepf weights, PtrStepi nfeatures, int frameNum, float learningRate, bool updateBackgroundModel, cudaStream_t stream); template void update_gpu<uchar4 >(PtrStepSzb frame, PtrStepb fgmask, PtrStepSzi colors, PtrStepf weights, PtrStepi nfeatures, int frameNum, float learningRate, bool updateBackgroundModel, cudaStream_t stream); template void update_gpu<ushort >(PtrStepSzb frame, PtrStepb fgmask, PtrStepSzi colors, PtrStepf weights, PtrStepi nfeatures, int frameNum, float learningRate, bool updateBackgroundModel, cudaStream_t stream); template void update_gpu<ushort3>(PtrStepSzb frame, PtrStepb fgmask, PtrStepSzi colors, PtrStepf weights, PtrStepi nfeatures, int frameNum, float learningRate, bool updateBackgroundModel, cudaStream_t stream); template void update_gpu<ushort4>(PtrStepSzb frame, PtrStepb fgmask, PtrStepSzi colors, PtrStepf weights, PtrStepi nfeatures, int frameNum, float learningRate, bool updateBackgroundModel, cudaStream_t stream); template void update_gpu<float >(PtrStepSzb frame, PtrStepb fgmask, PtrStepSzi colors, PtrStepf weights, PtrStepi nfeatures, int frameNum, float learningRate, bool updateBackgroundModel, cudaStream_t stream); template void update_gpu<float3 >(PtrStepSzb frame, PtrStepb fgmask, PtrStepSzi colors, PtrStepf weights, PtrStepi nfeatures, int frameNum, float learningRate, bool updateBackgroundModel, cudaStream_t stream); template void update_gpu<float4 >(PtrStepSzb frame, PtrStepb fgmask, PtrStepSzi colors, PtrStepf weights, PtrStepi nfeatures, int frameNum, float learningRate, bool updateBackgroundModel, cudaStream_t stream); } }}} #endif /* CUDA_DISABLER */
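Aside (illustration only): the foreground test in update<SrcT> above reduces to one scalar rule, posterior = w*p / (w*p + (1-w)*(1-p)), where w is the histogram weight of the observed (quantized) color and p is the background prior; the pixel is labelled foreground when 1 - posterior exceeds the decision threshold. A small host-side sketch of that rule with assumed parameter values (the 0.8/0.8 pair is a placeholder, not necessarily the library's defaults):

#include <cstdio>

// Hypothetical parameter values; the real ones live in __constant__ memory
// (c_backgroundPrior, c_decisionThreshold) and are set by loadConstants().
static const float kBackgroundPrior   = 0.8f;
static const float kDecisionThreshold = 0.8f;

// Same Bayes rule as in update<SrcT>: weight is the histogram mass of the
// observed (quantized) color at this pixel, as returned by findFeature().
static bool isForeground(float weight)
{
    const float posterior = (weight * kBackgroundPrior)
        / (weight * kBackgroundPrior + (1.0f - weight) * (1.0f - kBackgroundPrior));
    return (1.0f - posterior) > kDecisionThreshold;
}

int main()
{
    // A color never seen before (weight 0) or rarely seen (weight 0.05) comes
    // out as foreground; a color owning most of the histogram mass does not.
    printf("%d %d %d\n", isForeground(0.0f), isForeground(0.05f), isForeground(0.6f));
    return 0;
}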
e20bf04a19a84ae85a1de217af5474af0fc3719e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* * als.cu * * Created on: Feb 10, 2015 * Author: Wei Tan ([email protected]) * Alternating Least Square for Matrix Factorization on CUDA 7.0+ * Code optimized for F = 100, and on cc 3.5, 3.7 platforms. Also tested in cc 5.2 */ //do not use magma and fp16 by default //#define CUMF_USE_MAGMA //#define CUMF_USE_HALF #include "als.h" #include "host_utilities.h" #include <fstream> #include <assert.h> #include <hip/hip_fp16.h> #ifdef CUMF_USE_HALF #define SCAN_BATCH 24 #else #define SCAN_BATCH 24 #endif #ifdef CUMF_USE_MAGMA #include "flops.h" #include "magma.h" #include "magma_lapack.h" #include "testings.h" #endif __global__ void fp32Array2fp16Array(const float * fp32Array, half* fp16Array, const int size) { int i = blockDim.x*blockIdx.x + threadIdx.x; if (i < size) { fp16Array[i] = __float2half(fp32Array[i]); } } int updateX(const int batch_size, const int batch_offset, float * ythetaT, float * tt, float * XT, hipblasHandle_t handle, const int m, const int n, const int f, const int nnz, float** devPtrTTHost, float **devPtrYthetaTHost){ #ifdef DEBUG float elapsed; struct timeval tv0, tv1, tv2; gettimeofday(&tv0, NULL); printf("*******Batch LU factorization of tt.\n"); #endif //pointers needed by batch op float **devPtrTT = 0; int *INFO; for (int k = 0; k < batch_size; k++) { devPtrTTHost[k] = &tt[k * f * f]; } cudacall(hipMalloc((void** ) &devPtrTT, batch_size * sizeof(*devPtrTT))); cudacall(hipMemcpy(devPtrTT, devPtrTTHost, batch_size * sizeof(*devPtrTT),hipMemcpyHostToDevice)); //cudacall( hipMalloc(&P, f * batch_size * sizeof(int)) ); cudacall( hipMalloc(&INFO, batch_size * sizeof(int) )); cublascall(hipblasSgetrfBatched(handle, f, devPtrTT, f, NULL, INFO, batch_size)); hipDeviceSynchronize(); #ifdef DEBUG gettimeofday(&tv1, NULL); elapsed = (tv1.tv_sec - tv0.tv_sec) + (tv1.tv_usec - tv0.tv_usec) / 1000000.0; printf("\t %f seconds. 
\n", elapsed); printf("*******solve: tt * XT = ythetaT use cublas, with LU decomposition.\n"); #endif float **devPtrYthetaT = 0; for (int k = 0; k < batch_size; k++) { devPtrYthetaTHost[k] = &ythetaT[batch_offset * f + k * f]; } cudacall(hipMalloc((void** ) &devPtrYthetaT, batch_size * sizeof(*devPtrYthetaT))); cudacall(hipMemcpy(devPtrYthetaT, devPtrYthetaTHost, batch_size * sizeof(*devPtrYthetaT), hipMemcpyHostToDevice)); int * info2 = (int *) malloc(sizeof(int)); cublascall( hipblasSgetrsBatched(handle, HIPBLAS_OP_N, f, 1, (const float ** ) devPtrTT, f, NULL, devPtrYthetaT, f, info2, batch_size) ); hipDeviceSynchronize(); hipError_t cudaStat1 = hipGetLastError(); if (cudaStat1 != hipSuccess) { fprintf(stderr,"Failed to launch hipblasSgetrsBatched (error code: %s)!\n", hipGetErrorString(cudaStat1)); exit(EXIT_FAILURE); } cudacall( hipMemcpy(&XT[batch_offset * f], &ythetaT[batch_offset * f], batch_size * f * sizeof(float), hipMemcpyDeviceToDevice) ); #ifdef DEBUG gettimeofday(&tv2, NULL); elapsed = (tv2.tv_sec - tv1.tv_sec) + (tv2.tv_usec - tv1.tv_usec) / 1000000.0; printf("\t %f seconds. \n", elapsed); #endif cudacall(hipFree(devPtrTT)); //cudacall(hipFree(P)); cudacall(hipFree(INFO)); cudacall(hipFree(devPtrYthetaT)); return 0; } int updateTheta(const int batch_size, const int batch_offset, float * xx, float * yTXT, float * thetaT, hipblasHandle_t handle, const int m, const int n, const int f, const int nnz, float ** devPtrXXHost, float **devPtrYTXTHost ){ #ifdef DEBUG float elapsed; struct timeval tv0, tv1, tv2; gettimeofday(&tv0, NULL); printf("*******LU factorize xx.\n"); #endif float **devPtrXX = 0; for (int k = 0; k < batch_size; k++) { devPtrXXHost[k] = &xx[k * f * f]; } cudacall(hipMalloc((void** ) &devPtrXX, batch_size * sizeof(*devPtrXX))); cudacall(hipMemcpy(devPtrXX, devPtrXXHost, batch_size * sizeof(*devPtrXX), hipMemcpyHostToDevice)); int *INFO; //cudacall(hipMalloc(&P, f * batch_size * sizeof(int))); cudacall(hipMalloc(&INFO, batch_size * sizeof(int))); cublascall(hipblasSgetrfBatched(handle, f, devPtrXX, f, NULL, INFO, batch_size)); hipDeviceSynchronize(); #ifdef DEBUG gettimeofday(&tv1, NULL); elapsed = (tv1.tv_sec - tv0.tv_sec) + (tv1.tv_usec - tv0.tv_usec) / 1000000.0; printf("\t %f seconds. \n", elapsed); printf("******* solve xx * thetaT = yTXT with CUDA 7.\n"); #endif float **devPtrYTXT = 0; for (int k = 0; k < batch_size; k++) { devPtrYTXTHost[k] = &yTXT[batch_offset * f + k * f]; } cudacall(hipMalloc((void** ) &devPtrYTXT, batch_size * sizeof(*devPtrYTXT))); cudacall(hipMemcpy(devPtrYTXT, devPtrYTXTHost, batch_size * sizeof(*devPtrYTXT),hipMemcpyHostToDevice)); int * info2 = (int *) malloc(sizeof(int)); cublascall( hipblasSgetrsBatched(handle, HIPBLAS_OP_N, f, 1, (const float ** ) devPtrXX, f, NULL, devPtrYTXT, f, info2, batch_size) ); hipDeviceSynchronize(); hipError_t cudaStat1 = hipGetLastError(); if (cudaStat1 != hipSuccess) { fprintf(stderr,"Failed to launch hipblasSgetrsBatched (error code: %s)!\n", hipGetErrorString(cudaStat1)); exit(EXIT_FAILURE); } cudacall( hipMemcpy( &thetaT[batch_offset * f], &yTXT[batch_offset * f], batch_size * f * sizeof(float), hipMemcpyDeviceToDevice) ); #ifdef DEBUG gettimeofday(&tv2, NULL); elapsed = (tv2.tv_sec - tv1.tv_sec) + (tv2.tv_usec - tv1.tv_usec) / 1000000.0; printf("\t %f seconds. 
\n", elapsed); #endif hipFree(devPtrXX); hipFree(INFO); free(info2); hipFree(devPtrYTXT); return 0; } #ifdef USE_MAGMA int updateThetaMagma(const int batch_size, const int batch_offset, float * xx, float * yTXT, float * thetaT, hipblasHandle_t handle, const int m, const int n, const int f, const int nnz, float ** devPtrXXHost, float **devPtrYTXTHost ){ //variables for timing float elapsed; struct timeval tv1, tv2; gettimeofday(&tv1, NULL); printf("*******magma Cholesky factorization.\n"); magma_init(); magma_opts opts( MagmaOptsBatched ); char *parray[10]; char **x; x = &parray[0]; opts.parse_opts(1,x); magma_queue_t queue = opts.queue; int min_batch = batch_size; int info = 0; int * dinfo_array = 0; float **dA_array = NULL; float **dB_array = NULL; float **hA_array = (float**) malloc(min_batch * sizeof(hA_array[0])); float **hB_array = (float**) malloc(min_batch * sizeof(hB_array[0])); cudacall (hipMalloc((void**) &dinfo_array, min_batch*sizeof(int))); cudacall(hipMalloc((void** ) &dA_array, min_batch * sizeof(*dA_array))); cudacall(hipMalloc((void** ) &dB_array, min_batch * sizeof(*dB_array))); for (int k = 0; k < batch_size; k++) { hA_array[k] = &xx[k * f * f]; hB_array[k] = &yTXT[batch_offset * f + k * f]; } cudacall(hipMemcpy(dA_array, hA_array, min_batch * sizeof(*dA_array), hipMemcpyHostToDevice)); cudacall(hipMemcpy(dB_array, hB_array, min_batch * sizeof(*dB_array), hipMemcpyHostToDevice)); info = magma_sposv_batched(MagmaLower, f, 1, dA_array, f, dB_array, f, dinfo_array, min_batch, queue); magma_int_t *dipiv; magma_int_t **dipiv_array = NULL; TESTING_MALLOC_DEV( dipiv, magma_int_t, f * min_batch ); TESTING_MALLOC_DEV( dipiv_array, magma_int_t*, min_batch ); magma_iset_pointer( dipiv_array, dipiv, 1, 0, 0, f, min_batch, queue ); //info = magma_sgesv_nopiv_batched(f, 1, dA_array, f, dB_array, f, dinfo_array, min_batch, queue); //info = magma_sgesv_batched(f, 1, dA_array, f, dipiv_array, dB_array, f, dinfo_array, min_batch, queue); int *cpu_info = (int*) malloc(min_batch*sizeof(int)); cudacall(hipMemcpy(cpu_info, dinfo_array, min_batch * sizeof(int),hipMemcpyDeviceToHost)); cudacall( hipMemcpy(&thetaT[batch_offset * f], &yTXT[batch_offset * f], batch_size * f * sizeof(float), hipMemcpyDeviceToDevice) ); for(int i = 0; i < min_batch; i++){ if(cpu_info[i] != 0 ){ printf("magma_sposv_batched matrix %d returned internal error %d\n",i, (int)cpu_info[i] ); } } if (info != 0) printf("magma_sposv_batched returned argument error %d: %s.\n", (int) info, magma_strerror( info )); hipFree(dA_array); hipFree(dB_array); hipFree( dinfo_array ); hipFree(dipiv_array); hipFree(dipiv); free(cpu_info); free(hA_array); free(hB_array); //free(x); magma_finalize(); gettimeofday(&tv2, NULL); elapsed = (tv2.tv_sec - tv1.tv_sec) + (tv2.tv_usec - tv1.tv_usec) / 1000000.0; printf("\t %f seconds. 
\n", elapsed); return 0; } #endif __global__ void RMSE(const float * csrVal, const int* cooRowIndex, const int* csrColIndex, const float * __restrict__ thetaT, const float * __restrict__ XT, float * error, const int nnz, const int error_size, const int f) { int i = blockDim.x*blockIdx.x + threadIdx.x; if (i < nnz) { int row = cooRowIndex[i]; int col = csrColIndex[i]; float e = csrVal[i]; //if(i%1000000==0) printf("row: %d, col: %d, csrVal[%d]: %f.\t", row, col, i, e); for (int k = 0; k < f; k++) { e -= __ldg(&thetaT[f * col + k]) * __ldg(&XT[f * row + k]); } atomicAdd(&error[i%error_size], e*e); //error[i] = e*e; //if(i%1000000==0) printf("error[%d]: %f.\n", i, e); } } //using fp16 as thetaT's format //using fp16 in computate seems causing register pressure since half intrinsics cannot be used. //using fp16 in compute also does not converge. not sure if the code is incorrect, or ALS cannot tolerate half-precision __global__ void __launch_bounds__(64, 6) get_hermitian100WithHalf(const int batch_offset, float* tt, const int* csrRowIndex, const int* csrColIndex, const float lambda, const int m, const int F, const half* __restrict__ thetaT_fp16) { extern __shared__ float2 thetaTemp[]; int row = blockIdx.x + batch_offset; if (row < m) { //this block needs to handle end - start thetaT columns int start = csrRowIndex[row]; int end = csrRowIndex[row + 1]; //slide through [start, end] by window size SCAN_BATCH int iterations = (end - start - 1)/SCAN_BATCH + 1; float temp0= 0, temp1= 0, temp2= 0, temp3= 0, temp4= 0, temp5= 0, temp6= 0, temp7= 0, temp8= 0, temp9 = 0; float temp10= 0, temp11= 0, temp12= 0, temp13= 0, temp14= 0, temp15= 0, temp16= 0, temp17= 0, temp18= 0, temp19 = 0; float temp20= 0, temp21= 0, temp22= 0, temp23= 0, temp24= 0, temp25= 0, temp26= 0, temp27= 0, temp28= 0, temp29 = 0; float temp30= 0, temp31= 0, temp32= 0, temp33= 0, temp34= 0, temp35= 0, temp36= 0, temp37= 0, temp38= 0, temp39 = 0; float temp40= 0, temp41= 0, temp42= 0, temp43= 0, temp44= 0, temp45= 0, temp46= 0, temp47= 0, temp48= 0, temp49 = 0; float temp50= 0, temp51= 0, temp52= 0, temp53= 0, temp54= 0, temp55= 0, temp56= 0, temp57= 0, temp58= 0, temp59 = 0; float temp60= 0, temp61= 0, temp62= 0, temp63= 0, temp64= 0, temp65= 0, temp66= 0, temp67= 0, temp68= 0, temp69 = 0; float temp70= 0, temp71= 0, temp72= 0, temp73= 0, temp74= 0, temp75= 0, temp76= 0, temp77= 0, temp78= 0, temp79 = 0; float temp80= 0, temp81= 0, temp82= 0, temp83= 0, temp84= 0, temp85= 0, temp86= 0, temp87= 0, temp88= 0, temp89 = 0; float temp90= 0, temp91= 0, temp92= 0, temp93= 0, temp94= 0, temp95= 0, temp96= 0, temp97= 0, temp98= 0, temp99 = 0; int tile_x = 0; int tile_y = 0; int tile = F/10; for ( int i = 0; i < 10; i++){ int end = ((20-i)*(i+1))/2; if(threadIdx.x < end){ tile_x = i * tile; tile_y = (10 + threadIdx.x - end) * tile; break; } } //iteration: copy gmem-->smem; aggregate smem-->register for (int iter = 0; iter < iterations; iter ++){ //float2 theta; //copy texture --> smem, and sync //two layers: warp divergence unless we split at 32 //require: 32 >= SCAN_BATCH if(threadIdx.x < 2*32 ){ int index = threadIdx.x - (threadIdx.x/32)*32; //0 to 31; if(index < SCAN_BATCH){ if(iter*SCAN_BATCH + index < end - start){ //for (int k = 50*(threadIdx.x/32); k < 50*(threadIdx.x/32) + 50; k += 2){ //IMPORTANT: for loop has constant and identical start and end if(threadIdx.x < 32){ for (int k = 0; k < 50; k += 2){ half2 theta_half2 = __ldg((half2*)&thetaT_fp16[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k]); thetaTemp[index * F/2 + k/2] = 
__half22float2(theta_half2); //theta.x = __half2float(__ldg(&thetaT_fp16[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k])); //theta.y = __half2float(__ldg(&thetaT_fp16[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k+1])); //thetaTemp[index * F/2 + k/2] = theta; } } else { for (int k = 0; k < 50; k += 2){ half2 theta_half2 = __ldg((half2*)&thetaT_fp16[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k + 50]); thetaTemp[index * F/2 + k/2 + 25] = __half22float2(theta_half2); //theta.x = __half2float(__ldg(&thetaT_fp16[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k + 50])); //theta.y = __half2float(__ldg(&thetaT_fp16[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k + 51])); //thetaTemp[index * F/2 + k/2 + 25] = theta; } } } //must be the last iteration; no need to check //not enough theta to copy, set zero else memset(&thetaTemp[index*F/2], 0, F*sizeof(float)); } } __syncthreads(); //tile: 10*10 if(threadIdx.x < 55 ){ for(int k = 0; k < SCAN_BATCH; k++){ accumulate_in_registers(); } } } //end of iteration in copying from smem and aggregating in register __syncthreads(); if(threadIdx.x < 55 ){ //copy output to gmem int index = blockIdx.x*F*F; fill_lower_half_from_registers(); //symmetric if(tile_x!=tile_y){ fill_upper_half_from_registers(); } //regularization if(tile_x == tile_y){ for(int k = 0; k < tile; k++) tt[index + (tile_x+k)*(1+F)] += (end - start) * lambda; } } } } __global__ void __launch_bounds__(64, 6) get_hermitian100(const int batch_offset, float* tt, const int* csrRowIndex, const int* csrColIndex, const float lambda, const int m, const int F, const float* __restrict__ thetaT) { extern __shared__ float2 thetaTemp[]; int row = blockIdx.x + batch_offset; if (row < m) { //this block needs to handle end - start thetaT columns int start = csrRowIndex[row]; int end = csrRowIndex[row + 1]; //slide through [start, end] by window size SCAN_BATCH int iterations = (end - start - 1)/SCAN_BATCH + 1; float temp0= 0, temp1= 0, temp2= 0, temp3= 0, temp4= 0, temp5= 0, temp6= 0, temp7= 0, temp8= 0, temp9 = 0; float temp10= 0, temp11= 0, temp12= 0, temp13= 0, temp14= 0, temp15= 0, temp16= 0, temp17= 0, temp18= 0, temp19 = 0; float temp20= 0, temp21= 0, temp22= 0, temp23= 0, temp24= 0, temp25= 0, temp26= 0, temp27= 0, temp28= 0, temp29 = 0; float temp30= 0, temp31= 0, temp32= 0, temp33= 0, temp34= 0, temp35= 0, temp36= 0, temp37= 0, temp38= 0, temp39 = 0; float temp40= 0, temp41= 0, temp42= 0, temp43= 0, temp44= 0, temp45= 0, temp46= 0, temp47= 0, temp48= 0, temp49 = 0; float temp50= 0, temp51= 0, temp52= 0, temp53= 0, temp54= 0, temp55= 0, temp56= 0, temp57= 0, temp58= 0, temp59 = 0; float temp60= 0, temp61= 0, temp62= 0, temp63= 0, temp64= 0, temp65= 0, temp66= 0, temp67= 0, temp68= 0, temp69 = 0; float temp70= 0, temp71= 0, temp72= 0, temp73= 0, temp74= 0, temp75= 0, temp76= 0, temp77= 0, temp78= 0, temp79 = 0; float temp80= 0, temp81= 0, temp82= 0, temp83= 0, temp84= 0, temp85= 0, temp86= 0, temp87= 0, temp88= 0, temp89 = 0; float temp90= 0, temp91= 0, temp92= 0, temp93= 0, temp94= 0, temp95= 0, temp96= 0, temp97= 0, temp98= 0, temp99 = 0; int tile_x = 0; int tile_y = 0; int tile = F/10; for ( int i = 0; i < 10; i++){ int end = ((20-i)*(i+1))/2; if(threadIdx.x < end){ tile_x = i * tile; tile_y = (10 + threadIdx.x - end) * tile; break; } } //iteration: copy gmem-->smem; aggregate smem-->register for (int iter = 0; iter < iterations; iter ++){ float2 theta; //copy texture --> smem, and sync /* if(threadIdx.x < SCAN_BATCH){ if(iter*SCAN_BATCH + threadIdx.x < end - start){ 
for (int k = 0; k < F; k += 2){ theta.x = tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + threadIdx.x] + k); theta.y = tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + threadIdx.x] + k+1); thetaTemp[threadIdx.x * F/2 + k/2] = theta; } } //must be the last iteration; no need to check //not enough theta to copy, set zero else memset(&thetaTemp[threadIdx.x*F/2], 0, F*sizeof(float)); } */ //two layers: warp divergence unless we split at 32 //require 32 >= SCAN_BATCH if(threadIdx.x < 2*32 ){ //int index = threadIdx.x; int index = threadIdx.x - (threadIdx.x/32)*32; //0 to 31; if(index < SCAN_BATCH){ if(iter*SCAN_BATCH + index < end - start){ //for (int k = 50*(threadIdx.x/32); k < 50*(threadIdx.x/32) + 50; k += 2){ //IMPORTANT: for loop has constant and identical start and end if(threadIdx.x < 32){ for (int k = 0; k < 50; k += 2){ theta.x = __ldg(&thetaT[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k]); theta.y = __ldg(&thetaT[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k+1]); thetaTemp[index * F/2 + k/2] = theta; } } else { for (int k = 0; k < 50; k += 2){ theta.x = __ldg(&thetaT[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k + 50]); theta.y = __ldg(&thetaT[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k + 51]); thetaTemp[index * F/2 + k/2 + 25] = theta; } } } //must be the last iteration; no need to check //not enough theta to copy, set zero else memset(&thetaTemp[index*F/2], 0, F*sizeof(float)); } } /* //issue: not coalesced access to csrColIndex if(threadIdx.x < F && threadIdx.x%2 == 0){ for(int k = 0; k< SCAN_BATCH; k++){ if(iter*SCAN_BATCH + k < end - start){ theta.x = tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + k] + threadIdx.x); theta.y = tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + k] + threadIdx.x +1); thetaTemp[k * F/2 + threadIdx.x/2] = theta; } //must be the last iteration; no need to check //not enough theta to copy, set zero else memset(&thetaTemp[k*F/2 + threadIdx.x/2], 0, 2*sizeof(float)); } } */ /* int layers = blockDim.x/SCAN_BATCH; //100/30 = 3 //int height = blockDim.x/layers; //30 int y = threadIdx.x/SCAN_BATCH;//0,1,2,3; y==3 is not viable //min(y, (layers-1)) * height int y_start = y * 30;//0-29:0;30-59:30;60-89:60 int y_end = y_start + 30; //0-29:30;30-59:60;60-89:90 if(y >= layers - 1) y_end = F; //60-89:100 if(threadIdx.x - y_start < SCAN_BATCH){ if(iter*SCAN_BATCH + (threadIdx.x - y_start) < end - start){ for (int k = y_start; k < y_end; k += 2){ theta.x = tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + (threadIdx.x - y_start)] + k); theta.y = tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + (threadIdx.x - y_start)] + k+1); thetaTemp[(threadIdx.x - y_start)* F/2 + k/2] = theta; } } //must be the last iteration; no need to check //not enough theta to copy, set zero else memset(&thetaTemp[(threadIdx.x - y_start)*F/2 + y_start/2], 0, (y_end - y_start)*sizeof(float)); } */ __syncthreads(); //tile: 10*10 if(threadIdx.x < 55 ){ for(int k = 0; k < SCAN_BATCH; k++){ accumulate_in_registers(); } } } //end of iteration in copying from smem and aggregating in register __syncthreads(); if(threadIdx.x < 55 ){ //copy output to gmem int index = blockIdx.x*F*F; fill_lower_half_from_registers(); //symmetric if(tile_x!=tile_y){ fill_upper_half_from_registers(); } //regularization if(tile_x == tile_y){ for(int k = 0; k < tile; k++) tt[index + (tile_x+k)*(1+F)] += (end - start) * lambda; } } } } /*a generic kernel to get the hermitian matrices 
* as the left-hand side of the equations, to update X in ALS *examplary F = 100, T = 10 */ __global__ void get_hermitianT10(const int batch_offset, float* tt, const int* csrRowIndex, const int* csrColIndex, const float lambda, const int m, const int F, const float* __restrict__ thetaT) { extern __shared__ float2 thetaTemp []; int row = blockIdx.x + batch_offset; if (row < m) { //this block needs to handle end - start thetaT columns int start = csrRowIndex[row]; int end = csrRowIndex[row + 1]; //slide through [start, end] by window size SCAN_BATCH int iterations = (end - start - 1)/SCAN_BATCH + 1; float temp0= 0, temp1= 0, temp2= 0, temp3= 0, temp4= 0, temp5= 0, temp6= 0, temp7= 0, temp8= 0, temp9 = 0; float temp10= 0, temp11= 0, temp12= 0, temp13= 0, temp14= 0, temp15= 0, temp16= 0, temp17= 0, temp18= 0, temp19 = 0; float temp20= 0, temp21= 0, temp22= 0, temp23= 0, temp24= 0, temp25= 0, temp26= 0, temp27= 0, temp28= 0, temp29 = 0; float temp30= 0, temp31= 0, temp32= 0, temp33= 0, temp34= 0, temp35= 0, temp36= 0, temp37= 0, temp38= 0, temp39 = 0; float temp40= 0, temp41= 0, temp42= 0, temp43= 0, temp44= 0, temp45= 0, temp46= 0, temp47= 0, temp48= 0, temp49 = 0; float temp50= 0, temp51= 0, temp52= 0, temp53= 0, temp54= 0, temp55= 0, temp56= 0, temp57= 0, temp58= 0, temp59 = 0; float temp60= 0, temp61= 0, temp62= 0, temp63= 0, temp64= 0, temp65= 0, temp66= 0, temp67= 0, temp68= 0, temp69 = 0; float temp70= 0, temp71= 0, temp72= 0, temp73= 0, temp74= 0, temp75= 0, temp76= 0, temp77= 0, temp78= 0, temp79 = 0; float temp80= 0, temp81= 0, temp82= 0, temp83= 0, temp84= 0, temp85= 0, temp86= 0, temp87= 0, temp88= 0, temp89 = 0; float temp90= 0, temp91= 0, temp92= 0, temp93= 0, temp94= 0, temp95= 0, temp96= 0, temp97= 0, temp98= 0, temp99 = 0; int N = F/T10; // N = 100/10=10; for F = 100 and T = 10 int effective_block_size = N*(N+1)/2; //get the x and y coordinate int tile_x = 0; int tile_y = 0; for ( int i = 0; i < N; i++ ) { int end = ((2*N-i)*(i+1))/2; if(threadIdx.x < end){ tile_x = i * T10; tile_y = (N + threadIdx.x - end) * T10; break; } } int index = blockIdx.x*F*F; //iteration: copy gmem-->smem; aggregate smem-->register for (int iter = 0; iter < iterations; iter ++){ //phase 1 in iteration: gmem --> smem //REQ: blockDim.x >= F/2 if(threadIdx.x < F/2){ for(int k = 0; k< SCAN_BATCH; k++){ if(iter*SCAN_BATCH + k < end - start){ float2 theta; theta.x = __ldg(&thetaT[F * csrColIndex[start + iter*SCAN_BATCH + k] + 2*threadIdx.x]); theta.y = __ldg(&thetaT[F * csrColIndex[start + iter*SCAN_BATCH + k] + 2*threadIdx.x+1]); thetaTemp[k * F/2 + threadIdx.x] = theta; //this simpler statement is slower. 
//thetaTemp[k * F/2 + threadIdx.x] = __ldg((float2*)&thetaT[F * csrColIndex[start + iter*SCAN_BATCH + k] + 2*threadIdx.x]); } //not enough theta to copy, set zero else memset(&thetaTemp[k*F/2 + threadIdx.x], 0, 2*sizeof(float)); } } __syncthreads(); //phase 2 in iteration: smem --> register if(threadIdx.x < effective_block_size){//this redundant "if" seems improving kernel performance for(int k = 0; k < SCAN_BATCH; k++){ accumulate_in_registers(); } } } //end of iteration in copying from smem and aggregating in register __syncthreads(); //phase 3, after iteration: register --> gmem if(threadIdx.x < effective_block_size){ fill_lower_half_from_registers(); //symmetric if(tile_x != tile_y){ fill_upper_half_from_registers(); } //regularization if(tile_x == tile_y){ for(int k = 0; k < T10; k++) tt[index + (tile_x+k)*(1+F)] += (end - start) * lambda; } } } } float doALS(const int* csrRowIndexHostPtr, const int* csrColIndexHostPtr, const float* csrValHostPtr, const int* cscRowIndexHostPtr, const int* cscColIndexHostPtr, const float* cscValHostPtr, const int* cooRowIndexHostPtr, float* thetaTHost, float* XTHost, const int * cooRowIndexTestHostPtr, const int * cooColIndexTestHostPtr, const float * cooValHostTestPtr, const int m, const int n, const int f, const long nnz, const long nnz_test, const float lambda, const int ITERS, const int X_BATCH, const int THETA_BATCH, const int DEVICEID) { hipSetDevice(DEVICEID); printf("*******parameters: m: %d, n: %d, f: %d, nnz: %ld \n", m, n, f, nnz); //device pointers int * csrRowIndex = 0; int * csrColIndex = 0; float * csrVal = 0; float * thetaT = 0; float * tt = 0; float * XT = 0; float * cscVal =0; int * cscRowIndex = 0; int * cscColIndex = 0; //coo to calculate RMSE int * cooRowIndex =0; float * cooVal_test; int * cooRowIndex_test; int * cooColIndex_test; float final_rmse; printf("*******start allocating memory on GPU...\n"); cudacall(hipMalloc((void** ) &cscRowIndex,nnz * sizeof(cscRowIndex[0]))); cudacall(hipMalloc((void** ) &cscColIndex, (n+1) * sizeof(cscColIndex[0]))); cudacall(hipMalloc((void** ) &cscVal, nnz * sizeof(cscVal[0]))); //dimension: F*N cudacall(hipMalloc((void** ) &thetaT, f * n * sizeof(thetaT[0]))); //dimension: M*F cudacall(hipMalloc((void** ) &XT, f * m * sizeof(XT[0]))); printf("*******start copying memory to GPU...\n"); cudacall(hipMemcpy(cscRowIndex, cscRowIndexHostPtr,(size_t ) nnz * sizeof(cscRowIndex[0]), hipMemcpyHostToDevice)); cudacall(hipMemcpy(cscColIndex, cscColIndexHostPtr,(size_t ) (n+1) * sizeof(cscColIndex[0]), hipMemcpyHostToDevice)); cudacall(hipMemcpy(cscVal, cscValHostPtr,(size_t ) (nnz * sizeof(cscVal[0])),hipMemcpyHostToDevice)); cudacall(hipMemcpy(thetaT, thetaTHost, (size_t ) (n * f * sizeof(thetaT[0])), hipMemcpyHostToDevice)); cudacall(hipDeviceSetCacheConfig(hipFuncCachePreferShared)); //64-bit smem access //http://acceleware.com/blog/maximizing-shared-memory-bandwidth-nvidia-kepler-gpus hipDeviceSetSharedMemConfig(hipSharedMemBankSizeEightByte); //initialize cublas, cusparse hipblasHandle_t handle; cublascall(hipblasCreate(&handle)); hipsparseHandle_t cushandle = 0; cusparsecall(hipsparseCreate(&cushandle)); hipsparseMatDescr_t descr; cusparsecall( hipsparseCreateMatDescr(&descr)); hipsparseSetMatType(descr, HIPSPARSE_MATRIX_TYPE_GENERAL); hipsparseSetMatIndexBase(descr, HIPSPARSE_INDEX_BASE_ZERO); using namespace std; #ifdef DEBUG //variable used to time double elapsed = 0.0; struct timeval tv; struct timeval start_tv; struct timeval start_tv2; #endif for(int iter = 0; iter < ITERS ; iter ++){ #ifdef 
DEBUG printf("---------------------------ALS iteration %d, update X.----------------------------------\n", iter); gettimeofday(&start_tv, NULL); #endif //copy csr matrix in cudacall(hipMalloc((void** ) &csrRowIndex,(m + 1) * sizeof(csrRowIndex[0]))); cudacall(hipMalloc((void** ) &csrColIndex, nnz * sizeof(csrColIndex[0]))); cudacall(hipMalloc((void** ) &csrVal, nnz * sizeof(csrVal[0]))); cudacall(hipMemcpy(csrRowIndex, csrRowIndexHostPtr,(size_t ) ((m + 1) * sizeof(csrRowIndex[0])), hipMemcpyHostToDevice)); cudacall(hipMemcpy(csrColIndex, csrColIndexHostPtr,(size_t ) (nnz * sizeof(csrColIndex[0])), hipMemcpyHostToDevice)); cudacall(hipMemcpy(csrVal, csrValHostPtr,(size_t ) (nnz * sizeof(csrVal[0])),hipMemcpyHostToDevice)); #ifdef DEBUG printf("\tgenerate: Y*theta using cusparse.\n"); #endif float * ytheta = 0; float * ythetaT = 0; cudacall(hipMalloc((void** ) &ytheta, f * m * sizeof(ytheta[0]))); cudacall(hipMalloc((void** ) &ythetaT, f * m * sizeof(ythetaT[0]))); const float alpha = 1.0f; const float beta = 0.0f; cusparsecall (hipsparseScsrmm2(cushandle, HIPSPARSE_OPERATION_NON_TRANSPOSE, HIPSPARSE_OPERATION_TRANSPOSE, m, f, n, nnz, &alpha, descr, csrVal, csrRowIndex, csrColIndex, thetaT, f, &beta, ytheta, m) ); //hipDeviceSynchronize(); //printf("*******transpose ytheta use cublas.\n"); //ytheta: m*f; need ythetaT = (ytheta).T = f*m cublascall(hipblasSgeam(handle, HIPBLAS_OP_T, HIPBLAS_OP_N, f, m, &alpha, (const float * ) ytheta, m, &beta, ythetaT, f, ythetaT, f)); hipDeviceSynchronize(); cudaCheckError(); cudacall(hipFree(ytheta)); cudacall(hipFree(csrVal)); #ifdef DEBUG gettimeofday(&tv, NULL); elapsed = (tv.tv_sec - start_tv.tv_sec) + (tv.tv_usec - start_tv.tv_usec) / 1000000.0; printf("\tgenerate: Y*theta run %f seconds.\n", elapsed); #endif int block_dim = f/T10*(f/T10+1)/2; if (block_dim < f/2) block_dim = f/2; for(int batch_id = 0; batch_id< X_BATCH; batch_id ++){ #ifdef DEBUG printf("*******batch %d / %d.*******\n", batch_id, X_BATCH); #endif int batch_size = 0; if(batch_id != X_BATCH - 1) batch_size = m/X_BATCH; else batch_size = m - batch_id*(m/X_BATCH); int batch_offset = batch_id * (m/X_BATCH); cudacall(hipMalloc((void** ) &tt, f * f * batch_size * sizeof(float))); #ifdef DEBUG gettimeofday(&start_tv2, NULL); printf("\tupdateXByBlock kernel.\n"); #endif if(f == 100){ //do not use fp16 by default #ifdef CUMF_USE_HALF half* thetaT_fp16 = 0; cudacall(hipMalloc((void** ) &thetaT_fp16, f * n * sizeof(thetaT_fp16[0]))); hipLaunchKernelGGL(( fp32Array2fp16Array), dim3((n*f-1)/1024 + 1), dim3(1024), 0, 0, thetaT, thetaT_fp16, f*n); hipLaunchKernelGGL(( get_hermitian100WithHalf), dim3(batch_size), dim3(64), SCAN_BATCH * f/2*sizeof(float2), 0, batch_offset, tt, csrRowIndex, csrColIndex, lambda, m, f, thetaT_fp16); cudacall(hipFree(thetaT_fp16)); #else hipLaunchKernelGGL(( get_hermitian100), dim3(batch_size), dim3(64), SCAN_BATCH * f/2*sizeof(float2), 0, batch_offset, tt, csrRowIndex, csrColIndex, lambda, m, f, thetaT); #endif } else hipLaunchKernelGGL(( get_hermitianT10), dim3(batch_size), dim3(block_dim), SCAN_BATCH * f/2*sizeof(float2), 0, batch_offset, tt, csrRowIndex, csrColIndex, lambda, m, f, thetaT); hipDeviceSynchronize(); cudaCheckError(); #ifdef DEBUG gettimeofday(&tv, NULL); elapsed = (tv.tv_sec - start_tv2.tv_sec) + (tv.tv_usec - start_tv2.tv_usec) / 1000000.0; printf("\tupdate X kernel run %f seconds, gridSize: %d, blockSize %d.\n", elapsed, batch_size, f); double t0 = seconds(); #endif //host pointers for cublas batch operations float ** devPtrTTHost = 0; 
cudacall(hipHostMalloc( (void** ) &devPtrTTHost, batch_size * sizeof(*devPtrTTHost) ) ); float **devPtrYthetaTHost = 0; cudacall(hipHostMalloc( (void** ) &devPtrYthetaTHost, batch_size * sizeof(*devPtrYthetaTHost) ) ); #ifdef DEBUG printf("\tinvoke updateX with batch_size: %d, batch_offset: %d..\n", batch_size, batch_offset); #endif updateX(batch_size, batch_offset, ythetaT, tt, XT, handle, m, n, f, nnz, devPtrTTHost, devPtrYthetaTHost); #ifdef DEBUG printf("\tupdateX run seconds: %f \n", seconds() - t0); #endif cudacall(hipFree(tt)); cudacall(hipHostFree(devPtrTTHost)); cudacall(hipHostFree(devPtrYthetaTHost)); } #ifdef DEBUG gettimeofday(&tv, NULL); elapsed = (tv.tv_sec - start_tv.tv_sec) + (tv.tv_usec - start_tv.tv_usec) / 1000000.0; printf("update X run %f seconds, gridSize: %d, blockSize %d.\n", elapsed, m, f); #endif cudacall(hipFree(csrRowIndex)); cudacall(hipFree(csrColIndex)); cudacall(hipFree(ythetaT)); #ifdef DEBUG gettimeofday(&start_tv, NULL); printf("---------------------------------- ALS iteration %d, update theta ----------------------------------\n", iter); printf("\tgenerate: Y'*X using cusparse.\n"); #endif float * yTX = 0; float * yTXT = 0; cudacall(hipMalloc((void** ) &yTXT, f * n * sizeof(yTXT[0]))); cudacall(hipMalloc((void** ) &yTX, n * f * sizeof(yTX[0]))); cusparsecall( hipsparseScsrmm2(cushandle, HIPSPARSE_OPERATION_NON_TRANSPOSE, HIPSPARSE_OPERATION_TRANSPOSE, n, f, m, nnz, &alpha, descr, cscVal, cscColIndex, cscRowIndex, XT, f, &beta, yTX, n) ); //hipDeviceSynchronize(); //printf("*******transpose yTX \n"); //yTX: n*f; need yTXT = (yTX).T = f*n cublascall(hipblasSgeam(handle, HIPBLAS_OP_T, HIPBLAS_OP_N, f, n, &alpha, (const float * ) yTX, n, &beta, yTXT, f, yTXT, f)); hipDeviceSynchronize(); cudacall(hipFree(yTX)); #ifdef DEBUG gettimeofday(&tv, NULL); elapsed = (tv.tv_sec - start_tv.tv_sec) + (tv.tv_usec - start_tv.tv_usec) / 1000000.0; printf("\tgenerate: Y'*X run %f seconds.\n", elapsed); #endif //in batches, when N is huge for(int batch_id = 0; batch_id< THETA_BATCH; batch_id ++){ #ifdef DEBUG printf("*******batch %d / %d.*******\n", batch_id, THETA_BATCH); #endif int batch_size = 0; if(batch_id != THETA_BATCH - 1) batch_size = n/THETA_BATCH; else batch_size = n - batch_id*(n/THETA_BATCH); int batch_offset = batch_id * (n/THETA_BATCH); float * xx = 0; cudacall(hipMalloc((void** ) &xx, f * f * batch_size * sizeof(xx[0]))); cudacall( hipMemset(xx, 0, f*f*batch_size*sizeof(float)) ); #ifdef DEBUG gettimeofday(&start_tv2, NULL); printf("\tupdateThetaByBlock kernel.\n"); #endif //get_hermitian_theta<<<batch_size, 64>>>(batch_offset, xx, cscRowIndex, cscColIndex, lambda, n); //updateThetaByBlock2pRegDsmemTile<<<batch_size, F>>> if(f == 100){ #ifdef CUMF_USE_HALF half * XT_fp16 = 0; cudacall(hipMalloc((void** ) &XT_fp16, f * m * sizeof(XT_fp16[0]))); hipLaunchKernelGGL(( fp32Array2fp16Array), dim3((n*f-1)/1024 + 1), dim3(1024), 0, 0, XT, XT_fp16, f*m); hipLaunchKernelGGL(( get_hermitian100WithHalf), dim3(batch_size), dim3(64), SCAN_BATCH * f/2*sizeof(float2), 0, batch_offset, xx, cscColIndex, cscRowIndex, lambda, n, f, XT_fp16); cudacall(hipFree(XT_fp16)); #else hipLaunchKernelGGL(( get_hermitian100), dim3(batch_size), dim3(64), SCAN_BATCH * f/2*sizeof(float2), 0, batch_offset, xx, cscColIndex, cscRowIndex, lambda, n, f, XT); #endif } else hipLaunchKernelGGL(( get_hermitianT10), dim3(batch_size), dim3(block_dim), SCAN_BATCH*f*sizeof(float), 0, batch_offset, xx, cscColIndex, cscRowIndex, lambda, n, f, XT); hipDeviceSynchronize(); cudaCheckError(); #ifdef DEBUG 
gettimeofday(&tv, NULL); elapsed = (tv.tv_sec - start_tv2.tv_sec) + (tv.tv_usec - start_tv2.tv_usec) / 1000000.0; printf("\tupdate Theta kernel run %f seconds, gridSize: %d, blockSize %d.\n", elapsed, batch_size, f); double t0 = seconds(); #endif float ** devPtrXXHost = 0; cudacall(hipHostMalloc( (void** ) &devPtrXXHost, batch_size * sizeof(*devPtrXXHost) ) ); float **devPtrYTXTHost = 0; cudacall(hipHostMalloc( (void** ) &devPtrYTXTHost, batch_size * sizeof(*devPtrYTXTHost) ) ); #ifdef DEBUG printf("*******invoke updateTheta with batch_size: %d, batch_offset: %d.\n", batch_size, batch_offset); #endif updateTheta(batch_size, batch_offset, xx, yTXT, thetaT, handle, m, n, f, nnz, devPtrXXHost, devPtrYTXTHost); #ifdef DEBUG printf("\tupdateTheta run seconds: %f \n", seconds() - t0); #endif cudacall(hipFree(xx)); cudacall(hipHostFree(devPtrXXHost)); cudacall(hipHostFree(devPtrYTXTHost)); } cudacall(hipFree(yTXT)); #ifdef DEBUG gettimeofday(&tv, NULL); elapsed = (tv.tv_sec - start_tv.tv_sec) + (tv.tv_usec - start_tv.tv_usec) / 1000000.0; printf("update theta run %f seconds, gridSize: %d, blockSize %d.\n", elapsed, n, f); printf("Calculate RMSE.\n"); #endif float * errors_train = 0; int error_size = 1000; cudacall(hipMalloc((void** ) &errors_train, error_size * sizeof(errors_train[0]))); cudacall( hipMemset(errors_train, 0, error_size*sizeof(float)) ); cudacall(hipMalloc((void** ) &cooRowIndex, nnz * sizeof(cooRowIndex[0]))); cudacall(hipMemcpy(cooRowIndex, cooRowIndexHostPtr,(size_t ) (nnz * sizeof(cooRowIndex[0])), hipMemcpyHostToDevice)); cudacall(hipMalloc((void** ) &csrColIndex, nnz * sizeof(csrColIndex[0]))); cudacall(hipMalloc((void** ) &csrVal, nnz * sizeof(csrVal[0]))); cudacall(hipMemcpy(csrColIndex, csrColIndexHostPtr,(size_t ) (nnz * sizeof(csrColIndex[0])), hipMemcpyHostToDevice)); cudacall(hipMemcpy(csrVal, csrValHostPtr,(size_t ) (nnz * sizeof(csrVal[0])),hipMemcpyHostToDevice)); hipLaunchKernelGGL(( RMSE), dim3((nnz-1)/256 + 1), dim3(256), 0, 0, csrVal, cooRowIndex, csrColIndex, thetaT, XT, errors_train, nnz, error_size, f); hipDeviceSynchronize(); cudaCheckError(); cudacall(hipFree(cooRowIndex)); cudacall(hipFree(csrColIndex)); cudacall(hipFree(csrVal)); float* rmse_train = (float*) malloc (sizeof(float)); cublascall( hipblasSasum(handle, error_size, errors_train, 1, rmse_train) ); hipDeviceSynchronize(); printf("--------- Train RMSE in iter %d: %f\n", iter, sqrt((*rmse_train)/nnz)); cudacall(hipFree(errors_train)); float * errors_test = 0; cudacall(hipMalloc((void** ) &errors_test, error_size * sizeof(errors_test[0]))); cudacall( hipMemset(errors_test, 0, error_size*sizeof(float)) ); cudacall(hipMalloc((void** ) &cooRowIndex_test, nnz_test * sizeof(cooRowIndex_test[0]))); cudacall(hipMemcpy(cooRowIndex_test, cooRowIndexTestHostPtr,(size_t ) (nnz_test * sizeof(cooRowIndex_test[0])), hipMemcpyHostToDevice)); cudacall(hipMalloc((void** ) &cooColIndex_test, nnz_test * sizeof(cooColIndex_test[0]))); cudacall(hipMalloc((void** ) &cooVal_test, nnz_test * sizeof(cooVal_test[0]))); cudacall(hipMemcpy(cooColIndex_test, cooColIndexTestHostPtr,(size_t ) (nnz_test * sizeof(cooColIndex_test[0])), hipMemcpyHostToDevice)); cudacall(hipMemcpy(cooVal_test, cooValHostTestPtr,(size_t ) (nnz_test * sizeof(cooVal_test[0])),hipMemcpyHostToDevice)); hipLaunchKernelGGL(( RMSE), dim3((nnz_test-1)/256), dim3(256), 0, 0, cooVal_test, cooRowIndex_test, cooColIndex_test, thetaT, XT, errors_test, nnz_test, error_size, f); hipDeviceSynchronize(); cudaCheckError(); cudacall(hipFree(cooRowIndex_test)); 
cudacall(hipFree(cooColIndex_test)); cudacall(hipFree(cooVal_test)); float* rmse_test = (float*) malloc (sizeof(float)); cublascall( hipblasSasum(handle, error_size, errors_test, 1, rmse_test) ); hipDeviceSynchronize(); final_rmse = sqrt((*rmse_test)/nnz_test); printf("--------- Test RMSE in iter %d: %f\n", iter, final_rmse); cudacall(hipFree(errors_test)); } //copy feature vectors back to host cudacall(hipMemcpy(thetaTHost, thetaT, (size_t ) (n * f * sizeof(thetaT[0])), hipMemcpyDeviceToHost)); cudacall(hipMemcpy(XTHost, XT, (size_t ) (m * f * sizeof(XT[0])), hipMemcpyDeviceToHost)); cudacall(hipFree(thetaT)); cudacall(hipFree(XT)); cudacall(hipFree(cscVal)); cudacall(hipFree(cscColIndex)); cudacall(hipFree(cscRowIndex)); cudacall(hipDeviceReset()); return final_rmse; }
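Aside (illustration only): per user u, get_hermitian100/get_hermitianT10 assemble A_u = sum_j theta_j * theta_j^T + lambda*|Omega_u|*I over the items j that u rated (|Omega_u| = end - start, added on the diagonal), while the cusparse/cublas calls build the right-hand side b_u = sum_j r_uj * theta_j; updateX then solves A_u * x_u = b_u with the batched LU routines. A single-user CPU reference that mirrors that assembly, with toy values (f, the item factors and the ratings are made up):

#include <cstdio>
#include <vector>

// Single-user reference for the system the batched kernels assemble:
//   A = sum_j theta_j * theta_j^T + lambda * nnz_u * I,   b = sum_j r_uj * theta_j.
// On the GPU the solve is done by cublasSgetrfBatched / cublasSgetrsBatched.
int main()
{
    const int f = 3;                     // factor count (100 in the kernels above)
    const float lambda = 0.05f;
    // Two rated items: their factor vectors theta_j and the ratings r_uj.
    std::vector<std::vector<float>> theta = {{1.f, 0.f, 2.f}, {0.f, 1.f, 1.f}};
    std::vector<float> r = {4.f, 2.f};

    std::vector<float> A(f * f, 0.f), b(f, 0.f);
    for (size_t j = 0; j < theta.size(); ++j) {
        for (int p = 0; p < f; ++p) {
            b[p] += r[j] * theta[j][p];
            for (int q = 0; q < f; ++q)
                A[p * f + q] += theta[j][p] * theta[j][q];
        }
    }
    for (int p = 0; p < f; ++p)          // regularization, scaled by #ratings
        A[p * f + p] += theta.size() * lambda;

    // Print the augmented system [A | b]; a dense LU solve of A x = b would
    // give this user's row of XT, matching what updateX writes back.
    for (int p = 0; p < f; ++p)
        printf("%6.2f %6.2f %6.2f | %6.2f\n", A[p*f+0], A[p*f+1], A[p*f+2], b[p]);
    return 0;
}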
e20bf04a19a84ae85a1de217af5474af0fc3719e.cu
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* * als.cu * * Created on: Feb 10, 2015 * Author: Wei Tan ([email protected]) * Alternating Least Square for Matrix Factorization on CUDA 7.0+ * Code optimized for F = 100, and on cc 3.5, 3.7 platforms. Also tested in cc 5.2 */ //do not use magma and fp16 by default //#define CUMF_USE_MAGMA //#define CUMF_USE_HALF #include "als.h" #include "host_utilities.h" #include <fstream> #include <assert.h> #include <cuda_fp16.h> #ifdef CUMF_USE_HALF #define SCAN_BATCH 24 #else #define SCAN_BATCH 24 #endif #ifdef CUMF_USE_MAGMA #include "flops.h" #include "magma.h" #include "magma_lapack.h" #include "testings.h" #endif __global__ void fp32Array2fp16Array(const float * fp32Array, half* fp16Array, const int size) { int i = blockDim.x*blockIdx.x + threadIdx.x; if (i < size) { fp16Array[i] = __float2half(fp32Array[i]); } } int updateX(const int batch_size, const int batch_offset, float * ythetaT, float * tt, float * XT, cublasHandle_t handle, const int m, const int n, const int f, const int nnz, float** devPtrTTHost, float **devPtrYthetaTHost){ #ifdef DEBUG float elapsed; struct timeval tv0, tv1, tv2; gettimeofday(&tv0, NULL); printf("*******Batch LU factorization of tt.\n"); #endif //pointers needed by batch op float **devPtrTT = 0; int *INFO; for (int k = 0; k < batch_size; k++) { devPtrTTHost[k] = &tt[k * f * f]; } cudacall(cudaMalloc((void** ) &devPtrTT, batch_size * sizeof(*devPtrTT))); cudacall(cudaMemcpy(devPtrTT, devPtrTTHost, batch_size * sizeof(*devPtrTT),cudaMemcpyHostToDevice)); //cudacall( cudaMalloc(&P, f * batch_size * sizeof(int)) ); cudacall( cudaMalloc(&INFO, batch_size * sizeof(int) )); cublascall(cublasSgetrfBatched(handle, f, devPtrTT, f, NULL, INFO, batch_size)); cudaThreadSynchronize(); #ifdef DEBUG gettimeofday(&tv1, NULL); elapsed = (tv1.tv_sec - tv0.tv_sec) + (tv1.tv_usec - tv0.tv_usec) / 1000000.0; printf("\t %f seconds. 
\n", elapsed); printf("*******solve: tt * XT = ythetaT use cublas, with LU decomposition.\n"); #endif float **devPtrYthetaT = 0; for (int k = 0; k < batch_size; k++) { devPtrYthetaTHost[k] = &ythetaT[batch_offset * f + k * f]; } cudacall(cudaMalloc((void** ) &devPtrYthetaT, batch_size * sizeof(*devPtrYthetaT))); cudacall(cudaMemcpy(devPtrYthetaT, devPtrYthetaTHost, batch_size * sizeof(*devPtrYthetaT), cudaMemcpyHostToDevice)); int * info2 = (int *) malloc(sizeof(int)); cublascall( cublasSgetrsBatched(handle, CUBLAS_OP_N, f, 1, (const float ** ) devPtrTT, f, NULL, devPtrYthetaT, f, info2, batch_size) ); cudaThreadSynchronize(); cudaError_t cudaStat1 = cudaGetLastError(); if (cudaStat1 != cudaSuccess) { fprintf(stderr,"Failed to launch cublasSgetrsBatched (error code: %s)!\n", cudaGetErrorString(cudaStat1)); exit(EXIT_FAILURE); } cudacall( cudaMemcpy(&XT[batch_offset * f], &ythetaT[batch_offset * f], batch_size * f * sizeof(float), cudaMemcpyDeviceToDevice) ); #ifdef DEBUG gettimeofday(&tv2, NULL); elapsed = (tv2.tv_sec - tv1.tv_sec) + (tv2.tv_usec - tv1.tv_usec) / 1000000.0; printf("\t %f seconds. \n", elapsed); #endif cudacall(cudaFree(devPtrTT)); //cudacall(cudaFree(P)); cudacall(cudaFree(INFO)); cudacall(cudaFree(devPtrYthetaT)); return 0; } int updateTheta(const int batch_size, const int batch_offset, float * xx, float * yTXT, float * thetaT, cublasHandle_t handle, const int m, const int n, const int f, const int nnz, float ** devPtrXXHost, float **devPtrYTXTHost ){ #ifdef DEBUG float elapsed; struct timeval tv0, tv1, tv2; gettimeofday(&tv0, NULL); printf("*******LU factorize xx.\n"); #endif float **devPtrXX = 0; for (int k = 0; k < batch_size; k++) { devPtrXXHost[k] = &xx[k * f * f]; } cudacall(cudaMalloc((void** ) &devPtrXX, batch_size * sizeof(*devPtrXX))); cudacall(cudaMemcpy(devPtrXX, devPtrXXHost, batch_size * sizeof(*devPtrXX), cudaMemcpyHostToDevice)); int *INFO; //cudacall(cudaMalloc(&P, f * batch_size * sizeof(int))); cudacall(cudaMalloc(&INFO, batch_size * sizeof(int))); cublascall(cublasSgetrfBatched(handle, f, devPtrXX, f, NULL, INFO, batch_size)); cudaThreadSynchronize(); #ifdef DEBUG gettimeofday(&tv1, NULL); elapsed = (tv1.tv_sec - tv0.tv_sec) + (tv1.tv_usec - tv0.tv_usec) / 1000000.0; printf("\t %f seconds. \n", elapsed); printf("******* solve xx * thetaT = yTXT with CUDA 7.\n"); #endif float **devPtrYTXT = 0; for (int k = 0; k < batch_size; k++) { devPtrYTXTHost[k] = &yTXT[batch_offset * f + k * f]; } cudacall(cudaMalloc((void** ) &devPtrYTXT, batch_size * sizeof(*devPtrYTXT))); cudacall(cudaMemcpy(devPtrYTXT, devPtrYTXTHost, batch_size * sizeof(*devPtrYTXT),cudaMemcpyHostToDevice)); int * info2 = (int *) malloc(sizeof(int)); cublascall( cublasSgetrsBatched(handle, CUBLAS_OP_N, f, 1, (const float ** ) devPtrXX, f, NULL, devPtrYTXT, f, info2, batch_size) ); cudaThreadSynchronize(); cudaError_t cudaStat1 = cudaGetLastError(); if (cudaStat1 != cudaSuccess) { fprintf(stderr,"Failed to launch cublasSgetrsBatched (error code: %s)!\n", cudaGetErrorString(cudaStat1)); exit(EXIT_FAILURE); } cudacall( cudaMemcpy( &thetaT[batch_offset * f], &yTXT[batch_offset * f], batch_size * f * sizeof(float), cudaMemcpyDeviceToDevice) ); #ifdef DEBUG gettimeofday(&tv2, NULL); elapsed = (tv2.tv_sec - tv1.tv_sec) + (tv2.tv_usec - tv1.tv_usec) / 1000000.0; printf("\t %f seconds. 
\n", elapsed); #endif cudaFree(devPtrXX); cudaFree(INFO); free(info2); cudaFree(devPtrYTXT); return 0; } #ifdef USE_MAGMA int updateThetaMagma(const int batch_size, const int batch_offset, float * xx, float * yTXT, float * thetaT, cublasHandle_t handle, const int m, const int n, const int f, const int nnz, float ** devPtrXXHost, float **devPtrYTXTHost ){ //variables for timing float elapsed; struct timeval tv1, tv2; gettimeofday(&tv1, NULL); printf("*******magma Cholesky factorization.\n"); magma_init(); magma_opts opts( MagmaOptsBatched ); char *parray[10]; char **x; x = &parray[0]; opts.parse_opts(1,x); magma_queue_t queue = opts.queue; int min_batch = batch_size; int info = 0; int * dinfo_array = 0; float **dA_array = NULL; float **dB_array = NULL; float **hA_array = (float**) malloc(min_batch * sizeof(hA_array[0])); float **hB_array = (float**) malloc(min_batch * sizeof(hB_array[0])); cudacall (cudaMalloc((void**) &dinfo_array, min_batch*sizeof(int))); cudacall(cudaMalloc((void** ) &dA_array, min_batch * sizeof(*dA_array))); cudacall(cudaMalloc((void** ) &dB_array, min_batch * sizeof(*dB_array))); for (int k = 0; k < batch_size; k++) { hA_array[k] = &xx[k * f * f]; hB_array[k] = &yTXT[batch_offset * f + k * f]; } cudacall(cudaMemcpy(dA_array, hA_array, min_batch * sizeof(*dA_array), cudaMemcpyHostToDevice)); cudacall(cudaMemcpy(dB_array, hB_array, min_batch * sizeof(*dB_array), cudaMemcpyHostToDevice)); info = magma_sposv_batched(MagmaLower, f, 1, dA_array, f, dB_array, f, dinfo_array, min_batch, queue); magma_int_t *dipiv; magma_int_t **dipiv_array = NULL; TESTING_MALLOC_DEV( dipiv, magma_int_t, f * min_batch ); TESTING_MALLOC_DEV( dipiv_array, magma_int_t*, min_batch ); magma_iset_pointer( dipiv_array, dipiv, 1, 0, 0, f, min_batch, queue ); //info = magma_sgesv_nopiv_batched(f, 1, dA_array, f, dB_array, f, dinfo_array, min_batch, queue); //info = magma_sgesv_batched(f, 1, dA_array, f, dipiv_array, dB_array, f, dinfo_array, min_batch, queue); int *cpu_info = (int*) malloc(min_batch*sizeof(int)); cudacall(cudaMemcpy(cpu_info, dinfo_array, min_batch * sizeof(int),cudaMemcpyDeviceToHost)); cudacall( cudaMemcpy(&thetaT[batch_offset * f], &yTXT[batch_offset * f], batch_size * f * sizeof(float), cudaMemcpyDeviceToDevice) ); for(int i = 0; i < min_batch; i++){ if(cpu_info[i] != 0 ){ printf("magma_sposv_batched matrix %d returned internal error %d\n",i, (int)cpu_info[i] ); } } if (info != 0) printf("magma_sposv_batched returned argument error %d: %s.\n", (int) info, magma_strerror( info )); cudaFree(dA_array); cudaFree(dB_array); cudaFree( dinfo_array ); cudaFree(dipiv_array); cudaFree(dipiv); free(cpu_info); free(hA_array); free(hB_array); //free(x); magma_finalize(); gettimeofday(&tv2, NULL); elapsed = (tv2.tv_sec - tv1.tv_sec) + (tv2.tv_usec - tv1.tv_usec) / 1000000.0; printf("\t %f seconds. 
\n", elapsed); return 0; } #endif __global__ void RMSE(const float * csrVal, const int* cooRowIndex, const int* csrColIndex, const float * __restrict__ thetaT, const float * __restrict__ XT, float * error, const int nnz, const int error_size, const int f) { int i = blockDim.x*blockIdx.x + threadIdx.x; if (i < nnz) { int row = cooRowIndex[i]; int col = csrColIndex[i]; float e = csrVal[i]; //if(i%1000000==0) printf("row: %d, col: %d, csrVal[%d]: %f.\t", row, col, i, e); for (int k = 0; k < f; k++) { e -= __ldg(&thetaT[f * col + k]) * __ldg(&XT[f * row + k]); } atomicAdd(&error[i%error_size], e*e); //error[i] = e*e; //if(i%1000000==0) printf("error[%d]: %f.\n", i, e); } } //using fp16 as thetaT's format //using fp16 in computate seems causing register pressure since half intrinsics cannot be used. //using fp16 in compute also does not converge. not sure if the code is incorrect, or ALS cannot tolerate half-precision __global__ void __launch_bounds__(64, 6) get_hermitian100WithHalf(const int batch_offset, float* tt, const int* csrRowIndex, const int* csrColIndex, const float lambda, const int m, const int F, const half* __restrict__ thetaT_fp16) { extern __shared__ float2 thetaTemp[]; int row = blockIdx.x + batch_offset; if (row < m) { //this block needs to handle end - start thetaT columns int start = csrRowIndex[row]; int end = csrRowIndex[row + 1]; //slide through [start, end] by window size SCAN_BATCH int iterations = (end - start - 1)/SCAN_BATCH + 1; float temp0= 0, temp1= 0, temp2= 0, temp3= 0, temp4= 0, temp5= 0, temp6= 0, temp7= 0, temp8= 0, temp9 = 0; float temp10= 0, temp11= 0, temp12= 0, temp13= 0, temp14= 0, temp15= 0, temp16= 0, temp17= 0, temp18= 0, temp19 = 0; float temp20= 0, temp21= 0, temp22= 0, temp23= 0, temp24= 0, temp25= 0, temp26= 0, temp27= 0, temp28= 0, temp29 = 0; float temp30= 0, temp31= 0, temp32= 0, temp33= 0, temp34= 0, temp35= 0, temp36= 0, temp37= 0, temp38= 0, temp39 = 0; float temp40= 0, temp41= 0, temp42= 0, temp43= 0, temp44= 0, temp45= 0, temp46= 0, temp47= 0, temp48= 0, temp49 = 0; float temp50= 0, temp51= 0, temp52= 0, temp53= 0, temp54= 0, temp55= 0, temp56= 0, temp57= 0, temp58= 0, temp59 = 0; float temp60= 0, temp61= 0, temp62= 0, temp63= 0, temp64= 0, temp65= 0, temp66= 0, temp67= 0, temp68= 0, temp69 = 0; float temp70= 0, temp71= 0, temp72= 0, temp73= 0, temp74= 0, temp75= 0, temp76= 0, temp77= 0, temp78= 0, temp79 = 0; float temp80= 0, temp81= 0, temp82= 0, temp83= 0, temp84= 0, temp85= 0, temp86= 0, temp87= 0, temp88= 0, temp89 = 0; float temp90= 0, temp91= 0, temp92= 0, temp93= 0, temp94= 0, temp95= 0, temp96= 0, temp97= 0, temp98= 0, temp99 = 0; int tile_x = 0; int tile_y = 0; int tile = F/10; for ( int i = 0; i < 10; i++){ int end = ((20-i)*(i+1))/2; if(threadIdx.x < end){ tile_x = i * tile; tile_y = (10 + threadIdx.x - end) * tile; break; } } //iteration: copy gmem-->smem; aggregate smem-->register for (int iter = 0; iter < iterations; iter ++){ //float2 theta; //copy texture --> smem, and sync //two layers: warp divergence unless we split at 32 //require: 32 >= SCAN_BATCH if(threadIdx.x < 2*32 ){ int index = threadIdx.x - (threadIdx.x/32)*32; //0 to 31; if(index < SCAN_BATCH){ if(iter*SCAN_BATCH + index < end - start){ //for (int k = 50*(threadIdx.x/32); k < 50*(threadIdx.x/32) + 50; k += 2){ //IMPORTANT: for loop has constant and identical start and end if(threadIdx.x < 32){ for (int k = 0; k < 50; k += 2){ half2 theta_half2 = __ldg((half2*)&thetaT_fp16[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k]); thetaTemp[index * F/2 + k/2] = 
__half22float2(theta_half2); //theta.x = __half2float(__ldg(&thetaT_fp16[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k])); //theta.y = __half2float(__ldg(&thetaT_fp16[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k+1])); //thetaTemp[index * F/2 + k/2] = theta; } } else { for (int k = 0; k < 50; k += 2){ half2 theta_half2 = __ldg((half2*)&thetaT_fp16[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k + 50]); thetaTemp[index * F/2 + k/2 + 25] = __half22float2(theta_half2); //theta.x = __half2float(__ldg(&thetaT_fp16[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k + 50])); //theta.y = __half2float(__ldg(&thetaT_fp16[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k + 51])); //thetaTemp[index * F/2 + k/2 + 25] = theta; } } } //must be the last iteration; no need to check //not enough theta to copy, set zero else memset(&thetaTemp[index*F/2], 0, F*sizeof(float)); } } __syncthreads(); //tile: 10*10 if(threadIdx.x < 55 ){ for(int k = 0; k < SCAN_BATCH; k++){ accumulate_in_registers(); } } } //end of iteration in copying from smem and aggregating in register __syncthreads(); if(threadIdx.x < 55 ){ //copy output to gmem int index = blockIdx.x*F*F; fill_lower_half_from_registers(); //symmetric if(tile_x!=tile_y){ fill_upper_half_from_registers(); } //regularization if(tile_x == tile_y){ for(int k = 0; k < tile; k++) tt[index + (tile_x+k)*(1+F)] += (end - start) * lambda; } } } } __global__ void __launch_bounds__(64, 6) get_hermitian100(const int batch_offset, float* tt, const int* csrRowIndex, const int* csrColIndex, const float lambda, const int m, const int F, const float* __restrict__ thetaT) { extern __shared__ float2 thetaTemp[]; int row = blockIdx.x + batch_offset; if (row < m) { //this block needs to handle end - start thetaT columns int start = csrRowIndex[row]; int end = csrRowIndex[row + 1]; //slide through [start, end] by window size SCAN_BATCH int iterations = (end - start - 1)/SCAN_BATCH + 1; float temp0= 0, temp1= 0, temp2= 0, temp3= 0, temp4= 0, temp5= 0, temp6= 0, temp7= 0, temp8= 0, temp9 = 0; float temp10= 0, temp11= 0, temp12= 0, temp13= 0, temp14= 0, temp15= 0, temp16= 0, temp17= 0, temp18= 0, temp19 = 0; float temp20= 0, temp21= 0, temp22= 0, temp23= 0, temp24= 0, temp25= 0, temp26= 0, temp27= 0, temp28= 0, temp29 = 0; float temp30= 0, temp31= 0, temp32= 0, temp33= 0, temp34= 0, temp35= 0, temp36= 0, temp37= 0, temp38= 0, temp39 = 0; float temp40= 0, temp41= 0, temp42= 0, temp43= 0, temp44= 0, temp45= 0, temp46= 0, temp47= 0, temp48= 0, temp49 = 0; float temp50= 0, temp51= 0, temp52= 0, temp53= 0, temp54= 0, temp55= 0, temp56= 0, temp57= 0, temp58= 0, temp59 = 0; float temp60= 0, temp61= 0, temp62= 0, temp63= 0, temp64= 0, temp65= 0, temp66= 0, temp67= 0, temp68= 0, temp69 = 0; float temp70= 0, temp71= 0, temp72= 0, temp73= 0, temp74= 0, temp75= 0, temp76= 0, temp77= 0, temp78= 0, temp79 = 0; float temp80= 0, temp81= 0, temp82= 0, temp83= 0, temp84= 0, temp85= 0, temp86= 0, temp87= 0, temp88= 0, temp89 = 0; float temp90= 0, temp91= 0, temp92= 0, temp93= 0, temp94= 0, temp95= 0, temp96= 0, temp97= 0, temp98= 0, temp99 = 0; int tile_x = 0; int tile_y = 0; int tile = F/10; for ( int i = 0; i < 10; i++){ int end = ((20-i)*(i+1))/2; if(threadIdx.x < end){ tile_x = i * tile; tile_y = (10 + threadIdx.x - end) * tile; break; } } //iteration: copy gmem-->smem; aggregate smem-->register for (int iter = 0; iter < iterations; iter ++){ float2 theta; //copy texture --> smem, and sync /* if(threadIdx.x < SCAN_BATCH){ if(iter*SCAN_BATCH + threadIdx.x < end - start){ 
for (int k = 0; k < F; k += 2){ theta.x = tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + threadIdx.x] + k); theta.y = tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + threadIdx.x] + k+1); thetaTemp[threadIdx.x * F/2 + k/2] = theta; } } //must be the last iteration; no need to check //not enough theta to copy, set zero else memset(&thetaTemp[threadIdx.x*F/2], 0, F*sizeof(float)); } */ //two layers: warp divergence unless we split at 32 //require 32 >= SCAN_BATCH if(threadIdx.x < 2*32 ){ //int index = threadIdx.x; int index = threadIdx.x - (threadIdx.x/32)*32; //0 to 31; if(index < SCAN_BATCH){ if(iter*SCAN_BATCH + index < end - start){ //for (int k = 50*(threadIdx.x/32); k < 50*(threadIdx.x/32) + 50; k += 2){ //IMPORTANT: for loop has constant and identical start and end if(threadIdx.x < 32){ for (int k = 0; k < 50; k += 2){ theta.x = __ldg(&thetaT[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k]); theta.y = __ldg(&thetaT[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k+1]); thetaTemp[index * F/2 + k/2] = theta; } } else { for (int k = 0; k < 50; k += 2){ theta.x = __ldg(&thetaT[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k + 50]); theta.y = __ldg(&thetaT[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k + 51]); thetaTemp[index * F/2 + k/2 + 25] = theta; } } } //must be the last iteration; no need to check //not enough theta to copy, set zero else memset(&thetaTemp[index*F/2], 0, F*sizeof(float)); } } /* //issue: not coalesced access to csrColIndex if(threadIdx.x < F && threadIdx.x%2 == 0){ for(int k = 0; k< SCAN_BATCH; k++){ if(iter*SCAN_BATCH + k < end - start){ theta.x = tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + k] + threadIdx.x); theta.y = tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + k] + threadIdx.x +1); thetaTemp[k * F/2 + threadIdx.x/2] = theta; } //must be the last iteration; no need to check //not enough theta to copy, set zero else memset(&thetaTemp[k*F/2 + threadIdx.x/2], 0, 2*sizeof(float)); } } */ /* int layers = blockDim.x/SCAN_BATCH; //100/30 = 3 //int height = blockDim.x/layers; //30 int y = threadIdx.x/SCAN_BATCH;//0,1,2,3; y==3 is not viable //min(y, (layers-1)) * height int y_start = y * 30;//0-29:0;30-59:30;60-89:60 int y_end = y_start + 30; //0-29:30;30-59:60;60-89:90 if(y >= layers - 1) y_end = F; //60-89:100 if(threadIdx.x - y_start < SCAN_BATCH){ if(iter*SCAN_BATCH + (threadIdx.x - y_start) < end - start){ for (int k = y_start; k < y_end; k += 2){ theta.x = tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + (threadIdx.x - y_start)] + k); theta.y = tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + (threadIdx.x - y_start)] + k+1); thetaTemp[(threadIdx.x - y_start)* F/2 + k/2] = theta; } } //must be the last iteration; no need to check //not enough theta to copy, set zero else memset(&thetaTemp[(threadIdx.x - y_start)*F/2 + y_start/2], 0, (y_end - y_start)*sizeof(float)); } */ __syncthreads(); //tile: 10*10 if(threadIdx.x < 55 ){ for(int k = 0; k < SCAN_BATCH; k++){ accumulate_in_registers(); } } } //end of iteration in copying from smem and aggregating in register __syncthreads(); if(threadIdx.x < 55 ){ //copy output to gmem int index = blockIdx.x*F*F; fill_lower_half_from_registers(); //symmetric if(tile_x!=tile_y){ fill_upper_half_from_registers(); } //regularization if(tile_x == tile_y){ for(int k = 0; k < tile; k++) tt[index + (tile_x+k)*(1+F)] += (end - start) * lambda; } } } } /*a generic kernel to get the hermitian matrices 
* as the left-hand side of the equations, to update X in ALS *examplary F = 100, T = 10 */ __global__ void get_hermitianT10(const int batch_offset, float* tt, const int* csrRowIndex, const int* csrColIndex, const float lambda, const int m, const int F, const float* __restrict__ thetaT) { extern __shared__ float2 thetaTemp []; int row = blockIdx.x + batch_offset; if (row < m) { //this block needs to handle end - start thetaT columns int start = csrRowIndex[row]; int end = csrRowIndex[row + 1]; //slide through [start, end] by window size SCAN_BATCH int iterations = (end - start - 1)/SCAN_BATCH + 1; float temp0= 0, temp1= 0, temp2= 0, temp3= 0, temp4= 0, temp5= 0, temp6= 0, temp7= 0, temp8= 0, temp9 = 0; float temp10= 0, temp11= 0, temp12= 0, temp13= 0, temp14= 0, temp15= 0, temp16= 0, temp17= 0, temp18= 0, temp19 = 0; float temp20= 0, temp21= 0, temp22= 0, temp23= 0, temp24= 0, temp25= 0, temp26= 0, temp27= 0, temp28= 0, temp29 = 0; float temp30= 0, temp31= 0, temp32= 0, temp33= 0, temp34= 0, temp35= 0, temp36= 0, temp37= 0, temp38= 0, temp39 = 0; float temp40= 0, temp41= 0, temp42= 0, temp43= 0, temp44= 0, temp45= 0, temp46= 0, temp47= 0, temp48= 0, temp49 = 0; float temp50= 0, temp51= 0, temp52= 0, temp53= 0, temp54= 0, temp55= 0, temp56= 0, temp57= 0, temp58= 0, temp59 = 0; float temp60= 0, temp61= 0, temp62= 0, temp63= 0, temp64= 0, temp65= 0, temp66= 0, temp67= 0, temp68= 0, temp69 = 0; float temp70= 0, temp71= 0, temp72= 0, temp73= 0, temp74= 0, temp75= 0, temp76= 0, temp77= 0, temp78= 0, temp79 = 0; float temp80= 0, temp81= 0, temp82= 0, temp83= 0, temp84= 0, temp85= 0, temp86= 0, temp87= 0, temp88= 0, temp89 = 0; float temp90= 0, temp91= 0, temp92= 0, temp93= 0, temp94= 0, temp95= 0, temp96= 0, temp97= 0, temp98= 0, temp99 = 0; int N = F/T10; // N = 100/10=10; for F = 100 and T = 10 int effective_block_size = N*(N+1)/2; //get the x and y coordinate int tile_x = 0; int tile_y = 0; for ( int i = 0; i < N; i++ ) { int end = ((2*N-i)*(i+1))/2; if(threadIdx.x < end){ tile_x = i * T10; tile_y = (N + threadIdx.x - end) * T10; break; } } int index = blockIdx.x*F*F; //iteration: copy gmem-->smem; aggregate smem-->register for (int iter = 0; iter < iterations; iter ++){ //phase 1 in iteration: gmem --> smem //REQ: blockDim.x >= F/2 if(threadIdx.x < F/2){ for(int k = 0; k< SCAN_BATCH; k++){ if(iter*SCAN_BATCH + k < end - start){ float2 theta; theta.x = __ldg(&thetaT[F * csrColIndex[start + iter*SCAN_BATCH + k] + 2*threadIdx.x]); theta.y = __ldg(&thetaT[F * csrColIndex[start + iter*SCAN_BATCH + k] + 2*threadIdx.x+1]); thetaTemp[k * F/2 + threadIdx.x] = theta; //this simpler statement is slower. 
//thetaTemp[k * F/2 + threadIdx.x] = __ldg((float2*)&thetaT[F * csrColIndex[start + iter*SCAN_BATCH + k] + 2*threadIdx.x]); } //not enough theta to copy, set zero else memset(&thetaTemp[k*F/2 + threadIdx.x], 0, 2*sizeof(float)); } } __syncthreads(); //phase 2 in iteration: smem --> register if(threadIdx.x < effective_block_size){//this redundant "if" seems improving kernel performance for(int k = 0; k < SCAN_BATCH; k++){ accumulate_in_registers(); } } } //end of iteration in copying from smem and aggregating in register __syncthreads(); //phase 3, after iteration: register --> gmem if(threadIdx.x < effective_block_size){ fill_lower_half_from_registers(); //symmetric if(tile_x != tile_y){ fill_upper_half_from_registers(); } //regularization if(tile_x == tile_y){ for(int k = 0; k < T10; k++) tt[index + (tile_x+k)*(1+F)] += (end - start) * lambda; } } } } float doALS(const int* csrRowIndexHostPtr, const int* csrColIndexHostPtr, const float* csrValHostPtr, const int* cscRowIndexHostPtr, const int* cscColIndexHostPtr, const float* cscValHostPtr, const int* cooRowIndexHostPtr, float* thetaTHost, float* XTHost, const int * cooRowIndexTestHostPtr, const int * cooColIndexTestHostPtr, const float * cooValHostTestPtr, const int m, const int n, const int f, const long nnz, const long nnz_test, const float lambda, const int ITERS, const int X_BATCH, const int THETA_BATCH, const int DEVICEID) { cudaSetDevice(DEVICEID); printf("*******parameters: m: %d, n: %d, f: %d, nnz: %ld \n", m, n, f, nnz); //device pointers int * csrRowIndex = 0; int * csrColIndex = 0; float * csrVal = 0; float * thetaT = 0; float * tt = 0; float * XT = 0; float * cscVal =0; int * cscRowIndex = 0; int * cscColIndex = 0; //coo to calculate RMSE int * cooRowIndex =0; float * cooVal_test; int * cooRowIndex_test; int * cooColIndex_test; float final_rmse; printf("*******start allocating memory on GPU...\n"); cudacall(cudaMalloc((void** ) &cscRowIndex,nnz * sizeof(cscRowIndex[0]))); cudacall(cudaMalloc((void** ) &cscColIndex, (n+1) * sizeof(cscColIndex[0]))); cudacall(cudaMalloc((void** ) &cscVal, nnz * sizeof(cscVal[0]))); //dimension: F*N cudacall(cudaMalloc((void** ) &thetaT, f * n * sizeof(thetaT[0]))); //dimension: M*F cudacall(cudaMalloc((void** ) &XT, f * m * sizeof(XT[0]))); printf("*******start copying memory to GPU...\n"); cudacall(cudaMemcpy(cscRowIndex, cscRowIndexHostPtr,(size_t ) nnz * sizeof(cscRowIndex[0]), cudaMemcpyHostToDevice)); cudacall(cudaMemcpy(cscColIndex, cscColIndexHostPtr,(size_t ) (n+1) * sizeof(cscColIndex[0]), cudaMemcpyHostToDevice)); cudacall(cudaMemcpy(cscVal, cscValHostPtr,(size_t ) (nnz * sizeof(cscVal[0])),cudaMemcpyHostToDevice)); cudacall(cudaMemcpy(thetaT, thetaTHost, (size_t ) (n * f * sizeof(thetaT[0])), cudaMemcpyHostToDevice)); cudacall(cudaDeviceSetCacheConfig(cudaFuncCachePreferShared)); //64-bit smem access //http://acceleware.com/blog/maximizing-shared-memory-bandwidth-nvidia-kepler-gpus cudaDeviceSetSharedMemConfig(cudaSharedMemBankSizeEightByte); //initialize cublas, cusparse cublasHandle_t handle; cublascall(cublasCreate(&handle)); cusparseHandle_t cushandle = 0; cusparsecall(cusparseCreate(&cushandle)); cusparseMatDescr_t descr; cusparsecall( cusparseCreateMatDescr(&descr)); cusparseSetMatType(descr, CUSPARSE_MATRIX_TYPE_GENERAL); cusparseSetMatIndexBase(descr, CUSPARSE_INDEX_BASE_ZERO); using namespace std; #ifdef DEBUG //variable used to time double elapsed = 0.0; struct timeval tv; struct timeval start_tv; struct timeval start_tv2; #endif for(int iter = 0; iter < ITERS ; iter ++){ 
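// One ALS sweep per iteration:
//  (1) update X:     form Y*theta with cuSPARSE, build one f-by-f Hermitian matrix per row
//                    (the sum of theta_v*theta_v^T over the items rated by that row, plus
//                    (#ratings)*lambda on the diagonal) with the get_hermitian* kernels,
//                    then solve the batched f-by-f systems with cuBLAS LU (updateX);
//  (2) update theta: the symmetric pass over the CSC copy of the ratings (updateTheta);
//  (3) report train and test RMSE for this iteration.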
#ifdef DEBUG printf("---------------------------ALS iteration %d, update X.----------------------------------\n", iter); gettimeofday(&start_tv, NULL); #endif //copy csr matrix in cudacall(cudaMalloc((void** ) &csrRowIndex,(m + 1) * sizeof(csrRowIndex[0]))); cudacall(cudaMalloc((void** ) &csrColIndex, nnz * sizeof(csrColIndex[0]))); cudacall(cudaMalloc((void** ) &csrVal, nnz * sizeof(csrVal[0]))); cudacall(cudaMemcpy(csrRowIndex, csrRowIndexHostPtr,(size_t ) ((m + 1) * sizeof(csrRowIndex[0])), cudaMemcpyHostToDevice)); cudacall(cudaMemcpy(csrColIndex, csrColIndexHostPtr,(size_t ) (nnz * sizeof(csrColIndex[0])), cudaMemcpyHostToDevice)); cudacall(cudaMemcpy(csrVal, csrValHostPtr,(size_t ) (nnz * sizeof(csrVal[0])),cudaMemcpyHostToDevice)); #ifdef DEBUG printf("\tgenerate: Y*theta using cusparse.\n"); #endif float * ytheta = 0; float * ythetaT = 0; cudacall(cudaMalloc((void** ) &ytheta, f * m * sizeof(ytheta[0]))); cudacall(cudaMalloc((void** ) &ythetaT, f * m * sizeof(ythetaT[0]))); const float alpha = 1.0f; const float beta = 0.0f; cusparsecall (cusparseScsrmm2(cushandle, CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_TRANSPOSE, m, f, n, nnz, &alpha, descr, csrVal, csrRowIndex, csrColIndex, thetaT, f, &beta, ytheta, m) ); //cudaDeviceSynchronize(); //printf("*******transpose ytheta use cublas.\n"); //ytheta: m*f; need ythetaT = (ytheta).T = f*m cublascall(cublasSgeam(handle, CUBLAS_OP_T, CUBLAS_OP_N, f, m, &alpha, (const float * ) ytheta, m, &beta, ythetaT, f, ythetaT, f)); cudaDeviceSynchronize(); cudaCheckError(); cudacall(cudaFree(ytheta)); cudacall(cudaFree(csrVal)); #ifdef DEBUG gettimeofday(&tv, NULL); elapsed = (tv.tv_sec - start_tv.tv_sec) + (tv.tv_usec - start_tv.tv_usec) / 1000000.0; printf("\tgenerate: Y*theta run %f seconds.\n", elapsed); #endif int block_dim = f/T10*(f/T10+1)/2; if (block_dim < f/2) block_dim = f/2; for(int batch_id = 0; batch_id< X_BATCH; batch_id ++){ #ifdef DEBUG printf("*******batch %d / %d.*******\n", batch_id, X_BATCH); #endif int batch_size = 0; if(batch_id != X_BATCH - 1) batch_size = m/X_BATCH; else batch_size = m - batch_id*(m/X_BATCH); int batch_offset = batch_id * (m/X_BATCH); cudacall(cudaMalloc((void** ) &tt, f * f * batch_size * sizeof(float))); #ifdef DEBUG gettimeofday(&start_tv2, NULL); printf("\tupdateXByBlock kernel.\n"); #endif if(f == 100){ //do not use fp16 by default #ifdef CUMF_USE_HALF half* thetaT_fp16 = 0; cudacall(cudaMalloc((void** ) &thetaT_fp16, f * n * sizeof(thetaT_fp16[0]))); fp32Array2fp16Array<<<(n*f-1)/1024 + 1, 1024>>>(thetaT, thetaT_fp16, f*n); get_hermitian100WithHalf<<<batch_size, 64, SCAN_BATCH * f/2*sizeof(float2)>>> (batch_offset, tt, csrRowIndex, csrColIndex, lambda, m, f, thetaT_fp16); cudacall(cudaFree(thetaT_fp16)); #else get_hermitian100<<<batch_size, 64, SCAN_BATCH * f/2*sizeof(float2)>>> (batch_offset, tt, csrRowIndex, csrColIndex, lambda, m, f, thetaT); #endif } else get_hermitianT10<<<batch_size, block_dim, SCAN_BATCH * f/2*sizeof(float2)>>> (batch_offset, tt, csrRowIndex, csrColIndex, lambda, m, f, thetaT); cudaDeviceSynchronize(); cudaCheckError(); #ifdef DEBUG gettimeofday(&tv, NULL); elapsed = (tv.tv_sec - start_tv2.tv_sec) + (tv.tv_usec - start_tv2.tv_usec) / 1000000.0; printf("\tupdate X kernel run %f seconds, gridSize: %d, blockSize %d.\n", elapsed, batch_size, f); double t0 = seconds(); #endif //host pointers for cublas batch operations float ** devPtrTTHost = 0; cudacall(cudaMallocHost( (void** ) &devPtrTTHost, batch_size * sizeof(*devPtrTTHost) ) ); float **devPtrYthetaTHost = 0; 
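// Pinned host staging for the arrays of device pointers (one entry per f-by-f system and
// per right-hand side in this batch); updateX copies them to the GPU for
// cublasSgetrfBatched / cublasSgetrsBatched.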
cudacall(cudaMallocHost( (void** ) &devPtrYthetaTHost, batch_size * sizeof(*devPtrYthetaTHost) ) ); #ifdef DEBUG printf("\tinvoke updateX with batch_size: %d, batch_offset: %d..\n", batch_size, batch_offset); #endif updateX(batch_size, batch_offset, ythetaT, tt, XT, handle, m, n, f, nnz, devPtrTTHost, devPtrYthetaTHost); #ifdef DEBUG printf("\tupdateX run seconds: %f \n", seconds() - t0); #endif cudacall(cudaFree(tt)); cudacall(cudaFreeHost(devPtrTTHost)); cudacall(cudaFreeHost(devPtrYthetaTHost)); } #ifdef DEBUG gettimeofday(&tv, NULL); elapsed = (tv.tv_sec - start_tv.tv_sec) + (tv.tv_usec - start_tv.tv_usec) / 1000000.0; printf("update X run %f seconds, gridSize: %d, blockSize %d.\n", elapsed, m, f); #endif cudacall(cudaFree(csrRowIndex)); cudacall(cudaFree(csrColIndex)); cudacall(cudaFree(ythetaT)); #ifdef DEBUG gettimeofday(&start_tv, NULL); printf("---------------------------------- ALS iteration %d, update theta ----------------------------------\n", iter); printf("\tgenerate: Y'*X using cusparse.\n"); #endif float * yTX = 0; float * yTXT = 0; cudacall(cudaMalloc((void** ) &yTXT, f * n * sizeof(yTXT[0]))); cudacall(cudaMalloc((void** ) &yTX, n * f * sizeof(yTX[0]))); cusparsecall( cusparseScsrmm2(cushandle, CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_TRANSPOSE, n, f, m, nnz, &alpha, descr, cscVal, cscColIndex, cscRowIndex, XT, f, &beta, yTX, n) ); //cudaDeviceSynchronize(); //printf("*******transpose yTX \n"); //yTX: n*f; need yTXT = (yTX).T = f*n cublascall(cublasSgeam(handle, CUBLAS_OP_T, CUBLAS_OP_N, f, n, &alpha, (const float * ) yTX, n, &beta, yTXT, f, yTXT, f)); cudaDeviceSynchronize(); cudacall(cudaFree(yTX)); #ifdef DEBUG gettimeofday(&tv, NULL); elapsed = (tv.tv_sec - start_tv.tv_sec) + (tv.tv_usec - start_tv.tv_usec) / 1000000.0; printf("\tgenerate: Y'*X run %f seconds.\n", elapsed); #endif //in batches, when N is huge for(int batch_id = 0; batch_id< THETA_BATCH; batch_id ++){ #ifdef DEBUG printf("*******batch %d / %d.*******\n", batch_id, THETA_BATCH); #endif int batch_size = 0; if(batch_id != THETA_BATCH - 1) batch_size = n/THETA_BATCH; else batch_size = n - batch_id*(n/THETA_BATCH); int batch_offset = batch_id * (n/THETA_BATCH); float * xx = 0; cudacall(cudaMalloc((void** ) &xx, f * f * batch_size * sizeof(xx[0]))); cudacall( cudaMemset(xx, 0, f*f*batch_size*sizeof(float)) ); #ifdef DEBUG gettimeofday(&start_tv2, NULL); printf("\tupdateThetaByBlock kernel.\n"); #endif //get_hermitian_theta<<<batch_size, 64>>>(batch_offset, xx, cscRowIndex, cscColIndex, lambda, n); //updateThetaByBlock2pRegDsmemTile<<<batch_size, F>>> if(f == 100){ #ifdef CUMF_USE_HALF half * XT_fp16 = 0; cudacall(cudaMalloc((void** ) &XT_fp16, f * m * sizeof(XT_fp16[0]))); fp32Array2fp16Array<<<(n*f-1)/1024 + 1, 1024>>>(XT, XT_fp16, f*m); get_hermitian100WithHalf<<<batch_size, 64, SCAN_BATCH * f/2*sizeof(float2)>>> (batch_offset, xx, cscColIndex, cscRowIndex, lambda, n, f, XT_fp16); cudacall(cudaFree(XT_fp16)); #else get_hermitian100<<<batch_size, 64, SCAN_BATCH * f/2*sizeof(float2)>>> (batch_offset, xx, cscColIndex, cscRowIndex, lambda, n, f, XT); #endif } else get_hermitianT10<<<batch_size, block_dim, SCAN_BATCH*f*sizeof(float)>>> (batch_offset, xx, cscColIndex, cscRowIndex, lambda, n, f, XT); cudaDeviceSynchronize(); cudaCheckError(); #ifdef DEBUG gettimeofday(&tv, NULL); elapsed = (tv.tv_sec - start_tv2.tv_sec) + (tv.tv_usec - start_tv2.tv_usec) / 1000000.0; printf("\tupdate Theta kernel run %f seconds, gridSize: %d, blockSize %d.\n", elapsed, batch_size, f); double t0 = seconds(); #endif 
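// Same batched-solve staging as in the X pass: one device pointer per f-by-f
// normal-equation matrix (xx) and per right-hand-side column of yTXT.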
float ** devPtrXXHost = 0; cudacall(cudaMallocHost( (void** ) &devPtrXXHost, batch_size * sizeof(*devPtrXXHost) ) ); float **devPtrYTXTHost = 0; cudacall(cudaMallocHost( (void** ) &devPtrYTXTHost, batch_size * sizeof(*devPtrYTXTHost) ) ); #ifdef DEBUG printf("*******invoke updateTheta with batch_size: %d, batch_offset: %d.\n", batch_size, batch_offset); #endif updateTheta(batch_size, batch_offset, xx, yTXT, thetaT, handle, m, n, f, nnz, devPtrXXHost, devPtrYTXTHost); #ifdef DEBUG printf("\tupdateTheta run seconds: %f \n", seconds() - t0); #endif cudacall(cudaFree(xx)); cudacall(cudaFreeHost(devPtrXXHost)); cudacall(cudaFreeHost(devPtrYTXTHost)); } cudacall(cudaFree(yTXT)); #ifdef DEBUG gettimeofday(&tv, NULL); elapsed = (tv.tv_sec - start_tv.tv_sec) + (tv.tv_usec - start_tv.tv_usec) / 1000000.0; printf("update theta run %f seconds, gridSize: %d, blockSize %d.\n", elapsed, n, f); printf("Calculate RMSE.\n"); #endif float * errors_train = 0; int error_size = 1000; cudacall(cudaMalloc((void** ) &errors_train, error_size * sizeof(errors_train[0]))); cudacall( cudaMemset(errors_train, 0, error_size*sizeof(float)) ); cudacall(cudaMalloc((void** ) &cooRowIndex, nnz * sizeof(cooRowIndex[0]))); cudacall(cudaMemcpy(cooRowIndex, cooRowIndexHostPtr,(size_t ) (nnz * sizeof(cooRowIndex[0])), cudaMemcpyHostToDevice)); cudacall(cudaMalloc((void** ) &csrColIndex, nnz * sizeof(csrColIndex[0]))); cudacall(cudaMalloc((void** ) &csrVal, nnz * sizeof(csrVal[0]))); cudacall(cudaMemcpy(csrColIndex, csrColIndexHostPtr,(size_t ) (nnz * sizeof(csrColIndex[0])), cudaMemcpyHostToDevice)); cudacall(cudaMemcpy(csrVal, csrValHostPtr,(size_t ) (nnz * sizeof(csrVal[0])),cudaMemcpyHostToDevice)); RMSE<<<(nnz-1)/256 + 1, 256>>> (csrVal, cooRowIndex, csrColIndex, thetaT, XT, errors_train, nnz, error_size, f); cudaDeviceSynchronize(); cudaCheckError(); cudacall(cudaFree(cooRowIndex)); cudacall(cudaFree(csrColIndex)); cudacall(cudaFree(csrVal)); float* rmse_train = (float*) malloc (sizeof(float)); cublascall( cublasSasum(handle, error_size, errors_train, 1, rmse_train) ); cudaDeviceSynchronize(); printf("--------- Train RMSE in iter %d: %f\n", iter, sqrt((*rmse_train)/nnz)); cudacall(cudaFree(errors_train)); float * errors_test = 0; cudacall(cudaMalloc((void** ) &errors_test, error_size * sizeof(errors_test[0]))); cudacall( cudaMemset(errors_test, 0, error_size*sizeof(float)) ); cudacall(cudaMalloc((void** ) &cooRowIndex_test, nnz_test * sizeof(cooRowIndex_test[0]))); cudacall(cudaMemcpy(cooRowIndex_test, cooRowIndexTestHostPtr,(size_t ) (nnz_test * sizeof(cooRowIndex_test[0])), cudaMemcpyHostToDevice)); cudacall(cudaMalloc((void** ) &cooColIndex_test, nnz_test * sizeof(cooColIndex_test[0]))); cudacall(cudaMalloc((void** ) &cooVal_test, nnz_test * sizeof(cooVal_test[0]))); cudacall(cudaMemcpy(cooColIndex_test, cooColIndexTestHostPtr,(size_t ) (nnz_test * sizeof(cooColIndex_test[0])), cudaMemcpyHostToDevice)); cudacall(cudaMemcpy(cooVal_test, cooValHostTestPtr,(size_t ) (nnz_test * sizeof(cooVal_test[0])),cudaMemcpyHostToDevice)); RMSE<<<(nnz_test-1)/256, 256>>>(cooVal_test, cooRowIndex_test, cooColIndex_test, thetaT, XT, errors_test, nnz_test, error_size, f); cudaDeviceSynchronize(); cudaCheckError(); cudacall(cudaFree(cooRowIndex_test)); cudacall(cudaFree(cooColIndex_test)); cudacall(cudaFree(cooVal_test)); float* rmse_test = (float*) malloc (sizeof(float)); cublascall( cublasSasum(handle, error_size, errors_test, 1, rmse_test) ); cudaDeviceSynchronize(); final_rmse = sqrt((*rmse_test)/nnz_test); printf("--------- Test RMSE in 
iter %d: %f\n", iter, final_rmse); cudacall(cudaFree(errors_test)); } //copy feature vectors back to host cudacall(cudaMemcpy(thetaTHost, thetaT, (size_t ) (n * f * sizeof(thetaT[0])), cudaMemcpyDeviceToHost)); cudacall(cudaMemcpy(XTHost, XT, (size_t ) (m * f * sizeof(XT[0])), cudaMemcpyDeviceToHost)); cudacall(cudaFree(thetaT)); cudacall(cudaFree(XT)); cudacall(cudaFree(cscVal)); cudacall(cudaFree(cscColIndex)); cudacall(cudaFree(cscRowIndex)); cudacall(cudaDeviceReset()); return final_rmse; }
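// ---------------------------------------------------------------------------
// Calling convention (illustrative sketch only; the array names below are
// hypothetical).  The same training ratings must be supplied in CSR, CSC and
// COO form, thetaTHost must already hold an initial f x n factor, and the
// learned factors are written back into XTHost and thetaTHost.  A call could
// look like:
//
//   float rmse = doALS(csrRow, csrCol, csrVal,               // training ratings, CSR
//                      cscRow, cscCol, cscVal,               // same ratings, CSC
//                      cooRow,                               // COO row index per nonzero
//                      thetaTHost, XTHost,                   // factors (theta in/out, X out)
//                      cooRowTest, cooColTest, cooValTest,   // held-out ratings, COO
//                      m, n, f, nnz, nnz_test,
//                      lambda, iters, x_batch, theta_batch, device_id);
//
// The returned value is the test RMSE of the last iteration.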
47b53c318940346b6fe59f8074c2ac95081eddb6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "core/providers/cuda/cu_inc/common.cuh" #include "core/providers/cuda/cuda_allocator.h" #include "core/providers/cuda/cuda_common.h" #include "core/providers/cuda/atomic/common.cuh" #include "core/providers/cuda/reduction/reduction_utils.cuh" #include "contrib_ops/cuda/math/isfinite.cuh" #include "orttraining/training_ops/cuda/optimizer/common.h" #include "orttraining/training_ops/cuda/optimizer/common.cuh" #include "orttraining/training_ops/cuda/optimizer/lamb.h" namespace onnxruntime { namespace cuda { template <typename T1, typename T2, typename T3> __device__ __forceinline__ void _LambComputeDirectionRule( const T1& g_scale, const T1& w, const T2& g, const T3& m1, const T3& m2, const float& alpha, const float& beta, const float& lambda, const float& epsilon, const float& alpha_correction, const float& beta_correction, T2& d, T3& m1_new, T3& m2_new) { // Actual gradient. The scale is a product of loss' scale and // global gradient norm (if the norm > 1). const T1 g_unscaled = T1(g) / g_scale; // A constant in Lamb's equation. const T1 one = T1(1.0f); // Update exponentially-averaged historical gradient const T1 m1_new_tmp = alpha * static_cast<T1>(m1) + (one - alpha) * g_unscaled; // Update exponentially-averaged historical squared gradient const T1 m2_new_tmp = beta * static_cast<T1>(m2) + (one - beta) * g_unscaled * g_unscaled; // Compute unbiased 1st-order momentom. // The value alpha_correction is usually (1-alpha^t), // where t is the number of executed training iterations. const T1 m1_new_tmp_corrected = m1_new_tmp / alpha_correction; // Compute unbiased 2nd-order momentom. // The value beta_correction is usually (1-beta^t), // where t is the number of executed training iterations. const T1 m2_new_tmp_corrected = m2_new_tmp / beta_correction; // Save regularized update direction to output. const T1 d_tmp = lambda * w + m1_new_tmp_corrected / (_Sqrt(m2_new_tmp_corrected) + epsilon); // Things are updated only if the direction is finite. 
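// A non-finite d_tmp typically means the scaled gradient overflowed (e.g. an inf/NaN
// gradient under loss scaling); the fallback below zeroes the direction and leaves both
// momenta unchanged, so the weight update for this element becomes a no-op.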
if (IsFiniteScalar(d_tmp)) { d = d_tmp; m1_new = m1_new_tmp; m2_new = m2_new_tmp; } else { d = T2(0); m1_new = m1; m2_new = m2; } } template <typename T1, typename T2, typename T3, typename T_GRAD_NORM> __global__ void _LambComputeDirectionImpl( const T1* weights, const T2* grads, const T3* moment_1, const T3* moment_2, const T1* loss_scale, const T_GRAD_NORM* g_norm, float alpha, float beta, float lambda, float epsilon, float max_norm, float alpha_correction, float beta_correction, T2* update_direction, T3* moment_1_out, T3* moment_2_out, CUDA_LONG N) { CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N); const T1 scale = _ComputeGradScale<T1, T_GRAD_NORM, T1>(loss_scale, g_norm, max_norm); _LambComputeDirectionRule( scale, weights[id], grads[id], moment_1[id], moment_2[id], alpha, beta, lambda, epsilon, alpha_correction, beta_correction, update_direction[id], moment_1_out[id], moment_2_out[id]); } template <typename T1, typename T2, typename T3, typename T_GRAD_NORM> void LambComputeDirection( hipStream_t stream, const T1* weights, const T2* grads, const T3* moment_1, const T3* moment_2, const T1* loss_scale, const T_GRAD_NORM* grad_norm, float alpha, float beta, float lambda, float epsilon, float max_norm, float alpha_correction, float beta_correction, T2* update_direction, T3* moment_1_out, T3* moment_2_out, size_t count) { int blocksPerGrid = (int)(ceil(static_cast<float>(count) / GridDim::maxThreadsPerBlock)); CUDA_LONG N = static_cast<CUDA_LONG>(count); hipLaunchKernelGGL(( _LambComputeDirectionImpl<T1, T2, T3, T_GRAD_NORM>), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, stream, weights, grads, moment_1, moment_2, loss_scale, grad_norm, alpha, beta, lambda, epsilon, max_norm, alpha_correction, beta_correction, update_direction, moment_1_out, moment_2_out, N); } #define SPECIALIZED_LAMB_COMPUTE_DIRECTION(T1, T2, T3, T_GRAD_NORM) \ template void LambComputeDirection( \ hipStream_t stream, \ const T1* weights, \ const T2* grads, \ const T3* moment_1, \ const T3* moment_2, \ const T1* loss_scale, \ const T_GRAD_NORM* grad_norm, \ float alpha, \ float beta, \ float lambda, \ float epsilon, \ float max_norm, \ float alpha_correction, \ float beta_correction, \ T2* weights_out, \ T3* moment_1_out, \ T3* moment_2_out, \ size_t count); SPECIALIZED_LAMB_COMPUTE_DIRECTION(float, float, float, float) SPECIALIZED_LAMB_COMPUTE_DIRECTION(double, double, double, double) SPECIALIZED_LAMB_COMPUTE_DIRECTION(float, half, half, half) SPECIALIZED_LAMB_COMPUTE_DIRECTION(float, half, half, float) SPECIALIZED_LAMB_COMPUTE_DIRECTION(float, half, float, half) SPECIALIZED_LAMB_COMPUTE_DIRECTION(float, half, float, float) #if TORCH_HIP_VERSION >= 11000 && (__CUDA_ARCH__ >= 800 || !defined(__CUDA_ARCH__)) SPECIALIZED_LAMB_COMPUTE_DIRECTION(float, nv_bfloat16, nv_bfloat16, nv_bfloat16) SPECIALIZED_LAMB_COMPUTE_DIRECTION(float, nv_bfloat16, nv_bfloat16, float) SPECIALIZED_LAMB_COMPUTE_DIRECTION(float, nv_bfloat16, float, nv_bfloat16) SPECIALIZED_LAMB_COMPUTE_DIRECTION(float, nv_bfloat16, float, float) #endif template <typename T1, typename T2, typename T3, typename T_MIXED_PRECISION_FP> __device__ __forceinline__ void _LambUpdateRule( const T1 eta, const float ratio_min, const float ratio_max, const T2 r_norm, const T2 w_norm, const T2 w, const T3 d, T2* w_new, T3* g_new, T_MIXED_PRECISION_FP* w_mixed_precision_new) { // Confidence coefficeint of this update. const T2 ratio = (w_norm != T2(0.0f) && r_norm != T2(0.0f)) ? 
T2(eta) * _Max(T2(ratio_min), _Min(T2(ratio_max), _Sqrt(w_norm / r_norm))) : T2(eta); // Compute delta using the saved update direction. const T2 delta = -ratio * T2(d); const T2 w_new_tmp = w + delta; if (IsFiniteScalar(w_new_tmp)) { if (g_new) { *g_new = T3(delta); } if (w_new) { *w_new = w_new_tmp; if (w_mixed_precision_new) { *w_mixed_precision_new = T_MIXED_PRECISION_FP(w_new_tmp); } } } else { if (g_new) { *g_new = T3(0); } if (w_new) { *w_new = w; if (w_mixed_precision_new) { *w_mixed_precision_new = T_MIXED_PRECISION_FP(w); } } } } template <typename T1, typename T2, typename T3, typename T_MIXED_PRECISION_FP> __global__ void _LambUpdateImpl( const T1* eta, const float ratio_min, const float ratio_max, const T2* r_norm, const T2* w_norm, const T2* weights, const T3* update_direction, T2* weights_out, T3* gradients_out, T_MIXED_PRECISION_FP* mixed_precision_weights_out, CUDA_LONG N) { CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N); _LambUpdateRule( *eta, ratio_min, ratio_max, *r_norm, *w_norm, weights[id], update_direction[id], weights_out != nullptr ? weights_out + id : nullptr, gradients_out != nullptr ? gradients_out + id : nullptr, mixed_precision_weights_out != nullptr ? mixed_precision_weights_out + id : nullptr); } template <typename T1, typename T2, typename T3, typename T_MIXED_PRECISION_FP> void LambUpdate( hipStream_t stream, const T1* eta, const float ratio_min, const float ratio_max, const T2* r_norm, const T2* w_norm, const T2* weights, const T3* update_direction, T2* weights_out, T3* gradients_out, T_MIXED_PRECISION_FP* mixed_precision_weights_out, size_t count) { int blocksPerGrid = (int)(ceil(static_cast<float>(count) / GridDim::maxThreadsPerBlock)); CUDA_LONG N = static_cast<CUDA_LONG>(count); hipLaunchKernelGGL(( _LambUpdateImpl<T1, T2, T3, T_MIXED_PRECISION_FP>), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, stream, eta, ratio_min, ratio_max, r_norm, w_norm, weights, update_direction, weights_out, gradients_out, mixed_precision_weights_out, N); } #define INSTANTIATE_LAMB_UPDATE(T1, T2, T3, T_MIXED_PRECISION_FP) \ template void LambUpdate( \ hipStream_t stream, \ const T1* eta, \ const float ratio_min, \ const float ratio_max, \ const T2* r_norm, \ const T2* w_norm, \ const T2* weights, \ const T3* update_direction, \ T2* weights_out, \ T3* gradients_out, \ T_MIXED_PRECISION_FP* mixed_precision_weights_out, \ size_t count); INSTANTIATE_LAMB_UPDATE(float, float, float, half) INSTANTIATE_LAMB_UPDATE(double, double, double, half) INSTANTIATE_LAMB_UPDATE(half, float, half, half) INSTANTIATE_LAMB_UPDATE(float, float, half, half) #if TORCH_HIP_VERSION >= 11000 && (__CUDA_ARCH__ >= 800 || !defined(__CUDA_ARCH__)) INSTANTIATE_LAMB_UPDATE(float, float, float, nv_bfloat16) INSTANTIATE_LAMB_UPDATE(double, double, double, nv_bfloat16) INSTANTIATE_LAMB_UPDATE(nv_bfloat16, float, nv_bfloat16, nv_bfloat16) INSTANTIATE_LAMB_UPDATE(float, float, nv_bfloat16, nv_bfloat16) #endif template <typename T1, typename T2, typename T3, typename T_GRAD_NORM> __global__ void LambMultiTensorComputeDirectionImpl( ChunkGroup<6> chunk_group, const T1* loss_scale, const T_GRAD_NORM* g_norm, const float lambda, const float alpha, const float beta, const float epsilon, const float max_norm, const float alpha_correction, const float beta_correction) { const int group_index = chunk_group.block_index_to_tensor_group_index[blockIdx.x]; const int tensor_size = chunk_group.tensor_sizes[group_index]; const int chunk_size = chunk_group.chunk_size; const int chunk_start = 
chunk_group.block_index_to_chunk_start_index[blockIdx.x]; const T1* w = reinterpret_cast<const T1*>(chunk_group.tensor_ptrs[0][group_index]) + chunk_start; T2* g = reinterpret_cast<T2*>(chunk_group.tensor_ptrs[1][group_index]) + chunk_start; const T3* m1 = reinterpret_cast<const T3*>(chunk_group.tensor_ptrs[2][group_index]) + chunk_start; const T3* m2 = reinterpret_cast<const T3*>(chunk_group.tensor_ptrs[3][group_index]) + chunk_start; T3* m1_new = reinterpret_cast<T3*>(chunk_group.tensor_ptrs[4][group_index]) + chunk_start; T3* m2_new = reinterpret_cast<T3*>(chunk_group.tensor_ptrs[5][group_index]) + chunk_start; const T1 scale = _ComputeGradScale<T1, T_GRAD_NORM, T1>(loss_scale, g_norm, max_norm); #pragma unroll for (int i = threadIdx.x; i < chunk_size && i + chunk_start < tensor_size; i += blockDim.x) { _LambComputeDirectionRule( scale, w[i], g[i], m1[i], m2[i], alpha, beta, lambda, epsilon, alpha_correction, beta_correction, g[i], m1_new[i], m2_new[i]); } } template <typename T1, typename T2, typename T3, typename T_GRAD_NORM> void LambMultiTensorComputeDirectionFunctor<T1, T2, T3, T_GRAD_NORM>::operator()( hipStream_t stream, ChunkGroup<6> chunk_group, const T1* loss_scale, const T_GRAD_NORM* g_norm, const float lambda, const float alpha, const float beta, const float epsilon, const float max_norm, const float alpha_correction, const float beta_correction) { const int thread_count = ChunkGroup<6>::thread_count_per_block; const int block_count = chunk_group.chunk_count; hipLaunchKernelGGL(( LambMultiTensorComputeDirectionImpl<T1, T2, T3>), dim3(block_count), dim3(thread_count), 0, stream, chunk_group, loss_scale, g_norm, lambda, alpha, beta, epsilon, max_norm, alpha_correction, beta_correction); } #define INSTANTIATE_LAMB_STAGE1_MULTI_TENSOR_FUNCTOR(T1, T2, T3, T_GRAD_NORM) \ template void LambMultiTensorComputeDirectionFunctor<T1, T2, T3, T_GRAD_NORM>::operator()( \ hipStream_t stream, \ ChunkGroup<6> chunk_group, \ const T1* loss_scale, \ const T_GRAD_NORM* g_norm, \ const float lambda, \ const float alpha, \ const float beta, \ const float epsilon, \ const float max_norm, \ const float alpha_correction, \ const float beta_correction); INSTANTIATE_LAMB_STAGE1_MULTI_TENSOR_FUNCTOR(float, float, float, float) INSTANTIATE_LAMB_STAGE1_MULTI_TENSOR_FUNCTOR(double, double, double, double) INSTANTIATE_LAMB_STAGE1_MULTI_TENSOR_FUNCTOR(float, half, half, half) INSTANTIATE_LAMB_STAGE1_MULTI_TENSOR_FUNCTOR(float, half, half, float) INSTANTIATE_LAMB_STAGE1_MULTI_TENSOR_FUNCTOR(float, half, float, half) INSTANTIATE_LAMB_STAGE1_MULTI_TENSOR_FUNCTOR(float, half, float, float) #if TORCH_HIP_VERSION >= 11000 && (__CUDA_ARCH__ >= 800 || !defined(__CUDA_ARCH__)) INSTANTIATE_LAMB_STAGE1_MULTI_TENSOR_FUNCTOR(float, nv_bfloat16, nv_bfloat16, nv_bfloat16) INSTANTIATE_LAMB_STAGE1_MULTI_TENSOR_FUNCTOR(float, nv_bfloat16, nv_bfloat16, float) INSTANTIATE_LAMB_STAGE1_MULTI_TENSOR_FUNCTOR(float, nv_bfloat16, float, nv_bfloat16) INSTANTIATE_LAMB_STAGE1_MULTI_TENSOR_FUNCTOR(float, nv_bfloat16, float, float) #endif template <typename T1, typename T2, typename T3, typename T_MIXED_PRECISION_FP> __global__ void LambMultiTensorUpdateImpl( ChunkGroup<7> chunk_group, const T1* eta, const float ratio_min, const float ratio_max) { const int group_index = chunk_group.block_index_to_tensor_group_index[blockIdx.x]; const int tensor_size = chunk_group.tensor_sizes[group_index]; const int chunk_size = chunk_group.chunk_size; const int chunk_start = chunk_group.block_index_to_chunk_start_index[blockIdx.x]; const T2* w_norm = 
reinterpret_cast<const T2*>(chunk_group.tensor_ptrs[0][group_index]); const T2* r_norm = reinterpret_cast<const T2*>(chunk_group.tensor_ptrs[1][group_index]); const T2* w = reinterpret_cast<const T2*>(chunk_group.tensor_ptrs[2][group_index]) + chunk_start; const T3* d = reinterpret_cast<const T3*>(chunk_group.tensor_ptrs[3][group_index]) + chunk_start; T2* w_new = chunk_group.tensor_ptrs[4][group_index] != nullptr ? reinterpret_cast<T2*>(chunk_group.tensor_ptrs[4][group_index]) + chunk_start : nullptr; T3* g_new = chunk_group.tensor_ptrs[5][group_index] != nullptr ? reinterpret_cast<T3*>(chunk_group.tensor_ptrs[5][group_index]) + chunk_start : nullptr; T_MIXED_PRECISION_FP* w_mixed_precision_new = chunk_group.tensor_ptrs[6][group_index] != nullptr ? reinterpret_cast<T_MIXED_PRECISION_FP*>(chunk_group.tensor_ptrs[6][group_index]) + chunk_start : nullptr; for (int i = threadIdx.x; i < chunk_size && i + chunk_start < tensor_size; i += blockDim.x) { _LambUpdateRule( *eta, ratio_min, ratio_max, *r_norm, *w_norm, w[i], d[i], w_new != nullptr ? w_new + i : nullptr, g_new != nullptr ? g_new + i : nullptr, w_mixed_precision_new != nullptr ? w_mixed_precision_new + i : nullptr); } } template <typename T1, typename T2, typename T3, typename T_MIXED_PRECISION_FP> void LambMultiTensorUpdateFunctor<T1, T2, T3, T_MIXED_PRECISION_FP>::operator()( hipStream_t stream, ChunkGroup<7> chunk_group, const T1* eta, const float ratio_min, const float ratio_max) { const int thread_count = ChunkGroup<7>::thread_count_per_block; const int block_count = chunk_group.chunk_count; hipLaunchKernelGGL(( LambMultiTensorUpdateImpl<T1, T2, T3, T_MIXED_PRECISION_FP>), dim3(block_count), dim3(thread_count), 0, stream, chunk_group, eta, ratio_min, ratio_max); } #define INSTANTIATE_LAMB_MULTI_TENSOR_UPDATE_FUNCTOR(T1, T2, T3, T_MIXED_PRECISION_FP) \ template void LambMultiTensorUpdateFunctor<T1, T2, T3, T_MIXED_PRECISION_FP>::operator()( \ hipStream_t stream, \ ChunkGroup<7> chunk_group, \ const T1* eta, \ const float ratio_min, \ const float ratio_max); INSTANTIATE_LAMB_MULTI_TENSOR_UPDATE_FUNCTOR(float, float, float, half) INSTANTIATE_LAMB_MULTI_TENSOR_UPDATE_FUNCTOR(double, double, double, half) INSTANTIATE_LAMB_MULTI_TENSOR_UPDATE_FUNCTOR(half, float, half, half) INSTANTIATE_LAMB_MULTI_TENSOR_UPDATE_FUNCTOR(float, float, half, half) #if TORCH_HIP_VERSION >= 11000 && (__CUDA_ARCH__ >= 800 || !defined(__CUDA_ARCH__)) INSTANTIATE_LAMB_MULTI_TENSOR_UPDATE_FUNCTOR(float, float, float, nv_bfloat16) INSTANTIATE_LAMB_MULTI_TENSOR_UPDATE_FUNCTOR(double, double, double, nv_bfloat16) INSTANTIATE_LAMB_MULTI_TENSOR_UPDATE_FUNCTOR(nv_bfloat16, float, nv_bfloat16, nv_bfloat16) INSTANTIATE_LAMB_MULTI_TENSOR_UPDATE_FUNCTOR(float, float, nv_bfloat16, nv_bfloat16) #endif // w_buffer[i], d_buffer[i] is used to store the squared sum of all elements processed by the i-th block. 
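// Each per-tensor output is a squared two-norm; the Lamb update rule later takes
// sqrt(w_norm / r_norm) to form the trust ratio.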
// sync_range_and_lock is used for a well ordered reduction over blocks spanning the same tensor template <typename TIn1, typename TIn2, typename TOut1, typename TOut2, typename TBuf> __launch_bounds__(ChunkGroup<4>::thread_count_per_block) __global__ void LambMultiTensorReductionImpl( ChunkGroup<4> chunk_group, TOut1* w_buffer, TOut2* d_buffer, LambMultiTensorSyncRangeAndLock* sync_range_and_lock) { const int group_index = chunk_group.block_index_to_tensor_group_index[blockIdx.x]; const int tensor_size = chunk_group.tensor_sizes[group_index]; const int chunk_size = chunk_group.chunk_size; const int chunk_start = chunk_group.block_index_to_chunk_start_index[blockIdx.x]; const TIn1* w = reinterpret_cast<const TIn1*>(chunk_group.tensor_ptrs[0][group_index]) + chunk_start; const TIn2* d = reinterpret_cast<const TIn2*>(chunk_group.tensor_ptrs[1][group_index]) + chunk_start; TOut1* w_norm = reinterpret_cast<TOut1*>(chunk_group.tensor_ptrs[2][group_index]); TOut2* d_norm = reinterpret_cast<TOut2*>(chunk_group.tensor_ptrs[3][group_index]); TBuf d_sum = TBuf(0.f); TBuf w_sum = TBuf(0.f); constexpr int load_count_per_thread = 4; for (int i = threadIdx.x; i < chunk_size && i + chunk_start < tensor_size; i += blockDim.x * load_count_per_thread) { #pragma unroll for (int j = 0; j < load_count_per_thread; ++j) { const int index_in_chunk = i + j * blockDim.x; const int index_in_tensor = chunk_start + index_in_chunk; if (index_in_chunk < chunk_size && index_in_tensor < tensor_size) { const TBuf w_element = TBuf(w[index_in_chunk]); const TBuf d_element = TBuf(d[index_in_chunk]); w_sum += w_element * w_element; d_sum += d_element * d_element; } } } // Thread count in a block must be a multiple of GPU_WARP_SIZE. #pragma unroll for (int stride = GPU_WARP_SIZE / 2; stride > 0; stride /= 2) { w_sum += WARP_SHFL_DOWN(w_sum, stride); d_sum += WARP_SHFL_DOWN(d_sum, stride); } const int warp_count_in_block = blockDim.x / GPU_WARP_SIZE; const int lid = threadIdx.x % GPU_WARP_SIZE; const int wid = threadIdx.x / GPU_WARP_SIZE; // Shape is 2 x warp_count_in_block. 
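// (the first warp_count_in_block entries hold the per-warp partial sums for w,
//  the second half those for d)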
extern __shared__ unsigned char shared_memory_[]; TBuf* shared_memory = reinterpret_cast<TBuf*>(shared_memory_); TBuf* w_shared_memory_ = shared_memory; TBuf* d_shared_memory_ = shared_memory + warp_count_in_block; if (lid == 0) { w_shared_memory_[wid] = w_sum; d_shared_memory_[wid] = d_sum; } __syncthreads(); #pragma unroll for (int stride = warp_count_in_block / 2; stride > 0; stride /= 2) { if (threadIdx.x < stride) { w_shared_memory_[threadIdx.x] += w_shared_memory_[threadIdx.x + stride]; d_shared_memory_[threadIdx.x] += d_shared_memory_[threadIdx.x + stride]; } __syncthreads(); } // ascertain the range of blocks with the associated tensor // note: if non-ordered reduction is OK, then atomicAdd over blocks could suffice const int leading_block_in_tensor = sync_range_and_lock[group_index].leading_block; const int num_blocks_in_tensor = sync_range_and_lock[group_index].number_blocks; if (num_blocks_in_tensor == 1) { if (threadIdx.x == 0) { *w_norm = TOut1(w_shared_memory_[0]); *d_norm = TOut2(d_shared_memory_[0]); } return; } if (threadIdx.x == 0) { w_buffer[blockIdx.x] = w_shared_memory_[0]; d_buffer[blockIdx.x] = d_shared_memory_[0]; } __threadfence(); __syncthreads(); // use lock to determine if this is last block for given tensor __shared__ bool is_last_block_done; if (threadIdx.x == 0) { int* p_lock = &sync_range_and_lock[group_index].completed_blocks; int counter = atomicAdd(p_lock, 1); is_last_block_done = (counter == num_blocks_in_tensor - 1); } __syncthreads(); // only last block to finish for associated tensor enters below if (is_last_block_done) { const int pow2_bound = least_pow2_bound(num_blocks_in_tensor); int blockid = leading_block_in_tensor + threadIdx.x; for (int stride = pow2_bound / 2; stride > 0; stride /= 2) { if (threadIdx.x < stride && threadIdx.x + stride < num_blocks_in_tensor) { w_buffer[blockid] += w_buffer[blockid + stride]; d_buffer[blockid] += d_buffer[blockid + stride]; } __syncthreads(); } if (threadIdx.x == 0) { *w_norm = TOut1(w_buffer[leading_block_in_tensor]); *d_norm = TOut2(d_buffer[leading_block_in_tensor]); } } } CudaKernel::CudaAsyncBuffer<LambMultiTensorSyncRangeAndLock> compute_tensor_range_and_lock(ChunkGroup<4> chunk_group, const CudaKernel& kernel) { const int num_blocks = chunk_group.chunk_count; // sync_range_and_lock is a struct consisting of (start_block, num_blocks, lock) for each tensor // Note: Adding such info to chunk group causes overflow (unless max tensors is reduced) const int max_tensors = ChunkGroup<4>::max_tensor_group_count; LambMultiTensorSyncRangeAndLock initial = {0, 0, 0}; CudaKernel::CudaAsyncBuffer<LambMultiTensorSyncRangeAndLock> sync_range_and_lock(&kernel, initial, max_tensors); for (int block_index = num_blocks - 1; block_index >= 0; block_index--) { int tensor_index = chunk_group.block_index_to_tensor_group_index[block_index]; auto& tensor_block_span = sync_range_and_lock.CpuPtr()[tensor_index]; tensor_block_span.leading_block = block_index; tensor_block_span.number_blocks++; } sync_range_and_lock.CopyToGpu(); return sync_range_and_lock; } template <typename TIn1, typename TIn2, typename TOut1, typename TOut2, typename TBuf> void LambMultiTensorReductionFunctor<TIn1, TIn2, TOut1, TOut2, TBuf>::operator()(hipStream_t stream, ChunkGroup<4> chunk_group, const CudaKernel& kernel, void* reduction_buffer, size_t reduction_buffer_size) { // thread count per block. constexpr int thread_count = ChunkGroup<4>::thread_count_per_block; // shared memory's size per block. 
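// (one TBuf partial sum per warp for w plus one for d, matching the
//  2 x warp_count_in_block layout used inside the kernel)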
const int shared_memory_size = thread_count / GPU_WARP_SIZE * 2 * sizeof(TBuf); // Enforce assumptions used inside this reduction CUDA kernel. ORT_ENFORCE(thread_count % GPU_WARP_SIZE == 0); ORT_ENFORCE((thread_count & (thread_count - 1)) == 0); const int num_blocks = chunk_group.chunk_count; const size_t w_buffer_size = num_blocks * sizeof(TOut1); const size_t d_buffer_size = num_blocks * sizeof(TOut2); ORT_ENFORCE(w_buffer_size + d_buffer_size <= reduction_buffer_size); TOut1* w_buffer = reinterpret_cast<TOut1*>(reduction_buffer); TOut2* d_buffer = reinterpret_cast<TOut2*>(w_buffer + num_blocks); auto sync_range_and_lock = compute_tensor_range_and_lock(chunk_group, kernel); hipLaunchKernelGGL(( LambMultiTensorReductionImpl<TIn1, TIn2, TOut1, TOut2, TBuf>), dim3(chunk_group.chunk_count), dim3(thread_count), shared_memory_size, stream, chunk_group, w_buffer, d_buffer, sync_range_and_lock.GpuPtr()); } #define INSTANTIATE_LAMB_MULTI_TENSOR_REDUCTION_FUNCTOR(TIn1, TIn2, TOut1, TOut2, TBuf) \ template void LambMultiTensorReductionFunctor<TIn1, TIn2, TOut1, TOut2, TBuf>::operator()(hipStream_t stream, ChunkGroup<4> chunk_group, const CudaKernel& kernel, void* reduction_buffer, size_t reduction_buffer_size); INSTANTIATE_LAMB_MULTI_TENSOR_REDUCTION_FUNCTOR(float, float, float, float, float) INSTANTIATE_LAMB_MULTI_TENSOR_REDUCTION_FUNCTOR(double, double, double, double, double) INSTANTIATE_LAMB_MULTI_TENSOR_REDUCTION_FUNCTOR(float, half, float, half, float) INSTANTIATE_LAMB_MULTI_TENSOR_REDUCTION_FUNCTOR(float, half, float, float, float) INSTANTIATE_LAMB_MULTI_TENSOR_REDUCTION_FUNCTOR(half, half, half, half, float) #if TORCH_HIP_VERSION >= 11000 && (__CUDA_ARCH__ >= 800 || !defined(__CUDA_ARCH__)) INSTANTIATE_LAMB_MULTI_TENSOR_REDUCTION_FUNCTOR(float, nv_bfloat16, float, nv_bfloat16, float) INSTANTIATE_LAMB_MULTI_TENSOR_REDUCTION_FUNCTOR(float, nv_bfloat16, float, float, float) INSTANTIATE_LAMB_MULTI_TENSOR_REDUCTION_FUNCTOR(nv_bfloat16, nv_bfloat16, nv_bfloat16, nv_bfloat16, float) #endif } // namespace cuda } // namespace onnxruntime
47b53c318940346b6fe59f8074c2ac95081eddb6.cu
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "core/providers/cuda/cu_inc/common.cuh" #include "core/providers/cuda/cuda_allocator.h" #include "core/providers/cuda/cuda_common.h" #include "core/providers/cuda/atomic/common.cuh" #include "core/providers/cuda/reduction/reduction_utils.cuh" #include "contrib_ops/cuda/math/isfinite.cuh" #include "orttraining/training_ops/cuda/optimizer/common.h" #include "orttraining/training_ops/cuda/optimizer/common.cuh" #include "orttraining/training_ops/cuda/optimizer/lamb.h" namespace onnxruntime { namespace cuda { template <typename T1, typename T2, typename T3> __device__ __forceinline__ void _LambComputeDirectionRule( const T1& g_scale, const T1& w, const T2& g, const T3& m1, const T3& m2, const float& alpha, const float& beta, const float& lambda, const float& epsilon, const float& alpha_correction, const float& beta_correction, T2& d, T3& m1_new, T3& m2_new) { // Actual gradient. The scale is a product of loss' scale and // global gradient norm (if the norm > 1). const T1 g_unscaled = T1(g) / g_scale; // A constant in Lamb's equation. const T1 one = T1(1.0f); // Update exponentially-averaged historical gradient const T1 m1_new_tmp = alpha * static_cast<T1>(m1) + (one - alpha) * g_unscaled; // Update exponentially-averaged historical squared gradient const T1 m2_new_tmp = beta * static_cast<T1>(m2) + (one - beta) * g_unscaled * g_unscaled; // Compute unbiased 1st-order momentom. // The value alpha_correction is usually (1-alpha^t), // where t is the number of executed training iterations. const T1 m1_new_tmp_corrected = m1_new_tmp / alpha_correction; // Compute unbiased 2nd-order momentom. // The value beta_correction is usually (1-beta^t), // where t is the number of executed training iterations. const T1 m2_new_tmp_corrected = m2_new_tmp / beta_correction; // Save regularized update direction to output. const T1 d_tmp = lambda * w + m1_new_tmp_corrected / (_Sqrt(m2_new_tmp_corrected) + epsilon); // Things are updated only if the direction is finite. 
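// A non-finite d_tmp typically means the scaled gradient overflowed (e.g. an inf/NaN
// gradient under loss scaling); the fallback below zeroes the direction and leaves both
// momenta unchanged, so the weight update for this element becomes a no-op.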
if (IsFiniteScalar(d_tmp)) { d = d_tmp; m1_new = m1_new_tmp; m2_new = m2_new_tmp; } else { d = T2(0); m1_new = m1; m2_new = m2; } } template <typename T1, typename T2, typename T3, typename T_GRAD_NORM> __global__ void _LambComputeDirectionImpl( const T1* weights, const T2* grads, const T3* moment_1, const T3* moment_2, const T1* loss_scale, const T_GRAD_NORM* g_norm, float alpha, float beta, float lambda, float epsilon, float max_norm, float alpha_correction, float beta_correction, T2* update_direction, T3* moment_1_out, T3* moment_2_out, CUDA_LONG N) { CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N); const T1 scale = _ComputeGradScale<T1, T_GRAD_NORM, T1>(loss_scale, g_norm, max_norm); _LambComputeDirectionRule( scale, weights[id], grads[id], moment_1[id], moment_2[id], alpha, beta, lambda, epsilon, alpha_correction, beta_correction, update_direction[id], moment_1_out[id], moment_2_out[id]); } template <typename T1, typename T2, typename T3, typename T_GRAD_NORM> void LambComputeDirection( cudaStream_t stream, const T1* weights, const T2* grads, const T3* moment_1, const T3* moment_2, const T1* loss_scale, const T_GRAD_NORM* grad_norm, float alpha, float beta, float lambda, float epsilon, float max_norm, float alpha_correction, float beta_correction, T2* update_direction, T3* moment_1_out, T3* moment_2_out, size_t count) { int blocksPerGrid = (int)(ceil(static_cast<float>(count) / GridDim::maxThreadsPerBlock)); CUDA_LONG N = static_cast<CUDA_LONG>(count); _LambComputeDirectionImpl<T1, T2, T3, T_GRAD_NORM><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0, stream>>>( weights, grads, moment_1, moment_2, loss_scale, grad_norm, alpha, beta, lambda, epsilon, max_norm, alpha_correction, beta_correction, update_direction, moment_1_out, moment_2_out, N); } #define SPECIALIZED_LAMB_COMPUTE_DIRECTION(T1, T2, T3, T_GRAD_NORM) \ template void LambComputeDirection( \ cudaStream_t stream, \ const T1* weights, \ const T2* grads, \ const T3* moment_1, \ const T3* moment_2, \ const T1* loss_scale, \ const T_GRAD_NORM* grad_norm, \ float alpha, \ float beta, \ float lambda, \ float epsilon, \ float max_norm, \ float alpha_correction, \ float beta_correction, \ T2* weights_out, \ T3* moment_1_out, \ T3* moment_2_out, \ size_t count); SPECIALIZED_LAMB_COMPUTE_DIRECTION(float, float, float, float) SPECIALIZED_LAMB_COMPUTE_DIRECTION(double, double, double, double) SPECIALIZED_LAMB_COMPUTE_DIRECTION(float, half, half, half) SPECIALIZED_LAMB_COMPUTE_DIRECTION(float, half, half, float) SPECIALIZED_LAMB_COMPUTE_DIRECTION(float, half, float, half) SPECIALIZED_LAMB_COMPUTE_DIRECTION(float, half, float, float) #if CUDA_VERSION >= 11000 && (__CUDA_ARCH__ >= 800 || !defined(__CUDA_ARCH__)) SPECIALIZED_LAMB_COMPUTE_DIRECTION(float, nv_bfloat16, nv_bfloat16, nv_bfloat16) SPECIALIZED_LAMB_COMPUTE_DIRECTION(float, nv_bfloat16, nv_bfloat16, float) SPECIALIZED_LAMB_COMPUTE_DIRECTION(float, nv_bfloat16, float, nv_bfloat16) SPECIALIZED_LAMB_COMPUTE_DIRECTION(float, nv_bfloat16, float, float) #endif template <typename T1, typename T2, typename T3, typename T_MIXED_PRECISION_FP> __device__ __forceinline__ void _LambUpdateRule( const T1 eta, const float ratio_min, const float ratio_max, const T2 r_norm, const T2 w_norm, const T2 w, const T3 d, T2* w_new, T3* g_new, T_MIXED_PRECISION_FP* w_mixed_precision_new) { // Confidence coefficeint of this update. const T2 ratio = (w_norm != T2(0.0f) && r_norm != T2(0.0f)) ? 
T2(eta) * _Max(T2(ratio_min), _Min(T2(ratio_max), _Sqrt(w_norm / r_norm))) : T2(eta); // Compute delta using the saved update direction. const T2 delta = -ratio * T2(d); const T2 w_new_tmp = w + delta; if (IsFiniteScalar(w_new_tmp)) { if (g_new) { *g_new = T3(delta); } if (w_new) { *w_new = w_new_tmp; if (w_mixed_precision_new) { *w_mixed_precision_new = T_MIXED_PRECISION_FP(w_new_tmp); } } } else { if (g_new) { *g_new = T3(0); } if (w_new) { *w_new = w; if (w_mixed_precision_new) { *w_mixed_precision_new = T_MIXED_PRECISION_FP(w); } } } } template <typename T1, typename T2, typename T3, typename T_MIXED_PRECISION_FP> __global__ void _LambUpdateImpl( const T1* eta, const float ratio_min, const float ratio_max, const T2* r_norm, const T2* w_norm, const T2* weights, const T3* update_direction, T2* weights_out, T3* gradients_out, T_MIXED_PRECISION_FP* mixed_precision_weights_out, CUDA_LONG N) { CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N); _LambUpdateRule( *eta, ratio_min, ratio_max, *r_norm, *w_norm, weights[id], update_direction[id], weights_out != nullptr ? weights_out + id : nullptr, gradients_out != nullptr ? gradients_out + id : nullptr, mixed_precision_weights_out != nullptr ? mixed_precision_weights_out + id : nullptr); } template <typename T1, typename T2, typename T3, typename T_MIXED_PRECISION_FP> void LambUpdate( cudaStream_t stream, const T1* eta, const float ratio_min, const float ratio_max, const T2* r_norm, const T2* w_norm, const T2* weights, const T3* update_direction, T2* weights_out, T3* gradients_out, T_MIXED_PRECISION_FP* mixed_precision_weights_out, size_t count) { int blocksPerGrid = (int)(ceil(static_cast<float>(count) / GridDim::maxThreadsPerBlock)); CUDA_LONG N = static_cast<CUDA_LONG>(count); _LambUpdateImpl<T1, T2, T3, T_MIXED_PRECISION_FP><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0, stream>>>( eta, ratio_min, ratio_max, r_norm, w_norm, weights, update_direction, weights_out, gradients_out, mixed_precision_weights_out, N); } #define INSTANTIATE_LAMB_UPDATE(T1, T2, T3, T_MIXED_PRECISION_FP) \ template void LambUpdate( \ cudaStream_t stream, \ const T1* eta, \ const float ratio_min, \ const float ratio_max, \ const T2* r_norm, \ const T2* w_norm, \ const T2* weights, \ const T3* update_direction, \ T2* weights_out, \ T3* gradients_out, \ T_MIXED_PRECISION_FP* mixed_precision_weights_out, \ size_t count); INSTANTIATE_LAMB_UPDATE(float, float, float, half) INSTANTIATE_LAMB_UPDATE(double, double, double, half) INSTANTIATE_LAMB_UPDATE(half, float, half, half) INSTANTIATE_LAMB_UPDATE(float, float, half, half) #if CUDA_VERSION >= 11000 && (__CUDA_ARCH__ >= 800 || !defined(__CUDA_ARCH__)) INSTANTIATE_LAMB_UPDATE(float, float, float, nv_bfloat16) INSTANTIATE_LAMB_UPDATE(double, double, double, nv_bfloat16) INSTANTIATE_LAMB_UPDATE(nv_bfloat16, float, nv_bfloat16, nv_bfloat16) INSTANTIATE_LAMB_UPDATE(float, float, nv_bfloat16, nv_bfloat16) #endif template <typename T1, typename T2, typename T3, typename T_GRAD_NORM> __global__ void LambMultiTensorComputeDirectionImpl( ChunkGroup<6> chunk_group, const T1* loss_scale, const T_GRAD_NORM* g_norm, const float lambda, const float alpha, const float beta, const float epsilon, const float max_norm, const float alpha_correction, const float beta_correction) { const int group_index = chunk_group.block_index_to_tensor_group_index[blockIdx.x]; const int tensor_size = chunk_group.tensor_sizes[group_index]; const int chunk_size = chunk_group.chunk_size; const int chunk_start = chunk_group.block_index_to_chunk_start_index[blockIdx.x]; 
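// Slot layout of the 6-tensor chunk group: 0 = weights, 1 = gradients (overwritten in
// place with the computed update direction), 2/3 = first and second moments,
// 4/5 = their outputs.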
const T1* w = reinterpret_cast<const T1*>(chunk_group.tensor_ptrs[0][group_index]) + chunk_start; T2* g = reinterpret_cast<T2*>(chunk_group.tensor_ptrs[1][group_index]) + chunk_start; const T3* m1 = reinterpret_cast<const T3*>(chunk_group.tensor_ptrs[2][group_index]) + chunk_start; const T3* m2 = reinterpret_cast<const T3*>(chunk_group.tensor_ptrs[3][group_index]) + chunk_start; T3* m1_new = reinterpret_cast<T3*>(chunk_group.tensor_ptrs[4][group_index]) + chunk_start; T3* m2_new = reinterpret_cast<T3*>(chunk_group.tensor_ptrs[5][group_index]) + chunk_start; const T1 scale = _ComputeGradScale<T1, T_GRAD_NORM, T1>(loss_scale, g_norm, max_norm); #pragma unroll for (int i = threadIdx.x; i < chunk_size && i + chunk_start < tensor_size; i += blockDim.x) { _LambComputeDirectionRule( scale, w[i], g[i], m1[i], m2[i], alpha, beta, lambda, epsilon, alpha_correction, beta_correction, g[i], m1_new[i], m2_new[i]); } } template <typename T1, typename T2, typename T3, typename T_GRAD_NORM> void LambMultiTensorComputeDirectionFunctor<T1, T2, T3, T_GRAD_NORM>::operator()( cudaStream_t stream, ChunkGroup<6> chunk_group, const T1* loss_scale, const T_GRAD_NORM* g_norm, const float lambda, const float alpha, const float beta, const float epsilon, const float max_norm, const float alpha_correction, const float beta_correction) { const int thread_count = ChunkGroup<6>::thread_count_per_block; const int block_count = chunk_group.chunk_count; LambMultiTensorComputeDirectionImpl<T1, T2, T3><<<block_count, thread_count, 0, stream>>>( chunk_group, loss_scale, g_norm, lambda, alpha, beta, epsilon, max_norm, alpha_correction, beta_correction); } #define INSTANTIATE_LAMB_STAGE1_MULTI_TENSOR_FUNCTOR(T1, T2, T3, T_GRAD_NORM) \ template void LambMultiTensorComputeDirectionFunctor<T1, T2, T3, T_GRAD_NORM>::operator()( \ cudaStream_t stream, \ ChunkGroup<6> chunk_group, \ const T1* loss_scale, \ const T_GRAD_NORM* g_norm, \ const float lambda, \ const float alpha, \ const float beta, \ const float epsilon, \ const float max_norm, \ const float alpha_correction, \ const float beta_correction); INSTANTIATE_LAMB_STAGE1_MULTI_TENSOR_FUNCTOR(float, float, float, float) INSTANTIATE_LAMB_STAGE1_MULTI_TENSOR_FUNCTOR(double, double, double, double) INSTANTIATE_LAMB_STAGE1_MULTI_TENSOR_FUNCTOR(float, half, half, half) INSTANTIATE_LAMB_STAGE1_MULTI_TENSOR_FUNCTOR(float, half, half, float) INSTANTIATE_LAMB_STAGE1_MULTI_TENSOR_FUNCTOR(float, half, float, half) INSTANTIATE_LAMB_STAGE1_MULTI_TENSOR_FUNCTOR(float, half, float, float) #if CUDA_VERSION >= 11000 && (__CUDA_ARCH__ >= 800 || !defined(__CUDA_ARCH__)) INSTANTIATE_LAMB_STAGE1_MULTI_TENSOR_FUNCTOR(float, nv_bfloat16, nv_bfloat16, nv_bfloat16) INSTANTIATE_LAMB_STAGE1_MULTI_TENSOR_FUNCTOR(float, nv_bfloat16, nv_bfloat16, float) INSTANTIATE_LAMB_STAGE1_MULTI_TENSOR_FUNCTOR(float, nv_bfloat16, float, nv_bfloat16) INSTANTIATE_LAMB_STAGE1_MULTI_TENSOR_FUNCTOR(float, nv_bfloat16, float, float) #endif template <typename T1, typename T2, typename T3, typename T_MIXED_PRECISION_FP> __global__ void LambMultiTensorUpdateImpl( ChunkGroup<7> chunk_group, const T1* eta, const float ratio_min, const float ratio_max) { const int group_index = chunk_group.block_index_to_tensor_group_index[blockIdx.x]; const int tensor_size = chunk_group.tensor_sizes[group_index]; const int chunk_size = chunk_group.chunk_size; const int chunk_start = chunk_group.block_index_to_chunk_start_index[blockIdx.x]; const T2* w_norm = reinterpret_cast<const T2*>(chunk_group.tensor_ptrs[0][group_index]); const T2* r_norm = 
reinterpret_cast<const T2*>(chunk_group.tensor_ptrs[1][group_index]); const T2* w = reinterpret_cast<const T2*>(chunk_group.tensor_ptrs[2][group_index]) + chunk_start; const T3* d = reinterpret_cast<const T3*>(chunk_group.tensor_ptrs[3][group_index]) + chunk_start; T2* w_new = chunk_group.tensor_ptrs[4][group_index] != nullptr ? reinterpret_cast<T2*>(chunk_group.tensor_ptrs[4][group_index]) + chunk_start : nullptr; T3* g_new = chunk_group.tensor_ptrs[5][group_index] != nullptr ? reinterpret_cast<T3*>(chunk_group.tensor_ptrs[5][group_index]) + chunk_start : nullptr; T_MIXED_PRECISION_FP* w_mixed_precision_new = chunk_group.tensor_ptrs[6][group_index] != nullptr ? reinterpret_cast<T_MIXED_PRECISION_FP*>(chunk_group.tensor_ptrs[6][group_index]) + chunk_start : nullptr; for (int i = threadIdx.x; i < chunk_size && i + chunk_start < tensor_size; i += blockDim.x) { _LambUpdateRule( *eta, ratio_min, ratio_max, *r_norm, *w_norm, w[i], d[i], w_new != nullptr ? w_new + i : nullptr, g_new != nullptr ? g_new + i : nullptr, w_mixed_precision_new != nullptr ? w_mixed_precision_new + i : nullptr); } } template <typename T1, typename T2, typename T3, typename T_MIXED_PRECISION_FP> void LambMultiTensorUpdateFunctor<T1, T2, T3, T_MIXED_PRECISION_FP>::operator()( cudaStream_t stream, ChunkGroup<7> chunk_group, const T1* eta, const float ratio_min, const float ratio_max) { const int thread_count = ChunkGroup<7>::thread_count_per_block; const int block_count = chunk_group.chunk_count; LambMultiTensorUpdateImpl<T1, T2, T3, T_MIXED_PRECISION_FP><<<block_count, thread_count, 0, stream>>>( chunk_group, eta, ratio_min, ratio_max); } #define INSTANTIATE_LAMB_MULTI_TENSOR_UPDATE_FUNCTOR(T1, T2, T3, T_MIXED_PRECISION_FP) \ template void LambMultiTensorUpdateFunctor<T1, T2, T3, T_MIXED_PRECISION_FP>::operator()( \ cudaStream_t stream, \ ChunkGroup<7> chunk_group, \ const T1* eta, \ const float ratio_min, \ const float ratio_max); INSTANTIATE_LAMB_MULTI_TENSOR_UPDATE_FUNCTOR(float, float, float, half) INSTANTIATE_LAMB_MULTI_TENSOR_UPDATE_FUNCTOR(double, double, double, half) INSTANTIATE_LAMB_MULTI_TENSOR_UPDATE_FUNCTOR(half, float, half, half) INSTANTIATE_LAMB_MULTI_TENSOR_UPDATE_FUNCTOR(float, float, half, half) #if CUDA_VERSION >= 11000 && (__CUDA_ARCH__ >= 800 || !defined(__CUDA_ARCH__)) INSTANTIATE_LAMB_MULTI_TENSOR_UPDATE_FUNCTOR(float, float, float, nv_bfloat16) INSTANTIATE_LAMB_MULTI_TENSOR_UPDATE_FUNCTOR(double, double, double, nv_bfloat16) INSTANTIATE_LAMB_MULTI_TENSOR_UPDATE_FUNCTOR(nv_bfloat16, float, nv_bfloat16, nv_bfloat16) INSTANTIATE_LAMB_MULTI_TENSOR_UPDATE_FUNCTOR(float, float, nv_bfloat16, nv_bfloat16) #endif // w_buffer[i], d_buffer[i] is used to store the squared sum of all elements processed by the i-th block. 
// sync_range_and_lock is used for a well ordered reduction over blocks spanning the same tensor template <typename TIn1, typename TIn2, typename TOut1, typename TOut2, typename TBuf> __launch_bounds__(ChunkGroup<4>::thread_count_per_block) __global__ void LambMultiTensorReductionImpl( ChunkGroup<4> chunk_group, TOut1* w_buffer, TOut2* d_buffer, LambMultiTensorSyncRangeAndLock* sync_range_and_lock) { const int group_index = chunk_group.block_index_to_tensor_group_index[blockIdx.x]; const int tensor_size = chunk_group.tensor_sizes[group_index]; const int chunk_size = chunk_group.chunk_size; const int chunk_start = chunk_group.block_index_to_chunk_start_index[blockIdx.x]; const TIn1* w = reinterpret_cast<const TIn1*>(chunk_group.tensor_ptrs[0][group_index]) + chunk_start; const TIn2* d = reinterpret_cast<const TIn2*>(chunk_group.tensor_ptrs[1][group_index]) + chunk_start; TOut1* w_norm = reinterpret_cast<TOut1*>(chunk_group.tensor_ptrs[2][group_index]); TOut2* d_norm = reinterpret_cast<TOut2*>(chunk_group.tensor_ptrs[3][group_index]); TBuf d_sum = TBuf(0.f); TBuf w_sum = TBuf(0.f); constexpr int load_count_per_thread = 4; for (int i = threadIdx.x; i < chunk_size && i + chunk_start < tensor_size; i += blockDim.x * load_count_per_thread) { #pragma unroll for (int j = 0; j < load_count_per_thread; ++j) { const int index_in_chunk = i + j * blockDim.x; const int index_in_tensor = chunk_start + index_in_chunk; if (index_in_chunk < chunk_size && index_in_tensor < tensor_size) { const TBuf w_element = TBuf(w[index_in_chunk]); const TBuf d_element = TBuf(d[index_in_chunk]); w_sum += w_element * w_element; d_sum += d_element * d_element; } } } // Thread count in a block must be a multiple of GPU_WARP_SIZE. #pragma unroll for (int stride = GPU_WARP_SIZE / 2; stride > 0; stride /= 2) { w_sum += WARP_SHFL_DOWN(w_sum, stride); d_sum += WARP_SHFL_DOWN(d_sum, stride); } const int warp_count_in_block = blockDim.x / GPU_WARP_SIZE; const int lid = threadIdx.x % GPU_WARP_SIZE; const int wid = threadIdx.x / GPU_WARP_SIZE; // Shape is 2 x warp_count_in_block. 
extern __shared__ unsigned char shared_memory_[]; TBuf* shared_memory = reinterpret_cast<TBuf*>(shared_memory_); TBuf* w_shared_memory_ = shared_memory; TBuf* d_shared_memory_ = shared_memory + warp_count_in_block; if (lid == 0) { w_shared_memory_[wid] = w_sum; d_shared_memory_[wid] = d_sum; } __syncthreads(); #pragma unroll for (int stride = warp_count_in_block / 2; stride > 0; stride /= 2) { if (threadIdx.x < stride) { w_shared_memory_[threadIdx.x] += w_shared_memory_[threadIdx.x + stride]; d_shared_memory_[threadIdx.x] += d_shared_memory_[threadIdx.x + stride]; } __syncthreads(); } // ascertain the range of blocks with the associated tensor // note: if non-ordered reduction is OK, then atomicAdd over blocks could suffice const int leading_block_in_tensor = sync_range_and_lock[group_index].leading_block; const int num_blocks_in_tensor = sync_range_and_lock[group_index].number_blocks; if (num_blocks_in_tensor == 1) { if (threadIdx.x == 0) { *w_norm = TOut1(w_shared_memory_[0]); *d_norm = TOut2(d_shared_memory_[0]); } return; } if (threadIdx.x == 0) { w_buffer[blockIdx.x] = w_shared_memory_[0]; d_buffer[blockIdx.x] = d_shared_memory_[0]; } __threadfence(); __syncthreads(); // use lock to determine if this is last block for given tensor __shared__ bool is_last_block_done; if (threadIdx.x == 0) { int* p_lock = &sync_range_and_lock[group_index].completed_blocks; int counter = atomicAdd(p_lock, 1); is_last_block_done = (counter == num_blocks_in_tensor - 1); } __syncthreads(); // only last block to finish for associated tensor enters below if (is_last_block_done) { const int pow2_bound = least_pow2_bound(num_blocks_in_tensor); int blockid = leading_block_in_tensor + threadIdx.x; for (int stride = pow2_bound / 2; stride > 0; stride /= 2) { if (threadIdx.x < stride && threadIdx.x + stride < num_blocks_in_tensor) { w_buffer[blockid] += w_buffer[blockid + stride]; d_buffer[blockid] += d_buffer[blockid + stride]; } __syncthreads(); } if (threadIdx.x == 0) { *w_norm = TOut1(w_buffer[leading_block_in_tensor]); *d_norm = TOut2(d_buffer[leading_block_in_tensor]); } } } CudaKernel::CudaAsyncBuffer<LambMultiTensorSyncRangeAndLock> compute_tensor_range_and_lock(ChunkGroup<4> chunk_group, const CudaKernel& kernel) { const int num_blocks = chunk_group.chunk_count; // sync_range_and_lock is a struct consisting of (start_block, num_blocks, lock) for each tensor // Note: Adding such info to chunk group causes overflow (unless max tensors is reduced) const int max_tensors = ChunkGroup<4>::max_tensor_group_count; LambMultiTensorSyncRangeAndLock initial = {0, 0, 0}; CudaKernel::CudaAsyncBuffer<LambMultiTensorSyncRangeAndLock> sync_range_and_lock(&kernel, initial, max_tensors); for (int block_index = num_blocks - 1; block_index >= 0; block_index--) { int tensor_index = chunk_group.block_index_to_tensor_group_index[block_index]; auto& tensor_block_span = sync_range_and_lock.CpuPtr()[tensor_index]; tensor_block_span.leading_block = block_index; tensor_block_span.number_blocks++; } sync_range_and_lock.CopyToGpu(); return sync_range_and_lock; } template <typename TIn1, typename TIn2, typename TOut1, typename TOut2, typename TBuf> void LambMultiTensorReductionFunctor<TIn1, TIn2, TOut1, TOut2, TBuf>::operator()(cudaStream_t stream, ChunkGroup<4> chunk_group, const CudaKernel& kernel, void* reduction_buffer, size_t reduction_buffer_size) { // thread count per block. constexpr int thread_count = ChunkGroup<4>::thread_count_per_block; // shared memory's size per block. 
const int shared_memory_size = thread_count / GPU_WARP_SIZE * 2 * sizeof(TBuf); // Enforce assumptions used inside this reduction CUDA kernel. ORT_ENFORCE(thread_count % GPU_WARP_SIZE == 0); ORT_ENFORCE((thread_count & (thread_count - 1)) == 0); const int num_blocks = chunk_group.chunk_count; const size_t w_buffer_size = num_blocks * sizeof(TOut1); const size_t d_buffer_size = num_blocks * sizeof(TOut2); ORT_ENFORCE(w_buffer_size + d_buffer_size <= reduction_buffer_size); TOut1* w_buffer = reinterpret_cast<TOut1*>(reduction_buffer); TOut2* d_buffer = reinterpret_cast<TOut2*>(w_buffer + num_blocks); auto sync_range_and_lock = compute_tensor_range_and_lock(chunk_group, kernel); LambMultiTensorReductionImpl<TIn1, TIn2, TOut1, TOut2, TBuf><<<chunk_group.chunk_count, thread_count, shared_memory_size, stream>>>( chunk_group, w_buffer, d_buffer, sync_range_and_lock.GpuPtr()); } #define INSTANTIATE_LAMB_MULTI_TENSOR_REDUCTION_FUNCTOR(TIn1, TIn2, TOut1, TOut2, TBuf) \ template void LambMultiTensorReductionFunctor<TIn1, TIn2, TOut1, TOut2, TBuf>::operator()(cudaStream_t stream, ChunkGroup<4> chunk_group, const CudaKernel& kernel, void* reduction_buffer, size_t reduction_buffer_size); INSTANTIATE_LAMB_MULTI_TENSOR_REDUCTION_FUNCTOR(float, float, float, float, float) INSTANTIATE_LAMB_MULTI_TENSOR_REDUCTION_FUNCTOR(double, double, double, double, double) INSTANTIATE_LAMB_MULTI_TENSOR_REDUCTION_FUNCTOR(float, half, float, half, float) INSTANTIATE_LAMB_MULTI_TENSOR_REDUCTION_FUNCTOR(float, half, float, float, float) INSTANTIATE_LAMB_MULTI_TENSOR_REDUCTION_FUNCTOR(half, half, half, half, float) #if CUDA_VERSION >= 11000 && (__CUDA_ARCH__ >= 800 || !defined(__CUDA_ARCH__)) INSTANTIATE_LAMB_MULTI_TENSOR_REDUCTION_FUNCTOR(float, nv_bfloat16, float, nv_bfloat16, float) INSTANTIATE_LAMB_MULTI_TENSOR_REDUCTION_FUNCTOR(float, nv_bfloat16, float, float, float) INSTANTIATE_LAMB_MULTI_TENSOR_REDUCTION_FUNCTOR(nv_bfloat16, nv_bfloat16, nv_bfloat16, nv_bfloat16, float) #endif } // namespace cuda } // namespace onnxruntime
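The update rule in _LambUpdateRule above scales each step by a trust ratio clamped to [ratio_min, ratio_max]. The following host-side sketch is illustrative only and is not part of the ONNX Runtime file: the function name, constants, and main() are assumptions added here, and the finiteness fallback of the kernel is omitted.

// Illustrative host-side sketch of the clamped trust-ratio rule in
// _LambUpdateRule (finiteness fallback omitted). Names are hypothetical.
#include <algorithm>
#include <cmath>
#include <cstdio>

// eta * clamp(sqrt(w_norm / r_norm), ratio_min, ratio_max), falling back to
// eta when either norm is zero, as in the device code.
float lamb_trust_ratio(float eta, float w_norm, float r_norm,
                       float ratio_min, float ratio_max) {
  if (w_norm == 0.0f || r_norm == 0.0f) return eta;
  return eta * std::max(ratio_min, std::min(ratio_max, std::sqrt(w_norm / r_norm)));
}

int main() {
  // Weight norm 4.0 and update-direction norm 1.0 give a raw ratio of 2.0,
  // clamped into [0.1, 10.0] and scaled by the learning rate eta = 0.01.
  const float ratio = lamb_trust_ratio(0.01f, 4.0f, 1.0f, 0.1f, 10.0f);
  const float w = 1.5f, d = 0.2f;
  const float w_new = w + (-ratio * d);  // the kernel's w + delta step
  std::printf("ratio=%f  w_new=%f\n", ratio, w_new);
  return 0;
}

The multi-tensor kernels apply exactly this arithmetic per element after the norms have been reduced per tensor.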
61510016f1b9c00cacf79cebeb9b4845efe22ba0.hip
// !!! This is a file automatically generated by hipify!!! #include "open_acc_map_header.cuh" #include "device_launch_parameters.h" #include "hip/hip_runtime.h" #include "hip/hip_runtime.h" __device__ int* cuda_device_1(int *dev_a, int *dev_b); __device__ int cuda_device_2(int dev_a, int dev_b); __device__ int* cuda_device_1(int *dev_a, int *dev_b) { int i = threadIdx.x; if (i < CUDASIZE) { dev_a[i] = dev_a[i] + dev_b[i]; } return dev_a; } __device__ int cuda_device_2(int dev_a, int dev_b) { int i = threadIdx.x; int dev_c; if (i < CUDASIZE) { dev_c = dev_a + dev_b; printf("CUDA CODE\n"); } return dev_c; }
61510016f1b9c00cacf79cebeb9b4845efe22ba0.cu
#include "open_acc_map_header.cuh" #include "device_launch_parameters.h" #include "cuda.h" #include "cuda_runtime.h" __device__ int* cuda_device_1(int *dev_a, int *dev_b); __device__ int cuda_device_2(int dev_a, int dev_b); __device__ int* cuda_device_1(int *dev_a, int *dev_b) { int i = threadIdx.x; if (i < CUDASIZE) { dev_a[i] = dev_a[i] + dev_b[i]; } return dev_a; } __device__ int cuda_device_2(int dev_a, int dev_b) { int i = threadIdx.x; int dev_c; if (i < CUDASIZE) { dev_c = dev_a + dev_b; printf("CUDA CODE\n"); } return dev_c; }
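In cuda_device_2 of the pair above, dev_c is returned uninitialized whenever threadIdx.x is not below CUDASIZE. The sketch below is a hypothetical standalone variant that initializes the result and wraps the device function in a kernel and main() so it compiles on its own; CUDASIZE = 16 and all other names here are assumptions, since the real value comes from open_acc_map_header.cuh.

// Hypothetical standalone variant of the device-function pattern above.
#include <cstdio>
#include <cuda_runtime.h>

#define CUDASIZE 16  // assumption; the original value lives in open_acc_map_header.cuh

__device__ int device_add(int a, int b) {
  int c = 0;  // defined even for threads outside the guarded range
  if (threadIdx.x < CUDASIZE) {
    c = a + b;
  }
  return c;
}

__global__ void add_kernel(const int* a, const int* b, int* out) {
  const int i = threadIdx.x;
  out[i] = device_add(a[i], b[i]);
}

int main() {
  int ha[CUDASIZE], hb[CUDASIZE], hc[CUDASIZE];
  for (int i = 0; i < CUDASIZE; ++i) { ha[i] = i; hb[i] = 2 * i; }
  int *da, *db, *dc;
  cudaMalloc((void**)&da, sizeof(ha));
  cudaMalloc((void**)&db, sizeof(hb));
  cudaMalloc((void**)&dc, sizeof(hc));
  cudaMemcpy(da, ha, sizeof(ha), cudaMemcpyHostToDevice);
  cudaMemcpy(db, hb, sizeof(hb), cudaMemcpyHostToDevice);
  add_kernel<<<1, CUDASIZE>>>(da, db, dc);
  cudaMemcpy(hc, dc, sizeof(hc), cudaMemcpyDeviceToHost);
  printf("hc[3] = %d (expected 9)\n", hc[3]);
  cudaFree(da); cudaFree(db); cudaFree(dc);
  return 0;
}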
7dfa834bbda8ecbcf9121d9ea567bb288fb4392d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.2.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2016 @generated from magmablas/zlarfbx.cu, normal z -> s, Sun Nov 20 20:20:29 2016 */ #include "magma_internal.h" #include "commonblas_s.h" #include "magma_templates.h" // 512 is maximum number of threads for CUDA capability 1.x #define BLOCK_SIZE 512 /******************************************************************************/ extern "C" __global__ void magma_sgemv_kernel1(int m, const float * __restrict__ V, int ldv, const float * __restrict__ c, float *dwork) { const int i = threadIdx.x; const float *dV = V + (blockIdx.x) * ldv; __shared__ float sum[ BLOCK_SIZE ]; float lsum; /* lsum := v**H * C */ lsum = MAGMA_S_ZERO; for (int j = i; j < m; j += BLOCK_SIZE) lsum += MAGMA_S_MUL( MAGMA_S_CONJ( dV[j] ), c[j] ); sum[i] = lsum; magma_sum_reduce< BLOCK_SIZE >( i, sum ); __syncthreads(); if (i == 0) dwork [blockIdx.x] = sum[0]; } /******************************************************************************/ /* Call magma_sgemv_kernel3<<< n, BLOCK_SIZE, 0, queue->cuda_stream() >>>(m, V, ldv, c, dwork, tau) to compute SGEMV( "Conjugate transpose", m, n, -tau[0], V, ldv, c, 1, zero, dwork, 1) and to set c[0] to 1. i.e., work = -tau[0] V**H c */ extern "C" __global__ void magma_sgemv_kernel3(int m, const float * __restrict__ V, int ldv, float *c, float *dwork, float *tau) { const int i = threadIdx.x; const float *dV = V + (blockIdx.x) * ldv; __shared__ float sum[ BLOCK_SIZE ]; float lsum; if (i == 0) c[0] = MAGMA_S_ONE; /* lsum := v**H * C */ lsum = MAGMA_S_ZERO; for (int j = i; j < m; j += BLOCK_SIZE) lsum += MAGMA_S_MUL( MAGMA_S_CONJ( dV[j] ), c[j] ); sum[i] = lsum; magma_sum_reduce< BLOCK_SIZE >( i, sum ); __syncthreads(); if (i == 0) dwork [blockIdx.x] = -tau[0]*sum[0]; } /******************************************************************************/ extern "C" __global__ void magma_sgemv_kernel2(int m, int n, const float * __restrict__ V, int ldv, const float * __restrict__ x, float *c) { const int i = threadIdx.x; const int j = i + BLOCK_SIZE * blockIdx.x; float lsum; V += j; lsum = MAGMA_S_ZERO; if (j < m) { for (int k=0; k < n; k++) lsum += MAGMA_S_MUL( V[k*ldv], x[k]); c[j] -= lsum; } } /******************************************************************************/ /* Apply a real block reflector H to a real vector C from the left (i.e., C = H C). H is represented in the form H = I - V T V**H where T is the real k-by-k upper triangular matrix in the representation of the block reflector, and V is a real block of k elementary reflectors. */ extern "C" void magma_slarfbx_gpu( magma_int_t m, magma_int_t k, magmaFloat_ptr V, magma_int_t ldv, magmaFloat_ptr dT, magma_int_t ldt, magmaFloat_ptr c, magmaFloat_ptr dwork, magma_queue_t queue ) { /* dwork = V**H c */ hipLaunchKernelGGL(( magma_sgemv_kernel1) , dim3(k), dim3(BLOCK_SIZE), 0, queue->cuda_stream() , m, V, ldv, c, dwork); /* dwork = T**H dwork */ hipLaunchKernelGGL(( magma_strmv_tkernel) , dim3(k), dim3(k), 0, queue->cuda_stream() , dT, ldt, dwork, dwork+k); /* c = c - V dwork */ dim3 blocks3( magma_ceildiv( m, BLOCK_SIZE ) ); dim3 threads3( BLOCK_SIZE ); hipLaunchKernelGGL(( magma_sgemv_kernel2) , dim3(blocks3), dim3(threads3), 0, queue->cuda_stream() , m, k, V, ldv, dwork+k, c); }
7dfa834bbda8ecbcf9121d9ea567bb288fb4392d.cu
/* -- MAGMA (version 2.2.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2016 @generated from magmablas/zlarfbx.cu, normal z -> s, Sun Nov 20 20:20:29 2016 */ #include "magma_internal.h" #include "commonblas_s.h" #include "magma_templates.h" // 512 is maximum number of threads for CUDA capability 1.x #define BLOCK_SIZE 512 /******************************************************************************/ extern "C" __global__ void magma_sgemv_kernel1(int m, const float * __restrict__ V, int ldv, const float * __restrict__ c, float *dwork) { const int i = threadIdx.x; const float *dV = V + (blockIdx.x) * ldv; __shared__ float sum[ BLOCK_SIZE ]; float lsum; /* lsum := v**H * C */ lsum = MAGMA_S_ZERO; for (int j = i; j < m; j += BLOCK_SIZE) lsum += MAGMA_S_MUL( MAGMA_S_CONJ( dV[j] ), c[j] ); sum[i] = lsum; magma_sum_reduce< BLOCK_SIZE >( i, sum ); __syncthreads(); if (i == 0) dwork [blockIdx.x] = sum[0]; } /******************************************************************************/ /* Call magma_sgemv_kernel3<<< n, BLOCK_SIZE, 0, queue->cuda_stream() >>>(m, V, ldv, c, dwork, tau) to compute SGEMV( "Conjugate transpose", m, n, -tau[0], V, ldv, c, 1, zero, dwork, 1) and to set c[0] to 1. i.e., work = -tau[0] V**H c */ extern "C" __global__ void magma_sgemv_kernel3(int m, const float * __restrict__ V, int ldv, float *c, float *dwork, float *tau) { const int i = threadIdx.x; const float *dV = V + (blockIdx.x) * ldv; __shared__ float sum[ BLOCK_SIZE ]; float lsum; if (i == 0) c[0] = MAGMA_S_ONE; /* lsum := v**H * C */ lsum = MAGMA_S_ZERO; for (int j = i; j < m; j += BLOCK_SIZE) lsum += MAGMA_S_MUL( MAGMA_S_CONJ( dV[j] ), c[j] ); sum[i] = lsum; magma_sum_reduce< BLOCK_SIZE >( i, sum ); __syncthreads(); if (i == 0) dwork [blockIdx.x] = -tau[0]*sum[0]; } /******************************************************************************/ extern "C" __global__ void magma_sgemv_kernel2(int m, int n, const float * __restrict__ V, int ldv, const float * __restrict__ x, float *c) { const int i = threadIdx.x; const int j = i + BLOCK_SIZE * blockIdx.x; float lsum; V += j; lsum = MAGMA_S_ZERO; if (j < m) { for (int k=0; k < n; k++) lsum += MAGMA_S_MUL( V[k*ldv], x[k]); c[j] -= lsum; } } /******************************************************************************/ /* Apply a real block reflector H to a real vector C from the left (i.e., C = H C). H is represented in the form H = I - V T V**H where T is the real k-by-k upper triangular matrix in the representation of the block reflector, and V is a real block of k elementary reflectors. */ extern "C" void magma_slarfbx_gpu( magma_int_t m, magma_int_t k, magmaFloat_ptr V, magma_int_t ldv, magmaFloat_ptr dT, magma_int_t ldt, magmaFloat_ptr c, magmaFloat_ptr dwork, magma_queue_t queue ) { /* dwork = V**H c */ magma_sgemv_kernel1 <<< k, BLOCK_SIZE, 0, queue->cuda_stream() >>> (m, V, ldv, c, dwork); /* dwork = T**H dwork */ magma_strmv_tkernel <<< k, k, 0, queue->cuda_stream() >>> ( dT, ldt, dwork, dwork+k); /* c = c - V dwork */ dim3 blocks3( magma_ceildiv( m, BLOCK_SIZE ) ); dim3 threads3( BLOCK_SIZE ); magma_sgemv_kernel2 <<< blocks3, threads3, 0, queue->cuda_stream() >>> ( m, k, V, ldv, dwork+k, c); }
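The main textual difference between the .cu and .hip versions above is the kernel-launch syntax: hipify rewrites the CUDA triple-chevron launch into hipLaunchKernelGGL with the same grid, block, shared-memory, and stream arguments. The toy program below (all names invented, unrelated to MAGMA) shows that mapping on a trivial kernel.

// Toy illustration of the launch-syntax mapping visible in the pair above.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void scale_kernel(float* x, float alpha, int n) {
  const int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) x[i] *= alpha;
}

int main() {
  const int n = 1024;
  float h[n];
  for (int i = 0; i < n; ++i) h[i] = 1.0f;
  float* d;
  cudaMalloc((void**)&d, n * sizeof(float));
  cudaMemcpy(d, h, n * sizeof(float), cudaMemcpyHostToDevice);
  dim3 block(256), grid((n + block.x - 1) / block.x);
  // CUDA form, as in the .cu file:
  scale_kernel<<<grid, block, 0, 0>>>(d, 2.0f, n);
  // The hipified form in the .hip file would read:
  //   hipLaunchKernelGGL(scale_kernel, grid, block, 0, 0, d, 2.0f, n);
  cudaMemcpy(h, d, n * sizeof(float), cudaMemcpyDeviceToHost);
  printf("h[0] = %f (expected 2.0)\n", h[0]);
  cudaFree(d);
  return 0;
}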
9dcf614fd355fd3aac19d569d83e321cc4765045.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* This program is written to find the nearest neighbour of each point in 3 deminsional space by implementing the brute force algorithm. The brute force approach can easily be converted into a embarassingly parallel algorithm for the GPU where there is no interaction between the threads. Benchmarking is done to compare the CPU and GPU computational approaches to the problem. */ /* Note that there is a considerable dependency of the ratio of execution times of the CPU and GPU on the hardware which is being used to execute the run the program. */ // Importing the required headers #include<stdio.h> #include<stdlib.h> #include<math.h> #include<cuda.h> #include<time.h> struct position { int x,y,z; //odd number of parameters in the structure helps reducing bank conflicts in shared memory(if used). }; // Returns the duration from start to end times in sec double time_elapsed(struct timespec *start, struct timespec *end) { double t; t = (end->tv_sec - start->tv_sec); // diff in seconds t += (end->tv_nsec - start->tv_nsec) * 0.000000001; //diff in nanoseconds return t; } // GPU Kernel __global__ void GPU_Find(struct position *points, int *nearest, int n) { int i = blockIdx.x * blockDim.x + threadIdx.x; int min = 1<<22; int temp; if(i >= n) return; for(int j = 0; j < n; j++) { if(i == j) continue; temp = (points[i].x - points[j].x)*(points[i].x - points[j].x); temp += (points[i].y - points[j].y)*(points[i].y - points[j].y); temp += (points[i].z - points[j].z)*(points[i].z - points[j].z); if(temp < min) { min = temp; nearest[i] = j; } } return; } // CPU Function void CPU_Find(struct position *points, int *nearest, int n) { int min; //All the distances are going to be smaller than this. int temp; for(int i = 0; i < n; i++) { min = 1<<22; for(int j = 0; j < n; j++) { if(i == j) continue; temp = (points[i].x - points[j].x)*(points[i].x - points[j].x); temp += (points[i].y - points[j].y)*(points[i].y - points[j].y); temp += (points[i].z - points[j].z)*(points[i].z - points[j].z); temp = (int)sqrt(temp); if(temp < min) { min = temp; nearest[i] = j; } } } return; } // Code execution begins here int main() { struct timespec start1, end1; struct timespec start2, end2; int n; printf("Enter the value of n: "); scanf("%d", &n); struct position *points; int *nearest1; int *nearest2; hipMallocManaged(&points, n*sizeof(struct position)); hipMallocManaged(&nearest1, n*sizeof(int)); hipMallocManaged(&nearest2, n*sizeof(int)); for(int i = 0; i < n; i++) { points[i].x = rand()%100000; points[i].y = rand()%100000; points[i].z = rand()%10000; nearest1[i] = -1; nearest2[i] = -1; } clock_gettime(CLOCK_REALTIME, &start1); //start timestamp hipLaunchKernelGGL(( GPU_Find), dim3((n/128+1)), dim3(128), 0, 0, points, nearest1, n); hipDeviceSynchronize(); clock_gettime(CLOCK_REALTIME, &end1); //end timestamp clock_gettime(CLOCK_REALTIME, &start2); //start timestamp CPU_Find(points, nearest2, n); clock_gettime(CLOCK_REALTIME, &end2); //end timestamp printf("\nTime taken by GPU is: %lf\n", time_elapsed(&start1, &end1)); //print result for GPU printf("Time taken by CPU is: %lf\n", time_elapsed(&start2, &end2)); //print result for CPU hipFree(points); hipFree(nearest1); hipFree(nearest2); return 0; } /* The results obtained by the CPU and GPU may differ. Why so? */
9dcf614fd355fd3aac19d569d83e321cc4765045.cu
/* This program is written to find the nearest neighbour of each point in 3 deminsional space by implementing the brute force algorithm. The brute force approach can easily be converted into a embarassingly parallel algorithm for the GPU where there is no interaction between the threads. Benchmarking is done to compare the CPU and GPU computational approaches to the problem. */ /* Note that there is a considerable dependency of the ratio of execution times of the CPU and GPU on the hardware which is being used to execute the run the program. */ // Importing the required headers #include<stdio.h> #include<stdlib.h> #include<math.h> #include<cuda.h> #include<time.h> struct position { int x,y,z; //odd number of parameters in the structure helps reducing bank conflicts in shared memory(if used). }; // Returns the duration from start to end times in sec double time_elapsed(struct timespec *start, struct timespec *end) { double t; t = (end->tv_sec - start->tv_sec); // diff in seconds t += (end->tv_nsec - start->tv_nsec) * 0.000000001; //diff in nanoseconds return t; } // GPU Kernel __global__ void GPU_Find(struct position *points, int *nearest, int n) { int i = blockIdx.x * blockDim.x + threadIdx.x; int min = 1<<22; int temp; if(i >= n) return; for(int j = 0; j < n; j++) { if(i == j) continue; temp = (points[i].x - points[j].x)*(points[i].x - points[j].x); temp += (points[i].y - points[j].y)*(points[i].y - points[j].y); temp += (points[i].z - points[j].z)*(points[i].z - points[j].z); if(temp < min) { min = temp; nearest[i] = j; } } return; } // CPU Function void CPU_Find(struct position *points, int *nearest, int n) { int min; //All the distances are going to be smaller than this. int temp; for(int i = 0; i < n; i++) { min = 1<<22; for(int j = 0; j < n; j++) { if(i == j) continue; temp = (points[i].x - points[j].x)*(points[i].x - points[j].x); temp += (points[i].y - points[j].y)*(points[i].y - points[j].y); temp += (points[i].z - points[j].z)*(points[i].z - points[j].z); temp = (int)sqrt(temp); if(temp < min) { min = temp; nearest[i] = j; } } } return; } // Code execution begins here int main() { struct timespec start1, end1; struct timespec start2, end2; int n; printf("Enter the value of n: "); scanf("%d", &n); struct position *points; int *nearest1; int *nearest2; cudaMallocManaged(&points, n*sizeof(struct position)); cudaMallocManaged(&nearest1, n*sizeof(int)); cudaMallocManaged(&nearest2, n*sizeof(int)); for(int i = 0; i < n; i++) { points[i].x = rand()%100000; points[i].y = rand()%100000; points[i].z = rand()%10000; nearest1[i] = -1; nearest2[i] = -1; } clock_gettime(CLOCK_REALTIME, &start1); //start timestamp GPU_Find<<<(n/128+1), 128>>>(points, nearest1, n); cudaDeviceSynchronize(); clock_gettime(CLOCK_REALTIME, &end1); //end timestamp clock_gettime(CLOCK_REALTIME, &start2); //start timestamp CPU_Find(points, nearest2, n); clock_gettime(CLOCK_REALTIME, &end2); //end timestamp printf("\nTime taken by GPU is: %lf\n", time_elapsed(&start1, &end1)); //print result for GPU printf("Time taken by CPU is: %lf\n", time_elapsed(&start2, &end2)); //print result for CPU cudaFree(points); cudaFree(nearest1); cudaFree(nearest2); return 0; } /* The results obtained by the CPU and GPU may differ. Why so? */
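The pair above closes by asking why the CPU and GPU results may differ. One plausible contributor (an observation about the code, not a statement from its authors): with coordinates drawn up to ~100000, the squared distance can reach about 3e10 and overflow a 32-bit int, and the GPU loop compares the squared distance against 1<<22 while the CPU loop compares its square root against the same constant. The host-only sketch below keeps the arithmetic in 64 bits and compares squared distances consistently; it is an illustration, not a drop-in fix.

// Host-side sketch: brute-force nearest neighbour with 64-bit squared distances.
#include <cstdint>
#include <cstdio>

struct position { int x, y, z; };

// Squared Euclidean distance accumulated in 64 bits to avoid int overflow.
static inline int64_t dist2(const position& a, const position& b) {
  const int64_t dx = a.x - b.x, dy = a.y - b.y, dz = a.z - b.z;
  return dx * dx + dy * dy + dz * dz;
}

int main() {
  position p[3] = {{0, 0, 0}, {99999, 99999, 9999}, {10, 0, 0}};
  // Nearest neighbour of p[0], always comparing squared values.
  int nearest = -1;
  int64_t best = INT64_MAX;
  for (int j = 1; j < 3; ++j) {
    const int64_t d2 = dist2(p[0], p[j]);
    if (d2 < best) { best = d2; nearest = j; }
  }
  printf("nearest of p[0] is p[%d], squared distance %lld\n",
         nearest, (long long)best);
  return 0;
}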
cdcfb9b06830551fcb92b23755b64816409fc7ff.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <ATen/hip/HIPContext.h> #include <ATen/TensorUtils.h> #include <ATen/NativeFunctions.h> #include <ATen/AccumulateType.h> #include <THH/THHDeviceUtils.cuh> #include <THH/THHTensorMathReduce.cuh> #include <THH/THHTensorSort.cuh> #include <THH/THHThrustAllocator.cuh> #include <THH/THHAtomics.cuh> #include <thrust/execution_policy.h> #include <thrust/unique.h> #include <thrust/device_vector.h> #include <ATen/native/hip/EmbeddingBackwardKernel.cuh> namespace at { namespace native { namespace { constexpr int MODE_SUM = 0; constexpr int MODE_MEAN = 1; constexpr int MODE_MAX = 2; constexpr int WARP_SIZE = 32; // This kernel assumes that all input tensors except `weight` and // per_sample_weights are contiguous. template <typename scalar_t> __global__ void EmbeddingBag_updateOutputKernel( int64_t *input, int64_t *offsets, scalar_t *weight, scalar_t *output, int64_t *offset2bag, int64_t numIndices, int64_t numBags, int64_t featureSize, int64_t weight_stide0, int64_t weight_stride1, int mode, int64_t *bag_size, int64_t *max_indices, scalar_t* per_sample_weights, int64_t per_sample_weights_stride) { // the strategy here is that each bag x feature is handled by a single thread using accscalar_t = acc_type<scalar_t, true>; int64_t chunksPerBag = THCCeilDiv(featureSize, (int64_t)blockDim.x); int64_t numChunks = numBags * chunksPerBag; int64_t chunkOffset = blockIdx.x * blockDim.y + threadIdx.y; int64_t chunkStride = gridDim.x * blockDim.y; for (int64_t chunk = chunkOffset; chunk < numChunks; chunk += chunkStride) { int64_t featureDim = (chunk % chunksPerBag) * blockDim.x + threadIdx.x; if (featureDim < featureSize) { int64_t bag = chunk / chunksPerBag; scalar_t *weightFeat = weight + featureDim * weight_stride1; int64_t begin = offsets[bag]; int64_t end = (bag < numBags - 1) ? (offsets[bag + 1]) : numIndices; assert(end >= begin); accscalar_t weightFeatSum = 0; scalar_t weightFeatMax; int64_t bag_size_ = 0; int64_t maxWord = -1; for (int64_t emb = begin; emb < end; emb++) { const int64_t weightRow = input[emb] * weight_stide0; scalar_t weightValue = weightFeat[weightRow]; if (mode == MODE_MAX) { if (emb == begin || weightValue > weightFeatMax) { weightFeatMax = weightValue; maxWord = input[emb]; } } else { if (per_sample_weights) { accscalar_t scaleWeightBy = static_cast<accscalar_t>( per_sample_weights[emb * per_sample_weights_stride]); weightFeatSum += scaleWeightBy * static_cast<accscalar_t>(weightValue); } else { weightFeatSum += static_cast<accscalar_t>(weightValue); } } bag_size_++; if (featureDim == 0) { offset2bag[emb] = bag; } } if (mode == MODE_MEAN) { if (end == begin) { bag_size[bag] = 0; } else { weightFeatSum = weightFeatSum / static_cast<accscalar_t>(bag_size_); bag_size[bag] = bag_size_; } } if (mode == MODE_MEAN || mode == MODE_SUM) { output[bag * featureSize + featureDim] = static_cast<scalar_t>(weightFeatSum); } else if (mode == MODE_MAX) { if (end == begin) { // If bag is empty, set output to 0. 
weightFeatMax = 0; } max_indices[bag * featureSize + featureDim] = maxWord; output[bag * featureSize + featureDim] = weightFeatMax; } } } } Tensor embedding_bag_backward_cuda_sum_avg( const Tensor &grad, const Tensor &indices, const Tensor &offset2bag, const Tensor &bag_size, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, const Tensor& per_sample_weights) { auto grad_weight = at::zeros({num_weights, grad.size(1)}, grad.options()); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); ptrdiff_t numel = indices.numel(); if (numel == 0) { // all empty bags return at::zeros({num_weights, grad.size(1)}, grad.options()); } int64_t stride = grad_weight.stride(0); auto sorted_indices = at::empty_like(indices); auto orig_indices = at::empty_like(indices); using device_ptr = thrust::device_ptr<int64_t>; // Sort the inputs into sorted with the corresponding indices; we // don't need a stable or multidimensional sort, so just use Thrust // directly { sorted_indices.copy_(indices); auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA()); auto policy = thrust::hip::par(allocator).on(stream); // Fill sortedOrigIndices with sequential indices auto count_iter = thrust::counting_iterator<int64_t>(0); auto orig_data = device_ptr(orig_indices.data<int64_t>()); thrust::copy(policy, count_iter, count_iter + numel, orig_data); // Sort; a stable sort is not required auto sorted_data = device_ptr(sorted_indices.data<int64_t>()); thrust::sort_by_key(policy, sorted_data, sorted_data + numel, orig_data, ThrustLTOp<int64_t>()); } Tensor count; if (scale_grad_by_freq) { count = at::empty_like(indices); auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA()); auto policy = thrust::hip::par(allocator).on(stream); // Compute an increasing sequence per unique item in sortedIndices: // sorted: 2 5 5 5 7 7 8 9 9 // count: 1 1 2 3 1 2 1 1 2 auto sorted_data = device_ptr(sorted_indices.data<int64_t>()); auto count_data = device_ptr(count.data<int64_t>()); thrust::inclusive_scan_by_key(policy, sorted_data, sorted_data + numel, thrust::make_constant_iterator(1), count_data); // Take the maximum of each count per unique key in reverse: // sorted: 2 5 5 5 7 7 8 9 9 // count: 1 3 3 3 2 2 1 2 2 thrust::inclusive_scan_by_key( policy, thrust::make_reverse_iterator(sorted_data + numel), thrust::make_reverse_iterator(sorted_data), thrust::make_reverse_iterator(count_data + numel), thrust::make_reverse_iterator(count_data + numel), thrust::equal_to<int64_t>(), thrust::maximum<int64_t>()); } return embedding_backward_cuda_kernel(grad, orig_indices, sorted_indices, count, num_weights, /* padding_idx= */ -1, scale_grad_by_freq, mode == MODE_MEAN, offset2bag, bag_size, per_sample_weights); } template <typename scalar_t> __global__ void EmbeddingBag_accGradParametersKernel_max( int64_t *max_indices, scalar_t *gradOutput, scalar_t *gradWeight, int64_t stride, int64_t numBags) { using accscalar_t = acc_type<scalar_t, true>; int64_t chunksPerBag = THCCeilDiv(stride, (int64_t)blockDim.x); int64_t numChunks = numBags * chunksPerBag; int64_t chunkOffset = blockIdx.x * blockDim.y + threadIdx.y; int64_t chunkStride = gridDim.x * blockDim.y; for (int64_t chunk = chunkOffset; chunk < numChunks; chunk += chunkStride) { int64_t featureDim = (chunk % chunksPerBag) * blockDim.x + threadIdx.x; if (featureDim < stride) { int64_t bag = chunk / chunksPerBag; int64_t word_idx = max_indices[bag * stride + featureDim]; if (word_idx >= 0) { // If bag is empty, we have max_indices[idx] set to -1 in forward. 
atomicAdd(&(gradWeight[word_idx * stride + featureDim]), gradOutput[bag * stride + featureDim]); } } } } Tensor embedding_bag_backward_cuda_max(const Tensor &grad, const Tensor &max_indices, int64_t num_weights) { auto grad_weight = at::zeros({num_weights, grad.size(1)}, grad.options()); int64_t stride = grad_weight.stride(0); int64_t numBags = grad.size(0); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); dim3 block = dim3(32, 8); int grid = 1024; AT_DISPATCH_FLOATING_TYPES_AND_HALF( grad.scalar_type(), "embedding_bag_backward_cuda_max", [&] { hipLaunchKernelGGL(( EmbeddingBag_accGradParametersKernel_max< scalar_t>), dim3(grid), dim3(block), 0, stream, max_indices.data<int64_t>(), grad.data<scalar_t>(), grad_weight.data<scalar_t>(), stride, numBags); }); THCudaCheck(hipGetLastError()); return grad_weight; } } // Assumes all input tensors are contiguous. // See NOTE [ embedding_bag Native Functions ] in native_functions.yaml for details std::tuple<Tensor, Tensor, Tensor, Tensor> _embedding_bag_cuda(const Tensor &weight, const Tensor &indices, const Tensor &offsets, const bool scale_grad_by_freq, const int64_t mode, bool sparse, const Tensor& per_sample_weights) { auto indices_arg = TensorArg(indices, "indices", 1); checkScalarType("embedding_bag_cuda", indices_arg, kLong); auto offsets_arg = TensorArg(offsets, "offsets", 1); checkScalarType("embedding_bag_cuda", offsets_arg, kLong); auto weight_arg = TensorArg(weight, "weight", 1); checkSameGPU("embedding_bag_cuda", weight_arg, indices_arg); checkSameGPU("embedding_bag_cuda", weight_arg, offsets_arg); int64_t numIndices = indices.size(0); int64_t numBags = offsets.size(0); int64_t featureSize = weight.size(1); auto bag_size = at::zeros(offsets.sizes(), indices.options()); auto offset2bag = at::zeros({indices.size(0)}, indices.options()); // offset2bag = [0 0 0 0 0] hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); auto output = at::zeros({offsets.size(0), weight.size(1)}, weight.options()); Tensor max_indices; if (mode == MODE_MAX) { max_indices = at::zeros({offsets.size(0), weight.size(1)}, indices.options()); } else { // No need to allocate if we aren't doing a backwards pass max_indices = at::zeros({0}, indices.options()); } dim3 block = dim3(32, 8); int grid = 1024; AT_DISPATCH_FLOATING_TYPES_AND_HALF(weight.scalar_type(), "embedding_bag_cuda", [&] { hipLaunchKernelGGL(( EmbeddingBag_updateOutputKernel<scalar_t>), dim3(grid), dim3(block), 0, stream, indices.data<int64_t>(), offsets.data<int64_t>(), weight.data<scalar_t>(), output.data<scalar_t>(), offset2bag.data<int64_t>(), numIndices, numBags, featureSize, weight.stride(0), weight.stride(1), mode, bag_size.data<int64_t>(), mode == MODE_MAX ? max_indices.data<int64_t>() : NULL, per_sample_weights.defined() ? per_sample_weights.data<scalar_t>() : NULL, per_sample_weights.defined() ? per_sample_weights.stride(0) : 0); }); THCudaCheck(hipGetLastError()); return std::tuple<Tensor, Tensor, Tensor, Tensor>(output, offset2bag, bag_size, max_indices); } Tensor _embedding_bag_dense_backward_cuda(const Tensor &grad_, const Tensor &indices, const Tensor &offsets, const Tensor &offset2bag, const Tensor &bag_size_, const Tensor &max_indices, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, const Tensor& per_sample_weights) { // indices, offsets and offset2bag are assumed having correct dtypes and // contiguous here due to the checks in _embedding_bag_backward in // EmbeddingBag.cpp. 
// Also see NOTE [ embedding_bag Native Functions ] in native_functions.yaml // for more details. Tensor grad = grad_.contiguous(); auto indices_arg = TensorArg(indices, "indices", 1); auto offsets_arg = TensorArg(offsets, "offsets", 1); auto grad_arg = TensorArg(grad, "grad", 1); checkSameGPU("embedding_bag_cuda", grad_arg, offsets_arg); checkSameGPU("embedding_bag_cuda", grad_arg, indices_arg); switch (mode) { case MODE_SUM: case MODE_MEAN: if (mode == MODE_MEAN) AT_ASSERT(!per_sample_weights.defined()); return embedding_bag_backward_cuda_sum_avg(grad, indices, offset2bag, bag_size_, num_weights, scale_grad_by_freq, mode, per_sample_weights); case MODE_MAX: AT_ASSERT(!per_sample_weights.defined()); return embedding_bag_backward_cuda_max(grad, max_indices, num_weights); default: AT_ERROR( "Unknown mode for embedding_bag_backward_cuda ", mode); } } template <typename scalar_t> __inline__ __device__ static scalar_t warpReduceSum(scalar_t val) { for (int offset = WARP_SIZE/2; offset > 0; offset /= 2) val += WARP_SHFL_DOWN(val, offset); return val; } template <typename scalar_t> __global__ static void _embedding_bag_per_sample_weights_backward_kernel( const scalar_t* grad, int64_t grad_stride0, int64_t grad_stride1, const scalar_t* weight, int64_t weight_stride0, int64_t weight_stride1, const int64_t* indices, // contiguous const int64_t* offset2bag, // contiguous int64_t num_samples, int64_t embedding_features, scalar_t* output) { using accscalar_t = acc_type<scalar_t, true>; const int idx = threadIdx.x + blockIdx.x * blockDim.x; const int warp = idx / WARP_SIZE; const int thread_in_warp = idx % WARP_SIZE; const int num_warps = blockDim.x * gridDim.x / WARP_SIZE; // Each warp is responsible for the accumulation of one sample. // This involves doing one dot product between grad[bag_idx] and weight[embedding_idx]. 
for (int sample_idx = warp; sample_idx < num_samples; sample_idx += num_warps) { accscalar_t result = 0.; const int bag_idx = (int)offset2bag[sample_idx]; const int embedding_idx = (int)indices[sample_idx]; for (int feature_idx = thread_in_warp; feature_idx < embedding_features; feature_idx += WARP_SIZE) { result += grad[grad_stride0 * bag_idx + grad_stride1 * feature_idx] * weight[weight_stride0 * embedding_idx + weight_stride1 * feature_idx]; } result = warpReduceSum<accscalar_t>(result); if (thread_in_warp == 0) { output[sample_idx] = result; } } } Tensor _embedding_bag_per_sample_weights_backward_cuda( const Tensor& grad, const Tensor& weight, // NB: embedding table, not per_sample_weights const Tensor& indices, const Tensor& offsets, const Tensor& offset2bag, int64_t mode) { TORCH_CHECK( mode == MODE_SUM, "embedding_bag_backward: per_sample_weights only supported for mode='sum'"); AT_ASSERT(grad.dim() == 2); auto embedding_features = grad.size(1); AT_ASSERT(indices.dim() == 1); auto num_samples = indices.size(0); AT_ASSERT(weight.dim() == 2); AT_ASSERT(weight.size(1) == embedding_features); const int threads_per_block = 1024; const int warps_per_block = threads_per_block / WARP_SIZE; dim3 block(threads_per_block); dim3 grid((num_samples + warps_per_block - 1) / warps_per_block); auto output = at::empty({num_samples}, grad.options()); AT_DISPATCH_FLOATING_TYPES_AND_HALF( grad.scalar_type(), "_embedding_bag_per_sample_weights_backward_cuda", [&]() { hipLaunchKernelGGL(( _embedding_bag_per_sample_weights_backward_kernel<scalar_t>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), grad.data<scalar_t>(), grad.stride(0), grad.stride(1), weight.data<scalar_t>(), weight.stride(0), weight.stride(1), indices.data<int64_t>(), offset2bag.data<int64_t>(), num_samples, embedding_features, output.data<scalar_t>()); } ); return output; } } }
cdcfb9b06830551fcb92b23755b64816409fc7ff.cu
#include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/TensorUtils.h> #include <ATen/NativeFunctions.h> #include <ATen/AccumulateType.h> #include <THC/THCDeviceUtils.cuh> #include <THC/THCTensorMathReduce.cuh> #include <THC/THCTensorSort.cuh> #include <THC/THCThrustAllocator.cuh> #include <THC/THCAtomics.cuh> #include <thrust/execution_policy.h> #include <thrust/unique.h> #include <thrust/device_vector.h> #include <ATen/native/cuda/EmbeddingBackwardKernel.cuh> namespace at { namespace native { namespace { constexpr int MODE_SUM = 0; constexpr int MODE_MEAN = 1; constexpr int MODE_MAX = 2; constexpr int WARP_SIZE = 32; // This kernel assumes that all input tensors except `weight` and // per_sample_weights are contiguous. template <typename scalar_t> __global__ void EmbeddingBag_updateOutputKernel( int64_t *input, int64_t *offsets, scalar_t *weight, scalar_t *output, int64_t *offset2bag, int64_t numIndices, int64_t numBags, int64_t featureSize, int64_t weight_stide0, int64_t weight_stride1, int mode, int64_t *bag_size, int64_t *max_indices, scalar_t* per_sample_weights, int64_t per_sample_weights_stride) { // the strategy here is that each bag x feature is handled by a single thread using accscalar_t = acc_type<scalar_t, true>; int64_t chunksPerBag = THCCeilDiv(featureSize, (int64_t)blockDim.x); int64_t numChunks = numBags * chunksPerBag; int64_t chunkOffset = blockIdx.x * blockDim.y + threadIdx.y; int64_t chunkStride = gridDim.x * blockDim.y; for (int64_t chunk = chunkOffset; chunk < numChunks; chunk += chunkStride) { int64_t featureDim = (chunk % chunksPerBag) * blockDim.x + threadIdx.x; if (featureDim < featureSize) { int64_t bag = chunk / chunksPerBag; scalar_t *weightFeat = weight + featureDim * weight_stride1; int64_t begin = offsets[bag]; int64_t end = (bag < numBags - 1) ? (offsets[bag + 1]) : numIndices; assert(end >= begin); accscalar_t weightFeatSum = 0; scalar_t weightFeatMax; int64_t bag_size_ = 0; int64_t maxWord = -1; for (int64_t emb = begin; emb < end; emb++) { const int64_t weightRow = input[emb] * weight_stide0; scalar_t weightValue = weightFeat[weightRow]; if (mode == MODE_MAX) { if (emb == begin || weightValue > weightFeatMax) { weightFeatMax = weightValue; maxWord = input[emb]; } } else { if (per_sample_weights) { accscalar_t scaleWeightBy = static_cast<accscalar_t>( per_sample_weights[emb * per_sample_weights_stride]); weightFeatSum += scaleWeightBy * static_cast<accscalar_t>(weightValue); } else { weightFeatSum += static_cast<accscalar_t>(weightValue); } } bag_size_++; if (featureDim == 0) { offset2bag[emb] = bag; } } if (mode == MODE_MEAN) { if (end == begin) { bag_size[bag] = 0; } else { weightFeatSum = weightFeatSum / static_cast<accscalar_t>(bag_size_); bag_size[bag] = bag_size_; } } if (mode == MODE_MEAN || mode == MODE_SUM) { output[bag * featureSize + featureDim] = static_cast<scalar_t>(weightFeatSum); } else if (mode == MODE_MAX) { if (end == begin) { // If bag is empty, set output to 0. 
weightFeatMax = 0; } max_indices[bag * featureSize + featureDim] = maxWord; output[bag * featureSize + featureDim] = weightFeatMax; } } } } Tensor embedding_bag_backward_cuda_sum_avg( const Tensor &grad, const Tensor &indices, const Tensor &offset2bag, const Tensor &bag_size, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, const Tensor& per_sample_weights) { auto grad_weight = at::zeros({num_weights, grad.size(1)}, grad.options()); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); ptrdiff_t numel = indices.numel(); if (numel == 0) { // all empty bags return at::zeros({num_weights, grad.size(1)}, grad.options()); } int64_t stride = grad_weight.stride(0); auto sorted_indices = at::empty_like(indices); auto orig_indices = at::empty_like(indices); using device_ptr = thrust::device_ptr<int64_t>; // Sort the inputs into sorted with the corresponding indices; we // don't need a stable or multidimensional sort, so just use Thrust // directly { sorted_indices.copy_(indices); auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA()); auto policy = thrust::cuda::par(allocator).on(stream); // Fill sortedOrigIndices with sequential indices auto count_iter = thrust::counting_iterator<int64_t>(0); auto orig_data = device_ptr(orig_indices.data<int64_t>()); thrust::copy(policy, count_iter, count_iter + numel, orig_data); // Sort; a stable sort is not required auto sorted_data = device_ptr(sorted_indices.data<int64_t>()); thrust::sort_by_key(policy, sorted_data, sorted_data + numel, orig_data, ThrustLTOp<int64_t>()); } Tensor count; if (scale_grad_by_freq) { count = at::empty_like(indices); auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA()); auto policy = thrust::cuda::par(allocator).on(stream); // Compute an increasing sequence per unique item in sortedIndices: // sorted: 2 5 5 5 7 7 8 9 9 // count: 1 1 2 3 1 2 1 1 2 auto sorted_data = device_ptr(sorted_indices.data<int64_t>()); auto count_data = device_ptr(count.data<int64_t>()); thrust::inclusive_scan_by_key(policy, sorted_data, sorted_data + numel, thrust::make_constant_iterator(1), count_data); // Take the maximum of each count per unique key in reverse: // sorted: 2 5 5 5 7 7 8 9 9 // count: 1 3 3 3 2 2 1 2 2 thrust::inclusive_scan_by_key( policy, thrust::make_reverse_iterator(sorted_data + numel), thrust::make_reverse_iterator(sorted_data), thrust::make_reverse_iterator(count_data + numel), thrust::make_reverse_iterator(count_data + numel), thrust::equal_to<int64_t>(), thrust::maximum<int64_t>()); } return embedding_backward_cuda_kernel(grad, orig_indices, sorted_indices, count, num_weights, /* padding_idx= */ -1, scale_grad_by_freq, mode == MODE_MEAN, offset2bag, bag_size, per_sample_weights); } template <typename scalar_t> __global__ void EmbeddingBag_accGradParametersKernel_max( int64_t *max_indices, scalar_t *gradOutput, scalar_t *gradWeight, int64_t stride, int64_t numBags) { using accscalar_t = acc_type<scalar_t, true>; int64_t chunksPerBag = THCCeilDiv(stride, (int64_t)blockDim.x); int64_t numChunks = numBags * chunksPerBag; int64_t chunkOffset = blockIdx.x * blockDim.y + threadIdx.y; int64_t chunkStride = gridDim.x * blockDim.y; for (int64_t chunk = chunkOffset; chunk < numChunks; chunk += chunkStride) { int64_t featureDim = (chunk % chunksPerBag) * blockDim.x + threadIdx.x; if (featureDim < stride) { int64_t bag = chunk / chunksPerBag; int64_t word_idx = max_indices[bag * stride + featureDim]; if (word_idx >= 0) { // If bag is empty, we have max_indices[idx] set to -1 in forward. 
atomicAdd(&(gradWeight[word_idx * stride + featureDim]), gradOutput[bag * stride + featureDim]); } } } } Tensor embedding_bag_backward_cuda_max(const Tensor &grad, const Tensor &max_indices, int64_t num_weights) { auto grad_weight = at::zeros({num_weights, grad.size(1)}, grad.options()); int64_t stride = grad_weight.stride(0); int64_t numBags = grad.size(0); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); dim3 block = dim3(32, 8); int grid = 1024; AT_DISPATCH_FLOATING_TYPES_AND_HALF( grad.scalar_type(), "embedding_bag_backward_cuda_max", [&] { EmbeddingBag_accGradParametersKernel_max< scalar_t><<<grid, block, 0, stream>>>( max_indices.data<int64_t>(), grad.data<scalar_t>(), grad_weight.data<scalar_t>(), stride, numBags); }); THCudaCheck(cudaGetLastError()); return grad_weight; } } // Assumes all input tensors are contiguous. // See NOTE [ embedding_bag Native Functions ] in native_functions.yaml for details std::tuple<Tensor, Tensor, Tensor, Tensor> _embedding_bag_cuda(const Tensor &weight, const Tensor &indices, const Tensor &offsets, const bool scale_grad_by_freq, const int64_t mode, bool sparse, const Tensor& per_sample_weights) { auto indices_arg = TensorArg(indices, "indices", 1); checkScalarType("embedding_bag_cuda", indices_arg, kLong); auto offsets_arg = TensorArg(offsets, "offsets", 1); checkScalarType("embedding_bag_cuda", offsets_arg, kLong); auto weight_arg = TensorArg(weight, "weight", 1); checkSameGPU("embedding_bag_cuda", weight_arg, indices_arg); checkSameGPU("embedding_bag_cuda", weight_arg, offsets_arg); int64_t numIndices = indices.size(0); int64_t numBags = offsets.size(0); int64_t featureSize = weight.size(1); auto bag_size = at::zeros(offsets.sizes(), indices.options()); auto offset2bag = at::zeros({indices.size(0)}, indices.options()); // offset2bag = [0 0 0 0 0] cudaStream_t stream = at::cuda::getCurrentCUDAStream(); auto output = at::zeros({offsets.size(0), weight.size(1)}, weight.options()); Tensor max_indices; if (mode == MODE_MAX) { max_indices = at::zeros({offsets.size(0), weight.size(1)}, indices.options()); } else { // No need to allocate if we aren't doing a backwards pass max_indices = at::zeros({0}, indices.options()); } dim3 block = dim3(32, 8); int grid = 1024; AT_DISPATCH_FLOATING_TYPES_AND_HALF(weight.scalar_type(), "embedding_bag_cuda", [&] { EmbeddingBag_updateOutputKernel<scalar_t><<<grid, block, 0, stream>>>( indices.data<int64_t>(), offsets.data<int64_t>(), weight.data<scalar_t>(), output.data<scalar_t>(), offset2bag.data<int64_t>(), numIndices, numBags, featureSize, weight.stride(0), weight.stride(1), mode, bag_size.data<int64_t>(), mode == MODE_MAX ? max_indices.data<int64_t>() : NULL, per_sample_weights.defined() ? per_sample_weights.data<scalar_t>() : NULL, per_sample_weights.defined() ? per_sample_weights.stride(0) : 0); }); THCudaCheck(cudaGetLastError()); return std::tuple<Tensor, Tensor, Tensor, Tensor>(output, offset2bag, bag_size, max_indices); } Tensor _embedding_bag_dense_backward_cuda(const Tensor &grad_, const Tensor &indices, const Tensor &offsets, const Tensor &offset2bag, const Tensor &bag_size_, const Tensor &max_indices, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, const Tensor& per_sample_weights) { // indices, offsets and offset2bag are assumed having correct dtypes and // contiguous here due to the checks in _embedding_bag_backward in // EmbeddingBag.cpp. // Also see NOTE [ embedding_bag Native Functions ] in native_functions.yaml // for more details. 
Tensor grad = grad_.contiguous(); auto indices_arg = TensorArg(indices, "indices", 1); auto offsets_arg = TensorArg(offsets, "offsets", 1); auto grad_arg = TensorArg(grad, "grad", 1); checkSameGPU("embedding_bag_cuda", grad_arg, offsets_arg); checkSameGPU("embedding_bag_cuda", grad_arg, indices_arg); switch (mode) { case MODE_SUM: case MODE_MEAN: if (mode == MODE_MEAN) AT_ASSERT(!per_sample_weights.defined()); return embedding_bag_backward_cuda_sum_avg(grad, indices, offset2bag, bag_size_, num_weights, scale_grad_by_freq, mode, per_sample_weights); case MODE_MAX: AT_ASSERT(!per_sample_weights.defined()); return embedding_bag_backward_cuda_max(grad, max_indices, num_weights); default: AT_ERROR( "Unknown mode for embedding_bag_backward_cuda ", mode); } } template <typename scalar_t> __inline__ __device__ static scalar_t warpReduceSum(scalar_t val) { for (int offset = WARP_SIZE/2; offset > 0; offset /= 2) val += WARP_SHFL_DOWN(val, offset); return val; } template <typename scalar_t> __global__ static void _embedding_bag_per_sample_weights_backward_kernel( const scalar_t* grad, int64_t grad_stride0, int64_t grad_stride1, const scalar_t* weight, int64_t weight_stride0, int64_t weight_stride1, const int64_t* indices, // contiguous const int64_t* offset2bag, // contiguous int64_t num_samples, int64_t embedding_features, scalar_t* output) { using accscalar_t = acc_type<scalar_t, true>; const int idx = threadIdx.x + blockIdx.x * blockDim.x; const int warp = idx / WARP_SIZE; const int thread_in_warp = idx % WARP_SIZE; const int num_warps = blockDim.x * gridDim.x / WARP_SIZE; // Each warp is responsible for the accumulation of one sample. // This involves doing one dot product between grad[bag_idx] and weight[embedding_idx]. for (int sample_idx = warp; sample_idx < num_samples; sample_idx += num_warps) { accscalar_t result = 0.; const int bag_idx = (int)offset2bag[sample_idx]; const int embedding_idx = (int)indices[sample_idx]; for (int feature_idx = thread_in_warp; feature_idx < embedding_features; feature_idx += WARP_SIZE) { result += grad[grad_stride0 * bag_idx + grad_stride1 * feature_idx] * weight[weight_stride0 * embedding_idx + weight_stride1 * feature_idx]; } result = warpReduceSum<accscalar_t>(result); if (thread_in_warp == 0) { output[sample_idx] = result; } } } Tensor _embedding_bag_per_sample_weights_backward_cuda( const Tensor& grad, const Tensor& weight, // NB: embedding table, not per_sample_weights const Tensor& indices, const Tensor& offsets, const Tensor& offset2bag, int64_t mode) { TORCH_CHECK( mode == MODE_SUM, "embedding_bag_backward: per_sample_weights only supported for mode='sum'"); AT_ASSERT(grad.dim() == 2); auto embedding_features = grad.size(1); AT_ASSERT(indices.dim() == 1); auto num_samples = indices.size(0); AT_ASSERT(weight.dim() == 2); AT_ASSERT(weight.size(1) == embedding_features); const int threads_per_block = 1024; const int warps_per_block = threads_per_block / WARP_SIZE; dim3 block(threads_per_block); dim3 grid((num_samples + warps_per_block - 1) / warps_per_block); auto output = at::empty({num_samples}, grad.options()); AT_DISPATCH_FLOATING_TYPES_AND_HALF( grad.scalar_type(), "_embedding_bag_per_sample_weights_backward_cuda", [&]() { _embedding_bag_per_sample_weights_backward_kernel<scalar_t> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>( grad.data<scalar_t>(), grad.stride(0), grad.stride(1), weight.data<scalar_t>(), weight.stride(0), weight.stride(1), indices.data<int64_t>(), offset2bag.data<int64_t>(), num_samples, embedding_features, 
output.data<scalar_t>()); } ); return output; } } }
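embedding_bag_backward_cuda_sum_avg above documents its scale_grad_by_freq path with the sequences "sorted: 2 5 5 5 7 7 8 9 9", "count: 1 1 2 3 1 2 1 1 2" and, after the reverse pass, "1 3 3 3 2 2 1 2 2". The standalone sketch below reproduces those two thrust::inclusive_scan_by_key passes; the main() and device_vector usage are additions for illustration, while the original operates on raw device_ptr ranges with a stream-bound execution policy.

// Standalone reproduction of the two scan-by-key passes from the file above.
#include <cstdint>
#include <cstdio>
#include <thrust/device_vector.h>
#include <thrust/functional.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/iterator/reverse_iterator.h>
#include <thrust/scan.h>

int main() {
  const int64_t h_sorted[] = {2, 5, 5, 5, 7, 7, 8, 9, 9};
  thrust::device_vector<int64_t> sorted(h_sorted, h_sorted + 9);
  thrust::device_vector<int64_t> count(sorted.size());

  // Pass 1: running occurrence count within each run of equal keys.
  // sorted: 2 5 5 5 7 7 8 9 9  ->  count: 1 1 2 3 1 2 1 1 2
  thrust::inclusive_scan_by_key(sorted.begin(), sorted.end(),
                                thrust::make_constant_iterator<int64_t>(1),
                                count.begin());

  // Pass 2: scan the counts in reverse with maximum, so every element of a
  // run ends up holding that run's total count: 1 3 3 3 2 2 1 2 2
  thrust::inclusive_scan_by_key(
      thrust::make_reverse_iterator(sorted.end()),
      thrust::make_reverse_iterator(sorted.begin()),
      thrust::make_reverse_iterator(count.end()),
      thrust::make_reverse_iterator(count.end()),
      thrust::equal_to<int64_t>(),
      thrust::maximum<int64_t>());

  for (size_t i = 0; i < count.size(); ++i)
    printf("%lld ", (long long)(int64_t)count[i]);
  printf("\n");
  return 0;
}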
96416bb6a27ac25f25ba1eda64cecd00155e3f5a.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 1993-2010 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ #include "../inc/piestimator.h" #include <string> #include <vector> #include <numeric> #include <stdexcept> #include <typeinfo> #include <hip/hip_runtime_api.h> #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> #include "../inc/cudasharedmem.h" using std::string; using std::vector; // Helper templates to support float and double in same code template <typename L, typename R> struct TYPE_IS { static const bool test = false; }; template <typename L> struct TYPE_IS<L, L> { static const bool test = true; }; template <bool, class L, class R> struct IF { typedef R type; }; template <class L, class R> struct IF<true, L, R> { typedef L type; }; // RNG init kernel template <typename rngState_t, typename rngDirectionVectors_t> __global__ void initRNG(rngState_t * const rngStates, rngDirectionVectors_t * const rngDirections) { // Determine thread ID unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; unsigned int step = gridDim.x * blockDim.x; // Initialise the RNG hiprand_init(rngDirections[0], tid, &rngStates[tid]); hiprand_init(rngDirections[1], tid, &rngStates[tid + step]); } __device__ unsigned int reduce_sum(unsigned int in) { extern __shared__ unsigned int sdata[]; // Perform first level of reduction: // - Write to shared memory unsigned int ltid = threadIdx.x; sdata[ltid] = in; __syncthreads(); // Do reduction in shared mem for (unsigned int s = blockDim.x / 2 ; s > 0 ; s >>= 1) { if (ltid < s) { sdata[ltid] += sdata[ltid + s]; } __syncthreads(); } return sdata[0]; } __device__ inline void getPoint(float &x, float &y, curandStateSobol32 &state1, curandStateSobol32 &state2) { x = hiprand_uniform(&state1); y = hiprand_uniform(&state2); } __device__ inline void getPoint(double &x, double &y, curandStateSobol64 &state1, curandStateSobol64 &state2) { x = hiprand_uniform_double(&state1); y = hiprand_uniform_double(&state2); } // Estimator kernel template <typename Real, typename rngState_t> __global__ void computeValue(unsigned int * const results, rngState_t * const rngStates, const unsigned int numSims) { // Determine thread ID unsigned int bid = blockIdx.x; unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; unsigned int step = gridDim.x * blockDim.x; // Initialise the RNG rngState_t localState1 = rngStates[tid]; rngState_t localState2 = rngStates[tid + step]; // Count the number of points which lie inside the unit quarter-circle unsigned int pointsInside = 0; for (unsigned int i = tid ; i < numSims ; i += step) { Real x; Real y; getPoint(x, y, localState1, localState2); Real l2norm2 = x * x + y * y; if (l2norm2 < static_cast<Real>(1)) pointsInside++; } // Reduce within the block pointsInside = reduce_sum(pointsInside); // Store the result if (threadIdx.x == 0) results[bid] = pointsInside; } template <typename Real> PiEstimator<Real>::PiEstimator(unsigned int numSims, unsigned int device, unsigned int threadBlockSize) : m_numSims(numSims), m_device(device), m_threadBlockSize(threadBlockSize) { } template <typename Real> Real PiEstimator<Real>::operator()() { hipError_t cudaResult = hipSuccess; struct hipDeviceProp_t 
deviceProperties; struct hipFuncAttributes funcAttributes; // Determine type of generator to use (32- or 64-bit) typedef typename IF<TYPE_IS<Real, double>::test, hiprandStateSobol64_t, hiprandStateSobol32_t>::type curandStateSobol_sz; typedef typename IF<TYPE_IS<Real, double>::test, hiprandDirectionVectors64_t, hiprandDirectionVectors32_t>::type curandDirectionVectors_sz; // Get device properties cudaResult = hipGetDeviceProperties(&deviceProperties, m_device); if (cudaResult != hipSuccess) { string msg("Could not get device properties: "); msg += hipGetErrorString(cudaResult); throw std::runtime_error(msg); } // Check precision is valid if (typeid(Real) == typeid(double) && (deviceProperties.major < 1 || (deviceProperties.major == 1 && deviceProperties.minor < 3))) { throw std::runtime_error("Device does not have double precision support"); } // Attach to GPU cudaResult = hipSetDevice(m_device); if (cudaResult != hipSuccess) { string msg("Could not set CUDA device: "); msg += hipGetErrorString(cudaResult); throw std::runtime_error(msg); } // Determine how to divide the work between cores dim3 block; dim3 grid; block.x = m_threadBlockSize; grid.x = (m_numSims + m_threadBlockSize - 1) / m_threadBlockSize; // Aim to launch around ten or more times as many blocks as there // are multiprocessors on the target device. unsigned int blocksPerSM = 10; unsigned int numSMs = deviceProperties.multiProcessorCount; while (grid.x > 2 * blocksPerSM * numSMs) grid.x >>= 1; // Get initRNG function properties and check the maximum block size cudaResult = hipFuncGetAttributes(&funcAttributes, initRNG<curandStateSobol_sz, curandDirectionVectors_sz>); if (cudaResult != hipSuccess) { string msg("Could not get function attributes: "); msg += hipGetErrorString(cudaResult); throw std::runtime_error(msg); } if (block.x > (unsigned int)funcAttributes.maxThreadsPerBlock) { throw std::runtime_error("Block X dimension is too large for initRNG kernel"); } // Get computeValue function properties and check the maximum block size cudaResult = hipFuncGetAttributes(&funcAttributes, computeValue<Real, curandStateSobol_sz>); if (cudaResult != hipSuccess) { string msg("Could not get function attributes: "); msg += hipGetErrorString(cudaResult); throw std::runtime_error(msg); } if (block.x > (unsigned int)funcAttributes.maxThreadsPerBlock) { throw std::runtime_error("Block X dimension is too large for computeValue kernel"); } // Check the dimensions are valid if (block.x > (unsigned int)deviceProperties.maxThreadsDim[0]) { throw std::runtime_error("Block X dimension is too large for device"); } if (grid.x > (unsigned int)deviceProperties.maxGridSize[0]) { throw std::runtime_error("Grid X dimension is too large for device"); } // Allocate memory for RNG states and direction vectors curandStateSobol_sz *d_rngStates = 0; curandDirectionVectors_sz *d_rngDirections = 0; cudaResult = hipMalloc((void **)&d_rngStates, 2 * grid.x * block.x * sizeof(curandStateSobol_sz)); if (cudaResult != hipSuccess) { string msg("Could not allocate memory on device for RNG states: "); msg += hipGetErrorString(cudaResult); throw std::runtime_error(msg); } cudaResult = hipMalloc((void **)&d_rngDirections, 2 * sizeof(curandDirectionVectors_sz)); if (cudaResult != hipSuccess) { string msg("Could not allocate memory on device for RNG direction vectors: "); msg += hipGetErrorString(cudaResult); throw std::runtime_error(msg); } // Allocate memory for result // Each thread block will produce one result unsigned int *d_results = 0; cudaResult = hipMalloc((void 
**)&d_results, grid.x * sizeof(unsigned int)); if (cudaResult != hipSuccess) { string msg("Could not allocate memory on device for partial results: "); msg += hipGetErrorString(cudaResult); throw std::runtime_error(msg); } // Generate direction vectors on the host and copy to the device if (typeid(Real) == typeid(float)) { hiprandDirectionVectors32_t *rngDirections; hiprandStatus_t curandResult = hiprandGetDirectionVectors32(&rngDirections, CURAND_DIRECTION_VECTORS_32_JOEKUO6); if (curandResult != HIPRAND_STATUS_SUCCESS) { string msg("Could not get direction vectors for quasi-random number generator: "); msg += curandResult; throw std::runtime_error(msg); } cudaResult = hipMemcpy(d_rngDirections, rngDirections, 2 * sizeof(hiprandDirectionVectors32_t), hipMemcpyHostToDevice); if (cudaResult != hipSuccess) { string msg("Could not copy direction vectors to device: "); msg += hipGetErrorString(cudaResult); throw std::runtime_error(msg); } } else if (typeid(Real) == typeid(double)) { hiprandDirectionVectors64_t *rngDirections; hiprandStatus_t curandResult = hiprandGetDirectionVectors64(&rngDirections, CURAND_DIRECTION_VECTORS_64_JOEKUO6); if (curandResult != HIPRAND_STATUS_SUCCESS) { string msg("Could not get direction vectors for quasi-random number generator: "); msg += curandResult; throw std::runtime_error(msg); } cudaResult = hipMemcpy(d_rngDirections, rngDirections, 2 * sizeof(hiprandDirectionVectors64_t), hipMemcpyHostToDevice); if (cudaResult != hipSuccess) { string msg("Could not copy direction vectors to device: "); msg += hipGetErrorString(cudaResult); throw std::runtime_error(msg); } } else { string msg("Could not get direction vectors for random number generator of specified type"); throw std::runtime_error(msg); } // Initialise RNG hipLaunchKernelGGL(( initRNG), dim3(grid), dim3(block), 0, 0, d_rngStates, d_rngDirections); // Count the points inside unit quarter-circle hipLaunchKernelGGL(( computeValue<Real>), dim3(grid), dim3(block), block.x * sizeof(unsigned int), 0, d_results, d_rngStates, m_numSims); // Copy partial results back vector<unsigned int> results(grid.x); cudaResult = hipMemcpy(&results[0], d_results, grid.x * sizeof(unsigned int), hipMemcpyDeviceToHost); if (cudaResult != hipSuccess) { string msg("Could not copy partial results to host: "); msg += hipGetErrorString(cudaResult); throw std::runtime_error(msg); } // Complete sum-reduction on host Real value = static_cast<Real>(std::accumulate(results.begin(), results.end(), 0)); // Determine the proportion of points inside the quarter-circle, // i.e. the area of the unit quarter-circle value /= m_numSims; // Value is currently an estimate of the area of a unit quarter-circle, so we can // scale to a full circle by multiplying by four. Now since the area of a circle // is pi * r^2, and r is one, the value will be an estimate for the value of pi. value *= 4; // Cleanup if (d_rngStates) { hipFree(d_rngStates); d_rngStates = 0; } if (d_rngDirections) { hipFree(d_rngDirections); d_rngDirections = 0; } if (d_results) { hipFree(d_results); d_results = 0; } return value; } // Explicit template instantiation template class PiEstimator<float>; template class PiEstimator<double>;
96416bb6a27ac25f25ba1eda64cecd00155e3f5a.cu
/* * Copyright 1993-2010 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ #include "../inc/piestimator.h" #include <string> #include <vector> #include <numeric> #include <stdexcept> #include <typeinfo> #include <cuda_runtime_api.h> #include <curand.h> #include <curand_kernel.h> #include "../inc/cudasharedmem.h" using std::string; using std::vector; // Helper templates to support float and double in same code template <typename L, typename R> struct TYPE_IS { static const bool test = false; }; template <typename L> struct TYPE_IS<L, L> { static const bool test = true; }; template <bool, class L, class R> struct IF { typedef R type; }; template <class L, class R> struct IF<true, L, R> { typedef L type; }; // RNG init kernel template <typename rngState_t, typename rngDirectionVectors_t> __global__ void initRNG(rngState_t * const rngStates, rngDirectionVectors_t * const rngDirections) { // Determine thread ID unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; unsigned int step = gridDim.x * blockDim.x; // Initialise the RNG curand_init(rngDirections[0], tid, &rngStates[tid]); curand_init(rngDirections[1], tid, &rngStates[tid + step]); } __device__ unsigned int reduce_sum(unsigned int in) { extern __shared__ unsigned int sdata[]; // Perform first level of reduction: // - Write to shared memory unsigned int ltid = threadIdx.x; sdata[ltid] = in; __syncthreads(); // Do reduction in shared mem for (unsigned int s = blockDim.x / 2 ; s > 0 ; s >>= 1) { if (ltid < s) { sdata[ltid] += sdata[ltid + s]; } __syncthreads(); } return sdata[0]; } __device__ inline void getPoint(float &x, float &y, curandStateSobol32 &state1, curandStateSobol32 &state2) { x = curand_uniform(&state1); y = curand_uniform(&state2); } __device__ inline void getPoint(double &x, double &y, curandStateSobol64 &state1, curandStateSobol64 &state2) { x = curand_uniform_double(&state1); y = curand_uniform_double(&state2); } // Estimator kernel template <typename Real, typename rngState_t> __global__ void computeValue(unsigned int * const results, rngState_t * const rngStates, const unsigned int numSims) { // Determine thread ID unsigned int bid = blockIdx.x; unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; unsigned int step = gridDim.x * blockDim.x; // Initialise the RNG rngState_t localState1 = rngStates[tid]; rngState_t localState2 = rngStates[tid + step]; // Count the number of points which lie inside the unit quarter-circle unsigned int pointsInside = 0; for (unsigned int i = tid ; i < numSims ; i += step) { Real x; Real y; getPoint(x, y, localState1, localState2); Real l2norm2 = x * x + y * y; if (l2norm2 < static_cast<Real>(1)) pointsInside++; } // Reduce within the block pointsInside = reduce_sum(pointsInside); // Store the result if (threadIdx.x == 0) results[bid] = pointsInside; } template <typename Real> PiEstimator<Real>::PiEstimator(unsigned int numSims, unsigned int device, unsigned int threadBlockSize) : m_numSims(numSims), m_device(device), m_threadBlockSize(threadBlockSize) { } template <typename Real> Real PiEstimator<Real>::operator()() { cudaError_t cudaResult = cudaSuccess; struct cudaDeviceProp deviceProperties; struct cudaFuncAttributes funcAttributes; // Determine type of generator to use 
(32- or 64-bit) typedef typename IF<TYPE_IS<Real, double>::test, curandStateSobol64_t, curandStateSobol32_t>::type curandStateSobol_sz; typedef typename IF<TYPE_IS<Real, double>::test, curandDirectionVectors64_t, curandDirectionVectors32_t>::type curandDirectionVectors_sz; // Get device properties cudaResult = cudaGetDeviceProperties(&deviceProperties, m_device); if (cudaResult != cudaSuccess) { string msg("Could not get device properties: "); msg += cudaGetErrorString(cudaResult); throw std::runtime_error(msg); } // Check precision is valid if (typeid(Real) == typeid(double) && (deviceProperties.major < 1 || (deviceProperties.major == 1 && deviceProperties.minor < 3))) { throw std::runtime_error("Device does not have double precision support"); } // Attach to GPU cudaResult = cudaSetDevice(m_device); if (cudaResult != cudaSuccess) { string msg("Could not set CUDA device: "); msg += cudaGetErrorString(cudaResult); throw std::runtime_error(msg); } // Determine how to divide the work between cores dim3 block; dim3 grid; block.x = m_threadBlockSize; grid.x = (m_numSims + m_threadBlockSize - 1) / m_threadBlockSize; // Aim to launch around ten or more times as many blocks as there // are multiprocessors on the target device. unsigned int blocksPerSM = 10; unsigned int numSMs = deviceProperties.multiProcessorCount; while (grid.x > 2 * blocksPerSM * numSMs) grid.x >>= 1; // Get initRNG function properties and check the maximum block size cudaResult = cudaFuncGetAttributes(&funcAttributes, initRNG<curandStateSobol_sz, curandDirectionVectors_sz>); if (cudaResult != cudaSuccess) { string msg("Could not get function attributes: "); msg += cudaGetErrorString(cudaResult); throw std::runtime_error(msg); } if (block.x > (unsigned int)funcAttributes.maxThreadsPerBlock) { throw std::runtime_error("Block X dimension is too large for initRNG kernel"); } // Get computeValue function properties and check the maximum block size cudaResult = cudaFuncGetAttributes(&funcAttributes, computeValue<Real, curandStateSobol_sz>); if (cudaResult != cudaSuccess) { string msg("Could not get function attributes: "); msg += cudaGetErrorString(cudaResult); throw std::runtime_error(msg); } if (block.x > (unsigned int)funcAttributes.maxThreadsPerBlock) { throw std::runtime_error("Block X dimension is too large for computeValue kernel"); } // Check the dimensions are valid if (block.x > (unsigned int)deviceProperties.maxThreadsDim[0]) { throw std::runtime_error("Block X dimension is too large for device"); } if (grid.x > (unsigned int)deviceProperties.maxGridSize[0]) { throw std::runtime_error("Grid X dimension is too large for device"); } // Allocate memory for RNG states and direction vectors curandStateSobol_sz *d_rngStates = 0; curandDirectionVectors_sz *d_rngDirections = 0; cudaResult = cudaMalloc((void **)&d_rngStates, 2 * grid.x * block.x * sizeof(curandStateSobol_sz)); if (cudaResult != cudaSuccess) { string msg("Could not allocate memory on device for RNG states: "); msg += cudaGetErrorString(cudaResult); throw std::runtime_error(msg); } cudaResult = cudaMalloc((void **)&d_rngDirections, 2 * sizeof(curandDirectionVectors_sz)); if (cudaResult != cudaSuccess) { string msg("Could not allocate memory on device for RNG direction vectors: "); msg += cudaGetErrorString(cudaResult); throw std::runtime_error(msg); } // Allocate memory for result // Each thread block will produce one result unsigned int *d_results = 0; cudaResult = cudaMalloc((void **)&d_results, grid.x * sizeof(unsigned int)); if (cudaResult != cudaSuccess) { 
string msg("Could not allocate memory on device for partial results: "); msg += cudaGetErrorString(cudaResult); throw std::runtime_error(msg); } // Generate direction vectors on the host and copy to the device if (typeid(Real) == typeid(float)) { curandDirectionVectors32_t *rngDirections; curandStatus_t curandResult = curandGetDirectionVectors32(&rngDirections, CURAND_DIRECTION_VECTORS_32_JOEKUO6); if (curandResult != CURAND_STATUS_SUCCESS) { string msg("Could not get direction vectors for quasi-random number generator: "); msg += curandResult; throw std::runtime_error(msg); } cudaResult = cudaMemcpy(d_rngDirections, rngDirections, 2 * sizeof(curandDirectionVectors32_t), cudaMemcpyHostToDevice); if (cudaResult != cudaSuccess) { string msg("Could not copy direction vectors to device: "); msg += cudaGetErrorString(cudaResult); throw std::runtime_error(msg); } } else if (typeid(Real) == typeid(double)) { curandDirectionVectors64_t *rngDirections; curandStatus_t curandResult = curandGetDirectionVectors64(&rngDirections, CURAND_DIRECTION_VECTORS_64_JOEKUO6); if (curandResult != CURAND_STATUS_SUCCESS) { string msg("Could not get direction vectors for quasi-random number generator: "); msg += curandResult; throw std::runtime_error(msg); } cudaResult = cudaMemcpy(d_rngDirections, rngDirections, 2 * sizeof(curandDirectionVectors64_t), cudaMemcpyHostToDevice); if (cudaResult != cudaSuccess) { string msg("Could not copy direction vectors to device: "); msg += cudaGetErrorString(cudaResult); throw std::runtime_error(msg); } } else { string msg("Could not get direction vectors for random number generator of specified type"); throw std::runtime_error(msg); } // Initialise RNG initRNG<<<grid, block>>>(d_rngStates, d_rngDirections); // Count the points inside unit quarter-circle computeValue<Real><<<grid, block, block.x * sizeof(unsigned int)>>>(d_results, d_rngStates, m_numSims); // Copy partial results back vector<unsigned int> results(grid.x); cudaResult = cudaMemcpy(&results[0], d_results, grid.x * sizeof(unsigned int), cudaMemcpyDeviceToHost); if (cudaResult != cudaSuccess) { string msg("Could not copy partial results to host: "); msg += cudaGetErrorString(cudaResult); throw std::runtime_error(msg); } // Complete sum-reduction on host Real value = static_cast<Real>(std::accumulate(results.begin(), results.end(), 0)); // Determine the proportion of points inside the quarter-circle, // i.e. the area of the unit quarter-circle value /= m_numSims; // Value is currently an estimate of the area of a unit quarter-circle, so we can // scale to a full circle by multiplying by four. Now since the area of a circle // is pi * r^2, and r is one, the value will be an estimate for the value of pi. value *= 4; // Cleanup if (d_rngStates) { cudaFree(d_rngStates); d_rngStates = 0; } if (d_rngDirections) { cudaFree(d_rngDirections); d_rngDirections = 0; } if (d_results) { cudaFree(d_results); d_results = 0; } return value; } // Explicit template instantiation template class PiEstimator<float>; template class PiEstimator<double>;
7335a519ab9b3b3587fb55172ce57c2caf64c9eb.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void kSquaredDiffTransFast(float* a, float* b, float* dest, unsigned int width, unsigned int bJumpWidth)
{
    const unsigned int idxY = blockIdx.y * blockDim.y + threadIdx.y;
    const unsigned int idxX = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int idx = idxY * width + idxX;

    __shared__ float smem[ADD_BLOCK_SIZE][ADD_BLOCK_SIZE + 1];

    const unsigned int bBlockReadStart = blockDim.x * blockIdx.x * bJumpWidth + blockIdx.y * blockDim.y;
    smem[threadIdx.x][threadIdx.y] = b[bBlockReadStart + threadIdx.y * bJumpWidth + threadIdx.x];

    __syncthreads();

    dest[idx] = (a[idx] - smem[threadIdx.y][threadIdx.x]) * (a[idx] - smem[threadIdx.y][threadIdx.x]);
}
7335a519ab9b3b3587fb55172ce57c2caf64c9eb.cu
#include "includes.h" __global__ void kSquaredDiffTransFast(float* a, float* b, float* dest, unsigned int width, unsigned int bJumpWidth) { const unsigned int idxY = blockIdx.y * blockDim.y + threadIdx.y; const unsigned int idxX = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int idx = idxY * width + idxX; __shared__ float smem[ADD_BLOCK_SIZE][ADD_BLOCK_SIZE + 1]; const unsigned int bBlockReadStart = blockDim.x * blockIdx.x * bJumpWidth + blockIdx.y * blockDim.y; smem[threadIdx.x][threadIdx.y] = b[bBlockReadStart + threadIdx.y * bJumpWidth + threadIdx.x]; __syncthreads(); dest[idx] = (a[idx] - smem[threadIdx.y][threadIdx.x]) * (a[idx] - smem[threadIdx.y][threadIdx.x]); }
97df096e96bcea5e0f3b4afee50992c888671752.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<iostream>
#include<vector>

__global__ void vecadd(float *a, float *b, float *c, int num)
{
    c[threadIdx.x] = a[threadIdx.x] + b[threadIdx.x];
}

int main(int argc, char *argv[])
{
    const int num = 16;
    std::vector<float> a(num, 1);
    std::vector<float> b(num, 1);
    std::vector<float> c(num, 0);

    float *d_a;
    float *d_b;
    float *d_c;
    hipMalloc(&d_a, num * sizeof(float));
    hipMalloc(&d_b, num * sizeof(float));
    hipMalloc(&d_c, num * sizeof(float));

    hipMemcpy(d_a, &a[0], num*sizeof(float), hipMemcpyHostToDevice);
    // Copy b into d_b (the original copied b into d_a again, leaving d_b uninitialized).
    hipMemcpy(d_b, &b[0], num*sizeof(float), hipMemcpyHostToDevice);

    dim3 grid_size = dim3(1, 1, 1);
    dim3 block_size = dim3(num, 1, 1);
    hipLaunchKernelGGL(( vecadd), dim3(grid_size), dim3(block_size), 0, 0, d_a, d_b, d_c, num);

    hipMemcpy(&c[0], d_c, num*sizeof(float), hipMemcpyDeviceToHost);

    for(int i=0; i < num; ++i)
        std::cout << c[i] << std::endl;

    hipFree(d_a);
    hipFree(d_b);
    hipFree(d_c);

    return 0;
}
97df096e96bcea5e0f3b4afee50992c888671752.cu
#include<iostream>
#include<vector>

__global__ void vecadd(float *a, float *b, float *c, int num)
{
    c[threadIdx.x] = a[threadIdx.x] + b[threadIdx.x];
}

int main(int argc, char *argv[])
{
    const int num = 16;
    std::vector<float> a(num, 1);
    std::vector<float> b(num, 1);
    std::vector<float> c(num, 0);

    float *d_a;
    float *d_b;
    float *d_c;
    cudaMalloc(&d_a, num * sizeof(float));
    cudaMalloc(&d_b, num * sizeof(float));
    cudaMalloc(&d_c, num * sizeof(float));

    cudaMemcpy(d_a, &a[0], num*sizeof(float), cudaMemcpyHostToDevice);
    // Copy b into d_b (the original copied b into d_a again, leaving d_b uninitialized).
    cudaMemcpy(d_b, &b[0], num*sizeof(float), cudaMemcpyHostToDevice);

    dim3 grid_size = dim3(1, 1, 1);
    dim3 block_size = dim3(num, 1, 1);
    vecadd<<<grid_size, block_size>>>(d_a, d_b, d_c, num);

    cudaMemcpy(&c[0], d_c, num*sizeof(float), cudaMemcpyDeviceToHost);

    for(int i=0; i < num; ++i)
        std::cout << c[i] << std::endl;

    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);

    return 0;
}
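The example above ignores the hipError_t return codes from hipMalloc, hipMemcpy, and the kernel launch. A common pattern, sketched here and not part of the original file, is a small checking macro; only standard HIP runtime calls (hipGetErrorString, hipGetLastError) are used, and the macro name HIP_CHECK is our own choice.

// Sketch of a HIP error-checking macro (assumed helper, not from the original sample).
#include <cstdio>
#include <cstdlib>
#include <hip/hip_runtime.h>

#define HIP_CHECK(call)                                                      \
    do {                                                                     \
        hipError_t err_ = (call);                                            \
        if (err_ != hipSuccess) {                                            \
            std::fprintf(stderr, "HIP error %s at %s:%d\n",                  \
                         hipGetErrorString(err_), __FILE__, __LINE__);       \
            std::exit(EXIT_FAILURE);                                         \
        }                                                                    \
    } while (0)

// Usage, e.g.:
//   HIP_CHECK(hipMemcpy(d_b, &b[0], num * sizeof(float), hipMemcpyHostToDevice));
//   hipLaunchKernelGGL(( vecadd), dim3(grid_size), dim3(block_size), 0, 0, d_a, d_b, d_c, num);
//   HIP_CHECK(hipGetLastError());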
f4a9a5a167a0892e7de3c12851d24ed45857915b.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // pyTorchChamferDistance components // https://github.com/chrdiller/pyTorchChamferDistance // // MIT License // // Copyright (c) 2018 Christian Diller // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in all // copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. 
#include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include "device_atomic_functions.h" #include "../../utils.h" #include <ATen/ATen.h> #include <THH/THHAtomics.cuh> #define BLOCK_SIZE 512 template<typename scalar_t> __global__ void SidedDistanceKernel(int b, int n, const scalar_t * xyz, int m, const scalar_t * xyz2, scalar_t * result, int64_t * result_i) { const int batch=512; __shared__ scalar_t buf[batch*3]; for (int i = blockIdx.x; i<b; i += gridDim.x){ for (int k2 = 0; k2 < m; k2 += batch) { int end_k = min(m, k2 + batch) - k2; for (int j = threadIdx.x; j < end_k * 3; j += blockDim.x) { buf[j]=xyz2[(i*m+k2)*3+j]; } __syncthreads(); for (int j = threadIdx.x + blockIdx.y * blockDim.x; j < n; j += blockDim.x * gridDim.y) { scalar_t x1 = xyz[(i * n + j) * 3 + 0]; scalar_t y1 = xyz[(i * n + j) * 3 + 1]; scalar_t z1 = xyz[(i * n + j) * 3 + 2]; int64_t best_i = 0; scalar_t best = 0; int end_ka = end_k - (end_k & 3); if (end_ka == batch){ for (int k = 0; k < batch; k += 4) { { scalar_t x2 = buf[k * 3 + 0] - x1; scalar_t y2 = buf[k * 3 + 1] - y1; scalar_t z2 = buf[k * 3 + 2]- z1; scalar_t d = x2 * x2 + y2 * y2 + z2 * z2; if (k == 0 || d < best) { best = d; best_i = k + k2; } } { scalar_t x2 = buf[k * 3 + 3] - x1; scalar_t y2 = buf[k * 3 + 4] - y1; scalar_t z2 = buf[k * 3 + 5] - z1; scalar_t d = x2 * x2 + y2 * y2 + z2 * z2; if (d < best){ best = d; best_i = k + k2 + 1; } } { scalar_t x2 = buf[k * 3 + 6]- x1; scalar_t y2 = buf[k * 3 + 7] - y1; scalar_t z2 = buf[k * 3 + 8] - z1; scalar_t d = x2 * x2 + y2 * y2 + z2 * z2; if (d < best) { best = d; best_i = k + k2 + 2; } } { scalar_t x2 = buf[k * 3 + 9] - x1; scalar_t y2 = buf[k * 3 + 10]-y1; scalar_t z2 = buf[k*3 + 11] - z1; scalar_t d = x2 * x2 + y2 * y2 + z2 * z2; if (d < best) { best = d; best_i = k + k2 + 3; } } } } else { for (int k = 0; k < end_ka; k += 4) { { scalar_t x2 = buf[k * 3 + 0] - x1; scalar_t y2 = buf[k * 3 + 1] - y1; scalar_t z2 = buf[k * 3 + 2] - z1; scalar_t d = x2 * x2 + y2 * y2 + z2 * z2; if (k == 0 || d < best) { best = d; best_i = k + k2; } } { scalar_t x2 = buf[k * 3 + 3] - x1; scalar_t y2 = buf[k * 3 + 4] - y1; scalar_t z2 = buf[k * 3 + 5] - z1; scalar_t d = x2 * x2 + y2 * y2 + z2 * z2; if (d < best) { best = d; best_i = k + k2 + 1; } } { scalar_t x2 = buf[k * 3 + 6] - x1; scalar_t y2 = buf[k * 3 + 7] - y1; scalar_t z2 = buf[k * 3 + 8] - z1; scalar_t d= x2 * x2 + y2 * y2 + z2 * z2; if (d < best) { best = d; best_i = k + k2 + 2; } } { scalar_t x2 = buf[k * 3 + 9] - x1; scalar_t y2 = buf[k * 3 + 10] - y1; scalar_t z2 = buf[k * 3 + 11] - z1; scalar_t d = x2 * x2 + y2 * y2 + z2 * z2; if (d < best) { best = d; best_i = k + k2 + 3; } } } } for (int k = end_ka; k < end_k; k++) { scalar_t x2 = buf[k * 3 + 0] - x1; scalar_t y2 = buf[k * 3 + 1] - y1; scalar_t z2 = buf[k * 3 + 2] - z1; scalar_t d = x2 * x2 + y2 * y2 + z2 * z2; if (k == 0 || d < best) { best = d; best_i = k+k2; } } if (k2 == 0 || result[(i * n + j)] > best) { result[(i * n + j)] = best; result_i[(i * n + j)] = best_i; } } __syncthreads(); } } } template<typename scalar_t> __global__ void SidedDistanceBackwardKernel( const scalar_t* grad_output, const int b, const int n, const scalar_t * p1, const int m, const scalar_t * p2, const int64_t* idx, scalar_t* grad_input1, scalar_t* grad_input2) { int batch_id = blockIdx.y; for (int point_id = threadIdx.x + blockIdx.x * blockDim.x; point_id < n; point_id += gridDim.x * blockDim.x) { int main_id = point_id + batch_id * n; scalar_t x1 = p1[main_id * 3]; scalar_t y1 = p1[main_id * 3 + 1]; scalar_t z1 = p1[main_id * 3 + 
2]; int64_t p2_idx = (idx[main_id] + batch_id * m) * 3; scalar_t x2 = p2[p2_idx]; scalar_t y2 = p2[p2_idx + 1]; scalar_t z2 = p2[p2_idx + 2]; scalar_t grad = grad_output[main_id]; grad_input1[main_id * 3] = 2 * (x1 - x2) * grad; grad_input1[main_id * 3 + 1] = 2 * (y1 - y2) * grad; grad_input1[main_id * 3 + 2] = 2 * (z1 - z2) * grad; scalar_t result_x = 2 * (x2 - x1) * grad; scalar_t result_y = 2 * (y2 - y1) * grad; scalar_t result_z = 2 * (z2 - z1) * grad; // compute grad_input2 atomicAdd(&(grad_input2[p2_idx]), result_x); atomicAdd(&(grad_input2[p2_idx + 1]), result_y); atomicAdd(&(grad_input2[p2_idx + 2]), result_z); } } void sided_distance_cuda_forward( const at::Tensor xyz1, const at::Tensor xyz2, const at::Tensor dist1, const at::Tensor idx1) { DISPATCH_NUM_TYPES(xyz1.scalar_type(), scalar_t, "sided_distance", [&] { hipLaunchKernelGGL(( SidedDistanceKernel<scalar_t>), dim3(dim3(32,16,1)), dim3(512), 0, 0, xyz1.size(0), xyz1.size(1), xyz1.data_ptr<scalar_t>(), xyz2.size(1), xyz2.data_ptr<scalar_t>(), dist1.data_ptr<scalar_t>(), idx1.data_ptr<int64_t>()); } ); } void sided_distance_cuda_backward( const at::Tensor grad_output, const at::Tensor p1, const at::Tensor p2, const at::Tensor idx, const at::Tensor grad_input1, const at::Tensor grad_input2) { int n = p1.size(1); int m = p2.size(1); int b = p1.size(0); int num_blocks = (max(n, m) + BLOCK_SIZE - 1) / BLOCK_SIZE; DISPATCH_NUM_TYPES(p1.scalar_type(), scalar_t, "sided_distance_backward", [&] { hipLaunchKernelGGL(( SidedDistanceBackwardKernel<scalar_t>), dim3(dim3(num_blocks, b, 1)), dim3(BLOCK_SIZE), 0, 0, grad_output.data_ptr<scalar_t>(), b, n, p1.data_ptr<scalar_t>(), m, p2.data_ptr<scalar_t>(), idx.data_ptr<int64_t>(), grad_input1.data_ptr<scalar_t>(), grad_input2.data_ptr<scalar_t>()); } ); }
f4a9a5a167a0892e7de3c12851d24ed45857915b.cu
// Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // pyTorchChamferDistance components // https://github.com/chrdiller/pyTorchChamferDistance // // MIT License // // Copyright (c) 2018 Christian Diller // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in all // copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. 
#include <cuda.h> #include <cuda_runtime.h> #include "device_atomic_functions.h" #include "../../utils.h" #include <ATen/ATen.h> #include <THC/THCAtomics.cuh> #define BLOCK_SIZE 512 template<typename scalar_t> __global__ void SidedDistanceKernel(int b, int n, const scalar_t * xyz, int m, const scalar_t * xyz2, scalar_t * result, int64_t * result_i) { const int batch=512; __shared__ scalar_t buf[batch*3]; for (int i = blockIdx.x; i<b; i += gridDim.x){ for (int k2 = 0; k2 < m; k2 += batch) { int end_k = min(m, k2 + batch) - k2; for (int j = threadIdx.x; j < end_k * 3; j += blockDim.x) { buf[j]=xyz2[(i*m+k2)*3+j]; } __syncthreads(); for (int j = threadIdx.x + blockIdx.y * blockDim.x; j < n; j += blockDim.x * gridDim.y) { scalar_t x1 = xyz[(i * n + j) * 3 + 0]; scalar_t y1 = xyz[(i * n + j) * 3 + 1]; scalar_t z1 = xyz[(i * n + j) * 3 + 2]; int64_t best_i = 0; scalar_t best = 0; int end_ka = end_k - (end_k & 3); if (end_ka == batch){ for (int k = 0; k < batch; k += 4) { { scalar_t x2 = buf[k * 3 + 0] - x1; scalar_t y2 = buf[k * 3 + 1] - y1; scalar_t z2 = buf[k * 3 + 2]- z1; scalar_t d = x2 * x2 + y2 * y2 + z2 * z2; if (k == 0 || d < best) { best = d; best_i = k + k2; } } { scalar_t x2 = buf[k * 3 + 3] - x1; scalar_t y2 = buf[k * 3 + 4] - y1; scalar_t z2 = buf[k * 3 + 5] - z1; scalar_t d = x2 * x2 + y2 * y2 + z2 * z2; if (d < best){ best = d; best_i = k + k2 + 1; } } { scalar_t x2 = buf[k * 3 + 6]- x1; scalar_t y2 = buf[k * 3 + 7] - y1; scalar_t z2 = buf[k * 3 + 8] - z1; scalar_t d = x2 * x2 + y2 * y2 + z2 * z2; if (d < best) { best = d; best_i = k + k2 + 2; } } { scalar_t x2 = buf[k * 3 + 9] - x1; scalar_t y2 = buf[k * 3 + 10]-y1; scalar_t z2 = buf[k*3 + 11] - z1; scalar_t d = x2 * x2 + y2 * y2 + z2 * z2; if (d < best) { best = d; best_i = k + k2 + 3; } } } } else { for (int k = 0; k < end_ka; k += 4) { { scalar_t x2 = buf[k * 3 + 0] - x1; scalar_t y2 = buf[k * 3 + 1] - y1; scalar_t z2 = buf[k * 3 + 2] - z1; scalar_t d = x2 * x2 + y2 * y2 + z2 * z2; if (k == 0 || d < best) { best = d; best_i = k + k2; } } { scalar_t x2 = buf[k * 3 + 3] - x1; scalar_t y2 = buf[k * 3 + 4] - y1; scalar_t z2 = buf[k * 3 + 5] - z1; scalar_t d = x2 * x2 + y2 * y2 + z2 * z2; if (d < best) { best = d; best_i = k + k2 + 1; } } { scalar_t x2 = buf[k * 3 + 6] - x1; scalar_t y2 = buf[k * 3 + 7] - y1; scalar_t z2 = buf[k * 3 + 8] - z1; scalar_t d= x2 * x2 + y2 * y2 + z2 * z2; if (d < best) { best = d; best_i = k + k2 + 2; } } { scalar_t x2 = buf[k * 3 + 9] - x1; scalar_t y2 = buf[k * 3 + 10] - y1; scalar_t z2 = buf[k * 3 + 11] - z1; scalar_t d = x2 * x2 + y2 * y2 + z2 * z2; if (d < best) { best = d; best_i = k + k2 + 3; } } } } for (int k = end_ka; k < end_k; k++) { scalar_t x2 = buf[k * 3 + 0] - x1; scalar_t y2 = buf[k * 3 + 1] - y1; scalar_t z2 = buf[k * 3 + 2] - z1; scalar_t d = x2 * x2 + y2 * y2 + z2 * z2; if (k == 0 || d < best) { best = d; best_i = k+k2; } } if (k2 == 0 || result[(i * n + j)] > best) { result[(i * n + j)] = best; result_i[(i * n + j)] = best_i; } } __syncthreads(); } } } template<typename scalar_t> __global__ void SidedDistanceBackwardKernel( const scalar_t* grad_output, const int b, const int n, const scalar_t * p1, const int m, const scalar_t * p2, const int64_t* idx, scalar_t* grad_input1, scalar_t* grad_input2) { int batch_id = blockIdx.y; for (int point_id = threadIdx.x + blockIdx.x * blockDim.x; point_id < n; point_id += gridDim.x * blockDim.x) { int main_id = point_id + batch_id * n; scalar_t x1 = p1[main_id * 3]; scalar_t y1 = p1[main_id * 3 + 1]; scalar_t z1 = p1[main_id * 3 + 2]; int64_t 
p2_idx = (idx[main_id] + batch_id * m) * 3; scalar_t x2 = p2[p2_idx]; scalar_t y2 = p2[p2_idx + 1]; scalar_t z2 = p2[p2_idx + 2]; scalar_t grad = grad_output[main_id]; grad_input1[main_id * 3] = 2 * (x1 - x2) * grad; grad_input1[main_id * 3 + 1] = 2 * (y1 - y2) * grad; grad_input1[main_id * 3 + 2] = 2 * (z1 - z2) * grad; scalar_t result_x = 2 * (x2 - x1) * grad; scalar_t result_y = 2 * (y2 - y1) * grad; scalar_t result_z = 2 * (z2 - z1) * grad; // compute grad_input2 atomicAdd(&(grad_input2[p2_idx]), result_x); atomicAdd(&(grad_input2[p2_idx + 1]), result_y); atomicAdd(&(grad_input2[p2_idx + 2]), result_z); } } void sided_distance_cuda_forward( const at::Tensor xyz1, const at::Tensor xyz2, const at::Tensor dist1, const at::Tensor idx1) { DISPATCH_NUM_TYPES(xyz1.scalar_type(), scalar_t, "sided_distance", [&] { SidedDistanceKernel<scalar_t><<<dim3(32,16,1), 512>>>(xyz1.size(0), xyz1.size(1), xyz1.data_ptr<scalar_t>(), xyz2.size(1), xyz2.data_ptr<scalar_t>(), dist1.data_ptr<scalar_t>(), idx1.data_ptr<int64_t>()); } ); } void sided_distance_cuda_backward( const at::Tensor grad_output, const at::Tensor p1, const at::Tensor p2, const at::Tensor idx, const at::Tensor grad_input1, const at::Tensor grad_input2) { int n = p1.size(1); int m = p2.size(1); int b = p1.size(0); int num_blocks = (max(n, m) + BLOCK_SIZE - 1) / BLOCK_SIZE; DISPATCH_NUM_TYPES(p1.scalar_type(), scalar_t, "sided_distance_backward", [&] { SidedDistanceBackwardKernel<scalar_t><<<dim3(num_blocks, b, 1), BLOCK_SIZE>>>( grad_output.data_ptr<scalar_t>(), b, n, p1.data_ptr<scalar_t>(), m, p2.data_ptr<scalar_t>(), idx.data_ptr<int64_t>(), grad_input1.data_ptr<scalar_t>(), grad_input2.data_ptr<scalar_t>()); } ); }
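A CPU sketch of the forward pass above, i.e. the one-sided (directed) squared distance: for every point of xyz1 it records the squared distance to, and the index of, its nearest neighbour in xyz2. Row-major [batch, n, 3] and [batch, m, 3] layouts are assumed from the kernel's indexing; this is a brute-force reference for checking results, not the library's API.

#include <cstdint>
#include <cstddef>

// CPU reference for SidedDistanceKernel (brute force, squared L2, first match on ties).
void sidedDistanceRef(const float* xyz1, const float* xyz2,
                      float* dist, int64_t* idx,
                      std::size_t b, std::size_t n, std::size_t m)
{
    for (std::size_t i = 0; i < b; ++i)
        for (std::size_t j = 0; j < n; ++j)
        {
            const float* p = &xyz1[(i * n + j) * 3];
            float best = 0.0f;
            int64_t best_k = 0;
            for (std::size_t k = 0; k < m; ++k)
            {
                const float* q = &xyz2[(i * m + k) * 3];
                float dx = p[0] - q[0], dy = p[1] - q[1], dz = p[2] - q[2];
                float d = dx * dx + dy * dy + dz * dz;
                if (k == 0 || d < best) { best = d; best_k = (int64_t)k; }
            }
            dist[i * n + j] = best;
            idx[i * n + j]  = best_k;
        }
}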
3fea14e6e1a304df811974bcfddede792d01bd13.hip
// !!! This is a file automatically generated by hipify!!!
/*
 * Copyright (c) 2019, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// The translation unit for reduction `sum of squares`

#include <cudf/detail/reduction_functions.hpp>

#include "simple_hip.cuh"

std::unique_ptr<cudf::scalar> cudf::experimental::reduction::sum_of_squares(
    column_view const& col, cudf::data_type const output_dtype,
    rmm::mr::device_memory_resource* mr, hipStream_t stream)
{
  using reducer = cudf::experimental::reduction::simple::element_type_dispatcher<
      cudf::experimental::reduction::op::sum_of_squares>;
  return cudf::experimental::type_dispatcher(col.type(), reducer(), col, output_dtype, mr, stream);
}
3fea14e6e1a304df811974bcfddede792d01bd13.cu
/*
 * Copyright (c) 2019, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// The translation unit for reduction `sum of squares`

#include <cudf/detail/reduction_functions.hpp>

#include "simple.cuh"

std::unique_ptr<cudf::scalar> cudf::experimental::reduction::sum_of_squares(
    column_view const& col, cudf::data_type const output_dtype,
    rmm::mr::device_memory_resource* mr, cudaStream_t stream)
{
  using reducer = cudf::experimental::reduction::simple::element_type_dispatcher<
      cudf::experimental::reduction::op::sum_of_squares>;
  return cudf::experimental::type_dispatcher(col.type(), reducer(), col, output_dtype, mr, stream);
}
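The translation unit above only dispatches on the element type; the actual reduction lives in simple_hip.cuh / simple.cuh, which are not part of this dump. As a plain reference, the operation it names is a sum of squared elements; a trivial host sketch, ignoring the null-mask and output-type promotion that the real cudf code handles, would be:

#include <vector>

// Host sketch of the `sum of squares` reduction: sum_i x_i * x_i.
// Null handling and output-type promotion from cudf are deliberately omitted.
double sum_of_squares_ref(const std::vector<double>& x)
{
    double acc = 0.0;
    for (double v : x) acc += v * v;
    return acc;
}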
8e6b76b19aaaa1039c72a03c484363e260300889.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/index_sample_op.h" #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/platform/cuda_device_function.h" #include "paddle/fluid/platform/cuda_primitives.h" namespace paddle { namespace operators { using Tensor = framework::Tensor; using LoDTensor = framework::LoDTensor; template <typename T, typename IndexT = int> __global__ void IndexSampleForward(const IndexT* index, const T* in_data, T* out_data, size_t index_length, size_t input_length, size_t batch_size) { int index_i = blockDim.x * blockIdx.x + threadIdx.x; int index_j = blockDim.y * blockIdx.y + threadIdx.y; int index_idx = index_j * index_length + index_i; int in_idx = index_j * input_length + index_i; if (index_i < index_length & index_j < batch_size) { IndexT sample_idx = index[index_idx]; out_data[index_idx] = in_data[in_idx - index_i + sample_idx]; } } template <typename T, typename IndexT = int> __global__ void IndexSampleGrad(const IndexT* index, T* in_grad, const T* out_grad, size_t index_length, size_t input_length, size_t batch_size, bool same_data_in_row = true) { int index_i = blockDim.x * blockIdx.x + threadIdx.x; int index_j = blockDim.y * blockIdx.y + threadIdx.y; int index_idx = index_j * index_length + index_i; int in_idx = index_j * input_length + index_i; if (index_i < index_length & index_j < batch_size) { IndexT sample_idx = index[index_idx]; if (same_data_in_row) { platform::CudaAtomicAdd(&(in_grad[in_idx - index_i + sample_idx]), out_grad[sample_idx]); } else { in_grad[in_idx - index_i + sample_idx] = out_grad[sample_idx]; } } } template <typename T> class IndexSampleKernel<platform::CUDADeviceContext, T> : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* input = ctx.Input<LoDTensor>("X"); auto* index = ctx.Input<LoDTensor>("Index"); auto* output = ctx.Output<LoDTensor>("Out"); const auto& index_type = index->type(); bool index_type_match = index_type == framework::proto::VarType::INT64 || index_type == framework::proto::VarType::INT32; PADDLE_ENFORCE_EQ(index_type_match, true, platform::errors::InvalidArgument( "Input(Index) holds the wrong type, it holds %s, but " "desires to be %s or %s", paddle::framework::DataTypeToString(index_type), paddle::framework::DataTypeToString( framework::proto::VarType::INT32), paddle::framework::DataTypeToString( framework::proto::VarType::INT64))); const auto* in_data = input->data<T>(); auto* out_data = output->mutable_data<T>(ctx.GetPlace()); auto stream = ctx.template device_context<platform::CUDADeviceContext>().stream(); auto input_dim = input->dims(); auto index_dim = index->dims(); size_t batch_size = input_dim[0]; size_t input_length = input_dim[1]; size_t index_length = index_dim[1]; auto block_width = 
platform::RoundToPowerOfTwo(index_length); int block_height = platform::RoundToPowerOfTwo(index_length * batch_size) / block_width; dim3 block_dim(block_width, block_height); dim3 grid_dim((index_length + block_dim.x - 1) / block_dim.x, (batch_size + block_dim.y - 1) / block_dim.y); if (index_type == framework::proto::VarType::INT64) { const int64_t* index_data = index->data<int64_t>(); hipLaunchKernelGGL(( IndexSampleForward<T, int64_t>), dim3(grid_dim), dim3(block_dim), 0, stream, index_data, in_data, out_data, index_length, input_length, batch_size); } else if (index_type == framework::proto::VarType::INT32) { const int* index_data = index->data<int>(); hipLaunchKernelGGL(( IndexSampleForward<T, int>), dim3(grid_dim), dim3(block_dim), 0, stream, index_data, in_data, out_data, index_length, input_length, batch_size); } } }; template <typename T> class IndexSampleGradKernel<platform::CUDADeviceContext, T> : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* output_grad = ctx.Input<LoDTensor>(framework::GradVarName("Out")); auto* input_grad = ctx.Output<LoDTensor>(framework::GradVarName("X")); auto* index = ctx.Input<LoDTensor>("Index"); const auto* output_grad_data = output_grad->data<T>(); auto* input_grad_data = input_grad->mutable_data<T>(ctx.GetPlace()); const auto& index_type = index->type(); bool index_type_match = index_type == framework::proto::VarType::INT64 || index_type == framework::proto::VarType::INT32; PADDLE_ENFORCE_EQ(index_type_match, true, platform::errors::InvalidArgument( "Input(Index) holds the wrong type, it holds %s, but " "desires to be %s or %s", paddle::framework::DataTypeToString(index_type), paddle::framework::DataTypeToString( framework::proto::VarType::INT32), paddle::framework::DataTypeToString( framework::proto::VarType::INT64))); auto stream = ctx.template device_context<platform::CUDADeviceContext>().stream(); auto input_num = input_grad->numel(); auto input_dim = input_grad->dims(); auto index_dim = index->dims(); size_t batch_size = index_dim[0]; size_t input_length = input_dim[1]; size_t index_length = index_dim[1]; bool same_data_in_index_row = index_length == 1 ? 
false : true; auto block_width = platform::RoundToPowerOfTwo(index_length); auto block_height = platform::RoundToPowerOfTwo(index_length * batch_size) / block_width; dim3 block_dim(block_width, block_height); dim3 grid_dim((index_length + block_dim.x - 1) / block_dim.x, (batch_size + block_dim.y - 1) / block_dim.y); math::SetConstant<platform::CUDADeviceContext, T> set_zero; auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>(); set_zero(dev_ctx, input_grad, static_cast<T>(0)); if (index_type == framework::proto::VarType::INT64) { const int64_t* index_data = index->data<int64_t>(); hipLaunchKernelGGL(( IndexSampleGrad<T, int64_t>), dim3(grid_dim), dim3(block_dim), 0, stream, index_data, input_grad_data, output_grad_data, index_length, input_length, batch_size, same_data_in_index_row); } else if (index_type == framework::proto::VarType::INT32) { const int* index_data = index->data<int>(); hipLaunchKernelGGL(( IndexSampleGrad<T, int>), dim3(grid_dim), dim3(block_dim), 0, stream, index_data, input_grad_data, output_grad_data, index_length, input_length, batch_size, same_data_in_index_row); } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( index_sample, ops::IndexSampleKernel<paddle::platform::CUDADeviceContext, float>, ops::IndexSampleKernel<paddle::platform::CUDADeviceContext, double>, ops::IndexSampleKernel<paddle::platform::CUDADeviceContext, int>, ops::IndexSampleKernel<paddle::platform::CUDADeviceContext, int64_t>); REGISTER_OP_CUDA_KERNEL( index_sample_grad, ops::IndexSampleGradKernel<paddle::platform::CUDADeviceContext, float>, ops::IndexSampleGradKernel<paddle::platform::CUDADeviceContext, double>, ops::IndexSampleGradKernel<paddle::platform::CUDADeviceContext, int>, ops::IndexSampleGradKernel<paddle::platform::CUDADeviceContext, int64_t>);
8e6b76b19aaaa1039c72a03c484363e260300889.cu
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/index_sample_op.h" #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/platform/cuda_device_function.h" #include "paddle/fluid/platform/cuda_primitives.h" namespace paddle { namespace operators { using Tensor = framework::Tensor; using LoDTensor = framework::LoDTensor; template <typename T, typename IndexT = int> __global__ void IndexSampleForward(const IndexT* index, const T* in_data, T* out_data, size_t index_length, size_t input_length, size_t batch_size) { int index_i = blockDim.x * blockIdx.x + threadIdx.x; int index_j = blockDim.y * blockIdx.y + threadIdx.y; int index_idx = index_j * index_length + index_i; int in_idx = index_j * input_length + index_i; if (index_i < index_length & index_j < batch_size) { IndexT sample_idx = index[index_idx]; out_data[index_idx] = in_data[in_idx - index_i + sample_idx]; } } template <typename T, typename IndexT = int> __global__ void IndexSampleGrad(const IndexT* index, T* in_grad, const T* out_grad, size_t index_length, size_t input_length, size_t batch_size, bool same_data_in_row = true) { int index_i = blockDim.x * blockIdx.x + threadIdx.x; int index_j = blockDim.y * blockIdx.y + threadIdx.y; int index_idx = index_j * index_length + index_i; int in_idx = index_j * input_length + index_i; if (index_i < index_length & index_j < batch_size) { IndexT sample_idx = index[index_idx]; if (same_data_in_row) { platform::CudaAtomicAdd(&(in_grad[in_idx - index_i + sample_idx]), out_grad[sample_idx]); } else { in_grad[in_idx - index_i + sample_idx] = out_grad[sample_idx]; } } } template <typename T> class IndexSampleKernel<platform::CUDADeviceContext, T> : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* input = ctx.Input<LoDTensor>("X"); auto* index = ctx.Input<LoDTensor>("Index"); auto* output = ctx.Output<LoDTensor>("Out"); const auto& index_type = index->type(); bool index_type_match = index_type == framework::proto::VarType::INT64 || index_type == framework::proto::VarType::INT32; PADDLE_ENFORCE_EQ(index_type_match, true, platform::errors::InvalidArgument( "Input(Index) holds the wrong type, it holds %s, but " "desires to be %s or %s", paddle::framework::DataTypeToString(index_type), paddle::framework::DataTypeToString( framework::proto::VarType::INT32), paddle::framework::DataTypeToString( framework::proto::VarType::INT64))); const auto* in_data = input->data<T>(); auto* out_data = output->mutable_data<T>(ctx.GetPlace()); auto stream = ctx.template device_context<platform::CUDADeviceContext>().stream(); auto input_dim = input->dims(); auto index_dim = index->dims(); size_t batch_size = input_dim[0]; size_t input_length = input_dim[1]; size_t index_length = index_dim[1]; auto block_width = platform::RoundToPowerOfTwo(index_length); int block_height = 
platform::RoundToPowerOfTwo(index_length * batch_size) / block_width; dim3 block_dim(block_width, block_height); dim3 grid_dim((index_length + block_dim.x - 1) / block_dim.x, (batch_size + block_dim.y - 1) / block_dim.y); if (index_type == framework::proto::VarType::INT64) { const int64_t* index_data = index->data<int64_t>(); IndexSampleForward<T, int64_t><<<grid_dim, block_dim, 0, stream>>>( index_data, in_data, out_data, index_length, input_length, batch_size); } else if (index_type == framework::proto::VarType::INT32) { const int* index_data = index->data<int>(); IndexSampleForward<T, int><<<grid_dim, block_dim, 0, stream>>>( index_data, in_data, out_data, index_length, input_length, batch_size); } } }; template <typename T> class IndexSampleGradKernel<platform::CUDADeviceContext, T> : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* output_grad = ctx.Input<LoDTensor>(framework::GradVarName("Out")); auto* input_grad = ctx.Output<LoDTensor>(framework::GradVarName("X")); auto* index = ctx.Input<LoDTensor>("Index"); const auto* output_grad_data = output_grad->data<T>(); auto* input_grad_data = input_grad->mutable_data<T>(ctx.GetPlace()); const auto& index_type = index->type(); bool index_type_match = index_type == framework::proto::VarType::INT64 || index_type == framework::proto::VarType::INT32; PADDLE_ENFORCE_EQ(index_type_match, true, platform::errors::InvalidArgument( "Input(Index) holds the wrong type, it holds %s, but " "desires to be %s or %s", paddle::framework::DataTypeToString(index_type), paddle::framework::DataTypeToString( framework::proto::VarType::INT32), paddle::framework::DataTypeToString( framework::proto::VarType::INT64))); auto stream = ctx.template device_context<platform::CUDADeviceContext>().stream(); auto input_num = input_grad->numel(); auto input_dim = input_grad->dims(); auto index_dim = index->dims(); size_t batch_size = index_dim[0]; size_t input_length = input_dim[1]; size_t index_length = index_dim[1]; bool same_data_in_index_row = index_length == 1 ? 
false : true; auto block_width = platform::RoundToPowerOfTwo(index_length); auto block_height = platform::RoundToPowerOfTwo(index_length * batch_size) / block_width; dim3 block_dim(block_width, block_height); dim3 grid_dim((index_length + block_dim.x - 1) / block_dim.x, (batch_size + block_dim.y - 1) / block_dim.y); math::SetConstant<platform::CUDADeviceContext, T> set_zero; auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>(); set_zero(dev_ctx, input_grad, static_cast<T>(0)); if (index_type == framework::proto::VarType::INT64) { const int64_t* index_data = index->data<int64_t>(); IndexSampleGrad<T, int64_t><<<grid_dim, block_dim, 0, stream>>>( index_data, input_grad_data, output_grad_data, index_length, input_length, batch_size, same_data_in_index_row); } else if (index_type == framework::proto::VarType::INT32) { const int* index_data = index->data<int>(); IndexSampleGrad<T, int><<<grid_dim, block_dim, 0, stream>>>( index_data, input_grad_data, output_grad_data, index_length, input_length, batch_size, same_data_in_index_row); } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( index_sample, ops::IndexSampleKernel<paddle::platform::CUDADeviceContext, float>, ops::IndexSampleKernel<paddle::platform::CUDADeviceContext, double>, ops::IndexSampleKernel<paddle::platform::CUDADeviceContext, int>, ops::IndexSampleKernel<paddle::platform::CUDADeviceContext, int64_t>); REGISTER_OP_CUDA_KERNEL( index_sample_grad, ops::IndexSampleGradKernel<paddle::platform::CUDADeviceContext, float>, ops::IndexSampleGradKernel<paddle::platform::CUDADeviceContext, double>, ops::IndexSampleGradKernel<paddle::platform::CUDADeviceContext, int>, ops::IndexSampleGradKernel<paddle::platform::CUDADeviceContext, int64_t>);
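A CPU sketch of the forward kernel above: with X of shape [batch_size, input_length] and Index of shape [batch_size, index_length], each output element picks the indexed column from the same row, out[j][i] = X[j][Index[j][i]]. The row-major layout follows the kernel's index arithmetic; the bounds assertion is added here for illustration only.

#include <cstddef>
#include <cassert>

// CPU reference for IndexSampleForward (row-major layout assumed).
template <typename T, typename IndexT>
void indexSampleRef(const IndexT* index, const T* in, T* out,
                    std::size_t index_length, std::size_t input_length,
                    std::size_t batch_size)
{
    for (std::size_t j = 0; j < batch_size; ++j)
        for (std::size_t i = 0; i < index_length; ++i)
        {
            IndexT s = index[j * index_length + i];
            assert(s >= 0 && (std::size_t)s < input_length);   // illustrative check only
            out[j * index_length + i] = in[j * input_length + s];
        }
}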
c2bbbc3cd9e290ee964a27e8f0fbe21763f73c4a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ #include <ops/declarable/helpers/compare_elem.h> namespace sd { namespace ops { namespace helpers { template <typename T> static _CUDA_G void comparator(void *vx, const Nd4jLong *xShapeInfo, Nd4jLong length, const bool isStrict, void *reductionBuffer, bool *z) { auto x = reinterpret_cast<T*>(vx); auto reduction = reinterpret_cast<uint32_t*>(reductionBuffer); extern __shared__ uint32_t shared[]; auto tid = threadIdx.x + blockIdx.x * blockDim.x; shared[threadIdx.x] = 0; // each thread will compare 2 elements: E and E+1 for (int e = tid; e < length - 1; e += blockDim.x * gridDim.x) { auto val0 = x[shape::getIndexOffset(e, xShapeInfo)]; auto val1 = x[shape::getIndexOffset(e+1, xShapeInfo)]; bool v = false; if (isStrict) v = val1 > val0; else v = val1 >= val0; // store comparison result in shared memory shared[threadIdx.x] += v ? 0 : 1; } __syncthreads(); // aggregate sums in shared memory for (uint activeThreads = blockDim.x / 2; activeThreads > 0; activeThreads /= 2) { if (threadIdx.x < activeThreads) shared[threadIdx.x] += shared[threadIdx.x + activeThreads]; __syncthreads(); } // store over the grid if we have more than 1 block if (gridDim.x > 1) { auto tc = reinterpret_cast<unsigned int *>(reductionBuffer); __shared__ bool amLast; tid = threadIdx.x; if (threadIdx.x == 0) reduction[blockIdx.x] = shared[0]; __threadfence(); __syncthreads(); if (threadIdx.x == 0) { unsigned int ticket = atomicInc(&tc[16384], gridDim.x); amLast = (ticket == gridDim.x - 1); } __syncthreads(); if (amLast) { tc[16384] = 0; shared[threadIdx.x] = 0; for (int i = threadIdx.x; i < gridDim.x; i += blockDim.x) shared[threadIdx.x] += reduction[i]; __syncthreads(); for (uint activeThreads = blockDim.x / 2; activeThreads > 0; activeThreads /= 2) { if (threadIdx.x < activeThreads) shared[threadIdx.x] += shared[threadIdx.x + activeThreads]; __syncthreads(); } __syncthreads(); if (threadIdx.x == 0) { z[0] = shared[0] == 0; } } } else { // if we have only 1 block, we just store results right away if (threadIdx.x == 0) { auto tc = reinterpret_cast<unsigned int*>(reductionBuffer); tc[16384] = 0; z[0] = shared[0] == 0; } } } template<typename T> static void _compare_elem(sd::LaunchContext * context, NDArray *input, bool isStrictlyIncreasing, bool& output) { auto z = NDArrayFactory::create<bool>(false, context); const int numThreads = 256; const int numBlocks = sd::math::nd4j_min<int>(128, sd::math::nd4j_max<int>(1, input->lengthOf() / numThreads)); hipLaunchKernelGGL(( comparator<T>), dim3(numBlocks), dim3(numThreads), numThreads * 4 + 1024, *context->getCudaStream(), input->specialBuffer(), input->specialShapeInfo(), input->lengthOf(), isStrictlyIncreasing, context->getReductionPointer(), 
reinterpret_cast<bool *>(z.specialBuffer())); z.tickWriteDevice(); sd::DebugHelper::checkErrorCode(context->getCudaStream(), "is_strictly_increasing"); output = z.e<bool>(0); } void compare_elem(sd::LaunchContext * context, NDArray *input, bool isStrictlyIncreasing, bool& output) { auto xType = input->dataType(); input->syncToDevice(); BUILD_SINGLE_SELECTOR(xType, _compare_elem, (context, input, isStrictlyIncreasing, output), LIBND4J_TYPES); } BUILD_SINGLE_TEMPLATE(template void _compare_elem, (sd::LaunchContext * context, NDArray *A, bool isStrictlyIncreasing, bool& output);, LIBND4J_TYPES); } } }
c2bbbc3cd9e290ee964a27e8f0fbe21763f73c4a.cu
/*******************************************************************************
 * Copyright (c) 2015-2018 Skymind, Inc.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

#include <ops/declarable/helpers/compare_elem.h>

namespace sd {
    namespace ops {
        namespace helpers {

            template <typename T>
            static _CUDA_G void comparator(void *vx, const Nd4jLong *xShapeInfo, Nd4jLong length, const bool isStrict, void *reductionBuffer, bool *z) {
                auto x = reinterpret_cast<T*>(vx);
                auto reduction = reinterpret_cast<uint32_t*>(reductionBuffer);

                extern __shared__ uint32_t shared[];
                auto tid = threadIdx.x + blockIdx.x * blockDim.x;

                shared[threadIdx.x] = 0;

                // each thread will compare 2 elements: E and E+1
                for (int e = tid; e < length - 1; e += blockDim.x * gridDim.x) {
                    auto val0 = x[shape::getIndexOffset(e, xShapeInfo)];
                    auto val1 = x[shape::getIndexOffset(e+1, xShapeInfo)];

                    bool v = false;
                    if (isStrict)
                        v = val1 > val0;
                    else
                        v = val1 >= val0;

                    // store comparison result in shared memory
                    shared[threadIdx.x] += v ? 0 : 1;
                }
                __syncthreads();

                // aggregate sums in shared memory
                for (uint activeThreads = blockDim.x / 2; activeThreads > 0; activeThreads /= 2) {
                    if (threadIdx.x < activeThreads)
                        shared[threadIdx.x] += shared[threadIdx.x + activeThreads];
                    __syncthreads();
                }

                // store over the grid if we have more than 1 block
                if (gridDim.x > 1) {
                    auto tc = reinterpret_cast<unsigned int *>(reductionBuffer);
                    __shared__ bool amLast;

                    tid = threadIdx.x;
                    if (threadIdx.x == 0)
                        reduction[blockIdx.x] = shared[0];

                    __threadfence();
                    __syncthreads();

                    if (threadIdx.x == 0) {
                        unsigned int ticket = atomicInc(&tc[16384], gridDim.x);
                        amLast = (ticket == gridDim.x - 1);
                    }
                    __syncthreads();

                    if (amLast) {
                        tc[16384] = 0;
                        shared[threadIdx.x] = 0;

                        for (int i = threadIdx.x; i < gridDim.x; i += blockDim.x)
                            shared[threadIdx.x] += reduction[i];

                        __syncthreads();

                        for (uint activeThreads = blockDim.x / 2; activeThreads > 0; activeThreads /= 2) {
                            if (threadIdx.x < activeThreads)
                                shared[threadIdx.x] += shared[threadIdx.x + activeThreads];
                            __syncthreads();
                        }

                        __syncthreads();

                        if (threadIdx.x == 0) {
                            z[0] = shared[0] == 0;
                        }
                    }
                } else {
                    // if we have only 1 block, we just store results right away
                    if (threadIdx.x == 0) {
                        auto tc = reinterpret_cast<unsigned int*>(reductionBuffer);
                        tc[16384] = 0;
                        z[0] = shared[0] == 0;
                    }
                }
            }

            template<typename T>
            static void _compare_elem(sd::LaunchContext * context, NDArray *input, bool isStrictlyIncreasing, bool& output) {
                auto z = NDArrayFactory::create<bool>(false, context);

                const int numThreads = 256;
                const int numBlocks = sd::math::nd4j_min<int>(128, sd::math::nd4j_max<int>(1, input->lengthOf() / numThreads));

                comparator<T><<<numBlocks, numThreads, numThreads * 4 + 1024, *context->getCudaStream()>>>(input->specialBuffer(), input->specialShapeInfo(), input->lengthOf(), isStrictlyIncreasing, context->getReductionPointer(), reinterpret_cast<bool *>(z.specialBuffer()));

                z.tickWriteDevice();
                sd::DebugHelper::checkErrorCode(context->getCudaStream(), "is_strictly_increasing");

                output = z.e<bool>(0);
            }

            void compare_elem(sd::LaunchContext * context, NDArray *input, bool isStrictlyIncreasing, bool& output) {
                auto xType = input->dataType();
                input->syncToDevice();

                BUILD_SINGLE_SELECTOR(xType, _compare_elem, (context, input, isStrictlyIncreasing, output), LIBND4J_TYPES);
            }

            BUILD_SINGLE_TEMPLATE(template void _compare_elem, (sd::LaunchContext * context, NDArray *A, bool isStrictlyIncreasing, bool& output);, LIBND4J_TYPES);
        }
    }
}
c184c3c38625f47096f4c1e7ddcd013149926db9.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <math.h> #include <unistd.h> #include <sys/time.h> //#if __DEVICE_EMULATION__ #define DEBUG_SYNC __syncthreads(); //#else //#define DEBUG_SYNC //#endif #if (__CUDA_ARCH__ < 200) #define int_mult(x,y) __mul24(x,y) #else #define int_mult(x,y) x*y #endif #define inf 0x7f800000 const int blockSize1 = 4096/2; /*const int blockSize2 = 8192; const int blockSize3 = 16384; const int blockSize4 = 32768; const int blockSize5 = 65536;*/ const int threads = 64; unsigned long long int get_clock() { struct timeval tv; gettimeofday(&tv, NULL); return (unsigned long long int)tv.tv_usec + 1000000*tv.tv_sec; } __device__ void warp_reduce_max(float smem[64]) { smem[threadIdx.x] = smem[threadIdx.x+32] > smem[threadIdx.x] ? smem[threadIdx.x+32] : smem[threadIdx.x]; DEBUG_SYNC; smem[threadIdx.x] = smem[threadIdx.x+16] > smem[threadIdx.x] ? smem[threadIdx.x+16] : smem[threadIdx.x]; DEBUG_SYNC; smem[threadIdx.x] = smem[threadIdx.x+8] > smem[threadIdx.x] ? smem[threadIdx.x+8] : smem[threadIdx.x]; DEBUG_SYNC; smem[threadIdx.x] = smem[threadIdx.x+4] > smem[threadIdx.x] ? smem[threadIdx.x+4] : smem[threadIdx.x]; DEBUG_SYNC; smem[threadIdx.x] = smem[threadIdx.x+2] > smem[threadIdx.x] ? smem[threadIdx.x+2] : smem[threadIdx.x]; DEBUG_SYNC; smem[threadIdx.x] = smem[threadIdx.x+1] > smem[threadIdx.x] ? smem[threadIdx.x+1] : smem[threadIdx.x]; DEBUG_SYNC; } __device__ void warp_reduce_min( float smem[64]) { smem[threadIdx.x] = smem[threadIdx.x+32] < smem[threadIdx.x] ? smem[threadIdx.x+32] : smem[threadIdx.x]; DEBUG_SYNC; smem[threadIdx.x] = smem[threadIdx.x+16] < smem[threadIdx.x] ? smem[threadIdx.x+16] : smem[threadIdx.x]; DEBUG_SYNC; smem[threadIdx.x] = smem[threadIdx.x+8] < smem[threadIdx.x] ? smem[threadIdx.x+8] : smem[threadIdx.x]; DEBUG_SYNC; smem[threadIdx.x] = smem[threadIdx.x+4] < smem[threadIdx.x] ? smem[threadIdx.x+4] : smem[threadIdx.x]; DEBUG_SYNC; smem[threadIdx.x] = smem[threadIdx.x+2] < smem[threadIdx.x] ? smem[threadIdx.x+2] : smem[threadIdx.x]; DEBUG_SYNC; smem[threadIdx.x] = smem[threadIdx.x+1] < smem[threadIdx.x] ? smem[threadIdx.x+1] : smem[threadIdx.x]; DEBUG_SYNC; } template<int threads> __global__ void find_min_max_dynamic(float* in, float* out, int n, int start_adr, int num_blocks) { __shared__ float smem_min[64]; __shared__ float smem_max[64]; int tid = threadIdx.x + start_adr; float max = -inf; float min = inf; float val; // tail part int mult = 0; for(int i = 1; mult + tid < n; i++) { val = in[tid + mult]; min = val < min ? val : min; max = val > max ? val : max; mult = int_mult(i,threads); } // previously reduced MIN part mult = 0; int i; for(i = 1; mult+threadIdx.x < num_blocks; i++) { val = out[threadIdx.x + mult]; min = val < min ? val : min; mult = int_mult(i,threads); } // MAX part for(; mult+threadIdx.x < num_blocks*2; i++) { val = out[threadIdx.x + mult]; max = val > max ? 
val : max; mult = int_mult(i,threads); } if(threads == 32) { smem_min[threadIdx.x+32] = 0.0f; smem_max[threadIdx.x+32] = 0.0f; } smem_min[threadIdx.x] = min; smem_max[threadIdx.x] = max; __syncthreads(); if(threadIdx.x < 32) { warp_reduce_min(smem_min); warp_reduce_max(smem_max); } if(threadIdx.x == 0) { out[blockIdx.x] = smem_min[threadIdx.x]; // out[0] == ans out[blockIdx.x + gridDim.x] = smem_max[threadIdx.x]; } } template<int els_per_block, int threads> __global__ void find_min_max(float* in, float* out) { __shared__ float smem_min[64]; __shared__ float smem_max[64]; int tid = threadIdx.x + blockIdx.x*els_per_block; float max = -inf; float min = inf; float val; const int iters = els_per_block/threads; #pragma unroll for(int i = 0; i < iters; i++) { val = in[tid + i*threads]; min = val < min ? val : min; max = val > max ? val : max; } if(threads == 32) { smem_min[threadIdx.x+32] = 0.0f; smem_max[threadIdx.x+32] = 0.0f; } smem_min[threadIdx.x] = min; smem_max[threadIdx.x] = max; __syncthreads(); if(threadIdx.x < 32) { warp_reduce_min(smem_min); warp_reduce_max(smem_max); } if(threadIdx.x == 0) { out[blockIdx.x] = smem_min[threadIdx.x]; // out[0] == ans out[blockIdx.x + gridDim.x] = smem_max[threadIdx.x]; } } float cpu_min(float* in, int num_els) { float min = inf; for(int i = 0; i < num_els; i++) min = in[i] < min ? in[i] : min; return min; } float cpu_max(float* in, int num_els) { float max = -inf; for(int i = 0; i < num_els; i++) max = in[i] > max ? in[i] : max; return max; } void findBlockSize(int* whichSize, int* num_el) { const float pretty_big_number = 24.0f*1024.0f*1024.0f; float ratio = float((*num_el))/pretty_big_number; if(ratio > 0.8f) (*whichSize) = 5; else if(ratio > 0.6f) (*whichSize) = 4; else if(ratio > 0.4f) (*whichSize) = 3; else if(ratio > 0.2f) (*whichSize) = 2; else (*whichSize) = 1; } void compute_reduction(float* d_in, float* d_out, int num_els) { int whichSize = -1; findBlockSize(&whichSize,&num_els); //whichSize = 5; int block_size = powf(2,whichSize-1)*blockSize1; int num_blocks = num_els/block_size; int tail = num_els - num_blocks*block_size; int start_adr = num_els - tail; if(whichSize == 1) hipLaunchKernelGGL(( find_min_max<blockSize1,threads>), dim3(num_blocks), dim3(threads), 0, 0, d_in, d_out); else if(whichSize == 2) hipLaunchKernelGGL(( find_min_max<blockSize1*2,threads>), dim3(num_blocks), dim3(threads), 0, 0, d_in, d_out); else if(whichSize == 3) hipLaunchKernelGGL(( find_min_max<blockSize1*4,threads>), dim3(num_blocks), dim3(threads), 0, 0, d_in, d_out); else if(whichSize == 4) hipLaunchKernelGGL(( find_min_max<blockSize1*8,threads>), dim3(num_blocks), dim3(threads), 0, 0, d_in, d_out); else hipLaunchKernelGGL(( find_min_max<blockSize1*16,threads>), dim3(num_blocks), dim3(threads), 0, 0, d_in, d_out); hipLaunchKernelGGL(( find_min_max_dynamic<threads>), dim3(1), dim3(threads), 0, 0, d_in, d_out, num_els, start_adr, num_blocks); } unsigned long long int my_min_max_test(int num_els) { // timers unsigned long long int start; unsigned long long int delta; int testIterations = 100; int size = num_els*sizeof(float); float* d_in; float* d_out; float* d_warm1; float* d_warm2; float* in = (float*)malloc(size); float* out = (float*)malloc(size); for(int i = 0; i < num_els; i++) { in[i] = rand()&1; } in[1024] = 34.0f; in[333] = 55.0f; in[23523] = -42.0f; hipMalloc((void**)&d_in, size); hipMalloc((void**)&d_out, size); hipMalloc((void**)&d_warm1, 1024*sizeof(float)); hipMalloc((void**)&d_warm2, 1024*sizeof(float)); hipMemcpy(d_in, in, size, 
hipMemcpyHostToDevice); ////////// /// warmup ////////// hipLaunchKernelGGL(( find_min_max<32,threads>), dim3(32), dim3(32), 0, 0, d_warm1, d_warm2); hipDeviceSynchronize(); ///// // end warmup ///// //time it start = get_clock(); ////////////// // real reduce ///////////// for(int i = 0; i < testIterations; i++) compute_reduction(d_in, d_out, num_els); hipDeviceSynchronize(); delta = get_clock() - start; float dt = float(delta)/float(testIterations); hipMemcpy(out, d_out, size, hipMemcpyDeviceToHost); // need not be SIZE! (just 2 elements) float throughput = num_els*sizeof(float)*0.001f/(dt); int tail = num_els - (num_els/blockSize1)*blockSize1; printf(" %7.0d \t %0.2f \t\t %0.2f % \t %0.1f \t\t %s \n", num_els, throughput, (throughput/70.6f)*100.0f,dt, (cpu_min(in,num_els) == out[0] && cpu_max(in,num_els) == out[1]) ? "Pass" : "Fail"); //printf("\n min: %0.3f \n", out[0]); //printf("\n max: %0.3f \n", out[1]); hipFree(d_in); hipFree(d_out); hipFree(d_warm1); hipFree(d_warm2); free(in); free(out); //system("pause"); return delta; } int main(int argc, char* argv[]) { printf(" GTS250 @ 70.6 GB/s - Finding min and max"); printf("\n N \t\t [GB/s] \t [perc] \t [usec] \t test \n"); #pragma unroll for(int i = 1024*1024; i <= 32*1024*1024; i=i*2) { my_min_max_test(i); } printf("\n Non-base 2 tests! \n"); printf("\n N \t\t [GB/s] \t [perc] \t [usec] \t test \n"); // just some large numbers.... my_min_max_test(14*1024*1024+6746); my_min_max_test(12*1024*1024+8929); my_min_max_test(18*1024*1024+2891); my_min_max_test(13*1024*1024+7924); my_min_max_test(11*1024*1024+9485); my_min_max_test(19*1024*1024+9425); for(int i = 0; i < 4; i++) { float ratio = float(rand())/float(RAND_MAX); ratio = ratio >= 0 ? ratio : -ratio; int big_num = ratio*18*1e6; my_min_max_test(big_num); } return 0; }
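// Illustrative sketch, not part of the file above. warp_reduce_min/max rely on
// intra-warp lockstep (and, with DEBUG_SYNC defined as __syncthreads(), call a
// block-wide barrier from inside the divergent `if (threadIdx.x < 32)` branch);
// neither pattern is guaranteed on Volta and newer GPUs. The same final stage
// can be written with warp shuffles, assuming blockDim.x is a multiple of 32:
#include <cfloat>
#include <cuda_runtime.h>

__inline__ __device__ float warpReduceMax(float v) {
    for (int offset = 16; offset > 0; offset >>= 1)
        v = fmaxf(v, __shfl_down_sync(0xffffffff, v, offset));
    return v;                                   // lane 0 holds the warp maximum
}

__global__ void blockMax(const float* in, float* out, int n) {
    __shared__ float warpMax[32];               // one slot per warp (up to 1024 threads)
    float v = -FLT_MAX;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
         i += gridDim.x * blockDim.x)
        v = fmaxf(v, in[i]);
    v = warpReduceMax(v);
    if ((threadIdx.x & 31) == 0)
        warpMax[threadIdx.x >> 5] = v;
    __syncthreads();
    if (threadIdx.x < 32) {
        int nWarps = (blockDim.x + 31) >> 5;
        v = (threadIdx.x < nWarps) ? warpMax[threadIdx.x] : -FLT_MAX;
        v = warpReduceMax(v);
        if (threadIdx.x == 0)
            out[blockIdx.x] = v;                // per-block maximum
    }
}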
c184c3c38625f47096f4c1e7ddcd013149926db9.cu
#include <stdio.h> #include <stdlib.h> #include <cuda.h> #include <math.h> #include <unistd.h> #include <sys/time.h> //#if __DEVICE_EMULATION__ #define DEBUG_SYNC __syncthreads(); //#else //#define DEBUG_SYNC //#endif #if (__CUDA_ARCH__ < 200) #define int_mult(x,y) __mul24(x,y) #else #define int_mult(x,y) x*y #endif #define inf 0x7f800000 const int blockSize1 = 4096/2; /*const int blockSize2 = 8192; const int blockSize3 = 16384; const int blockSize4 = 32768; const int blockSize5 = 65536;*/ const int threads = 64; unsigned long long int get_clock() { struct timeval tv; gettimeofday(&tv, NULL); return (unsigned long long int)tv.tv_usec + 1000000*tv.tv_sec; } __device__ void warp_reduce_max(float smem[64]) { smem[threadIdx.x] = smem[threadIdx.x+32] > smem[threadIdx.x] ? smem[threadIdx.x+32] : smem[threadIdx.x]; DEBUG_SYNC; smem[threadIdx.x] = smem[threadIdx.x+16] > smem[threadIdx.x] ? smem[threadIdx.x+16] : smem[threadIdx.x]; DEBUG_SYNC; smem[threadIdx.x] = smem[threadIdx.x+8] > smem[threadIdx.x] ? smem[threadIdx.x+8] : smem[threadIdx.x]; DEBUG_SYNC; smem[threadIdx.x] = smem[threadIdx.x+4] > smem[threadIdx.x] ? smem[threadIdx.x+4] : smem[threadIdx.x]; DEBUG_SYNC; smem[threadIdx.x] = smem[threadIdx.x+2] > smem[threadIdx.x] ? smem[threadIdx.x+2] : smem[threadIdx.x]; DEBUG_SYNC; smem[threadIdx.x] = smem[threadIdx.x+1] > smem[threadIdx.x] ? smem[threadIdx.x+1] : smem[threadIdx.x]; DEBUG_SYNC; } __device__ void warp_reduce_min( float smem[64]) { smem[threadIdx.x] = smem[threadIdx.x+32] < smem[threadIdx.x] ? smem[threadIdx.x+32] : smem[threadIdx.x]; DEBUG_SYNC; smem[threadIdx.x] = smem[threadIdx.x+16] < smem[threadIdx.x] ? smem[threadIdx.x+16] : smem[threadIdx.x]; DEBUG_SYNC; smem[threadIdx.x] = smem[threadIdx.x+8] < smem[threadIdx.x] ? smem[threadIdx.x+8] : smem[threadIdx.x]; DEBUG_SYNC; smem[threadIdx.x] = smem[threadIdx.x+4] < smem[threadIdx.x] ? smem[threadIdx.x+4] : smem[threadIdx.x]; DEBUG_SYNC; smem[threadIdx.x] = smem[threadIdx.x+2] < smem[threadIdx.x] ? smem[threadIdx.x+2] : smem[threadIdx.x]; DEBUG_SYNC; smem[threadIdx.x] = smem[threadIdx.x+1] < smem[threadIdx.x] ? smem[threadIdx.x+1] : smem[threadIdx.x]; DEBUG_SYNC; } template<int threads> __global__ void find_min_max_dynamic(float* in, float* out, int n, int start_adr, int num_blocks) { __shared__ float smem_min[64]; __shared__ float smem_max[64]; int tid = threadIdx.x + start_adr; float max = -inf; float min = inf; float val; // tail part int mult = 0; for(int i = 1; mult + tid < n; i++) { val = in[tid + mult]; min = val < min ? val : min; max = val > max ? val : max; mult = int_mult(i,threads); } // previously reduced MIN part mult = 0; int i; for(i = 1; mult+threadIdx.x < num_blocks; i++) { val = out[threadIdx.x + mult]; min = val < min ? val : min; mult = int_mult(i,threads); } // MAX part for(; mult+threadIdx.x < num_blocks*2; i++) { val = out[threadIdx.x + mult]; max = val > max ? 
val : max; mult = int_mult(i,threads); } if(threads == 32) { smem_min[threadIdx.x+32] = 0.0f; smem_max[threadIdx.x+32] = 0.0f; } smem_min[threadIdx.x] = min; smem_max[threadIdx.x] = max; __syncthreads(); if(threadIdx.x < 32) { warp_reduce_min(smem_min); warp_reduce_max(smem_max); } if(threadIdx.x == 0) { out[blockIdx.x] = smem_min[threadIdx.x]; // out[0] == ans out[blockIdx.x + gridDim.x] = smem_max[threadIdx.x]; } } template<int els_per_block, int threads> __global__ void find_min_max(float* in, float* out) { __shared__ float smem_min[64]; __shared__ float smem_max[64]; int tid = threadIdx.x + blockIdx.x*els_per_block; float max = -inf; float min = inf; float val; const int iters = els_per_block/threads; #pragma unroll for(int i = 0; i < iters; i++) { val = in[tid + i*threads]; min = val < min ? val : min; max = val > max ? val : max; } if(threads == 32) { smem_min[threadIdx.x+32] = 0.0f; smem_max[threadIdx.x+32] = 0.0f; } smem_min[threadIdx.x] = min; smem_max[threadIdx.x] = max; __syncthreads(); if(threadIdx.x < 32) { warp_reduce_min(smem_min); warp_reduce_max(smem_max); } if(threadIdx.x == 0) { out[blockIdx.x] = smem_min[threadIdx.x]; // out[0] == ans out[blockIdx.x + gridDim.x] = smem_max[threadIdx.x]; } } float cpu_min(float* in, int num_els) { float min = inf; for(int i = 0; i < num_els; i++) min = in[i] < min ? in[i] : min; return min; } float cpu_max(float* in, int num_els) { float max = -inf; for(int i = 0; i < num_els; i++) max = in[i] > max ? in[i] : max; return max; } void findBlockSize(int* whichSize, int* num_el) { const float pretty_big_number = 24.0f*1024.0f*1024.0f; float ratio = float((*num_el))/pretty_big_number; if(ratio > 0.8f) (*whichSize) = 5; else if(ratio > 0.6f) (*whichSize) = 4; else if(ratio > 0.4f) (*whichSize) = 3; else if(ratio > 0.2f) (*whichSize) = 2; else (*whichSize) = 1; } void compute_reduction(float* d_in, float* d_out, int num_els) { int whichSize = -1; findBlockSize(&whichSize,&num_els); //whichSize = 5; int block_size = powf(2,whichSize-1)*blockSize1; int num_blocks = num_els/block_size; int tail = num_els - num_blocks*block_size; int start_adr = num_els - tail; if(whichSize == 1) find_min_max<blockSize1,threads><<< num_blocks, threads>>>(d_in, d_out); else if(whichSize == 2) find_min_max<blockSize1*2,threads><<< num_blocks, threads>>>(d_in, d_out); else if(whichSize == 3) find_min_max<blockSize1*4,threads><<< num_blocks, threads>>>(d_in, d_out); else if(whichSize == 4) find_min_max<blockSize1*8,threads><<< num_blocks, threads>>>(d_in, d_out); else find_min_max<blockSize1*16,threads><<< num_blocks, threads>>>(d_in, d_out); find_min_max_dynamic<threads><<< 1, threads>>>(d_in, d_out, num_els, start_adr, num_blocks); } unsigned long long int my_min_max_test(int num_els) { // timers unsigned long long int start; unsigned long long int delta; int testIterations = 100; int size = num_els*sizeof(float); float* d_in; float* d_out; float* d_warm1; float* d_warm2; float* in = (float*)malloc(size); float* out = (float*)malloc(size); for(int i = 0; i < num_els; i++) { in[i] = rand()&1; } in[1024] = 34.0f; in[333] = 55.0f; in[23523] = -42.0f; cudaMalloc((void**)&d_in, size); cudaMalloc((void**)&d_out, size); cudaMalloc((void**)&d_warm1, 1024*sizeof(float)); cudaMalloc((void**)&d_warm2, 1024*sizeof(float)); cudaMemcpy(d_in, in, size, cudaMemcpyHostToDevice); ////////// /// warmup ////////// find_min_max<32,threads><<< 32, 32>>>(d_warm1, d_warm2); cudaThreadSynchronize(); ///// // end warmup ///// //time it start = get_clock(); ////////////// // real reduce 
///////////// for(int i = 0; i < testIterations; i++) compute_reduction(d_in, d_out, num_els); cudaThreadSynchronize(); delta = get_clock() - start; float dt = float(delta)/float(testIterations); cudaMemcpy(out, d_out, size, cudaMemcpyDeviceToHost); // need not be SIZE! (just 2 elements) float throughput = num_els*sizeof(float)*0.001f/(dt); int tail = num_els - (num_els/blockSize1)*blockSize1; printf(" %7.0d \t %0.2f \t\t %0.2f % \t %0.1f \t\t %s \n", num_els, throughput, (throughput/70.6f)*100.0f,dt, (cpu_min(in,num_els) == out[0] && cpu_max(in,num_els) == out[1]) ? "Pass" : "Fail"); //printf("\n min: %0.3f \n", out[0]); //printf("\n max: %0.3f \n", out[1]); cudaFree(d_in); cudaFree(d_out); cudaFree(d_warm1); cudaFree(d_warm2); free(in); free(out); //system("pause"); return delta; } int main(int argc, char* argv[]) { printf(" GTS250 @ 70.6 GB/s - Finding min and max"); printf("\n N \t\t [GB/s] \t [perc] \t [usec] \t test \n"); #pragma unroll for(int i = 1024*1024; i <= 32*1024*1024; i=i*2) { my_min_max_test(i); } printf("\n Non-base 2 tests! \n"); printf("\n N \t\t [GB/s] \t [perc] \t [usec] \t test \n"); // just some large numbers.... my_min_max_test(14*1024*1024+6746); my_min_max_test(12*1024*1024+8929); my_min_max_test(18*1024*1024+2891); my_min_max_test(13*1024*1024+7924); my_min_max_test(11*1024*1024+9485); my_min_max_test(19*1024*1024+9425); for(int i = 0; i < 4; i++) { float ratio = float(rand())/float(RAND_MAX); ratio = ratio >= 0 ? ratio : -ratio; int big_num = ratio*18*1e6; my_min_max_test(big_num); } return 0; }
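// Note on both versions above: `inf` is defined as the integer literal 0x7f800000,
// so `float min = inf;` initializes min to 2139095040.0f (the integer converted to
// float), not to IEEE +infinity. That still works as a sentinel for this test data;
// true infinities can be produced explicitly, as in this illustrative snippet:
#include <cmath>              // INFINITY for host code
#include <cuda_runtime.h>

__global__ void initMinMaxSentinels(float* mn, float* mx) {
    if (blockIdx.x == 0 && threadIdx.x == 0) {
        *mn =  __int_as_float(0x7f800000);   // +inf from its bit pattern
        *mx = -__int_as_float(0x7f800000);   // -inf
    }
}
// Host side, the standard macro does the same job: float lo = INFINITY, hi = -INFINITY;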
f8066125ed5eee1a77b5d9defecd4ec944c17bec.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author GS <[email protected]> // #include <ops/declarable/helpers/segment.h> #include <ops/declarable/helpers/segment_common.h> #include <array/NDArrayFactory.h> #include <helpers/ShapeUtils.h> #include <helpers/TAD.h> #include <exceptions/cuda_exception.h> #include <helpers/PointersManager.h> #include <helpers/ConstantTadHelper.h> namespace sd { namespace ops { namespace helpers { // -------------------------------------------------------------------------------------------------------------- // // Segment ops linear kernels // -------------------------------------------------------------------------------------------------------------- // template<typename T, typename I> static __global__ void segmentMinLinearKernel(const void *input, const Nd4jLong *inputShape, int *starts, int *lengths, Nd4jLong numOfClasses, void *output, const Nd4jLong *outputShape) { __shared__ T *val; __shared__ Nd4jLong xLen, zLen, zIndex; __shared__ const T *x; __shared__ T *z; __shared__ int threadsPerSegment, start, finish; auto segment = blockIdx.x; if (threadIdx.x == 0) { // threadsPerSegment = (gridDim.x + numOfClasses - 1) / numOfClasses; // segment = blockIdx.x / threadsPerSegment; x = reinterpret_cast<const T *>(input); z = reinterpret_cast<T *>(output); extern __shared__ unsigned char shmem[]; val = reinterpret_cast<T *>(shmem); xLen = shape::length(inputShape); zLen = shape::length(outputShape); if (segment < numOfClasses) { zIndex = shape::getIndexOffset(segment, outputShape); start = starts[segment]; finish = start + lengths[segment]; z[zIndex] = x[shape::getIndexOffset(start, inputShape)]; val[segment] = z[zIndex]; } } __syncthreads(); for (auto e = start + threadIdx.x + 1; e < finish; e += blockDim.x) { auto xIndex = shape::getIndexOffset(e, inputShape); sd::math::atomics::nd4j_atomicMin(&z[zIndex], x[xIndex]); } } // -------------------------------------------------------------------------------------------------------------- // template<typename T, typename I> static __global__ void unsortedSegmentMinLinearKernel(const void *input, const Nd4jLong *inputShape, const void *indices, const Nd4jLong *indicesShape, int *starts, int *lengths, Nd4jLong numOfClasses, void *output, const Nd4jLong *outputShape) { __shared__ T *val; __shared__ Nd4jLong xLen, zLen, segment, zIndex; __shared__ const T *x; __shared__ T *z; __shared__ const I *y; //int threadsPerSegment, start, finish; if (threadIdx.x == 0) { segment = blockIdx.x; x = reinterpret_cast<const T *>(input); z = reinterpret_cast<T *>(output); y = reinterpret_cast<const I *>(indices); xLen = shape::length(inputShape); zLen = shape::length(outputShape); zIndex = shape::getIndexOffset(segment, outputShape); if (lengths[segment] 
> 0) z[zIndex] = x[shape::getIndexOffset(starts[segment], inputShape)]; else z[zIndex] = DataTypeUtils::max<T>(); } __syncthreads(); if (lengths[segment] > 0) for (auto e = threadIdx.x + 1; e < xLen; e += blockDim.x) { auto xIndex = shape::getIndexOffset(e, inputShape); auto yIndex = shape::getIndexOffset(e, indicesShape); if (y[yIndex] == segment) { sd::math::atomics::nd4j_atomicMin(&z[zIndex], x[xIndex]); } } } // -------------------------------------------------------------------------------------------------------------- // // SegmentMin kernel template <typename T, typename I> static __global__ void segmentMinTadKernel(const void* inputBuf, const Nd4jLong* inputShape, const Nd4jLong* inputTads, const Nd4jLong* inputTadOffsets, I* indices, int* starts, int* lengths, Nd4jLong numOfClasses, void* outputBuf, const Nd4jLong* outputShape, const Nd4jLong* outputTads, const Nd4jLong* outputTadOffsets) { __shared__ T* val; __shared__ Nd4jLong len, zIndex, total; __shared__ T* z; __shared__ int threadsPerSegment, start, finish; auto segment = indices[blockIdx.x]; // / threadsPerSegment; if (threadIdx.x == 0) { z = reinterpret_cast<T*>(outputBuf) + outputTadOffsets[segment]; len = shape::length(inputTads); start = starts[segment]; finish = start + lengths[segment]; total = shape::sizeAt(inputShape, 0); } __syncthreads(); auto idx = blockIdx.x; if (blockIdx.x <= total) { auto x = reinterpret_cast<const T *>(inputBuf) + inputTadOffsets[idx]; if (blockIdx.x == start) { for (auto e = threadIdx.x; e < len; e += blockDim.x) { auto xIndex = shape::getIndexOffset(e, inputTads); auto zIndex = shape::getIndexOffset(e, outputTads); sd::math::atomics::nd4j_atomicMin(&z[zIndex], x[xIndex]); } } else { for (auto e = threadIdx.x; e < len; e += blockDim.x) { auto xIndex = shape::getIndexOffset(e, inputTads); auto zIndex = shape::getIndexOffset(e, outputTads); // if (lengths[indices[idx]]) sd::math::atomics::nd4j_atomicMin(&z[zIndex], x[xIndex]); } } } } // -------------------------------------------------------------------------------------------------------------- // // segmen min template <typename T, typename I> static void segmentMinFunctor_(LaunchContext* context, NDArray* input, NDArray* indices, NDArray* output) { auto stream = context->getCudaStream(); Nd4jLong numClasses = indices->e<Nd4jLong>(indices->lengthOf() - 1) + 1; auto classesRangesLens = NDArrayFactory::create<int>('c', {numClasses}, context); auto classesRangesBegs = NDArrayFactory::create<int>('c', {numClasses}, context); output->assign(DataTypeUtils::infOrMax<T>()); classesRangesBegs.assign(indices->lengthOf()); classesRangesLens.assign(0); fillUpSegments(indices, numClasses, classesRangesBegs, classesRangesLens); NDArray::prepareSpecialUse({output}, {input, indices, &classesRangesBegs, &classesRangesLens}); int* begins = reinterpret_cast<int*>(classesRangesBegs.specialBuffer()); int* lengths = reinterpret_cast<int*>(classesRangesLens.specialBuffer()); if (input->isVector()) { hipLaunchKernelGGL(( segmentMinLinearKernel<T,I>), dim3(numClasses), dim3(input->lengthOf()), numClasses * 32 + 32, *stream, input->specialBuffer(), input->specialShapeInfo(), begins, lengths, numClasses, output->specialBuffer(), output->specialShapeInfo()); } else { std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {0}); auto packX = sd::ConstantTadHelper::getInstance()->tadForDimensions(input->shapeInfo(), dimensions); auto packZ = sd::ConstantTadHelper::getInstance()->tadForDimensions(output->shapeInfo(), dimensions); auto inputTads = 
packX.specialShapeInfo(); auto inputTadOffsets = packX.specialOffsets(); auto outputTads = packZ.specialShapeInfo(); auto outputTadOffsets = packZ.specialOffsets(); hipLaunchKernelGGL(( segmentMinTadKernel<T,I>), dim3(input->sizeAt(0)), dim3(512), 2048, *stream, input->specialBuffer(), input->specialShapeInfo(), inputTads, inputTadOffsets, reinterpret_cast<I*>(indices->specialBuffer()), begins, lengths, numClasses, output->specialBuffer(), output->specialShapeInfo(), outputTads, outputTadOffsets); } NDArray::registerSpecialUse({output}, {input, indices, &classesRangesBegs, &classesRangesLens}); } // -------------------------------------------------------------------------------------------------------------- // void segmentMinFunctor(sd::LaunchContext* context , NDArray* input, NDArray* indices, NDArray* output) { NDArray::prepareSpecialUse({output}, {input, indices}); output->nullify(); BUILD_DOUBLE_SELECTOR(input->dataType(), indices->dataType(), segmentMinFunctor_, (context, input, indices, output), NUMERIC_TYPES, INDEXING_TYPES); NDArray::registerSpecialUse({output}, {input, indices}); } // -------------------------------------------------------------------------------------------------------------- // template <typename T, typename I> static void unsortedSegmentMinFunctor_(sd::LaunchContext* context, NDArray* input, NDArray* indices, Nd4jLong numOfClasses, NDArray* output) { auto stream = context->getCudaStream(); // NDArray classes = NDArrayFactory::create<int>('c', {numOfClasses, 2}); NDArray classesRangesBegs = NDArrayFactory::create<int>('c', {numOfClasses}, context); NDArray classesRangesLens = NDArrayFactory::create<int>('c', {numOfClasses}, context); // NDArray row = NDArrayFactory::create<int>('c', {1, 2}, {(int)indices->lengthOf(), (int)0}); // classes.applyTrueBroadcast(sd::BroadcastOpsTuple::Assign(), &row, &classes); output->assign(DataTypeUtils::infOrMax<T>()); classesRangesBegs.assign(indices->lengthOf()); classesRangesLens.assign(0); dim3 dims(numOfClasses, indices->lengthOf(), numOfClasses * 32 + 32); // int* classesBuf = reinterpret_cast<int*>(classes.specialBuffer()); fillUpSegments(indices, numOfClasses, classesRangesBegs, classesRangesLens); int* begins = reinterpret_cast<int*>(classesRangesBegs.specialBuffer()); int* lengths = reinterpret_cast<int*>(classesRangesLens.specialBuffer()); NDArray::prepareSpecialUse({output}, {input, indices}); if (input->isVector()) { hipLaunchKernelGGL(( unsortedSegmentMinLinearKernel<T,I>), dim3(dims.x), dim3(dims.y), dims.z, *stream, input->specialBuffer(), input->specialShapeInfo(), indices->specialBuffer(), indices->specialShapeInfo(), begins, lengths, numOfClasses, output->specialBuffer(), output->specialShapeInfo()); } else { output->assign(DataTypeUtils::max<T>()); std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {0}); auto packX = sd::ConstantTadHelper::getInstance()->tadForDimensions(input->shapeInfo(), dimensions); auto packZ = sd::ConstantTadHelper::getInstance()->tadForDimensions(output->shapeInfo(), dimensions); auto inputTads = packX.specialShapeInfo(); auto inputTadOffsets = packX.specialOffsets(); auto outputTads = packZ.specialShapeInfo(); auto outputTadOffsets = packZ.specialOffsets(); dims.x = input->sizeAt(0); hipLaunchKernelGGL(( segmentMinTadKernel<T,I>), dim3(dims.x), dim3(dims.y), dims.z, *stream, input->specialBuffer(), input->specialShapeInfo(), inputTads, inputTadOffsets, reinterpret_cast<I*>(indices->specialBuffer()), begins, lengths, numOfClasses, output->specialBuffer(), 
output->specialShapeInfo(), outputTads, outputTadOffsets); } NDArray::registerSpecialUse({output}, {input, indices}); } // -------------------------------------------------------------------------------------------------------------- // void unsortedSegmentMinFunctor(sd::LaunchContext* context , NDArray* input, NDArray* indices, Nd4jLong numOfClasses, NDArray* output) { NDArray::prepareSpecialUse({output}, {input, indices}); output->nullify(); BUILD_DOUBLE_SELECTOR(input->dataType(), indices->dataType(), unsortedSegmentMinFunctor_, (context, input, indices, numOfClasses, output), NUMERIC_TYPES, INDEXING_TYPES); NDArray::registerSpecialUse({output}, {input, indices}); } template <typename T, typename I> static __global__ void segmentMinBPLinearKernel(const void* inputBuf, const Nd4jLong* inputShape, void* forwardOutput, const Nd4jLong* forwardShape, void* eps, const Nd4jLong* epsShape, const void* indicesBuf, const Nd4jLong* indicesShape, void* outputBuf, const Nd4jLong* outputShape) { __shared__ const T* x; __shared__ T* gradIn; __shared__ T* gradOut; __shared__ const I* y; __shared__ T* z; __shared__ Nd4jLong xLen, gradLen; if (threadIdx.x == 0) { xLen = shape::length(inputShape); x = reinterpret_cast<const T*>(inputBuf); y = reinterpret_cast<const I*>(indicesBuf); z = reinterpret_cast<T*>(outputBuf); gradIn = reinterpret_cast<T*>(forwardOutput); gradOut = reinterpret_cast<T*>(eps); gradLen = shape::length(epsShape); } __syncthreads(); auto start = blockIdx.x * blockDim.x + threadIdx.x; auto step = gridDim.x * blockDim.x; for (auto e = start; e < xLen; e += step) { auto zOffset = shape::getIndexOffset(e, outputShape); auto xOffset = shape::getIndexOffset(e, inputShape); auto yOffset = shape::getIndexOffset(e, indicesShape); auto classIndex = y[yOffset]; auto gradOffsetI = shape::getIndexOffset(classIndex, forwardShape); auto gradOffsetO = shape::getIndexOffset(classIndex, epsShape); if (sd::math::nd4j_abs(gradIn[gradOffsetI] - x[xOffset]) <= T(1.e-6)) { z[zOffset] = gradOut[gradOffsetO]; } } } // -------------------------------------------------------------------------------------------------------------- // template <typename T, typename I> static __global__ void segmentMinBPTadKernel(const void* inputBuf, const Nd4jLong* inputShape, void* forwardOutput, const Nd4jLong* forwardShape, void* eps, const Nd4jLong* epsShape, const void* indicesBuf, const Nd4jLong* indicesShape, void* outputBuf, const Nd4jLong* outputShape, const Nd4jLong* inputTad, const Nd4jLong* inputOffsets, const Nd4jLong* gradInTad, const Nd4jLong* gradInOffsets, const Nd4jLong* gradOutTad, const Nd4jLong* gradOutOffsets, const Nd4jLong* outTad, const Nd4jLong* outOffsets) { __shared__ const T* x; __shared__ T* gradIn; __shared__ T* gradOut; __shared__ const I* y; __shared__ T* z; __shared__ Nd4jLong xLen, yLen, gradLen, currentLen; if (threadIdx.x == 0) { xLen = shape::length(inputShape); x = reinterpret_cast<const T*>(inputBuf); y = reinterpret_cast<const I*>(indicesBuf); z = reinterpret_cast<T*>(outputBuf); yLen = shape::length(indicesShape); gradOut = reinterpret_cast<T*>(eps); gradIn = reinterpret_cast<T*>(forwardOutput); gradLen = shape::length(epsShape); currentLen = shape::length(outTad); } __syncthreads(); for (auto i = blockIdx.x; i < yLen; i += gridDim.x) { auto yIndex = shape::getIndexOffset(i, indicesShape); auto segment = y[yIndex]; auto current = x + inputOffsets[i]; auto currentOut = z + outOffsets[i]; auto in = gradIn + gradInOffsets[segment]; auto outGrad = gradOut + gradOutOffsets[segment]; for (auto e 
= threadIdx.x; e < currentLen; e += blockDim.x) { if (sd::math::nd4j_abs(in[e] - current[e]) <= T(1.e-6)) currentOut[e] = outGrad[e]; } } } // -------------------------------------------------------------------------------------------------------------- // template <typename T, typename I> int segmentMinFunctorBP_(sd::LaunchContext* context , NDArray* input, NDArray* indices, NDArray* gradOut, NDArray* output) { //int numOfClasses = gradOut->sizeAt(0); // if input is a vector: (as if in doc sample) auto stream = context->getCudaStream(); NDArray tempRes(gradOut->ordering(), gradOut->getShapeAsVector(), DataTypeUtils::fromT<T>(), context);//->shapeInfo(), context); segmentMinFunctor_<T, I>(context, input, indices, &tempRes); NDArray::prepareSpecialUse({output}, {input, indices, gradOut, &tempRes}); if (input->isVector()) { Nd4jLong loop_size = input->lengthOf(); auto numOfClasses = gradOut->lengthOf(); //indices->e<Nd4jLong>(loop_size - 1); hipLaunchKernelGGL(( segmentMinBPLinearKernel<T,I>), dim3(gradOut->lengthOf()), dim3(input->lengthOf()), 256, *stream, input->specialBuffer(), input->specialShapeInfo(), tempRes.specialBuffer(), tempRes.specialShapeInfo(), gradOut->specialBuffer(), gradOut->specialShapeInfo(), indices->specialBuffer(), indices->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo()); } else { std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {0}); auto packX = sd::ConstantTadHelper::getInstance()->tadForDimensions(input->shapeInfo(), dimensions); auto packZ = sd::ConstantTadHelper::getInstance()->tadForDimensions(output->shapeInfo(), dimensions); auto packGradIn = sd::ConstantTadHelper::getInstance()->tadForDimensions(tempRes.shapeInfo(), dimensions); auto packGradOut = sd::ConstantTadHelper::getInstance()->tadForDimensions(gradOut->shapeInfo(), dimensions); auto inputTads = packX.specialShapeInfo(); auto inputTadOffsets = packX.specialOffsets(); auto outputTads = packZ.specialShapeInfo(); auto outputTadOffsets = packZ.specialOffsets(); auto gradInTads = packGradIn.specialShapeInfo(); auto gradInTadOffsets = packGradIn.specialOffsets(); auto gradOutTads = packGradOut.specialShapeInfo(); auto gradOutTadOffsets = packGradOut.specialOffsets(); hipLaunchKernelGGL(( segmentMinBPTadKernel<T,I>), dim3(gradOut->lengthOf()), dim3(input->lengthOf()), 256, *stream, input->specialBuffer(), input->specialShapeInfo(), tempRes.specialBuffer(), tempRes.specialShapeInfo(), gradOut->specialBuffer(), gradOut->specialShapeInfo(), indices->specialBuffer(), indices->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo(), inputTads, inputTadOffsets, gradInTads, gradInTadOffsets, gradOutTads, gradOutTadOffsets, outputTads, outputTadOffsets); } NDArray::registerSpecialUse({output}, {input, indices, gradOut, &tempRes}); return Status::OK(); } // -------------------------------------------------------------------------------------------------------------- // // segmen min int segmentMinFunctorBP(sd::LaunchContext* context , NDArray* input, NDArray* indices, NDArray* gradOut, NDArray* output) { NDArray::prepareSpecialUse({output}, {input, indices, gradOut}); BUILD_DOUBLE_SELECTOR(output->dataType(), indices->dataType(), return segmentMinFunctorBP_, (context, input, indices, gradOut, output), FLOAT_TYPES, INDEXING_TYPES); NDArray::registerSpecialUse({output}, {input, indices, gradOut}); } template <typename T, typename I> static int unsortedSegmentMinFunctorBP_(sd::LaunchContext* context, NDArray* input, NDArray* indices, NDArray* gradOut, 
Nd4jLong numOfClasses, NDArray* output) { //int numOfClasses = gradOut->sizeAt(0); // if input is a vector: (as if in doc sample) auto stream = context->getCudaStream(); NDArray tempRes(gradOut->ordering(), gradOut->getShapeAsVector(), DataTypeUtils::fromT<T>(), context);//->shapeInfo(), context); unsortedSegmentMinFunctor_<T, I>(context, input, indices, numOfClasses, &tempRes); NDArray::prepareSpecialUse({output}, {input, indices, gradOut, &tempRes}); if (input->isVector()) { Nd4jLong loop_size = input->lengthOf(); auto numOfClasses = gradOut->lengthOf(); //indices->e<Nd4jLong>(loop_size - 1); hipLaunchKernelGGL(( segmentMinBPLinearKernel<T,I>), dim3(gradOut->lengthOf()), dim3(input->lengthOf()), 256, *stream, input->specialBuffer(), input->specialShapeInfo(), tempRes.specialBuffer(), tempRes.specialShapeInfo(), gradOut->specialBuffer(), gradOut->specialShapeInfo(), indices->specialBuffer(), indices->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo()); } else { std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {0}); auto packX = sd::ConstantTadHelper::getInstance()->tadForDimensions(input->shapeInfo(), dimensions); auto packZ = sd::ConstantTadHelper::getInstance()->tadForDimensions(output->shapeInfo(), dimensions); auto packGradIn = sd::ConstantTadHelper::getInstance()->tadForDimensions(tempRes.shapeInfo(), dimensions); auto packGradOut = sd::ConstantTadHelper::getInstance()->tadForDimensions(gradOut->shapeInfo(), dimensions); auto inputTads = packX.specialShapeInfo(); auto inputTadOffsets = packX.specialOffsets(); auto outputTads = packZ.specialShapeInfo(); auto outputTadOffsets = packZ.specialOffsets(); auto gradInTads = packGradIn.specialShapeInfo(); auto gradInTadOffsets = packGradIn.specialOffsets(); auto gradOutTads = packGradOut.specialShapeInfo(); auto gradOutTadOffsets = packGradOut.specialOffsets(); hipLaunchKernelGGL(( segmentMinBPTadKernel<T,I>), dim3(gradOut->lengthOf()), dim3(input->lengthOf()), 256, *stream, input->specialBuffer(), input->specialShapeInfo(), tempRes.specialBuffer(), tempRes.specialShapeInfo(), gradOut->specialBuffer(), gradOut->specialShapeInfo(), indices->specialBuffer(), indices->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo(), inputTads, inputTadOffsets, gradInTads, gradInTadOffsets, gradOutTads, gradOutTadOffsets, outputTads, outputTadOffsets); } NDArray::registerSpecialUse({output}, {input, indices, gradOut, &tempRes}); return Status::OK(); } // -------------------------------------------------------------------------------------------------------------- // int unsortedSegmentMinFunctorBP(sd::LaunchContext* context , NDArray* input, NDArray* indices, NDArray* gradOut, Nd4jLong numOfClasses, NDArray* output) { NDArray::prepareSpecialUse({output}, {input, indices, gradOut}); BUILD_DOUBLE_SELECTOR(output->dataType(), indices->dataType(), return unsortedSegmentMinFunctorBP_, (context, input, indices, gradOut, numOfClasses, output), FLOAT_TYPES, INDEXING_TYPES); NDArray::registerSpecialUse({output}, {input, indices, gradOut}); } } } }
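// Illustrative sketch, not the libnd4j implementation. CUDA has no native
// atomicMin for floating point, so helpers such as nd4j_atomicMin are typically
// built from atomicCAS; a common formulation plus a minimal vector segment-min
// (output pre-filled with +inf, as segmentMinFunctor_ does) looks like this:
#include <cuda_runtime.h>

__device__ float atomicMinFloat(float* addr, float value) {
    int* addrInt = reinterpret_cast<int*>(addr);
    int old = *addrInt, assumed;
    do {
        assumed = old;
        if (__int_as_float(assumed) <= value)
            break;                                          // already small enough
        old = atomicCAS(addrInt, assumed, __float_as_int(value));
    } while (assumed != old);
    return __int_as_float(old);
}

// One thread per element; indices[i] is the segment id of input element i.
__global__ void segmentMinVector(const float* in, const int* indices,
                                 float* out, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        atomicMinFloat(&out[indices[i]], in[i]);
}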
f8066125ed5eee1a77b5d9defecd4ec944c17bec.cu
/******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author GS <[email protected]> // #include <ops/declarable/helpers/segment.h> #include <ops/declarable/helpers/segment_common.h> #include <array/NDArrayFactory.h> #include <helpers/ShapeUtils.h> #include <helpers/TAD.h> #include <exceptions/cuda_exception.h> #include <helpers/PointersManager.h> #include <helpers/ConstantTadHelper.h> namespace sd { namespace ops { namespace helpers { // -------------------------------------------------------------------------------------------------------------- // // Segment ops linear kernels // -------------------------------------------------------------------------------------------------------------- // template<typename T, typename I> static __global__ void segmentMinLinearKernel(const void *input, const Nd4jLong *inputShape, int *starts, int *lengths, Nd4jLong numOfClasses, void *output, const Nd4jLong *outputShape) { __shared__ T *val; __shared__ Nd4jLong xLen, zLen, zIndex; __shared__ const T *x; __shared__ T *z; __shared__ int threadsPerSegment, start, finish; auto segment = blockIdx.x; if (threadIdx.x == 0) { // threadsPerSegment = (gridDim.x + numOfClasses - 1) / numOfClasses; // segment = blockIdx.x / threadsPerSegment; x = reinterpret_cast<const T *>(input); z = reinterpret_cast<T *>(output); extern __shared__ unsigned char shmem[]; val = reinterpret_cast<T *>(shmem); xLen = shape::length(inputShape); zLen = shape::length(outputShape); if (segment < numOfClasses) { zIndex = shape::getIndexOffset(segment, outputShape); start = starts[segment]; finish = start + lengths[segment]; z[zIndex] = x[shape::getIndexOffset(start, inputShape)]; val[segment] = z[zIndex]; } } __syncthreads(); for (auto e = start + threadIdx.x + 1; e < finish; e += blockDim.x) { auto xIndex = shape::getIndexOffset(e, inputShape); sd::math::atomics::nd4j_atomicMin(&z[zIndex], x[xIndex]); } } // -------------------------------------------------------------------------------------------------------------- // template<typename T, typename I> static __global__ void unsortedSegmentMinLinearKernel(const void *input, const Nd4jLong *inputShape, const void *indices, const Nd4jLong *indicesShape, int *starts, int *lengths, Nd4jLong numOfClasses, void *output, const Nd4jLong *outputShape) { __shared__ T *val; __shared__ Nd4jLong xLen, zLen, segment, zIndex; __shared__ const T *x; __shared__ T *z; __shared__ const I *y; //int threadsPerSegment, start, finish; if (threadIdx.x == 0) { segment = blockIdx.x; x = reinterpret_cast<const T *>(input); z = reinterpret_cast<T *>(output); y = reinterpret_cast<const I *>(indices); xLen = shape::length(inputShape); zLen = shape::length(outputShape); zIndex = shape::getIndexOffset(segment, outputShape); if (lengths[segment] > 0) z[zIndex] = x[shape::getIndexOffset(starts[segment], inputShape)]; else z[zIndex] 
= DataTypeUtils::max<T>(); } __syncthreads(); if (lengths[segment] > 0) for (auto e = threadIdx.x + 1; e < xLen; e += blockDim.x) { auto xIndex = shape::getIndexOffset(e, inputShape); auto yIndex = shape::getIndexOffset(e, indicesShape); if (y[yIndex] == segment) { sd::math::atomics::nd4j_atomicMin(&z[zIndex], x[xIndex]); } } } // -------------------------------------------------------------------------------------------------------------- // // SegmentMin kernel template <typename T, typename I> static __global__ void segmentMinTadKernel(const void* inputBuf, const Nd4jLong* inputShape, const Nd4jLong* inputTads, const Nd4jLong* inputTadOffsets, I* indices, int* starts, int* lengths, Nd4jLong numOfClasses, void* outputBuf, const Nd4jLong* outputShape, const Nd4jLong* outputTads, const Nd4jLong* outputTadOffsets) { __shared__ T* val; __shared__ Nd4jLong len, zIndex, total; __shared__ T* z; __shared__ int threadsPerSegment, start, finish; auto segment = indices[blockIdx.x]; // / threadsPerSegment; if (threadIdx.x == 0) { z = reinterpret_cast<T*>(outputBuf) + outputTadOffsets[segment]; len = shape::length(inputTads); start = starts[segment]; finish = start + lengths[segment]; total = shape::sizeAt(inputShape, 0); } __syncthreads(); auto idx = blockIdx.x; if (blockIdx.x <= total) { auto x = reinterpret_cast<const T *>(inputBuf) + inputTadOffsets[idx]; if (blockIdx.x == start) { for (auto e = threadIdx.x; e < len; e += blockDim.x) { auto xIndex = shape::getIndexOffset(e, inputTads); auto zIndex = shape::getIndexOffset(e, outputTads); sd::math::atomics::nd4j_atomicMin(&z[zIndex], x[xIndex]); } } else { for (auto e = threadIdx.x; e < len; e += blockDim.x) { auto xIndex = shape::getIndexOffset(e, inputTads); auto zIndex = shape::getIndexOffset(e, outputTads); // if (lengths[indices[idx]]) sd::math::atomics::nd4j_atomicMin(&z[zIndex], x[xIndex]); } } } } // -------------------------------------------------------------------------------------------------------------- // // segmen min template <typename T, typename I> static void segmentMinFunctor_(LaunchContext* context, NDArray* input, NDArray* indices, NDArray* output) { auto stream = context->getCudaStream(); Nd4jLong numClasses = indices->e<Nd4jLong>(indices->lengthOf() - 1) + 1; auto classesRangesLens = NDArrayFactory::create<int>('c', {numClasses}, context); auto classesRangesBegs = NDArrayFactory::create<int>('c', {numClasses}, context); output->assign(DataTypeUtils::infOrMax<T>()); classesRangesBegs.assign(indices->lengthOf()); classesRangesLens.assign(0); fillUpSegments(indices, numClasses, classesRangesBegs, classesRangesLens); NDArray::prepareSpecialUse({output}, {input, indices, &classesRangesBegs, &classesRangesLens}); int* begins = reinterpret_cast<int*>(classesRangesBegs.specialBuffer()); int* lengths = reinterpret_cast<int*>(classesRangesLens.specialBuffer()); if (input->isVector()) { segmentMinLinearKernel<T,I><<<numClasses, input->lengthOf(), numClasses * 32 + 32, *stream>>>(input->specialBuffer(), input->specialShapeInfo(), begins, lengths, numClasses, output->specialBuffer(), output->specialShapeInfo()); } else { std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {0}); auto packX = sd::ConstantTadHelper::getInstance()->tadForDimensions(input->shapeInfo(), dimensions); auto packZ = sd::ConstantTadHelper::getInstance()->tadForDimensions(output->shapeInfo(), dimensions); auto inputTads = packX.specialShapeInfo(); auto inputTadOffsets = packX.specialOffsets(); auto outputTads = packZ.specialShapeInfo(); 
auto outputTadOffsets = packZ.specialOffsets(); segmentMinTadKernel<T,I><<<input->sizeAt(0), 512, 2048, *stream>>>(input->specialBuffer(), input->specialShapeInfo(), inputTads, inputTadOffsets, reinterpret_cast<I*>(indices->specialBuffer()), begins, lengths, numClasses, output->specialBuffer(), output->specialShapeInfo(), outputTads, outputTadOffsets); } NDArray::registerSpecialUse({output}, {input, indices, &classesRangesBegs, &classesRangesLens}); } // -------------------------------------------------------------------------------------------------------------- // void segmentMinFunctor(sd::LaunchContext* context , NDArray* input, NDArray* indices, NDArray* output) { NDArray::prepareSpecialUse({output}, {input, indices}); output->nullify(); BUILD_DOUBLE_SELECTOR(input->dataType(), indices->dataType(), segmentMinFunctor_, (context, input, indices, output), NUMERIC_TYPES, INDEXING_TYPES); NDArray::registerSpecialUse({output}, {input, indices}); } // -------------------------------------------------------------------------------------------------------------- // template <typename T, typename I> static void unsortedSegmentMinFunctor_(sd::LaunchContext* context, NDArray* input, NDArray* indices, Nd4jLong numOfClasses, NDArray* output) { auto stream = context->getCudaStream(); // NDArray classes = NDArrayFactory::create<int>('c', {numOfClasses, 2}); NDArray classesRangesBegs = NDArrayFactory::create<int>('c', {numOfClasses}, context); NDArray classesRangesLens = NDArrayFactory::create<int>('c', {numOfClasses}, context); // NDArray row = NDArrayFactory::create<int>('c', {1, 2}, {(int)indices->lengthOf(), (int)0}); // classes.applyTrueBroadcast(sd::BroadcastOpsTuple::Assign(), &row, &classes); output->assign(DataTypeUtils::infOrMax<T>()); classesRangesBegs.assign(indices->lengthOf()); classesRangesLens.assign(0); dim3 dims(numOfClasses, indices->lengthOf(), numOfClasses * 32 + 32); // int* classesBuf = reinterpret_cast<int*>(classes.specialBuffer()); fillUpSegments(indices, numOfClasses, classesRangesBegs, classesRangesLens); int* begins = reinterpret_cast<int*>(classesRangesBegs.specialBuffer()); int* lengths = reinterpret_cast<int*>(classesRangesLens.specialBuffer()); NDArray::prepareSpecialUse({output}, {input, indices}); if (input->isVector()) { unsortedSegmentMinLinearKernel<T,I><<<dims.x, dims.y, dims.z, *stream>>>(input->specialBuffer(), input->specialShapeInfo(), indices->specialBuffer(), indices->specialShapeInfo(), begins, lengths, numOfClasses, output->specialBuffer(), output->specialShapeInfo()); } else { output->assign(DataTypeUtils::max<T>()); std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {0}); auto packX = sd::ConstantTadHelper::getInstance()->tadForDimensions(input->shapeInfo(), dimensions); auto packZ = sd::ConstantTadHelper::getInstance()->tadForDimensions(output->shapeInfo(), dimensions); auto inputTads = packX.specialShapeInfo(); auto inputTadOffsets = packX.specialOffsets(); auto outputTads = packZ.specialShapeInfo(); auto outputTadOffsets = packZ.specialOffsets(); dims.x = input->sizeAt(0); segmentMinTadKernel<T,I><<<dims.x, dims.y, dims.z, *stream>>>(input->specialBuffer(), input->specialShapeInfo(), inputTads, inputTadOffsets, reinterpret_cast<I*>(indices->specialBuffer()), begins, lengths, numOfClasses, output->specialBuffer(), output->specialShapeInfo(), outputTads, outputTadOffsets); } NDArray::registerSpecialUse({output}, {input, indices}); } // 
-------------------------------------------------------------------------------------------------------------- // void unsortedSegmentMinFunctor(sd::LaunchContext* context , NDArray* input, NDArray* indices, Nd4jLong numOfClasses, NDArray* output) { NDArray::prepareSpecialUse({output}, {input, indices}); output->nullify(); BUILD_DOUBLE_SELECTOR(input->dataType(), indices->dataType(), unsortedSegmentMinFunctor_, (context, input, indices, numOfClasses, output), NUMERIC_TYPES, INDEXING_TYPES); NDArray::registerSpecialUse({output}, {input, indices}); } template <typename T, typename I> static __global__ void segmentMinBPLinearKernel(const void* inputBuf, const Nd4jLong* inputShape, void* forwardOutput, const Nd4jLong* forwardShape, void* eps, const Nd4jLong* epsShape, const void* indicesBuf, const Nd4jLong* indicesShape, void* outputBuf, const Nd4jLong* outputShape) { __shared__ const T* x; __shared__ T* gradIn; __shared__ T* gradOut; __shared__ const I* y; __shared__ T* z; __shared__ Nd4jLong xLen, gradLen; if (threadIdx.x == 0) { xLen = shape::length(inputShape); x = reinterpret_cast<const T*>(inputBuf); y = reinterpret_cast<const I*>(indicesBuf); z = reinterpret_cast<T*>(outputBuf); gradIn = reinterpret_cast<T*>(forwardOutput); gradOut = reinterpret_cast<T*>(eps); gradLen = shape::length(epsShape); } __syncthreads(); auto start = blockIdx.x * blockDim.x + threadIdx.x; auto step = gridDim.x * blockDim.x; for (auto e = start; e < xLen; e += step) { auto zOffset = shape::getIndexOffset(e, outputShape); auto xOffset = shape::getIndexOffset(e, inputShape); auto yOffset = shape::getIndexOffset(e, indicesShape); auto classIndex = y[yOffset]; auto gradOffsetI = shape::getIndexOffset(classIndex, forwardShape); auto gradOffsetO = shape::getIndexOffset(classIndex, epsShape); if (sd::math::nd4j_abs(gradIn[gradOffsetI] - x[xOffset]) <= T(1.e-6)) { z[zOffset] = gradOut[gradOffsetO]; } } } // -------------------------------------------------------------------------------------------------------------- // template <typename T, typename I> static __global__ void segmentMinBPTadKernel(const void* inputBuf, const Nd4jLong* inputShape, void* forwardOutput, const Nd4jLong* forwardShape, void* eps, const Nd4jLong* epsShape, const void* indicesBuf, const Nd4jLong* indicesShape, void* outputBuf, const Nd4jLong* outputShape, const Nd4jLong* inputTad, const Nd4jLong* inputOffsets, const Nd4jLong* gradInTad, const Nd4jLong* gradInOffsets, const Nd4jLong* gradOutTad, const Nd4jLong* gradOutOffsets, const Nd4jLong* outTad, const Nd4jLong* outOffsets) { __shared__ const T* x; __shared__ T* gradIn; __shared__ T* gradOut; __shared__ const I* y; __shared__ T* z; __shared__ Nd4jLong xLen, yLen, gradLen, currentLen; if (threadIdx.x == 0) { xLen = shape::length(inputShape); x = reinterpret_cast<const T*>(inputBuf); y = reinterpret_cast<const I*>(indicesBuf); z = reinterpret_cast<T*>(outputBuf); yLen = shape::length(indicesShape); gradOut = reinterpret_cast<T*>(eps); gradIn = reinterpret_cast<T*>(forwardOutput); gradLen = shape::length(epsShape); currentLen = shape::length(outTad); } __syncthreads(); for (auto i = blockIdx.x; i < yLen; i += gridDim.x) { auto yIndex = shape::getIndexOffset(i, indicesShape); auto segment = y[yIndex]; auto current = x + inputOffsets[i]; auto currentOut = z + outOffsets[i]; auto in = gradIn + gradInOffsets[segment]; auto outGrad = gradOut + gradOutOffsets[segment]; for (auto e = threadIdx.x; e < currentLen; e += blockDim.x) { if (sd::math::nd4j_abs(in[e] - current[e]) <= T(1.e-6)) currentOut[e] = 
outGrad[e]; } } } // -------------------------------------------------------------------------------------------------------------- // template <typename T, typename I> int segmentMinFunctorBP_(sd::LaunchContext* context , NDArray* input, NDArray* indices, NDArray* gradOut, NDArray* output) { //int numOfClasses = gradOut->sizeAt(0); // if input is a vector: (as if in doc sample) auto stream = context->getCudaStream(); NDArray tempRes(gradOut->ordering(), gradOut->getShapeAsVector(), DataTypeUtils::fromT<T>(), context);//->shapeInfo(), context); segmentMinFunctor_<T, I>(context, input, indices, &tempRes); NDArray::prepareSpecialUse({output}, {input, indices, gradOut, &tempRes}); if (input->isVector()) { Nd4jLong loop_size = input->lengthOf(); auto numOfClasses = gradOut->lengthOf(); //indices->e<Nd4jLong>(loop_size - 1); segmentMinBPLinearKernel<T,I><<<gradOut->lengthOf(), input->lengthOf(), 256, *stream>>>(input->specialBuffer(), input->specialShapeInfo(), tempRes.specialBuffer(), tempRes.specialShapeInfo(), gradOut->specialBuffer(), gradOut->specialShapeInfo(), indices->specialBuffer(), indices->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo()); } else { std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {0}); auto packX = sd::ConstantTadHelper::getInstance()->tadForDimensions(input->shapeInfo(), dimensions); auto packZ = sd::ConstantTadHelper::getInstance()->tadForDimensions(output->shapeInfo(), dimensions); auto packGradIn = sd::ConstantTadHelper::getInstance()->tadForDimensions(tempRes.shapeInfo(), dimensions); auto packGradOut = sd::ConstantTadHelper::getInstance()->tadForDimensions(gradOut->shapeInfo(), dimensions); auto inputTads = packX.specialShapeInfo(); auto inputTadOffsets = packX.specialOffsets(); auto outputTads = packZ.specialShapeInfo(); auto outputTadOffsets = packZ.specialOffsets(); auto gradInTads = packGradIn.specialShapeInfo(); auto gradInTadOffsets = packGradIn.specialOffsets(); auto gradOutTads = packGradOut.specialShapeInfo(); auto gradOutTadOffsets = packGradOut.specialOffsets(); segmentMinBPTadKernel<T,I><<<gradOut->lengthOf(), input->lengthOf(), 256, *stream>>>(input->specialBuffer(), input->specialShapeInfo(), tempRes.specialBuffer(), tempRes.specialShapeInfo(), gradOut->specialBuffer(), gradOut->specialShapeInfo(), indices->specialBuffer(), indices->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo(), inputTads, inputTadOffsets, gradInTads, gradInTadOffsets, gradOutTads, gradOutTadOffsets, outputTads, outputTadOffsets); } NDArray::registerSpecialUse({output}, {input, indices, gradOut, &tempRes}); return Status::OK(); } // -------------------------------------------------------------------------------------------------------------- // // segmen min int segmentMinFunctorBP(sd::LaunchContext* context , NDArray* input, NDArray* indices, NDArray* gradOut, NDArray* output) { NDArray::prepareSpecialUse({output}, {input, indices, gradOut}); BUILD_DOUBLE_SELECTOR(output->dataType(), indices->dataType(), return segmentMinFunctorBP_, (context, input, indices, gradOut, output), FLOAT_TYPES, INDEXING_TYPES); NDArray::registerSpecialUse({output}, {input, indices, gradOut}); } template <typename T, typename I> static int unsortedSegmentMinFunctorBP_(sd::LaunchContext* context, NDArray* input, NDArray* indices, NDArray* gradOut, Nd4jLong numOfClasses, NDArray* output) { //int numOfClasses = gradOut->sizeAt(0); // if input is a vector: (as if in doc sample) auto stream = context->getCudaStream(); NDArray 
tempRes(gradOut->ordering(), gradOut->getShapeAsVector(), DataTypeUtils::fromT<T>(), context);//->shapeInfo(), context); unsortedSegmentMinFunctor_<T, I>(context, input, indices, numOfClasses, &tempRes); NDArray::prepareSpecialUse({output}, {input, indices, gradOut, &tempRes}); if (input->isVector()) { Nd4jLong loop_size = input->lengthOf(); auto numOfClasses = gradOut->lengthOf(); //indices->e<Nd4jLong>(loop_size - 1); segmentMinBPLinearKernel<T,I><<<gradOut->lengthOf(), input->lengthOf(), 256, *stream>>>(input->specialBuffer(), input->specialShapeInfo(), tempRes.specialBuffer(), tempRes.specialShapeInfo(), gradOut->specialBuffer(), gradOut->specialShapeInfo(), indices->specialBuffer(), indices->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo()); } else { std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {0}); auto packX = sd::ConstantTadHelper::getInstance()->tadForDimensions(input->shapeInfo(), dimensions); auto packZ = sd::ConstantTadHelper::getInstance()->tadForDimensions(output->shapeInfo(), dimensions); auto packGradIn = sd::ConstantTadHelper::getInstance()->tadForDimensions(tempRes.shapeInfo(), dimensions); auto packGradOut = sd::ConstantTadHelper::getInstance()->tadForDimensions(gradOut->shapeInfo(), dimensions); auto inputTads = packX.specialShapeInfo(); auto inputTadOffsets = packX.specialOffsets(); auto outputTads = packZ.specialShapeInfo(); auto outputTadOffsets = packZ.specialOffsets(); auto gradInTads = packGradIn.specialShapeInfo(); auto gradInTadOffsets = packGradIn.specialOffsets(); auto gradOutTads = packGradOut.specialShapeInfo(); auto gradOutTadOffsets = packGradOut.specialOffsets(); segmentMinBPTadKernel<T,I><<<gradOut->lengthOf(), input->lengthOf(), 256, *stream>>>(input->specialBuffer(), input->specialShapeInfo(), tempRes.specialBuffer(), tempRes.specialShapeInfo(), gradOut->specialBuffer(), gradOut->specialShapeInfo(), indices->specialBuffer(), indices->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo(), inputTads, inputTadOffsets, gradInTads, gradInTadOffsets, gradOutTads, gradOutTadOffsets, outputTads, outputTadOffsets); } NDArray::registerSpecialUse({output}, {input, indices, gradOut, &tempRes}); return Status::OK(); } // -------------------------------------------------------------------------------------------------------------- // int unsortedSegmentMinFunctorBP(sd::LaunchContext* context , NDArray* input, NDArray* indices, NDArray* gradOut, Nd4jLong numOfClasses, NDArray* output) { NDArray::prepareSpecialUse({output}, {input, indices, gradOut}); BUILD_DOUBLE_SELECTOR(output->dataType(), indices->dataType(), return unsortedSegmentMinFunctorBP_, (context, input, indices, gradOut, numOfClasses, output), FLOAT_TYPES, INDEXING_TYPES); NDArray::registerSpecialUse({output}, {input, indices, gradOut}); } } } }
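// Illustrative sketch, not the libnd4j implementation. The backward kernels above
// route each segment's incoming gradient to the element(s) whose value matches the
// forward minimum within 1e-6; for a plain vector input that rule reduces to:
#include <cuda_runtime.h>

__global__ void segmentMinBackwardVector(const float* x, const int* indices,
                                         const float* fwdMin,   // per segment
                                         const float* gradOut,  // per segment
                                         float* gradIn, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n) return;
    int seg = indices[i];
    // Elements that attained the segment minimum receive the gradient; the rest
    // get zero (the library nullifies the output buffer beforehand instead).
    gradIn[i] = (fabsf(x[i] - fwdMin[seg]) <= 1e-6f) ? gradOut[seg] : 0.0f;
}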
affe297b26e9500cf7ed2cf6f686f7d5107ea87e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <ATen/hip/HIPContext.h> #include <torch/types.h> #include <ATen/hip/HIPApplyUtils.cuh> typedef enum { SUM = 0, MEAN = 1, MAX = 2 } reduce_t; #define CHECK_CUDA(x) \ TORCH_CHECK(x.device().is_cuda(), #x " must be a CUDA tensor") #define CHECK_CONTIGUOUS(x) \ TORCH_CHECK(x.is_contiguous(), #x " must be contiguous") #define CHECK_INPUT(x) \ CHECK_CUDA(x); \ CHECK_CONTIGUOUS(x) namespace { int const threadsPerBlock = 512; int const maxGridDim = 50000; } // namespace __device__ __forceinline__ static void reduceMax(float *address, float val) { int *address_as_i = reinterpret_cast<int *>(address); int old = *address_as_i, assumed; do { assumed = old; old = atomicCAS(address_as_i, assumed, __float_as_int(fmaxf(val, __int_as_float(assumed)))); } while (assumed != old || __int_as_float(old) < val); } __device__ __forceinline__ static void reduceMax(double *address, double val) { unsigned long long *address_as_ull = reinterpret_cast<unsigned long long *>(address); unsigned long long old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS( address_as_ull, assumed, __double_as_longlong(fmax(val, __longlong_as_double(assumed)))); } while (assumed != old || __longlong_as_double(old) < val); } // get rid of meaningless warnings when compiling host code #ifdef __CUDA_ARCH__ __device__ __forceinline__ static void reduceAdd(float *address, float val) { #if (__CUDA_ARCH__ < 200) #warning \ "compute capability lower than 2.x. fall back to use CAS version of atomicAdd for float32" int *address_as_i = reinterpret_cast<int *>(address); int old = *address_as_i, assumed; do { assumed = old; old = atomicCAS(address_as_i, assumed, __float_as_int(val + __int_as_float(assumed))); } while (assumed != old); #else atomicAdd(address, val); #endif } __device__ __forceinline__ static void reduceAdd(double *address, double val) { #if (__CUDA_ARCH__ < 600) #warning \ "compute capability lower than 6.x. 
fall back to use CAS version of atomicAdd for float64" unsigned long long *address_as_ull = reinterpret_cast<unsigned long long *>(address); unsigned long long old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); } while (assumed != old); #else atomicAdd(address, val); #endif } #endif template <typename T> __global__ void feats_reduce_kernel(const T *feats, const int32_t *coors_map, T *reduced_feats, // shall be 0 at initialization const int num_input, const int num_feats, const reduce_t reduce_type) { for (int x = blockIdx.x * blockDim.x + threadIdx.x; x < num_input; x += gridDim.x * blockDim.x) { int32_t reduce_to = coors_map[x]; if (reduce_to == -1) continue; const T *feats_offset = feats + x * num_feats; T *reduced_feats_offset = reduced_feats + reduce_to * num_feats; if (reduce_type == reduce_t::MAX) { for (int i = 0; i < num_feats; i++) { reduceMax(&reduced_feats_offset[i], feats_offset[i]); } } else { for (int i = 0; i < num_feats; i++) { reduceAdd(&reduced_feats_offset[i], feats_offset[i]); } } } } template <typename T> __global__ void add_reduce_traceback_grad_kernel( T *grad_feats, const T *grad_reduced_feats, const int32_t *coors_map, const int32_t *reduce_count, const int num_input, const int num_feats, const reduce_t reduce_type) { for (int x = blockIdx.x * blockDim.x + threadIdx.x; x < num_input; x += gridDim.x * blockDim.x) { int32_t reduce_to = coors_map[x]; if (reduce_to == -1) { continue; } const int input_offset = x * num_feats; T *grad_feats_offset = grad_feats + input_offset; const int reduced_offset = reduce_to * num_feats; const T *grad_reduced_feats_offset = grad_reduced_feats + reduced_offset; if (reduce_type == reduce_t::SUM) { for (int i = 0; i < num_feats; i++) { grad_feats_offset[i] = grad_reduced_feats_offset[i]; } } else if (reduce_type == reduce_t::MEAN) { for (int i = 0; i < num_feats; i++) { grad_feats_offset[i] = grad_reduced_feats_offset[i] / static_cast<T>(reduce_count[reduce_to]); } } } } template <typename T> __global__ void max_reduce_traceback_scatter_idx_kernel( const T *feats, const T *reduced_feats, int32_t *reduce_from, const int32_t *coors_map, const int num_input, const int num_feats) { for (int x = blockIdx.x * blockDim.x + threadIdx.x; x < num_input; x += gridDim.x * blockDim.x) { int32_t reduce_to = coors_map[x]; const int input_offset = x * num_feats; const T *feats_offset = feats + input_offset; if (reduce_to == -1) { continue; } const int reduced_offset = reduce_to * num_feats; const T *reduced_feats_offset = reduced_feats + reduced_offset; int32_t *reduce_from_offset = reduce_from + reduced_offset; for (int i = 0; i < num_feats; i++) { if (feats_offset[i] == reduced_feats_offset[i]) { atomicMin(&reduce_from_offset[i], static_cast<int32_t>(x)); } } } } template <typename T> __global__ void max_reduce_scatter_grad_kernel(T *grad_feats, const T *grad_reduced_feats, const int32_t *reduce_from, const int num_reduced, const int num_feats) { for (int x = blockIdx.x * blockDim.x + threadIdx.x; x < num_reduced; x += gridDim.x * blockDim.x) { const int reduced_offset = x * num_feats; const int32_t *scatter_to_offset = reduce_from + reduced_offset; const T *grad_reduced_feats_offset = grad_reduced_feats + reduced_offset; for (int i = 0; i < num_feats; i++) { grad_feats[scatter_to_offset[i] * num_feats + i] = grad_reduced_feats_offset[i]; } } } namespace voxelization { std::vector<at::Tensor> dynamic_point_to_voxel_forward_gpu( const at::Tensor 
&feats, const at::Tensor &coors, const reduce_t reduce_type) { CHECK_INPUT(feats); CHECK_INPUT(coors); const int num_input = feats.size(0); const int num_feats = feats.size(1); if (num_input == 0) return {feats.clone().detach(), coors.clone().detach(), coors.new_empty({0}, torch::kInt32), coors.new_empty({0}, torch::kInt32)}; at::Tensor out_coors; at::Tensor coors_map; at::Tensor reduce_count; auto coors_clean = coors.masked_fill(coors.lt(0).any(-1, true), -1); std::tie(out_coors, coors_map, reduce_count) = at::unique_dim(coors_clean, 0, true, true, true); if (out_coors.index({0, 0}).lt(0).item<bool>()) { // the first element of out_coors (-1,-1,-1) and should be removed out_coors = out_coors.slice(0, 1); reduce_count = reduce_count.slice(0, 1); coors_map = coors_map - 1; } coors_map = coors_map.to(torch::kInt32); reduce_count = reduce_count.to(torch::kInt32); auto reduced_feats = at::empty({out_coors.size(0), num_feats}, feats.options()); AT_DISPATCH_FLOATING_TYPES( feats.scalar_type(), "feats_reduce_kernel", ([&] { if (reduce_type == reduce_t::MAX) reduced_feats.fill_(-std::numeric_limits<scalar_t>::infinity()); else reduced_feats.fill_(static_cast<scalar_t>(0)); dim3 blocks(::min(at::cuda::ATenCeilDiv(num_input, threadsPerBlock), maxGridDim)); dim3 threads(threadsPerBlock); hipLaunchKernelGGL(( feats_reduce_kernel), dim3(blocks), dim3(threads), 0, 0, feats.data_ptr<scalar_t>(), coors_map.data_ptr<int32_t>(), reduced_feats.data_ptr<scalar_t>(), num_input, num_feats, reduce_type); if (reduce_type == reduce_t::MEAN) reduced_feats /= reduce_count.unsqueeze(-1).to(reduced_feats.dtype()); })); AT_CUDA_CHECK(hipGetLastError()); return {reduced_feats, out_coors, coors_map, reduce_count}; } void dynamic_point_to_voxel_backward_gpu(at::Tensor &grad_feats, const at::Tensor &grad_reduced_feats, const at::Tensor &feats, const at::Tensor &reduced_feats, const at::Tensor &coors_map, const at::Tensor &reduce_count, const reduce_t reduce_type) { CHECK_INPUT(grad_feats); CHECK_INPUT(grad_reduced_feats); CHECK_INPUT(feats); CHECK_INPUT(reduced_feats); CHECK_INPUT(coors_map); CHECK_INPUT(reduce_count); const int num_input = feats.size(0); const int num_reduced = reduced_feats.size(0); const int num_feats = feats.size(1); grad_feats.fill_(0); // copy voxel grad to points if (num_input == 0 || num_reduced == 0) return; if (reduce_type == reduce_t::MEAN || reduce_type == reduce_t::SUM) { AT_DISPATCH_FLOATING_TYPES( grad_reduced_feats.scalar_type(), "add_reduce_traceback_grad_kernel", ([&] { dim3 blocks(::min( at::cuda::ATenCeilDiv(num_input, threadsPerBlock), maxGridDim)); dim3 threads(threadsPerBlock); hipLaunchKernelGGL(( add_reduce_traceback_grad_kernel), dim3(blocks), dim3(threads), 0, 0, grad_feats.data_ptr<scalar_t>(), grad_reduced_feats.data_ptr<scalar_t>(), coors_map.data_ptr<int32_t>(), reduce_count.data_ptr<int32_t>(), num_input, num_feats, reduce_type); })); AT_CUDA_CHECK(hipGetLastError()); } else { auto reduce_from = at::full({num_reduced, num_feats}, num_input, coors_map.options().dtype(torch::kInt32)); AT_DISPATCH_FLOATING_TYPES( grad_reduced_feats.scalar_type(), "max_reduce_traceback_scatter_idx_kernel", ([&] { dim3 blocks(::min( at::cuda::ATenCeilDiv(num_input, threadsPerBlock), maxGridDim)); dim3 threads(threadsPerBlock); hipLaunchKernelGGL(( max_reduce_traceback_scatter_idx_kernel), dim3(blocks), dim3(threads), 0, 0, feats.data_ptr<scalar_t>(), reduced_feats.data_ptr<scalar_t>(), reduce_from.data_ptr<int32_t>(), coors_map.data_ptr<int32_t>(), num_input, num_feats); })); 
AT_CUDA_CHECK(hipGetLastError()); AT_DISPATCH_FLOATING_TYPES( grad_reduced_feats.scalar_type(), "max_reduce_traceback_scatter_idx_kernel", ([&] { dim3 blocks(::min( at::cuda::ATenCeilDiv(num_reduced, threadsPerBlock), maxGridDim)); dim3 threads(threadsPerBlock); hipLaunchKernelGGL(( max_reduce_scatter_grad_kernel), dim3(blocks), dim3(threads), 0, 0, grad_feats.data_ptr<scalar_t>(), grad_reduced_feats.data_ptr<scalar_t>(), reduce_from.data_ptr<int32_t>(), num_reduced, num_feats); })); AT_CUDA_CHECK(hipGetLastError()); } return; } } // namespace voxelization
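A hedged host-side usage sketch of the forward entry point above. The shapes and values are assumptions (feats is an [N, C] float CUDA tensor, coors an [N, 3] int32 CUDA tensor, and the reduce_t enum from this file is assumed visible); error handling and realistic data are omitted.

#include <torch/torch.h>

// Illustrative call only; not part of the original file.
void example_dynamic_point_to_voxel() {
  const int N = 1000, C = 4;
  auto feats = torch::rand({N, C},
      torch::dtype(torch::kFloat32).device(torch::kCUDA));
  auto coors = torch::randint(0, 40, {N, 3},
      torch::dtype(torch::kInt32).device(torch::kCUDA));
  // Returns {reduced_feats, out_coors, coors_map, reduce_count}.
  auto outs = voxelization::dynamic_point_to_voxel_forward_gpu(feats, coors, MAX);
  auto reduced_feats = outs[0];
  auto out_coors = outs[1];
}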
affe297b26e9500cf7ed2cf6f686f7d5107ea87e.cu
#include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <torch/types.h> #include <ATen/cuda/CUDAApplyUtils.cuh> typedef enum { SUM = 0, MEAN = 1, MAX = 2 } reduce_t; #define CHECK_CUDA(x) \ TORCH_CHECK(x.device().is_cuda(), #x " must be a CUDA tensor") #define CHECK_CONTIGUOUS(x) \ TORCH_CHECK(x.is_contiguous(), #x " must be contiguous") #define CHECK_INPUT(x) \ CHECK_CUDA(x); \ CHECK_CONTIGUOUS(x) namespace { int const threadsPerBlock = 512; int const maxGridDim = 50000; } // namespace __device__ __forceinline__ static void reduceMax(float *address, float val) { int *address_as_i = reinterpret_cast<int *>(address); int old = *address_as_i, assumed; do { assumed = old; old = atomicCAS(address_as_i, assumed, __float_as_int(fmaxf(val, __int_as_float(assumed)))); } while (assumed != old || __int_as_float(old) < val); } __device__ __forceinline__ static void reduceMax(double *address, double val) { unsigned long long *address_as_ull = reinterpret_cast<unsigned long long *>(address); unsigned long long old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS( address_as_ull, assumed, __double_as_longlong(fmax(val, __longlong_as_double(assumed)))); } while (assumed != old || __longlong_as_double(old) < val); } // get rid of meaningless warnings when compiling host code #ifdef __CUDA_ARCH__ __device__ __forceinline__ static void reduceAdd(float *address, float val) { #if (__CUDA_ARCH__ < 200) #warning \ "compute capability lower than 2.x. fall back to use CAS version of atomicAdd for float32" int *address_as_i = reinterpret_cast<int *>(address); int old = *address_as_i, assumed; do { assumed = old; old = atomicCAS(address_as_i, assumed, __float_as_int(val + __int_as_float(assumed))); } while (assumed != old); #else atomicAdd(address, val); #endif } __device__ __forceinline__ static void reduceAdd(double *address, double val) { #if (__CUDA_ARCH__ < 600) #warning \ "compute capability lower than 6.x. 
fall back to use CAS version of atomicAdd for float64" unsigned long long *address_as_ull = reinterpret_cast<unsigned long long *>(address); unsigned long long old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); } while (assumed != old); #else atomicAdd(address, val); #endif } #endif template <typename T> __global__ void feats_reduce_kernel(const T *feats, const int32_t *coors_map, T *reduced_feats, // shall be 0 at initialization const int num_input, const int num_feats, const reduce_t reduce_type) { for (int x = blockIdx.x * blockDim.x + threadIdx.x; x < num_input; x += gridDim.x * blockDim.x) { int32_t reduce_to = coors_map[x]; if (reduce_to == -1) continue; const T *feats_offset = feats + x * num_feats; T *reduced_feats_offset = reduced_feats + reduce_to * num_feats; if (reduce_type == reduce_t::MAX) { for (int i = 0; i < num_feats; i++) { reduceMax(&reduced_feats_offset[i], feats_offset[i]); } } else { for (int i = 0; i < num_feats; i++) { reduceAdd(&reduced_feats_offset[i], feats_offset[i]); } } } } template <typename T> __global__ void add_reduce_traceback_grad_kernel( T *grad_feats, const T *grad_reduced_feats, const int32_t *coors_map, const int32_t *reduce_count, const int num_input, const int num_feats, const reduce_t reduce_type) { for (int x = blockIdx.x * blockDim.x + threadIdx.x; x < num_input; x += gridDim.x * blockDim.x) { int32_t reduce_to = coors_map[x]; if (reduce_to == -1) { continue; } const int input_offset = x * num_feats; T *grad_feats_offset = grad_feats + input_offset; const int reduced_offset = reduce_to * num_feats; const T *grad_reduced_feats_offset = grad_reduced_feats + reduced_offset; if (reduce_type == reduce_t::SUM) { for (int i = 0; i < num_feats; i++) { grad_feats_offset[i] = grad_reduced_feats_offset[i]; } } else if (reduce_type == reduce_t::MEAN) { for (int i = 0; i < num_feats; i++) { grad_feats_offset[i] = grad_reduced_feats_offset[i] / static_cast<T>(reduce_count[reduce_to]); } } } } template <typename T> __global__ void max_reduce_traceback_scatter_idx_kernel( const T *feats, const T *reduced_feats, int32_t *reduce_from, const int32_t *coors_map, const int num_input, const int num_feats) { for (int x = blockIdx.x * blockDim.x + threadIdx.x; x < num_input; x += gridDim.x * blockDim.x) { int32_t reduce_to = coors_map[x]; const int input_offset = x * num_feats; const T *feats_offset = feats + input_offset; if (reduce_to == -1) { continue; } const int reduced_offset = reduce_to * num_feats; const T *reduced_feats_offset = reduced_feats + reduced_offset; int32_t *reduce_from_offset = reduce_from + reduced_offset; for (int i = 0; i < num_feats; i++) { if (feats_offset[i] == reduced_feats_offset[i]) { atomicMin(&reduce_from_offset[i], static_cast<int32_t>(x)); } } } } template <typename T> __global__ void max_reduce_scatter_grad_kernel(T *grad_feats, const T *grad_reduced_feats, const int32_t *reduce_from, const int num_reduced, const int num_feats) { for (int x = blockIdx.x * blockDim.x + threadIdx.x; x < num_reduced; x += gridDim.x * blockDim.x) { const int reduced_offset = x * num_feats; const int32_t *scatter_to_offset = reduce_from + reduced_offset; const T *grad_reduced_feats_offset = grad_reduced_feats + reduced_offset; for (int i = 0; i < num_feats; i++) { grad_feats[scatter_to_offset[i] * num_feats + i] = grad_reduced_feats_offset[i]; } } } namespace voxelization { std::vector<at::Tensor> dynamic_point_to_voxel_forward_gpu( const at::Tensor 
&feats, const at::Tensor &coors, const reduce_t reduce_type) { CHECK_INPUT(feats); CHECK_INPUT(coors); const int num_input = feats.size(0); const int num_feats = feats.size(1); if (num_input == 0) return {feats.clone().detach(), coors.clone().detach(), coors.new_empty({0}, torch::kInt32), coors.new_empty({0}, torch::kInt32)}; at::Tensor out_coors; at::Tensor coors_map; at::Tensor reduce_count; auto coors_clean = coors.masked_fill(coors.lt(0).any(-1, true), -1); std::tie(out_coors, coors_map, reduce_count) = at::unique_dim(coors_clean, 0, true, true, true); if (out_coors.index({0, 0}).lt(0).item<bool>()) { // the first element of out_coors (-1,-1,-1) and should be removed out_coors = out_coors.slice(0, 1); reduce_count = reduce_count.slice(0, 1); coors_map = coors_map - 1; } coors_map = coors_map.to(torch::kInt32); reduce_count = reduce_count.to(torch::kInt32); auto reduced_feats = at::empty({out_coors.size(0), num_feats}, feats.options()); AT_DISPATCH_FLOATING_TYPES( feats.scalar_type(), "feats_reduce_kernel", ([&] { if (reduce_type == reduce_t::MAX) reduced_feats.fill_(-std::numeric_limits<scalar_t>::infinity()); else reduced_feats.fill_(static_cast<scalar_t>(0)); dim3 blocks(std::min(at::cuda::ATenCeilDiv(num_input, threadsPerBlock), maxGridDim)); dim3 threads(threadsPerBlock); feats_reduce_kernel<<<blocks, threads>>>( feats.data_ptr<scalar_t>(), coors_map.data_ptr<int32_t>(), reduced_feats.data_ptr<scalar_t>(), num_input, num_feats, reduce_type); if (reduce_type == reduce_t::MEAN) reduced_feats /= reduce_count.unsqueeze(-1).to(reduced_feats.dtype()); })); AT_CUDA_CHECK(cudaGetLastError()); return {reduced_feats, out_coors, coors_map, reduce_count}; } void dynamic_point_to_voxel_backward_gpu(at::Tensor &grad_feats, const at::Tensor &grad_reduced_feats, const at::Tensor &feats, const at::Tensor &reduced_feats, const at::Tensor &coors_map, const at::Tensor &reduce_count, const reduce_t reduce_type) { CHECK_INPUT(grad_feats); CHECK_INPUT(grad_reduced_feats); CHECK_INPUT(feats); CHECK_INPUT(reduced_feats); CHECK_INPUT(coors_map); CHECK_INPUT(reduce_count); const int num_input = feats.size(0); const int num_reduced = reduced_feats.size(0); const int num_feats = feats.size(1); grad_feats.fill_(0); // copy voxel grad to points if (num_input == 0 || num_reduced == 0) return; if (reduce_type == reduce_t::MEAN || reduce_type == reduce_t::SUM) { AT_DISPATCH_FLOATING_TYPES( grad_reduced_feats.scalar_type(), "add_reduce_traceback_grad_kernel", ([&] { dim3 blocks(std::min( at::cuda::ATenCeilDiv(num_input, threadsPerBlock), maxGridDim)); dim3 threads(threadsPerBlock); add_reduce_traceback_grad_kernel<<<blocks, threads>>>( grad_feats.data_ptr<scalar_t>(), grad_reduced_feats.data_ptr<scalar_t>(), coors_map.data_ptr<int32_t>(), reduce_count.data_ptr<int32_t>(), num_input, num_feats, reduce_type); })); AT_CUDA_CHECK(cudaGetLastError()); } else { auto reduce_from = at::full({num_reduced, num_feats}, num_input, coors_map.options().dtype(torch::kInt32)); AT_DISPATCH_FLOATING_TYPES( grad_reduced_feats.scalar_type(), "max_reduce_traceback_scatter_idx_kernel", ([&] { dim3 blocks(std::min( at::cuda::ATenCeilDiv(num_input, threadsPerBlock), maxGridDim)); dim3 threads(threadsPerBlock); max_reduce_traceback_scatter_idx_kernel<<<blocks, threads>>>( feats.data_ptr<scalar_t>(), reduced_feats.data_ptr<scalar_t>(), reduce_from.data_ptr<int32_t>(), coors_map.data_ptr<int32_t>(), num_input, num_feats); })); AT_CUDA_CHECK(cudaGetLastError()); AT_DISPATCH_FLOATING_TYPES( grad_reduced_feats.scalar_type(), 
"max_reduce_traceback_scatter_idx_kernel", ([&] { dim3 blocks(std::min( at::cuda::ATenCeilDiv(num_reduced, threadsPerBlock), maxGridDim)); dim3 threads(threadsPerBlock); max_reduce_scatter_grad_kernel<<<blocks, threads>>>( grad_feats.data_ptr<scalar_t>(), grad_reduced_feats.data_ptr<scalar_t>(), reduce_from.data_ptr<int32_t>(), num_reduced, num_feats); })); AT_CUDA_CHECK(cudaGetLastError()); } return; } } // namespace voxelization
40785821949eee2d4ea45198b448638f7f668a37.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "core/providers/cuda/cu_inc/common.cuh" #include "core/providers/cuda/cuda_common.h" //TODO:fix the warnings #ifdef _MSC_VER #pragma warning(disable : 4244) #endif #include "core/providers/cuda/tensor/compress_impl.h" #include <hipcub/hipcub.hpp> namespace onnxruntime { namespace cuda { hipError_t CompressCalcPrefixSumTempStorageBytes(hipStream_t stream, const int8_t* condition_data, int* condition_cumulative_sum, int length, size_t& temp_storage_bytes) { return hipcub::DeviceScan::InclusiveSum( nullptr, temp_storage_bytes, condition_data, condition_cumulative_sum, length, stream); } hipError_t CompressInclusivePrefixSum(hipStream_t stream, void* d_temp_storage, size_t temp_storage_bytes, const int8_t* condition_data, int* condition_cumulative_sum, int length) { return hipcub::DeviceScan::InclusiveSum( d_temp_storage, temp_storage_bytes, condition_data, condition_cumulative_sum, length, stream); } template <typename T> __global__ void _CompressKernel(const int32_t valid_condition_length, const fast_divmod axis_right_stride_div, const fast_divmod input_axis_included_stride_div, const int32_t output_axis_included_stride, const int32_t* condition_cumulative_sum, const bool* condition_data, const T* input_data, T* output_data, const CUDA_LONG N) { CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N); CUDA_LONG output_index = 0; int div, mod; input_axis_included_stride_div.divmod(id, div, mod); output_index = output_axis_included_stride * div; axis_right_stride_div.divmod(mod, div, mod); if (div < valid_condition_length && condition_data[div]) { output_index += (condition_cumulative_sum[div] - 1) * axis_right_stride_div.d_ + mod; output_data[output_index] = input_data[id]; } } Status CompressImpl(hipStream_t stream, const size_t element_bytes, const int32_t valid_condition_length, const int32_t axis_right_stride, const int32_t input_axis_dim_length, const int32_t output_axis_dim_length, const int32_t* condition_cumulative_sum, const bool* condition_data, const void* input_data, void* output_data, const size_t N) { int blocksPerGrid = (int)(ceil(static_cast<float>(N) / GridDim::maxThreadsPerBlock)); fast_divmod axis_right_stride_div(axis_right_stride); fast_divmod input_axis_included_stride_div(axis_right_stride * input_axis_dim_length); int output_axis_included_stride = axis_right_stride * output_axis_dim_length; switch (element_bytes) { case sizeof(int8_t): hipLaunchKernelGGL(( _CompressKernel), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, stream, valid_condition_length, axis_right_stride_div, input_axis_included_stride_div, output_axis_included_stride, condition_cumulative_sum, condition_data, reinterpret_cast<const ToCudaType<int8_t>::MappedType*>(input_data), reinterpret_cast<ToCudaType<int8_t>::MappedType*>(output_data), (CUDA_LONG)N); break; case sizeof(int16_t): hipLaunchKernelGGL(( _CompressKernel), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, stream, valid_condition_length, axis_right_stride_div, input_axis_included_stride_div, output_axis_included_stride, condition_cumulative_sum, condition_data, reinterpret_cast<const ToCudaType<int16_t>::MappedType*>(input_data), reinterpret_cast<ToCudaType<int16_t>::MappedType*>(output_data), (CUDA_LONG)N); break; case sizeof(int32_t): hipLaunchKernelGGL(( _CompressKernel), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, stream, 
valid_condition_length, axis_right_stride_div, input_axis_included_stride_div, output_axis_included_stride, condition_cumulative_sum, condition_data, reinterpret_cast<const ToCudaType<int32_t>::MappedType*>(input_data), reinterpret_cast<ToCudaType<int32_t>::MappedType*>(output_data), (CUDA_LONG)N); break; case sizeof(int64_t): hipLaunchKernelGGL(( _CompressKernel), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, stream, valid_condition_length, axis_right_stride_div, input_axis_included_stride_div, output_axis_included_stride, condition_cumulative_sum, condition_data, reinterpret_cast<const ToCudaType<int64_t>::MappedType*>(input_data), reinterpret_cast<ToCudaType<int64_t>::MappedType*>(output_data), (CUDA_LONG)N); break; default: return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Type not supported for Compress operator"); } return Status::OK(); } } // namespace cuda } // namespace onnxruntime
40785821949eee2d4ea45198b448638f7f668a37.cu
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "core/providers/cuda/cu_inc/common.cuh" #include "core/providers/cuda/cuda_common.h" //TODO:fix the warnings #ifdef _MSC_VER #pragma warning(disable : 4244) #endif #include "core/providers/cuda/tensor/compress_impl.h" #include <cub/cub.cuh> namespace onnxruntime { namespace cuda { cudaError_t CompressCalcPrefixSumTempStorageBytes(cudaStream_t stream, const int8_t* condition_data, int* condition_cumulative_sum, int length, size_t& temp_storage_bytes) { return cub::DeviceScan::InclusiveSum( nullptr, temp_storage_bytes, condition_data, condition_cumulative_sum, length, stream); } cudaError_t CompressInclusivePrefixSum(cudaStream_t stream, void* d_temp_storage, size_t temp_storage_bytes, const int8_t* condition_data, int* condition_cumulative_sum, int length) { return cub::DeviceScan::InclusiveSum( d_temp_storage, temp_storage_bytes, condition_data, condition_cumulative_sum, length, stream); } template <typename T> __global__ void _CompressKernel(const int32_t valid_condition_length, const fast_divmod axis_right_stride_div, const fast_divmod input_axis_included_stride_div, const int32_t output_axis_included_stride, const int32_t* condition_cumulative_sum, const bool* condition_data, const T* input_data, T* output_data, const CUDA_LONG N) { CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N); CUDA_LONG output_index = 0; int div, mod; input_axis_included_stride_div.divmod(id, div, mod); output_index = output_axis_included_stride * div; axis_right_stride_div.divmod(mod, div, mod); if (div < valid_condition_length && condition_data[div]) { output_index += (condition_cumulative_sum[div] - 1) * axis_right_stride_div.d_ + mod; output_data[output_index] = input_data[id]; } } Status CompressImpl(cudaStream_t stream, const size_t element_bytes, const int32_t valid_condition_length, const int32_t axis_right_stride, const int32_t input_axis_dim_length, const int32_t output_axis_dim_length, const int32_t* condition_cumulative_sum, const bool* condition_data, const void* input_data, void* output_data, const size_t N) { int blocksPerGrid = (int)(ceil(static_cast<float>(N) / GridDim::maxThreadsPerBlock)); fast_divmod axis_right_stride_div(axis_right_stride); fast_divmod input_axis_included_stride_div(axis_right_stride * input_axis_dim_length); int output_axis_included_stride = axis_right_stride * output_axis_dim_length; switch (element_bytes) { case sizeof(int8_t): _CompressKernel<<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0, stream>>>( valid_condition_length, axis_right_stride_div, input_axis_included_stride_div, output_axis_included_stride, condition_cumulative_sum, condition_data, reinterpret_cast<const ToCudaType<int8_t>::MappedType*>(input_data), reinterpret_cast<ToCudaType<int8_t>::MappedType*>(output_data), (CUDA_LONG)N); break; case sizeof(int16_t): _CompressKernel<<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0, stream>>>( valid_condition_length, axis_right_stride_div, input_axis_included_stride_div, output_axis_included_stride, condition_cumulative_sum, condition_data, reinterpret_cast<const ToCudaType<int16_t>::MappedType*>(input_data), reinterpret_cast<ToCudaType<int16_t>::MappedType*>(output_data), (CUDA_LONG)N); break; case sizeof(int32_t): _CompressKernel<<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0, stream>>>( valid_condition_length, axis_right_stride_div, input_axis_included_stride_div, output_axis_included_stride, condition_cumulative_sum, condition_data, reinterpret_cast<const 
ToCudaType<int32_t>::MappedType*>(input_data), reinterpret_cast<ToCudaType<int32_t>::MappedType*>(output_data), (CUDA_LONG)N); break; case sizeof(int64_t): _CompressKernel<<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0, stream>>>( valid_condition_length, axis_right_stride_div, input_axis_included_stride_div, output_axis_included_stride, condition_cumulative_sum, condition_data, reinterpret_cast<const ToCudaType<int64_t>::MappedType*>(input_data), reinterpret_cast<ToCudaType<int64_t>::MappedType*>(output_data), (CUDA_LONG)N); break; default: return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Type not supported for Compress operator"); } return Status::OK(); } } // namespace cuda } // namespace onnxruntime
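The two scan entry points above split CUB's usual two-phase protocol (workspace-size query, then the actual scan) across the caller. A self-contained sketch of that pattern, using a local cudaMalloc instead of ONNX Runtime's allocator:

#include <cub/cub.cuh>

// Query-then-run pattern: a null temp buffer makes InclusiveSum report the
// required workspace size; the second call performs the scan.
cudaError_t InclusiveSumExample(cudaStream_t stream, const int8_t* d_condition,
                                int* d_cumulative_sum, int length) {
  size_t temp_bytes = 0;
  cudaError_t err = cub::DeviceScan::InclusiveSum(
      nullptr, temp_bytes, d_condition, d_cumulative_sum, length, stream);
  if (err != cudaSuccess) return err;
  void* d_temp = nullptr;
  err = cudaMalloc(&d_temp, temp_bytes);
  if (err != cudaSuccess) return err;
  err = cub::DeviceScan::InclusiveSum(
      d_temp, temp_bytes, d_condition, d_cumulative_sum, length, stream);
  cudaFree(d_temp);
  return err;
}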
178dbe5437c3defb4f9c6527c25709af8a0dc70b.hip
// !!! This is a file automatically generated by hipify!!! #include <ATen/AccumulateType.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/hip/Reduce.cuh> #include <ATen/native/DispatchStub.h> #include <ATen/native/SharedReduceOps.h> #include <ATen/AccumulateType.h> #include <ATen/Dispatch.h> #include <ATen/native/ReduceOps.h> namespace at { namespace native { template <typename scalar_t, typename out_t=scalar_t> void std_var_kernel_impl(TensorIterator& iter, int32_t correction, bool take_sqrt) { // reducing unrolling factor to 2 for welford kernel // This is necessary to lower register usage that leads to register spills. using accscalar_t = at::acc_type<scalar_t, true>; using ops_t = WelfordOps<scalar_t, accscalar_t, int32_t, float, thrust::pair<out_t, out_t>>; gpu_reduce_kernel<scalar_t, out_t, 2>( iter, ops_t{correction, take_sqrt}, typename ops_t::acc_t{}); } static void std_var_kernel_cuda(TensorIterator& iter, int64_t correction, bool take_sqrt) { using limits = std::numeric_limits<int32_t>; TORCH_CHECK( correction < limits::max() && correction > limits::min(), "The correction argument for std and var computation on CUDA must " "fit within a 32-bit integer, but got ", correction); const auto input_dtype = iter.input_dtype(); if (input_dtype == kHalf && iter.dtype() == kFloat) { // type promotion that does cast and reduction in a single kernel std_var_kernel_impl<at::Half, float>(iter, correction, take_sqrt); } else if (input_dtype == kBFloat16 && iter.dtype() == kFloat) { // type promotion that does cast and reduction in a single kernel std_var_kernel_impl<at::BFloat16, float>(iter, correction, take_sqrt); } else { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "std_cuda", [&]() { std_var_kernel_impl<scalar_t>(iter, correction, take_sqrt); }); } } template <typename scalar_t, typename acc_t=scalar_t, typename out_t=scalar_t> void mean_kernel_impl(TensorIterator& iter) { // returns acc_t for all non-complex dtypes and returns T for c10::complex<T> using factor_t = typename c10::scalar_value_type<acc_t>::type; factor_t factor = static_cast<factor_t>(iter.num_output_elements()) / iter.numel(); gpu_reduce_kernel<scalar_t, out_t>(iter, MeanOps<acc_t, factor_t> {factor}); } static void mean_kernel_cuda(TensorIterator& iter) { if (iter.dtype() == kHalf) { mean_kernel_impl<at::Half, float>(iter); } else if (iter.dtype(1) == kHalf && iter.dtype() == kFloat) { // type promotion that does cast and reduction in a single kernel mean_kernel_impl<at::Half, float, float>(iter); } else if(iter.dtype() == kBFloat16) { mean_kernel_impl<at::BFloat16, float>(iter); } else if (iter.dtype(1) == kBFloat16 && iter.dtype() == kFloat) { // type promotion that does cast and reduction in a single kernel mean_kernel_impl<at::BFloat16, float, float>(iter); } else { AT_DISPATCH_ALL_TYPES_AND_COMPLEX(iter.dtype(), "mean_cuda", [&]() { mean_kernel_impl<scalar_t>(iter); }); } } REGISTER_DISPATCH(std_var_stub, &std_var_kernel_cuda); REGISTER_DISPATCH(mean_stub, &mean_kernel_cuda); }} // namespace at::native
178dbe5437c3defb4f9c6527c25709af8a0dc70b.cu
#include <ATen/AccumulateType.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/cuda/Reduce.cuh> #include <ATen/native/DispatchStub.h> #include <ATen/native/SharedReduceOps.h> #include <ATen/AccumulateType.h> #include <ATen/Dispatch.h> #include <ATen/native/ReduceOps.h> namespace at { namespace native { template <typename scalar_t, typename out_t=scalar_t> void std_var_kernel_impl(TensorIterator& iter, int32_t correction, bool take_sqrt) { // reducing unrolling factor to 2 for welford kernel // This is necessary to lower register usage that leads to register spills. using accscalar_t = at::acc_type<scalar_t, true>; using ops_t = WelfordOps<scalar_t, accscalar_t, int32_t, float, thrust::pair<out_t, out_t>>; gpu_reduce_kernel<scalar_t, out_t, 2>( iter, ops_t{correction, take_sqrt}, typename ops_t::acc_t{}); } static void std_var_kernel_cuda(TensorIterator& iter, int64_t correction, bool take_sqrt) { using limits = std::numeric_limits<int32_t>; TORCH_CHECK( correction < limits::max() && correction > limits::min(), "The correction argument for std and var computation on CUDA must " "fit within a 32-bit integer, but got ", correction); const auto input_dtype = iter.input_dtype(); if (input_dtype == kHalf && iter.dtype() == kFloat) { // type promotion that does cast and reduction in a single kernel std_var_kernel_impl<at::Half, float>(iter, correction, take_sqrt); } else if (input_dtype == kBFloat16 && iter.dtype() == kFloat) { // type promotion that does cast and reduction in a single kernel std_var_kernel_impl<at::BFloat16, float>(iter, correction, take_sqrt); } else { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "std_cuda", [&]() { std_var_kernel_impl<scalar_t>(iter, correction, take_sqrt); }); } } template <typename scalar_t, typename acc_t=scalar_t, typename out_t=scalar_t> void mean_kernel_impl(TensorIterator& iter) { // returns acc_t for all non-complex dtypes and returns T for c10::complex<T> using factor_t = typename c10::scalar_value_type<acc_t>::type; factor_t factor = static_cast<factor_t>(iter.num_output_elements()) / iter.numel(); gpu_reduce_kernel<scalar_t, out_t>(iter, MeanOps<acc_t, factor_t> {factor}); } static void mean_kernel_cuda(TensorIterator& iter) { if (iter.dtype() == kHalf) { mean_kernel_impl<at::Half, float>(iter); } else if (iter.dtype(1) == kHalf && iter.dtype() == kFloat) { // type promotion that does cast and reduction in a single kernel mean_kernel_impl<at::Half, float, float>(iter); } else if(iter.dtype() == kBFloat16) { mean_kernel_impl<at::BFloat16, float>(iter); } else if (iter.dtype(1) == kBFloat16 && iter.dtype() == kFloat) { // type promotion that does cast and reduction in a single kernel mean_kernel_impl<at::BFloat16, float, float>(iter); } else { AT_DISPATCH_ALL_TYPES_AND_COMPLEX(iter.dtype(), "mean_cuda", [&]() { mean_kernel_impl<scalar_t>(iter); }); } } REGISTER_DISPATCH(std_var_stub, &std_var_kernel_cuda); REGISTER_DISPATCH(mean_stub, &mean_kernel_cuda); }} // namespace at::native
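WelfordOps implements the single-pass mean/variance update that keeps std/var numerically stable within one reduction. A plain C++ sketch of that update rule, illustrative of the algorithm rather than PyTorch's functor:

// Welford update: each sample adjusts the running mean and the sum of squared
// deviations (m2); `correction` plays the same role as the argument above
// (1 gives the Bessel-corrected sample variance).
struct WelfordSketch {
  double mean = 0.0, m2 = 0.0;
  long long n = 0;
  void update(double x) {
    ++n;
    double delta = x - mean;
    mean += delta / n;
    m2 += delta * (x - mean);
  }
  double var(long long correction = 1) const {
    return n > correction ? m2 / static_cast<double>(n - correction) : 0.0;
  }
};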
062d7a68f8a0d1c8473b6fab612468dfff3447db.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <common/cudart_utils.h> #include <gtest/gtest.h> #include <common/device_buffer.hpp> #include <distance/epsilon_neighborhood.cuh> #include <random/make_blobs.cuh> #include "test_utils.h" namespace MLCommon { namespace Distance { template <typename T, typename IdxT> struct EpsInputs { IdxT n_row, n_col, n_centers, n_batches; T eps; }; template <typename T, typename IdxT> ::std::ostream& operator<<(::std::ostream& os, const EpsInputs<T, IdxT>& p) { return os; } template <typename T, typename IdxT> class EpsNeighTest : public ::testing::TestWithParam<EpsInputs<T, IdxT>> { protected: void SetUp() override { param = ::testing::TestWithParam<EpsInputs<T, IdxT>>::GetParam(); CUDA_CHECK(hipStreamCreate(&stream)); allocate(data, param.n_row * param.n_col); allocate(labels, param.n_row); batchSize = param.n_row / param.n_batches; allocate(adj, param.n_row * batchSize); allocate(vd, batchSize + 1, true); allocator.reset(new raft::mr::device::default_allocator); Random::make_blobs<T, IdxT>(data, labels, param.n_row, param.n_col, param.n_centers, allocator, stream, true, nullptr, nullptr, T(0.01), false); } void TearDown() override { CUDA_CHECK(hipStreamSynchronize(stream)); CUDA_CHECK(hipStreamDestroy(stream)); CUDA_CHECK(hipFree(data)); CUDA_CHECK(hipFree(labels)); CUDA_CHECK(hipFree(adj)); CUDA_CHECK(hipFree(vd)); } EpsInputs<T, IdxT> param; hipStream_t stream; T* data; bool* adj; IdxT *labels, *vd; IdxT batchSize; std::shared_ptr<deviceAllocator> allocator; }; // class EpsNeighTest const std::vector<EpsInputs<float, int>> inputsfi = { {15000, 16, 5, 1, 2.f}, {14000, 16, 5, 1, 2.f}, {15000, 17, 5, 1, 2.f}, {14000, 17, 5, 1, 2.f}, {15000, 18, 5, 1, 2.f}, {14000, 18, 5, 1, 2.f}, {15000, 32, 5, 1, 2.f}, {14000, 32, 5, 1, 2.f}, {20000, 10000, 10, 1, 2.f}, {20000, 10000, 10, 2, 2.f}, }; typedef EpsNeighTest<float, int> EpsNeighTestFI; TEST_P(EpsNeighTestFI, Result) { for (int i = 0; i < param.n_batches; ++i) { CUDA_CHECK( hipMemsetAsync(adj, 0, sizeof(bool) * param.n_row * batchSize, stream)); CUDA_CHECK(hipMemsetAsync(vd, 0, sizeof(int) * (batchSize + 1), stream)); epsUnexpL2SqNeighborhood<float, int>( adj, vd, data, data + (i * batchSize * param.n_col), param.n_row, batchSize, param.n_col, param.eps * param.eps, stream); ASSERT_TRUE(devArrMatch(param.n_row / param.n_centers, vd, batchSize, Compare<int>(), stream)); } } INSTANTIATE_TEST_CASE_P(EpsNeighTests, EpsNeighTestFI, ::testing::ValuesIn(inputsfi)); }; // namespace Distance }; // namespace MLCommon
062d7a68f8a0d1c8473b6fab612468dfff3447db.cu
/* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <common/cudart_utils.h> #include <gtest/gtest.h> #include <common/device_buffer.hpp> #include <distance/epsilon_neighborhood.cuh> #include <random/make_blobs.cuh> #include "test_utils.h" namespace MLCommon { namespace Distance { template <typename T, typename IdxT> struct EpsInputs { IdxT n_row, n_col, n_centers, n_batches; T eps; }; template <typename T, typename IdxT> ::std::ostream& operator<<(::std::ostream& os, const EpsInputs<T, IdxT>& p) { return os; } template <typename T, typename IdxT> class EpsNeighTest : public ::testing::TestWithParam<EpsInputs<T, IdxT>> { protected: void SetUp() override { param = ::testing::TestWithParam<EpsInputs<T, IdxT>>::GetParam(); CUDA_CHECK(cudaStreamCreate(&stream)); allocate(data, param.n_row * param.n_col); allocate(labels, param.n_row); batchSize = param.n_row / param.n_batches; allocate(adj, param.n_row * batchSize); allocate(vd, batchSize + 1, true); allocator.reset(new raft::mr::device::default_allocator); Random::make_blobs<T, IdxT>(data, labels, param.n_row, param.n_col, param.n_centers, allocator, stream, true, nullptr, nullptr, T(0.01), false); } void TearDown() override { CUDA_CHECK(cudaStreamSynchronize(stream)); CUDA_CHECK(cudaStreamDestroy(stream)); CUDA_CHECK(cudaFree(data)); CUDA_CHECK(cudaFree(labels)); CUDA_CHECK(cudaFree(adj)); CUDA_CHECK(cudaFree(vd)); } EpsInputs<T, IdxT> param; cudaStream_t stream; T* data; bool* adj; IdxT *labels, *vd; IdxT batchSize; std::shared_ptr<deviceAllocator> allocator; }; // class EpsNeighTest const std::vector<EpsInputs<float, int>> inputsfi = { {15000, 16, 5, 1, 2.f}, {14000, 16, 5, 1, 2.f}, {15000, 17, 5, 1, 2.f}, {14000, 17, 5, 1, 2.f}, {15000, 18, 5, 1, 2.f}, {14000, 18, 5, 1, 2.f}, {15000, 32, 5, 1, 2.f}, {14000, 32, 5, 1, 2.f}, {20000, 10000, 10, 1, 2.f}, {20000, 10000, 10, 2, 2.f}, }; typedef EpsNeighTest<float, int> EpsNeighTestFI; TEST_P(EpsNeighTestFI, Result) { for (int i = 0; i < param.n_batches; ++i) { CUDA_CHECK( cudaMemsetAsync(adj, 0, sizeof(bool) * param.n_row * batchSize, stream)); CUDA_CHECK(cudaMemsetAsync(vd, 0, sizeof(int) * (batchSize + 1), stream)); epsUnexpL2SqNeighborhood<float, int>( adj, vd, data, data + (i * batchSize * param.n_col), param.n_row, batchSize, param.n_col, param.eps * param.eps, stream); ASSERT_TRUE(devArrMatch(param.n_row / param.n_centers, vd, batchSize, Compare<int>(), stream)); } } INSTANTIATE_TEST_CASE_P(EpsNeighTests, EpsNeighTestFI, ::testing::ValuesIn(inputsfi)); }; // namespace Distance }; // namespace MLCommon
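The test above only checks the per-batch neighbour counts. As a reference for what epsUnexpL2SqNeighborhood computes, here is a hedged brute-force sketch (row-major [rows, k] layout assumed, eps already squared as in the call above; the vd neighbour counts are not produced here):

// Reference sketch, not the optimized library kernel: adj[i * m + j] is true
// when the squared L2 distance between row i of x and row j of y is <= eps_sq.
__global__ void epsNeighborhoodRef(bool* adj, const float* x, const float* y,
                                   int n, int m, int k, float eps_sq) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  int j = blockIdx.y * blockDim.y + threadIdx.y;
  if (i >= n || j >= m) return;
  float acc = 0.f;
  for (int d = 0; d < k; ++d) {
    float diff = x[i * k + d] - y[j * k + d];
    acc += diff * diff;
  }
  adj[i * m + j] = (acc <= eps_sq);
}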
2e29070049373dfffbfd4f29d358363bc24c6cbe.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <sys/time.h> #define MEM_DIM 64 #define RADIO 1 #define SIZE_BLOQUE 8 #define SIZE_GRID 8 /*Programar una funcion que haga la media de numeros end GPU sin memoria compartida, en GPU con memoria compartida y en CPU. Comparar los tiempos de ejecucion*/ __global__ void kernel_Shared(int *d_input, int *d_output) { int i; int valorFinal = 0; __shared__ int arrayValores[ MEM_DIM + RADIO + RADIO ]; //Inicializar el array para poder calcular las medias arrayValores[threadIdx.x + RADIO] = 0; //Inicializar las posiciones extras en el array if (threadIdx.x < RADIO) arrayValores[threadIdx.x] = 0; if (threadIdx.x >= (SIZE_BLOQUE - RADIO)) arrayValores[threadIdx.x + RADIO] = 0; // Sincronizar todos los threads - Se puede omitir? __syncthreads(); //Copiar los valores desde la memoria global a la memoria compartida arrayValores[threadIdx.x + RADIO] = d_input[blockIdx.x * blockDim.x + threadIdx.x]; //Copiar los valores extras de la izquierda if (threadIdx.x < RADIO) { if (blockIdx.x > 0) { arrayValores[threadIdx.x] = d_input[(blockIdx.x * blockDim.x + threadIdx.x) - RADIO]; } } //Copiar los valores extras de la derecha if (threadIdx.x >= (SIZE_BLOQUE - RADIO)) { if (blockIdx.x < SIZE_GRID - 1) { arrayValores[threadIdx.x + RADIO + RADIO] = d_input[(blockIdx.x * blockDim.x + threadIdx.x) + RADIO]; } } /* if (threadIdx.x == 0) { for(int i = 0; i < blockDim.x + RADIO + RADIO; ++i) { printf("Valor kernel (%d, %d): %d\n", blockIdx.x, i, arrayValores[i]); } printf("%d\n\n", blockIdx.x * blockDim.x + threadIdx.x); } */ //Sincronizar los threads __syncthreads(); //Hacer la media en el array de outputs for (i = -RADIO; i <= RADIO; ++i) { valorFinal += arrayValores[(threadIdx.x + RADIO) + i]; } valorFinal /= (RADIO + RADIO + 1); //printf("Valor en el thread actual (%d, %d): %d\n", blockIdx.x, threadIdx.x, valorFinal); d_output[blockIdx.x * blockDim.x + threadIdx.x] = valorFinal; //printf("Bloque: %d -> Thread: %d -> PosicionArray: %d -> Posicion Array Global: %d -> Valor Guardado: %d\n", blockIdx.x, threadIdx.x, threadIdx.x + RADIO, blockIdx.x * blockDim.x + threadIdx.x, arrayValores[threadIdx.x + RADIO]); } __global__ void kernel(int *d_input, int *d_output) { int i; int valorFinal = 0; int valores[RADIO + RADIO + 1] = {0}; } double tiempo( void ) { struct timeval tv; gettimeofday(&tv, NULL); return (double) (tv.tv_usec) / 1000000 + (double) (tv.tv_sec); } int main(int argc, char** argv) { double tiempoInicio; double tiempoFin; int n = SIZE_BLOQUE * SIZE_GRID; printf("\nElementos a reservar: %d\n\n\n", n); int numBytes = n * sizeof(int); int *d_input; int *d_output; int *h_input; int *h_output; hipMalloc((void **) &d_input, numBytes ); if(hipSuccess != hipGetLastError()) { printf("Error de cuda\n"); } hipMalloc((void **) &d_output, numBytes ); if(hipSuccess != hipGetLastError()) { printf("Error de cuda\n"); } hipMemset(d_output, 0, n); if(hipSuccess != hipGetLastError()) { printf("Error de cuda\n"); } h_input = (int *)malloc(numBytes); h_output = (int *)malloc(numBytes); for(int i = 0; i < n; ++i) h_input[i] = i; hipMemcpy (d_input, h_input, numBytes, hipMemcpyHostToDevice); if(hipSuccess != hipGetLastError()) { printf("Error de cuda\n"); } dim3 blockSize(SIZE_BLOQUE); dim3 gridSize(SIZE_GRID); tiempoInicio = tiempo(); hipLaunchKernelGGL(( kernel_Shared) , dim3(gridSize), dim3(blockSize), 0, 0, d_input, d_output); hipDeviceSynchronize(); if(hipSuccess != 
hipGetLastError()) { printf("Error de cuda _1\n"); } tiempoFin = tiempo(); printf("Tiempo de inicio Kernel: %lf\n", tiempoInicio); printf("Tiempo de fin Kernel: %lf\n", tiempoFin); printf("Tiempo total: %lf\n\n\n", tiempoFin - tiempoInicio); tiempoInicio = tiempo(); hipMemcpy (h_output, d_output, numBytes, hipMemcpyDeviceToHost); tiempoFin = tiempo(); if ( hipSuccess != hipGetLastError() ) printf( "Error! _2\n" ); printf("Tiempo de inicio Transferencia: %lf\n", tiempoInicio); printf("Tiempo de fin Transferencia: %lf\n", tiempoFin); printf("Tiempo total: %lf\n\n\n", tiempoFin - tiempoInicio); for(int i = 0; i < n; ++i) { printf("%d - ", h_output[i]); } printf("\n\n\nDone.\n"); return 0; }
2e29070049373dfffbfd4f29d358363bc24c6cbe.cu
#include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <sys/time.h> #define MEM_DIM 64 #define RADIO 1 #define SIZE_BLOQUE 8 #define SIZE_GRID 8 /*Programar una funcion que haga la media de numeros end GPU sin memoria compartida, en GPU con memoria compartida y en CPU. Comparar los tiempos de ejecucion*/ __global__ void kernel_Shared(int *d_input, int *d_output) { int i; int valorFinal = 0; __shared__ int arrayValores[ MEM_DIM + RADIO + RADIO ]; //Inicializar el array para poder calcular las medias arrayValores[threadIdx.x + RADIO] = 0; //Inicializar las posiciones extras en el array if (threadIdx.x < RADIO) arrayValores[threadIdx.x] = 0; if (threadIdx.x >= (SIZE_BLOQUE - RADIO)) arrayValores[threadIdx.x + RADIO] = 0; // Sincronizar todos los threads - Se puede omitir? __syncthreads(); //Copiar los valores desde la memoria global a la memoria compartida arrayValores[threadIdx.x + RADIO] = d_input[blockIdx.x * blockDim.x + threadIdx.x]; //Copiar los valores extras de la izquierda if (threadIdx.x < RADIO) { if (blockIdx.x > 0) { arrayValores[threadIdx.x] = d_input[(blockIdx.x * blockDim.x + threadIdx.x) - RADIO]; } } //Copiar los valores extras de la derecha if (threadIdx.x >= (SIZE_BLOQUE - RADIO)) { if (blockIdx.x < SIZE_GRID - 1) { arrayValores[threadIdx.x + RADIO + RADIO] = d_input[(blockIdx.x * blockDim.x + threadIdx.x) + RADIO]; } } /* if (threadIdx.x == 0) { for(int i = 0; i < blockDim.x + RADIO + RADIO; ++i) { printf("Valor kernel (%d, %d): %d\n", blockIdx.x, i, arrayValores[i]); } printf("%d\n\n", blockIdx.x * blockDim.x + threadIdx.x); } */ //Sincronizar los threads __syncthreads(); //Hacer la media en el array de outputs for (i = -RADIO; i <= RADIO; ++i) { valorFinal += arrayValores[(threadIdx.x + RADIO) + i]; } valorFinal /= (RADIO + RADIO + 1); //printf("Valor en el thread actual (%d, %d): %d\n", blockIdx.x, threadIdx.x, valorFinal); d_output[blockIdx.x * blockDim.x + threadIdx.x] = valorFinal; //printf("Bloque: %d -> Thread: %d -> PosicionArray: %d -> Posicion Array Global: %d -> Valor Guardado: %d\n", blockIdx.x, threadIdx.x, threadIdx.x + RADIO, blockIdx.x * blockDim.x + threadIdx.x, arrayValores[threadIdx.x + RADIO]); } __global__ void kernel(int *d_input, int *d_output) { int i; int valorFinal = 0; int valores[RADIO + RADIO + 1] = {0}; } double tiempo( void ) { struct timeval tv; gettimeofday(&tv, NULL); return (double) (tv.tv_usec) / 1000000 + (double) (tv.tv_sec); } int main(int argc, char** argv) { double tiempoInicio; double tiempoFin; int n = SIZE_BLOQUE * SIZE_GRID; printf("\nElementos a reservar: %d\n\n\n", n); int numBytes = n * sizeof(int); int *d_input; int *d_output; int *h_input; int *h_output; cudaMalloc((void **) &d_input, numBytes ); if(cudaSuccess != cudaGetLastError()) { printf("Error de cuda\n"); } cudaMalloc((void **) &d_output, numBytes ); if(cudaSuccess != cudaGetLastError()) { printf("Error de cuda\n"); } cudaMemset(d_output, 0, n); if(cudaSuccess != cudaGetLastError()) { printf("Error de cuda\n"); } h_input = (int *)malloc(numBytes); h_output = (int *)malloc(numBytes); for(int i = 0; i < n; ++i) h_input[i] = i; cudaMemcpy (d_input, h_input, numBytes, cudaMemcpyHostToDevice); if(cudaSuccess != cudaGetLastError()) { printf("Error de cuda\n"); } dim3 blockSize(SIZE_BLOQUE); dim3 gridSize(SIZE_GRID); tiempoInicio = tiempo(); kernel_Shared <<<gridSize, blockSize>>>(d_input, d_output); cudaThreadSynchronize(); if(cudaSuccess != cudaGetLastError()) { printf("Error de cuda _1\n"); } tiempoFin = tiempo(); printf("Tiempo de inicio Kernel: %lf\n", 
tiempoInicio); printf("Tiempo de fin Kernel: %lf\n", tiempoFin); printf("Tiempo total: %lf\n\n\n", tiempoFin - tiempoInicio); tiempoInicio = tiempo(); cudaMemcpy (h_output, d_output, numBytes, cudaMemcpyDeviceToHost); tiempoFin = tiempo(); if ( cudaSuccess != cudaGetLastError() ) printf( "Error! _2\n" ); printf("Tiempo de inicio Transferencia: %lf\n", tiempoInicio); printf("Tiempo de fin Transferencia: %lf\n", tiempoFin); printf("Tiempo total: %lf\n\n\n", tiempoFin - tiempoInicio); for(int i = 0; i < n; ++i) { printf("%d - ", h_output[i]); } printf("\n\n\nDone.\n"); return 0; }
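Both versions of this file declare a global-memory `kernel` that is never implemented or launched. A possible completion, offered only as a sketch of the non-shared-memory path the comments describe (same RADIO-wide average as kernel_Shared, with missing neighbours treated as zero):

// Sketch, not part of the original file: window average read directly from
// global memory, dividing by the full window size like kernel_Shared does.
__global__ void kernel_global(const int* d_input, int* d_output, int n) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= n) return;
    int suma = 0;
    for (int i = -RADIO; i <= RADIO; ++i) {
        int j = idx + i;
        if (j >= 0 && j < n) suma += d_input[j];  // out-of-range neighbours count as 0
    }
    d_output[idx] = suma / (RADIO + RADIO + 1);
}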
0e59c8a86e7332ac450db0266e0a55f22a26d577.hip
// !!! This is a file automatically generated by hipify!!!
#ifdef USE_CUDNN
#include <vector>

#include "caffe/layers/cudnn_tanh_layer.hpp"

namespace caffe {

template <typename Ftype, typename Btype>
void CuDNNTanHLayer<Ftype, Btype>::Forward_gpu(const vector<Blob*>& bottom,
    const vector<Blob*>& top) {
  const Ftype* bottom_data = bottom[0]->gpu_data<Ftype>();
  Ftype* top_data = top[0]->mutable_gpu_data<Ftype>();
  CUDNN_CHECK(cudnnActivationForward(Caffe::cudnn_handle(0),
      activ_desc_,
      cudnn::dataType<Ftype>::one,
      fwd_bottom_desc_, bottom_data,
      cudnn::dataType<Ftype>::zero,
      fwd_top_desc_, top_data));
  CUDA_CHECK(hipStreamSynchronize(Caffe::thread_stream(0)));
}

template <typename Ftype, typename Btype>
void CuDNNTanHLayer<Ftype, Btype>::Backward_gpu(const vector<Blob*>& top,
    const vector<bool>& propagate_down, const vector<Blob*>& bottom) {
  if (!propagate_down[0]) { return; }

  const Btype* top_data = top[0]->gpu_data<Btype>();
  const Btype* top_diff = top[0]->gpu_diff<Btype>();
  const Btype* bottom_data = bottom[0]->gpu_data<Btype>();
  Btype* bottom_diff = bottom[0]->mutable_gpu_diff<Btype>();
  CUDNN_CHECK(cudnnActivationBackward(Caffe::cudnn_handle(0),
      activ_desc_,
      cudnn::dataType<Btype>::one,
      bwd_top_desc_, top_data,
      bwd_top_desc_, top_diff,
      bwd_bottom_desc_, bottom_data,
      cudnn::dataType<Btype>::zero,
      bwd_bottom_desc_, bottom_diff));
  CUDA_CHECK(hipStreamSynchronize(Caffe::thread_stream()));
}

INSTANTIATE_LAYER_GPU_FUNCS_FB(CuDNNTanHLayer);

}  // namespace caffe
#endif
0e59c8a86e7332ac450db0266e0a55f22a26d577.cu
#ifdef USE_CUDNN
#include <vector>

#include "caffe/layers/cudnn_tanh_layer.hpp"

namespace caffe {

template <typename Ftype, typename Btype>
void CuDNNTanHLayer<Ftype, Btype>::Forward_gpu(const vector<Blob*>& bottom,
    const vector<Blob*>& top) {
  const Ftype* bottom_data = bottom[0]->gpu_data<Ftype>();
  Ftype* top_data = top[0]->mutable_gpu_data<Ftype>();
  CUDNN_CHECK(cudnnActivationForward(Caffe::cudnn_handle(0),
      activ_desc_,
      cudnn::dataType<Ftype>::one,
      fwd_bottom_desc_, bottom_data,
      cudnn::dataType<Ftype>::zero,
      fwd_top_desc_, top_data));
  CUDA_CHECK(cudaStreamSynchronize(Caffe::thread_stream(0)));
}

template <typename Ftype, typename Btype>
void CuDNNTanHLayer<Ftype, Btype>::Backward_gpu(const vector<Blob*>& top,
    const vector<bool>& propagate_down, const vector<Blob*>& bottom) {
  if (!propagate_down[0]) { return; }

  const Btype* top_data = top[0]->gpu_data<Btype>();
  const Btype* top_diff = top[0]->gpu_diff<Btype>();
  const Btype* bottom_data = bottom[0]->gpu_data<Btype>();
  Btype* bottom_diff = bottom[0]->mutable_gpu_diff<Btype>();
  CUDNN_CHECK(cudnnActivationBackward(Caffe::cudnn_handle(0),
      activ_desc_,
      cudnn::dataType<Btype>::one,
      bwd_top_desc_, top_data,
      bwd_top_desc_, top_diff,
      bwd_bottom_desc_, bottom_data,
      cudnn::dataType<Btype>::zero,
      bwd_bottom_desc_, bottom_diff));
  CUDA_CHECK(cudaStreamSynchronize(Caffe::thread_stream()));
}

INSTANTIATE_LAYER_GPU_FUNCS_FB(CuDNNTanHLayer);

}  // namespace caffe
#endif
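The layer relies on activ_desc_ and the tensor descriptors being configured elsewhere in the class (not shown in this file). A sketch of the typical cuDNN setup for a tanh activation descriptor, assuming the standard cuDNN API and reusing the CUDNN_CHECK macro from above:

#include <cudnn.h>

// Hypothetical setup code (not part of this file): create and configure the
// activation descriptor that cudnnActivationForward/Backward consume.
cudnnActivationDescriptor_t makeTanhDescriptor() {
  cudnnActivationDescriptor_t desc;
  CUDNN_CHECK(cudnnCreateActivationDescriptor(&desc));
  CUDNN_CHECK(cudnnSetActivationDescriptor(desc,
      CUDNN_ACTIVATION_TANH,   // tanh mode
      CUDNN_PROPAGATE_NAN,     // NaN propagation policy
      /*coef=*/0.0));          // coef is unused for tanh
  return desc;
}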
48b9c86a79fad4e18c0870881148de24d570400e.hip
// !!! This is a file automatically generated by hipify!!! // Author: Nic Olsen #include <hip/hip_runtime.h> #include <iostream> #include <stdio.h> #include "scan.cuh" #include <hip/hip_cooperative_groups.h> namespace cg = cooperative_groups; // Scans each block of g_idata separately and writes the result to g_odata. // g_idata and g_odata are arrays available on device of length n // Writes the sum of each block to lasts[blockIdx.x] __global__ void hillis_steele(float* g_odata, float* lasts, float* g_idata, unsigned int n, bool write_lasts) { extern volatile __shared__ float s[]; float *tmp1; float * tmp2; float* tmp3; bool write_p = write_lasts; cg::grid_group grid = cg::this_grid(); int a = n; int tid = threadIdx.x; unsigned int index = blockDim.x * blockIdx.x + tid; int pout = 0; int pin = 1; for( int i = 0 ; i < 2 ; i++){ pout = 0; pin = 1; if (index >= a) { s[tid] = 0.f; } else if (tid == 0) { s[tid] = 0.f; } else { s[tid] = g_idata[index - 1]; } __syncthreads(); for (unsigned int offset = 1; offset < blockDim.x; offset <<= 1) { pout = 1 - pout; pin = 1 - pout; if (tid >= offset) { s[pout * blockDim.x + tid] = s[pin * blockDim.x + tid] + s[pin * blockDim.x + tid - offset]; } else { s[pout * blockDim.x + tid] = s[pin * blockDim.x + tid]; } __syncthreads(); } if (index < a ) { g_odata[index] = s[pout * blockDim.x + tid]; } if (write_p && threadIdx.x == 0) { unsigned int block_end = blockIdx.x * blockDim.x + blockDim.x - 1; lasts[blockIdx.x] = s[pout * blockDim.x + blockDim.x - 1] + g_idata[block_end]; } __syncthreads(); cg::sync(grid); if(a == n){ tmp1 = g_idata; tmp2 = g_odata; tmp3 = lasts; g_idata = lasts; g_odata = lasts; lasts = nullptr; write_p = false; a = (n + blockDim.x - 1) / blockDim.x; } } cg::sync(grid); lasts = tmp3; g_odata = tmp2; __syncthreads(); if (index < n) { g_odata[index] = g_odata[index] + lasts[blockIdx.x]; // printf("g_odata is %f at index %d\n", g_odata[index], index); } } // Increment each element corresponding to block b_i of arr by lasts[b_i] __global__ void inc_blocks(float* arr, float* lasts, unsigned int n) { unsigned int index = blockDim.x * blockIdx.x + threadIdx.x; if (index < n) { arr[index] = arr[index] + lasts[blockIdx.x]; } } __host__ void scan( float* in, float* out, unsigned int n, unsigned int threads_per_block) { // Sort each block indiviually unsigned int nBlocks = (n + threads_per_block - 1) / threads_per_block; float* lasts; hipMallocManaged(&lasts, nBlocks * sizeof(float)); bool write_lasts = true; unsigned int shmem = 4 * threads_per_block * sizeof(float); // hillis_steele<<<nBlocks, threads_per_block, shmem>>>(out, lasts, in, n, true); //hipDeviceSynchronize(); //for (unsigned int a = n; a > 1; a = (a + threads_per_block - 1) / threads_per_block) { void *kernelArgs[] = { (void *)&out, (void *)&lasts, (void *)&in, (void *)&n, (void *)&write_lasts }; hipLaunchCooperativeKernel((void*)hillis_steele, nBlocks, threads_per_block, kernelArgs, shmem, 0); std::cout <<hipGetLastError() <<std::endl; //hillis_steele<<<nBlocks, threads_per_block, shmem>>>(out, lasts, in, n, true); // Swap input and output arrays // float* tmp = in; // in = lasts; // lasts = tmp; // std::cout << in[a-1] << std::endl; // } // Scan lasts //hillis_steele<<<1, threads_per_block, shmem>>>(lasts, nullptr, lasts, nBlocks, false); hipDeviceSynchronize(); hipFree(lasts); }
48b9c86a79fad4e18c0870881148de24d570400e.cu
// Author: Nic Olsen #include <cuda.h> #include <iostream> #include <stdio.h> #include "scan.cuh" #include <cooperative_groups.h> namespace cg = cooperative_groups; // Scans each block of g_idata separately and writes the result to g_odata. // g_idata and g_odata are arrays available on device of length n // Writes the sum of each block to lasts[blockIdx.x] __global__ void hillis_steele(float* g_odata, float* lasts, float* g_idata, unsigned int n, bool write_lasts) { extern volatile __shared__ float s[]; float *tmp1; float * tmp2; float* tmp3; bool write_p = write_lasts; cg::grid_group grid = cg::this_grid(); int a = n; int tid = threadIdx.x; unsigned int index = blockDim.x * blockIdx.x + tid; int pout = 0; int pin = 1; for( int i = 0 ; i < 2 ; i++){ pout = 0; pin = 1; if (index >= a) { s[tid] = 0.f; } else if (tid == 0) { s[tid] = 0.f; } else { s[tid] = g_idata[index - 1]; } __syncthreads(); for (unsigned int offset = 1; offset < blockDim.x; offset <<= 1) { pout = 1 - pout; pin = 1 - pout; if (tid >= offset) { s[pout * blockDim.x + tid] = s[pin * blockDim.x + tid] + s[pin * blockDim.x + tid - offset]; } else { s[pout * blockDim.x + tid] = s[pin * blockDim.x + tid]; } __syncthreads(); } if (index < a ) { g_odata[index] = s[pout * blockDim.x + tid]; } if (write_p && threadIdx.x == 0) { unsigned int block_end = blockIdx.x * blockDim.x + blockDim.x - 1; lasts[blockIdx.x] = s[pout * blockDim.x + blockDim.x - 1] + g_idata[block_end]; } __syncthreads(); cg::sync(grid); if(a == n){ tmp1 = g_idata; tmp2 = g_odata; tmp3 = lasts; g_idata = lasts; g_odata = lasts; lasts = nullptr; write_p = false; a = (n + blockDim.x - 1) / blockDim.x; } } cg::sync(grid); lasts = tmp3; g_odata = tmp2; __syncthreads(); if (index < n) { g_odata[index] = g_odata[index] + lasts[blockIdx.x]; // printf("g_odata is %f at index %d\n", g_odata[index], index); } } // Increment each element corresponding to block b_i of arr by lasts[b_i] __global__ void inc_blocks(float* arr, float* lasts, unsigned int n) { unsigned int index = blockDim.x * blockIdx.x + threadIdx.x; if (index < n) { arr[index] = arr[index] + lasts[blockIdx.x]; } } __host__ void scan( float* in, float* out, unsigned int n, unsigned int threads_per_block) { // Sort each block indiviually unsigned int nBlocks = (n + threads_per_block - 1) / threads_per_block; float* lasts; cudaMallocManaged(&lasts, nBlocks * sizeof(float)); bool write_lasts = true; unsigned int shmem = 4 * threads_per_block * sizeof(float); // hillis_steele<<<nBlocks, threads_per_block, shmem>>>(out, lasts, in, n, true); //cudaDeviceSynchronize(); //for (unsigned int a = n; a > 1; a = (a + threads_per_block - 1) / threads_per_block) { void *kernelArgs[] = { (void *)&out, (void *)&lasts, (void *)&in, (void *)&n, (void *)&write_lasts }; cudaLaunchCooperativeKernel((void*)hillis_steele, nBlocks, threads_per_block, kernelArgs, shmem, 0); std::cout <<cudaGetLastError() <<std::endl; //hillis_steele<<<nBlocks, threads_per_block, shmem>>>(out, lasts, in, n, true); // Swap input and output arrays // float* tmp = in; // in = lasts; // lasts = tmp; // std::cout << in[a-1] << std::endl; // } // Scan lasts //hillis_steele<<<1, threads_per_block, shmem>>>(lasts, nullptr, lasts, nBlocks, false); cudaDeviceSynchronize(); cudaFree(lasts); }
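The cooperative-groups version above still carries several experimental, commented-out paths. For reference, a minimal single-block Hillis-Steele inclusive scan; it assumes a launch such as hillis_steele_single_block<<<1, threads, 2 * threads * sizeof(float)>>>(d_in, d_out, n) with n <= threads:

// Reference sketch, not the multi-block cooperative-groups kernel above.
// Shared memory holds two ping-pong halves of blockDim.x floats each.
__global__ void hillis_steele_single_block(const float* in, float* out, int n) {
    extern __shared__ float buf[];
    int tid = threadIdx.x;
    int pout = 0, pin = 1;
    buf[pout * blockDim.x + tid] = (tid < n) ? in[tid] : 0.f;
    __syncthreads();
    for (int offset = 1; offset < blockDim.x; offset <<= 1) {
        pout = 1 - pout;  // swap ping-pong halves
        pin = 1 - pout;
        if (tid >= offset)
            buf[pout * blockDim.x + tid] =
                buf[pin * blockDim.x + tid] + buf[pin * blockDim.x + tid - offset];
        else
            buf[pout * blockDim.x + tid] = buf[pin * blockDim.x + tid];
        __syncthreads();
    }
    if (tid < n) out[tid] = buf[pout * blockDim.x + tid];
}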
d1bbcc95c6e1766e7cb32c01ea867cbf118a1bca.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <cstdio>

__global__ void input(int *output) {
    __shared__ int s_data[1024];
    for (int i = 0; i < 1024; i++) {
        s_data[i] = 2;
    }
    __syncthreads();
    /*
    for (int i = 0; i < 32; i++) {
        int t = threadIdx.x + i * 32;
        output[t] = s_data[t];
    }
    */
    for (int i = 0; i < 32; i++) {
        output[threadIdx.x * 32 + i] = s_data[threadIdx.x * 32 + i];
    }
}

int main(void) {
    int *ary;
    hipMalloc((void**)&ary, 1024 * sizeof(int));
    hipLaunchKernelGGL((input), dim3(1), dim3(32), 0, 0, ary);

    int *ary2;
    ary2 = (int *)malloc(sizeof(int) * 1024);
    hipMemcpy(ary2, ary, sizeof(int) * 1024, hipMemcpyDeviceToHost);

    printf("final result : %d %d %d", ary2[0], ary2[1], ary2[1023]);
    return 0;
}
d1bbcc95c6e1766e7cb32c01ea867cbf118a1bca.cu
#include <stdio.h>
#include <stdlib.h>
#include <cstdio>

__global__ void input(int *output) {
    __shared__ int s_data[1024];
    for (int i = 0; i < 1024; i++) {
        s_data[i] = 2;
    }
    __syncthreads();
    /*
    for (int i = 0; i < 32; i++) {
        int t = threadIdx.x + i * 32;
        output[t] = s_data[t];
    }
    */
    for (int i = 0; i < 32; i++) {
        output[threadIdx.x * 32 + i] = s_data[threadIdx.x * 32 + i];
    }
}

int main(void) {
    int *ary;
    cudaMalloc((void**)&ary, 1024 * sizeof(int));
    input<<<1, 32>>>(ary);

    int *ary2;
    ary2 = (int *)malloc(sizeof(int) * 1024);
    cudaMemcpy(ary2, ary, sizeof(int) * 1024, cudaMemcpyDeviceToHost);

    printf("final result : %d %d %d", ary2[0], ary2[1], ary2[1023]);
    return 0;
}
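The pair above is a small shared-memory demo: one block of 32 threads fills a 1024-element __shared__ buffer and copies it back to global memory, 32 elements per thread. Because nothing checks the runtime calls, a failed launch would silently print stale host memory. The variant below is a hedged sketch, not part of the original entry, showing the same demo with return-code checks and cleanup; the CUDA_CHECK macro is introduced here purely for illustration.

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// Hypothetical helper, not in the original file: abort with a message on any CUDA error.
#define CUDA_CHECK(call)                                                        \
    do {                                                                        \
        cudaError_t err_ = (call);                                              \
        if (err_ != cudaSuccess) {                                              \
            std::fprintf(stderr, "%s:%d: %s\n", __FILE__, __LINE__,             \
                         cudaGetErrorString(err_));                             \
            std::exit(EXIT_FAILURE);                                            \
        }                                                                       \
    } while (0)

__global__ void input(int *output) {
    __shared__ int s_data[1024];
    // All 32 threads redundantly store the same value to every slot; the
    // original kernel does the same, so the stores are kept for fidelity.
    for (int i = 0; i < 1024; i++) s_data[i] = 2;
    __syncthreads();
    for (int i = 0; i < 32; i++)
        output[threadIdx.x * 32 + i] = s_data[threadIdx.x * 32 + i];
}

int main() {
    int *ary = nullptr;
    CUDA_CHECK(cudaMalloc((void**)&ary, 1024 * sizeof(int)));

    input<<<1, 32>>>(ary);
    CUDA_CHECK(cudaGetLastError());        // catches launch-configuration errors
    CUDA_CHECK(cudaDeviceSynchronize());   // catches errors raised while the kernel runs

    int *ary2 = (int *)std::malloc(1024 * sizeof(int));
    CUDA_CHECK(cudaMemcpy(ary2, ary, 1024 * sizeof(int), cudaMemcpyDeviceToHost));
    std::printf("final result : %d %d %d\n", ary2[0], ary2[1], ary2[1023]);

    std::free(ary2);
    CUDA_CHECK(cudaFree(ary));
    return 0;
}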
52413b6d84907f7b84cfaf721173a2145225807d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <chrono> #include <cstdint> #include <cstdlib> #include <cstring> #include <fstream> #include <iostream> #include <string> #include <vector> struct TItem { int price; int weight; int index; bool operator<(const TItem& other) const { return (double)price / weight > (double)other.price / other.weight; } }; const int BLOCK_SIZE = 32; const int THREADS_PER_BLOCK = 192; void BranchCPU(ssize_t e, int* w, int* p, int* s, int* U_old, uint32_t* X_old, int block_count, int k, int* weight, int* price) { int s_e = s[e]; if (k < s_e) { w[e] -= weight[k]; p[e] -= price[k]; X_old[e * block_count + k / BLOCK_SIZE] |= (1 << (k % BLOCK_SIZE)); } else { ++s[e]; U_old[e] = 0; } } __global__ void BranchGPU(int* w, int* p, int* s, int* U_old, uint32_t* X_old, int block_count, int k, int* weight, int* price, ssize_t q) { ssize_t e = blockIdx.x * blockDim.x + threadIdx.x; if (e >= q) { return; } int s_e = s[e]; if (k < s_e) { w[e] -= weight[k]; p[e] -= price[k]; X_old[e * block_count + k / BLOCK_SIZE] |= (1 << (k % BLOCK_SIZE)); } else { ++s[e]; U_old[e] = 0; } } void BoundCPU(ssize_t e, int* w, int* p, int* s, int* L, int* U, uint32_t* L_set, int block_count, int k, int n, int W, int* weight, int* price) { int i = s[e], w_e = w[e], p_e = p[e], weight_i = 0, price_i = 0; for (; ; ++i) { weight_i = weight[i]; price_i = price[i]; if (i < n && w_e + weight_i <= W) { w_e += weight_i; p_e += price_i; L_set[e * block_count + i / BLOCK_SIZE] |= (1 << (i % BLOCK_SIZE)); } else { break; } } U[e] = p_e + (weight_i ? (W - w_e) * price_i / weight_i : 0); w[e] = w_e; p[e] = p_e; s[e] = i; for (; i < n; ++i) { weight_i = weight[i]; price_i = price[i]; if (w_e + weight_i <= W) { w_e += weight_i; p_e += price_i; L_set[e * block_count + i / BLOCK_SIZE] |= (1 << (i % BLOCK_SIZE)); } } L[e] = p_e; } __global__ void BoundGPU(int* w, int* p, int* s, int* L, int* U, uint32_t* L_set, int block_count, int k, int n, int W, int* weight, int* price, ssize_t q) { ssize_t e = blockIdx.x * blockDim.x + threadIdx.x; if (e >= q) { return; } int i = s[e], w_e = w[e], p_e = p[e], weight_i = 0, price_i = 0; for (; ; ++i) { weight_i = weight[i]; price_i = price[i]; if (i < n && w_e + weight_i <= W) { w_e += weight_i; p_e += price_i; L_set[e * block_count + i / BLOCK_SIZE] |= (1 << (i % BLOCK_SIZE)); } else { break; } } U[e] = p_e + (weight_i ? 
(W - w_e) * price_i / weight_i : 0); w[e] = w_e; p[e] = p_e; s[e] = i; for (; i < n; ++i) { weight_i = weight[i]; price_i = price[i]; if (w_e + weight_i <= W) { w_e += weight_i; p_e += price_i; L_set[e * block_count + i / BLOCK_SIZE] |= (1 << (i % BLOCK_SIZE)); } } L[e] = p_e; } int main(int argc, char* argv[]) { if (argc != 3) { std::cerr << "Usage: " << argv[0] << " input_file output_file" << std::endl; return 0; } std::ifstream fin(argv[1]); std::ofstream fout(argv[2]); int n, W; fin >> n >> W; std::vector<TItem> items(n); for (int i = 0; i < n; ++i) { fin >> items[i].price >> items[i].weight; items[i].index = i + 1; } std::sort(items.begin(), items.end()); int* weight = (int*)malloc((n + 1) * sizeof(*weight)); int* price = (int*)malloc((n + 1) * sizeof(*price)); for (int i = 0; i < n; ++i) { weight[i] = items[i].weight; price[i] = items[i].price; } weight[n] = price[n] = 0; std::chrono::high_resolution_clock::time_point total_start = std::chrono::high_resolution_clock::now(); int *cuda_weight = nullptr, *cuda_price = nullptr; ssize_t q = 1; int* w = (int*)malloc(q * sizeof(*w)); int* p = (int*)malloc(q * sizeof(*p)); int* s = (int*)malloc(q * sizeof(*s)); int* L = (int*)malloc(q * sizeof(*L)); int* U = (int*)malloc(q * sizeof(*U)); const int block_count = (n + BLOCK_SIZE - 1) / BLOCK_SIZE; uint32_t* X = (uint32_t*)calloc(q * block_count, sizeof(*X)); w[0] = p[0] = s[0] = 0; uint32_t* record_set = (uint32_t*)calloc(block_count, sizeof(*X)); BoundCPU(0, w, p, s, L, U, record_set, block_count, 0, n, W, weight, price); int record = L[0]; free(L); for (int k = 0; k < n; ++k) { std::cout << "Step " << k + 1 << ", q = " << q << std::endl; if (q > 5000000) { if (cuda_weight == nullptr) { hipMalloc(&cuda_weight, (n + 1) * sizeof(*cuda_weight)); hipMalloc(&cuda_price, (n + 1) * sizeof(*cuda_price)); hipMemcpy(cuda_weight, weight, (n + 1) * sizeof(*cuda_weight), hipMemcpyHostToDevice); hipMemcpy(cuda_price, price, (n + 1) * sizeof(*cuda_price), hipMemcpyHostToDevice); } int *w_new, *p_new, *s_new, *L_new, *U_new, *U_old; uint32_t *X_old, *L_new_set; hipMalloc(&w_new, q * sizeof(*w_new)); hipMalloc(&p_new, q * sizeof(*p_new)); hipMalloc(&s_new, q * sizeof(*s_new)); hipMalloc(&U_old, q * sizeof(*U_old)); hipMalloc(&X_old, q * block_count * sizeof(*X_old)); hipMemcpy(w_new, w, q * sizeof(*w), hipMemcpyHostToDevice); hipMemcpy(p_new, p, q * sizeof(*p), hipMemcpyHostToDevice); hipMemcpy(s_new, s, q * sizeof(*s), hipMemcpyHostToDevice); hipMemcpy(U_old, U, q * sizeof(*U), hipMemcpyHostToDevice); hipMemcpy(X_old, X, q * block_count * sizeof(*X), hipMemcpyHostToDevice); const ssize_t q_block = (q + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; hipLaunchKernelGGL(( BranchGPU), dim3(q_block), dim3(THREADS_PER_BLOCK), 0, 0, w_new, p_new, s_new, U_old, X_old, block_count, k, cuda_weight, cuda_price, q); hipDeviceSynchronize(); hipMemcpy(U, U_old, q * sizeof(*U), hipMemcpyDeviceToHost); hipFree(U_old); hipMalloc(&L_new, q * sizeof(*L_new)); hipMalloc(&U_new, q * sizeof(*U_new)); hipMalloc(&L_new_set, q * block_count * sizeof(*X)); hipMemcpy(L_new_set, X, q * block_count * sizeof(*X), hipMemcpyHostToDevice); hipLaunchKernelGGL(( BoundGPU), dim3(q_block), dim3(THREADS_PER_BLOCK), 0, 0, w_new, p_new, s_new, L_new, U_new, L_new_set, block_count, k, n, W, cuda_weight, cuda_price, q); hipDeviceSynchronize(); w = (int*)realloc(w, 2 * q * sizeof(*w)); p = (int*)realloc(p, 2 * q * sizeof(*p)); s = (int*)realloc(s, 2 * q * sizeof(*s)); U = (int*)realloc(U, 2 * q * sizeof(*U)); X = (uint32_t*)realloc(X, 2 * q * 
block_count * sizeof(*X)); memcpy(X + q * block_count, X, q * block_count * sizeof(*X)); hipMemcpy(w + q, w_new, q * sizeof(*w), hipMemcpyDeviceToHost); hipMemcpy(p + q, p_new, q * sizeof(*p), hipMemcpyDeviceToHost); hipMemcpy(s + q, s_new, q * sizeof(*s), hipMemcpyDeviceToHost); hipMemcpy(U + q, U_new, q * sizeof(*U), hipMemcpyDeviceToHost); hipMemcpy(X, X_old, q * block_count * sizeof(*X), hipMemcpyDeviceToHost); hipFree(w_new); hipFree(p_new); hipFree(s_new); hipFree(U_new); hipFree(X_old); int *L_new_CPU = (int*)malloc(q * sizeof(*L_new_CPU)); uint32_t* L_new_set_CPU = (uint32_t*)malloc(q * block_count * sizeof(*X)); hipMemcpy(L_new_CPU, L_new, q * sizeof(*L_new), hipMemcpyDeviceToHost); hipMemcpy(L_new_set_CPU, L_new_set, q * block_count * sizeof(*X), hipMemcpyDeviceToHost); hipFree(L_new); hipFree(L_new_set); for (ssize_t e = 0; e < q; ++e) { if (L_new_CPU[e] > record) { record = L_new_CPU[e]; memcpy(record_set, L_new_set_CPU + e * block_count, block_count * sizeof(*X)); for (int i = k + 1; i < s[q + e]; ++i) { record_set[i / BLOCK_SIZE] |= (1 << (i % BLOCK_SIZE)); } } } free(L_new_CPU); free(L_new_set_CPU); } else { w = (int*)realloc(w, 2 * q * sizeof(*w)); p = (int*)realloc(p, 2 * q * sizeof(*p)); s = (int*)realloc(s, 2 * q * sizeof(*s)); X = (uint32_t*)realloc(X, 2 * q * block_count * sizeof(*X)); memcpy(w + q, w, q * sizeof(*w)); memcpy(p + q, p, q * sizeof(*p)); memcpy(s + q, s, q * sizeof(*s)); memcpy(X + q * block_count, X, q * block_count * sizeof(*X)); for (ssize_t e = 0; e < q; ++e) { BranchCPU(e, w + q, p + q, s + q, U, X, block_count, k, weight, price); } U = (int*)realloc(U, 2 * q * sizeof(*U)); int* L_new = (int*)malloc(q * sizeof(*L_new)); uint32_t* L_new_set = (uint32_t*)malloc(q * block_count * sizeof(*X)); memcpy(L_new_set, X + q * block_count, q * block_count * sizeof(*X)); for (ssize_t e = 0; e < q; ++e) { BoundCPU(e, w + q, p + q, s + q, L_new, U + q, L_new_set, block_count, k, n, W, weight, price); if (L_new[e] > record) { record = L_new[e]; memcpy(record_set, L_new_set + e * block_count, block_count * sizeof(*X)); for (int i = k + 1; i < s[q + e]; ++i) { record_set[i / BLOCK_SIZE] |= (1 << (i % BLOCK_SIZE)); } } } free(L_new); free(L_new_set); } for (ssize_t i = 0, j = 2 * q - 1; ;) { while (i < 2 * q && U[i] > record) { ++i; } while (j >= 0 && U[j] <= record) { --j; } if (i >= j) { q = j + 1; break; } w[i] = w[j]; p[i] = p[j]; s[i] = s[j]; std::swap(U[i], U[j]); memcpy(X + i * block_count, X + j * block_count, block_count * sizeof(*X)); } if (q == 0) { break; } } free(w); free(p); free(s); free(U); free(weight); free(price); if (cuda_weight != nullptr) { hipFree(cuda_weight); hipFree(cuda_price); } std::chrono::high_resolution_clock::time_point total_end = std::chrono::high_resolution_clock::now(); double total_time = std::chrono::duration_cast<std::chrono::duration<double>>(total_end - total_start).count(); std::cout << "Total time: " << total_time << std::endl; fout << record << std::endl; std::vector<int> record_ind; for (int i = 0; i < n; ++i) { if (record_set[i / BLOCK_SIZE] & (1 << (i % BLOCK_SIZE))) { record_ind.push_back(items[i].index); } } std::sort(record_ind.begin(), record_ind.end()); for (auto ind : record_ind) { fout << ind << " "; } fout << std::endl; free(record_set); return 0; }
52413b6d84907f7b84cfaf721173a2145225807d.cu
#include <algorithm> #include <chrono> #include <cstdint> #include <cstdlib> #include <cstring> #include <fstream> #include <iostream> #include <string> #include <vector> struct TItem { int price; int weight; int index; bool operator<(const TItem& other) const { return (double)price / weight > (double)other.price / other.weight; } }; const int BLOCK_SIZE = 32; const int THREADS_PER_BLOCK = 192; void BranchCPU(ssize_t e, int* w, int* p, int* s, int* U_old, uint32_t* X_old, int block_count, int k, int* weight, int* price) { int s_e = s[e]; if (k < s_e) { w[e] -= weight[k]; p[e] -= price[k]; X_old[e * block_count + k / BLOCK_SIZE] |= (1 << (k % BLOCK_SIZE)); } else { ++s[e]; U_old[e] = 0; } } __global__ void BranchGPU(int* w, int* p, int* s, int* U_old, uint32_t* X_old, int block_count, int k, int* weight, int* price, ssize_t q) { ssize_t e = blockIdx.x * blockDim.x + threadIdx.x; if (e >= q) { return; } int s_e = s[e]; if (k < s_e) { w[e] -= weight[k]; p[e] -= price[k]; X_old[e * block_count + k / BLOCK_SIZE] |= (1 << (k % BLOCK_SIZE)); } else { ++s[e]; U_old[e] = 0; } } void BoundCPU(ssize_t e, int* w, int* p, int* s, int* L, int* U, uint32_t* L_set, int block_count, int k, int n, int W, int* weight, int* price) { int i = s[e], w_e = w[e], p_e = p[e], weight_i = 0, price_i = 0; for (; ; ++i) { weight_i = weight[i]; price_i = price[i]; if (i < n && w_e + weight_i <= W) { w_e += weight_i; p_e += price_i; L_set[e * block_count + i / BLOCK_SIZE] |= (1 << (i % BLOCK_SIZE)); } else { break; } } U[e] = p_e + (weight_i ? (W - w_e) * price_i / weight_i : 0); w[e] = w_e; p[e] = p_e; s[e] = i; for (; i < n; ++i) { weight_i = weight[i]; price_i = price[i]; if (w_e + weight_i <= W) { w_e += weight_i; p_e += price_i; L_set[e * block_count + i / BLOCK_SIZE] |= (1 << (i % BLOCK_SIZE)); } } L[e] = p_e; } __global__ void BoundGPU(int* w, int* p, int* s, int* L, int* U, uint32_t* L_set, int block_count, int k, int n, int W, int* weight, int* price, ssize_t q) { ssize_t e = blockIdx.x * blockDim.x + threadIdx.x; if (e >= q) { return; } int i = s[e], w_e = w[e], p_e = p[e], weight_i = 0, price_i = 0; for (; ; ++i) { weight_i = weight[i]; price_i = price[i]; if (i < n && w_e + weight_i <= W) { w_e += weight_i; p_e += price_i; L_set[e * block_count + i / BLOCK_SIZE] |= (1 << (i % BLOCK_SIZE)); } else { break; } } U[e] = p_e + (weight_i ? 
(W - w_e) * price_i / weight_i : 0); w[e] = w_e; p[e] = p_e; s[e] = i; for (; i < n; ++i) { weight_i = weight[i]; price_i = price[i]; if (w_e + weight_i <= W) { w_e += weight_i; p_e += price_i; L_set[e * block_count + i / BLOCK_SIZE] |= (1 << (i % BLOCK_SIZE)); } } L[e] = p_e; } int main(int argc, char* argv[]) { if (argc != 3) { std::cerr << "Usage: " << argv[0] << " input_file output_file" << std::endl; return 0; } std::ifstream fin(argv[1]); std::ofstream fout(argv[2]); int n, W; fin >> n >> W; std::vector<TItem> items(n); for (int i = 0; i < n; ++i) { fin >> items[i].price >> items[i].weight; items[i].index = i + 1; } std::sort(items.begin(), items.end()); int* weight = (int*)malloc((n + 1) * sizeof(*weight)); int* price = (int*)malloc((n + 1) * sizeof(*price)); for (int i = 0; i < n; ++i) { weight[i] = items[i].weight; price[i] = items[i].price; } weight[n] = price[n] = 0; std::chrono::high_resolution_clock::time_point total_start = std::chrono::high_resolution_clock::now(); int *cuda_weight = nullptr, *cuda_price = nullptr; ssize_t q = 1; int* w = (int*)malloc(q * sizeof(*w)); int* p = (int*)malloc(q * sizeof(*p)); int* s = (int*)malloc(q * sizeof(*s)); int* L = (int*)malloc(q * sizeof(*L)); int* U = (int*)malloc(q * sizeof(*U)); const int block_count = (n + BLOCK_SIZE - 1) / BLOCK_SIZE; uint32_t* X = (uint32_t*)calloc(q * block_count, sizeof(*X)); w[0] = p[0] = s[0] = 0; uint32_t* record_set = (uint32_t*)calloc(block_count, sizeof(*X)); BoundCPU(0, w, p, s, L, U, record_set, block_count, 0, n, W, weight, price); int record = L[0]; free(L); for (int k = 0; k < n; ++k) { std::cout << "Step " << k + 1 << ", q = " << q << std::endl; if (q > 5000000) { if (cuda_weight == nullptr) { cudaMalloc(&cuda_weight, (n + 1) * sizeof(*cuda_weight)); cudaMalloc(&cuda_price, (n + 1) * sizeof(*cuda_price)); cudaMemcpy(cuda_weight, weight, (n + 1) * sizeof(*cuda_weight), cudaMemcpyHostToDevice); cudaMemcpy(cuda_price, price, (n + 1) * sizeof(*cuda_price), cudaMemcpyHostToDevice); } int *w_new, *p_new, *s_new, *L_new, *U_new, *U_old; uint32_t *X_old, *L_new_set; cudaMalloc(&w_new, q * sizeof(*w_new)); cudaMalloc(&p_new, q * sizeof(*p_new)); cudaMalloc(&s_new, q * sizeof(*s_new)); cudaMalloc(&U_old, q * sizeof(*U_old)); cudaMalloc(&X_old, q * block_count * sizeof(*X_old)); cudaMemcpy(w_new, w, q * sizeof(*w), cudaMemcpyHostToDevice); cudaMemcpy(p_new, p, q * sizeof(*p), cudaMemcpyHostToDevice); cudaMemcpy(s_new, s, q * sizeof(*s), cudaMemcpyHostToDevice); cudaMemcpy(U_old, U, q * sizeof(*U), cudaMemcpyHostToDevice); cudaMemcpy(X_old, X, q * block_count * sizeof(*X), cudaMemcpyHostToDevice); const ssize_t q_block = (q + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; BranchGPU<<<q_block, THREADS_PER_BLOCK>>>(w_new, p_new, s_new, U_old, X_old, block_count, k, cuda_weight, cuda_price, q); cudaDeviceSynchronize(); cudaMemcpy(U, U_old, q * sizeof(*U), cudaMemcpyDeviceToHost); cudaFree(U_old); cudaMalloc(&L_new, q * sizeof(*L_new)); cudaMalloc(&U_new, q * sizeof(*U_new)); cudaMalloc(&L_new_set, q * block_count * sizeof(*X)); cudaMemcpy(L_new_set, X, q * block_count * sizeof(*X), cudaMemcpyHostToDevice); BoundGPU<<<q_block, THREADS_PER_BLOCK>>>(w_new, p_new, s_new, L_new, U_new, L_new_set, block_count, k, n, W, cuda_weight, cuda_price, q); cudaDeviceSynchronize(); w = (int*)realloc(w, 2 * q * sizeof(*w)); p = (int*)realloc(p, 2 * q * sizeof(*p)); s = (int*)realloc(s, 2 * q * sizeof(*s)); U = (int*)realloc(U, 2 * q * sizeof(*U)); X = (uint32_t*)realloc(X, 2 * q * block_count * sizeof(*X)); memcpy(X + q * 
block_count, X, q * block_count * sizeof(*X)); cudaMemcpy(w + q, w_new, q * sizeof(*w), cudaMemcpyDeviceToHost); cudaMemcpy(p + q, p_new, q * sizeof(*p), cudaMemcpyDeviceToHost); cudaMemcpy(s + q, s_new, q * sizeof(*s), cudaMemcpyDeviceToHost); cudaMemcpy(U + q, U_new, q * sizeof(*U), cudaMemcpyDeviceToHost); cudaMemcpy(X, X_old, q * block_count * sizeof(*X), cudaMemcpyDeviceToHost); cudaFree(w_new); cudaFree(p_new); cudaFree(s_new); cudaFree(U_new); cudaFree(X_old); int *L_new_CPU = (int*)malloc(q * sizeof(*L_new_CPU)); uint32_t* L_new_set_CPU = (uint32_t*)malloc(q * block_count * sizeof(*X)); cudaMemcpy(L_new_CPU, L_new, q * sizeof(*L_new), cudaMemcpyDeviceToHost); cudaMemcpy(L_new_set_CPU, L_new_set, q * block_count * sizeof(*X), cudaMemcpyDeviceToHost); cudaFree(L_new); cudaFree(L_new_set); for (ssize_t e = 0; e < q; ++e) { if (L_new_CPU[e] > record) { record = L_new_CPU[e]; memcpy(record_set, L_new_set_CPU + e * block_count, block_count * sizeof(*X)); for (int i = k + 1; i < s[q + e]; ++i) { record_set[i / BLOCK_SIZE] |= (1 << (i % BLOCK_SIZE)); } } } free(L_new_CPU); free(L_new_set_CPU); } else { w = (int*)realloc(w, 2 * q * sizeof(*w)); p = (int*)realloc(p, 2 * q * sizeof(*p)); s = (int*)realloc(s, 2 * q * sizeof(*s)); X = (uint32_t*)realloc(X, 2 * q * block_count * sizeof(*X)); memcpy(w + q, w, q * sizeof(*w)); memcpy(p + q, p, q * sizeof(*p)); memcpy(s + q, s, q * sizeof(*s)); memcpy(X + q * block_count, X, q * block_count * sizeof(*X)); for (ssize_t e = 0; e < q; ++e) { BranchCPU(e, w + q, p + q, s + q, U, X, block_count, k, weight, price); } U = (int*)realloc(U, 2 * q * sizeof(*U)); int* L_new = (int*)malloc(q * sizeof(*L_new)); uint32_t* L_new_set = (uint32_t*)malloc(q * block_count * sizeof(*X)); memcpy(L_new_set, X + q * block_count, q * block_count * sizeof(*X)); for (ssize_t e = 0; e < q; ++e) { BoundCPU(e, w + q, p + q, s + q, L_new, U + q, L_new_set, block_count, k, n, W, weight, price); if (L_new[e] > record) { record = L_new[e]; memcpy(record_set, L_new_set + e * block_count, block_count * sizeof(*X)); for (int i = k + 1; i < s[q + e]; ++i) { record_set[i / BLOCK_SIZE] |= (1 << (i % BLOCK_SIZE)); } } } free(L_new); free(L_new_set); } for (ssize_t i = 0, j = 2 * q - 1; ;) { while (i < 2 * q && U[i] > record) { ++i; } while (j >= 0 && U[j] <= record) { --j; } if (i >= j) { q = j + 1; break; } w[i] = w[j]; p[i] = p[j]; s[i] = s[j]; std::swap(U[i], U[j]); memcpy(X + i * block_count, X + j * block_count, block_count * sizeof(*X)); } if (q == 0) { break; } } free(w); free(p); free(s); free(U); free(weight); free(price); if (cuda_weight != nullptr) { cudaFree(cuda_weight); cudaFree(cuda_price); } std::chrono::high_resolution_clock::time_point total_end = std::chrono::high_resolution_clock::now(); double total_time = std::chrono::duration_cast<std::chrono::duration<double>>(total_end - total_start).count(); std::cout << "Total time: " << total_time << std::endl; fout << record << std::endl; std::vector<int> record_ind; for (int i = 0; i < n; ++i) { if (record_set[i / BLOCK_SIZE] & (1 << (i % BLOCK_SIZE))) { record_ind.push_back(items[i].index); } } std::sort(record_ind.begin(), record_ind.end()); for (auto ind : record_ind) { fout << ind << " "; } fout << std::endl; free(record_set); return 0; }
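Both versions above solve the 0/1 knapsack problem with branch and bound: items are sorted by price/weight ratio, BranchCPU/BranchGPU split each subproblem on item k, and BoundCPU/BoundGPU compute a greedy lower bound L and a fractional upper bound U used to prune nodes with U <= record, switching to the GPU kernels once the frontier exceeds five million nodes. For small instances the final `record` can be cross-checked against the classical O(n*W) dynamic program. The snippet below is an illustrative host-only reference, assuming the same input format (n and W, then price/weight pairs); it is not part of the original sources.

#include <algorithm>
#include <fstream>
#include <iostream>
#include <vector>

// Reference 0/1 knapsack via dynamic programming over capacities.
// best[c] = maximum total price achievable with capacity c using the items seen so far.
int main(int argc, char* argv[]) {
    if (argc != 2) {
        std::cerr << "Usage: " << argv[0] << " input_file" << std::endl;
        return 0;
    }
    std::ifstream fin(argv[1]);
    int n, W;
    fin >> n >> W;
    std::vector<int> price(n), weight(n);
    for (int i = 0; i < n; ++i) {
        fin >> price[i] >> weight[i];
    }

    std::vector<long long> best(W + 1, 0);
    for (int i = 0; i < n; ++i) {
        // Iterate capacities downwards so each item is used at most once.
        for (int c = W; c >= weight[i]; --c) {
            best[c] = std::max(best[c], best[c - weight[i]] + price[i]);
        }
    }
    // This value should match the `record` written by the branch-and-bound solver.
    std::cout << best[W] << std::endl;
    return 0;
}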
75abfdc8abf8be8b8957cfb28063d245b3493a26.hip
// !!! This is a file automatically generated by hipify!!! #include <ATen/Context.h> #include <ATen/hip/HIPContext.h> #include <ATen/Dispatch.h> #include <ATen/NativeFunctions.h> #include <ATen/hip/PinnedMemoryAllocator.h> #include <ATen/hip/HIPSolver.h> #include <ATen/hip/HIPBlas.h> #include <ATen/hip/HIPEvent.h> #include <ATen/hip/impl/HIPStreamMasqueradingAsCUDA.h> #include <ATen/native/LinearAlgebraUtils.h> #include <ATen/native/hip/MiscUtils.h> #include <ATen/native/hip/BatchLinearAlgebraLib.h> namespace at { namespace native { // Some cuBLAS and cuSOLVER batched routines require input to be a device array of pointers to device individual matrices // 'input' must be a contiguous tensor template <typename scalar_t> static Tensor get_device_pointers(const Tensor& input) { auto input_data = input.data_ptr<scalar_t>(); int64_t input_mat_stride = matrixStride(input); // cublas/cusolver interface requires 'int' int batch_size = cuda_int_cast(batchCount(input), "batch_size"); // if batch_size==0, then start=0 and end=0 // if input_mat_stride==0, then step=sizeof(scalar_t) return at::arange( /*start=*/reinterpret_cast<int64_t>(input_data), /*end=*/reinterpret_cast<int64_t>(input_data + batch_size * input_mat_stride), /*step=*/static_cast<int64_t>(std::max<int64_t>(input_mat_stride, 1) * sizeof(scalar_t)), input.options().dtype(at::kLong)); } template <typename scalar_t> void apply_geqrf_batched(const Tensor& input, const Tensor& tau) { // AMD ROCm backend is implemented via rewriting all CUDA calls to HIP // rocBLAS does not implement BLAS-like extensions of cuBLAS, they're in rocSOLVER // rocSOLVER is currently not used in ATen, therefore we raise an error in this case #ifndef CUDART_VERSION TORCH_CHECK(false, "geqrf: Batched version is supported only with cuBLAS backend.") #else auto batch_size = cuda_int_cast(batchCount(input), "batch_size"); auto m = cuda_int_cast(input.size(-2), "m"); auto n = cuda_int_cast(input.size(-1), "n"); auto lda = std::max<int>(1, m); // cuBLAS batched geqrf requires input to be the device array of pointers to device single matrices Tensor input_ptr_array = get_device_pointers<scalar_t>(input); Tensor tau_ptr_array = get_device_pointers<scalar_t>(tau.unsqueeze(-1)); auto input_ptr_array_data = reinterpret_cast<scalar_t**>(input_ptr_array.data_ptr()); auto tau_ptr_array_data = reinterpret_cast<scalar_t**>(tau_ptr_array.data_ptr()); int info; auto handle = at::cuda::getCurrentCUDABlasHandle(); at::cuda::blas::geqrfBatched(handle, m, n, input_ptr_array_data, lda, tau_ptr_array_data, &info, batch_size); // info only indicates wrong arguments to geqrfBatched call // info is a host variable, we can check it without device synchronization TORCH_INTERNAL_ASSERT(info == 0); #endif } void geqrf_batched_cublas(const Tensor& input, const Tensor& tau) { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(input.scalar_type(), "geqrf_batched_cuda", [&]{ apply_geqrf_batched<scalar_t>(input, tau); }); } template <typename scalar_t> static void apply_lu_solve_batched_cublas(const Tensor& b, const Tensor& lu, const Tensor& pivots) { #ifndef CUDART_VERSION TORCH_CHECK(false, "lu_solve: cuBLAS backend for lu_solve is not available.") #else hipblasOperation_t trans = HIPBLAS_OP_N; auto pivots_data = pivots.data_ptr<int>(); auto batch_size = cuda_int_cast(batchCount(lu), "batch_size");; auto m = cuda_int_cast(lu.size(-2), "m"); auto nrhs = cuda_int_cast(b.size(-1), "nrhs"); auto lda = cuda_int_cast(std::max<int>(1, m), "lda"); int info = 0; Tensor lu_ptr_array = get_device_pointers<scalar_t>(lu); 
Tensor b_ptr_array = get_device_pointers<scalar_t>(b); auto lu_ptr_array_data = reinterpret_cast<scalar_t**>(lu_ptr_array.data_ptr()); auto b_ptr_array_data = reinterpret_cast<scalar_t**>(b_ptr_array.data_ptr()); auto handle = at::cuda::getCurrentCUDABlasHandle(); at::cuda::blas::getrsBatched(handle, trans, m, nrhs, lu_ptr_array_data, lda, pivots_data, b_ptr_array_data, lda, &info, batch_size); TORCH_INTERNAL_ASSERT_DEBUG_ONLY(info == 0); #endif } void lu_solve_batched_cublas(const Tensor& b, const Tensor& lu, const Tensor& pivots) { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(lu.scalar_type(), "lu_solve_cublas", [&]{ apply_lu_solve_batched_cublas<scalar_t>(b, lu, pivots); }); } template <typename scalar_t> static void apply_triangular_solve(Tensor& A, Tensor& B, bool upper, bool transpose, bool conjugate_transpose, bool unitriangular) { hipblasFillMode_t uplo = upper ? HIPBLAS_FILL_MODE_UPPER : HIPBLAS_FILL_MODE_LOWER; hipblasOperation_t trans = transpose ? HIPBLAS_OP_T : HIPBLAS_OP_N; trans = conjugate_transpose ? HIPBLAS_OP_C : trans; hipblasDiagType_t diag = unitriangular ? HIPBLAS_DIAG_UNIT : HIPBLAS_DIAG_NON_UNIT; hipblasSideMode_t side = HIPBLAS_SIDE_LEFT; auto A_data = A.data_ptr<scalar_t>(); auto B_data = B.data_ptr<scalar_t>(); auto A_mat_stride = matrixStride(A); auto B_mat_stride = matrixStride(B); auto batch_size = batchCount(A); auto m = cuda_int_cast(A.size(-2), "m"); auto n = cuda_int_cast(A.size(-1), "n"); auto nrhs = cuda_int_cast(B.size(-1), "nrhs"); auto lda = std::max<int>(1, m); auto alpha = scalar_t{1}; for (decltype(batch_size) i = 0; i < batch_size; i++) { scalar_t* A_working_ptr = &A_data[i * A_mat_stride]; scalar_t* B_working_ptr = &B_data[i * B_mat_stride]; auto handle = at::cuda::getCurrentCUDABlasHandle(); at::cuda::blas::trsm(handle, side, uplo, trans, diag, n, nrhs, &alpha, A_working_ptr, lda, B_working_ptr, lda); } } void triangular_solve_cublas(Tensor& A, Tensor& B, Tensor& infos, bool upper, bool transpose, bool conjugate_transpose, bool unitriangular) { (void)infos; // unused AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(A.scalar_type(), "triangular_solve_cuda", [&]{ apply_triangular_solve<scalar_t>(A, B, upper, transpose, conjugate_transpose, unitriangular); }); } template <typename scalar_t> static void apply_triangular_solve_batched(Tensor& A, Tensor& B, bool upper, bool transpose, bool conjugate_transpose, bool unitriangular) { hipblasFillMode_t uplo = upper ? HIPBLAS_FILL_MODE_UPPER : HIPBLAS_FILL_MODE_LOWER; hipblasOperation_t trans = transpose ? HIPBLAS_OP_T : HIPBLAS_OP_N; trans = conjugate_transpose ? HIPBLAS_OP_C : trans; hipblasDiagType_t diag = unitriangular ? 
HIPBLAS_DIAG_UNIT : HIPBLAS_DIAG_NON_UNIT; hipblasSideMode_t side = HIPBLAS_SIDE_LEFT; auto A_data = A.data_ptr<scalar_t>(); auto B_data = B.data_ptr<scalar_t>(); auto A_mat_stride = matrixStride(A); auto B_mat_stride = matrixStride(B); auto batch_size = cuda_int_cast(batchCount(A), "batch_size"); auto m = cuda_int_cast(A.size(-2), "m"); auto n = cuda_int_cast(A.size(-1), "n"); auto nrhs = cuda_int_cast(B.size(-1), "nrhs"); auto lda = std::max<int>(1, m); auto alpha = scalar_t{1}; // cuBLAS batched trsm requires input to be the device array of pointers to device single matrices Tensor A_ptr_array = get_device_pointers<scalar_t>(A); Tensor B_ptr_array = get_device_pointers<scalar_t>(B); auto A_ptr_array_data = reinterpret_cast<scalar_t**>(A_ptr_array.data_ptr()); auto B_ptr_array_data = reinterpret_cast<scalar_t**>(B_ptr_array.data_ptr()); auto handle = at::cuda::getCurrentCUDABlasHandle(); at::cuda::blas::trsmBatched(handle, side, uplo, trans, diag, n, nrhs, &alpha, A_ptr_array_data, lda, B_ptr_array_data, lda, batch_size); } void triangular_solve_batched_cublas(Tensor& A, Tensor& B, Tensor& infos, bool upper, bool transpose, bool conjugate_transpose, bool unitriangular) { (void)infos; // unused AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(A.scalar_type(), "triangular_solve_cuda", [&]{ apply_triangular_solve_batched<scalar_t>(A, B, upper, transpose, conjugate_transpose, unitriangular); }); } template <typename scalar_t> inline void apply_gels_batched(const Tensor& A, Tensor& B, Tensor& infos) { // AMD ROCm backend is implemented via rewriting all CUDA calls to HIP // rocBLAS does not implement BLAS-like extensions of cuBLAS, they're in rocSOLVER // rocSOLVER is currently not used in ATen, therefore we raise an error in this case #ifndef CUDART_VERSION TORCH_CHECK(false, "torch.linalg.lstsq: Batched version is supported only with cuBLAS backend.") #else auto trans = HIPBLAS_OP_N; auto m = cuda_int_cast(A.size(-2), "m"); auto n = cuda_int_cast(A.size(-1), "n"); auto nrhs = cuda_int_cast(B.size(-1), "nrhs"); // cuBLAS from cuda10 and older doesn't work with nrhs == 0 (cuda11 works) // so we need to put this early return if (nrhs == 0) { return; } auto batch_size = cuda_int_cast(batchCount(B), "batch_size"); auto lda = std::max<int>(1, m); auto ldb = std::max<int>(1, m); // cuBLAS's requirement TORCH_CHECK( m >= n, "torch.linalg.lstsq: only overdetermined systems (input.size(-2) >= input.size(-1)) are allowed on CUDA with cuBLAS backend."); // cuBLAS documentation says: // Matrices Aarray[i] should not overlap; otherwise, undefined behavior is expected. 
// explicitly broadcast the batch dimensions of A IntArrayRef A_batch_sizes(A.sizes().data(), A.dim() - 2); IntArrayRef B_batch_sizes(B.sizes().data(), B.dim() - 2); std::vector<int64_t> expand_batch_portion = at::infer_size(A_batch_sizes, B_batch_sizes); expand_batch_portion.insert(expand_batch_portion.end(), {A.size(-2), A.size(-1)}); Tensor A_expanded = A.expand({expand_batch_portion}); Tensor A_broadcasted = cloneBatchedColumnMajor(A_expanded); // cuBLAS batched gels requires input to be the device array of pointers to device single matrices Tensor A_ptr_array = get_device_pointers<scalar_t>(A_broadcasted); Tensor B_ptr_array = get_device_pointers<scalar_t>(B); auto A_ptr_array_data = reinterpret_cast<scalar_t**>(A_ptr_array.data_ptr()); auto B_ptr_array_data = reinterpret_cast<scalar_t**>(B_ptr_array.data_ptr()); auto infos_data = infos.data_ptr<int>(); auto handle = at::cuda::getCurrentCUDABlasHandle(); int info; at::cuda::blas::gelsBatched<scalar_t>( handle, trans, m, n, nrhs, A_ptr_array_data, lda, B_ptr_array_data, ldb, &info, infos_data, batch_size); // negative info indicates that an argument to gelsBatched call is invalid TORCH_INTERNAL_ASSERT(info == 0); #endif } // This is a type dispatching helper function for 'apply_gels_batched' void gels_batched_cublas(const Tensor& a, Tensor& b, Tensor& infos) { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(a.scalar_type(), "gels_batched_cublas", [&]{ apply_gels_batched<scalar_t>(a, b, infos); }); } #ifdef USE_CUSOLVER inline static Tensor column_major_identity_matrix_like(const Tensor& self) { auto size = self.sizes(); auto size_slice = IntArrayRef(size.data(), size.size()-1); return at::ones(size_slice, self.options()).diag_embed().transpose(-2, -1); } template <typename scalar_t> inline static void _apply_single_inverse_helper(scalar_t* self_ptr, scalar_t* self_inv_ptr, int* ipiv_ptr, int* info_getrf_ptr, int* info_getrs_ptr, int n, int lda) { // self_inv_ptr should already be an identity matrix auto handle = at::cuda::getCurrentCUDASolverDnHandle(); at::cuda::solver::getrf<scalar_t>(handle, n, n, self_ptr, lda, ipiv_ptr, info_getrf_ptr); at::cuda::solver::getrs<scalar_t>(handle, n, n, self_ptr, lda, ipiv_ptr, self_inv_ptr, lda, info_getrs_ptr); } template <typename scalar_t> static void apply_batched_inverse_lib(Tensor& self, Tensor& self_inv, Tensor& infos_getrf, Tensor& infos_getrs) { const int batch_size = cuda_int_cast(batchCount(self), "batchCount"); const int n = cuda_int_cast(self.size(-2), "self.size(-2)"); const int lda = std::max<int>(1, n); auto self_data = self.data_ptr<scalar_t>(); auto self_mat_stride = matrixStride(self); auto self_inv_data = self_inv.data_ptr<scalar_t>(); auto self_inv_mat_stride = matrixStride(self_inv); auto infos_getrf_data = infos_getrf.data_ptr<int>(); auto infos_getrs_data = infos_getrs.data_ptr<int>(); auto& allocator = *::c10::hip::HIPCachingAllocatorMasqueradingAsCUDA::get(); // Heuristic: For small batch size or large matrix size, we use for-loop to iterate over the batches instead of // calling the batched cublas routine. 
if (batch_size <= 8 || /* batch_size > 8 && */ n >= 512) { for (int64_t i = 0; i < batch_size; i++) { auto dataPtr = allocator.allocate(sizeof(int) * lda); int* pivot = reinterpret_cast<int*>(dataPtr.get()); int* infos_getrf_working_ptr = &infos_getrf_data[i]; int* infos_getrs_working_ptr = &infos_getrs_data[i]; _apply_single_inverse_helper<scalar_t>( &self_data[i * self_mat_stride], &self_inv_data[i * self_inv_mat_stride], pivot, infos_getrf_working_ptr, infos_getrs_working_ptr, n, lda); } } else { // cublas batched kernels require input be "device array of device pointers" Tensor self_array = at::arange( reinterpret_cast<int64_t>(self_data), reinterpret_cast<int64_t>(&self_data[(batch_size-1) * self_mat_stride]) + 1, static_cast<int64_t>(self_mat_stride * sizeof(scalar_t)), self.options().dtype(at::kLong)); Tensor self_inv_array = at::arange( reinterpret_cast<int64_t>(self_inv_data), reinterpret_cast<int64_t>(&self_inv_data[(batch_size-1) * self_inv_mat_stride]) + 1, static_cast<int64_t>(self_inv_mat_stride * sizeof(scalar_t)), self.options().dtype(at::kLong)); auto dataPtr = allocator.allocate(sizeof(int)*batch_size*lda); int* ipiv_array = reinterpret_cast<int*>(dataPtr.get()); at::cuda::blas::getrfBatched<scalar_t>(n, reinterpret_cast<scalar_t**>(self_array.data_ptr()), lda, ipiv_array, infos_getrf_data, batch_size); at::cuda::blas::getriBatched<scalar_t>(n, reinterpret_cast<scalar_t**>(self_array.data_ptr()), lda, ipiv_array, reinterpret_cast<scalar_t**>(self_inv_array.data_ptr()), lda, infos_getrs_data, batch_size); } } template <typename scalar_t> static void apply_single_inverse_lib(const Tensor& self, Tensor& self_inv, Tensor& infos_getrf, Tensor& infos_getrs) { int n = cuda_int_cast(self.size(-2), "self.size(-2)"); int lda = std::max<int>(1, n); Tensor ipiv = at::empty({lda}, self.options().dtype(at::kInt)); _apply_single_inverse_helper<scalar_t>( self.data_ptr<scalar_t>(), self_inv.data_ptr<scalar_t>(), ipiv.data_ptr<int>(), infos_getrf.data_ptr<int>(), infos_getrs.data_ptr<int>(), n, lda); } // This is a type dispatching helper function for 'apply_batched_inverse_lib' and 'apply_single_inverse_lib' Tensor& _linalg_inv_out_helper_cuda_lib(Tensor& result, Tensor& infos_getrf, Tensor& infos_getrs) { // assuming result is in column major order and contains the matrices to invert Tensor input_working_copy = cloneBatchedColumnMajor(result); // for getrf + getrs (cusolver path) // result should be filled with identity matrices result.zero_(); result.diagonal(/*offset=*/0, /*dim1=*/-2, /*dim2=*/-1).fill_(1); const int batch_size = cuda_int_cast(batchCount(result), "batchCount"); if (result.dim() > 2) { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "linalg_inv_out_cuda", [&]{ apply_batched_inverse_lib<scalar_t>( input_working_copy, result, infos_getrf, infos_getrs); }); } else { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "linalg_inv_out_cuda", [&]{ apply_single_inverse_lib<scalar_t>(input_working_copy, result, infos_getrf, infos_getrs); }); } return result; } // entrance of calculations of `inverse` using cusolver getrf + getrs, cublas getrfBatched + getriBatched Tensor _inverse_helper_cuda_lib(const Tensor& self) { Tensor self_working_copy = cloneBatchedColumnMajor(self); Tensor self_inv_working_copy = column_major_identity_matrix_like(self_working_copy); const int batch_size = cuda_int_cast(batchCount(self), "batchCount"); if (self.dim() > 2 && batch_size > 1) { Tensor infos_getrf = at::zeros({std::max<int64_t>(1, batchCount(self))}, 
self.options().dtype(kInt)); Tensor infos_getrs = at::zeros({std::max<int64_t>(1, batchCount(self))}, self.options().dtype(kInt)); AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "inverse_cuda", [&]{ apply_batched_inverse_lib<scalar_t>( self_working_copy, self_inv_working_copy, infos_getrf, infos_getrs); }); batchCheckErrors(infos_getrf, "inverse_cuda"); batchCheckErrors(infos_getrs, "inverse_cuda"); } else { Tensor infos_getrf = at::zeros({1}, self.options().dtype(kInt)); Tensor infos_getrs = at::zeros({1}, self.options().dtype(kInt)); AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "inverse_cuda", [&]{ apply_single_inverse_lib<scalar_t>(self_working_copy, self_inv_working_copy, infos_getrf, infos_getrs); }); batchCheckErrors(infos_getrf, "inverse_cuda"); batchCheckErrors(infos_getrs, "inverse_cuda"); } return self_inv_working_copy; } // call cusolver gesvdj function to calculate svd template<typename scalar_t> inline static void _apply_svd_lib_gesvdj(const Tensor& self, Tensor& U, Tensor& S, Tensor& VT, Tensor& infos, bool compute_uv, bool some) { using value_t = typename c10::scalar_value_type<scalar_t>::type; auto self_data = self.data_ptr<scalar_t>(); auto U_data = U.data_ptr<scalar_t>(); auto S_data = S.data_ptr<value_t>(); auto VT_data = VT.data_ptr<scalar_t>(); auto self_stride = matrixStride(self); auto U_stride = matrixStride(U); auto S_stride = S.size(-1); auto VT_stride = matrixStride(VT); int batchsize = cuda_int_cast(batchCount(self), "batch size"); int m = cuda_int_cast(self.size(-2), "m"); int n = cuda_int_cast(self.size(-1), "n"); int lda = std::max<int>(1, m); int ldvt = std::max<int>(1, n); for(int i = 0; i < batchsize; i++){ // gesvdj_params controls the numerical accuracy of cusolver gesvdj iterations on GPU hipsolverGesvdjInfo_t gesvdj_params; TORCH_CUSOLVER_CHECK(hipsolverDnCreateGesvdjInfo(&gesvdj_params)); // TORCH_CUSOLVER_CHECK(hipsolverDnXgesvdjSetTolerance(gesvdj_params, 1.0e-7)); // TORCH_CUSOLVER_CHECK(hipsolverDnXgesvdjSetMaxSweeps(gesvdj_params, 15)); auto handle = at::cuda::getCurrentCUDASolverDnHandle(); auto jobz = compute_uv ? HIPSOLVER_EIG_MODE_VECTOR : HIPSOLVER_EIG_MODE_NOVECTOR; at::cuda::solver::gesvdj<scalar_t>( handle, jobz, /*econ=*/ some ? 
1 : 0, m, n, self_data + i * self_stride, lda, S_data + i * S_stride, U_data + i * U_stride, lda, VT_data + i * VT_stride, ldvt, infos.data_ptr<int>() + i, gesvdj_params ); TORCH_CUSOLVER_CHECK(hipsolverDnDestroyGesvdjInfo(gesvdj_params)); } } // wrapper around _apply_svd_lib_gesvdj that handles dtype dispatch, // creates a working copy of the input, and creates V^H from the V returned by gesvdj inline static void apply_svd_lib_gesvdj(const Tensor& self, Tensor& U, Tensor& S, Tensor& VT, Tensor& infos, bool compute_uv, bool some) { const int64_t m = self.size(-2); const int64_t n = self.size(-1); Tensor self_working_copy = cloneBatchedColumnMajor(self); VT = VT.transpose(-2, -1); // gesvdj returns V instead of V^H AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "svd_cuda_gesvdj", [&] { _apply_svd_lib_gesvdj<scalar_t>(self_working_copy, U, S, VT, infos, compute_uv, some); }); } // call cusolver gesvdj batched function to calculate svd template<typename scalar_t> inline static void _apply_svd_lib_gesvdjBatched(const Tensor& self, Tensor& U, Tensor& S, Tensor& VT, Tensor& infos, bool compute_uv) { using value_t = typename c10::scalar_value_type<scalar_t>::type; auto self_data = self.data_ptr<scalar_t>(); auto U_data = U.data_ptr<scalar_t>(); auto S_data = S.data_ptr<value_t>(); auto VT_data = VT.data_ptr<scalar_t>(); auto self_stride = matrixStride(self); auto U_stride = matrixStride(U); auto S_stride = S.size(-1); auto VT_stride = matrixStride(VT); int batchsize = cuda_int_cast(batchCount(self), "batch size"); int m = cuda_int_cast(self.size(-2), "m"); int n = cuda_int_cast(self.size(-1), "n"); int lda = std::max<int>(1, m); int ldvt = std::max<int>(1, n); TORCH_INTERNAL_ASSERT(m <= 32 && n <= 32, "gesvdjBatched requires both matrix dimensions not greater than 32, but got " "m = ", m, " n = ", n); // gesvdj_params controls the numerical accuracy of cusolver gesvdj iterations on GPU hipsolverGesvdjInfo_t gesvdj_params; TORCH_CUSOLVER_CHECK(hipsolverDnCreateGesvdjInfo(&gesvdj_params)); // TORCH_CUSOLVER_CHECK(hipsolverDnXgesvdjSetTolerance(gesvdj_params, 1.0e-7)); // TORCH_CUSOLVER_CHECK(hipsolverDnXgesvdjSetMaxSweeps(gesvdj_params, 15)); TORCH_CUSOLVER_CHECK(hipsolverDnXgesvdjSetSortEig(gesvdj_params, 1)); auto handle = at::cuda::getCurrentCUDASolverDnHandle(); auto jobz = compute_uv ? 
HIPSOLVER_EIG_MODE_VECTOR : HIPSOLVER_EIG_MODE_NOVECTOR; at::cuda::solver::gesvdjBatched<scalar_t>( handle, jobz, m, n, self_data, lda, S_data, U_data, lda, VT_data, ldvt, infos.data_ptr<int>(), gesvdj_params, batchsize ); TORCH_CUSOLVER_CHECK(hipsolverDnDestroyGesvdjInfo(gesvdj_params)); } // wrapper around _apply_svd_lib_gesvdjBatched that handles dtype dispatch, // creates a working copy of the input, and creates V^H from the V returned by gesvdj inline static void apply_svd_lib_gesvdjBatched(const Tensor& self, Tensor& U, Tensor& S, Tensor& VT, Tensor& infos, bool compute_uv) { const int64_t m = self.size(-2); const int64_t n = self.size(-1); Tensor self_working_copy = cloneBatchedColumnMajor(self); VT = VT.transpose(-2, -1); // gesvdj returns V instead of V^H AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "svd_cuda_gesvdjBatched", [&] { _apply_svd_lib_gesvdjBatched<scalar_t>(self_working_copy, U, S, VT, infos, compute_uv); }); } // entrance of calculations of `svd` using cusolver gesvdj and gesvdjBatched std::tuple<Tensor, Tensor, Tensor> _svd_helper_cuda_lib(const Tensor& self, bool some, bool compute_uv) { const int64_t batch_size = batchCount(self); at::Tensor infos = at::zeros({batch_size}, self.options().dtype(at::kInt)); const int64_t m = self.size(-2); const int64_t n = self.size(-1); const int64_t k = ::min(m, n); Tensor U_working_copy, S_working_copy, VT_working_copy; std::tie(U_working_copy, S_working_copy, VT_working_copy) = \ _create_U_S_VT(self, some, compute_uv, /* svd_use_cusolver = */ true); // U, S, V working copies are already column majored now // heuristic for using `gesvdjBatched` over `gesvdj` if (m <= 32 && n <= 32 && batch_size > 1 && (!some || m == n)) { apply_svd_lib_gesvdjBatched(self, U_working_copy, S_working_copy, VT_working_copy, infos, compute_uv); } else { apply_svd_lib_gesvdj(self, U_working_copy, S_working_copy, VT_working_copy, infos, compute_uv, some); } // A device-host sync will be performed. batchCheckErrors(infos, "svd_cuda"); if (!compute_uv) { VT_working_copy.zero_(); U_working_copy.zero_(); } if (some) { VT_working_copy = VT_working_copy.narrow(-2, 0, k); } // so far we have computed VT, but torch.svd returns V instead. Adjust accordingly. VT_working_copy.transpose_(-2, -1); return std::make_tuple(U_working_copy, S_working_copy, VT_working_copy); } // Implementation of Cholesky decomposition using looped cusolverDn<T>potrf or hipsolverDnXpotrf (64-bit) template<typename scalar_t> inline static void apply_cholesky_cusolver_potrf_looped(const Tensor& self_working_copy, bool upper, const Tensor& infos) { auto handle = at::cuda::getCurrentCUDASolverDnHandle(); const auto uplo = upper ? 
HIPBLAS_FILL_MODE_UPPER : HIPBLAS_FILL_MODE_LOWER; const int64_t n = self_working_copy.size(-1); const int64_t lda = std::max<int64_t>(1, n); const int64_t batch_size = batchCount(self_working_copy); const int64_t matrix_stride = matrixStride(self_working_copy); scalar_t* self_working_copy_ptr = self_working_copy.data_ptr<scalar_t>(); int* infos_ptr = infos.data_ptr<int>(); #ifdef USE_CUSOLVER_64_BIT size_t worksize_device; size_t worksize_host; hipsolverDnParams_t params; hipDataType datatype = at::cuda::solver::get_cusolver_datatype<scalar_t>(); TORCH_CUSOLVER_CHECK(cusolverDnCreateParams(&params)); at::cuda::solver::xpotrf_buffersize(handle, params, uplo, n, datatype, nullptr, lda, datatype, &worksize_device, &worksize_host); // allocate workspace storage auto& device_allocator = *at::cuda::getCUDADeviceAllocator(); auto workdata_device = device_allocator.allocate(worksize_device * batch_size); void* workdata_device_ptr = workdata_device.get(); auto& host_allocator = *at::getCPUAllocator(); auto workdata_host = host_allocator.allocate(worksize_host * batch_size); void* workdata_host_ptr = workdata_host.get(); for (int64_t i = 0; i < batch_size; i++) { at::cuda::solver::xpotrf( handle, params, uplo, n, datatype, self_working_copy_ptr + i * matrix_stride, lda, datatype, (char*)workdata_device_ptr + i * worksize_device, worksize_device, (char*)workdata_host_ptr + i * worksize_host, worksize_host, infos_ptr + i ); } TORCH_CUSOLVER_CHECK(cusolverDnDestroyParams(params)); #else // USE_CUSOLVER_64_BIT int n_32 = cuda_int_cast(n, "n"); int lda_32 = cuda_int_cast(lda, "lda"); int lwork; at::cuda::solver::potrf_buffersize<scalar_t>( handle, uplo, n_32, nullptr, lda_32, &lwork); // allocate workspace storage auto& allocator = *at::cuda::getCUDADeviceAllocator(); auto work_data = allocator.allocate(sizeof(scalar_t)*lwork * batch_size); scalar_t* work_data_ptr = static_cast<scalar_t*>(work_data.get()); for (int64_t i = 0; i < batch_size; i++) { at::cuda::solver::potrf<scalar_t>( handle, uplo, n_32, self_working_copy_ptr + i * matrix_stride, lda_32, work_data_ptr + i * lwork, lwork, infos_ptr + i ); } #endif // USE_CUSOLVER_64_BIT } // Implementation of Cholesky decomposition using batched cusolverDn<T>potrfBatched // Warning: cusolverDn<T>potrfBatched doesn't work quite well when matrix size or batch size is zero. // If you write your own C++ extension and use this function, make sure you do a zero numel check for the input. template<typename scalar_t> inline static void apply_cholesky_cusolver_potrfBatched(const Tensor& self_working_copy, bool upper, const Tensor& infos) { auto handle = at::cuda::getCurrentCUDASolverDnHandle(); const auto uplo = upper ? 
HIPBLAS_FILL_MODE_UPPER : HIPBLAS_FILL_MODE_LOWER; const int n = cuda_int_cast(self_working_copy.size(-1), "n"); const int lda = std::max<int>(1, n); const int batch_size = cuda_int_cast(batchCount(self_working_copy), "batch_size"); // cusolver batched kernels require input be "device array of device pointers" Tensor self_working_copy_array = get_device_pointers<scalar_t>(self_working_copy); at::cuda::solver::potrfBatched<scalar_t>( handle, uplo, n, reinterpret_cast<scalar_t**>(self_working_copy_array.data_ptr()), lda, infos.data_ptr<int>(), batch_size); } void cholesky_helper_cusolver(const Tensor& input, bool upper, const Tensor& info) { if (input.numel() == 0) { return; } if (use_cusolver_potrf_batched_ && batchCount(input) > 1) { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(input.scalar_type(), "cholesky_cusolver", [&] { apply_cholesky_cusolver_potrfBatched<scalar_t>(input, upper, info); }); } else { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(input.scalar_type(), "cholesky_cusolver", [&] { apply_cholesky_cusolver_potrf_looped<scalar_t>(input, upper, info); }); } } template<typename scalar_t> inline static void apply_cholesky_cusolver_potrs(Tensor& self_working_copy, const Tensor& A_column_major_copy, bool upper, Tensor& infos) { auto handle = at::cuda::getCurrentCUDASolverDnHandle(); const auto uplo = upper ? HIPBLAS_FILL_MODE_UPPER : HIPBLAS_FILL_MODE_LOWER; const int64_t n = self_working_copy.size(-2); const int64_t nrhs = self_working_copy.size(-1); const int64_t lda = std::max<int64_t>(1, n); const int64_t batch_size = batchCount(self_working_copy); const int64_t self_matrix_stride = matrixStride(self_working_copy); scalar_t* self_working_copy_ptr = self_working_copy.data_ptr<scalar_t>(); const scalar_t* A_ptr = A_column_major_copy.data_ptr<scalar_t>(); const int64_t A_matrix_stride = matrixStride(A_column_major_copy); const int64_t ldb = std::max<int64_t>(1, A_column_major_copy.size(-1)); int* infos_ptr = infos.data_ptr<int>(); #ifdef USE_CUSOLVER_64_BIT hipsolverDnParams_t params; hipDataType datatype = at::cuda::solver::get_cusolver_datatype<scalar_t>(); TORCH_CUSOLVER_CHECK(cusolverDnCreateParams(&params)); for (int64_t i = 0; i < batch_size; i++) { at::cuda::solver::xpotrs( handle, params, uplo, n, nrhs, datatype, A_ptr + i * A_matrix_stride, lda, datatype, self_working_copy_ptr + i * self_matrix_stride, ldb, infos_ptr ); } TORCH_CUSOLVER_CHECK(cusolverDnDestroyParams(params)); #else // USE_CUSOLVER_64_BIT int n_32 = cuda_int_cast(n, "n"); int nrhs_32 = cuda_int_cast(nrhs, "nrhs"); int lda_32 = cuda_int_cast(lda, "lda"); int ldb_32 = cuda_int_cast(ldb, "ldb"); for (int64_t i = 0; i < batch_size; i++) { at::cuda::solver::potrs<scalar_t>( handle, uplo, n_32, nrhs_32, A_ptr + i * A_matrix_stride, lda_32, self_working_copy_ptr + i * self_matrix_stride, ldb_32, infos_ptr ); } #endif // USE_CUSOLVER_64_BIT } // This code path is only dispatched to if MAGMA is not linked in the pytorch build. // cusolverDn<t>potrsBatched only supports nrhs == 1 template<typename scalar_t> inline static void apply_cholesky_cusolver_potrsBatched(Tensor& self_working_copy, const Tensor& A_column_major_copy, bool upper, Tensor& infos) { auto handle = at::cuda::getCurrentCUDASolverDnHandle(); const auto uplo = upper ? 
HIPBLAS_FILL_MODE_UPPER : HIPBLAS_FILL_MODE_LOWER; const int64_t n = self_working_copy.size(-2); const int64_t nrhs = self_working_copy.size(-1); const int64_t lda = std::max<int64_t>(1, n); const int64_t batch_size = batchCount(self_working_copy); const int64_t self_matrix_stride = matrixStride(self_working_copy); scalar_t* self_working_copy_ptr = self_working_copy.data_ptr<scalar_t>(); const scalar_t* A_ptr = A_column_major_copy.data_ptr<scalar_t>(); const int64_t A_matrix_stride = matrixStride(A_column_major_copy); const int64_t ldb = std::max<int64_t>(1, A_column_major_copy.size(-1)); int* infos_ptr = infos.data_ptr<int>(); auto self_ptr_array = get_device_pointers<scalar_t>(self_working_copy); auto A_ptr_array = get_device_pointers<scalar_t>(A_column_major_copy); at::cuda::solver::potrsBatched( handle, uplo, cuda_int_cast(n, "n"), cuda_int_cast(nrhs, "nrhs"), reinterpret_cast<scalar_t**>(A_ptr_array.data_ptr()), cuda_int_cast(lda, "lda"), reinterpret_cast<scalar_t**>(self_ptr_array.data_ptr()), cuda_int_cast(ldb, "ldb"), infos_ptr, cuda_int_cast(batch_size, "batch_size") ); } Tensor _cholesky_solve_helper_cuda_cusolver(const Tensor& self, const Tensor& A, bool upper) { const int64_t batch_size = batchCount(self); at::Tensor infos = at::zeros({1}, self.options().dtype(at::kInt)); at::Tensor self_working_copy = cloneBatchedColumnMajor(self); at::Tensor A_column_major_copy = cloneBatchedColumnMajor(A); const int64_t nrhs = self_working_copy.size(-1); // cusolverDn<t>potrsBatched only supports nrhs == 1 if (batch_size > 1 && nrhs == 1) { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "cholesky_cuda_potrs_batched", [&] { apply_cholesky_cusolver_potrsBatched<scalar_t>(self_working_copy, A_column_major_copy, upper, infos); }); } else { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "cholesky_cuda_potrs", [&] { apply_cholesky_cusolver_potrs<scalar_t>(self_working_copy, A_column_major_copy, upper, infos); }); } // info from potrs and potrsBatched only report if the i-th parameter is wrong, not about the matrix singularity, etc. // So we don't need to check it all the time. TORCH_INTERNAL_ASSERT_DEBUG_ONLY(infos.item().toInt() == 0); return self_working_copy; } void _cholesky_inverse_cusolver_potrs_based(Tensor& result, Tensor& infos, bool upper) { at::Tensor input_working_copy = cloneBatchedColumnMajor(result); at::Tensor infos_gpu = at::zeros({1}, result.options().dtype(at::kInt)); result.fill_(0); result.diagonal(/*offset=*/0, /*dim1=*/-2, /*dim2=*/-1).fill_(1); AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "cholesky_cuda_potri", [&] { apply_cholesky_cusolver_potrs<scalar_t>(result, input_working_copy, upper, infos_gpu); }); // Debug only: info of cusolver potrs only check if the i-th parameter is wrong // Function argument `infos` is a CPU tensor, the following copy will cause a device-host sync. // infos.copy_(infos_gpu); } Tensor& cholesky_inverse_kernel_impl_cusolver(Tensor &result, Tensor& infos, bool upper) { _cholesky_inverse_cusolver_potrs_based(result, infos, upper); return result; } /* The geqrf function computes the QR decomposition of a m x n matrix A. 
Args: * `A` - [in] Tensor with matrices for QR decomposition, [out] Tensor containing R in the upper triangle of A and elementary reflectors below the main diagonal of A * `tau` - Tensor containing the magnitudes of the elementary reflectors * `m` - The number of rows of `input` to consider * `n` - The number of columns of `input` to consider (actual sizes of `input` could be larger) For further details, please see the cuSOLVER documentation for GEQRF. */ template <typename scalar_t> static void apply_geqrf(const Tensor& A, const Tensor& tau) { int64_t m = A.size(-2); int64_t n = A.size(-1); int64_t lda = std::max<int64_t>(1, m); int64_t batch_size = batchCount(A); auto A_stride = matrixStride(A); auto tau_stride = tau.size(-1); auto A_data = A.data_ptr<scalar_t>(); auto tau_data = tau.data_ptr<scalar_t>(); auto infos = at::zeros({1}, A.options().dtype(at::kInt)); auto infos_data = infos.data_ptr<int>(); // get the optimal work size and allocate workspace tensor #ifdef USE_CUSOLVER_64_BIT size_t worksize_device; // workspaceInBytesOnDevice size_t worksize_host; // workspaceInBytesOnHost hipsolverDnParams_t params = NULL; // use default algorithm (currently it's the only option) at::cuda::solver::xgeqrf_bufferSize<scalar_t>( at::cuda::getCurrentCUDASolverDnHandle(), params, m, n, A_data, lda, tau_data, &worksize_device, &worksize_host); #else int lwork; int m_32 = cuda_int_cast(m, "m"); int n_32 = cuda_int_cast(n, "n"); int lda_32 = cuda_int_cast(lda, "lda"); at::cuda::solver::geqrf_bufferSize<scalar_t>( at::cuda::getCurrentCUDASolverDnHandle(), m_32, n_32, A_data, lda_32, &lwork); #endif // USE_CUSOLVER_64_BIT for (decltype(batch_size) i = 0; i < batch_size; i++) { scalar_t* A_working_ptr = &A_data[i * A_stride]; scalar_t* tau_working_ptr = &tau_data[i * tau_stride]; auto handle = at::cuda::getCurrentCUDASolverDnHandle(); #ifdef USE_CUSOLVER_64_BIT // allocate workspace storage on device and host auto& device_allocator = *at::cuda::getCUDADeviceAllocator(); auto work_device_data = device_allocator.allocate(worksize_device); auto& host_allocator = *at::getCPUAllocator(); auto work_host_data = host_allocator.allocate(worksize_host); at::cuda::solver::xgeqrf<scalar_t>( handle, params, m, n, A_working_ptr, lda, tau_working_ptr, static_cast<scalar_t*>(work_device_data.get()), worksize_device, static_cast<scalar_t*>(work_host_data.get()), worksize_host, infos_data); #else // allocate workspace storage on device auto& allocator = *at::cuda::getCUDADeviceAllocator(); auto work_data = allocator.allocate(sizeof(scalar_t) * std::max<int>(1, lwork)); at::cuda::solver::geqrf<scalar_t>( handle, m_32, n_32, A_working_ptr, lda_32, tau_working_ptr, static_cast<scalar_t*>(work_data.get()), lwork, infos_data); #endif // USE_CUSOLVER_64_BIT } // info from geqrf only reports if the i-th parameter is wrong, not about the matrix singularity // so we don't need to check it all the time TORCH_INTERNAL_ASSERT_DEBUG_ONLY(infos.item().toInt() == 0); } // This is a type dispatching helper function for 'apply_geqrf' void geqrf_cusolver(const Tensor& input, const Tensor& tau) { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(input.scalar_type(), "geqrf_cuda", [&]{ apply_geqrf<scalar_t>(input, tau); }); } /* The ormqr function multiplies Q with another matrix from a sequence of elementary reflectors, such as is produced by the geqrf function. Args: * `input` - Tensor with elementary reflectors below the diagonal, encoding the matrix Q. * `tau` - Tensor containing the magnitudes of the elementary reflectors. 
* `other` - [in] Tensor containing the matrix to be multiplied. [out] result of the matrix multiplication with Q. * `left` - bool, determining whether `other` is left- or right-multiplied with Q. * `transpose` - bool, determining whether to transpose (or conjugate transpose) Q before multiplying. For further details, please see the cuSOLVER documentation for ORMQR and UNMQR. */ template <typename scalar_t> static void apply_ormqr(const Tensor& input, const Tensor& tau, const Tensor& other, bool left, bool transpose) { using value_t = typename c10::scalar_value_type<scalar_t>::type; auto side = left ? HIPBLAS_SIDE_LEFT : HIPBLAS_SIDE_RIGHT; auto trans = transpose ? (input.is_complex() ? HIPBLAS_OP_C : HIPBLAS_OP_T) : HIPBLAS_OP_N; auto input_data = input.data_ptr<scalar_t>(); auto tau_data = tau.data_ptr<scalar_t>(); auto other_data = other.data_ptr<scalar_t>(); auto input_matrix_stride = matrixStride(input); auto other_matrix_stride = matrixStride(other); auto tau_stride = tau.size(-1); auto batch_size = batchCount(input); auto m = cuda_int_cast(other.size(-2), "m"); auto n = cuda_int_cast(other.size(-1), "n"); auto k = cuda_int_cast(tau.size(-1), "k"); auto lda = std::max<int>(1, left ? m : n); auto ldc = std::max<int>(1, m); // get the optimal work size and allocate workspace tensor int lwork; at::cuda::solver::ormqr_bufferSize<scalar_t>( at::cuda::getCurrentCUDASolverDnHandle(), side, trans, m, n, k, input_data, lda, tau_data, other_data, ldc, &lwork); auto info = at::zeros({1}, input.options().dtype(at::kInt)); auto info_data = info.data_ptr<int>(); for (auto i = decltype(batch_size){0}; i < batch_size; i++) { scalar_t* input_working_ptr = &input_data[i * input_matrix_stride]; scalar_t* other_working_ptr = &other_data[i * other_matrix_stride]; scalar_t* tau_working_ptr = &tau_data[i * tau_stride]; auto handle = at::cuda::getCurrentCUDASolverDnHandle(); // allocate workspace storage auto& allocator = *at::cuda::getCUDADeviceAllocator(); auto work_data = allocator.allocate(sizeof(scalar_t)*lwork); at::cuda::solver::ormqr<scalar_t>( handle, side, trans, m, n, k, input_working_ptr, lda, tau_working_ptr, other_working_ptr, ldc, static_cast<scalar_t*>(work_data.get()), lwork, info_data ); // info from ormqr only reports if the i-th parameter is wrong // so we don't need to check it all the time TORCH_INTERNAL_ASSERT_DEBUG_ONLY(info.item().toInt() == 0); } } // This is a type dispatching helper function for 'apply_ormqr' void ormqr_cusolver(const Tensor& input, const Tensor& tau, const Tensor& other, bool left, bool transpose) { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(input.scalar_type(), "orgmr_cuda", [&]{ apply_ormqr<scalar_t>(input, tau, other, left, transpose); }); } /* The orgqr function allows reconstruction of an orthogonal (or unitary) matrix Q, from a sequence of elementary reflectors, such as produced by the geqrf function. Args: * `self` - Tensor with the directions of the elementary reflectors below the diagonal, it will be overwritten with the result * `tau` - Tensor containing the magnitudes of the elementary reflectors For further details, please see the cuSOLVER documentation for ORGQR and UNGQR. 
*/ template <typename scalar_t> inline static void apply_orgqr(Tensor& self, const Tensor& tau) { using value_t = typename c10::scalar_value_type<scalar_t>::type; auto self_data = self.data_ptr<scalar_t>(); auto tau_data = tau.data_ptr<scalar_t>(); auto self_matrix_stride = matrixStride(self); auto batchsize = cuda_int_cast(batchCount(self), "batch size"); auto m = cuda_int_cast(self.size(-2), "m"); auto n = cuda_int_cast(self.size(-1), "n"); auto k = cuda_int_cast(tau.size(-1), "k"); auto tau_stride = std::max<int>(1, k); auto lda = std::max<int>(1, m); // LAPACK's requirement TORCH_INTERNAL_ASSERT(m >= n); TORCH_INTERNAL_ASSERT(n >= k); // cuSOLVER doesn't compute anything for this case, which is wrong // the result should be a matrix with 1 on the diagonal if (k == 0) { self.fill_(0); self.diagonal(/*offset=*/0, /*dim1=*/-2, /*dim2=*/-1).fill_(1); return; } // get the optimal work size and allocate workspace tensor int lwork; at::cuda::solver::orgqr_buffersize<scalar_t>( at::cuda::getCurrentCUDASolverDnHandle(), m, n, k, self_data, lda, tau_data, &lwork); auto info = at::zeros({1}, self.options().dtype(at::kInt)); auto info_data = info.data_ptr<int>(); for (auto i = decltype(batchsize){0}; i < batchsize; i++) { scalar_t* self_working_ptr = &self_data[i * self_matrix_stride]; scalar_t* tau_working_ptr = &tau_data[i * tau_stride]; auto handle = at::cuda::getCurrentCUDASolverDnHandle(); // allocate workspace storage auto& allocator = *at::cuda::getCUDADeviceAllocator(); auto work_data = allocator.allocate(sizeof(scalar_t)*lwork); at::cuda::solver::orgqr<scalar_t>( handle, m, n, k, self_working_ptr, lda, tau_working_ptr, static_cast<scalar_t*>(work_data.get()), lwork, info_data ); // info from orgqr only reports if the i-th parameter is wrong // so we don't need to check it all the time TORCH_INTERNAL_ASSERT_DEBUG_ONLY(info.item().toInt() == 0); } } // This is a type dispatching helper function for 'apply_orgqr' Tensor& orgqr_helper_cusolver(Tensor& result, const Tensor& tau) { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "orgqr_cuda", [&]{ apply_orgqr<scalar_t>(result, tau); }); return result; } template <typename scalar_t> static void apply_syevd(const Tensor& values, const Tensor& vectors, const Tensor& infos, bool upper, bool compute_eigenvectors) { using value_t = typename c10::scalar_value_type<scalar_t>::type; hipblasFillMode_t uplo = upper ? HIPBLAS_FILL_MODE_UPPER : HIPBLAS_FILL_MODE_LOWER; hipsolverEigMode_t jobz = compute_eigenvectors ? 
HIPSOLVER_EIG_MODE_VECTOR : HIPSOLVER_EIG_MODE_NOVECTOR; int64_t n = vectors.size(-1); int64_t lda = std::max<int64_t>(1, n); int64_t batch_size = batchCount(vectors); auto vectors_stride = matrixStride(vectors); auto values_stride = values.size(-1); auto vectors_data = vectors.data_ptr<scalar_t>(); auto values_data = values.data_ptr<value_t>(); auto infos_data = infos.data_ptr<int>(); // get the optimal work size and allocate workspace tensor #ifdef USE_CUSOLVER_64_BIT size_t worksize_device; // workspaceInBytesOnDevice size_t worksize_host; // workspaceInBytesOnHost hipsolverDnParams_t params = NULL; // use default algorithm (currently it's the only option) at::cuda::solver::xsyevd_bufferSize<scalar_t>( at::cuda::getCurrentCUDASolverDnHandle(), params, jobz, uplo, n, vectors_data, lda, values_data, &worksize_device, &worksize_host); #else int lwork; int n_32 = cuda_int_cast(n, "n"); int lda_32 = cuda_int_cast(lda, "lda"); at::cuda::solver::syevd_bufferSize<scalar_t>( at::cuda::getCurrentCUDASolverDnHandle(), jobz, uplo, n_32, vectors_data, lda_32, values_data, &lwork); #endif // USE_CUSOLVER_64_BIT for (decltype(batch_size) i = 0; i < batch_size; i++) { scalar_t* vectors_working_ptr = &vectors_data[i * vectors_stride]; value_t* values_working_ptr = &values_data[i * values_stride]; int* info_working_ptr = &infos_data[i]; auto handle = at::cuda::getCurrentCUDASolverDnHandle(); #ifdef USE_CUSOLVER_64_BIT // allocate workspace storage on device and host auto& device_allocator = *at::cuda::getCUDADeviceAllocator(); auto work_device_data = device_allocator.allocate(worksize_device); auto& host_allocator = *at::getCPUAllocator(); auto work_host_data = host_allocator.allocate(worksize_host); at::cuda::solver::xsyevd<scalar_t>( handle, params, jobz, uplo, n, vectors_working_ptr, lda, values_working_ptr, static_cast<scalar_t*>(work_device_data.get()), worksize_device, static_cast<scalar_t*>(work_host_data.get()), worksize_host, info_working_ptr); #else // allocate workspace storage on device auto& allocator = *at::cuda::getCUDADeviceAllocator(); auto work_data = allocator.allocate(sizeof(scalar_t) * lwork); at::cuda::solver::syevd<scalar_t>( handle, jobz, uplo, n_32, vectors_working_ptr, lda_32, values_working_ptr, static_cast<scalar_t*>(work_data.get()), lwork, info_working_ptr); #endif // USE_CUSOLVER_64_BIT } } template <typename scalar_t> static void apply_syevj(const Tensor& values, const Tensor& vectors, const Tensor& infos, bool upper, bool compute_eigenvectors) { using value_t = typename c10::scalar_value_type<scalar_t>::type; hipblasFillMode_t uplo = upper ? HIPBLAS_FILL_MODE_UPPER : HIPBLAS_FILL_MODE_LOWER; hipsolverEigMode_t jobz = compute_eigenvectors ? 
HIPSOLVER_EIG_MODE_VECTOR : HIPSOLVER_EIG_MODE_NOVECTOR; int n = cuda_int_cast(vectors.size(-1), "n"); int lda = std::max<int>(1, n); auto batch_size = batchCount(vectors); auto vectors_stride = matrixStride(vectors); auto values_stride = values.size(-1); auto vectors_data = vectors.data_ptr<scalar_t>(); auto values_data = values.data_ptr<value_t>(); auto infos_data = infos.data_ptr<int>(); // syevj_params controls the numerical accuracy of syevj // by default the tolerance is set to machine accuracy // the maximum number of iteration of Jacobi method by default is 100 // cuSOLVER documentations says: "15 sweeps are good enough to converge to machine accuracy" // LAPACK has SVD routine based on similar Jacobi algorithm (gesvj) and there a maximum of 30 iterations is set // Let's use the default values for now hipsolverSyevjInfo_t syevj_params; TORCH_CUSOLVER_CHECK(hipsolverDnCreateSyevjInfo(&syevj_params)); // get the optimal work size and allocate workspace tensor int lwork; at::cuda::solver::syevj_bufferSize<scalar_t>( at::cuda::getCurrentCUDASolverDnHandle(), jobz, uplo, n, vectors_data, lda, values_data, &lwork, syevj_params); for (decltype(batch_size) i = 0; i < batch_size; i++) { scalar_t* vectors_working_ptr = &vectors_data[i * vectors_stride]; value_t* values_working_ptr = &values_data[i * values_stride]; int* info_working_ptr = &infos_data[i]; auto handle = at::cuda::getCurrentCUDASolverDnHandle(); // allocate workspace storage on device auto& allocator = *at::cuda::getCUDADeviceAllocator(); auto work_data = allocator.allocate(sizeof(scalar_t) * lwork); at::cuda::solver::syevj<scalar_t>( handle, jobz, uplo, n, vectors_working_ptr, lda, values_working_ptr, static_cast<scalar_t*>(work_data.get()), lwork, info_working_ptr, syevj_params); } TORCH_CUSOLVER_CHECK(hipsolverDnDestroySyevjInfo(syevj_params)); } template <typename scalar_t> static void apply_syevj_batched(const Tensor& values, const Tensor& vectors, const Tensor& infos, bool upper, bool compute_eigenvectors) { using value_t = typename c10::scalar_value_type<scalar_t>::type; hipblasFillMode_t uplo = upper ? HIPBLAS_FILL_MODE_UPPER : HIPBLAS_FILL_MODE_LOWER; hipsolverEigMode_t jobz = compute_eigenvectors ? 
HIPSOLVER_EIG_MODE_VECTOR : HIPSOLVER_EIG_MODE_NOVECTOR; int n = cuda_int_cast(vectors.size(-1), "n"); int lda = std::max<int>(1, n); int batch_size = cuda_int_cast(batchCount(vectors), "batch_size"); auto vectors_data = vectors.data_ptr<scalar_t>(); auto values_data = values.data_ptr<value_t>(); auto infos_data = infos.data_ptr<int>(); // syevj_params controls the numerical accuracy of syevj // by default the tolerance is set to machine accuracy // the maximum number of iteration of Jacobi method by default is 100 // cuSOLVER documentations says: "15 sweeps are good enough to converge to machine accuracy" // LAPACK has SVD routine based on similar Jacobi algorithm (gesvj) and there a maximum of 30 iterations is set // Let's use the default values for now hipsolverSyevjInfo_t syevj_params; TORCH_CUSOLVER_CHECK(hipsolverDnCreateSyevjInfo(&syevj_params)); TORCH_CUSOLVER_CHECK(hipsolverDnXsyevjSetSortEig(syevj_params, 1)); auto handle = at::cuda::getCurrentCUDASolverDnHandle(); // get the optimal work size and allocate workspace tensor int lwork; at::cuda::solver::syevjBatched_bufferSize<scalar_t>( handle, jobz, uplo, n, vectors_data, lda, values_data, &lwork, syevj_params, batch_size); // allocate workspace storage on device auto& allocator = *at::cuda::getCUDADeviceAllocator(); auto work_data = allocator.allocate(sizeof(scalar_t) * lwork); at::cuda::solver::syevjBatched<scalar_t>( handle, jobz, uplo, n, vectors_data, lda, values_data, static_cast<scalar_t*>(work_data.get()), lwork, infos_data, syevj_params, batch_size); TORCH_CUSOLVER_CHECK(hipsolverDnDestroySyevjInfo(syevj_params)); } static void linalg_eigh_cusolver_syevd(const Tensor& eigenvalues, const Tensor& eigenvectors, const Tensor& infos, bool upper, bool compute_eigenvectors) { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(eigenvectors.scalar_type(), "linalg_eigh_cuda", [&] { apply_syevd<scalar_t>(eigenvalues, eigenvectors, infos, upper, compute_eigenvectors); }); } static void linalg_eigh_cusolver_syevj(const Tensor& eigenvalues, const Tensor& eigenvectors, const Tensor& infos, bool upper, bool compute_eigenvectors) { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(eigenvectors.scalar_type(), "linalg_eigh_cuda", [&] { apply_syevj<scalar_t>(eigenvalues, eigenvectors, infos, upper, compute_eigenvectors); }); } void linalg_eigh_cusolver(const Tensor& eigenvalues, const Tensor& eigenvectors, const Tensor& infos, bool upper, bool compute_eigenvectors) { // TODO: syevj_batched should be added here, but at least for CUDA 11.2 it contains a bug leading to incorrect results // See https://github.com/pytorch/pytorch/pull/53040#issuecomment-793626268 and https://github.com/cupy/cupy/issues/4847 // syevj is better than syevd for float32 dtype and matrix sizes 32x32 - 512x512 // See https://github.com/pytorch/pytorch/pull/53040#issuecomment-788264724 if (eigenvectors.scalar_type() == at::kFloat && eigenvectors.size(-1) >= 32 && eigenvectors.size(-1) <= 512) { return linalg_eigh_cusolver_syevj(eigenvalues, eigenvectors, infos, upper, compute_eigenvectors); } else { return linalg_eigh_cusolver_syevd(eigenvalues, eigenvectors, infos, upper, compute_eigenvectors); } } // The 'apply_' word is used for templated by dtype functions that call an API routine // underneath. Since the cusolver API has a slightly different structure we do not prepend // apply_ to this function. void lu_looped_cusolver(const Tensor& self, const Tensor& pivots, const Tensor& infos, bool get_pivots) { // Fill the pivots tensor with indices using 1-based (Fortran) indexing. 
// This is needed for maintaining the same results with MAGMA.
auto k = ::min(self.size(-2), self.size(-1));
Tensor pivots_tmp = at::arange(1, k + 1, self.options().dtype(at::kInt)).expand_as(pivots);
pivots.copy_(pivots_tmp);

AT_DISPATCH_FLOATING_TYPES(
  self.scalar_type(),
  "lu_cusolver",
  [&self, &pivots, &infos, &get_pivots]() {
    int m = cuda_int_cast(self.size(-2), "m");
    int n = cuda_int_cast(self.size(-1), "n");
    int lda = std::max<int>(1, m);
    int64_t self_stride = matrixStride(self);
    int64_t batch_size = batchCount(self);
    scalar_t* self_data = self.data_ptr<scalar_t>();
    int* infos_data = infos.data_ptr<int>();

    auto handle = at::cuda::getCurrentCUDASolverDnHandle();
    for (auto batch = decltype(batch_size){0}; batch < batch_size; ++batch) {
      if (get_pivots) {
        auto pivots_data = pivots.data_ptr<int>();
        auto pivots_stride = pivots.size(-1);
        at::cuda::solver::getrf<scalar_t>(
          handle, m, n,
          self_data + batch * self_stride,
          lda,
          pivots_data + batch * pivots_stride,
          infos_data + batch
        );
      }
      else {
        at::cuda::solver::getrf<scalar_t>(
          handle, m, n,
          self_data + batch * self_stride,
          lda,
          nullptr,
          infos_data + batch
        );
      }
    }
  });

// Necessary because cuSOLVER uses nan for outputs that correspond to 0 in MAGMA for non-pivoted LU.
// See https://github.com/pytorch/pytorch/issues/53879 for more details.
if (!get_pivots) {
  at::nan_to_num_(const_cast<Tensor&>(self), 0, std::numeric_limits<double>::infinity(),
    -std::numeric_limits<double>::infinity());
}
}

void lu_solve_looped_cusolver(const Tensor& b, const Tensor& lu, const Tensor& pivots) {
  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(b.scalar_type(), "lu_solve_cusolver", [&] {
    int n = cuda_int_cast(lu.size(-2), "n");
    int nrhs = cuda_int_cast(b.size(-1), "nrhs");
    auto batch_size = batchCount(lu);
    auto info = at::zeros({1}, lu.options().dtype(kInt));
    auto info_data = info.data_ptr<int>();
    auto b_data = b.data_ptr<scalar_t>();
    auto lu_data = lu.data_ptr<scalar_t>();
    auto pivots_data = pivots.data_ptr<int>();
    auto pivots_stride = pivots.size(-1);
    auto lu_stride = matrixStride(lu);
    auto b_stride = matrixStride(b);
    int leading_dimension = cuda_int_cast(std::max<int>(1, n), "leading_dimension");

    auto handle = at::cuda::getCurrentCUDASolverDnHandle();
    for (auto batch = decltype(batch_size){0}; batch < batch_size; ++batch) {
      at::cuda::solver::getrs<scalar_t>(
        handle,
        n,
        nrhs,
        lu_data + batch * lu_stride,
        leading_dimension,
        pivots_data + batch * pivots_stride,
        b_data + batch * b_stride,
        leading_dimension,
        info_data);

      TORCH_INTERNAL_ASSERT_DEBUG_ONLY(info.item().toInt() == 0);
    }
  });
}

#endif // USE_CUSOLVER

}} // namespace at::native
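// ---------------------------------------------------------------------------
// Illustrative usage sketch (added; not part of the upstream file). The looped
// LU helpers above are normally reached through the public ATen ops; the op
// names and overloads below are assumptions for illustration only:
//
//   Tensor a = at::randn({4, 3, 3}, at::kCUDA);   // batch of 4 square matrices
//   Tensor b = at::randn({4, 3, 2}, at::kCUDA);   // matching right-hand sides
//   Tensor lu, pivots, info;
//   std::tie(lu, pivots, info) =
//       at::_lu_with_info(a, /*pivot=*/true, /*check_errors=*/false);
//   Tensor x = at::lu_solve(b, lu, pivots);       // may dispatch to
//                                                 // lu_solve_looped_cusolver
// ---------------------------------------------------------------------------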
75abfdc8abf8be8b8957cfb28063d245b3493a26.cu
#include <ATen/Context.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/Dispatch.h> #include <ATen/NativeFunctions.h> #include <ATen/cuda/PinnedMemoryAllocator.h> #include <ATen/cuda/CUDASolver.h> #include <ATen/cuda/CUDABlas.h> #include <ATen/cuda/CUDAEvent.h> #include <c10/cuda/CUDAStream.h> #include <ATen/native/LinearAlgebraUtils.h> #include <ATen/native/cuda/MiscUtils.h> #include <ATen/native/cuda/BatchLinearAlgebraLib.h> namespace at { namespace native { // Some cuBLAS and cuSOLVER batched routines require input to be a device array of pointers to device individual matrices // 'input' must be a contiguous tensor template <typename scalar_t> static Tensor get_device_pointers(const Tensor& input) { auto input_data = input.data_ptr<scalar_t>(); int64_t input_mat_stride = matrixStride(input); // cublas/cusolver interface requires 'int' int batch_size = cuda_int_cast(batchCount(input), "batch_size"); // if batch_size==0, then start=0 and end=0 // if input_mat_stride==0, then step=sizeof(scalar_t) return at::arange( /*start=*/reinterpret_cast<int64_t>(input_data), /*end=*/reinterpret_cast<int64_t>(input_data + batch_size * input_mat_stride), /*step=*/static_cast<int64_t>(std::max<int64_t>(input_mat_stride, 1) * sizeof(scalar_t)), input.options().dtype(at::kLong)); } template <typename scalar_t> void apply_geqrf_batched(const Tensor& input, const Tensor& tau) { // AMD ROCm backend is implemented via rewriting all CUDA calls to HIP // rocBLAS does not implement BLAS-like extensions of cuBLAS, they're in rocSOLVER // rocSOLVER is currently not used in ATen, therefore we raise an error in this case #ifndef CUDART_VERSION TORCH_CHECK(false, "geqrf: Batched version is supported only with cuBLAS backend.") #else auto batch_size = cuda_int_cast(batchCount(input), "batch_size"); auto m = cuda_int_cast(input.size(-2), "m"); auto n = cuda_int_cast(input.size(-1), "n"); auto lda = std::max<int>(1, m); // cuBLAS batched geqrf requires input to be the device array of pointers to device single matrices Tensor input_ptr_array = get_device_pointers<scalar_t>(input); Tensor tau_ptr_array = get_device_pointers<scalar_t>(tau.unsqueeze(-1)); auto input_ptr_array_data = reinterpret_cast<scalar_t**>(input_ptr_array.data_ptr()); auto tau_ptr_array_data = reinterpret_cast<scalar_t**>(tau_ptr_array.data_ptr()); int info; auto handle = at::cuda::getCurrentCUDABlasHandle(); at::cuda::blas::geqrfBatched(handle, m, n, input_ptr_array_data, lda, tau_ptr_array_data, &info, batch_size); // info only indicates wrong arguments to geqrfBatched call // info is a host variable, we can check it without device synchronization TORCH_INTERNAL_ASSERT(info == 0); #endif } void geqrf_batched_cublas(const Tensor& input, const Tensor& tau) { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(input.scalar_type(), "geqrf_batched_cuda", [&]{ apply_geqrf_batched<scalar_t>(input, tau); }); } template <typename scalar_t> static void apply_lu_solve_batched_cublas(const Tensor& b, const Tensor& lu, const Tensor& pivots) { #ifndef CUDART_VERSION TORCH_CHECK(false, "lu_solve: cuBLAS backend for lu_solve is not available.") #else cublasOperation_t trans = CUBLAS_OP_N; auto pivots_data = pivots.data_ptr<int>(); auto batch_size = cuda_int_cast(batchCount(lu), "batch_size");; auto m = cuda_int_cast(lu.size(-2), "m"); auto nrhs = cuda_int_cast(b.size(-1), "nrhs"); auto lda = cuda_int_cast(std::max<int>(1, m), "lda"); int info = 0; Tensor lu_ptr_array = get_device_pointers<scalar_t>(lu); Tensor b_ptr_array = get_device_pointers<scalar_t>(b); auto 
lu_ptr_array_data = reinterpret_cast<scalar_t**>(lu_ptr_array.data_ptr()); auto b_ptr_array_data = reinterpret_cast<scalar_t**>(b_ptr_array.data_ptr()); auto handle = at::cuda::getCurrentCUDABlasHandle(); at::cuda::blas::getrsBatched(handle, trans, m, nrhs, lu_ptr_array_data, lda, pivots_data, b_ptr_array_data, lda, &info, batch_size); TORCH_INTERNAL_ASSERT_DEBUG_ONLY(info == 0); #endif } void lu_solve_batched_cublas(const Tensor& b, const Tensor& lu, const Tensor& pivots) { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(lu.scalar_type(), "lu_solve_cublas", [&]{ apply_lu_solve_batched_cublas<scalar_t>(b, lu, pivots); }); } template <typename scalar_t> static void apply_triangular_solve(Tensor& A, Tensor& B, bool upper, bool transpose, bool conjugate_transpose, bool unitriangular) { cublasFillMode_t uplo = upper ? CUBLAS_FILL_MODE_UPPER : CUBLAS_FILL_MODE_LOWER; cublasOperation_t trans = transpose ? CUBLAS_OP_T : CUBLAS_OP_N; trans = conjugate_transpose ? CUBLAS_OP_C : trans; cublasDiagType_t diag = unitriangular ? CUBLAS_DIAG_UNIT : CUBLAS_DIAG_NON_UNIT; cublasSideMode_t side = CUBLAS_SIDE_LEFT; auto A_data = A.data_ptr<scalar_t>(); auto B_data = B.data_ptr<scalar_t>(); auto A_mat_stride = matrixStride(A); auto B_mat_stride = matrixStride(B); auto batch_size = batchCount(A); auto m = cuda_int_cast(A.size(-2), "m"); auto n = cuda_int_cast(A.size(-1), "n"); auto nrhs = cuda_int_cast(B.size(-1), "nrhs"); auto lda = std::max<int>(1, m); auto alpha = scalar_t{1}; for (decltype(batch_size) i = 0; i < batch_size; i++) { scalar_t* A_working_ptr = &A_data[i * A_mat_stride]; scalar_t* B_working_ptr = &B_data[i * B_mat_stride]; auto handle = at::cuda::getCurrentCUDABlasHandle(); at::cuda::blas::trsm(handle, side, uplo, trans, diag, n, nrhs, &alpha, A_working_ptr, lda, B_working_ptr, lda); } } void triangular_solve_cublas(Tensor& A, Tensor& B, Tensor& infos, bool upper, bool transpose, bool conjugate_transpose, bool unitriangular) { (void)infos; // unused AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(A.scalar_type(), "triangular_solve_cuda", [&]{ apply_triangular_solve<scalar_t>(A, B, upper, transpose, conjugate_transpose, unitriangular); }); } template <typename scalar_t> static void apply_triangular_solve_batched(Tensor& A, Tensor& B, bool upper, bool transpose, bool conjugate_transpose, bool unitriangular) { cublasFillMode_t uplo = upper ? CUBLAS_FILL_MODE_UPPER : CUBLAS_FILL_MODE_LOWER; cublasOperation_t trans = transpose ? CUBLAS_OP_T : CUBLAS_OP_N; trans = conjugate_transpose ? CUBLAS_OP_C : trans; cublasDiagType_t diag = unitriangular ? 
CUBLAS_DIAG_UNIT : CUBLAS_DIAG_NON_UNIT; cublasSideMode_t side = CUBLAS_SIDE_LEFT; auto A_data = A.data_ptr<scalar_t>(); auto B_data = B.data_ptr<scalar_t>(); auto A_mat_stride = matrixStride(A); auto B_mat_stride = matrixStride(B); auto batch_size = cuda_int_cast(batchCount(A), "batch_size"); auto m = cuda_int_cast(A.size(-2), "m"); auto n = cuda_int_cast(A.size(-1), "n"); auto nrhs = cuda_int_cast(B.size(-1), "nrhs"); auto lda = std::max<int>(1, m); auto alpha = scalar_t{1}; // cuBLAS batched trsm requires input to be the device array of pointers to device single matrices Tensor A_ptr_array = get_device_pointers<scalar_t>(A); Tensor B_ptr_array = get_device_pointers<scalar_t>(B); auto A_ptr_array_data = reinterpret_cast<scalar_t**>(A_ptr_array.data_ptr()); auto B_ptr_array_data = reinterpret_cast<scalar_t**>(B_ptr_array.data_ptr()); auto handle = at::cuda::getCurrentCUDABlasHandle(); at::cuda::blas::trsmBatched(handle, side, uplo, trans, diag, n, nrhs, &alpha, A_ptr_array_data, lda, B_ptr_array_data, lda, batch_size); } void triangular_solve_batched_cublas(Tensor& A, Tensor& B, Tensor& infos, bool upper, bool transpose, bool conjugate_transpose, bool unitriangular) { (void)infos; // unused AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(A.scalar_type(), "triangular_solve_cuda", [&]{ apply_triangular_solve_batched<scalar_t>(A, B, upper, transpose, conjugate_transpose, unitriangular); }); } template <typename scalar_t> inline void apply_gels_batched(const Tensor& A, Tensor& B, Tensor& infos) { // AMD ROCm backend is implemented via rewriting all CUDA calls to HIP // rocBLAS does not implement BLAS-like extensions of cuBLAS, they're in rocSOLVER // rocSOLVER is currently not used in ATen, therefore we raise an error in this case #ifndef CUDART_VERSION TORCH_CHECK(false, "torch.linalg.lstsq: Batched version is supported only with cuBLAS backend.") #else auto trans = CUBLAS_OP_N; auto m = cuda_int_cast(A.size(-2), "m"); auto n = cuda_int_cast(A.size(-1), "n"); auto nrhs = cuda_int_cast(B.size(-1), "nrhs"); // cuBLAS from cuda10 and older doesn't work with nrhs == 0 (cuda11 works) // so we need to put this early return if (nrhs == 0) { return; } auto batch_size = cuda_int_cast(batchCount(B), "batch_size"); auto lda = std::max<int>(1, m); auto ldb = std::max<int>(1, m); // cuBLAS's requirement TORCH_CHECK( m >= n, "torch.linalg.lstsq: only overdetermined systems (input.size(-2) >= input.size(-1)) are allowed on CUDA with cuBLAS backend."); // cuBLAS documentation says: // Matrices Aarray[i] should not overlap; otherwise, undefined behavior is expected. 
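// Worked shape example (added for illustration; the sizes are made up, not from
// upstream): with A of shape [2, 1, 5, 3] and B of shape [2, 4, 5, 2], the batch
// portions [2, 1] and [2, 4] broadcast to [2, 4], so A is expanded to
// [2, 4, 5, 3] and then cloned column-major below, which also guarantees that
// the 8 A matrices handed to gelsBatched no longer alias each other.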
// explicitly broadcast the batch dimensions of A IntArrayRef A_batch_sizes(A.sizes().data(), A.dim() - 2); IntArrayRef B_batch_sizes(B.sizes().data(), B.dim() - 2); std::vector<int64_t> expand_batch_portion = at::infer_size(A_batch_sizes, B_batch_sizes); expand_batch_portion.insert(expand_batch_portion.end(), {A.size(-2), A.size(-1)}); Tensor A_expanded = A.expand({expand_batch_portion}); Tensor A_broadcasted = cloneBatchedColumnMajor(A_expanded); // cuBLAS batched gels requires input to be the device array of pointers to device single matrices Tensor A_ptr_array = get_device_pointers<scalar_t>(A_broadcasted); Tensor B_ptr_array = get_device_pointers<scalar_t>(B); auto A_ptr_array_data = reinterpret_cast<scalar_t**>(A_ptr_array.data_ptr()); auto B_ptr_array_data = reinterpret_cast<scalar_t**>(B_ptr_array.data_ptr()); auto infos_data = infos.data_ptr<int>(); auto handle = at::cuda::getCurrentCUDABlasHandle(); int info; at::cuda::blas::gelsBatched<scalar_t>( handle, trans, m, n, nrhs, A_ptr_array_data, lda, B_ptr_array_data, ldb, &info, infos_data, batch_size); // negative info indicates that an argument to gelsBatched call is invalid TORCH_INTERNAL_ASSERT(info == 0); #endif } // This is a type dispatching helper function for 'apply_gels_batched' void gels_batched_cublas(const Tensor& a, Tensor& b, Tensor& infos) { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(a.scalar_type(), "gels_batched_cublas", [&]{ apply_gels_batched<scalar_t>(a, b, infos); }); } #ifdef USE_CUSOLVER inline static Tensor column_major_identity_matrix_like(const Tensor& self) { auto size = self.sizes(); auto size_slice = IntArrayRef(size.data(), size.size()-1); return at::ones(size_slice, self.options()).diag_embed().transpose(-2, -1); } template <typename scalar_t> inline static void _apply_single_inverse_helper(scalar_t* self_ptr, scalar_t* self_inv_ptr, int* ipiv_ptr, int* info_getrf_ptr, int* info_getrs_ptr, int n, int lda) { // self_inv_ptr should already be an identity matrix auto handle = at::cuda::getCurrentCUDASolverDnHandle(); at::cuda::solver::getrf<scalar_t>(handle, n, n, self_ptr, lda, ipiv_ptr, info_getrf_ptr); at::cuda::solver::getrs<scalar_t>(handle, n, n, self_ptr, lda, ipiv_ptr, self_inv_ptr, lda, info_getrs_ptr); } template <typename scalar_t> static void apply_batched_inverse_lib(Tensor& self, Tensor& self_inv, Tensor& infos_getrf, Tensor& infos_getrs) { const int batch_size = cuda_int_cast(batchCount(self), "batchCount"); const int n = cuda_int_cast(self.size(-2), "self.size(-2)"); const int lda = std::max<int>(1, n); auto self_data = self.data_ptr<scalar_t>(); auto self_mat_stride = matrixStride(self); auto self_inv_data = self_inv.data_ptr<scalar_t>(); auto self_inv_mat_stride = matrixStride(self_inv); auto infos_getrf_data = infos_getrf.data_ptr<int>(); auto infos_getrs_data = infos_getrs.data_ptr<int>(); auto& allocator = *::c10::cuda::CUDACachingAllocator::get(); // Heuristic: For small batch size or large matrix size, we use for-loop to iterate over the batches instead of // calling the batched cublas routine. 
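// Examples of how the heuristic below plays out (added for clarity, using the
// thresholds from the condition that follows):
//   batch_size = 4,  n = 64   -> looped cusolver getrf/getrs path
//   batch_size = 64, n = 1024 -> looped path as well (large matrices)
//   batch_size = 64, n = 128  -> batched cublas getrfBatched/getriBatched path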
if (batch_size <= 8 || /* batch_size > 8 && */ n >= 512) { for (int64_t i = 0; i < batch_size; i++) { auto dataPtr = allocator.allocate(sizeof(int) * lda); int* pivot = reinterpret_cast<int*>(dataPtr.get()); int* infos_getrf_working_ptr = &infos_getrf_data[i]; int* infos_getrs_working_ptr = &infos_getrs_data[i]; _apply_single_inverse_helper<scalar_t>( &self_data[i * self_mat_stride], &self_inv_data[i * self_inv_mat_stride], pivot, infos_getrf_working_ptr, infos_getrs_working_ptr, n, lda); } } else { // cublas batched kernels require input be "device array of device pointers" Tensor self_array = at::arange( reinterpret_cast<int64_t>(self_data), reinterpret_cast<int64_t>(&self_data[(batch_size-1) * self_mat_stride]) + 1, static_cast<int64_t>(self_mat_stride * sizeof(scalar_t)), self.options().dtype(at::kLong)); Tensor self_inv_array = at::arange( reinterpret_cast<int64_t>(self_inv_data), reinterpret_cast<int64_t>(&self_inv_data[(batch_size-1) * self_inv_mat_stride]) + 1, static_cast<int64_t>(self_inv_mat_stride * sizeof(scalar_t)), self.options().dtype(at::kLong)); auto dataPtr = allocator.allocate(sizeof(int)*batch_size*lda); int* ipiv_array = reinterpret_cast<int*>(dataPtr.get()); at::cuda::blas::getrfBatched<scalar_t>(n, reinterpret_cast<scalar_t**>(self_array.data_ptr()), lda, ipiv_array, infos_getrf_data, batch_size); at::cuda::blas::getriBatched<scalar_t>(n, reinterpret_cast<scalar_t**>(self_array.data_ptr()), lda, ipiv_array, reinterpret_cast<scalar_t**>(self_inv_array.data_ptr()), lda, infos_getrs_data, batch_size); } } template <typename scalar_t> static void apply_single_inverse_lib(const Tensor& self, Tensor& self_inv, Tensor& infos_getrf, Tensor& infos_getrs) { int n = cuda_int_cast(self.size(-2), "self.size(-2)"); int lda = std::max<int>(1, n); Tensor ipiv = at::empty({lda}, self.options().dtype(at::kInt)); _apply_single_inverse_helper<scalar_t>( self.data_ptr<scalar_t>(), self_inv.data_ptr<scalar_t>(), ipiv.data_ptr<int>(), infos_getrf.data_ptr<int>(), infos_getrs.data_ptr<int>(), n, lda); } // This is a type dispatching helper function for 'apply_batched_inverse_lib' and 'apply_single_inverse_lib' Tensor& _linalg_inv_out_helper_cuda_lib(Tensor& result, Tensor& infos_getrf, Tensor& infos_getrs) { // assuming result is in column major order and contains the matrices to invert Tensor input_working_copy = cloneBatchedColumnMajor(result); // for getrf + getrs (cusolver path) // result should be filled with identity matrices result.zero_(); result.diagonal(/*offset=*/0, /*dim1=*/-2, /*dim2=*/-1).fill_(1); const int batch_size = cuda_int_cast(batchCount(result), "batchCount"); if (result.dim() > 2) { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "linalg_inv_out_cuda", [&]{ apply_batched_inverse_lib<scalar_t>( input_working_copy, result, infos_getrf, infos_getrs); }); } else { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "linalg_inv_out_cuda", [&]{ apply_single_inverse_lib<scalar_t>(input_working_copy, result, infos_getrf, infos_getrs); }); } return result; } // entrance of calculations of `inverse` using cusolver getrf + getrs, cublas getrfBatched + getriBatched Tensor _inverse_helper_cuda_lib(const Tensor& self) { Tensor self_working_copy = cloneBatchedColumnMajor(self); Tensor self_inv_working_copy = column_major_identity_matrix_like(self_working_copy); const int batch_size = cuda_int_cast(batchCount(self), "batchCount"); if (self.dim() > 2 && batch_size > 1) { Tensor infos_getrf = at::zeros({std::max<int64_t>(1, batchCount(self))}, 
self.options().dtype(kInt)); Tensor infos_getrs = at::zeros({std::max<int64_t>(1, batchCount(self))}, self.options().dtype(kInt)); AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "inverse_cuda", [&]{ apply_batched_inverse_lib<scalar_t>( self_working_copy, self_inv_working_copy, infos_getrf, infos_getrs); }); batchCheckErrors(infos_getrf, "inverse_cuda"); batchCheckErrors(infos_getrs, "inverse_cuda"); } else { Tensor infos_getrf = at::zeros({1}, self.options().dtype(kInt)); Tensor infos_getrs = at::zeros({1}, self.options().dtype(kInt)); AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "inverse_cuda", [&]{ apply_single_inverse_lib<scalar_t>(self_working_copy, self_inv_working_copy, infos_getrf, infos_getrs); }); batchCheckErrors(infos_getrf, "inverse_cuda"); batchCheckErrors(infos_getrs, "inverse_cuda"); } return self_inv_working_copy; } // call cusolver gesvdj function to calculate svd template<typename scalar_t> inline static void _apply_svd_lib_gesvdj(const Tensor& self, Tensor& U, Tensor& S, Tensor& VT, Tensor& infos, bool compute_uv, bool some) { using value_t = typename c10::scalar_value_type<scalar_t>::type; auto self_data = self.data_ptr<scalar_t>(); auto U_data = U.data_ptr<scalar_t>(); auto S_data = S.data_ptr<value_t>(); auto VT_data = VT.data_ptr<scalar_t>(); auto self_stride = matrixStride(self); auto U_stride = matrixStride(U); auto S_stride = S.size(-1); auto VT_stride = matrixStride(VT); int batchsize = cuda_int_cast(batchCount(self), "batch size"); int m = cuda_int_cast(self.size(-2), "m"); int n = cuda_int_cast(self.size(-1), "n"); int lda = std::max<int>(1, m); int ldvt = std::max<int>(1, n); for(int i = 0; i < batchsize; i++){ // gesvdj_params controls the numerical accuracy of cusolver gesvdj iterations on GPU gesvdjInfo_t gesvdj_params; TORCH_CUSOLVER_CHECK(cusolverDnCreateGesvdjInfo(&gesvdj_params)); // TORCH_CUSOLVER_CHECK(cusolverDnXgesvdjSetTolerance(gesvdj_params, 1.0e-7)); // TORCH_CUSOLVER_CHECK(cusolverDnXgesvdjSetMaxSweeps(gesvdj_params, 15)); auto handle = at::cuda::getCurrentCUDASolverDnHandle(); auto jobz = compute_uv ? CUSOLVER_EIG_MODE_VECTOR : CUSOLVER_EIG_MODE_NOVECTOR; at::cuda::solver::gesvdj<scalar_t>( handle, jobz, /*econ=*/ some ? 
1 : 0, m, n, self_data + i * self_stride, lda, S_data + i * S_stride, U_data + i * U_stride, lda, VT_data + i * VT_stride, ldvt, infos.data_ptr<int>() + i, gesvdj_params ); TORCH_CUSOLVER_CHECK(cusolverDnDestroyGesvdjInfo(gesvdj_params)); } } // wrapper around _apply_svd_lib_gesvdj that handles dtype dispatch, // creates a working copy of the input, and creates V^H from the V returned by gesvdj inline static void apply_svd_lib_gesvdj(const Tensor& self, Tensor& U, Tensor& S, Tensor& VT, Tensor& infos, bool compute_uv, bool some) { const int64_t m = self.size(-2); const int64_t n = self.size(-1); Tensor self_working_copy = cloneBatchedColumnMajor(self); VT = VT.transpose(-2, -1); // gesvdj returns V instead of V^H AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "svd_cuda_gesvdj", [&] { _apply_svd_lib_gesvdj<scalar_t>(self_working_copy, U, S, VT, infos, compute_uv, some); }); } // call cusolver gesvdj batched function to calculate svd template<typename scalar_t> inline static void _apply_svd_lib_gesvdjBatched(const Tensor& self, Tensor& U, Tensor& S, Tensor& VT, Tensor& infos, bool compute_uv) { using value_t = typename c10::scalar_value_type<scalar_t>::type; auto self_data = self.data_ptr<scalar_t>(); auto U_data = U.data_ptr<scalar_t>(); auto S_data = S.data_ptr<value_t>(); auto VT_data = VT.data_ptr<scalar_t>(); auto self_stride = matrixStride(self); auto U_stride = matrixStride(U); auto S_stride = S.size(-1); auto VT_stride = matrixStride(VT); int batchsize = cuda_int_cast(batchCount(self), "batch size"); int m = cuda_int_cast(self.size(-2), "m"); int n = cuda_int_cast(self.size(-1), "n"); int lda = std::max<int>(1, m); int ldvt = std::max<int>(1, n); TORCH_INTERNAL_ASSERT(m <= 32 && n <= 32, "gesvdjBatched requires both matrix dimensions not greater than 32, but got " "m = ", m, " n = ", n); // gesvdj_params controls the numerical accuracy of cusolver gesvdj iterations on GPU gesvdjInfo_t gesvdj_params; TORCH_CUSOLVER_CHECK(cusolverDnCreateGesvdjInfo(&gesvdj_params)); // TORCH_CUSOLVER_CHECK(cusolverDnXgesvdjSetTolerance(gesvdj_params, 1.0e-7)); // TORCH_CUSOLVER_CHECK(cusolverDnXgesvdjSetMaxSweeps(gesvdj_params, 15)); TORCH_CUSOLVER_CHECK(cusolverDnXgesvdjSetSortEig(gesvdj_params, 1)); auto handle = at::cuda::getCurrentCUDASolverDnHandle(); auto jobz = compute_uv ? 
CUSOLVER_EIG_MODE_VECTOR : CUSOLVER_EIG_MODE_NOVECTOR; at::cuda::solver::gesvdjBatched<scalar_t>( handle, jobz, m, n, self_data, lda, S_data, U_data, lda, VT_data, ldvt, infos.data_ptr<int>(), gesvdj_params, batchsize ); TORCH_CUSOLVER_CHECK(cusolverDnDestroyGesvdjInfo(gesvdj_params)); } // wrapper around _apply_svd_lib_gesvdjBatched that handles dtype dispatch, // creates a working copy of the input, and creates V^H from the V returned by gesvdj inline static void apply_svd_lib_gesvdjBatched(const Tensor& self, Tensor& U, Tensor& S, Tensor& VT, Tensor& infos, bool compute_uv) { const int64_t m = self.size(-2); const int64_t n = self.size(-1); Tensor self_working_copy = cloneBatchedColumnMajor(self); VT = VT.transpose(-2, -1); // gesvdj returns V instead of V^H AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "svd_cuda_gesvdjBatched", [&] { _apply_svd_lib_gesvdjBatched<scalar_t>(self_working_copy, U, S, VT, infos, compute_uv); }); } // entrance of calculations of `svd` using cusolver gesvdj and gesvdjBatched std::tuple<Tensor, Tensor, Tensor> _svd_helper_cuda_lib(const Tensor& self, bool some, bool compute_uv) { const int64_t batch_size = batchCount(self); at::Tensor infos = at::zeros({batch_size}, self.options().dtype(at::kInt)); const int64_t m = self.size(-2); const int64_t n = self.size(-1); const int64_t k = std::min(m, n); Tensor U_working_copy, S_working_copy, VT_working_copy; std::tie(U_working_copy, S_working_copy, VT_working_copy) = \ _create_U_S_VT(self, some, compute_uv, /* svd_use_cusolver = */ true); // U, S, V working copies are already column majored now // heuristic for using `gesvdjBatched` over `gesvdj` if (m <= 32 && n <= 32 && batch_size > 1 && (!some || m == n)) { apply_svd_lib_gesvdjBatched(self, U_working_copy, S_working_copy, VT_working_copy, infos, compute_uv); } else { apply_svd_lib_gesvdj(self, U_working_copy, S_working_copy, VT_working_copy, infos, compute_uv, some); } // A device-host sync will be performed. batchCheckErrors(infos, "svd_cuda"); if (!compute_uv) { VT_working_copy.zero_(); U_working_copy.zero_(); } if (some) { VT_working_copy = VT_working_copy.narrow(-2, 0, k); } // so far we have computed VT, but torch.svd returns V instead. Adjust accordingly. VT_working_copy.transpose_(-2, -1); return std::make_tuple(U_working_copy, S_working_copy, VT_working_copy); } // Implementation of Cholesky decomposition using looped cusolverDn<T>potrf or cusolverDnXpotrf (64-bit) template<typename scalar_t> inline static void apply_cholesky_cusolver_potrf_looped(const Tensor& self_working_copy, bool upper, const Tensor& infos) { auto handle = at::cuda::getCurrentCUDASolverDnHandle(); const auto uplo = upper ? 
CUBLAS_FILL_MODE_UPPER : CUBLAS_FILL_MODE_LOWER; const int64_t n = self_working_copy.size(-1); const int64_t lda = std::max<int64_t>(1, n); const int64_t batch_size = batchCount(self_working_copy); const int64_t matrix_stride = matrixStride(self_working_copy); scalar_t* self_working_copy_ptr = self_working_copy.data_ptr<scalar_t>(); int* infos_ptr = infos.data_ptr<int>(); #ifdef USE_CUSOLVER_64_BIT size_t worksize_device; size_t worksize_host; cusolverDnParams_t params; cudaDataType datatype = at::cuda::solver::get_cusolver_datatype<scalar_t>(); TORCH_CUSOLVER_CHECK(cusolverDnCreateParams(&params)); at::cuda::solver::xpotrf_buffersize(handle, params, uplo, n, datatype, nullptr, lda, datatype, &worksize_device, &worksize_host); // allocate workspace storage auto& device_allocator = *at::cuda::getCUDADeviceAllocator(); auto workdata_device = device_allocator.allocate(worksize_device * batch_size); void* workdata_device_ptr = workdata_device.get(); auto& host_allocator = *at::getCPUAllocator(); auto workdata_host = host_allocator.allocate(worksize_host * batch_size); void* workdata_host_ptr = workdata_host.get(); for (int64_t i = 0; i < batch_size; i++) { at::cuda::solver::xpotrf( handle, params, uplo, n, datatype, self_working_copy_ptr + i * matrix_stride, lda, datatype, (char*)workdata_device_ptr + i * worksize_device, worksize_device, (char*)workdata_host_ptr + i * worksize_host, worksize_host, infos_ptr + i ); } TORCH_CUSOLVER_CHECK(cusolverDnDestroyParams(params)); #else // USE_CUSOLVER_64_BIT int n_32 = cuda_int_cast(n, "n"); int lda_32 = cuda_int_cast(lda, "lda"); int lwork; at::cuda::solver::potrf_buffersize<scalar_t>( handle, uplo, n_32, nullptr, lda_32, &lwork); // allocate workspace storage auto& allocator = *at::cuda::getCUDADeviceAllocator(); auto work_data = allocator.allocate(sizeof(scalar_t)*lwork * batch_size); scalar_t* work_data_ptr = static_cast<scalar_t*>(work_data.get()); for (int64_t i = 0; i < batch_size; i++) { at::cuda::solver::potrf<scalar_t>( handle, uplo, n_32, self_working_copy_ptr + i * matrix_stride, lda_32, work_data_ptr + i * lwork, lwork, infos_ptr + i ); } #endif // USE_CUSOLVER_64_BIT } // Implementation of Cholesky decomposition using batched cusolverDn<T>potrfBatched // Warning: cusolverDn<T>potrfBatched doesn't work quite well when matrix size or batch size is zero. // If you write your own C++ extension and use this function, make sure you do a zero numel check for the input. template<typename scalar_t> inline static void apply_cholesky_cusolver_potrfBatched(const Tensor& self_working_copy, bool upper, const Tensor& infos) { auto handle = at::cuda::getCurrentCUDASolverDnHandle(); const auto uplo = upper ? 
CUBLAS_FILL_MODE_UPPER : CUBLAS_FILL_MODE_LOWER; const int n = cuda_int_cast(self_working_copy.size(-1), "n"); const int lda = std::max<int>(1, n); const int batch_size = cuda_int_cast(batchCount(self_working_copy), "batch_size"); // cusolver batched kernels require input be "device array of device pointers" Tensor self_working_copy_array = get_device_pointers<scalar_t>(self_working_copy); at::cuda::solver::potrfBatched<scalar_t>( handle, uplo, n, reinterpret_cast<scalar_t**>(self_working_copy_array.data_ptr()), lda, infos.data_ptr<int>(), batch_size); } void cholesky_helper_cusolver(const Tensor& input, bool upper, const Tensor& info) { if (input.numel() == 0) { return; } if (use_cusolver_potrf_batched_ && batchCount(input) > 1) { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(input.scalar_type(), "cholesky_cusolver", [&] { apply_cholesky_cusolver_potrfBatched<scalar_t>(input, upper, info); }); } else { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(input.scalar_type(), "cholesky_cusolver", [&] { apply_cholesky_cusolver_potrf_looped<scalar_t>(input, upper, info); }); } } template<typename scalar_t> inline static void apply_cholesky_cusolver_potrs(Tensor& self_working_copy, const Tensor& A_column_major_copy, bool upper, Tensor& infos) { auto handle = at::cuda::getCurrentCUDASolverDnHandle(); const auto uplo = upper ? CUBLAS_FILL_MODE_UPPER : CUBLAS_FILL_MODE_LOWER; const int64_t n = self_working_copy.size(-2); const int64_t nrhs = self_working_copy.size(-1); const int64_t lda = std::max<int64_t>(1, n); const int64_t batch_size = batchCount(self_working_copy); const int64_t self_matrix_stride = matrixStride(self_working_copy); scalar_t* self_working_copy_ptr = self_working_copy.data_ptr<scalar_t>(); const scalar_t* A_ptr = A_column_major_copy.data_ptr<scalar_t>(); const int64_t A_matrix_stride = matrixStride(A_column_major_copy); const int64_t ldb = std::max<int64_t>(1, A_column_major_copy.size(-1)); int* infos_ptr = infos.data_ptr<int>(); #ifdef USE_CUSOLVER_64_BIT cusolverDnParams_t params; cudaDataType datatype = at::cuda::solver::get_cusolver_datatype<scalar_t>(); TORCH_CUSOLVER_CHECK(cusolverDnCreateParams(&params)); for (int64_t i = 0; i < batch_size; i++) { at::cuda::solver::xpotrs( handle, params, uplo, n, nrhs, datatype, A_ptr + i * A_matrix_stride, lda, datatype, self_working_copy_ptr + i * self_matrix_stride, ldb, infos_ptr ); } TORCH_CUSOLVER_CHECK(cusolverDnDestroyParams(params)); #else // USE_CUSOLVER_64_BIT int n_32 = cuda_int_cast(n, "n"); int nrhs_32 = cuda_int_cast(nrhs, "nrhs"); int lda_32 = cuda_int_cast(lda, "lda"); int ldb_32 = cuda_int_cast(ldb, "ldb"); for (int64_t i = 0; i < batch_size; i++) { at::cuda::solver::potrs<scalar_t>( handle, uplo, n_32, nrhs_32, A_ptr + i * A_matrix_stride, lda_32, self_working_copy_ptr + i * self_matrix_stride, ldb_32, infos_ptr ); } #endif // USE_CUSOLVER_64_BIT } // This code path is only dispatched to if MAGMA is not linked in the pytorch build. // cusolverDn<t>potrsBatched only supports nrhs == 1 template<typename scalar_t> inline static void apply_cholesky_cusolver_potrsBatched(Tensor& self_working_copy, const Tensor& A_column_major_copy, bool upper, Tensor& infos) { auto handle = at::cuda::getCurrentCUDASolverDnHandle(); const auto uplo = upper ? 
CUBLAS_FILL_MODE_UPPER : CUBLAS_FILL_MODE_LOWER; const int64_t n = self_working_copy.size(-2); const int64_t nrhs = self_working_copy.size(-1); const int64_t lda = std::max<int64_t>(1, n); const int64_t batch_size = batchCount(self_working_copy); const int64_t self_matrix_stride = matrixStride(self_working_copy); scalar_t* self_working_copy_ptr = self_working_copy.data_ptr<scalar_t>(); const scalar_t* A_ptr = A_column_major_copy.data_ptr<scalar_t>(); const int64_t A_matrix_stride = matrixStride(A_column_major_copy); const int64_t ldb = std::max<int64_t>(1, A_column_major_copy.size(-1)); int* infos_ptr = infos.data_ptr<int>(); auto self_ptr_array = get_device_pointers<scalar_t>(self_working_copy); auto A_ptr_array = get_device_pointers<scalar_t>(A_column_major_copy); at::cuda::solver::potrsBatched( handle, uplo, cuda_int_cast(n, "n"), cuda_int_cast(nrhs, "nrhs"), reinterpret_cast<scalar_t**>(A_ptr_array.data_ptr()), cuda_int_cast(lda, "lda"), reinterpret_cast<scalar_t**>(self_ptr_array.data_ptr()), cuda_int_cast(ldb, "ldb"), infos_ptr, cuda_int_cast(batch_size, "batch_size") ); } Tensor _cholesky_solve_helper_cuda_cusolver(const Tensor& self, const Tensor& A, bool upper) { const int64_t batch_size = batchCount(self); at::Tensor infos = at::zeros({1}, self.options().dtype(at::kInt)); at::Tensor self_working_copy = cloneBatchedColumnMajor(self); at::Tensor A_column_major_copy = cloneBatchedColumnMajor(A); const int64_t nrhs = self_working_copy.size(-1); // cusolverDn<t>potrsBatched only supports nrhs == 1 if (batch_size > 1 && nrhs == 1) { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "cholesky_cuda_potrs_batched", [&] { apply_cholesky_cusolver_potrsBatched<scalar_t>(self_working_copy, A_column_major_copy, upper, infos); }); } else { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "cholesky_cuda_potrs", [&] { apply_cholesky_cusolver_potrs<scalar_t>(self_working_copy, A_column_major_copy, upper, infos); }); } // info from potrs and potrsBatched only report if the i-th parameter is wrong, not about the matrix singularity, etc. // So we don't need to check it all the time. TORCH_INTERNAL_ASSERT_DEBUG_ONLY(infos.item().toInt() == 0); return self_working_copy; } void _cholesky_inverse_cusolver_potrs_based(Tensor& result, Tensor& infos, bool upper) { at::Tensor input_working_copy = cloneBatchedColumnMajor(result); at::Tensor infos_gpu = at::zeros({1}, result.options().dtype(at::kInt)); result.fill_(0); result.diagonal(/*offset=*/0, /*dim1=*/-2, /*dim2=*/-1).fill_(1); AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "cholesky_cuda_potri", [&] { apply_cholesky_cusolver_potrs<scalar_t>(result, input_working_copy, upper, infos_gpu); }); // Debug only: info of cusolver potrs only check if the i-th parameter is wrong // Function argument `infos` is a CPU tensor, the following copy will cause a device-host sync. // infos.copy_(infos_gpu); } Tensor& cholesky_inverse_kernel_impl_cusolver(Tensor &result, Tensor& infos, bool upper) { _cholesky_inverse_cusolver_potrs_based(result, infos, upper); return result; } /* The geqrf function computes the QR decomposition of a m x n matrix A. 
Args: * `A` - [in] Tensor with matrices for QR decomposition, [out] Tensor containing R in the upper triangle of A and elementary reflectors below the main diagonal of A * `tau` - Tensor containing the magnitudes of the elementary reflectors * `m` - The number of rows of `input` to consider * `n` - The number of columns of `input` to consider (actual sizes of `input` could be larger) For further details, please see the cuSOLVER documentation for GEQRF. */ template <typename scalar_t> static void apply_geqrf(const Tensor& A, const Tensor& tau) { int64_t m = A.size(-2); int64_t n = A.size(-1); int64_t lda = std::max<int64_t>(1, m); int64_t batch_size = batchCount(A); auto A_stride = matrixStride(A); auto tau_stride = tau.size(-1); auto A_data = A.data_ptr<scalar_t>(); auto tau_data = tau.data_ptr<scalar_t>(); auto infos = at::zeros({1}, A.options().dtype(at::kInt)); auto infos_data = infos.data_ptr<int>(); // get the optimal work size and allocate workspace tensor #ifdef USE_CUSOLVER_64_BIT size_t worksize_device; // workspaceInBytesOnDevice size_t worksize_host; // workspaceInBytesOnHost cusolverDnParams_t params = NULL; // use default algorithm (currently it's the only option) at::cuda::solver::xgeqrf_bufferSize<scalar_t>( at::cuda::getCurrentCUDASolverDnHandle(), params, m, n, A_data, lda, tau_data, &worksize_device, &worksize_host); #else int lwork; int m_32 = cuda_int_cast(m, "m"); int n_32 = cuda_int_cast(n, "n"); int lda_32 = cuda_int_cast(lda, "lda"); at::cuda::solver::geqrf_bufferSize<scalar_t>( at::cuda::getCurrentCUDASolverDnHandle(), m_32, n_32, A_data, lda_32, &lwork); #endif // USE_CUSOLVER_64_BIT for (decltype(batch_size) i = 0; i < batch_size; i++) { scalar_t* A_working_ptr = &A_data[i * A_stride]; scalar_t* tau_working_ptr = &tau_data[i * tau_stride]; auto handle = at::cuda::getCurrentCUDASolverDnHandle(); #ifdef USE_CUSOLVER_64_BIT // allocate workspace storage on device and host auto& device_allocator = *at::cuda::getCUDADeviceAllocator(); auto work_device_data = device_allocator.allocate(worksize_device); auto& host_allocator = *at::getCPUAllocator(); auto work_host_data = host_allocator.allocate(worksize_host); at::cuda::solver::xgeqrf<scalar_t>( handle, params, m, n, A_working_ptr, lda, tau_working_ptr, static_cast<scalar_t*>(work_device_data.get()), worksize_device, static_cast<scalar_t*>(work_host_data.get()), worksize_host, infos_data); #else // allocate workspace storage on device auto& allocator = *at::cuda::getCUDADeviceAllocator(); auto work_data = allocator.allocate(sizeof(scalar_t) * std::max<int>(1, lwork)); at::cuda::solver::geqrf<scalar_t>( handle, m_32, n_32, A_working_ptr, lda_32, tau_working_ptr, static_cast<scalar_t*>(work_data.get()), lwork, infos_data); #endif // USE_CUSOLVER_64_BIT } // info from geqrf only reports if the i-th parameter is wrong, not about the matrix singularity // so we don't need to check it all the time TORCH_INTERNAL_ASSERT_DEBUG_ONLY(infos.item().toInt() == 0); } // This is a type dispatching helper function for 'apply_geqrf' void geqrf_cusolver(const Tensor& input, const Tensor& tau) { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(input.scalar_type(), "geqrf_cuda", [&]{ apply_geqrf<scalar_t>(input, tau); }); } /* The ormqr function multiplies Q with another matrix from a sequence of elementary reflectors, such as is produced by the geqrf function. Args: * `input` - Tensor with elementary reflectors below the diagonal, encoding the matrix Q. * `tau` - Tensor containing the magnitudes of the elementary reflectors. 
* `other` - [in] Tensor containing the matrix to be multiplied. [out] result of the matrix multiplication with Q. * `left` - bool, determining whether `other` is left- or right-multiplied with Q. * `transpose` - bool, determining whether to transpose (or conjugate transpose) Q before multiplying. For further details, please see the cuSOLVER documentation for ORMQR and UNMQR. */ template <typename scalar_t> static void apply_ormqr(const Tensor& input, const Tensor& tau, const Tensor& other, bool left, bool transpose) { using value_t = typename c10::scalar_value_type<scalar_t>::type; auto side = left ? CUBLAS_SIDE_LEFT : CUBLAS_SIDE_RIGHT; auto trans = transpose ? (input.is_complex() ? CUBLAS_OP_C : CUBLAS_OP_T) : CUBLAS_OP_N; auto input_data = input.data_ptr<scalar_t>(); auto tau_data = tau.data_ptr<scalar_t>(); auto other_data = other.data_ptr<scalar_t>(); auto input_matrix_stride = matrixStride(input); auto other_matrix_stride = matrixStride(other); auto tau_stride = tau.size(-1); auto batch_size = batchCount(input); auto m = cuda_int_cast(other.size(-2), "m"); auto n = cuda_int_cast(other.size(-1), "n"); auto k = cuda_int_cast(tau.size(-1), "k"); auto lda = std::max<int>(1, left ? m : n); auto ldc = std::max<int>(1, m); // get the optimal work size and allocate workspace tensor int lwork; at::cuda::solver::ormqr_bufferSize<scalar_t>( at::cuda::getCurrentCUDASolverDnHandle(), side, trans, m, n, k, input_data, lda, tau_data, other_data, ldc, &lwork); auto info = at::zeros({1}, input.options().dtype(at::kInt)); auto info_data = info.data_ptr<int>(); for (auto i = decltype(batch_size){0}; i < batch_size; i++) { scalar_t* input_working_ptr = &input_data[i * input_matrix_stride]; scalar_t* other_working_ptr = &other_data[i * other_matrix_stride]; scalar_t* tau_working_ptr = &tau_data[i * tau_stride]; auto handle = at::cuda::getCurrentCUDASolverDnHandle(); // allocate workspace storage auto& allocator = *at::cuda::getCUDADeviceAllocator(); auto work_data = allocator.allocate(sizeof(scalar_t)*lwork); at::cuda::solver::ormqr<scalar_t>( handle, side, trans, m, n, k, input_working_ptr, lda, tau_working_ptr, other_working_ptr, ldc, static_cast<scalar_t*>(work_data.get()), lwork, info_data ); // info from ormqr only reports if the i-th parameter is wrong // so we don't need to check it all the time TORCH_INTERNAL_ASSERT_DEBUG_ONLY(info.item().toInt() == 0); } } // This is a type dispatching helper function for 'apply_ormqr' void ormqr_cusolver(const Tensor& input, const Tensor& tau, const Tensor& other, bool left, bool transpose) { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(input.scalar_type(), "orgmr_cuda", [&]{ apply_ormqr<scalar_t>(input, tau, other, left, transpose); }); } /* The orgqr function allows reconstruction of an orthogonal (or unitary) matrix Q, from a sequence of elementary reflectors, such as produced by the geqrf function. Args: * `self` - Tensor with the directions of the elementary reflectors below the diagonal, it will be overwritten with the result * `tau` - Tensor containing the magnitudes of the elementary reflectors For further details, please see the cuSOLVER documentation for ORGQR and UNGQR. 
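Added illustration (not from the upstream comment): the k == 0 special case below
exists because cuSOLVER computes nothing when there are no reflectors, while the
expected result is the first n columns of the m x m identity. For example, for a
3 x 2 input with an empty tau the reconstructed Q should be

    1 0
    0 1
    0 0

which is exactly what the fill_(0) / diagonal().fill_(1) fallback writes.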
*/ template <typename scalar_t> inline static void apply_orgqr(Tensor& self, const Tensor& tau) { using value_t = typename c10::scalar_value_type<scalar_t>::type; auto self_data = self.data_ptr<scalar_t>(); auto tau_data = tau.data_ptr<scalar_t>(); auto self_matrix_stride = matrixStride(self); auto batchsize = cuda_int_cast(batchCount(self), "batch size"); auto m = cuda_int_cast(self.size(-2), "m"); auto n = cuda_int_cast(self.size(-1), "n"); auto k = cuda_int_cast(tau.size(-1), "k"); auto tau_stride = std::max<int>(1, k); auto lda = std::max<int>(1, m); // LAPACK's requirement TORCH_INTERNAL_ASSERT(m >= n); TORCH_INTERNAL_ASSERT(n >= k); // cuSOLVER doesn't compute anything for this case, which is wrong // the result should be a matrix with 1 on the diagonal if (k == 0) { self.fill_(0); self.diagonal(/*offset=*/0, /*dim1=*/-2, /*dim2=*/-1).fill_(1); return; } // get the optimal work size and allocate workspace tensor int lwork; at::cuda::solver::orgqr_buffersize<scalar_t>( at::cuda::getCurrentCUDASolverDnHandle(), m, n, k, self_data, lda, tau_data, &lwork); auto info = at::zeros({1}, self.options().dtype(at::kInt)); auto info_data = info.data_ptr<int>(); for (auto i = decltype(batchsize){0}; i < batchsize; i++) { scalar_t* self_working_ptr = &self_data[i * self_matrix_stride]; scalar_t* tau_working_ptr = &tau_data[i * tau_stride]; auto handle = at::cuda::getCurrentCUDASolverDnHandle(); // allocate workspace storage auto& allocator = *at::cuda::getCUDADeviceAllocator(); auto work_data = allocator.allocate(sizeof(scalar_t)*lwork); at::cuda::solver::orgqr<scalar_t>( handle, m, n, k, self_working_ptr, lda, tau_working_ptr, static_cast<scalar_t*>(work_data.get()), lwork, info_data ); // info from orgqr only reports if the i-th parameter is wrong // so we don't need to check it all the time TORCH_INTERNAL_ASSERT_DEBUG_ONLY(info.item().toInt() == 0); } } // This is a type dispatching helper function for 'apply_orgqr' Tensor& orgqr_helper_cusolver(Tensor& result, const Tensor& tau) { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "orgqr_cuda", [&]{ apply_orgqr<scalar_t>(result, tau); }); return result; } template <typename scalar_t> static void apply_syevd(const Tensor& values, const Tensor& vectors, const Tensor& infos, bool upper, bool compute_eigenvectors) { using value_t = typename c10::scalar_value_type<scalar_t>::type; cublasFillMode_t uplo = upper ? CUBLAS_FILL_MODE_UPPER : CUBLAS_FILL_MODE_LOWER; cusolverEigMode_t jobz = compute_eigenvectors ? 
CUSOLVER_EIG_MODE_VECTOR : CUSOLVER_EIG_MODE_NOVECTOR; int64_t n = vectors.size(-1); int64_t lda = std::max<int64_t>(1, n); int64_t batch_size = batchCount(vectors); auto vectors_stride = matrixStride(vectors); auto values_stride = values.size(-1); auto vectors_data = vectors.data_ptr<scalar_t>(); auto values_data = values.data_ptr<value_t>(); auto infos_data = infos.data_ptr<int>(); // get the optimal work size and allocate workspace tensor #ifdef USE_CUSOLVER_64_BIT size_t worksize_device; // workspaceInBytesOnDevice size_t worksize_host; // workspaceInBytesOnHost cusolverDnParams_t params = NULL; // use default algorithm (currently it's the only option) at::cuda::solver::xsyevd_bufferSize<scalar_t>( at::cuda::getCurrentCUDASolverDnHandle(), params, jobz, uplo, n, vectors_data, lda, values_data, &worksize_device, &worksize_host); #else int lwork; int n_32 = cuda_int_cast(n, "n"); int lda_32 = cuda_int_cast(lda, "lda"); at::cuda::solver::syevd_bufferSize<scalar_t>( at::cuda::getCurrentCUDASolverDnHandle(), jobz, uplo, n_32, vectors_data, lda_32, values_data, &lwork); #endif // USE_CUSOLVER_64_BIT for (decltype(batch_size) i = 0; i < batch_size; i++) { scalar_t* vectors_working_ptr = &vectors_data[i * vectors_stride]; value_t* values_working_ptr = &values_data[i * values_stride]; int* info_working_ptr = &infos_data[i]; auto handle = at::cuda::getCurrentCUDASolverDnHandle(); #ifdef USE_CUSOLVER_64_BIT // allocate workspace storage on device and host auto& device_allocator = *at::cuda::getCUDADeviceAllocator(); auto work_device_data = device_allocator.allocate(worksize_device); auto& host_allocator = *at::getCPUAllocator(); auto work_host_data = host_allocator.allocate(worksize_host); at::cuda::solver::xsyevd<scalar_t>( handle, params, jobz, uplo, n, vectors_working_ptr, lda, values_working_ptr, static_cast<scalar_t*>(work_device_data.get()), worksize_device, static_cast<scalar_t*>(work_host_data.get()), worksize_host, info_working_ptr); #else // allocate workspace storage on device auto& allocator = *at::cuda::getCUDADeviceAllocator(); auto work_data = allocator.allocate(sizeof(scalar_t) * lwork); at::cuda::solver::syevd<scalar_t>( handle, jobz, uplo, n_32, vectors_working_ptr, lda_32, values_working_ptr, static_cast<scalar_t*>(work_data.get()), lwork, info_working_ptr); #endif // USE_CUSOLVER_64_BIT } } template <typename scalar_t> static void apply_syevj(const Tensor& values, const Tensor& vectors, const Tensor& infos, bool upper, bool compute_eigenvectors) { using value_t = typename c10::scalar_value_type<scalar_t>::type; cublasFillMode_t uplo = upper ? CUBLAS_FILL_MODE_UPPER : CUBLAS_FILL_MODE_LOWER; cusolverEigMode_t jobz = compute_eigenvectors ? 
CUSOLVER_EIG_MODE_VECTOR : CUSOLVER_EIG_MODE_NOVECTOR; int n = cuda_int_cast(vectors.size(-1), "n"); int lda = std::max<int>(1, n); auto batch_size = batchCount(vectors); auto vectors_stride = matrixStride(vectors); auto values_stride = values.size(-1); auto vectors_data = vectors.data_ptr<scalar_t>(); auto values_data = values.data_ptr<value_t>(); auto infos_data = infos.data_ptr<int>(); // syevj_params controls the numerical accuracy of syevj // by default the tolerance is set to machine accuracy // the maximum number of iteration of Jacobi method by default is 100 // cuSOLVER documentations says: "15 sweeps are good enough to converge to machine accuracy" // LAPACK has SVD routine based on similar Jacobi algorithm (gesvj) and there a maximum of 30 iterations is set // Let's use the default values for now syevjInfo_t syevj_params; TORCH_CUSOLVER_CHECK(cusolverDnCreateSyevjInfo(&syevj_params)); // get the optimal work size and allocate workspace tensor int lwork; at::cuda::solver::syevj_bufferSize<scalar_t>( at::cuda::getCurrentCUDASolverDnHandle(), jobz, uplo, n, vectors_data, lda, values_data, &lwork, syevj_params); for (decltype(batch_size) i = 0; i < batch_size; i++) { scalar_t* vectors_working_ptr = &vectors_data[i * vectors_stride]; value_t* values_working_ptr = &values_data[i * values_stride]; int* info_working_ptr = &infos_data[i]; auto handle = at::cuda::getCurrentCUDASolverDnHandle(); // allocate workspace storage on device auto& allocator = *at::cuda::getCUDADeviceAllocator(); auto work_data = allocator.allocate(sizeof(scalar_t) * lwork); at::cuda::solver::syevj<scalar_t>( handle, jobz, uplo, n, vectors_working_ptr, lda, values_working_ptr, static_cast<scalar_t*>(work_data.get()), lwork, info_working_ptr, syevj_params); } TORCH_CUSOLVER_CHECK(cusolverDnDestroySyevjInfo(syevj_params)); } template <typename scalar_t> static void apply_syevj_batched(const Tensor& values, const Tensor& vectors, const Tensor& infos, bool upper, bool compute_eigenvectors) { using value_t = typename c10::scalar_value_type<scalar_t>::type; cublasFillMode_t uplo = upper ? CUBLAS_FILL_MODE_UPPER : CUBLAS_FILL_MODE_LOWER; cusolverEigMode_t jobz = compute_eigenvectors ? 
CUSOLVER_EIG_MODE_VECTOR : CUSOLVER_EIG_MODE_NOVECTOR; int n = cuda_int_cast(vectors.size(-1), "n"); int lda = std::max<int>(1, n); int batch_size = cuda_int_cast(batchCount(vectors), "batch_size"); auto vectors_data = vectors.data_ptr<scalar_t>(); auto values_data = values.data_ptr<value_t>(); auto infos_data = infos.data_ptr<int>(); // syevj_params controls the numerical accuracy of syevj // by default the tolerance is set to machine accuracy // the maximum number of iteration of Jacobi method by default is 100 // cuSOLVER documentations says: "15 sweeps are good enough to converge to machine accuracy" // LAPACK has SVD routine based on similar Jacobi algorithm (gesvj) and there a maximum of 30 iterations is set // Let's use the default values for now syevjInfo_t syevj_params; TORCH_CUSOLVER_CHECK(cusolverDnCreateSyevjInfo(&syevj_params)); TORCH_CUSOLVER_CHECK(cusolverDnXsyevjSetSortEig(syevj_params, 1)); auto handle = at::cuda::getCurrentCUDASolverDnHandle(); // get the optimal work size and allocate workspace tensor int lwork; at::cuda::solver::syevjBatched_bufferSize<scalar_t>( handle, jobz, uplo, n, vectors_data, lda, values_data, &lwork, syevj_params, batch_size); // allocate workspace storage on device auto& allocator = *at::cuda::getCUDADeviceAllocator(); auto work_data = allocator.allocate(sizeof(scalar_t) * lwork); at::cuda::solver::syevjBatched<scalar_t>( handle, jobz, uplo, n, vectors_data, lda, values_data, static_cast<scalar_t*>(work_data.get()), lwork, infos_data, syevj_params, batch_size); TORCH_CUSOLVER_CHECK(cusolverDnDestroySyevjInfo(syevj_params)); } static void linalg_eigh_cusolver_syevd(const Tensor& eigenvalues, const Tensor& eigenvectors, const Tensor& infos, bool upper, bool compute_eigenvectors) { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(eigenvectors.scalar_type(), "linalg_eigh_cuda", [&] { apply_syevd<scalar_t>(eigenvalues, eigenvectors, infos, upper, compute_eigenvectors); }); } static void linalg_eigh_cusolver_syevj(const Tensor& eigenvalues, const Tensor& eigenvectors, const Tensor& infos, bool upper, bool compute_eigenvectors) { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(eigenvectors.scalar_type(), "linalg_eigh_cuda", [&] { apply_syevj<scalar_t>(eigenvalues, eigenvectors, infos, upper, compute_eigenvectors); }); } void linalg_eigh_cusolver(const Tensor& eigenvalues, const Tensor& eigenvectors, const Tensor& infos, bool upper, bool compute_eigenvectors) { // TODO: syevj_batched should be added here, but at least for CUDA 11.2 it contains a bug leading to incorrect results // See https://github.com/pytorch/pytorch/pull/53040#issuecomment-793626268 and https://github.com/cupy/cupy/issues/4847 // syevj is better than syevd for float32 dtype and matrix sizes 32x32 - 512x512 // See https://github.com/pytorch/pytorch/pull/53040#issuecomment-788264724 if (eigenvectors.scalar_type() == at::kFloat && eigenvectors.size(-1) >= 32 && eigenvectors.size(-1) <= 512) { return linalg_eigh_cusolver_syevj(eigenvalues, eigenvectors, infos, upper, compute_eigenvectors); } else { return linalg_eigh_cusolver_syevd(eigenvalues, eigenvectors, infos, upper, compute_eigenvectors); } } // The 'apply_' word is used for templated by dtype functions that call an API routine // underneath. Since the cusolver API has a slightly different structure we do not prepend // apply_ to this function. void lu_looped_cusolver(const Tensor& self, const Tensor& pivots, const Tensor& infos, bool get_pivots) { // Fill the pivots tensor with indices using 1-based (Fortran) indexing. 
This // is needed for maintaining the same results with MAGMA. auto k = std::min(self.size(-2), self.size(-1)); Tensor pivots_tmp = at::arange(1, k + 1, self.options().dtype(at::kInt)).expand_as(pivots); pivots.copy_(pivots_tmp); AT_DISPATCH_FLOATING_TYPES( self.scalar_type(), "lu_cusolver", [&self, &pivots, &infos, &get_pivots]() { int m = cuda_int_cast(self.size(-2), "m"); int n = cuda_int_cast(self.size(-1), "n"); int lda = std::max<int>(1, m); int64_t self_stride = matrixStride(self); int64_t batch_size = batchCount(self); scalar_t* self_data = self.data_ptr<scalar_t>(); int* infos_data = infos.data_ptr<int>(); auto handle = at::cuda::getCurrentCUDASolverDnHandle(); for (auto batch = decltype(batch_size){0}; batch < batch_size; ++batch) { if (get_pivots) { auto pivots_data = pivots.data_ptr<int>(); auto pivots_stride = pivots.size(-1); at::cuda::solver::getrf<scalar_t>( handle, m, n, self_data + batch * self_stride, lda, pivots_data + batch * pivots_stride, infos_data + batch ); } else { at::cuda::solver::getrf<scalar_t>( handle, m, n, self_data + batch * self_stride, lda, nullptr, infos_data + batch ); } } }); // Necessary because cuSOLVER uses nan for outputs that correspond to 0 in MAGMA for non-pivoted LU. // See https://github.com/pytorch/pytorch/issues/53879 for more details. if (!get_pivots) { at::nan_to_num_(const_cast<Tensor&>(self), 0, std::numeric_limits<double>::infinity(), -std::numeric_limits<double>::infinity()); } } void lu_solve_looped_cusolver(const Tensor& b, const Tensor& lu, const Tensor& pivots) { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(b.scalar_type(), "lu_solve_cusolver", [&] { int n = cuda_int_cast(lu.size(-2), "n"); int nrhs = cuda_int_cast(b.size(-1), "nrhs"); auto batch_size = batchCount(lu); auto info = at::zeros({1}, lu.options().dtype(kInt)); auto info_data = info.data_ptr<int>(); auto b_data = b.data_ptr<scalar_t>(); auto lu_data = lu.data_ptr<scalar_t>(); auto pivots_data = pivots.data_ptr<int>(); auto pivots_stride = pivots.size(-1); auto lu_stride = matrixStride(lu); auto b_stride = matrixStride(b); int leading_dimension = cuda_int_cast(std::max<int>(1, n), "leading_dimension"); auto handle = at::cuda::getCurrentCUDASolverDnHandle(); for (auto batch = decltype(batch_size){0}; batch < batch_size; ++batch) { at::cuda::solver::getrs<scalar_t>( handle, n, nrhs, lu_data + batch * lu_stride, leading_dimension, pivots_data + batch * pivots_stride, b_data + batch * b_stride, leading_dimension, info_data); TORCH_INTERNAL_ASSERT_DEBUG_ONLY(info.item().toInt() == 0); } }); } #endif // USE_CUSOLVER }} // namespace at::native
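The cuSOLVER helpers above (apply_orgqr, apply_syevd/apply_syevj, lu_looped_cusolver, lu_solve_looped_cusolver) all follow the same "looped" batching pattern: query the workspace size once, then iterate over the batch and call the dense cuSOLVER routine on each matrix at a stride offset, writing a per-matrix info flag. The standalone sketch below illustrates that pattern with the public cusolverDnDgetrf API; the matrix sizes, the dummy main(), and the reduced error handling are illustrative assumptions, not part of the file above (link with -lcusolver).

#include <cuda_runtime.h>
#include <cusolverDn.h>

int main() {
  const int n = 4, batch = 3;                       // toy sizes, illustration only
  const size_t stride = (size_t)n * n;              // column-major n x n matrices
  double* dA;  int *dPiv, *dInfo;
  cudaMalloc(&dA,    sizeof(double) * stride * batch);
  cudaMalloc(&dPiv,  sizeof(int) * n * batch);
  cudaMalloc(&dInfo, sizeof(int) * batch);
  // ... fill dA with the batch of matrices here ...

  cusolverDnHandle_t handle;
  cusolverDnCreate(&handle);

  // one workspace query is enough because every matrix has the same shape
  int lwork = 0;
  cusolverDnDgetrf_bufferSize(handle, n, n, dA, n, &lwork);
  double* dWork;
  cudaMalloc(&dWork, sizeof(double) * lwork);

  // the "looped" part: one cuSOLVER call per matrix, offset by the matrix stride
  for (int b = 0; b < batch; ++b) {
    cusolverDnDgetrf(handle, n, n, dA + b * stride, n, dWork, dPiv + (size_t)b * n, dInfo + b);
  }
  cudaDeviceSynchronize();

  cusolverDnDestroy(handle);
  cudaFree(dWork); cudaFree(dInfo); cudaFree(dPiv); cudaFree(dA);
  return 0;
}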
b5cb84996f98b9bc3166e3830a890e2b8d263028.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <iomanip> #include <thrust/device_vector.h> #include <thrust/device_ptr.h> #include <thrust/host_vector.h> #include <thrust/random.h> #include <thrust/transform.h> #include <thrust/functional.h> #include <thrust/execution_policy.h> #include <hip/hip_runtime.h> #include <assert.h> #include <algorithm> #include <omp.h> #include <fstream> #include <sstream> #include <stdio.h> #define KEPLER 0 #include "ErrorCheck.h" #include "include/encode.cuh" #include "include/decode.cuh" #include "include/cuZFP.cuh" #include "zfparray3.h" enum ENGHS_t{ N_LEFT, N_RIGHT, N_UP, N_DOWN, N_NEAR, N_FAR } enghs; using namespace thrust; using namespace std; #define index(x, y, z) ((x) + 4 * ((y) + 4 * (z))) const size_t nx = 512; const size_t ny = 512; const size_t nz = 512; const int nt = 0; const double pi = 3.14159265358979323846; //BSIZE is the length of the array in class Bit //It's tied to MAXBITS such that //MAXBITS = sizeof(Word) * BSIZE //which is really //MAXBITS = wsize * BSIZE //e.g. if we match bits one-to-one, double -> unsigned long long // then BSIZE = 64 and MAXPBITS = 4096 #define BSIZE 16 uint minbits = BSIZE*64; uint MAXBITS = BSIZE*64; uint MAXPREC = 64; int MINEXP = -1074; const double rate = BSIZE; size_t blksize = 0; unsigned long long group_count = 0x46acca631ull; uint size = 64; int EBITS = 11; /* number of exponent bits */ const int EBIAS = 1023; const int intprec = 64; static const unsigned char perm[64] = { index(0, 0, 0), // 0 : 0 index(1, 0, 0), // 1 : 1 index(0, 1, 0), // 2 : 1 index(0, 0, 1), // 3 : 1 index(0, 1, 1), // 4 : 2 index(1, 0, 1), // 5 : 2 index(1, 1, 0), // 6 : 2 index(2, 0, 0), // 7 : 2 index(0, 2, 0), // 8 : 2 index(0, 0, 2), // 9 : 2 index(1, 1, 1), // 10 : 3 index(2, 1, 0), // 11 : 3 index(2, 0, 1), // 12 : 3 index(0, 2, 1), // 13 : 3 index(1, 2, 0), // 14 : 3 index(1, 0, 2), // 15 : 3 index(0, 1, 2), // 16 : 3 index(3, 0, 0), // 17 : 3 index(0, 3, 0), // 18 : 3 index(0, 0, 3), // 19 : 3 index(2, 1, 1), // 20 : 4 index(1, 2, 1), // 21 : 4 index(1, 1, 2), // 22 : 4 index(0, 2, 2), // 23 : 4 index(2, 0, 2), // 24 : 4 index(2, 2, 0), // 25 : 4 index(3, 1, 0), // 26 : 4 index(3, 0, 1), // 27 : 4 index(0, 3, 1), // 28 : 4 index(1, 3, 0), // 29 : 4 index(1, 0, 3), // 30 : 4 index(0, 1, 3), // 31 : 4 index(1, 2, 2), // 32 : 5 index(2, 1, 2), // 33 : 5 index(2, 2, 1), // 34 : 5 index(3, 1, 1), // 35 : 5 index(1, 3, 1), // 36 : 5 index(1, 1, 3), // 37 : 5 index(3, 2, 0), // 38 : 5 index(3, 0, 2), // 39 : 5 index(0, 3, 2), // 40 : 5 index(2, 3, 0), // 41 : 5 index(2, 0, 3), // 42 : 5 index(0, 2, 3), // 43 : 5 index(2, 2, 2), // 44 : 6 index(3, 2, 1), // 45 : 6 index(3, 1, 2), // 46 : 6 index(1, 3, 2), // 47 : 6 index(2, 3, 1), // 48 : 6 index(2, 1, 3), // 49 : 6 index(1, 2, 3), // 50 : 6 index(0, 3, 3), // 51 : 6 index(3, 0, 3), // 52 : 6 index(3, 3, 0), // 53 : 6 index(3, 2, 2), // 54 : 7 index(2, 3, 2), // 55 : 7 index(2, 2, 3), // 56 : 7 index(1, 3, 3), // 57 : 7 index(3, 1, 3), // 58 : 7 index(3, 3, 1), // 59 : 7 index(2, 3, 3), // 60 : 8 index(3, 2, 3), // 61 : 8 index(3, 3, 2), // 62 : 8 index(3, 3, 3), // 63 : 9 }; static size_t block_size(double rate) { return (lrint(64 * rate) + CHAR_BIT - 1) / CHAR_BIT; } template<class Scalar> void setupConst(const unsigned char *perm, uint maxbits_, uint maxprec_, int minexp_, int ebits_, int ebias_ ) { ErrorCheck ec; ec.chk("setupConst start"); hipMemcpyToSymbol(c_perm, perm, sizeof(unsigned char) * 64, 0); ec.chk("setupConst: c_perm"); 
hipMemcpyToSymbol(c_maxbits, &MAXBITS, sizeof(uint)); ec.chk("setupConst: c_maxbits"); const uint sizeof_scalar = sizeof(Scalar); hipMemcpyToSymbol(c_sizeof_scalar, &sizeof_scalar, sizeof(uint)); ec.chk("setupConst: c_sizeof_scalar"); hipMemcpyToSymbol(c_maxprec, &maxprec_, sizeof(uint)); ec.chk("setupConst: c_maxprec"); hipMemcpyToSymbol(c_minexp, &minexp_, sizeof(int)); ec.chk("setupConst: c_minexp"); hipMemcpyToSymbol(c_ebits, &ebits_, sizeof(int)); ec.chk("setupConst: c_ebits"); hipMemcpyToSymbol(c_ebias, &ebias_, sizeof(int)); ec.chk("setupConst: c_ebias"); ec.chk("setupConst finished"); } //Used to generate rand array in CUDA with Thrust struct RandGen { RandGen() {} __device__ float operator () (const uint idx) { thrust::default_random_engine randEng; thrust::uniform_real_distribution<float> uniDist(0.0, 0.0001); randEng.discard(idx); return uniDist(randEng); } }; __device__ static inline int idx(int x, int y, int z) { return x + y * (blockDim.x * gridDim.x) + z * (blockDim.x * gridDim.x * blockDim.y * gridDim.y); } template<typename Scalar> __global__ void cudaDiffusion ( const Scalar *u, const Scalar dx, const Scalar dy, const Scalar dz, const Scalar dt, const Scalar k, const Scalar tfinal, Scalar *du ) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int z = threadIdx.z + blockIdx.z * blockDim.z; Scalar uxx = (u[idx(max(0, x - 1), y, z)] - 2 * u[idx(x, y, z)] + u[idx(min(blockDim.x*gridDim.x - 1, x + 1), y, z)]) / (dx * dx); Scalar uyy = (u[idx(x, max(0, y - 1), z)] - 2 * u[idx(x, y, z)] + u[idx(x, min(blockDim.y*gridDim.y - 1, y + 1), z)]) / (dy * dy); Scalar uzz = (u[idx(x, y, max(0, z - 1))] - 2 * u[idx(x, y, z)] + u[idx(x, y, min(blockDim.z*gridDim.z-1, z + 1))]) / (dz * dz); du[idx(x, y, z)] = dt * k * (uxx + uyy + uzz); } template<typename Scalar> __global__ void cudaSum ( Scalar *u, const Scalar *du ) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int z = threadIdx.z + blockIdx.z * blockDim.z; u[idx(x, y, z)] += du[idx(x, y, z)]; } template<class Int, class UInt, class Scalar, uint bsize, int intprec> __global__ void __launch_bounds__(64, 5) cudaZFPDiffusion ( const Scalar *u, Word *du, uint size, const Scalar dx, const Scalar dy, const Scalar dz, const Scalar dt, const Scalar k ) { uint x = threadIdx.x; uint y = threadIdx.y; uint z = threadIdx.z; uint tid = threadIdx.x + threadIdx.y * blockDim.x + threadIdx.z *blockDim.x*blockDim.y; uint bidx = (blockIdx.x + blockIdx.y * gridDim.x + blockIdx.z * gridDim.y * gridDim.x); uint bdim = blockDim.x*blockDim.y*blockDim.z; uint tbidx = bidx*bdim; extern __shared__ unsigned char smem[]; __shared__ Scalar *s_u, *s_du, *s_nghs, *s_u_ext; s_u = (Scalar*)&smem[0]; s_du = (Scalar*)&s_u[64]; s_u_ext = (Scalar*)&s_du[64]; s_nghs = (Scalar*)&s_u_ext[216]; unsigned char *new_smem = (unsigned char*)&s_nghs[64]; //cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(u + bidx*bsize, new_smem, tid, s_u); //cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(du + bidx*bsize, new_smem, tid, s_du); //__syncthreads(); int3 utid = make_int3(threadIdx.x + blockDim.x * blockIdx.x, threadIdx.y + blockDim.y * blockIdx.y, threadIdx.z + blockDim.z * blockIdx.z); Scalar uxx = (u[idx(max(0, utid.x - 1), utid.y, utid.z)] - 2 * u[idx(utid.x, utid.y, utid.z)] + u[idx(min(blockDim.x*gridDim.x - 1, utid.x + 1), utid.y, utid.z)]) / (dx * dx); Scalar uyy = (u[idx(utid.x, max(0, utid.y - 1), utid.z)] - 2 * u[idx(utid.x, utid.y, utid.z)] + u[idx(utid.x, min(blockDim.y*gridDim.y 
- 1, utid.y + 1), utid.z)]) / (dy * dy); Scalar uzz = (u[idx(utid.x, utid.y, max(0, utid.z - 1))] - 2 * u[idx(utid.x, utid.y, utid.z)] + u[idx(utid.x, utid.y, min(blockDim.z*gridDim.z - 1, utid.z + 1))]) / (dz * dz); s_du[tid] = dt*k * (uxx + uyy + uzz); __syncthreads(); cuZFP::encode<Int, UInt, Scalar, bsize, intprec>( s_du, size, new_smem, bidx * bsize, du ); } template<class Int, class UInt, class Scalar, uint bsize, int intprec> __global__ void __launch_bounds__(64, 5) cudaZFPDiffusion ( const Word *u, Word *du, uint size, const Scalar dx, const Scalar dy, const Scalar dz, const Scalar dt, const Scalar k ) { uint x = threadIdx.x; uint y = threadIdx.y; uint z = threadIdx.z; uint tid = threadIdx.x + threadIdx.y * blockDim.x + threadIdx.z *blockDim.x*blockDim.y; uint idx = (blockIdx.x + blockIdx.y * gridDim.x + blockIdx.z * gridDim.y * gridDim.x); uint bdim = blockDim.x*blockDim.y*blockDim.z; uint bidx = idx*bdim; extern __shared__ unsigned char smem[]; __shared__ Scalar *s_u, *s_du, *s_nghs, *s_u_ext; s_u = (Scalar*)&smem[0]; s_du = (Scalar*)&s_u[64]; s_u_ext = (Scalar*)&s_du[64]; s_nghs = (Scalar*)&s_u_ext[216]; unsigned char *new_smem = (unsigned char*)&s_nghs[64]; cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(u + idx*bsize, new_smem, tid, s_u); //cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(du + idx*bsize, new_smem, tid, s_du); for (int i = 0; i < 3; i++){ s_u_ext[i * 64 + tid] = 0; } if (tid < 24) s_u_ext[192 + tid] = 0; __syncthreads(); //left s_nghs[tid] = 0; if (blockIdx.x > 0){ cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(u + ((blockIdx.x-1) + blockIdx.y * gridDim.x + blockIdx.z * gridDim.y * gridDim.x)*bsize, new_smem, tid, s_nghs); } __syncthreads(); //if (tid == 0){ // for (int i = 0; i < 4; i++){ // for (int j = 0; j < 4; j++){ // s_u_ext[(i+1) * 6 + (j+1) * 36] = s_nghs[3 + i * blockDim.x + j * blockDim.x * blockDim.y]; // } // } //} if (z == 0){ s_u_ext[(x + 1) * 6 + (y + 1) * 36] = s_nghs[3 + x * blockDim.x + y * blockDim.x * blockDim.y]; } __syncthreads(); //right s_nghs[tid] = 0; if (blockIdx.x+1 < gridDim.x){ cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(u + (1 + blockIdx.x + blockIdx.y * gridDim.x + blockIdx.z * gridDim.y * gridDim.x)*bsize, new_smem, tid, s_nghs); } __syncthreads(); //if (tid == 0){ // for (int i = 0; i < 4; i++){ // for (int j = 0; j < 4; j++){ // s_u_ext[5 + (i+1) * 6 + (j+1) * 36] = s_nghs[i*blockDim.x + j * blockDim.x * blockDim.y]; // } // } //} if (z == 0){ s_u_ext[5 + (x + 1) * 6 + (y + 1) * 36] = s_nghs[x*blockDim.x + y * blockDim.x * blockDim.y]; } __syncthreads(); //down s_nghs[tid] = 0; if (blockIdx.y > 0){ cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(u + (blockIdx.x + (blockIdx.y - 1) * gridDim.x + blockIdx.z * gridDim.y * gridDim.x)*bsize, new_smem, tid, s_nghs); } __syncthreads(); //if (tid == 0){ // for (int i = 0; i < 4; i++){ // for (int j = 0; j < 4; j++){ // s_u_ext[1 + i + (j+1) * 36] = s_nghs[i + 3*blockDim.x + j * blockDim.x * blockDim.y]; // } // } //} if (z == 0){ s_u_ext[1 + x + (y + 1) * 36] = s_nghs[x + 3 * blockDim.x + y * blockDim.x * blockDim.y]; } __syncthreads(); //up s_nghs[tid] = 0; if (blockIdx.y + 1 < gridDim.y){ cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(u + (blockIdx.x + (blockIdx.y + 1) * gridDim.x + blockIdx.z * gridDim.y * gridDim.x)*bsize, new_smem, tid, s_nghs); } __syncthreads(); //if (tid == 0){ // for (int i = 0; i < 4; i++){ // for (int j = 0; j < 4; j++){ // s_u_ext[1 + i + 5*6 + (j+1) * 36] = s_nghs[i + j * blockDim.x * blockDim.y]; // } // } //} if (z == 0){ s_u_ext[1 + x + 5 * 
6 + (y + 1) * 36] = s_nghs[x + y * blockDim.x * blockDim.y]; } __syncthreads(); //near s_nghs[tid] = 0; if (blockIdx.z > 0){ cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(u + (blockIdx.x + blockIdx.y * gridDim.x + (blockIdx.z - 1) * gridDim.y * gridDim.x)*bsize, new_smem, tid, s_nghs); } __syncthreads(); //if (tid == 0){ // for (int i = 0; i < 4; i++){ // for (int j = 0; j < 4; j++){ // s_u_ext[1 + i + (j + 1) * 6] = s_nghs[i + (j)*blockDim.x + 3 * blockDim.x * blockDim.y]; // } // } //} if (z == 0){ s_u_ext[1 + x + (y + 1) * 6] = s_nghs[x + (y)*blockDim.x + 3 * blockDim.x * blockDim.y]; } __syncthreads(); //far s_nghs[tid] = 0; if (blockIdx.z + 1 < gridDim.z){ cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(u + (blockIdx.x + blockIdx.y * gridDim.x + (blockIdx.z + 1) * gridDim.y * gridDim.x)*bsize, new_smem, tid, s_nghs); } __syncthreads(); //if (tid == 0){ // for (int i = 0; i < 4; i++){ // for (int j = 0; j < 4; j++){ // s_u_ext[1 + i + (j + 1) * 6 + 5 * 36] = s_nghs[i + (j)*blockDim.x ]; // } // } //} if (z == 0){ s_u_ext[1 + x + (y + 1) * 6 + 5 * 36] = s_nghs[x + (y)*blockDim.x]; } __syncthreads(); s_u_ext[1 + x + (y + 1) * 6 + (z + 1) * 36] = s_u[tid]; __syncthreads(); Scalar uxx = (s_u_ext[x + (y + 1) * 6 + (z + 1) * 36] - 2 * s_u_ext[x + 1 + (y + 1) * 6 + (z + 1) * 36] + s_u_ext[x + 2 + (y + 1) * 6 + (z + 1) * 36]) / (dx * dx); Scalar uyy = (s_u_ext[x + 1 + (y)* 6 + (z + 1) * 36] - 2 * s_u_ext[x + 1 + (y + 1) * 6 + (z + 1) * 36] + s_u_ext[x + 1 + (y + 2) * 6 + (z + 1) * 36]) / (dy * dy); Scalar uzz = (s_u_ext[x + 1 + (y + 1) * 6 + (z)* 36] - 2 * s_u_ext[x + 1 + (y + 1) * 6 + (z + 1) * 36] + s_u_ext[x + 1 + (y + 1) * 6 + (z + 2) * 36]) / (dz * dz); s_du[tid] = dt*k * (uxx + uyy + uzz); __syncthreads(); //if (uxx < 0 || uyy < 0 || uzz < 0){ // printf("%d, %f, %f, %f, %f %f %f %d %d %d %d\n", tid, dt, k, s_du[tid], uxx, uyy, uzz, threadIdx.x + blockIdx.x * blockDim.x, threadIdx.y + blockIdx.y * blockDim.y, threadIdx.z + blockIdx.z * blockDim.z, threadIdx.x + blockIdx.x * blockDim.x + (threadIdx.y + blockIdx.y * blockDim.y)*gridDim.x * blockDim.x + (threadIdx.z + blockIdx.z * blockDim.z)*gridDim.x * blockDim.x * gridDim.y * blockDim.y); //} cuZFP::encode<Int, UInt, Scalar, bsize, intprec>( s_du, size, new_smem, idx * bsize, du ); //out[(threadIdx.z + blockIdx.z * 4)*gridDim.x * gridDim.y * blockDim.x * blockDim.y + (threadIdx.y + blockIdx.y * 4)*gridDim.x * blockDim.x + (threadIdx.x + blockIdx.x * 4)] = s_dblock[tid]; } template<class Int, class UInt, class Scalar, uint bsize, int intprec> void gpuZFPDiffusion ( int nx, int ny, int nz, device_vector<Word > &u, device_vector<Word > &du, device_vector<Scalar> &df_u, const Scalar dx, const Scalar dy, const Scalar dz, const Scalar dt, const Scalar k, const Scalar tfinal ) { dim3 block_size = dim3(4, 4, 4); dim3 grid_size = dim3(nx, ny, nz); grid_size.x /= block_size.x; grid_size.y /= block_size.y; grid_size.z /= block_size.z; cudaZFPDiffusion<Int, UInt, Scalar, bsize, intprec> << < grid_size, block_size, (sizeof(Scalar) * 2 + 2 * sizeof(unsigned char) + sizeof(Bitter) + sizeof(UInt) + sizeof(Int) + sizeof(Scalar) + 3 * sizeof(int)) * 64 + 32 * sizeof(Scalar) + 4 + 216 * sizeof(Scalar) >> > ( thrust::raw_pointer_cast(u.data()), thrust::raw_pointer_cast(du.data()), size, dx,dy,dz,dt,k ); // cuZFP::decode<Int, UInt, Scalar, bsize, intprec>( // nx, ny, nz, // u, df_u, // group_count // ); //cudaZFPDiffusion<Int, UInt, Scalar, bsize, intprec> << < grid_size, block_size, (sizeof(Scalar) * 2 + 2 * sizeof(unsigned char) + sizeof(Bitter) + 
sizeof(UInt) + sizeof(Int) + sizeof(Scalar) + 3 * sizeof(int)) * 64 + 32 * sizeof(Scalar) + 4 + 216 * sizeof(Scalar) >> > // ( // thrust::raw_pointer_cast(df_u.data()), // thrust::raw_pointer_cast(du.data()), // size, // dx,dy,dz,dt,k // ); cuZFP::transform <Int, UInt, Scalar, bsize, intprec> ( nx,ny,nz, size, u, du, thrust::plus<Scalar>() ); //Scalar sum_u = cuZFP::reduce<Int, UInt, Scalar, bsize, intprec>(nx, ny, nz, u); //Scalar sum_du = cuZFP::reduce<Int, UInt, Scalar, bsize, intprec>(nx, ny, nz, du); //cout << "post-transform du: " << sum_du << " u: " << sum_u << endl; } template<class Int, class UInt, class Scalar, uint bsize> void gpuEncode ( host_vector<Scalar> &h_u ) { device_vector<Scalar> d_u; d_u = h_u; ErrorCheck ec; hipEvent_t start, stop; float millisecs; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); dim3 emax_size(nx / 4, ny / 4, nz / 4); device_vector<Word > u(emax_size.x * emax_size.y * emax_size.z * bsize); cuZFP::encode<Int, UInt, Scalar, bsize, intprec>(nx, ny, nz, d_u, u, group_count, size); hipStreamSynchronize(0); ec.chk("cudaEncode"); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&millisecs, start, stop); ec.chk("cudaencode"); cout << "encode GPU in time: " << millisecs/1000.0 << endl; cout << "sum: " << cuZFP::reduce<Int, UInt, Scalar, bsize, intprec>(nx, ny, nz, u) << endl; double tot_sum = 0, max_diff = 0, min_diff = 1e16; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(nx, ny, nz, u, d_u, group_count); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&millisecs, start, stop); ec.chk("cudadecoe"); cout << "decode GPU in time: " << millisecs / 1000.0 << endl; host_vector<Scalar> h_out = d_u; //array3d out(nx, ny, nz, rate); //for (int i = 0; i < h_out.size(); i++){ // out[i] = h_out[i]; //} } int main() { #if 0 //Convert combustion dataset from uinta output to raw const size_t nx = 417; const size_t ny = 664; const size_t nz = 417; std::vector<double> h_vec_in(nx*ny*nz, 0); ifstream ifs("../../combustion.txt"); size_t max_x, max_y, max_z, min_x, min_y, min_z; max_x = max_y = max_z = 0; min_x = min_y = min_z = 1e6; for (std::string line; std::getline(ifs, line);) { std::istringstream is(line); size_t idx_x, idx_y, idx_z; double val; is >> idx_x; is >> idx_y; is >> idx_z; if (max_x < idx_x) max_x = idx_x; if (max_y < idx_y) max_y = idx_y; if (max_z < idx_z) max_z = idx_z; if (min_x > idx_x) min_x = idx_x; if (min_y > idx_y) min_y = idx_y; if (min_z > idx_z) min_z = idx_z; is >> val; h_vec_in[idx_z * nx*ny + idx_y *nx + idx_x] = val; } cout << min_x << " " << min_y << " " << min_z << " " << max_x << " " << max_y << " " << max_z << endl; ifs.close(); FILE* pFile; pFile = fopen("combustion.raw", "wb"); fwrite(&h_vec_in[0], 1, nx*ny*nz*sizeof(double), pFile); fclose(pFile); #endif host_vector<double> h_vec_in(nx*ny*nz, 0); ifstream ifs("../../combustion_512.raw", ios::binary); if (ifs) { double read; for (int i = 0; i < nx*ny*nz; i++){ ifs.read(reinterpret_cast<char*>(&read), sizeof read); h_vec_in[i] = read; } } ifs.close(); cout << "cpu encode start" << endl; double start_time = omp_get_wtime(); zfp::array3d u(nx, ny, nz, rate); for (int i = 0; i < nx*ny*nz; i++){ u[i] = h_vec_in[i]; } double time = omp_get_wtime() - start_time; cout << "decode cpu time: " << time << endl; host_vector<double> h_vec_out(nx*ny*nz, 0); cout << "cpu decode start" << endl; start_time = omp_get_wtime(); for (int z = 0; z < nz; z++){ for (int 
y = 0; y < ny; y++) { for (int x = 0; x < nx; x++) { h_vec_out[z*nx*ny + y*nx + x] = u(x, y, z); } } } time = omp_get_wtime() - start_time; cout << "decode cpu time: " << time << endl; cout << "sum: " << thrust::reduce(h_vec_out.begin(), h_vec_out.end()) << endl; cout << "GPU ZFP encode start" << endl; hipDeviceSetCacheConfig(hipFuncCachePreferL1); setupConst<double>(perm, MAXBITS, MAXPREC, MINEXP, EBITS, EBIAS); cout << "Begin gpuDiffusion" << endl; gpuEncode<long long, unsigned long long, double, BSIZE>(h_vec_in); cout << "Finish gpuDiffusion" << endl; }
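A note on the kernels above: cudaZFPDiffusion decodes each 4x4x4 ZFP block plus its six face-neighbor blocks into the 6x6x6 shared buffer s_u_ext (216 entries), so the 7-point Laplacian can read a one-cell halo without extra decodes. The sketch below isolates that halo-tile idea on uncompressed data; it assumes a (4,4,4) thread block, grid dimensions divisible by 4, a single spacing h in all directions, and clamped boundaries, and it is not taken from the file above.

__device__ inline size_t clamp_idx(int i, int j, int l, int nx, int ny, int nz) {
  i = min(max(i, 0), nx - 1);
  j = min(max(j, 0), ny - 1);
  l = min(max(l, 0), nz - 1);
  return (size_t)i + (size_t)nx * ((size_t)j + (size_t)ny * (size_t)l);
}

__global__ void halo_tile_stencil_demo(const double* u, double* du,
                                       int nx, int ny, int nz,
                                       double h, double dt, double k) {
  __shared__ double tile[6][6][6];                       // 4x4x4 block + 1-cell halo
  int x = threadIdx.x, y = threadIdx.y, z = threadIdx.z; // 0..3 each
  int gx = blockIdx.x * 4 + x, gy = blockIdx.y * 4 + y, gz = blockIdx.z * 4 + z;
  // stage the interior value plus the face halos; edge and corner halo cells are
  // never read by a 7-point stencil, so they can stay unset
  tile[z + 1][y + 1][x + 1] = u[clamp_idx(gx, gy, gz, nx, ny, nz)];
  if (x == 0) tile[z + 1][y + 1][0] = u[clamp_idx(gx - 1, gy, gz, nx, ny, nz)];
  if (x == 3) tile[z + 1][y + 1][5] = u[clamp_idx(gx + 1, gy, gz, nx, ny, nz)];
  if (y == 0) tile[z + 1][0][x + 1] = u[clamp_idx(gx, gy - 1, gz, nx, ny, nz)];
  if (y == 3) tile[z + 1][5][x + 1] = u[clamp_idx(gx, gy + 1, gz, nx, ny, nz)];
  if (z == 0) tile[0][y + 1][x + 1] = u[clamp_idx(gx, gy, gz - 1, nx, ny, nz)];
  if (z == 3) tile[5][y + 1][x + 1] = u[clamp_idx(gx, gy, gz + 1, nx, ny, nz)];
  __syncthreads();
  double c = tile[z + 1][y + 1][x + 1];
  double lap = (tile[z + 1][y + 1][x] + tile[z + 1][y + 1][x + 2] +
                tile[z + 1][y][x + 1] + tile[z + 1][y + 2][x + 1] +
                tile[z][y + 1][x + 1] + tile[z + 2][y + 1][x + 1] - 6.0 * c) / (h * h);
  du[clamp_idx(gx, gy, gz, nx, ny, nz)] = dt * k * lap;  // same update as s_du above
}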
b5cb84996f98b9bc3166e3830a890e2b8d263028.cu
#include <iostream> #include <iomanip> #include <thrust/device_vector.h> #include <thrust/device_ptr.h> #include <thrust/host_vector.h> #include <thrust/random.h> #include <thrust/transform.h> #include <thrust/functional.h> #include <thrust/execution_policy.h> #include <cuda_runtime.h> #include <assert.h> #include <algorithm> #include <omp.h> #include <fstream> #include <sstream> #include <stdio.h> #define KEPLER 0 #include "ErrorCheck.h" #include "include/encode.cuh" #include "include/decode.cuh" #include "include/cuZFP.cuh" #include "zfparray3.h" enum ENGHS_t{ N_LEFT, N_RIGHT, N_UP, N_DOWN, N_NEAR, N_FAR } enghs; using namespace thrust; using namespace std; #define index(x, y, z) ((x) + 4 * ((y) + 4 * (z))) const size_t nx = 512; const size_t ny = 512; const size_t nz = 512; const int nt = 0; const double pi = 3.14159265358979323846; //BSIZE is the length of the array in class Bit //It's tied to MAXBITS such that //MAXBITS = sizeof(Word) * BSIZE //which is really //MAXBITS = wsize * BSIZE //e.g. if we match bits one-to-one, double -> unsigned long long // then BSIZE = 64 and MAXPBITS = 4096 #define BSIZE 16 uint minbits = BSIZE*64; uint MAXBITS = BSIZE*64; uint MAXPREC = 64; int MINEXP = -1074; const double rate = BSIZE; size_t blksize = 0; unsigned long long group_count = 0x46acca631ull; uint size = 64; int EBITS = 11; /* number of exponent bits */ const int EBIAS = 1023; const int intprec = 64; static const unsigned char perm[64] = { index(0, 0, 0), // 0 : 0 index(1, 0, 0), // 1 : 1 index(0, 1, 0), // 2 : 1 index(0, 0, 1), // 3 : 1 index(0, 1, 1), // 4 : 2 index(1, 0, 1), // 5 : 2 index(1, 1, 0), // 6 : 2 index(2, 0, 0), // 7 : 2 index(0, 2, 0), // 8 : 2 index(0, 0, 2), // 9 : 2 index(1, 1, 1), // 10 : 3 index(2, 1, 0), // 11 : 3 index(2, 0, 1), // 12 : 3 index(0, 2, 1), // 13 : 3 index(1, 2, 0), // 14 : 3 index(1, 0, 2), // 15 : 3 index(0, 1, 2), // 16 : 3 index(3, 0, 0), // 17 : 3 index(0, 3, 0), // 18 : 3 index(0, 0, 3), // 19 : 3 index(2, 1, 1), // 20 : 4 index(1, 2, 1), // 21 : 4 index(1, 1, 2), // 22 : 4 index(0, 2, 2), // 23 : 4 index(2, 0, 2), // 24 : 4 index(2, 2, 0), // 25 : 4 index(3, 1, 0), // 26 : 4 index(3, 0, 1), // 27 : 4 index(0, 3, 1), // 28 : 4 index(1, 3, 0), // 29 : 4 index(1, 0, 3), // 30 : 4 index(0, 1, 3), // 31 : 4 index(1, 2, 2), // 32 : 5 index(2, 1, 2), // 33 : 5 index(2, 2, 1), // 34 : 5 index(3, 1, 1), // 35 : 5 index(1, 3, 1), // 36 : 5 index(1, 1, 3), // 37 : 5 index(3, 2, 0), // 38 : 5 index(3, 0, 2), // 39 : 5 index(0, 3, 2), // 40 : 5 index(2, 3, 0), // 41 : 5 index(2, 0, 3), // 42 : 5 index(0, 2, 3), // 43 : 5 index(2, 2, 2), // 44 : 6 index(3, 2, 1), // 45 : 6 index(3, 1, 2), // 46 : 6 index(1, 3, 2), // 47 : 6 index(2, 3, 1), // 48 : 6 index(2, 1, 3), // 49 : 6 index(1, 2, 3), // 50 : 6 index(0, 3, 3), // 51 : 6 index(3, 0, 3), // 52 : 6 index(3, 3, 0), // 53 : 6 index(3, 2, 2), // 54 : 7 index(2, 3, 2), // 55 : 7 index(2, 2, 3), // 56 : 7 index(1, 3, 3), // 57 : 7 index(3, 1, 3), // 58 : 7 index(3, 3, 1), // 59 : 7 index(2, 3, 3), // 60 : 8 index(3, 2, 3), // 61 : 8 index(3, 3, 2), // 62 : 8 index(3, 3, 3), // 63 : 9 }; static size_t block_size(double rate) { return (lrint(64 * rate) + CHAR_BIT - 1) / CHAR_BIT; } template<class Scalar> void setupConst(const unsigned char *perm, uint maxbits_, uint maxprec_, int minexp_, int ebits_, int ebias_ ) { ErrorCheck ec; ec.chk("setupConst start"); cudaMemcpyToSymbol(c_perm, perm, sizeof(unsigned char) * 64, 0); ec.chk("setupConst: c_perm"); cudaMemcpyToSymbol(c_maxbits, &MAXBITS, sizeof(uint)); 
ec.chk("setupConst: c_maxbits"); const uint sizeof_scalar = sizeof(Scalar); cudaMemcpyToSymbol(c_sizeof_scalar, &sizeof_scalar, sizeof(uint)); ec.chk("setupConst: c_sizeof_scalar"); cudaMemcpyToSymbol(c_maxprec, &maxprec_, sizeof(uint)); ec.chk("setupConst: c_maxprec"); cudaMemcpyToSymbol(c_minexp, &minexp_, sizeof(int)); ec.chk("setupConst: c_minexp"); cudaMemcpyToSymbol(c_ebits, &ebits_, sizeof(int)); ec.chk("setupConst: c_ebits"); cudaMemcpyToSymbol(c_ebias, &ebias_, sizeof(int)); ec.chk("setupConst: c_ebias"); ec.chk("setupConst finished"); } //Used to generate rand array in CUDA with Thrust struct RandGen { RandGen() {} __device__ float operator () (const uint idx) { thrust::default_random_engine randEng; thrust::uniform_real_distribution<float> uniDist(0.0, 0.0001); randEng.discard(idx); return uniDist(randEng); } }; __device__ static inline int idx(int x, int y, int z) { return x + y * (blockDim.x * gridDim.x) + z * (blockDim.x * gridDim.x * blockDim.y * gridDim.y); } template<typename Scalar> __global__ void cudaDiffusion ( const Scalar *u, const Scalar dx, const Scalar dy, const Scalar dz, const Scalar dt, const Scalar k, const Scalar tfinal, Scalar *du ) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int z = threadIdx.z + blockIdx.z * blockDim.z; Scalar uxx = (u[idx(max(0, x - 1), y, z)] - 2 * u[idx(x, y, z)] + u[idx(min(blockDim.x*gridDim.x - 1, x + 1), y, z)]) / (dx * dx); Scalar uyy = (u[idx(x, max(0, y - 1), z)] - 2 * u[idx(x, y, z)] + u[idx(x, min(blockDim.y*gridDim.y - 1, y + 1), z)]) / (dy * dy); Scalar uzz = (u[idx(x, y, max(0, z - 1))] - 2 * u[idx(x, y, z)] + u[idx(x, y, min(blockDim.z*gridDim.z-1, z + 1))]) / (dz * dz); du[idx(x, y, z)] = dt * k * (uxx + uyy + uzz); } template<typename Scalar> __global__ void cudaSum ( Scalar *u, const Scalar *du ) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int z = threadIdx.z + blockIdx.z * blockDim.z; u[idx(x, y, z)] += du[idx(x, y, z)]; } template<class Int, class UInt, class Scalar, uint bsize, int intprec> __global__ void __launch_bounds__(64, 5) cudaZFPDiffusion ( const Scalar *u, Word *du, uint size, const Scalar dx, const Scalar dy, const Scalar dz, const Scalar dt, const Scalar k ) { uint x = threadIdx.x; uint y = threadIdx.y; uint z = threadIdx.z; uint tid = threadIdx.x + threadIdx.y * blockDim.x + threadIdx.z *blockDim.x*blockDim.y; uint bidx = (blockIdx.x + blockIdx.y * gridDim.x + blockIdx.z * gridDim.y * gridDim.x); uint bdim = blockDim.x*blockDim.y*blockDim.z; uint tbidx = bidx*bdim; extern __shared__ unsigned char smem[]; __shared__ Scalar *s_u, *s_du, *s_nghs, *s_u_ext; s_u = (Scalar*)&smem[0]; s_du = (Scalar*)&s_u[64]; s_u_ext = (Scalar*)&s_du[64]; s_nghs = (Scalar*)&s_u_ext[216]; unsigned char *new_smem = (unsigned char*)&s_nghs[64]; //cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(u + bidx*bsize, new_smem, tid, s_u); //cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(du + bidx*bsize, new_smem, tid, s_du); //__syncthreads(); int3 utid = make_int3(threadIdx.x + blockDim.x * blockIdx.x, threadIdx.y + blockDim.y * blockIdx.y, threadIdx.z + blockDim.z * blockIdx.z); Scalar uxx = (u[idx(max(0, utid.x - 1), utid.y, utid.z)] - 2 * u[idx(utid.x, utid.y, utid.z)] + u[idx(min(blockDim.x*gridDim.x - 1, utid.x + 1), utid.y, utid.z)]) / (dx * dx); Scalar uyy = (u[idx(utid.x, max(0, utid.y - 1), utid.z)] - 2 * u[idx(utid.x, utid.y, utid.z)] + u[idx(utid.x, min(blockDim.y*gridDim.y - 1, utid.y + 1), utid.z)]) / (dy * dy); Scalar 
uzz = (u[idx(utid.x, utid.y, max(0, utid.z - 1))] - 2 * u[idx(utid.x, utid.y, utid.z)] + u[idx(utid.x, utid.y, min(blockDim.z*gridDim.z - 1, utid.z + 1))]) / (dz * dz); s_du[tid] = dt*k * (uxx + uyy + uzz); __syncthreads(); cuZFP::encode<Int, UInt, Scalar, bsize, intprec>( s_du, size, new_smem, bidx * bsize, du ); } template<class Int, class UInt, class Scalar, uint bsize, int intprec> __global__ void __launch_bounds__(64, 5) cudaZFPDiffusion ( const Word *u, Word *du, uint size, const Scalar dx, const Scalar dy, const Scalar dz, const Scalar dt, const Scalar k ) { uint x = threadIdx.x; uint y = threadIdx.y; uint z = threadIdx.z; uint tid = threadIdx.x + threadIdx.y * blockDim.x + threadIdx.z *blockDim.x*blockDim.y; uint idx = (blockIdx.x + blockIdx.y * gridDim.x + blockIdx.z * gridDim.y * gridDim.x); uint bdim = blockDim.x*blockDim.y*blockDim.z; uint bidx = idx*bdim; extern __shared__ unsigned char smem[]; __shared__ Scalar *s_u, *s_du, *s_nghs, *s_u_ext; s_u = (Scalar*)&smem[0]; s_du = (Scalar*)&s_u[64]; s_u_ext = (Scalar*)&s_du[64]; s_nghs = (Scalar*)&s_u_ext[216]; unsigned char *new_smem = (unsigned char*)&s_nghs[64]; cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(u + idx*bsize, new_smem, tid, s_u); //cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(du + idx*bsize, new_smem, tid, s_du); for (int i = 0; i < 3; i++){ s_u_ext[i * 64 + tid] = 0; } if (tid < 24) s_u_ext[192 + tid] = 0; __syncthreads(); //left s_nghs[tid] = 0; if (blockIdx.x > 0){ cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(u + ((blockIdx.x-1) + blockIdx.y * gridDim.x + blockIdx.z * gridDim.y * gridDim.x)*bsize, new_smem, tid, s_nghs); } __syncthreads(); //if (tid == 0){ // for (int i = 0; i < 4; i++){ // for (int j = 0; j < 4; j++){ // s_u_ext[(i+1) * 6 + (j+1) * 36] = s_nghs[3 + i * blockDim.x + j * blockDim.x * blockDim.y]; // } // } //} if (z == 0){ s_u_ext[(x + 1) * 6 + (y + 1) * 36] = s_nghs[3 + x * blockDim.x + y * blockDim.x * blockDim.y]; } __syncthreads(); //right s_nghs[tid] = 0; if (blockIdx.x+1 < gridDim.x){ cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(u + (1 + blockIdx.x + blockIdx.y * gridDim.x + blockIdx.z * gridDim.y * gridDim.x)*bsize, new_smem, tid, s_nghs); } __syncthreads(); //if (tid == 0){ // for (int i = 0; i < 4; i++){ // for (int j = 0; j < 4; j++){ // s_u_ext[5 + (i+1) * 6 + (j+1) * 36] = s_nghs[i*blockDim.x + j * blockDim.x * blockDim.y]; // } // } //} if (z == 0){ s_u_ext[5 + (x + 1) * 6 + (y + 1) * 36] = s_nghs[x*blockDim.x + y * blockDim.x * blockDim.y]; } __syncthreads(); //down s_nghs[tid] = 0; if (blockIdx.y > 0){ cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(u + (blockIdx.x + (blockIdx.y - 1) * gridDim.x + blockIdx.z * gridDim.y * gridDim.x)*bsize, new_smem, tid, s_nghs); } __syncthreads(); //if (tid == 0){ // for (int i = 0; i < 4; i++){ // for (int j = 0; j < 4; j++){ // s_u_ext[1 + i + (j+1) * 36] = s_nghs[i + 3*blockDim.x + j * blockDim.x * blockDim.y]; // } // } //} if (z == 0){ s_u_ext[1 + x + (y + 1) * 36] = s_nghs[x + 3 * blockDim.x + y * blockDim.x * blockDim.y]; } __syncthreads(); //up s_nghs[tid] = 0; if (blockIdx.y + 1 < gridDim.y){ cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(u + (blockIdx.x + (blockIdx.y + 1) * gridDim.x + blockIdx.z * gridDim.y * gridDim.x)*bsize, new_smem, tid, s_nghs); } __syncthreads(); //if (tid == 0){ // for (int i = 0; i < 4; i++){ // for (int j = 0; j < 4; j++){ // s_u_ext[1 + i + 5*6 + (j+1) * 36] = s_nghs[i + j * blockDim.x * blockDim.y]; // } // } //} if (z == 0){ s_u_ext[1 + x + 5 * 6 + (y + 1) * 36] = s_nghs[x + y * blockDim.x * 
blockDim.y]; } __syncthreads(); //near s_nghs[tid] = 0; if (blockIdx.z > 0){ cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(u + (blockIdx.x + blockIdx.y * gridDim.x + (blockIdx.z - 1) * gridDim.y * gridDim.x)*bsize, new_smem, tid, s_nghs); } __syncthreads(); //if (tid == 0){ // for (int i = 0; i < 4; i++){ // for (int j = 0; j < 4; j++){ // s_u_ext[1 + i + (j + 1) * 6] = s_nghs[i + (j)*blockDim.x + 3 * blockDim.x * blockDim.y]; // } // } //} if (z == 0){ s_u_ext[1 + x + (y + 1) * 6] = s_nghs[x + (y)*blockDim.x + 3 * blockDim.x * blockDim.y]; } __syncthreads(); //far s_nghs[tid] = 0; if (blockIdx.z + 1 < gridDim.z){ cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(u + (blockIdx.x + blockIdx.y * gridDim.x + (blockIdx.z + 1) * gridDim.y * gridDim.x)*bsize, new_smem, tid, s_nghs); } __syncthreads(); //if (tid == 0){ // for (int i = 0; i < 4; i++){ // for (int j = 0; j < 4; j++){ // s_u_ext[1 + i + (j + 1) * 6 + 5 * 36] = s_nghs[i + (j)*blockDim.x ]; // } // } //} if (z == 0){ s_u_ext[1 + x + (y + 1) * 6 + 5 * 36] = s_nghs[x + (y)*blockDim.x]; } __syncthreads(); s_u_ext[1 + x + (y + 1) * 6 + (z + 1) * 36] = s_u[tid]; __syncthreads(); Scalar uxx = (s_u_ext[x + (y + 1) * 6 + (z + 1) * 36] - 2 * s_u_ext[x + 1 + (y + 1) * 6 + (z + 1) * 36] + s_u_ext[x + 2 + (y + 1) * 6 + (z + 1) * 36]) / (dx * dx); Scalar uyy = (s_u_ext[x + 1 + (y)* 6 + (z + 1) * 36] - 2 * s_u_ext[x + 1 + (y + 1) * 6 + (z + 1) * 36] + s_u_ext[x + 1 + (y + 2) * 6 + (z + 1) * 36]) / (dy * dy); Scalar uzz = (s_u_ext[x + 1 + (y + 1) * 6 + (z)* 36] - 2 * s_u_ext[x + 1 + (y + 1) * 6 + (z + 1) * 36] + s_u_ext[x + 1 + (y + 1) * 6 + (z + 2) * 36]) / (dz * dz); s_du[tid] = dt*k * (uxx + uyy + uzz); __syncthreads(); //if (uxx < 0 || uyy < 0 || uzz < 0){ // printf("%d, %f, %f, %f, %f %f %f %d %d %d %d\n", tid, dt, k, s_du[tid], uxx, uyy, uzz, threadIdx.x + blockIdx.x * blockDim.x, threadIdx.y + blockIdx.y * blockDim.y, threadIdx.z + blockIdx.z * blockDim.z, threadIdx.x + blockIdx.x * blockDim.x + (threadIdx.y + blockIdx.y * blockDim.y)*gridDim.x * blockDim.x + (threadIdx.z + blockIdx.z * blockDim.z)*gridDim.x * blockDim.x * gridDim.y * blockDim.y); //} cuZFP::encode<Int, UInt, Scalar, bsize, intprec>( s_du, size, new_smem, idx * bsize, du ); //out[(threadIdx.z + blockIdx.z * 4)*gridDim.x * gridDim.y * blockDim.x * blockDim.y + (threadIdx.y + blockIdx.y * 4)*gridDim.x * blockDim.x + (threadIdx.x + blockIdx.x * 4)] = s_dblock[tid]; } template<class Int, class UInt, class Scalar, uint bsize, int intprec> void gpuZFPDiffusion ( int nx, int ny, int nz, device_vector<Word > &u, device_vector<Word > &du, device_vector<Scalar> &df_u, const Scalar dx, const Scalar dy, const Scalar dz, const Scalar dt, const Scalar k, const Scalar tfinal ) { dim3 block_size = dim3(4, 4, 4); dim3 grid_size = dim3(nx, ny, nz); grid_size.x /= block_size.x; grid_size.y /= block_size.y; grid_size.z /= block_size.z; cudaZFPDiffusion<Int, UInt, Scalar, bsize, intprec> << < grid_size, block_size, (sizeof(Scalar) * 2 + 2 * sizeof(unsigned char) + sizeof(Bitter) + sizeof(UInt) + sizeof(Int) + sizeof(Scalar) + 3 * sizeof(int)) * 64 + 32 * sizeof(Scalar) + 4 + 216 * sizeof(Scalar) >> > ( thrust::raw_pointer_cast(u.data()), thrust::raw_pointer_cast(du.data()), size, dx,dy,dz,dt,k ); // cuZFP::decode<Int, UInt, Scalar, bsize, intprec>( // nx, ny, nz, // u, df_u, // group_count // ); //cudaZFPDiffusion<Int, UInt, Scalar, bsize, intprec> << < grid_size, block_size, (sizeof(Scalar) * 2 + 2 * sizeof(unsigned char) + sizeof(Bitter) + sizeof(UInt) + sizeof(Int) + sizeof(Scalar) + 3 * 
sizeof(int)) * 64 + 32 * sizeof(Scalar) + 4 + 216 * sizeof(Scalar) >> > // ( // thrust::raw_pointer_cast(df_u.data()), // thrust::raw_pointer_cast(du.data()), // size, // dx,dy,dz,dt,k // ); cuZFP::transform <Int, UInt, Scalar, bsize, intprec> ( nx,ny,nz, size, u, du, thrust::plus<Scalar>() ); //Scalar sum_u = cuZFP::reduce<Int, UInt, Scalar, bsize, intprec>(nx, ny, nz, u); //Scalar sum_du = cuZFP::reduce<Int, UInt, Scalar, bsize, intprec>(nx, ny, nz, du); //cout << "post-transform du: " << sum_du << " u: " << sum_u << endl; } template<class Int, class UInt, class Scalar, uint bsize> void gpuEncode ( host_vector<Scalar> &h_u ) { device_vector<Scalar> d_u; d_u = h_u; ErrorCheck ec; cudaEvent_t start, stop; float millisecs; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); dim3 emax_size(nx / 4, ny / 4, nz / 4); device_vector<Word > u(emax_size.x * emax_size.y * emax_size.z * bsize); cuZFP::encode<Int, UInt, Scalar, bsize, intprec>(nx, ny, nz, d_u, u, group_count, size); cudaStreamSynchronize(0); ec.chk("cudaEncode"); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&millisecs, start, stop); ec.chk("cudaencode"); cout << "encode GPU in time: " << millisecs/1000.0 << endl; cout << "sum: " << cuZFP::reduce<Int, UInt, Scalar, bsize, intprec>(nx, ny, nz, u) << endl; double tot_sum = 0, max_diff = 0, min_diff = 1e16; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(nx, ny, nz, u, d_u, group_count); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&millisecs, start, stop); ec.chk("cudadecoe"); cout << "decode GPU in time: " << millisecs / 1000.0 << endl; host_vector<Scalar> h_out = d_u; //array3d out(nx, ny, nz, rate); //for (int i = 0; i < h_out.size(); i++){ // out[i] = h_out[i]; //} } int main() { #if 0 //Convert combustion dataset from uinta output to raw const size_t nx = 417; const size_t ny = 664; const size_t nz = 417; std::vector<double> h_vec_in(nx*ny*nz, 0); ifstream ifs("../../combustion.txt"); size_t max_x, max_y, max_z, min_x, min_y, min_z; max_x = max_y = max_z = 0; min_x = min_y = min_z = 1e6; for (std::string line; std::getline(ifs, line);) { std::istringstream is(line); size_t idx_x, idx_y, idx_z; double val; is >> idx_x; is >> idx_y; is >> idx_z; if (max_x < idx_x) max_x = idx_x; if (max_y < idx_y) max_y = idx_y; if (max_z < idx_z) max_z = idx_z; if (min_x > idx_x) min_x = idx_x; if (min_y > idx_y) min_y = idx_y; if (min_z > idx_z) min_z = idx_z; is >> val; h_vec_in[idx_z * nx*ny + idx_y *nx + idx_x] = val; } cout << min_x << " " << min_y << " " << min_z << " " << max_x << " " << max_y << " " << max_z << endl; ifs.close(); FILE* pFile; pFile = fopen("combustion.raw", "wb"); fwrite(&h_vec_in[0], 1, nx*ny*nz*sizeof(double), pFile); fclose(pFile); #endif host_vector<double> h_vec_in(nx*ny*nz, 0); ifstream ifs("../../combustion_512.raw", ios::binary); if (ifs) { double read; for (int i = 0; i < nx*ny*nz; i++){ ifs.read(reinterpret_cast<char*>(&read), sizeof read); h_vec_in[i] = read; } } ifs.close(); cout << "cpu encode start" << endl; double start_time = omp_get_wtime(); zfp::array3d u(nx, ny, nz, rate); for (int i = 0; i < nx*ny*nz; i++){ u[i] = h_vec_in[i]; } double time = omp_get_wtime() - start_time; cout << "decode cpu time: " << time << endl; host_vector<double> h_vec_out(nx*ny*nz, 0); cout << "cpu decode start" << endl; start_time = omp_get_wtime(); for (int z = 0; z < nz; z++){ for (int y = 0; y < ny; y++) { for (int x = 
0; x < nx; x++) { h_vec_out[z*nx*ny + y*nx + x] = u(x, y, z); } } } time = omp_get_wtime() - start_time; cout << "decode cpu time: " << time << endl; cout << "sum: " << thrust::reduce(h_vec_out.begin(), h_vec_out.end()) << endl; cout << "GPU ZFP encode start" << endl; cudaDeviceSetCacheConfig(cudaFuncCachePreferL1); setupConst<double>(perm, MAXBITS, MAXPREC, MINEXP, EBITS, EBIAS); cout << "Begin gpuDiffusion" << endl; gpuEncode<long long, unsigned long long, double, BSIZE>(h_vec_in); cout << "Finish gpuDiffusion" << endl; }
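The gpuEncode driver above times device work with CUDA events rather than host timers, so only the work enqueued between the two recorded markers is measured. A self-contained sketch of that pattern follows; the dummy kernel and the sizes are invented for illustration.

#include <cuda_runtime.h>
#include <cstdio>

__global__ void dummy_kernel(float* x, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) x[i] = x[i] * 2.0f + 1.0f;
}

int main() {
  const int n = 1 << 20;
  float* d;
  cudaMalloc(&d, n * sizeof(float));
  cudaMemset(d, 0, n * sizeof(float));

  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);

  cudaEventRecord(start, 0);                    // marker enqueued on stream 0
  dummy_kernel<<<(n + 255) / 256, 256>>>(d, n);
  cudaEventRecord(stop, 0);
  cudaEventSynchronize(stop);                   // wait until the stop marker is reached

  float ms = 0.0f;
  cudaEventElapsedTime(&ms, start, stop);       // milliseconds between the markers
  printf("kernel time: %f s\n", ms / 1000.0f);

  cudaEventDestroy(start);
  cudaEventDestroy(stop);
  cudaFree(d);
  return 0;
}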
9efd5ebf76029e370001056691235e7c8b8f78f5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "oneflow/core/framework/framework.h" #include "oneflow/core/kernel/new_kernel_util.h" #include "oneflow/core/common/nd_index_offset_helper.h" #include "oneflow/core/cuda/atomic.cuh" #include "oneflow/user/kernels/upsample_kernel.h" namespace oneflow { namespace { template<typename T> __global__ void UpsampleBilinear2DForward(const int64_t elem_cnt, const T* in_dptr, NdIndexOffsetHelper<int64_t, 4> in_helper, NdIndexOffsetHelper<int64_t, 4> out_helper, const int64_t in_height, const int64_t in_width, const T scale_h, const T scale_w, const bool align_corners, T* out_dptr) { CUDA_1D_KERNEL_LOOP(index, elem_cnt) { int64_t n, c, h, w; out_helper.OffsetToNdIndex(index, n, c, h, w); BilinearParam<T> params; GetBilinearParam(align_corners, h, w, in_height, in_width, scale_h, scale_w, &params); const int64_t top_offset = in_helper.NdIndexToOffset(n, c, params.top_h_index, 0); const int64_t bottom_offset = in_helper.NdIndexToOffset(n, c, params.bottom_h_index, 0); const T top_left = in_dptr[top_offset + params.left_w_index]; const T top_right = in_dptr[top_offset + params.right_w_index]; const T bottom_left = in_dptr[bottom_offset + params.left_w_index]; const T bottom_right = in_dptr[bottom_offset + params.right_w_index]; const T top = top_left + (top_right - top_left) * params.w_lerp; const T bottom = bottom_left + (bottom_right - bottom_left) * params.w_lerp; out_dptr[index] = top + (bottom - top) * params.h_lerp; } } template<typename T> __global__ void UpsampleBilinearBackward(const int64_t elem_cnt, const T* dy_dptr, NdIndexOffsetHelper<int64_t, 4> dy_helper, NdIndexOffsetHelper<int64_t, 4> dx_helper, const int64_t dx_height, const int64_t dx_width, const T scale_h, const T scale_w, const bool align_corners, T* dx_dptr) { CUDA_1D_KERNEL_LOOP(index, elem_cnt) { int64_t n, c, h, w; dy_helper.OffsetToNdIndex(index, n, c, h, w); BilinearParam<T> params; GetBilinearParam(align_corners, h, w, dx_height, dx_width, scale_h, scale_w, &params); const int64_t top_offset = dx_helper.NdIndexToOffset(n, c, params.top_h_index, 0); const int64_t bottom_offset = dx_helper.NdIndexToOffset(n, c, params.bottom_h_index, 0); const T dy = dy_dptr[index]; const T dbottom = params.h_lerp * dy; T* dx_dptr_bottom_offset = dx_dptr + bottom_offset; cuda::atomic::Add(dx_dptr_bottom_offset + params.left_w_index, static_cast<T>((1 - params.w_lerp) * dbottom)); cuda::atomic::Add(dx_dptr_bottom_offset + params.right_w_index, static_cast<T>(params.w_lerp * dbottom)); const T dtop = dy - dbottom; T* dx_dptr_top_offset = dx_dptr + top_offset; cuda::atomic::Add(dx_dptr_top_offset + params.left_w_index, static_cast<T>((1 - params.w_lerp) * dtop)); cuda::atomic::Add(dx_dptr_top_offset + params.right_w_index, static_cast<T>(params.w_lerp * dtop)); } } } // namespace template<typename T> class UpsampleBilinear2DGPUKernel final : public user_op::OpKernel { public: 
UpsampleBilinear2DGPUKernel() = default; ~UpsampleBilinear2DGPUKernel() = default; private: void Compute(user_op::KernelComputeContext* ctx) const override { const user_op::Tensor* x_tensor = ctx->Tensor4ArgNameAndIndex("x", 0); user_op::Tensor* y_tensor = ctx->Tensor4ArgNameAndIndex("y", 0); const float height_scale = ctx->Attr<float>("height_scale"); const float width_scale = ctx->Attr<float>("width_scale"); const bool align_corners = ctx->Attr<bool>("align_corners"); const int64_t elem_cnt = y_tensor->shape().elem_cnt(); NdIndexOffsetHelper<int64_t, 4> in_helper(x_tensor->shape().At(0), x_tensor->shape().At(1), x_tensor->shape().At(2), x_tensor->shape().At(3)); NdIndexOffsetHelper<int64_t, 4> out_helper(y_tensor->shape().At(0), y_tensor->shape().At(1), y_tensor->shape().At(2), y_tensor->shape().At(3)); const int64_t in_height = x_tensor->shape().At(2); const int64_t in_width = x_tensor->shape().At(3); const int64_t out_height = y_tensor->shape().At(2); const int64_t out_width = y_tensor->shape().At(3); if (in_height == out_height && in_width == out_width) { Memcpy<DeviceType::kGPU>( ctx->device_ctx(), y_tensor->mut_dptr<void>(), x_tensor->dptr<void>(), x_tensor->shape().elem_cnt() * GetSizeOfDataType(x_tensor->data_type())); } else { const T scale_height = GetAreaPixelScale(in_height, out_height, align_corners, height_scale); const T scale_width = GetAreaPixelScale(in_width, out_width, align_corners, width_scale); RUN_CUDA_KERNEL((UpsampleBilinear2DForward<T>), ctx->device_ctx(), elem_cnt, elem_cnt, x_tensor->dptr<T>(), in_helper, out_helper, in_height, in_width, scale_height, scale_width, align_corners, y_tensor->mut_dptr<T>()); } } bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; template<typename T> class UpsampleBilinear2DGradGPUKernel final : public user_op::OpKernel { public: UpsampleBilinear2DGradGPUKernel() = default; ~UpsampleBilinear2DGradGPUKernel() = default; private: void Compute(user_op::KernelComputeContext* ctx) const override { user_op::Tensor* dx_tensor = ctx->Tensor4ArgNameAndIndex("dx", 0); Memset<DeviceType::kGPU>(ctx->device_ctx(), dx_tensor->mut_dptr<T>(), 0, dx_tensor->shape().elem_cnt() * sizeof(T)); const user_op::Tensor* dy_tensor = ctx->Tensor4ArgNameAndIndex("dy", 0); const float height_scale = ctx->Attr<float>("height_scale"); const float width_scale = ctx->Attr<float>("width_scale"); const bool align_corners = ctx->Attr<bool>("align_corners"); const int64_t elem_cnt = dy_tensor->shape().elem_cnt(); NdIndexOffsetHelper<int64_t, 4> dy_helper(dy_tensor->shape().At(0), dy_tensor->shape().At(1), dy_tensor->shape().At(2), dy_tensor->shape().At(3)); NdIndexOffsetHelper<int64_t, 4> dx_helper(dx_tensor->shape().At(0), dx_tensor->shape().At(1), dx_tensor->shape().At(2), dx_tensor->shape().At(3)); const int64_t in_height = dx_tensor->shape().At(2); const int64_t in_width = dx_tensor->shape().At(3); const int64_t out_height = dy_tensor->shape().At(2); const int64_t out_width = dy_tensor->shape().At(3); if (in_height == out_height && in_width == out_width) { Memcpy<DeviceType::kGPU>( ctx->device_ctx(), dx_tensor->mut_dptr<void>(), dy_tensor->dptr<void>(), dy_tensor->shape().elem_cnt() * GetSizeOfDataType(dy_tensor->data_type())); } else { const T scale_height = GetAreaPixelScale(in_height, out_height, align_corners, height_scale); const T scale_width = GetAreaPixelScale(in_width, out_width, align_corners, width_scale); RUN_CUDA_KERNEL((UpsampleBilinearBackward<T>), ctx->device_ctx(), elem_cnt, elem_cnt, dy_tensor->dptr<T>(), dy_helper, 
dx_helper, in_height, in_width, scale_height, scale_width, align_corners, dx_tensor->mut_dptr<T>()); } } bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; #define REGISTER_UPSAMPLE_BILINEAR_2D_GPU_KERNEL(dtype) \ REGISTER_USER_KERNEL("upsample_bilinear_2d") \ .SetCreateFn<UpsampleBilinear2DGPUKernel<dtype>>() \ .SetIsMatchedHob((user_op::HobDeviceTag() == "gpu") \ & (user_op::HobDataType("y", 0) == GetDataType<dtype>::value)); \ REGISTER_USER_KERNEL("upsample_bilinear_2d_grad") \ .SetCreateFn<UpsampleBilinear2DGradGPUKernel<dtype>>() \ .SetIsMatchedHob((user_op::HobDeviceTag() == "gpu") \ & (user_op::HobDataType("dx", 0) == GetDataType<dtype>::value)); REGISTER_UPSAMPLE_BILINEAR_2D_GPU_KERNEL(float) REGISTER_UPSAMPLE_BILINEAR_2D_GPU_KERNEL(double) } // namespace oneflow
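UpsampleBilinear2DForward above interpolates in two stages: the two top and two bottom neighbors are blended horizontally with w_lerp, and the two results are then blended vertically with h_lerp. The standalone kernel below reproduces that structure for float NCHW data; it assumes the usual align_corners=false pixel-center mapping, which may differ in details from OneFlow's GetAreaPixelScale/GetBilinearParam helpers, and it is not OneFlow code.

__global__ void bilinear_up_demo(const float* in, float* out,
                                 int n, int c, int in_h, int in_w,
                                 int out_h, int out_w) {
  float sh = (float)in_h / out_h;                 // pixel-center scales (align_corners=false)
  float sw = (float)in_w / out_w;
  int total = n * c * out_h * out_w;
  for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < total;
       idx += gridDim.x * blockDim.x) {
    int w  = idx % out_w;
    int h  = (idx / out_w) % out_h;
    int nc = idx / (out_w * out_h);               // fused batch*channel index
    float in_y = fminf(fmaxf((h + 0.5f) * sh - 0.5f, 0.0f), (float)(in_h - 1));
    float in_x = fminf(fmaxf((w + 0.5f) * sw - 0.5f, 0.0f), (float)(in_w - 1));
    int y0 = (int)in_y, x0 = (int)in_x;
    int y1 = min(y0 + 1, in_h - 1), x1 = min(x0 + 1, in_w - 1);
    float ly = in_y - y0, lx = in_x - x0;
    const float* p = in + (size_t)nc * in_h * in_w;
    float top    = p[y0 * in_w + x0] + (p[y0 * in_w + x1] - p[y0 * in_w + x0]) * lx;
    float bottom = p[y1 * in_w + x0] + (p[y1 * in_w + x1] - p[y1 * in_w + x0]) * lx;
    out[idx] = top + (bottom - top) * ly;         // horizontal lerps, then vertical lerp
  }
}
// launched e.g. as bilinear_up_demo<<<256, 256>>>(d_in, d_out, n, c, in_h, in_w, out_h, out_w);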
9efd5ebf76029e370001056691235e7c8b8f78f5.cu
/* Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "oneflow/core/framework/framework.h" #include "oneflow/core/kernel/new_kernel_util.h" #include "oneflow/core/common/nd_index_offset_helper.h" #include "oneflow/core/cuda/atomic.cuh" #include "oneflow/user/kernels/upsample_kernel.h" namespace oneflow { namespace { template<typename T> __global__ void UpsampleBilinear2DForward(const int64_t elem_cnt, const T* in_dptr, NdIndexOffsetHelper<int64_t, 4> in_helper, NdIndexOffsetHelper<int64_t, 4> out_helper, const int64_t in_height, const int64_t in_width, const T scale_h, const T scale_w, const bool align_corners, T* out_dptr) { CUDA_1D_KERNEL_LOOP(index, elem_cnt) { int64_t n, c, h, w; out_helper.OffsetToNdIndex(index, n, c, h, w); BilinearParam<T> params; GetBilinearParam(align_corners, h, w, in_height, in_width, scale_h, scale_w, &params); const int64_t top_offset = in_helper.NdIndexToOffset(n, c, params.top_h_index, 0); const int64_t bottom_offset = in_helper.NdIndexToOffset(n, c, params.bottom_h_index, 0); const T top_left = in_dptr[top_offset + params.left_w_index]; const T top_right = in_dptr[top_offset + params.right_w_index]; const T bottom_left = in_dptr[bottom_offset + params.left_w_index]; const T bottom_right = in_dptr[bottom_offset + params.right_w_index]; const T top = top_left + (top_right - top_left) * params.w_lerp; const T bottom = bottom_left + (bottom_right - bottom_left) * params.w_lerp; out_dptr[index] = top + (bottom - top) * params.h_lerp; } } template<typename T> __global__ void UpsampleBilinearBackward(const int64_t elem_cnt, const T* dy_dptr, NdIndexOffsetHelper<int64_t, 4> dy_helper, NdIndexOffsetHelper<int64_t, 4> dx_helper, const int64_t dx_height, const int64_t dx_width, const T scale_h, const T scale_w, const bool align_corners, T* dx_dptr) { CUDA_1D_KERNEL_LOOP(index, elem_cnt) { int64_t n, c, h, w; dy_helper.OffsetToNdIndex(index, n, c, h, w); BilinearParam<T> params; GetBilinearParam(align_corners, h, w, dx_height, dx_width, scale_h, scale_w, &params); const int64_t top_offset = dx_helper.NdIndexToOffset(n, c, params.top_h_index, 0); const int64_t bottom_offset = dx_helper.NdIndexToOffset(n, c, params.bottom_h_index, 0); const T dy = dy_dptr[index]; const T dbottom = params.h_lerp * dy; T* dx_dptr_bottom_offset = dx_dptr + bottom_offset; cuda::atomic::Add(dx_dptr_bottom_offset + params.left_w_index, static_cast<T>((1 - params.w_lerp) * dbottom)); cuda::atomic::Add(dx_dptr_bottom_offset + params.right_w_index, static_cast<T>(params.w_lerp * dbottom)); const T dtop = dy - dbottom; T* dx_dptr_top_offset = dx_dptr + top_offset; cuda::atomic::Add(dx_dptr_top_offset + params.left_w_index, static_cast<T>((1 - params.w_lerp) * dtop)); cuda::atomic::Add(dx_dptr_top_offset + params.right_w_index, static_cast<T>(params.w_lerp * dtop)); } } } // namespace template<typename T> class UpsampleBilinear2DGPUKernel final : public user_op::OpKernel { public: UpsampleBilinear2DGPUKernel() = default; ~UpsampleBilinear2DGPUKernel() = default; private: 
void Compute(user_op::KernelComputeContext* ctx) const override { const user_op::Tensor* x_tensor = ctx->Tensor4ArgNameAndIndex("x", 0); user_op::Tensor* y_tensor = ctx->Tensor4ArgNameAndIndex("y", 0); const float height_scale = ctx->Attr<float>("height_scale"); const float width_scale = ctx->Attr<float>("width_scale"); const bool align_corners = ctx->Attr<bool>("align_corners"); const int64_t elem_cnt = y_tensor->shape().elem_cnt(); NdIndexOffsetHelper<int64_t, 4> in_helper(x_tensor->shape().At(0), x_tensor->shape().At(1), x_tensor->shape().At(2), x_tensor->shape().At(3)); NdIndexOffsetHelper<int64_t, 4> out_helper(y_tensor->shape().At(0), y_tensor->shape().At(1), y_tensor->shape().At(2), y_tensor->shape().At(3)); const int64_t in_height = x_tensor->shape().At(2); const int64_t in_width = x_tensor->shape().At(3); const int64_t out_height = y_tensor->shape().At(2); const int64_t out_width = y_tensor->shape().At(3); if (in_height == out_height && in_width == out_width) { Memcpy<DeviceType::kGPU>( ctx->device_ctx(), y_tensor->mut_dptr<void>(), x_tensor->dptr<void>(), x_tensor->shape().elem_cnt() * GetSizeOfDataType(x_tensor->data_type())); } else { const T scale_height = GetAreaPixelScale(in_height, out_height, align_corners, height_scale); const T scale_width = GetAreaPixelScale(in_width, out_width, align_corners, width_scale); RUN_CUDA_KERNEL((UpsampleBilinear2DForward<T>), ctx->device_ctx(), elem_cnt, elem_cnt, x_tensor->dptr<T>(), in_helper, out_helper, in_height, in_width, scale_height, scale_width, align_corners, y_tensor->mut_dptr<T>()); } } bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; template<typename T> class UpsampleBilinear2DGradGPUKernel final : public user_op::OpKernel { public: UpsampleBilinear2DGradGPUKernel() = default; ~UpsampleBilinear2DGradGPUKernel() = default; private: void Compute(user_op::KernelComputeContext* ctx) const override { user_op::Tensor* dx_tensor = ctx->Tensor4ArgNameAndIndex("dx", 0); Memset<DeviceType::kGPU>(ctx->device_ctx(), dx_tensor->mut_dptr<T>(), 0, dx_tensor->shape().elem_cnt() * sizeof(T)); const user_op::Tensor* dy_tensor = ctx->Tensor4ArgNameAndIndex("dy", 0); const float height_scale = ctx->Attr<float>("height_scale"); const float width_scale = ctx->Attr<float>("width_scale"); const bool align_corners = ctx->Attr<bool>("align_corners"); const int64_t elem_cnt = dy_tensor->shape().elem_cnt(); NdIndexOffsetHelper<int64_t, 4> dy_helper(dy_tensor->shape().At(0), dy_tensor->shape().At(1), dy_tensor->shape().At(2), dy_tensor->shape().At(3)); NdIndexOffsetHelper<int64_t, 4> dx_helper(dx_tensor->shape().At(0), dx_tensor->shape().At(1), dx_tensor->shape().At(2), dx_tensor->shape().At(3)); const int64_t in_height = dx_tensor->shape().At(2); const int64_t in_width = dx_tensor->shape().At(3); const int64_t out_height = dy_tensor->shape().At(2); const int64_t out_width = dy_tensor->shape().At(3); if (in_height == out_height && in_width == out_width) { Memcpy<DeviceType::kGPU>( ctx->device_ctx(), dx_tensor->mut_dptr<void>(), dy_tensor->dptr<void>(), dy_tensor->shape().elem_cnt() * GetSizeOfDataType(dy_tensor->data_type())); } else { const T scale_height = GetAreaPixelScale(in_height, out_height, align_corners, height_scale); const T scale_width = GetAreaPixelScale(in_width, out_width, align_corners, width_scale); RUN_CUDA_KERNEL((UpsampleBilinearBackward<T>), ctx->device_ctx(), elem_cnt, elem_cnt, dy_tensor->dptr<T>(), dy_helper, dx_helper, in_height, in_width, scale_height, scale_width, align_corners, 
dx_tensor->mut_dptr<T>()); } } bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; #define REGISTER_UPSAMPLE_BILINEAR_2D_GPU_KERNEL(dtype) \ REGISTER_USER_KERNEL("upsample_bilinear_2d") \ .SetCreateFn<UpsampleBilinear2DGPUKernel<dtype>>() \ .SetIsMatchedHob((user_op::HobDeviceTag() == "gpu") \ & (user_op::HobDataType("y", 0) == GetDataType<dtype>::value)); \ REGISTER_USER_KERNEL("upsample_bilinear_2d_grad") \ .SetCreateFn<UpsampleBilinear2DGradGPUKernel<dtype>>() \ .SetIsMatchedHob((user_op::HobDeviceTag() == "gpu") \ & (user_op::HobDataType("dx", 0) == GetDataType<dtype>::value)); REGISTER_UPSAMPLE_BILINEAR_2D_GPU_KERNEL(float) REGISTER_UPSAMPLE_BILINEAR_2D_GPU_KERNEL(double) } // namespace oneflow
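The forward and backward kernels above lean on GetBilinearParam and GetAreaPixelScale from upsample_kernel.h, which is not shown here. The small CPU sketch below illustrates the usual align_corners coordinate math those helpers are assumed to implement (the attribute-provided height_scale/width_scale handling is omitted); AreaPixelScale, SourceIndex, and the 4-to-8 row example are illustrative names and values, not OneFlow API.

#include <algorithm>
#include <cstdio>

// Assumed behaviour of the scale helper: with align_corners the endpoints map exactly,
// otherwise the classic "area pixel" half-pixel offset is used.
static double AreaPixelScale(long in_size, long out_size, bool align_corners) {
  if (align_corners) {
    return out_size > 1 ? double(in_size - 1) / double(out_size - 1) : 0.0;
  }
  return double(in_size) / double(out_size);
}

// Maps an output row index to a fractional input row coordinate.
static double SourceIndex(double scale, long dst_index, bool align_corners) {
  return align_corners ? scale * dst_index
                       : std::max(scale * (dst_index + 0.5) - 0.5, 0.0);
}

int main() {
  const long in_h = 4, out_h = 8;
  for (bool align : {false, true}) {
    const double scale = AreaPixelScale(in_h, out_h, align);
    printf("align_corners=%d scale=%.3f\n", int(align), scale);
    for (long h = 0; h < out_h; ++h) {
      const double src = SourceIndex(scale, h, align);
      const long top = long(src);                       // plays the role of params.top_h_index
      const long bottom = std::min(top + 1, in_h - 1);  // params.bottom_h_index
      const double lerp = src - top;                    // params.h_lerp
      printf("  out %ld -> top %ld bottom %ld lerp %.3f\n", h, top, bottom, lerp);
    }
  }
  return 0;
}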
224978f31fae7e4d88301ce9b3bb52a5145ec2ad.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver August 2013 @precisions normal z -> s d c @author Mark Gates */ #include "common_magma.h" #include <assert.h> #define NB 64 /* Symmetrizes ntile tiles at a time, e.g., all diagonal tiles of a matrix. Grid is ntile x ceil(m/NB). Each tile is m x m, and is divided into block rows, each NB x m. Each block has NB threads. Each thread copies one row, iterating across all columns below diagonal. The bottom block of rows may be partially outside the matrix; if so, rows outside the matrix (i >= m) are disabled. */ __global__ void zsymmetrize_tiles_lower( int m, magmaDoubleComplex *dA, int ldda, int mstride, int nstride ) { // shift dA to tile's top-left corner dA += blockIdx.x*(mstride + nstride*ldda); // dA iterates across row i and dAT iterates down column i. int i = blockIdx.y*NB + threadIdx.x; magmaDoubleComplex *dAT = dA; if ( i < m ) { dA += i; dAT += i*ldda; magmaDoubleComplex *dAend = dA + i*ldda; while( dA < dAend ) { *dAT = cuConj(*dA); // upper := lower dA += ldda; dAT += 1; } } } // only difference with _lower version is direction dA=dAT instead of dAT=dA. __global__ void zsymmetrize_tiles_upper( int m, magmaDoubleComplex *dA, int ldda, int mstride, int nstride ) { // shift dA to tile's top-left corner dA += blockIdx.x*(mstride + nstride*ldda); // dA iterates across row i and dAT iterates down column i. int i = blockIdx.y*NB + threadIdx.x; magmaDoubleComplex *dAT = dA; if ( i < m ) { dA += i; dAT += i*ldda; magmaDoubleComplex *dAend = dA + i*ldda; while( dA < dAend ) { *dA = cuConj(*dAT); // lower := upper dA += ldda; dAT += 1; } } } extern "C" void magmablas_zsymmetrize_tiles( char uplo, magma_int_t m, magmaDoubleComplex *dA, magma_int_t ldda, magma_int_t ntile, magma_int_t mstride, magma_int_t nstride ) { /* Purpose ======= ZSYMMETRIZE copies lower triangle to upper triangle, or vice-versa, to make dA a general representation of a symmetric matrix. Arguments ========= UPLO (input) CHARACTER*1 Specifies the part of the matrix dA that is valid on input. = 'U': Upper triangular part = 'L': Lower triangular part M (input) INTEGER The number of rows of the matrix dA. M >= 0. dA (input/output) COMPLEX DOUBLE PRECISION array, dimension (LDDA,N) The m by m matrix dA. LDDA (input) INTEGER The leading dimension of the array dA. LDDA >= max(1,M). ===================================================================== */ if ( m == 0 || ntile == 0 ) return; assert( m >= 0 ); assert( ldda >= m ); assert( ldda >= (ntile - 1)*mstride + m ); assert( ntile >= 0 ); assert( mstride >= 0 ); assert( nstride >= 0 ); assert( mstride >= m || nstride >= m ); // prevent tile overlap dim3 threads( NB ); dim3 grid( ntile, (m + NB - 1)/NB ); //printf( "m %d, grid %d x %d, threads %d\n", m, grid.x, grid.y, threads.x ); if ( (uplo == 'U') || (uplo == 'u') ) { hipLaunchKernelGGL(( zsymmetrize_tiles_upper), dim3(grid), dim3(threads), 0, magma_stream , m, dA, ldda, mstride, nstride ); } else if ( (uplo == 'L') || (uplo == 'l') ) { hipLaunchKernelGGL(( zsymmetrize_tiles_lower), dim3(grid), dim3(threads), 0, magma_stream , m, dA, ldda, mstride, nstride ); } else { printf( "uplo has illegal value\n" ); exit(1); } }
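The .hip file above is the hipify output of the CUDA source that follows; the only launch-site change is that triple-chevron launches become hipLaunchKernelGGL calls with the same grid/block/shared-memory/stream arguments. A minimal, self-contained sketch of that mapping is below; scale_kernel and d_x are made-up names for illustration, not MAGMA symbols.

#include <hip/hip_runtime.h>
#include <cstdio>

__global__ void scale_kernel(int n, double alpha, double* x) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) x[i] *= alpha;
}

int main() {
  const int n = 1 << 10;
  double* d_x = nullptr;
  hipMalloc(reinterpret_cast<void**>(&d_x), n * sizeof(double));
  dim3 threads(64);
  dim3 grid((n + threads.x - 1) / threads.x);
  // CUDA source form:   scale_kernel<<< grid, threads, 0, stream >>>( n, 2.0, d_x );
  // hipify output form: kernel name, grid, block, shared-memory bytes, stream, then the arguments.
  hipLaunchKernelGGL(scale_kernel, grid, threads, 0, 0, n, 2.0, d_x);
  hipDeviceSynchronize();
  hipFree(d_x);
  return 0;
}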
224978f31fae7e4d88301ce9b3bb52a5145ec2ad.cu
/* -- MAGMA (version 1.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver August 2013 @precisions normal z -> s d c @author Mark Gates */ #include "common_magma.h" #include <assert.h> #define NB 64 /* Symmetrizes ntile tiles at a time, e.g., all diagonal tiles of a matrix. Grid is ntile x ceil(m/NB). Each tile is m x m, and is divided into block rows, each NB x m. Each block has NB threads. Each thread copies one row, iterating across all columns below diagonal. The bottom block of rows may be partially outside the matrix; if so, rows outside the matrix (i >= m) are disabled. */ __global__ void zsymmetrize_tiles_lower( int m, magmaDoubleComplex *dA, int ldda, int mstride, int nstride ) { // shift dA to tile's top-left corner dA += blockIdx.x*(mstride + nstride*ldda); // dA iterates across row i and dAT iterates down column i. int i = blockIdx.y*NB + threadIdx.x; magmaDoubleComplex *dAT = dA; if ( i < m ) { dA += i; dAT += i*ldda; magmaDoubleComplex *dAend = dA + i*ldda; while( dA < dAend ) { *dAT = cuConj(*dA); // upper := lower dA += ldda; dAT += 1; } } } // only difference with _lower version is direction dA=dAT instead of dAT=dA. __global__ void zsymmetrize_tiles_upper( int m, magmaDoubleComplex *dA, int ldda, int mstride, int nstride ) { // shift dA to tile's top-left corner dA += blockIdx.x*(mstride + nstride*ldda); // dA iterates across row i and dAT iterates down column i. int i = blockIdx.y*NB + threadIdx.x; magmaDoubleComplex *dAT = dA; if ( i < m ) { dA += i; dAT += i*ldda; magmaDoubleComplex *dAend = dA + i*ldda; while( dA < dAend ) { *dA = cuConj(*dAT); // lower := upper dA += ldda; dAT += 1; } } } extern "C" void magmablas_zsymmetrize_tiles( char uplo, magma_int_t m, magmaDoubleComplex *dA, magma_int_t ldda, magma_int_t ntile, magma_int_t mstride, magma_int_t nstride ) { /* Purpose ======= ZSYMMETRIZE copies lower triangle to upper triangle, or vice-versa, to make dA a general representation of a symmetric matrix. Arguments ========= UPLO (input) CHARACTER*1 Specifies the part of the matrix dA that is valid on input. = 'U': Upper triangular part = 'L': Lower triangular part M (input) INTEGER The number of rows of the matrix dA. M >= 0. dA (input/output) COMPLEX DOUBLE PRECISION array, dimension (LDDA,N) The m by m matrix dA. LDDA (input) INTEGER The leading dimension of the array dA. LDDA >= max(1,M). ===================================================================== */ if ( m == 0 || ntile == 0 ) return; assert( m >= 0 ); assert( ldda >= m ); assert( ldda >= (ntile - 1)*mstride + m ); assert( ntile >= 0 ); assert( mstride >= 0 ); assert( nstride >= 0 ); assert( mstride >= m || nstride >= m ); // prevent tile overlap dim3 threads( NB ); dim3 grid( ntile, (m + NB - 1)/NB ); //printf( "m %d, grid %d x %d, threads %d\n", m, grid.x, grid.y, threads.x ); if ( (uplo == 'U') || (uplo == 'u') ) { zsymmetrize_tiles_upper<<< grid, threads, 0, magma_stream >>>( m, dA, ldda, mstride, nstride ); } else if ( (uplo == 'L') || (uplo == 'l') ) { zsymmetrize_tiles_lower<<< grid, threads, 0, magma_stream >>>( m, dA, ldda, mstride, nstride ); } else { printf( "uplo has illegal value\n" ); exit(1); } }
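For reference, the loop below is a plain CPU rendering of what the threads of zsymmetrize_tiles_lower do for a single column-major m-by-m tile, using real doubles so the cuConj in the kernel reduces to a plain copy; symmetrize_tile_lower and the 3x3 example are illustrative only, not MAGMA code.

#include <cstdio>
#include <vector>

// One CUDA thread per row i; it walks across columns j < i and mirrors the
// lower-triangle entry onto the upper triangle (conjugated for complex data).
static void symmetrize_tile_lower(int m, double* A, int ldda) {
  for (int i = 0; i < m; ++i) {
    for (int j = 0; j < i; ++j) {
      A[j + i * ldda] = A[i + j * ldda];  // upper(j,i) := lower(i,j)
    }
  }
}

int main() {
  const int m = 3, ldda = 3;
  std::vector<double> A = {1, 2, 3,   // column 0
                           0, 4, 5,   // column 1 (upper entries start as 0)
                           0, 0, 6};  // column 2
  symmetrize_tile_lower(m, A.data(), ldda);
  for (int i = 0; i < m; ++i) {
    for (int j = 0; j < m; ++j) printf("%4.0f", A[i + j * ldda]);
    printf("\n");
  }
  return 0;
}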
10f289ec6cf828741ad8274d50c756eb3bc81e0a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2018, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "parquet_gpu.h" #include <io/utilities/block_utils.cuh> namespace cudf { namespace io { namespace parquet { namespace gpu { // Minimal thrift implementation for parsing page headers enum { ST_FLD_TRUE = 1, ST_FLD_FALSE = 2, ST_FLD_BYTE = 3, ST_FLD_I16 = 4, ST_FLD_I32 = 5, ST_FLD_I64 = 6, ST_FLD_DOUBLE = 7, ST_FLD_BINARY = 8, ST_FLD_LIST = 9, ST_FLD_SET = 10, ST_FLD_MAP = 11, ST_FLD_STRUCT = 12, }; static const __device__ __constant__ uint8_t g_list2struct[16] = { 0, 1, 2, ST_FLD_BYTE, ST_FLD_DOUBLE, 5, ST_FLD_I16, 7, ST_FLD_I32, 9, ST_FLD_I64, ST_FLD_BINARY, ST_FLD_STRUCT, ST_FLD_MAP, ST_FLD_SET, ST_FLD_LIST }; struct byte_stream_s { const uint8_t *cur; const uint8_t *end; const uint8_t *base; // Parsed symbols PageType page_type; PageInfo page; ColumnChunkDesc ck; }; inline __device__ unsigned int getb(byte_stream_s *bs) { return (bs->cur < bs->end) ? *bs->cur++ : 0; } inline __device__ void skip_bytes(byte_stream_s *bs, size_t bytecnt) { bytecnt = min(bytecnt, (size_t)(bs->end - bs->cur)); bs->cur += bytecnt; } __device__ uint32_t get_u32(byte_stream_s *bs) { uint32_t v = 0, l = 0, c; do { c = getb(bs); v |= (c & 0x7f) << l; l += 7; } while (c & 0x80); return v; } inline __device__ int32_t get_i32(byte_stream_s *bs) { uint32_t u = get_u32(bs); return (int32_t)((u >> 1u) ^ -(int32_t)(u & 1)); } __device__ void skip_struct_field(byte_stream_s *bs, int t) { int struct_depth = 0; int rep_cnt = 0; do { if (rep_cnt != 0) { rep_cnt--; } else if (struct_depth != 0) { int c; do { c = getb(bs); if (!c) --struct_depth; } while (!c && struct_depth); if (!struct_depth) break; t = c & 0xf; if (!(c & 0xf0)) get_i32(bs); } switch (t) { case ST_FLD_TRUE: case ST_FLD_FALSE: break; case ST_FLD_I16: case ST_FLD_I32: case ST_FLD_I64: get_u32(bs); break; case ST_FLD_BYTE: skip_bytes(bs, 1); break; case ST_FLD_DOUBLE: skip_bytes(bs, 8); break; case ST_FLD_BINARY: skip_bytes(bs, get_u32(bs)); break; case ST_FLD_LIST: case ST_FLD_SET: { // NOTE: skipping a list of lists is not handled int c = getb(bs); int n = c >> 4; if (n == 0xf) n = get_u32(bs); t = g_list2struct[c & 0xf]; if (t == ST_FLD_STRUCT) struct_depth += n; else rep_cnt = n; } break; case ST_FLD_STRUCT: struct_depth++; break; } } while (rep_cnt || struct_depth); } #define PARQUET_BEGIN_STRUCT(fn) \ __device__ bool fn(byte_stream_s *bs) \ { \ int fld = 0; \ for (;;) \ { \ int c, t, f; \ c = getb(bs); \ if (!c) \ break; \ f = c >> 4; \ t = c & 0xf; \ fld = (f) ? 
fld+f : get_i32(bs); \ switch(fld) { \ #define PARQUET_FLD_ENUM(id, m, mt) \ case id: bs->m = (mt)get_i32(bs); if (t != ST_FLD_I32) return false; break; \ #define PARQUET_FLD_INT32(id, m) \ case id: bs->m = get_i32(bs); if (t != ST_FLD_I32) return false; break; \ #define PARQUET_FLD_STRUCT(id, m) \ case id: if (t != ST_FLD_STRUCT || !m(bs)) return false; break; \ #define PARQUET_END_STRUCT() \ default: \ skip_struct_field(bs, t); \ break; \ } \ } \ return true; \ } \ PARQUET_BEGIN_STRUCT(gpuParseDataPageHeader) PARQUET_FLD_INT32(1, page.num_values) PARQUET_FLD_ENUM(2, page.encoding, Encoding); PARQUET_FLD_ENUM(3, page.definition_level_encoding, Encoding); PARQUET_FLD_ENUM(4, page.repetition_level_encoding, Encoding); PARQUET_END_STRUCT() PARQUET_BEGIN_STRUCT(gpuParseDictionaryPageHeader) PARQUET_FLD_INT32(1, page.num_values) PARQUET_FLD_ENUM(2, page.encoding, Encoding); PARQUET_END_STRUCT() PARQUET_BEGIN_STRUCT(gpuParseDataPageHeaderV2) PARQUET_FLD_INT32(1, page.num_values) PARQUET_FLD_INT32(3, page.num_rows) PARQUET_FLD_ENUM(4, page.encoding, Encoding); PARQUET_FLD_ENUM(5, page.definition_level_encoding, Encoding); PARQUET_FLD_ENUM(6, page.repetition_level_encoding, Encoding); PARQUET_END_STRUCT() PARQUET_BEGIN_STRUCT(gpuParsePageHeader) PARQUET_FLD_ENUM(1, page_type, PageType) PARQUET_FLD_INT32(2, page.uncompressed_page_size) PARQUET_FLD_INT32(3, page.compressed_page_size) PARQUET_FLD_STRUCT(5, gpuParseDataPageHeader) PARQUET_FLD_STRUCT(7, gpuParseDictionaryPageHeader) PARQUET_FLD_STRUCT(8, gpuParseDataPageHeaderV2) PARQUET_END_STRUCT() /** * @brief Kernel for outputting page headers from the specified column chunks * * @param[in] chunks List of column chunks * @param[in] num_chunks Number of column chunks **/ // blockDim {128,1,1} extern "C" __global__ void __launch_bounds__(128) gpuDecodePageHeaders(ColumnChunkDesc *chunks, int32_t num_chunks) { __shared__ byte_stream_s bs_g[4]; int t = threadIdx.x & 0x1f; int chunk = (blockIdx.x << 2) + (threadIdx.x >> 5); byte_stream_s * const bs = &bs_g[threadIdx.x >> 5]; if (chunk < num_chunks) { // NOTE: Assumes that sizeof(ColumnChunkDesc) <= 128 if (t < sizeof(ColumnChunkDesc) / sizeof(uint32_t)) { ((uint32_t *)&bs->ck)[t] = ((const uint32_t *)&chunks[chunk])[t]; } } __syncthreads(); if (chunk < num_chunks) { size_t num_values, values_found; uint32_t data_page_count = 0; uint32_t dictionary_page_count = 0; int32_t max_num_pages; int32_t num_dict_pages = bs->ck.num_dict_pages; PageInfo *page_info; if (!t) { bs->base = bs->cur = bs->ck.compressed_data; bs->end = bs->base + bs->ck.compressed_size; bs->page.chunk_idx = chunk; bs->page.chunk_row = 0; bs->page.num_rows = 0; } num_values = bs->ck.num_values; page_info = bs->ck.page_info; num_dict_pages = bs->ck.num_dict_pages; max_num_pages = (page_info) ? 
bs->ck.max_num_pages : 0; values_found = 0; SYNCWARP(); while (values_found < num_values && bs->cur < bs->end) { int index_out = -1; if (t == 0) { bs->page.chunk_row += bs->page.num_rows; bs->page.num_rows = 0; if (gpuParsePageHeader(bs) && bs->page.compressed_page_size >= 0) { switch (bs->page_type) { case DATA_PAGE: // TODO: Unless the file only uses V2 page headers or has no complex nesting (num_rows == num_values), we can't infer num_rows at this time // -> we'll need another pass after decompression to parse the definition and repetition levels to infer the correct value of num_rows bs->page.num_rows = bs->page.num_values; // Assumes num_rows == num_values // Fall-through to V2 case DATA_PAGE_V2: index_out = num_dict_pages + data_page_count; data_page_count++; bs->page.flags = 0; values_found += bs->page.num_values; break; case DICTIONARY_PAGE: index_out = dictionary_page_count; dictionary_page_count++; bs->page.flags = PAGEINFO_FLAGS_DICTIONARY; break; default: index_out = -1; break; } bs->page.page_data = const_cast<uint8_t *>(bs->cur); bs->cur += bs->page.compressed_page_size; } else { bs->cur = bs->end; } } index_out = SHFL0(index_out); if (index_out >= 0 && index_out < max_num_pages) { // NOTE: Assumes that sizeof(PageInfo) <= 128 if (t < sizeof(PageInfo) / sizeof(uint32_t)) { ((uint32_t *)(page_info + index_out))[t] = ((const uint32_t *)&bs->page)[t]; } } num_values = SHFL0(num_values); SYNCWARP(); } if (t == 0) { chunks[chunk].num_data_pages = data_page_count; chunks[chunk].num_dict_pages = dictionary_page_count; } } } /** * @brief Kernel for building dictionary index for the specified column chunks * * This function builds an index to point to each dictionary entry * (string format is 4-byte little-endian string length followed by character * data). The index is a 32-bit integer which contains the offset of each string * relative to the beginning of the dictionary page data. 
* * @param[in] chunks List of column chunks * @param[in] num_chunks Number of column chunks **/ // blockDim {128,1,1} extern "C" __global__ void __launch_bounds__(128) gpuBuildStringDictionaryIndex(ColumnChunkDesc *chunks, int32_t num_chunks) { __shared__ ColumnChunkDesc chunk_g[4]; int t = threadIdx.x & 0x1f; int chunk = (blockIdx.x << 2) + (threadIdx.x >> 5); ColumnChunkDesc * const ck = &chunk_g[threadIdx.x >> 5]; if (chunk < num_chunks) { // NOTE: Assumes that sizeof(ColumnChunkDesc) <= 128 if (t < sizeof(ColumnChunkDesc) / sizeof(uint32_t)) { ((uint32_t *)ck)[t] = ((const uint32_t *)&chunks[chunk])[t]; } } __syncthreads(); if (chunk >= num_chunks) { return; } if (!t && ck->num_dict_pages > 0 && ck->str_dict_index) { // Data type to describe a string nvstrdesc_s *dict_index = ck->str_dict_index; const uint8_t *dict = ck->page_info[0].page_data; int dict_size = ck->page_info[0].uncompressed_page_size; int num_entries = ck->page_info[0].num_values; int pos = 0, cur = 0; for (int i = 0; i < num_entries; i++) { int len = 0; if (cur + 4 <= dict_size) { len = dict[cur+0] | (dict[cur+1] << 8) | (dict[cur+2] << 16) | (dict[cur+3] << 24); if (len >= 0 && cur + 4 + len <= dict_size) { pos = cur; cur = cur + 4 + len; } else { cur = dict_size; } } // TODO: Could store 8 entries in shared mem, then do a single warp-wide store dict_index[i].ptr = (const char *)(dict + pos + 4); dict_index[i].count = len; } } } hipError_t __host__ DecodePageHeaders(ColumnChunkDesc *chunks, int32_t num_chunks, hipStream_t stream) { dim3 dim_block(128, 1); dim3 dim_grid((num_chunks + 3) >> 2, 1); // 1 chunk per warp, 4 warps per block hipLaunchKernelGGL(( gpuDecodePageHeaders), dim3(dim_grid), dim3(dim_block), 0, stream, chunks, num_chunks); return hipSuccess; } hipError_t __host__ BuildStringDictionaryIndex(ColumnChunkDesc *chunks, int32_t num_chunks, hipStream_t stream) { dim3 dim_block(128, 1); dim3 dim_grid((num_chunks + 3) >> 2, 1); // 1 chunk per warp, 4 warps per block hipLaunchKernelGGL(( gpuBuildStringDictionaryIndex), dim3(dim_grid), dim3(dim_block), 0, stream, chunks, num_chunks); return hipSuccess; } } // namespace gpu } // namespace parquet } // namespace io } // namespace cudf
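get_u32 and get_i32 above decode ULEB128 varints and zigzag-encoded integers, the encoding used by Thrift's compact protocol for the page headers. A host-side sketch of the same two steps on a raw byte array follows; decode_uleb128 and decode_zigzag32 are illustrative names, not part of the cudf code.

#include <cstdint>
#include <cstdio>

// Host-side reimplementation of the varint/zigzag decoding in get_u32/get_i32,
// operating on a plain byte array instead of a byte_stream_s.
static uint32_t decode_uleb128(const uint8_t* buf, size_t len, size_t* pos) {
  uint32_t v = 0, shift = 0, c;
  do {
    c = (*pos < len) ? buf[(*pos)++] : 0;  // getb() returns 0 past the end
    v |= (c & 0x7f) << shift;
    shift += 7;
  } while (c & 0x80);
  return v;
}

static int32_t decode_zigzag32(uint32_t u) {
  return (int32_t)((u >> 1u) ^ -(int32_t)(u & 1));
}

int main() {
  // 0xAC 0x02 is the ULEB128 encoding of 300; zigzag-decoding 300 gives 150.
  const uint8_t bytes[] = {0xAC, 0x02};
  size_t pos = 0;
  uint32_t u = decode_uleb128(bytes, sizeof(bytes), &pos);
  printf("varint = %u, zigzag-decoded = %d\n", u, decode_zigzag32(u));
  return 0;
}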
10f289ec6cf828741ad8274d50c756eb3bc81e0a.cu
/* * Copyright (c) 2018, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "parquet_gpu.h" #include <io/utilities/block_utils.cuh> namespace cudf { namespace io { namespace parquet { namespace gpu { // Minimal thrift implementation for parsing page headers enum { ST_FLD_TRUE = 1, ST_FLD_FALSE = 2, ST_FLD_BYTE = 3, ST_FLD_I16 = 4, ST_FLD_I32 = 5, ST_FLD_I64 = 6, ST_FLD_DOUBLE = 7, ST_FLD_BINARY = 8, ST_FLD_LIST = 9, ST_FLD_SET = 10, ST_FLD_MAP = 11, ST_FLD_STRUCT = 12, }; static const __device__ __constant__ uint8_t g_list2struct[16] = { 0, 1, 2, ST_FLD_BYTE, ST_FLD_DOUBLE, 5, ST_FLD_I16, 7, ST_FLD_I32, 9, ST_FLD_I64, ST_FLD_BINARY, ST_FLD_STRUCT, ST_FLD_MAP, ST_FLD_SET, ST_FLD_LIST }; struct byte_stream_s { const uint8_t *cur; const uint8_t *end; const uint8_t *base; // Parsed symbols PageType page_type; PageInfo page; ColumnChunkDesc ck; }; inline __device__ unsigned int getb(byte_stream_s *bs) { return (bs->cur < bs->end) ? *bs->cur++ : 0; } inline __device__ void skip_bytes(byte_stream_s *bs, size_t bytecnt) { bytecnt = min(bytecnt, (size_t)(bs->end - bs->cur)); bs->cur += bytecnt; } __device__ uint32_t get_u32(byte_stream_s *bs) { uint32_t v = 0, l = 0, c; do { c = getb(bs); v |= (c & 0x7f) << l; l += 7; } while (c & 0x80); return v; } inline __device__ int32_t get_i32(byte_stream_s *bs) { uint32_t u = get_u32(bs); return (int32_t)((u >> 1u) ^ -(int32_t)(u & 1)); } __device__ void skip_struct_field(byte_stream_s *bs, int t) { int struct_depth = 0; int rep_cnt = 0; do { if (rep_cnt != 0) { rep_cnt--; } else if (struct_depth != 0) { int c; do { c = getb(bs); if (!c) --struct_depth; } while (!c && struct_depth); if (!struct_depth) break; t = c & 0xf; if (!(c & 0xf0)) get_i32(bs); } switch (t) { case ST_FLD_TRUE: case ST_FLD_FALSE: break; case ST_FLD_I16: case ST_FLD_I32: case ST_FLD_I64: get_u32(bs); break; case ST_FLD_BYTE: skip_bytes(bs, 1); break; case ST_FLD_DOUBLE: skip_bytes(bs, 8); break; case ST_FLD_BINARY: skip_bytes(bs, get_u32(bs)); break; case ST_FLD_LIST: case ST_FLD_SET: { // NOTE: skipping a list of lists is not handled int c = getb(bs); int n = c >> 4; if (n == 0xf) n = get_u32(bs); t = g_list2struct[c & 0xf]; if (t == ST_FLD_STRUCT) struct_depth += n; else rep_cnt = n; } break; case ST_FLD_STRUCT: struct_depth++; break; } } while (rep_cnt || struct_depth); } #define PARQUET_BEGIN_STRUCT(fn) \ __device__ bool fn(byte_stream_s *bs) \ { \ int fld = 0; \ for (;;) \ { \ int c, t, f; \ c = getb(bs); \ if (!c) \ break; \ f = c >> 4; \ t = c & 0xf; \ fld = (f) ? 
fld+f : get_i32(bs); \ switch(fld) { \ #define PARQUET_FLD_ENUM(id, m, mt) \ case id: bs->m = (mt)get_i32(bs); if (t != ST_FLD_I32) return false; break; \ #define PARQUET_FLD_INT32(id, m) \ case id: bs->m = get_i32(bs); if (t != ST_FLD_I32) return false; break; \ #define PARQUET_FLD_STRUCT(id, m) \ case id: if (t != ST_FLD_STRUCT || !m(bs)) return false; break; \ #define PARQUET_END_STRUCT() \ default: \ skip_struct_field(bs, t); \ break; \ } \ } \ return true; \ } \ PARQUET_BEGIN_STRUCT(gpuParseDataPageHeader) PARQUET_FLD_INT32(1, page.num_values) PARQUET_FLD_ENUM(2, page.encoding, Encoding); PARQUET_FLD_ENUM(3, page.definition_level_encoding, Encoding); PARQUET_FLD_ENUM(4, page.repetition_level_encoding, Encoding); PARQUET_END_STRUCT() PARQUET_BEGIN_STRUCT(gpuParseDictionaryPageHeader) PARQUET_FLD_INT32(1, page.num_values) PARQUET_FLD_ENUM(2, page.encoding, Encoding); PARQUET_END_STRUCT() PARQUET_BEGIN_STRUCT(gpuParseDataPageHeaderV2) PARQUET_FLD_INT32(1, page.num_values) PARQUET_FLD_INT32(3, page.num_rows) PARQUET_FLD_ENUM(4, page.encoding, Encoding); PARQUET_FLD_ENUM(5, page.definition_level_encoding, Encoding); PARQUET_FLD_ENUM(6, page.repetition_level_encoding, Encoding); PARQUET_END_STRUCT() PARQUET_BEGIN_STRUCT(gpuParsePageHeader) PARQUET_FLD_ENUM(1, page_type, PageType) PARQUET_FLD_INT32(2, page.uncompressed_page_size) PARQUET_FLD_INT32(3, page.compressed_page_size) PARQUET_FLD_STRUCT(5, gpuParseDataPageHeader) PARQUET_FLD_STRUCT(7, gpuParseDictionaryPageHeader) PARQUET_FLD_STRUCT(8, gpuParseDataPageHeaderV2) PARQUET_END_STRUCT() /** * @brief Kernel for outputting page headers from the specified column chunks * * @param[in] chunks List of column chunks * @param[in] num_chunks Number of column chunks **/ // blockDim {128,1,1} extern "C" __global__ void __launch_bounds__(128) gpuDecodePageHeaders(ColumnChunkDesc *chunks, int32_t num_chunks) { __shared__ byte_stream_s bs_g[4]; int t = threadIdx.x & 0x1f; int chunk = (blockIdx.x << 2) + (threadIdx.x >> 5); byte_stream_s * const bs = &bs_g[threadIdx.x >> 5]; if (chunk < num_chunks) { // NOTE: Assumes that sizeof(ColumnChunkDesc) <= 128 if (t < sizeof(ColumnChunkDesc) / sizeof(uint32_t)) { ((uint32_t *)&bs->ck)[t] = ((const uint32_t *)&chunks[chunk])[t]; } } __syncthreads(); if (chunk < num_chunks) { size_t num_values, values_found; uint32_t data_page_count = 0; uint32_t dictionary_page_count = 0; int32_t max_num_pages; int32_t num_dict_pages = bs->ck.num_dict_pages; PageInfo *page_info; if (!t) { bs->base = bs->cur = bs->ck.compressed_data; bs->end = bs->base + bs->ck.compressed_size; bs->page.chunk_idx = chunk; bs->page.chunk_row = 0; bs->page.num_rows = 0; } num_values = bs->ck.num_values; page_info = bs->ck.page_info; num_dict_pages = bs->ck.num_dict_pages; max_num_pages = (page_info) ? 
bs->ck.max_num_pages : 0; values_found = 0; SYNCWARP(); while (values_found < num_values && bs->cur < bs->end) { int index_out = -1; if (t == 0) { bs->page.chunk_row += bs->page.num_rows; bs->page.num_rows = 0; if (gpuParsePageHeader(bs) && bs->page.compressed_page_size >= 0) { switch (bs->page_type) { case DATA_PAGE: // TODO: Unless the file only uses V2 page headers or has no complex nesting (num_rows == num_values), we can't infer num_rows at this time // -> we'll need another pass after decompression to parse the definition and repetition levels to infer the correct value of num_rows bs->page.num_rows = bs->page.num_values; // Assumes num_rows == num_values // Fall-through to V2 case DATA_PAGE_V2: index_out = num_dict_pages + data_page_count; data_page_count++; bs->page.flags = 0; values_found += bs->page.num_values; break; case DICTIONARY_PAGE: index_out = dictionary_page_count; dictionary_page_count++; bs->page.flags = PAGEINFO_FLAGS_DICTIONARY; break; default: index_out = -1; break; } bs->page.page_data = const_cast<uint8_t *>(bs->cur); bs->cur += bs->page.compressed_page_size; } else { bs->cur = bs->end; } } index_out = SHFL0(index_out); if (index_out >= 0 && index_out < max_num_pages) { // NOTE: Assumes that sizeof(PageInfo) <= 128 if (t < sizeof(PageInfo) / sizeof(uint32_t)) { ((uint32_t *)(page_info + index_out))[t] = ((const uint32_t *)&bs->page)[t]; } } num_values = SHFL0(num_values); SYNCWARP(); } if (t == 0) { chunks[chunk].num_data_pages = data_page_count; chunks[chunk].num_dict_pages = dictionary_page_count; } } } /** * @brief Kernel for building dictionary index for the specified column chunks * * This function builds an index to point to each dictionary entry * (string format is 4-byte little-endian string length followed by character * data). The index is a 32-bit integer which contains the offset of each string * relative to the beginning of the dictionary page data. 
* * @param[in] chunks List of column chunks * @param[in] num_chunks Number of column chunks **/ // blockDim {128,1,1} extern "C" __global__ void __launch_bounds__(128) gpuBuildStringDictionaryIndex(ColumnChunkDesc *chunks, int32_t num_chunks) { __shared__ ColumnChunkDesc chunk_g[4]; int t = threadIdx.x & 0x1f; int chunk = (blockIdx.x << 2) + (threadIdx.x >> 5); ColumnChunkDesc * const ck = &chunk_g[threadIdx.x >> 5]; if (chunk < num_chunks) { // NOTE: Assumes that sizeof(ColumnChunkDesc) <= 128 if (t < sizeof(ColumnChunkDesc) / sizeof(uint32_t)) { ((uint32_t *)ck)[t] = ((const uint32_t *)&chunks[chunk])[t]; } } __syncthreads(); if (chunk >= num_chunks) { return; } if (!t && ck->num_dict_pages > 0 && ck->str_dict_index) { // Data type to describe a string nvstrdesc_s *dict_index = ck->str_dict_index; const uint8_t *dict = ck->page_info[0].page_data; int dict_size = ck->page_info[0].uncompressed_page_size; int num_entries = ck->page_info[0].num_values; int pos = 0, cur = 0; for (int i = 0; i < num_entries; i++) { int len = 0; if (cur + 4 <= dict_size) { len = dict[cur+0] | (dict[cur+1] << 8) | (dict[cur+2] << 16) | (dict[cur+3] << 24); if (len >= 0 && cur + 4 + len <= dict_size) { pos = cur; cur = cur + 4 + len; } else { cur = dict_size; } } // TODO: Could store 8 entries in shared mem, then do a single warp-wide store dict_index[i].ptr = (const char *)(dict + pos + 4); dict_index[i].count = len; } } } cudaError_t __host__ DecodePageHeaders(ColumnChunkDesc *chunks, int32_t num_chunks, cudaStream_t stream) { dim3 dim_block(128, 1); dim3 dim_grid((num_chunks + 3) >> 2, 1); // 1 chunk per warp, 4 warps per block gpuDecodePageHeaders<<<dim_grid, dim_block, 0, stream>>>(chunks, num_chunks); return cudaSuccess; } cudaError_t __host__ BuildStringDictionaryIndex(ColumnChunkDesc *chunks, int32_t num_chunks, cudaStream_t stream) { dim3 dim_block(128, 1); dim3 dim_grid((num_chunks + 3) >> 2, 1); // 1 chunk per warp, 4 warps per block gpuBuildStringDictionaryIndex<<<dim_grid, dim_block, 0, stream>>>(chunks, num_chunks); return cudaSuccess; } } // namespace gpu } // namespace parquet } // namespace io } // namespace cudf
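The dictionary layout described in the comment above (a 4-byte little-endian length followed by the characters) can be walked on the CPU with the same logic as gpuBuildStringDictionaryIndex. In the sketch below, StrDesc stands in for nvstrdesc_s and the two-entry dictionary is made up for illustration.

#include <cstdint>
#include <cstdio>
#include <vector>

struct StrDesc { const char* ptr; int count; };  // stand-in for nvstrdesc_s

// CPU version of the index walk: each entry is a 4-byte little-endian length
// followed by that many characters; malformed lengths stop the scan, as in the kernel.
static std::vector<StrDesc> build_index(const uint8_t* dict, int dict_size, int num_entries) {
  std::vector<StrDesc> index(num_entries);
  int pos = 0, cur = 0;
  for (int i = 0; i < num_entries; ++i) {
    int len = 0;
    if (cur + 4 <= dict_size) {
      len = dict[cur] | (dict[cur + 1] << 8) | (dict[cur + 2] << 16) | (dict[cur + 3] << 24);
      if (len >= 0 && cur + 4 + len <= dict_size) { pos = cur; cur += 4 + len; }
      else { cur = dict_size; }
    }
    index[i] = { reinterpret_cast<const char*>(dict + pos + 4), len };
  }
  return index;
}

int main() {
  // Two entries: "cat" (length 3) and "mouse" (length 5), lengths stored little-endian.
  const uint8_t dict[] = {3, 0, 0, 0, 'c', 'a', 't', 5, 0, 0, 0, 'm', 'o', 'u', 's', 'e'};
  auto idx = build_index(dict, sizeof(dict), 2);
  for (const auto& e : idx) printf("%.*s (%d)\n", e.count, e.ptr, e.count);
  return 0;
}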
4c67f8fb9370822e72693fb199fc30cb76adfad5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" extern "C" { typedef struct { int e0; char* e1; } struct_Buffer_5425; typedef struct { struct_Buffer_5425 e0; struct_Buffer_5425 e1; int e2; int e3; } struct_image_5424; typedef struct { struct_Buffer_5425 e0; int e1; int e2; } struct_filter_5428; __device__ inline int threadIdx_x() { return threadIdx.x; } __device__ inline int threadIdx_y() { return threadIdx.y; } __device__ inline int threadIdx_z() { return threadIdx.z; } __device__ inline int blockIdx_x() { return blockIdx.x; } __device__ inline int blockIdx_y() { return blockIdx.y; } __device__ inline int blockIdx_z() { return blockIdx.z; } __device__ inline int blockDim_x() { return blockDim.x; } __device__ inline int blockDim_y() { return blockDim.y; } __device__ inline int blockDim_z() { return blockDim.z; } __device__ inline int gridDim_x() { return gridDim.x; } __device__ inline int gridDim_y() { return gridDim.y; } __device__ inline int gridDim_z() { return gridDim.z; } __global__ void lambda_20643(struct_image_5424, struct_filter_5428, struct_Buffer_5425); __global__ void lambda_20775(struct_filter_5428, struct_image_5424, struct_Buffer_5425, double*, struct_Buffer_5425); __global__ __launch_bounds__ (128 * 1 * 1) void lambda_20643(struct_image_5424 _20646_22982, struct_filter_5428 _20647_22983, struct_Buffer_5425 _20648_22984) { __shared__ double ds_img[134][7]; int _22990; int p_22990; int _22996; int p_22996; int _23002; int p_23002; int _23008; int p_23008; int _23014; int p_23014; int _23020; int p_23020; int _23043; int p_23043; double sum_23045; double psum_23045; #line 1 "/home/rafael/repositories/anydsl/runtime/platforms/intrinsics_cuda.impala" _22990 = blockIdx_x(); p_22990 = _22990; l22988: ; _22990 = p_22990; #line 1 "/home/rafael/repositories/anydsl/runtime/platforms/intrinsics_cuda.impala" _22996 = blockDim_x(); p_22996 = _22996; l22994: ; _22996 = p_22996; #line 1 "/home/rafael/repositories/anydsl/runtime/platforms/intrinsics_cuda.impala" _23002 = threadIdx_x(); p_23002 = _23002; l23000: ; _23002 = p_23002; #line 1 "/home/rafael/repositories/anydsl/runtime/platforms/intrinsics_cuda.impala" _23008 = blockIdx_y(); p_23008 = _23008; l23006: ; _23008 = p_23008; #line 1 "/home/rafael/repositories/anydsl/runtime/platforms/intrinsics_cuda.impala" _23014 = blockDim_y(); p_23014 = _23014; l23012: ; _23014 = p_23014; #line 1 "/home/rafael/repositories/anydsl/runtime/platforms/intrinsics_cuda.impala" _23020 = threadIdx_y(); p_23020 = _23020; l23018: ; _23020 = p_23020; #line 11 "main.impala" int _23024; _23024 = _20646_22982.e2; #line 155 "gpu_device.impala" int _23021; _23021 = _22990 * _22996; #line 155 "gpu_device.impala" int gid_x_23022; gid_x_23022 = _23021 + _23002; #line 160 "gpu_device.impala" bool _23025; _23025 = gid_x_23022 < _23024; #line 160 "gpu_device.impala" if (_23025) goto l23026; else goto l23091; l23091: ; #line 163 "gpu_device.impala" goto l23090; l23026: ; #line 157 "gpu_device.impala" int _23027; _23027 = _23008 * _23014; #line 157 "gpu_device.impala" int gid_y_23028; gid_y_23028 = _23027 + _23020; #line 11 "main.impala" int _23030; _23030 = _20646_22982.e3; #line 160 "gpu_device.impala" bool _23031; _23031 = gid_y_23028 < _23030; #line 160 "gpu_device.impala" if (_23031) goto l23032; else goto l23089; l23089: ; #line 163 "gpu_device.impala" goto l23090; l23090: ; return ; l23032: ; #line 45 "gpu_device.impala" char* _23073; _23073 = _20648_22984.e1; #line 50 "gpu_device.impala" int _23062; _23062 = gid_y_23028 * 
_23024; #line 50 "gpu_device.impala" struct_Buffer_5425 _23059; _23059 = _20646_22982.e1; #line 45 "gpu_device.impala" double* _23074; union { double* dst; char* src; } u_23074; u_23074.src = _23073; _23074 = u_23074.dst; #line 50 "gpu_device.impala" int _23075; _23075 = _23062 + gid_x_23022; #line 45 "gpu_device.impala" double* _23076; _23076 = _23074 + _23075; #line 4 "gaussian.impala" int _23034; _23034 = _20647_22983.e1; #line 50 "gpu_device.impala" char* _23060; _23060 = _23059.e1; #line 4 "gaussian.impala" int h_anchor_23036; h_anchor_23036 = _23034 / 2; #line 50 "gpu_device.impala" double* _23061; union { double* dst; char* src; } u_23061; u_23061.src = _23060; _23061 = u_23061.dst; for(int i = 0; i < blockDim.x + 6; i += blockDim.x) { for(int j = 0; j < blockDim.y + 6; j += blockDim.y) { if(threadIdx.x + i < blockDim.x + 6 && threadIdx.y + j < blockDim.y + 6 && ((blockIdx.x * blockDim.x + threadIdx.x) - 3 + i) >= 0 && ((blockIdx.x * blockDim.x + threadIdx.x) - 3 + i) < IMAGE_WIDTH && ((blockIdx.y * blockDim.y + threadIdx.y) - 3 + j) >= 0 && ((blockIdx.y * blockDim.y + threadIdx.y) - 3 + j) < IMAGE_HEIGHT) { ds_img[threadIdx.x + i][threadIdx.y + j] = \ _23061[((blockIdx.y * blockDim.y + threadIdx.y) - 3 + j) * IMAGE_WIDTH + ((blockIdx.x * blockDim.x + threadIdx.x) - 3 + i)]; } } } __syncthreads(); #line 17 "gaussian.impala" bool _23037; _23037 = h_anchor_23036 <= gid_x_23022; #line 17 "gaussian.impala" if (_23037) goto l23038; else goto l23088; l23088: ; #line 27 "gaussian.impala" goto l23082; l23038: ; #line 17 "gaussian.impala" int _23039; _23039 = _23024 - h_anchor_23036; #line 17 "gaussian.impala" bool _23040; _23040 = gid_x_23022 < _23039; #line 17 "gaussian.impala" if (_23040) goto l23041; else goto l23081; l23081: ; #line 27 "gaussian.impala" goto l23082; l23082: ; #line 50 "gpu_device.impala" double* _23083; _23083 = ds_img[_23075 + 3 - blockIdx.x * blockDim.x][_23075 + 3 - blockIdx.y * blockDim.y]; #line 50 "gpu_device.impala" double _23084; _23084 = *_23083; #line 50 "gpu_device.impala" double _23086; _23086 = _23084; #line 45 "gpu_device.impala" *_23076 = _23086; return ; l23041: ; #line 19 "gaussian.impala" int _23047; _23047 = 1 + h_anchor_23036; #line 55 "gpu_device.impala" struct_Buffer_5425 _23052; _23052 = _20647_22983.e0; #line 19 "gaussian.impala" int _23079; _23079 = 0 - h_anchor_23036; #line 55 "gpu_device.impala" char* _23053; _23053 = _23052.e1; #line 55 "gpu_device.impala" double* _23054; union { double* dst; char* src; } u_23054; u_23054.src = _23053; _23054 = u_23054.dst; for(int i = 0; i < blockDim.x + 6; i += blockDim.x) { for(int j = 0; j < blockDim.y + 6; j += blockDim.y) { if(threadIdx.x + i < blockDim.x + 6 && threadIdx.y + j < blockDim.y + 6 && ((blockIdx.x * blockDim.x + threadIdx.x) - 3 + i) >= 0 && ((blockIdx.x * blockDim.x + threadIdx.x) - 3 + i) < IMAGE_WIDTH && ((blockIdx.y * blockDim.y + threadIdx.y) - 3 + j) >= 0 && ((blockIdx.y * blockDim.y + threadIdx.y) - 3 + j) < IMAGE_HEIGHT) { ds_img[threadIdx.x + i][threadIdx.y + j] = \ _23054[((blockIdx.y * blockDim.y + threadIdx.y) - 3 + j) * IMAGE_WIDTH + ((blockIdx.x * blockDim.x + threadIdx.x) - 3 + i)]; } } } __syncthreads(); #line 19 "gpu_device.impala" p_23043 = _23079; psum_23045 = 0.000000e+00; goto l23042; l23042: ; _23043 = p_23043; sum_23045 = psum_23045; #line 19 "gpu_device.impala" bool _23048; _23048 = _23043 < _23047; #line 19 "gpu_device.impala" if (_23048) goto l23049; else goto l23072; l23072: ; #line 45 "gpu_device.impala" *_23076 = sum_23045; return ; l23049: ; #line 23 
"gpu_device.impala" int _23050; _23050 = 1 + _23043; #line 21 "gaussian.impala" int _23055; _23055 = _23043 + h_anchor_23036; #line 21 "gaussian.impala" int _23063; _23063 = gid_x_23022 + _23043; #line 54 "gpu_device.impala" double* i_23056; i_23056 = ds_img[_23055 + 3 - blockIdx.x * blockDim.x][_23055 + 3 - blockIdx.y * blockDim.y]; #line 50 "gpu_device.impala" int _23064; _23064 = _23062 + _23063; #line 55 "gpu_device.impala" double _23057; _23057 = *i_23056; #line 50 "gpu_device.impala" double* _23065; _23065 = ds_img[_23064 + 3 - blockIdx.x * blockDim.x][_23064 + 3 - blockIdx.y * blockDim.y]; #line 55 "gpu_device.impala" double _23068; _23068 = _23057; #line 50 "gpu_device.impala" double _23066; _23066 = *_23065; #line 50 "gpu_device.impala" double _23069; _23069 = _23066; #line 21 "gaussian.impala" double _23070; _23070 = _23068 * _23069; #line 21 "gaussian.impala" double _23071; _23071 = sum_23045 + _23070; #line 19 "gpu_device.impala" p_23043 = _23050; psum_23045 = _23071; goto l23042; } __global__ __launch_bounds__ (128 * 1 * 1) void lambda_20775(struct_filter_5428 _20778_23095, struct_image_5424 _20779_23096, struct_Buffer_5425 _20780_23097, double* _20781_23098, struct_Buffer_5425 _20782_23099) { __shared__ double ds_img[134][7]; int _23102; int p_23102; int _23105; int p_23105; int _23108; int p_23108; int _23111; int p_23111; int _23114; int p_23114; int _23117; int p_23117; int _23136; int p_23136; double sum_23138; double psum_23138; #line 1 "/home/rafael/repositories/anydsl/runtime/platforms/intrinsics_cuda.impala" _23102 = blockIdx_x(); p_23102 = _23102; l23100: ; _23102 = p_23102; #line 1 "/home/rafael/repositories/anydsl/runtime/platforms/intrinsics_cuda.impala" _23105 = blockDim_x(); p_23105 = _23105; l23103: ; _23105 = p_23105; #line 1 "/home/rafael/repositories/anydsl/runtime/platforms/intrinsics_cuda.impala" _23108 = threadIdx_x(); p_23108 = _23108; l23106: ; _23108 = p_23108; #line 1 "/home/rafael/repositories/anydsl/runtime/platforms/intrinsics_cuda.impala" _23111 = blockIdx_y(); p_23111 = _23111; l23109: ; _23111 = p_23111; #line 1 "/home/rafael/repositories/anydsl/runtime/platforms/intrinsics_cuda.impala" _23114 = blockDim_y(); p_23114 = _23114; l23112: ; _23114 = p_23114; #line 1 "/home/rafael/repositories/anydsl/runtime/platforms/intrinsics_cuda.impala" _23117 = threadIdx_y(); p_23117 = _23117; l23115: ; _23117 = p_23117; #line 155 "gpu_device.impala" int _23118; _23118 = _23102 * _23105; #line 155 "gpu_device.impala" int gid_x_23119; gid_x_23119 = _23118 + _23108; #line 11 "main.impala" int _23120; _23120 = _20779_23096.e2; #line 160 "gpu_device.impala" bool _23121; _23121 = gid_x_23119 < _23120; #line 160 "gpu_device.impala" if (_23121) goto l23122; else goto l23180; l23180: ; #line 163 "gpu_device.impala" goto l23179; l23122: ; #line 157 "gpu_device.impala" int _23123; _23123 = _23111 * _23114; #line 11 "main.impala" int _23125; _23125 = _20779_23096.e3; #line 157 "gpu_device.impala" int gid_y_23124; gid_y_23124 = _23123 + _23117; #line 160 "gpu_device.impala" bool _23126; _23126 = gid_y_23124 < _23125; #line 160 "gpu_device.impala" if (_23126) goto l23127; else goto l23178; l23178: ; #line 163 "gpu_device.impala" goto l23179; l23179: ; return ; l23127: ; #line 45 "gpu_device.impala" char* _23163; _23163 = _20780_23097.e1; #line 50 "gpu_device.impala" char* _23150; _23150 = _20782_23099.e1; #line 50 "gpu_device.impala" int _23165; _23165 = gid_y_23124 * _23120; #line 6 "gaussian.impala" int _23128; _23128 = _20778_23095.e2; #line 45 "gpu_device.impala" 
double* _23164; union { double* dst; char* src; } u_23164; u_23164.src = _23163; _23164 = u_23164.dst; #line 50 "gpu_device.impala" double* _23151; union { double* dst; char* src; } u_23151; u_23151.src = _23150; _23151 = u_23151.dst; #line 50 "gpu_device.impala" int _23166; _23166 = _23165 + gid_x_23119; #line 6 "gaussian.impala" int v_anchor_23129; v_anchor_23129 = _23128 / 2; #line 45 "gpu_device.impala" double* _23167; _23167 = _23164 + _23166; #line 39 "gaussian.impala" bool _23130; _23130 = v_anchor_23129 <= gid_y_23124; #line 39 "gaussian.impala" if (_23130) goto l23131; else goto l23177; l23177: ; #line 49 "gaussian.impala" goto l23171; l23131: ; #line 39 "gaussian.impala" int _23132; _23132 = _23125 - v_anchor_23129; #line 39 "gaussian.impala" bool _23133; _23133 = gid_y_23124 < _23132; #line 39 "gaussian.impala" if (_23133) goto l23134; else goto l23170; l23170: ; #line 49 "gaussian.impala" goto l23171; l23171: ; #line 50 "gpu_device.impala" double* _23172; _23172 = _23151 + _23166; #line 50 "gpu_device.impala" double _23173; _23173 = *_23172; #line 50 "gpu_device.impala" double _23175; _23175 = _23173; #line 45 "gpu_device.impala" *_23167 = _23175; return ; l23134: ; #line 41 "gaussian.impala" int _23139; _23139 = 1 + v_anchor_23129; #line 55 "gpu_device.impala" struct_Buffer_5425 _23143; _23143 = _20778_23095.e0; #line 41 "gaussian.impala" int _23169; _23169 = 0 - v_anchor_23129; #line 55 "gpu_device.impala" char* _23144; _23144 = _23143.e1; #line 55 "gpu_device.impala" double* _23145; union { double* dst; char* src; } u_23145; u_23145.src = _23144; _23145 = u_23145.dst; for(int i = 0; i < blockDim.x + 6; i += blockDim.x) { for(int j = 0; j < blockDim.y + 6; j += blockDim.y) { if(threadIdx.x + i < blockDim.x + 6 && threadIdx.y + j < blockDim.y + 6 && ((blockIdx.x * blockDim.x + threadIdx.x) - 3 + i) >= 0 && ((blockIdx.x * blockDim.x + threadIdx.x) - 3 + i) < IMAGE_WIDTH && ((blockIdx.y * blockDim.y + threadIdx.y) - 3 + j) >= 0 && ((blockIdx.y * blockDim.y + threadIdx.y) - 3 + j) < IMAGE_HEIGHT) { ds_img[threadIdx.x + i][threadIdx.y + j] = \ _23145[((blockIdx.y * blockDim.y + threadIdx.y) - 3 + j) * IMAGE_WIDTH + ((blockIdx.x * blockDim.x + threadIdx.x) - 3 + i)]; } } } __syncthreads(); #line 19 "gpu_device.impala" p_23136 = _23169; psum_23138 = 0.000000e+00; goto l23135; l23135: ; _23136 = p_23136; sum_23138 = psum_23138; #line 19 "gpu_device.impala" bool _23140; _23140 = _23136 < _23139; #line 19 "gpu_device.impala" if (_23140) goto l23141; else goto l23162; l23162: ; #line 45 "gpu_device.impala" *_23167 = sum_23138; return ; l23141: ; #line 43 "gaussian.impala" int _23146; _23146 = _23136 + v_anchor_23129; #line 43 "gaussian.impala" int _23152; _23152 = gid_y_23124 + _23136; #line 50 "gpu_device.impala" int _23153; _23153 = _23152 * _23120; #line 23 "gpu_device.impala" int _23142; _23142 = 1 + _23136; #line 54 "gpu_device.impala" double* i_23147; i_23147 = ds_img[_23146 + 3 - blockIdx.x * blockDim.x][_23146 + 3 - blockIdx.y * blockDim.y]; #line 55 "gpu_device.impala" double _23148; _23148 = *i_23147; #line 50 "gpu_device.impala" int _23154; _23154 = _23153 + gid_x_23119; #line 55 "gpu_device.impala" double _23158; _23158 = _23148; #line 50 "gpu_device.impala" double* _23155; _23155 = _23151 + _23154; #line 50 "gpu_device.impala" double _23156; _23156 = *_23155; #line 50 "gpu_device.impala" double _23159; _23159 = _23156; #line 43 "gaussian.impala" double _23160; _23160 = _23158 * _23159; #line 43 "gaussian.impala" double _23161; _23161 = sum_23138 + _23160; #line 19 
"gpu_device.impala" p_23136 = _23142; psum_23138 = _23161; goto l23135; } }
4c67f8fb9370822e72693fb199fc30cb76adfad5.cu
extern "C" { typedef struct { int e0; char* e1; } struct_Buffer_5425; typedef struct { struct_Buffer_5425 e0; struct_Buffer_5425 e1; int e2; int e3; } struct_image_5424; typedef struct { struct_Buffer_5425 e0; int e1; int e2; } struct_filter_5428; __device__ inline int threadIdx_x() { return threadIdx.x; } __device__ inline int threadIdx_y() { return threadIdx.y; } __device__ inline int threadIdx_z() { return threadIdx.z; } __device__ inline int blockIdx_x() { return blockIdx.x; } __device__ inline int blockIdx_y() { return blockIdx.y; } __device__ inline int blockIdx_z() { return blockIdx.z; } __device__ inline int blockDim_x() { return blockDim.x; } __device__ inline int blockDim_y() { return blockDim.y; } __device__ inline int blockDim_z() { return blockDim.z; } __device__ inline int gridDim_x() { return gridDim.x; } __device__ inline int gridDim_y() { return gridDim.y; } __device__ inline int gridDim_z() { return gridDim.z; } __global__ void lambda_20643(struct_image_5424, struct_filter_5428, struct_Buffer_5425); __global__ void lambda_20775(struct_filter_5428, struct_image_5424, struct_Buffer_5425, double*, struct_Buffer_5425); __global__ __launch_bounds__ (128 * 1 * 1) void lambda_20643(struct_image_5424 _20646_22982, struct_filter_5428 _20647_22983, struct_Buffer_5425 _20648_22984) { __shared__ double ds_img[134][7]; int _22990; int p_22990; int _22996; int p_22996; int _23002; int p_23002; int _23008; int p_23008; int _23014; int p_23014; int _23020; int p_23020; int _23043; int p_23043; double sum_23045; double psum_23045; #line 1 "/home/rafael/repositories/anydsl/runtime/platforms/intrinsics_cuda.impala" _22990 = blockIdx_x(); p_22990 = _22990; l22988: ; _22990 = p_22990; #line 1 "/home/rafael/repositories/anydsl/runtime/platforms/intrinsics_cuda.impala" _22996 = blockDim_x(); p_22996 = _22996; l22994: ; _22996 = p_22996; #line 1 "/home/rafael/repositories/anydsl/runtime/platforms/intrinsics_cuda.impala" _23002 = threadIdx_x(); p_23002 = _23002; l23000: ; _23002 = p_23002; #line 1 "/home/rafael/repositories/anydsl/runtime/platforms/intrinsics_cuda.impala" _23008 = blockIdx_y(); p_23008 = _23008; l23006: ; _23008 = p_23008; #line 1 "/home/rafael/repositories/anydsl/runtime/platforms/intrinsics_cuda.impala" _23014 = blockDim_y(); p_23014 = _23014; l23012: ; _23014 = p_23014; #line 1 "/home/rafael/repositories/anydsl/runtime/platforms/intrinsics_cuda.impala" _23020 = threadIdx_y(); p_23020 = _23020; l23018: ; _23020 = p_23020; #line 11 "main.impala" int _23024; _23024 = _20646_22982.e2; #line 155 "gpu_device.impala" int _23021; _23021 = _22990 * _22996; #line 155 "gpu_device.impala" int gid_x_23022; gid_x_23022 = _23021 + _23002; #line 160 "gpu_device.impala" bool _23025; _23025 = gid_x_23022 < _23024; #line 160 "gpu_device.impala" if (_23025) goto l23026; else goto l23091; l23091: ; #line 163 "gpu_device.impala" goto l23090; l23026: ; #line 157 "gpu_device.impala" int _23027; _23027 = _23008 * _23014; #line 157 "gpu_device.impala" int gid_y_23028; gid_y_23028 = _23027 + _23020; #line 11 "main.impala" int _23030; _23030 = _20646_22982.e3; #line 160 "gpu_device.impala" bool _23031; _23031 = gid_y_23028 < _23030; #line 160 "gpu_device.impala" if (_23031) goto l23032; else goto l23089; l23089: ; #line 163 "gpu_device.impala" goto l23090; l23090: ; return ; l23032: ; #line 45 "gpu_device.impala" char* _23073; _23073 = _20648_22984.e1; #line 50 "gpu_device.impala" int _23062; _23062 = gid_y_23028 * _23024; #line 50 "gpu_device.impala" struct_Buffer_5425 _23059; _23059 = _20646_22982.e1; 
#line 45 "gpu_device.impala" double* _23074; union { double* dst; char* src; } u_23074; u_23074.src = _23073; _23074 = u_23074.dst; #line 50 "gpu_device.impala" int _23075; _23075 = _23062 + gid_x_23022; #line 45 "gpu_device.impala" double* _23076; _23076 = _23074 + _23075; #line 4 "gaussian.impala" int _23034; _23034 = _20647_22983.e1; #line 50 "gpu_device.impala" char* _23060; _23060 = _23059.e1; #line 4 "gaussian.impala" int h_anchor_23036; h_anchor_23036 = _23034 / 2; #line 50 "gpu_device.impala" double* _23061; union { double* dst; char* src; } u_23061; u_23061.src = _23060; _23061 = u_23061.dst; for(int i = 0; i < blockDim.x + 6; i += blockDim.x) { for(int j = 0; j < blockDim.y + 6; j += blockDim.y) { if(threadIdx.x + i < blockDim.x + 6 && threadIdx.y + j < blockDim.y + 6 && ((blockIdx.x * blockDim.x + threadIdx.x) - 3 + i) >= 0 && ((blockIdx.x * blockDim.x + threadIdx.x) - 3 + i) < IMAGE_WIDTH && ((blockIdx.y * blockDim.y + threadIdx.y) - 3 + j) >= 0 && ((blockIdx.y * blockDim.y + threadIdx.y) - 3 + j) < IMAGE_HEIGHT) { ds_img[threadIdx.x + i][threadIdx.y + j] = \ _23061[((blockIdx.y * blockDim.y + threadIdx.y) - 3 + j) * IMAGE_WIDTH + ((blockIdx.x * blockDim.x + threadIdx.x) - 3 + i)]; } } } __syncthreads(); #line 17 "gaussian.impala" bool _23037; _23037 = h_anchor_23036 <= gid_x_23022; #line 17 "gaussian.impala" if (_23037) goto l23038; else goto l23088; l23088: ; #line 27 "gaussian.impala" goto l23082; l23038: ; #line 17 "gaussian.impala" int _23039; _23039 = _23024 - h_anchor_23036; #line 17 "gaussian.impala" bool _23040; _23040 = gid_x_23022 < _23039; #line 17 "gaussian.impala" if (_23040) goto l23041; else goto l23081; l23081: ; #line 27 "gaussian.impala" goto l23082; l23082: ; #line 50 "gpu_device.impala" double* _23083; _23083 = ds_img[_23075 + 3 - blockIdx.x * blockDim.x][_23075 + 3 - blockIdx.y * blockDim.y]; #line 50 "gpu_device.impala" double _23084; _23084 = *_23083; #line 50 "gpu_device.impala" double _23086; _23086 = _23084; #line 45 "gpu_device.impala" *_23076 = _23086; return ; l23041: ; #line 19 "gaussian.impala" int _23047; _23047 = 1 + h_anchor_23036; #line 55 "gpu_device.impala" struct_Buffer_5425 _23052; _23052 = _20647_22983.e0; #line 19 "gaussian.impala" int _23079; _23079 = 0 - h_anchor_23036; #line 55 "gpu_device.impala" char* _23053; _23053 = _23052.e1; #line 55 "gpu_device.impala" double* _23054; union { double* dst; char* src; } u_23054; u_23054.src = _23053; _23054 = u_23054.dst; for(int i = 0; i < blockDim.x + 6; i += blockDim.x) { for(int j = 0; j < blockDim.y + 6; j += blockDim.y) { if(threadIdx.x + i < blockDim.x + 6 && threadIdx.y + j < blockDim.y + 6 && ((blockIdx.x * blockDim.x + threadIdx.x) - 3 + i) >= 0 && ((blockIdx.x * blockDim.x + threadIdx.x) - 3 + i) < IMAGE_WIDTH && ((blockIdx.y * blockDim.y + threadIdx.y) - 3 + j) >= 0 && ((blockIdx.y * blockDim.y + threadIdx.y) - 3 + j) < IMAGE_HEIGHT) { ds_img[threadIdx.x + i][threadIdx.y + j] = \ _23054[((blockIdx.y * blockDim.y + threadIdx.y) - 3 + j) * IMAGE_WIDTH + ((blockIdx.x * blockDim.x + threadIdx.x) - 3 + i)]; } } } __syncthreads(); #line 19 "gpu_device.impala" p_23043 = _23079; psum_23045 = 0.000000e+00; goto l23042; l23042: ; _23043 = p_23043; sum_23045 = psum_23045; #line 19 "gpu_device.impala" bool _23048; _23048 = _23043 < _23047; #line 19 "gpu_device.impala" if (_23048) goto l23049; else goto l23072; l23072: ; #line 45 "gpu_device.impala" *_23076 = sum_23045; return ; l23049: ; #line 23 "gpu_device.impala" int _23050; _23050 = 1 + _23043; #line 21 "gaussian.impala" int _23055; _23055 = 
_23043 + h_anchor_23036; #line 21 "gaussian.impala" int _23063; _23063 = gid_x_23022 + _23043; #line 54 "gpu_device.impala" double* i_23056; i_23056 = ds_img[_23055 + 3 - blockIdx.x * blockDim.x][_23055 + 3 - blockIdx.y * blockDim.y]; #line 50 "gpu_device.impala" int _23064; _23064 = _23062 + _23063; #line 55 "gpu_device.impala" double _23057; _23057 = *i_23056; #line 50 "gpu_device.impala" double* _23065; _23065 = ds_img[_23064 + 3 - blockIdx.x * blockDim.x][_23064 + 3 - blockIdx.y * blockDim.y]; #line 55 "gpu_device.impala" double _23068; _23068 = _23057; #line 50 "gpu_device.impala" double _23066; _23066 = *_23065; #line 50 "gpu_device.impala" double _23069; _23069 = _23066; #line 21 "gaussian.impala" double _23070; _23070 = _23068 * _23069; #line 21 "gaussian.impala" double _23071; _23071 = sum_23045 + _23070; #line 19 "gpu_device.impala" p_23043 = _23050; psum_23045 = _23071; goto l23042; } __global__ __launch_bounds__ (128 * 1 * 1) void lambda_20775(struct_filter_5428 _20778_23095, struct_image_5424 _20779_23096, struct_Buffer_5425 _20780_23097, double* _20781_23098, struct_Buffer_5425 _20782_23099) { __shared__ double ds_img[134][7]; int _23102; int p_23102; int _23105; int p_23105; int _23108; int p_23108; int _23111; int p_23111; int _23114; int p_23114; int _23117; int p_23117; int _23136; int p_23136; double sum_23138; double psum_23138; #line 1 "/home/rafael/repositories/anydsl/runtime/platforms/intrinsics_cuda.impala" _23102 = blockIdx_x(); p_23102 = _23102; l23100: ; _23102 = p_23102; #line 1 "/home/rafael/repositories/anydsl/runtime/platforms/intrinsics_cuda.impala" _23105 = blockDim_x(); p_23105 = _23105; l23103: ; _23105 = p_23105; #line 1 "/home/rafael/repositories/anydsl/runtime/platforms/intrinsics_cuda.impala" _23108 = threadIdx_x(); p_23108 = _23108; l23106: ; _23108 = p_23108; #line 1 "/home/rafael/repositories/anydsl/runtime/platforms/intrinsics_cuda.impala" _23111 = blockIdx_y(); p_23111 = _23111; l23109: ; _23111 = p_23111; #line 1 "/home/rafael/repositories/anydsl/runtime/platforms/intrinsics_cuda.impala" _23114 = blockDim_y(); p_23114 = _23114; l23112: ; _23114 = p_23114; #line 1 "/home/rafael/repositories/anydsl/runtime/platforms/intrinsics_cuda.impala" _23117 = threadIdx_y(); p_23117 = _23117; l23115: ; _23117 = p_23117; #line 155 "gpu_device.impala" int _23118; _23118 = _23102 * _23105; #line 155 "gpu_device.impala" int gid_x_23119; gid_x_23119 = _23118 + _23108; #line 11 "main.impala" int _23120; _23120 = _20779_23096.e2; #line 160 "gpu_device.impala" bool _23121; _23121 = gid_x_23119 < _23120; #line 160 "gpu_device.impala" if (_23121) goto l23122; else goto l23180; l23180: ; #line 163 "gpu_device.impala" goto l23179; l23122: ; #line 157 "gpu_device.impala" int _23123; _23123 = _23111 * _23114; #line 11 "main.impala" int _23125; _23125 = _20779_23096.e3; #line 157 "gpu_device.impala" int gid_y_23124; gid_y_23124 = _23123 + _23117; #line 160 "gpu_device.impala" bool _23126; _23126 = gid_y_23124 < _23125; #line 160 "gpu_device.impala" if (_23126) goto l23127; else goto l23178; l23178: ; #line 163 "gpu_device.impala" goto l23179; l23179: ; return ; l23127: ; #line 45 "gpu_device.impala" char* _23163; _23163 = _20780_23097.e1; #line 50 "gpu_device.impala" char* _23150; _23150 = _20782_23099.e1; #line 50 "gpu_device.impala" int _23165; _23165 = gid_y_23124 * _23120; #line 6 "gaussian.impala" int _23128; _23128 = _20778_23095.e2; #line 45 "gpu_device.impala" double* _23164; union { double* dst; char* src; } u_23164; u_23164.src = _23163; _23164 = u_23164.dst; 
#line 50 "gpu_device.impala" double* _23151; union { double* dst; char* src; } u_23151; u_23151.src = _23150; _23151 = u_23151.dst; #line 50 "gpu_device.impala" int _23166; _23166 = _23165 + gid_x_23119; #line 6 "gaussian.impala" int v_anchor_23129; v_anchor_23129 = _23128 / 2; #line 45 "gpu_device.impala" double* _23167; _23167 = _23164 + _23166; #line 39 "gaussian.impala" bool _23130; _23130 = v_anchor_23129 <= gid_y_23124; #line 39 "gaussian.impala" if (_23130) goto l23131; else goto l23177; l23177: ; #line 49 "gaussian.impala" goto l23171; l23131: ; #line 39 "gaussian.impala" int _23132; _23132 = _23125 - v_anchor_23129; #line 39 "gaussian.impala" bool _23133; _23133 = gid_y_23124 < _23132; #line 39 "gaussian.impala" if (_23133) goto l23134; else goto l23170; l23170: ; #line 49 "gaussian.impala" goto l23171; l23171: ; #line 50 "gpu_device.impala" double* _23172; _23172 = _23151 + _23166; #line 50 "gpu_device.impala" double _23173; _23173 = *_23172; #line 50 "gpu_device.impala" double _23175; _23175 = _23173; #line 45 "gpu_device.impala" *_23167 = _23175; return ; l23134: ; #line 41 "gaussian.impala" int _23139; _23139 = 1 + v_anchor_23129; #line 55 "gpu_device.impala" struct_Buffer_5425 _23143; _23143 = _20778_23095.e0; #line 41 "gaussian.impala" int _23169; _23169 = 0 - v_anchor_23129; #line 55 "gpu_device.impala" char* _23144; _23144 = _23143.e1; #line 55 "gpu_device.impala" double* _23145; union { double* dst; char* src; } u_23145; u_23145.src = _23144; _23145 = u_23145.dst; for(int i = 0; i < blockDim.x + 6; i += blockDim.x) { for(int j = 0; j < blockDim.y + 6; j += blockDim.y) { if(threadIdx.x + i < blockDim.x + 6 && threadIdx.y + j < blockDim.y + 6 && ((blockIdx.x * blockDim.x + threadIdx.x) - 3 + i) >= 0 && ((blockIdx.x * blockDim.x + threadIdx.x) - 3 + i) < IMAGE_WIDTH && ((blockIdx.y * blockDim.y + threadIdx.y) - 3 + j) >= 0 && ((blockIdx.y * blockDim.y + threadIdx.y) - 3 + j) < IMAGE_HEIGHT) { ds_img[threadIdx.x + i][threadIdx.y + j] = \ _23145[((blockIdx.y * blockDim.y + threadIdx.y) - 3 + j) * IMAGE_WIDTH + ((blockIdx.x * blockDim.x + threadIdx.x) - 3 + i)]; } } } __syncthreads(); #line 19 "gpu_device.impala" p_23136 = _23169; psum_23138 = 0.000000e+00; goto l23135; l23135: ; _23136 = p_23136; sum_23138 = psum_23138; #line 19 "gpu_device.impala" bool _23140; _23140 = _23136 < _23139; #line 19 "gpu_device.impala" if (_23140) goto l23141; else goto l23162; l23162: ; #line 45 "gpu_device.impala" *_23167 = sum_23138; return ; l23141: ; #line 43 "gaussian.impala" int _23146; _23146 = _23136 + v_anchor_23129; #line 43 "gaussian.impala" int _23152; _23152 = gid_y_23124 + _23136; #line 50 "gpu_device.impala" int _23153; _23153 = _23152 * _23120; #line 23 "gpu_device.impala" int _23142; _23142 = 1 + _23136; #line 54 "gpu_device.impala" double* i_23147; i_23147 = ds_img[_23146 + 3 - blockIdx.x * blockDim.x][_23146 + 3 - blockIdx.y * blockDim.y]; #line 55 "gpu_device.impala" double _23148; _23148 = *i_23147; #line 50 "gpu_device.impala" int _23154; _23154 = _23153 + gid_x_23119; #line 55 "gpu_device.impala" double _23158; _23158 = _23148; #line 50 "gpu_device.impala" double* _23155; _23155 = _23151 + _23154; #line 50 "gpu_device.impala" double _23156; _23156 = *_23155; #line 50 "gpu_device.impala" double _23159; _23159 = _23156; #line 43 "gaussian.impala" double _23160; _23160 = _23158 * _23159; #line 43 "gaussian.impala" double _23161; _23161 = sum_23138 + _23160; #line 19 "gpu_device.impala" p_23136 = _23142; psum_23138 = _23161; goto l23135; } }
802d739ce522ddb608a03e5db733e74a0825f07d.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>

// Prints the per-block shared memory size (in bytes) of every visible HIP device.
int main() {
  int nDevices;
  hipGetDeviceCount(&nDevices);
  for (int i = 0; i < nDevices; i++) {
    hipDeviceProp_t prop;
    hipGetDeviceProperties(&prop, i);
    // sharedMemPerBlock is a size_t, so use the portable %zu format specifier.
    printf("%zu\n", prop.sharedMemPerBlock);
  }
}
802d739ce522ddb608a03e5db733e74a0825f07d.cu
#include <stdio.h>

// Prints the per-block shared memory size (in bytes) of every visible CUDA device.
int main() {
  int nDevices;
  cudaGetDeviceCount(&nDevices);
  for (int i = 0; i < nDevices; i++) {
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, i);
    // sharedMemPerBlock is a size_t, so use the portable %zu format specifier.
    printf("%zu\n", prop.sharedMemPerBlock);
  }
}
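The query above ignores the runtime API return codes. A variant with basic error handling is sketched below; the output format and messages are illustrative only.

#include <cstdio>
#include <cuda_runtime.h>

// Same device query, but checking each runtime API call.
int main() {
  int nDevices = 0;
  cudaError_t err = cudaGetDeviceCount(&nDevices);
  if (err != cudaSuccess) {
    fprintf(stderr, "cudaGetDeviceCount failed: %s\n", cudaGetErrorString(err));
    return 1;
  }
  for (int i = 0; i < nDevices; i++) {
    cudaDeviceProp prop;
    err = cudaGetDeviceProperties(&prop, i);
    if (err != cudaSuccess) {
      fprintf(stderr, "cudaGetDeviceProperties(%d) failed: %s\n", i, cudaGetErrorString(err));
      continue;
    }
    printf("device %d: sharedMemPerBlock = %zu bytes\n", i, prop.sharedMemPerBlock);
  }
  return 0;
}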
graph_gpu_wrapper.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/fluid/framework/fleet/heter_ps/graph_gpu_wrapper.h" #include <sstream> #include "paddle/fluid/framework/fleet/fleet_wrapper.h" #include "paddle/fluid/framework/fleet/heter_ps/gpu_graph_utils.h" #include "paddle/fluid/framework/fleet/heter_ps/graph_gpu_ps_table.h" #include "paddle/fluid/framework/fleet/heter_ps/heter_resource.h" DECLARE_int32(gpugraph_storage_mode); DECLARE_bool(graph_metapath_split_opt); namespace paddle { namespace framework { #ifdef PADDLE_WITH_HETERPS std::shared_ptr<GraphGpuWrapper> GraphGpuWrapper::s_instance_(nullptr); void GraphGpuWrapper::set_device(std::vector<int> ids) { for (auto device_id : ids) { device_id_mapping.push_back(device_id); } } void GraphGpuWrapper::init_conf(const std::string &first_node_type, const std::string &meta_path) { static std::mutex mutex; { std::lock_guard<std::mutex> lock(mutex); if (conf_initialized_) { return; } VLOG(2) << "init path config"; conf_initialized_ = true; auto node_types = paddle::string::split_string<std::string>(first_node_type, ";"); VLOG(2) << "node_types: " << first_node_type; for (auto &type : node_types) { auto iter = feature_to_id.find(type); PADDLE_ENFORCE_NE(iter, feature_to_id.end(), platform::errors::NotFound( "(%s) is not found in feature_to_id.", type)); VLOG(2) << "feature_to_id[" << type << "] = " << iter->second; first_node_type_.push_back(iter->second); } meta_path_.resize(first_node_type_.size()); auto meta_paths = paddle::string::split_string<std::string>(meta_path, ";"); for (size_t i = 0; i < meta_paths.size(); i++) { auto path = meta_paths[i]; auto nodes = paddle::string::split_string<std::string>(path, "-"); for (auto &node : nodes) { auto iter = edge_to_id.find(node); PADDLE_ENFORCE_NE(iter, edge_to_id.end(), platform::errors::NotFound( "(%s) is not found in edge_to_id.", node)); VLOG(2) << "edge_to_id[" << node << "] = " << iter->second; meta_path_[i].push_back(iter->second); } } int max_dev_id = 0; for (size_t i = 0; i < device_id_mapping.size(); i++) { if (device_id_mapping[i] > max_dev_id) { max_dev_id = device_id_mapping[i]; } } finish_node_type_.resize(max_dev_id + 1); node_type_start_.resize(max_dev_id + 1); global_infer_node_type_start_.resize(max_dev_id + 1); for (size_t i = 0; i < device_id_mapping.size(); i++) { int dev_id = device_id_mapping[i]; auto &node_type_start = node_type_start_[i]; auto &infer_node_type_start = global_infer_node_type_start_[i]; auto &finish_node_type = finish_node_type_[i]; finish_node_type.clear(); for (size_t idx = 0; idx < feature_to_id.size(); idx++) { infer_node_type_start[idx] = 0; } for (auto &type : node_types) { auto iter = feature_to_id.find(type); node_type_start[iter->second] = 0; infer_node_type_start[iter->second] = 0; } infer_cursor_.push_back(0); cursor_.push_back(0); } init_type_keys(); } } void GraphGpuWrapper::init_type_keys() { size_t thread_num = 
device_id_mapping.size(); int cnt = 0; auto &graph_all_type_total_keys = get_graph_type_keys(); auto &type_to_index = get_graph_type_to_index(); std::vector<std::vector<uint64_t>> tmp_keys; tmp_keys.resize(thread_num); int first_node_idx; d_graph_all_type_total_keys_.resize(graph_all_type_total_keys.size()); h_graph_all_type_keys_len_.resize(graph_all_type_total_keys.size()); for (size_t f_idx = 0; f_idx < graph_all_type_total_keys.size(); f_idx++) { for (size_t j = 0; j < tmp_keys.size(); j++) { tmp_keys[j].clear(); } d_graph_all_type_total_keys_[f_idx].resize(thread_num); auto &type_total_key = graph_all_type_total_keys[f_idx]; for (size_t j = 0; j < type_total_key.size(); j++) { uint64_t shard = type_total_key[j] % thread_num; tmp_keys[shard].push_back(type_total_key[j]); } for (size_t j = 0; j < thread_num; j++) { h_graph_all_type_keys_len_[f_idx].push_back(tmp_keys[j].size()); VLOG(1) << "node type: " << type_to_index[f_idx] << ", gpu_graph_device_keys[" << j << "] = " << tmp_keys[j].size(); } for (size_t j = 0; j < thread_num; j++) { auto stream = get_local_stream(j); int gpuid = device_id_mapping[j]; auto place = platform::CUDAPlace(gpuid); platform::CUDADeviceGuard guard(gpuid); d_graph_all_type_total_keys_[f_idx][j] = memory::AllocShared(place, tmp_keys[j].size() * sizeof(uint64_t)); hipMemcpyAsync(d_graph_all_type_total_keys_[f_idx][j]->ptr(), tmp_keys[j].data(), sizeof(uint64_t) * tmp_keys[j].size(), hipMemcpyHostToDevice, stream); } } for (int i = 0; i < thread_num; i++) { auto stream = get_local_stream(i); hipStreamSynchronize(stream); } } void GraphGpuWrapper::init_metapath(std::string cur_metapath, int cur_metapath_index, int cur_metapath_len) { cur_metapath_ = cur_metapath; cur_metapath_index_ = cur_metapath_index; cur_metapath_len_ = cur_metapath_len; auto nodes = paddle::string::split_string<std::string>(cur_metapath_, "-"); cur_parse_metapath_.clear(); cur_parse_reverse_metapath_.clear(); for (auto &node : nodes) { VLOG(2) << "node: " << node << " , in metapath: " << cur_metapath_; auto iter = edge_to_id.find(node); PADDLE_ENFORCE_NE( iter, edge_to_id.end(), platform::errors::NotFound("(%s) is not found in edge_to_id.", node)); cur_parse_metapath_.push_back(iter->second); auto etype_split = paddle::string::split_string<std::string>(node, "2"); std::string reverse_type = etype_split[1] + "2" + etype_split[0]; iter = edge_to_id.find(reverse_type); PADDLE_ENFORCE_NE(iter, edge_to_id.end(), platform::errors::NotFound( "(%s) is not found in edge_to_id.", reverse_type)); cur_parse_reverse_metapath_.push_back(iter->second); } size_t thread_num = device_id_mapping.size(); cur_metapath_start_.resize(thread_num); for (size_t i = 0; i < thread_num; i++) { cur_metapath_start_[i] = 0; } auto &graph_all_type_total_keys = get_graph_type_keys(); auto &type_to_index = get_graph_type_to_index(); std::vector<std::vector<uint64_t>> tmp_keys; tmp_keys.resize(thread_num); int first_node_idx; std::string first_node = paddle::string::split_string<std::string>(cur_metapath_, "2")[0]; auto it = feature_to_id.find(first_node); first_node_idx = it->second; d_graph_train_total_keys_.resize(thread_num); h_graph_train_keys_len_.resize(thread_num); for (size_t j = 0; j < tmp_keys.size(); j++) { tmp_keys[j].clear(); } size_t f_idx = type_to_index[first_node_idx]; auto &type_total_key = graph_all_type_total_keys[f_idx]; VLOG(2) << "first node type:" << first_node_idx << ", node start size:" << type_total_key.size(); for (size_t j = 0; j < type_total_key.size(); j++) { uint64_t shard = type_total_key[j] % 
thread_num; tmp_keys[shard].push_back(type_total_key[j]); } auto fleet_ptr = framework::FleetWrapper::GetInstance(); std::shuffle( tmp_keys.begin(), tmp_keys.end(), fleet_ptr->LocalRandomEngine()); for (size_t j = 0; j < thread_num; j++) { h_graph_train_keys_len_[j] = tmp_keys[j].size(); VLOG(2) << j << " th card, graph train keys len: " << tmp_keys[j].size(); } for (size_t j = 0; j < thread_num; j++) { auto stream = get_local_stream(j); int gpuid = device_id_mapping[j]; auto place = platform::CUDAPlace(gpuid); platform::CUDADeviceGuard guard(gpuid); d_graph_train_total_keys_[j] = memory::AllocShared(place, tmp_keys[j].size() * sizeof(uint64_t)); hipMemcpyAsync(d_graph_train_total_keys_[j]->ptr(), tmp_keys[j].data(), sizeof(uint64_t) * tmp_keys[j].size(), hipMemcpyHostToDevice, stream); } } void GraphGpuWrapper::clear_metapath_state() { size_t thread_num = device_id_mapping.size(); for (size_t j = 0; j < thread_num; j++) { cur_metapath_start_[j] = 0; h_graph_train_keys_len_[j] = 0; d_graph_train_total_keys_[j].reset(); for (size_t k = 0; k < cur_parse_metapath_.size(); k++) { reinterpret_cast<GpuPsGraphTable *>(graph_table) ->clear_graph_info(j, cur_parse_metapath_[k]); } } std::vector<int> clear_etype; for (size_t j = 0; j < cur_parse_metapath_.size(); j++) { if (find(clear_etype.begin(), clear_etype.end(), cur_parse_metapath_[j]) == clear_etype.end()) { clear_etype.push_back(cur_parse_metapath_[j]); } } for (size_t j = 0; j < cur_parse_reverse_metapath_.size(); j++) { if (find(clear_etype.begin(), clear_etype.end(), cur_parse_reverse_metapath_[j]) == clear_etype.end()) { clear_etype.push_back(cur_parse_reverse_metapath_[j]); } } for (size_t j = 0; j < clear_etype.size(); j++) { reinterpret_cast<GpuPsGraphTable *>(graph_table) ->cpu_graph_table_->clear_graph(clear_etype[j]); } } int GraphGpuWrapper::get_all_id(int type, int slice_num, std::vector<std::vector<uint64_t>> *output) { return reinterpret_cast<GpuPsGraphTable *>(graph_table) ->cpu_graph_table_->get_all_id(type, slice_num, output); } int GraphGpuWrapper::get_all_neighbor_id( int type, int slice_num, std::vector<std::vector<uint64_t>> *output) { return reinterpret_cast<GpuPsGraphTable *>(graph_table) ->cpu_graph_table_->get_all_neighbor_id(type, slice_num, output); } int GraphGpuWrapper::get_all_id(int type, int idx, int slice_num, std::vector<std::vector<uint64_t>> *output) { return reinterpret_cast<GpuPsGraphTable *>(graph_table) ->cpu_graph_table_->get_all_id(type, idx, slice_num, output); } int GraphGpuWrapper::get_all_neighbor_id( int type, int idx, int slice_num, std::vector<std::vector<uint64_t>> *output) { return reinterpret_cast<GpuPsGraphTable *>(graph_table) ->cpu_graph_table_->get_all_neighbor_id(type, idx, slice_num, output); } int GraphGpuWrapper::get_all_feature_ids( int type, int idx, int slice_num, std::vector<std::vector<uint64_t>> *output) { return reinterpret_cast<GpuPsGraphTable *>(graph_table) ->cpu_graph_table_->get_all_feature_ids(type, idx, slice_num, output); } void GraphGpuWrapper::set_up_types(const std::vector<std::string> &edge_types, const std::vector<std::string> &node_types) { id_to_edge = edge_types; for (size_t table_id = 0; table_id < edge_types.size(); table_id++) { int res = edge_to_id.size(); edge_to_id[edge_types[table_id]] = res; } id_to_feature = node_types; for (size_t table_id = 0; table_id < node_types.size(); table_id++) { int res = feature_to_id.size(); feature_to_id[node_types[table_id]] = res; } table_feat_mapping.resize(node_types.size()); 
this->table_feat_conf_feat_name.resize(node_types.size()); this->table_feat_conf_feat_dtype.resize(node_types.size()); this->table_feat_conf_feat_shape.resize(node_types.size()); } void GraphGpuWrapper::set_feature_separator(std::string ch) { feature_separator_ = ch; if (graph_table != nullptr) { reinterpret_cast<GpuPsGraphTable *>(graph_table) ->cpu_graph_table_->set_feature_separator(feature_separator_); } } void GraphGpuWrapper::set_slot_feature_separator(std::string ch) { slot_feature_separator_ = ch; if (graph_table != nullptr) { reinterpret_cast<GpuPsGraphTable *>(graph_table) ->cpu_graph_table_->set_slot_feature_separator(slot_feature_separator_); } } void GraphGpuWrapper::make_partitions(int idx, int64_t byte_size, int device_len) { reinterpret_cast<GpuPsGraphTable *>(graph_table) ->cpu_graph_table_->make_partitions(idx, byte_size, device_len); } int32_t GraphGpuWrapper::load_next_partition(int idx) { return reinterpret_cast<GpuPsGraphTable *>(graph_table) ->cpu_graph_table_->load_next_partition(idx); } void GraphGpuWrapper::set_search_level(int level) { reinterpret_cast<GpuPsGraphTable *>(graph_table) ->cpu_graph_table_->set_search_level(level); } std::vector<uint64_t> GraphGpuWrapper::get_partition(int idx, int num) { return reinterpret_cast<GpuPsGraphTable *>(graph_table) ->cpu_graph_table_->get_partition(idx, num); } int32_t GraphGpuWrapper::get_partition_num(int idx) { return reinterpret_cast<GpuPsGraphTable *>(graph_table) ->cpu_graph_table_->get_partition_num(idx); } void GraphGpuWrapper::make_complementary_graph(int idx, int64_t byte_size) { reinterpret_cast<GpuPsGraphTable *>(graph_table) ->cpu_graph_table_->make_complementary_graph(idx, byte_size); } void GraphGpuWrapper::load_edge_file(std::string name, std::string filepath, bool reverse) { // 'e' means load edge std::string params = "e"; if (reverse) { // 'e<' means load edges from $2 to $1 params += "<" + name; } else { // 'e>' means load edges from $1 to $2 params += ">" + name; } if (edge_to_id.find(name) != edge_to_id.end()) { reinterpret_cast<GpuPsGraphTable *>(graph_table) ->cpu_graph_table_->Load(std::string(filepath), params); } } void GraphGpuWrapper::load_edge_file(std::string etype2files, std::string graph_data_local_path, int part_num, bool reverse) { reinterpret_cast<GpuPsGraphTable *>(graph_table) ->cpu_graph_table_->parse_edge_and_load( etype2files, graph_data_local_path, part_num, reverse); } void GraphGpuWrapper::load_node_file(std::string name, std::string filepath) { // 'n' means load nodes and 'node_type' follows std::string params = "n" + name; if (feature_to_id.find(name) != feature_to_id.end()) { reinterpret_cast<GpuPsGraphTable *>(graph_table) ->cpu_graph_table_->Load(std::string(filepath), params); } } void GraphGpuWrapper::load_node_file(std::string ntype2files, std::string graph_data_local_path, int part_num) { reinterpret_cast<GpuPsGraphTable *>(graph_table) ->cpu_graph_table_->parse_node_and_load( ntype2files, graph_data_local_path, part_num); } void GraphGpuWrapper::load_node_and_edge(std::string etype2files, std::string ntype2files, std::string graph_data_local_path, int part_num, bool reverse) { reinterpret_cast<GpuPsGraphTable *>(graph_table) ->cpu_graph_table_->load_node_and_edge_file( etype2files, ntype2files, graph_data_local_path, part_num, reverse); } void GraphGpuWrapper::add_table_feat_conf(std::string table_name, std::string feat_name, std::string feat_dtype, int feat_shape) { if (feature_to_id.find(table_name) != feature_to_id.end()) { int idx = feature_to_id[table_name]; if 
(table_feat_mapping[idx].find(feat_name) == table_feat_mapping[idx].end()) { int res = table_feat_mapping[idx].size(); table_feat_mapping[idx][feat_name] = res; } int feat_idx = table_feat_mapping[idx][feat_name]; VLOG(0) << "table_name " << table_name << " mapping id " << idx; VLOG(0) << " feat name " << feat_name << " feat id" << feat_idx; if (feat_idx < table_feat_conf_feat_name[idx].size()) { // overide table_feat_conf_feat_name[idx][feat_idx] = feat_name; table_feat_conf_feat_dtype[idx][feat_idx] = feat_dtype; table_feat_conf_feat_shape[idx][feat_idx] = feat_shape; } else { // new table_feat_conf_feat_name[idx].push_back(feat_name); table_feat_conf_feat_dtype[idx].push_back(feat_dtype); table_feat_conf_feat_shape[idx].push_back(feat_shape); } } VLOG(0) << "add conf over"; } void GraphGpuWrapper::init_search_level(int level) { search_level = level; } gpuStream_t GraphGpuWrapper::get_local_stream(int gpuid) { return reinterpret_cast<GpuPsGraphTable *>(graph_table) ->get_local_stream(gpuid); } void GraphGpuWrapper::init_service() { table_proto.set_task_pool_size(64); table_proto.set_shard_num(1000); table_proto.set_build_sampler_on_cpu(false); table_proto.set_search_level(search_level); table_proto.set_table_name("cpu_graph_table_"); table_proto.set_use_cache(false); for (int i = 0; i < id_to_edge.size(); i++) table_proto.add_edge_types(id_to_edge[i]); for (int i = 0; i < id_to_feature.size(); i++) { table_proto.add_node_types(id_to_feature[i]); auto feat_node = id_to_feature[i]; ::paddle::distributed::GraphFeature *g_f = table_proto.add_graph_feature(); for (int x = 0; x < table_feat_conf_feat_name[i].size(); x++) { g_f->add_name(table_feat_conf_feat_name[i][x]); g_f->add_dtype(table_feat_conf_feat_dtype[i][x]); g_f->add_shape(table_feat_conf_feat_shape[i][x]); } } std::shared_ptr<HeterPsResource> resource = std::make_shared<HeterPsResource>(device_id_mapping); resource->enable_p2p(); GpuPsGraphTable *g = new GpuPsGraphTable(resource, id_to_edge.size()); size_t gpu_num = device_id_mapping.size(); g->init_cpu_table(table_proto, gpu_num); g->cpu_graph_table_->set_feature_separator(feature_separator_); g->cpu_graph_table_->set_slot_feature_separator(slot_feature_separator_); graph_table = reinterpret_cast<char *>(g); upload_num = gpu_num; upload_task_pool.reset(new ::ThreadPool(upload_num)); } void GraphGpuWrapper::finalize() { reinterpret_cast<GpuPsGraphTable *>(graph_table)->show_table_collisions(); } void GraphGpuWrapper::upload_batch(int type, int idx, int slice_num, const std::string &edge_type) { VLOG(0) << "begin upload edge, type[" << edge_type << "]"; auto iter = edge_to_id.find(edge_type); idx = iter->second; VLOG(2) << "cur edge: " << edge_type << ",idx: " << idx; std::vector<std::vector<uint64_t>> ids; reinterpret_cast<GpuPsGraphTable *>(graph_table) ->cpu_graph_table_->get_all_id(type, idx, slice_num, &ids); debug_gpu_memory_info("upload_batch node start"); GpuPsGraphTable *g = reinterpret_cast<GpuPsGraphTable *>(graph_table); std::vector<std::future<int>> tasks; for (int i = 0; i < ids.size(); i++) { tasks.push_back(upload_task_pool->enqueue([&, i, idx, this]() -> int { VLOG(0) << "begin make_gpu_ps_graph, node_id[" << i << "]_size[" << ids[i].size() << "]"; GpuPsCommGraph sub_graph = g->cpu_graph_table_->make_gpu_ps_graph(idx, ids[i]); g->build_graph_on_single_gpu(sub_graph, i, idx); sub_graph.release_on_cpu(); VLOG(0) << "sub graph on gpu " << i << " is built"; return 0; })); } for (size_t i = 0; i < tasks.size(); i++) tasks[i].get(); debug_gpu_memory_info("upload_batch 
node end"); } // feature table void GraphGpuWrapper::upload_batch(int type, int slice_num, int slot_num) { if (type == 1 && (FLAGS_gpugraph_storage_mode == paddle::framework::GpuGraphStorageMode:: MEM_EMB_FEATURE_AND_GPU_GRAPH || FLAGS_gpugraph_storage_mode == paddle::framework::GpuGraphStorageMode:: SSD_EMB_AND_MEM_FEATURE_GPU_GRAPH)) { return; } std::vector<std::vector<uint64_t>> node_ids; reinterpret_cast<GpuPsGraphTable *>(graph_table) ->cpu_graph_table_->get_all_id(type, slice_num, &node_ids); debug_gpu_memory_info("upload_batch feature start"); GpuPsGraphTable *g = reinterpret_cast<GpuPsGraphTable *>(graph_table); std::vector<std::future<int>> tasks; for (int i = 0; i < node_ids.size(); i++) { tasks.push_back(upload_task_pool->enqueue([&, i, this]() -> int { VLOG(0) << "begin make_gpu_ps_graph_fea, node_ids[" << i << "]_size[" << node_ids[i].size() << "]"; GpuPsCommGraphFea sub_graph = g->cpu_graph_table_->make_gpu_ps_graph_fea(i, node_ids[i], slot_num); // sub_graph.display_on_cpu(); VLOG(0) << "begin build_graph_fea_on_single_gpu, node_ids[" << i << "]_size[" << node_ids[i].size() << "]"; g->build_graph_fea_on_single_gpu(sub_graph, i); sub_graph.release_on_cpu(); VLOG(0) << "sub graph fea on gpu " << i << " is built"; return 0; })); } for (size_t i = 0; i < tasks.size(); i++) tasks[i].get(); // g->build_graph_from_cpu(vec); debug_gpu_memory_info("upload_batch feature end"); } // get sub_graph_fea std::vector<GpuPsCommGraphFea> GraphGpuWrapper::get_sub_graph_fea( std::vector<std::vector<uint64_t>> &node_ids, int slot_num) { GpuPsGraphTable *g = reinterpret_cast<GpuPsGraphTable *>(graph_table); std::vector<std::future<int>> tasks; std::vector<GpuPsCommGraphFea> sub_graph_feas(node_ids.size()); for (int i = 0; i < node_ids.size(); i++) { tasks.push_back(upload_task_pool->enqueue([&, i, this]() -> int { GpuPsGraphTable *g = reinterpret_cast<GpuPsGraphTable *>(graph_table); sub_graph_feas[i] = g->cpu_graph_table_->make_gpu_ps_graph_fea(i, node_ids[i], slot_num); return 0; })); } for (size_t i = 0; i < tasks.size(); i++) tasks[i].get(); return sub_graph_feas; } // build_gpu_graph_fea void GraphGpuWrapper::build_gpu_graph_fea(GpuPsCommGraphFea &sub_graph_fea, int i) { GpuPsGraphTable *g = reinterpret_cast<GpuPsGraphTable *>(graph_table); g->build_graph_fea_on_single_gpu(sub_graph_fea, i); sub_graph_fea.release_on_cpu(); VLOG(0) << "sub graph fea on gpu " << i << " is built"; return; } NeighborSampleResult GraphGpuWrapper::graph_neighbor_sample_v3( NeighborSampleQuery q, bool cpu_switch, bool compress = true) { return reinterpret_cast<GpuPsGraphTable *>(graph_table) ->graph_neighbor_sample_v3(q, cpu_switch, compress); } NeighborSampleResultV2 GraphGpuWrapper::graph_neighbor_sample_all_edge_type( int gpu_id, int edge_type_len, uint64_t *key, int sample_size, int len, std::vector<std::shared_ptr<phi::Allocation>> edge_type_graphs) { return reinterpret_cast<GpuPsGraphTable *>(graph_table) ->graph_neighbor_sample_all_edge_type( gpu_id, edge_type_len, key, sample_size, len, edge_type_graphs); } std::vector<std::shared_ptr<phi::Allocation>> GraphGpuWrapper::get_edge_type_graph(int gpu_id, int edge_type_len) { return reinterpret_cast<GpuPsGraphTable *>(graph_table) ->get_edge_type_graph(gpu_id, edge_type_len); } int GraphGpuWrapper::get_feature_info_of_nodes( int gpu_id, uint64_t *d_nodes, int node_num, uint32_t *size_list, uint32_t *size_list_prefix_sum, std::shared_ptr<phi::Allocation> &feature_list, std::shared_ptr<phi::Allocation> &slot_list) { platform::CUDADeviceGuard guard(gpu_id); 
PADDLE_ENFORCE_NOT_NULL(graph_table, paddle::platform::errors::InvalidArgument( "graph_table should not be null")); return reinterpret_cast<GpuPsGraphTable *>(graph_table) ->get_feature_info_of_nodes(gpu_id, d_nodes, node_num, size_list, size_list_prefix_sum, feature_list, slot_list); } int GraphGpuWrapper::get_feature_of_nodes(int gpu_id, uint64_t *d_walk, uint64_t *d_offset, uint32_t size, int slot_num, int *d_slot_feature_num_map, int fea_num_per_node) { platform::CUDADeviceGuard guard(gpu_id); PADDLE_ENFORCE_NOT_NULL(graph_table, paddle::platform::errors::InvalidArgument( "graph_table should not be null")); return reinterpret_cast<GpuPsGraphTable *>(graph_table) ->get_feature_of_nodes(gpu_id, d_walk, d_offset, size, slot_num, d_slot_feature_num_map, fea_num_per_node); } NeighborSampleResult GraphGpuWrapper::graph_neighbor_sample( int gpu_id, uint64_t *device_keys, int walk_degree, int len) { platform::CUDADeviceGuard guard(gpu_id); auto neighbor_sample_res = reinterpret_cast<GpuPsGraphTable *>(graph_table) ->graph_neighbor_sample(gpu_id, device_keys, walk_degree, len); return neighbor_sample_res; } // this function is contributed by Liwb5 std::vector<uint64_t> GraphGpuWrapper::graph_neighbor_sample( int gpu_id, int idx, std::vector<uint64_t> &key, int sample_size) { std::vector<uint64_t> res; if (key.size() == 0) { return res; } uint64_t *cuda_key; platform::CUDADeviceGuard guard(gpu_id); hipMalloc(&cuda_key, key.size() * sizeof(uint64_t)); hipMemcpy(cuda_key, key.data(), key.size() * sizeof(uint64_t), hipMemcpyHostToDevice); VLOG(0) << "key_size: " << key.size(); auto neighbor_sample_res = reinterpret_cast<GpuPsGraphTable *>(graph_table) ->graph_neighbor_sample_v2( gpu_id, idx, cuda_key, sample_size, key.size(), false, true); int *actual_sample_size = new int[key.size()]; hipMemcpy(actual_sample_size, neighbor_sample_res.actual_sample_size, key.size() * sizeof(int), hipMemcpyDeviceToHost); // 3, 1, 3 int cumsum = 0; for (int i = 0; i < key.size(); i++) { cumsum += actual_sample_size[i]; } std::vector<uint64_t> cpu_key; cpu_key.resize(key.size() * sample_size); hipMemcpy(cpu_key.data(), neighbor_sample_res.val, key.size() * sample_size * sizeof(uint64_t), hipMemcpyDeviceToHost); for (int i = 0; i < key.size(); i++) { for (int j = 0; j < actual_sample_size[i]; j++) { res.push_back(key[i]); res.push_back(cpu_key[i * sample_size + j]); } } delete[] actual_sample_size; hipFree(cuda_key); return res; } NodeQueryResult GraphGpuWrapper::query_node_list(int gpu_id, int idx, int start, int query_size) { PADDLE_ENFORCE_EQ(FLAGS_gpugraph_load_node_list_into_hbm, true, paddle::platform::errors::PreconditionNotMet( "when use query_node_list should set " "gpugraph_load_node_list_into_hbm true")); return reinterpret_cast<GpuPsGraphTable *>(graph_table) ->query_node_list(gpu_id, idx, start, query_size); } void GraphGpuWrapper::load_node_weight(int type_id, int idx, std::string path) { return reinterpret_cast<GpuPsGraphTable *>(graph_table) ->cpu_graph_table_->load_node_weight(type_id, idx, path); } std::vector<int> GraphGpuWrapper::slot_feature_num_map() const { return reinterpret_cast<GpuPsGraphTable *>(graph_table) ->cpu_graph_table_->slot_feature_num_map(); } void GraphGpuWrapper::export_partition_files(int idx, std::string file_path) { return reinterpret_cast<GpuPsGraphTable *>(graph_table) ->cpu_graph_table_->export_partition_files(idx, file_path); } void GraphGpuWrapper::release_graph() { return reinterpret_cast<GpuPsGraphTable *>(graph_table) ->cpu_graph_table_->release_graph(); } void 
GraphGpuWrapper::release_graph_edge() { return reinterpret_cast<GpuPsGraphTable *>(graph_table) ->cpu_graph_table_->release_graph_edge(); } void GraphGpuWrapper::release_graph_node() { return reinterpret_cast<GpuPsGraphTable *>(graph_table) ->cpu_graph_table_->release_graph_node(); } std::vector<uint64_t> &GraphGpuWrapper::get_graph_total_keys() { return reinterpret_cast<GpuPsGraphTable *>(graph_table) ->cpu_graph_table_->graph_total_keys_; } std::vector<std::vector<uint64_t>> &GraphGpuWrapper::get_graph_type_keys() { return reinterpret_cast<GpuPsGraphTable *>(graph_table) ->cpu_graph_table_->graph_type_keys_; } std::unordered_map<int, int> &GraphGpuWrapper::get_graph_type_to_index() { return reinterpret_cast<GpuPsGraphTable *>(graph_table) ->cpu_graph_table_->type_to_index_; } std::string &GraphGpuWrapper::get_node_type_size(std::string first_node_type) { auto node_types = paddle::string::split_string<std::string>(first_node_type, ";"); for (auto &type : node_types) { uniq_first_node_.insert(type); } auto &graph_all_type_total_keys = get_graph_type_keys(); auto &type_to_index = get_graph_type_to_index(); std::vector<std::string> node_type_size; for (auto node : uniq_first_node_) { auto it = feature_to_id.find(node); auto first_node_idx = it->second; size_t f_idx = type_to_index[first_node_idx]; int type_total_key_size = graph_all_type_total_keys[f_idx].size(); std::string node_type_str = node + ":" + std::to_string(type_total_key_size); node_type_size.push_back(node_type_str); } std::string delim = ";"; node_type_size_str_ = paddle::string::join_strings(node_type_size, delim); return node_type_size_str_; } std::string &GraphGpuWrapper::get_edge_type_size() { auto edge_type_size = reinterpret_cast<GpuPsGraphTable *>(graph_table) ->cpu_graph_table_->edge_type_size; std::string delim = ";"; edge_type_size_str_ = paddle::string::join_strings(edge_type_size, delim); std::cout << "edge_type_size_str: " << edge_type_size_str_ << std::endl; return edge_type_size_str_; } #endif } // namespace framework }; // namespace paddle
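A minimal host-side sketch (plain C++, not part of the Paddle sources) of the result layout used by the vector overload of GraphGpuWrapper::graph_neighbor_sample above: neighbours of key i occupy val[i * sample_size] through val[i * sample_size + actual_sample_size[i] - 1], and the wrapper flattens them into (source, neighbour) pairs. Function and parameter names below are illustrative only.

#include <cstddef>
#include <cstdint>
#include <vector>

// Rebuild (src, dst) pairs from the flat sampler output, mirroring the loop
// at the end of graph_neighbor_sample(gpu_id, idx, key, sample_size).
std::vector<uint64_t> unpack_neighbor_pairs(
    const std::vector<uint64_t>& keys,
    const std::vector<uint64_t>& val,            // keys.size() * sample_size entries
    const std::vector<int>& actual_sample_size,  // valid neighbours per key
    int sample_size) {
  std::vector<uint64_t> pairs;  // interleaved: src0, dst0, src1, dst1, ...
  for (std::size_t i = 0; i < keys.size(); ++i) {
    for (int j = 0; j < actual_sample_size[i]; ++j) {
      pairs.push_back(keys[i]);                   // source node id
      pairs.push_back(val[i * sample_size + j]);  // sampled neighbour id
    }
  }
  return pairs;
}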
graph_gpu_wrapper.cu
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/fluid/framework/fleet/heter_ps/graph_gpu_wrapper.h" #include <sstream> #include "paddle/fluid/framework/fleet/fleet_wrapper.h" #include "paddle/fluid/framework/fleet/heter_ps/gpu_graph_utils.h" #include "paddle/fluid/framework/fleet/heter_ps/graph_gpu_ps_table.h" #include "paddle/fluid/framework/fleet/heter_ps/heter_resource.h" DECLARE_int32(gpugraph_storage_mode); DECLARE_bool(graph_metapath_split_opt); namespace paddle { namespace framework { #ifdef PADDLE_WITH_HETERPS std::shared_ptr<GraphGpuWrapper> GraphGpuWrapper::s_instance_(nullptr); void GraphGpuWrapper::set_device(std::vector<int> ids) { for (auto device_id : ids) { device_id_mapping.push_back(device_id); } } void GraphGpuWrapper::init_conf(const std::string &first_node_type, const std::string &meta_path) { static std::mutex mutex; { std::lock_guard<std::mutex> lock(mutex); if (conf_initialized_) { return; } VLOG(2) << "init path config"; conf_initialized_ = true; auto node_types = paddle::string::split_string<std::string>(first_node_type, ";"); VLOG(2) << "node_types: " << first_node_type; for (auto &type : node_types) { auto iter = feature_to_id.find(type); PADDLE_ENFORCE_NE(iter, feature_to_id.end(), platform::errors::NotFound( "(%s) is not found in feature_to_id.", type)); VLOG(2) << "feature_to_id[" << type << "] = " << iter->second; first_node_type_.push_back(iter->second); } meta_path_.resize(first_node_type_.size()); auto meta_paths = paddle::string::split_string<std::string>(meta_path, ";"); for (size_t i = 0; i < meta_paths.size(); i++) { auto path = meta_paths[i]; auto nodes = paddle::string::split_string<std::string>(path, "-"); for (auto &node : nodes) { auto iter = edge_to_id.find(node); PADDLE_ENFORCE_NE(iter, edge_to_id.end(), platform::errors::NotFound( "(%s) is not found in edge_to_id.", node)); VLOG(2) << "edge_to_id[" << node << "] = " << iter->second; meta_path_[i].push_back(iter->second); } } int max_dev_id = 0; for (size_t i = 0; i < device_id_mapping.size(); i++) { if (device_id_mapping[i] > max_dev_id) { max_dev_id = device_id_mapping[i]; } } finish_node_type_.resize(max_dev_id + 1); node_type_start_.resize(max_dev_id + 1); global_infer_node_type_start_.resize(max_dev_id + 1); for (size_t i = 0; i < device_id_mapping.size(); i++) { int dev_id = device_id_mapping[i]; auto &node_type_start = node_type_start_[i]; auto &infer_node_type_start = global_infer_node_type_start_[i]; auto &finish_node_type = finish_node_type_[i]; finish_node_type.clear(); for (size_t idx = 0; idx < feature_to_id.size(); idx++) { infer_node_type_start[idx] = 0; } for (auto &type : node_types) { auto iter = feature_to_id.find(type); node_type_start[iter->second] = 0; infer_node_type_start[iter->second] = 0; } infer_cursor_.push_back(0); cursor_.push_back(0); } init_type_keys(); } } void GraphGpuWrapper::init_type_keys() { size_t thread_num = device_id_mapping.size(); int cnt = 0; auto &graph_all_type_total_keys 
= get_graph_type_keys(); auto &type_to_index = get_graph_type_to_index(); std::vector<std::vector<uint64_t>> tmp_keys; tmp_keys.resize(thread_num); int first_node_idx; d_graph_all_type_total_keys_.resize(graph_all_type_total_keys.size()); h_graph_all_type_keys_len_.resize(graph_all_type_total_keys.size()); for (size_t f_idx = 0; f_idx < graph_all_type_total_keys.size(); f_idx++) { for (size_t j = 0; j < tmp_keys.size(); j++) { tmp_keys[j].clear(); } d_graph_all_type_total_keys_[f_idx].resize(thread_num); auto &type_total_key = graph_all_type_total_keys[f_idx]; for (size_t j = 0; j < type_total_key.size(); j++) { uint64_t shard = type_total_key[j] % thread_num; tmp_keys[shard].push_back(type_total_key[j]); } for (size_t j = 0; j < thread_num; j++) { h_graph_all_type_keys_len_[f_idx].push_back(tmp_keys[j].size()); VLOG(1) << "node type: " << type_to_index[f_idx] << ", gpu_graph_device_keys[" << j << "] = " << tmp_keys[j].size(); } for (size_t j = 0; j < thread_num; j++) { auto stream = get_local_stream(j); int gpuid = device_id_mapping[j]; auto place = platform::CUDAPlace(gpuid); platform::CUDADeviceGuard guard(gpuid); d_graph_all_type_total_keys_[f_idx][j] = memory::AllocShared(place, tmp_keys[j].size() * sizeof(uint64_t)); cudaMemcpyAsync(d_graph_all_type_total_keys_[f_idx][j]->ptr(), tmp_keys[j].data(), sizeof(uint64_t) * tmp_keys[j].size(), cudaMemcpyHostToDevice, stream); } } for (int i = 0; i < thread_num; i++) { auto stream = get_local_stream(i); cudaStreamSynchronize(stream); } } void GraphGpuWrapper::init_metapath(std::string cur_metapath, int cur_metapath_index, int cur_metapath_len) { cur_metapath_ = cur_metapath; cur_metapath_index_ = cur_metapath_index; cur_metapath_len_ = cur_metapath_len; auto nodes = paddle::string::split_string<std::string>(cur_metapath_, "-"); cur_parse_metapath_.clear(); cur_parse_reverse_metapath_.clear(); for (auto &node : nodes) { VLOG(2) << "node: " << node << " , in metapath: " << cur_metapath_; auto iter = edge_to_id.find(node); PADDLE_ENFORCE_NE( iter, edge_to_id.end(), platform::errors::NotFound("(%s) is not found in edge_to_id.", node)); cur_parse_metapath_.push_back(iter->second); auto etype_split = paddle::string::split_string<std::string>(node, "2"); std::string reverse_type = etype_split[1] + "2" + etype_split[0]; iter = edge_to_id.find(reverse_type); PADDLE_ENFORCE_NE(iter, edge_to_id.end(), platform::errors::NotFound( "(%s) is not found in edge_to_id.", reverse_type)); cur_parse_reverse_metapath_.push_back(iter->second); } size_t thread_num = device_id_mapping.size(); cur_metapath_start_.resize(thread_num); for (size_t i = 0; i < thread_num; i++) { cur_metapath_start_[i] = 0; } auto &graph_all_type_total_keys = get_graph_type_keys(); auto &type_to_index = get_graph_type_to_index(); std::vector<std::vector<uint64_t>> tmp_keys; tmp_keys.resize(thread_num); int first_node_idx; std::string first_node = paddle::string::split_string<std::string>(cur_metapath_, "2")[0]; auto it = feature_to_id.find(first_node); first_node_idx = it->second; d_graph_train_total_keys_.resize(thread_num); h_graph_train_keys_len_.resize(thread_num); for (size_t j = 0; j < tmp_keys.size(); j++) { tmp_keys[j].clear(); } size_t f_idx = type_to_index[first_node_idx]; auto &type_total_key = graph_all_type_total_keys[f_idx]; VLOG(2) << "first node type:" << first_node_idx << ", node start size:" << type_total_key.size(); for (size_t j = 0; j < type_total_key.size(); j++) { uint64_t shard = type_total_key[j] % thread_num; tmp_keys[shard].push_back(type_total_key[j]); } auto 
fleet_ptr = framework::FleetWrapper::GetInstance(); std::shuffle( tmp_keys.begin(), tmp_keys.end(), fleet_ptr->LocalRandomEngine()); for (size_t j = 0; j < thread_num; j++) { h_graph_train_keys_len_[j] = tmp_keys[j].size(); VLOG(2) << j << " th card, graph train keys len: " << tmp_keys[j].size(); } for (size_t j = 0; j < thread_num; j++) { auto stream = get_local_stream(j); int gpuid = device_id_mapping[j]; auto place = platform::CUDAPlace(gpuid); platform::CUDADeviceGuard guard(gpuid); d_graph_train_total_keys_[j] = memory::AllocShared(place, tmp_keys[j].size() * sizeof(uint64_t)); cudaMemcpyAsync(d_graph_train_total_keys_[j]->ptr(), tmp_keys[j].data(), sizeof(uint64_t) * tmp_keys[j].size(), cudaMemcpyHostToDevice, stream); } } void GraphGpuWrapper::clear_metapath_state() { size_t thread_num = device_id_mapping.size(); for (size_t j = 0; j < thread_num; j++) { cur_metapath_start_[j] = 0; h_graph_train_keys_len_[j] = 0; d_graph_train_total_keys_[j].reset(); for (size_t k = 0; k < cur_parse_metapath_.size(); k++) { reinterpret_cast<GpuPsGraphTable *>(graph_table) ->clear_graph_info(j, cur_parse_metapath_[k]); } } std::vector<int> clear_etype; for (size_t j = 0; j < cur_parse_metapath_.size(); j++) { if (find(clear_etype.begin(), clear_etype.end(), cur_parse_metapath_[j]) == clear_etype.end()) { clear_etype.push_back(cur_parse_metapath_[j]); } } for (size_t j = 0; j < cur_parse_reverse_metapath_.size(); j++) { if (find(clear_etype.begin(), clear_etype.end(), cur_parse_reverse_metapath_[j]) == clear_etype.end()) { clear_etype.push_back(cur_parse_reverse_metapath_[j]); } } for (size_t j = 0; j < clear_etype.size(); j++) { reinterpret_cast<GpuPsGraphTable *>(graph_table) ->cpu_graph_table_->clear_graph(clear_etype[j]); } } int GraphGpuWrapper::get_all_id(int type, int slice_num, std::vector<std::vector<uint64_t>> *output) { return reinterpret_cast<GpuPsGraphTable *>(graph_table) ->cpu_graph_table_->get_all_id(type, slice_num, output); } int GraphGpuWrapper::get_all_neighbor_id( int type, int slice_num, std::vector<std::vector<uint64_t>> *output) { return reinterpret_cast<GpuPsGraphTable *>(graph_table) ->cpu_graph_table_->get_all_neighbor_id(type, slice_num, output); } int GraphGpuWrapper::get_all_id(int type, int idx, int slice_num, std::vector<std::vector<uint64_t>> *output) { return reinterpret_cast<GpuPsGraphTable *>(graph_table) ->cpu_graph_table_->get_all_id(type, idx, slice_num, output); } int GraphGpuWrapper::get_all_neighbor_id( int type, int idx, int slice_num, std::vector<std::vector<uint64_t>> *output) { return reinterpret_cast<GpuPsGraphTable *>(graph_table) ->cpu_graph_table_->get_all_neighbor_id(type, idx, slice_num, output); } int GraphGpuWrapper::get_all_feature_ids( int type, int idx, int slice_num, std::vector<std::vector<uint64_t>> *output) { return reinterpret_cast<GpuPsGraphTable *>(graph_table) ->cpu_graph_table_->get_all_feature_ids(type, idx, slice_num, output); } void GraphGpuWrapper::set_up_types(const std::vector<std::string> &edge_types, const std::vector<std::string> &node_types) { id_to_edge = edge_types; for (size_t table_id = 0; table_id < edge_types.size(); table_id++) { int res = edge_to_id.size(); edge_to_id[edge_types[table_id]] = res; } id_to_feature = node_types; for (size_t table_id = 0; table_id < node_types.size(); table_id++) { int res = feature_to_id.size(); feature_to_id[node_types[table_id]] = res; } table_feat_mapping.resize(node_types.size()); this->table_feat_conf_feat_name.resize(node_types.size()); 
this->table_feat_conf_feat_dtype.resize(node_types.size()); this->table_feat_conf_feat_shape.resize(node_types.size()); } void GraphGpuWrapper::set_feature_separator(std::string ch) { feature_separator_ = ch; if (graph_table != nullptr) { reinterpret_cast<GpuPsGraphTable *>(graph_table) ->cpu_graph_table_->set_feature_separator(feature_separator_); } } void GraphGpuWrapper::set_slot_feature_separator(std::string ch) { slot_feature_separator_ = ch; if (graph_table != nullptr) { reinterpret_cast<GpuPsGraphTable *>(graph_table) ->cpu_graph_table_->set_slot_feature_separator(slot_feature_separator_); } } void GraphGpuWrapper::make_partitions(int idx, int64_t byte_size, int device_len) { reinterpret_cast<GpuPsGraphTable *>(graph_table) ->cpu_graph_table_->make_partitions(idx, byte_size, device_len); } int32_t GraphGpuWrapper::load_next_partition(int idx) { return reinterpret_cast<GpuPsGraphTable *>(graph_table) ->cpu_graph_table_->load_next_partition(idx); } void GraphGpuWrapper::set_search_level(int level) { reinterpret_cast<GpuPsGraphTable *>(graph_table) ->cpu_graph_table_->set_search_level(level); } std::vector<uint64_t> GraphGpuWrapper::get_partition(int idx, int num) { return reinterpret_cast<GpuPsGraphTable *>(graph_table) ->cpu_graph_table_->get_partition(idx, num); } int32_t GraphGpuWrapper::get_partition_num(int idx) { return reinterpret_cast<GpuPsGraphTable *>(graph_table) ->cpu_graph_table_->get_partition_num(idx); } void GraphGpuWrapper::make_complementary_graph(int idx, int64_t byte_size) { reinterpret_cast<GpuPsGraphTable *>(graph_table) ->cpu_graph_table_->make_complementary_graph(idx, byte_size); } void GraphGpuWrapper::load_edge_file(std::string name, std::string filepath, bool reverse) { // 'e' means load edge std::string params = "e"; if (reverse) { // 'e<' means load edges from $2 to $1 params += "<" + name; } else { // 'e>' means load edges from $1 to $2 params += ">" + name; } if (edge_to_id.find(name) != edge_to_id.end()) { reinterpret_cast<GpuPsGraphTable *>(graph_table) ->cpu_graph_table_->Load(std::string(filepath), params); } } void GraphGpuWrapper::load_edge_file(std::string etype2files, std::string graph_data_local_path, int part_num, bool reverse) { reinterpret_cast<GpuPsGraphTable *>(graph_table) ->cpu_graph_table_->parse_edge_and_load( etype2files, graph_data_local_path, part_num, reverse); } void GraphGpuWrapper::load_node_file(std::string name, std::string filepath) { // 'n' means load nodes and 'node_type' follows std::string params = "n" + name; if (feature_to_id.find(name) != feature_to_id.end()) { reinterpret_cast<GpuPsGraphTable *>(graph_table) ->cpu_graph_table_->Load(std::string(filepath), params); } } void GraphGpuWrapper::load_node_file(std::string ntype2files, std::string graph_data_local_path, int part_num) { reinterpret_cast<GpuPsGraphTable *>(graph_table) ->cpu_graph_table_->parse_node_and_load( ntype2files, graph_data_local_path, part_num); } void GraphGpuWrapper::load_node_and_edge(std::string etype2files, std::string ntype2files, std::string graph_data_local_path, int part_num, bool reverse) { reinterpret_cast<GpuPsGraphTable *>(graph_table) ->cpu_graph_table_->load_node_and_edge_file( etype2files, ntype2files, graph_data_local_path, part_num, reverse); } void GraphGpuWrapper::add_table_feat_conf(std::string table_name, std::string feat_name, std::string feat_dtype, int feat_shape) { if (feature_to_id.find(table_name) != feature_to_id.end()) { int idx = feature_to_id[table_name]; if (table_feat_mapping[idx].find(feat_name) == 
table_feat_mapping[idx].end()) { int res = table_feat_mapping[idx].size(); table_feat_mapping[idx][feat_name] = res; } int feat_idx = table_feat_mapping[idx][feat_name]; VLOG(0) << "table_name " << table_name << " mapping id " << idx; VLOG(0) << " feat name " << feat_name << " feat id" << feat_idx; if (feat_idx < table_feat_conf_feat_name[idx].size()) { // overide table_feat_conf_feat_name[idx][feat_idx] = feat_name; table_feat_conf_feat_dtype[idx][feat_idx] = feat_dtype; table_feat_conf_feat_shape[idx][feat_idx] = feat_shape; } else { // new table_feat_conf_feat_name[idx].push_back(feat_name); table_feat_conf_feat_dtype[idx].push_back(feat_dtype); table_feat_conf_feat_shape[idx].push_back(feat_shape); } } VLOG(0) << "add conf over"; } void GraphGpuWrapper::init_search_level(int level) { search_level = level; } gpuStream_t GraphGpuWrapper::get_local_stream(int gpuid) { return reinterpret_cast<GpuPsGraphTable *>(graph_table) ->get_local_stream(gpuid); } void GraphGpuWrapper::init_service() { table_proto.set_task_pool_size(64); table_proto.set_shard_num(1000); table_proto.set_build_sampler_on_cpu(false); table_proto.set_search_level(search_level); table_proto.set_table_name("cpu_graph_table_"); table_proto.set_use_cache(false); for (int i = 0; i < id_to_edge.size(); i++) table_proto.add_edge_types(id_to_edge[i]); for (int i = 0; i < id_to_feature.size(); i++) { table_proto.add_node_types(id_to_feature[i]); auto feat_node = id_to_feature[i]; ::paddle::distributed::GraphFeature *g_f = table_proto.add_graph_feature(); for (int x = 0; x < table_feat_conf_feat_name[i].size(); x++) { g_f->add_name(table_feat_conf_feat_name[i][x]); g_f->add_dtype(table_feat_conf_feat_dtype[i][x]); g_f->add_shape(table_feat_conf_feat_shape[i][x]); } } std::shared_ptr<HeterPsResource> resource = std::make_shared<HeterPsResource>(device_id_mapping); resource->enable_p2p(); GpuPsGraphTable *g = new GpuPsGraphTable(resource, id_to_edge.size()); size_t gpu_num = device_id_mapping.size(); g->init_cpu_table(table_proto, gpu_num); g->cpu_graph_table_->set_feature_separator(feature_separator_); g->cpu_graph_table_->set_slot_feature_separator(slot_feature_separator_); graph_table = reinterpret_cast<char *>(g); upload_num = gpu_num; upload_task_pool.reset(new ::ThreadPool(upload_num)); } void GraphGpuWrapper::finalize() { reinterpret_cast<GpuPsGraphTable *>(graph_table)->show_table_collisions(); } void GraphGpuWrapper::upload_batch(int type, int idx, int slice_num, const std::string &edge_type) { VLOG(0) << "begin upload edge, type[" << edge_type << "]"; auto iter = edge_to_id.find(edge_type); idx = iter->second; VLOG(2) << "cur edge: " << edge_type << ",idx: " << idx; std::vector<std::vector<uint64_t>> ids; reinterpret_cast<GpuPsGraphTable *>(graph_table) ->cpu_graph_table_->get_all_id(type, idx, slice_num, &ids); debug_gpu_memory_info("upload_batch node start"); GpuPsGraphTable *g = reinterpret_cast<GpuPsGraphTable *>(graph_table); std::vector<std::future<int>> tasks; for (int i = 0; i < ids.size(); i++) { tasks.push_back(upload_task_pool->enqueue([&, i, idx, this]() -> int { VLOG(0) << "begin make_gpu_ps_graph, node_id[" << i << "]_size[" << ids[i].size() << "]"; GpuPsCommGraph sub_graph = g->cpu_graph_table_->make_gpu_ps_graph(idx, ids[i]); g->build_graph_on_single_gpu(sub_graph, i, idx); sub_graph.release_on_cpu(); VLOG(0) << "sub graph on gpu " << i << " is built"; return 0; })); } for (size_t i = 0; i < tasks.size(); i++) tasks[i].get(); debug_gpu_memory_info("upload_batch node end"); } // feature table void 
GraphGpuWrapper::upload_batch(int type, int slice_num, int slot_num) { if (type == 1 && (FLAGS_gpugraph_storage_mode == paddle::framework::GpuGraphStorageMode:: MEM_EMB_FEATURE_AND_GPU_GRAPH || FLAGS_gpugraph_storage_mode == paddle::framework::GpuGraphStorageMode:: SSD_EMB_AND_MEM_FEATURE_GPU_GRAPH)) { return; } std::vector<std::vector<uint64_t>> node_ids; reinterpret_cast<GpuPsGraphTable *>(graph_table) ->cpu_graph_table_->get_all_id(type, slice_num, &node_ids); debug_gpu_memory_info("upload_batch feature start"); GpuPsGraphTable *g = reinterpret_cast<GpuPsGraphTable *>(graph_table); std::vector<std::future<int>> tasks; for (int i = 0; i < node_ids.size(); i++) { tasks.push_back(upload_task_pool->enqueue([&, i, this]() -> int { VLOG(0) << "begin make_gpu_ps_graph_fea, node_ids[" << i << "]_size[" << node_ids[i].size() << "]"; GpuPsCommGraphFea sub_graph = g->cpu_graph_table_->make_gpu_ps_graph_fea(i, node_ids[i], slot_num); // sub_graph.display_on_cpu(); VLOG(0) << "begin build_graph_fea_on_single_gpu, node_ids[" << i << "]_size[" << node_ids[i].size() << "]"; g->build_graph_fea_on_single_gpu(sub_graph, i); sub_graph.release_on_cpu(); VLOG(0) << "sub graph fea on gpu " << i << " is built"; return 0; })); } for (size_t i = 0; i < tasks.size(); i++) tasks[i].get(); // g->build_graph_from_cpu(vec); debug_gpu_memory_info("upload_batch feature end"); } // get sub_graph_fea std::vector<GpuPsCommGraphFea> GraphGpuWrapper::get_sub_graph_fea( std::vector<std::vector<uint64_t>> &node_ids, int slot_num) { GpuPsGraphTable *g = reinterpret_cast<GpuPsGraphTable *>(graph_table); std::vector<std::future<int>> tasks; std::vector<GpuPsCommGraphFea> sub_graph_feas(node_ids.size()); for (int i = 0; i < node_ids.size(); i++) { tasks.push_back(upload_task_pool->enqueue([&, i, this]() -> int { GpuPsGraphTable *g = reinterpret_cast<GpuPsGraphTable *>(graph_table); sub_graph_feas[i] = g->cpu_graph_table_->make_gpu_ps_graph_fea(i, node_ids[i], slot_num); return 0; })); } for (size_t i = 0; i < tasks.size(); i++) tasks[i].get(); return sub_graph_feas; } // build_gpu_graph_fea void GraphGpuWrapper::build_gpu_graph_fea(GpuPsCommGraphFea &sub_graph_fea, int i) { GpuPsGraphTable *g = reinterpret_cast<GpuPsGraphTable *>(graph_table); g->build_graph_fea_on_single_gpu(sub_graph_fea, i); sub_graph_fea.release_on_cpu(); VLOG(0) << "sub graph fea on gpu " << i << " is built"; return; } NeighborSampleResult GraphGpuWrapper::graph_neighbor_sample_v3( NeighborSampleQuery q, bool cpu_switch, bool compress = true) { return reinterpret_cast<GpuPsGraphTable *>(graph_table) ->graph_neighbor_sample_v3(q, cpu_switch, compress); } NeighborSampleResultV2 GraphGpuWrapper::graph_neighbor_sample_all_edge_type( int gpu_id, int edge_type_len, uint64_t *key, int sample_size, int len, std::vector<std::shared_ptr<phi::Allocation>> edge_type_graphs) { return reinterpret_cast<GpuPsGraphTable *>(graph_table) ->graph_neighbor_sample_all_edge_type( gpu_id, edge_type_len, key, sample_size, len, edge_type_graphs); } std::vector<std::shared_ptr<phi::Allocation>> GraphGpuWrapper::get_edge_type_graph(int gpu_id, int edge_type_len) { return reinterpret_cast<GpuPsGraphTable *>(graph_table) ->get_edge_type_graph(gpu_id, edge_type_len); } int GraphGpuWrapper::get_feature_info_of_nodes( int gpu_id, uint64_t *d_nodes, int node_num, uint32_t *size_list, uint32_t *size_list_prefix_sum, std::shared_ptr<phi::Allocation> &feature_list, std::shared_ptr<phi::Allocation> &slot_list) { platform::CUDADeviceGuard guard(gpu_id); PADDLE_ENFORCE_NOT_NULL(graph_table, 
paddle::platform::errors::InvalidArgument( "graph_table should not be null")); return reinterpret_cast<GpuPsGraphTable *>(graph_table) ->get_feature_info_of_nodes(gpu_id, d_nodes, node_num, size_list, size_list_prefix_sum, feature_list, slot_list); } int GraphGpuWrapper::get_feature_of_nodes(int gpu_id, uint64_t *d_walk, uint64_t *d_offset, uint32_t size, int slot_num, int *d_slot_feature_num_map, int fea_num_per_node) { platform::CUDADeviceGuard guard(gpu_id); PADDLE_ENFORCE_NOT_NULL(graph_table, paddle::platform::errors::InvalidArgument( "graph_table should not be null")); return reinterpret_cast<GpuPsGraphTable *>(graph_table) ->get_feature_of_nodes(gpu_id, d_walk, d_offset, size, slot_num, d_slot_feature_num_map, fea_num_per_node); } NeighborSampleResult GraphGpuWrapper::graph_neighbor_sample( int gpu_id, uint64_t *device_keys, int walk_degree, int len) { platform::CUDADeviceGuard guard(gpu_id); auto neighbor_sample_res = reinterpret_cast<GpuPsGraphTable *>(graph_table) ->graph_neighbor_sample(gpu_id, device_keys, walk_degree, len); return neighbor_sample_res; } // this function is contributed by Liwb5 std::vector<uint64_t> GraphGpuWrapper::graph_neighbor_sample( int gpu_id, int idx, std::vector<uint64_t> &key, int sample_size) { std::vector<uint64_t> res; if (key.size() == 0) { return res; } uint64_t *cuda_key; platform::CUDADeviceGuard guard(gpu_id); cudaMalloc(&cuda_key, key.size() * sizeof(uint64_t)); cudaMemcpy(cuda_key, key.data(), key.size() * sizeof(uint64_t), cudaMemcpyHostToDevice); VLOG(0) << "key_size: " << key.size(); auto neighbor_sample_res = reinterpret_cast<GpuPsGraphTable *>(graph_table) ->graph_neighbor_sample_v2( gpu_id, idx, cuda_key, sample_size, key.size(), false, true); int *actual_sample_size = new int[key.size()]; cudaMemcpy(actual_sample_size, neighbor_sample_res.actual_sample_size, key.size() * sizeof(int), cudaMemcpyDeviceToHost); // 3, 1, 3 int cumsum = 0; for (int i = 0; i < key.size(); i++) { cumsum += actual_sample_size[i]; } std::vector<uint64_t> cpu_key; cpu_key.resize(key.size() * sample_size); cudaMemcpy(cpu_key.data(), neighbor_sample_res.val, key.size() * sample_size * sizeof(uint64_t), cudaMemcpyDeviceToHost); for (int i = 0; i < key.size(); i++) { for (int j = 0; j < actual_sample_size[i]; j++) { res.push_back(key[i]); res.push_back(cpu_key[i * sample_size + j]); } } delete[] actual_sample_size; cudaFree(cuda_key); return res; } NodeQueryResult GraphGpuWrapper::query_node_list(int gpu_id, int idx, int start, int query_size) { PADDLE_ENFORCE_EQ(FLAGS_gpugraph_load_node_list_into_hbm, true, paddle::platform::errors::PreconditionNotMet( "when use query_node_list should set " "gpugraph_load_node_list_into_hbm true")); return reinterpret_cast<GpuPsGraphTable *>(graph_table) ->query_node_list(gpu_id, idx, start, query_size); } void GraphGpuWrapper::load_node_weight(int type_id, int idx, std::string path) { return reinterpret_cast<GpuPsGraphTable *>(graph_table) ->cpu_graph_table_->load_node_weight(type_id, idx, path); } std::vector<int> GraphGpuWrapper::slot_feature_num_map() const { return reinterpret_cast<GpuPsGraphTable *>(graph_table) ->cpu_graph_table_->slot_feature_num_map(); } void GraphGpuWrapper::export_partition_files(int idx, std::string file_path) { return reinterpret_cast<GpuPsGraphTable *>(graph_table) ->cpu_graph_table_->export_partition_files(idx, file_path); } void GraphGpuWrapper::release_graph() { return reinterpret_cast<GpuPsGraphTable *>(graph_table) ->cpu_graph_table_->release_graph(); } void GraphGpuWrapper::release_graph_edge() 
{ return reinterpret_cast<GpuPsGraphTable *>(graph_table) ->cpu_graph_table_->release_graph_edge(); } void GraphGpuWrapper::release_graph_node() { return reinterpret_cast<GpuPsGraphTable *>(graph_table) ->cpu_graph_table_->release_graph_node(); } std::vector<uint64_t> &GraphGpuWrapper::get_graph_total_keys() { return reinterpret_cast<GpuPsGraphTable *>(graph_table) ->cpu_graph_table_->graph_total_keys_; } std::vector<std::vector<uint64_t>> &GraphGpuWrapper::get_graph_type_keys() { return reinterpret_cast<GpuPsGraphTable *>(graph_table) ->cpu_graph_table_->graph_type_keys_; } std::unordered_map<int, int> &GraphGpuWrapper::get_graph_type_to_index() { return reinterpret_cast<GpuPsGraphTable *>(graph_table) ->cpu_graph_table_->type_to_index_; } std::string &GraphGpuWrapper::get_node_type_size(std::string first_node_type) { auto node_types = paddle::string::split_string<std::string>(first_node_type, ";"); for (auto &type : node_types) { uniq_first_node_.insert(type); } auto &graph_all_type_total_keys = get_graph_type_keys(); auto &type_to_index = get_graph_type_to_index(); std::vector<std::string> node_type_size; for (auto node : uniq_first_node_) { auto it = feature_to_id.find(node); auto first_node_idx = it->second; size_t f_idx = type_to_index[first_node_idx]; int type_total_key_size = graph_all_type_total_keys[f_idx].size(); std::string node_type_str = node + ":" + std::to_string(type_total_key_size); node_type_size.push_back(node_type_str); } std::string delim = ";"; node_type_size_str_ = paddle::string::join_strings(node_type_size, delim); return node_type_size_str_; } std::string &GraphGpuWrapper::get_edge_type_size() { auto edge_type_size = reinterpret_cast<GpuPsGraphTable *>(graph_table) ->cpu_graph_table_->edge_type_size; std::string delim = ";"; edge_type_size_str_ = paddle::string::join_strings(edge_type_size, delim); std::cout << "edge_type_size_str: " << edge_type_size_str_ << std::endl; return edge_type_size_str_; } #endif } // namespace framework }; // namespace paddle
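A minimal sketch (plain C++, not the Paddle API) of the key-sharding rule used by init_type_keys and init_metapath above: each key of a node type goes to bucket key % thread_num, and every bucket is then copied to its own GPU with cudaMemcpyAsync on that device's local stream. Names below are illustrative only.

#include <cstddef>
#include <cstdint>
#include <vector>

// Distribute one node type's keys across GPUs with the same modulo rule as
// `uint64_t shard = type_total_key[j] % thread_num` in the code above.
std::vector<std::vector<uint64_t>> shard_keys_by_gpu(
    const std::vector<uint64_t>& type_keys, std::size_t num_gpus) {
  std::vector<std::vector<uint64_t>> buckets(num_gpus);
  for (uint64_t key : type_keys) {
    buckets[key % num_gpus].push_back(key);  // bucket j is later uploaded to GPU j
  }
  return buckets;
}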
b560ca515ebfa6c96ab921a5598ae70ab86d7d31.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "sweep.h"

void init_planes(struct plane** planes, unsigned int *num_planes,
    struct problem * problem, struct rankinfo * rankinfo)
{
    *num_planes = rankinfo->nx + rankinfo->ny + problem->chunk - 2;
    *planes = (struct plane *)malloc(sizeof(struct plane) * *num_planes);
    for (unsigned int p = 0; p < *num_planes; p++)
        (*planes)[p].num_cells = 0;

    for (unsigned int k = 0; k < problem->chunk; k++)
        for (unsigned int j = 0; j < rankinfo->ny; j++)
            for (unsigned int i = 0; i < rankinfo->nx; i++)
            {
                unsigned int p = i + j + k;
                (*planes)[p].num_cells += 1;
            }

    for (unsigned int p = 0; p < *num_planes; p++)
    {
        (*planes)[p].cell_ids = (struct cell_id *)malloc(sizeof(struct cell_id) * (*planes)[p].num_cells);
    }

    unsigned int index[*num_planes];
    for (unsigned int p = 0; p < *num_planes; p++)
        index[p] = 0;

    for (unsigned int k = 0; k < problem->chunk; k++)
        for (unsigned int j = 0; j < rankinfo->ny; j++)
            for (unsigned int i = 0; i < rankinfo->nx; i++)
            {
                unsigned int p = i + j + k;
                (*planes)[p].cell_ids[index[p]].i = i;
                (*planes)[p].cell_ids[index[p]].j = j;
                (*planes)[p].cell_ids[index[p]].k = k;
                index[p] += 1;
            }
}

void copy_planes(const struct plane * planes, const unsigned int num_planes,
    struct buffers * buffers)
{
    buffers->planes = (struct cell_id **)malloc(sizeof(struct cell_id *)*num_planes);
    for (unsigned int p = 0; p < num_planes; p++)
    {
        hipMalloc(&(buffers->planes[p]), sizeof(struct cell_id)*planes[p].num_cells);
        check_cuda("Creating a plane cell indicies buffer");
        hipMemcpy(buffers->planes[p], planes[p].cell_ids,
            sizeof(struct cell_id)*planes[p].num_cells, hipMemcpyHostToDevice);
        check_cuda("Creating and copying a plane cell indicies buffer");
    }
}

void sweep_plane(
    const unsigned int z_pos,
    const int octant,
    const int istep,
    const int jstep,
    const int kstep,
    const unsigned int plane,
    const struct plane * planes,
    struct problem * problem,
    struct rankinfo * rankinfo,
    struct buffers * buffers
    )
{
    // 2 dimensional kernel
    // First dimension: number of angles * number of groups
    // Second dimension: number of cells in plane
    dim3 blocks(ceil(problem->nang*problem->ng/(double)BLOCK_SIZE_2D),
        ceil(planes[plane].num_cells/(double)BLOCK_SIZE_2D), 1);
    dim3 threads(BLOCK_SIZE_2D, BLOCK_SIZE_2D, 1);

    hipLaunchKernelGGL(( sweep_plane_kernel), dim3(blocks), dim3(threads), 0, 0,
        rankinfo->nx, rankinfo->ny, rankinfo->nz,
        problem->nang, problem->ng, problem->cmom,
        istep, jstep, kstep,
        octant, z_pos,
        planes[plane].num_cells,
        buffers->planes[plane],
        buffers->inner_source,
        buffers->scat_coeff,
        buffers->dd_i, buffers->dd_j, buffers->dd_k,
        buffers->mu,
        buffers->velocity_delta,
        buffers->mat_cross_section,
        buffers->angular_flux_in[octant],
        buffers->flux_i, buffers->flux_j, buffers->flux_k,
        buffers->angular_flux_out[octant]
        );
    check_cuda("Enqueue plane sweep kernel");
}
b560ca515ebfa6c96ab921a5598ae70ab86d7d31.cu
#include "sweep.h"

void init_planes(struct plane** planes, unsigned int *num_planes,
    struct problem * problem, struct rankinfo * rankinfo)
{
    *num_planes = rankinfo->nx + rankinfo->ny + problem->chunk - 2;
    *planes = (struct plane *)malloc(sizeof(struct plane) * *num_planes);
    for (unsigned int p = 0; p < *num_planes; p++)
        (*planes)[p].num_cells = 0;

    for (unsigned int k = 0; k < problem->chunk; k++)
        for (unsigned int j = 0; j < rankinfo->ny; j++)
            for (unsigned int i = 0; i < rankinfo->nx; i++)
            {
                unsigned int p = i + j + k;
                (*planes)[p].num_cells += 1;
            }

    for (unsigned int p = 0; p < *num_planes; p++)
    {
        (*planes)[p].cell_ids = (struct cell_id *)malloc(sizeof(struct cell_id) * (*planes)[p].num_cells);
    }

    unsigned int index[*num_planes];
    for (unsigned int p = 0; p < *num_planes; p++)
        index[p] = 0;

    for (unsigned int k = 0; k < problem->chunk; k++)
        for (unsigned int j = 0; j < rankinfo->ny; j++)
            for (unsigned int i = 0; i < rankinfo->nx; i++)
            {
                unsigned int p = i + j + k;
                (*planes)[p].cell_ids[index[p]].i = i;
                (*planes)[p].cell_ids[index[p]].j = j;
                (*planes)[p].cell_ids[index[p]].k = k;
                index[p] += 1;
            }
}

void copy_planes(const struct plane * planes, const unsigned int num_planes,
    struct buffers * buffers)
{
    buffers->planes = (struct cell_id **)malloc(sizeof(struct cell_id *)*num_planes);
    for (unsigned int p = 0; p < num_planes; p++)
    {
        cudaMalloc(&(buffers->planes[p]), sizeof(struct cell_id)*planes[p].num_cells);
        check_cuda("Creating a plane cell indicies buffer");
        cudaMemcpy(buffers->planes[p], planes[p].cell_ids,
            sizeof(struct cell_id)*planes[p].num_cells, cudaMemcpyHostToDevice);
        check_cuda("Creating and copying a plane cell indicies buffer");
    }
}

void sweep_plane(
    const unsigned int z_pos,
    const int octant,
    const int istep,
    const int jstep,
    const int kstep,
    const unsigned int plane,
    const struct plane * planes,
    struct problem * problem,
    struct rankinfo * rankinfo,
    struct buffers * buffers
    )
{
    // 2 dimensional kernel
    // First dimension: number of angles * number of groups
    // Second dimension: number of cells in plane
    dim3 blocks(ceil(problem->nang*problem->ng/(double)BLOCK_SIZE_2D),
        ceil(planes[plane].num_cells/(double)BLOCK_SIZE_2D), 1);
    dim3 threads(BLOCK_SIZE_2D, BLOCK_SIZE_2D, 1);

    sweep_plane_kernel<<< blocks, threads >>>(
        rankinfo->nx, rankinfo->ny, rankinfo->nz,
        problem->nang, problem->ng, problem->cmom,
        istep, jstep, kstep,
        octant, z_pos,
        planes[plane].num_cells,
        buffers->planes[plane],
        buffers->inner_source,
        buffers->scat_coeff,
        buffers->dd_i, buffers->dd_j, buffers->dd_k,
        buffers->mu,
        buffers->velocity_delta,
        buffers->mat_cross_section,
        buffers->angular_flux_in[octant],
        buffers->flux_i, buffers->flux_j, buffers->flux_k,
        buffers->angular_flux_out[octant]
        );
    check_cuda("Enqueue plane sweep kernel");
}
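A stand-alone sketch (plain C++, under the same indexing assumptions) of the wavefront decomposition that init_planes builds above: every cell (i, j, k) is assigned to plane p = i + j + k, giving nx + ny + chunk - 2 planes; cells within one plane have no sweep dependencies on each other, which is why sweep_plane launches one kernel per plane.

#include <vector>

// Count how many cells fall on each diagonal plane p = i + j + k, matching
// the first pass of init_planes.
std::vector<unsigned int> cells_per_plane(unsigned int nx, unsigned int ny,
                                          unsigned int chunk) {
  std::vector<unsigned int> count(nx + ny + chunk - 2, 0);
  for (unsigned int k = 0; k < chunk; ++k)
    for (unsigned int j = 0; j < ny; ++j)
      for (unsigned int i = 0; i < nx; ++i)
        count[i + j + k] += 1;  // same plane index as init_planes uses
  return count;
}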
6be059f5f05306bbcb8cf80bd49a0c0c746a1cfe.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef USE_ROCM

#include "dragon/core/context_cuda.h"
#include "dragon/utils/op_kernels.h"

namespace dragon {

namespace kernels {

namespace {

template <typename T>
__global__ void _ChannelShuffle(
    const int NxCxS,
    const int S,
    const int G,
    const int K,
    const T* x,
    T* y) {
  CUDA_1D_KERNEL_LOOP(index, NxCxS) {
    const int j = index % S;
    const int gi = index / S % G;
    const int ki = index / S / G % K;
    const int i = index / S / G / K;
    y[index] = x[((i * G + gi) * K + ki) * S + j];
  }
}

} // namespace

/* ------------------- Launcher Separator ------------------- */

#define DEFINE_KERNEL_LAUNCHER(T)                       \
  template <>                                           \
  void ChannelShuffle<T, CUDAContext>(                  \
      const int N,                                      \
      const int S,                                      \
      const int C,                                      \
      const int G,                                      \
      const T* x,                                       \
      T* y,                                             \
      CUDAContext* ctx) {                               \
    const auto NxCxS = N * C * S;                       \
    hipLaunchKernelGGL(( _ChannelShuffle),              \
        CUDA_BLOCKS(NxCxS),                             \
        CUDA_THREADS,                                   \
        0,                                              \
        ctx->cuda_stream(), NxCxS, S, G, C / G, x, y);  \
  }

DEFINE_KERNEL_LAUNCHER(bool);
DEFINE_KERNEL_LAUNCHER(uint8_t);
DEFINE_KERNEL_LAUNCHER(int8_t);
DEFINE_KERNEL_LAUNCHER(int);
DEFINE_KERNEL_LAUNCHER(int64_t);
DEFINE_KERNEL_LAUNCHER(float16);
DEFINE_KERNEL_LAUNCHER(float);
DEFINE_KERNEL_LAUNCHER(double);
#undef DEFINE_KERNEL_LAUNCHER

} // namespace kernels

} // namespace dragon

#endif // USE_ROCM
6be059f5f05306bbcb8cf80bd49a0c0c746a1cfe.cu
#ifdef USE_CUDA

#include "dragon/core/context_cuda.h"
#include "dragon/utils/op_kernels.h"

namespace dragon {

namespace kernels {

namespace {

template <typename T>
__global__ void _ChannelShuffle(
    const int NxCxS,
    const int S,
    const int G,
    const int K,
    const T* x,
    T* y) {
  CUDA_1D_KERNEL_LOOP(index, NxCxS) {
    const int j = index % S;
    const int gi = index / S % G;
    const int ki = index / S / G % K;
    const int i = index / S / G / K;
    y[index] = x[((i * G + gi) * K + ki) * S + j];
  }
}

} // namespace

/* ------------------- Launcher Separator ------------------- */

#define DEFINE_KERNEL_LAUNCHER(T) \
  template <> \
  void ChannelShuffle<T, CUDAContext>( \
      const int N, \
      const int S, \
      const int C, \
      const int G, \
      const T* x, \
      T* y, \
      CUDAContext* ctx) { \
    const auto NxCxS = N * C * S; \
    _ChannelShuffle<<< \
        CUDA_BLOCKS(NxCxS), \
        CUDA_THREADS, \
        0, \
        ctx->cuda_stream()>>>(NxCxS, S, G, C / G, x, y); \
  }

DEFINE_KERNEL_LAUNCHER(bool);
DEFINE_KERNEL_LAUNCHER(uint8_t);
DEFINE_KERNEL_LAUNCHER(int8_t);
DEFINE_KERNEL_LAUNCHER(int);
DEFINE_KERNEL_LAUNCHER(int64_t);
DEFINE_KERNEL_LAUNCHER(float16);
DEFINE_KERNEL_LAUNCHER(float);
DEFINE_KERNEL_LAUNCHER(double);
#undef DEFINE_KERNEL_LAUNCHER

} // namespace kernels

} // namespace dragon

#endif // USE_CUDA
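In both variants above the output index decomposes (outermost to innermost) into batch i, channel-within-group ki, group gi and spatial position j, and each output element gathers from the input element whose channel is gi * K + ki, which is the usual channel-shuffle transpose of the (group, channel) pair. A minimal host-side sketch of the same index math, useful only as a sanity check (standalone hypothetical code, not part of Dragon):

// Hypothetical CPU reference for the _ChannelShuffle index mapping.
#include <cstdio>

int main() {
  const int N = 1, S = 1, C = 6, G = 2, K = C / G;  // 2 groups of 3 channels
  for (int index = 0; index < N * C * S; ++index) {
    const int j = index % S;
    const int gi = index / S % G;
    const int ki = index / S / G % K;
    const int i = index / S / G / K;
    // Output channel ki*G + gi is read from input channel gi*K + ki.
    const int src = ((i * G + gi) * K + ki) * S + j;
    std::printf("y[%d] <- x[%d]\n", index, src);
  }
  return 0;
}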
c5e031b72379ee78d07d1527076933398d7c12a7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <hip/device_functions.h> #include <stdlib.h> #include "opencv2\opencv.hpp" #include <stdio.h> #include <ctime> #define RADIUS 1 #define F_HEIGHT 3 #define F_WIDTH 3 __global__ void convolutionKernel(float *d_conmatrix, uchar *d_out, uchar *d_in, int inW, int inH) { //Position of the pixel that this thread is going to process. int blockid = blockIdx.x + blockIdx.y * gridDim.x; int pos = blockid * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x; float sum = 0.0; float current = 0.0; for (int i = -RADIUS; i <= RADIUS; i++) { //Rows for (int j = -RADIUS; j <= RADIUS; j++) { //Columns //Checks if the thread is trying to process a pixel out of bounds. if (blockIdx.x == 0 && (threadIdx.x + j < 0)) { current = 0; } else if ((blockIdx.x == gridDim.x - 1) && (threadIdx.x + j) > blockDim.x - 1) { current = 0; } else { if (blockIdx.y == 0 && (threadIdx.y + i) < 0) { current = 0; } else if ((blockIdx.y == gridDim.y - 1) && (threadIdx.y + i) > blockDim.y - 1) { current = 0; } else { //Saves the pixel value in the variable. current = d_in[pos + (i * inW) + j]; } } sum += current * d_conmatrix[F_WIDTH * (i + RADIUS) + (j + RADIUS)]; } } //Sets the pixel of the new image. d_out[pos] = floor(sum); } __global__ void convolutionSharedKernel(float *d_conmatrix, uchar *d_out, uchar *d_in, int inW, int inH) { //Shared memory for the convolution matrix. __shared__ float shared_matrix[F_HEIGHT * F_WIDTH]; //Position of the pixel that this thread is going to process. int blockid = blockIdx.x + blockIdx.y * gridDim.x; int pos = blockid * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x; //Each pixel of the block copies one pixel of the convolution matrix to shared memory. shared_matrix[(threadIdx.y * blockDim.x) + threadIdx.x] = d_conmatrix[(threadIdx.y * blockDim.x) + threadIdx.x]; __syncthreads(); float sum = 0.0; float current = 0.0; for (int i = -RADIUS; i <= RADIUS; i++) { //Rows for (int j = -RADIUS; j <= RADIUS; j++) { //Columns //Checks if the thread is trying to process a pixel out of bounds. if (blockIdx.x == 0 && (threadIdx.x + j < 0)) { current = 0; } else if ((blockIdx.x == gridDim.x - 1) && (threadIdx.x + j) > blockDim.x - 1) { current = 0; } else { if (blockIdx.y == 0 && (threadIdx.y + i) < 0) { current = 0; } else if ((blockIdx.y == gridDim.y - 1) && (threadIdx.y + i) > blockDim.y - 1) { current = 0; } else { //Saves the pixel value in the variable. current = d_in[pos + (i * inW) + j]; } } sum += current * shared_matrix[F_WIDTH * (i + RADIUS) + (j + RADIUS)]; } } //Sets the pixel of the new image. d_out[pos] = floor(sum); } void convolutionSerial(float *d_conmatrix, uchar *out, uchar *in, int inW, int inH) { for (int h = 0; h < inH; h++) { //Rows for (int l = 0; l < inW; l++) { //Columns float sum = 0.0; float current = 0.0; //Current pixel position. int pos = h * inW + l; for (int i = -RADIUS; i <= RADIUS; i++) { //Rows for (int j = -RADIUS; j <= RADIUS; j++) { //Columns if (l + j < 0) { current = 0; } else if (l + j > inW - 1) { current = 0; } else { if (h + i < 0) { current = 0; } else if (h + i > inH - 1) { current = 0; } else { //Saves the pixel value in the variable. current = in[pos + (i * inW) + j]; } } sum += current * d_conmatrix[F_WIDTH * (i + RADIUS) + (j + RADIUS)]; } } //Sets the pixel of the new image. 
out[pos] = floor(sum); } } } int main() { float *d_conmatrix; uchar *d_image, *d_out; uchar *h_out, *h_aux; clock_t t; //Gaussian box. float filter[F_HEIGHT * F_WIDTH] = { 1.0/9.0, 1.0 / 9.0,1.0 / 9.0, 1.0 / 9.0,1.0 / 9.0,1.0 / 9.0, 1.0 / 9.0,1.0 / 9.0,1.0 / 9.0 }; //Load image data cv::Mat image = cv::imread("jupiter.jpg", CV_LOAD_IMAGE_GRAYSCALE); int i_width = image.cols; int i_height = image.rows; if (!image.data) { std::cout << "Could not open or find the image" << std::endl; return -1; } //Show original image. cv::namedWindow("Original window", cv::WINDOW_AUTOSIZE); cv::imshow("Original window", image); //Allocate host memory. h_out = (uchar*)malloc(i_height * i_width * sizeof(uchar)); h_aux = (uchar*)malloc(i_height * i_width * sizeof(uchar)); t = clock(); //Applies three passes to maximize the blur effect. (Global memory filter) convolutionSerial(filter, h_out, image.data, i_width, i_height); convolutionSerial(filter, h_aux, h_out, i_width, i_height); convolutionSerial(filter, h_out, h_aux, i_width, i_height); t = clock() - t; //Print time std::cout << "Serial (3 convolutions):" << (float)t / CLOCKS_PER_SEC << " seconds\n" << std::endl; //Show processed image. cv::Mat res0(i_height, i_width, CV_8UC1, h_out); cv::namedWindow("Result window (serial)", cv::WINDOW_AUTOSIZE); cv::imshow("Result window (serial)", res0); //Alocate device memory. hipMalloc((void**)&d_conmatrix, F_HEIGHT * F_WIDTH * sizeof(float)); hipMalloc((void**)&d_image, i_height * i_width * sizeof(uchar)); hipMalloc((void**)&d_out, i_height * i_width * sizeof(uchar)); //Copy from host to device. hipMemcpy(d_conmatrix, filter, F_HEIGHT * F_WIDTH * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_image, image.data, i_height * i_width * sizeof(uchar), hipMemcpyHostToDevice); //Sizes of grid and block. dim3 block; dim3 grid; block.x = F_WIDTH; block.y = F_HEIGHT; grid.x = i_width / block.x; grid.y = i_height / block.y; t = clock(); //Applies three passes to maximize the blur effect. (Global memory filter) hipLaunchKernelGGL(( convolutionKernel), dim3(grid), dim3(block), 0, 0, d_conmatrix, d_out, d_image, i_width, i_height); convolutionKernel<<<grid, block >> > (d_conmatrix, d_out, d_out, i_width, i_height); convolutionKernel<<<grid, block >> > (d_conmatrix, d_out, d_out, i_width, i_height); hipDeviceSynchronize(); t = clock() - t; std::cout << "Kernel Global Memory (3 convolutions):" << (float)t / CLOCKS_PER_SEC << " seconds\n" << std::endl; //Copy result from device to host. hipMemcpy(h_out, d_out, i_height * i_width * sizeof(uchar), hipMemcpyDeviceToHost); //Show processed image. cv::Mat res(i_height, i_width, CV_8UC1, h_out); cv::namedWindow("Result window (global)", cv::WINDOW_AUTOSIZE); cv::imshow("Result window (global)", res); t = clock(); //Applies three passes to maximize the blur effect. (Global memory filter) convolutionKernel << <grid, block >> > (d_conmatrix, d_out, d_image, i_width, i_height); convolutionKernel << <grid, block >> > (d_conmatrix, d_out, d_out, i_width, i_height); convolutionKernel << <grid, block >> > (d_conmatrix, d_out, d_out, i_width, i_height); hipDeviceSynchronize(); t = clock() - t; std::cout << "Kernel Shared Memory (3 convolutions):" << (float)t / CLOCKS_PER_SEC << " seconds\n" << std::endl; //Copy result from device to host. hipMemcpy(h_out, d_out, i_height * i_width * sizeof(uchar), hipMemcpyDeviceToHost); //Show processed image. 
cv::Mat res2(i_height, i_width, CV_8UC1, h_out); cv::namedWindow("Result window (shared)", cv::WINDOW_AUTOSIZE); cv::imshow("Result window (shared)", res2); cv::waitKey(0); return 0; }
c5e031b72379ee78d07d1527076933398d7c12a7.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <device_functions.h> #include <stdlib.h> #include "opencv2\opencv.hpp" #include <stdio.h> #include <ctime> #define RADIUS 1 #define F_HEIGHT 3 #define F_WIDTH 3 __global__ void convolutionKernel(float *d_conmatrix, uchar *d_out, uchar *d_in, int inW, int inH) { //Position of the pixel that this thread is going to process. int blockid = blockIdx.x + blockIdx.y * gridDim.x; int pos = blockid * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x; float sum = 0.0; float current = 0.0; for (int i = -RADIUS; i <= RADIUS; i++) { //Rows for (int j = -RADIUS; j <= RADIUS; j++) { //Columns //Checks if the thread is trying to process a pixel out of bounds. if (blockIdx.x == 0 && (threadIdx.x + j < 0)) { current = 0; } else if ((blockIdx.x == gridDim.x - 1) && (threadIdx.x + j) > blockDim.x - 1) { current = 0; } else { if (blockIdx.y == 0 && (threadIdx.y + i) < 0) { current = 0; } else if ((blockIdx.y == gridDim.y - 1) && (threadIdx.y + i) > blockDim.y - 1) { current = 0; } else { //Saves the pixel value in the variable. current = d_in[pos + (i * inW) + j]; } } sum += current * d_conmatrix[F_WIDTH * (i + RADIUS) + (j + RADIUS)]; } } //Sets the pixel of the new image. d_out[pos] = floor(sum); } __global__ void convolutionSharedKernel(float *d_conmatrix, uchar *d_out, uchar *d_in, int inW, int inH) { //Shared memory for the convolution matrix. __shared__ float shared_matrix[F_HEIGHT * F_WIDTH]; //Position of the pixel that this thread is going to process. int blockid = blockIdx.x + blockIdx.y * gridDim.x; int pos = blockid * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x; //Each pixel of the block copies one pixel of the convolution matrix to shared memory. shared_matrix[(threadIdx.y * blockDim.x) + threadIdx.x] = d_conmatrix[(threadIdx.y * blockDim.x) + threadIdx.x]; __syncthreads(); float sum = 0.0; float current = 0.0; for (int i = -RADIUS; i <= RADIUS; i++) { //Rows for (int j = -RADIUS; j <= RADIUS; j++) { //Columns //Checks if the thread is trying to process a pixel out of bounds. if (blockIdx.x == 0 && (threadIdx.x + j < 0)) { current = 0; } else if ((blockIdx.x == gridDim.x - 1) && (threadIdx.x + j) > blockDim.x - 1) { current = 0; } else { if (blockIdx.y == 0 && (threadIdx.y + i) < 0) { current = 0; } else if ((blockIdx.y == gridDim.y - 1) && (threadIdx.y + i) > blockDim.y - 1) { current = 0; } else { //Saves the pixel value in the variable. current = d_in[pos + (i * inW) + j]; } } sum += current * shared_matrix[F_WIDTH * (i + RADIUS) + (j + RADIUS)]; } } //Sets the pixel of the new image. d_out[pos] = floor(sum); } void convolutionSerial(float *d_conmatrix, uchar *out, uchar *in, int inW, int inH) { for (int h = 0; h < inH; h++) { //Rows for (int l = 0; l < inW; l++) { //Columns float sum = 0.0; float current = 0.0; //Current pixel position. int pos = h * inW + l; for (int i = -RADIUS; i <= RADIUS; i++) { //Rows for (int j = -RADIUS; j <= RADIUS; j++) { //Columns if (l + j < 0) { current = 0; } else if (l + j > inW - 1) { current = 0; } else { if (h + i < 0) { current = 0; } else if (h + i > inH - 1) { current = 0; } else { //Saves the pixel value in the variable. current = in[pos + (i * inW) + j]; } } sum += current * d_conmatrix[F_WIDTH * (i + RADIUS) + (j + RADIUS)]; } } //Sets the pixel of the new image. out[pos] = floor(sum); } } } int main() { float *d_conmatrix; uchar *d_image, *d_out; uchar *h_out, *h_aux; clock_t t; //Gaussian box. 
float filter[F_HEIGHT * F_WIDTH] = { 1.0/9.0, 1.0 / 9.0,1.0 / 9.0, 1.0 / 9.0,1.0 / 9.0,1.0 / 9.0, 1.0 / 9.0,1.0 / 9.0,1.0 / 9.0 }; //Load image data cv::Mat image = cv::imread("jupiter.jpg", CV_LOAD_IMAGE_GRAYSCALE); int i_width = image.cols; int i_height = image.rows; if (!image.data) { std::cout << "Could not open or find the image" << std::endl; return -1; } //Show original image. cv::namedWindow("Original window", cv::WINDOW_AUTOSIZE); cv::imshow("Original window", image); //Allocate host memory. h_out = (uchar*)malloc(i_height * i_width * sizeof(uchar)); h_aux = (uchar*)malloc(i_height * i_width * sizeof(uchar)); t = clock(); //Applies three passes to maximize the blur effect. (Global memory filter) convolutionSerial(filter, h_out, image.data, i_width, i_height); convolutionSerial(filter, h_aux, h_out, i_width, i_height); convolutionSerial(filter, h_out, h_aux, i_width, i_height); t = clock() - t; //Print time std::cout << "Serial (3 convolutions):" << (float)t / CLOCKS_PER_SEC << " seconds\n" << std::endl; //Show processed image. cv::Mat res0(i_height, i_width, CV_8UC1, h_out); cv::namedWindow("Result window (serial)", cv::WINDOW_AUTOSIZE); cv::imshow("Result window (serial)", res0); //Alocate device memory. cudaMalloc((void**)&d_conmatrix, F_HEIGHT * F_WIDTH * sizeof(float)); cudaMalloc((void**)&d_image, i_height * i_width * sizeof(uchar)); cudaMalloc((void**)&d_out, i_height * i_width * sizeof(uchar)); //Copy from host to device. cudaMemcpy(d_conmatrix, filter, F_HEIGHT * F_WIDTH * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_image, image.data, i_height * i_width * sizeof(uchar), cudaMemcpyHostToDevice); //Sizes of grid and block. dim3 block; dim3 grid; block.x = F_WIDTH; block.y = F_HEIGHT; grid.x = i_width / block.x; grid.y = i_height / block.y; t = clock(); //Applies three passes to maximize the blur effect. (Global memory filter) convolutionKernel<<<grid, block>>> (d_conmatrix, d_out, d_image, i_width, i_height); convolutionKernel<<<grid, block >> > (d_conmatrix, d_out, d_out, i_width, i_height); convolutionKernel<<<grid, block >> > (d_conmatrix, d_out, d_out, i_width, i_height); cudaThreadSynchronize(); t = clock() - t; std::cout << "Kernel Global Memory (3 convolutions):" << (float)t / CLOCKS_PER_SEC << " seconds\n" << std::endl; //Copy result from device to host. cudaMemcpy(h_out, d_out, i_height * i_width * sizeof(uchar), cudaMemcpyDeviceToHost); //Show processed image. cv::Mat res(i_height, i_width, CV_8UC1, h_out); cv::namedWindow("Result window (global)", cv::WINDOW_AUTOSIZE); cv::imshow("Result window (global)", res); t = clock(); //Applies three passes to maximize the blur effect. (Global memory filter) convolutionKernel << <grid, block >> > (d_conmatrix, d_out, d_image, i_width, i_height); convolutionKernel << <grid, block >> > (d_conmatrix, d_out, d_out, i_width, i_height); convolutionKernel << <grid, block >> > (d_conmatrix, d_out, d_out, i_width, i_height); cudaThreadSynchronize(); t = clock() - t; std::cout << "Kernel Shared Memory (3 convolutions):" << (float)t / CLOCKS_PER_SEC << " seconds\n" << std::endl; //Copy result from device to host. cudaMemcpy(h_out, d_out, i_height * i_width * sizeof(uchar), cudaMemcpyDeviceToHost); //Show processed image. cv::Mat res2(i_height, i_width, CV_8UC1, h_out); cv::namedWindow("Result window (shared)", cv::WINDOW_AUTOSIZE); cv::imshow("Result window (shared)", res2); cv::waitKey(0); return 0; }
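A few things stand out in this example (in both the .hip and .cu variants): the grid is sized with truncating integer division (grid.x = i_width / block.x with a 3x3 block), so images whose width or height is not a multiple of 3 leave their right/bottom edge pixels unprocessed; the pass timed as "Kernel Shared Memory (3 convolutions)" launches convolutionKernel again rather than convolutionSharedKernel, so the shared-memory kernel is defined but never exercised; and cudaThreadSynchronize() is the deprecated spelling that hipify rewrites to hipDeviceSynchronize() (cudaDeviceSynchronize() is the current CUDA equivalent). A sketch of the usual round-up grid sizing, shown only as an illustration and not as a change to the original program:

// Hypothetical adjustment: round the grid up so edge pixels are covered.
// The kernel would then also need an explicit bounds check against i_width/i_height.
dim3 block(F_WIDTH, F_HEIGHT);
dim3 grid((i_width  + block.x - 1) / block.x,
          (i_height + block.y - 1) / block.y);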
7f2782138ae3faf05f2fa2a887c1f02480c9f4be.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "TetrahedronSystemInterface.h"
#include "TetrahedronSystem.cuh"

extern "C" {

void tetrahedronSystemIntegrate(float3 * o_position, float3 * i_velocity, float dt, uint n)
{
    dim3 block(512, 1, 1);
    unsigned nblk = iDivUp(n, 512);
    dim3 grid(nblk, 1, 1);

    hipLaunchKernelGGL(( tetrahedronSystemIntegrate_kernel), dim3(grid), dim3(block), 0, 0,
        o_position, i_velocity, dt, n);
}

}

namespace tetrasys {

void writeVicinity(int * vicinities, int * indices, int * offsets, uint n)
{
    dim3 block(512, 1, 1);
    unsigned nblk = iDivUp(n, 512);
    dim3 grid(nblk, 1, 1);

    hipLaunchKernelGGL(( writeVicinity_kernel<TETRAHEDRONSYSTEM_VICINITY_LENGTH>), dim3(grid), dim3(block), 0, 0,
        vicinities, indices, offsets, n);
}

void formTetrahedronAabbs(Aabb *dst, float3 * pos, float3 * vel, float timeStep, uint4 * tets, unsigned numTetrahedrons)
{
    int tpb = CALC_TETRA_AABB_NUM_THREADS;
    dim3 block(tpb, 1, 1);
    unsigned nblk = iDivUp(numTetrahedrons<<2, tpb);
    dim3 grid(nblk, 1, 1);

    hipLaunchKernelGGL(( formTetrahedronAabbs_kernel), dim3(grid), dim3(block), 0, 0,
        dst, pos, vel, timeStep, tets, numTetrahedrons<<2);
}

void formTetrahedronAabbsImpulsed(Aabb * leafAabbs, float3 * pos, float3 * vel, float3 * deltaVel, float timeStep, uint4 * tets, uint numTetrahedrons)
{
    int tpb = CALC_TETRA_AABB_NUM_THREADS;
    dim3 block(tpb, 1, 1);
    unsigned nblk = iDivUp(numTetrahedrons<<2, tpb);
    dim3 grid(nblk, 1, 1);

    hipLaunchKernelGGL(( formTetrahedronAabbsImpulsed_kernel), dim3(grid), dim3(block), 0, 0,
        leafAabbs, pos, vel, deltaVel, timeStep, tets, numTetrahedrons<<2);
}

}
7f2782138ae3faf05f2fa2a887c1f02480c9f4be.cu
#include "TetrahedronSystemInterface.h" #include "TetrahedronSystem.cuh" extern "C" { void tetrahedronSystemIntegrate(float3 * o_position, float3 * i_velocity, float dt, uint n) { dim3 block(512, 1, 1); unsigned nblk = iDivUp(n, 512); dim3 grid(nblk, 1, 1); tetrahedronSystemIntegrate_kernel<<< grid, block >>>(o_position, i_velocity, dt, n); } } namespace tetrasys { void writeVicinity(int * vicinities, int * indices, int * offsets, uint n) { dim3 block(512, 1, 1); unsigned nblk = iDivUp(n, 512); dim3 grid(nblk, 1, 1); writeVicinity_kernel<TETRAHEDRONSYSTEM_VICINITY_LENGTH> <<< grid, block >>>(vicinities, indices, offsets, n); } void formTetrahedronAabbs(Aabb *dst, float3 * pos, float3 * vel, float timeStep, uint4 * tets, unsigned numTetrahedrons) { int tpb = CALC_TETRA_AABB_NUM_THREADS; dim3 block(tpb, 1, 1); unsigned nblk = iDivUp(numTetrahedrons<<2, tpb); dim3 grid(nblk, 1, 1); formTetrahedronAabbs_kernel<<< grid, block >>>(dst, pos, vel, timeStep, tets, numTetrahedrons<<2); } void formTetrahedronAabbsImpulsed(Aabb * leafAabbs, float3 * pos, float3 * vel, float3 * deltaVel, float timeStep, uint4 * tets, uint numTetrahedrons) { int tpb = CALC_TETRA_AABB_NUM_THREADS; dim3 block(tpb, 1, 1); unsigned nblk = iDivUp(numTetrahedrons<<2, tpb); dim3 grid(nblk, 1, 1); formTetrahedronAabbsImpulsed_kernel<<< grid, block >>>(leafAabbs, pos, vel, deltaVel, timeStep, tets, numTetrahedrons<<2); } }
ee4db0b46cd6ac98c3ae457f5f655996b9ff81e8.hip
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/hip/Math.cuh>
#include <limits>

namespace at::native {

CONSTEXPR_EXCEPT_WIN_CUDA char tan_name[] = "tan";

void tan_kernel_cuda(TensorIteratorBase& iter) {
  auto common_dtype = iter.common_dtype();
  if (at::isComplexType(common_dtype)) {
#if AT_USE_JITERATOR
    static const auto tan_string = jiterator_stringify(
        template <typename T> T tan(T a) { return std::tan(a); });
    AT_DISPATCH_COMPLEX_TYPES_AND(
        kComplexHalf, common_dtype, "tan_name", [&]() {
          jitted_gpu_kernel<
              /*name=*/tan_name,
              /*return_dtype=*/scalar_t,
              /*common_dtype=*/scalar_t,
              /*arity=*/1>(iter, tan_string);
        });
#else
    AT_DISPATCH_COMPLEX_TYPES_AND(
        kComplexHalf, common_dtype, "tan_name", [&]() {
          gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
            using opmath_t = at::opmath_type<scalar_t>;
            return ::tan(static_cast<opmath_t>(a));
          });
        });
#endif
  } else {
    AT_DISPATCH_FLOATING_TYPES_AND2(
        ScalarType::Half,
        ScalarType::BFloat16,
        common_dtype,
        "tan_cuda",
        [&]() {
          gpu_kernel(
              iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t { return ::tan(a); });
        });
  }
}

REGISTER_DISPATCH(tan_stub, &tan_kernel_cuda);

} // namespace at::native
ee4db0b46cd6ac98c3ae457f5f655996b9ff81e8.cu
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/Math.cuh>
#include <limits>

namespace at::native {

CONSTEXPR_EXCEPT_WIN_CUDA char tan_name[] = "tan";

void tan_kernel_cuda(TensorIteratorBase& iter) {
  auto common_dtype = iter.common_dtype();
  if (at::isComplexType(common_dtype)) {
#if AT_USE_JITERATOR
    static const auto tan_string = jiterator_stringify(
        template <typename T> T tan(T a) { return std::tan(a); });
    AT_DISPATCH_COMPLEX_TYPES_AND(
        kComplexHalf, common_dtype, "tan_name", [&]() {
          jitted_gpu_kernel<
              /*name=*/tan_name,
              /*return_dtype=*/scalar_t,
              /*common_dtype=*/scalar_t,
              /*arity=*/1>(iter, tan_string);
        });
#else
    AT_DISPATCH_COMPLEX_TYPES_AND(
        kComplexHalf, common_dtype, "tan_name", [&]() {
          gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
            using opmath_t = at::opmath_type<scalar_t>;
            return ::tan(static_cast<opmath_t>(a));
          });
        });
#endif
  } else {
    AT_DISPATCH_FLOATING_TYPES_AND2(
        ScalarType::Half,
        ScalarType::BFloat16,
        common_dtype,
        "tan_cuda",
        [&]() {
          gpu_kernel(
              iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t { return ::tan(a); });
        });
  }
}

REGISTER_DISPATCH(tan_stub, &tan_kernel_cuda);

} // namespace at::native
7aa330cd1be7a06f780896631a37ee12bcf83832.hip
// !!! This is a file automatically generated by hipify!!! #include <f/device/device_assert/cuda_assert.hpp> #include <f/device/device_assert/cublas_assert.hpp> #include <f/device/device_assert/kernel_assert.hpp> #include <hip/hip_runtime.h> #include <rocblas.h> #include <hip/hip_complex.h> #include <math_functions.h> __global__ void Dznrm2( unsigned long m, double2 *dA, double *dxnorm )// Dznrm2<<<1,128>>>(...) { unsigned long i = threadIdx.x; __shared__ double x[128]; double lsum = 0.0; for( unsigned long j = i; j < m; j += 128 ) { double const re = dA[j].x; double const im = dA[j].y; lsum += re*re + im*im; } x[i] = lsum; __syncthreads(); if ( i < 64 ) { x[i] += x[i+ 64]; } __syncthreads(); if ( i < 32 ) { x[i] += x[i+ 32]; } __syncthreads(); if ( i < 16 ) { x[i] += x[i+ 16]; } __syncthreads(); if ( i < 8 ) { x[i] += x[i+ 8]; } __syncthreads(); if ( i < 4 ) { x[i] += x[i+ 4]; } __syncthreads(); if ( i < 2 ) { x[i] += x[i+ 2]; } __syncthreads(); if ( i < 1 ) { x[i] += x[i+ 1]; } __syncthreads(); if ( i == 0 ) *dxnorm = sqrt(x[0]); } __device__ void device_Dznrm2( unsigned long m, double2 *dA, double *dxnorm ) { double ans = 0.0; for ( unsigned long index = 0; index != m; ++index ) { double const real = dA[index].x; double const imag = dA[index].y; ans += real*real + imag*imag; } dxnorm[0] = ans; } __global__ void Dasum( unsigned long m, double2 *dA, double *dxnorm ) { unsigned long i = threadIdx.x; __shared__ double x[128]; double lsum = 0.0; for( unsigned long j = i; j < m; j += 128 ) { double const re = dA[j].x; double const im = dA[j].y; lsum += sqrt(re*re + im*im); } x[i] = lsum; __syncthreads(); if ( i < 64 ) { x[i] += x[i+ 64]; } __syncthreads(); if ( i < 32 ) { x[i] += x[i+ 32]; } __syncthreads(); if ( i < 16 ) { x[i] += x[i+ 16]; } __syncthreads(); if ( i < 8 ) { x[i] += x[i+ 8]; } __syncthreads(); if ( i < 4 ) { x[i] += x[i+ 4]; } __syncthreads(); if ( i < 2 ) { x[i] += x[i+ 2]; } __syncthreads(); if ( i < 1 ) { x[i] += x[i+ 1]; } __syncthreads(); if ( i == 0 ) *dxnorm = x[0]; } //should call with Zscale<<<1, 128>>>(...); __global__ void Zscal( unsigned long m, double real, double2* dA ) { const int i = threadIdx.x; for( unsigned long j = i; j < m; j += 128 ) { dA[j].x *= real; dA[j].y *= real; } } __device__ void device_Zscal( unsigned long m, double real, double2* dA ) { //for ( unsigned long index = 0; index != m; ++index ) <<-- WHY this one doesnot work???????????????????????????????????????????????????????????????????????????????????????????????????????????????????????????????????????? 
for ( unsigned long index = 0; index < m; ++index ) { dA[index].x *= real; dA[index].y *= real; } } //TODO: optimization __global__ //<<<((dim+15)/16,(dim+15)/16), (16,16)>>> void Zgemm( double2* P, double2* M, double2* N, unsigned long dim, double alpha ) { typedef double value_type; typedef double2 complex_type; typedef unsigned long size_type; __shared__ value_type _M[16][17]; __shared__ value_type _m[16][17]; __shared__ value_type _N[16][17]; __shared__ value_type _n[16][17]; const size_type bx = blockIdx.x; const size_type by = blockIdx.y; const size_type tx = threadIdx.x; const size_type ty = threadIdx.y; const size_type row = by * 16 + ty; const size_type col = bx * 16 + tx; const size_type iter_n = (dim+15)/16; value_type R = 0.0; value_type I = 0.0; for ( size_type i = 0; i != iter_n; ++i ) { if ( i * 16 + tx < dim && row < dim ) { _M[ty][tx] = (*( M + row * dim + i * 16 + tx )).x; _m[ty][tx] = (*( M + row * dim + i * 16 + tx )).y; } else { _M[ty][tx] = 0.0; _m[ty][tx] = 0.0; } if ( i * 16 + ty < dim && col < dim ) { _N[ty][tx] = (*( N + ( i * 16 + ty ) * dim + col )).x; _n[ty][tx] = (*( N + ( i * 16 + ty ) * dim + col )).y; } else { _N[ty][tx] = 0.0; _n[ty][tx] = 0.0; } __syncthreads(); #pragma unroll for ( size_type j = 0; j != 16; ++j ) { R += _M[ty][j] * _N[j][tx] - _m[ty][j] * _n[j][tx]; I += _M[ty][j] * _n[j][tx] + _m[ty][j] * _N[j][tx]; } __syncthreads(); } if ( row < dim && col < dim ) { (*( P + row * dim + col )).x = alpha * R; (*( P + row * dim + col )).y = alpha * I; } } __global__ void //<<<1,128>>> Zcopy( unsigned long dims, double2* src, double2* dst ) { unsigned long const i = threadIdx.x; for( unsigned long j = i; j < dims; j += 128 ) { (*(dst+j)).x = (*(src+j)).x; (*(dst+j)).y = (*(src+j)).y; } } __device__ void device_Zcopy( unsigned long dims, double2* src, double2* dst ) { for ( unsigned long index = 0; index < dims; ++index ) { dst[index].x = src[index].x; dst[index].y = src[index].y; } } __global__ void//<<<1, 128>>> Zaxpy( unsigned long dims, double real, double imag, double2* dst, double2* src ) // dst += (real,imag) * src { unsigned long const i = threadIdx.x; double R = 0.0; double I = 0.0; for( unsigned long j = i; j < dims; j += 128 ) { R = (*(src+j)).x; I = (*(src+j)).y; (*(dst+j)).x += real * R - imag * I; (*(dst+j)).y += real * I + imag * R; } } __device__ void//<<<1, 128>>> device_Zaxpy( unsigned long dims, double real, double imag, double2* dst, double2* src ) // dst += (real,imag) * src { for ( unsigned long index = 0; index < dims; ++index ) { double const R = src[index].x; double const I = src[index].y; dst[index].x = real * R - imag * I; dst[index].y = real * I + imag * R; } } #if 0 __global__ void compose_a( double* ug, unsigned long* ar, double* diag, double thickness, double2* a, unsigned long dim ) { int const row_index = threadIdx.x; for ( unsigned long col_index = 0; col_index != dim; ++col_index ) { unsigned long a_offset = row_index * dim + col_index; unsigned long const ug_index = *(ar+a_offset); *(a+a_offset) = make_cuDoubleComplex( -thickness * (*(ug+ug_index+ug_index+1)), thickness *( *(ug+ug_index+ug_index)) ); } *(a+row_index*dim+row_index) = make_cuDoubleComplex( 0.0, thickness *( *(diag+row_index) ) ); } #endif __device__ void device_compose_a( double* ug, unsigned long* ar, double* diag, double thickness, double2* a, unsigned long dim, double alpha, double beta, double gamma, double* beams ) { thickness *= 100.0; for ( unsigned long row_index = 0; row_index != dim; ++row_index ) { for ( unsigned long col_index = 0; col_index != 
dim; ++col_index ) { unsigned long a_offset = row_index * dim + col_index; unsigned long const ug_index = *(ar+a_offset); *(a+a_offset) = make_cuDoubleComplex( -thickness * (*(ug+ug_index+ug_index+1)), thickness *( *(ug+ug_index+ug_index)) ); } unsigned long const beams_index = ar[row_index*dim]; double const kx = beams[beams_index*10+1]; double const ky = beams[beams_index*10+2]; *(a+row_index*dim+row_index) = make_cuDoubleComplex( 0.0, thickness *( *(diag+row_index) + kx*alpha + ky*beta + gamma ) ); } } #if 0 __device__ void device_compose_a( double* ug, unsigned long* ar, double* diag, double thickness, double2* a, unsigned long dim, double alpha, double beta, double* beams ) { thickness *= 100.0; for ( unsigned long row_index = 0; row_index != dim; ++row_index ) { for ( unsigned long col_index = 0; col_index != dim; ++col_index ) { unsigned long a_offset = row_index * dim + col_index; unsigned long const ug_index = *(ar+a_offset); *(a+a_offset) = make_cuDoubleComplex( -thickness * (*(ug+ug_index+ug_index+1)), thickness *( *(ug+ug_index+ug_index)) ); } unsigned long const beams_index = ar[row_index*dim]; double const kx = beams[beams_index*10+1]; double const ky = beams[beams_index*10+2]; *(a+row_index*dim+row_index) = make_cuDoubleComplex( 0.0, thickness * ( *(diag+row_index) + kx*alpha + ky*beta ) ); } } #endif //TODO: optimization #if 0 Comment: When working with original global kernel 'extract_intensity_diff_with_offset_zigmoid', the generated residuals( all kinds ) are a little bit smaller(1.0e-6 order) than the new device routine 'device_extract_intensity_diff_with_offset_zigmoid' #endif __global__ void extract_intensity_diff_with_offset_zigmoid( double2* s, double* I_exp, double* I_diff, double* I_zigmoid, unsigned long dim, unsigned long column_index, double ac_offset, double dc_offset, double c ) { int const I_offset = threadIdx.x; int const S_offset = column_index + threadIdx.x * dim; double const norm = cuCabs(*(s+S_offset)); double const val = *(I_exp+I_offset); double const df = val - norm * norm * ac_offset - dc_offset; *(I_diff+I_offset) = df; *(I_zigmoid+I_offset) = df / ( 1.0 + exp( 12.56637061435917295384*c*val ) ); } __device__ void device_extract_intensity_diff_with_offset_zigmoid( double2* s, double* I_exp, double* I_diff, double* I_zigmoid, unsigned long dim, unsigned long column_index, double ac_offset, double dc_offset, double c ) { for ( unsigned long index = 0; index < dim; ++index ) { unsigned long const I_offset = index; unsigned long const S_offset = column_index + index * dim; double const real = s[S_offset].x; double const imag = s[S_offset].y; double const norm = real*real + imag*imag; double const val = I_exp[I_offset]; double const df = val - norm * ac_offset - dc_offset; I_diff[I_offset] = df; I_zigmoid[I_offset] = df / ( 1.0 + exp( 12.56637061435917295384*c*val ) ); } } //TODO: optimization __global__ void sum_diag( double2* a, unsigned long dim, double real, double imag ) { int const index = threadIdx.x; int const offset = index * dim + index; *(a+offset) = make_cuDoubleComplex( cuCreal(*(a+offset))+real, cuCimag(*(a+offset))+imag ); } __device__ void device_sum_diag( double2* a, unsigned long dim, double real, double imag ) { for ( unsigned long index = 0; index < dim; ++index ) { unsigned long const offset = index * dim + index; a[offset].x += real; a[offset].y += imag; } } /* * Input/Output: * ** ug[M] * ar[n][n] * diag[n] ==>> I_diff[n] ** thickness * dim -- n * I_exp[n] ** column_index * * cache: * a_[n][n] -- p2p3 * a^2_[n][n] -- s * a^3_[n][n] 
-- s_ * P1[n][n] * P2[n][n] * P3[n][n] * * 1) compose A * 2) scale to A_ * 3) compute A_^2 A_^3 * 4) compute (P1) (P2) (P3) * 5) square back * 6) extract one column */ __global__ void make_individual_pattern_intensity_diff( double* cuda_ug, unsigned long* cuda_ar, double* cuda_diag, double thickness, unsigned long* cuda_dim, double* cuda_I_exp, double* cuda_I_diff, unsigned long column_index, double2* cuda_cache, unsigned long max_dim, unsigned long tilt_size, double c, double * cuda_I_zigmoid, double* beams, double* kt_factor ) { unsigned long const tilt_index = blockDim.x * blockIdx.x + threadIdx.x; if ( tilt_index >= tilt_size ) return; unsigned long const dim = *(cuda_dim + tilt_index); double* ug = cuda_ug; unsigned long* ar = cuda_ar + tilt_index * max_dim * max_dim; double* diag = cuda_diag + tilt_index * max_dim; double* I_exp = cuda_I_exp + tilt_index * max_dim; double* I_diff = cuda_I_diff + tilt_index * max_dim; double* I_zigmoid = cuda_I_zigmoid + tilt_index * max_dim; double2* cache = cuda_cache + 6 * tilt_index * max_dim * max_dim; unsigned long dimdim = dim*dim; //cache should be of size 6*N^2 double2* a_ = cache; double2* aa_ = a_ + dimdim; double2* aaa_ = aa_ + dimdim; double2* p1 = aaa_ + dimdim; double2* p2 = p1 + dimdim; double2* p3 = p2 + dimdim; //reuse memory in latter steps, when a_, aa_ and aaa_ are idle //double2* p2p3 = a_; double2* p2p3 = aaa_; double2* s = aa_; double2* s_ = aaa_; //1) //kernel_assert( (compose_a<<<1, dim>>>( ug, ar, diag, thickness, a_, dim )) ); //cuda_assert( hipDeviceSynchronize() ); //device_compose_a( ug, ar, diag, thickness, a_, dim ); double const alpha = kt_factor[tilt_index*3]; double const beta = kt_factor[tilt_index*3+1]; double const gamma = kt_factor[tilt_index*3+2]; device_compose_a( ug, ar, diag, thickness, a_, dim, alpha, beta, gamma, beams ); //2) //TODO double* the_norm = (double*)aa_; //kernel_assert( (Dznrm2<<<1,128>>>( dimdim, a_, the_norm )) ); ////kernel_assert( (Dasum<<<1,128>>>( dimdim, a_, the_norm )) ); //cuda_assert( hipDeviceSynchronize() ); device_Dznrm2( dimdim, a_, the_norm ); //double const ratio = (*the_norm) * 53.71920351148152; double const ratio = (*the_norm) / 5.371920351148152; unsigned long const scaler = ratio < 1.0 ? 
0 : ceil(log2(ratio)); unsigned long const scaling_factor = 1 << scaler; double const scale = scaling_factor; //kernel_assert( (Zscal<<<1, 128>>>( dimdim, 1.0/scale, a_ )) ); //a_ /= scale //cuda_assert( hipDeviceSynchronize() ); device_Zscal( dimdim, 1.0/scale, a_ ); //3) dim3 const mm_grids( (dim+15)/16, (dim+15)/16 ); dim3 const mm_threads( 16, 16 ); kernel_assert(hipLaunchKernelGGL(( (Zgemm), dim3(mm_grids), dim3(mm_threads), 0, 0, aa_, a_, a_, dim, 1.0 )) ); cuda_assert( hipDeviceSynchronize() ); kernel_assert(hipLaunchKernelGGL(( (Zgemm), dim3(mm_grids), dim3(mm_threads), 0, 0, aaa_, aa_, a_, dim, 1.0 )) ); cuda_assert( hipDeviceSynchronize() ); //4) /* * Maple: * Digits := 25 * evalf(solve(_Z^9+9*_Z^8+72*_Z^7+504*_Z^6+3024*_Z^5+15120*_Z^4+60480*_Z^3+181440*_Z^2+362880*_Z+362880 = 0)) * Returns: * 2.697333461536989227389605+5.184162062649414177834087*I, //c1 * -.3810698456631129990312942+4.384644533145397950369203*I, //c2 * -2.110839800302654737498705+3.089910928725500922777702*I, //c3 * -3.038648072936697089212469+1.586801195758838328803868*I, //c4 * -3.333551485269048803294274, //c5 * -3.038648072936697089212469-1.586801195758838328803868*I, //c6 * -2.110839800302654737498705-3.089910928725500922777702*I, //c7 * -.3810698456631129990312942-4.384644533145397950369203*I, //c8 * 2.697333461536989227389605-5.184162062649414177834087*I //c9 * * expand((x-c1)*(x-c2)*(x-c3)) >> p1 ( p1_c ) * x^3-.205423815571221490859606*x^2-(12.65871752452031305098099*I)*x^2-58.21460179641193947200471*x-(3.189848964212376356715960*I)*x-19.71085376106750328141397+94.20645646169128946503649*I * * expand((x-c4)*(x-c5)*(x-c6)) >> p2 ( p2_c ) * x^3+9.410847631142442981719212*x^2+39.17363072664900708597702-6.123261017392618755198919*10^(-24)*I+32.01029973951970099352671*x+(4.*10^(-24)*I)*x * * expand((x-c7)*(x-c8)*(x-c9)) >> p3 ( p3_c ) * x^3-.205423815571221490859601*x^2+(12.65871752452031305098099*I)*x^2-58.21460179641193947200470*x+(3.18984896421237635671600*I)*x-19.71085376106750328141404-94.20645646169128946503646*I * * expand((x-c1)*(x-c2)*(x-c3)*(x-c4)*(x-c5)*(x-c6)*(x-c7)*(x-c8)*(x-c9)) * 3.628800000000000000000003*10^5-1.365022562699469279472268*10^(-19)*I+3.628800000000000000000003*10^5*x+x^9+9.00000000000000000000000*x^8+72.00000000000000000000006*x^7+503.9999999999999999999995*x^6+3024.000000000000000000002*x^5+15120.00000000000000000000*x^4+60479.99999999999999999995*x^3+1.814400000000000000000001*10^5*x^2-(5.*10^(-22)*I)*x^6-(1.*10^(-20)*I)*x^4-(1.0*10^(-19)*I)*x^3+(2.*10^(-24)*I)*x^8-(3.0*10^(-19)*I)*x^2-(7.*10^(-21)*I)*x^5-(4.*10^(-19)*I)*x+(2.*10^(-23)*I)*x^7 */ //4 - p1) //kernel_assert( (Zcopy<<<1,128>>>( dimdim, aaa_, p1 )) ); //cuda_assert( hipDeviceSynchronize() ); device_Zcopy( dimdim, aaa_, p1 ); //kernel_assert( (Zaxpy<<<1,128>>>( dimdim, -0.205423815571221490859606, -12.65871752452031305098099, p1, aa_ )) ); //cuda_assert( hipDeviceSynchronize() ); device_Zaxpy( dimdim, -0.205423815571221490859606, -12.65871752452031305098099, p1, aa_ ); //kernel_assert( (Zaxpy<<<1,128>>>( dimdim, -58.21460179641193947200471, -3.189848964212376356715960, p1, a_ )) ); //cuda_assert( hipDeviceSynchronize() ); device_Zaxpy( dimdim, -58.21460179641193947200471, -3.189848964212376356715960, p1, a_ ); //kernel_assert( (sum_diag<<<1,dim>>>( p1, dim, -19.71085376106750328141397, 94.20645646169128946503649 )) ); //cuda_assert( hipDeviceSynchronize() ); device_sum_diag( p1, dim, -19.71085376106750328141397, 94.20645646169128946503649 ); //4 - p2) //kernel_assert( (Zcopy<<<1,128>>>( dimdim, aaa_, p2 )) ); 
//cuda_assert( hipDeviceSynchronize() ); device_Zcopy( dimdim, aaa_, p2 ); //kernel_assert( (Zaxpy<<<1,128>>>( dimdim, 9.410847631142442981719212, 0.0, p2, aa_ )) ); //cuda_assert( hipDeviceSynchronize() ); device_Zaxpy( dimdim, 9.410847631142442981719212, 0.0, p2, aa_ ); //kernel_assert( (Zaxpy<<<1,128>>>( dimdim, 32.01029973951970099352671, 0.0, p2, a_ )) ); //cuda_assert( hipDeviceSynchronize() ); device_Zaxpy( dimdim, 32.01029973951970099352671, 0.0, p2, a_ ); //kernel_assert( (sum_diag<<<1,dim>>>( p2, dim, 39.17363072664900708597702, 0.0 )) ); //cuda_assert( hipDeviceSynchronize() ); device_sum_diag( p2, dim, 39.17363072664900708597702, 0.0 ); //4 - p3) //kernel_assert( (Zcopy<<<1,128>>>( dimdim, aaa_, p3 )) ); //cuda_assert( hipDeviceSynchronize() ); device_Zcopy( dimdim, aaa_, p3 ); //kernel_assert( (Zaxpy<<<1,128>>>( dimdim, -0.205423815571221490859601, 12.65871752452031305098099, p3, aa_ )) ); //cuda_assert( hipDeviceSynchronize() ); device_Zaxpy( dimdim, -0.205423815571221490859601, 12.65871752452031305098099, p3, aa_ ); //kernel_assert( (Zaxpy<<<1,128>>>( dimdim, -58.21460179641193947200470, 3.18984896421237635671600, p3, a_ )) ); //cuda_assert( hipDeviceSynchronize() ); device_Zaxpy( dimdim, -58.21460179641193947200470, 3.18984896421237635671600, p3, a_ ); //kernel_assert( (sum_diag<<<1,dim>>>( p3, dim, -19.71085376106750328141404, -94.20645646169128946503646 )) ); //cuda_assert( hipDeviceSynchronize() ); device_sum_diag( p3, dim, -19.71085376106750328141404, -94.20645646169128946503646 ); //4 - s) kernel_assert(hipLaunchKernelGGL(( (Zgemm), dim3(mm_grids), dim3(mm_threads), 0, 0, p2p3, p2, p3, dim, 0.0016600397351866578333 )) ); cuda_assert( hipDeviceSynchronize() ); kernel_assert(hipLaunchKernelGGL(( (Zgemm), dim3(mm_grids), dim3(mm_threads), 0, 0, s, p1, p2p3, dim, 0.0016600397351866578333 )) ); cuda_assert( hipDeviceSynchronize() ); //5) if ( scaler != 0 ) { for ( unsigned long index = 0; index != scaler; ++index ) { kernel_assert(hipLaunchKernelGGL(( (Zgemm), dim3(mm_grids), dim3(mm_threads), 0, 0, s_, s, s, dim, 1.0 )) ); cuda_assert( hipDeviceSynchronize() ); double2* tmp = s_; s_ = s; s = tmp; } } //6) double const ac_offset = cuda_ug[0]; double const dc_offset = cuda_ug[1]; //kernel_assert( (extract_intensity_diff_with_offset_zigmoid<<<1,dim>>>( s, I_exp, I_diff, I_zigmoid, dim, column_index, ac_offset, dc_offset, c )) ); //cuda_assert( hipDeviceSynchronize() ); device_extract_intensity_diff_with_offset_zigmoid( s, I_exp, I_diff, I_zigmoid, dim, column_index, ac_offset, dc_offset, c ); } void make_pattern_intensity_diff( double* cuda_ug, unsigned long* cuda_ar, double* cuda_diag, double thickness, unsigned long* cuda_dim, double* cuda_I_exp, double* cuda_I_diff, unsigned long column_index, double2* cuda_cache, unsigned long tilt_size, unsigned long max_dim, double c, double* cuda_I_zigmoid, double* beams, double* kt_factor ) { //unsigned long const threads = 64; //unsigned long const threads = 128; unsigned long const threads = 256; unsigned long const grids = (tilt_size + threads - 1)/threads; kernel_assert( (hipLaunchKernelGGL(( make_individual_pattern_intensity_diff), dim3(grids), dim3(threads), 0, 0, cuda_ug, cuda_ar, cuda_diag, thickness, cuda_dim, cuda_I_exp, cuda_I_diff, column_index, cuda_cache, max_dim, tilt_size, c, cuda_I_zigmoid, beams, kt_factor ) ) ); //cuda_assert( hipDeviceSynchronize() ); }
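The make_individual_pattern_intensity_diff kernel above appears to evaluate exp(A) per tilt by scaling and squaring: A is divided by 2^s so that the quantity measured by device_Dznrm2 falls below roughly 5.3719, a degree-9 polynomial approximation of the exponential is applied, and the result is squared s times. The constants follow from the Maple comment in the source: the polynomial being factored, z^9 + 9 z^8 + ... + 362880 z + 362880, equals 9! * sum_{k=0..9} z^k / k!, its nine roots are grouped into the three cubics p1, p2 and p3, and the missing 1/9! = 1/362880 normalization is applied as 0.0016600397351866578333^2 (i.e. sqrt(1/362880) in each of the two final Zgemm products). With B = A / 2^s, the computed quantity is therefore

    exp(A) ~= [ (1/9!) * p1(B) * p2(B) * p3(B) ]^(2^s)

before the requested column is extracted and compared against the experimental intensities.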
7aa330cd1be7a06f780896631a37ee12bcf83832.cu
#include <f/device/device_assert/cuda_assert.hpp> #include <f/device/device_assert/cublas_assert.hpp> #include <f/device/device_assert/kernel_assert.hpp> #include <cuda_runtime.h> #include <cublas_v2.h> #include <cuComplex.h> #include <math_functions.h> __global__ void Dznrm2( unsigned long m, double2 *dA, double *dxnorm )// Dznrm2<<<1,128>>>(...) { unsigned long i = threadIdx.x; __shared__ double x[128]; double lsum = 0.0; for( unsigned long j = i; j < m; j += 128 ) { double const re = dA[j].x; double const im = dA[j].y; lsum += re*re + im*im; } x[i] = lsum; __syncthreads(); if ( i < 64 ) { x[i] += x[i+ 64]; } __syncthreads(); if ( i < 32 ) { x[i] += x[i+ 32]; } __syncthreads(); if ( i < 16 ) { x[i] += x[i+ 16]; } __syncthreads(); if ( i < 8 ) { x[i] += x[i+ 8]; } __syncthreads(); if ( i < 4 ) { x[i] += x[i+ 4]; } __syncthreads(); if ( i < 2 ) { x[i] += x[i+ 2]; } __syncthreads(); if ( i < 1 ) { x[i] += x[i+ 1]; } __syncthreads(); if ( i == 0 ) *dxnorm = sqrt(x[0]); } __device__ void device_Dznrm2( unsigned long m, double2 *dA, double *dxnorm ) { double ans = 0.0; for ( unsigned long index = 0; index != m; ++index ) { double const real = dA[index].x; double const imag = dA[index].y; ans += real*real + imag*imag; } dxnorm[0] = ans; } __global__ void Dasum( unsigned long m, double2 *dA, double *dxnorm ) { unsigned long i = threadIdx.x; __shared__ double x[128]; double lsum = 0.0; for( unsigned long j = i; j < m; j += 128 ) { double const re = dA[j].x; double const im = dA[j].y; lsum += sqrt(re*re + im*im); } x[i] = lsum; __syncthreads(); if ( i < 64 ) { x[i] += x[i+ 64]; } __syncthreads(); if ( i < 32 ) { x[i] += x[i+ 32]; } __syncthreads(); if ( i < 16 ) { x[i] += x[i+ 16]; } __syncthreads(); if ( i < 8 ) { x[i] += x[i+ 8]; } __syncthreads(); if ( i < 4 ) { x[i] += x[i+ 4]; } __syncthreads(); if ( i < 2 ) { x[i] += x[i+ 2]; } __syncthreads(); if ( i < 1 ) { x[i] += x[i+ 1]; } __syncthreads(); if ( i == 0 ) *dxnorm = x[0]; } //should call with Zscale<<<1, 128>>>(...); __global__ void Zscal( unsigned long m, double real, double2* dA ) { const int i = threadIdx.x; for( unsigned long j = i; j < m; j += 128 ) { dA[j].x *= real; dA[j].y *= real; } } __device__ void device_Zscal( unsigned long m, double real, double2* dA ) { //for ( unsigned long index = 0; index != m; ++index ) <<-- WHY this one doesnot work???????????????????????????????????????????????????????????????????????????????????????????????????????????????????????????????????????? 
for ( unsigned long index = 0; index < m; ++index ) { dA[index].x *= real; dA[index].y *= real; } } //TODO: optimization __global__ //<<<((dim+15)/16,(dim+15)/16), (16,16)>>> void Zgemm( double2* P, double2* M, double2* N, unsigned long dim, double alpha ) { typedef double value_type; typedef double2 complex_type; typedef unsigned long size_type; __shared__ value_type _M[16][17]; __shared__ value_type _m[16][17]; __shared__ value_type _N[16][17]; __shared__ value_type _n[16][17]; const size_type bx = blockIdx.x; const size_type by = blockIdx.y; const size_type tx = threadIdx.x; const size_type ty = threadIdx.y; const size_type row = by * 16 + ty; const size_type col = bx * 16 + tx; const size_type iter_n = (dim+15)/16; value_type R = 0.0; value_type I = 0.0; for ( size_type i = 0; i != iter_n; ++i ) { if ( i * 16 + tx < dim && row < dim ) { _M[ty][tx] = (*( M + row * dim + i * 16 + tx )).x; _m[ty][tx] = (*( M + row * dim + i * 16 + tx )).y; } else { _M[ty][tx] = 0.0; _m[ty][tx] = 0.0; } if ( i * 16 + ty < dim && col < dim ) { _N[ty][tx] = (*( N + ( i * 16 + ty ) * dim + col )).x; _n[ty][tx] = (*( N + ( i * 16 + ty ) * dim + col )).y; } else { _N[ty][tx] = 0.0; _n[ty][tx] = 0.0; } __syncthreads(); #pragma unroll for ( size_type j = 0; j != 16; ++j ) { R += _M[ty][j] * _N[j][tx] - _m[ty][j] * _n[j][tx]; I += _M[ty][j] * _n[j][tx] + _m[ty][j] * _N[j][tx]; } __syncthreads(); } if ( row < dim && col < dim ) { (*( P + row * dim + col )).x = alpha * R; (*( P + row * dim + col )).y = alpha * I; } } __global__ void //<<<1,128>>> Zcopy( unsigned long dims, double2* src, double2* dst ) { unsigned long const i = threadIdx.x; for( unsigned long j = i; j < dims; j += 128 ) { (*(dst+j)).x = (*(src+j)).x; (*(dst+j)).y = (*(src+j)).y; } } __device__ void device_Zcopy( unsigned long dims, double2* src, double2* dst ) { for ( unsigned long index = 0; index < dims; ++index ) { dst[index].x = src[index].x; dst[index].y = src[index].y; } } __global__ void//<<<1, 128>>> Zaxpy( unsigned long dims, double real, double imag, double2* dst, double2* src ) // dst += (real,imag) * src { unsigned long const i = threadIdx.x; double R = 0.0; double I = 0.0; for( unsigned long j = i; j < dims; j += 128 ) { R = (*(src+j)).x; I = (*(src+j)).y; (*(dst+j)).x += real * R - imag * I; (*(dst+j)).y += real * I + imag * R; } } __device__ void//<<<1, 128>>> device_Zaxpy( unsigned long dims, double real, double imag, double2* dst, double2* src ) // dst += (real,imag) * src { for ( unsigned long index = 0; index < dims; ++index ) { double const R = src[index].x; double const I = src[index].y; dst[index].x = real * R - imag * I; dst[index].y = real * I + imag * R; } } #if 0 __global__ void compose_a( double* ug, unsigned long* ar, double* diag, double thickness, double2* a, unsigned long dim ) { int const row_index = threadIdx.x; for ( unsigned long col_index = 0; col_index != dim; ++col_index ) { unsigned long a_offset = row_index * dim + col_index; unsigned long const ug_index = *(ar+a_offset); *(a+a_offset) = make_cuDoubleComplex( -thickness * (*(ug+ug_index+ug_index+1)), thickness *( *(ug+ug_index+ug_index)) ); } *(a+row_index*dim+row_index) = make_cuDoubleComplex( 0.0, thickness *( *(diag+row_index) ) ); } #endif __device__ void device_compose_a( double* ug, unsigned long* ar, double* diag, double thickness, double2* a, unsigned long dim, double alpha, double beta, double gamma, double* beams ) { thickness *= 100.0; for ( unsigned long row_index = 0; row_index != dim; ++row_index ) { for ( unsigned long col_index = 0; col_index != 
dim; ++col_index ) { unsigned long a_offset = row_index * dim + col_index; unsigned long const ug_index = *(ar+a_offset); *(a+a_offset) = make_cuDoubleComplex( -thickness * (*(ug+ug_index+ug_index+1)), thickness *( *(ug+ug_index+ug_index)) ); } unsigned long const beams_index = ar[row_index*dim]; double const kx = beams[beams_index*10+1]; double const ky = beams[beams_index*10+2]; *(a+row_index*dim+row_index) = make_cuDoubleComplex( 0.0, thickness *( *(diag+row_index) + kx*alpha + ky*beta + gamma ) ); } } #if 0 __device__ void device_compose_a( double* ug, unsigned long* ar, double* diag, double thickness, double2* a, unsigned long dim, double alpha, double beta, double* beams ) { thickness *= 100.0; for ( unsigned long row_index = 0; row_index != dim; ++row_index ) { for ( unsigned long col_index = 0; col_index != dim; ++col_index ) { unsigned long a_offset = row_index * dim + col_index; unsigned long const ug_index = *(ar+a_offset); *(a+a_offset) = make_cuDoubleComplex( -thickness * (*(ug+ug_index+ug_index+1)), thickness *( *(ug+ug_index+ug_index)) ); } unsigned long const beams_index = ar[row_index*dim]; double const kx = beams[beams_index*10+1]; double const ky = beams[beams_index*10+2]; *(a+row_index*dim+row_index) = make_cuDoubleComplex( 0.0, thickness * ( *(diag+row_index) + kx*alpha + ky*beta ) ); } } #endif //TODO: optimization #if 0 Comment: When working with original global kernel 'extract_intensity_diff_with_offset_zigmoid', the generated residuals( all kinds ) are a little bit smaller(1.0e-6 order) than the new device routine 'device_extract_intensity_diff_with_offset_zigmoid' #endif __global__ void extract_intensity_diff_with_offset_zigmoid( double2* s, double* I_exp, double* I_diff, double* I_zigmoid, unsigned long dim, unsigned long column_index, double ac_offset, double dc_offset, double c ) { int const I_offset = threadIdx.x; int const S_offset = column_index + threadIdx.x * dim; double const norm = cuCabs(*(s+S_offset)); double const val = *(I_exp+I_offset); double const df = val - norm * norm * ac_offset - dc_offset; *(I_diff+I_offset) = df; *(I_zigmoid+I_offset) = df / ( 1.0 + exp( 12.56637061435917295384*c*val ) ); } __device__ void device_extract_intensity_diff_with_offset_zigmoid( double2* s, double* I_exp, double* I_diff, double* I_zigmoid, unsigned long dim, unsigned long column_index, double ac_offset, double dc_offset, double c ) { for ( unsigned long index = 0; index < dim; ++index ) { unsigned long const I_offset = index; unsigned long const S_offset = column_index + index * dim; double const real = s[S_offset].x; double const imag = s[S_offset].y; double const norm = real*real + imag*imag; double const val = I_exp[I_offset]; double const df = val - norm * ac_offset - dc_offset; I_diff[I_offset] = df; I_zigmoid[I_offset] = df / ( 1.0 + exp( 12.56637061435917295384*c*val ) ); } } //TODO: optimization __global__ void sum_diag( double2* a, unsigned long dim, double real, double imag ) { int const index = threadIdx.x; int const offset = index * dim + index; *(a+offset) = make_cuDoubleComplex( cuCreal(*(a+offset))+real, cuCimag(*(a+offset))+imag ); } __device__ void device_sum_diag( double2* a, unsigned long dim, double real, double imag ) { for ( unsigned long index = 0; index < dim; ++index ) { unsigned long const offset = index * dim + index; a[offset].x += real; a[offset].y += imag; } } /* * Input/Output: * ** ug[M] * ar[n][n] * diag[n] ==>> I_diff[n] ** thickness * dim -- n * I_exp[n] ** column_index * * cache: * a_[n][n] -- p2p3 * a^2_[n][n] -- s * a^3_[n][n] 
-- s_ * P1[n][n] * P2[n][n] * P3[n][n] * * 1) compose A * 2) scale to A_ * 3) compute A_^2 A_^3 * 4) compute (P1) (P2) (P3) * 5) square back * 6) extract one column */ __global__ void make_individual_pattern_intensity_diff( double* cuda_ug, unsigned long* cuda_ar, double* cuda_diag, double thickness, unsigned long* cuda_dim, double* cuda_I_exp, double* cuda_I_diff, unsigned long column_index, double2* cuda_cache, unsigned long max_dim, unsigned long tilt_size, double c, double * cuda_I_zigmoid, double* beams, double* kt_factor ) { unsigned long const tilt_index = blockDim.x * blockIdx.x + threadIdx.x; if ( tilt_index >= tilt_size ) return; unsigned long const dim = *(cuda_dim + tilt_index); double* ug = cuda_ug; unsigned long* ar = cuda_ar + tilt_index * max_dim * max_dim; double* diag = cuda_diag + tilt_index * max_dim; double* I_exp = cuda_I_exp + tilt_index * max_dim; double* I_diff = cuda_I_diff + tilt_index * max_dim; double* I_zigmoid = cuda_I_zigmoid + tilt_index * max_dim; double2* cache = cuda_cache + 6 * tilt_index * max_dim * max_dim; unsigned long dimdim = dim*dim; //cache should be of size 6*N^2 double2* a_ = cache; double2* aa_ = a_ + dimdim; double2* aaa_ = aa_ + dimdim; double2* p1 = aaa_ + dimdim; double2* p2 = p1 + dimdim; double2* p3 = p2 + dimdim; //reuse memory in latter steps, when a_, aa_ and aaa_ are idle //double2* p2p3 = a_; double2* p2p3 = aaa_; double2* s = aa_; double2* s_ = aaa_; //1) //kernel_assert( (compose_a<<<1, dim>>>( ug, ar, diag, thickness, a_, dim )) ); //cuda_assert( cudaDeviceSynchronize() ); //device_compose_a( ug, ar, diag, thickness, a_, dim ); double const alpha = kt_factor[tilt_index*3]; double const beta = kt_factor[tilt_index*3+1]; double const gamma = kt_factor[tilt_index*3+2]; device_compose_a( ug, ar, diag, thickness, a_, dim, alpha, beta, gamma, beams ); //2) //TODO double* the_norm = (double*)aa_; //kernel_assert( (Dznrm2<<<1,128>>>( dimdim, a_, the_norm )) ); ////kernel_assert( (Dasum<<<1,128>>>( dimdim, a_, the_norm )) ); //cuda_assert( cudaDeviceSynchronize() ); device_Dznrm2( dimdim, a_, the_norm ); //double const ratio = (*the_norm) * 53.71920351148152; double const ratio = (*the_norm) / 5.371920351148152; unsigned long const scaler = ratio < 1.0 ? 
0 : ceil(log2(ratio)); unsigned long const scaling_factor = 1 << scaler; double const scale = scaling_factor; //kernel_assert( (Zscal<<<1, 128>>>( dimdim, 1.0/scale, a_ )) ); //a_ /= scale //cuda_assert( cudaDeviceSynchronize() ); device_Zscal( dimdim, 1.0/scale, a_ ); //3) dim3 const mm_grids( (dim+15)/16, (dim+15)/16 ); dim3 const mm_threads( 16, 16 ); kernel_assert( (Zgemm<<<mm_grids, mm_threads>>>( aa_, a_, a_, dim, 1.0 )) ); cuda_assert( cudaDeviceSynchronize() ); kernel_assert( (Zgemm<<<mm_grids, mm_threads>>>( aaa_, aa_, a_, dim, 1.0 )) ); cuda_assert( cudaDeviceSynchronize() ); //4) /* * Maple: * Digits := 25 * evalf(solve(_Z^9+9*_Z^8+72*_Z^7+504*_Z^6+3024*_Z^5+15120*_Z^4+60480*_Z^3+181440*_Z^2+362880*_Z+362880 = 0)) * Returns: * 2.697333461536989227389605+5.184162062649414177834087*I, //c1 * -.3810698456631129990312942+4.384644533145397950369203*I, //c2 * -2.110839800302654737498705+3.089910928725500922777702*I, //c3 * -3.038648072936697089212469+1.586801195758838328803868*I, //c4 * -3.333551485269048803294274, //c5 * -3.038648072936697089212469-1.586801195758838328803868*I, //c6 * -2.110839800302654737498705-3.089910928725500922777702*I, //c7 * -.3810698456631129990312942-4.384644533145397950369203*I, //c8 * 2.697333461536989227389605-5.184162062649414177834087*I //c9 * * expand((x-c1)*(x-c2)*(x-c3)) >> p1 ( p1_c ) * x^3-.205423815571221490859606*x^2-(12.65871752452031305098099*I)*x^2-58.21460179641193947200471*x-(3.189848964212376356715960*I)*x-19.71085376106750328141397+94.20645646169128946503649*I * * expand((x-c4)*(x-c5)*(x-c6)) >> p2 ( p2_c ) * x^3+9.410847631142442981719212*x^2+39.17363072664900708597702-6.123261017392618755198919*10^(-24)*I+32.01029973951970099352671*x+(4.*10^(-24)*I)*x * * expand((x-c7)*(x-c8)*(x-c9)) >> p3 ( p3_c ) * x^3-.205423815571221490859601*x^2+(12.65871752452031305098099*I)*x^2-58.21460179641193947200470*x+(3.18984896421237635671600*I)*x-19.71085376106750328141404-94.20645646169128946503646*I * * expand((x-c1)*(x-c2)*(x-c3)*(x-c4)*(x-c5)*(x-c6)*(x-c7)*(x-c8)*(x-c9)) * 3.628800000000000000000003*10^5-1.365022562699469279472268*10^(-19)*I+3.628800000000000000000003*10^5*x+x^9+9.00000000000000000000000*x^8+72.00000000000000000000006*x^7+503.9999999999999999999995*x^6+3024.000000000000000000002*x^5+15120.00000000000000000000*x^4+60479.99999999999999999995*x^3+1.814400000000000000000001*10^5*x^2-(5.*10^(-22)*I)*x^6-(1.*10^(-20)*I)*x^4-(1.0*10^(-19)*I)*x^3+(2.*10^(-24)*I)*x^8-(3.0*10^(-19)*I)*x^2-(7.*10^(-21)*I)*x^5-(4.*10^(-19)*I)*x+(2.*10^(-23)*I)*x^7 */ //4 - p1) //kernel_assert( (Zcopy<<<1,128>>>( dimdim, aaa_, p1 )) ); //cuda_assert( cudaDeviceSynchronize() ); device_Zcopy( dimdim, aaa_, p1 ); //kernel_assert( (Zaxpy<<<1,128>>>( dimdim, -0.205423815571221490859606, -12.65871752452031305098099, p1, aa_ )) ); //cuda_assert( cudaDeviceSynchronize() ); device_Zaxpy( dimdim, -0.205423815571221490859606, -12.65871752452031305098099, p1, aa_ ); //kernel_assert( (Zaxpy<<<1,128>>>( dimdim, -58.21460179641193947200471, -3.189848964212376356715960, p1, a_ )) ); //cuda_assert( cudaDeviceSynchronize() ); device_Zaxpy( dimdim, -58.21460179641193947200471, -3.189848964212376356715960, p1, a_ ); //kernel_assert( (sum_diag<<<1,dim>>>( p1, dim, -19.71085376106750328141397, 94.20645646169128946503649 )) ); //cuda_assert( cudaDeviceSynchronize() ); device_sum_diag( p1, dim, -19.71085376106750328141397, 94.20645646169128946503649 ); //4 - p2) //kernel_assert( (Zcopy<<<1,128>>>( dimdim, aaa_, p2 )) ); //cuda_assert( cudaDeviceSynchronize() ); device_Zcopy( dimdim, aaa_, 
p2 ); //kernel_assert( (Zaxpy<<<1,128>>>( dimdim, 9.410847631142442981719212, 0.0, p2, aa_ )) ); //cuda_assert( cudaDeviceSynchronize() ); device_Zaxpy( dimdim, 9.410847631142442981719212, 0.0, p2, aa_ ); //kernel_assert( (Zaxpy<<<1,128>>>( dimdim, 32.01029973951970099352671, 0.0, p2, a_ )) ); //cuda_assert( cudaDeviceSynchronize() ); device_Zaxpy( dimdim, 32.01029973951970099352671, 0.0, p2, a_ ); //kernel_assert( (sum_diag<<<1,dim>>>( p2, dim, 39.17363072664900708597702, 0.0 )) ); //cuda_assert( cudaDeviceSynchronize() ); device_sum_diag( p2, dim, 39.17363072664900708597702, 0.0 ); //4 - p3) //kernel_assert( (Zcopy<<<1,128>>>( dimdim, aaa_, p3 )) ); //cuda_assert( cudaDeviceSynchronize() ); device_Zcopy( dimdim, aaa_, p3 ); //kernel_assert( (Zaxpy<<<1,128>>>( dimdim, -0.205423815571221490859601, 12.65871752452031305098099, p3, aa_ )) ); //cuda_assert( cudaDeviceSynchronize() ); device_Zaxpy( dimdim, -0.205423815571221490859601, 12.65871752452031305098099, p3, aa_ ); //kernel_assert( (Zaxpy<<<1,128>>>( dimdim, -58.21460179641193947200470, 3.18984896421237635671600, p3, a_ )) ); //cuda_assert( cudaDeviceSynchronize() ); device_Zaxpy( dimdim, -58.21460179641193947200470, 3.18984896421237635671600, p3, a_ ); //kernel_assert( (sum_diag<<<1,dim>>>( p3, dim, -19.71085376106750328141404, -94.20645646169128946503646 )) ); //cuda_assert( cudaDeviceSynchronize() ); device_sum_diag( p3, dim, -19.71085376106750328141404, -94.20645646169128946503646 ); //4 - s) kernel_assert( (Zgemm<<<mm_grids, mm_threads>>>( p2p3, p2, p3, dim, 0.0016600397351866578333 )) ); cuda_assert( cudaDeviceSynchronize() ); kernel_assert( (Zgemm<<<mm_grids, mm_threads>>>( s, p1, p2p3, dim, 0.0016600397351866578333 )) ); cuda_assert( cudaDeviceSynchronize() ); //5) if ( scaler != 0 ) { for ( unsigned long index = 0; index != scaler; ++index ) { kernel_assert( (Zgemm<<<mm_grids, mm_threads>>>( s_, s, s, dim, 1.0 )) ); cuda_assert( cudaDeviceSynchronize() ); double2* tmp = s_; s_ = s; s = tmp; } } //6) double const ac_offset = cuda_ug[0]; double const dc_offset = cuda_ug[1]; //kernel_assert( (extract_intensity_diff_with_offset_zigmoid<<<1,dim>>>( s, I_exp, I_diff, I_zigmoid, dim, column_index, ac_offset, dc_offset, c )) ); //cuda_assert( cudaDeviceSynchronize() ); device_extract_intensity_diff_with_offset_zigmoid( s, I_exp, I_diff, I_zigmoid, dim, column_index, ac_offset, dc_offset, c ); } void make_pattern_intensity_diff( double* cuda_ug, unsigned long* cuda_ar, double* cuda_diag, double thickness, unsigned long* cuda_dim, double* cuda_I_exp, double* cuda_I_diff, unsigned long column_index, double2* cuda_cache, unsigned long tilt_size, unsigned long max_dim, double c, double* cuda_I_zigmoid, double* beams, double* kt_factor ) { //unsigned long const threads = 64; //unsigned long const threads = 128; unsigned long const threads = 256; unsigned long const grids = (tilt_size + threads - 1)/threads; kernel_assert( ( make_individual_pattern_intensity_diff<<<grids, threads>>>( cuda_ug, cuda_ar, cuda_diag, thickness, cuda_dim, cuda_I_exp, cuda_I_diff, column_index, cuda_cache, max_dim, tilt_size, c, cuda_I_zigmoid, beams, kt_factor ) ) ); //cuda_assert( cudaDeviceSynchronize() ); }
findiff_gpu.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "utils.h" #include "findiff_gpu.h" #include "findiff.h" __device__ void calc_iterate(double *unew, double *uold, int n, int m, int idx, int idy, int ind){ if(1<idy && idy<m){ if(idx<n){ unew[ind] = (1.9*uold[ind-2] + 1.5*uold[ind-1] + uold[ind] + 0.5*uold[ind+1] + 0.1*uold[ind+2]); unew[ind] /= (double)(5.0); } } } __device__ void glob_shared_cpy(double *u_glob, double *unew, double *uold, int pitch, int n, int m, int idx, int idy, int ind){ if(idy<m && idx<n){ if(threadIdx.y==0 && 0<blockIdx.y){ unew[ind-2] = u_glob[(idy-2) + idx*pitch]; unew[ind-1] = u_glob[(idy-1) + idx*pitch]; uold[ind-2] = unew[ind-2]; uold[ind-1] = unew[ind-1]; } unew[ind] = u_glob[idy+idx*pitch]; uold[ind] = unew[ind]; if(threadIdx.y==(blockDim.y-1) || idy==(m-1)){ unew[ind+1] = u_glob[(idy+1)%m + idx*pitch]; unew[ind+2] = u_glob[(idy+2)%m + idx*pitch]; uold[ind+1] = unew[ind+1]; uold[ind+2] = unew[ind+2]; } } } __device__ void shared_glob_cpy(double *u_glob, double *unew, int pitch, int n, int m, int idx, int idy, int ind){ if(1<idy && idy<m) if(idx<n) u_glob[idy+idx*pitch] = unew[ind]; } __global__ void iterate_gpu(double *u_glob, int pitch, int n, int m){ int idx = blockIdx.x*blockDim.x + threadIdx.x; int idy = blockIdx.y*blockDim.y + threadIdx.y; int ind = threadIdx.y + 2; double *uold, *unew; extern __shared__ double s[]; unew = &(s[0]); uold = &(s[blockDim.y+4]); // initialising shared memory // glob_shared_cpy(u_glob, unew, uold, pitch, n, m, idx, idy, ind); // iterating and updating unew // calc_iterate(unew, uold, n, m, idx, idy, ind); // sending vals back to global mem // shared_glob_cpy(u_glob, unew, pitch, n, m, idx, idy, ind); } __global__ void iterate_gpu_slow(double* unew_glob, double* uold_glob, int n, int m){ int idx = blockIdx.x*blockDim.x + threadIdx.x; int idy = blockIdx.y*blockDim.y + threadIdx.y; if(1<idy && idy<m){ unew_glob[idy+idx*m] = (1.9*uold_glob[(idy+idx*m)-2] + 1.5*uold_glob[(idy+idx*m)-1] + uold_glob[idy+idx*m] + 0.5*uold_glob[(idy+1)%m+idx*m] + 0.1*uold_glob[(idy+2)%m+idx*m]); unew_glob[idy+idx*m] /= (double)(5.0); } } } __global__ void red_rows(double* u_glob, double* u_glob_out, int pitch, int n, int m){ int idx = blockIdx.x*blockDim.x + threadIdx.x; int idy = blockIdx.y*blockDim.y + threadIdx.y; int ind = threadIdx.y; extern __shared__ double tmp[]; int i, disp; if(idy<m && idx<n) tmp[ind] = u_glob[idy+idx*pitch]; disp = (1+blockIdx.y)*blockDim.y; i = (disp > m) ? 
(blockDim.y - (disp-m)):blockDim.y; for( ; i>1; i>>=1){ if(ind<(i/2)){ tmp[ind] += tmp[ind+(i/2)]; if(ind==0 && i%2!=0) tmp[ind] += tmp[ind+i-1]; } __syncthreads(); } if(ind==0) u_glob_out[blockIdx.y + idx*pitch] = tmp[0]; } __global__ void red_rows_glob(double* u_glob, double* u_glob_out, int pitch, int n, int m){ int idx = blockIdx.x*blockDim.x + threadIdx.x; int idy = blockIdx.y*blockDim.y + threadIdx.y; if(idy < (m/2) && idx < n){ u_glob_out[idy + idx*pitch] += u_glob[idy + (m/2) + idx*pitch]; if(m%2!=0 && idy==0) u_glob_out[idy + idx*pitch] += u_glob[idy + (m-1) + idx*pitch]; } } extern "C" { void fdiff_gpu(double *u_vals, double *temps, int n, int m, int p, int block_size_Y, Tau* tau, int mallocPitch, int red){ double *u_glob; size_t u_glob_size; int i, pitch, m_tmp; hipEvent_t start, finish; hipEventCreate(&start); hipEventCreate(&finish); if(!mallocPitch){ hipEventRecord(start, 0); hipMalloc( (void**)&u_glob, n*m*sizeof(double)); hipEventRecord(finish, 0); hipEventSynchronize(finish); hipEventElapsedTime(&tau->alloc_GPU, start, finish); hipEventRecord(start,0); hipMemcpy(u_glob, u_vals, n*m*sizeof(double), hipMemcpyHostToDevice); hipEventRecord(finish, 0); hipEventSynchronize(finish); hipEventElapsedTime(&tau->transf_GPU, start, finish); pitch = m; } else { hipEventRecord(start, 0); hipMallocPitch( (void**)&u_glob, &u_glob_size, (size_t)(m*sizeof(double)), n); hipEventRecord(finish, 0); hipEventSynchronize(finish); hipEventElapsedTime(&tau->alloc_GPU, start, finish); hipEventRecord(start,0); hipMemcpy2D(u_glob, u_glob_size, u_vals, m*sizeof(double), m*sizeof(double), n, hipMemcpyHostToDevice); hipEventRecord(finish, 0); hipEventSynchronize(finish); hipEventElapsedTime(&tau->transf_GPU, start, finish); pitch = (int)u_glob_size/sizeof(double); } dim3 dimBlock(1, block_size_Y); dim3 dimGrid((n/dimBlock.x)+(!(n%dimBlock.x)?0:1), (m/dimBlock.y)+(!(m%dimBlock.y)?0:1)); hipEventRecord(start, 0); for(i=0;i<p;i++) hipLaunchKernelGGL(( iterate_gpu), dim3(dimGrid),dim3(dimBlock),2*(block_size_Y+4)*sizeof(double), 0, u_glob, pitch, n, m); hipEventRecord(finish, 0); hipEventSynchronize(finish); hipEventElapsedTime(&tau->calc_GPU, start, finish); if(!mallocPitch){ hipEventRecord(start, 0); hipMemcpy(u_vals, u_glob, n*m*sizeof(double), hipMemcpyDeviceToHost); } else { hipEventRecord(start, 0); hipMemcpy2D(u_vals, m*sizeof(double), u_glob, u_glob_size, m*sizeof(double), n, hipMemcpyDeviceToHost); } hipEventRecord(finish, 0); hipEventSynchronize(finish); hipEventElapsedTime(&tau->transf_RAM, start, finish); if(red){ m_tmp = m; hipEventRecord(start, 0); while(m_tmp > 1){ hipLaunchKernelGGL(( red_rows), dim3(dimGrid),dim3(dimBlock),dimBlock.y*sizeof(double), 0, u_glob, u_glob, pitch, n, m_tmp); m_tmp = (m_tmp/dimBlock.y)+(!(m_tmp%dimBlock.y)?0:1); } hipEventRecord(finish, 0); hipEventSynchronize(finish); hipEventElapsedTime(&tau->calc_avgGPU, start, finish); if(!mallocPitch){ for(i=0;i<n;i++) hipMemcpy(&temps[i], &u_glob[i*m], sizeof(double), hipMemcpyDeviceToHost); } else { hipMemcpy2D(temps, sizeof(double), &u_glob[0], u_glob_size, sizeof(double), n, hipMemcpyDeviceToHost); } } hipFree(u_glob); } void fdiff_gpu_glob(double* u_vals, double* temps, int n, int m, int p, int block_size, Tau* tau, int red){ double *uold_glob, *unew_glob, *tmp; int i, m_tmp; hipEvent_t start, finish; hipEventCreate(&start); hipEventCreate(&finish); hipEventRecord(start, 0); hipMalloc( (void**)&unew_glob, n*m*sizeof(double)); hipMalloc( (void**)&uold_glob, n*m*sizeof(double)); hipEventRecord(finish, 0); 
hipEventSynchronize(finish); hipEventElapsedTime(&tau->alloc_GPU, start, finish); hipEventRecord(start, 0); hipMemcpy(unew_glob, u_vals, n*m*sizeof(double), hipMemcpyHostToDevice); hipMemcpy(uold_glob, u_vals, n*m*sizeof(double), hipMemcpyHostToDevice); hipEventRecord(finish, 0); hipEventSynchronize(finish); hipEventElapsedTime(&tau->transf_GPU, start, finish); dim3 dimBlock(1, block_size); dim3 dimGrid((n/dimBlock.x)+(!(n%dimBlock.x)?0:1), (m/dimBlock.y)+(!(m%dimBlock.y)?0:1)); hipEventRecord(start, 0); for(i=0;i<p;i++){ if(i%2==0) hipLaunchKernelGGL(( iterate_gpu_slow), dim3(dimGrid),dim3(dimBlock), 0, 0, unew_glob, uold_glob, n, m); else hipLaunchKernelGGL(( iterate_gpu_slow), dim3(dimGrid),dim3(dimBlock), 0, 0, uold_glob, unew_glob, n, m); } hipEventRecord(finish, 0); hipEventSynchronize(finish); hipEventElapsedTime(&tau->calc_GPU, start, finish); hipEventRecord(start, 0); if(p%2==0) tmp = uold_glob; else tmp = unew_glob; hipMemcpy(u_vals, tmp, n*m*sizeof(double), hipMemcpyDeviceToHost); hipEventRecord(finish, 0); hipEventSynchronize(finish); hipEventElapsedTime(&tau->transf_RAM, start, finish); m_tmp = m; if(red){ hipEventRecord(start, 0); for( ; m_tmp>1; m_tmp>>=1) hipLaunchKernelGGL(( red_rows_glob), dim3(dimGrid),dim3(dimBlock), 0, 0, tmp, tmp, m, n, m_tmp); hipEventRecord(finish, 0); hipEventSynchronize(finish); hipEventElapsedTime(&tau->calc_avgGPU, start, finish); for(i=0;i<n;i++) hipMemcpy(&temps[i], &tmp[i*m], sizeof(double), hipMemcpyDeviceToHost); } hipFree(unew_glob); hipFree(uold_glob); } }
findiff_gpu.cu
#include "utils.h" #include "findiff_gpu.h" #include "findiff.h" __device__ void calc_iterate(double *unew, double *uold, int n, int m, int idx, int idy, int ind){ if(1<idy && idy<m){ if(idx<n){ unew[ind] = (1.9*uold[ind-2] + 1.5*uold[ind-1] + uold[ind] + 0.5*uold[ind+1] + 0.1*uold[ind+2]); unew[ind] /= (double)(5.0); } } } __device__ void glob_shared_cpy(double *u_glob, double *unew, double *uold, int pitch, int n, int m, int idx, int idy, int ind){ if(idy<m && idx<n){ if(threadIdx.y==0 && 0<blockIdx.y){ unew[ind-2] = u_glob[(idy-2) + idx*pitch]; unew[ind-1] = u_glob[(idy-1) + idx*pitch]; uold[ind-2] = unew[ind-2]; uold[ind-1] = unew[ind-1]; } unew[ind] = u_glob[idy+idx*pitch]; uold[ind] = unew[ind]; if(threadIdx.y==(blockDim.y-1) || idy==(m-1)){ unew[ind+1] = u_glob[(idy+1)%m + idx*pitch]; unew[ind+2] = u_glob[(idy+2)%m + idx*pitch]; uold[ind+1] = unew[ind+1]; uold[ind+2] = unew[ind+2]; } } } __device__ void shared_glob_cpy(double *u_glob, double *unew, int pitch, int n, int m, int idx, int idy, int ind){ if(1<idy && idy<m) if(idx<n) u_glob[idy+idx*pitch] = unew[ind]; } __global__ void iterate_gpu(double *u_glob, int pitch, int n, int m){ int idx = blockIdx.x*blockDim.x + threadIdx.x; int idy = blockIdx.y*blockDim.y + threadIdx.y; int ind = threadIdx.y + 2; double *uold, *unew; extern __shared__ double s[]; unew = &(s[0]); uold = &(s[blockDim.y+4]); // initialising shared memory // glob_shared_cpy(u_glob, unew, uold, pitch, n, m, idx, idy, ind); // iterating and updating unew // calc_iterate(unew, uold, n, m, idx, idy, ind); // sending vals back to global mem // shared_glob_cpy(u_glob, unew, pitch, n, m, idx, idy, ind); } __global__ void iterate_gpu_slow(double* unew_glob, double* uold_glob, int n, int m){ int idx = blockIdx.x*blockDim.x + threadIdx.x; int idy = blockIdx.y*blockDim.y + threadIdx.y; if(1<idy && idy<m){ unew_glob[idy+idx*m] = (1.9*uold_glob[(idy+idx*m)-2] + 1.5*uold_glob[(idy+idx*m)-1] + uold_glob[idy+idx*m] + 0.5*uold_glob[(idy+1)%m+idx*m] + 0.1*uold_glob[(idy+2)%m+idx*m]); unew_glob[idy+idx*m] /= (double)(5.0); } } } __global__ void red_rows(double* u_glob, double* u_glob_out, int pitch, int n, int m){ int idx = blockIdx.x*blockDim.x + threadIdx.x; int idy = blockIdx.y*blockDim.y + threadIdx.y; int ind = threadIdx.y; extern __shared__ double tmp[]; int i, disp; if(idy<m && idx<n) tmp[ind] = u_glob[idy+idx*pitch]; disp = (1+blockIdx.y)*blockDim.y; i = (disp > m) ? 
(blockDim.y - (disp-m)):blockDim.y; for( ; i>1; i>>=1){ if(ind<(i/2)){ tmp[ind] += tmp[ind+(i/2)]; if(ind==0 && i%2!=0) tmp[ind] += tmp[ind+i-1]; } __syncthreads(); } if(ind==0) u_glob_out[blockIdx.y + idx*pitch] = tmp[0]; } __global__ void red_rows_glob(double* u_glob, double* u_glob_out, int pitch, int n, int m){ int idx = blockIdx.x*blockDim.x + threadIdx.x; int idy = blockIdx.y*blockDim.y + threadIdx.y; if(idy < (m/2) && idx < n){ u_glob_out[idy + idx*pitch] += u_glob[idy + (m/2) + idx*pitch]; if(m%2!=0 && idy==0) u_glob_out[idy + idx*pitch] += u_glob[idy + (m-1) + idx*pitch]; } } extern "C" { void fdiff_gpu(double *u_vals, double *temps, int n, int m, int p, int block_size_Y, Tau* tau, int mallocPitch, int red){ double *u_glob; size_t u_glob_size; int i, pitch, m_tmp; cudaEvent_t start, finish; cudaEventCreate(&start); cudaEventCreate(&finish); if(!mallocPitch){ cudaEventRecord(start, 0); cudaMalloc( (void**)&u_glob, n*m*sizeof(double)); cudaEventRecord(finish, 0); cudaEventSynchronize(finish); cudaEventElapsedTime(&tau->alloc_GPU, start, finish); cudaEventRecord(start,0); cudaMemcpy(u_glob, u_vals, n*m*sizeof(double), cudaMemcpyHostToDevice); cudaEventRecord(finish, 0); cudaEventSynchronize(finish); cudaEventElapsedTime(&tau->transf_GPU, start, finish); pitch = m; } else { cudaEventRecord(start, 0); cudaMallocPitch( (void**)&u_glob, &u_glob_size, (size_t)(m*sizeof(double)), n); cudaEventRecord(finish, 0); cudaEventSynchronize(finish); cudaEventElapsedTime(&tau->alloc_GPU, start, finish); cudaEventRecord(start,0); cudaMemcpy2D(u_glob, u_glob_size, u_vals, m*sizeof(double), m*sizeof(double), n, cudaMemcpyHostToDevice); cudaEventRecord(finish, 0); cudaEventSynchronize(finish); cudaEventElapsedTime(&tau->transf_GPU, start, finish); pitch = (int)u_glob_size/sizeof(double); } dim3 dimBlock(1, block_size_Y); dim3 dimGrid((n/dimBlock.x)+(!(n%dimBlock.x)?0:1), (m/dimBlock.y)+(!(m%dimBlock.y)?0:1)); cudaEventRecord(start, 0); for(i=0;i<p;i++) iterate_gpu<<<dimGrid,dimBlock,2*(block_size_Y+4)*sizeof(double)>>>(u_glob, pitch, n, m); cudaEventRecord(finish, 0); cudaEventSynchronize(finish); cudaEventElapsedTime(&tau->calc_GPU, start, finish); if(!mallocPitch){ cudaEventRecord(start, 0); cudaMemcpy(u_vals, u_glob, n*m*sizeof(double), cudaMemcpyDeviceToHost); } else { cudaEventRecord(start, 0); cudaMemcpy2D(u_vals, m*sizeof(double), u_glob, u_glob_size, m*sizeof(double), n, cudaMemcpyDeviceToHost); } cudaEventRecord(finish, 0); cudaEventSynchronize(finish); cudaEventElapsedTime(&tau->transf_RAM, start, finish); if(red){ m_tmp = m; cudaEventRecord(start, 0); while(m_tmp > 1){ red_rows<<<dimGrid,dimBlock,dimBlock.y*sizeof(double)>>>(u_glob, u_glob, pitch, n, m_tmp); m_tmp = (m_tmp/dimBlock.y)+(!(m_tmp%dimBlock.y)?0:1); } cudaEventRecord(finish, 0); cudaEventSynchronize(finish); cudaEventElapsedTime(&tau->calc_avgGPU, start, finish); if(!mallocPitch){ for(i=0;i<n;i++) cudaMemcpy(&temps[i], &u_glob[i*m], sizeof(double), cudaMemcpyDeviceToHost); } else { cudaMemcpy2D(temps, sizeof(double), &u_glob[0], u_glob_size, sizeof(double), n, cudaMemcpyDeviceToHost); } } cudaFree(u_glob); } void fdiff_gpu_glob(double* u_vals, double* temps, int n, int m, int p, int block_size, Tau* tau, int red){ double *uold_glob, *unew_glob, *tmp; int i, m_tmp; cudaEvent_t start, finish; cudaEventCreate(&start); cudaEventCreate(&finish); cudaEventRecord(start, 0); cudaMalloc( (void**)&unew_glob, n*m*sizeof(double)); cudaMalloc( (void**)&uold_glob, n*m*sizeof(double)); cudaEventRecord(finish, 0); cudaEventSynchronize(finish); 
cudaEventElapsedTime(&tau->alloc_GPU, start, finish); cudaEventRecord(start, 0); cudaMemcpy(unew_glob, u_vals, n*m*sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(uold_glob, u_vals, n*m*sizeof(double), cudaMemcpyHostToDevice); cudaEventRecord(finish, 0); cudaEventSynchronize(finish); cudaEventElapsedTime(&tau->transf_GPU, start, finish); dim3 dimBlock(1, block_size); dim3 dimGrid((n/dimBlock.x)+(!(n%dimBlock.x)?0:1), (m/dimBlock.y)+(!(m%dimBlock.y)?0:1)); cudaEventRecord(start, 0); for(i=0;i<p;i++){ if(i%2==0) iterate_gpu_slow<<<dimGrid,dimBlock>>>(unew_glob, uold_glob, n, m); else iterate_gpu_slow<<<dimGrid,dimBlock>>>(uold_glob, unew_glob, n, m); } cudaEventRecord(finish, 0); cudaEventSynchronize(finish); cudaEventElapsedTime(&tau->calc_GPU, start, finish); cudaEventRecord(start, 0); if(p%2==0) tmp = uold_glob; else tmp = unew_glob; cudaMemcpy(u_vals, tmp, n*m*sizeof(double), cudaMemcpyDeviceToHost); cudaEventRecord(finish, 0); cudaEventSynchronize(finish); cudaEventElapsedTime(&tau->transf_RAM, start, finish); m_tmp = m; if(red){ cudaEventRecord(start, 0); for( ; m_tmp>1; m_tmp>>=1) red_rows_glob<<<dimGrid,dimBlock>>>(tmp, tmp, m, n, m_tmp); cudaEventRecord(finish, 0); cudaEventSynchronize(finish); cudaEventElapsedTime(&tau->calc_avgGPU, start, finish); for(i=0;i<n;i++) cudaMemcpy(&temps[i], &tmp[i*m], sizeof(double), cudaMemcpyDeviceToHost); } cudaFree(unew_glob); cudaFree(uold_glob); } }
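Both versions of fdiff_gpu expose the same C interface: u_vals is an n-by-m row-major grid, p is the iteration count, block_size_Y the threads per block along y, mallocPitch selects the pitched allocation path, and red enables the per-row reduction into temps. A possible host driver is sketched below; note that Tau is really defined in findiff.h, which is not shown here, so the float-field layout used in the sketch is an assumption based only on the members referenced above.

// Hypothetical driver for fdiff_gpu (sketch only; Tau's real definition lives
// in findiff.h and is assumed here to be a plain struct of float timings).
#include <cstdio>
#include <vector>

struct Tau {
    float alloc_GPU, transf_GPU, calc_GPU, transf_RAM, calc_avgGPU;  // assumed
};

extern "C" void fdiff_gpu(double *u_vals, double *temps, int n, int m, int p,
                          int block_size_Y, Tau *tau, int mallocPitch, int red);

int main() {
    const int n = 256, m = 1024, iters = 100;
    std::vector<double> u(n * m, 1.0);      // n rows of m samples, row-major
    std::vector<double> row_red(n, 0.0);    // per-row reduction results (red=1)
    Tau tau = {};
    // pitched allocation, 128 threads per block along y, reduction enabled
    fdiff_gpu(u.data(), row_red.data(), n, m, iters, 128, &tau,
              /*mallocPitch=*/1, /*red=*/1);
    printf("stencil: %.3f ms, reduction: %.3f ms\n", tau.calc_GPU, tau.calc_avgGPU);
    return 0;
}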
18a2e09fd7ce0287985bd225bb014c08b14aa35a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cuv/image_ops/image_pyramid.hpp> #define iDivUp(X,Y) (ceil((X)/(float)(Y))) #define CB_TILE_W 16 #define CB_TILE_H 16 #define KERNEL_SIZE 5 #define HALF_KERNEL 2 #define NORM_FACTOR 0.00390625f // 1.0/(16^2) texture<float, 2, hipReadModeElementType> ip_float_tex; texture<unsigned char, 2, hipReadModeElementType> ip_uc_tex; texture<float4, 2, hipReadModeElementType> ip_float4_tex; texture<uchar4, 2, hipReadModeElementType> ip_uc4_tex; template<class T> struct texref{ }; template<> struct texref<float>{ typedef texture<float, 2, hipReadModeElementType> type; static type& get(){ return ip_float_tex; }; __device__ float operator()(float i, float j){return tex2D(ip_float_tex, i,j);} }; template<> struct texref<unsigned char>{ typedef texture<unsigned char, 2, hipReadModeElementType> type; static type& get(){ return ip_uc_tex; }; __device__ unsigned char operator()(float i, float j){return tex2D(ip_uc_tex,i,j);} }; template<> struct texref<float4>{ typedef texture<float4, 2, hipReadModeElementType> type; static type& get(){ return ip_float4_tex; }; __device__ float4 operator()(float i, float j){return tex2D(ip_float4_tex, i,j);} }; template<> struct texref<uchar4>{ typedef texture<uchar4, 2, hipReadModeElementType> type; static type& get(){ return ip_uc4_tex; }; __device__ uchar4 operator()(float i, float j){return tex2D(ip_uc4_tex,i,j);} }; namespace cuv{ template<class T> __device__ T plus4(const T& a, const T& b){ T tmp = a; tmp.x += b.x; tmp.y += b.y; tmp.z += b.z; return tmp; } template<class T, class S> __device__ T mul4 (const S& s, const T& a){ T tmp = a; tmp.x *= s; tmp.y *= s; tmp.z *= s; return tmp; } // // Gaussian 5 x 5 kernel = [1, 4, 6, 4, 1]/16 // template<class T4, class T> __global__ void gaussian_pyramid_downsample_kernel4val(T* downLevel, size_t downLevelPitch, unsigned int downWidth, unsigned int downHeight) { // calculate normalized texture coordinates unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; T4 buf[KERNEL_SIZE]; if(x < downWidth && y < downHeight) { float u0 = (2.f * x) - HALF_KERNEL; float v0 = (2.f * y) - HALF_KERNEL; texref<T4> tex; for(int i = 0; i < KERNEL_SIZE; i++) { T4 tmp; tmp = plus4( tex(u0 , v0 + i) , tex(u0 + 4, v0 + i)); tmp = plus4(tmp, mul4(4, plus4(tex(u0 + 1, v0 + i) , tex(u0 + 3, v0 + i)))); tmp = plus4(tmp, mul4(6, tex(u0 + 2, v0 + 2))); buf[i] = tmp; } unsigned int pos = y*downLevelPitch + x; downLevel[pos + 0*downLevelPitch*downHeight] = (buf[0].x + buf[4].x + 4*(buf[1].x + buf[3].x) + 6 * buf[2].x) * NORM_FACTOR; downLevel[pos + 1*downLevelPitch*downHeight] = (buf[0].y + buf[4].y + 4*(buf[1].y + buf[3].y) + 6 * buf[2].y) * NORM_FACTOR; downLevel[pos + 2*downLevelPitch*downHeight] = (buf[0].z + buf[4].z + 4*(buf[1].z + buf[3].z) + 6 * buf[2].z) * NORM_FACTOR; } } // // Gaussian 5 x 5 kernel = [1, 4, 6, 4, 1]/16 // inspired by http://sourceforge.net/projects/openvidia/files/CUDA%20Bayesian%20Optical%20Flow/ // with bugfix... 
// template<class T> __global__ void gaussian_pyramid_downsample_kernel(T* downLevel, size_t downLevelPitch, unsigned int downWidth, unsigned int downHeight) { // calculate normalized texture coordinates unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; if(x < downWidth && y < downHeight) { float buf[KERNEL_SIZE]; float u0 = (2.f * x) - HALF_KERNEL; float v0 = (2.f * y) - HALF_KERNEL; texref<T> tex; for(int i = 0; i < KERNEL_SIZE; i++) { buf[i] = ( tex(u0 , v0 + i) + tex(u0 + 4, v0 + i)) + 4 * (tex(u0 + 1, v0 + i) + tex(u0 + 3, v0 + i)) + 6 * tex(u0 + 2, v0 + 2); } downLevel[y * downLevelPitch + x] = (buf[0] + buf[4] + 4*(buf[1] + buf[3]) + 6 * buf[2]) * NORM_FACTOR; } } // Gaussian 5 x 5 kernel = [1, 4, 6, 4, 1]/16 // template<class T> __global__ void gaussian_kernel(T* dst, size_t dstPitch, unsigned int dstWidth, unsigned int dstHeight) { // calculate normalized texture coordinates unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; if(x < dstWidth && y < dstHeight) { float buf[KERNEL_SIZE]; float u0 = x - (float)HALF_KERNEL; float v0 = y - (float)HALF_KERNEL; texref<T> tex; for(int i = 0; i < KERNEL_SIZE; i++) { buf[i] = ( tex(u0 , v0 + i) + tex(u0 + 4, v0 + i)) + 4 * (tex(u0 + 1, v0 + i) + tex(u0 + 3, v0 + i)) + 6 * tex(u0 + 2, v0 + 2); } dst[y * dstPitch + x] = (buf[0] + buf[4] + 4*(buf[1] + buf[3]) + 6 * buf[2]) * NORM_FACTOR; } } template<class T> __global__ void gaussian_pyramid_upsample_kernel(T* upLevel, size_t upLevelPitch, unsigned int upWidth, unsigned int upHeight) { // calculate normalized texture coordinates unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; if(x < upWidth && y < upHeight) { float u0 = (x/2.f); float v0 = (y/2.f); texref<T> tex; upLevel[y * upLevelPitch + x] = tex(u0,v0); } } template<class T> struct single_to_4{}; template<> struct single_to_4<float> {typedef float4 type;}; template<> struct single_to_4<unsigned char>{typedef uchar4 type;}; template<class V,class S, class I> void gaussian( tensor<V,S,row_major>& dst, const cuda_array<V,S,I>& src){ cuvAssert(dst.shape().size()==2); typedef typename texref<V>::type textype; textype& tex = texref<V>::get(); tex.normalized = false; tex.filterMode = hipFilterModePoint; tex.addressMode[0] = hipAddressModeClamp; tex.addressMode[1] = hipAddressModeClamp; dim3 grid,threads; grid = dim3 (iDivUp(dst.shape()[1], CB_TILE_W), iDivUp(dst.shape()[0], CB_TILE_H)); threads = dim3 (CB_TILE_W, CB_TILE_H); cuvAssert(dst.shape()[1] == src.w()); cuvAssert(dst.shape()[0] == src.h()); hipChannelFormatDesc channelDesc = hipCreateChannelDesc<V>(); hipBindTextureToArray(tex, src.ptr(), channelDesc); checkCudaError("hipBindTextureToArray"); hipLaunchKernelGGL(( gaussian_kernel), dim3(grid),dim3(threads), 0, 0, dst.ptr(), dst.shape()[1], dst.shape()[1], dst.shape()[0]); cuvSafeCall(hipDeviceSynchronize()); hipUnbindTexture(tex); checkCudaError("hipUnbindTexture"); } template<class V,class S, class I> void gaussian_pyramid_downsample( tensor<V,S,row_major>& dst, const cuda_array<V,S,I>& src, const unsigned int interleaved_channels){ cuvAssert(dst.shape().size()==2); typedef typename single_to_4<V>::type V4; hipChannelFormatDesc channelDesc = hipCreateChannelDesc<V>(); hipChannelFormatDesc channelDesc4 = hipCreateChannelDesc<V4>(); typedef typename texref<V>::type textype; typedef typename texref<V4>::type textype4; textype& tex = texref<V>::get(); 
tex.normalized = false; tex.filterMode = hipFilterModePoint; tex.addressMode[0] = hipAddressModeClamp; tex.addressMode[1] = hipAddressModeClamp; textype4& tex4 = texref<V4>::get(); tex4.normalized = false; tex4.filterMode = hipFilterModeLinear; tex4.addressMode[0] = hipAddressModeClamp; tex4.addressMode[1] = hipAddressModeClamp; dim3 grid,threads; switch(interleaved_channels){ case 1: // deals with a single channel grid = dim3 (iDivUp(dst.shape()[1], CB_TILE_W), iDivUp(dst.shape()[0], CB_TILE_H)); threads = dim3 (CB_TILE_W, CB_TILE_H); cuvAssert(dst.shape()[1] < src.w()); cuvAssert(dst.shape()[0] < src.h()); hipBindTextureToArray(tex, src.ptr(), channelDesc); checkCudaError("hipBindTextureToArray"); hipLaunchKernelGGL(( gaussian_pyramid_downsample_kernel), dim3(grid),dim3(threads), 0, 0, dst.ptr(), dst.shape()[1], dst.shape()[1], dst.shape()[0]); cuvSafeCall(hipDeviceSynchronize()); hipUnbindTexture(tex); checkCudaError("hipUnbindTexture"); break; case 4: // deals with 4 interleaved channels (and writes to 3(!)) cuvAssert(dst.shape()[1] < src.w()); cuvAssert(dst.shape()[0] / 3 < src.h()); cuvAssert(dst.shape()[0] % 3 == 0); // three channels in destination (non-interleaved) cuvAssert(src.dim()==4); grid = dim3(iDivUp(dst.shape()[1], CB_TILE_W), iDivUp(dst.shape()[0]/3, CB_TILE_H)); threads = dim3(CB_TILE_W, CB_TILE_H); fill(dst, (V)0); hipBindTextureToArray(tex4, src.ptr(), channelDesc4); checkCudaError("hipBindTextureToArray"); hipLaunchKernelGGL(( gaussian_pyramid_downsample_kernel4val<V4,V>), dim3(grid),dim3(threads), 0, 0, dst.ptr(), dst.shape()[1], dst.shape()[1], dst.shape()[0]/3); cuvSafeCall(hipDeviceSynchronize()); hipUnbindTexture(tex4); checkCudaError("hipUnbindTexture"); break; default: cuvAssert(false); } cuvSafeCall(hipDeviceSynchronize()); } // Upsampling with hardware linear interpolation template<class V,class S, class I> void gaussian_pyramid_upsample( tensor<V,S,row_major>& dst, const cuda_array<V,S,I>& src){ cuvAssert(dst.shape().size()==2); cuvAssert(dst.shape()[1] > src.w()); cuvAssert(dst.shape()[0] > src.h()); dim3 grid(iDivUp(dst.shape()[1], CB_TILE_W), iDivUp(dst.shape()[0], CB_TILE_H)); dim3 threads(CB_TILE_W, CB_TILE_H); typedef typename texref<V>::type textype; textype& tex = texref<V>::get(); hipChannelFormatDesc channelDesc = hipCreateChannelDesc<V>(); tex.normalized = false; tex.filterMode = hipFilterModeLinear; tex.addressMode[0] = hipAddressModeClamp; tex.addressMode[1] = hipAddressModeClamp; hipBindTextureToArray(tex, src.ptr(), channelDesc); checkCudaError("hipBindTextureToArray"); hipLaunchKernelGGL(( gaussian_pyramid_upsample_kernel), dim3(grid),dim3(threads), 0, 0, dst.ptr(), dst.shape()[1], dst.shape()[1], dst.shape()[0]); cuvSafeCall(hipDeviceSynchronize()); hipUnbindTexture(tex); checkCudaError("hipUnbindTexture"); } template<class T> __device__ T colordist(float u0, float v0, float u1, float v1, const float& offset, const unsigned int& dim){ T d0 = (T) 0; texref<T> tex; for(unsigned int i=0;i<dim;i++){ float f = tex(u0,v0) - tex(u1,v1); d0 += f*f; v0 += offset; v1 += offset; } return d0; } template<class T> struct summer{ T mt; __device__ summer():mt(0){} __device__ void operator()(const T& t ){ mt+=t; } }; template<class T> struct expsummer{ T mt; __device__ expsummer():mt(0){} __device__ void operator()(const T& t ){ mt+=exp(-t); } }; template<class T, class TDest> __global__ void get_pixel_classes_kernel(TDest* dst, size_t dstPitch, unsigned int dstWidth, unsigned int dstHeight, float offset, float scale_fact) { // calculate normalized texture 
coordinates unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; const float N = 1.f; if(x < dstWidth && y < dstHeight) { float u0 = (x/scale_fact); float v0 = (y/scale_fact); summer<T> sum; unsigned char arg_min_cd = 0; T min_cd = colordist<T>(u0,v0,u0+N,v0+N,offset,3u); sum(min_cd); T val = colordist<T>(u0,v0,u0+N,v0-N,offset,3u); if(val<min_cd){ min_cd = val; arg_min_cd = 1; } sum(val); val = colordist<T>(u0,v0,u0-N,v0+N,offset,3u); if(val<min_cd){ min_cd = val; arg_min_cd = 2; } sum(val); val = colordist<T>(u0,v0,u0-N,v0-N,offset,3u); if(val<min_cd){ min_cd = val; arg_min_cd = 3; } sum(val); TDest tmp = make_uchar4( arg_min_cd % 2 ? 255: 0, arg_min_cd > 1 ? 255: 0,0, max(0.f,min(255.f,sum.mt - 4*min_cd)) // for summer /*max(0.f,min(255.f,255.f * exp(-min_cd)/sum.mt)) // for expsummer*/ ); dst[y * dstPitch + x] = tmp;; } } // determine a number out of [0,3] for every pixel which should vary // smoothly and according to detail level in the image template<class VDest, class V, class S, class I> void get_pixel_classes( tensor<VDest,S,row_major>& dst, const cuda_array<V,S,I>& src_smooth, float scale_fact ){ cuvAssert(dst.shape().size()==2); dim3 grid(iDivUp(dst.shape()[1], CB_TILE_W), iDivUp(dst.shape()[0], CB_TILE_H)); dim3 threads(CB_TILE_W, CB_TILE_H); typedef typename texref<V>::type textype; textype& tex = texref<V>::get(); hipChannelFormatDesc channelDesc = hipCreateChannelDesc<V>(); tex.normalized = false; tex.filterMode = hipFilterModeLinear; tex.addressMode[0] = hipAddressModeClamp; tex.addressMode[1] = hipAddressModeClamp; hipBindTextureToArray(tex, src_smooth.ptr(), channelDesc); checkCudaError("hipBindTextureToArray"); cuvAssert(src_smooth.h() % 3 == 0); cuvAssert(dst.shape()[1] % 4 == 0); // float4! 
float offset = src_smooth.h()/3; offset=0; hipLaunchKernelGGL(( get_pixel_classes_kernel<float>), dim3(grid),dim3(threads), 0, 0, (uchar4*)dst.ptr(), dst.shape()[1]/4, dst.shape()[1]/4, dst.shape()[0], offset, scale_fact ); cuvSafeCall(hipDeviceSynchronize()); hipUnbindTexture(tex); checkCudaError("hipUnbindTexture"); } // explicit instantiation template void gaussian( tensor<float,dev_memory_space,row_major>& dst, const cuda_array<float,dev_memory_space,unsigned int>& src); template void gaussian( tensor<unsigned char,dev_memory_space,row_major>& dst, const cuda_array<unsigned char,dev_memory_space,unsigned int>& src); template void gaussian_pyramid_downsample( tensor<float,dev_memory_space,row_major>& dst, const cuda_array<float,dev_memory_space,unsigned int>& src, const unsigned int); template void gaussian_pyramid_downsample( tensor<unsigned char,dev_memory_space,row_major>& dst, const cuda_array<unsigned char,dev_memory_space,unsigned int>& src, const unsigned int); template void gaussian_pyramid_upsample( tensor<float,dev_memory_space,row_major>& dst, const cuda_array<float,dev_memory_space,unsigned int>& src); template void gaussian_pyramid_upsample( tensor<unsigned char,dev_memory_space,row_major>& dst, const cuda_array<unsigned char,dev_memory_space,unsigned int>& src); template void get_pixel_classes( tensor<unsigned char,dev_memory_space,row_major>& dst, const cuda_array<unsigned char,dev_memory_space,unsigned int>& src, float scale_fact); template void get_pixel_classes( tensor<float,dev_memory_space,row_major>& dst, const cuda_array<float,dev_memory_space,unsigned int>& src, float scale_fact); template void get_pixel_classes( tensor<unsigned char,dev_memory_space,row_major>& dst, const cuda_array<float,dev_memory_space,unsigned int>& src, float scale_fact); }
18a2e09fd7ce0287985bd225bb014c08b14aa35a.cu
#include <cuv/image_ops/image_pyramid.hpp> #define iDivUp(X,Y) (ceil((X)/(float)(Y))) #define CB_TILE_W 16 #define CB_TILE_H 16 #define KERNEL_SIZE 5 #define HALF_KERNEL 2 #define NORM_FACTOR 0.00390625f // 1.0/(16^2) texture<float, 2, cudaReadModeElementType> ip_float_tex; texture<unsigned char, 2, cudaReadModeElementType> ip_uc_tex; texture<float4, 2, cudaReadModeElementType> ip_float4_tex; texture<uchar4, 2, cudaReadModeElementType> ip_uc4_tex; template<class T> struct texref{ }; template<> struct texref<float>{ typedef texture<float, 2, cudaReadModeElementType> type; static type& get(){ return ip_float_tex; }; __device__ float operator()(float i, float j){return tex2D(ip_float_tex, i,j);} }; template<> struct texref<unsigned char>{ typedef texture<unsigned char, 2, cudaReadModeElementType> type; static type& get(){ return ip_uc_tex; }; __device__ unsigned char operator()(float i, float j){return tex2D(ip_uc_tex,i,j);} }; template<> struct texref<float4>{ typedef texture<float4, 2, cudaReadModeElementType> type; static type& get(){ return ip_float4_tex; }; __device__ float4 operator()(float i, float j){return tex2D(ip_float4_tex, i,j);} }; template<> struct texref<uchar4>{ typedef texture<uchar4, 2, cudaReadModeElementType> type; static type& get(){ return ip_uc4_tex; }; __device__ uchar4 operator()(float i, float j){return tex2D(ip_uc4_tex,i,j);} }; namespace cuv{ template<class T> __device__ T plus4(const T& a, const T& b){ T tmp = a; tmp.x += b.x; tmp.y += b.y; tmp.z += b.z; return tmp; } template<class T, class S> __device__ T mul4 (const S& s, const T& a){ T tmp = a; tmp.x *= s; tmp.y *= s; tmp.z *= s; return tmp; } // // Gaussian 5 x 5 kernel = [1, 4, 6, 4, 1]/16 // template<class T4, class T> __global__ void gaussian_pyramid_downsample_kernel4val(T* downLevel, size_t downLevelPitch, unsigned int downWidth, unsigned int downHeight) { // calculate normalized texture coordinates unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; T4 buf[KERNEL_SIZE]; if(x < downWidth && y < downHeight) { float u0 = (2.f * x) - HALF_KERNEL; float v0 = (2.f * y) - HALF_KERNEL; texref<T4> tex; for(int i = 0; i < KERNEL_SIZE; i++) { T4 tmp; tmp = plus4( tex(u0 , v0 + i) , tex(u0 + 4, v0 + i)); tmp = plus4(tmp, mul4(4, plus4(tex(u0 + 1, v0 + i) , tex(u0 + 3, v0 + i)))); tmp = plus4(tmp, mul4(6, tex(u0 + 2, v0 + 2))); buf[i] = tmp; } unsigned int pos = y*downLevelPitch + x; downLevel[pos + 0*downLevelPitch*downHeight] = (buf[0].x + buf[4].x + 4*(buf[1].x + buf[3].x) + 6 * buf[2].x) * NORM_FACTOR; downLevel[pos + 1*downLevelPitch*downHeight] = (buf[0].y + buf[4].y + 4*(buf[1].y + buf[3].y) + 6 * buf[2].y) * NORM_FACTOR; downLevel[pos + 2*downLevelPitch*downHeight] = (buf[0].z + buf[4].z + 4*(buf[1].z + buf[3].z) + 6 * buf[2].z) * NORM_FACTOR; } } // // Gaussian 5 x 5 kernel = [1, 4, 6, 4, 1]/16 // inspired by http://sourceforge.net/projects/openvidia/files/CUDA%20Bayesian%20Optical%20Flow/ // with bugfix... 
// template<class T> __global__ void gaussian_pyramid_downsample_kernel(T* downLevel, size_t downLevelPitch, unsigned int downWidth, unsigned int downHeight) { // calculate normalized texture coordinates unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; if(x < downWidth && y < downHeight) { float buf[KERNEL_SIZE]; float u0 = (2.f * x) - HALF_KERNEL; float v0 = (2.f * y) - HALF_KERNEL; texref<T> tex; for(int i = 0; i < KERNEL_SIZE; i++) { buf[i] = ( tex(u0 , v0 + i) + tex(u0 + 4, v0 + i)) + 4 * (tex(u0 + 1, v0 + i) + tex(u0 + 3, v0 + i)) + 6 * tex(u0 + 2, v0 + 2); } downLevel[y * downLevelPitch + x] = (buf[0] + buf[4] + 4*(buf[1] + buf[3]) + 6 * buf[2]) * NORM_FACTOR; } } // Gaussian 5 x 5 kernel = [1, 4, 6, 4, 1]/16 // template<class T> __global__ void gaussian_kernel(T* dst, size_t dstPitch, unsigned int dstWidth, unsigned int dstHeight) { // calculate normalized texture coordinates unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; if(x < dstWidth && y < dstHeight) { float buf[KERNEL_SIZE]; float u0 = x - (float)HALF_KERNEL; float v0 = y - (float)HALF_KERNEL; texref<T> tex; for(int i = 0; i < KERNEL_SIZE; i++) { buf[i] = ( tex(u0 , v0 + i) + tex(u0 + 4, v0 + i)) + 4 * (tex(u0 + 1, v0 + i) + tex(u0 + 3, v0 + i)) + 6 * tex(u0 + 2, v0 + 2); } dst[y * dstPitch + x] = (buf[0] + buf[4] + 4*(buf[1] + buf[3]) + 6 * buf[2]) * NORM_FACTOR; } } template<class T> __global__ void gaussian_pyramid_upsample_kernel(T* upLevel, size_t upLevelPitch, unsigned int upWidth, unsigned int upHeight) { // calculate normalized texture coordinates unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; if(x < upWidth && y < upHeight) { float u0 = (x/2.f); float v0 = (y/2.f); texref<T> tex; upLevel[y * upLevelPitch + x] = tex(u0,v0); } } template<class T> struct single_to_4{}; template<> struct single_to_4<float> {typedef float4 type;}; template<> struct single_to_4<unsigned char>{typedef uchar4 type;}; template<class V,class S, class I> void gaussian( tensor<V,S,row_major>& dst, const cuda_array<V,S,I>& src){ cuvAssert(dst.shape().size()==2); typedef typename texref<V>::type textype; textype& tex = texref<V>::get(); tex.normalized = false; tex.filterMode = cudaFilterModePoint; tex.addressMode[0] = cudaAddressModeClamp; tex.addressMode[1] = cudaAddressModeClamp; dim3 grid,threads; grid = dim3 (iDivUp(dst.shape()[1], CB_TILE_W), iDivUp(dst.shape()[0], CB_TILE_H)); threads = dim3 (CB_TILE_W, CB_TILE_H); cuvAssert(dst.shape()[1] == src.w()); cuvAssert(dst.shape()[0] == src.h()); cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<V>(); cudaBindTextureToArray(tex, src.ptr(), channelDesc); checkCudaError("cudaBindTextureToArray"); gaussian_kernel<<<grid,threads>>>(dst.ptr(), dst.shape()[1], dst.shape()[1], dst.shape()[0]); cuvSafeCall(cudaThreadSynchronize()); cudaUnbindTexture(tex); checkCudaError("cudaUnbindTexture"); } template<class V,class S, class I> void gaussian_pyramid_downsample( tensor<V,S,row_major>& dst, const cuda_array<V,S,I>& src, const unsigned int interleaved_channels){ cuvAssert(dst.shape().size()==2); typedef typename single_to_4<V>::type V4; cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<V>(); cudaChannelFormatDesc channelDesc4 = cudaCreateChannelDesc<V4>(); typedef typename texref<V>::type textype; typedef typename texref<V4>::type textype4; textype& tex = texref<V>::get(); tex.normalized = false; 
tex.filterMode = cudaFilterModePoint; tex.addressMode[0] = cudaAddressModeClamp; tex.addressMode[1] = cudaAddressModeClamp; textype4& tex4 = texref<V4>::get(); tex4.normalized = false; tex4.filterMode = cudaFilterModeLinear; tex4.addressMode[0] = cudaAddressModeClamp; tex4.addressMode[1] = cudaAddressModeClamp; dim3 grid,threads; switch(interleaved_channels){ case 1: // deals with a single channel grid = dim3 (iDivUp(dst.shape()[1], CB_TILE_W), iDivUp(dst.shape()[0], CB_TILE_H)); threads = dim3 (CB_TILE_W, CB_TILE_H); cuvAssert(dst.shape()[1] < src.w()); cuvAssert(dst.shape()[0] < src.h()); cudaBindTextureToArray(tex, src.ptr(), channelDesc); checkCudaError("cudaBindTextureToArray"); gaussian_pyramid_downsample_kernel<<<grid,threads>>>(dst.ptr(), dst.shape()[1], dst.shape()[1], dst.shape()[0]); cuvSafeCall(cudaThreadSynchronize()); cudaUnbindTexture(tex); checkCudaError("cudaUnbindTexture"); break; case 4: // deals with 4 interleaved channels (and writes to 3(!)) cuvAssert(dst.shape()[1] < src.w()); cuvAssert(dst.shape()[0] / 3 < src.h()); cuvAssert(dst.shape()[0] % 3 == 0); // three channels in destination (non-interleaved) cuvAssert(src.dim()==4); grid = dim3(iDivUp(dst.shape()[1], CB_TILE_W), iDivUp(dst.shape()[0]/3, CB_TILE_H)); threads = dim3(CB_TILE_W, CB_TILE_H); fill(dst, (V)0); cudaBindTextureToArray(tex4, src.ptr(), channelDesc4); checkCudaError("cudaBindTextureToArray"); gaussian_pyramid_downsample_kernel4val<V4,V><<<grid,threads>>>( dst.ptr(), dst.shape()[1], dst.shape()[1], dst.shape()[0]/3); cuvSafeCall(cudaThreadSynchronize()); cudaUnbindTexture(tex4); checkCudaError("cudaUnbindTexture"); break; default: cuvAssert(false); } cuvSafeCall(cudaThreadSynchronize()); } // Upsampling with hardware linear interpolation template<class V,class S, class I> void gaussian_pyramid_upsample( tensor<V,S,row_major>& dst, const cuda_array<V,S,I>& src){ cuvAssert(dst.shape().size()==2); cuvAssert(dst.shape()[1] > src.w()); cuvAssert(dst.shape()[0] > src.h()); dim3 grid(iDivUp(dst.shape()[1], CB_TILE_W), iDivUp(dst.shape()[0], CB_TILE_H)); dim3 threads(CB_TILE_W, CB_TILE_H); typedef typename texref<V>::type textype; textype& tex = texref<V>::get(); cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<V>(); tex.normalized = false; tex.filterMode = cudaFilterModeLinear; tex.addressMode[0] = cudaAddressModeClamp; tex.addressMode[1] = cudaAddressModeClamp; cudaBindTextureToArray(tex, src.ptr(), channelDesc); checkCudaError("cudaBindTextureToArray"); gaussian_pyramid_upsample_kernel<<<grid,threads>>>(dst.ptr(), dst.shape()[1], dst.shape()[1], dst.shape()[0]); cuvSafeCall(cudaThreadSynchronize()); cudaUnbindTexture(tex); checkCudaError("cudaUnbindTexture"); } template<class T> __device__ T colordist(float u0, float v0, float u1, float v1, const float& offset, const unsigned int& dim){ T d0 = (T) 0; texref<T> tex; for(unsigned int i=0;i<dim;i++){ float f = tex(u0,v0) - tex(u1,v1); d0 += f*f; v0 += offset; v1 += offset; } return d0; } template<class T> struct summer{ T mt; __device__ summer():mt(0){} __device__ void operator()(const T& t ){ mt+=t; } }; template<class T> struct expsummer{ T mt; __device__ expsummer():mt(0){} __device__ void operator()(const T& t ){ mt+=exp(-t); } }; template<class T, class TDest> __global__ void get_pixel_classes_kernel(TDest* dst, size_t dstPitch, unsigned int dstWidth, unsigned int dstHeight, float offset, float scale_fact) { // calculate normalized texture coordinates unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * 
blockDim.y + threadIdx.y; const float N = 1.f; if(x < dstWidth && y < dstHeight) { float u0 = (x/scale_fact); float v0 = (y/scale_fact); summer<T> sum; unsigned char arg_min_cd = 0; T min_cd = colordist<T>(u0,v0,u0+N,v0+N,offset,3u); sum(min_cd); T val = colordist<T>(u0,v0,u0+N,v0-N,offset,3u); if(val<min_cd){ min_cd = val; arg_min_cd = 1; } sum(val); val = colordist<T>(u0,v0,u0-N,v0+N,offset,3u); if(val<min_cd){ min_cd = val; arg_min_cd = 2; } sum(val); val = colordist<T>(u0,v0,u0-N,v0-N,offset,3u); if(val<min_cd){ min_cd = val; arg_min_cd = 3; } sum(val); TDest tmp = make_uchar4( arg_min_cd % 2 ? 255: 0, arg_min_cd > 1 ? 255: 0,0, max(0.f,min(255.f,sum.mt - 4*min_cd)) // for summer /*max(0.f,min(255.f,255.f * exp(-min_cd)/sum.mt)) // for expsummer*/ ); dst[y * dstPitch + x] = tmp;; } } // determine a number out of [0,3] for every pixel which should vary // smoothly and according to detail level in the image template<class VDest, class V, class S, class I> void get_pixel_classes( tensor<VDest,S,row_major>& dst, const cuda_array<V,S,I>& src_smooth, float scale_fact ){ cuvAssert(dst.shape().size()==2); dim3 grid(iDivUp(dst.shape()[1], CB_TILE_W), iDivUp(dst.shape()[0], CB_TILE_H)); dim3 threads(CB_TILE_W, CB_TILE_H); typedef typename texref<V>::type textype; textype& tex = texref<V>::get(); cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<V>(); tex.normalized = false; tex.filterMode = cudaFilterModeLinear; tex.addressMode[0] = cudaAddressModeClamp; tex.addressMode[1] = cudaAddressModeClamp; cudaBindTextureToArray(tex, src_smooth.ptr(), channelDesc); checkCudaError("cudaBindTextureToArray"); cuvAssert(src_smooth.h() % 3 == 0); cuvAssert(dst.shape()[1] % 4 == 0); // float4! float offset = src_smooth.h()/3; offset=0; get_pixel_classes_kernel<float><<<grid,threads>>>((uchar4*)dst.ptr(), dst.shape()[1]/4, dst.shape()[1]/4, dst.shape()[0], offset, scale_fact ); cuvSafeCall(cudaThreadSynchronize()); cudaUnbindTexture(tex); checkCudaError("cudaUnbindTexture"); } // explicit instantiation template void gaussian( tensor<float,dev_memory_space,row_major>& dst, const cuda_array<float,dev_memory_space,unsigned int>& src); template void gaussian( tensor<unsigned char,dev_memory_space,row_major>& dst, const cuda_array<unsigned char,dev_memory_space,unsigned int>& src); template void gaussian_pyramid_downsample( tensor<float,dev_memory_space,row_major>& dst, const cuda_array<float,dev_memory_space,unsigned int>& src, const unsigned int); template void gaussian_pyramid_downsample( tensor<unsigned char,dev_memory_space,row_major>& dst, const cuda_array<unsigned char,dev_memory_space,unsigned int>& src, const unsigned int); template void gaussian_pyramid_upsample( tensor<float,dev_memory_space,row_major>& dst, const cuda_array<float,dev_memory_space,unsigned int>& src); template void gaussian_pyramid_upsample( tensor<unsigned char,dev_memory_space,row_major>& dst, const cuda_array<unsigned char,dev_memory_space,unsigned int>& src); template void get_pixel_classes( tensor<unsigned char,dev_memory_space,row_major>& dst, const cuda_array<unsigned char,dev_memory_space,unsigned int>& src, float scale_fact); template void get_pixel_classes( tensor<float,dev_memory_space,row_major>& dst, const cuda_array<float,dev_memory_space,unsigned int>& src, float scale_fact); template void get_pixel_classes( tensor<unsigned char,dev_memory_space,row_major>& dst, const cuda_array<float,dev_memory_space,unsigned int>& src, float scale_fact); }
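The pyramid kernels above use the binomial weights [1, 4, 6, 4, 1] in both directions and normalise with NORM_FACTOR = 0.00390625f. The standalone check below confirms that this constant is exactly the reciprocal of the 5x5 weight sum, so the smoothing preserves the mean intensity.

// Quick check of the weights used by the Gaussian pyramid kernels above:
// the outer product of [1,4,6,4,1] with itself sums to 16*16 = 256, which is
// exactly 1/NORM_FACTOR.
#include <cstdio>

int main() {
    const int w[5] = {1, 4, 6, 4, 1};
    int total = 0;
    for (int i = 0; i < 5; ++i)
        for (int j = 0; j < 5; ++j)
            total += w[i] * w[j];
    printf("sum of 5x5 weights = %d (NORM_FACTOR = 1/%d)\n", total, total);
    return 0;
}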
233181f8781aa099491c415b0d77f97bffcd6b02.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" # include <stdio.h> # include <hip/hip_runtime.h> # include <sys/time.h> # include <string.h> # define CHECK(call) \ { \ const hipError_t error = call; \ if (error != hipSuccess) \ { \ printf("Error: %s: %d \n",__FILE__,__LINE__); \ printf("reason: %s \n",hipGetErrorString(error)); \ exit(1); \ } \ } // number of iteration, number pf paticle elements # define ITER 10 //# define N 5000 // define struct of Particles, don't know if float3 type can be used in cpu. struct Particle { float3 position; float3 velocity; }; double cpuSecond(){ // this returns the time in double format. struct timeval tp; gettimeofday(&tp,NULL); return ((double)tp.tv_sec+(double)tp.tv_usec*1.e-6); } void checkResult(struct Particle *hostRef,struct Particle *gpuRef,const int size){ // Check if the cpu and gpu implementation is the same. double epsilon = 1.0E-8; bool match = 1; for (int i=0; i<size ; i++){ // check on x axis if(abs(hostRef[i].position.x-gpuRef[i].position.x)>epsilon){ match =0; printf("array not match on x axis \n"); printf("host %5.2f gpu %5.2f at current %d \n",hostRef[i].position.x,gpuRef[i].position.x, i); break; } // check on y axis if(abs(hostRef[i].position.y-gpuRef[i].position.y)>epsilon){ match =0; printf("array not match on y axis \n"); printf("host %5.2f gpu %5.2f at current %d \n",hostRef[i].position.y,gpuRef[i].position.y, i); break; } // check on z axis if(abs(hostRef[i].position.z-gpuRef[i].position.z)>epsilon){ match =0; printf("array not match on z axis \n"); printf("host %5.2f gpu %5.2f at current %d \n",hostRef[i].position.z,gpuRef[i].position.z, i); break; } } if (match) printf("Match!\n"); } void randomacc(float3 *acc, const int size) { // this generate randome accelaration with the seed of time time_t t; srand((unsigned) time(&t)); for (int i=0; i < size; i++) { // generate accelerate acc[i].x = (float) (rand() &0xFF) / 10.0f; acc[i].y = (float) (rand() &0xFE) / 10.0f; acc[i].z = (float) (rand() &0xFD) / 10.0f; } } void update_CPU(struct Particle *array,float3 *acc,int size) { // this is used to update the position of Particles wrt random acceleration for one iteration. for (int i=0;i<size;i++){ // velocity update array[i].velocity.x += acc[i].x; array[i].velocity.y += acc[i].y; array[i].velocity.z += acc[i].z; // position update array[i].position.x += array[i].velocity.x; array[i].position.y += array[i].velocity.y; array[i].position.z += array[i].velocity.z; } } __global__ void update_GPU(struct Particle *array,float3 *acc,int size) { // this is used to update the position of Paticles wrt to random acc in GPU. int i= blockIdx.x*blockDim.x + threadIdx.x; if(i<size) { // velocity update array[i].velocity.x += acc[i].x; array[i].velocity.y += acc[i].y; array[i].velocity.z += acc[i].z; // position update array[i].position.x += array[i].velocity.x; array[i].position.y += array[i].velocity.y; array[i].position.z += array[i].velocity.z; } } int main(){ // malloc memory int block_size = 8; int rate =2; int element_number = 20000; int block_size_iter = block_size; // create csv file to record. 
FILE *fp; printf("In this test we are going to test out with %ld iteration with fixing blokc_size of %d ", ITER, block_size ); char *filename=(char *)"fixing_element_number.csv"; fp=fopen(filename,"w+"); fprintf(fp,"blokc_size,gpu_time_gap,cpu_time_gap \n"); printf("created the csv file.\n"); for (int i=0;i<ITER;i++) { //element_number = rate * element_number; // for check out the blokc_size // element_number = element_number; size_t nBytes = element_number * sizeof(struct Particle); block_size_iter = block_size_iter*rate; struct Particle *particles,*hostRef,*gpuRef,*d_p; particles = (struct Particle *)malloc(nBytes); hostRef = (struct Particle *)malloc(nBytes); gpuRef = (struct Particle *)malloc(nBytes); CHECK(hipMalloc((struct Particle**)&d_p,nBytes)); // transfer data to GPU memory CHECK(hipMemcpy(d_p,particles,nBytes,hipMemcpyHostToDevice)); // get the random acc for cpu and gpu float3 *acc,*d_acc; // malloc memory to acc size_t acc_bytes= element_number* sizeof(float3); acc = (float3 *)malloc(acc_bytes); randomacc(acc,element_number); CHECK(hipMalloc((float3**)&d_acc,acc_bytes)); // calculate the time needed for GPU start here. double d_start, d_gap; d_start = cpuSecond(); CHECK(hipMemcpy(d_acc,acc,acc_bytes,hipMemcpyHostToDevice)); hipLaunchKernelGGL(( update_GPU), dim3(((element_number+block_size_iter-1)/block_size_iter)), dim3(block_size_iter), 0, 0, d_p,d_acc,element_number); CHECK(hipDeviceSynchronize()); // copy the kernel result to host side hipMemcpy(gpuRef,d_p,nBytes,hipMemcpyDeviceToHost); // GPU timing end here. d_gap = cpuSecond()-d_start; printf("Summary: with block_size %d, particle number %d the time gap on GPU for one iteration is %f \n",block_size_iter,element_number,d_gap); // printf("gpuRef after copy %5.2f %5.2f\n",gpuRef[0].position.x,gpuRef[6].position.x); //update in cpu // time count of cpu start here. double h_start,h_gap; h_start= cpuSecond(); update_CPU(hostRef,acc,element_number); // printf("hostRef after operate %5.2f %5.2f\n",hostRef[0].position.x,hostRef[6].position.x); h_gap=cpuSecond()-h_start; printf("Summary:with the particle number of %d, the time gap on CPU for one iteration is %f \n ",element_number,h_gap); // compare the result if they match. checkResult(hostRef,gpuRef,element_number); // write line of data to record. // fprintf(fp,"%d,%f,%f \n",element_number,d_gap,h_gap); // write data of block size fprintf(fp,"%d,%f,%f \n",block_size_iter,d_gap,h_gap); // free the device global memory hipFree(d_p); hipFree(d_acc); // free the host global memory free(acc); free(particles); free(hostRef); free(gpuRef); } // end of for loop here. fclose(fp); return 0; }
233181f8781aa099491c415b0d77f97bffcd6b02.cu
# include <stdio.h> # include <cuda_runtime.h> # include <sys/time.h> # include <string.h> # define CHECK(call) \ { \ const cudaError_t error = call; \ if (error != cudaSuccess) \ { \ printf("Error: %s: %d \n",__FILE__,__LINE__); \ printf("reason: %s \n",cudaGetErrorString(error)); \ exit(1); \ } \ } // number of iteration, number pf paticle elements # define ITER 10 //# define N 5000 // define struct of Particles, don't know if float3 type can be used in cpu. struct Particle { float3 position; float3 velocity; }; double cpuSecond(){ // this returns the time in double format. struct timeval tp; gettimeofday(&tp,NULL); return ((double)tp.tv_sec+(double)tp.tv_usec*1.e-6); } void checkResult(struct Particle *hostRef,struct Particle *gpuRef,const int size){ // Check if the cpu and gpu implementation is the same. double epsilon = 1.0E-8; bool match = 1; for (int i=0; i<size ; i++){ // check on x axis if(abs(hostRef[i].position.x-gpuRef[i].position.x)>epsilon){ match =0; printf("array not match on x axis \n"); printf("host %5.2f gpu %5.2f at current %d \n",hostRef[i].position.x,gpuRef[i].position.x, i); break; } // check on y axis if(abs(hostRef[i].position.y-gpuRef[i].position.y)>epsilon){ match =0; printf("array not match on y axis \n"); printf("host %5.2f gpu %5.2f at current %d \n",hostRef[i].position.y,gpuRef[i].position.y, i); break; } // check on z axis if(abs(hostRef[i].position.z-gpuRef[i].position.z)>epsilon){ match =0; printf("array not match on z axis \n"); printf("host %5.2f gpu %5.2f at current %d \n",hostRef[i].position.z,gpuRef[i].position.z, i); break; } } if (match) printf("Match!\n"); } void randomacc(float3 *acc, const int size) { // this generate randome accelaration with the seed of time time_t t; srand((unsigned) time(&t)); for (int i=0; i < size; i++) { // generate accelerate acc[i].x = (float) (rand() &0xFF) / 10.0f; acc[i].y = (float) (rand() &0xFE) / 10.0f; acc[i].z = (float) (rand() &0xFD) / 10.0f; } } void update_CPU(struct Particle *array,float3 *acc,int size) { // this is used to update the position of Particles wrt random acceleration for one iteration. for (int i=0;i<size;i++){ // velocity update array[i].velocity.x += acc[i].x; array[i].velocity.y += acc[i].y; array[i].velocity.z += acc[i].z; // position update array[i].position.x += array[i].velocity.x; array[i].position.y += array[i].velocity.y; array[i].position.z += array[i].velocity.z; } } __global__ void update_GPU(struct Particle *array,float3 *acc,int size) { // this is used to update the position of Paticles wrt to random acc in GPU. int i= blockIdx.x*blockDim.x + threadIdx.x; if(i<size) { // velocity update array[i].velocity.x += acc[i].x; array[i].velocity.y += acc[i].y; array[i].velocity.z += acc[i].z; // position update array[i].position.x += array[i].velocity.x; array[i].position.y += array[i].velocity.y; array[i].position.z += array[i].velocity.z; } } int main(){ // malloc memory int block_size = 8; int rate =2; int element_number = 20000; int block_size_iter = block_size; // create csv file to record. 
FILE *fp; printf("In this test we are going to test out with %ld iteration with fixing blokc_size of %d ", ITER, block_size ); char *filename=(char *)"fixing_element_number.csv"; fp=fopen(filename,"w+"); fprintf(fp,"blokc_size,gpu_time_gap,cpu_time_gap \n"); printf("created the csv file.\n"); for (int i=0;i<ITER;i++) { //element_number = rate * element_number; // for check out the blokc_size // element_number = element_number; size_t nBytes = element_number * sizeof(struct Particle); block_size_iter = block_size_iter*rate; struct Particle *particles,*hostRef,*gpuRef,*d_p; particles = (struct Particle *)malloc(nBytes); hostRef = (struct Particle *)malloc(nBytes); gpuRef = (struct Particle *)malloc(nBytes); CHECK(cudaMalloc((struct Particle**)&d_p,nBytes)); // transfer data to GPU memory CHECK(cudaMemcpy(d_p,particles,nBytes,cudaMemcpyHostToDevice)); // get the random acc for cpu and gpu float3 *acc,*d_acc; // malloc memory to acc size_t acc_bytes= element_number* sizeof(float3); acc = (float3 *)malloc(acc_bytes); randomacc(acc,element_number); CHECK(cudaMalloc((float3**)&d_acc,acc_bytes)); // calculate the time needed for GPU start here. double d_start, d_gap; d_start = cpuSecond(); CHECK(cudaMemcpy(d_acc,acc,acc_bytes,cudaMemcpyHostToDevice)); update_GPU<<<((element_number+block_size_iter-1)/block_size_iter), block_size_iter>>>(d_p,d_acc,element_number); CHECK(cudaDeviceSynchronize()); // copy the kernel result to host side cudaMemcpy(gpuRef,d_p,nBytes,cudaMemcpyDeviceToHost); // GPU timing end here. d_gap = cpuSecond()-d_start; printf("Summary: with block_size %d, particle number %d the time gap on GPU for one iteration is %f \n",block_size_iter,element_number,d_gap); // printf("gpuRef after copy %5.2f %5.2f\n",gpuRef[0].position.x,gpuRef[6].position.x); //update in cpu // time count of cpu start here. double h_start,h_gap; h_start= cpuSecond(); update_CPU(hostRef,acc,element_number); // printf("hostRef after operate %5.2f %5.2f\n",hostRef[0].position.x,hostRef[6].position.x); h_gap=cpuSecond()-h_start; printf("Summary:with the particle number of %d, the time gap on CPU for one iteration is %f \n ",element_number,h_gap); // compare the result if they match. checkResult(hostRef,gpuRef,element_number); // write line of data to record. // fprintf(fp,"%d,%f,%f \n",element_number,d_gap,h_gap); // write data of block size fprintf(fp,"%d,%f,%f \n",block_size_iter,d_gap,h_gap); // free the device global memory cudaFree(d_p); cudaFree(d_acc); // free the host global memory free(acc); free(particles); free(hostRef); free(gpuRef); } // end of for loop here. fclose(fp); return 0; }
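One caveat when reading the timings reported by this benchmark: particles, hostRef and gpuRef are allocated with malloc and never initialised, so the CPU reference and the GPU input start from indeterminate, and possibly different, values before the random accelerations are applied. A minimal set-up step, assuming both sides are meant to start from the same (here zeroed) state, could look like this sketch:

// Sketch only: give the CPU reference and the GPU input identical, well-defined
// starting data before the benchmark loop (the all-zero state is an assumption).
#include <cuda_runtime.h>   // float3
#include <cstring>

struct Particle { float3 position; float3 velocity; };   // same layout as above

static void init_particles(Particle *gpu_in, Particle *cpu_ref, size_t count) {
    std::memset(gpu_in, 0, count * sizeof(Particle));        // everything at rest
    std::memcpy(cpu_ref, gpu_in, count * sizeof(Particle));  // identical reference
}
// usage (inside the loop above, before the cudaMemcpy to d_p):
//   init_particles(particles, hostRef, element_number);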
772053c4e1fa97a33a678251b539da622bc7bfae.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>

__global__ void add(int a, int b, int *c)
{
    *c = a + b;
}

int main()
{
    //----------- cuda devices info ---------------
    int cuda_count;
    hipDeviceProp_t prop;
    hipGetDeviceCount(&cuda_count);
    printf("Found %d CUDA-capable device(s)\n", cuda_count);
    for(int device=0; device<cuda_count; device++)
    {
        hipGetDeviceProperties(&prop, device);
        printf("--- General Information for device: %d ---\n", device);
        printf("\tDevice name: %s\n", prop.name);
        printf("\tCompute capability: %d.%d\n", prop.major, prop.minor);
        printf("\tClock rate: %d\n", prop.clockRate);
        printf("\tDevice copy overlap: ");
        if(prop.deviceOverlap) printf("Enabled\n");
        else printf("Disabled\n");
        printf("\tKernel execution timeout: ");
        if(prop.kernelExecTimeoutEnabled) printf("Enabled\n");
        else printf("Disabled\n");
        printf("\tZero copy supported (hipHostMallocMapped): ");
        if(prop.canMapHostMemory) printf("Yes\n");
        else printf("No\n");
        printf("\t---Memory Information---\n");
        printf("\tTotal global memory: %ld bytes\n", prop.totalGlobalMem);
        printf("\tTotal const memory: %ld bytes\n", prop.totalConstMem);
        printf("\tMax memory pitch: %ld bytes\n", prop.memPitch);
        printf("\tTexture alignment: %ld bytes\n", prop.textureAlignment);
        printf("\t---Multiprocessor Information---\n");
        printf("\tMultiprocessor count: %d\n", prop.multiProcessorCount);
        printf("\tShared memory per multiprocessor: %ld bytes\n", prop.sharedMemPerBlock);
        printf("\tRegisters per multiprocessor: %d\n", prop.regsPerBlock);
        printf("\tThreads in warp: %d\n", prop.warpSize);
        printf("\tMax thread dimensions: %d, %d, %d\n", prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2]);
        printf("\tMax grid dimensions: %d, %d, %d\n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
        printf("\tMax texture 1D dimensions: %d\n", prop.maxTexture1D);
        printf("\tMax texture 2D dimensions: %d,%d\n", prop.maxTexture2D[0], prop.maxTexture2D[1]);
        printf("\tMax texture 3D dimensions: %d,%d,%d\n", prop.maxTexture3D[0], prop.maxTexture3D[1], prop.maxTexture3D[2]);
        printf("\tConcurrent kernels: ");
        if(prop.concurrentKernels) printf("Enabled\n");
        else printf("Disabled\n");
        printf("\n");
    }

    int c;
    int *dev_c;
    hipMalloc((void**)&dev_c, sizeof(int));
    hipLaunchKernelGGL(( add), dim3(1), dim3(1), 0, 0, 1, 2, dev_c);
    hipMemcpy(&c, dev_c, sizeof(int), hipMemcpyDeviceToHost);
    printf("Result: %d\n", c);
    hipFree(dev_c);
    return 0;
}
772053c4e1fa97a33a678251b539da622bc7bfae.cu
#include <stdio.h>

__global__ void add(int a, int b, int *c)
{
    *c = a + b;
}

int main()
{
    //----------- cuda devices info ---------------
    int cuda_count;
    cudaDeviceProp prop;
    cudaGetDeviceCount(&cuda_count);
    printf("Found %d CUDA-capable device(s)\n", cuda_count);
    for(int device=0; device<cuda_count; device++)
    {
        cudaGetDeviceProperties(&prop, device);
        printf("--- General Information for device: %d ---\n", device);
        printf("\tDevice name: %s\n", prop.name);
        printf("\tCompute capability: %d.%d\n", prop.major, prop.minor);
        printf("\tClock rate: %d\n", prop.clockRate);
        printf("\tDevice copy overlap: ");
        if(prop.deviceOverlap) printf("Enabled\n");
        else printf("Disabled\n");
        printf("\tKernel execution timeout: ");
        if(prop.kernelExecTimeoutEnabled) printf("Enabled\n");
        else printf("Disabled\n");
        printf("\tZero copy supported (cudaHostAllocMapped): ");
        if(prop.canMapHostMemory) printf("Yes\n");
        else printf("No\n");
        printf("\t---Memory Information---\n");
        printf("\tTotal global memory: %ld bytes\n", prop.totalGlobalMem);
        printf("\tTotal const memory: %ld bytes\n", prop.totalConstMem);
        printf("\tMax memory pitch: %ld bytes\n", prop.memPitch);
        printf("\tTexture alignment: %ld bytes\n", prop.textureAlignment);
        printf("\t---Multiprocessor Information---\n");
        printf("\tMultiprocessor count: %d\n", prop.multiProcessorCount);
        printf("\tShared memory per multiprocessor: %ld bytes\n", prop.sharedMemPerBlock);
        printf("\tRegisters per multiprocessor: %d\n", prop.regsPerBlock);
        printf("\tThreads in warp: %d\n", prop.warpSize);
        printf("\tMax thread dimensions: %d, %d, %d\n", prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2]);
        printf("\tMax grid dimensions: %d, %d, %d\n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
        printf("\tMax texture 1D dimensions: %d\n", prop.maxTexture1D);
        printf("\tMax texture 2D dimensions: %d,%d\n", prop.maxTexture2D[0], prop.maxTexture2D[1]);
        printf("\tMax texture 3D dimensions: %d,%d,%d\n", prop.maxTexture3D[0], prop.maxTexture3D[1], prop.maxTexture3D[2]);
        printf("\tConcurrent kernels: ");
        if(prop.concurrentKernels) printf("Enabled\n");
        else printf("Disabled\n");
        printf("\n");
    }

    int c;
    int *dev_c;
    cudaMalloc((void**)&dev_c, sizeof(int));
    add<<<1,1>>>(1, 2, dev_c);
    cudaMemcpy(&c, dev_c, sizeof(int), cudaMemcpyDeviceToHost);
    printf("Result: %d\n", c);
    cudaFree(dev_c);
    return 0;
}
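The runtime calls in this device-query example are not checked for errors. A wrapper in the spirit of the CHECK macro used by the particle benchmark earlier in this collection could be applied as in the sketch below (the do/while form is a common variant and is not taken from the original file):

// Sketch: error-checked versions of the allocation/copy calls above.
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

#define CHECK(call)                                                        \
    do {                                                                   \
        cudaError_t err_ = (call);                                         \
        if (err_ != cudaSuccess) {                                         \
            fprintf(stderr, "%s:%d: %s\n", __FILE__, __LINE__,             \
                    cudaGetErrorString(err_));                             \
            exit(1);                                                       \
        }                                                                  \
    } while (0)

// usage:
//   CHECK(cudaMalloc((void**)&dev_c, sizeof(int)));
//   add<<<1,1>>>(1, 2, dev_c);
//   CHECK(cudaGetLastError());            // catches launch-time errors
//   CHECK(cudaMemcpy(&c, dev_c, sizeof(int), cudaMemcpyDeviceToHost));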
0bcdc15ceade1bb2b4c0150224398019d735e214.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "magnitude_threshold_kernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *grad_x = NULL; hipMalloc(&grad_x, XSIZE*YSIZE); float *grad_y = NULL; hipMalloc(&grad_y, XSIZE*YSIZE); float *grad_z = NULL; hipMalloc(&grad_z, XSIZE*YSIZE); float gamma = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( magnitude_threshold_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, grad_x,grad_y,grad_z,gamma); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( magnitude_threshold_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, grad_x,grad_y,grad_z,gamma); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( magnitude_threshold_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, grad_x,grad_y,grad_z,gamma); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
0bcdc15ceade1bb2b4c0150224398019d735e214.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "magnitude_threshold_kernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *grad_x = NULL; cudaMalloc(&grad_x, XSIZE*YSIZE); float *grad_y = NULL; cudaMalloc(&grad_y, XSIZE*YSIZE); float *grad_z = NULL; cudaMalloc(&grad_z, XSIZE*YSIZE); float gamma = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); magnitude_threshold_kernel<<<gridBlock,threadBlock>>>(grad_x,grad_y,grad_z,gamma); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { magnitude_threshold_kernel<<<gridBlock,threadBlock>>>(grad_x,grad_y,grad_z,gamma); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { magnitude_threshold_kernel<<<gridBlock,threadBlock>>>(grad_x,grad_y,grad_z,gamma); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
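Apart from the header swaps (curand_kernel.h to hiprand_kernel.h, cuda.h to hip/hip_runtime.h), this benchmark harness converts one-to-one. Note that the timed loop reads steady_clock::now() immediately after enqueuing 1000 asynchronous launches, so it mostly measures launch overhead rather than kernel runtime. A device-side alternative is event timing, sketched below as a drop-in for the timed loop; it reuses gridBlock, threadBlock and the kernel arguments from the file above:

hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
for (int i = 0; i < 1000; ++i)
  hipLaunchKernelGGL(magnitude_threshold_kernel, gridBlock, threadBlock, 0, 0,
                     grad_x, grad_y, grad_z, gamma);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);          // wait until all 1000 kernels have finished
float ms = 0.0f;
hipEventElapsedTime(&ms, start, stop);
hipEventDestroy(start);
hipEventDestroy(stop);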
920dcb145cd7bed723e2bf29e0fdcd9f7b6a339c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.0.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date February 2016 @precisions normal z -> s d c */ #include "magma_internal.h" #include "commonblas_z.h" // 512 is maximum number of threads for CUDA capability 1.x #define BLOCK_SIZE 512 /** Purpose ------- ZGEQR2 computes a QR factorization of a complex m by n matrix A: A = Q * R. This expert routine requires two more arguments than the standard zgeqr2, namely, dT and ddA, explained below. The storage for A is also not as in the LAPACK's zgeqr2 routine (see below). The first is used to output the triangular n x n factor T of the block reflector used in the factorization. The second holds the diagonal nxn blocks of A, i.e., the diagonal submatrices of R. This routine implements the left looking QR. This version adds internal blocking. Arguments --------- @param[in] m INTEGER The number of rows of the matrix A. M >= 0. @param[in] n INTEGER The number of columns of the matrix A. N >= 0. @param[in,out] dA COMPLEX_16 array, dimension (LDA,N) On entry, the m by n matrix A. On exit, the unitary matrix Q as a product of elementary reflectors (see Further Details). \n the elements on and above the diagonal of the array contain the min(m,n) by n upper trapezoidal matrix R (R is upper triangular if m >= n); the elements below the diagonal, with the array TAU, represent the unitary matrix Q as a product of elementary reflectors (see Further Details). @param[in] ldda INTEGER The leading dimension of the array A. LDA >= max(1,M). @param[out] dtau COMPLEX_16 array, dimension (min(M,N)) The scalar factors of the elementary reflectors (see Further Details). @param[out] dT COMPLEX_16 array, dimension N x N. Stores the triangular N x N factor T of the block reflector used in the factorization. The lower triangular part is 0. @param[out] ddA COMPLEX_16 array, dimension N x N. Stores the elements of the upper N x N diagonal block of A. LAPACK stores this array in A. There are 0s below the diagonal. @param dwork (workspace) DOUBLE PRECISION array, dimension (3 N) @param[out] info INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value @param[in] queue magma_queue_t Queue to execute in. Further Details --------------- The matrix Q is represented as a product of elementary reflectors Q = H(1) H(2) . . . H(k), where k = min(m,n). Each H(i) has the form H(i) = I - tau * v * v**H where tau is a complex scalar, and v is a complex vector with v(1:i-1) = 0 and v(i) = 1; v(i+1:m) is stored on exit in A(i+1:m,i), and tau in TAU(i). 
@ingroup magma_zgeqrf_aux ********************************************************************/ extern "C" magma_int_t magma_zgeqr2x4_gpu( magma_int_t m, magma_int_t n, magmaDoubleComplex_ptr dA, magma_int_t ldda, magmaDoubleComplex_ptr dtau, magmaDoubleComplex_ptr dT, magmaDoubleComplex_ptr ddA, magmaDouble_ptr dwork, magma_queue_t queue, magma_int_t *info) { #define dA(i_,j_) (dA + (j_)*(ldda) + (i_)) #define dT(i_,j_) (dT + (j_)*(k) + (i_)) #define BS 32 magma_int_t i, k; magmaDouble_ptr dnorm = (magmaDouble_ptr)dwork; magmaDoubleComplex_ptr dwork2 = (magmaDoubleComplex_ptr)(dwork + 2*n); *info = 0; if (m < 0) { *info = -1; } else if (n < 0) { *info = -2; } else if (ldda < max(1,m)) { *info = -4; } if (*info != 0) { magma_xerbla( __func__, -(*info) ); return *info; } /* Compute the norms of the trailing columns */ k = min(m,n); magmablas_dznrm2_cols( m, k, dA(0,0), ldda, dnorm, queue ); for (magma_int_t b=0; b < k; b += BS) { for (i = b; i < min(k, b+BS); ++i) { /* Apply H**H to A(:,i) from the left */ if (i-b > 0) { /* Compute the (i-1)th column of T */ if ( i-1 > 0 ) { hipLaunchKernelGGL(( magma_zgemv_kernel3) , dim3(i-1), dim3(BLOCK_SIZE), 0, queue->cuda_stream() , m-i+1, dA(i-1,0), ldda, dA(i-1, i-1), dwork2, dtau+i-1); hipLaunchKernelGGL(( magma_ztrmv_kernel2) , dim3(i-1), dim3(i-1), 0, queue->cuda_stream() , dT(0,0), k, dwork2, dT(0,i-1), dtau+i-1); } /* dwork = V**H c */ hipLaunchKernelGGL(( magma_zgemv_kernel1) , dim3(i-b), dim3(BLOCK_SIZE), 0, queue->cuda_stream() , m-b, dA(b, b), ldda, dA(b,i), dwork2); /* dwork = T**H dwork2 */ hipLaunchKernelGGL(( magma_ztrmv_tkernel) , dim3(i-b), dim3(i-b), 0, queue->cuda_stream() , dT(b,b), k, dwork2, dwork2+i-b); /* c = c - V dwork2 */ if ( m-b > 0 ) { dim3 blocks3( magma_ceildiv( m-b, BLOCK_SIZE ) ); dim3 threads3( BLOCK_SIZE ); hipLaunchKernelGGL(( magma_zgemv_kernel2) , dim3(blocks3), dim3(threads3), 0, queue->cuda_stream() , m-b, i-b, dA(b,b), ldda, dwork2+i-b, dA(b, i)); } } /* Adjust the dnorm[i] to hold the norm of A(i:m,i) */ if ( i > 0 ) { hipLaunchKernelGGL(( magma_dznrm2_adjust_kernel) , dim3(1), dim3(i), 0, queue->cuda_stream() , dnorm+i, dA(0, i)); } /* Generate elementary reflector H(i) to annihilate A(i+1:m,i) 1. 1 is not yet put on the diagonal of A 2. Elements above the diagonal are copied in ddA and the ones in A are set to zero 3. update T */ magma_zlarfgx_gpu( m-i, dA(i, i), dA(min(i+1,m),i), dtau+i, dnorm+i, ddA + i + i*n, i, queue ); if (i == 0) { magmaDoubleComplex tt = MAGMA_Z_ONE; magmablas_zlacpy( MagmaFull, 1, 1, dtau, 1, dT(0,0), 1, queue ); magma_zsetmatrix_async(1, 1, &tt, 1, dA(i, i), 1, queue ); } } if ( i-1 > 0 ) { hipLaunchKernelGGL(( magma_zgemv_kernel3) , dim3(i-1), dim3(BLOCK_SIZE), 0, queue->cuda_stream() , m-i+1, dA(i-1,0), ldda, dA(i-1, i-1), dwork2, dtau+i-1); hipLaunchKernelGGL(( magma_ztrmv_kernel2) , dim3(i-1), dim3(i-1), 0, queue->cuda_stream() , dT(0,0), k, dwork2, dT(0,i-1), dtau+i-1); } /* Apply the transformations to the trailing matrix. */ //magma_zlarfb2_gpu( MagmaLeft, MagmaConjTrans, MagmaForward, MagmaColumnwise, magma_zlarfb2_gpu( m-b, k-i, BS, dA(b, b), ldda, dT+b+b*k, k, dA(b, i), ldda, dwork2, k-i, queue ); } return *info; } /* magma_zgeqr2 */
920dcb145cd7bed723e2bf29e0fdcd9f7b6a339c.cu
/* -- MAGMA (version 2.0.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date February 2016 @precisions normal z -> s d c */ #include "magma_internal.h" #include "commonblas_z.h" // 512 is maximum number of threads for CUDA capability 1.x #define BLOCK_SIZE 512 /** Purpose ------- ZGEQR2 computes a QR factorization of a complex m by n matrix A: A = Q * R. This expert routine requires two more arguments than the standard zgeqr2, namely, dT and ddA, explained below. The storage for A is also not as in the LAPACK's zgeqr2 routine (see below). The first is used to output the triangular n x n factor T of the block reflector used in the factorization. The second holds the diagonal nxn blocks of A, i.e., the diagonal submatrices of R. This routine implements the left looking QR. This version adds internal blocking. Arguments --------- @param[in] m INTEGER The number of rows of the matrix A. M >= 0. @param[in] n INTEGER The number of columns of the matrix A. N >= 0. @param[in,out] dA COMPLEX_16 array, dimension (LDA,N) On entry, the m by n matrix A. On exit, the unitary matrix Q as a product of elementary reflectors (see Further Details). \n the elements on and above the diagonal of the array contain the min(m,n) by n upper trapezoidal matrix R (R is upper triangular if m >= n); the elements below the diagonal, with the array TAU, represent the unitary matrix Q as a product of elementary reflectors (see Further Details). @param[in] ldda INTEGER The leading dimension of the array A. LDA >= max(1,M). @param[out] dtau COMPLEX_16 array, dimension (min(M,N)) The scalar factors of the elementary reflectors (see Further Details). @param[out] dT COMPLEX_16 array, dimension N x N. Stores the triangular N x N factor T of the block reflector used in the factorization. The lower triangular part is 0. @param[out] ddA COMPLEX_16 array, dimension N x N. Stores the elements of the upper N x N diagonal block of A. LAPACK stores this array in A. There are 0s below the diagonal. @param dwork (workspace) DOUBLE PRECISION array, dimension (3 N) @param[out] info INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value @param[in] queue magma_queue_t Queue to execute in. Further Details --------------- The matrix Q is represented as a product of elementary reflectors Q = H(1) H(2) . . . H(k), where k = min(m,n). Each H(i) has the form H(i) = I - tau * v * v**H where tau is a complex scalar, and v is a complex vector with v(1:i-1) = 0 and v(i) = 1; v(i+1:m) is stored on exit in A(i+1:m,i), and tau in TAU(i). 
@ingroup magma_zgeqrf_aux ********************************************************************/ extern "C" magma_int_t magma_zgeqr2x4_gpu( magma_int_t m, magma_int_t n, magmaDoubleComplex_ptr dA, magma_int_t ldda, magmaDoubleComplex_ptr dtau, magmaDoubleComplex_ptr dT, magmaDoubleComplex_ptr ddA, magmaDouble_ptr dwork, magma_queue_t queue, magma_int_t *info) { #define dA(i_,j_) (dA + (j_)*(ldda) + (i_)) #define dT(i_,j_) (dT + (j_)*(k) + (i_)) #define BS 32 magma_int_t i, k; magmaDouble_ptr dnorm = (magmaDouble_ptr)dwork; magmaDoubleComplex_ptr dwork2 = (magmaDoubleComplex_ptr)(dwork + 2*n); *info = 0; if (m < 0) { *info = -1; } else if (n < 0) { *info = -2; } else if (ldda < max(1,m)) { *info = -4; } if (*info != 0) { magma_xerbla( __func__, -(*info) ); return *info; } /* Compute the norms of the trailing columns */ k = min(m,n); magmablas_dznrm2_cols( m, k, dA(0,0), ldda, dnorm, queue ); for (magma_int_t b=0; b < k; b += BS) { for (i = b; i < min(k, b+BS); ++i) { /* Apply H**H to A(:,i) from the left */ if (i-b > 0) { /* Compute the (i-1)th column of T */ if ( i-1 > 0 ) { magma_zgemv_kernel3 <<< i-1, BLOCK_SIZE, 0, queue->cuda_stream() >>> ( m-i+1, dA(i-1,0), ldda, dA(i-1, i-1), dwork2, dtau+i-1); magma_ztrmv_kernel2 <<< i-1, i-1, 0, queue->cuda_stream() >>> ( dT(0,0), k, dwork2, dT(0,i-1), dtau+i-1); } /* dwork = V**H c */ magma_zgemv_kernel1 <<< i-b, BLOCK_SIZE, 0, queue->cuda_stream() >>> (m-b, dA(b, b), ldda, dA(b,i), dwork2); /* dwork = T**H dwork2 */ magma_ztrmv_tkernel <<< i-b, i-b, 0, queue->cuda_stream() >>> (dT(b,b), k, dwork2, dwork2+i-b); /* c = c - V dwork2 */ if ( m-b > 0 ) { dim3 blocks3( magma_ceildiv( m-b, BLOCK_SIZE ) ); dim3 threads3( BLOCK_SIZE ); magma_zgemv_kernel2 <<< blocks3, threads3, 0, queue->cuda_stream() >>> (m-b, i-b, dA(b,b), ldda, dwork2+i-b, dA(b, i)); } } /* Adjust the dnorm[i] to hold the norm of A(i:m,i) */ if ( i > 0 ) { magma_dznrm2_adjust_kernel <<< 1, i, 0, queue->cuda_stream() >>> (dnorm+i, dA(0, i)); } /* Generate elementary reflector H(i) to annihilate A(i+1:m,i) 1. 1 is not yet put on the diagonal of A 2. Elements above the diagonal are copied in ddA and the ones in A are set to zero 3. update T */ magma_zlarfgx_gpu( m-i, dA(i, i), dA(min(i+1,m),i), dtau+i, dnorm+i, ddA + i + i*n, i, queue ); if (i == 0) { magmaDoubleComplex tt = MAGMA_Z_ONE; magmablas_zlacpy( MagmaFull, 1, 1, dtau, 1, dT(0,0), 1, queue ); magma_zsetmatrix_async(1, 1, &tt, 1, dA(i, i), 1, queue ); } } if ( i-1 > 0 ) { magma_zgemv_kernel3 <<< i-1, BLOCK_SIZE, 0, queue->cuda_stream() >>> ( m-i+1, dA(i-1,0), ldda, dA(i-1, i-1), dwork2, dtau+i-1); magma_ztrmv_kernel2 <<< i-1, i-1, 0, queue->cuda_stream() >>> ( dT(0,0), k, dwork2, dT(0,i-1), dtau+i-1); } /* Apply the transformations to the trailing matrix. */ //magma_zlarfb2_gpu( MagmaLeft, MagmaConjTrans, MagmaForward, MagmaColumnwise, magma_zlarfb2_gpu( m-b, k-i, BS, dA(b, b), ldda, dT+b+b*k, k, dA(b, i), ldda, dwork2, k-i, queue ); } return *info; } /* magma_zgeqr2 */
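The MAGMA conversion is dominated by one mechanical rewrite: every stream-qualified triple-chevron launch becomes a hipLaunchKernelGGL call with the same grid, block, shared-memory and stream arguments, while MAGMA's own queue->cuda_stream() accessor and helper routines are left untouched. In placeholder form (my_kernel, grid, threads, shmem_bytes, stream and the arguments are generic names, not MAGMA symbols):

// CUDA:  my_kernel<<< grid, threads, shmem_bytes, stream >>>(arg0, arg1);
// HIP:
hipLaunchKernelGGL(my_kernel, grid, threads, shmem_bytes, stream, arg0, arg1);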
d29ab9ff9e229ac1df482f4153563045d948bcbb.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (c) 2020-2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "dali/operators/audio/mfcc/mfcc.h" #include <vector> #include "dali/kernels/signal/dct/dct_gpu.h" #include "dali/core/static_switch.h" namespace dali { namespace detail { template <> DLL_PUBLIC void LifterCoeffs<GPUBackend>::Calculate(int64_t target_length, float lifter, hipStream_t stream) { // If different lifter argument, clear previous coefficients if (lifter_ != lifter) { coeffs_.clear(); lifter_ = lifter; } // 0 means no liftering if (lifter_ == 0.0f) return; // Calculate remaining coefficients (if necessary) if (static_cast<int64_t>(coeffs_.size()) < target_length) { int start_idx = coeffs_.size(); int added_length = target_length - start_idx; coeffs_.resize(target_length, stream); std::vector<float> new_coeffs(added_length); CalculateCoeffs(new_coeffs.data(), start_idx, added_length); CUDA_CALL( hipMemcpyAsync(&coeffs_.data()[start_idx], new_coeffs.data(), added_length * sizeof(float), hipMemcpyHostToDevice, stream)); } } template <typename T> std::vector<OutputDesc> SetupKernel(kernels::KernelManager &kmgr, kernels::KernelContext &ctx, const TensorList<GPUBackend> &input, span<const MFCC<GPUBackend>::DctArgs> args, int axis) { using Kernel = kernels::signal::dct::Dct1DGpu<T>; kmgr.Initialize<Kernel>(); kmgr.Resize<Kernel>(1, 1); auto in_view = view<const T>(input); auto &req = kmgr.Setup<Kernel>(0, ctx, in_view, args, axis); return {{req.output_shapes[0], input.type()}}; } } // namespace detail template<> bool MFCC<GPUBackend>::SetupImpl(std::vector<OutputDesc> &output_desc, const workspace_t<GPUBackend> &ws) { GetArguments(ws); ctx_.gpu.stream = ws.stream(); auto &input = ws.InputRef<GPUBackend>(0); auto in_shape = input.shape(); int ndim = in_shape.sample_dim(); DALI_ENFORCE(axis_ >= 0 && axis_ < ndim, make_string("Axis ", axis_, " is out of bounds [0,", ndim, ")")); TYPE_SWITCH(input.type().id(), type2id, T, MFCC_SUPPORTED_TYPES, ( output_desc = detail::SetupKernel<T>(kmgr_, ctx_, input, make_cspan(args_), axis_); ), DALI_FAIL(make_string("Unsupported data type: ", input.type().id()))); // NOLINT int64_t max_ndct = 0; for (int i = 0; i < output_desc[0].shape.num_samples(); ++i) { int64_t ndct = output_desc[0].shape[i][axis_]; if (ndct > max_ndct) max_ndct = ndct; } lifter_coeffs_.Calculate(max_ndct, lifter_, ws.stream()); return true; } template<> void MFCC<GPUBackend>::RunImpl(workspace_t<GPUBackend> &ws) { auto &input = ws.InputRef<GPUBackend>(0); TYPE_SWITCH(input.type().id(), type2id, T, MFCC_SUPPORTED_TYPES, ( using Kernel = kernels::signal::dct::Dct1DGpu<T>; auto in_view = view<const T>(input); auto out_view = view<T>(ws.OutputRef<GPUBackend>(0)); auto lifter_view = make_tensor_gpu<1>(lifter_coeffs_.data(), {static_cast<int64_t>(lifter_coeffs_.size())}); kmgr_.Run<Kernel>(0, 0, ctx_, out_view, in_view, lifter_view); ), DALI_FAIL(make_string("Unsupported data type: ", 
input.type().id()))); // NOLINT } DALI_REGISTER_OPERATOR(MFCC, MFCC<GPUBackend>, GPU); } // namespace dali
d29ab9ff9e229ac1df482f4153563045d948bcbb.cu
// Copyright (c) 2020-2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "dali/operators/audio/mfcc/mfcc.h" #include <vector> #include "dali/kernels/signal/dct/dct_gpu.h" #include "dali/core/static_switch.h" namespace dali { namespace detail { template <> DLL_PUBLIC void LifterCoeffs<GPUBackend>::Calculate(int64_t target_length, float lifter, cudaStream_t stream) { // If different lifter argument, clear previous coefficients if (lifter_ != lifter) { coeffs_.clear(); lifter_ = lifter; } // 0 means no liftering if (lifter_ == 0.0f) return; // Calculate remaining coefficients (if necessary) if (static_cast<int64_t>(coeffs_.size()) < target_length) { int start_idx = coeffs_.size(); int added_length = target_length - start_idx; coeffs_.resize(target_length, stream); std::vector<float> new_coeffs(added_length); CalculateCoeffs(new_coeffs.data(), start_idx, added_length); CUDA_CALL( cudaMemcpyAsync(&coeffs_.data()[start_idx], new_coeffs.data(), added_length * sizeof(float), cudaMemcpyHostToDevice, stream)); } } template <typename T> std::vector<OutputDesc> SetupKernel(kernels::KernelManager &kmgr, kernels::KernelContext &ctx, const TensorList<GPUBackend> &input, span<const MFCC<GPUBackend>::DctArgs> args, int axis) { using Kernel = kernels::signal::dct::Dct1DGpu<T>; kmgr.Initialize<Kernel>(); kmgr.Resize<Kernel>(1, 1); auto in_view = view<const T>(input); auto &req = kmgr.Setup<Kernel>(0, ctx, in_view, args, axis); return {{req.output_shapes[0], input.type()}}; } } // namespace detail template<> bool MFCC<GPUBackend>::SetupImpl(std::vector<OutputDesc> &output_desc, const workspace_t<GPUBackend> &ws) { GetArguments(ws); ctx_.gpu.stream = ws.stream(); auto &input = ws.InputRef<GPUBackend>(0); auto in_shape = input.shape(); int ndim = in_shape.sample_dim(); DALI_ENFORCE(axis_ >= 0 && axis_ < ndim, make_string("Axis ", axis_, " is out of bounds [0,", ndim, ")")); TYPE_SWITCH(input.type().id(), type2id, T, MFCC_SUPPORTED_TYPES, ( output_desc = detail::SetupKernel<T>(kmgr_, ctx_, input, make_cspan(args_), axis_); ), DALI_FAIL(make_string("Unsupported data type: ", input.type().id()))); // NOLINT int64_t max_ndct = 0; for (int i = 0; i < output_desc[0].shape.num_samples(); ++i) { int64_t ndct = output_desc[0].shape[i][axis_]; if (ndct > max_ndct) max_ndct = ndct; } lifter_coeffs_.Calculate(max_ndct, lifter_, ws.stream()); return true; } template<> void MFCC<GPUBackend>::RunImpl(workspace_t<GPUBackend> &ws) { auto &input = ws.InputRef<GPUBackend>(0); TYPE_SWITCH(input.type().id(), type2id, T, MFCC_SUPPORTED_TYPES, ( using Kernel = kernels::signal::dct::Dct1DGpu<T>; auto in_view = view<const T>(input); auto out_view = view<T>(ws.OutputRef<GPUBackend>(0)); auto lifter_view = make_tensor_gpu<1>(lifter_coeffs_.data(), {static_cast<int64_t>(lifter_coeffs_.size())}); kmgr_.Run<Kernel>(0, 0, ctx_, out_view, in_view, lifter_view); ), DALI_FAIL(make_string("Unsupported data type: ", input.type().id()))); // NOLINT } DALI_REGISTER_OPERATOR(MFCC, 
MFCC<GPUBackend>, GPU); } // namespace dali
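In this DALI operator only host-side types and calls change: cudaStream_t becomes hipStream_t, and cudaMemcpyAsync / cudaMemcpyHostToDevice become their hip* counterparts; the kernel manager and DCT kernel are untouched. A small self-contained sketch of the asynchronous tail-upload pattern used in LifterCoeffs::Calculate (the function name and argument layout here are illustrative):

#include <hip/hip_runtime.h>
#include <vector>

// Copy newly computed coefficients to device memory without blocking the host.
void upload_tail(float *dev_coeffs, const std::vector<float> &new_coeffs,
                 size_t start_idx, hipStream_t stream) {
  hipMemcpyAsync(dev_coeffs + start_idx, new_coeffs.data(),
                 new_coeffs.size() * sizeof(float),
                 hipMemcpyHostToDevice, stream);
}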
785db20449d18478a8b342fb8079b40dc65cf4d1.hip
// !!! This is a file automatically generated by hipify!!!
#include "includes.h"

__global__ void SetupPoissKernel(hiprandState_t *curand_state, uint64_t n_dir_conn,
                                 unsigned long long seed)
{
  uint64_t blockId = (uint64_t)blockIdx.y * gridDim.x + blockIdx.x;
  uint64_t i_conn = blockId * blockDim.x + threadIdx.x;
  if (i_conn<n_dir_conn) {
    hiprand_init(seed, i_conn, 0, &curand_state[i_conn]);
  }
}
785db20449d18478a8b342fb8079b40dc65cf4d1.cu
#include "includes.h" __global__ void SetupPoissKernel(curandState *curand_state, uint64_t n_dir_conn, unsigned long long seed) { uint64_t blockId = (uint64_t)blockIdx.y * gridDim.x + blockIdx.x; uint64_t i_conn = blockId * blockDim.x + threadIdx.x; if (i_conn<n_dir_conn) { curand_init(seed, i_conn, 0, &curand_state[i_conn]); } }
c5b3982369a4d60a3184fee17a6969dd9ebfab9d.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
 * Copyright (C) 2006-2018 Istituto Italiano di Tecnologia (IIT)
 * Copyright (C) 2007 Giacomo Spigler
 * All rights reserved.
 *
 * This software may be modified and distributed under the terms of the
 * BSD-3-Clause license. See the accompanying LICENSE file for details.
 */

extern "C" {

__global__ void FragmentProgram(int w, int h, unsigned char *in, unsigned char *out) {
  int i=0;
  for(i=threadIdx.x+blockIdx.x*blockDim.x; i<w*h; i+=blockDim.x*gridDim.x) {
    out[i*3]=(in[i*3]+in[i*3+1]+in[i*3+2])/3;
    out[i*3+1]=out[i*3];
    out[i*3+2]=out[i*3];
  }
}

}
c5b3982369a4d60a3184fee17a6969dd9ebfab9d.cu
/*
 * Copyright (C) 2006-2018 Istituto Italiano di Tecnologia (IIT)
 * Copyright (C) 2007 Giacomo Spigler
 * All rights reserved.
 *
 * This software may be modified and distributed under the terms of the
 * BSD-3-Clause license. See the accompanying LICENSE file for details.
 */

extern "C" {

__global__ void FragmentProgram(int w, int h, unsigned char *in, unsigned char *out) {
  int i=0;
  for(i=threadIdx.x+blockIdx.x*blockDim.x; i<w*h; i+=blockDim.x*gridDim.x) {
    out[i*3]=(in[i*3]+in[i*3+1]+in[i*3+2])/3;
    out[i*3+1]=out[i*3];
    out[i*3+2]=out[i*3];
  }
}

}
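Kernels that use only built-in indexing, like this RGB-to-grey grid-stride loop, need no source changes at all; hipify merely prepends the hip_runtime include. A sketch of a host wrapper that runs it over an interleaved RGB image (the wrapper, buffer names and the 64x256 launch shape are illustrative, and it assumes the kernel above is in scope):

#include <hip/hip_runtime.h>

void to_grey(const unsigned char *host_rgb, unsigned char *host_grey, int w, int h) {
  unsigned char *d_in = nullptr, *d_out = nullptr;
  const size_t bytes = (size_t)w * h * 3;
  hipMalloc(&d_in, bytes);
  hipMalloc(&d_out, bytes);
  hipMemcpy(d_in, host_rgb, bytes, hipMemcpyHostToDevice);
  // Grid-stride loop: any grid size covers the image; 64 blocks of 256 threads here.
  hipLaunchKernelGGL(FragmentProgram, dim3(64), dim3(256), 0, 0, w, h, d_in, d_out);
  hipMemcpy(host_grey, d_out, bytes, hipMemcpyDeviceToHost);
  hipFree(d_in);
  hipFree(d_out);
}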
99440ff07aa1e12e0955fce0da8ed80c48b37307.hip
// !!! This is a file automatically generated by hipify!!!
/*
 * functions.cu
 *
 * Created on: Jan 4, 2017
 * Author: boris
 */
#include "functions_hip.cuh"
99440ff07aa1e12e0955fce0da8ed80c48b37307.cu
/*
 * functions.cu
 *
 * Created on: Jan 4, 2017
 * Author: boris
 */
#include "functions.cuh"
db683b28918f3813f334dd75f3edb6fc124f1278.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "histogram_creation.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *A = NULL; hipMalloc(&A, XSIZE*YSIZE); int *hist = NULL; hipMalloc(&hist, XSIZE*YSIZE); int no_of_threads = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( histogram_creation), dim3(gridBlock),dim3(threadBlock), 0, 0, A,hist,no_of_threads); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( histogram_creation), dim3(gridBlock),dim3(threadBlock), 0, 0, A,hist,no_of_threads); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( histogram_creation), dim3(gridBlock),dim3(threadBlock), 0, 0, A,hist,no_of_threads); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
db683b28918f3813f334dd75f3edb6fc124f1278.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "histogram_creation.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *A = NULL; cudaMalloc(&A, XSIZE*YSIZE); int *hist = NULL; cudaMalloc(&hist, XSIZE*YSIZE); int no_of_threads = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); histogram_creation<<<gridBlock,threadBlock>>>(A,hist,no_of_threads); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { histogram_creation<<<gridBlock,threadBlock>>>(A,hist,no_of_threads); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { histogram_creation<<<gridBlock,threadBlock>>>(A,hist,no_of_threads); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
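The grid-size padding in both harnesses (incrementing iXSIZE and iYSIZE until they divide evenly by the block size) is the long way of writing a ceiling division; the same grid can be computed directly, as sketched below with the harness's own variable names:

dim3 gridBlock((XSIZE + BLOCKX - 1) / BLOCKX, (YSIZE + BLOCKY - 1) / BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);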
97a3cdc3cfa666f104e1a1552dbb77bd04382751.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <Python.h> #include <math.h> #include <cstdint> #include <stdio.h> /* Holds to elements part of a struct array */ typedef struct { float x, y; } Data; /* Global name variables to seperate CPU from GPU */ static const char CPU[] = "CPU"; static const char GPU[] = "GPU"; __global__ void add(int r, int n, Data *g) { int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int i = 0; i < r; i += 1) { for (int j = index; j < n; j += stride) g[j].y = g[j].x + g[j].y; } } /* Destructor function for cpu data stucture */ static void del_Data(PyObject *obj) { const char *name = PyCapsule_GetName(obj); if (strcmp(name, CPU)) { printf("%s", name); free(PyCapsule_GetPointer(obj, name)); } else if (strcmp(name, GPU)) { printf("%s", name); hipFree(PyCapsule_GetPointer(obj, name)); } else { printf("PyCapsule failed, couldn't get capsule name"); } } /* Get py object pointer object */ static Data *PyData_AsPoint(PyObject *obj, const char *name) { return (Data *) PyCapsule_GetPointer(obj, name); } /* Create new PyCapsule (keeping mem state) */ static PyObject *PyData_FromPoint(Data *d, int must_free, const char *name) { return PyCapsule_New(d, name, must_free ? del_Data : NULL); } /* Initiate Cuda Mem, struct array, struct x and y with N length*/ static PyObject *method_cuda_allocate(PyObject *self, PyObject *args) { Data *gpu_device; Data *host_device; int n; if (!PyArg_ParseTuple(args,"i",&n)) { return NULL; } host_device = (Data *) malloc(n * sizeof(Data)); hipMalloc((void**)&gpu_device, n * sizeof(Data)); return Py_BuildValue("OO", PyData_FromPoint(host_device, 1, CPU), \ PyData_FromPoint(gpu_device, 1, GPU)); } /* Set value to each element pair */ static PyObject *method_cuda_set(PyObject *self, PyObject *args) { Data *d; Data *g; PyObject *py_host_device; PyObject *py_gpu_device; int n; if (!PyArg_ParseTuple(args, "iOO", &n, &py_host_device, &py_gpu_device)) { return NULL; } if (!(d = PyData_AsPoint(py_host_device, CPU)) | \ !(g = PyData_AsPoint(py_gpu_device, GPU))) { return NULL; } for (int i = 0; i < n; i++) { d[i].x = 1.0f; d[i].y = 2.0f; } hipMemcpy(g, d, n * sizeof(Data), hipMemcpyHostToDevice); Py_RETURN_NONE; } /* Add x and y, return error check */ static PyObject *method_cuda_add(PyObject *self, PyObject *args) { Data *d; Data *g; PyObject *py_host_device; PyObject *py_gpu_device; int n; int r; if (!PyArg_ParseTuple(args, "iiOO", &r, &n, &py_host_device, &py_gpu_device)) { return NULL; } if (!(d = PyData_AsPoint(py_host_device, CPU)) | \ !(g = PyData_AsPoint(py_gpu_device, GPU))) { return NULL; } // Run kernel on 1M elements on the GPU int devID; int numSMs; hipDeviceGetAttribute(&numSMs, hipDeviceAttributeMultiprocessorCount, hipGetDevice(&devID)); hipLaunchKernelGGL(( add), dim3(32 * numSMs), dim3(256), 0, 0, r, n, g); // add<<<1, 1>>>(N, x, y); // Debug // Wait for GPU to finish before accessing on host hipDeviceSynchronize(); hipMemcpy(d, g, n * sizeof(Data), hipMemcpyDeviceToHost); // Check for errors (all values should be 3.0f) float maxError = 0.0f; for (int i = 0; i < n; i++) { maxError = fmax(maxError, fabs(d[i].y-3.0f)); } return PyLong_FromLong(maxError); } static PyMethodDef CudaMethods[] = { {"cuda_allocate", method_cuda_allocate, METH_VARARGS, \ "Python interface for cuda GPU memory allocation"}, {"cuda_set", method_cuda_set, METH_VARARGS, \ "Python interface for cuda GPU mem set values"}, {"cuda_add", method_cuda_add, METH_VARARGS, \ "Python 
interface for cuda GPU mem set values"}, {NULL, NULL, 0, NULL} }; static struct PyModuleDef cudamodule = { PyModuleDef_HEAD_INIT, "cuda_add_rmk", "Python interface for Cuda library functions", -1, CudaMethods }; PyMODINIT_FUNC PyInit_cuda_add_rmk(void) { return PyModule_Create(&cudamodule); }
97a3cdc3cfa666f104e1a1552dbb77bd04382751.cu
#include <Python.h> #include <math.h> #include <cstdint> #include <stdio.h> /* Holds to elements part of a struct array */ typedef struct { float x, y; } Data; /* Global name variables to seperate CPU from GPU */ static const char CPU[] = "CPU"; static const char GPU[] = "GPU"; __global__ void add(int r, int n, Data *g) { int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int i = 0; i < r; i += 1) { for (int j = index; j < n; j += stride) g[j].y = g[j].x + g[j].y; } } /* Destructor function for cpu data stucture */ static void del_Data(PyObject *obj) { const char *name = PyCapsule_GetName(obj); if (strcmp(name, CPU)) { printf("%s", name); free(PyCapsule_GetPointer(obj, name)); } else if (strcmp(name, GPU)) { printf("%s", name); cudaFree(PyCapsule_GetPointer(obj, name)); } else { printf("PyCapsule failed, couldn't get capsule name"); } } /* Get py object pointer object */ static Data *PyData_AsPoint(PyObject *obj, const char *name) { return (Data *) PyCapsule_GetPointer(obj, name); } /* Create new PyCapsule (keeping mem state) */ static PyObject *PyData_FromPoint(Data *d, int must_free, const char *name) { return PyCapsule_New(d, name, must_free ? del_Data : NULL); } /* Initiate Cuda Mem, struct array, struct x and y with N length*/ static PyObject *method_cuda_allocate(PyObject *self, PyObject *args) { Data *gpu_device; Data *host_device; int n; if (!PyArg_ParseTuple(args,"i",&n)) { return NULL; } host_device = (Data *) malloc(n * sizeof(Data)); cudaMalloc((void**)&gpu_device, n * sizeof(Data)); return Py_BuildValue("OO", PyData_FromPoint(host_device, 1, CPU), \ PyData_FromPoint(gpu_device, 1, GPU)); } /* Set value to each element pair */ static PyObject *method_cuda_set(PyObject *self, PyObject *args) { Data *d; Data *g; PyObject *py_host_device; PyObject *py_gpu_device; int n; if (!PyArg_ParseTuple(args, "iOO", &n, &py_host_device, &py_gpu_device)) { return NULL; } if (!(d = PyData_AsPoint(py_host_device, CPU)) | \ !(g = PyData_AsPoint(py_gpu_device, GPU))) { return NULL; } for (int i = 0; i < n; i++) { d[i].x = 1.0f; d[i].y = 2.0f; } cudaMemcpy(g, d, n * sizeof(Data), cudaMemcpyHostToDevice); Py_RETURN_NONE; } /* Add x and y, return error check */ static PyObject *method_cuda_add(PyObject *self, PyObject *args) { Data *d; Data *g; PyObject *py_host_device; PyObject *py_gpu_device; int n; int r; if (!PyArg_ParseTuple(args, "iiOO", &r, &n, &py_host_device, &py_gpu_device)) { return NULL; } if (!(d = PyData_AsPoint(py_host_device, CPU)) | \ !(g = PyData_AsPoint(py_gpu_device, GPU))) { return NULL; } // Run kernel on 1M elements on the GPU int devID; int numSMs; cudaDeviceGetAttribute(&numSMs, cudaDevAttrMultiProcessorCount, cudaGetDevice(&devID)); add<<<32 * numSMs, 256>>>(r, n, g); // add<<<1, 1>>>(N, x, y); // Debug // Wait for GPU to finish before accessing on host cudaDeviceSynchronize(); cudaMemcpy(d, g, n * sizeof(Data), cudaMemcpyDeviceToHost); // Check for errors (all values should be 3.0f) float maxError = 0.0f; for (int i = 0; i < n; i++) { maxError = fmax(maxError, fabs(d[i].y-3.0f)); } return PyLong_FromLong(maxError); } static PyMethodDef CudaMethods[] = { {"cuda_allocate", method_cuda_allocate, METH_VARARGS, \ "Python interface for cuda GPU memory allocation"}, {"cuda_set", method_cuda_set, METH_VARARGS, \ "Python interface for cuda GPU mem set values"}, {"cuda_add", method_cuda_add, METH_VARARGS, \ "Python interface for cuda GPU mem set values"}, {NULL, NULL, 0, NULL} }; static struct PyModuleDef cudamodule = { PyModuleDef_HEAD_INIT, 
"cuda_add_rmk", "Python interface for Cuda library functions", -1, CudaMethods }; PyMODINIT_FUNC PyInit_cuda_add_rmk(void) { return PyModule_Create(&cudamodule); }
bfa8a4f4c0169609f5c4624b9015c4418889f562.hip
// !!! This is a file automatically generated by hipify!!! #include <ATen/Dispatch.h> #include <ATen/native/ForeachUtils.h> #include <ATen/native/hip/ForeachFunctors.cuh> namespace at { namespace native { template<template<class> class Op> std::vector<Tensor> foreach_tensor_list_op(TensorList tensors1, TensorList tensors2, Scalar alpha = 1) { std::vector<std::vector<at::Tensor>> tensor_lists; std::vector<at::Tensor> vec_res; vec_res.reserve(tensors1.size()); for (const auto& t: tensors1) { vec_res.emplace_back(at::native::empty_like(t)); } tensor_lists.emplace_back(tensors1.vec()); tensor_lists.emplace_back(tensors2.vec()); tensor_lists.emplace_back(std::move(vec_res)); AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(kBool, kBFloat16, kHalf, tensors1[0].scalar_type(), "foreach_binary_op_list_cuda", [&]() { using opmath_t = get_opmath_t<scalar_t>::opmath_t; multi_tensor_apply<3>(tensor_lists, BinaryOpListAlphaFunctor<scalar_t, /* depth */ 3, /* r_args_depth */ 2, /* res_arg_index */ 2>(), Op<opmath_t>(), alpha.to<opmath_t>()); }); return tensor_lists[2]; } template<template<class> class Op> void foreach_tensor_list_op_(TensorList tensors1, TensorList tensors2, Scalar alpha = 1) { std::vector<std::vector<at::Tensor>> tensor_lists; tensor_lists.emplace_back(tensors1.vec()); tensor_lists.emplace_back(tensors2.vec()); AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(kBool, kBFloat16, kHalf, tensors1[0].scalar_type(), "foreach_binary_op_list_cuda_", [&]() { using opmath_t = get_opmath_t<scalar_t>::opmath_t; multi_tensor_apply<2>(tensor_lists, BinaryOpListAlphaFunctor<scalar_t, /* depth */ 2, /* r_args_depth */ 2, /* res_arg_index */ 0>(), Op<opmath_t>(), alpha.to<opmath_t>()); }); } #define FOREACH_BINARY_OP_LIST(NAME, OP) \ void foreach_tensor_##NAME##_list_kernel_cuda_(TensorList tensors1, TensorList tensors2) { \ check_foreach_api_restrictions(tensors1, tensors2); \ if (!can_use_fast_route({tensors1, tensors2})) { \ return at::native::foreach_tensor_##NAME##_list_kernel_slow_(tensors1, tensors2); \ } \ \ foreach_tensor_list_op_<OP>(tensors1, tensors2); \ } \ \ std::vector<Tensor> foreach_tensor_##NAME##_list_kernel_cuda(TensorList tensors1, TensorList tensors2) { \ check_foreach_api_restrictions(tensors1, tensors2); \ if (!can_use_fast_route({tensors1, tensors2})) { \ return at::native::foreach_tensor_##NAME##_list_kernel_slow(tensors1, tensors2); \ } \ \ return foreach_tensor_list_op<OP>(tensors1, tensors2); \ } #define FOREACH_BINARY_OP_LIST_ALPHA(NAME, OP) \ void foreach_tensor_##NAME##_list_kernel_cuda_(TensorList tensors1, TensorList tensors2, Scalar alpha) { \ check_foreach_api_restrictions(tensors1, tensors2); \ if (!can_use_fast_route({tensors1, tensors2}, alpha)) { \ return at::native::foreach_tensor_##NAME##_list_kernel_slow_(tensors1, tensors2, alpha); \ } \ \ foreach_tensor_list_op_<OP>(tensors1, tensors2, alpha); \ } \ \ std::vector<Tensor> foreach_tensor_##NAME##_list_kernel_cuda(TensorList tensors1, TensorList tensors2, Scalar alpha) { \ check_foreach_api_restrictions(tensors1, tensors2); \ if (!can_use_fast_route({tensors1, tensors2}, alpha)) { \ return at::native::foreach_tensor_##NAME##_list_kernel_slow(tensors1, tensors2, alpha); \ } \ \ return foreach_tensor_list_op<OP>(tensors1, tensors2, alpha); \ } FOREACH_BINARY_OP_LIST_ALPHA(add, std::plus); FOREACH_BINARY_OP_LIST_ALPHA(sub, std::minus); FOREACH_BINARY_OP_LIST(mul, std::multiplies); FOREACH_BINARY_OP_LIST(div, std::divides); }} // namespace at::native
bfa8a4f4c0169609f5c4624b9015c4418889f562.cu
#include <ATen/Dispatch.h> #include <ATen/native/ForeachUtils.h> #include <ATen/native/cuda/ForeachFunctors.cuh> namespace at { namespace native { template<template<class> class Op> std::vector<Tensor> foreach_tensor_list_op(TensorList tensors1, TensorList tensors2, Scalar alpha = 1) { std::vector<std::vector<at::Tensor>> tensor_lists; std::vector<at::Tensor> vec_res; vec_res.reserve(tensors1.size()); for (const auto& t: tensors1) { vec_res.emplace_back(at::native::empty_like(t)); } tensor_lists.emplace_back(tensors1.vec()); tensor_lists.emplace_back(tensors2.vec()); tensor_lists.emplace_back(std::move(vec_res)); AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(kBool, kBFloat16, kHalf, tensors1[0].scalar_type(), "foreach_binary_op_list_cuda", [&]() { using opmath_t = get_opmath_t<scalar_t>::opmath_t; multi_tensor_apply<3>(tensor_lists, BinaryOpListAlphaFunctor<scalar_t, /* depth */ 3, /* r_args_depth */ 2, /* res_arg_index */ 2>(), Op<opmath_t>(), alpha.to<opmath_t>()); }); return tensor_lists[2]; } template<template<class> class Op> void foreach_tensor_list_op_(TensorList tensors1, TensorList tensors2, Scalar alpha = 1) { std::vector<std::vector<at::Tensor>> tensor_lists; tensor_lists.emplace_back(tensors1.vec()); tensor_lists.emplace_back(tensors2.vec()); AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(kBool, kBFloat16, kHalf, tensors1[0].scalar_type(), "foreach_binary_op_list_cuda_", [&]() { using opmath_t = get_opmath_t<scalar_t>::opmath_t; multi_tensor_apply<2>(tensor_lists, BinaryOpListAlphaFunctor<scalar_t, /* depth */ 2, /* r_args_depth */ 2, /* res_arg_index */ 0>(), Op<opmath_t>(), alpha.to<opmath_t>()); }); } #define FOREACH_BINARY_OP_LIST(NAME, OP) \ void foreach_tensor_##NAME##_list_kernel_cuda_(TensorList tensors1, TensorList tensors2) { \ check_foreach_api_restrictions(tensors1, tensors2); \ if (!can_use_fast_route({tensors1, tensors2})) { \ return at::native::foreach_tensor_##NAME##_list_kernel_slow_(tensors1, tensors2); \ } \ \ foreach_tensor_list_op_<OP>(tensors1, tensors2); \ } \ \ std::vector<Tensor> foreach_tensor_##NAME##_list_kernel_cuda(TensorList tensors1, TensorList tensors2) { \ check_foreach_api_restrictions(tensors1, tensors2); \ if (!can_use_fast_route({tensors1, tensors2})) { \ return at::native::foreach_tensor_##NAME##_list_kernel_slow(tensors1, tensors2); \ } \ \ return foreach_tensor_list_op<OP>(tensors1, tensors2); \ } #define FOREACH_BINARY_OP_LIST_ALPHA(NAME, OP) \ void foreach_tensor_##NAME##_list_kernel_cuda_(TensorList tensors1, TensorList tensors2, Scalar alpha) { \ check_foreach_api_restrictions(tensors1, tensors2); \ if (!can_use_fast_route({tensors1, tensors2}, alpha)) { \ return at::native::foreach_tensor_##NAME##_list_kernel_slow_(tensors1, tensors2, alpha); \ } \ \ foreach_tensor_list_op_<OP>(tensors1, tensors2, alpha); \ } \ \ std::vector<Tensor> foreach_tensor_##NAME##_list_kernel_cuda(TensorList tensors1, TensorList tensors2, Scalar alpha) { \ check_foreach_api_restrictions(tensors1, tensors2); \ if (!can_use_fast_route({tensors1, tensors2}, alpha)) { \ return at::native::foreach_tensor_##NAME##_list_kernel_slow(tensors1, tensors2, alpha); \ } \ \ return foreach_tensor_list_op<OP>(tensors1, tensors2, alpha); \ } FOREACH_BINARY_OP_LIST_ALPHA(add, std::plus); FOREACH_BINARY_OP_LIST_ALPHA(sub, std::minus); FOREACH_BINARY_OP_LIST(mul, std::multiplies); FOREACH_BINARY_OP_LIST(div, std::divides); }} // namespace at::native
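The only change hipify makes to this PyTorch source is the include path, ATen/native/cuda/ForeachFunctors.cuh to ATen/native/hip/ForeachFunctors.cuh; the dispatch macros and multi_tensor_apply calls compile unchanged. For readability, FOREACH_BINARY_OP_LIST(mul, std::multiplies) at the end of the file expands roughly to:

void foreach_tensor_mul_list_kernel_cuda_(TensorList tensors1, TensorList tensors2) {
  check_foreach_api_restrictions(tensors1, tensors2);
  if (!can_use_fast_route({tensors1, tensors2})) {
    return at::native::foreach_tensor_mul_list_kernel_slow_(tensors1, tensors2);
  }
  foreach_tensor_list_op_<std::multiplies>(tensors1, tensors2);
}

std::vector<Tensor> foreach_tensor_mul_list_kernel_cuda(TensorList tensors1, TensorList tensors2) {
  check_foreach_api_restrictions(tensors1, tensors2);
  if (!can_use_fast_route({tensors1, tensors2})) {
    return at::native::foreach_tensor_mul_list_kernel_slow(tensors1, tensors2);
  }
  return foreach_tensor_list_op<std::multiplies>(tensors1, tensors2);
}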
f7c79d800726f43a7f9bb619d5ec6197abf705be.hip
// !!! This is a file automatically generated by hipify!!! #include <stdlib.h> #include <stdio.h> #include <math.h> #include <hip/hip_runtime.h> #include <hiprand/hiprand.h> #include <helper_cuda.h> __constant__ int N; __constant__ float a,b,c; __global__ void cal(float *z, float *v) { int ind; float sum = 0; float zi; // move array pointers to correct position ind = threadIdx.x + N*blockIdx.x*blockDim.x; for (int n=0; n<N; n++) { zi = z[ind]; ind += blockDim.x; // shift pointer to next element sum += (a * zi * zi + b * zi +c); } // put payoff value into device array v[threadIdx.x + blockIdx.x*blockDim.x] = sum/100; } //////////////////////////////////////////////////////////////////////// // Main program //////////////////////////////////////////////////////////////////////// int main(int argc, const char **argv){ int NCAL = 960000; int constN = 100; float *z, *v; // initialise card findCudaDevice(argc, argv); // initialise CUDA timing float milli; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); // allocate memory on host and device checkCudaErrors(hipMallocManaged(&z, sizeof(float)*NCAL*constN)); checkCudaErrors(hipMallocManaged(&v, sizeof(float)*NCAL)); /* h_v = (float *)malloc(sizeof(float)*NPATH); checkCudaErrors(hipMalloc((void **)&d_v, sizeof(float)*NPATH) ); checkCudaErrors(hipMalloc((void **)&d_z, sizeof(float)*2*h_N*NPATH) ); */ // define constants and transfer to GPU float constA = 3.0f; float constB = 2.0f; float constC = 1.0f; printf("Initialized value: a = %.2f b = %.2f c = %.2f \n",constA, constB, constC); checkCudaErrors(hipMemcpyToSymbol(N,&constN,sizeof(constN))); checkCudaErrors(hipMemcpyToSymbol(a,&constA,sizeof(constA))); checkCudaErrors(hipMemcpyToSymbol(b,&constB,sizeof(constB))); checkCudaErrors(hipMemcpyToSymbol(c,&constC,sizeof(constC))); // random number generation hipEventRecord(start); hiprandGenerator_t gen; checkCudaErrors(hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_DEFAULT)); checkCudaErrors(hiprandSetPseudoRandomGeneratorSeed(gen, 1234ULL)); checkCudaErrors(hiprandGenerateNormal(gen, z, NCAL*constN, 0.0f, 1.0f)); hipEventRecord(stop); hipEventSynchronize(stop); // ensure all the threads in GPU finish hipEventElapsedTime(&milli, start, stop); printf("CURAND normal RNG execution time (ms): %f, samples/sec: %e \n",milli, NCAL*constN/(0.001*milli)); // execute kernel and time it hipEventRecord(start); hipLaunchKernelGGL(( cal), dim3(NCAL/64), dim3(64), 0, 0, z, v); getLastCudaError("cal execution failed\n"); hipEventRecord(stop); hipEventSynchronize(stop); hipEventElapsedTime(&milli, start, stop); printf("Kernel execution time (ms): %f \n",milli); // synchronize to wait for kernel to finish, and data copied back hipDeviceSynchronize(); // compute average float result = 0.0; for (int i=0; i<NCAL; i++) { result += v[i]; } printf("Average value = %13.8f \n", result/NCAL); // Tidy up library checkCudaErrors(hiprandDestroyGenerator(gen)); // Release memory and exit cleanly checkCudaErrors(hipFree(v)); checkCudaErrors(hipFree(z)); /* free(h_v); checkCudaErrors( hipFree(d_v) ); checkCudaErrors( hipFree(d_z) ); */ // CUDA exit -- needed to flush printf write buffer hipDeviceReset(); return 0; }
f7c79d800726f43a7f9bb619d5ec6197abf705be.cu
#include <stdlib.h> #include <stdio.h> #include <math.h> #include <cuda.h> #include <curand.h> #include <helper_cuda.h> __constant__ int N; __constant__ float a,b,c; __global__ void cal(float *z, float *v) { int ind; float sum = 0; float zi; // move array pointers to correct position ind = threadIdx.x + N*blockIdx.x*blockDim.x; for (int n=0; n<N; n++) { zi = z[ind]; ind += blockDim.x; // shift pointer to next element sum += (a * zi * zi + b * zi +c); } // put payoff value into device array v[threadIdx.x + blockIdx.x*blockDim.x] = sum/100; } //////////////////////////////////////////////////////////////////////// // Main program //////////////////////////////////////////////////////////////////////// int main(int argc, const char **argv){ int NCAL = 960000; int constN = 100; float *z, *v; // initialise card findCudaDevice(argc, argv); // initialise CUDA timing float milli; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); // allocate memory on host and device checkCudaErrors(cudaMallocManaged(&z, sizeof(float)*NCAL*constN)); checkCudaErrors(cudaMallocManaged(&v, sizeof(float)*NCAL)); /* h_v = (float *)malloc(sizeof(float)*NPATH); checkCudaErrors(cudaMalloc((void **)&d_v, sizeof(float)*NPATH) ); checkCudaErrors(cudaMalloc((void **)&d_z, sizeof(float)*2*h_N*NPATH) ); */ // define constants and transfer to GPU float constA = 3.0f; float constB = 2.0f; float constC = 1.0f; printf("Initialized value: a = %.2f b = %.2f c = %.2f \n",constA, constB, constC); checkCudaErrors(cudaMemcpyToSymbol(N,&constN,sizeof(constN))); checkCudaErrors(cudaMemcpyToSymbol(a,&constA,sizeof(constA))); checkCudaErrors(cudaMemcpyToSymbol(b,&constB,sizeof(constB))); checkCudaErrors(cudaMemcpyToSymbol(c,&constC,sizeof(constC))); // random number generation cudaEventRecord(start); curandGenerator_t gen; checkCudaErrors(curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT)); checkCudaErrors(curandSetPseudoRandomGeneratorSeed(gen, 1234ULL)); checkCudaErrors(curandGenerateNormal(gen, z, NCAL*constN, 0.0f, 1.0f)); cudaEventRecord(stop); cudaEventSynchronize(stop); // ensure all the threads in GPU finish cudaEventElapsedTime(&milli, start, stop); printf("CURAND normal RNG execution time (ms): %f, samples/sec: %e \n",milli, NCAL*constN/(0.001*milli)); // execute kernel and time it cudaEventRecord(start); cal<<<NCAL/64, 64>>>(z, v); getLastCudaError("cal execution failed\n"); cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milli, start, stop); printf("Kernel execution time (ms): %f \n",milli); // synchronize to wait for kernel to finish, and data copied back cudaDeviceSynchronize(); // compute average float result = 0.0; for (int i=0; i<NCAL; i++) { result += v[i]; } printf("Average value = %13.8f \n", result/NCAL); // Tidy up library checkCudaErrors(curandDestroyGenerator(gen)); // Release memory and exit cleanly checkCudaErrors(cudaFree(v)); checkCudaErrors(cudaFree(z)); /* free(h_v); checkCudaErrors( cudaFree(d_v) ); checkCudaErrors( cudaFree(d_z) ); */ // CUDA exit -- needed to flush printf write buffer cudaDeviceReset(); return 0; }
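The host-side random number library is renamed wholesale: curandGenerator_t, curandCreateGenerator, CURAND_RNG_PSEUDO_DEFAULT, curandSetPseudoRandomGeneratorSeed and curandGenerateNormal map to their hiprand* / HIPRAND_* equivalents with identical signatures. A stripped-down, self-contained sketch of that generator lifecycle (buffer size and seed are illustrative):

#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>

int main() {
  const size_t n = 1 << 20;                       // even count: normals are generated in pairs
  float *d = nullptr;
  hipMalloc(&d, n * sizeof(float));

  hiprandGenerator_t gen;
  hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_DEFAULT);
  hiprandSetPseudoRandomGeneratorSeed(gen, 1234ULL);
  hiprandGenerateNormal(gen, d, n, 0.0f, 1.0f);   // mean 0, stddev 1

  hiprandDestroyGenerator(gen);
  hipFree(d);
  return 0;
}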
1728219b34d2dc9b3657d9b00c36e91fbfabe564.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<Windows.h> #include<stdio.h> #include<stdlib.h> #include<math.h> #include<gl\glew.h> #include<gl\GL.h> #include<cuda.h> #include<cuda_gl_interop.h> #include"vmath.h" #define DIM_W 1920 #define DIM_H 1080 #define DIM 1024 #define PI 3.1415926535897932f #define sm_rand( x ) (x * rand() / RAND_MAX) #define INF 2e10f #define MAX_TEMP 1.0f #define MIN_TEMP 0.0001f #define SPEED 1.25f #define WIN_WIDTH 800 #define WIN_HEIGHT 600 #pragma comment(lib,"user32.lib") #pragma comment(lib,"gdi32.lib") #pragma comment(lib,"glew32.lib") #pragma comment(lib,"opengl32.lib") #pragma comment(lib,"cudart.lib") //using namespace std; enum CUDAInitErrorCodes { /* min no -10 */ INIT_CUDA_SETGLDEVICE_FAILED = -30, CUDA_EVENT_RECORD_FAILED, CUDA_EVENT_ELP_FAILED, CUDA_EVENT_SYNC_FAILED, CUDA_EVENT_CREATE_FAILED, CUDA_EVENT_DESTROY_FAILED, INIT_CUDA_MEMCPY_FAILED, INIT_CUDA_MALLOC_FAILED, CUDA_STREAM_SYNC_FAILED, CUDA_INIT_DESTROY_SURFACE_OBJ_FAILED, INIT_CUDA_REGISTER_IMAGE_FAILED, INIT_CUDA_REGISTER_BUFFER_FAILED, CUDA_INIT_GRAPHICS_MAPPED_ARRAY_FAILED, CUDA_INIT_GRAPHICS_MAPPED_RES_FAILED, CUDA_INIT_GRAPHICS_MAPPED_RES_POINTER_FAILED, CUDA_INIT_GRAPHICS_UNMAP_RES_FAILED, CUDA_INIT_BIND_TEX, CUDA_DEVICE_SYNC, INIT_CUDA_CHOOSEDEVICE_FAILED = -10, CUDA_INIT_ALL_OK = 0, }; enum InitErrorCodes { INIT_VERTEX_SHADER_COMPILATION_FAILED = -9, INIT_FRAGMENT_SHADER_COMPILATION_FAILED, INIT_LINK_SHADER_PROGRAM_FAILED, INIT_FAIL_GLEW_INIT, INIT_FAIL_BRIDGE_CONTEX_SET, INIT_FAIL_BRIDGE_CONTEX_CREATION, INIT_FAIL_SET_PIXEL_FORMAT, INIT_FAIL_NO_PIXEL_FORMAT, INIT_FAIL_NO_HDC, INIT_ALL_OK, }; enum attributeBindLocations { SAM_ATTRIBUTE_POSITION = 0, SAM_ATTRIBUTE_COLOR, SAM_ATTRIBUTE_NORNAL, SAM_ATTRIBUTE_TEXTURE0, }; LRESULT CALLBACK MainWndProc(HWND hwnd, UINT iMsg, WPARAM wParam, LPARAM lParam); bool g_bWindowActive = false; HWND g_hwnd = NULL; HDC g_hdc = NULL; HGLRC g_hrc = NULL; WINDOWPLACEMENT wpPrev; DWORD dwStyle; bool g_bFullScreen = false; FILE *g_pFile = NULL; // Shaders //GLuint iVertexShaderObject = 0; //GLuint iFragmentShaderObject = 0; GLuint g_ShaderProgramObject = 0; // All Vertex Buffers GLuint g_VertexArrayObject = 0; GLuint g_VertexBufferObject_Position = 0; GLuint g_VertexBufferObject_TexCoords = 0; // Uniforms GLuint g_Uniform_Model_Matrix = 0; GLuint g_Uniform_View_Matrix = 0; GLuint g_Uniform_Projection_Matrix = 0; // sampler GLuint g_uniform_TextureSampler; GLuint g_cuda_texture; // Projection vmath::mat4 g_PersPectiveProjectionMatrix; //CUDA Res cudaGraphicsResource *resource = NULL; float g_fanimate = 0.0f; bool animation_flag = false; void swapFunc(float *ip1,float *ip2) { float *temp = ip1; ip1 = ip2; ip2 = temp; } __device__ unsigned char value(float n1,float n2,int hue) { if(hue > 360) hue -= 360; else if(hue < 0) hue += 360; if(hue < 60) return (unsigned char)(255 * (n1 + (n2-n1)*hue/60)); if(hue < 180) return (unsigned char)(255 * n2); if(hue < 240) return (unsigned char)(255 * (n1 + (n2-n1)*(240-hue)/60)); return (unsigned char)(255*n1); } __global__ void float_to_color(hipSurfaceObject_t target, const float *outSrc ,dim3 texDim) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int offset = x + y * blockDim.x * gridDim.x; float l = outSrc[offset]; float s = l; int h = (180 + (int)(360.0f * outSrc[offset])) % 360; float m1,m2; if(l <= 0.5f) m2 = l * (l+s); else m2 = l + s - l * s; m1 = 2 * l - m2; if (x < texDim.x && y < texDim.y) { uchar4 data = 
make_uchar4(value( m1, m2, h+120 ),value( m1, m2, h ),value( m1, m2, h -120 ), 255); surf2Dwrite(data, target, x * sizeof(uchar4), y, hipBoundaryModeTrap); } } /** * Animating Kernel */ __global__ void blend_kernel(float *outSrc,const float *inSrc) { // map from threadIdx/blockIdx to pixel position int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int offset = x + y * blockDim.x * gridDim.x; int left = offset - 1; int right = offset + 1; if(x==0) left++; if(y==DIM -1) right--; int top = offset - DIM; int bottom = offset + DIM; if(y==0) top += DIM; if(y==DIM-1) bottom -= DIM; outSrc[offset] = inSrc[offset] + SPEED * (inSrc[top] + inSrc[bottom] + inSrc[left] + inSrc[right] - inSrc[offset] * 4); } /** * Constant Kernel */ __global__ void copy_const_kernel(float *iptr,const float *cptr) { // map threads and blocks to pixel position int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int offset = x + y * blockDim.x * gridDim.x; if(cptr[offset]!=0) iptr[offset] = cptr[offset]; } float *dev_inSrc; float *dev_outSrc; float *dev_constSrc; int WINAPI WinMain(HINSTANCE hInstance, HINSTANCE hPrevInstance, LPSTR szCmdLine, int iCmdShow) { //int UnInitialize(void); int Initialize(void); int Update(void); void Render(void); // Windowing Elelments WNDCLASSEX wndclass; MSG msg; HWND hwnd = NULL; TCHAR szClassName[] = TEXT("Sam_OGL"); RECT windowRect; // Game Loop Control bool bDone = false; // Initialization Status int iInitRet = 0; SecureZeroMemory((void*)&wndclass, sizeof(wndclass)); wndclass.cbSize = sizeof(wndclass); wndclass.cbClsExtra = 0; wndclass.cbWndExtra = 0; wndclass.style = CS_HREDRAW | CS_VREDRAW | CS_OWNDC; wndclass.lpfnWndProc = MainWndProc; wndclass.lpszClassName = szClassName; wndclass.lpszMenuName = NULL; wndclass.hInstance = hInstance; wndclass.hbrBackground = (HBRUSH)GetStockObject(GRAY_BRUSH); wndclass.hIcon = LoadIcon(hInstance, IDI_APPLICATION); wndclass.hIconSm = LoadIcon(hInstance, IDI_APPLICATION); wndclass.hCursor = LoadCursor(hInstance, IDC_ARROW); if (!RegisterClassEx(&wndclass)) { MessageBox(NULL, TEXT("Issue...!!!"), TEXT("Could Not RegisterClass() "), MB_OK | MB_ICONERROR); exit(EXIT_FAILURE); } if ((fopen_s(&g_pFile, "SamLogFile.txt", "w+")) == 0) { fprintf_s(g_pFile, "File Opened Successfully. 
\n"); } else { MessageBox(NULL, TEXT("Issue...!!!"), TEXT("Could not open File"), MB_OK | MB_ICONERROR); exit(EXIT_FAILURE); } SecureZeroMemory((void*)&windowRect, sizeof(windowRect)); windowRect.left = 0; windowRect.top = 0; windowRect.bottom = WIN_HEIGHT; windowRect.right = WIN_WIDTH; AdjustWindowRectEx(&windowRect, WS_OVERLAPPEDWINDOW, FALSE, WS_EX_APPWINDOW); hwnd = CreateWindowEx(WS_EX_APPWINDOW, szClassName, TEXT("First_OpenGL_Window"), WS_OVERLAPPEDWINDOW | WS_CLIPCHILDREN | WS_CLIPSIBLINGS | WS_VISIBLE, CW_USEDEFAULT, CW_USEDEFAULT, windowRect.right - windowRect.left, windowRect.bottom - windowRect.top, NULL, NULL, hInstance, NULL); if (hwnd == NULL) { MessageBox(NULL, TEXT("Issue...!!!"), TEXT("Could Not CreateWindow() "), MB_OK | MB_ICONERROR); exit(EXIT_FAILURE); } g_hwnd = hwnd; iInitRet = Initialize(); switch (iInitRet) { case INIT_ALL_OK: fprintf_s(g_pFile, "Initialize Complete \n"); break; case INIT_FAIL_NO_HDC: fprintf_s(g_pFile, "Failed to Get HDC \n"); DestroyWindow(hwnd); break; case INIT_FAIL_NO_PIXEL_FORMAT: fprintf_s(g_pFile, "Failed to get PixelFormat \n"); DestroyWindow(hwnd); break; case INIT_FAIL_SET_PIXEL_FORMAT: fprintf_s(g_pFile, "Failed to set Pixel Format \n"); DestroyWindow(hwnd); break; case INIT_FAIL_BRIDGE_CONTEX_CREATION: fprintf_s(g_pFile, "Failed to wglCreateContext \n"); DestroyWindow(hwnd); break; case INIT_FAIL_BRIDGE_CONTEX_SET: fprintf_s(g_pFile, "Failed to wglMakeCurrent \n"); DestroyWindow(hwnd); break; case INIT_FAIL_GLEW_INIT: fprintf_s(g_pFile, "Failed to glewInit \n"); DestroyWindow(hwnd); break; case INIT_LINK_SHADER_PROGRAM_FAILED: fprintf_s(g_pFile, "Failed to Link Shader Program Object \n"); DestroyWindow(hwnd); break; case INIT_VERTEX_SHADER_COMPILATION_FAILED: fprintf_s(g_pFile, "Failed to Compile vertex Shader \n"); DestroyWindow(hwnd); break; case INIT_FRAGMENT_SHADER_COMPILATION_FAILED: fprintf_s(g_pFile, "Failed to Compile fragment Shader \n"); DestroyWindow(hwnd); break; default: /*fprintf_s(g_pFile, "Failed UnKnown Reasons \n"); DestroyWindow(hwnd);*/ switch (iInitRet) { case INIT_CUDA_CHOOSEDEVICE_FAILED: fprintf_s(g_pFile, "hipChooseDevice Failed \n"); DestroyWindow(hwnd); break; /*default: fprintf_s(g_pFile, "CUDA Failed UnKnown Reasons \n"); DestroyWindow(hwnd); break;*/ } // General Failure fprintf_s(g_pFile, "Failed UnKnown Reasons \n"); DestroyWindow(hwnd); break; } ShowWindow(hwnd, SW_SHOWNORMAL); SetForegroundWindow(hwnd); SetFocus(hwnd); while (bDone == false) { if (PeekMessage(&msg, NULL, 0, 0, PM_REMOVE)) { if (msg.message == WM_QUIT) { bDone = true; } else { TranslateMessage(&msg); DispatchMessage(&msg); } } else { if (g_bWindowActive) { Update(); } // Show all Animations Render(); } } //UnInitialize(); return ((int)msg.wParam); } LRESULT CALLBACK MainWndProc(HWND hwnd, UINT iMsg, WPARAM wParam, LPARAM lParam) { int UnInitialize(void); void FullScreen(void); bool Resize(int, int); switch (iMsg) { case WM_CREATE: PostMessage(hwnd, WM_KEYDOWN, (WPARAM)0x46, (LPARAM)NULL); break; case WM_SETFOCUS: g_bWindowActive = true; break; case WM_KILLFOCUS: g_bWindowActive = false; break; case WM_KEYDOWN: switch (LOWORD(wParam)) { case VK_ESCAPE: DestroyWindow(hwnd); break; case 0x46: // 'f' or 'F' //MessageBox(hwnd, TEXT("F is pressed"), TEXT("Status"), MB_OK); FullScreen(); break; default: break; } break; case WM_SIZE: Resize(LOWORD(lParam), HIWORD(lParam)); break; case WM_ERASEBKGND: return(0); //break; case WM_CLOSE: DestroyWindow(hwnd); break; case WM_DESTROY: UnInitialize(); PostQuitMessage(0); break; default: break; } 
return (DefWindowProc(hwnd, iMsg, wParam, lParam)); } int Initialize(void) { bool Resize(int, int); int iPixelIndex = 0; PIXELFORMATDESCRIPTOR pfd; hipError_t cuErr; // Shader Programs GLuint iVertexShaderObject = 0; GLuint iFragmentShaderObject = 0; GLenum err = NULL; // GLEW Error codes SecureZeroMemory(&pfd, sizeof(pfd)); pfd.nSize = sizeof(pfd); pfd.nVersion = 1; pfd.dwFlags = PFD_SUPPORT_OPENGL | PFD_DRAW_TO_WINDOW | PFD_DOUBLEBUFFER; pfd.iPixelType = PFD_TYPE_RGBA; pfd.cColorBits = 32; pfd.cRedBits = 8; pfd.cGreenBits = 8; pfd.cBlueBits = 8; pfd.cAlphaBits = 8; g_hdc = GetDC(g_hwnd); if (g_hdc == NULL) { return INIT_FAIL_NO_HDC; } iPixelIndex = ChoosePixelFormat(g_hdc, &pfd); if (iPixelIndex == 0) { return INIT_FAIL_NO_PIXEL_FORMAT; } if (SetPixelFormat(g_hdc, iPixelIndex, &pfd) == FALSE) { return INIT_FAIL_SET_PIXEL_FORMAT; } g_hrc = wglCreateContext(g_hdc); if (g_hrc == NULL) { return INIT_FAIL_BRIDGE_CONTEX_CREATION; } if (wglMakeCurrent(g_hdc, g_hrc) == FALSE) { return INIT_FAIL_BRIDGE_CONTEX_SET; } // Enables Feature Required for Programable Pipeline err = glewInit(); if (err != GLEW_OK) { return INIT_FAIL_GLEW_INIT; } // CUDA Initalization cuErr = hipGLSetGLDevice(0); // Default device 0 will share resources with OpenGL if (cuErr != hipSuccess) { return INIT_CUDA_SETGLDEVICE_FAILED; } // GL information Start fprintf_s(g_pFile, "SHADER_INFO : Vendor is : %s\n", glGetString(GL_VENDOR)); fprintf_s(g_pFile, "SHADER_INFO : Renderer is : %s\n", glGetString(GL_RENDER)); fprintf_s(g_pFile, "SHADER_INFO : OpenGL Version is : %s\n", glGetString(GL_VERSION)); fprintf_s(g_pFile, "SHADER_INFO : GLSL Version is : %s\n", glGetString(GL_SHADING_LANGUAGE_VERSION)); int maxAttachments = 0; glGetIntegerv(GL_MAX_COLOR_ATTACHMENTS, &maxAttachments); fprintf_s(g_pFile, "SHADER_INFO : GL_MAX_COLOR_ATTACHMENTS is : %d\n", maxAttachments); //fprintf_s(g_pFile, "SHADER_INFO : Extention is : %s \n", glGetString(GL_EXTENSIONS)); // GL information End /// Sam : all Shader Code Start /*gl_Position = u_projection_matrix * u_view_matrix * u_model_matrix * vPosition;*/ /*Vertex Shader Start*/ iVertexShaderObject = glCreateShader(GL_VERTEX_SHADER); const GLchar *vertexShaderSourceCode = "#version 450 core" \ "\n" \ "layout (location = 0)in vec4 vPosition;" \ "layout (location = 3)in vec2 vTexture0_Coord;" \ "layout (location = 0)out vec2 out_Texture0_Coord;" \ "uniform mat4 u_model_matrix,u_view_matrix,u_projection_matrix;" \ "void main(void)" \ "{" \ " gl_Position = vPosition;" \ " out_Texture0_Coord = vTexture0_Coord;" \ "}"; glShaderSource(iVertexShaderObject, 1, (const GLchar**)&vertexShaderSourceCode, NULL); // Compile it glCompileShader(iVertexShaderObject); GLint iInfoLogLength = 0; GLint iShaderCompileStatus = 0; GLchar *szInfoLog = NULL; glGetShaderiv(iVertexShaderObject, GL_COMPILE_STATUS, &iShaderCompileStatus); if (iShaderCompileStatus == GL_FALSE) { glGetShaderiv(iVertexShaderObject, GL_INFO_LOG_LENGTH, &iInfoLogLength); if (iInfoLogLength>0) { szInfoLog = (GLchar*)malloc(iInfoLogLength * sizeof(GLchar)); if (szInfoLog != NULL) { GLsizei written; glGetShaderInfoLog(iVertexShaderObject, GL_INFO_LOG_LENGTH, &written, szInfoLog); fprintf_s(g_pFile, "ERROR : Vertex Shader Compilation Log : %s \n", szInfoLog); free(szInfoLog); szInfoLog = NULL; return INIT_VERTEX_SHADER_COMPILATION_FAILED; //DestroyWindow(g_hwnd); //exit(EXIT_FAILURE); } } } /*Vertex Shader End*/ /*Fragment Shader Start*/ iFragmentShaderObject = glCreateShader(GL_FRAGMENT_SHADER); const GLchar *fragmentShaderSourceCode = "#version 450 
core" \ "\n" \ "layout (location = 0)in vec2 out_Texture0_Coord;" \ "layout (location = 0)out vec4 FragColor;" \ "uniform sampler2D u_texture0_sampler;" \ "void main(void)" \ "{" \ " FragColor = texture(u_texture0_sampler,out_Texture0_Coord);" \ "}"; glShaderSource(iFragmentShaderObject, 1, (const GLchar**)&fragmentShaderSourceCode, NULL); glCompileShader(iFragmentShaderObject); iInfoLogLength = 0; iShaderCompileStatus = 0; szInfoLog = NULL; glGetShaderiv(iFragmentShaderObject, GL_COMPILE_STATUS, &iShaderCompileStatus); if (iShaderCompileStatus == GL_FALSE) { glGetShaderiv(iFragmentShaderObject, GL_INFO_LOG_LENGTH, &iInfoLogLength); if (iInfoLogLength>0) { szInfoLog = (GLchar*)malloc(iInfoLogLength * sizeof(GLchar)); if (szInfoLog != NULL) { GLsizei written; glGetShaderInfoLog(iFragmentShaderObject, GL_INFO_LOG_LENGTH, &written, szInfoLog); fprintf(g_pFile, "ERROR: Fragment Shader Compilation Log : %s \n", szInfoLog); free(szInfoLog); szInfoLog = NULL; return INIT_FRAGMENT_SHADER_COMPILATION_FAILED; //DestroyWindow(g_hwnd); //exit(EXIT_FAILURE); } } } /*Fragment Shader End*/ /* Shader Program Start */ g_ShaderProgramObject = glCreateProgram(); glAttachShader(g_ShaderProgramObject, iVertexShaderObject); glAttachShader(g_ShaderProgramObject, iFragmentShaderObject); glBindAttribLocation(g_ShaderProgramObject, SAM_ATTRIBUTE_POSITION, "vPosition"); glBindAttribLocation(g_ShaderProgramObject, SAM_ATTRIBUTE_TEXTURE0, "vTexture0_Coord"); glLinkProgram(g_ShaderProgramObject); GLint iShaderLinkStatus = 0; iInfoLogLength = 0; glGetProgramiv(g_ShaderProgramObject, GL_LINK_STATUS, &iShaderLinkStatus); if (iShaderLinkStatus == GL_FALSE) { glGetProgramiv(g_ShaderProgramObject, GL_INFO_LOG_LENGTH, &iInfoLogLength); if (iInfoLogLength>0) { szInfoLog = (GLchar*)malloc(iInfoLogLength * sizeof(GLchar)); if (szInfoLog != NULL) { GLsizei written; glGetShaderInfoLog(g_ShaderProgramObject, GL_INFO_LOG_LENGTH, &written, szInfoLog); fprintf_s(g_pFile, "ERROR : Linking Shader Program Objects Failed %s \n", szInfoLog); free(szInfoLog); szInfoLog = NULL; return INIT_LINK_SHADER_PROGRAM_FAILED; //DestroyWindow(g_hwnd); //exit(EXIT_FAILURE); } } } /* Shader Program End */ /*Setup Uniforms Start*/ g_Uniform_Model_Matrix = glGetUniformLocation(g_ShaderProgramObject, "u_model_matrix"); g_Uniform_Projection_Matrix = glGetUniformLocation(g_ShaderProgramObject, "u_projection_matrix"); g_Uniform_View_Matrix = glGetUniformLocation(g_ShaderProgramObject, "u_view_matrix"); //g_uniform_TextureSampler = glGetUniformLocation(g_ShaderProgramObject, "u_texture0_sampler"); /*Setup Uniforms End*/ /* Fill Buffers Start*/ //// Cube Section Start const GLfloat squareVertices[] = { -1.0f, 1.0f, 0.0f, -1.0f, -1.0f, 0.0f, 1.0f, -1.0f, 0.0f, 1.0f, 1.0f, 0.0f }; const GLfloat squareTexCords[] = { 0.0f, 1.0f, 0.0f, 0.0f, 1.0f,0.0f, 1.0f,1.0f }; glGenVertexArrays(1, &g_VertexArrayObject);//VAO glBindVertexArray(g_VertexArrayObject); glGenBuffers(1, &g_VertexBufferObject_Position);// vbo position glBindBuffer(GL_ARRAY_BUFFER, g_VertexBufferObject_Position); glBufferData(GL_ARRAY_BUFFER, sizeof(squareVertices), squareVertices, GL_STATIC_DRAW); glVertexAttribPointer(SAM_ATTRIBUTE_POSITION, 3, GL_FLOAT, GL_FALSE, 0, NULL); glEnableVertexAttribArray(SAM_ATTRIBUTE_POSITION); glBindBuffer(GL_ARRAY_BUFFER, 0); glGenBuffers(1, &g_VertexBufferObject_TexCoords); // vbo texcoords glBindBuffer(GL_ARRAY_BUFFER, g_VertexBufferObject_TexCoords); glBufferData(GL_ARRAY_BUFFER, sizeof(squareTexCords), squareTexCords, GL_STATIC_DRAW); 
glVertexAttribPointer(SAM_ATTRIBUTE_TEXTURE0, 2, GL_FLOAT, GL_FALSE, 0, NULL); glEnableVertexAttribArray(SAM_ATTRIBUTE_TEXTURE0); glBindBuffer(GL_ARRAY_BUFFER, 0); glBindVertexArray(0); /* Fill Buffers End*/ // Generate texture for working with cuda glGenTextures(1, &g_cuda_texture); glBindTexture(GL_TEXTURE_2D, g_cuda_texture); // Texture parameters glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_LINEAR); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_LINEAR); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST); // give texture some storage glTexImage2D(GL_TEXTURE_2D,0,GL_RGBA, DIM, DIM,0,GL_RGBA,GL_UNSIGNED_INT,NULL); glBindTexture(GL_TEXTURE_2D, 0); /// Sam : all Shader Code End glEnable(GL_TEXTURE_2D); glShadeModel(GL_SMOOTH); glClearDepth(1.0f); glEnable(GL_DEPTH_TEST); glDepthFunc(GL_LEQUAL); glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_NICEST); //glEnable(GL_CULL_FACE); glClearColor(0.125f, 0.125f, 0.125f, 1.0f); g_PersPectiveProjectionMatrix = vmath::mat4::identity(); // CUDA Allocations // allocate for dev_inSrc // considering float == 4 chars (rgba) cuErr = hipMalloc((void**)&dev_inSrc,4*DIM*DIM); if(cuErr!=hipSuccess) { fprintf_s(g_pFile, "CUDA ERROR : hipMalloc failed at line %d\n",__LINE__); return INIT_CUDA_MALLOC_FAILED; } cuErr = hipMalloc((void**)&dev_outSrc,4*DIM*DIM); if(cuErr!=hipSuccess) { fprintf_s(g_pFile, "CUDA ERROR : hipMalloc failed at line %d\n",__LINE__); return INIT_CUDA_MALLOC_FAILED; } cuErr=hipMalloc((void**)&dev_constSrc,4*DIM*DIM); if(cuErr!=hipSuccess) { fprintf_s(g_pFile, "CUDA ERROR : hipMalloc failed at line %d\n",__LINE__); return INIT_CUDA_MALLOC_FAILED; } float *temp = (float*)calloc(4*DIM*DIM,sizeof(char)); for(int i=0; i < DIM*DIM; i++) { temp[i] = 0; int x = i % DIM; int y = i / DIM; if((x>300)&&(x<600)&&(y>310)&&(y<601)) temp[i] = MAX_TEMP; } temp[DIM*100+100] = (MAX_TEMP + MIN_TEMP)/2; temp[DIM*700+100] = MIN_TEMP; temp[DIM*300+300] = MIN_TEMP; temp[DIM*200+700] = MIN_TEMP; for(int y = 800;y<900;y++) { for(int x = 400;x<500;x++) { temp[x+y*DIM] = MIN_TEMP; } } cuErr = hipMemcpy(dev_constSrc,temp,4*DIM*DIM,hipMemcpyHostToDevice); if(cuErr != hipSuccess) { fprintf_s(g_pFile, "CUDA ERROR : hipMemcpy failed at line %d\n",__LINE__); return INIT_CUDA_MEMCPY_FAILED; } for(int y = 800; y<DIM; y++) { for(int x = 0; x<200; x++) { temp[x+y*DIM] = MAX_TEMP; } } cuErr = hipMemcpy(dev_inSrc,temp,4*DIM*DIM,hipMemcpyHostToDevice); if(cuErr != hipSuccess) { fprintf_s(g_pFile, "CUDA ERROR : hipMemcpy failed at line %d\n",__LINE__); return INIT_CUDA_MEMCPY_FAILED; } if(temp) { free(temp); temp=NULL; } //CUDA_INIT_BIND_TEX /// Register With CUDA Start // last param as "hipGraphicsRegisterFlagsSurfaceLoadStore" cuErr = hipGraphicsGLRegisterImage(&resource, g_cuda_texture, GL_TEXTURE_2D, hipGraphicsRegisterFlagsSurfaceLoadStore|hipGraphicsRegisterFlagsWriteDiscard); if (cuErr!=hipSuccess) { fprintf_s(g_pFile, "CUDA ERROR : hipGraphicsGLRegisterImage failed at line %d\n",__LINE__); return INIT_CUDA_REGISTER_IMAGE_FAILED; } /// Register With CUDA Stop Resize(WIN_WIDTH, WIN_HEIGHT); return INIT_ALL_OK; } int Update(void) { void swapFunc(float *ip1,float *ip2); //float4 *devPtr = NULL; //size_t size; hipError_t status; hipArray_t cudaWriteArray; hipEvent_t start,stop; float elapsedTime = 0.0f; if (animation_flag) { g_fanimate = g_fanimate + 0.009f; if ((g_fanimate >6.0f)) { animation_flag = false; } } else { g_fanimate = g_fanimate - 0.008f; if ((g_fanimate <1.0f)) { animation_flag 
= true; } } status = hipEventCreate(&start); if (status != hipSuccess) { fprintf_s(g_pFile,"IN Update() hipEventCreate failed...!! \n"); return CUDA_EVENT_CREATE_FAILED; } status = hipEventCreate(&stop); if (status != hipSuccess) { fprintf_s(g_pFile,"IN Update() hipEventCreate failed...!! \n"); return CUDA_EVENT_CREATE_FAILED; } status = hipEventRecord(start,0); if (status != hipSuccess) { fprintf_s(g_pFile,"IN Update() hipEventCreate failed...!! \n"); return CUDA_EVENT_RECORD_FAILED; } status = hipGraphicsMapResources(1, &resource, 0); if (status != hipSuccess) { fprintf_s(g_pFile,"IN Update() hipGraphicsMapResources failed...!! \n"); return CUDA_INIT_GRAPHICS_MAPPED_RES_FAILED; } status = hipGraphicsSubResourceGetMappedArray(&cudaWriteArray,resource,0,0); if (status != hipSuccess) { fprintf_s(g_pFile, "IN Update() hipGraphicsSubResourceGetMappedArray failed...!! \n"); return CUDA_INIT_GRAPHICS_MAPPED_ARRAY_FAILED; } // Prepare a Surface object for cuda hipResourceDesc writeDescriptor; ZeroMemory((void**)&writeDescriptor,sizeof(writeDescriptor)); writeDescriptor.resType = hipResourceTypeArray; writeDescriptor.res.array.array = cudaWriteArray; hipSurfaceObject_t writeSurface; status = hipCreateSurfaceObject(&writeSurface, &writeDescriptor); if (status != hipSuccess) { fprintf_s(g_pFile, "IN Update() hipCreateSurfaceObject failed...!! \n"); return CUDA_INIT_GRAPHICS_MAPPED_ARRAY_FAILED; } // After successfully creating surface object write to the texture using kernel //dim3 thread(30,30); //dim3 block(ceil(DIM_W/thread.x), ceil(DIM_H/thread.y)); //rt_noConst_kernel<<<block, thread >>>(writeSurface, dim3(DIM_W, DIM_H)); dim3 blocks(DIM/16,DIM/16,1); dim3 threads(16,16); volatile bool dstOut = true; float *temp_inSrc,*temp_constSrc,*temp_outSrc; temp_inSrc = dev_inSrc; temp_constSrc = dev_constSrc; temp_outSrc = dev_outSrc; for(int i=0;i<90;i++) { hipLaunchKernelGGL(( copy_const_kernel), dim3(blocks),dim3(threads), 0, 0, temp_inSrc,temp_constSrc); hipLaunchKernelGGL(( blend_kernel), dim3(blocks),dim3(threads), 0, 0, temp_outSrc,temp_inSrc); swapFunc(temp_inSrc,temp_outSrc); } status = hipDeviceSynchronize(); if (status != hipSuccess) { fprintf_s(g_pFile, "IN Update() hipDeviceSynchronize failed...!! \n"); //return CUDA_DEVICE_SYNC; } hipLaunchKernelGGL(( float_to_color), dim3(blocks),dim3(threads), 0, 0, writeSurface,dev_constSrc,dim3(DIM,DIM)); status = hipDeviceSynchronize(); if (status != hipSuccess) { fprintf_s(g_pFile, "IN Update() hipDeviceSynchronize failed...!! \n"); //return CUDA_DEVICE_SYNC; } status = hipGetLastError(); if (status != hipSuccess) { fprintf_s(g_pFile, "IN Update() Kernel failed : %s \n", hipGetErrorString(status)); } /*status = hipDeviceSynchronize(); if (status != hipSuccess) { fprintf_s(g_pFile, "IN Update() hipDeviceSynchronize failed...!! \n"); return CUDA_DEVICE_SYNC; }*/ status = hipDestroySurfaceObject(writeSurface); if (status != hipSuccess) { fprintf_s(g_pFile, "IN Update() hipDestroySurfaceObject failed...!! \n"); return CUDA_INIT_DESTROY_SURFACE_OBJ_FAILED; } status = hipGraphicsUnmapResources(1, &resource, 0); if (status != hipSuccess) { fprintf_s(g_pFile, "IN Update() hipGraphicsUnmapResources failed...!! \n"); return CUDA_INIT_GRAPHICS_UNMAP_RES_FAILED; } status = hipStreamSynchronize(0); if (status != hipSuccess) { fprintf_s(g_pFile, "IN Update() hipStreamSynchronize failed...!! \n"); return CUDA_INIT_GRAPHICS_UNMAP_RES_FAILED; } status = hipEventRecord(stop,0); if (status != hipSuccess) { fprintf_s(g_pFile,"IN Update() hipEventRecord failed...!! 
\n"); return CUDA_EVENT_RECORD_FAILED; } status = hipEventSynchronize(stop); if (status != hipSuccess) { fprintf_s(g_pFile,"IN Update() hipEventSynchronize failed...!! \n"); return CUDA_EVENT_SYNC_FAILED; } status = hipEventElapsedTime(&elapsedTime,start,stop); if (status != hipSuccess) { fprintf_s(g_pFile,"IN Update() hipEventElapsedTime failed...!! \n"); return CUDA_EVENT_ELP_FAILED; } fprintf_s(g_pFile,"Time to generate: %3.1f ms\n",elapsedTime); status = hipEventDestroy(start); if (status != hipSuccess) { fprintf_s(g_pFile,"IN Update() hipEventDestroy failed...!! \n"); return CUDA_EVENT_DESTROY_FAILED; } status = hipEventDestroy(stop); if (status != hipSuccess) { fprintf_s(g_pFile,"IN Update() hipEventDestroy failed...!! \n"); return CUDA_EVENT_DESTROY_FAILED; } return INIT_ALL_OK; } void Render(void) { glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); vmath::mat4 modelMatrix = vmath::mat4::identity(); vmath::mat4 viewMatrix = vmath::mat4::identity(); glUseProgram(g_ShaderProgramObject); modelMatrix = vmath::translate(0.0f, 0.0f, -3.0f); glUniformMatrix4fv(g_Uniform_Model_Matrix, 1, GL_FALSE, modelMatrix); glUniformMatrix4fv(g_Uniform_View_Matrix, 1, GL_FALSE, viewMatrix); glUniformMatrix4fv(g_Uniform_Projection_Matrix, 1, GL_FALSE, g_PersPectiveProjectionMatrix); glActiveTexture(GL_TEXTURE0); glBindTexture(GL_TEXTURE_2D, g_cuda_texture); //glUniform1i(g_uniform_TextureSampler, 0); glBindVertexArray(g_VertexArrayObject); glDrawArrays(GL_TRIANGLE_FAN, 0, 4); glBindVertexArray(0); glUseProgram(0); SwapBuffers(g_hdc); } void FullScreen(void) { MONITORINFO mi = { sizeof(mi) }; dwStyle = GetWindowLong(g_hwnd, GWL_STYLE); if (g_bFullScreen == false) { if (dwStyle & WS_OVERLAPPEDWINDOW) { if (GetWindowPlacement(g_hwnd, &wpPrev) && GetMonitorInfo(MonitorFromWindow(g_hwnd, MONITORINFOF_PRIMARY), &mi)) { SetWindowLong(g_hwnd, GWL_STYLE, dwStyle & ~WS_OVERLAPPEDWINDOW); SetWindowPos(g_hwnd, HWND_TOP, mi.rcMonitor.left, mi.rcMonitor.top, mi.rcMonitor.right - mi.rcMonitor.left, mi.rcMonitor.bottom - mi.rcMonitor.top, SWP_NOZORDER | SWP_FRAMECHANGED); } } ShowCursor(FALSE); g_bFullScreen = true; } else { SetWindowLong(g_hwnd, GWL_STYLE, dwStyle | WS_OVERLAPPEDWINDOW); SetWindowPlacement(g_hwnd, &wpPrev); SetWindowPos(g_hwnd, HWND_TOP, 0, 0, 0, 0, SWP_NOOWNERZORDER | SWP_NOZORDER | SWP_FRAMECHANGED | SWP_NOMOVE | SWP_NOSIZE); ShowCursor(TRUE); g_bFullScreen = false; } } bool Resize(int iWidth, int iHeight) { if (iHeight <= 0) { iHeight = 1; } glViewport(0, 0, (GLsizei)iWidth, (GLsizei)iHeight); g_PersPectiveProjectionMatrix = vmath::perspective(45.0f, (float)iWidth / (float)iHeight, 0.1f, 100.0f); return true; } int UnInitialize(void) { hipError_t status; if (g_bFullScreen == true) { SetWindowLong(g_hwnd, GWL_STYLE, dwStyle | WS_OVERLAPPEDWINDOW); SetWindowPlacement(g_hwnd, &wpPrev); SetWindowPos(g_hwnd, HWND_TOP, 0, 0, 0, 0, SWP_NOOWNERZORDER | SWP_NOZORDER | SWP_FRAMECHANGED | SWP_NOMOVE | SWP_NOSIZE); ShowCursor(TRUE); g_bFullScreen = false; } // Uninitalize CUDA objects if(dev_inSrc) { hipFree(dev_inSrc); dev_inSrc=NULL; } if(dev_outSrc) { hipFree(dev_outSrc); dev_outSrc=NULL; } if(dev_constSrc) { hipFree(dev_constSrc); dev_constSrc=NULL; } status = hipGraphicsUnmapResources(1, &resource, 0); if (status != hipSuccess) { fprintf_s(g_pFile, "IN UnInitialize() hipGraphicsUnmapResources failed...!! 
\n"); hipDeviceReset(); //return CUDA_INIT_GRAPHICS_UNMAP_RES_FAILED; } hipDeviceReset(); if (g_VertexBufferObject_TexCoords) { glDeleteBuffers(1, &g_VertexBufferObject_TexCoords); g_VertexBufferObject_TexCoords = NULL; } if (g_VertexBufferObject_Position) { glDeleteBuffers(1, &g_VertexBufferObject_Position); g_VertexBufferObject_Position = NULL; } if (g_VertexArrayObject) { glDeleteVertexArrays(1, &g_VertexArrayObject); g_VertexArrayObject = NULL; } glUseProgram(0); /* glDetachShader(g_ShaderProgramObject, iVertexShaderObject); glDetachShader(g_ShaderProgramObject, iFragmentShaderObject); if (iFragmentShaderObject) { glDeleteShader(iFragmentShaderObject); iFragmentShaderObject = 0; } if (iVertexShaderObject) { glDeleteShader(iVertexShaderObject); iVertexShaderObject = 0; } if (g_ShaderProgramObject) { glDeleteProgram(g_ShaderProgramObject); g_ShaderProgramObject = NULL; }*/ if (g_ShaderProgramObject) { GLsizei iShaderCount; GLsizei iShaderNumber; glUseProgram(g_ShaderProgramObject); glGetProgramiv(g_ShaderProgramObject, GL_ATTACHED_SHADERS, &iShaderCount); GLuint *pShaders = (GLuint*)calloc(iShaderCount, sizeof(GLuint)); if (pShaders) { glGetAttachedShaders(g_ShaderProgramObject, iShaderCount, &iShaderCount, pShaders); for (iShaderNumber = 0; iShaderNumber < iShaderCount; iShaderNumber++) { glDetachShader(g_ShaderProgramObject, pShaders[iShaderNumber]); glDeleteShader(pShaders[iShaderNumber]); pShaders[iShaderNumber] = 0; } free(pShaders); pShaders = NULL; } glUseProgram(0); glDeleteProgram(g_ShaderProgramObject); g_ShaderProgramObject = NULL; } if (g_cuda_texture) { glDeleteTextures(1,&g_cuda_texture); g_cuda_texture = 0; } if (wglGetCurrentContext() == g_hrc) { wglMakeCurrent(NULL, NULL); } if (g_hrc) { wglDeleteContext(g_hrc); g_hrc = NULL; } if (g_hdc) { ReleaseDC(g_hwnd, g_hdc); g_hdc = NULL; } if (g_pFile) { fprintf_s(g_pFile, "Closing File \n"); fclose(g_pFile); g_pFile = NULL; } return 0; }
1728219b34d2dc9b3657d9b00c36e91fbfabe564.cu
#include<Windows.h> #include<stdio.h> #include<stdlib.h> #include<math.h> #include<gl\glew.h> #include<gl\GL.h> #include<cuda.h> #include<cuda_gl_interop.h> #include"vmath.h" #define DIM_W 1920 #define DIM_H 1080 #define DIM 1024 #define PI 3.1415926535897932f #define sm_rand( x ) (x * rand() / RAND_MAX) #define INF 2e10f #define MAX_TEMP 1.0f #define MIN_TEMP 0.0001f #define SPEED 1.25f #define WIN_WIDTH 800 #define WIN_HEIGHT 600 #pragma comment(lib,"user32.lib") #pragma comment(lib,"gdi32.lib") #pragma comment(lib,"glew32.lib") #pragma comment(lib,"opengl32.lib") #pragma comment(lib,"cudart.lib") //using namespace std; enum CUDAInitErrorCodes { /* min no -10 */ INIT_CUDA_SETGLDEVICE_FAILED = -30, CUDA_EVENT_RECORD_FAILED, CUDA_EVENT_ELP_FAILED, CUDA_EVENT_SYNC_FAILED, CUDA_EVENT_CREATE_FAILED, CUDA_EVENT_DESTROY_FAILED, INIT_CUDA_MEMCPY_FAILED, INIT_CUDA_MALLOC_FAILED, CUDA_STREAM_SYNC_FAILED, CUDA_INIT_DESTROY_SURFACE_OBJ_FAILED, INIT_CUDA_REGISTER_IMAGE_FAILED, INIT_CUDA_REGISTER_BUFFER_FAILED, CUDA_INIT_GRAPHICS_MAPPED_ARRAY_FAILED, CUDA_INIT_GRAPHICS_MAPPED_RES_FAILED, CUDA_INIT_GRAPHICS_MAPPED_RES_POINTER_FAILED, CUDA_INIT_GRAPHICS_UNMAP_RES_FAILED, CUDA_INIT_BIND_TEX, CUDA_DEVICE_SYNC, INIT_CUDA_CHOOSEDEVICE_FAILED = -10, CUDA_INIT_ALL_OK = 0, }; enum InitErrorCodes { INIT_VERTEX_SHADER_COMPILATION_FAILED = -9, INIT_FRAGMENT_SHADER_COMPILATION_FAILED, INIT_LINK_SHADER_PROGRAM_FAILED, INIT_FAIL_GLEW_INIT, INIT_FAIL_BRIDGE_CONTEX_SET, INIT_FAIL_BRIDGE_CONTEX_CREATION, INIT_FAIL_SET_PIXEL_FORMAT, INIT_FAIL_NO_PIXEL_FORMAT, INIT_FAIL_NO_HDC, INIT_ALL_OK, }; enum attributeBindLocations { SAM_ATTRIBUTE_POSITION = 0, SAM_ATTRIBUTE_COLOR, SAM_ATTRIBUTE_NORNAL, SAM_ATTRIBUTE_TEXTURE0, }; LRESULT CALLBACK MainWndProc(HWND hwnd, UINT iMsg, WPARAM wParam, LPARAM lParam); bool g_bWindowActive = false; HWND g_hwnd = NULL; HDC g_hdc = NULL; HGLRC g_hrc = NULL; WINDOWPLACEMENT wpPrev; DWORD dwStyle; bool g_bFullScreen = false; FILE *g_pFile = NULL; // Shaders //GLuint iVertexShaderObject = 0; //GLuint iFragmentShaderObject = 0; GLuint g_ShaderProgramObject = 0; // All Vertex Buffers GLuint g_VertexArrayObject = 0; GLuint g_VertexBufferObject_Position = 0; GLuint g_VertexBufferObject_TexCoords = 0; // Uniforms GLuint g_Uniform_Model_Matrix = 0; GLuint g_Uniform_View_Matrix = 0; GLuint g_Uniform_Projection_Matrix = 0; // sampler GLuint g_uniform_TextureSampler; GLuint g_cuda_texture; // Projection vmath::mat4 g_PersPectiveProjectionMatrix; //CUDA Res cudaGraphicsResource *resource = NULL; float g_fanimate = 0.0f; bool animation_flag = false; void swapFunc(float *ip1,float *ip2) { float *temp = ip1; ip1 = ip2; ip2 = temp; } __device__ unsigned char value(float n1,float n2,int hue) { if(hue > 360) hue -= 360; else if(hue < 0) hue += 360; if(hue < 60) return (unsigned char)(255 * (n1 + (n2-n1)*hue/60)); if(hue < 180) return (unsigned char)(255 * n2); if(hue < 240) return (unsigned char)(255 * (n1 + (n2-n1)*(240-hue)/60)); return (unsigned char)(255*n1); } __global__ void float_to_color(cudaSurfaceObject_t target, const float *outSrc ,dim3 texDim) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int offset = x + y * blockDim.x * gridDim.x; float l = outSrc[offset]; float s = l; int h = (180 + (int)(360.0f * outSrc[offset])) % 360; float m1,m2; if(l <= 0.5f) m2 = l * (l+s); else m2 = l + s - l * s; m1 = 2 * l - m2; if (x < texDim.x && y < texDim.y) { uchar4 data = make_uchar4(value( m1, m2, h+120 ),value( m1, m2, h ),value( m1, m2, h -120 ), 255); 
surf2Dwrite(data, target, x * sizeof(uchar4), y, cudaBoundaryModeTrap); } } /** * Animating Kernel */ __global__ void blend_kernel(float *outSrc,const float *inSrc) { // map from threadIdx/blockIdx to pixel position int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int offset = x + y * blockDim.x * gridDim.x; int left = offset - 1; int right = offset + 1; if(x==0) left++; if(y==DIM -1) right--; int top = offset - DIM; int bottom = offset + DIM; if(y==0) top += DIM; if(y==DIM-1) bottom -= DIM; outSrc[offset] = inSrc[offset] + SPEED * (inSrc[top] + inSrc[bottom] + inSrc[left] + inSrc[right] - inSrc[offset] * 4); } /** * Constant Kernel */ __global__ void copy_const_kernel(float *iptr,const float *cptr) { // map threads and blocks to pixel position int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int offset = x + y * blockDim.x * gridDim.x; if(cptr[offset]!=0) iptr[offset] = cptr[offset]; } float *dev_inSrc; float *dev_outSrc; float *dev_constSrc; int WINAPI WinMain(HINSTANCE hInstance, HINSTANCE hPrevInstance, LPSTR szCmdLine, int iCmdShow) { //int UnInitialize(void); int Initialize(void); int Update(void); void Render(void); // Windowing Elelments WNDCLASSEX wndclass; MSG msg; HWND hwnd = NULL; TCHAR szClassName[] = TEXT("Sam_OGL"); RECT windowRect; // Game Loop Control bool bDone = false; // Initialization Status int iInitRet = 0; SecureZeroMemory((void*)&wndclass, sizeof(wndclass)); wndclass.cbSize = sizeof(wndclass); wndclass.cbClsExtra = 0; wndclass.cbWndExtra = 0; wndclass.style = CS_HREDRAW | CS_VREDRAW | CS_OWNDC; wndclass.lpfnWndProc = MainWndProc; wndclass.lpszClassName = szClassName; wndclass.lpszMenuName = NULL; wndclass.hInstance = hInstance; wndclass.hbrBackground = (HBRUSH)GetStockObject(GRAY_BRUSH); wndclass.hIcon = LoadIcon(hInstance, IDI_APPLICATION); wndclass.hIconSm = LoadIcon(hInstance, IDI_APPLICATION); wndclass.hCursor = LoadCursor(hInstance, IDC_ARROW); if (!RegisterClassEx(&wndclass)) { MessageBox(NULL, TEXT("Issue...!!!"), TEXT("Could Not RegisterClass() "), MB_OK | MB_ICONERROR); exit(EXIT_FAILURE); } if ((fopen_s(&g_pFile, "SamLogFile.txt", "w+")) == 0) { fprintf_s(g_pFile, "File Opened Successfully. 
\n"); } else { MessageBox(NULL, TEXT("Issue...!!!"), TEXT("Could not open File"), MB_OK | MB_ICONERROR); exit(EXIT_FAILURE); } SecureZeroMemory((void*)&windowRect, sizeof(windowRect)); windowRect.left = 0; windowRect.top = 0; windowRect.bottom = WIN_HEIGHT; windowRect.right = WIN_WIDTH; AdjustWindowRectEx(&windowRect, WS_OVERLAPPEDWINDOW, FALSE, WS_EX_APPWINDOW); hwnd = CreateWindowEx(WS_EX_APPWINDOW, szClassName, TEXT("First_OpenGL_Window"), WS_OVERLAPPEDWINDOW | WS_CLIPCHILDREN | WS_CLIPSIBLINGS | WS_VISIBLE, CW_USEDEFAULT, CW_USEDEFAULT, windowRect.right - windowRect.left, windowRect.bottom - windowRect.top, NULL, NULL, hInstance, NULL); if (hwnd == NULL) { MessageBox(NULL, TEXT("Issue...!!!"), TEXT("Could Not CreateWindow() "), MB_OK | MB_ICONERROR); exit(EXIT_FAILURE); } g_hwnd = hwnd; iInitRet = Initialize(); switch (iInitRet) { case INIT_ALL_OK: fprintf_s(g_pFile, "Initialize Complete \n"); break; case INIT_FAIL_NO_HDC: fprintf_s(g_pFile, "Failed to Get HDC \n"); DestroyWindow(hwnd); break; case INIT_FAIL_NO_PIXEL_FORMAT: fprintf_s(g_pFile, "Failed to get PixelFormat \n"); DestroyWindow(hwnd); break; case INIT_FAIL_SET_PIXEL_FORMAT: fprintf_s(g_pFile, "Failed to set Pixel Format \n"); DestroyWindow(hwnd); break; case INIT_FAIL_BRIDGE_CONTEX_CREATION: fprintf_s(g_pFile, "Failed to wglCreateContext \n"); DestroyWindow(hwnd); break; case INIT_FAIL_BRIDGE_CONTEX_SET: fprintf_s(g_pFile, "Failed to wglMakeCurrent \n"); DestroyWindow(hwnd); break; case INIT_FAIL_GLEW_INIT: fprintf_s(g_pFile, "Failed to glewInit \n"); DestroyWindow(hwnd); break; case INIT_LINK_SHADER_PROGRAM_FAILED: fprintf_s(g_pFile, "Failed to Link Shader Program Object \n"); DestroyWindow(hwnd); break; case INIT_VERTEX_SHADER_COMPILATION_FAILED: fprintf_s(g_pFile, "Failed to Compile vertex Shader \n"); DestroyWindow(hwnd); break; case INIT_FRAGMENT_SHADER_COMPILATION_FAILED: fprintf_s(g_pFile, "Failed to Compile fragment Shader \n"); DestroyWindow(hwnd); break; default: /*fprintf_s(g_pFile, "Failed UnKnown Reasons \n"); DestroyWindow(hwnd);*/ switch (iInitRet) { case INIT_CUDA_CHOOSEDEVICE_FAILED: fprintf_s(g_pFile, "cudaChooseDevice Failed \n"); DestroyWindow(hwnd); break; /*default: fprintf_s(g_pFile, "CUDA Failed UnKnown Reasons \n"); DestroyWindow(hwnd); break;*/ } // General Failure fprintf_s(g_pFile, "Failed UnKnown Reasons \n"); DestroyWindow(hwnd); break; } ShowWindow(hwnd, SW_SHOWNORMAL); SetForegroundWindow(hwnd); SetFocus(hwnd); while (bDone == false) { if (PeekMessage(&msg, NULL, 0, 0, PM_REMOVE)) { if (msg.message == WM_QUIT) { bDone = true; } else { TranslateMessage(&msg); DispatchMessage(&msg); } } else { if (g_bWindowActive) { Update(); } // Show all Animations Render(); } } //UnInitialize(); return ((int)msg.wParam); } LRESULT CALLBACK MainWndProc(HWND hwnd, UINT iMsg, WPARAM wParam, LPARAM lParam) { int UnInitialize(void); void FullScreen(void); bool Resize(int, int); switch (iMsg) { case WM_CREATE: PostMessage(hwnd, WM_KEYDOWN, (WPARAM)0x46, (LPARAM)NULL); break; case WM_SETFOCUS: g_bWindowActive = true; break; case WM_KILLFOCUS: g_bWindowActive = false; break; case WM_KEYDOWN: switch (LOWORD(wParam)) { case VK_ESCAPE: DestroyWindow(hwnd); break; case 0x46: // 'f' or 'F' //MessageBox(hwnd, TEXT("F is pressed"), TEXT("Status"), MB_OK); FullScreen(); break; default: break; } break; case WM_SIZE: Resize(LOWORD(lParam), HIWORD(lParam)); break; case WM_ERASEBKGND: return(0); //break; case WM_CLOSE: DestroyWindow(hwnd); break; case WM_DESTROY: UnInitialize(); PostQuitMessage(0); break; default: break; } 
return (DefWindowProc(hwnd, iMsg, wParam, lParam)); } int Initialize(void) { bool Resize(int, int); int iPixelIndex = 0; PIXELFORMATDESCRIPTOR pfd; cudaError cuErr; // Shader Programs GLuint iVertexShaderObject = 0; GLuint iFragmentShaderObject = 0; GLenum err = NULL; // GLEW Error codes SecureZeroMemory(&pfd, sizeof(pfd)); pfd.nSize = sizeof(pfd); pfd.nVersion = 1; pfd.dwFlags = PFD_SUPPORT_OPENGL | PFD_DRAW_TO_WINDOW | PFD_DOUBLEBUFFER; pfd.iPixelType = PFD_TYPE_RGBA; pfd.cColorBits = 32; pfd.cRedBits = 8; pfd.cGreenBits = 8; pfd.cBlueBits = 8; pfd.cAlphaBits = 8; g_hdc = GetDC(g_hwnd); if (g_hdc == NULL) { return INIT_FAIL_NO_HDC; } iPixelIndex = ChoosePixelFormat(g_hdc, &pfd); if (iPixelIndex == 0) { return INIT_FAIL_NO_PIXEL_FORMAT; } if (SetPixelFormat(g_hdc, iPixelIndex, &pfd) == FALSE) { return INIT_FAIL_SET_PIXEL_FORMAT; } g_hrc = wglCreateContext(g_hdc); if (g_hrc == NULL) { return INIT_FAIL_BRIDGE_CONTEX_CREATION; } if (wglMakeCurrent(g_hdc, g_hrc) == FALSE) { return INIT_FAIL_BRIDGE_CONTEX_SET; } // Enables Feature Required for Programable Pipeline err = glewInit(); if (err != GLEW_OK) { return INIT_FAIL_GLEW_INIT; } // CUDA Initalization cuErr = cudaGLSetGLDevice(0); // Default device 0 will share resources with OpenGL if (cuErr != cudaSuccess) { return INIT_CUDA_SETGLDEVICE_FAILED; } // GL information Start fprintf_s(g_pFile, "SHADER_INFO : Vendor is : %s\n", glGetString(GL_VENDOR)); fprintf_s(g_pFile, "SHADER_INFO : Renderer is : %s\n", glGetString(GL_RENDER)); fprintf_s(g_pFile, "SHADER_INFO : OpenGL Version is : %s\n", glGetString(GL_VERSION)); fprintf_s(g_pFile, "SHADER_INFO : GLSL Version is : %s\n", glGetString(GL_SHADING_LANGUAGE_VERSION)); int maxAttachments = 0; glGetIntegerv(GL_MAX_COLOR_ATTACHMENTS, &maxAttachments); fprintf_s(g_pFile, "SHADER_INFO : GL_MAX_COLOR_ATTACHMENTS is : %d\n", maxAttachments); //fprintf_s(g_pFile, "SHADER_INFO : Extention is : %s \n", glGetString(GL_EXTENSIONS)); // GL information End /// Sam : all Shader Code Start /*gl_Position = u_projection_matrix * u_view_matrix * u_model_matrix * vPosition;*/ /*Vertex Shader Start*/ iVertexShaderObject = glCreateShader(GL_VERTEX_SHADER); const GLchar *vertexShaderSourceCode = "#version 450 core" \ "\n" \ "layout (location = 0)in vec4 vPosition;" \ "layout (location = 3)in vec2 vTexture0_Coord;" \ "layout (location = 0)out vec2 out_Texture0_Coord;" \ "uniform mat4 u_model_matrix,u_view_matrix,u_projection_matrix;" \ "void main(void)" \ "{" \ " gl_Position = vPosition;" \ " out_Texture0_Coord = vTexture0_Coord;" \ "}"; glShaderSource(iVertexShaderObject, 1, (const GLchar**)&vertexShaderSourceCode, NULL); // Compile it glCompileShader(iVertexShaderObject); GLint iInfoLogLength = 0; GLint iShaderCompileStatus = 0; GLchar *szInfoLog = NULL; glGetShaderiv(iVertexShaderObject, GL_COMPILE_STATUS, &iShaderCompileStatus); if (iShaderCompileStatus == GL_FALSE) { glGetShaderiv(iVertexShaderObject, GL_INFO_LOG_LENGTH, &iInfoLogLength); if (iInfoLogLength>0) { szInfoLog = (GLchar*)malloc(iInfoLogLength * sizeof(GLchar)); if (szInfoLog != NULL) { GLsizei written; glGetShaderInfoLog(iVertexShaderObject, GL_INFO_LOG_LENGTH, &written, szInfoLog); fprintf_s(g_pFile, "ERROR : Vertex Shader Compilation Log : %s \n", szInfoLog); free(szInfoLog); szInfoLog = NULL; return INIT_VERTEX_SHADER_COMPILATION_FAILED; //DestroyWindow(g_hwnd); //exit(EXIT_FAILURE); } } } /*Vertex Shader End*/ /*Fragment Shader Start*/ iFragmentShaderObject = glCreateShader(GL_FRAGMENT_SHADER); const GLchar *fragmentShaderSourceCode = "#version 450 
core" \ "\n" \ "layout (location = 0)in vec2 out_Texture0_Coord;" \ "layout (location = 0)out vec4 FragColor;" \ "uniform sampler2D u_texture0_sampler;" \ "void main(void)" \ "{" \ " FragColor = texture(u_texture0_sampler,out_Texture0_Coord);" \ "}"; glShaderSource(iFragmentShaderObject, 1, (const GLchar**)&fragmentShaderSourceCode, NULL); glCompileShader(iFragmentShaderObject); iInfoLogLength = 0; iShaderCompileStatus = 0; szInfoLog = NULL; glGetShaderiv(iFragmentShaderObject, GL_COMPILE_STATUS, &iShaderCompileStatus); if (iShaderCompileStatus == GL_FALSE) { glGetShaderiv(iFragmentShaderObject, GL_INFO_LOG_LENGTH, &iInfoLogLength); if (iInfoLogLength>0) { szInfoLog = (GLchar*)malloc(iInfoLogLength * sizeof(GLchar)); if (szInfoLog != NULL) { GLsizei written; glGetShaderInfoLog(iFragmentShaderObject, GL_INFO_LOG_LENGTH, &written, szInfoLog); fprintf(g_pFile, "ERROR: Fragment Shader Compilation Log : %s \n", szInfoLog); free(szInfoLog); szInfoLog = NULL; return INIT_FRAGMENT_SHADER_COMPILATION_FAILED; //DestroyWindow(g_hwnd); //exit(EXIT_FAILURE); } } } /*Fragment Shader End*/ /* Shader Program Start */ g_ShaderProgramObject = glCreateProgram(); glAttachShader(g_ShaderProgramObject, iVertexShaderObject); glAttachShader(g_ShaderProgramObject, iFragmentShaderObject); glBindAttribLocation(g_ShaderProgramObject, SAM_ATTRIBUTE_POSITION, "vPosition"); glBindAttribLocation(g_ShaderProgramObject, SAM_ATTRIBUTE_TEXTURE0, "vTexture0_Coord"); glLinkProgram(g_ShaderProgramObject); GLint iShaderLinkStatus = 0; iInfoLogLength = 0; glGetProgramiv(g_ShaderProgramObject, GL_LINK_STATUS, &iShaderLinkStatus); if (iShaderLinkStatus == GL_FALSE) { glGetProgramiv(g_ShaderProgramObject, GL_INFO_LOG_LENGTH, &iInfoLogLength); if (iInfoLogLength>0) { szInfoLog = (GLchar*)malloc(iInfoLogLength * sizeof(GLchar)); if (szInfoLog != NULL) { GLsizei written; glGetShaderInfoLog(g_ShaderProgramObject, GL_INFO_LOG_LENGTH, &written, szInfoLog); fprintf_s(g_pFile, "ERROR : Linking Shader Program Objects Failed %s \n", szInfoLog); free(szInfoLog); szInfoLog = NULL; return INIT_LINK_SHADER_PROGRAM_FAILED; //DestroyWindow(g_hwnd); //exit(EXIT_FAILURE); } } } /* Shader Program End */ /*Setup Uniforms Start*/ g_Uniform_Model_Matrix = glGetUniformLocation(g_ShaderProgramObject, "u_model_matrix"); g_Uniform_Projection_Matrix = glGetUniformLocation(g_ShaderProgramObject, "u_projection_matrix"); g_Uniform_View_Matrix = glGetUniformLocation(g_ShaderProgramObject, "u_view_matrix"); //g_uniform_TextureSampler = glGetUniformLocation(g_ShaderProgramObject, "u_texture0_sampler"); /*Setup Uniforms End*/ /* Fill Buffers Start*/ //// Cube Section Start const GLfloat squareVertices[] = { -1.0f, 1.0f, 0.0f, -1.0f, -1.0f, 0.0f, 1.0f, -1.0f, 0.0f, 1.0f, 1.0f, 0.0f }; const GLfloat squareTexCords[] = { 0.0f, 1.0f, 0.0f, 0.0f, 1.0f,0.0f, 1.0f,1.0f }; glGenVertexArrays(1, &g_VertexArrayObject);//VAO glBindVertexArray(g_VertexArrayObject); glGenBuffers(1, &g_VertexBufferObject_Position);// vbo position glBindBuffer(GL_ARRAY_BUFFER, g_VertexBufferObject_Position); glBufferData(GL_ARRAY_BUFFER, sizeof(squareVertices), squareVertices, GL_STATIC_DRAW); glVertexAttribPointer(SAM_ATTRIBUTE_POSITION, 3, GL_FLOAT, GL_FALSE, 0, NULL); glEnableVertexAttribArray(SAM_ATTRIBUTE_POSITION); glBindBuffer(GL_ARRAY_BUFFER, 0); glGenBuffers(1, &g_VertexBufferObject_TexCoords); // vbo texcoords glBindBuffer(GL_ARRAY_BUFFER, g_VertexBufferObject_TexCoords); glBufferData(GL_ARRAY_BUFFER, sizeof(squareTexCords), squareTexCords, GL_STATIC_DRAW); 
glVertexAttribPointer(SAM_ATTRIBUTE_TEXTURE0, 2, GL_FLOAT, GL_FALSE, 0, NULL); glEnableVertexAttribArray(SAM_ATTRIBUTE_TEXTURE0); glBindBuffer(GL_ARRAY_BUFFER, 0); glBindVertexArray(0); /* Fill Buffers End*/ // Generate texture for working with cuda glGenTextures(1, &g_cuda_texture); glBindTexture(GL_TEXTURE_2D, g_cuda_texture); // Texture parameters glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_LINEAR); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_LINEAR); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST); // give texture some storage glTexImage2D(GL_TEXTURE_2D,0,GL_RGBA, DIM, DIM,0,GL_RGBA,GL_UNSIGNED_INT,NULL); glBindTexture(GL_TEXTURE_2D, 0); /// Sam : all Shader Code End glEnable(GL_TEXTURE_2D); glShadeModel(GL_SMOOTH); glClearDepth(1.0f); glEnable(GL_DEPTH_TEST); glDepthFunc(GL_LEQUAL); glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_NICEST); //glEnable(GL_CULL_FACE); glClearColor(0.125f, 0.125f, 0.125f, 1.0f); g_PersPectiveProjectionMatrix = vmath::mat4::identity(); // CUDA Allocations // allocate for dev_inSrc // considering float == 4 chars (rgba) cuErr = cudaMalloc((void**)&dev_inSrc,4*DIM*DIM); if(cuErr!=cudaSuccess) { fprintf_s(g_pFile, "CUDA ERROR : cudaMalloc failed at line %d\n",__LINE__); return INIT_CUDA_MALLOC_FAILED; } cuErr = cudaMalloc((void**)&dev_outSrc,4*DIM*DIM); if(cuErr!=cudaSuccess) { fprintf_s(g_pFile, "CUDA ERROR : cudaMalloc failed at line %d\n",__LINE__); return INIT_CUDA_MALLOC_FAILED; } cuErr=cudaMalloc((void**)&dev_constSrc,4*DIM*DIM); if(cuErr!=cudaSuccess) { fprintf_s(g_pFile, "CUDA ERROR : cudaMalloc failed at line %d\n",__LINE__); return INIT_CUDA_MALLOC_FAILED; } float *temp = (float*)calloc(4*DIM*DIM,sizeof(char)); for(int i=0; i < DIM*DIM; i++) { temp[i] = 0; int x = i % DIM; int y = i / DIM; if((x>300)&&(x<600)&&(y>310)&&(y<601)) temp[i] = MAX_TEMP; } temp[DIM*100+100] = (MAX_TEMP + MIN_TEMP)/2; temp[DIM*700+100] = MIN_TEMP; temp[DIM*300+300] = MIN_TEMP; temp[DIM*200+700] = MIN_TEMP; for(int y = 800;y<900;y++) { for(int x = 400;x<500;x++) { temp[x+y*DIM] = MIN_TEMP; } } cuErr = cudaMemcpy(dev_constSrc,temp,4*DIM*DIM,cudaMemcpyHostToDevice); if(cuErr != cudaSuccess) { fprintf_s(g_pFile, "CUDA ERROR : cudaMemcpy failed at line %d\n",__LINE__); return INIT_CUDA_MEMCPY_FAILED; } for(int y = 800; y<DIM; y++) { for(int x = 0; x<200; x++) { temp[x+y*DIM] = MAX_TEMP; } } cuErr = cudaMemcpy(dev_inSrc,temp,4*DIM*DIM,cudaMemcpyHostToDevice); if(cuErr != cudaSuccess) { fprintf_s(g_pFile, "CUDA ERROR : cudaMemcpy failed at line %d\n",__LINE__); return INIT_CUDA_MEMCPY_FAILED; } if(temp) { free(temp); temp=NULL; } //CUDA_INIT_BIND_TEX /// Register With CUDA Start // last param as "cudaGraphicsRegisterFlagsSurfaceLoadStore" cuErr = cudaGraphicsGLRegisterImage(&resource, g_cuda_texture, GL_TEXTURE_2D, cudaGraphicsRegisterFlagsSurfaceLoadStore|cudaGraphicsRegisterFlagsWriteDiscard); if (cuErr!=cudaSuccess) { fprintf_s(g_pFile, "CUDA ERROR : cudaGraphicsGLRegisterImage failed at line %d\n",__LINE__); return INIT_CUDA_REGISTER_IMAGE_FAILED; } /// Register With CUDA Stop Resize(WIN_WIDTH, WIN_HEIGHT); return INIT_ALL_OK; } int Update(void) { void swapFunc(float *ip1,float *ip2); //float4 *devPtr = NULL; //size_t size; cudaError status; cudaArray_t cudaWriteArray; cudaEvent_t start,stop; float elapsedTime = 0.0f; if (animation_flag) { g_fanimate = g_fanimate + 0.009f; if ((g_fanimate >6.0f)) { animation_flag = false; } } else { g_fanimate = g_fanimate - 0.008f; if ((g_fanimate 
<1.0f)) { animation_flag = true; } } status = cudaEventCreate(&start); if (status != cudaSuccess) { fprintf_s(g_pFile,"IN Update() cudaEventCreate failed...!! \n"); return CUDA_EVENT_CREATE_FAILED; } status = cudaEventCreate(&stop); if (status != cudaSuccess) { fprintf_s(g_pFile,"IN Update() cudaEventCreate failed...!! \n"); return CUDA_EVENT_CREATE_FAILED; } status = cudaEventRecord(start,0); if (status != cudaSuccess) { fprintf_s(g_pFile,"IN Update() cudaEventCreate failed...!! \n"); return CUDA_EVENT_RECORD_FAILED; } status = cudaGraphicsMapResources(1, &resource, 0); if (status != cudaSuccess) { fprintf_s(g_pFile,"IN Update() cudaGraphicsMapResources failed...!! \n"); return CUDA_INIT_GRAPHICS_MAPPED_RES_FAILED; } status = cudaGraphicsSubResourceGetMappedArray(&cudaWriteArray,resource,0,0); if (status != cudaSuccess) { fprintf_s(g_pFile, "IN Update() cudaGraphicsSubResourceGetMappedArray failed...!! \n"); return CUDA_INIT_GRAPHICS_MAPPED_ARRAY_FAILED; } // Prepare a Surface object for cuda cudaResourceDesc writeDescriptor; ZeroMemory((void**)&writeDescriptor,sizeof(writeDescriptor)); writeDescriptor.resType = cudaResourceTypeArray; writeDescriptor.res.array.array = cudaWriteArray; cudaSurfaceObject_t writeSurface; status = cudaCreateSurfaceObject(&writeSurface, &writeDescriptor); if (status != cudaSuccess) { fprintf_s(g_pFile, "IN Update() cudaCreateSurfaceObject failed...!! \n"); return CUDA_INIT_GRAPHICS_MAPPED_ARRAY_FAILED; } // After successfully creating surface object write to the texture using kernel //dim3 thread(30,30); //dim3 block(ceil(DIM_W/thread.x), ceil(DIM_H/thread.y)); //rt_noConst_kernel<<<block, thread >>>(writeSurface, dim3(DIM_W, DIM_H)); dim3 blocks(DIM/16,DIM/16,1); dim3 threads(16,16); volatile bool dstOut = true; float *temp_inSrc,*temp_constSrc,*temp_outSrc; temp_inSrc = dev_inSrc; temp_constSrc = dev_constSrc; temp_outSrc = dev_outSrc; for(int i=0;i<90;i++) { copy_const_kernel<<<blocks,threads>>>(temp_inSrc,temp_constSrc); blend_kernel<<<blocks,threads>>>(temp_outSrc,temp_inSrc); swapFunc(temp_inSrc,temp_outSrc); } status = cudaDeviceSynchronize(); if (status != cudaSuccess) { fprintf_s(g_pFile, "IN Update() cudaDeviceSynchronize failed...!! \n"); //return CUDA_DEVICE_SYNC; } float_to_color<<<blocks,threads>>>(writeSurface,dev_constSrc,dim3(DIM,DIM)); status = cudaDeviceSynchronize(); if (status != cudaSuccess) { fprintf_s(g_pFile, "IN Update() cudaDeviceSynchronize failed...!! \n"); //return CUDA_DEVICE_SYNC; } status = cudaGetLastError(); if (status != cudaSuccess) { fprintf_s(g_pFile, "IN Update() Kernel failed : %s \n", cudaGetErrorString(status)); } /*status = cudaDeviceSynchronize(); if (status != cudaSuccess) { fprintf_s(g_pFile, "IN Update() cudaDeviceSynchronize failed...!! \n"); return CUDA_DEVICE_SYNC; }*/ status = cudaDestroySurfaceObject(writeSurface); if (status != cudaSuccess) { fprintf_s(g_pFile, "IN Update() cudaDestroySurfaceObject failed...!! \n"); return CUDA_INIT_DESTROY_SURFACE_OBJ_FAILED; } status = cudaGraphicsUnmapResources(1, &resource, 0); if (status != cudaSuccess) { fprintf_s(g_pFile, "IN Update() cudaGraphicsUnmapResources failed...!! \n"); return CUDA_INIT_GRAPHICS_UNMAP_RES_FAILED; } status = cudaStreamSynchronize(0); if (status != cudaSuccess) { fprintf_s(g_pFile, "IN Update() cudaStreamSynchronize failed...!! \n"); return CUDA_INIT_GRAPHICS_UNMAP_RES_FAILED; } status = cudaEventRecord(stop,0); if (status != cudaSuccess) { fprintf_s(g_pFile,"IN Update() cudaEventRecord failed...!! 
\n"); return CUDA_EVENT_RECORD_FAILED; } status = cudaEventSynchronize(stop); if (status != cudaSuccess) { fprintf_s(g_pFile,"IN Update() cudaEventSynchronize failed...!! \n"); return CUDA_EVENT_SYNC_FAILED; } status = cudaEventElapsedTime(&elapsedTime,start,stop); if (status != cudaSuccess) { fprintf_s(g_pFile,"IN Update() cudaEventElapsedTime failed...!! \n"); return CUDA_EVENT_ELP_FAILED; } fprintf_s(g_pFile,"Time to generate: %3.1f ms\n",elapsedTime); status = cudaEventDestroy(start); if (status != cudaSuccess) { fprintf_s(g_pFile,"IN Update() cudaEventDestroy failed...!! \n"); return CUDA_EVENT_DESTROY_FAILED; } status = cudaEventDestroy(stop); if (status != cudaSuccess) { fprintf_s(g_pFile,"IN Update() cudaEventDestroy failed...!! \n"); return CUDA_EVENT_DESTROY_FAILED; } return INIT_ALL_OK; } void Render(void) { glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); vmath::mat4 modelMatrix = vmath::mat4::identity(); vmath::mat4 viewMatrix = vmath::mat4::identity(); glUseProgram(g_ShaderProgramObject); modelMatrix = vmath::translate(0.0f, 0.0f, -3.0f); glUniformMatrix4fv(g_Uniform_Model_Matrix, 1, GL_FALSE, modelMatrix); glUniformMatrix4fv(g_Uniform_View_Matrix, 1, GL_FALSE, viewMatrix); glUniformMatrix4fv(g_Uniform_Projection_Matrix, 1, GL_FALSE, g_PersPectiveProjectionMatrix); glActiveTexture(GL_TEXTURE0); glBindTexture(GL_TEXTURE_2D, g_cuda_texture); //glUniform1i(g_uniform_TextureSampler, 0); glBindVertexArray(g_VertexArrayObject); glDrawArrays(GL_TRIANGLE_FAN, 0, 4); glBindVertexArray(0); glUseProgram(0); SwapBuffers(g_hdc); } void FullScreen(void) { MONITORINFO mi = { sizeof(mi) }; dwStyle = GetWindowLong(g_hwnd, GWL_STYLE); if (g_bFullScreen == false) { if (dwStyle & WS_OVERLAPPEDWINDOW) { if (GetWindowPlacement(g_hwnd, &wpPrev) && GetMonitorInfo(MonitorFromWindow(g_hwnd, MONITORINFOF_PRIMARY), &mi)) { SetWindowLong(g_hwnd, GWL_STYLE, dwStyle & ~WS_OVERLAPPEDWINDOW); SetWindowPos(g_hwnd, HWND_TOP, mi.rcMonitor.left, mi.rcMonitor.top, mi.rcMonitor.right - mi.rcMonitor.left, mi.rcMonitor.bottom - mi.rcMonitor.top, SWP_NOZORDER | SWP_FRAMECHANGED); } } ShowCursor(FALSE); g_bFullScreen = true; } else { SetWindowLong(g_hwnd, GWL_STYLE, dwStyle | WS_OVERLAPPEDWINDOW); SetWindowPlacement(g_hwnd, &wpPrev); SetWindowPos(g_hwnd, HWND_TOP, 0, 0, 0, 0, SWP_NOOWNERZORDER | SWP_NOZORDER | SWP_FRAMECHANGED | SWP_NOMOVE | SWP_NOSIZE); ShowCursor(TRUE); g_bFullScreen = false; } } bool Resize(int iWidth, int iHeight) { if (iHeight <= 0) { iHeight = 1; } glViewport(0, 0, (GLsizei)iWidth, (GLsizei)iHeight); g_PersPectiveProjectionMatrix = vmath::perspective(45.0f, (float)iWidth / (float)iHeight, 0.1f, 100.0f); return true; } int UnInitialize(void) { cudaError status; if (g_bFullScreen == true) { SetWindowLong(g_hwnd, GWL_STYLE, dwStyle | WS_OVERLAPPEDWINDOW); SetWindowPlacement(g_hwnd, &wpPrev); SetWindowPos(g_hwnd, HWND_TOP, 0, 0, 0, 0, SWP_NOOWNERZORDER | SWP_NOZORDER | SWP_FRAMECHANGED | SWP_NOMOVE | SWP_NOSIZE); ShowCursor(TRUE); g_bFullScreen = false; } // Uninitalize CUDA objects if(dev_inSrc) { cudaFree(dev_inSrc); dev_inSrc=NULL; } if(dev_outSrc) { cudaFree(dev_outSrc); dev_outSrc=NULL; } if(dev_constSrc) { cudaFree(dev_constSrc); dev_constSrc=NULL; } status = cudaGraphicsUnmapResources(1, &resource, 0); if (status != cudaSuccess) { fprintf_s(g_pFile, "IN UnInitialize() cudaGraphicsUnmapResources failed...!! 
\n"); cudaDeviceReset(); //return CUDA_INIT_GRAPHICS_UNMAP_RES_FAILED; } cudaDeviceReset(); if (g_VertexBufferObject_TexCoords) { glDeleteBuffers(1, &g_VertexBufferObject_TexCoords); g_VertexBufferObject_TexCoords = NULL; } if (g_VertexBufferObject_Position) { glDeleteBuffers(1, &g_VertexBufferObject_Position); g_VertexBufferObject_Position = NULL; } if (g_VertexArrayObject) { glDeleteVertexArrays(1, &g_VertexArrayObject); g_VertexArrayObject = NULL; } glUseProgram(0); /* glDetachShader(g_ShaderProgramObject, iVertexShaderObject); glDetachShader(g_ShaderProgramObject, iFragmentShaderObject); if (iFragmentShaderObject) { glDeleteShader(iFragmentShaderObject); iFragmentShaderObject = 0; } if (iVertexShaderObject) { glDeleteShader(iVertexShaderObject); iVertexShaderObject = 0; } if (g_ShaderProgramObject) { glDeleteProgram(g_ShaderProgramObject); g_ShaderProgramObject = NULL; }*/ if (g_ShaderProgramObject) { GLsizei iShaderCount; GLsizei iShaderNumber; glUseProgram(g_ShaderProgramObject); glGetProgramiv(g_ShaderProgramObject, GL_ATTACHED_SHADERS, &iShaderCount); GLuint *pShaders = (GLuint*)calloc(iShaderCount, sizeof(GLuint)); if (pShaders) { glGetAttachedShaders(g_ShaderProgramObject, iShaderCount, &iShaderCount, pShaders); for (iShaderNumber = 0; iShaderNumber < iShaderCount; iShaderNumber++) { glDetachShader(g_ShaderProgramObject, pShaders[iShaderNumber]); glDeleteShader(pShaders[iShaderNumber]); pShaders[iShaderNumber] = 0; } free(pShaders); pShaders = NULL; } glUseProgram(0); glDeleteProgram(g_ShaderProgramObject); g_ShaderProgramObject = NULL; } if (g_cuda_texture) { glDeleteTextures(1,&g_cuda_texture); g_cuda_texture = 0; } if (wglGetCurrentContext() == g_hrc) { wglMakeCurrent(NULL, NULL); } if (g_hrc) { wglDeleteContext(g_hrc); g_hrc = NULL; } if (g_hdc) { ReleaseDC(g_hwnd, g_hdc); g_hdc = NULL; } if (g_pFile) { fprintf_s(g_pFile, "Closing File \n"); fclose(g_pFile); g_pFile = NULL; } return 0; }
2f51c8498fa5c59f468cd9bf088c5aad0491109e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<stdio.h> #include<sys/time.h> /* #define BLOCKSIZEX 64 #define BLOCKSIZEY 16 #define BLOCKSIZE BLOCKSIZEX * BLOCKSIZEY #define GRIDSIZEX 8 #define GRIDSIZEY 16 #define GRIDSIZE GRIDSIZEX * GRIDSIZEY #define THREAD_NUM BLOCKSIZE * GRIDSIZE #define MIMAX 256 #define MJMAX GRIDSIZEY * (BLOCKSIZEY - 2) + 2 #define MKMAX GRIDSIZEX * (BLOCKSIZEX - 2) + 2 #define NN 750 */ #define BLOCKSIZEX 6 #define BLOCKSIZEY 4 #define BLOCKSIZE BLOCKSIZEX * BLOCKSIZEY #define GRIDSIZEX 3 #define GRIDSIZEY 6 #define GRIDSIZE GRIDSIZEX * GRIDSIZEY #define THREAD_NUM BLOCKSIZE * GRIDSIZE #define MIMAX 6 #define MJMAX GRIDSIZEY * (BLOCKSIZEY - 2) + 2 #define MKMAX GRIDSIZEX * (BLOCKSIZEX - 2) + 2 #define NN 3 /*static float p[MIMAX][MJMAX][MKMAX]; static float a[MIMAX][MJMAX][MKMAX][4]; static float b[MIMAX][MJMAX][MKMAX][3]; static float c[MIMAX][MJMAX][MKMAX][3]; static float bnd[MIMAX][MJMAX][MKMAX]; static float work1[MIMAX][MJMAX][MKMAX]; static float work2[MIMAX][MJMAX][MKMAX];*/ static int imax, jmax, kmax, mimax, mjmax, mkmax; static float omega; double second(){ struct timeval tm; double t; static int base_sec = 0, base_usec = 0; gettimeofday(&tm, NULL); if(base_sec == 0 && base_usec == 0){ base_sec = tm.tv_sec; base_usec = tm.tv_usec; t = 0.0; } else{ t = (double)(tm.tv_sec-base_sec) + ((double)(tm.tv_usec-base_usec))/1.0e6; } return t; } __global__ void jacobi(float *a0, float *a1, float *a2, float *a3, float *b0, float *b1, float *b2, float *c0, float *c1, float *c2, float *p, float *wrk1, float *wrk2, float *bnd, int nn, int imax, int jmax, int kmax, float omega, float *gosa){ int i, j, k, i2, j2, k2, n, xy, c, csb; i = 1; float s0, ss, temp; //const int size = (imax-1)/(imax-1); k = threadIdx.x + (blockDim.x-2) * blockIdx.x + 1; j = threadIdx.y + (blockDim.y-2) * blockIdx.y + 1; i2 = i-1; k2 = threadIdx.x + blockDim.x * blockIdx.x; j2 = threadIdx.y + blockDim.y * blockIdx.y; const int tid = (k-1) + (j-1) * (kmax-2); xy = kmax * jmax; //__shared__ float sb[BLOCKSIZE]; //__shared__ float sb2[BLOCKSIZE]; extern __shared__ float sb[]; float *sb_t = sb; float *sb_m = sb + blockDim.x * blockDim.y; float *sb_b = sb + 2 * blockDim.x * blockDim.y; extern __shared__ float sb2[]; float *sb2_t = sb2; float *sb2_m = sb2 + blockDim.x * blockDim.y; float *sb2_b = sb2 + 2 * blockDim.x * blockDim.y; csb = threadIdx.x + threadIdx.y * blockDim.x; for(n=0;n<nn;++n){ temp=0.0; s0 = a0[i*jmax*kmax+j*kmax+k] * p[(i+1)*jmax*kmax+j*kmax+k] + a1[i*jmax*kmax+j*kmax+k] * p[i*jmax*kmax+(j+1)*kmax+k] + a2[i*jmax*kmax+j*kmax+k] * p[i*jmax*kmax+j*kmax+(k+1)] + b0[i*jmax*kmax+j*kmax+k] *(p[(i+1)*jmax*kmax+(j+1)*kmax+k] - p[(i+1)*jmax*kmax+(j-1)*kmax+k] - p[(i-1)*jmax*kmax+(j+1)*kmax+k] + p[(i-1)*jmax*kmax+(j-1)*kmax+k] ) + b1[i*jmax*kmax+j*kmax+k] *(p[i*jmax*kmax+(j+1)*kmax+(k+1)] - p[i*jmax*kmax+(j-1)*kmax+(k+1)] - p[i*jmax*kmax+(j-1)*kmax+(k-1)] + p[i*jmax*kmax+(j+1)*kmax+(k-1)]) + b2[i*jmax*kmax+j*kmax+k] *(p[(i+1)*jmax*kmax+j*kmax+(k+1)] - p[(i-1)*jmax*kmax+j*kmax+(k+1)] - p[(i+1)*jmax*kmax+j*kmax+(k-1)] + p[(i-1)*jmax*kmax+j*kmax+(k-1)] ) + c0[i*jmax*kmax+j*kmax+k] * p[(i-1)*jmax*kmax+j*kmax+k] + c1[i*jmax*kmax+j*kmax+k] * p[i*jmax*kmax+(j-1)*kmax+k] + c2[i*jmax*kmax+j*kmax+k] * p[i*jmax*kmax+j*kmax+(k-1)] + wrk1[i*jmax*kmax+j*kmax+k]; ss = (s0 * a3[i*jmax*kmax+j*kmax+k] - p[i*jmax*kmax+j*kmax+k]) * bnd[i*jmax*kmax+j*kmax+k]; temp = temp + ss*ss; wrk2[i*jmax*kmax+j*kmax+k] = p[i*jmax*kmax+j*kmax+k] + omega * ss; for(i=2 ; i<imax-1 
; ++i){ i2 = i-1; printf("%f\n", sb_m[csb]); sb_m[csb] = p[i*jmax*kmax+j*kmax+k]; if(0 < threadIdx.x && k < kmax-1 && 0 < j && j < jmax-1){ s0 = a0[i*jmax*kmax+j*kmax+k] * p[(i+1)*jmax*kmax+j*kmax+k] + a1[i*jmax*kmax+j*kmax+k] * p[i*jmax*kmax+(j+1)*kmax+k] + a2[i*jmax*kmax+j*kmax+k] * p[i*jmax*kmax+j*kmax+(k+1)] + b0[i*jmax*kmax+j*kmax+k] *(p[(i+1)*jmax*kmax+(j+1)*kmax+k] - p[(i+1)*jmax*kmax+(j-1)*kmax+k] - p[(i-1)*jmax*kmax+(j+1)*kmax+k] + p[(i-1)*jmax*kmax+(j-1)*kmax+k] ) + b1[i*jmax*kmax+j*kmax+k] *(p[i*jmax*kmax+(j+1)*kmax+(k+1)] - p[i*jmax*kmax+(j-1)*kmax+(k+1)] - p[i*jmax*kmax+(j-1)*kmax+(k-1)] + p[i*jmax*kmax+(j+1)*kmax+(k-1)]) + b2[i*jmax*kmax+j*kmax+k] *(p[(i+1)*jmax*kmax+j*kmax+(k+1)] - p[(i-1)*jmax*kmax+j*kmax+(k+1)] - p[(i+1)*jmax*kmax+j*kmax+(k-1)] + p[(i-1)*jmax*kmax+j*kmax+(k-1)] ) + c0[i*jmax*kmax+j*kmax+k] * p[(i-1)*jmax*kmax+j*kmax+k] + c1[i*jmax*kmax+j*kmax+k] * p[i*jmax*kmax+(j-1)*kmax+k] + c2[i*jmax*kmax+j*kmax+k] * p[i*jmax*kmax+j*kmax+(k-1)] + wrk1[i*jmax*kmax+j*kmax+k]; ss = (s0 * a3[i*jmax*kmax+j*kmax+k] - p[i*jmax*kmax+j*kmax+k]) * bnd[i*jmax*kmax+j*kmax+k]; temp = temp + ss*ss; wrk2[i*jmax*kmax+j*kmax+k] = p[i*jmax*kmax+j*kmax+k] + omega * ss; } sb2_m[csb] = wrk2[i*jmax*kmax+j*kmax+k]; printf("%f\n", sb2_m[csb]); __syncthreads(); if(0 < threadIdx.x && threadIdx.x < blockDim.x-1 && 0 < threadIdx.y && threadIdx.y < blockDim.y-1){ s0 = a0[i2*jmax*kmax+j*kmax+k] * wrk2[(i2+1)*jmax*kmax+j*kmax+k] + a1[i2*jmax*kmax+j*kmax+k] * wrk2[i2*jmax*kmax+(j+1)*kmax+k] + a2[i2*jmax*kmax+j*kmax+k] * wrk2[i2*jmax*kmax+j*kmax+(k+1)] + b0[i2*jmax*kmax+j*kmax+k] *(wrk2[(i2+1)*jmax*kmax+(j+1)*kmax+k] - wrk2[(i2+1)*jmax*kmax+(j-1)*kmax+k] - wrk2[(i2-1)*jmax*kmax+(j+1)*kmax+k] + wrk2[(i2-1)*jmax*kmax+(j-1)*kmax+k] ) + b1[i2*jmax*kmax+j*kmax+k] *(wrk2[i2*jmax*kmax+(j+1)*kmax+(k+1)] - wrk2[i2*jmax*kmax+(j-1)*kmax+(k+1)] - wrk2[i2*jmax*kmax+(j-1)*kmax+(k-1)] + wrk2[i2*jmax*kmax+(j+1)*kmax+(k-1)]) + b2[i2*jmax*kmax+j*kmax+k] *(wrk2[(i2+1)*jmax*kmax+j*kmax+(k+1)] - wrk2[(i2-1)*jmax*kmax+j*kmax+(k+1)] - wrk2[(i2+1)*jmax*kmax+j*kmax+(k-1)] + wrk2[(i2-1)*jmax*kmax+j*kmax+(k-1)] ) + c0[i2*jmax*kmax+j*kmax+k] * wrk2[(i2-1)*jmax*kmax+j*kmax+k] + c1[i2*jmax*kmax+j*kmax+k] * wrk2[i2*jmax*kmax+(j-1)*kmax+k] + c2[i2*jmax*kmax+j*kmax+k] * wrk2[i2*jmax*kmax+j*kmax+(k-1)] + wrk1[i2*jmax*kmax+j*kmax+k]; ss = ( s0 * a3[i2*jmax*kmax+j*kmax+k] - wrk2[i2*jmax*kmax+j*kmax+k] ) * bnd[i2*jmax*kmax+j*kmax+k]; temp = temp + ss*ss; p[i2*jmax*kmax+j*kmax+k] = wrk2[i2*jmax*kmax+j*kmax+k] + omega * ss; c += xy; } } i2 = imax-1; if(0 < threadIdx.x && threadIdx.x < blockDim.x-1 && 0 < threadIdx.y && threadIdx.y < blockDim.y-1){ s0 = a0[i2*jmax*kmax+j*kmax+k] * wrk2[(i2+1)*jmax*kmax+j*kmax+k] + a1[i2*jmax*kmax+j*kmax+k] * wrk2[i2*jmax*kmax+(j+1)*kmax+k] + a2[i2*jmax*kmax+j*kmax+k] * wrk2[i2*jmax*kmax+j*kmax+(k+1)] + b0[i2*jmax*kmax+j*kmax+k] *(wrk2[(i2+1)*jmax*kmax+(j+1)*kmax+k] - wrk2[(i2+1)*jmax*kmax+(j-1)*kmax+k] - wrk2[(i2-1)*jmax*kmax+(j+1)*kmax+k] + wrk2[(i2-1)*jmax*kmax+(j-1)*kmax+k] ) + b1[i2*jmax*kmax+j*kmax+k] *(wrk2[i2*jmax*kmax+(j+1)*kmax+(k+1)] - wrk2[i2*jmax*kmax+(j-1)*kmax+(k+1)] - wrk2[i2*jmax*kmax+(j-1)*kmax+(k-1)] + wrk2[i2*jmax*kmax+(j+1)*kmax+(k-1)]) + b2[i2*jmax*kmax+j*kmax+k] *(wrk2[(i2+1)*jmax*kmax+j*kmax+(k+1)] - wrk2[(i2-1)*jmax*kmax+j*kmax+(k+1)] - wrk2[(i2+1)*jmax*kmax+j*kmax+(k-1)] + wrk2[(i2-1)*jmax*kmax+j*kmax+(k-1)] ) + c0[i2*jmax*kmax+j*kmax+k] * wrk2[(i2-1)*jmax*kmax+j*kmax+k] + c1[i2*jmax*kmax+j*kmax+k] * wrk2[i2*jmax*kmax+(j-1)*kmax+k] + c2[i2*jmax*kmax+j*kmax+k] * 
wrk2[i2*jmax*kmax+j*kmax+(k-1)] + wrk1[i2*jmax*kmax+j*kmax+k]; ss = ( s0 * a3[i2*jmax*kmax+j*kmax+k] - wrk2[i2*jmax*kmax+j*kmax+k] ) * bnd[i2*jmax*kmax+j*kmax+k]; temp = temp + ss*ss; p[i2*jmax*kmax+j*kmax+k] = wrk2[i2*jmax*kmax+j*kmax+k] + omega * ss; } } /* end n loop */ __syncthreads(); gosa[tid] = temp; } int main(){ int i, j, k; float final_gosa; double cpu0, cpu1, nflop, xmflops2, score; float gosa[THREAD_NUM]; /************************************/ float *p; float *a0, *a1, *a2, *a3; float *b0, *b1, *b2; float *c0, *c1, *c2; float *bnd; float *wrk1, *wrk2; /************************************/ mimax = MIMAX; mjmax = MJMAX; mkmax = MKMAX; imax = MIMAX-1; jmax = MJMAX-1; kmax = MKMAX-1; //int N_IJK = MIMAX*MJMAX*MKMAX; int N_IJK = mimax*mjmax*mkmax; int WORKSIZE = THREAD_NUM*mimax; /************************************/ float *dev_p; float *dev_a0, *dev_a1, *dev_a2, *dev_a3; float *dev_b0, *dev_b1, *dev_b2; float *dev_c0, *dev_c1, *dev_c2; float *dev_bnd; float *dev_wrk1, *dev_wrk2; float *dev_gosa; /************************************/ omega = 0.8; //initial_maxtrix(); /******allocate mem on CPU***********/ a0 = (float*)malloc(sizeof(float)*N_IJK); a1 = (float*)malloc(sizeof(float)*N_IJK); a2 = (float*)malloc(sizeof(float)*N_IJK); a3 = (float*)malloc(sizeof(float)*N_IJK); b0 = (float*)malloc(sizeof(float)*N_IJK); b1 = (float*)malloc(sizeof(float)*N_IJK); b2 = (float*)malloc(sizeof(float)*N_IJK); c0 = (float*)malloc(sizeof(float)*N_IJK); c1 = (float*)malloc(sizeof(float)*N_IJK); c2 = (float*)malloc(sizeof(float)*N_IJK); p = (float*)malloc(sizeof(float)*N_IJK); wrk1 = (float*)malloc(sizeof(float)*N_IJK); wrk2 = (float*)malloc(sizeof(float)*WORKSIZE); bnd = (float*)malloc(sizeof(float)*N_IJK); //gosa = (float*)malloc(sizeof(float)); /************************************/ /******allocate mem on GPU***********/ hipMalloc((void**)&dev_a0, N_IJK*sizeof(float)); hipMalloc((void**)&dev_a1, N_IJK*sizeof(float)); hipMalloc((void**)&dev_a2, N_IJK*sizeof(float)); hipMalloc((void**)&dev_a3, N_IJK*sizeof(float)); hipMalloc((void**)&dev_b0, N_IJK*sizeof(float)); hipMalloc((void**)&dev_b1, N_IJK*sizeof(float)); hipMalloc((void**)&dev_b2, N_IJK*sizeof(float)); hipMalloc((void**)&dev_c0, N_IJK*sizeof(float)); hipMalloc((void**)&dev_c1, N_IJK*sizeof(float)); hipMalloc((void**)&dev_c2, N_IJK*sizeof(float)); hipMalloc((void**)&dev_p, N_IJK*sizeof(float)); hipMalloc((void**)&dev_bnd, N_IJK*sizeof(float)); hipMalloc((void**)&dev_wrk1, N_IJK*sizeof(float)); hipMalloc((void**)&dev_wrk2, WORKSIZE*sizeof(float)); hipMalloc((void**)&dev_gosa, sizeof(float)*THREAD_NUM); /************************************/ /*****Initialize*********************/ //int i,j,k; /* for(i=0 ; i<mimax ; ++i) for(j=0 ; j<mjmax ; ++j) for(k=0 ; k<mkmax ; ++k){ a0[i*mjmax*mkmax+j*mkmax+k]=0.0; a1[i*mjmax*mkmax+j*mkmax+k]=0.0; a2[i*mjmax*mkmax+j*mkmax+k]=0.0; a3[i*mjmax*mkmax+j*mkmax+k]=0.0; b0[i*mjmax*mkmax+j*mkmax+k]=0.0; b1[i*mjmax*mkmax+j*mkmax+k]=0.0; b2[i*mjmax*mkmax+j*mkmax+k]=0.0; c0[i*mjmax*mkmax+j*mkmax+k]=0.0; c1[i*mjmax*mkmax+j*mkmax+k]=0.0; c2[i*mjmax*mkmax+j*mkmax+k]=0.0; p[i*mjmax*mkmax+j*mkmax+k]=0.0; wrk1[i*mjmax*mkmax+j*mkmax+k]=0.0; bnd[i*mjmax*mkmax+j*mkmax+k]=0.0; } */ for(i=0 ; i<mimax ; ++i){ for(j=0 ; j<mjmax ; ++j){ for(k=0 ; k<mkmax ; ++k){ a0[i*mjmax*mkmax+j*mkmax+k]=1.0; a1[i*mjmax*mkmax+j*mkmax+k]=1.0; a2[i*mjmax*mkmax+j*mkmax+k]=1.0; a3[i*mjmax*mkmax+j*mkmax+k]=1.0/6.0; b0[i*mjmax*mkmax+j*mkmax+k]=0.0; b1[i*mjmax*mkmax+j*mkmax+k]=0.0; b2[i*mjmax*mkmax+j*mkmax+k]=0.0; c0[i*mjmax*mkmax+j*mkmax+k]=1.0; 
c1[i*mjmax*mkmax+j*mkmax+k]=1.0; c2[i*mjmax*mkmax+j*mkmax+k]=1.0; p[i*mjmax*mkmax+j*mkmax+k]=(float)(i*i)/(float)(imax*imax); wrk1[i*mjmax*mkmax+j*mkmax+k]=0.0; bnd[i*mjmax*mkmax+j*mkmax+k]=1.0; } } } /************************************/ /*****copy array to device mem*******/ hipMemcpy(dev_a0, a0, N_IJK*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(dev_a1, a1, N_IJK*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(dev_a2, a2, N_IJK*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(dev_a3, a3, N_IJK*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(dev_b0, b0, N_IJK*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(dev_b1, b1, N_IJK*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(dev_b2, b2, N_IJK*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(dev_c0, c0, N_IJK*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(dev_c1, c1, N_IJK*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(dev_c2, c2, N_IJK*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(dev_wrk1, wrk1, N_IJK*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(dev_wrk2, wrk2, WORKSIZE*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(dev_bnd, bnd, N_IJK*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(dev_p, p, N_IJK*sizeof(float), hipMemcpyHostToDevice); //hipMemcpy(dev_gosa, gosa, sizeof(float), hipMemcpyHostToDevice); /************************************/ printf("mimax = %d mjmax = %d mkmax = %d\n", MIMAX, MJMAX, MKMAX); printf("imax = %d jmax = %d kmax = %d\n", imax, jmax, kmax); cpu0 = second(); /**measuring**/ dim3 block(BLOCKSIZEX, BLOCKSIZEY, 1); dim3 grid(GRIDSIZEX, GRIDSIZEY, 1); hipLaunchKernelGGL(( jacobi), dim3(grid), dim3(block), sizeof(float) * BLOCKSIZE * 6, 0, dev_a0, dev_a1, dev_a2, dev_a3, dev_b0, dev_b1, dev_b2, dev_c0, dev_c1, dev_c2, dev_p, dev_wrk1, dev_wrk2, dev_bnd, NN, mimax, mjmax, mkmax, omega, dev_gosa); hipDeviceSynchronize(); cpu1 = second(); hipMemcpy(&gosa, dev_gosa, sizeof(float)*THREAD_NUM, hipMemcpyDeviceToHost); /******Free mem on the GPU**********/ hipFree(dev_a0); hipFree(dev_a1); hipFree(dev_a2); hipFree(dev_a3); hipFree(dev_b0); hipFree(dev_b1); hipFree(dev_b2); hipFree(dev_c0); hipFree(dev_c1); hipFree(dev_c2); hipFree(dev_p); hipFree(dev_wrk1); hipFree(dev_wrk2); hipFree(dev_bnd); hipFree(dev_gosa); /************************************/ /********Final sum of gosa***********/ for(int gosa_index=0; gosa_index<THREAD_NUM; gosa_index++){ //printf("%f\n", gosa[gosa_index]); final_gosa += gosa[gosa_index]; //printf("Gosa%d: %e \n", gosa_index, gosa[gosa_index]); } /************************************/ nflop = (kmax-2)*(jmax-2)*(imax-2)*34; if(cpu1 != 0.0){ xmflops2 = nflop/cpu1*1.0e-6*(float)NN; } score = xmflops2/32.27; printf("gpu: %f sec.\n", cpu1); printf("Loop executed for %d times\n", NN); printf("Gosa: %e \n", final_gosa); //printf("MFLOPS measured: %f\n", xmflops2); //printf("Score: %f\n", score); return(0); }
2f51c8498fa5c59f468cd9bf088c5aad0491109e.cu
#include<stdio.h> #include<sys/time.h> /* #define BLOCKSIZEX 64 #define BLOCKSIZEY 16 #define BLOCKSIZE BLOCKSIZEX * BLOCKSIZEY #define GRIDSIZEX 8 #define GRIDSIZEY 16 #define GRIDSIZE GRIDSIZEX * GRIDSIZEY #define THREAD_NUM BLOCKSIZE * GRIDSIZE #define MIMAX 256 #define MJMAX GRIDSIZEY * (BLOCKSIZEY - 2) + 2 #define MKMAX GRIDSIZEX * (BLOCKSIZEX - 2) + 2 #define NN 750 */ #define BLOCKSIZEX 6 #define BLOCKSIZEY 4 #define BLOCKSIZE BLOCKSIZEX * BLOCKSIZEY #define GRIDSIZEX 3 #define GRIDSIZEY 6 #define GRIDSIZE GRIDSIZEX * GRIDSIZEY #define THREAD_NUM BLOCKSIZE * GRIDSIZE #define MIMAX 6 #define MJMAX GRIDSIZEY * (BLOCKSIZEY - 2) + 2 #define MKMAX GRIDSIZEX * (BLOCKSIZEX - 2) + 2 #define NN 3 /*static float p[MIMAX][MJMAX][MKMAX]; static float a[MIMAX][MJMAX][MKMAX][4]; static float b[MIMAX][MJMAX][MKMAX][3]; static float c[MIMAX][MJMAX][MKMAX][3]; static float bnd[MIMAX][MJMAX][MKMAX]; static float work1[MIMAX][MJMAX][MKMAX]; static float work2[MIMAX][MJMAX][MKMAX];*/ static int imax, jmax, kmax, mimax, mjmax, mkmax; static float omega; double second(){ struct timeval tm; double t; static int base_sec = 0, base_usec = 0; gettimeofday(&tm, NULL); if(base_sec == 0 && base_usec == 0){ base_sec = tm.tv_sec; base_usec = tm.tv_usec; t = 0.0; } else{ t = (double)(tm.tv_sec-base_sec) + ((double)(tm.tv_usec-base_usec))/1.0e6; } return t; } __global__ void jacobi(float *a0, float *a1, float *a2, float *a3, float *b0, float *b1, float *b2, float *c0, float *c1, float *c2, float *p, float *wrk1, float *wrk2, float *bnd, int nn, int imax, int jmax, int kmax, float omega, float *gosa){ int i, j, k, i2, j2, k2, n, xy, c, csb; i = 1; float s0, ss, temp; //const int size = (imax-1)/(imax-1); k = threadIdx.x + (blockDim.x-2) * blockIdx.x + 1; j = threadIdx.y + (blockDim.y-2) * blockIdx.y + 1; i2 = i-1; k2 = threadIdx.x + blockDim.x * blockIdx.x; j2 = threadIdx.y + blockDim.y * blockIdx.y; const int tid = (k-1) + (j-1) * (kmax-2); xy = kmax * jmax; //__shared__ float sb[BLOCKSIZE]; //__shared__ float sb2[BLOCKSIZE]; extern __shared__ float sb[]; float *sb_t = sb; float *sb_m = sb + blockDim.x * blockDim.y; float *sb_b = sb + 2 * blockDim.x * blockDim.y; extern __shared__ float sb2[]; float *sb2_t = sb2; float *sb2_m = sb2 + blockDim.x * blockDim.y; float *sb2_b = sb2 + 2 * blockDim.x * blockDim.y; csb = threadIdx.x + threadIdx.y * blockDim.x; for(n=0;n<nn;++n){ temp=0.0; s0 = a0[i*jmax*kmax+j*kmax+k] * p[(i+1)*jmax*kmax+j*kmax+k] + a1[i*jmax*kmax+j*kmax+k] * p[i*jmax*kmax+(j+1)*kmax+k] + a2[i*jmax*kmax+j*kmax+k] * p[i*jmax*kmax+j*kmax+(k+1)] + b0[i*jmax*kmax+j*kmax+k] *(p[(i+1)*jmax*kmax+(j+1)*kmax+k] - p[(i+1)*jmax*kmax+(j-1)*kmax+k] - p[(i-1)*jmax*kmax+(j+1)*kmax+k] + p[(i-1)*jmax*kmax+(j-1)*kmax+k] ) + b1[i*jmax*kmax+j*kmax+k] *(p[i*jmax*kmax+(j+1)*kmax+(k+1)] - p[i*jmax*kmax+(j-1)*kmax+(k+1)] - p[i*jmax*kmax+(j-1)*kmax+(k-1)] + p[i*jmax*kmax+(j+1)*kmax+(k-1)]) + b2[i*jmax*kmax+j*kmax+k] *(p[(i+1)*jmax*kmax+j*kmax+(k+1)] - p[(i-1)*jmax*kmax+j*kmax+(k+1)] - p[(i+1)*jmax*kmax+j*kmax+(k-1)] + p[(i-1)*jmax*kmax+j*kmax+(k-1)] ) + c0[i*jmax*kmax+j*kmax+k] * p[(i-1)*jmax*kmax+j*kmax+k] + c1[i*jmax*kmax+j*kmax+k] * p[i*jmax*kmax+(j-1)*kmax+k] + c2[i*jmax*kmax+j*kmax+k] * p[i*jmax*kmax+j*kmax+(k-1)] + wrk1[i*jmax*kmax+j*kmax+k]; ss = (s0 * a3[i*jmax*kmax+j*kmax+k] - p[i*jmax*kmax+j*kmax+k]) * bnd[i*jmax*kmax+j*kmax+k]; temp = temp + ss*ss; wrk2[i*jmax*kmax+j*kmax+k] = p[i*jmax*kmax+j*kmax+k] + omega * ss; for(i=2 ; i<imax-1 ; ++i){ i2 = i-1; printf("%f\n", sb_m[csb]); sb_m[csb] = p[i*jmax*kmax+j*kmax+k]; if(0 < 
threadIdx.x && k < kmax-1 && 0 < j && j < jmax-1){ s0 = a0[i*jmax*kmax+j*kmax+k] * p[(i+1)*jmax*kmax+j*kmax+k] + a1[i*jmax*kmax+j*kmax+k] * p[i*jmax*kmax+(j+1)*kmax+k] + a2[i*jmax*kmax+j*kmax+k] * p[i*jmax*kmax+j*kmax+(k+1)] + b0[i*jmax*kmax+j*kmax+k] *(p[(i+1)*jmax*kmax+(j+1)*kmax+k] - p[(i+1)*jmax*kmax+(j-1)*kmax+k] - p[(i-1)*jmax*kmax+(j+1)*kmax+k] + p[(i-1)*jmax*kmax+(j-1)*kmax+k] ) + b1[i*jmax*kmax+j*kmax+k] *(p[i*jmax*kmax+(j+1)*kmax+(k+1)] - p[i*jmax*kmax+(j-1)*kmax+(k+1)] - p[i*jmax*kmax+(j-1)*kmax+(k-1)] + p[i*jmax*kmax+(j+1)*kmax+(k-1)]) + b2[i*jmax*kmax+j*kmax+k] *(p[(i+1)*jmax*kmax+j*kmax+(k+1)] - p[(i-1)*jmax*kmax+j*kmax+(k+1)] - p[(i+1)*jmax*kmax+j*kmax+(k-1)] + p[(i-1)*jmax*kmax+j*kmax+(k-1)] ) + c0[i*jmax*kmax+j*kmax+k] * p[(i-1)*jmax*kmax+j*kmax+k] + c1[i*jmax*kmax+j*kmax+k] * p[i*jmax*kmax+(j-1)*kmax+k] + c2[i*jmax*kmax+j*kmax+k] * p[i*jmax*kmax+j*kmax+(k-1)] + wrk1[i*jmax*kmax+j*kmax+k]; ss = (s0 * a3[i*jmax*kmax+j*kmax+k] - p[i*jmax*kmax+j*kmax+k]) * bnd[i*jmax*kmax+j*kmax+k]; temp = temp + ss*ss; wrk2[i*jmax*kmax+j*kmax+k] = p[i*jmax*kmax+j*kmax+k] + omega * ss; } sb2_m[csb] = wrk2[i*jmax*kmax+j*kmax+k]; printf("%f\n", sb2_m[csb]); __syncthreads(); if(0 < threadIdx.x && threadIdx.x < blockDim.x-1 && 0 < threadIdx.y && threadIdx.y < blockDim.y-1){ s0 = a0[i2*jmax*kmax+j*kmax+k] * wrk2[(i2+1)*jmax*kmax+j*kmax+k] + a1[i2*jmax*kmax+j*kmax+k] * wrk2[i2*jmax*kmax+(j+1)*kmax+k] + a2[i2*jmax*kmax+j*kmax+k] * wrk2[i2*jmax*kmax+j*kmax+(k+1)] + b0[i2*jmax*kmax+j*kmax+k] *(wrk2[(i2+1)*jmax*kmax+(j+1)*kmax+k] - wrk2[(i2+1)*jmax*kmax+(j-1)*kmax+k] - wrk2[(i2-1)*jmax*kmax+(j+1)*kmax+k] + wrk2[(i2-1)*jmax*kmax+(j-1)*kmax+k] ) + b1[i2*jmax*kmax+j*kmax+k] *(wrk2[i2*jmax*kmax+(j+1)*kmax+(k+1)] - wrk2[i2*jmax*kmax+(j-1)*kmax+(k+1)] - wrk2[i2*jmax*kmax+(j-1)*kmax+(k-1)] + wrk2[i2*jmax*kmax+(j+1)*kmax+(k-1)]) + b2[i2*jmax*kmax+j*kmax+k] *(wrk2[(i2+1)*jmax*kmax+j*kmax+(k+1)] - wrk2[(i2-1)*jmax*kmax+j*kmax+(k+1)] - wrk2[(i2+1)*jmax*kmax+j*kmax+(k-1)] + wrk2[(i2-1)*jmax*kmax+j*kmax+(k-1)] ) + c0[i2*jmax*kmax+j*kmax+k] * wrk2[(i2-1)*jmax*kmax+j*kmax+k] + c1[i2*jmax*kmax+j*kmax+k] * wrk2[i2*jmax*kmax+(j-1)*kmax+k] + c2[i2*jmax*kmax+j*kmax+k] * wrk2[i2*jmax*kmax+j*kmax+(k-1)] + wrk1[i2*jmax*kmax+j*kmax+k]; ss = ( s0 * a3[i2*jmax*kmax+j*kmax+k] - wrk2[i2*jmax*kmax+j*kmax+k] ) * bnd[i2*jmax*kmax+j*kmax+k]; temp = temp + ss*ss; p[i2*jmax*kmax+j*kmax+k] = wrk2[i2*jmax*kmax+j*kmax+k] + omega * ss; c += xy; } } i2 = imax-1; if(0 < threadIdx.x && threadIdx.x < blockDim.x-1 && 0 < threadIdx.y && threadIdx.y < blockDim.y-1){ s0 = a0[i2*jmax*kmax+j*kmax+k] * wrk2[(i2+1)*jmax*kmax+j*kmax+k] + a1[i2*jmax*kmax+j*kmax+k] * wrk2[i2*jmax*kmax+(j+1)*kmax+k] + a2[i2*jmax*kmax+j*kmax+k] * wrk2[i2*jmax*kmax+j*kmax+(k+1)] + b0[i2*jmax*kmax+j*kmax+k] *(wrk2[(i2+1)*jmax*kmax+(j+1)*kmax+k] - wrk2[(i2+1)*jmax*kmax+(j-1)*kmax+k] - wrk2[(i2-1)*jmax*kmax+(j+1)*kmax+k] + wrk2[(i2-1)*jmax*kmax+(j-1)*kmax+k] ) + b1[i2*jmax*kmax+j*kmax+k] *(wrk2[i2*jmax*kmax+(j+1)*kmax+(k+1)] - wrk2[i2*jmax*kmax+(j-1)*kmax+(k+1)] - wrk2[i2*jmax*kmax+(j-1)*kmax+(k-1)] + wrk2[i2*jmax*kmax+(j+1)*kmax+(k-1)]) + b2[i2*jmax*kmax+j*kmax+k] *(wrk2[(i2+1)*jmax*kmax+j*kmax+(k+1)] - wrk2[(i2-1)*jmax*kmax+j*kmax+(k+1)] - wrk2[(i2+1)*jmax*kmax+j*kmax+(k-1)] + wrk2[(i2-1)*jmax*kmax+j*kmax+(k-1)] ) + c0[i2*jmax*kmax+j*kmax+k] * wrk2[(i2-1)*jmax*kmax+j*kmax+k] + c1[i2*jmax*kmax+j*kmax+k] * wrk2[i2*jmax*kmax+(j-1)*kmax+k] + c2[i2*jmax*kmax+j*kmax+k] * wrk2[i2*jmax*kmax+j*kmax+(k-1)] + wrk1[i2*jmax*kmax+j*kmax+k]; ss = ( s0 * a3[i2*jmax*kmax+j*kmax+k] - 
wrk2[i2*jmax*kmax+j*kmax+k] ) * bnd[i2*jmax*kmax+j*kmax+k]; temp = temp + ss*ss; p[i2*jmax*kmax+j*kmax+k] = wrk2[i2*jmax*kmax+j*kmax+k] + omega * ss; } } /* end n loop */ __syncthreads(); gosa[tid] = temp; } int main(){ int i, j, k; float final_gosa; double cpu0, cpu1, nflop, xmflops2, score; float gosa[THREAD_NUM]; /************************************/ float *p; float *a0, *a1, *a2, *a3; float *b0, *b1, *b2; float *c0, *c1, *c2; float *bnd; float *wrk1, *wrk2; /************************************/ mimax = MIMAX; mjmax = MJMAX; mkmax = MKMAX; imax = MIMAX-1; jmax = MJMAX-1; kmax = MKMAX-1; //int N_IJK = MIMAX*MJMAX*MKMAX; int N_IJK = mimax*mjmax*mkmax; int WORKSIZE = THREAD_NUM*mimax; /************************************/ float *dev_p; float *dev_a0, *dev_a1, *dev_a2, *dev_a3; float *dev_b0, *dev_b1, *dev_b2; float *dev_c0, *dev_c1, *dev_c2; float *dev_bnd; float *dev_wrk1, *dev_wrk2; float *dev_gosa; /************************************/ omega = 0.8; //initial_maxtrix(); /******allocate mem on CPU***********/ a0 = (float*)malloc(sizeof(float)*N_IJK); a1 = (float*)malloc(sizeof(float)*N_IJK); a2 = (float*)malloc(sizeof(float)*N_IJK); a3 = (float*)malloc(sizeof(float)*N_IJK); b0 = (float*)malloc(sizeof(float)*N_IJK); b1 = (float*)malloc(sizeof(float)*N_IJK); b2 = (float*)malloc(sizeof(float)*N_IJK); c0 = (float*)malloc(sizeof(float)*N_IJK); c1 = (float*)malloc(sizeof(float)*N_IJK); c2 = (float*)malloc(sizeof(float)*N_IJK); p = (float*)malloc(sizeof(float)*N_IJK); wrk1 = (float*)malloc(sizeof(float)*N_IJK); wrk2 = (float*)malloc(sizeof(float)*WORKSIZE); bnd = (float*)malloc(sizeof(float)*N_IJK); //gosa = (float*)malloc(sizeof(float)); /************************************/ /******allocate mem on GPU***********/ cudaMalloc((void**)&dev_a0, N_IJK*sizeof(float)); cudaMalloc((void**)&dev_a1, N_IJK*sizeof(float)); cudaMalloc((void**)&dev_a2, N_IJK*sizeof(float)); cudaMalloc((void**)&dev_a3, N_IJK*sizeof(float)); cudaMalloc((void**)&dev_b0, N_IJK*sizeof(float)); cudaMalloc((void**)&dev_b1, N_IJK*sizeof(float)); cudaMalloc((void**)&dev_b2, N_IJK*sizeof(float)); cudaMalloc((void**)&dev_c0, N_IJK*sizeof(float)); cudaMalloc((void**)&dev_c1, N_IJK*sizeof(float)); cudaMalloc((void**)&dev_c2, N_IJK*sizeof(float)); cudaMalloc((void**)&dev_p, N_IJK*sizeof(float)); cudaMalloc((void**)&dev_bnd, N_IJK*sizeof(float)); cudaMalloc((void**)&dev_wrk1, N_IJK*sizeof(float)); cudaMalloc((void**)&dev_wrk2, WORKSIZE*sizeof(float)); cudaMalloc((void**)&dev_gosa, sizeof(float)*THREAD_NUM); /************************************/ /*****Initialize*********************/ //int i,j,k; /* for(i=0 ; i<mimax ; ++i) for(j=0 ; j<mjmax ; ++j) for(k=0 ; k<mkmax ; ++k){ a0[i*mjmax*mkmax+j*mkmax+k]=0.0; a1[i*mjmax*mkmax+j*mkmax+k]=0.0; a2[i*mjmax*mkmax+j*mkmax+k]=0.0; a3[i*mjmax*mkmax+j*mkmax+k]=0.0; b0[i*mjmax*mkmax+j*mkmax+k]=0.0; b1[i*mjmax*mkmax+j*mkmax+k]=0.0; b2[i*mjmax*mkmax+j*mkmax+k]=0.0; c0[i*mjmax*mkmax+j*mkmax+k]=0.0; c1[i*mjmax*mkmax+j*mkmax+k]=0.0; c2[i*mjmax*mkmax+j*mkmax+k]=0.0; p[i*mjmax*mkmax+j*mkmax+k]=0.0; wrk1[i*mjmax*mkmax+j*mkmax+k]=0.0; bnd[i*mjmax*mkmax+j*mkmax+k]=0.0; } */ for(i=0 ; i<mimax ; ++i){ for(j=0 ; j<mjmax ; ++j){ for(k=0 ; k<mkmax ; ++k){ a0[i*mjmax*mkmax+j*mkmax+k]=1.0; a1[i*mjmax*mkmax+j*mkmax+k]=1.0; a2[i*mjmax*mkmax+j*mkmax+k]=1.0; a3[i*mjmax*mkmax+j*mkmax+k]=1.0/6.0; b0[i*mjmax*mkmax+j*mkmax+k]=0.0; b1[i*mjmax*mkmax+j*mkmax+k]=0.0; b2[i*mjmax*mkmax+j*mkmax+k]=0.0; c0[i*mjmax*mkmax+j*mkmax+k]=1.0; c1[i*mjmax*mkmax+j*mkmax+k]=1.0; c2[i*mjmax*mkmax+j*mkmax+k]=1.0; 
p[i*mjmax*mkmax+j*mkmax+k]=(float)(i*i)/(float)(imax*imax); wrk1[i*mjmax*mkmax+j*mkmax+k]=0.0; bnd[i*mjmax*mkmax+j*mkmax+k]=1.0; } } } /************************************/ /*****copy array to device mem*******/ cudaMemcpy(dev_a0, a0, N_IJK*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(dev_a1, a1, N_IJK*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(dev_a2, a2, N_IJK*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(dev_a3, a3, N_IJK*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(dev_b0, b0, N_IJK*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(dev_b1, b1, N_IJK*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(dev_b2, b2, N_IJK*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(dev_c0, c0, N_IJK*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(dev_c1, c1, N_IJK*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(dev_c2, c2, N_IJK*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(dev_wrk1, wrk1, N_IJK*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(dev_wrk2, wrk2, WORKSIZE*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(dev_bnd, bnd, N_IJK*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(dev_p, p, N_IJK*sizeof(float), cudaMemcpyHostToDevice); //cudaMemcpy(dev_gosa, gosa, sizeof(float), cudaMemcpyHostToDevice); /************************************/ printf("mimax = %d mjmax = %d mkmax = %d\n", MIMAX, MJMAX, MKMAX); printf("imax = %d jmax = %d kmax = %d\n", imax, jmax, kmax); cpu0 = second(); /**measuring**/ dim3 block(BLOCKSIZEX, BLOCKSIZEY, 1); dim3 grid(GRIDSIZEX, GRIDSIZEY, 1); jacobi<<<grid, block, sizeof(float) * BLOCKSIZE * 6>>>(dev_a0, dev_a1, dev_a2, dev_a3, dev_b0, dev_b1, dev_b2, dev_c0, dev_c1, dev_c2, dev_p, dev_wrk1, dev_wrk2, dev_bnd, NN, mimax, mjmax, mkmax, omega, dev_gosa); cudaDeviceSynchronize(); cpu1 = second(); cudaMemcpy(&gosa, dev_gosa, sizeof(float)*THREAD_NUM, cudaMemcpyDeviceToHost); /******Free mem on the GPU**********/ cudaFree(dev_a0); cudaFree(dev_a1); cudaFree(dev_a2); cudaFree(dev_a3); cudaFree(dev_b0); cudaFree(dev_b1); cudaFree(dev_b2); cudaFree(dev_c0); cudaFree(dev_c1); cudaFree(dev_c2); cudaFree(dev_p); cudaFree(dev_wrk1); cudaFree(dev_wrk2); cudaFree(dev_bnd); cudaFree(dev_gosa); /************************************/ /********Final sum of gosa***********/ for(int gosa_index=0; gosa_index<THREAD_NUM; gosa_index++){ //printf("%f\n", gosa[gosa_index]); final_gosa += gosa[gosa_index]; //printf("Gosa%d: %e \n", gosa_index, gosa[gosa_index]); } /************************************/ nflop = (kmax-2)*(jmax-2)*(imax-2)*34; if(cpu1 != 0.0){ xmflops2 = nflop/cpu1*1.0e-6*(float)NN; } score = xmflops2/32.27; printf("gpu: %f sec.\n", cpu1); printf("Loop executed for %d times\n", NN); printf("Gosa: %e \n", final_gosa); //printf("MFLOPS measured: %f\n", xmflops2); //printf("Score: %f\n", score); return(0); }
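A note on the jacobi kernel in both versions above: sb and sb2 are each declared extern __shared__, and CUDA and HIP place every extern __shared__ declaration of a kernel at the start of the same dynamic shared-memory allocation, so the two buffers alias each other rather than forming six independent planes. The standalone snippet below is an illustration added here (not part of the benchmark) that makes the aliasing visible:

#include <cstdio>
#include <cuda_runtime.h>

// Minimal demonstration that two extern __shared__ declarations in one
// kernel refer to the same bytes of dynamic shared memory.
__global__ void alias_demo()
{
    extern __shared__ float sb[];
    extern __shared__ float sb2[];   // same base address as sb
    sb[threadIdx.x] = (float)threadIdx.x;
    __syncthreads();
    if (threadIdx.x == 0)
        printf("sb=%p sb2=%p sb2[1]=%f\n", (void*)sb, (void*)sb2, sb2[1]);
}

int main()
{
    alias_demo<<<1, 4, 4 * sizeof(float)>>>();
    cudaDeviceSynchronize();
    return 0;
}

The usual remedy is to declare a single extern __shared__ array sized for all six planes and derive sb and sb2 from it by offset. Separately, the host code in both versions accumulates the per-thread residuals into final_gosa without first setting it to zero, so the accumulator should be initialized with final_gosa = 0.0f before the summation loop.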
d7d5ac8e3bf7e7a2fc3eb564ab5ba82a8e394560.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdint.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "tuple.h"

extern "C" {

__global__
void join(
          TUPLE *lt,
          TUPLE *rt,
          JOIN_TUPLE *p,
          int *count,
          int ltn,
          int rtn
          )
{

  int j,k;
  // be careful not to mix up the i and j directions
  /*
   * the x axis runs vertically and the y axis runs horizontally,
   * so x corresponds to the left table and y to the right table
   */
  //int j = blockIdx.x * blockDim.x + threadIdx.x;
  int i = blockIdx.y * blockDim.y + threadIdx.y;

  __shared__ TUPLE Tleft[BLOCK_SIZE_X];
  if(threadIdx.y==0){
    for(j=0;(j<BLOCK_SIZE_X)&&((j+BLOCK_SIZE_X*blockIdx.x)<ltn);j++){
      Tleft[j] = lt[j + BLOCK_SIZE_X * blockIdx.x];
    }
  }
  __syncthreads();

  TUPLE Tright = rt[i];

  //the first write location
  int writeloc = 0;
  if(i != 0){
    writeloc = count[i + blockIdx.x*blockDim.y*gridDim.y -1];
  }

  for(j = 0; j<BLOCK_SIZE_X &&((j+BLOCK_SIZE_X*blockIdx.x)<ltn);j++){
    if(i<rtn){
      if(&(Tleft[j])==NULL||&(Tright)==NULL||&(p[writeloc])==NULL){
        printf("memory error in .cu.\n");
        return;// -1;
      }
      if((Tleft[j].val[0]==Tright.val[0])) {
        for(k=0; k<VAL_NUM; k++) {
          p[writeloc].lval[k] = Tleft[j].val[k];
          p[writeloc].rval[k] = Tright.val[k];
        }
        // lid & rid are just for debug
        p[writeloc].lid = Tleft[j].id;
        p[writeloc].rid = Tright.id;
        writeloc++;
      }
    }
  }
}

}
d7d5ac8e3bf7e7a2fc3eb564ab5ba82a8e394560.cu
#include <stdio.h>
#include <stdint.h>
#include <cuda.h>
#include <sys/time.h>
#include "tuple.h"

extern "C" {

__global__
void join(
          TUPLE *lt,
          TUPLE *rt,
          JOIN_TUPLE *p,
          int *count,
          int ltn,
          int rtn
          )
{

  int j,k;
  // be careful not to mix up the i and j directions
  /*
   * the x axis runs vertically and the y axis runs horizontally,
   * so x corresponds to the left table and y to the right table
   */
  //int j = blockIdx.x * blockDim.x + threadIdx.x;
  int i = blockIdx.y * blockDim.y + threadIdx.y;

  __shared__ TUPLE Tleft[BLOCK_SIZE_X];
  if(threadIdx.y==0){
    for(j=0;(j<BLOCK_SIZE_X)&&((j+BLOCK_SIZE_X*blockIdx.x)<ltn);j++){
      Tleft[j] = lt[j + BLOCK_SIZE_X * blockIdx.x];
    }
  }
  __syncthreads();

  TUPLE Tright = rt[i];

  //the first write location
  int writeloc = 0;
  if(i != 0){
    writeloc = count[i + blockIdx.x*blockDim.y*gridDim.y -1];
  }

  for(j = 0; j<BLOCK_SIZE_X &&((j+BLOCK_SIZE_X*blockIdx.x)<ltn);j++){
    if(i<rtn){
      if(&(Tleft[j])==NULL||&(Tright)==NULL||&(p[writeloc])==NULL){
        printf("memory error in .cu.\n");
        return;// -1;
      }
      if((Tleft[j].val[0]==Tright.val[0])) {
        for(k=0; k<VAL_NUM; k++) {
          p[writeloc].lval[k] = Tleft[j].val[k];
          p[writeloc].rval[k] = Tright.val[k];
        }
        // lid & rid are just for debug
        p[writeloc].lid = Tleft[j].id;
        p[writeloc].rid = Tright.id;
        writeloc++;
      }
    }
  }
}

}
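The join kernel above takes its first write offset from count[slot - 1], which presupposes that a separate counting pass has already produced running totals of matches per (block, right-tuple) slot; that pass is not part of this file. A small host-side sketch of how such a count array could be built is given below; build_count_offsets and matches_per_slot are illustrative names, not identifiers from the original code:

#include <cstdio>
#include <vector>

// Hypothetical helper: turn the per-slot match counts produced by a prior
// counting pass into the inclusive running totals that the join kernel
// indexes with count[slot - 1] to find where a slot's results start.
static std::vector<int> build_count_offsets(const std::vector<int> &matches_per_slot)
{
    std::vector<int> count(matches_per_slot.size());
    int running = 0;
    for (size_t s = 0; s < matches_per_slot.size(); ++s) {
        running += matches_per_slot[s];
        count[s] = running;   // slot s starts writing at count[s - 1] (0 when s == 0)
    }
    return count;             // count.back() is the total number of join results
}

int main()
{
    std::vector<int> matches = {2, 0, 3, 1};              // example per-slot match counts
    std::vector<int> count = build_count_offsets(matches);
    for (size_t s = 0; s < count.size(); ++s)
        printf("%d ", count[s]);                          // prints: 2 2 5 6
    printf("\n");
    return 0;
}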
b5b8c7b275022971e618c3300bd9982f4fd6e52c.hip
// !!! This is a file automatically generated by hipify!!! #include "iostream" #include "hip/hip_runtime.h" #include "hip/hip_runtime.h" #include "rocblas.h" #include "cublas_api.h" #include "pch.h" //#include "stdafx.h" #include "cv.h" //#include <process.h> //#include "CameraApi.h" #include "LaserRange.h" //#include "afxwin.h" //#include "windows.h" #include "math.h" //#include "cstdlib" //#include "sstream" //#include "ImProcess.h" //#include "opencv2/core/core.hpp" //#include "opencv2/imgproc/imgproc.hpp" //#include "opencv2/calib3d/calib3d.hpp" #include "CudaTest.h" #include <device_launch_parameters.h> #include <hip/device_functions.h> //#include <stdio.h> //#include <stdlib.h> #include <opencv2/opencv.hpp> #include <opencv2/core/cuda.hpp> //#include "CudaKernal.cuh" using namespace std; using namespace cv; using namespace cv::cuda; //extern "C" void GetGaussFitCuda(GpuMat gpuMat, MPoint *point, double maxError, double minError, int yRange, int Colonce); // a[M][N]*b[N][S] hipError_t checkCudaError(hipError_t CudaFunction,const char* ident) { hipError_t err = CudaFunction; if (err != hipSuccess) { fprintf(stderr, "%s \t hipError_t:%s\n",ident,hipGetErrorString(hipGetLastError())); } return err; } //Coloncel __global__ void GetGaussPointCuda(PtrStepSz<uchar1> src, MPoint *point, int **gpu_data, int Colonce, int Rows, int Cols) { int threadId = threadIdx.x; //printf("%d\n",threadId); //__shared__ int *gpu_cr; //gpu_cr = new int [Rows*Cols]; // for (int j = 0; j < Rows; j++) { for (int i = 0; i < Colonce; i++) { gpu_data[i + threadId * Colonce][j] = (int)src(j, threadId*Colonce + i).x; } } //int i = 0, j = 0; //gpu_cr[i*Rows + j] = *((int*)&src( j, threadId*Colonce + i)); // for (int i = 0; i < Colonce; i++) { int MaxPixel = gpu_data[i + threadId * Colonce][0]; //printf("the first pixel is %d \n", MaxPixel); int MaxY = 0; for (int j = 1; j < Rows; j++) { if (gpu_data[i + threadId * Colonce][j] > MaxPixel) { MaxPixel = gpu_data[i + threadId * Colonce][j]; MaxY = j; } } point[threadId*Colonce + i].x = threadId * Colonce + i; point[threadId*Colonce + i].y = MaxY; point[threadId*Colonce + i].bright = MaxPixel; } __syncthreads(); } // __global__ void GetGaussFitRes(MPoint *point, MatrixUnion *gpu_mar,int **gpu_data, double maxError, double minError, int yRange, int Rows, int Cols,int Precision) { //blockthread int threadId = blockIdx.x*blockDim.x + threadIdx.x; // if (threadId < Cols) { //// //int *y; // //int *br; // GPoint *gpoint = new GPoint[2 * yRange]; int Pixnum = 0; // // double minLine = minError * point[threadId].bright; double maxLine = (1-maxError) * point[threadId].bright; // for (int i = (point[threadId].y - yRange); i < (point[threadId].y+yRange+1); i++) { if ((gpu_data[threadId][i] > minLine)&&(gpu_data[threadId][i] < maxLine)) { gpoint[Pixnum].x = i; gpoint[Pixnum].brightness = gpu_data[threadId][i]; Pixnum++; } } point[threadId].Pixnum = Pixnum; //__shared__ MatrixUnion *gpu_mar; //3 if (Pixnum > 3) { // int n = Pixnum; //X1 x x^2 n*3 gpu_mar[threadId].X = new long long int *[n]; for (int i = 0; i < n; i++) { gpu_mar[threadId].X[i] = new long long int[3]; } //XT X 3*n gpu_mar[threadId].XT = new long long int *[3]; for (int i = 0; i < 3; i++) { gpu_mar[threadId].XT[i] = new long long int[n]; } //Z(brightness) n*1 gpu_mar[threadId].Z = new long long int[n]; //B 3*1 gpu_mar[threadId].B = new long long int[3]; //SA XT*X 3*3 gpu_mar[threadId].SA = new long long int *[3]; for (int i = 0; i < 3; i++) { gpu_mar[threadId].SA[i] = new long long int[3]; } //SAN SA 3*3 
gpu_mar[threadId].SAN = new double *[3]; for (int i = 0; i < 3; i++) { gpu_mar[threadId].SAN[i] = new double[3]; } //SC SAN*XT 3*n gpu_mar[threadId].SC = new double *[3]; for (int i = 0; i < 3; i++) { gpu_mar[threadId].SC[i] = new double [n]; } /* //X1 x x^2 n*3 double **X = new double*[n]; for (int i = 0; i < n; i++) { X[i] = new double[3]; } //XT X 3*n double **XT = new double*[3]; for (int i = 0; i < 3; i++) { XT[i] = new double[n]; } //Z(brightness) n*1 double *Z = new double[n]; //B 3*1 double *B = new double[3]; //SA XT*X 3*3 double **SA = new double*[3]; for (int i = 0; i < 3; i++) { SA[i] = new double[3]; } //SAN SA 3*3 double **SAN = new double*[3]; for (int i = 0; i < 3; i++) { SAN[i] = new double[3]; } */ //XZ XT for (int i = 0; i < n; i++) { gpu_mar[threadId].X[i][0] = 1*Precision; gpu_mar[threadId].X[i][1] = gpoint[i].x*Precision; gpu_mar[threadId].X[i][2] = gpoint[i].x*gpoint[i].x*Precision; gpu_mar[threadId].Z[i] = gpoint[i].brightness*Precision; gpu_mar[threadId].XT[0][i] = 1 * Precision; gpu_mar[threadId].XT[1][i] = gpoint[i].x*Precision; gpu_mar[threadId].XT[2][i] = gpoint[i].x*gpoint[i].x*Precision; } /*for (int i_ = 0; i_ < n; i_++) { for (int j_ = 0; j_ < 3; j_++) { printf("%d\t", gpu_mar[threadId].X[i_][j_]); } printf("\n"); }*/ //SA = XT*X for (int m = 0; m < 3; m++) { for (int s = 0; s < 3; s++) { gpu_mar[threadId].SA[m][s] = 0; for (int i = 0; i < n; i++) { gpu_mar[threadId].SA[m][s] += gpu_mar[threadId].XT[m][i] * gpu_mar[threadId].X[i][s]; } } } /*if (threadId == 20) { for (int i_ = 0; i_ < 3; i_++) { for (int j_ = 0; j_ < 3; j_++) { printf("%lld\t", gpu_mar[threadId].SA[i_][j_]); } printf("\n"); } }*/ //SAN const int mat_num = 3;// gpu_mar[threadId].in_v = new double*[mat_num]; double **in_v = gpu_mar[threadId].in_v; for (int i_ = 0; i_ < mat_num; i_++) { in_v[i_] = new double[mat_num]; } gpu_mar[threadId].BC = new double*[mat_num]; double **BC = gpu_mar[threadId].BC; for (int i_ = 0; i_ < mat_num; i_++) { BC[i_] = new double[mat_num]; } for (int i_ = 0; i_ < mat_num; i_++) { BC[i_][i_] = 1; for (int j_ = 0; j_ < mat_num; j_++) { if (j_ == i_) continue; BC[i_][j_] = 0; } } for (int i = 0; i < mat_num; i++) { //L gpu_mar[threadId].L = new double*[mat_num]; double **L = gpu_mar[threadId].L; for (int i_ = 0; i_ < mat_num; i_++) { L[i_] = new double[mat_num]; } //U gpu_mar[threadId].U = new double*[mat_num]; double **U = gpu_mar[threadId].U; for (int i_ = 0; i_ < mat_num; i_++) { U[i_] = new double[mat_num]; } gpu_mar[threadId].P = new int[mat_num]; int *P = gpu_mar[threadId].P; //SA gpu_mar[threadId].A_mirror = new long long int*[mat_num]; long long int **A_mirror = gpu_mar[threadId].A_mirror; for (int i_ = 0; i_ < mat_num; i_++) { A_mirror[i_] = new long long int[mat_num]; } for (int i_ = 0; i_ < mat_num; i_++) { for (int j_ = 0; j_ < mat_num; j_++) { A_mirror[i_][j_] = gpu_mar[threadId].SA[i_][j_]; } } //part int row = 0; for (int i_ = 0; i_ < mat_num; i_++) P[i_] = i_; for (int i_ = 0; i_ < mat_num - 1; i_++) { int p = 0; for (int j_ = i_; j_ < mat_num; j_++) { if (A_mirror[j_][i_] > p || (-1)* A_mirror[j_][i_] > p) { p = A_mirror[j_][i_] > 0 ? 
A_mirror[j_][i_] : (-1)*A_mirror[j_][i_]; row = j_; } } /* if (p == 0) { ; } */ //P[i_] P[row] int tmp = P[i_]; P[i_] = P[row]; P[row] = tmp; //A[i_][j_] A[row][j_] double tmp2 = 0; for (int j_ = 0; j_ < mat_num; j_++) { tmp2 = A_mirror[i_][j_]; A_mirror[i_][j_] = A_mirror[row][j_]; A_mirror[row][j_] = tmp2; } double u = A_mirror[i_][i_], l = 0; for (int j_ = i_ + 1; j_ < mat_num; j_++) { l = A_mirror[j_][i_] / u; A_mirror[j_][i_] = l; for (int k_ = i_ + 1; k_ < mat_num; k_++) { A_mirror[j_][k_] = A_mirror[j_][k_] - A_mirror[i_][k_] * l; } } } //LU for (int i_ = 0; i_ < mat_num; i_++) { for (int j_ = 0; j_ <= i_; j_++) { if (i_ != j_) L[i_][j_] = A_mirror[i_][j_]; else L[i_][j_] = 1; } for (int k_ = i_; k_ < mat_num; k_++) { U[i_][k_] = A_mirror[i_][k_]; } } double *y = new double[mat_num]; // for (int i_ = 0; i_ < mat_num; i_++) { y[i_] = BC[i][P[i_]]; for (int j_ = 0; j_ < i_; j_++) { y[i_] = y[i_] - L[i_][j_] * y[j_]; } } // for (int i_ = mat_num - 1; i_ >= 0; i_--) { in_v[i][i_] = y[i_]; for (int j_ = mat_num - 1; j_ > i_; j_--) { in_v[i][i_] = in_v[i][i_] - U[i_][j_] * in_v[i][j_]; } in_v[i][i_] /= U[i_][i_]; } for (int i_ = 0; i_ < mat_num; i_++) delete[] L[i_]; delete[]L; for (int i_ = 0; i_ < mat_num; i_++) delete[]U[i_]; delete[]U; for (int i_ = 0; i_ < mat_num; i_++) delete[]A_mirror[i_]; delete[]A_mirror; delete[]P; delete[]y; } for (int i_ = 0; i_ < mat_num; i_++) { for (int j_ = 0; j_ < i_; j_++) { gpu_mar[threadId].SAN[i_][j_] = in_v[j_][i_]; gpu_mar[threadId].SAN[j_][i_] = in_v[i_][j_]; } gpu_mar[threadId].SAN[i_][i_] = in_v[i_][i_]; } for (int i = 0; i < mat_num; i++) delete[]in_v[i]; delete[]in_v; for (int i = 0; i < mat_num; i++) delete[]BC[i]; delete[]BC; /* if (threadId == 20) { for (int i_ = 0; i_ < mat_num; i_++) { for (int j_ = 0; j_ < mat_num; j_++) { printf("%lf\t", gpu_mar[threadId].SAN[i_][j_]); } printf("\n"); } }*/ //SC = SAN*XT for (int m = 0; m < 3; m++) { for (int s = 0; s < n; s++) { gpu_mar[threadId].SC[m][s] = 0; for (int i = 0; i < 3; i++) { gpu_mar[threadId].SC[m][s] += gpu_mar[threadId].SAN[m][i] * gpu_mar[threadId].XT[i][s]; } } } /*if (threadId == 20) { for (int i_ = 0; i_ < 3; i_++) { for (int j_ = 0; j_ < n; j_++) { printf("%lf\t", gpu_mar[threadId].SC[i_][j_]); } printf("\n"); } }*/ //B = SC*Z for (int m = 0; m < 3; m++) { gpu_mar[threadId].B[m] = 0; for (int i = 0; i < n; i++) { gpu_mar[threadId].B[m] += gpu_mar[threadId].SC[m][i] * gpu_mar[threadId].Z[i]; } } //B //printf("B1 = %d , B2 = %d\n", gpu_mar[threadId].B[1], gpu_mar[threadId].B[2]); point[threadId].cx = threadId; point[threadId].cy = (-gpu_mar[threadId].B[1]) / ((2 * gpu_mar[threadId].B[2])); //point[threadId].gaussbright = exp((float)(gpu_mar[threadId].B[0] - gpu_mar[threadId].B[1] * gpu_mar[threadId].B[1] / (4 * gpu_mar[threadId].B[2]))); for (int i = 0; i < n; i++) { delete[] gpu_mar[threadId].X[i]; } delete[] gpu_mar[threadId].X; for (int i = 0; i < 3; i++) { delete[] gpu_mar[threadId].XT[i]; } delete[] gpu_mar[threadId].XT; delete[] gpu_mar[threadId].Z; delete[] gpu_mar[threadId].B; for (int i = 0; i < 3; i++) { delete[] gpu_mar[threadId].SA[i]; } delete[] gpu_mar[threadId].SA; for (int i = 0; i < 3; i++) { delete[] gpu_mar[threadId].SAN[i]; } delete[] gpu_mar[threadId].SAN; for (int i = 0; i < 3; i++) { delete[] gpu_mar[threadId].SC[i]; } delete[] gpu_mar[threadId].SC; } else { point[threadId].cx = threadId; point[threadId].cy = 0; point[threadId].bright = 0; } delete[] gpoint; } } // //for (int i = 0; i < Colonce; i++) //{ // int Pixnum = 0; // //GPoint *gpoint; // 
//point[threadId*Colonce+i].gpoint = new GPoint[Rows]; // //point[i].gpoint = new GPoint[Rows]; // for (int j = 0; j < Rows; j++) // { // if ((gpu_cr[Rows*i + j] > minError*point[threadId*Colonce + i].bright) // && (gpu_cr[Rows*i + j] < (1 - maxError)*point[threadId*Colonce + i].bright) // && (abs(j - point[threadId*Colonce + i].y) < yRange)) // { // point[threadId*Colonce + i].gpoint[Pixnum].x = threadId * Colonce + i; // point[threadId*Colonce + i].gpoint[Pixnum].brightness = gpu_cr[Rows*i + j]; // Pixnum++; // } // if ((j - point[threadId*Colonce + i].y) < yRange) // break; // } // point[threadId*Colonce + i].Pixnum = Pixnum; /* // if (Pixnum >= 3) { __shared__ int *X; X = new int[Pixnum * 3]; __shared__ int *Z; Z = new int[Pixnum]; //<3.5 //dim3 blockSEX(1, 0, 0); //dim3 threadSEX(Pixnum, 0, 0); //XZ //SetElementX << <blockSEX, threadSEX >> > (gpoint, X, Pixnum); //X(n*3) Zn*1) for (int i = 0; i < Pixnum; i++) { for (int j = 0; j < 3; j++) { if (j = 0) { X[i * 3 + j] = 1; } if (j = 1) { X[i * 3 + j] = gpoint[i].x; } if (j = 2) { X[i * 3 + j] = gpoint[i].x*gpoint[i].x; } } Z[i] = gpoint[i].brightness; } //X __shared__ int *XT; XT = new int[Pixnum* 3]; for (int i = 0; i < 3; i++) { for (int j = 0; j < Pixnum; j++) { XT[i*Pixnum + j] = X[j * 3 + i]; } } //XT*X __shared__ int *SA; SA = new int[3 * 3]; for (int m = 0; i < 3; i++) { for (int s = 0; s < 3; s++) { for (int n = 0; n < Pixnum; n++) { SA[m * 3 + s] = XT[m*Pixnum + n] * X[n * 3 + s]; } } } //SA __shared__ int *SAN; SAN = new int[3 * 3]; }*/ //} //delete &gpu_cr; //#define N 3 //__global__ void MatAdd(const int **A, const int **B, int **C) //{ // int i = threadIdx.x; // int j = threadIdx.y; // C[i][j] = A[i][j] + B[i][j]; // //__syncthreads(); //} //extern "C" void GetGaussFitCuda(GpuMat gpuMat, MPoint *point, double maxError, double minError, int yRange, int Colonce); extern "C" void CudaGuassHC(Mat matImage, MPoint *point, double maxError, double minError, int yRange, int Colonce,int Precision) { int Rows = matImage.rows; int Cols = matImage.cols;// *matImage.channels(); //InputArray inputMat(matImage); //for (int j = 0; j < Rows; j++) { // //uchar* data = gpuMat.ptr<uchar>(j); // for (int i = 0; i < Cols; i++) { // int datt = inputMat.ptr<uchar>(j)[i]; // //cout << "(" << i << "," <<j << "):" << datt << endl; // printf("(%d,%d):%d\n", i, j, datt); // } //} //cout << Cols << endl; GpuMat gpuMat(matImage); //gpuMat.upload(matImage); //for (int j = 0; j < Rows; j++) { // //uchar* data = gpuMat.ptr<uchar>(j); // for (int i = 0; i < Cols; i++) { // int datt = gpuMat.ptr<uchar>(j)[i]; // //cout << "(" << i << "," <<j << "):" << datt << endl; // printf("(%d,%d):%d\n", i, j, datt); // } //} // MPoint *gpu_point; //gpu_point = new MPoint[Cols]; checkCudaError(hipMalloc((void**)&gpu_point, sizeof(MPoint)*Cols), "malloc error1"); // int **gpu_data; int *gpu_data_d; int **cpu_data = (int**)malloc(sizeof(int*)*Cols); int *cpu_data_d = (int*)malloc(sizeof(int)*Cols*Rows); checkCudaError(hipMalloc((void**)&gpu_data, Cols * sizeof(int**)), "malloc error2"); checkCudaError(hipMalloc((void**)&gpu_data_d, Cols *Rows * sizeof(int)), " malloc error2"); for (int i = 0; i < Cols; i++) { cpu_data[i] = gpu_data_d + Rows * i; // } checkCudaError(hipMemcpy(gpu_data, cpu_data, sizeof(int*)*Cols, hipMemcpyHostToDevice), "memcpy error1"); checkCudaError(hipMemcpy(gpu_data_d, cpu_data_d, sizeof(int)*Rows*Cols, hipMemcpyHostToDevice), "memcpy error1"); /*if (hipSuccess != hipMemcpy(gpu_point, point, sizeof(point)*Cols, hipMemcpyHostToDevice)) { printf("cuda 
memcpy up error1!\n"); }*/ //dim3 threads_all(Cols / Colonce); //GPU MatrixUnion *gpu_mar; checkCudaError(hipMalloc((void**)&gpu_mar, sizeof(MatrixUnion)*Cols), "malloc error3"); //colonce GetGaussPointCuda << <1, Cols/Colonce >> > (gpuMat, gpu_point, gpu_data, Colonce, Rows, Cols); hipDeviceSynchronize(); // const int BlockPMat = 1280; int Blocknum, Threadnum; Blocknum = BlockPMat; Threadnum = Cols / Blocknum+1; /* if (Cols > 1024) { Blocknum = Cols / 1024 + 1; Threadnum = 1024; } else { Blocknum = 1; Threadnum = Cols; } */ // GetGaussFitRes << <Blocknum, Threadnum >> > (gpu_point, gpu_mar,gpu_data, maxError, minError, yRange, Rows, Cols,Precision); hipDeviceSynchronize(); checkCudaError(hipMemcpy(point, gpu_point, sizeof(MPoint)*Cols, hipMemcpyDeviceToHost), "memcpy down error1"); //for (int i = 0; i < Cols; i++) //{ // //cout << "("<<point[i].x<<","<< point[i].y<<"):"<< point[i].bright << endl; // printf("(%d,%d):%d\t, here are %d GaussPoints, the result is %lf,%lf\n", point[i].x, point[i].y, point[i].bright,point[i].Pixnum,point[i].cx,point[i].cy); //} // /*for (int i = 0; i < Cols; i++) { free((void*)cpu_data[i]); }*/ free((void*)cpu_data); free(cpu_data_d); /*for (int i = 0; i < Cols; i++) { hipFree((void*)gpu_data[i]); }*/ hipFree(gpu_data); hipFree(gpu_point); hipFree(gpu_data_d); hipFree(gpu_mar); gpuMat.release(); } extern "C" void GuassFitGpuHcT(Mat matImage, MPoint *point, double maxError, double minError, int yRange, int Colonce) { }
b5b8c7b275022971e618c3300bd9982f4fd6e52c.cu
#include "iostream" #include "cuda_runtime.h" #include "cuda.h" #include "cublas.h" #include "cublas_api.h" #include "pch.h" //#include "stdafx.h" #include "cv.h" //#include <process.h> //#include "CameraApi.h" #include "LaserRange.h" //#include "afxwin.h" //#include "windows.h" #include "math.h" //#include "cstdlib" //#include "sstream" //#include "ImProcess.h" //#include "opencv2/core/core.hpp" //#include "opencv2/imgproc/imgproc.hpp" //#include "opencv2/calib3d/calib3d.hpp" #include "CudaTest.h" #include <device_launch_parameters.h> #include <device_functions.h> //#include <stdio.h> //#include <stdlib.h> #include <opencv2/opencv.hpp> #include <opencv2/core/cuda.hpp> //#include "CudaKernal.cuh" using namespace std; using namespace cv; using namespace cv::cuda; //extern "C" void GetGaussFitCuda(GpuMat gpuMat, MPoint *point, double maxError, double minError, int yRange, int Colonce); //矩阵乘法 a[M][N]*b[N][S] cudaError_t checkCudaError(cudaError_t CudaFunction,const char* ident) { cudaError_t err = CudaFunction; if (err != cudaSuccess) { fprintf(stderr, "%s \t cudaError:%s\n",ident,cudaGetErrorString(cudaGetLastError())); } return err; } //Coloncel行扫描得点存储 __global__ void GetGaussPointCuda(PtrStepSz<uchar1> src, MPoint *point, int **gpu_data, int Colonce, int Rows, int Cols) { int threadId = threadIdx.x; //printf("%d\n",threadId); //__shared__ int *gpu_cr; //gpu_cr = new int [Rows*Cols]; //逐行存入数组 for (int j = 0; j < Rows; j++) { for (int i = 0; i < Colonce; i++) { gpu_data[i + threadId * Colonce][j] = (int)src(j, threadId*Colonce + i).x; } } //int i = 0, j = 0; //gpu_cr[i*Rows + j] = *((int*)&src( j, threadId*Colonce + i)); //取每列最大值位置 for (int i = 0; i < Colonce; i++) { int MaxPixel = gpu_data[i + threadId * Colonce][0]; //printf("the first pixel is %d \n", MaxPixel); int MaxY = 0; for (int j = 1; j < Rows; j++) { if (gpu_data[i + threadId * Colonce][j] > MaxPixel) { MaxPixel = gpu_data[i + threadId * Colonce][j]; MaxY = j; } } point[threadId*Colonce + i].x = threadId * Colonce + i; point[threadId*Colonce + i].y = MaxY; point[threadId*Colonce + i].bright = MaxPixel; } __syncthreads(); } //按列筛选并处理高斯点 __global__ void GetGaussFitRes(MPoint *point, MatrixUnion *gpu_mar,int **gpu_data, double maxError, double minError, int yRange, int Rows, int Cols,int Precision) { //通过块并行解决一个block内thread不够用的问题 int threadId = blockIdx.x*blockDim.x + threadIdx.x; //判断以确定该线程有可处理数据 if (threadId < Cols) { ////高斯点存储申请 //int *y; //存储高斯点在每列的行位置 //int *br; //存储高斯点的值 GPoint *gpoint = new GPoint[2 * yRange]; int Pixnum = 0; //统计高斯点个数 //确定上下界位置 减少计算次数 double minLine = minError * point[threadId].bright; double maxLine = (1-maxError) * point[threadId].bright; //高斯点筛选 for (int i = (point[threadId].y - yRange); i < (point[threadId].y+yRange+1); i++) { if ((gpu_data[threadId][i] > minLine)&&(gpu_data[threadId][i] < maxLine)) { gpoint[Pixnum].x = i; gpoint[Pixnum].brightness = gpu_data[threadId][i]; Pixnum++; } } point[threadId].Pixnum = Pixnum; //__shared__ MatrixUnion *gpu_mar; //高斯点大于3时进行拟合 if (Pixnum > 3) { //运算矩阵申请 int n = Pixnum; //X矩阵(1 x x^2) n*3 gpu_mar[threadId].X = new long long int *[n]; for (int i = 0; i < n; i++) { gpu_mar[threadId].X[i] = new long long int[3]; } //XT矩阵 X的转置 3*n gpu_mar[threadId].XT = new long long int *[3]; for (int i = 0; i < 3; i++) { gpu_mar[threadId].XT[i] = new long long int[n]; } //Z矩阵(brightness) n*1 gpu_mar[threadId].Z = new long long int[n]; //B矩阵(结果) 3*1 gpu_mar[threadId].B = new long long int[3]; //SA矩阵 (XT*X) 3*3 gpu_mar[threadId].SA = new long long int *[3]; for (int i = 0; i < 3; i++) 
{ gpu_mar[threadId].SA[i] = new long long int[3]; } //SAN矩阵 SA的逆矩阵 3*3 gpu_mar[threadId].SAN = new double *[3]; for (int i = 0; i < 3; i++) { gpu_mar[threadId].SAN[i] = new double[3]; } //SC矩阵 SAN*XT 3*n gpu_mar[threadId].SC = new double *[3]; for (int i = 0; i < 3; i++) { gpu_mar[threadId].SC[i] = new double [n]; } /* //X矩阵(1 x x^2) n*3 double **X = new double*[n]; for (int i = 0; i < n; i++) { X[i] = new double[3]; } //XT矩阵 X的转置 3*n double **XT = new double*[3]; for (int i = 0; i < 3; i++) { XT[i] = new double[n]; } //Z矩阵(brightness) n*1 double *Z = new double[n]; //B矩阵(结果) 3*1 double *B = new double[3]; //SA矩阵 (XT*X) 3*3 double **SA = new double*[3]; for (int i = 0; i < 3; i++) { SA[i] = new double[3]; } //SAN矩阵 SA的逆矩阵 3*3 double **SAN = new double*[3]; for (int i = 0; i < 3; i++) { SAN[i] = new double[3]; } */ //存入X矩阵和Z矩阵 顺手存入转置XT for (int i = 0; i < n; i++) { gpu_mar[threadId].X[i][0] = 1*Precision; gpu_mar[threadId].X[i][1] = gpoint[i].x*Precision; gpu_mar[threadId].X[i][2] = gpoint[i].x*gpoint[i].x*Precision; gpu_mar[threadId].Z[i] = gpoint[i].brightness*Precision; gpu_mar[threadId].XT[0][i] = 1 * Precision; gpu_mar[threadId].XT[1][i] = gpoint[i].x*Precision; gpu_mar[threadId].XT[2][i] = gpoint[i].x*gpoint[i].x*Precision; } /*for (int i_ = 0; i_ < n; i_++) { for (int j_ = 0; j_ < 3; j_++) { printf("%d\t", gpu_mar[threadId].X[i_][j_]); } printf("\n"); }*/ //计算SA = XT*X for (int m = 0; m < 3; m++) { for (int s = 0; s < 3; s++) { gpu_mar[threadId].SA[m][s] = 0; for (int i = 0; i < n; i++) { gpu_mar[threadId].SA[m][s] += gpu_mar[threadId].XT[m][i] * gpu_mar[threadId].X[i][s]; } } } /*if (threadId == 20) { for (int i_ = 0; i_ < 3; i_++) { for (int j_ = 0; j_ < 3; j_++) { printf("%lld\t", gpu_mar[threadId].SA[i_][j_]); } printf("\n"); } }*/ //计算SAN const int mat_num = 3;//求逆矩阵的阶数 gpu_mar[threadId].in_v = new double*[mat_num]; double **in_v = gpu_mar[threadId].in_v; for (int i_ = 0; i_ < mat_num; i_++) { in_v[i_] = new double[mat_num]; } gpu_mar[threadId].BC = new double*[mat_num]; double **BC = gpu_mar[threadId].BC; for (int i_ = 0; i_ < mat_num; i_++) { BC[i_] = new double[mat_num]; } for (int i_ = 0; i_ < mat_num; i_++) { BC[i_][i_] = 1; for (int j_ = 0; j_ < mat_num; j_++) { if (j_ == i_) continue; BC[i_][j_] = 0; } } for (int i = 0; i < mat_num; i++) { //L矩阵建立 gpu_mar[threadId].L = new double*[mat_num]; double **L = gpu_mar[threadId].L; for (int i_ = 0; i_ < mat_num; i_++) { L[i_] = new double[mat_num]; } //U矩阵建立 gpu_mar[threadId].U = new double*[mat_num]; double **U = gpu_mar[threadId].U; for (int i_ = 0; i_ < mat_num; i_++) { U[i_] = new double[mat_num]; } gpu_mar[threadId].P = new int[mat_num]; int *P = gpu_mar[threadId].P; //SA复制 gpu_mar[threadId].A_mirror = new long long int*[mat_num]; long long int **A_mirror = gpu_mar[threadId].A_mirror; for (int i_ = 0; i_ < mat_num; i_++) { A_mirror[i_] = new long long int[mat_num]; } for (int i_ = 0; i_ < mat_num; i_++) { for (int j_ = 0; j_ < mat_num; j_++) { A_mirror[i_][j_] = gpu_mar[threadId].SA[i_][j_]; } } //核心part 并没有看懂 int row = 0; for (int i_ = 0; i_ < mat_num; i_++) P[i_] = i_; for (int i_ = 0; i_ < mat_num - 1; i_++) { int p = 0; for (int j_ = i_; j_ < mat_num; j_++) { if (A_mirror[j_][i_] > p || (-1)* A_mirror[j_][i_] > p) { p = A_mirror[j_][i_] > 0 ? 
A_mirror[j_][i_] : (-1)*A_mirror[j_][i_]; row = j_; } } /* if (p == 0) { ; } */ //交换P[i_] P[row] int tmp = P[i_]; P[i_] = P[row]; P[row] = tmp; //交换A[i_][j_] 和A[row][j_] double tmp2 = 0; for (int j_ = 0; j_ < mat_num; j_++) { tmp2 = A_mirror[i_][j_]; A_mirror[i_][j_] = A_mirror[row][j_]; A_mirror[row][j_] = tmp2; } double u = A_mirror[i_][i_], l = 0; for (int j_ = i_ + 1; j_ < mat_num; j_++) { l = A_mirror[j_][i_] / u; A_mirror[j_][i_] = l; for (int k_ = i_ + 1; k_ < mat_num; k_++) { A_mirror[j_][k_] = A_mirror[j_][k_] - A_mirror[i_][k_] * l; } } } //构造LU for (int i_ = 0; i_ < mat_num; i_++) { for (int j_ = 0; j_ <= i_; j_++) { if (i_ != j_) L[i_][j_] = A_mirror[i_][j_]; else L[i_][j_] = 1; } for (int k_ = i_; k_ < mat_num; k_++) { U[i_][k_] = A_mirror[i_][k_]; } } double *y = new double[mat_num]; //正向替换 for (int i_ = 0; i_ < mat_num; i_++) { y[i_] = BC[i][P[i_]]; for (int j_ = 0; j_ < i_; j_++) { y[i_] = y[i_] - L[i_][j_] * y[j_]; } } //反向替换 for (int i_ = mat_num - 1; i_ >= 0; i_--) { in_v[i][i_] = y[i_]; for (int j_ = mat_num - 1; j_ > i_; j_--) { in_v[i][i_] = in_v[i][i_] - U[i_][j_] * in_v[i][j_]; } in_v[i][i_] /= U[i_][i_]; } for (int i_ = 0; i_ < mat_num; i_++) delete[] L[i_]; delete[]L; for (int i_ = 0; i_ < mat_num; i_++) delete[]U[i_]; delete[]U; for (int i_ = 0; i_ < mat_num; i_++) delete[]A_mirror[i_]; delete[]A_mirror; delete[]P; delete[]y; } for (int i_ = 0; i_ < mat_num; i_++) { for (int j_ = 0; j_ < i_; j_++) { gpu_mar[threadId].SAN[i_][j_] = in_v[j_][i_]; gpu_mar[threadId].SAN[j_][i_] = in_v[i_][j_]; } gpu_mar[threadId].SAN[i_][i_] = in_v[i_][i_]; } for (int i = 0; i < mat_num; i++) delete[]in_v[i]; delete[]in_v; for (int i = 0; i < mat_num; i++) delete[]BC[i]; delete[]BC; /* if (threadId == 20) { for (int i_ = 0; i_ < mat_num; i_++) { for (int j_ = 0; j_ < mat_num; j_++) { printf("%lf\t", gpu_mar[threadId].SAN[i_][j_]); } printf("\n"); } }*/ //计算SC = SAN*XT for (int m = 0; m < 3; m++) { for (int s = 0; s < n; s++) { gpu_mar[threadId].SC[m][s] = 0; for (int i = 0; i < 3; i++) { gpu_mar[threadId].SC[m][s] += gpu_mar[threadId].SAN[m][i] * gpu_mar[threadId].XT[i][s]; } } } /*if (threadId == 20) { for (int i_ = 0; i_ < 3; i_++) { for (int j_ = 0; j_ < n; j_++) { printf("%lf\t", gpu_mar[threadId].SC[i_][j_]); } printf("\n"); } }*/ //计算B = SC*Z for (int m = 0; m < 3; m++) { gpu_mar[threadId].B[m] = 0; for (int i = 0; i < n; i++) { gpu_mar[threadId].B[m] += gpu_mar[threadId].SC[m][i] * gpu_mar[threadId].Z[i]; } } //解析B //printf("B1 = %d , B2 = %d\n", gpu_mar[threadId].B[1], gpu_mar[threadId].B[2]); point[threadId].cx = threadId; point[threadId].cy = (-gpu_mar[threadId].B[1]) / ((2 * gpu_mar[threadId].B[2])); //point[threadId].gaussbright = exp((float)(gpu_mar[threadId].B[0] - gpu_mar[threadId].B[1] * gpu_mar[threadId].B[1] / (4 * gpu_mar[threadId].B[2]))); for (int i = 0; i < n; i++) { delete[] gpu_mar[threadId].X[i]; } delete[] gpu_mar[threadId].X; for (int i = 0; i < 3; i++) { delete[] gpu_mar[threadId].XT[i]; } delete[] gpu_mar[threadId].XT; delete[] gpu_mar[threadId].Z; delete[] gpu_mar[threadId].B; for (int i = 0; i < 3; i++) { delete[] gpu_mar[threadId].SA[i]; } delete[] gpu_mar[threadId].SA; for (int i = 0; i < 3; i++) { delete[] gpu_mar[threadId].SAN[i]; } delete[] gpu_mar[threadId].SAN; for (int i = 0; i < 3; i++) { delete[] gpu_mar[threadId].SC[i]; } delete[] gpu_mar[threadId].SC; } else { point[threadId].cx = threadId; point[threadId].cy = 0; point[threadId].bright = 0; } delete[] gpoint; } } //高斯点筛选 //for (int i = 0; i < Colonce; i++) //{ // int Pixnum = 0; // //GPoint 
*gpoint; // //point[threadId*Colonce+i].gpoint = new GPoint[Rows]; // //point[i].gpoint = new GPoint[Rows]; // for (int j = 0; j < Rows; j++) // { // if ((gpu_cr[Rows*i + j] > minError*point[threadId*Colonce + i].bright) // && (gpu_cr[Rows*i + j] < (1 - maxError)*point[threadId*Colonce + i].bright) // && (abs(j - point[threadId*Colonce + i].y) < yRange)) // { // point[threadId*Colonce + i].gpoint[Pixnum].x = threadId * Colonce + i; // point[threadId*Colonce + i].gpoint[Pixnum].brightness = gpu_cr[Rows*i + j]; // Pixnum++; // } // if ((j - point[threadId*Colonce + i].y) < yRange) // break; // } // point[threadId*Colonce + i].Pixnum = Pixnum; /* //矩阵运算 if (Pixnum >= 3) { __shared__ int *X; X = new int[Pixnum * 3]; __shared__ int *Z; Z = new int[Pixnum]; //计算能力<3.5 不能嵌套并行核函数 //dim3 blockSEX(1, 0, 0); //dim3 threadSEX(Pixnum, 0, 0); //存入X、Z矩阵 //SetElementX << <blockSEX, threadSEX >> > (gpoint, X, Pixnum); //存入X矩阵(n*3) Z矩阵(n*1) for (int i = 0; i < Pixnum; i++) { for (int j = 0; j < 3; j++) { if (j = 0) { X[i * 3 + j] = 1; } if (j = 1) { X[i * 3 + j] = gpoint[i].x; } if (j = 2) { X[i * 3 + j] = gpoint[i].x*gpoint[i].x; } } Z[i] = gpoint[i].brightness; } //求X转置 __shared__ int *XT; XT = new int[Pixnum* 3]; for (int i = 0; i < 3; i++) { for (int j = 0; j < Pixnum; j++) { XT[i*Pixnum + j] = X[j * 3 + i]; } } //求XT*X结果 __shared__ int *SA; SA = new int[3 * 3]; for (int m = 0; i < 3; i++) { for (int s = 0; s < 3; s++) { for (int n = 0; n < Pixnum; n++) { SA[m * 3 + s] = XT[m*Pixnum + n] * X[n * 3 + s]; } } } //求SA逆矩阵 __shared__ int *SAN; SAN = new int[3 * 3]; }*/ //} //delete &gpu_cr; //#define N 3 //__global__ void MatAdd(const int **A, const int **B, int **C) //{ // int i = threadIdx.x; // int j = threadIdx.y; // C[i][j] = A[i][j] + B[i][j]; // //__syncthreads(); //} //extern "C" void GetGaussFitCuda(GpuMat gpuMat, MPoint *point, double maxError, double minError, int yRange, int Colonce); extern "C" void CudaGuassHC(Mat matImage, MPoint *point, double maxError, double minError, int yRange, int Colonce,int Precision) { int Rows = matImage.rows; int Cols = matImage.cols;// *matImage.channels(); //InputArray inputMat(matImage); //for (int j = 0; j < Rows; j++) { // //uchar* data = gpuMat.ptr<uchar>(j); // for (int i = 0; i < Cols; i++) { // int datt = inputMat.ptr<uchar>(j)[i]; // //cout << "(" << i << "," <<j << "):" << datt << endl; // printf("(%d,%d):%d\n", i, j, datt); // } //} //cout << Cols << endl; GpuMat gpuMat(matImage); //gpuMat.upload(matImage); //for (int j = 0; j < Rows; j++) { // //uchar* data = gpuMat.ptr<uchar>(j); // for (int i = 0; i < Cols; i++) { // int datt = gpuMat.ptr<uchar>(j)[i]; // //cout << "(" << i << "," <<j << "):" << datt << endl; // printf("(%d,%d):%d\n", i, j, datt); // } //} //结构体指针上传 MPoint *gpu_point; //gpu_point = new MPoint[Cols]; checkCudaError(cudaMalloc((void**)&gpu_point, sizeof(MPoint)*Cols), "malloc error1"); //显存图像缓存矩阵 int **gpu_data; int *gpu_data_d; int **cpu_data = (int**)malloc(sizeof(int*)*Cols); int *cpu_data_d = (int*)malloc(sizeof(int)*Cols*Rows); checkCudaError(cudaMalloc((void**)&gpu_data, Cols * sizeof(int**)), "malloc error2"); checkCudaError(cudaMalloc((void**)&gpu_data_d, Cols *Rows * sizeof(int)), " malloc error2"); for (int i = 0; i < Cols; i++) { cpu_data[i] = gpu_data_d + Rows * i; //首地址赋值 将一维矩阵转为二维 } checkCudaError(cudaMemcpy(gpu_data, cpu_data, sizeof(int*)*Cols, cudaMemcpyHostToDevice), "memcpy error1"); checkCudaError(cudaMemcpy(gpu_data_d, cpu_data_d, sizeof(int)*Rows*Cols, cudaMemcpyHostToDevice), "memcpy error1"); /*if (cudaSuccess != 
cudaMemcpy(gpu_point, point, sizeof(point)*Cols, cudaMemcpyHostToDevice)) { printf("cuda memcpy up error1!\n"); }*/ //dim3 threads_all(Cols / Colonce); //GPU端矩阵集 MatrixUnion *gpu_mar; checkCudaError(cudaMalloc((void**)&gpu_mar, sizeof(MatrixUnion)*Cols), "malloc error3"); //每colonce列统一存入 GetGaussPointCuda << <1, Cols/Colonce >> > (gpuMat, gpu_point, gpu_data, Colonce, Rows, Cols); cudaDeviceSynchronize(); //规划并行流 之后设计为只规划一次 const int BlockPMat = 1280; int Blocknum, Threadnum; Blocknum = BlockPMat; Threadnum = Cols / Blocknum+1; /* if (Cols > 1024) { Blocknum = Cols / 1024 + 1; Threadnum = 1024; } else { Blocknum = 1; Threadnum = Cols; } */ //进行高斯拟合 GetGaussFitRes << <Blocknum, Threadnum >> > (gpu_point, gpu_mar,gpu_data, maxError, minError, yRange, Rows, Cols,Precision); cudaDeviceSynchronize(); checkCudaError(cudaMemcpy(point, gpu_point, sizeof(MPoint)*Cols, cudaMemcpyDeviceToHost), "memcpy down error1"); //for (int i = 0; i < Cols; i++) //{ // //cout << "("<<point[i].x<<","<< point[i].y<<"):"<< point[i].bright << endl; // printf("(%d,%d):%d\t, here are %d GaussPoints, the result is (%lf,%lf)\n", point[i].x, point[i].y, point[i].bright,point[i].Pixnum,point[i].cx,point[i].cy); //} // /*for (int i = 0; i < Cols; i++) { free((void*)cpu_data[i]); }*/ free((void*)cpu_data); free(cpu_data_d); /*for (int i = 0; i < Cols; i++) { cudaFree((void*)gpu_data[i]); }*/ cudaFree(gpu_data); cudaFree(gpu_point); cudaFree(gpu_data_d); cudaFree(gpu_mar); gpuMat.release(); } extern "C" void GuassFitGpuHcT(Mat matImage, MPoint *point, double maxError, double minError, int yRange, int Colonce) { }
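The GetGaussFitRes kernel above fits z = b0 + b1*x + b2*x^2 to the brightness samples around each column's maximum (solving the normal equations with an LU-based 3x3 inverse) and takes the vertex -B[1]/(2*B[2]) as the sub-pixel line centre. As a self-contained illustration of that idea, added here and reduced to the classic three-sample case, the vertex formula collapses to a one-line parabolic interpolation:

#include <cstdio>

// Fit a parabola through samples at x-1, x, x+1 and return the offset of its
// vertex from the centre sample; this is -b1/(2*b2) of the full least-squares
// fit specialised to exactly three points.
static double parabolic_peak_offset(double zm1, double z0, double zp1)
{
    return 0.5 * (zm1 - zp1) / (zm1 - 2.0 * z0 + zp1);
}

int main()
{
    // brightness samples around a peak: the true maximum lies slightly
    // towards the brighter neighbour
    double offset = parabolic_peak_offset(80.0, 100.0, 90.0);
    printf("sub-pixel peak offset = %+f\n", offset);   // about +0.17
    return 0;
}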
af40f69d79aa024609ee1ed5a5dfc70da2d06e06.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<cuda_runtime.h> #include<iostream> #include<stdio.h> #include<math.h> const int N = 1024 * 1024; const int GPUTHREADNUM = 256; const int GPUBLOCKNUM = 256; __global__ void kernelA(int *a,int *b,int *c) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if(idx < N) { c[idx] = (a[idx] + b[idx])/2; } } __global__ void kernelB(int *a) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if(idx < N) { // r[idx] = (c[idx] +a[idx])/2; a[idx] = a[idx] +1; } } int main() { hipStream_t stream; hipStreamCreate(&stream); int *host_a,*host_b,*host_c; int *dev_a,*dev_b,*dev_c; int i; hipMalloc((void**)&dev_a,N*sizeof(int)); hipMalloc((void**)&dev_b,N*sizeof(int)); hipMalloc((void**)&dev_c,N*sizeof(int)); //hipMalloc((void**)&dev_r,N*sizeof(int)); hipHostMalloc((void **)&host_a,N * sizeof(int),hipHostMallocDefault); hipHostMalloc((void **)&host_b,N * sizeof(int),hipHostMallocDefault); hipHostMalloc((void **)&host_c,N * sizeof(int),hipHostMallocDefault); for(i=0;i<N;i++) { host_a[i] = N - i; host_b[i] = i; } hipMemcpyAsync(dev_a,host_a,N * sizeof(int),hipMemcpyHostToDevice,stream); hipMemcpyAsync(dev_b,host_b,N * sizeof(int),hipMemcpyHostToDevice,stream); for(i=0;i<100;i++) { kernelA << <N/GPUBLOCKNUM , GPUTHREADNUM,0,stream >> >(dev_a,dev_b,dev_c); kernelB << <N/GPUBLOCKNUM,GPUTHREADNUM,0,stream >> >(dev_a); } hipMemcpyAsync(host_c,dev_c,N * sizeof(int),hipMemcpyDeviceToHost,stream); hipStreamSynchronize(stream); for(i=0;i<10;i++) { printf("%d ",host_c[i]); } hipHostFree(host_a); hipHostFree(host_b); hipHostFree(host_c); hipFree(dev_a); hipFree(dev_b); hipFree(dev_c); hipStreamDestroy(stream); }
af40f69d79aa024609ee1ed5a5dfc70da2d06e06.cu
#include<cuda_runtime.h> #include<iostream> #include<stdio.h> #include<math.h> const int N = 1024 * 1024; const int GPUTHREADNUM = 256; const int GPUBLOCKNUM = 256; __global__ void kernelA(int *a,int *b,int *c) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if(idx < N) { c[idx] = (a[idx] + b[idx])/2; } } __global__ void kernelB(int *a) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if(idx < N) { // r[idx] = (c[idx] +a[idx])/2; a[idx] = a[idx] +1; } } int main() { cudaStream_t stream; cudaStreamCreate(&stream); int *host_a,*host_b,*host_c; int *dev_a,*dev_b,*dev_c; int i; cudaMalloc((void**)&dev_a,N*sizeof(int)); cudaMalloc((void**)&dev_b,N*sizeof(int)); cudaMalloc((void**)&dev_c,N*sizeof(int)); //cudaMalloc((void**)&dev_r,N*sizeof(int)); cudaHostAlloc((void **)&host_a,N * sizeof(int),cudaHostAllocDefault); cudaHostAlloc((void **)&host_b,N * sizeof(int),cudaHostAllocDefault); cudaHostAlloc((void **)&host_c,N * sizeof(int),cudaHostAllocDefault); for(i=0;i<N;i++) { host_a[i] = N - i; host_b[i] = i; } cudaMemcpyAsync(dev_a,host_a,N * sizeof(int),cudaMemcpyHostToDevice,stream); cudaMemcpyAsync(dev_b,host_b,N * sizeof(int),cudaMemcpyHostToDevice,stream); for(i=0;i<100;i++) { kernelA << <N/GPUBLOCKNUM , GPUTHREADNUM,0,stream >> >(dev_a,dev_b,dev_c); kernelB << <N/GPUBLOCKNUM,GPUTHREADNUM,0,stream >> >(dev_a); } cudaMemcpyAsync(host_c,dev_c,N * sizeof(int),cudaMemcpyDeviceToHost,stream); cudaStreamSynchronize(stream); for(i=0;i<10;i++) { printf("%d ",host_c[i]); } cudaFreeHost(host_a); cudaFreeHost(host_b); cudaFreeHost(host_c); cudaFree(dev_a); cudaFree(dev_b); cudaFree(dev_c); cudaStreamDestroy(stream); }
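The stream example above prints host_c[0..9] without stating what they should be. Since the 100 kernelA/kernelB launches are issued on one stream and therefore serialize, the result can be recomputed on the host; the standalone check below is an addition for illustration, not part of the original:

#include <cstdio>

// Recompute on the CPU what the kernelA/kernelB loop should produce:
// each of the 100 iterations sets c = (a + b) / 2 and then does a = a + 1.
static int expected(int i, int n_elems)
{
    int a = n_elems - i, b = i, c = 0;
    for (int it = 0; it < 100; ++it) { c = (a + b) / 2; a = a + 1; }
    return c;   // same as (n_elems - i + 99 + i) / 2 for every i
}

int main()
{
    const int n_elems = 1024 * 1024;             // matches N in the example above
    for (int i = 0; i < 10; ++i)
        printf("%d ", expected(i, n_elems));     // values host_c[0..9] should hold
    printf("\n");
    return 0;
}

Every element ends up equal to (N + 99) / 2, because the i contributions of a and b cancel.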
860eba654f8017d434fe460bdcdc6f8b514d9db0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* This is a automatically generated test. Do not modify */ #include <stdio.h> #include <stdlib.h> #include <math.h> __global__ void compute(float comp, int var_1,float* var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16) { for (int i=0; i < var_1; ++i) { comp = (var_3 / (+1.7978E34f - (var_4 - (var_5 * +1.5645E-8f)))); var_2[i] = atanf((-0.0f * (+0.0f + (var_6 / -1.8126E-23f - var_7 - var_8)))); float tmp_1 = var_9 / +1.5703E9f; comp = tmp_1 * var_2[i] * (var_10 - +1.4446E-36f); if (comp >= +1.7516E36f / (var_11 / (-1.0045E36f / var_12))) { comp += +1.8739E36f * (var_13 * (+1.5092E15f + +1.8891E-35f)); float tmp_2 = (-1.1125E-41f / cosf(-1.7053E-44f * +1.7129E-42f)); float tmp_3 = -1.1475E-37f / (var_14 / var_15); comp = tmp_3 + tmp_2 + (var_16 / +1.9488E-30f); } } printf("%.17g\n", comp); } float* initPointer(float v) { float *ret = (float*) malloc(sizeof(float)*10); for(int i=0; i < 10; ++i) ret[i] = v; return ret; } int main(int argc, char** argv) { /* Program variables */ float tmp_1 = atof(argv[1]); int tmp_2 = atoi(argv[2]); float* tmp_3 = initPointer( atof(argv[3]) ); float tmp_4 = atof(argv[4]); float tmp_5 = atof(argv[5]); float tmp_6 = atof(argv[6]); float tmp_7 = atof(argv[7]); float tmp_8 = atof(argv[8]); float tmp_9 = atof(argv[9]); float tmp_10 = atof(argv[10]); float tmp_11 = atof(argv[11]); float tmp_12 = atof(argv[12]); float tmp_13 = atof(argv[13]); float tmp_14 = atof(argv[14]); float tmp_15 = atof(argv[15]); float tmp_16 = atof(argv[16]); float tmp_17 = atof(argv[17]); hipLaunchKernelGGL(( compute), dim3(1),dim3(1), 0, 0, tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17); hipDeviceSynchronize(); return 0; }
860eba654f8017d434fe460bdcdc6f8b514d9db0.cu
/* This is a automatically generated test. Do not modify */ #include <stdio.h> #include <stdlib.h> #include <math.h> __global__ void compute(float comp, int var_1,float* var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16) { for (int i=0; i < var_1; ++i) { comp = (var_3 / (+1.7978E34f - (var_4 - (var_5 * +1.5645E-8f)))); var_2[i] = atanf((-0.0f * (+0.0f + (var_6 / -1.8126E-23f - var_7 - var_8)))); float tmp_1 = var_9 / +1.5703E9f; comp = tmp_1 * var_2[i] * (var_10 - +1.4446E-36f); if (comp >= +1.7516E36f / (var_11 / (-1.0045E36f / var_12))) { comp += +1.8739E36f * (var_13 * (+1.5092E15f + +1.8891E-35f)); float tmp_2 = (-1.1125E-41f / cosf(-1.7053E-44f * +1.7129E-42f)); float tmp_3 = -1.1475E-37f / (var_14 / var_15); comp = tmp_3 + tmp_2 + (var_16 / +1.9488E-30f); } } printf("%.17g\n", comp); } float* initPointer(float v) { float *ret = (float*) malloc(sizeof(float)*10); for(int i=0; i < 10; ++i) ret[i] = v; return ret; } int main(int argc, char** argv) { /* Program variables */ float tmp_1 = atof(argv[1]); int tmp_2 = atoi(argv[2]); float* tmp_3 = initPointer( atof(argv[3]) ); float tmp_4 = atof(argv[4]); float tmp_5 = atof(argv[5]); float tmp_6 = atof(argv[6]); float tmp_7 = atof(argv[7]); float tmp_8 = atof(argv[8]); float tmp_9 = atof(argv[9]); float tmp_10 = atof(argv[10]); float tmp_11 = atof(argv[11]); float tmp_12 = atof(argv[12]); float tmp_13 = atof(argv[13]); float tmp_14 = atof(argv[14]); float tmp_15 = atof(argv[15]); float tmp_16 = atof(argv[16]); float tmp_17 = atof(argv[17]); compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17); cudaDeviceSynchronize(); return 0; }
85ad063eefb4e7dc090199a6a9b6af4c668941fc.hip
// !!! This is a file automatically generated by hipify!!!
#include "error.cuh"
#include <math.h>
#include <stdio.h>

#ifdef USE_DP
    typedef double real;
    const real EPSILON = 1.0e-15;
#else
    typedef float real;
    const real EPSILON = 1.0e-6f;
#endif

const int NUM_REPEATS = 10;
const real a = 1.23;
const real b = 2.34;
const real c = 3.57;
void add(const real *x, const real *y, real *z, const int N);
void check(const real *z, const int N);

int main(void)
{
    const int N = 100000000;
    const int M = sizeof(real) * N;
    real *x = (real*) malloc(M);
    real *y = (real*) malloc(M);
    real *z = (real*) malloc(M);

    for (int n = 0; n < N; ++n)
    {
        x[n] = a;
        y[n] = b;
    }

    float t_sum = 0;
    float t2_sum = 0;
    for (int repeat = 0; repeat <= NUM_REPEATS; ++repeat)
    {
        hipEvent_t start, stop;
        CHECK(hipEventCreate(&start));
        CHECK(hipEventCreate(&stop));
        CHECK(hipEventRecord(start));
        hipEventQuery(start);

        add(x, y, z, N);

        CHECK(hipEventRecord(stop));
        CHECK(hipEventSynchronize(stop));
        float elapsed_time;
        CHECK(hipEventElapsedTime(&elapsed_time, start, stop));
        printf("Time = %g ms.\n", elapsed_time);

        if (repeat > 0)
        {
            t_sum += elapsed_time;
            t2_sum += elapsed_time * elapsed_time;
        }

        CHECK(hipEventDestroy(start));
        CHECK(hipEventDestroy(stop));
    }

    const float t_ave = t_sum / NUM_REPEATS;
    const float t_err = sqrt(t2_sum / NUM_REPEATS - t_ave * t_ave);
    printf("Time = %g +- %g ms.\n", t_ave, t_err);

    check(z, N);

    free(x);
    free(y);
    free(z);
    return 0;
}

void add(const real *x, const real *y, real *z, const int N)
{
    for (int n = 0; n < N; ++n)
    {
        z[n] = x[n] + y[n];
    }
}

void check(const real *z, const int N)
{
    bool has_error = false;
    for (int n = 0; n < N; ++n)
    {
        if (fabs(z[n] - c) > EPSILON)
        {
            has_error = true;
        }
    }
    printf("%s\n", has_error ? "Has errors" : "No errors");
}
85ad063eefb4e7dc090199a6a9b6af4c668941fc.cu
#include "error.cuh" #include <math.h> #include <stdio.h> #ifdef USE_DP typedef double real; const real EPSILON = 1.0e-15; #else typedef float real; const real EPSILON = 1.0e-6f; #endif const int NUM_REPEATS = 10; const real a = 1.23; const real b = 2.34; const real c = 3.57; void add(const real *x, const real *y, real *z, const int N); void check(const real *z, const int N); int main(void) { const int N = 100000000; const int M = sizeof(real) * N; real *x = (real*) malloc(M); real *y = (real*) malloc(M); real *z = (real*) malloc(M); for (int n = 0; n < N; ++n) { x[n] = a; y[n] = b; } float t_sum = 0; float t2_sum = 0; for (int repeat = 0; repeat <= NUM_REPEATS; ++repeat) { cudaEvent_t start, stop; CHECK(cudaEventCreate(&start)); CHECK(cudaEventCreate(&stop)); CHECK(cudaEventRecord(start)); cudaEventQuery(start); add(x, y, z, N); CHECK(cudaEventRecord(stop)); CHECK(cudaEventSynchronize(stop)); float elapsed_time; CHECK(cudaEventElapsedTime(&elapsed_time, start, stop)); printf("Time = %g ms.\n", elapsed_time); if (repeat > 0) { t_sum += elapsed_time; t2_sum += elapsed_time * elapsed_time; } CHECK(cudaEventDestroy(start)); CHECK(cudaEventDestroy(stop)); } const float t_ave = t_sum / NUM_REPEATS; const float t_err = sqrt(t2_sum / NUM_REPEATS - t_ave * t_ave); printf("Time = %g +- %g ms.\n", t_ave, t_err); check(z, N); free(x); free(y); free(z); return 0; } void add(const real *x, const real *y, real *z, const int N) { for (int n = 0; n < N; ++n) { z[n] = x[n] + y[n]; } } void check(const real *z, const int N) { bool has_error = false; for (int n = 0; n < N; ++n) { if (fabs(z[n] - c) > EPSILON) { has_error = true; } } printf("%s\n", has_error ? "Has errors" : "No errors"); }
d63c7470cc86ba3a9186a720dfec9ad589dbef07.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "Cortex.cuh" #include <iostream> #include "sm_60_atomic_functions.h" #include "CUDAHelper.cuh" #ifndef M_PI #define M_PI 3.14159265358979323846 #endif struct add_double2 { __device__ double2 operator()(const double2& a, const double2& b) const { double2 r; r.x = a.x + b.x; r.y = a.y + b.y; return r; } }; struct min_vals_double2 { __device__ double2 operator()(const double2& a, const double2& b) const { double2 r; r.x = a.x < b.x ? a.x : b.x ; r.y = a.y < b.y ? a.y : b.y; return r; } }; struct max_vals_double2 { __device__ double2 operator()(const double2& a, const double2& b) const { double2 r; r.x = a.x < b.x ? b.x : a.x ; r.y = a.y < b.y ? b.y : a.y; return r; } }; __device__ double gauss(float sigma, float x, float y, float mean = 0.0) { float norm = sqrtf(x*x + y*y); return exp(-powf((norm - mean), 2) / (2 * powf(sigma, 2))) / sqrtf(2 * M_PI * powf(sigma, 2)); } __global__ void cort_map_left_kernel(SamplingPoint *d_leftFields, float alpha, double2 *d_leftLoc, size_t size) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (size <= index) return; SamplingPoint *point = &d_leftFields[index]; d_leftLoc[index].y = sqrtf(powf(point->_x - alpha, 2) + powf(point->_y, 2)); double theta = atan2(point->_y, point->_x - alpha); d_leftLoc[index].x = theta + (theta < 0 ? M_PI : -M_PI); } __global__ void cort_map_right_kernel(SamplingPoint *d_rightFields, float alpha, double2 *d_rightLoc, size_t size) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (size <= index) return; SamplingPoint *point = &d_rightFields[index]; d_rightLoc[index].y = sqrtf(powf(point->_x + alpha, 2) + powf(point->_y, 2)); d_rightLoc[index].x = atan2(point->_y, point->_x + alpha); } __global__ void cort_norm_kernel(double *d_norm_img, double2 *d_loc, uint2 cortImgSize, double *d_gauss, size_t guassKernelWidth, size_t locSize, bool rgb) { int globalIndex = blockIdx.x * blockDim.x + threadIdx.x; if (locSize <= globalIndex) return; int channel = globalIndex / (locSize / (rgb ? 3 : 1)); int offset = channel * cortImgSize.x * cortImgSize.y; int index = globalIndex % (locSize / (rgb ? 3 : 1)); double x = d_loc[index].x; double y = d_loc[index].y; int dx = (int)(10 * ((round(x * 10) / 10 - round(x)))); dx < 0 ? dx = 10 + dx : dx; int dy = (int)(10 * ((round(y * 10) / 10 - round(y)))); dy < 0 ? dy = 10 + dy : dy; double *kernel = &d_gauss[(dx * 10 + dy) * guassKernelWidth * guassKernelWidth]; int X = (int)round(x) - guassKernelWidth / 2; int Y = (int)round(y) - guassKernelWidth / 2; for (int i = 0; i != guassKernelWidth; ++i) { for (int j = 0; j != guassKernelWidth; ++j) { if (X + j >= 0 && Y + i >= 0 && X + j < cortImgSize.x && Y + i < cortImgSize.y) atomicAdd(&d_norm_img[offset + (Y + i) * cortImgSize.x + X + j], kernel[i * guassKernelWidth + j]); } } } __global__ void cort_image_kernel(double *d_img, double *d_img_vector, SamplingPoint *d_fields, uint2 cortImgSize, double2 *d_loc, double *d_gauss, size_t guassKernelWidth, size_t locSize, size_t vecLen, bool rgb) { int globalIndex = blockIdx.x * blockDim.x + threadIdx.x; if (locSize <= globalIndex) return; int channel = globalIndex / (locSize / (rgb ? 3 : 1)); int offset = channel * cortImgSize.x * cortImgSize.y; int index = globalIndex % (locSize / (rgb ? 3 : 1)); int vecOffset = channel * vecLen; double x = d_loc[index].x; double y = d_loc[index].y; int dx = (int)(10 * ((round(x * 10) / 10 - round(x)))); dx < 0 ? 
dx = 10 + dx : dx; int dy = (int)(10 * ((round(y * 10) / 10 - round(y)))); dy < 0 ? dy = 10 + dy : dy; double *kernel = &d_gauss[(dx * 10 + dy) * guassKernelWidth * guassKernelWidth]; int X = (int)round(x) - guassKernelWidth / 2; int Y = (int)round(y) - guassKernelWidth / 2; double value = d_img_vector[vecOffset + d_fields[index]._i]; for (int i = 0; i != guassKernelWidth; ++i) { for (int j = 0; j != guassKernelWidth; ++j) { if (X + j >= 0 && Y + i >= 0 && X + j < cortImgSize.x && Y + i < cortImgSize.y) atomicAdd(&d_img[offset + (Y + i) * cortImgSize.x + X + j], value * kernel[i * guassKernelWidth + j]); } } } __global__ void cort_prepare_kernel(double2 *d_loc, double2 min, float shrink, size_t guassKernelWidth, size_t size) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (size <= index) return; d_loc[index].x += guassKernelWidth - min.x; d_loc[index].x *= shrink; d_loc[index].y += guassKernelWidth - min.y; d_loc[index].y *= shrink; } __global__ void euclidean_distance_kernel(double2 *d_loc, double2 *d_out, size_t size) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (size * size <= index) return; int x = index % size; int y = index / size; double2 a = d_loc[x]; double2 b = d_loc[y]; d_out[index].x = sqrtf(powf((b.x - a.x), 2)); d_out[index].y = sqrtf(powf((b.y - a.y), 2)); } __global__ void scale_theta_flip_y_kernel(double2 *d_loc, double norm, size_t size) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (size <= index) return; d_loc[index].x *= norm; d_loc[index].y *= -1; } __global__ void gauss_kernel(double *gauss100, float sigma, size_t guassKernelWidth) { int index = (blockIdx.x + threadIdx.x * blockDim.x ) * guassKernelWidth * guassKernelWidth; float x = blockIdx.x * 0.1; float y = threadIdx.x * 0.1; float dx = guassKernelWidth / 2 + x; float dy = guassKernelWidth / 2 + y; for (int i = 0; i != guassKernelWidth; ++i) { for (int j = 0; j != guassKernelWidth; ++j) { gauss100[index + i * guassKernelWidth + j] = gauss(sigma, dx - i, dy - j); } } } __global__ void normalise(uchar *d_norm, double *d_image, double *normaliser, size_t size) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (size <= index) return; d_norm[index] = normaliser[index] == 0.0 ? 
0 : (int)(d_image[index] / normaliser[index]); } template <class T> void setPointerToNull(T **d_ptr) { if (*d_ptr != nullptr){ hipFree(*d_ptr); cudaCheckErrors("ERROR"); *d_ptr = nullptr; } } Cortex::~Cortex() { setPointerToNull(&d_leftFields); setPointerToNull(&d_rightFields); setPointerToNull(&d_leftLoc); setPointerToNull(&d_rightLoc); setPointerToNull(&d_gauss); setPointerToNull(&d_leftNorm); setPointerToNull(&d_rightNorm); } int Cortex::cortImage(double *h_imageVector, size_t vecLen, double **d_norm, uchar *h_result, size_t cortImgX, size_t cortImgY, bool rgb, double *d_imageVector, SamplingPoint *d_fields, double2 *d_loc, size_t locSize) { if (!isReady()) return ERRORS::uninitialized; if ((h_imageVector == nullptr && d_imageVector == nullptr) || h_result == nullptr) return ERRORS::invalidArguments; if (cortImgX != _cortImgSize.x || cortImgY != _cortImgSize.y || rgb != _rgb || vecLen != _channels * (_leftCortexSize + _rightCortexSize)) return ERRORS::imageParametersDidNotMatch; double *d_img; hipMalloc((void**)&d_img, _channels * _cortImgSize.x * _cortImgSize.y * sizeof(double)); hipMemset(d_img, 0.0, _channels * _cortImgSize.x * _cortImgSize.y * sizeof(double)); double *_d_imageVector; if (d_imageVector != nullptr) _d_imageVector = d_imageVector; else { hipMalloc((void**)&_d_imageVector, _channels * (_leftCortexSize + _rightCortexSize) * sizeof(double)); hipMemcpy(_d_imageVector, h_imageVector, _channels * (_leftCortexSize + _rightCortexSize) * sizeof(double), hipMemcpyHostToDevice); } hipLaunchKernelGGL(( cort_image_kernel), dim3(ceil(_channels * locSize / 512.0)), dim3(512), 0, 0, d_img, _d_imageVector, d_fields, _cortImgSize, d_loc, d_gauss, _gaussKernelWidth , _channels * locSize, _leftCortexSize + _rightCortexSize, _rgb); //hipDeviceSynchronize(); cudaCheckErrors("ERROR"); if (*d_norm == nullptr) { hipMalloc((void**)d_norm, _channels * _cortImgSize.x * _cortImgSize.y * sizeof(double)); hipMemset(*d_norm, 0.0, _channels * _cortImgSize.x * _cortImgSize.y * sizeof(double)); hipLaunchKernelGGL(( cort_norm_kernel), dim3(ceil(_channels * locSize / 512.0)), dim3(512), 0, 0, *d_norm, d_loc, _cortImgSize, d_gauss, _gaussKernelWidth, _channels * locSize, _rgb); //hipDeviceSynchronize(); cudaCheckErrors("ERROR"); } uchar *d_normalised; hipMalloc((void**)&d_normalised, _channels * _cortImgSize.x * _cortImgSize.y * sizeof(uchar)); hipLaunchKernelGGL(( normalise), dim3(ceil(_channels * _cortImgSize.x * _cortImgSize.y / 512.0)), dim3(512), 0, 0, d_normalised, d_img, *d_norm, _channels * _cortImgSize.x * _cortImgSize.y); hipDeviceSynchronize(); cudaCheckErrors("ERROR"); hipMemcpy(h_result, d_normalised, _channels * _cortImgSize.x * _cortImgSize.y * sizeof(uchar), hipMemcpyDeviceToHost); cudaCheckErrors("ERROR"); hipFree(d_normalised); if (d_imageVector == nullptr) hipFree(_d_imageVector); hipFree(d_img); return 0; } int Cortex::cortImageLeft(double *h_imageVector, size_t vecLen, uchar *h_result, size_t cortImgX, size_t cortImgY, bool rgb, double *d_imageVector) { return cortImage(h_imageVector, vecLen, &d_leftNorm, h_result, cortImgX, cortImgY, rgb, d_imageVector, d_leftFields, d_leftLoc, _leftCortexSize); } int Cortex::cortImageRight(double *h_imageVector, size_t vecLen, uchar *h_result, size_t cortImgX, size_t cortImgY, bool rgb, double *d_imageVector) { return cortImage(h_imageVector, vecLen, &d_rightNorm, h_result, cortImgX, cortImgY, rgb, d_imageVector, d_rightFields, d_rightLoc, _rightCortexSize); } error Cortex::initFromCortexFields(SamplingPoint *h_leftFields, size_t leftSize, 
SamplingPoint *h_rightFields, size_t rightSize) { if (isnan(_shrink) || isnan(_alpha)) return ERRORS::uninitialized; setLeftCortexFields(h_leftFields, leftSize); setRightCortexFields(h_rightFields, rightSize); if (d_leftFields == nullptr || d_rightFields == nullptr) return ERRORS::invalidArguments; setPointerToNull(&d_leftLoc); hipMalloc((void**)&d_leftLoc, _leftCortexSize * sizeof(double2)); hipLaunchKernelGGL(( cort_map_left_kernel), dim3(ceil(_leftCortexSize / 512.0)), dim3(512), 0, 0, d_leftFields, _alpha, d_leftLoc, _leftCortexSize); //hipDeviceSynchronize(); //cudaCheckErrors("ERROR"); setPointerToNull(&d_rightLoc); hipMalloc((void**)&d_rightLoc, _rightCortexSize * sizeof(double2)); hipLaunchKernelGGL(( cort_map_right_kernel), dim3(ceil(_rightCortexSize / 512.0)), dim3(512), 0, 0, d_rightFields, _alpha, d_rightLoc, _rightCortexSize); hipDeviceSynchronize(); cudaCheckErrors("ERROR"); double2 *d_eucl_left; hipMalloc((void**)&d_eucl_left, _leftCortexSize * _leftCortexSize * sizeof(double2)); hipLaunchKernelGGL(( euclidean_distance_kernel), dim3(ceil(_leftCortexSize * _leftCortexSize / 1024.0)), dim3(1024), 0, 0, d_leftLoc, d_eucl_left, _leftCortexSize); //hipDeviceSynchronize(); //cudaCheckErrors("ERROR"); double2 *d_eucl_right; hipMalloc((void**)&d_eucl_right, _rightCortexSize * _rightCortexSize * sizeof(double2)); hipLaunchKernelGGL(( euclidean_distance_kernel), dim3(ceil(_rightCortexSize * _rightCortexSize / 1024.0)), dim3(1024), 0, 0, d_rightLoc, d_eucl_right, _rightCortexSize); hipDeviceSynchronize(); cudaCheckErrors("ERROR"); thrust::device_ptr<double2> d_leftLoc_begin(d_eucl_left); thrust::device_ptr<double2> d_leftLoc_end(d_eucl_left + _leftCortexSize * _leftCortexSize); thrust::device_ptr<double2> d_rightLoc_begin(d_eucl_right); thrust::device_ptr<double2> d_rightLoc_end(d_eucl_right + _rightCortexSize * _rightCortexSize); double2 init; init.x = init.y = 0.0; double2 sum_left = thrust::reduce(d_leftLoc_begin, d_leftLoc_end, init, add_double2()); init.x = init.y = 0.0; double2 sum_right = thrust::reduce(d_rightLoc_begin, d_rightLoc_end, init, add_double2()); double xd = (sum_left.x / (_leftCortexSize * _leftCortexSize) + sum_right.x / (_rightCortexSize * _rightCortexSize)) / 2; double yd = (sum_left.y / (_leftCortexSize * _leftCortexSize) + sum_right.y / (_rightCortexSize * _rightCortexSize)) / 2; hipLaunchKernelGGL(( scale_theta_flip_y_kernel), dim3(ceil(_leftCortexSize / 512.0)), dim3(512), 0, 0, d_leftLoc, yd/xd, _leftCortexSize); //hipDeviceSynchronize(); //cudaCheckErrors("ERROR"); hipLaunchKernelGGL(( scale_theta_flip_y_kernel), dim3(ceil(_rightCortexSize / 512.0)), dim3(512), 0, 0, d_rightLoc, yd/xd, _rightCortexSize); hipDeviceSynchronize(); cudaCheckErrors("ERROR"); thrust::device_ptr<double2> d_l_b(d_leftLoc); thrust::device_ptr<double2> d_l_e(d_leftLoc + _leftCortexSize); init.x = init.y = 10000.0; double2 min_l = thrust::reduce(d_l_b, d_l_e, init, min_vals_double2()); thrust::device_ptr<double2> d_r_b(d_rightLoc); thrust::device_ptr<double2> d_r_e(d_rightLoc + _rightCortexSize); init.x = init.y = 10000.0; double2 min_r = thrust::reduce(d_r_b, d_r_e, init, min_vals_double2()); hipLaunchKernelGGL(( cort_prepare_kernel), dim3(ceil(_leftCortexSize / 512.0)), dim3(512), 0, 0, d_leftLoc, min_l, _shrink, _gaussKernelWidth, _leftCortexSize); //hipDeviceSynchronize(); //cudaCheckErrors("ERROR"); hipLaunchKernelGGL(( cort_prepare_kernel), dim3(ceil(_rightCortexSize / 512.0)), dim3(512), 0, 0, d_rightLoc, min_r, _shrink, _gaussKernelWidth, _rightCortexSize); 
hipDeviceSynchronize(); cudaCheckErrors("ERROR"); init.x = init.y = -10000.0; _cortImgSize.x = thrust::reduce(d_l_b, d_l_e, init, max_vals_double2()).x + _gaussKernelWidth / 2; _cortImgSize.y = thrust::reduce(d_l_b, d_l_e, init, max_vals_double2()).y + _gaussKernelWidth / 2; hipFree(d_eucl_left); hipFree(d_eucl_right); return 0; } void Cortex::gauss100() { setPointerToNull(&d_gauss); setPointerToNull(&d_leftNorm); setPointerToNull(&d_rightNorm); hipMalloc((void**)&d_gauss, 100 * _gaussKernelWidth * _gaussKernelWidth * sizeof(double)); hipLaunchKernelGGL(( gauss_kernel), dim3(10), dim3(10), 0, 0, d_gauss, _gaussSigma, _gaussKernelWidth); hipDeviceSynchronize(); cudaCheckErrors("ERROR"); } bool Cortex::isReady() const { return _leftCortexSize != 0 && _rightCortexSize != 0 && d_leftLoc != nullptr && d_rightLoc != nullptr && _cortImgSize.x != 0 && _cortImgSize.y != 0 && _gaussKernelWidth != 0 && d_gauss != nullptr; } void Cortex::setAlpha(float alpha) { if (alpha == _alpha) return; setPointerToNull(&d_leftLoc); setPointerToNull(&d_rightLoc); setPointerToNull(&d_leftNorm); setPointerToNull(&d_rightNorm); _alpha = alpha; } void Cortex::setShrink(float shrink) { if (shrink == _shrink) return; setPointerToNull(&d_leftLoc); setPointerToNull(&d_rightLoc); setPointerToNull(&d_leftNorm); setPointerToNull(&d_rightNorm); _shrink = shrink; } void Cortex::setRGB(bool rgb) { if (rgb == _rgb) return; _rgb = rgb; _channels = _rgb ? 3 : 1; } void Cortex::setCortImageSize(uint2 cortImgSize) { if (cortImgSize.x == _cortImgSize.x && cortImgSize.y == _cortImgSize.y) return; setPointerToNull(&d_leftNorm); setPointerToNull(&d_rightNorm); _cortImgSize = cortImgSize; } error Cortex::getLeftCortexFields(SamplingPoint *h_leftFields, size_t leftSize) const { return getFromDevice(h_leftFields, leftSize, d_leftFields, _leftCortexSize); } error Cortex::setLeftCortexFields(const SamplingPoint *h_leftFields, const size_t leftSize) { return setOnDevice(h_leftFields, leftSize, &d_leftFields, _leftCortexSize); } error Cortex::getRightCortexFields(SamplingPoint *h_rightFields, size_t rightSize) const { return getFromDevice(h_rightFields, rightSize, d_rightFields, _rightCortexSize); } error Cortex::setRightCortexFields(const SamplingPoint *h_rightFields, size_t rightSize) { return setOnDevice(h_rightFields, rightSize, &d_rightFields, _rightCortexSize); } error Cortex::getLeftCortexLocations(double2 *h_leftLoc, size_t leftSize) const { return getFromDevice(h_leftLoc, leftSize, d_leftLoc, _leftCortexSize); } int Cortex::setLeftCortexLocations(const double2 *h_leftLoc, size_t leftSize) { if (leftSize != _leftCortexSize) return ERRORS::cortexSizeDidNotMatch; int err = setOnDevice(h_leftLoc, leftSize, &d_leftLoc, _leftCortexSize); if (err == 0) { setPointerToNull(&d_leftNorm); } return err; } error Cortex::getRightCortexLocations(double2 *h_rightLoc, size_t rightSize) const { return getFromDevice(h_rightLoc, rightSize, d_rightLoc, _rightCortexSize); } int Cortex::setRightCortexLocations(const double2 *h_rightLoc, size_t rightSize) { if (rightSize != _rightCortexSize) return ERRORS::cortexSizeDidNotMatch; int err = setOnDevice(h_rightLoc, rightSize, &d_rightLoc, _rightCortexSize); if (err == 0) setPointerToNull(&d_rightNorm); return err; } error Cortex::getGauss100( double *h_gauss, size_t kernelWidth, float sigma) const { if (kernelWidth != _gaussKernelWidth || sigma != _gaussSigma) return ERRORS::invalidArguments; hipMemcpy(h_gauss, d_gauss, 100 * _gaussKernelWidth * _gaussKernelWidth * sizeof(double), hipMemcpyDeviceToHost); 
cudaCheckErrors("ERROR"); return 0; } error Cortex::setGauss100(const size_t kernelWidth, const float sigma, double *h_gauss) { if (kernelWidth == 0) return ERRORS::invalidArguments; setPointerToNull(&d_leftNorm); setPointerToNull(&d_rightNorm); _gaussKernelWidth = kernelWidth; _gaussSigma = sigma; if (h_gauss == nullptr) { gauss100(); } else { setPointerToNull(&d_gauss); hipMalloc((void**)&d_gauss, 100 * _gaussKernelWidth * _gaussKernelWidth * sizeof(double)); hipMemcpy(d_gauss, h_gauss, 100 * _gaussKernelWidth * _gaussKernelWidth * sizeof(double), hipMemcpyHostToDevice); cudaCheckErrors("ERROR"); } return 0; } template <class T> error Cortex::getFromDevice(T *h_ptr, const size_t h_size, const T *d_ptr, const size_t d_size) const { if (h_ptr == nullptr || h_size == 0) return ERRORS::invalidArguments; if (h_size != d_size) return ERRORS::cortexSizeDidNotMatch; if (d_ptr == nullptr) return ERRORS::uninitialized; hipMemcpy(h_ptr, d_ptr, sizeof(T) * d_size, hipMemcpyDeviceToHost); cudaCheckErrors("ERROR"); return 0; } template <class T> error Cortex::setOnDevice(const T *h_ptr, size_t h_size, T **d_ptr, size_t &d_size) { if (h_ptr == nullptr || h_size == 0) return ERRORS::invalidArguments; setPointerToNull(d_ptr); hipMalloc((void**)d_ptr, sizeof(T) * h_size); hipMemcpy(*d_ptr, h_ptr, sizeof(T) * h_size, hipMemcpyHostToDevice); d_size = h_size; cudaCheckErrors("ERROR"); return 0; }
d63c7470cc86ba3a9186a720dfec9ad589dbef07.cu
#include "Cortex.cuh" #include <iostream> #include "sm_60_atomic_functions.h" #include "CUDAHelper.cuh" #ifndef M_PI #define M_PI 3.14159265358979323846 #endif struct add_double2 { __device__ double2 operator()(const double2& a, const double2& b) const { double2 r; r.x = a.x + b.x; r.y = a.y + b.y; return r; } }; struct min_vals_double2 { __device__ double2 operator()(const double2& a, const double2& b) const { double2 r; r.x = a.x < b.x ? a.x : b.x ; r.y = a.y < b.y ? a.y : b.y; return r; } }; struct max_vals_double2 { __device__ double2 operator()(const double2& a, const double2& b) const { double2 r; r.x = a.x < b.x ? b.x : a.x ; r.y = a.y < b.y ? b.y : a.y; return r; } }; __device__ double gauss(float sigma, float x, float y, float mean = 0.0) { float norm = sqrtf(x*x + y*y); return exp(-powf((norm - mean), 2) / (2 * powf(sigma, 2))) / sqrtf(2 * M_PI * powf(sigma, 2)); } __global__ void cort_map_left_kernel(SamplingPoint *d_leftFields, float alpha, double2 *d_leftLoc, size_t size) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (size <= index) return; SamplingPoint *point = &d_leftFields[index]; d_leftLoc[index].y = sqrtf(powf(point->_x - alpha, 2) + powf(point->_y, 2)); double theta = atan2(point->_y, point->_x - alpha); d_leftLoc[index].x = theta + (theta < 0 ? M_PI : -M_PI); } __global__ void cort_map_right_kernel(SamplingPoint *d_rightFields, float alpha, double2 *d_rightLoc, size_t size) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (size <= index) return; SamplingPoint *point = &d_rightFields[index]; d_rightLoc[index].y = sqrtf(powf(point->_x + alpha, 2) + powf(point->_y, 2)); d_rightLoc[index].x = atan2(point->_y, point->_x + alpha); } __global__ void cort_norm_kernel(double *d_norm_img, double2 *d_loc, uint2 cortImgSize, double *d_gauss, size_t guassKernelWidth, size_t locSize, bool rgb) { int globalIndex = blockIdx.x * blockDim.x + threadIdx.x; if (locSize <= globalIndex) return; int channel = globalIndex / (locSize / (rgb ? 3 : 1)); int offset = channel * cortImgSize.x * cortImgSize.y; int index = globalIndex % (locSize / (rgb ? 3 : 1)); double x = d_loc[index].x; double y = d_loc[index].y; int dx = (int)(10 * ((round(x * 10) / 10 - round(x)))); dx < 0 ? dx = 10 + dx : dx; int dy = (int)(10 * ((round(y * 10) / 10 - round(y)))); dy < 0 ? dy = 10 + dy : dy; double *kernel = &d_gauss[(dx * 10 + dy) * guassKernelWidth * guassKernelWidth]; int X = (int)round(x) - guassKernelWidth / 2; int Y = (int)round(y) - guassKernelWidth / 2; for (int i = 0; i != guassKernelWidth; ++i) { for (int j = 0; j != guassKernelWidth; ++j) { if (X + j >= 0 && Y + i >= 0 && X + j < cortImgSize.x && Y + i < cortImgSize.y) atomicAdd(&d_norm_img[offset + (Y + i) * cortImgSize.x + X + j], kernel[i * guassKernelWidth + j]); } } } __global__ void cort_image_kernel(double *d_img, double *d_img_vector, SamplingPoint *d_fields, uint2 cortImgSize, double2 *d_loc, double *d_gauss, size_t guassKernelWidth, size_t locSize, size_t vecLen, bool rgb) { int globalIndex = blockIdx.x * blockDim.x + threadIdx.x; if (locSize <= globalIndex) return; int channel = globalIndex / (locSize / (rgb ? 3 : 1)); int offset = channel * cortImgSize.x * cortImgSize.y; int index = globalIndex % (locSize / (rgb ? 3 : 1)); int vecOffset = channel * vecLen; double x = d_loc[index].x; double y = d_loc[index].y; int dx = (int)(10 * ((round(x * 10) / 10 - round(x)))); dx < 0 ? dx = 10 + dx : dx; int dy = (int)(10 * ((round(y * 10) / 10 - round(y)))); dy < 0 ? 
dy = 10 + dy : dy; double *kernel = &d_gauss[(dx * 10 + dy) * guassKernelWidth * guassKernelWidth]; int X = (int)round(x) - guassKernelWidth / 2; int Y = (int)round(y) - guassKernelWidth / 2; double value = d_img_vector[vecOffset + d_fields[index]._i]; for (int i = 0; i != guassKernelWidth; ++i) { for (int j = 0; j != guassKernelWidth; ++j) { if (X + j >= 0 && Y + i >= 0 && X + j < cortImgSize.x && Y + i < cortImgSize.y) atomicAdd(&d_img[offset + (Y + i) * cortImgSize.x + X + j], value * kernel[i * guassKernelWidth + j]); } } } __global__ void cort_prepare_kernel(double2 *d_loc, double2 min, float shrink, size_t guassKernelWidth, size_t size) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (size <= index) return; d_loc[index].x += guassKernelWidth - min.x; d_loc[index].x *= shrink; d_loc[index].y += guassKernelWidth - min.y; d_loc[index].y *= shrink; } __global__ void euclidean_distance_kernel(double2 *d_loc, double2 *d_out, size_t size) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (size * size <= index) return; int x = index % size; int y = index / size; double2 a = d_loc[x]; double2 b = d_loc[y]; d_out[index].x = sqrtf(powf((b.x - a.x), 2)); d_out[index].y = sqrtf(powf((b.y - a.y), 2)); } __global__ void scale_theta_flip_y_kernel(double2 *d_loc, double norm, size_t size) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (size <= index) return; d_loc[index].x *= norm; d_loc[index].y *= -1; } __global__ void gauss_kernel(double *gauss100, float sigma, size_t guassKernelWidth) { int index = (blockIdx.x + threadIdx.x * blockDim.x ) * guassKernelWidth * guassKernelWidth; float x = blockIdx.x * 0.1; float y = threadIdx.x * 0.1; float dx = guassKernelWidth / 2 + x; float dy = guassKernelWidth / 2 + y; for (int i = 0; i != guassKernelWidth; ++i) { for (int j = 0; j != guassKernelWidth; ++j) { gauss100[index + i * guassKernelWidth + j] = gauss(sigma, dx - i, dy - j); } } } __global__ void normalise(uchar *d_norm, double *d_image, double *normaliser, size_t size) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (size <= index) return; d_norm[index] = normaliser[index] == 0.0 ? 
0 : (int)(d_image[index] / normaliser[index]); } template <class T> void setPointerToNull(T **d_ptr) { if (*d_ptr != nullptr){ cudaFree(*d_ptr); cudaCheckErrors("ERROR"); *d_ptr = nullptr; } } Cortex::~Cortex() { setPointerToNull(&d_leftFields); setPointerToNull(&d_rightFields); setPointerToNull(&d_leftLoc); setPointerToNull(&d_rightLoc); setPointerToNull(&d_gauss); setPointerToNull(&d_leftNorm); setPointerToNull(&d_rightNorm); } int Cortex::cortImage(double *h_imageVector, size_t vecLen, double **d_norm, uchar *h_result, size_t cortImgX, size_t cortImgY, bool rgb, double *d_imageVector, SamplingPoint *d_fields, double2 *d_loc, size_t locSize) { if (!isReady()) return ERRORS::uninitialized; if ((h_imageVector == nullptr && d_imageVector == nullptr) || h_result == nullptr) return ERRORS::invalidArguments; if (cortImgX != _cortImgSize.x || cortImgY != _cortImgSize.y || rgb != _rgb || vecLen != _channels * (_leftCortexSize + _rightCortexSize)) return ERRORS::imageParametersDidNotMatch; double *d_img; cudaMalloc((void**)&d_img, _channels * _cortImgSize.x * _cortImgSize.y * sizeof(double)); cudaMemset(d_img, 0.0, _channels * _cortImgSize.x * _cortImgSize.y * sizeof(double)); double *_d_imageVector; if (d_imageVector != nullptr) _d_imageVector = d_imageVector; else { cudaMalloc((void**)&_d_imageVector, _channels * (_leftCortexSize + _rightCortexSize) * sizeof(double)); cudaMemcpy(_d_imageVector, h_imageVector, _channels * (_leftCortexSize + _rightCortexSize) * sizeof(double), cudaMemcpyHostToDevice); } cort_image_kernel<<<ceil(_channels * locSize / 512.0), 512>>>(d_img, _d_imageVector, d_fields, _cortImgSize, d_loc, d_gauss, _gaussKernelWidth , _channels * locSize, _leftCortexSize + _rightCortexSize, _rgb); //cudaDeviceSynchronize(); cudaCheckErrors("ERROR"); if (*d_norm == nullptr) { cudaMalloc((void**)d_norm, _channels * _cortImgSize.x * _cortImgSize.y * sizeof(double)); cudaMemset(*d_norm, 0.0, _channels * _cortImgSize.x * _cortImgSize.y * sizeof(double)); cort_norm_kernel<<<ceil(_channels * locSize / 512.0), 512>>>(*d_norm, d_loc, _cortImgSize, d_gauss, _gaussKernelWidth, _channels * locSize, _rgb); //cudaDeviceSynchronize(); cudaCheckErrors("ERROR"); } uchar *d_normalised; cudaMalloc((void**)&d_normalised, _channels * _cortImgSize.x * _cortImgSize.y * sizeof(uchar)); normalise<<<ceil(_channels * _cortImgSize.x * _cortImgSize.y / 512.0), 512>>>( d_normalised, d_img, *d_norm, _channels * _cortImgSize.x * _cortImgSize.y); cudaDeviceSynchronize(); cudaCheckErrors("ERROR"); cudaMemcpy(h_result, d_normalised, _channels * _cortImgSize.x * _cortImgSize.y * sizeof(uchar), cudaMemcpyDeviceToHost); cudaCheckErrors("ERROR"); cudaFree(d_normalised); if (d_imageVector == nullptr) cudaFree(_d_imageVector); cudaFree(d_img); return 0; } int Cortex::cortImageLeft(double *h_imageVector, size_t vecLen, uchar *h_result, size_t cortImgX, size_t cortImgY, bool rgb, double *d_imageVector) { return cortImage(h_imageVector, vecLen, &d_leftNorm, h_result, cortImgX, cortImgY, rgb, d_imageVector, d_leftFields, d_leftLoc, _leftCortexSize); } int Cortex::cortImageRight(double *h_imageVector, size_t vecLen, uchar *h_result, size_t cortImgX, size_t cortImgY, bool rgb, double *d_imageVector) { return cortImage(h_imageVector, vecLen, &d_rightNorm, h_result, cortImgX, cortImgY, rgb, d_imageVector, d_rightFields, d_rightLoc, _rightCortexSize); } error Cortex::initFromCortexFields(SamplingPoint *h_leftFields, size_t leftSize, SamplingPoint *h_rightFields, size_t rightSize) { if (isnan(_shrink) || isnan(_alpha)) return 
ERRORS::uninitialized; setLeftCortexFields(h_leftFields, leftSize); setRightCortexFields(h_rightFields, rightSize); if (d_leftFields == nullptr || d_rightFields == nullptr) return ERRORS::invalidArguments; setPointerToNull(&d_leftLoc); cudaMalloc((void**)&d_leftLoc, _leftCortexSize * sizeof(double2)); cort_map_left_kernel<<<ceil(_leftCortexSize / 512.0), 512>>>(d_leftFields, _alpha, d_leftLoc, _leftCortexSize); //cudaDeviceSynchronize(); //cudaCheckErrors("ERROR"); setPointerToNull(&d_rightLoc); cudaMalloc((void**)&d_rightLoc, _rightCortexSize * sizeof(double2)); cort_map_right_kernel<<<ceil(_rightCortexSize / 512.0), 512>>>(d_rightFields, _alpha, d_rightLoc, _rightCortexSize); cudaDeviceSynchronize(); cudaCheckErrors("ERROR"); double2 *d_eucl_left; cudaMalloc((void**)&d_eucl_left, _leftCortexSize * _leftCortexSize * sizeof(double2)); euclidean_distance_kernel<<<ceil(_leftCortexSize * _leftCortexSize / 1024.0), 1024>>>( d_leftLoc, d_eucl_left, _leftCortexSize); //cudaDeviceSynchronize(); //cudaCheckErrors("ERROR"); double2 *d_eucl_right; cudaMalloc((void**)&d_eucl_right, _rightCortexSize * _rightCortexSize * sizeof(double2)); euclidean_distance_kernel<<<ceil(_rightCortexSize * _rightCortexSize / 1024.0), 1024>>>( d_rightLoc, d_eucl_right, _rightCortexSize); cudaDeviceSynchronize(); cudaCheckErrors("ERROR"); thrust::device_ptr<double2> d_leftLoc_begin(d_eucl_left); thrust::device_ptr<double2> d_leftLoc_end(d_eucl_left + _leftCortexSize * _leftCortexSize); thrust::device_ptr<double2> d_rightLoc_begin(d_eucl_right); thrust::device_ptr<double2> d_rightLoc_end(d_eucl_right + _rightCortexSize * _rightCortexSize); double2 init; init.x = init.y = 0.0; double2 sum_left = thrust::reduce(d_leftLoc_begin, d_leftLoc_end, init, add_double2()); init.x = init.y = 0.0; double2 sum_right = thrust::reduce(d_rightLoc_begin, d_rightLoc_end, init, add_double2()); double xd = (sum_left.x / (_leftCortexSize * _leftCortexSize) + sum_right.x / (_rightCortexSize * _rightCortexSize)) / 2; double yd = (sum_left.y / (_leftCortexSize * _leftCortexSize) + sum_right.y / (_rightCortexSize * _rightCortexSize)) / 2; scale_theta_flip_y_kernel<<<ceil(_leftCortexSize / 512.0), 512>>>(d_leftLoc, yd/xd, _leftCortexSize); //cudaDeviceSynchronize(); //cudaCheckErrors("ERROR"); scale_theta_flip_y_kernel<<<ceil(_rightCortexSize / 512.0), 512>>>(d_rightLoc, yd/xd, _rightCortexSize); cudaDeviceSynchronize(); cudaCheckErrors("ERROR"); thrust::device_ptr<double2> d_l_b(d_leftLoc); thrust::device_ptr<double2> d_l_e(d_leftLoc + _leftCortexSize); init.x = init.y = 10000.0; double2 min_l = thrust::reduce(d_l_b, d_l_e, init, min_vals_double2()); thrust::device_ptr<double2> d_r_b(d_rightLoc); thrust::device_ptr<double2> d_r_e(d_rightLoc + _rightCortexSize); init.x = init.y = 10000.0; double2 min_r = thrust::reduce(d_r_b, d_r_e, init, min_vals_double2()); cort_prepare_kernel<<<ceil(_leftCortexSize / 512.0), 512>>>( d_leftLoc, min_l, _shrink, _gaussKernelWidth, _leftCortexSize); //cudaDeviceSynchronize(); //cudaCheckErrors("ERROR"); cort_prepare_kernel<<<ceil(_rightCortexSize / 512.0), 512>>>( d_rightLoc, min_r, _shrink, _gaussKernelWidth, _rightCortexSize); cudaDeviceSynchronize(); cudaCheckErrors("ERROR"); init.x = init.y = -10000.0; _cortImgSize.x = thrust::reduce(d_l_b, d_l_e, init, max_vals_double2()).x + _gaussKernelWidth / 2; _cortImgSize.y = thrust::reduce(d_l_b, d_l_e, init, max_vals_double2()).y + _gaussKernelWidth / 2; cudaFree(d_eucl_left); cudaFree(d_eucl_right); return 0; } void Cortex::gauss100() { setPointerToNull(&d_gauss); 
setPointerToNull(&d_leftNorm); setPointerToNull(&d_rightNorm); cudaMalloc((void**)&d_gauss, 100 * _gaussKernelWidth * _gaussKernelWidth * sizeof(double)); gauss_kernel<<<10, 10>>>(d_gauss, _gaussSigma, _gaussKernelWidth); cudaDeviceSynchronize(); cudaCheckErrors("ERROR"); } bool Cortex::isReady() const { return _leftCortexSize != 0 && _rightCortexSize != 0 && d_leftLoc != nullptr && d_rightLoc != nullptr && _cortImgSize.x != 0 && _cortImgSize.y != 0 && _gaussKernelWidth != 0 && d_gauss != nullptr; } void Cortex::setAlpha(float alpha) { if (alpha == _alpha) return; setPointerToNull(&d_leftLoc); setPointerToNull(&d_rightLoc); setPointerToNull(&d_leftNorm); setPointerToNull(&d_rightNorm); _alpha = alpha; } void Cortex::setShrink(float shrink) { if (shrink == _shrink) return; setPointerToNull(&d_leftLoc); setPointerToNull(&d_rightLoc); setPointerToNull(&d_leftNorm); setPointerToNull(&d_rightNorm); _shrink = shrink; } void Cortex::setRGB(bool rgb) { if (rgb == _rgb) return; _rgb = rgb; _channels = _rgb ? 3 : 1; } void Cortex::setCortImageSize(uint2 cortImgSize) { if (cortImgSize.x == _cortImgSize.x && cortImgSize.y == _cortImgSize.y) return; setPointerToNull(&d_leftNorm); setPointerToNull(&d_rightNorm); _cortImgSize = cortImgSize; } error Cortex::getLeftCortexFields(SamplingPoint *h_leftFields, size_t leftSize) const { return getFromDevice(h_leftFields, leftSize, d_leftFields, _leftCortexSize); } error Cortex::setLeftCortexFields(const SamplingPoint *h_leftFields, const size_t leftSize) { return setOnDevice(h_leftFields, leftSize, &d_leftFields, _leftCortexSize); } error Cortex::getRightCortexFields(SamplingPoint *h_rightFields, size_t rightSize) const { return getFromDevice(h_rightFields, rightSize, d_rightFields, _rightCortexSize); } error Cortex::setRightCortexFields(const SamplingPoint *h_rightFields, size_t rightSize) { return setOnDevice(h_rightFields, rightSize, &d_rightFields, _rightCortexSize); } error Cortex::getLeftCortexLocations(double2 *h_leftLoc, size_t leftSize) const { return getFromDevice(h_leftLoc, leftSize, d_leftLoc, _leftCortexSize); } int Cortex::setLeftCortexLocations(const double2 *h_leftLoc, size_t leftSize) { if (leftSize != _leftCortexSize) return ERRORS::cortexSizeDidNotMatch; int err = setOnDevice(h_leftLoc, leftSize, &d_leftLoc, _leftCortexSize); if (err == 0) { setPointerToNull(&d_leftNorm); } return err; } error Cortex::getRightCortexLocations(double2 *h_rightLoc, size_t rightSize) const { return getFromDevice(h_rightLoc, rightSize, d_rightLoc, _rightCortexSize); } int Cortex::setRightCortexLocations(const double2 *h_rightLoc, size_t rightSize) { if (rightSize != _rightCortexSize) return ERRORS::cortexSizeDidNotMatch; int err = setOnDevice(h_rightLoc, rightSize, &d_rightLoc, _rightCortexSize); if (err == 0) setPointerToNull(&d_rightNorm); return err; } error Cortex::getGauss100( double *h_gauss, size_t kernelWidth, float sigma) const { if (kernelWidth != _gaussKernelWidth || sigma != _gaussSigma) return ERRORS::invalidArguments; cudaMemcpy(h_gauss, d_gauss, 100 * _gaussKernelWidth * _gaussKernelWidth * sizeof(double), cudaMemcpyDeviceToHost); cudaCheckErrors("ERROR"); return 0; } error Cortex::setGauss100(const size_t kernelWidth, const float sigma, double *h_gauss) { if (kernelWidth == 0) return ERRORS::invalidArguments; setPointerToNull(&d_leftNorm); setPointerToNull(&d_rightNorm); _gaussKernelWidth = kernelWidth; _gaussSigma = sigma; if (h_gauss == nullptr) { gauss100(); } else { setPointerToNull(&d_gauss); cudaMalloc((void**)&d_gauss, 100 * _gaussKernelWidth 
* _gaussKernelWidth * sizeof(double)); cudaMemcpy(d_gauss, h_gauss, 100 * _gaussKernelWidth * _gaussKernelWidth * sizeof(double), cudaMemcpyHostToDevice); cudaCheckErrors("ERROR"); } return 0; } template <class T> error Cortex::getFromDevice(T *h_ptr, const size_t h_size, const T *d_ptr, const size_t d_size) const { if (h_ptr == nullptr || h_size == 0) return ERRORS::invalidArguments; if (h_size != d_size) return ERRORS::cortexSizeDidNotMatch; if (d_ptr == nullptr) return ERRORS::uninitialized; cudaMemcpy(h_ptr, d_ptr, sizeof(T) * d_size, cudaMemcpyDeviceToHost); cudaCheckErrors("ERROR"); return 0; } template <class T> error Cortex::setOnDevice(const T *h_ptr, size_t h_size, T **d_ptr, size_t &d_size) { if (h_ptr == nullptr || h_size == 0) return ERRORS::invalidArguments; setPointerToNull(d_ptr); cudaMalloc((void**)d_ptr, sizeof(T) * h_size); cudaMemcpy(*d_ptr, h_ptr, sizeof(T) * h_size, cudaMemcpyHostToDevice); d_size = h_size; cudaCheckErrors("ERROR"); return 0; }
5dfd1c35650d77bee435b47a4baa966ff6028544.hip
// !!! This is a file automatically generated by hipify!!! /** * Copyright 2015-2023 by XGBoost Contributors * \file regression_obj.cu * \brief Definition of single-value regression and classification objectives. * \author Tianqi Chen, Kailong Chen */ #include <dmlc/omp.h> #include <algorithm> #include <cmath> #include <cstdint> // std::int32_t #include <memory> #include <vector> #include "../common/common.h" #include "../common/linalg_op.h" #include "../common/numeric.h" // Reduce #include "../common/pseudo_huber.h" #include "../common/stats.h" #include "../common/threading_utils.h" #include "../common/transform.h" #include "../tree/fit_stump.h" // FitStump #include "./regression_loss.h" #include "adaptive.h" #include "xgboost/base.h" #include "xgboost/context.h" #include "xgboost/data.h" // MetaInfo #include "xgboost/host_device_vector.h" #include "xgboost/json.h" #include "xgboost/linalg.h" #include "xgboost/logging.h" #include "xgboost/objective.h" // ObjFunction #include "xgboost/parameter.h" #include "xgboost/span.h" #include "xgboost/tree_model.h" // RegTree #if defined(XGBOOST_USE_CUDA) #include "../common/device_helpers.cuh" #include "../common/linalg_op.cuh" #endif // defined(XGBOOST_USE_CUDA) namespace xgboost { namespace obj { namespace { void CheckInitInputs(MetaInfo const& info) { CHECK_EQ(info.labels.Shape(0), info.num_row_) << "Invalid shape of labels."; if (!info.weights_.Empty()) { CHECK_EQ(info.weights_.Size(), info.num_row_) << "Number of weights should be equal to number of data points."; } } void CheckRegInputs(MetaInfo const& info, HostDeviceVector<bst_float> const& preds) { CheckInitInputs(info); CHECK_EQ(info.labels.Size(), preds.Size()) << "Invalid shape of labels."; } } // anonymous namespace class RegInitEstimation : public ObjFunction { void InitEstimation(MetaInfo const& info, linalg::Tensor<float, 1>* base_score) const override { CheckInitInputs(info); // Avoid altering any state in child objective. HostDeviceVector<float> dummy_predt(info.labels.Size(), 0.0f, this->ctx_->gpu_id); HostDeviceVector<GradientPair> gpair(info.labels.Size(), GradientPair{}, this->ctx_->gpu_id); Json config{Object{}}; this->SaveConfig(&config); std::unique_ptr<ObjFunction> new_obj{ ObjFunction::Create(get<String const>(config["name"]), this->ctx_)}; new_obj->LoadConfig(config); new_obj->GetGradient(dummy_predt, info, 0, &gpair); bst_target_t n_targets = this->Targets(info); linalg::Vector<float> leaf_weight; tree::FitStump(this->ctx_, gpair, n_targets, &leaf_weight); // workaround, we don't support multi-target due to binary model serialization for // base margin. 
common::Mean(this->ctx_, leaf_weight, base_score); this->PredTransform(base_score->Data()); } }; #if defined(XGBOOST_USE_CUDA) DMLC_REGISTRY_FILE_TAG(regression_obj_gpu); #endif // defined(XGBOOST_USE_CUDA) struct RegLossParam : public XGBoostParameter<RegLossParam> { float scale_pos_weight; // declare parameters DMLC_DECLARE_PARAMETER(RegLossParam) { DMLC_DECLARE_FIELD(scale_pos_weight).set_default(1.0f).set_lower_bound(0.0f) .describe("Scale the weight of positive examples by this factor"); } }; template<typename Loss> class RegLossObj : public RegInitEstimation { protected: HostDeviceVector<float> additional_input_; public: // 0 - label_correct flag, 1 - scale_pos_weight, 2 - is_null_weight RegLossObj(): additional_input_(3) {} void Configure(const std::vector<std::pair<std::string, std::string> >& args) override { param_.UpdateAllowUnknown(args); } ObjInfo Task() const override { return Loss::Info(); } bst_target_t Targets(MetaInfo const& info) const override { // Multi-target regression. return ::max(static_cast<size_t>(1), info.labels.Shape(1)); } void GetGradient(const HostDeviceVector<bst_float>& preds, const MetaInfo &info, int, HostDeviceVector<GradientPair>* out_gpair) override { CheckRegInputs(info, preds); size_t const ndata = preds.Size(); out_gpair->Resize(ndata); auto device = ctx_->gpu_id; additional_input_.HostVector().begin()[0] = 1; // Fill the label_correct flag bool is_null_weight = info.weights_.Size() == 0; auto scale_pos_weight = param_.scale_pos_weight; additional_input_.HostVector().begin()[1] = scale_pos_weight; additional_input_.HostVector().begin()[2] = is_null_weight; const size_t nthreads = ctx_->Threads(); bool on_device = device >= 0; // On CPU we run the transformation each thread processing a contigious block of data // for better performance. const size_t n_data_blocks = ::max(static_cast<size_t>(1), (on_device ? ndata : nthreads)); const size_t block_size = ndata / n_data_blocks + !!(ndata % n_data_blocks); auto const n_targets = ::max(info.labels.Shape(1), static_cast<size_t>(1)); common::Transform<>::Init( [block_size, ndata, n_targets] XGBOOST_DEVICE( size_t data_block_idx, common::Span<float> _additional_input, common::Span<GradientPair> _out_gpair, common::Span<const bst_float> _preds, common::Span<const bst_float> _labels, common::Span<const bst_float> _weights) { const bst_float* preds_ptr = _preds.data(); const bst_float* labels_ptr = _labels.data(); const bst_float* weights_ptr = _weights.data(); GradientPair* out_gpair_ptr = _out_gpair.data(); const size_t begin = data_block_idx*block_size; const size_t end = ::min(ndata, begin + block_size); const float _scale_pos_weight = _additional_input[1]; const bool _is_null_weight = _additional_input[2]; for (size_t idx = begin; idx < end; ++idx) { bst_float p = Loss::PredTransform(preds_ptr[idx]); bst_float w = _is_null_weight ? 1.0f : weights_ptr[idx / n_targets]; bst_float label = labels_ptr[idx]; if (label == 1.0f) { w *= _scale_pos_weight; } if (!Loss::CheckLabel(label)) { // If there is an incorrect label, the host code will know. 
_additional_input[0] = 0; } out_gpair_ptr[idx] = GradientPair(Loss::FirstOrderGradient(p, label) * w, Loss::SecondOrderGradient(p, label) * w); } }, common::Range{0, static_cast<int64_t>(n_data_blocks)}, nthreads, device) .Eval(&additional_input_, out_gpair, &preds, info.labels.Data(), &info.weights_); auto const flag = additional_input_.HostVector().begin()[0]; if (flag == 0) { LOG(FATAL) << Loss::LabelErrorMsg(); } } public: const char* DefaultEvalMetric() const override { return Loss::DefaultEvalMetric(); } void PredTransform(HostDeviceVector<float> *io_preds) const override { common::Transform<>::Init( [] XGBOOST_DEVICE(size_t _idx, common::Span<float> _preds) { _preds[_idx] = Loss::PredTransform(_preds[_idx]); }, common::Range{0, static_cast<int64_t>(io_preds->Size())}, this->ctx_->Threads(), io_preds->DeviceIdx()) .Eval(io_preds); } float ProbToMargin(float base_score) const override { return Loss::ProbToMargin(base_score); } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String(Loss::Name()); out["reg_loss_param"] = ToJson(param_); } void LoadConfig(Json const& in) override { FromJson(in["reg_loss_param"], &param_); } protected: RegLossParam param_; }; // register the objective functions DMLC_REGISTER_PARAMETER(RegLossParam); XGBOOST_REGISTER_OBJECTIVE(SquaredLossRegression, LinearSquareLoss::Name()) .describe("Regression with squared error.") .set_body([]() { return new RegLossObj<LinearSquareLoss>(); }); XGBOOST_REGISTER_OBJECTIVE(SquareLogError, SquaredLogError::Name()) .describe("Regression with root mean squared logarithmic error.") .set_body([]() { return new RegLossObj<SquaredLogError>(); }); XGBOOST_REGISTER_OBJECTIVE(LogisticRegression, LogisticRegression::Name()) .describe("Logistic regression for probability regression task.") .set_body([]() { return new RegLossObj<LogisticRegression>(); }); XGBOOST_REGISTER_OBJECTIVE(LogisticClassification, LogisticClassification::Name()) .describe("Logistic regression for binary classification task.") .set_body([]() { return new RegLossObj<LogisticClassification>(); }); XGBOOST_REGISTER_OBJECTIVE(LogisticRaw, LogisticRaw::Name()) .describe("Logistic regression for classification, output score " "before logistic transformation.") .set_body([]() { return new RegLossObj<LogisticRaw>(); }); // Deprecated functions XGBOOST_REGISTER_OBJECTIVE(LinearRegression, "reg:linear") .describe("Regression with squared error.") .set_body([]() { LOG(WARNING) << "reg:linear is now deprecated in favor of reg:squarederror."; return new RegLossObj<LinearSquareLoss>(); }); // End deprecated class PseudoHuberRegression : public RegInitEstimation { PesudoHuberParam param_; public: void Configure(Args const& args) override { param_.UpdateAllowUnknown(args); } ObjInfo Task() const override { return ObjInfo::kRegression; } bst_target_t Targets(MetaInfo const& info) const override { return ::max(static_cast<size_t>(1), info.labels.Shape(1)); } void GetGradient(HostDeviceVector<bst_float> const& preds, const MetaInfo& info, int /*iter*/, HostDeviceVector<GradientPair>* out_gpair) override { CheckRegInputs(info, preds); auto slope = param_.huber_slope; CHECK_NE(slope, 0.0) << "slope for pseudo huber cannot be 0."; auto labels = info.labels.View(ctx_->gpu_id); out_gpair->SetDevice(ctx_->gpu_id); out_gpair->Resize(info.labels.Size()); auto gpair = linalg::MakeVec(out_gpair); preds.SetDevice(ctx_->gpu_id); auto predt = linalg::MakeVec(&preds); info.weights_.SetDevice(ctx_->gpu_id); common::OptionalWeights weight{ctx_->IsCPU() ? 
info.weights_.ConstHostSpan() : info.weights_.ConstDeviceSpan()}; linalg::ElementWiseKernel(ctx_, labels, [=] XGBOOST_DEVICE(size_t i, float const y) mutable { auto sample_id = std::get<0>(linalg::UnravelIndex(i, labels.Shape())); const float z = predt(i) - y; const float scale_sqrt = std::sqrt(1 + common::Sqr(z) / common::Sqr(slope)); float grad = z / scale_sqrt; auto scale = common::Sqr(slope) + common::Sqr(z); float hess = common::Sqr(slope) / (scale * scale_sqrt); auto w = weight[sample_id]; gpair(i) = {grad * w, hess * w}; }); } const char* DefaultEvalMetric() const override { return "mphe"; } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String("reg:pseudohubererror"); out["pseudo_huber_param"] = ToJson(param_); } void LoadConfig(Json const& in) override { auto const& config = get<Object const>(in); if (config.find("pseudo_huber_param") == config.cend()) { // The parameter is added in 1.6. return; } FromJson(in["pseudo_huber_param"], &param_); } }; XGBOOST_REGISTER_OBJECTIVE(PseudoHuberRegression, "reg:pseudohubererror") .describe("Regression Pseudo Huber error.") .set_body([]() { return new PseudoHuberRegression(); }); // declare parameter struct PoissonRegressionParam : public XGBoostParameter<PoissonRegressionParam> { float max_delta_step; DMLC_DECLARE_PARAMETER(PoissonRegressionParam) { DMLC_DECLARE_FIELD(max_delta_step).set_lower_bound(0.0f).set_default(0.7f) .describe("Maximum delta step we allow each weight estimation to be." \ " This parameter is required for possion regression."); } }; // poisson regression for count class PoissonRegression : public RegInitEstimation { public: // declare functions void Configure(const std::vector<std::pair<std::string, std::string> >& args) override { param_.UpdateAllowUnknown(args); } ObjInfo Task() const override { return ObjInfo::kRegression; } void GetGradient(const HostDeviceVector<bst_float>& preds, const MetaInfo &info, int, HostDeviceVector<GradientPair> *out_gpair) override { CHECK_NE(info.labels.Size(), 0U) << "label set cannot be empty"; CHECK_EQ(preds.Size(), info.labels.Size()) << "labels are not correctly provided"; size_t const ndata = preds.Size(); out_gpair->Resize(ndata); auto device = ctx_->gpu_id; label_correct_.Resize(1); label_correct_.Fill(1); bool is_null_weight = info.weights_.Size() == 0; if (!is_null_weight) { CHECK_EQ(info.weights_.Size(), ndata) << "Number of weights should be equal to number of data points."; } bst_float max_delta_step = param_.max_delta_step; common::Transform<>::Init( [=] XGBOOST_DEVICE(size_t _idx, common::Span<int> _label_correct, common::Span<GradientPair> _out_gpair, common::Span<const bst_float> _preds, common::Span<const bst_float> _labels, common::Span<const bst_float> _weights) { bst_float p = _preds[_idx]; bst_float w = is_null_weight ? 
1.0f : _weights[_idx]; bst_float y = _labels[_idx]; if (y < 0.0f) { _label_correct[0] = 0; } _out_gpair[_idx] = GradientPair{(expf(p) - y) * w, expf(p + max_delta_step) * w}; }, common::Range{0, static_cast<int64_t>(ndata)}, this->ctx_->Threads(), device).Eval( &label_correct_, out_gpair, &preds, info.labels.Data(), &info.weights_); // copy "label correct" flags back to host std::vector<int>& label_correct_h = label_correct_.HostVector(); for (auto const flag : label_correct_h) { if (flag == 0) { LOG(FATAL) << "PoissonRegression: label must be nonnegative"; } } } void PredTransform(HostDeviceVector<bst_float> *io_preds) const override { common::Transform<>::Init( [] XGBOOST_DEVICE(size_t _idx, common::Span<bst_float> _preds) { _preds[_idx] = expf(_preds[_idx]); }, common::Range{0, static_cast<int64_t>(io_preds->Size())}, this->ctx_->Threads(), io_preds->DeviceIdx()) .Eval(io_preds); } void EvalTransform(HostDeviceVector<bst_float> *io_preds) override { PredTransform(io_preds); } bst_float ProbToMargin(bst_float base_score) const override { return ::log(base_score); } const char* DefaultEvalMetric() const override { return "poisson-nloglik"; } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String("count:poisson"); out["poisson_regression_param"] = ToJson(param_); } void LoadConfig(Json const& in) override { FromJson(in["poisson_regression_param"], &param_); } private: PoissonRegressionParam param_; HostDeviceVector<int> label_correct_; }; // register the objective functions DMLC_REGISTER_PARAMETER(PoissonRegressionParam); XGBOOST_REGISTER_OBJECTIVE(PoissonRegression, "count:poisson") .describe("Poisson regression for count data.") .set_body([]() { return new PoissonRegression(); }); // cox regression for survival data (negative values mean they are censored) class CoxRegression : public RegInitEstimation { public: void Configure(Args const&) override {} ObjInfo Task() const override { return ObjInfo::kRegression; } void GetGradient(const HostDeviceVector<bst_float>& preds, const MetaInfo &info, int, HostDeviceVector<GradientPair> *out_gpair) override { CHECK_NE(info.labels.Size(), 0U) << "label set cannot be empty"; CHECK_EQ(preds.Size(), info.labels.Size()) << "labels are not correctly provided"; const auto& preds_h = preds.HostVector(); out_gpair->Resize(preds_h.size()); auto& gpair = out_gpair->HostVector(); const std::vector<size_t> &label_order = info.LabelAbsSort(); const omp_ulong ndata = static_cast<omp_ulong>(preds_h.size()); // NOLINT(*) const bool is_null_weight = info.weights_.Size() == 0; if (!is_null_weight) { CHECK_EQ(info.weights_.Size(), ndata) << "Number of weights should be equal to number of data points."; } // pre-compute a sum double exp_p_sum = 0; // we use double because we might need the precision with large datasets for (omp_ulong i = 0; i < ndata; ++i) { exp_p_sum += ::exp(preds_h[label_order[i]]); } // start calculating grad and hess const auto& labels = info.labels.HostView(); double r_k = 0; double s_k = 0; double last_exp_p = 0.0; double last_abs_y = 0.0; double accumulated_sum = 0; for (omp_ulong i = 0; i < ndata; ++i) { // NOLINT(*) const size_t ind = label_order[i]; const double p = preds_h[ind]; const double exp_p = ::exp(p); const double w = info.GetWeight(ind); const double y = labels(ind); const double abs_y = std::abs(y); // only update the denominator after we move forward in time (labels are sorted) // this is Breslow's method for ties accumulated_sum += last_exp_p; if (last_abs_y < abs_y) { exp_p_sum -= 
accumulated_sum; accumulated_sum = 0; } else { CHECK(last_abs_y <= abs_y) << "CoxRegression: labels must be in sorted order, " << "MetaInfo::LabelArgsort failed!"; } if (y > 0) { r_k += 1.0/exp_p_sum; s_k += 1.0/(exp_p_sum*exp_p_sum); } const double grad = exp_p*r_k - static_cast<bst_float>(y > 0); const double hess = exp_p*r_k - exp_p*exp_p * s_k; gpair.at(ind) = GradientPair(grad * w, hess * w); last_abs_y = abs_y; last_exp_p = exp_p; } } void PredTransform(HostDeviceVector<bst_float> *io_preds) const override { std::vector<bst_float> &preds = io_preds->HostVector(); const long ndata = static_cast<long>(preds.size()); // NOLINT(*) common::ParallelFor(ndata, ctx_->Threads(), [&](long j) { // NOLINT(*) preds[j] = ::exp(preds[j]); }); } void EvalTransform(HostDeviceVector<bst_float> *io_preds) override { PredTransform(io_preds); } bst_float ProbToMargin(bst_float base_score) const override { return ::log(base_score); } const char* DefaultEvalMetric() const override { return "cox-nloglik"; } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String("survival:cox"); } void LoadConfig(Json const&) override {} }; // register the objective function XGBOOST_REGISTER_OBJECTIVE(CoxRegression, "survival:cox") .describe("Cox regression for censored survival data (negative labels are considered censored).") .set_body([]() { return new CoxRegression(); }); // gamma regression class GammaRegression : public RegInitEstimation { public: void Configure(Args const&) override {} ObjInfo Task() const override { return ObjInfo::kRegression; } void GetGradient(const HostDeviceVector<bst_float> &preds, const MetaInfo &info, int, HostDeviceVector<GradientPair> *out_gpair) override { CHECK_NE(info.labels.Size(), 0U) << "label set cannot be empty"; CHECK_EQ(preds.Size(), info.labels.Size()) << "labels are not correctly provided"; const size_t ndata = preds.Size(); auto device = ctx_->gpu_id; out_gpair->Resize(ndata); label_correct_.Resize(1); label_correct_.Fill(1); const bool is_null_weight = info.weights_.Size() == 0; if (!is_null_weight) { CHECK_EQ(info.weights_.Size(), ndata) << "Number of weights should be equal to number of data points."; } common::Transform<>::Init( [=] XGBOOST_DEVICE(size_t _idx, common::Span<int> _label_correct, common::Span<GradientPair> _out_gpair, common::Span<const bst_float> _preds, common::Span<const bst_float> _labels, common::Span<const bst_float> _weights) { bst_float p = _preds[_idx]; bst_float w = is_null_weight ? 
1.0f : _weights[_idx]; bst_float y = _labels[_idx]; if (y <= 0.0f) { _label_correct[0] = 0; } _out_gpair[_idx] = GradientPair((1 - y / expf(p)) * w, y / expf(p) * w); }, common::Range{0, static_cast<int64_t>(ndata)}, this->ctx_->Threads(), device).Eval( &label_correct_, out_gpair, &preds, info.labels.Data(), &info.weights_); // copy "label correct" flags back to host std::vector<int>& label_correct_h = label_correct_.HostVector(); for (auto const flag : label_correct_h) { if (flag == 0) { LOG(FATAL) << "GammaRegression: label must be positive."; } } } void PredTransform(HostDeviceVector<bst_float> *io_preds) const override { common::Transform<>::Init( [] XGBOOST_DEVICE(size_t _idx, common::Span<bst_float> _preds) { _preds[_idx] = expf(_preds[_idx]); }, common::Range{0, static_cast<int64_t>(io_preds->Size())}, this->ctx_->Threads(), io_preds->DeviceIdx()) .Eval(io_preds); } void EvalTransform(HostDeviceVector<bst_float> *io_preds) override { PredTransform(io_preds); } bst_float ProbToMargin(bst_float base_score) const override { return ::log(base_score); } const char* DefaultEvalMetric() const override { return "gamma-nloglik"; } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String("reg:gamma"); } void LoadConfig(Json const&) override {} private: HostDeviceVector<int> label_correct_; }; // register the objective functions XGBOOST_REGISTER_OBJECTIVE(GammaRegression, "reg:gamma") .describe("Gamma regression for severity data.") .set_body([]() { return new GammaRegression(); }); // declare parameter struct TweedieRegressionParam : public XGBoostParameter<TweedieRegressionParam> { float tweedie_variance_power; DMLC_DECLARE_PARAMETER(TweedieRegressionParam) { DMLC_DECLARE_FIELD(tweedie_variance_power).set_range(1.0f, 2.0f).set_default(1.5f) .describe("Tweedie variance power. Must be between in range [1, 2)."); } }; // tweedie regression class TweedieRegression : public RegInitEstimation { public: // declare functions void Configure(const std::vector<std::pair<std::string, std::string> >& args) override { param_.UpdateAllowUnknown(args); std::ostringstream os; os << "tweedie-nloglik@" << param_.tweedie_variance_power; metric_ = os.str(); } ObjInfo Task() const override { return ObjInfo::kRegression; } void GetGradient(const HostDeviceVector<bst_float>& preds, const MetaInfo &info, int, HostDeviceVector<GradientPair> *out_gpair) override { CHECK_NE(info.labels.Size(), 0U) << "label set cannot be empty"; CHECK_EQ(preds.Size(), info.labels.Size()) << "labels are not correctly provided"; const size_t ndata = preds.Size(); out_gpair->Resize(ndata); auto device = ctx_->gpu_id; label_correct_.Resize(1); label_correct_.Fill(1); const bool is_null_weight = info.weights_.Size() == 0; if (!is_null_weight) { CHECK_EQ(info.weights_.Size(), ndata) << "Number of weights should be equal to number of data points."; } const float rho = param_.tweedie_variance_power; common::Transform<>::Init( [=] XGBOOST_DEVICE(size_t _idx, common::Span<int> _label_correct, common::Span<GradientPair> _out_gpair, common::Span<const bst_float> _preds, common::Span<const bst_float> _labels, common::Span<const bst_float> _weights) { bst_float p = _preds[_idx]; bst_float w = is_null_weight ? 
1.0f : _weights[_idx]; bst_float y = _labels[_idx]; if (y < 0.0f) { _label_correct[0] = 0; } bst_float grad = -y * expf((1 - rho) * p) + expf((2 - rho) * p); bst_float hess = -y * (1 - rho) * \ ::exp((1 - rho) * p) + (2 - rho) * expf((2 - rho) * p); _out_gpair[_idx] = GradientPair(grad * w, hess * w); }, common::Range{0, static_cast<int64_t>(ndata), 1}, this->ctx_->Threads(), device) .Eval(&label_correct_, out_gpair, &preds, info.labels.Data(), &info.weights_); // copy "label correct" flags back to host std::vector<int>& label_correct_h = label_correct_.HostVector(); for (auto const flag : label_correct_h) { if (flag == 0) { LOG(FATAL) << "TweedieRegression: label must be nonnegative"; } } } void PredTransform(HostDeviceVector<bst_float> *io_preds) const override { common::Transform<>::Init( [] XGBOOST_DEVICE(size_t _idx, common::Span<bst_float> _preds) { _preds[_idx] = expf(_preds[_idx]); }, common::Range{0, static_cast<int64_t>(io_preds->Size())}, this->ctx_->Threads(), io_preds->DeviceIdx()) .Eval(io_preds); } bst_float ProbToMargin(bst_float base_score) const override { return ::log(base_score); } const char* DefaultEvalMetric() const override { return metric_.c_str(); } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String("reg:tweedie"); out["tweedie_regression_param"] = ToJson(param_); } void LoadConfig(Json const& in) override { FromJson(in["tweedie_regression_param"], &param_); } private: std::string metric_; TweedieRegressionParam param_; HostDeviceVector<int> label_correct_; }; // register the objective functions DMLC_REGISTER_PARAMETER(TweedieRegressionParam); XGBOOST_REGISTER_OBJECTIVE(TweedieRegression, "reg:tweedie") .describe("Tweedie regression for insurance data.") .set_body([]() { return new TweedieRegression(); }); class MeanAbsoluteError : public ObjFunction { public: void Configure(Args const&) override {} ObjInfo Task() const override { return {ObjInfo::kRegression, true, true}; } bst_target_t Targets(MetaInfo const& info) const override { return ::max(static_cast<size_t>(1), info.labels.Shape(1)); } void GetGradient(HostDeviceVector<bst_float> const& preds, const MetaInfo& info, int /*iter*/, HostDeviceVector<GradientPair>* out_gpair) override { CheckRegInputs(info, preds); auto labels = info.labels.View(ctx_->gpu_id); out_gpair->SetDevice(ctx_->gpu_id); out_gpair->Resize(info.labels.Size()); auto gpair = linalg::MakeVec(out_gpair); preds.SetDevice(ctx_->gpu_id); auto predt = linalg::MakeVec(&preds); info.weights_.SetDevice(ctx_->gpu_id); common::OptionalWeights weight{ctx_->IsCPU() ? 
info.weights_.ConstHostSpan() : info.weights_.ConstDeviceSpan()}; linalg::ElementWiseKernel(ctx_, labels, [=] XGBOOST_DEVICE(size_t i, float const y) mutable { auto sign = [](auto x) { return (x > static_cast<decltype(x)>(0)) - (x < static_cast<decltype(x)>(0)); }; auto sample_id = std::get<0>(linalg::UnravelIndex(i, labels.Shape())); auto grad = sign(predt(i) - y) * weight[sample_id]; auto hess = weight[sample_id]; gpair(i) = GradientPair{grad, hess}; }); } void InitEstimation(MetaInfo const& info, linalg::Tensor<float, 1>* base_margin) const override { CheckInitInputs(info); base_margin->Reshape(this->Targets(info)); double w{0.0}; if (info.weights_.Empty()) { w = static_cast<double>(info.num_row_); } else { w = common::Reduce(ctx_, info.weights_); } if (info.num_row_ == 0) { auto out = base_margin->HostView(); out(0) = 0; } else { linalg::Vector<float> temp; common::Median(ctx_, info.labels, info.weights_, &temp); common::Mean(ctx_, temp, base_margin); } CHECK_EQ(base_margin->Size(), 1); auto out = base_margin->HostView(); // weighted avg std::transform(linalg::cbegin(out), linalg::cend(out), linalg::begin(out), [w](float v) { return v * w; }); collective::Allreduce<collective::Operation::kSum>(out.Values().data(), out.Values().size()); collective::Allreduce<collective::Operation::kSum>(&w, 1); if (common::CloseTo(w, 0.0)) { // Mostly for handling empty dataset test. LOG(WARNING) << "Sum of weights is close to 0.0, skipping base score estimation."; out(0) = ObjFunction::DefaultBaseScore(); return; } std::transform(linalg::cbegin(out), linalg::cend(out), linalg::begin(out), [w](float v) { return v / w; }); } void UpdateTreeLeaf(HostDeviceVector<bst_node_t> const& position, MetaInfo const& info, HostDeviceVector<float> const& prediction, std::int32_t group_idx, RegTree* p_tree) const override { if (ctx_->IsCPU()) { auto const& h_position = position.ConstHostVector(); detail::UpdateTreeLeafHost(ctx_, h_position, group_idx, info, prediction, 0.5, p_tree); } else { #if defined(XGBOOST_USE_CUDA) position.SetDevice(ctx_->gpu_id); auto d_position = position.ConstDeviceSpan(); detail::UpdateTreeLeafDevice(ctx_, d_position, group_idx, info, prediction, 0.5, p_tree); #else common::AssertGPUSupport(); #endif // defined(XGBOOST_USE_CUDA) } } const char* DefaultEvalMetric() const override { return "mae"; } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String("reg:absoluteerror"); } void LoadConfig(Json const& in) override { CHECK_EQ(StringView{get<String const>(in["name"])}, StringView{"reg:absoluteerror"}); } }; XGBOOST_REGISTER_OBJECTIVE(MeanAbsoluteError, "reg:absoluteerror") .describe("Mean absoluate error.") .set_body([]() { return new MeanAbsoluteError(); }); } // namespace obj } // namespace xgboost
5dfd1c35650d77bee435b47a4baa966ff6028544.cu
/**
 * Copyright 2015-2023 by XGBoost Contributors
 * \file regression_obj.cu
 * \brief Definition of single-value regression and classification objectives.
 * \author Tianqi Chen, Kailong Chen
 */
#include <dmlc/omp.h>

#include <algorithm>
#include <cmath>
#include <cstdint> // std::int32_t
#include <memory>
#include <vector>

#include "../common/common.h"
#include "../common/linalg_op.h"
#include "../common/numeric.h" // Reduce
#include "../common/pseudo_huber.h"
#include "../common/stats.h"
#include "../common/threading_utils.h"
#include "../common/transform.h"
#include "../tree/fit_stump.h" // FitStump
#include "./regression_loss.h"
#include "adaptive.h"
#include "xgboost/base.h"
#include "xgboost/context.h"
#include "xgboost/data.h" // MetaInfo
#include "xgboost/host_device_vector.h"
#include "xgboost/json.h"
#include "xgboost/linalg.h"
#include "xgboost/logging.h"
#include "xgboost/objective.h" // ObjFunction
#include "xgboost/parameter.h"
#include "xgboost/span.h"
#include "xgboost/tree_model.h" // RegTree

#if defined(XGBOOST_USE_CUDA)
#include "../common/device_helpers.cuh"
#include "../common/linalg_op.cuh"
#endif // defined(XGBOOST_USE_CUDA)

namespace xgboost {
namespace obj {

namespace {
void CheckInitInputs(MetaInfo const& info) {
  CHECK_EQ(info.labels.Shape(0), info.num_row_) << "Invalid shape of labels.";
  if (!info.weights_.Empty()) {
    CHECK_EQ(info.weights_.Size(), info.num_row_)
        << "Number of weights should be equal to number of data points.";
  }
}

void CheckRegInputs(MetaInfo const& info, HostDeviceVector<bst_float> const& preds) {
  CheckInitInputs(info);
  CHECK_EQ(info.labels.Size(), preds.Size()) << "Invalid shape of labels.";
}
} // anonymous namespace

class RegInitEstimation : public ObjFunction {
  void InitEstimation(MetaInfo const& info, linalg::Tensor<float, 1>* base_score) const override {
    CheckInitInputs(info);
    // Avoid altering any state in child objective.
    HostDeviceVector<float> dummy_predt(info.labels.Size(), 0.0f, this->ctx_->gpu_id);
    HostDeviceVector<GradientPair> gpair(info.labels.Size(), GradientPair{}, this->ctx_->gpu_id);
    Json config{Object{}};
    this->SaveConfig(&config);
    std::unique_ptr<ObjFunction> new_obj{
        ObjFunction::Create(get<String const>(config["name"]), this->ctx_)};
    new_obj->LoadConfig(config);
    new_obj->GetGradient(dummy_predt, info, 0, &gpair);
    bst_target_t n_targets = this->Targets(info);
    linalg::Vector<float> leaf_weight;
    tree::FitStump(this->ctx_, gpair, n_targets, &leaf_weight);
    // workaround, we don't support multi-target due to binary model serialization for
    // base margin.
common::Mean(this->ctx_, leaf_weight, base_score); this->PredTransform(base_score->Data()); } }; #if defined(XGBOOST_USE_CUDA) DMLC_REGISTRY_FILE_TAG(regression_obj_gpu); #endif // defined(XGBOOST_USE_CUDA) struct RegLossParam : public XGBoostParameter<RegLossParam> { float scale_pos_weight; // declare parameters DMLC_DECLARE_PARAMETER(RegLossParam) { DMLC_DECLARE_FIELD(scale_pos_weight).set_default(1.0f).set_lower_bound(0.0f) .describe("Scale the weight of positive examples by this factor"); } }; template<typename Loss> class RegLossObj : public RegInitEstimation { protected: HostDeviceVector<float> additional_input_; public: // 0 - label_correct flag, 1 - scale_pos_weight, 2 - is_null_weight RegLossObj(): additional_input_(3) {} void Configure(const std::vector<std::pair<std::string, std::string> >& args) override { param_.UpdateAllowUnknown(args); } ObjInfo Task() const override { return Loss::Info(); } bst_target_t Targets(MetaInfo const& info) const override { // Multi-target regression. return std::max(static_cast<size_t>(1), info.labels.Shape(1)); } void GetGradient(const HostDeviceVector<bst_float>& preds, const MetaInfo &info, int, HostDeviceVector<GradientPair>* out_gpair) override { CheckRegInputs(info, preds); size_t const ndata = preds.Size(); out_gpair->Resize(ndata); auto device = ctx_->gpu_id; additional_input_.HostVector().begin()[0] = 1; // Fill the label_correct flag bool is_null_weight = info.weights_.Size() == 0; auto scale_pos_weight = param_.scale_pos_weight; additional_input_.HostVector().begin()[1] = scale_pos_weight; additional_input_.HostVector().begin()[2] = is_null_weight; const size_t nthreads = ctx_->Threads(); bool on_device = device >= 0; // On CPU we run the transformation each thread processing a contigious block of data // for better performance. const size_t n_data_blocks = std::max(static_cast<size_t>(1), (on_device ? ndata : nthreads)); const size_t block_size = ndata / n_data_blocks + !!(ndata % n_data_blocks); auto const n_targets = std::max(info.labels.Shape(1), static_cast<size_t>(1)); common::Transform<>::Init( [block_size, ndata, n_targets] XGBOOST_DEVICE( size_t data_block_idx, common::Span<float> _additional_input, common::Span<GradientPair> _out_gpair, common::Span<const bst_float> _preds, common::Span<const bst_float> _labels, common::Span<const bst_float> _weights) { const bst_float* preds_ptr = _preds.data(); const bst_float* labels_ptr = _labels.data(); const bst_float* weights_ptr = _weights.data(); GradientPair* out_gpair_ptr = _out_gpair.data(); const size_t begin = data_block_idx*block_size; const size_t end = std::min(ndata, begin + block_size); const float _scale_pos_weight = _additional_input[1]; const bool _is_null_weight = _additional_input[2]; for (size_t idx = begin; idx < end; ++idx) { bst_float p = Loss::PredTransform(preds_ptr[idx]); bst_float w = _is_null_weight ? 1.0f : weights_ptr[idx / n_targets]; bst_float label = labels_ptr[idx]; if (label == 1.0f) { w *= _scale_pos_weight; } if (!Loss::CheckLabel(label)) { // If there is an incorrect label, the host code will know. 
_additional_input[0] = 0; } out_gpair_ptr[idx] = GradientPair(Loss::FirstOrderGradient(p, label) * w, Loss::SecondOrderGradient(p, label) * w); } }, common::Range{0, static_cast<int64_t>(n_data_blocks)}, nthreads, device) .Eval(&additional_input_, out_gpair, &preds, info.labels.Data(), &info.weights_); auto const flag = additional_input_.HostVector().begin()[0]; if (flag == 0) { LOG(FATAL) << Loss::LabelErrorMsg(); } } public: const char* DefaultEvalMetric() const override { return Loss::DefaultEvalMetric(); } void PredTransform(HostDeviceVector<float> *io_preds) const override { common::Transform<>::Init( [] XGBOOST_DEVICE(size_t _idx, common::Span<float> _preds) { _preds[_idx] = Loss::PredTransform(_preds[_idx]); }, common::Range{0, static_cast<int64_t>(io_preds->Size())}, this->ctx_->Threads(), io_preds->DeviceIdx()) .Eval(io_preds); } float ProbToMargin(float base_score) const override { return Loss::ProbToMargin(base_score); } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String(Loss::Name()); out["reg_loss_param"] = ToJson(param_); } void LoadConfig(Json const& in) override { FromJson(in["reg_loss_param"], &param_); } protected: RegLossParam param_; }; // register the objective functions DMLC_REGISTER_PARAMETER(RegLossParam); XGBOOST_REGISTER_OBJECTIVE(SquaredLossRegression, LinearSquareLoss::Name()) .describe("Regression with squared error.") .set_body([]() { return new RegLossObj<LinearSquareLoss>(); }); XGBOOST_REGISTER_OBJECTIVE(SquareLogError, SquaredLogError::Name()) .describe("Regression with root mean squared logarithmic error.") .set_body([]() { return new RegLossObj<SquaredLogError>(); }); XGBOOST_REGISTER_OBJECTIVE(LogisticRegression, LogisticRegression::Name()) .describe("Logistic regression for probability regression task.") .set_body([]() { return new RegLossObj<LogisticRegression>(); }); XGBOOST_REGISTER_OBJECTIVE(LogisticClassification, LogisticClassification::Name()) .describe("Logistic regression for binary classification task.") .set_body([]() { return new RegLossObj<LogisticClassification>(); }); XGBOOST_REGISTER_OBJECTIVE(LogisticRaw, LogisticRaw::Name()) .describe("Logistic regression for classification, output score " "before logistic transformation.") .set_body([]() { return new RegLossObj<LogisticRaw>(); }); // Deprecated functions XGBOOST_REGISTER_OBJECTIVE(LinearRegression, "reg:linear") .describe("Regression with squared error.") .set_body([]() { LOG(WARNING) << "reg:linear is now deprecated in favor of reg:squarederror."; return new RegLossObj<LinearSquareLoss>(); }); // End deprecated class PseudoHuberRegression : public RegInitEstimation { PesudoHuberParam param_; public: void Configure(Args const& args) override { param_.UpdateAllowUnknown(args); } ObjInfo Task() const override { return ObjInfo::kRegression; } bst_target_t Targets(MetaInfo const& info) const override { return std::max(static_cast<size_t>(1), info.labels.Shape(1)); } void GetGradient(HostDeviceVector<bst_float> const& preds, const MetaInfo& info, int /*iter*/, HostDeviceVector<GradientPair>* out_gpair) override { CheckRegInputs(info, preds); auto slope = param_.huber_slope; CHECK_NE(slope, 0.0) << "slope for pseudo huber cannot be 0."; auto labels = info.labels.View(ctx_->gpu_id); out_gpair->SetDevice(ctx_->gpu_id); out_gpair->Resize(info.labels.Size()); auto gpair = linalg::MakeVec(out_gpair); preds.SetDevice(ctx_->gpu_id); auto predt = linalg::MakeVec(&preds); info.weights_.SetDevice(ctx_->gpu_id); common::OptionalWeights weight{ctx_->IsCPU() ? 
info.weights_.ConstHostSpan() : info.weights_.ConstDeviceSpan()}; linalg::ElementWiseKernel(ctx_, labels, [=] XGBOOST_DEVICE(size_t i, float const y) mutable { auto sample_id = std::get<0>(linalg::UnravelIndex(i, labels.Shape())); const float z = predt(i) - y; const float scale_sqrt = std::sqrt(1 + common::Sqr(z) / common::Sqr(slope)); float grad = z / scale_sqrt; auto scale = common::Sqr(slope) + common::Sqr(z); float hess = common::Sqr(slope) / (scale * scale_sqrt); auto w = weight[sample_id]; gpair(i) = {grad * w, hess * w}; }); } const char* DefaultEvalMetric() const override { return "mphe"; } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String("reg:pseudohubererror"); out["pseudo_huber_param"] = ToJson(param_); } void LoadConfig(Json const& in) override { auto const& config = get<Object const>(in); if (config.find("pseudo_huber_param") == config.cend()) { // The parameter is added in 1.6. return; } FromJson(in["pseudo_huber_param"], &param_); } }; XGBOOST_REGISTER_OBJECTIVE(PseudoHuberRegression, "reg:pseudohubererror") .describe("Regression Pseudo Huber error.") .set_body([]() { return new PseudoHuberRegression(); }); // declare parameter struct PoissonRegressionParam : public XGBoostParameter<PoissonRegressionParam> { float max_delta_step; DMLC_DECLARE_PARAMETER(PoissonRegressionParam) { DMLC_DECLARE_FIELD(max_delta_step).set_lower_bound(0.0f).set_default(0.7f) .describe("Maximum delta step we allow each weight estimation to be." \ " This parameter is required for possion regression."); } }; // poisson regression for count class PoissonRegression : public RegInitEstimation { public: // declare functions void Configure(const std::vector<std::pair<std::string, std::string> >& args) override { param_.UpdateAllowUnknown(args); } ObjInfo Task() const override { return ObjInfo::kRegression; } void GetGradient(const HostDeviceVector<bst_float>& preds, const MetaInfo &info, int, HostDeviceVector<GradientPair> *out_gpair) override { CHECK_NE(info.labels.Size(), 0U) << "label set cannot be empty"; CHECK_EQ(preds.Size(), info.labels.Size()) << "labels are not correctly provided"; size_t const ndata = preds.Size(); out_gpair->Resize(ndata); auto device = ctx_->gpu_id; label_correct_.Resize(1); label_correct_.Fill(1); bool is_null_weight = info.weights_.Size() == 0; if (!is_null_weight) { CHECK_EQ(info.weights_.Size(), ndata) << "Number of weights should be equal to number of data points."; } bst_float max_delta_step = param_.max_delta_step; common::Transform<>::Init( [=] XGBOOST_DEVICE(size_t _idx, common::Span<int> _label_correct, common::Span<GradientPair> _out_gpair, common::Span<const bst_float> _preds, common::Span<const bst_float> _labels, common::Span<const bst_float> _weights) { bst_float p = _preds[_idx]; bst_float w = is_null_weight ? 
1.0f : _weights[_idx]; bst_float y = _labels[_idx]; if (y < 0.0f) { _label_correct[0] = 0; } _out_gpair[_idx] = GradientPair{(expf(p) - y) * w, expf(p + max_delta_step) * w}; }, common::Range{0, static_cast<int64_t>(ndata)}, this->ctx_->Threads(), device).Eval( &label_correct_, out_gpair, &preds, info.labels.Data(), &info.weights_); // copy "label correct" flags back to host std::vector<int>& label_correct_h = label_correct_.HostVector(); for (auto const flag : label_correct_h) { if (flag == 0) { LOG(FATAL) << "PoissonRegression: label must be nonnegative"; } } } void PredTransform(HostDeviceVector<bst_float> *io_preds) const override { common::Transform<>::Init( [] XGBOOST_DEVICE(size_t _idx, common::Span<bst_float> _preds) { _preds[_idx] = expf(_preds[_idx]); }, common::Range{0, static_cast<int64_t>(io_preds->Size())}, this->ctx_->Threads(), io_preds->DeviceIdx()) .Eval(io_preds); } void EvalTransform(HostDeviceVector<bst_float> *io_preds) override { PredTransform(io_preds); } bst_float ProbToMargin(bst_float base_score) const override { return std::log(base_score); } const char* DefaultEvalMetric() const override { return "poisson-nloglik"; } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String("count:poisson"); out["poisson_regression_param"] = ToJson(param_); } void LoadConfig(Json const& in) override { FromJson(in["poisson_regression_param"], &param_); } private: PoissonRegressionParam param_; HostDeviceVector<int> label_correct_; }; // register the objective functions DMLC_REGISTER_PARAMETER(PoissonRegressionParam); XGBOOST_REGISTER_OBJECTIVE(PoissonRegression, "count:poisson") .describe("Poisson regression for count data.") .set_body([]() { return new PoissonRegression(); }); // cox regression for survival data (negative values mean they are censored) class CoxRegression : public RegInitEstimation { public: void Configure(Args const&) override {} ObjInfo Task() const override { return ObjInfo::kRegression; } void GetGradient(const HostDeviceVector<bst_float>& preds, const MetaInfo &info, int, HostDeviceVector<GradientPair> *out_gpair) override { CHECK_NE(info.labels.Size(), 0U) << "label set cannot be empty"; CHECK_EQ(preds.Size(), info.labels.Size()) << "labels are not correctly provided"; const auto& preds_h = preds.HostVector(); out_gpair->Resize(preds_h.size()); auto& gpair = out_gpair->HostVector(); const std::vector<size_t> &label_order = info.LabelAbsSort(); const omp_ulong ndata = static_cast<omp_ulong>(preds_h.size()); // NOLINT(*) const bool is_null_weight = info.weights_.Size() == 0; if (!is_null_weight) { CHECK_EQ(info.weights_.Size(), ndata) << "Number of weights should be equal to number of data points."; } // pre-compute a sum double exp_p_sum = 0; // we use double because we might need the precision with large datasets for (omp_ulong i = 0; i < ndata; ++i) { exp_p_sum += std::exp(preds_h[label_order[i]]); } // start calculating grad and hess const auto& labels = info.labels.HostView(); double r_k = 0; double s_k = 0; double last_exp_p = 0.0; double last_abs_y = 0.0; double accumulated_sum = 0; for (omp_ulong i = 0; i < ndata; ++i) { // NOLINT(*) const size_t ind = label_order[i]; const double p = preds_h[ind]; const double exp_p = std::exp(p); const double w = info.GetWeight(ind); const double y = labels(ind); const double abs_y = std::abs(y); // only update the denominator after we move forward in time (labels are sorted) // this is Breslow's method for ties accumulated_sum += last_exp_p; if (last_abs_y < abs_y) { exp_p_sum -= 
accumulated_sum; accumulated_sum = 0; } else { CHECK(last_abs_y <= abs_y) << "CoxRegression: labels must be in sorted order, " << "MetaInfo::LabelArgsort failed!"; } if (y > 0) { r_k += 1.0/exp_p_sum; s_k += 1.0/(exp_p_sum*exp_p_sum); } const double grad = exp_p*r_k - static_cast<bst_float>(y > 0); const double hess = exp_p*r_k - exp_p*exp_p * s_k; gpair.at(ind) = GradientPair(grad * w, hess * w); last_abs_y = abs_y; last_exp_p = exp_p; } } void PredTransform(HostDeviceVector<bst_float> *io_preds) const override { std::vector<bst_float> &preds = io_preds->HostVector(); const long ndata = static_cast<long>(preds.size()); // NOLINT(*) common::ParallelFor(ndata, ctx_->Threads(), [&](long j) { // NOLINT(*) preds[j] = std::exp(preds[j]); }); } void EvalTransform(HostDeviceVector<bst_float> *io_preds) override { PredTransform(io_preds); } bst_float ProbToMargin(bst_float base_score) const override { return std::log(base_score); } const char* DefaultEvalMetric() const override { return "cox-nloglik"; } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String("survival:cox"); } void LoadConfig(Json const&) override {} }; // register the objective function XGBOOST_REGISTER_OBJECTIVE(CoxRegression, "survival:cox") .describe("Cox regression for censored survival data (negative labels are considered censored).") .set_body([]() { return new CoxRegression(); }); // gamma regression class GammaRegression : public RegInitEstimation { public: void Configure(Args const&) override {} ObjInfo Task() const override { return ObjInfo::kRegression; } void GetGradient(const HostDeviceVector<bst_float> &preds, const MetaInfo &info, int, HostDeviceVector<GradientPair> *out_gpair) override { CHECK_NE(info.labels.Size(), 0U) << "label set cannot be empty"; CHECK_EQ(preds.Size(), info.labels.Size()) << "labels are not correctly provided"; const size_t ndata = preds.Size(); auto device = ctx_->gpu_id; out_gpair->Resize(ndata); label_correct_.Resize(1); label_correct_.Fill(1); const bool is_null_weight = info.weights_.Size() == 0; if (!is_null_weight) { CHECK_EQ(info.weights_.Size(), ndata) << "Number of weights should be equal to number of data points."; } common::Transform<>::Init( [=] XGBOOST_DEVICE(size_t _idx, common::Span<int> _label_correct, common::Span<GradientPair> _out_gpair, common::Span<const bst_float> _preds, common::Span<const bst_float> _labels, common::Span<const bst_float> _weights) { bst_float p = _preds[_idx]; bst_float w = is_null_weight ? 
1.0f : _weights[_idx]; bst_float y = _labels[_idx]; if (y <= 0.0f) { _label_correct[0] = 0; } _out_gpair[_idx] = GradientPair((1 - y / expf(p)) * w, y / expf(p) * w); }, common::Range{0, static_cast<int64_t>(ndata)}, this->ctx_->Threads(), device).Eval( &label_correct_, out_gpair, &preds, info.labels.Data(), &info.weights_); // copy "label correct" flags back to host std::vector<int>& label_correct_h = label_correct_.HostVector(); for (auto const flag : label_correct_h) { if (flag == 0) { LOG(FATAL) << "GammaRegression: label must be positive."; } } } void PredTransform(HostDeviceVector<bst_float> *io_preds) const override { common::Transform<>::Init( [] XGBOOST_DEVICE(size_t _idx, common::Span<bst_float> _preds) { _preds[_idx] = expf(_preds[_idx]); }, common::Range{0, static_cast<int64_t>(io_preds->Size())}, this->ctx_->Threads(), io_preds->DeviceIdx()) .Eval(io_preds); } void EvalTransform(HostDeviceVector<bst_float> *io_preds) override { PredTransform(io_preds); } bst_float ProbToMargin(bst_float base_score) const override { return std::log(base_score); } const char* DefaultEvalMetric() const override { return "gamma-nloglik"; } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String("reg:gamma"); } void LoadConfig(Json const&) override {} private: HostDeviceVector<int> label_correct_; }; // register the objective functions XGBOOST_REGISTER_OBJECTIVE(GammaRegression, "reg:gamma") .describe("Gamma regression for severity data.") .set_body([]() { return new GammaRegression(); }); // declare parameter struct TweedieRegressionParam : public XGBoostParameter<TweedieRegressionParam> { float tweedie_variance_power; DMLC_DECLARE_PARAMETER(TweedieRegressionParam) { DMLC_DECLARE_FIELD(tweedie_variance_power).set_range(1.0f, 2.0f).set_default(1.5f) .describe("Tweedie variance power. Must be between in range [1, 2)."); } }; // tweedie regression class TweedieRegression : public RegInitEstimation { public: // declare functions void Configure(const std::vector<std::pair<std::string, std::string> >& args) override { param_.UpdateAllowUnknown(args); std::ostringstream os; os << "tweedie-nloglik@" << param_.tweedie_variance_power; metric_ = os.str(); } ObjInfo Task() const override { return ObjInfo::kRegression; } void GetGradient(const HostDeviceVector<bst_float>& preds, const MetaInfo &info, int, HostDeviceVector<GradientPair> *out_gpair) override { CHECK_NE(info.labels.Size(), 0U) << "label set cannot be empty"; CHECK_EQ(preds.Size(), info.labels.Size()) << "labels are not correctly provided"; const size_t ndata = preds.Size(); out_gpair->Resize(ndata); auto device = ctx_->gpu_id; label_correct_.Resize(1); label_correct_.Fill(1); const bool is_null_weight = info.weights_.Size() == 0; if (!is_null_weight) { CHECK_EQ(info.weights_.Size(), ndata) << "Number of weights should be equal to number of data points."; } const float rho = param_.tweedie_variance_power; common::Transform<>::Init( [=] XGBOOST_DEVICE(size_t _idx, common::Span<int> _label_correct, common::Span<GradientPair> _out_gpair, common::Span<const bst_float> _preds, common::Span<const bst_float> _labels, common::Span<const bst_float> _weights) { bst_float p = _preds[_idx]; bst_float w = is_null_weight ? 
1.0f : _weights[_idx]; bst_float y = _labels[_idx]; if (y < 0.0f) { _label_correct[0] = 0; } bst_float grad = -y * expf((1 - rho) * p) + expf((2 - rho) * p); bst_float hess = -y * (1 - rho) * \ std::exp((1 - rho) * p) + (2 - rho) * expf((2 - rho) * p); _out_gpair[_idx] = GradientPair(grad * w, hess * w); }, common::Range{0, static_cast<int64_t>(ndata), 1}, this->ctx_->Threads(), device) .Eval(&label_correct_, out_gpair, &preds, info.labels.Data(), &info.weights_); // copy "label correct" flags back to host std::vector<int>& label_correct_h = label_correct_.HostVector(); for (auto const flag : label_correct_h) { if (flag == 0) { LOG(FATAL) << "TweedieRegression: label must be nonnegative"; } } } void PredTransform(HostDeviceVector<bst_float> *io_preds) const override { common::Transform<>::Init( [] XGBOOST_DEVICE(size_t _idx, common::Span<bst_float> _preds) { _preds[_idx] = expf(_preds[_idx]); }, common::Range{0, static_cast<int64_t>(io_preds->Size())}, this->ctx_->Threads(), io_preds->DeviceIdx()) .Eval(io_preds); } bst_float ProbToMargin(bst_float base_score) const override { return std::log(base_score); } const char* DefaultEvalMetric() const override { return metric_.c_str(); } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String("reg:tweedie"); out["tweedie_regression_param"] = ToJson(param_); } void LoadConfig(Json const& in) override { FromJson(in["tweedie_regression_param"], &param_); } private: std::string metric_; TweedieRegressionParam param_; HostDeviceVector<int> label_correct_; }; // register the objective functions DMLC_REGISTER_PARAMETER(TweedieRegressionParam); XGBOOST_REGISTER_OBJECTIVE(TweedieRegression, "reg:tweedie") .describe("Tweedie regression for insurance data.") .set_body([]() { return new TweedieRegression(); }); class MeanAbsoluteError : public ObjFunction { public: void Configure(Args const&) override {} ObjInfo Task() const override { return {ObjInfo::kRegression, true, true}; } bst_target_t Targets(MetaInfo const& info) const override { return std::max(static_cast<size_t>(1), info.labels.Shape(1)); } void GetGradient(HostDeviceVector<bst_float> const& preds, const MetaInfo& info, int /*iter*/, HostDeviceVector<GradientPair>* out_gpair) override { CheckRegInputs(info, preds); auto labels = info.labels.View(ctx_->gpu_id); out_gpair->SetDevice(ctx_->gpu_id); out_gpair->Resize(info.labels.Size()); auto gpair = linalg::MakeVec(out_gpair); preds.SetDevice(ctx_->gpu_id); auto predt = linalg::MakeVec(&preds); info.weights_.SetDevice(ctx_->gpu_id); common::OptionalWeights weight{ctx_->IsCPU() ? 
info.weights_.ConstHostSpan() : info.weights_.ConstDeviceSpan()}; linalg::ElementWiseKernel(ctx_, labels, [=] XGBOOST_DEVICE(size_t i, float const y) mutable { auto sign = [](auto x) { return (x > static_cast<decltype(x)>(0)) - (x < static_cast<decltype(x)>(0)); }; auto sample_id = std::get<0>(linalg::UnravelIndex(i, labels.Shape())); auto grad = sign(predt(i) - y) * weight[sample_id]; auto hess = weight[sample_id]; gpair(i) = GradientPair{grad, hess}; }); } void InitEstimation(MetaInfo const& info, linalg::Tensor<float, 1>* base_margin) const override { CheckInitInputs(info); base_margin->Reshape(this->Targets(info)); double w{0.0}; if (info.weights_.Empty()) { w = static_cast<double>(info.num_row_); } else { w = common::Reduce(ctx_, info.weights_); } if (info.num_row_ == 0) { auto out = base_margin->HostView(); out(0) = 0; } else { linalg::Vector<float> temp; common::Median(ctx_, info.labels, info.weights_, &temp); common::Mean(ctx_, temp, base_margin); } CHECK_EQ(base_margin->Size(), 1); auto out = base_margin->HostView(); // weighted avg std::transform(linalg::cbegin(out), linalg::cend(out), linalg::begin(out), [w](float v) { return v * w; }); collective::Allreduce<collective::Operation::kSum>(out.Values().data(), out.Values().size()); collective::Allreduce<collective::Operation::kSum>(&w, 1); if (common::CloseTo(w, 0.0)) { // Mostly for handling empty dataset test. LOG(WARNING) << "Sum of weights is close to 0.0, skipping base score estimation."; out(0) = ObjFunction::DefaultBaseScore(); return; } std::transform(linalg::cbegin(out), linalg::cend(out), linalg::begin(out), [w](float v) { return v / w; }); } void UpdateTreeLeaf(HostDeviceVector<bst_node_t> const& position, MetaInfo const& info, HostDeviceVector<float> const& prediction, std::int32_t group_idx, RegTree* p_tree) const override { if (ctx_->IsCPU()) { auto const& h_position = position.ConstHostVector(); detail::UpdateTreeLeafHost(ctx_, h_position, group_idx, info, prediction, 0.5, p_tree); } else { #if defined(XGBOOST_USE_CUDA) position.SetDevice(ctx_->gpu_id); auto d_position = position.ConstDeviceSpan(); detail::UpdateTreeLeafDevice(ctx_, d_position, group_idx, info, prediction, 0.5, p_tree); #else common::AssertGPUSupport(); #endif // defined(XGBOOST_USE_CUDA) } } const char* DefaultEvalMetric() const override { return "mae"; } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String("reg:absoluteerror"); } void LoadConfig(Json const& in) override { CHECK_EQ(StringView{get<String const>(in["name"])}, StringView{"reg:absoluteerror"}); } }; XGBOOST_REGISTER_OBJECTIVE(MeanAbsoluteError, "reg:absoluteerror") .describe("Mean absoluate error.") .set_body([]() { return new MeanAbsoluteError(); }); } // namespace obj } // namespace xgboost
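// --- Illustrative aside (editor's sketch, not one of the dataset rows) ---
// The hip/cuda pairs above and below differ mainly in the runtime API prefix
// (hipMalloc vs. cudaMalloc, hipMemcpy vs. cudaMemcpy, hipDeviceSynchronize vs.
// cudaDeviceSynchronize, ...) and in the kernel-launch syntax
// (hipLaunchKernelGGL vs. the <<<grid, block>>> chevrons).
// Below is a minimal, self-contained HIP sketch of that mapping; the kernel,
// buffer name, and sizes are hypothetical and not taken from any row.
#include <hip/hip_runtime.h> // CUDA counterpart: <cuda_runtime.h>

__global__ void scale(float* data, float factor, int n) {
  const int i = blockDim.x * blockIdx.x + threadIdx.x; // identical in CUDA and HIP
  if (i < n) data[i] *= factor;
}

int main() {
  const int n = 1 << 10;
  float* d_data;
  hipMalloc((void**)&d_data, sizeof(float) * n);  // CUDA: cudaMalloc
  hipMemset((void*)d_data, 0, sizeof(float) * n); // CUDA: cudaMemset
  dim3 Dg((n + 255) / 256), Db(256);
  hipLaunchKernelGGL(scale, Dg, Db, 0, 0, d_data, 2.0f, n); // CUDA: scale<<<Dg, Db>>>(d_data, 2.0f, n);
  hipDeviceSynchronize();                         // CUDA: cudaDeviceSynchronize
  hipFree((void*)d_data);                         // CUDA: cudaFree
  return 0;
}
// --- End of aside ---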
fd4801738370db8f4d35e599f002832d6bf995ad.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright 2009, Andrew Corrigan, [email protected] // This code is from the AIAA-2009-4001 paper #include <cutil.h> #include <iostream> #include <fstream> #ifdef GEM5_FUSION #include <stdint.h> extern "C" { void m5_work_begin(uint64_t workid, uint64_t threadid); void m5_work_end(uint64_t workid, uint64_t threadid); } #endif /* * Options * */ #define GAMMA 1.4f unsigned iterations = 2000; #ifndef block_length #define block_length 192 #endif #define NDIM 3 #define NNB 4 #define RK 3 // 3rd order RK #define ff_mach 1.2f #define deg_angle_of_attack 0.0f /* * not options */ #if block_length > 128 #warning "the kernels may fail too launch on some systems if the block length is too large" #endif #define VAR_DENSITY 0 #define VAR_MOMENTUM 1 #define VAR_DENSITY_ENERGY (VAR_MOMENTUM+NDIM) #define NVAR (VAR_DENSITY_ENERGY+1) /* * Generic functions */ template <typename T> T* alloc(int N) { T* t; CUDA_SAFE_CALL(hipMalloc((void**)&t, sizeof(T)*N)); return t; } template <typename T> void dealloc(T* array) { CUDA_SAFE_CALL(hipFree((void*)array)); } template <typename T> void copy(T* dst, T* src, int N) { CUDA_SAFE_CALL(hipMemcpy((void*)dst, (void*)src, N*sizeof(T), hipMemcpyDeviceToDevice)); } template <typename T> void upload(T* dst, T* src, int N) { CUDA_SAFE_CALL(hipMemcpy((void*)dst, (void*)src, N*sizeof(T), hipMemcpyHostToDevice)); } template <typename T> void download(T* dst, T* src, int N) { CUDA_SAFE_CALL(hipMemcpy((void*)dst, (void*)src, N*sizeof(T), hipMemcpyDeviceToHost)); } void dump(float* variables, int nel, int nelr) { float* h_variables = new float[nelr*NVAR]; download(h_variables, variables, nelr*NVAR); { //std::ofstream file("density"); std::cout << "density:" << std::endl; std::cout << nel << " " << nelr << std::endl; for(int i = 0; i < nel; i++) std::cout << h_variables[i + VAR_DENSITY*nelr] << std::endl; } { //std::ofstream file("momentum"); std::cout << "momentum:" << std::endl; std::cout << nel << " " << nelr << std::endl; for(int i = 0; i < nel; i++) { for(int j = 0; j != NDIM; j++) std::cout << h_variables[i + (VAR_MOMENTUM+j)*nelr] << " "; std::cout << std::endl; } } { //std::ofstream file("density_energy"); std::cout << "density_energy:" << std::endl; std::cout << nel << " " << nelr << std::endl; for(int i = 0; i < nel; i++) std::cout << h_variables[i + VAR_DENSITY_ENERGY*nelr] << std::endl; } delete[] h_variables; } /* * Element-based Cell-centered FVM solver functions */ __constant__ float ff_variable[NVAR]; __constant__ float3 ff_flux_contribution_momentum_x[1]; __constant__ float3 ff_flux_contribution_momentum_y[1]; __constant__ float3 ff_flux_contribution_momentum_z[1]; __constant__ float3 ff_flux_contribution_density_energy[1]; __global__ void cuda_initialize_variables(int nelr, float* variables) { const int i = (blockDim.x*blockIdx.x + threadIdx.x); for(int j = 0; j < NVAR; j++) variables[i + j*nelr] = ff_variable[j]; } void initialize_variables(int nelr, float* variables) { dim3 Dg(nelr / block_length), Db(block_length); hipLaunchKernelGGL(( cuda_initialize_variables), dim3(Dg), dim3(Db), 0, 0, nelr, variables); CUT_CHECK_ERROR("initialize_variables failed"); } __device__ __host__ inline void compute_flux_contribution(float& density, float3& momentum, float& density_energy, float& pressure, float3& velocity, float3& fc_momentum_x, float3& fc_momentum_y, float3& fc_momentum_z, float3& fc_density_energy) { fc_momentum_x.x = velocity.x*momentum.x + pressure; fc_momentum_x.y = 
velocity.x*momentum.y; fc_momentum_x.z = velocity.x*momentum.z; fc_momentum_y.x = fc_momentum_x.y; fc_momentum_y.y = velocity.y*momentum.y + pressure; fc_momentum_y.z = velocity.y*momentum.z; fc_momentum_z.x = fc_momentum_x.z; fc_momentum_z.y = fc_momentum_y.z; fc_momentum_z.z = velocity.z*momentum.z + pressure; float de_p = density_energy+pressure; fc_density_energy.x = velocity.x*de_p; fc_density_energy.y = velocity.y*de_p; fc_density_energy.z = velocity.z*de_p; } __device__ inline void compute_velocity(float& density, float3& momentum, float3& velocity) { velocity.x = momentum.x / density; velocity.y = momentum.y / density; velocity.z = momentum.z / density; } __device__ inline float compute_speed_sqd(float3& velocity) { return velocity.x*velocity.x + velocity.y*velocity.y + velocity.z*velocity.z; } __device__ inline float compute_pressure(float& density, float& density_energy, float& speed_sqd) { return (float(GAMMA)-float(1.0f))*(density_energy - float(0.5f)*density*speed_sqd); } __device__ inline float compute_speed_of_sound(float& density, float& pressure) { return sqrtf(float(GAMMA)*pressure/density); } __global__ void cuda_compute_step_factor(int nelr, float* variables, float* areas, float* step_factors) { const int i = (blockDim.x*blockIdx.x + threadIdx.x); float density = variables[i + VAR_DENSITY*nelr]; float3 momentum; momentum.x = variables[i + (VAR_MOMENTUM+0)*nelr]; momentum.y = variables[i + (VAR_MOMENTUM+1)*nelr]; momentum.z = variables[i + (VAR_MOMENTUM+2)*nelr]; float density_energy = variables[i + VAR_DENSITY_ENERGY*nelr]; float3 velocity; compute_velocity(density, momentum, velocity); float speed_sqd = compute_speed_sqd(velocity); float pressure = compute_pressure(density, density_energy, speed_sqd); float speed_of_sound = compute_speed_of_sound(density, pressure); // dt = float(0.5f) * sqrtf(areas[i]) / (||v|| + c).... 
but when we do time stepping, this later would need to be divided by the area, so we just do it all at once step_factors[i] = float(0.5f) / (sqrtf(areas[i]) * (sqrtf(speed_sqd) + speed_of_sound)); } void compute_step_factor(int nelr, float* variables, float* areas, float* step_factors) { dim3 Dg(nelr / block_length), Db(block_length); hipLaunchKernelGGL(( cuda_compute_step_factor), dim3(Dg), dim3(Db), 0, 0, nelr, variables, areas, step_factors); CUT_CHECK_ERROR("compute_step_factor failed"); } /* * * */ __global__ void cuda_compute_flux(int nelr, int* elements_surrounding_elements, float* normals, float* variables, float* fluxes) { const float smoothing_coefficient = float(0.2f); const int i = (blockDim.x*blockIdx.x + threadIdx.x); int j, nb; float3 normal; float normal_len; float factor; float density_i = variables[i + VAR_DENSITY*nelr]; float3 momentum_i; momentum_i.x = variables[i + (VAR_MOMENTUM+0)*nelr]; momentum_i.y = variables[i + (VAR_MOMENTUM+1)*nelr]; momentum_i.z = variables[i + (VAR_MOMENTUM+2)*nelr]; float density_energy_i = variables[i + VAR_DENSITY_ENERGY*nelr]; float3 velocity_i; compute_velocity(density_i, momentum_i, velocity_i); float speed_sqd_i = compute_speed_sqd(velocity_i); float speed_i = sqrtf(speed_sqd_i); float pressure_i = compute_pressure(density_i, density_energy_i, speed_sqd_i); float speed_of_sound_i = compute_speed_of_sound(density_i, pressure_i); float3 flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z; float3 flux_contribution_i_density_energy; compute_flux_contribution(density_i, momentum_i, density_energy_i, pressure_i, velocity_i, flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z, flux_contribution_i_density_energy); float flux_i_density = float(0.0f); float3 flux_i_momentum; flux_i_momentum.x = float(0.0f); flux_i_momentum.y = float(0.0f); flux_i_momentum.z = float(0.0f); float flux_i_density_energy = float(0.0f); float3 velocity_nb; float density_nb, density_energy_nb; float3 momentum_nb; float3 flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z; float3 flux_contribution_nb_density_energy; float speed_sqd_nb, speed_of_sound_nb, pressure_nb; #pragma unroll for(j = 0; j < NNB; j++) { nb = elements_surrounding_elements[i + j*nelr]; normal.x = normals[i + (j + 0*NNB)*nelr]; normal.y = normals[i + (j + 1*NNB)*nelr]; normal.z = normals[i + (j + 2*NNB)*nelr]; normal_len = sqrtf(normal.x*normal.x + normal.y*normal.y + normal.z*normal.z); if(nb >= 0) // a legitimate neighbor { density_nb = variables[nb + VAR_DENSITY*nelr]; momentum_nb.x = variables[nb + (VAR_MOMENTUM+0)*nelr]; momentum_nb.y = variables[nb + (VAR_MOMENTUM+1)*nelr]; momentum_nb.z = variables[nb + (VAR_MOMENTUM+2)*nelr]; density_energy_nb = variables[nb + VAR_DENSITY_ENERGY*nelr]; compute_velocity(density_nb, momentum_nb, velocity_nb); speed_sqd_nb = compute_speed_sqd(velocity_nb); pressure_nb = compute_pressure(density_nb, density_energy_nb, speed_sqd_nb); speed_of_sound_nb = compute_speed_of_sound(density_nb, pressure_nb); compute_flux_contribution(density_nb, momentum_nb, density_energy_nb, pressure_nb, velocity_nb, flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z, flux_contribution_nb_density_energy); // artificial viscosity factor = -normal_len*smoothing_coefficient*float(0.5f)*(speed_i + sqrtf(speed_sqd_nb) + speed_of_sound_i + speed_of_sound_nb); flux_i_density += factor*(density_i-density_nb); 
flux_i_density_energy += factor*(density_energy_i-density_energy_nb); flux_i_momentum.x += factor*(momentum_i.x-momentum_nb.x); flux_i_momentum.y += factor*(momentum_i.y-momentum_nb.y); flux_i_momentum.z += factor*(momentum_i.z-momentum_nb.z); // accumulate cell-centered fluxes factor = float(0.5f)*normal.x; flux_i_density += factor*(momentum_nb.x+momentum_i.x); flux_i_density_energy += factor*(flux_contribution_nb_density_energy.x+flux_contribution_i_density_energy.x); flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.x+flux_contribution_i_momentum_x.x); flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.x+flux_contribution_i_momentum_y.x); flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.x+flux_contribution_i_momentum_z.x); factor = float(0.5f)*normal.y; flux_i_density += factor*(momentum_nb.y+momentum_i.y); flux_i_density_energy += factor*(flux_contribution_nb_density_energy.y+flux_contribution_i_density_energy.y); flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.y+flux_contribution_i_momentum_x.y); flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.y+flux_contribution_i_momentum_y.y); flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.y+flux_contribution_i_momentum_z.y); factor = float(0.5f)*normal.z; flux_i_density += factor*(momentum_nb.z+momentum_i.z); flux_i_density_energy += factor*(flux_contribution_nb_density_energy.z+flux_contribution_i_density_energy.z); flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.z+flux_contribution_i_momentum_x.z); flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.z+flux_contribution_i_momentum_y.z); flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.z+flux_contribution_i_momentum_z.z); } else if(nb == -1) // a wing boundary { flux_i_momentum.x += normal.x*pressure_i; flux_i_momentum.y += normal.y*pressure_i; flux_i_momentum.z += normal.z*pressure_i; } else if(nb == -2) // a far field boundary { factor = float(0.5f)*normal.x; flux_i_density += factor*(ff_variable[VAR_MOMENTUM+0]+momentum_i.x); flux_i_density_energy += factor*(ff_flux_contribution_density_energy[0].x+flux_contribution_i_density_energy.x); flux_i_momentum.x += factor*(ff_flux_contribution_momentum_x[0].x + flux_contribution_i_momentum_x.x); flux_i_momentum.y += factor*(ff_flux_contribution_momentum_y[0].x + flux_contribution_i_momentum_y.x); flux_i_momentum.z += factor*(ff_flux_contribution_momentum_z[0].x + flux_contribution_i_momentum_z.x); factor = float(0.5f)*normal.y; flux_i_density += factor*(ff_variable[VAR_MOMENTUM+1]+momentum_i.y); flux_i_density_energy += factor*(ff_flux_contribution_density_energy[0].y+flux_contribution_i_density_energy.y); flux_i_momentum.x += factor*(ff_flux_contribution_momentum_x[0].y + flux_contribution_i_momentum_x.y); flux_i_momentum.y += factor*(ff_flux_contribution_momentum_y[0].y + flux_contribution_i_momentum_y.y); flux_i_momentum.z += factor*(ff_flux_contribution_momentum_z[0].y + flux_contribution_i_momentum_z.y); factor = float(0.5f)*normal.z; flux_i_density += factor*(ff_variable[VAR_MOMENTUM+2]+momentum_i.z); flux_i_density_energy += factor*(ff_flux_contribution_density_energy[0].z+flux_contribution_i_density_energy.z); flux_i_momentum.x += factor*(ff_flux_contribution_momentum_x[0].z + flux_contribution_i_momentum_x.z); flux_i_momentum.y += factor*(ff_flux_contribution_momentum_y[0].z + flux_contribution_i_momentum_y.z); flux_i_momentum.z += factor*(ff_flux_contribution_momentum_z[0].z + flux_contribution_i_momentum_z.z); } } fluxes[i + 
VAR_DENSITY*nelr] = flux_i_density; fluxes[i + (VAR_MOMENTUM+0)*nelr] = flux_i_momentum.x; fluxes[i + (VAR_MOMENTUM+1)*nelr] = flux_i_momentum.y; fluxes[i + (VAR_MOMENTUM+2)*nelr] = flux_i_momentum.z; fluxes[i + VAR_DENSITY_ENERGY*nelr] = flux_i_density_energy; } void compute_flux(int nelr, int* elements_surrounding_elements, float* normals, float* variables, float* fluxes) { dim3 Dg(nelr / block_length), Db(block_length); hipLaunchKernelGGL(( cuda_compute_flux), dim3(Dg),dim3(Db), 0, 0, nelr, elements_surrounding_elements, normals, variables, fluxes); CUT_CHECK_ERROR("compute_flux failed"); } __global__ void cuda_time_step(int j, int nelr, float* old_variables, float* variables, float* step_factors, float* fluxes) { const int i = (blockDim.x*blockIdx.x + threadIdx.x); float factor = step_factors[i]/float(RK+1-j); variables[i + VAR_DENSITY*nelr] = old_variables[i + VAR_DENSITY*nelr] + factor*fluxes[i + VAR_DENSITY*nelr]; variables[i + VAR_DENSITY_ENERGY*nelr] = old_variables[i + VAR_DENSITY_ENERGY*nelr] + factor*fluxes[i + VAR_DENSITY_ENERGY*nelr]; variables[i + (VAR_MOMENTUM+0)*nelr] = old_variables[i + (VAR_MOMENTUM+0)*nelr] + factor*fluxes[i + (VAR_MOMENTUM+0)*nelr]; variables[i + (VAR_MOMENTUM+1)*nelr] = old_variables[i + (VAR_MOMENTUM+1)*nelr] + factor*fluxes[i + (VAR_MOMENTUM+1)*nelr]; variables[i + (VAR_MOMENTUM+2)*nelr] = old_variables[i + (VAR_MOMENTUM+2)*nelr] + factor*fluxes[i + (VAR_MOMENTUM+2)*nelr]; } void time_step(int j, int nelr, float* old_variables, float* variables, float* step_factors, float* fluxes) { dim3 Dg(nelr / block_length), Db(block_length); hipLaunchKernelGGL(( cuda_time_step), dim3(Dg),dim3(Db), 0, 0, j, nelr, old_variables, variables, step_factors, fluxes); CUT_CHECK_ERROR("update failed"); } /* * Main function */ int main(int argc, char** argv) { if (argc < 2) { std::cout << "specify data file name" << std::endl; return 0; } const char* data_file_name = argv[1]; if (argc == 3) { iterations = atoi(argv[2]); } hipDeviceProp_t prop; int dev; CUDA_SAFE_CALL(hipSetDevice(0)); CUDA_SAFE_CALL(hipGetDevice(&dev)); CUDA_SAFE_CALL(hipGetDeviceProperties(&prop, dev)); printf("Name: %s\n", prop.name); // set far field conditions and load them into constant memory on the gpu //{ float h_ff_variable[NVAR]; const float angle_of_attack = float(3.1415926535897931 / 180.0f) * float(deg_angle_of_attack); h_ff_variable[VAR_DENSITY] = float(1.4); float ff_pressure = float(1.0f); float ff_speed_of_sound = sqrt(GAMMA*ff_pressure / h_ff_variable[VAR_DENSITY]); float ff_speed = float(ff_mach)*ff_speed_of_sound; float3 ff_velocity; ff_velocity.x = ff_speed*float(cos((float)angle_of_attack)); ff_velocity.y = ff_speed*float(sin((float)angle_of_attack)); ff_velocity.z = 0.0f; h_ff_variable[VAR_MOMENTUM+0] = h_ff_variable[VAR_DENSITY] * ff_velocity.x; h_ff_variable[VAR_MOMENTUM+1] = h_ff_variable[VAR_DENSITY] * ff_velocity.y; h_ff_variable[VAR_MOMENTUM+2] = h_ff_variable[VAR_DENSITY] * ff_velocity.z; h_ff_variable[VAR_DENSITY_ENERGY] = h_ff_variable[VAR_DENSITY]*(float(0.5f)*(ff_speed*ff_speed)) + (ff_pressure / float(GAMMA-1.0f)); float3 h_ff_momentum; h_ff_momentum.x = *(h_ff_variable+VAR_MOMENTUM+0); h_ff_momentum.y = *(h_ff_variable+VAR_MOMENTUM+1); h_ff_momentum.z = *(h_ff_variable+VAR_MOMENTUM+2); float3 h_ff_flux_contribution_momentum_x; float3 h_ff_flux_contribution_momentum_y; float3 h_ff_flux_contribution_momentum_z; float3 h_ff_flux_contribution_density_energy; //} int nel; int nelr; // read in domain geometry float* areas; int* elements_surrounding_elements; float* normals; 
//{ std::ifstream file(data_file_name); file >> nel; nelr = block_length*((nel / block_length )+ ::min(1, nel % block_length)); float* h_areas = new float[nelr]; int* h_elements_surrounding_elements = new int[nelr*NNB]; float* h_normals = new float[nelr*NDIM*NNB]; // read in data for(int i = 0; i < nel; i++) { file >> h_areas[i]; for(int j = 0; j < NNB; j++) { file >> h_elements_surrounding_elements[i + j*nelr]; if(h_elements_surrounding_elements[i+j*nelr] < 0) h_elements_surrounding_elements[i+j*nelr] = -1; h_elements_surrounding_elements[i + j*nelr]--; //it's coming in with Fortran numbering for(int k = 0; k < NDIM; k++) { file >> h_normals[i + (j + k*NNB)*nelr]; h_normals[i + (j + k*NNB)*nelr] = -h_normals[i + (j + k*NNB)*nelr]; } } } // fill in remaining data int last = nel-1; for(int i = nel; i < nelr; i++) { h_areas[i] = h_areas[last]; for(int j = 0; j < NNB; j++) { // duplicate the last element h_elements_surrounding_elements[i + j*nelr] = h_elements_surrounding_elements[last + j*nelr]; for(int k = 0; k < NDIM; k++) h_normals[last + (j + k*NNB)*nelr] = h_normals[last + (j + k*NNB)*nelr]; } } areas = alloc<float>(nelr); elements_surrounding_elements = alloc<int>(nelr*NNB); normals = alloc<float>(nelr*NDIM*NNB); //} #ifdef GEM5_FUSION m5_work_begin(0, 0); #endif compute_flux_contribution(h_ff_variable[VAR_DENSITY], h_ff_momentum, h_ff_variable[VAR_DENSITY_ENERGY], ff_pressure, ff_velocity, h_ff_flux_contribution_momentum_x, h_ff_flux_contribution_momentum_y, h_ff_flux_contribution_momentum_z, h_ff_flux_contribution_density_energy); // copy far field conditions to the gpu CUDA_SAFE_CALL( hipMemcpyToSymbol(ff_variable, h_ff_variable, NVAR*sizeof(float)) ); CUDA_SAFE_CALL( hipMemcpyToSymbol(ff_flux_contribution_momentum_x, &h_ff_flux_contribution_momentum_x, sizeof(float3)) ); CUDA_SAFE_CALL( hipMemcpyToSymbol(ff_flux_contribution_momentum_y, &h_ff_flux_contribution_momentum_y, sizeof(float3)) ); CUDA_SAFE_CALL( hipMemcpyToSymbol(ff_flux_contribution_momentum_z, &h_ff_flux_contribution_momentum_z, sizeof(float3)) ); CUDA_SAFE_CALL( hipMemcpyToSymbol(ff_flux_contribution_density_energy, &h_ff_flux_contribution_density_energy, sizeof(float3)) ); upload<float>(areas, h_areas, nelr); upload<int>(elements_surrounding_elements, h_elements_surrounding_elements, nelr*NNB); upload<float>(normals, h_normals, nelr*NDIM*NNB); // Create arrays and set initial conditions float* variables = alloc<float>(nelr*NVAR); initialize_variables(nelr, variables); float* old_variables = alloc<float>(nelr*NVAR); float* fluxes = alloc<float>(nelr*NVAR); float* step_factors = alloc<float>(nelr); // make sure all memory is floatly allocated before we start timing initialize_variables(nelr, old_variables); initialize_variables(nelr, fluxes); hipMemset( (void*) step_factors, 0, sizeof(float)*nelr ); // make sure CUDA isn't still doing something before we start timing hipDeviceSynchronize(); // these need to be computed the first time in order to compute time step std::cout << "Starting..." 
<< std::endl; unsigned int timer = 0; CUT_SAFE_CALL( cutCreateTimer( &timer)); CUT_SAFE_CALL( cutStartTimer( timer)); // Begin iterations for(int i = 0; i < iterations; i++) { copy<float>(old_variables, variables, nelr*NVAR); // for the first iteration we compute the time step compute_step_factor(nelr, variables, areas, step_factors); CUT_CHECK_ERROR("compute_step_factor failed"); for(int j = 0; j < RK; j++) { compute_flux(nelr, elements_surrounding_elements, normals, variables, fluxes); CUT_CHECK_ERROR("compute_flux failed"); time_step(j, nelr, old_variables, variables, step_factors, fluxes); CUT_CHECK_ERROR("time_step failed"); } } hipDeviceSynchronize(); #ifdef GEM5_FUSION m5_work_end(0, 0); #endif CUT_SAFE_CALL( cutStopTimer(timer) ); std::cout << (cutGetAverageTimerValue(timer)/1000.0) / iterations << " seconds per iteration" << std::endl; std::cout << "Saving solution..." << std::endl; dump(variables, nel, nelr); std::cout << "Saved solution..." << std::endl; std::cout << "Cleaning up..." << std::endl; delete[] h_areas; delete[] h_elements_surrounding_elements; delete[] h_normals; dealloc<float>(areas); dealloc<int>(elements_surrounding_elements); dealloc<float>(normals); dealloc<float>(variables); dealloc<float>(old_variables); dealloc<float>(fluxes); dealloc<float>(step_factors); std::cout << "Done..." << std::endl; return 0; }
fd4801738370db8f4d35e599f002832d6bf995ad.cu
// Copyright 2009, Andrew Corrigan, [email protected] // This code is from the AIAA-2009-4001 paper #include <cutil.h> #include <iostream> #include <fstream> #ifdef GEM5_FUSION #include <stdint.h> extern "C" { void m5_work_begin(uint64_t workid, uint64_t threadid); void m5_work_end(uint64_t workid, uint64_t threadid); } #endif /* * Options * */ #define GAMMA 1.4f unsigned iterations = 2000; #ifndef block_length #define block_length 192 #endif #define NDIM 3 #define NNB 4 #define RK 3 // 3rd order RK #define ff_mach 1.2f #define deg_angle_of_attack 0.0f /* * not options */ #if block_length > 128 #warning "the kernels may fail too launch on some systems if the block length is too large" #endif #define VAR_DENSITY 0 #define VAR_MOMENTUM 1 #define VAR_DENSITY_ENERGY (VAR_MOMENTUM+NDIM) #define NVAR (VAR_DENSITY_ENERGY+1) /* * Generic functions */ template <typename T> T* alloc(int N) { T* t; CUDA_SAFE_CALL(cudaMalloc((void**)&t, sizeof(T)*N)); return t; } template <typename T> void dealloc(T* array) { CUDA_SAFE_CALL(cudaFree((void*)array)); } template <typename T> void copy(T* dst, T* src, int N) { CUDA_SAFE_CALL(cudaMemcpy((void*)dst, (void*)src, N*sizeof(T), cudaMemcpyDeviceToDevice)); } template <typename T> void upload(T* dst, T* src, int N) { CUDA_SAFE_CALL(cudaMemcpy((void*)dst, (void*)src, N*sizeof(T), cudaMemcpyHostToDevice)); } template <typename T> void download(T* dst, T* src, int N) { CUDA_SAFE_CALL(cudaMemcpy((void*)dst, (void*)src, N*sizeof(T), cudaMemcpyDeviceToHost)); } void dump(float* variables, int nel, int nelr) { float* h_variables = new float[nelr*NVAR]; download(h_variables, variables, nelr*NVAR); { //std::ofstream file("density"); std::cout << "density:" << std::endl; std::cout << nel << " " << nelr << std::endl; for(int i = 0; i < nel; i++) std::cout << h_variables[i + VAR_DENSITY*nelr] << std::endl; } { //std::ofstream file("momentum"); std::cout << "momentum:" << std::endl; std::cout << nel << " " << nelr << std::endl; for(int i = 0; i < nel; i++) { for(int j = 0; j != NDIM; j++) std::cout << h_variables[i + (VAR_MOMENTUM+j)*nelr] << " "; std::cout << std::endl; } } { //std::ofstream file("density_energy"); std::cout << "density_energy:" << std::endl; std::cout << nel << " " << nelr << std::endl; for(int i = 0; i < nel; i++) std::cout << h_variables[i + VAR_DENSITY_ENERGY*nelr] << std::endl; } delete[] h_variables; } /* * Element-based Cell-centered FVM solver functions */ __constant__ float ff_variable[NVAR]; __constant__ float3 ff_flux_contribution_momentum_x[1]; __constant__ float3 ff_flux_contribution_momentum_y[1]; __constant__ float3 ff_flux_contribution_momentum_z[1]; __constant__ float3 ff_flux_contribution_density_energy[1]; __global__ void cuda_initialize_variables(int nelr, float* variables) { const int i = (blockDim.x*blockIdx.x + threadIdx.x); for(int j = 0; j < NVAR; j++) variables[i + j*nelr] = ff_variable[j]; } void initialize_variables(int nelr, float* variables) { dim3 Dg(nelr / block_length), Db(block_length); cuda_initialize_variables<<<Dg, Db>>>(nelr, variables); CUT_CHECK_ERROR("initialize_variables failed"); } __device__ __host__ inline void compute_flux_contribution(float& density, float3& momentum, float& density_energy, float& pressure, float3& velocity, float3& fc_momentum_x, float3& fc_momentum_y, float3& fc_momentum_z, float3& fc_density_energy) { fc_momentum_x.x = velocity.x*momentum.x + pressure; fc_momentum_x.y = velocity.x*momentum.y; fc_momentum_x.z = velocity.x*momentum.z; fc_momentum_y.x = fc_momentum_x.y; fc_momentum_y.y = 
velocity.y*momentum.y + pressure; fc_momentum_y.z = velocity.y*momentum.z; fc_momentum_z.x = fc_momentum_x.z; fc_momentum_z.y = fc_momentum_y.z; fc_momentum_z.z = velocity.z*momentum.z + pressure; float de_p = density_energy+pressure; fc_density_energy.x = velocity.x*de_p; fc_density_energy.y = velocity.y*de_p; fc_density_energy.z = velocity.z*de_p; } __device__ inline void compute_velocity(float& density, float3& momentum, float3& velocity) { velocity.x = momentum.x / density; velocity.y = momentum.y / density; velocity.z = momentum.z / density; } __device__ inline float compute_speed_sqd(float3& velocity) { return velocity.x*velocity.x + velocity.y*velocity.y + velocity.z*velocity.z; } __device__ inline float compute_pressure(float& density, float& density_energy, float& speed_sqd) { return (float(GAMMA)-float(1.0f))*(density_energy - float(0.5f)*density*speed_sqd); } __device__ inline float compute_speed_of_sound(float& density, float& pressure) { return sqrtf(float(GAMMA)*pressure/density); } __global__ void cuda_compute_step_factor(int nelr, float* variables, float* areas, float* step_factors) { const int i = (blockDim.x*blockIdx.x + threadIdx.x); float density = variables[i + VAR_DENSITY*nelr]; float3 momentum; momentum.x = variables[i + (VAR_MOMENTUM+0)*nelr]; momentum.y = variables[i + (VAR_MOMENTUM+1)*nelr]; momentum.z = variables[i + (VAR_MOMENTUM+2)*nelr]; float density_energy = variables[i + VAR_DENSITY_ENERGY*nelr]; float3 velocity; compute_velocity(density, momentum, velocity); float speed_sqd = compute_speed_sqd(velocity); float pressure = compute_pressure(density, density_energy, speed_sqd); float speed_of_sound = compute_speed_of_sound(density, pressure); // dt = float(0.5f) * sqrtf(areas[i]) / (||v|| + c).... but when we do time stepping, this later would need to be divided by the area, so we just do it all at once step_factors[i] = float(0.5f) / (sqrtf(areas[i]) * (sqrtf(speed_sqd) + speed_of_sound)); } void compute_step_factor(int nelr, float* variables, float* areas, float* step_factors) { dim3 Dg(nelr / block_length), Db(block_length); cuda_compute_step_factor<<<Dg, Db>>>(nelr, variables, areas, step_factors); CUT_CHECK_ERROR("compute_step_factor failed"); } /* * * */ __global__ void cuda_compute_flux(int nelr, int* elements_surrounding_elements, float* normals, float* variables, float* fluxes) { const float smoothing_coefficient = float(0.2f); const int i = (blockDim.x*blockIdx.x + threadIdx.x); int j, nb; float3 normal; float normal_len; float factor; float density_i = variables[i + VAR_DENSITY*nelr]; float3 momentum_i; momentum_i.x = variables[i + (VAR_MOMENTUM+0)*nelr]; momentum_i.y = variables[i + (VAR_MOMENTUM+1)*nelr]; momentum_i.z = variables[i + (VAR_MOMENTUM+2)*nelr]; float density_energy_i = variables[i + VAR_DENSITY_ENERGY*nelr]; float3 velocity_i; compute_velocity(density_i, momentum_i, velocity_i); float speed_sqd_i = compute_speed_sqd(velocity_i); float speed_i = sqrtf(speed_sqd_i); float pressure_i = compute_pressure(density_i, density_energy_i, speed_sqd_i); float speed_of_sound_i = compute_speed_of_sound(density_i, pressure_i); float3 flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z; float3 flux_contribution_i_density_energy; compute_flux_contribution(density_i, momentum_i, density_energy_i, pressure_i, velocity_i, flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z, flux_contribution_i_density_energy); float flux_i_density = float(0.0f); float3 flux_i_momentum; 
flux_i_momentum.x = float(0.0f); flux_i_momentum.y = float(0.0f); flux_i_momentum.z = float(0.0f); float flux_i_density_energy = float(0.0f); float3 velocity_nb; float density_nb, density_energy_nb; float3 momentum_nb; float3 flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z; float3 flux_contribution_nb_density_energy; float speed_sqd_nb, speed_of_sound_nb, pressure_nb; #pragma unroll for(j = 0; j < NNB; j++) { nb = elements_surrounding_elements[i + j*nelr]; normal.x = normals[i + (j + 0*NNB)*nelr]; normal.y = normals[i + (j + 1*NNB)*nelr]; normal.z = normals[i + (j + 2*NNB)*nelr]; normal_len = sqrtf(normal.x*normal.x + normal.y*normal.y + normal.z*normal.z); if(nb >= 0) // a legitimate neighbor { density_nb = variables[nb + VAR_DENSITY*nelr]; momentum_nb.x = variables[nb + (VAR_MOMENTUM+0)*nelr]; momentum_nb.y = variables[nb + (VAR_MOMENTUM+1)*nelr]; momentum_nb.z = variables[nb + (VAR_MOMENTUM+2)*nelr]; density_energy_nb = variables[nb + VAR_DENSITY_ENERGY*nelr]; compute_velocity(density_nb, momentum_nb, velocity_nb); speed_sqd_nb = compute_speed_sqd(velocity_nb); pressure_nb = compute_pressure(density_nb, density_energy_nb, speed_sqd_nb); speed_of_sound_nb = compute_speed_of_sound(density_nb, pressure_nb); compute_flux_contribution(density_nb, momentum_nb, density_energy_nb, pressure_nb, velocity_nb, flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z, flux_contribution_nb_density_energy); // artificial viscosity factor = -normal_len*smoothing_coefficient*float(0.5f)*(speed_i + sqrtf(speed_sqd_nb) + speed_of_sound_i + speed_of_sound_nb); flux_i_density += factor*(density_i-density_nb); flux_i_density_energy += factor*(density_energy_i-density_energy_nb); flux_i_momentum.x += factor*(momentum_i.x-momentum_nb.x); flux_i_momentum.y += factor*(momentum_i.y-momentum_nb.y); flux_i_momentum.z += factor*(momentum_i.z-momentum_nb.z); // accumulate cell-centered fluxes factor = float(0.5f)*normal.x; flux_i_density += factor*(momentum_nb.x+momentum_i.x); flux_i_density_energy += factor*(flux_contribution_nb_density_energy.x+flux_contribution_i_density_energy.x); flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.x+flux_contribution_i_momentum_x.x); flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.x+flux_contribution_i_momentum_y.x); flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.x+flux_contribution_i_momentum_z.x); factor = float(0.5f)*normal.y; flux_i_density += factor*(momentum_nb.y+momentum_i.y); flux_i_density_energy += factor*(flux_contribution_nb_density_energy.y+flux_contribution_i_density_energy.y); flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.y+flux_contribution_i_momentum_x.y); flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.y+flux_contribution_i_momentum_y.y); flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.y+flux_contribution_i_momentum_z.y); factor = float(0.5f)*normal.z; flux_i_density += factor*(momentum_nb.z+momentum_i.z); flux_i_density_energy += factor*(flux_contribution_nb_density_energy.z+flux_contribution_i_density_energy.z); flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.z+flux_contribution_i_momentum_x.z); flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.z+flux_contribution_i_momentum_y.z); flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.z+flux_contribution_i_momentum_z.z); } else if(nb == -1) // a wing boundary { flux_i_momentum.x += normal.x*pressure_i; 
flux_i_momentum.y += normal.y*pressure_i; flux_i_momentum.z += normal.z*pressure_i; } else if(nb == -2) // a far field boundary { factor = float(0.5f)*normal.x; flux_i_density += factor*(ff_variable[VAR_MOMENTUM+0]+momentum_i.x); flux_i_density_energy += factor*(ff_flux_contribution_density_energy[0].x+flux_contribution_i_density_energy.x); flux_i_momentum.x += factor*(ff_flux_contribution_momentum_x[0].x + flux_contribution_i_momentum_x.x); flux_i_momentum.y += factor*(ff_flux_contribution_momentum_y[0].x + flux_contribution_i_momentum_y.x); flux_i_momentum.z += factor*(ff_flux_contribution_momentum_z[0].x + flux_contribution_i_momentum_z.x); factor = float(0.5f)*normal.y; flux_i_density += factor*(ff_variable[VAR_MOMENTUM+1]+momentum_i.y); flux_i_density_energy += factor*(ff_flux_contribution_density_energy[0].y+flux_contribution_i_density_energy.y); flux_i_momentum.x += factor*(ff_flux_contribution_momentum_x[0].y + flux_contribution_i_momentum_x.y); flux_i_momentum.y += factor*(ff_flux_contribution_momentum_y[0].y + flux_contribution_i_momentum_y.y); flux_i_momentum.z += factor*(ff_flux_contribution_momentum_z[0].y + flux_contribution_i_momentum_z.y); factor = float(0.5f)*normal.z; flux_i_density += factor*(ff_variable[VAR_MOMENTUM+2]+momentum_i.z); flux_i_density_energy += factor*(ff_flux_contribution_density_energy[0].z+flux_contribution_i_density_energy.z); flux_i_momentum.x += factor*(ff_flux_contribution_momentum_x[0].z + flux_contribution_i_momentum_x.z); flux_i_momentum.y += factor*(ff_flux_contribution_momentum_y[0].z + flux_contribution_i_momentum_y.z); flux_i_momentum.z += factor*(ff_flux_contribution_momentum_z[0].z + flux_contribution_i_momentum_z.z); } } fluxes[i + VAR_DENSITY*nelr] = flux_i_density; fluxes[i + (VAR_MOMENTUM+0)*nelr] = flux_i_momentum.x; fluxes[i + (VAR_MOMENTUM+1)*nelr] = flux_i_momentum.y; fluxes[i + (VAR_MOMENTUM+2)*nelr] = flux_i_momentum.z; fluxes[i + VAR_DENSITY_ENERGY*nelr] = flux_i_density_energy; } void compute_flux(int nelr, int* elements_surrounding_elements, float* normals, float* variables, float* fluxes) { dim3 Dg(nelr / block_length), Db(block_length); cuda_compute_flux<<<Dg,Db>>>(nelr, elements_surrounding_elements, normals, variables, fluxes); CUT_CHECK_ERROR("compute_flux failed"); } __global__ void cuda_time_step(int j, int nelr, float* old_variables, float* variables, float* step_factors, float* fluxes) { const int i = (blockDim.x*blockIdx.x + threadIdx.x); float factor = step_factors[i]/float(RK+1-j); variables[i + VAR_DENSITY*nelr] = old_variables[i + VAR_DENSITY*nelr] + factor*fluxes[i + VAR_DENSITY*nelr]; variables[i + VAR_DENSITY_ENERGY*nelr] = old_variables[i + VAR_DENSITY_ENERGY*nelr] + factor*fluxes[i + VAR_DENSITY_ENERGY*nelr]; variables[i + (VAR_MOMENTUM+0)*nelr] = old_variables[i + (VAR_MOMENTUM+0)*nelr] + factor*fluxes[i + (VAR_MOMENTUM+0)*nelr]; variables[i + (VAR_MOMENTUM+1)*nelr] = old_variables[i + (VAR_MOMENTUM+1)*nelr] + factor*fluxes[i + (VAR_MOMENTUM+1)*nelr]; variables[i + (VAR_MOMENTUM+2)*nelr] = old_variables[i + (VAR_MOMENTUM+2)*nelr] + factor*fluxes[i + (VAR_MOMENTUM+2)*nelr]; } void time_step(int j, int nelr, float* old_variables, float* variables, float* step_factors, float* fluxes) { dim3 Dg(nelr / block_length), Db(block_length); cuda_time_step<<<Dg,Db>>>(j, nelr, old_variables, variables, step_factors, fluxes); CUT_CHECK_ERROR("update failed"); } /* * Main function */ int main(int argc, char** argv) { if (argc < 2) { std::cout << "specify data file name" << std::endl; return 0; } const char* data_file_name 
= argv[1]; if (argc == 3) { iterations = atoi(argv[2]); } cudaDeviceProp prop; int dev; CUDA_SAFE_CALL(cudaSetDevice(0)); CUDA_SAFE_CALL(cudaGetDevice(&dev)); CUDA_SAFE_CALL(cudaGetDeviceProperties(&prop, dev)); printf("Name: %s\n", prop.name); // set far field conditions and load them into constant memory on the gpu //{ float h_ff_variable[NVAR]; const float angle_of_attack = float(3.1415926535897931 / 180.0f) * float(deg_angle_of_attack); h_ff_variable[VAR_DENSITY] = float(1.4); float ff_pressure = float(1.0f); float ff_speed_of_sound = sqrt(GAMMA*ff_pressure / h_ff_variable[VAR_DENSITY]); float ff_speed = float(ff_mach)*ff_speed_of_sound; float3 ff_velocity; ff_velocity.x = ff_speed*float(cos((float)angle_of_attack)); ff_velocity.y = ff_speed*float(sin((float)angle_of_attack)); ff_velocity.z = 0.0f; h_ff_variable[VAR_MOMENTUM+0] = h_ff_variable[VAR_DENSITY] * ff_velocity.x; h_ff_variable[VAR_MOMENTUM+1] = h_ff_variable[VAR_DENSITY] * ff_velocity.y; h_ff_variable[VAR_MOMENTUM+2] = h_ff_variable[VAR_DENSITY] * ff_velocity.z; h_ff_variable[VAR_DENSITY_ENERGY] = h_ff_variable[VAR_DENSITY]*(float(0.5f)*(ff_speed*ff_speed)) + (ff_pressure / float(GAMMA-1.0f)); float3 h_ff_momentum; h_ff_momentum.x = *(h_ff_variable+VAR_MOMENTUM+0); h_ff_momentum.y = *(h_ff_variable+VAR_MOMENTUM+1); h_ff_momentum.z = *(h_ff_variable+VAR_MOMENTUM+2); float3 h_ff_flux_contribution_momentum_x; float3 h_ff_flux_contribution_momentum_y; float3 h_ff_flux_contribution_momentum_z; float3 h_ff_flux_contribution_density_energy; //} int nel; int nelr; // read in domain geometry float* areas; int* elements_surrounding_elements; float* normals; //{ std::ifstream file(data_file_name); file >> nel; nelr = block_length*((nel / block_length )+ std::min(1, nel % block_length)); float* h_areas = new float[nelr]; int* h_elements_surrounding_elements = new int[nelr*NNB]; float* h_normals = new float[nelr*NDIM*NNB]; // read in data for(int i = 0; i < nel; i++) { file >> h_areas[i]; for(int j = 0; j < NNB; j++) { file >> h_elements_surrounding_elements[i + j*nelr]; if(h_elements_surrounding_elements[i+j*nelr] < 0) h_elements_surrounding_elements[i+j*nelr] = -1; h_elements_surrounding_elements[i + j*nelr]--; //it's coming in with Fortran numbering for(int k = 0; k < NDIM; k++) { file >> h_normals[i + (j + k*NNB)*nelr]; h_normals[i + (j + k*NNB)*nelr] = -h_normals[i + (j + k*NNB)*nelr]; } } } // fill in remaining data int last = nel-1; for(int i = nel; i < nelr; i++) { h_areas[i] = h_areas[last]; for(int j = 0; j < NNB; j++) { // duplicate the last element h_elements_surrounding_elements[i + j*nelr] = h_elements_surrounding_elements[last + j*nelr]; for(int k = 0; k < NDIM; k++) h_normals[last + (j + k*NNB)*nelr] = h_normals[last + (j + k*NNB)*nelr]; } } areas = alloc<float>(nelr); elements_surrounding_elements = alloc<int>(nelr*NNB); normals = alloc<float>(nelr*NDIM*NNB); //} #ifdef GEM5_FUSION m5_work_begin(0, 0); #endif compute_flux_contribution(h_ff_variable[VAR_DENSITY], h_ff_momentum, h_ff_variable[VAR_DENSITY_ENERGY], ff_pressure, ff_velocity, h_ff_flux_contribution_momentum_x, h_ff_flux_contribution_momentum_y, h_ff_flux_contribution_momentum_z, h_ff_flux_contribution_density_energy); // copy far field conditions to the gpu CUDA_SAFE_CALL( cudaMemcpyToSymbol(ff_variable, h_ff_variable, NVAR*sizeof(float)) ); CUDA_SAFE_CALL( cudaMemcpyToSymbol(ff_flux_contribution_momentum_x, &h_ff_flux_contribution_momentum_x, sizeof(float3)) ); CUDA_SAFE_CALL( cudaMemcpyToSymbol(ff_flux_contribution_momentum_y, 
&h_ff_flux_contribution_momentum_y, sizeof(float3)) ); CUDA_SAFE_CALL( cudaMemcpyToSymbol(ff_flux_contribution_momentum_z, &h_ff_flux_contribution_momentum_z, sizeof(float3)) ); CUDA_SAFE_CALL( cudaMemcpyToSymbol(ff_flux_contribution_density_energy, &h_ff_flux_contribution_density_energy, sizeof(float3)) ); upload<float>(areas, h_areas, nelr); upload<int>(elements_surrounding_elements, h_elements_surrounding_elements, nelr*NNB); upload<float>(normals, h_normals, nelr*NDIM*NNB); // Create arrays and set initial conditions float* variables = alloc<float>(nelr*NVAR); initialize_variables(nelr, variables); float* old_variables = alloc<float>(nelr*NVAR); float* fluxes = alloc<float>(nelr*NVAR); float* step_factors = alloc<float>(nelr); // make sure all memory is floatly allocated before we start timing initialize_variables(nelr, old_variables); initialize_variables(nelr, fluxes); cudaMemset( (void*) step_factors, 0, sizeof(float)*nelr ); // make sure CUDA isn't still doing something before we start timing cudaThreadSynchronize(); // these need to be computed the first time in order to compute time step std::cout << "Starting..." << std::endl; unsigned int timer = 0; CUT_SAFE_CALL( cutCreateTimer( &timer)); CUT_SAFE_CALL( cutStartTimer( timer)); // Begin iterations for(int i = 0; i < iterations; i++) { copy<float>(old_variables, variables, nelr*NVAR); // for the first iteration we compute the time step compute_step_factor(nelr, variables, areas, step_factors); CUT_CHECK_ERROR("compute_step_factor failed"); for(int j = 0; j < RK; j++) { compute_flux(nelr, elements_surrounding_elements, normals, variables, fluxes); CUT_CHECK_ERROR("compute_flux failed"); time_step(j, nelr, old_variables, variables, step_factors, fluxes); CUT_CHECK_ERROR("time_step failed"); } } cudaThreadSynchronize(); #ifdef GEM5_FUSION m5_work_end(0, 0); #endif CUT_SAFE_CALL( cutStopTimer(timer) ); std::cout << (cutGetAverageTimerValue(timer)/1000.0) / iterations << " seconds per iteration" << std::endl; std::cout << "Saving solution..." << std::endl; dump(variables, nel, nelr); std::cout << "Saved solution..." << std::endl; std::cout << "Cleaning up..." << std::endl; delete[] h_areas; delete[] h_elements_surrounding_elements; delete[] h_normals; dealloc<float>(areas); dealloc<int>(elements_surrounding_elements); dealloc<float>(normals); dealloc<float>(variables); dealloc<float>(old_variables); dealloc<float>(fluxes); dealloc<float>(step_factors); std::cout << "Done..." << std::endl; return 0; }
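// --- Illustrative sketch (not part of the record above; names and the GAMMA value are assumptions) ---
// cuda_compute_step_factor stores, per element, the CFL-style time step
// dt = 0.5*sqrt(area_i)/(|v_i| + c_i) with the later division by area_i already
// folded in, i.e. 0.5/(sqrt(area_i)*(|v_i| + c_i)), as its comment notes.
// A minimal host-side restatement of that arithmetic for a single element,
// assuming GAMMA = 1.4f (the usual ratio of specific heats for this benchmark):
#include <cmath>
#include <cstdio>

static float step_factor_reference(float density, float mx, float my, float mz,
                                   float density_energy, float area)
{
    const float GAMMA_REF = 1.4f;                         // assumed, matches the far-field density above
    float vx = mx / density, vy = my / density, vz = mz / density;
    float speed_sqd = vx*vx + vy*vy + vz*vz;
    float pressure = (GAMMA_REF - 1.0f) * (density_energy - 0.5f*density*speed_sqd);
    float speed_of_sound = std::sqrt(GAMMA_REF * pressure / density);
    // same expression the kernel writes into step_factors[i]
    return 0.5f / (std::sqrt(area) * (std::sqrt(speed_sqd) + speed_of_sound));
}

int main()
{
    // free-stream-like state: rho = 1.4, at rest, unit pressure (rho*E = 2.5), unit area -> 0.5
    std::printf("%f\n", step_factor_reference(1.4f, 0.0f, 0.0f, 0.0f, 2.5f, 1.0f));
    return 0;
}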
5dea791e4199d24cacdd1e07c48c32d3a8084a12.hip
// !!! This is a file automatically generated by hipify!!! #include "luaT.h" #include "THH.h" #include "THLogAdd.h" /* DEBUG: WTF */ #include <thrust/transform.h> #include <thrust/reduce.h> #include <thrust/transform_reduce.h> #include <thrust/functional.h> #include <thrust/device_ptr.h> LUA_EXTERNC DLL_EXPORT int luaopen_libeladtools(lua_State *L); int luaopen_libeladtools(lua_State *L) { lua_newtable(L); return 1; }
5dea791e4199d24cacdd1e07c48c32d3a8084a12.cu
#include "luaT.h" #include "THC.h" #include "THLogAdd.h" /* DEBUG: WTF */ #include <thrust/transform.h> #include <thrust/reduce.h> #include <thrust/transform_reduce.h> #include <thrust/functional.h> #include <thrust/device_ptr.h> LUA_EXTERNC DLL_EXPORT int luaopen_libeladtools(lua_State *L); int luaopen_libeladtools(lua_State *L) { lua_newtable(L); return 1; }
658fb05f81fc3d6f50c1933bb9bd136fc600375f.hip
// !!! This is a file automatically generated by hipify!!! #include <ATen/native/TensorIterator.h> #include <ATen/native/hip/Reduce.cuh> #include <ATen/native/DispatchStub.h> #include <ATen/native/SharedReduceOps.h> #include <ATen/Dispatch.h> #include <ATen/native/ReduceOps.h> #include <ATen/native/LinearAlgebra.h> namespace at { namespace native { // This reduction accumulates results as the type `acc_t`. By default, when // `scalar_t` is complex, `acc_t` is the downgraded real number type. // Otherwise, `acc_t` and `scalar_t` are the same type. template <typename scalar_t, typename acc_t=typename scalar_value_type<scalar_t>::type, typename out_t=typename scalar_value_type<scalar_t>::type> void norm_kernel_cuda_impl(TensorIterator& iter, const Scalar& val) { double p; if (val.isIntegral(false)) { p = val.to<int64_t>(); } else if (val.isFloatingPoint()) { p = val.to<double>(); } else { AT_ERROR("norm_kernel_cuda_impl expects norm to be integer or float"); } if (iter.numel() == 0) { iter.output().fill_((p < 0) ? INFINITY : 0); return; } if (p == static_cast<double>(0)) { gpu_reduce_kernel<scalar_t, out_t>(iter, NormZeroOps<scalar_t, acc_t>(), 0); } else if (p == static_cast<double>(1)) { gpu_reduce_kernel<scalar_t, out_t>(iter, NormOneOps<scalar_t, acc_t>(), 0); } else if (p == static_cast<double>(2)) { gpu_reduce_kernel<scalar_t, out_t>(iter, NormTwoOps<scalar_t, acc_t>(), 0); } else if (p == static_cast<double>(INFINITY)) { gpu_reduce_kernel<scalar_t, out_t>(iter, AbsMaxOps<scalar_t, acc_t>(), 0); } else if (p == static_cast<double>(-INFINITY)) { gpu_reduce_kernel<scalar_t, out_t>(iter, AbsMinOps<scalar_t, acc_t>(), std::numeric_limits<acc_t>::infinity()); } else { gpu_reduce_kernel<scalar_t, out_t>(iter, NormOps<scalar_t, acc_t>{ acc_t(p) }, 0); } if (isComplexType(iter.output().scalar_type())) { at::imag(iter.output()).zero_(); } } static void norm_kernel_cuda(TensorIterator& iter, const Scalar& p) { if (iter.input_dtype() == kHalf) { return norm_kernel_cuda_impl<at::Half, float>(iter, p); } else if (iter.dtype(1) == kHalf && iter.input_dtype() == kFloat) { // type promotion that does cast and reduction in a single kernel return norm_kernel_cuda_impl<at::Half, float, float>(iter, p); } else if(iter.input_dtype() == kBFloat16) { return norm_kernel_cuda_impl<at::BFloat16, float>(iter, p); } else if (iter.dtype(1) == kBFloat16 && iter.input_dtype() == kFloat) { // type promotion that does cast and reduction in a single kernel return norm_kernel_cuda_impl<at::BFloat16, float, float>(iter, p); } AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(iter.input_dtype(), "norm_cuda", [&] { norm_kernel_cuda_impl<scalar_t>(iter, p); }); } static void linalg_vector_norm_kernel_cuda(TensorIterator& iter, Scalar ord) { TORCH_CHECK(ord.isFloatingPoint(), "linalg.vector_norm expects ord to be float"); if (iter.output().scalar_type() == kHalf) { return norm_kernel_cuda_impl<at::Half, float>(iter, ord); } else if (iter.input_dtype() == kHalf && iter.output().scalar_type() == kFloat) { // type promotion that does cast and reduction in a single kernel return norm_kernel_cuda_impl<at::Half, float, float>(iter, ord); } else if(iter.output().scalar_type() == kBFloat16) { return norm_kernel_cuda_impl<at::BFloat16, float>(iter, ord); } else if (iter.input_dtype() == kBFloat16 && iter.output().scalar_type() == kFloat) { // type promotion that does cast and reduction in a single kernel return norm_kernel_cuda_impl<at::BFloat16, float, float>(iter, ord); } AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(iter.input_dtype(), 
"linalg_vector_norm_cuda", [&] { norm_kernel_cuda_impl<scalar_t>(iter, ord); }); } REGISTER_DISPATCH(norm_stub, &norm_kernel_cuda); REGISTER_DISPATCH(linalg_vector_norm_stub, &linalg_vector_norm_kernel_cuda); }} // namespace at::native
658fb05f81fc3d6f50c1933bb9bd136fc600375f.cu
#include <ATen/native/TensorIterator.h> #include <ATen/native/cuda/Reduce.cuh> #include <ATen/native/DispatchStub.h> #include <ATen/native/SharedReduceOps.h> #include <ATen/Dispatch.h> #include <ATen/native/ReduceOps.h> #include <ATen/native/LinearAlgebra.h> namespace at { namespace native { // This reduction accumulates results as the type `acc_t`. By default, when // `scalar_t` is complex, `acc_t` is the downgraded real number type. // Otherwise, `acc_t` and `scalar_t` are the same type. template <typename scalar_t, typename acc_t=typename scalar_value_type<scalar_t>::type, typename out_t=typename scalar_value_type<scalar_t>::type> void norm_kernel_cuda_impl(TensorIterator& iter, const Scalar& val) { double p; if (val.isIntegral(false)) { p = val.to<int64_t>(); } else if (val.isFloatingPoint()) { p = val.to<double>(); } else { AT_ERROR("norm_kernel_cuda_impl expects norm to be integer or float"); } if (iter.numel() == 0) { iter.output().fill_((p < 0) ? INFINITY : 0); return; } if (p == static_cast<double>(0)) { gpu_reduce_kernel<scalar_t, out_t>(iter, NormZeroOps<scalar_t, acc_t>(), 0); } else if (p == static_cast<double>(1)) { gpu_reduce_kernel<scalar_t, out_t>(iter, NormOneOps<scalar_t, acc_t>(), 0); } else if (p == static_cast<double>(2)) { gpu_reduce_kernel<scalar_t, out_t>(iter, NormTwoOps<scalar_t, acc_t>(), 0); } else if (p == static_cast<double>(INFINITY)) { gpu_reduce_kernel<scalar_t, out_t>(iter, AbsMaxOps<scalar_t, acc_t>(), 0); } else if (p == static_cast<double>(-INFINITY)) { gpu_reduce_kernel<scalar_t, out_t>(iter, AbsMinOps<scalar_t, acc_t>(), std::numeric_limits<acc_t>::infinity()); } else { gpu_reduce_kernel<scalar_t, out_t>(iter, NormOps<scalar_t, acc_t>{ acc_t(p) }, 0); } if (isComplexType(iter.output().scalar_type())) { at::imag(iter.output()).zero_(); } } static void norm_kernel_cuda(TensorIterator& iter, const Scalar& p) { if (iter.input_dtype() == kHalf) { return norm_kernel_cuda_impl<at::Half, float>(iter, p); } else if (iter.dtype(1) == kHalf && iter.input_dtype() == kFloat) { // type promotion that does cast and reduction in a single kernel return norm_kernel_cuda_impl<at::Half, float, float>(iter, p); } else if(iter.input_dtype() == kBFloat16) { return norm_kernel_cuda_impl<at::BFloat16, float>(iter, p); } else if (iter.dtype(1) == kBFloat16 && iter.input_dtype() == kFloat) { // type promotion that does cast and reduction in a single kernel return norm_kernel_cuda_impl<at::BFloat16, float, float>(iter, p); } AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(iter.input_dtype(), "norm_cuda", [&] { norm_kernel_cuda_impl<scalar_t>(iter, p); }); } static void linalg_vector_norm_kernel_cuda(TensorIterator& iter, Scalar ord) { TORCH_CHECK(ord.isFloatingPoint(), "linalg.vector_norm expects ord to be float"); if (iter.output().scalar_type() == kHalf) { return norm_kernel_cuda_impl<at::Half, float>(iter, ord); } else if (iter.input_dtype() == kHalf && iter.output().scalar_type() == kFloat) { // type promotion that does cast and reduction in a single kernel return norm_kernel_cuda_impl<at::Half, float, float>(iter, ord); } else if(iter.output().scalar_type() == kBFloat16) { return norm_kernel_cuda_impl<at::BFloat16, float>(iter, ord); } else if (iter.input_dtype() == kBFloat16 && iter.output().scalar_type() == kFloat) { // type promotion that does cast and reduction in a single kernel return norm_kernel_cuda_impl<at::BFloat16, float, float>(iter, ord); } AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(iter.input_dtype(), "linalg_vector_norm_cuda", [&] { norm_kernel_cuda_impl<scalar_t>(iter, 
ord); }); } REGISTER_DISPATCH(norm_stub, &norm_kernel_cuda); REGISTER_DISPATCH(linalg_vector_norm_stub, &linalg_vector_norm_kernel_cuda); }} // namespace at::native
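// --- Illustrative sketch (not part of the record above) ---
// The dispatch above routes p = 0, 1, 2, +inf and -inf to specialized reduction
// functors (NormZeroOps, NormOneOps, NormTwoOps, AbsMaxOps, AbsMinOps) and
// everything else to NormOps{p}. The quantity being reduced is the vector
// p-norm; a plain CPU restatement of that definition, for orientation only
// (this is not ATen's implementation):
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <limits>
#include <vector>

static double vector_norm_reference(const std::vector<double>& x, double p)
{
    if (p == 0.0) {                                  // count of non-zero entries
        double n = 0.0;
        for (double v : x) n += (v != 0.0);
        return n;
    }
    if (std::isinf(p)) {                             // max (+inf) or min (-inf) absolute value
        double m = (p > 0) ? 0.0 : std::numeric_limits<double>::infinity();
        for (double v : x)
            m = (p > 0) ? std::max(m, std::fabs(v)) : std::min(m, std::fabs(v));
        return m;
    }
    double acc = 0.0;                                // general case: (sum_i |x_i|^p)^(1/p)
    for (double v : x) acc += std::pow(std::fabs(v), p);
    return std::pow(acc, 1.0 / p);
}

int main()
{
    std::vector<double> x = {3.0, -4.0, 0.0};
    std::printf("%f %f %f\n",
                vector_norm_reference(x, 2.0),       // 5 (Euclidean)
                vector_norm_reference(x, 1.0),       // 7
                vector_norm_reference(x, 0.0));      // 2 (non-zeros)
    return 0;
}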
0238b08d050ba2b672d5c4b105bd448eccf23922.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "srad.h" #include <stdio.h> __global__ void srad_cuda_1( float *E_C, float *W_C, float *N_C, float *S_C, float * J_cuda, float * C_cuda, long cols, long rows, float q0sqr ) { //block id long bx = (long)blockIdx.x; long by = (long)blockIdx.y; //thread id long tx = (long)threadIdx.x; long ty = (long)threadIdx.y; //indices long index = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * ty + tx; long index_n = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + tx - cols; long index_s = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * BLOCK_SIZE + tx; long index_w = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * ty - 1; long index_e = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * ty + BLOCK_SIZE; float n, w, e, s, jc, g2, l, num, den, qsqr, c; //shared memory allocation __shared__ float temp[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float temp_result[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float north[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float south[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float east[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float west[BLOCK_SIZE][BLOCK_SIZE]; //load data to shared memory north[ty][tx] = J_cuda[index_n]; south[ty][tx] = J_cuda[index_s]; if ( by == 0 ){ north[ty][tx] = J_cuda[BLOCK_SIZE * bx + tx]; } else if ( by == gridDim.y - 1 ){ south[ty][tx] = J_cuda[cols * BLOCK_SIZE * (gridDim.y - 1) + BLOCK_SIZE * bx + cols * ( BLOCK_SIZE - 1 ) + tx]; } __syncthreads(); west[ty][tx] = J_cuda[index_w]; east[ty][tx] = J_cuda[index_e]; if ( bx == 0 ){ west[ty][tx] = J_cuda[cols * BLOCK_SIZE * by + cols * ty]; } else if ( bx == gridDim.x - 1 ){ east[ty][tx] = J_cuda[cols * BLOCK_SIZE * by + BLOCK_SIZE * ( gridDim.x - 1) + cols * ty + BLOCK_SIZE-1]; } __syncthreads(); temp[ty][tx] = J_cuda[index]; __syncthreads(); jc = temp[ty][tx]; if ( ty == 0 && tx == 0 ){ //nw n = north[ty][tx] - jc; s = temp[ty+1][tx] - jc; w = west[ty][tx] - jc; e = temp[ty][tx+1] - jc; } else if ( ty == 0 && tx == BLOCK_SIZE-1 ){ //ne n = north[ty][tx] - jc; s = temp[ty+1][tx] - jc; w = temp[ty][tx-1] - jc; e = east[ty][tx] - jc; } else if ( ty == BLOCK_SIZE -1 && tx == BLOCK_SIZE - 1){ //se n = temp[ty-1][tx] - jc; s = south[ty][tx] - jc; w = temp[ty][tx-1] - jc; e = east[ty][tx] - jc; } else if ( ty == BLOCK_SIZE -1 && tx == 0 ){//sw n = temp[ty-1][tx] - jc; s = south[ty][tx] - jc; w = west[ty][tx] - jc; e = temp[ty][tx+1] - jc; } else if ( ty == 0 ){ //n n = north[ty][tx] - jc; s = temp[ty+1][tx] - jc; w = temp[ty][tx-1] - jc; e = temp[ty][tx+1] - jc; } else if ( tx == BLOCK_SIZE -1 ){ //e n = temp[ty-1][tx] - jc; s = temp[ty+1][tx] - jc; w = temp[ty][tx-1] - jc; e = east[ty][tx] - jc; } else if ( ty == BLOCK_SIZE -1){ //s n = temp[ty-1][tx] - jc; s = south[ty][tx] - jc; w = temp[ty][tx-1] - jc; e = temp[ty][tx+1] - jc; } else if ( tx == 0 ){ //w n = temp[ty-1][tx] - jc; s = temp[ty+1][tx] - jc; w = west[ty][tx] - jc; e = temp[ty][tx+1] - jc; } else{ //the data elements which are not on the borders n = temp[ty-1][tx] - jc; s = temp[ty+1][tx] - jc; w = temp[ty][tx-1] - jc; e = temp[ty][tx+1] - jc; } g2 = ( n * n + s * s + w * w + e * e ) / (jc * jc); l = ( n + s + w + e ) / jc; num = (0.5*g2) - ((1.0/16.0)*(l*l)) ; den = 1 + (.25*l); qsqr = num/(den*den); // diffusion coefficent (equ 33) den = (qsqr-q0sqr) / (q0sqr * (1+q0sqr)) ; c = 1.0 / (1.0+den) ; // saturate diffusion coefficent if (c < 0){temp_result[ty][tx] = 0;} else if (c > 1) {temp_result[ty][tx] = 1;} else {temp_result[ty][tx] = c;} __syncthreads(); C_cuda[index] = 
temp_result[ty][tx]; E_C[index] = e; W_C[index] = w; S_C[index] = s; N_C[index] = n; } __global__ void srad_cuda_2( float *E_C, float *W_C, float *N_C, float *S_C, float * J_cuda, float * C_cuda, long cols, long rows, float lambda, float q0sqr ) { //block id long bx = blockIdx.x; long by = blockIdx.y; //thread id long tx = threadIdx.x; long ty = threadIdx.y; //indices long index = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * ty + tx; long index_s = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * BLOCK_SIZE + tx; long index_e = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * ty + BLOCK_SIZE; float cc, cn, cs, ce, cw, d_sum; //shared memory allocation __shared__ float south_c[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float east_c[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float c_cuda_temp[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float c_cuda_result[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float temp[BLOCK_SIZE][BLOCK_SIZE]; //load data to shared memory temp[ty][tx] = J_cuda[index]; __syncthreads(); south_c[ty][tx] = C_cuda[index_s]; if ( by == gridDim.y - 1 ){ south_c[ty][tx] = C_cuda[cols * BLOCK_SIZE * (gridDim.y - 1) + BLOCK_SIZE * bx + cols * ( BLOCK_SIZE - 1 ) + tx]; } __syncthreads(); east_c[ty][tx] = C_cuda[index_e]; if ( bx == gridDim.x - 1 ){ east_c[ty][tx] = C_cuda[cols * BLOCK_SIZE * by + BLOCK_SIZE * ( gridDim.x - 1) + cols * ty + BLOCK_SIZE-1]; } __syncthreads(); c_cuda_temp[ty][tx] = C_cuda[index]; __syncthreads(); cc = c_cuda_temp[ty][tx]; if ( ty == BLOCK_SIZE -1 && tx == BLOCK_SIZE - 1){ //se cn = cc; cs = south_c[ty][tx]; cw = cc; ce = east_c[ty][tx]; } else if ( tx == BLOCK_SIZE -1 ){ //e cn = cc; cs = c_cuda_temp[ty+1][tx]; cw = cc; ce = east_c[ty][tx]; } else if ( ty == BLOCK_SIZE -1){ //s cn = cc; cs = south_c[ty][tx]; cw = cc; ce = c_cuda_temp[ty][tx+1]; } else{ //the data elements which are not on the borders cn = cc; cs = c_cuda_temp[ty+1][tx]; cw = cc; ce = c_cuda_temp[ty][tx+1]; } // divergence (equ 58) d_sum = cn * N_C[index] + cs * S_C[index] + cw * W_C[index] + ce * E_C[index]; // image update (equ 61) c_cuda_result[ty][tx] = temp[ty][tx] + 0.25 * lambda * d_sum; __syncthreads(); J_cuda[index] = c_cuda_result[ty][tx]; }
0238b08d050ba2b672d5c4b105bd448eccf23922.cu
#include "srad.h" #include <stdio.h> __global__ void srad_cuda_1( float *E_C, float *W_C, float *N_C, float *S_C, float * J_cuda, float * C_cuda, long cols, long rows, float q0sqr ) { //block id long bx = (long)blockIdx.x; long by = (long)blockIdx.y; //thread id long tx = (long)threadIdx.x; long ty = (long)threadIdx.y; //indices long index = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * ty + tx; long index_n = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + tx - cols; long index_s = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * BLOCK_SIZE + tx; long index_w = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * ty - 1; long index_e = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * ty + BLOCK_SIZE; float n, w, e, s, jc, g2, l, num, den, qsqr, c; //shared memory allocation __shared__ float temp[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float temp_result[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float north[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float south[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float east[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float west[BLOCK_SIZE][BLOCK_SIZE]; //load data to shared memory north[ty][tx] = J_cuda[index_n]; south[ty][tx] = J_cuda[index_s]; if ( by == 0 ){ north[ty][tx] = J_cuda[BLOCK_SIZE * bx + tx]; } else if ( by == gridDim.y - 1 ){ south[ty][tx] = J_cuda[cols * BLOCK_SIZE * (gridDim.y - 1) + BLOCK_SIZE * bx + cols * ( BLOCK_SIZE - 1 ) + tx]; } __syncthreads(); west[ty][tx] = J_cuda[index_w]; east[ty][tx] = J_cuda[index_e]; if ( bx == 0 ){ west[ty][tx] = J_cuda[cols * BLOCK_SIZE * by + cols * ty]; } else if ( bx == gridDim.x - 1 ){ east[ty][tx] = J_cuda[cols * BLOCK_SIZE * by + BLOCK_SIZE * ( gridDim.x - 1) + cols * ty + BLOCK_SIZE-1]; } __syncthreads(); temp[ty][tx] = J_cuda[index]; __syncthreads(); jc = temp[ty][tx]; if ( ty == 0 && tx == 0 ){ //nw n = north[ty][tx] - jc; s = temp[ty+1][tx] - jc; w = west[ty][tx] - jc; e = temp[ty][tx+1] - jc; } else if ( ty == 0 && tx == BLOCK_SIZE-1 ){ //ne n = north[ty][tx] - jc; s = temp[ty+1][tx] - jc; w = temp[ty][tx-1] - jc; e = east[ty][tx] - jc; } else if ( ty == BLOCK_SIZE -1 && tx == BLOCK_SIZE - 1){ //se n = temp[ty-1][tx] - jc; s = south[ty][tx] - jc; w = temp[ty][tx-1] - jc; e = east[ty][tx] - jc; } else if ( ty == BLOCK_SIZE -1 && tx == 0 ){//sw n = temp[ty-1][tx] - jc; s = south[ty][tx] - jc; w = west[ty][tx] - jc; e = temp[ty][tx+1] - jc; } else if ( ty == 0 ){ //n n = north[ty][tx] - jc; s = temp[ty+1][tx] - jc; w = temp[ty][tx-1] - jc; e = temp[ty][tx+1] - jc; } else if ( tx == BLOCK_SIZE -1 ){ //e n = temp[ty-1][tx] - jc; s = temp[ty+1][tx] - jc; w = temp[ty][tx-1] - jc; e = east[ty][tx] - jc; } else if ( ty == BLOCK_SIZE -1){ //s n = temp[ty-1][tx] - jc; s = south[ty][tx] - jc; w = temp[ty][tx-1] - jc; e = temp[ty][tx+1] - jc; } else if ( tx == 0 ){ //w n = temp[ty-1][tx] - jc; s = temp[ty+1][tx] - jc; w = west[ty][tx] - jc; e = temp[ty][tx+1] - jc; } else{ //the data elements which are not on the borders n = temp[ty-1][tx] - jc; s = temp[ty+1][tx] - jc; w = temp[ty][tx-1] - jc; e = temp[ty][tx+1] - jc; } g2 = ( n * n + s * s + w * w + e * e ) / (jc * jc); l = ( n + s + w + e ) / jc; num = (0.5*g2) - ((1.0/16.0)*(l*l)) ; den = 1 + (.25*l); qsqr = num/(den*den); // diffusion coefficent (equ 33) den = (qsqr-q0sqr) / (q0sqr * (1+q0sqr)) ; c = 1.0 / (1.0+den) ; // saturate diffusion coefficent if (c < 0){temp_result[ty][tx] = 0;} else if (c > 1) {temp_result[ty][tx] = 1;} else {temp_result[ty][tx] = c;} __syncthreads(); C_cuda[index] = temp_result[ty][tx]; E_C[index] = e; W_C[index] = w; S_C[index] = s; N_C[index] = n; } 
__global__ void srad_cuda_2( float *E_C, float *W_C, float *N_C, float *S_C, float * J_cuda, float * C_cuda, long cols, long rows, float lambda, float q0sqr ) { //block id long bx = blockIdx.x; long by = blockIdx.y; //thread id long tx = threadIdx.x; long ty = threadIdx.y; //indices long index = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * ty + tx; long index_s = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * BLOCK_SIZE + tx; long index_e = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * ty + BLOCK_SIZE; float cc, cn, cs, ce, cw, d_sum; //shared memory allocation __shared__ float south_c[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float east_c[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float c_cuda_temp[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float c_cuda_result[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float temp[BLOCK_SIZE][BLOCK_SIZE]; //load data to shared memory temp[ty][tx] = J_cuda[index]; __syncthreads(); south_c[ty][tx] = C_cuda[index_s]; if ( by == gridDim.y - 1 ){ south_c[ty][tx] = C_cuda[cols * BLOCK_SIZE * (gridDim.y - 1) + BLOCK_SIZE * bx + cols * ( BLOCK_SIZE - 1 ) + tx]; } __syncthreads(); east_c[ty][tx] = C_cuda[index_e]; if ( bx == gridDim.x - 1 ){ east_c[ty][tx] = C_cuda[cols * BLOCK_SIZE * by + BLOCK_SIZE * ( gridDim.x - 1) + cols * ty + BLOCK_SIZE-1]; } __syncthreads(); c_cuda_temp[ty][tx] = C_cuda[index]; __syncthreads(); cc = c_cuda_temp[ty][tx]; if ( ty == BLOCK_SIZE -1 && tx == BLOCK_SIZE - 1){ //se cn = cc; cs = south_c[ty][tx]; cw = cc; ce = east_c[ty][tx]; } else if ( tx == BLOCK_SIZE -1 ){ //e cn = cc; cs = c_cuda_temp[ty+1][tx]; cw = cc; ce = east_c[ty][tx]; } else if ( ty == BLOCK_SIZE -1){ //s cn = cc; cs = south_c[ty][tx]; cw = cc; ce = c_cuda_temp[ty][tx+1]; } else{ //the data elements which are not on the borders cn = cc; cs = c_cuda_temp[ty+1][tx]; cw = cc; ce = c_cuda_temp[ty][tx+1]; } // divergence (equ 58) d_sum = cn * N_C[index] + cs * S_C[index] + cw * W_C[index] + ce * E_C[index]; // image update (equ 61) c_cuda_result[ty][tx] = temp[ty][tx] + 0.25 * lambda * d_sum; __syncthreads(); J_cuda[index] = c_cuda_result[ty][tx]; }
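// --- Illustrative sketch (not part of the record above) ---
// The pair of kernels above implements one SRAD sweep. srad_cuda_1 forms, per
// pixel J with neighbour differences n, s, w, e:
//   g2 = (n^2+s^2+w^2+e^2)/J^2,  l = (n+s+w+e)/J,
//   q^2 = (0.5*g2 - l^2/16) / (1 + 0.25*l)^2,
//   c = 1 / (1 + (q^2 - q0^2)/(q0^2*(1+q0^2))), clamped to [0,1]   (the "equ 33" in its comments).
// srad_cuda_2 then updates J += 0.25*lambda*(cn*dN + cs*dS + cw*dW + ce*dE),
// where the north/west weights reuse the centre coefficient and the south/east
// weights come from the neighbouring cells (equ 58/61). A host-side restatement
// of the coefficient for a single pixel, illustrative only:
#include <cstdio>

static float srad_diffusion_coefficient(float jc, float n, float s, float w, float e,
                                        float q0sqr)
{
    float g2   = (n*n + s*s + w*w + e*e) / (jc*jc);
    float l    = (n + s + w + e) / jc;
    float num  = 0.5f*g2 - (1.0f/16.0f)*(l*l);
    float den  = 1.0f + 0.25f*l;
    float qsqr = num / (den*den);
    float c    = 1.0f / (1.0f + (qsqr - q0sqr) / (q0sqr*(1.0f + q0sqr)));
    if (c < 0.0f) c = 0.0f; else if (c > 1.0f) c = 1.0f;   // same saturation as the kernel
    return c;
}

int main()
{
    // Homogeneous neighbourhood: all differences are zero, so q^2 = 0 and the raw
    // coefficient exceeds 1, saturating to 1 (full smoothing), as expected.
    std::printf("%f\n", srad_diffusion_coefficient(1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.05f));
    return 0;
}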
bbb6636bf263450a9d30d650224305a402f83aa3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "hiprand/hiprand.h" #include "rocblas.h" #ifdef CUDNN #pragma comment(lib, "cudnn.lib") #endif extern "C" { #include "convolutional_layer.h" #include "batchnorm_layer.h" #include "gemm.h" #include "blas.h" #include "im2col.h" #include "col2im.h" #include "utils.h" #include "hip/hip_runtime.h" } extern "C" { double get_time_point(); void start_timer(); void stop_timer(); double get_time(); void stop_timer_and_show(); void stop_timer_and_show_name(char *name); void show_total_time(); } __global__ void binarize_kernel(float *x, int n, float *binary) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i >= n) return; binary[i] = (x[i] >= 0) ? 1 : -1; } void binarize_gpu(float *x, int n, float *binary) { hipLaunchKernelGGL(( binarize_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, x, n, binary); check_error(hipPeekAtLastError()); } __global__ void binarize_input_kernel(float *input, int n, int size, float *binary) { int s = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (s >= size) return; int i = 0; float mean = 0; for(i = 0; i < n; ++i){ mean += fabs(input[i*size + s]); } mean = mean / n; for(i = 0; i < n; ++i){ binary[i*size + s] = (input[i*size + s] > 0) ? mean : -mean; } } void binarize_input_gpu(float *input, int n, int size, float *binary) { hipLaunchKernelGGL(( binarize_input_kernel), dim3(cuda_gridsize(size)), dim3(BLOCK), 0, 0, input, n, size, binary); check_error(hipPeekAtLastError()); } __global__ void binarize_weights_kernel(float *weights, int n, int size, float *binary) { int f = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (f >= n) return; int i = 0; float mean = 0; for(i = 0; i < size; ++i){ mean += fabs(weights[f*size + i]); } mean = mean / size; for(i = 0; i < size; ++i){ binary[f*size + i] = (weights[f*size + i] > 0) ? 
mean : -mean; //binary[f*size + i] = weights[f*size + i]; } } void binarize_weights_gpu(float *weights, int n, int size, float *binary) { hipLaunchKernelGGL(( binarize_weights_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, weights, n, size, binary); check_error(hipPeekAtLastError()); } __global__ void cuda_f32_to_f16(float* input_f32, size_t size, half *output_f16) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < size) output_f16[idx] = __float2half(input_f32[idx]); //if (idx < size) *((unsigned short *)output_f16 + idx) = __float2half(input_f32[idx]); } void cuda_convert_f32_to_f16(float* input_f32, size_t size, float *output_f16) { hipLaunchKernelGGL(( cuda_f32_to_f16) , dim3(size / BLOCK + 1), dim3(BLOCK), 0, get_cuda_stream() , input_f32, size, (half *)output_f16); } __global__ void cuda_f16_to_f32(half* input_f16, size_t size, float *output_f32) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < size) output_f32[idx] = __half2float(input_f16[idx]); //if (idx < size) output_f32[idx] = __half2float(*((unsigned short *)input_f16 + idx)); } void cuda_convert_f16_to_f32(float* input_f16, size_t size, float *output_f32) { hipLaunchKernelGGL(( cuda_f16_to_f32) , dim3(size / BLOCK + 1), dim3(BLOCK), 0, get_cuda_stream() , (half *)input_f16, size, output_f32); } half *cuda_make_f16_from_f32_array(float *src, size_t n) { half *dst16; size_t size = sizeof(half)*n; check_error(hipMalloc((void **)&dst16, size)); if (src) { cuda_convert_f32_to_f16(src, n, (float *)dst16); } if (!dst16) error("Cuda malloc failed\n"); return dst16; } void forward_convolutional_layer_gpu(convolutional_layer l, network_state state) { //fill_ongpu(l.outputs*l.batch, 0, l.output_gpu, 1); if(l.binary){ binarize_weights_gpu(l.weights_gpu, l.n, l.c*l.size*l.size, l.binary_weights_gpu); swap_binary(&l); } if(l.xnor){ if (!l.align_bit_weights_gpu || state.train) { binarize_weights_gpu(l.weights_gpu, l.n, l.c*l.size*l.size, l.binary_weights_gpu); } //swap_binary(&l); //binarize_gpu(state.input, l.c*l.h*l.w*l.batch, l.binary_input_gpu); //state.input = l.binary_input_gpu; //hipDeviceSynchronize(); if (l.align_bit_weights_gpu && !state.train && l.c >= 256 && l.size > 1) { //return; hipError_t status = hipSuccess; int input_size = l.c*l.h*l.w*l.batch; int m = l.n; int k = l.size*l.size*l.c; int n = l.out_w*l.out_h; float * a = l.weights_gpu; int ldb_align = l.lda_align; size_t new_ldb = k + (ldb_align - k%ldb_align); // (k / 8 + 1) * 8; size_t t_intput_size = new_ldb * n; size_t t_bit_input_size = t_intput_size / 8;// +1; //if(0) { //hipDeviceSynchronize(); int i = 0; // if (l.stride == 1 && l.c >= 256 && l.size > 1) if (l.stride == 1 && l.c >= 1024 && l.size > 1 && 0)// && l.w >= 13) // disabled { // stride=1 only //start_timer(); im2col_align_bin_ongpu(state.input + i*l.c*l.h*l.w, l.c, l.h, l.w, l.size, l.stride, l.pad, state.workspace, l.bit_align); //hipDeviceSynchronize(); //stop_timer_and_show_name("im2col_align_bin_ongpu"); } else { //start_timer(); im2col_align_ongpu(state.input + i*l.c*l.h*l.w, l.c, l.h, l.w, l.size, l.stride, l.pad, l.align_workspace_gpu, l.bit_align); //hipDeviceSynchronize(); //stop_timer_and_show_name("im2col_align_ongpu"); //getchar(); // should be optimized //start_timer(); float_to_bit_gpu(l.align_workspace_gpu, (unsigned char *)state.workspace, l.align_workspace_size); //hipDeviceSynchronize(); //stop_timer_and_show_name("float_to_bit_gpu"); } //start_timer(); transpose_bin_gpu((unsigned char *)state.workspace, (unsigned char *)l.transposed_align_workspace_gpu, k, n, 
l.bit_align, new_ldb, 8); //hipDeviceSynchronize(); //stop_timer_and_show_name("transpose_bin_gpu"); // should be optimized //if(0) {//if (k > 1000) { // sequentially input-shared - BAD // gemm_nn_custom_bin_mean_transposed_sequentially_gpu(m, n, k, // (unsigned char *)l.align_bit_weights_gpu, new_ldb, (unsigned char *)l.transposed_align_workspace_gpu, new_ldb, l.output_gpu, n, l.mean_arr_gpu); //} //else { // coalescing & weights-shared-memory - GOOD //start_timer(); gemm_nn_custom_bin_mean_transposed_gpu(m, n, k, (unsigned char *)l.align_bit_weights_gpu, new_ldb, (unsigned char *)l.transposed_align_workspace_gpu, new_ldb, l.output_gpu, n, l.mean_arr_gpu, l.biases_gpu); //hipDeviceSynchronize(); //stop_timer_and_show_name("gemm_nn_custom_bin_mean_transposed_gpu"); //} //hipDeviceSynchronize(); //check_error(status); //getchar(); } /* { float_to_bit_gpu(state.input, (unsigned char *)l.align_workspace_gpu, input_size); convolve_bin_gpu(l.align_workspace_gpu, (float *)l.align_bit_weights_gpu, l.output_gpu, l.w, l.h, l.c, l.n, l.size, l.pad, l.new_lda, l.mean_arr_gpu); //convolve_gpu(state.input, l.weights_gpu, l.output_gpu, l.w, l.h, l.c, l.n, l.size, l.pad); //hipDeviceSynchronize(); //check_error(status); add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w*l.out_h); } */ //add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w*l.out_h); if(l.activation != LINEAR) activate_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation); //if (l.binary || l.xnor) swap_binary(&l); //hipDeviceSynchronize(); return; } } if (l.xnor) { swap_binary(&l); binarize_gpu(state.input, l.c*l.h*l.w*l.batch, l.binary_input_gpu); state.input = l.binary_input_gpu; } //fill_ongpu(l.outputs*l.batch, 0, l.output_gpu, 1); #ifdef CUDNN float one = 1; // alpha[0], beta[0] is float for HALF and FLOAT float alpha = 1, beta = 0; #ifdef CUDNN_HALF // Note: For improved performance it is advised to use beta[0] = 0.0. // For Tensor Core: cudnnSetConvolutionMathType() where cudnnMathType_t mathType = CUDNN_TENSOR_OP_MATH; // 1. or CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM and use CUDNN_DATA_HALF // 2. 
or CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED // More: http://docs.nvidia.com/deeplearning/sdk/cudnn-developer-guide/index.html#tensor_ops const size_t input16_size = l.batch*l.c*l.w*l.h; const size_t output16_size = l.batch*l.out_c*l.out_h*l.out_w; if (*state.net.max_input16_size < input16_size) { //printf("\n input16_size: cur = %zu \t max = %zu \n", input16_size, *state.net.max_input16_size); *state.net.max_input16_size = input16_size; if (*state.net.input16_gpu) cuda_free(*state.net.input16_gpu); *state.net.input16_gpu = (float *)cuda_make_f16_from_f32_array(NULL, *state.net.max_input16_size); } float *input16 = *state.net.input16_gpu; if (*state.net.max_output16_size < output16_size) { *state.net.max_output16_size = output16_size; if (*state.net.output16_gpu) cuda_free(*state.net.output16_gpu); *state.net.output16_gpu = (float *)cuda_make_f16_from_f32_array(NULL, *state.net.max_output16_size); } float *output16 = *state.net.output16_gpu; cuda_convert_f32_to_f16(state.input, input16_size, input16); //fill_ongpu(output16_size / 2, 0, (float *)output16, 1); cudnnConvolutionForward(cudnn_handle(), &alpha, l.srcTensorDesc, input16, l.weightDesc, l.weights_gpu16, l.convDesc, l.fw_algo, state.workspace, l.workspace_size, &beta, l.dstTensorDesc, output16); if (l.batch_normalize) { if (state.train) // Training { copy_ongpu(l.outputs*l.batch / 2, output16, 1, l.x_gpu, 1); //hipMemcpyAsync(l.x_gpu, output16, l.outputs*l.batch*sizeof(half), hipMemcpyDefault, get_cuda_stream()); float one = 1; float zero = 0; // Batch-normalization can still take FP16 inputs and outputs, saving half the bandwidth // compared to FP32, its just that the statistics and value adjustment should be done in FP32. cudnnBatchNormalizationForwardTraining(cudnn_handle(), CUDNN_BATCHNORM_SPATIAL, &one, &zero, l.normDstTensorDescF16, l.x_gpu, // input l.normDstTensorDescF16, output16, // output l.normTensorDesc, l.scales_gpu, l.biases_gpu, .01, l.rolling_mean_gpu, // output (should be FP32) l.rolling_variance_gpu, // output (should be FP32) .00001, l.mean_gpu, // output (should be FP32) l.variance_gpu); // output (should be FP32) cuda_convert_f16_to_f32(output16, output16_size, l.output_gpu); //forward_batchnorm_layer_gpu(l, state); } else // Detection { cuda_convert_f16_to_f32(output16, output16_size, l.output_gpu); normalize_gpu(l.output_gpu, l.rolling_mean_gpu, l.rolling_variance_gpu, l.batch, l.out_c, l.out_h*l.out_w); scale_bias_gpu(l.output_gpu, l.scales_gpu, l.batch, l.out_c, l.out_h*l.out_w); add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.out_c, l.out_w*l.out_h); } } else // BIAS only { cuda_convert_f16_to_f32(output16, output16_size, l.output_gpu); add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w*l.out_h); } #else cudnnConvolutionForward(cudnn_handle(), &alpha, //&one, l.srcTensorDesc, state.input, l.weightDesc, l.weights_gpu, l.convDesc, l.fw_algo, state.workspace, l.workspace_size, &beta, //&one, l.dstTensorDesc, l.output_gpu); //hipDeviceSynchronize(); #endif // CUDNN_HALF #else fill_ongpu(l.outputs*l.batch, 0, l.output_gpu, 1); int i; int m = l.n; int k = l.size*l.size*l.c; int n = l.out_w*l.out_h; for(i = 0; i < l.batch; ++i){ float *im = state.input + i*l.c*l.h*l.w; float * a = l.weights_gpu; float * b = state.workspace; float * c = l.output_gpu; if (l.size == 1) { b = im; } else { im2col_ongpu(im, l.c, l.h, l.w, l.size, l.stride, l.pad, state.workspace); } gemm_ongpu(0,0,m,n,k,1.,a,k,b,n,1.,c+i*m*n,n); } #endif #ifndef CUDNN_HALF if (l.batch_normalize) { forward_batchnorm_layer_gpu(l, 
state); } else { add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w*l.out_h); } #endif // no CUDNN_HALF if (l.activation != LINEAR) activate_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation); //if(l.dot > 0) dot_error_gpu(l); if(l.binary || l.xnor) swap_binary(&l); //hipDeviceSynchronize(); // for correct profiling of performance } void backward_convolutional_layer_gpu(convolutional_layer l, network_state state) { gradient_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation, l.delta_gpu); backward_bias_gpu(l.bias_updates_gpu, l.delta_gpu, l.batch, l.n, l.out_w*l.out_h); #ifndef CUDNN_HALF if(l.batch_normalize){ backward_batchnorm_layer_gpu(l, state); } else { //backward_bias_gpu(l.bias_updates_gpu, l.delta_gpu, l.batch, l.n, l.out_w*l.out_h); } #endif // no CUDNN_HALF float *original_input = state.input; if(l.xnor) state.input = l.binary_input_gpu; #ifdef CUDNN float one = 1; float alpha = 1, beta = 0; #ifdef CUDNN_HALF const size_t input16_size = l.batch*l.c*l.w*l.h; const size_t delta16_size = l.batch*l.n*l.out_w*l.out_h; if (*state.net.max_input16_size < input16_size) { *state.net.max_input16_size = input16_size; if(*state.net.input16_gpu) cuda_free(*state.net.input16_gpu); *state.net.input16_gpu = (float *)cuda_make_f16_from_f32_array(NULL, *state.net.max_input16_size); } float *input16 = *state.net.input16_gpu; if (*state.net.max_output16_size < delta16_size) { *state.net.max_output16_size = delta16_size; if(*state.net.output16_gpu) cuda_free(*state.net.output16_gpu); *state.net.output16_gpu = (float *)cuda_make_f16_from_f32_array(NULL, *state.net.max_output16_size); } float *delta16 = *state.net.output16_gpu; cuda_convert_f32_to_f16(state.input, input16_size, input16); cuda_convert_f32_to_f16(l.delta_gpu, delta16_size, delta16); if (l.batch_normalize) { //if (!state.train) { // l.mean_gpu = l.rolling_mean_gpu; // l.variance_gpu = l.rolling_variance_gpu; //} float one = 1; float zero = 0; cudnnBatchNormalizationBackward(cudnn_handle(), CUDNN_BATCHNORM_SPATIAL, &one, &zero, &one, &one, l.normDstTensorDescF16, l.x_gpu, // input l.normDstTensorDescF16, delta16, // input l.normDstTensorDescF16, l.x_norm_gpu, // output l.normTensorDesc, l.scales_gpu, // output (should be FP32) l.scale_updates_gpu, // output (should be FP32) l.bias_updates_gpu, // output (should be FP32) .00001, l.mean_gpu, // input (should be FP32) l.variance_gpu); // input (should be FP32) copy_ongpu(l.outputs*l.batch / 2, l.x_norm_gpu, 1, delta16, 1); //hipMemcpyAsync(delta16, l.x_norm_gpu, l.outputs*l.batch * sizeof(half), hipMemcpyDefault, get_cuda_stream()); } else { //backward_bias_gpu(l.bias_updates_gpu, l.delta_gpu, l.batch, l.n, l.out_w*l.out_h); } // convert input: state.input (x), l.delta_gpu (y) from fp32 to fp16 // get output: l.weight_updates_gpu (dw) and convert it to fp32 (ONLY if it is fp16) // calculate conv weight updates // Already: l.weight_updates_gpu = (l.weight_updates_gpu - l.weight*decay*batch*subdivision)*momentum // so we should copy f32 to f16, or compute: f16=(w_up - w*d*b*s)*m cuda_convert_f32_to_f16(l.weight_updates_gpu, l.c*l.n*l.size*l.size, l.weight_updates_gpu16); cudnnConvolutionBackwardFilter(cudnn_handle(), &one, l.srcTensorDesc, input16, //state.input, l.ddstTensorDesc, delta16, //l.delta_gpu, l.convDesc, l.bf_algo, state.workspace, l.workspace_size, &one, l.dweightDesc, l.weight_updates_gpu16); // l.weight_updates_gpu); cuda_convert_f16_to_f32(l.weight_updates_gpu16, l.c*l.n*l.size*l.size, l.weight_updates_gpu); if (state.delta) { if (l.binary || l.xnor) 
swap_binary(&l); // http://docs.nvidia.com/deeplearning/sdk/cudnn-developer-guide/index.html#cudnnConvolutionBackwardData // calculate delta for the next layer // convert input: l.weights_gpu (w), l.delta_gpu (dy) from fp32 to fp16 // get output: state.delta (dx) and convert it to fp32 (ONLY if it is fp16) cudnnConvolutionBackwardData(cudnn_handle(), &alpha, l.weightDesc, l.weights_gpu16, //l.weights_gpu, l.ddstTensorDesc, delta16, //l.delta_gpu, l.convDesc, l.bd_algo, state.workspace, l.workspace_size, &beta, l.dsrcTensorDesc, input16); // state.delta); cuda_convert_f16_to_f32(input16, input16_size, state.delta); if (l.binary || l.xnor) swap_binary(&l); if (l.xnor) gradient_array_ongpu(original_input, l.batch*l.c*l.h*l.w, HARDTAN, state.delta); } #else // CUDNN_HALF // calculate conv weight updates // if used: beta=1 then loss decreases faster cudnnConvolutionBackwardFilter(cudnn_handle(), &one, l.srcTensorDesc, state.input, l.ddstTensorDesc, l.delta_gpu, l.convDesc, l.bf_algo, state.workspace, l.workspace_size, &one, l.dweightDesc, l.weight_updates_gpu); if(state.delta){ if(l.binary || l.xnor) swap_binary(&l); // http://docs.nvidia.com/deeplearning/sdk/cudnn-developer-guide/index.html#cudnnConvolutionBackwardData // calculate delta for the next layer cudnnConvolutionBackwardData(cudnn_handle(), &one, l.weightDesc, l.weights_gpu, l.ddstTensorDesc, l.delta_gpu, l.convDesc, l.bd_algo, state.workspace, l.workspace_size, &one, l.dsrcTensorDesc, state.delta); if(l.binary || l.xnor) swap_binary(&l); if(l.xnor) gradient_array_ongpu(original_input, l.batch*l.c*l.h*l.w, HARDTAN, state.delta); } #endif // CUDNN_HALF #else // CUDNN int m = l.n; int n = l.size*l.size*l.c; int k = l.out_w*l.out_h; int i; for(i = 0; i < l.batch; ++i){ float * a = l.delta_gpu; float * b = state.workspace; float * c = l.weight_updates_gpu; im2col_ongpu(state.input + i*l.c*l.h*l.w, l.c, l.h, l.w, l.size, l.stride, l.pad, state.workspace); gemm_ongpu(0,1,m,n,k,1,a + i*m*k,k,b,k,1,c,n); if(state.delta){ if(l.binary || l.xnor) swap_binary(&l); float * a = l.weights_gpu; float * b = l.delta_gpu; float * c = state.workspace; gemm_ongpu(1,0,n,k,m,1,a,n,b + i*k*m,k,0,c,k); col2im_ongpu(state.workspace, l.c, l.h, l.w, l.size, l.stride, l.pad, state.delta + i*l.c*l.h*l.w); if(l.binary || l.xnor) { swap_binary(&l); } if(l.xnor) gradient_array_ongpu(original_input + i*l.c*l.h*l.w, l.c*l.h*l.w, HARDTAN, state.delta + i*l.c*l.h*l.w); } } #endif } void pull_convolutional_layer(convolutional_layer layer) { cuda_pull_array(layer.weights_gpu, layer.weights, layer.c*layer.n*layer.size*layer.size); cuda_pull_array(layer.biases_gpu, layer.biases, layer.n); cuda_pull_array(layer.weight_updates_gpu, layer.weight_updates, layer.c*layer.n*layer.size*layer.size); cuda_pull_array(layer.bias_updates_gpu, layer.bias_updates, layer.n); if (layer.batch_normalize){ cuda_pull_array(layer.scales_gpu, layer.scales, layer.n); cuda_pull_array(layer.rolling_mean_gpu, layer.rolling_mean, layer.n); cuda_pull_array(layer.rolling_variance_gpu, layer.rolling_variance, layer.n); } if (layer.adam){ cuda_pull_array(layer.m_gpu, layer.m, layer.c*layer.n*layer.size*layer.size); cuda_pull_array(layer.v_gpu, layer.v, layer.c*layer.n*layer.size*layer.size); } } void push_convolutional_layer(convolutional_layer layer) { cuda_push_array(layer.weights_gpu, layer.weights, layer.c*layer.n*layer.size*layer.size); #ifdef CUDNN_HALF cuda_convert_f32_to_f16(layer.weights_gpu, layer.c*layer.n*layer.size*layer.size, layer.weights_gpu16); #endif cuda_push_array(layer.biases_gpu, 
layer.biases, layer.n); cuda_push_array(layer.weight_updates_gpu, layer.weight_updates, layer.c*layer.n*layer.size*layer.size); cuda_push_array(layer.bias_updates_gpu, layer.bias_updates, layer.n); if (layer.batch_normalize){ cuda_push_array(layer.scales_gpu, layer.scales, layer.n); cuda_push_array(layer.rolling_mean_gpu, layer.rolling_mean, layer.n); cuda_push_array(layer.rolling_variance_gpu, layer.rolling_variance, layer.n); } if (layer.adam){ cuda_push_array(layer.m_gpu, layer.m, layer.c*layer.n*layer.size*layer.size); cuda_push_array(layer.v_gpu, layer.v, layer.c*layer.n*layer.size*layer.size); } } void update_convolutional_layer_gpu(convolutional_layer layer, int batch, float learning_rate, float momentum, float decay) { int size = layer.size*layer.size*layer.c*layer.n; axpy_ongpu(layer.n, learning_rate/batch, layer.bias_updates_gpu, 1, layer.biases_gpu, 1); scal_ongpu(layer.n, momentum, layer.bias_updates_gpu, 1); if(layer.scales_gpu){ axpy_ongpu(layer.n, learning_rate/batch, layer.scale_updates_gpu, 1, layer.scales_gpu, 1); scal_ongpu(layer.n, momentum, layer.scale_updates_gpu, 1); } if(layer.adam){ scal_ongpu(size, layer.B1, layer.m_gpu, 1); scal_ongpu(size, layer.B2, layer.v_gpu, 1); axpy_ongpu(size, -decay*batch, layer.weights_gpu, 1, layer.weight_updates_gpu, 1); axpy_ongpu(size, -(1-layer.B1), layer.weight_updates_gpu, 1, layer.m_gpu, 1); mul_ongpu(size, layer.weight_updates_gpu, 1, layer.weight_updates_gpu, 1); axpy_ongpu(size, (1-layer.B2), layer.weight_updates_gpu, 1, layer.v_gpu, 1); adam_gpu(size, layer.weights_gpu, layer.m_gpu, layer.v_gpu, layer.B1, layer.B2, learning_rate/batch, layer.eps, layer.t+1); fill_ongpu(size, 0, layer.weight_updates_gpu, 1); }else{ // update weights: // weights_gpu = weights_gpu*(1 - decay*lr) + weight_updates_gpu*lr / (batch*subdivision) = // weights_gpu*(1 - 0.0005*0.001) + weight_updates_gpu*0.001/(64*8) = // weights_gpu * 0.999 999 5 + weight_updates_gpu * 0.000 001 953125 // // weight_updates_gpu = (weight_updates_gpu - weights_gpu*decay*batch*subdivision)*momentum = // (weight_updates_gpu - weights_gpu * 0.0005 * 64 * 8) * 0.9 = // weight_updates_gpu*0.9 - weights_gpu*0.2304 axpy_ongpu(size, -decay*batch, layer.weights_gpu, 1, layer.weight_updates_gpu, 1); axpy_ongpu(size, learning_rate/batch, layer.weight_updates_gpu, 1, layer.weights_gpu, 1); scal_ongpu(size, momentum, layer.weight_updates_gpu, 1); } }
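// --- Illustrative sketch (not part of the record above) ---
// update_convolutional_layer_gpu above (non-Adam branch) applies, per weight w
// with accumulated gradient g (weight_updates), the axpy/axpy/scal sequence
//   g -= decay*B*w;   w += (lr/B)*g;   g *= momentum;
// which is algebraically w <- w*(1 - lr*decay) + (lr/B)*g_old, exactly the
// identity spelled out in its in-code comment for lr = 0.001, decay = 0.0005,
// B = 64*8 and momentum = 0.9 (constants 0.000001953125 and 0.2304).
// A scalar sketch of that arithmetic (values below are illustrative only):
#include <cstdio>

int main()
{
    float lr = 0.001f, decay = 0.0005f, momentum = 0.9f;
    float B = 64.0f * 8.0f;              // batch * subdivisions
    float w = 1.0f, g = 10.0f;           // illustrative weight and accumulated gradient

    g = g - decay * B * w;               // axpy_ongpu(size, -decay*batch, weights, ..., weight_updates, ...)
    w = w + (lr / B) * g;                // axpy_ongpu(size, learning_rate/batch, weight_updates, ..., weights, ...)
    g = momentum * g;                    // scal_ongpu(size, momentum, weight_updates, ...)

    // Expected: w = 1*(1 - 0.001*0.0005) + (0.001/512)*10 ~= 1.000019, g = 0.9*9.744 = 8.7696
    std::printf("w = %.9f  g = %.6f\n", w, g);
    return 0;
}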
bbb6636bf263450a9d30d650224305a402f83aa3.cu
#include "cuda_runtime.h" #include "curand.h" #include "cublas_v2.h" #ifdef CUDNN #pragma comment(lib, "cudnn.lib") #endif extern "C" { #include "convolutional_layer.h" #include "batchnorm_layer.h" #include "gemm.h" #include "blas.h" #include "im2col.h" #include "col2im.h" #include "utils.h" #include "cuda.h" } extern "C" { double get_time_point(); void start_timer(); void stop_timer(); double get_time(); void stop_timer_and_show(); void stop_timer_and_show_name(char *name); void show_total_time(); } __global__ void binarize_kernel(float *x, int n, float *binary) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i >= n) return; binary[i] = (x[i] >= 0) ? 1 : -1; } void binarize_gpu(float *x, int n, float *binary) { binarize_kernel<<<cuda_gridsize(n), BLOCK>>>(x, n, binary); check_error(cudaPeekAtLastError()); } __global__ void binarize_input_kernel(float *input, int n, int size, float *binary) { int s = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (s >= size) return; int i = 0; float mean = 0; for(i = 0; i < n; ++i){ mean += fabs(input[i*size + s]); } mean = mean / n; for(i = 0; i < n; ++i){ binary[i*size + s] = (input[i*size + s] > 0) ? mean : -mean; } } void binarize_input_gpu(float *input, int n, int size, float *binary) { binarize_input_kernel<<<cuda_gridsize(size), BLOCK>>>(input, n, size, binary); check_error(cudaPeekAtLastError()); } __global__ void binarize_weights_kernel(float *weights, int n, int size, float *binary) { int f = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (f >= n) return; int i = 0; float mean = 0; for(i = 0; i < size; ++i){ mean += fabs(weights[f*size + i]); } mean = mean / size; for(i = 0; i < size; ++i){ binary[f*size + i] = (weights[f*size + i] > 0) ? 
mean : -mean; //binary[f*size + i] = weights[f*size + i]; } } void binarize_weights_gpu(float *weights, int n, int size, float *binary) { binarize_weights_kernel<<<cuda_gridsize(n), BLOCK>>>(weights, n, size, binary); check_error(cudaPeekAtLastError()); } __global__ void cuda_f32_to_f16(float* input_f32, size_t size, half *output_f16) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < size) output_f16[idx] = __float2half(input_f32[idx]); //if (idx < size) *((unsigned short *)output_f16 + idx) = __float2half(input_f32[idx]); } void cuda_convert_f32_to_f16(float* input_f32, size_t size, float *output_f16) { cuda_f32_to_f16 <<< size / BLOCK + 1, BLOCK, 0, get_cuda_stream() >>> (input_f32, size, (half *)output_f16); } __global__ void cuda_f16_to_f32(half* input_f16, size_t size, float *output_f32) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < size) output_f32[idx] = __half2float(input_f16[idx]); //if (idx < size) output_f32[idx] = __half2float(*((unsigned short *)input_f16 + idx)); } void cuda_convert_f16_to_f32(float* input_f16, size_t size, float *output_f32) { cuda_f16_to_f32 <<< size / BLOCK + 1, BLOCK, 0, get_cuda_stream() >>> ((half *)input_f16, size, output_f32); } half *cuda_make_f16_from_f32_array(float *src, size_t n) { half *dst16; size_t size = sizeof(half)*n; check_error(cudaMalloc((void **)&dst16, size)); if (src) { cuda_convert_f32_to_f16(src, n, (float *)dst16); } if (!dst16) error("Cuda malloc failed\n"); return dst16; } void forward_convolutional_layer_gpu(convolutional_layer l, network_state state) { //fill_ongpu(l.outputs*l.batch, 0, l.output_gpu, 1); if(l.binary){ binarize_weights_gpu(l.weights_gpu, l.n, l.c*l.size*l.size, l.binary_weights_gpu); swap_binary(&l); } if(l.xnor){ if (!l.align_bit_weights_gpu || state.train) { binarize_weights_gpu(l.weights_gpu, l.n, l.c*l.size*l.size, l.binary_weights_gpu); } //swap_binary(&l); //binarize_gpu(state.input, l.c*l.h*l.w*l.batch, l.binary_input_gpu); //state.input = l.binary_input_gpu; //cudaDeviceSynchronize(); if (l.align_bit_weights_gpu && !state.train && l.c >= 256 && l.size > 1) { //return; cudaError_t status = cudaSuccess; int input_size = l.c*l.h*l.w*l.batch; int m = l.n; int k = l.size*l.size*l.c; int n = l.out_w*l.out_h; float * a = l.weights_gpu; int ldb_align = l.lda_align; size_t new_ldb = k + (ldb_align - k%ldb_align); // (k / 8 + 1) * 8; size_t t_intput_size = new_ldb * n; size_t t_bit_input_size = t_intput_size / 8;// +1; //if(0) { //cudaDeviceSynchronize(); int i = 0; // if (l.stride == 1 && l.c >= 256 && l.size > 1) if (l.stride == 1 && l.c >= 1024 && l.size > 1 && 0)// && l.w >= 13) // disabled { // stride=1 only //start_timer(); im2col_align_bin_ongpu(state.input + i*l.c*l.h*l.w, l.c, l.h, l.w, l.size, l.stride, l.pad, state.workspace, l.bit_align); //cudaDeviceSynchronize(); //stop_timer_and_show_name("im2col_align_bin_ongpu"); } else { //start_timer(); im2col_align_ongpu(state.input + i*l.c*l.h*l.w, l.c, l.h, l.w, l.size, l.stride, l.pad, l.align_workspace_gpu, l.bit_align); //cudaDeviceSynchronize(); //stop_timer_and_show_name("im2col_align_ongpu"); //getchar(); // should be optimized //start_timer(); float_to_bit_gpu(l.align_workspace_gpu, (unsigned char *)state.workspace, l.align_workspace_size); //cudaDeviceSynchronize(); //stop_timer_and_show_name("float_to_bit_gpu"); } //start_timer(); transpose_bin_gpu((unsigned char *)state.workspace, (unsigned char *)l.transposed_align_workspace_gpu, k, n, l.bit_align, new_ldb, 8); //cudaDeviceSynchronize(); 
//stop_timer_and_show_name("transpose_bin_gpu"); // should be optimized //if(0) {//if (k > 1000) { // sequentially input-shared - BAD // gemm_nn_custom_bin_mean_transposed_sequentially_gpu(m, n, k, // (unsigned char *)l.align_bit_weights_gpu, new_ldb, (unsigned char *)l.transposed_align_workspace_gpu, new_ldb, l.output_gpu, n, l.mean_arr_gpu); //} //else { // coalescing & weights-shared-memory - GOOD //start_timer(); gemm_nn_custom_bin_mean_transposed_gpu(m, n, k, (unsigned char *)l.align_bit_weights_gpu, new_ldb, (unsigned char *)l.transposed_align_workspace_gpu, new_ldb, l.output_gpu, n, l.mean_arr_gpu, l.biases_gpu); //cudaDeviceSynchronize(); //stop_timer_and_show_name("gemm_nn_custom_bin_mean_transposed_gpu"); //} //cudaDeviceSynchronize(); //check_error(status); //getchar(); } /* { float_to_bit_gpu(state.input, (unsigned char *)l.align_workspace_gpu, input_size); convolve_bin_gpu(l.align_workspace_gpu, (float *)l.align_bit_weights_gpu, l.output_gpu, l.w, l.h, l.c, l.n, l.size, l.pad, l.new_lda, l.mean_arr_gpu); //convolve_gpu(state.input, l.weights_gpu, l.output_gpu, l.w, l.h, l.c, l.n, l.size, l.pad); //cudaDeviceSynchronize(); //check_error(status); add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w*l.out_h); } */ //add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w*l.out_h); if(l.activation != LINEAR) activate_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation); //if (l.binary || l.xnor) swap_binary(&l); //cudaDeviceSynchronize(); return; } } if (l.xnor) { swap_binary(&l); binarize_gpu(state.input, l.c*l.h*l.w*l.batch, l.binary_input_gpu); state.input = l.binary_input_gpu; } //fill_ongpu(l.outputs*l.batch, 0, l.output_gpu, 1); #ifdef CUDNN float one = 1; // alpha[0], beta[0] is float for HALF and FLOAT float alpha = 1, beta = 0; #ifdef CUDNN_HALF // Note: For improved performance it is advised to use beta[0] = 0.0. // For Tensor Core: cudnnSetConvolutionMathType() where cudnnMathType_t mathType = CUDNN_TENSOR_OP_MATH; // 1. or CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM and use CUDNN_DATA_HALF // 2. 
or CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED // More: http://docs.nvidia.com/deeplearning/sdk/cudnn-developer-guide/index.html#tensor_ops const size_t input16_size = l.batch*l.c*l.w*l.h; const size_t output16_size = l.batch*l.out_c*l.out_h*l.out_w; if (*state.net.max_input16_size < input16_size) { //printf("\n input16_size: cur = %zu \t max = %zu \n", input16_size, *state.net.max_input16_size); *state.net.max_input16_size = input16_size; if (*state.net.input16_gpu) cuda_free(*state.net.input16_gpu); *state.net.input16_gpu = (float *)cuda_make_f16_from_f32_array(NULL, *state.net.max_input16_size); } float *input16 = *state.net.input16_gpu; if (*state.net.max_output16_size < output16_size) { *state.net.max_output16_size = output16_size; if (*state.net.output16_gpu) cuda_free(*state.net.output16_gpu); *state.net.output16_gpu = (float *)cuda_make_f16_from_f32_array(NULL, *state.net.max_output16_size); } float *output16 = *state.net.output16_gpu; cuda_convert_f32_to_f16(state.input, input16_size, input16); //fill_ongpu(output16_size / 2, 0, (float *)output16, 1); cudnnConvolutionForward(cudnn_handle(), &alpha, l.srcTensorDesc, input16, l.weightDesc, l.weights_gpu16, l.convDesc, l.fw_algo, state.workspace, l.workspace_size, &beta, l.dstTensorDesc, output16); if (l.batch_normalize) { if (state.train) // Training { copy_ongpu(l.outputs*l.batch / 2, output16, 1, l.x_gpu, 1); //cudaMemcpyAsync(l.x_gpu, output16, l.outputs*l.batch*sizeof(half), cudaMemcpyDefault, get_cuda_stream()); float one = 1; float zero = 0; // Batch-normalization can still take FP16 inputs and outputs, saving half the bandwidth // compared to FP32, itís just that the statistics and value adjustment should be done in FP32. cudnnBatchNormalizationForwardTraining(cudnn_handle(), CUDNN_BATCHNORM_SPATIAL, &one, &zero, l.normDstTensorDescF16, l.x_gpu, // input l.normDstTensorDescF16, output16, // output l.normTensorDesc, l.scales_gpu, l.biases_gpu, .01, l.rolling_mean_gpu, // output (should be FP32) l.rolling_variance_gpu, // output (should be FP32) .00001, l.mean_gpu, // output (should be FP32) l.variance_gpu); // output (should be FP32) cuda_convert_f16_to_f32(output16, output16_size, l.output_gpu); //forward_batchnorm_layer_gpu(l, state); } else // Detection { cuda_convert_f16_to_f32(output16, output16_size, l.output_gpu); normalize_gpu(l.output_gpu, l.rolling_mean_gpu, l.rolling_variance_gpu, l.batch, l.out_c, l.out_h*l.out_w); scale_bias_gpu(l.output_gpu, l.scales_gpu, l.batch, l.out_c, l.out_h*l.out_w); add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.out_c, l.out_w*l.out_h); } } else // BIAS only { cuda_convert_f16_to_f32(output16, output16_size, l.output_gpu); add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w*l.out_h); } #else cudnnConvolutionForward(cudnn_handle(), &alpha, //&one, l.srcTensorDesc, state.input, l.weightDesc, l.weights_gpu, l.convDesc, l.fw_algo, state.workspace, l.workspace_size, &beta, //&one, l.dstTensorDesc, l.output_gpu); //cudaDeviceSynchronize(); #endif // CUDNN_HALF #else fill_ongpu(l.outputs*l.batch, 0, l.output_gpu, 1); int i; int m = l.n; int k = l.size*l.size*l.c; int n = l.out_w*l.out_h; for(i = 0; i < l.batch; ++i){ float *im = state.input + i*l.c*l.h*l.w; float * a = l.weights_gpu; float * b = state.workspace; float * c = l.output_gpu; if (l.size == 1) { b = im; } else { im2col_ongpu(im, l.c, l.h, l.w, l.size, l.stride, l.pad, state.workspace); } gemm_ongpu(0,0,m,n,k,1.,a,k,b,n,1.,c+i*m*n,n); } #endif #ifndef CUDNN_HALF if (l.batch_normalize) { forward_batchnorm_layer_gpu(l, 
state); } else { add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w*l.out_h); } #endif // no CUDNN_HALF if (l.activation != LINEAR) activate_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation); //if(l.dot > 0) dot_error_gpu(l); if(l.binary || l.xnor) swap_binary(&l); //cudaDeviceSynchronize(); // for correct profiling of performance } void backward_convolutional_layer_gpu(convolutional_layer l, network_state state) { gradient_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation, l.delta_gpu); backward_bias_gpu(l.bias_updates_gpu, l.delta_gpu, l.batch, l.n, l.out_w*l.out_h); #ifndef CUDNN_HALF if(l.batch_normalize){ backward_batchnorm_layer_gpu(l, state); } else { //backward_bias_gpu(l.bias_updates_gpu, l.delta_gpu, l.batch, l.n, l.out_w*l.out_h); } #endif // no CUDNN_HALF float *original_input = state.input; if(l.xnor) state.input = l.binary_input_gpu; #ifdef CUDNN float one = 1; float alpha = 1, beta = 0; #ifdef CUDNN_HALF const size_t input16_size = l.batch*l.c*l.w*l.h; const size_t delta16_size = l.batch*l.n*l.out_w*l.out_h; if (*state.net.max_input16_size < input16_size) { *state.net.max_input16_size = input16_size; if(*state.net.input16_gpu) cuda_free(*state.net.input16_gpu); *state.net.input16_gpu = (float *)cuda_make_f16_from_f32_array(NULL, *state.net.max_input16_size); } float *input16 = *state.net.input16_gpu; if (*state.net.max_output16_size < delta16_size) { *state.net.max_output16_size = delta16_size; if(*state.net.output16_gpu) cuda_free(*state.net.output16_gpu); *state.net.output16_gpu = (float *)cuda_make_f16_from_f32_array(NULL, *state.net.max_output16_size); } float *delta16 = *state.net.output16_gpu; cuda_convert_f32_to_f16(state.input, input16_size, input16); cuda_convert_f32_to_f16(l.delta_gpu, delta16_size, delta16); if (l.batch_normalize) { //if (!state.train) { // l.mean_gpu = l.rolling_mean_gpu; // l.variance_gpu = l.rolling_variance_gpu; //} float one = 1; float zero = 0; cudnnBatchNormalizationBackward(cudnn_handle(), CUDNN_BATCHNORM_SPATIAL, &one, &zero, &one, &one, l.normDstTensorDescF16, l.x_gpu, // input l.normDstTensorDescF16, delta16, // input l.normDstTensorDescF16, l.x_norm_gpu, // output l.normTensorDesc, l.scales_gpu, // output (should be FP32) l.scale_updates_gpu, // output (should be FP32) l.bias_updates_gpu, // output (should be FP32) .00001, l.mean_gpu, // input (should be FP32) l.variance_gpu); // input (should be FP32) copy_ongpu(l.outputs*l.batch / 2, l.x_norm_gpu, 1, delta16, 1); //cudaMemcpyAsync(delta16, l.x_norm_gpu, l.outputs*l.batch * sizeof(half), cudaMemcpyDefault, get_cuda_stream()); } else { //backward_bias_gpu(l.bias_updates_gpu, l.delta_gpu, l.batch, l.n, l.out_w*l.out_h); } // convert input: state.input (x), l.delta_gpu (y) from fp32 to fp16 // get output: l.weight_updates_gpu (dw) and convert it to fp32 (ONLY if it is fp16) // calculate conv weight updates // Already: l.weight_updates_gpu = (l.weight_updates_gpu - l.weight*decay*batch*subdivision)*momentum // so we should copy f32 to f16, or compute: f16=(w_up - w*d*b*s)*m cuda_convert_f32_to_f16(l.weight_updates_gpu, l.c*l.n*l.size*l.size, l.weight_updates_gpu16); cudnnConvolutionBackwardFilter(cudnn_handle(), &one, l.srcTensorDesc, input16, //state.input, l.ddstTensorDesc, delta16, //l.delta_gpu, l.convDesc, l.bf_algo, state.workspace, l.workspace_size, &one, l.dweightDesc, l.weight_updates_gpu16); // l.weight_updates_gpu); cuda_convert_f16_to_f32(l.weight_updates_gpu16, l.c*l.n*l.size*l.size, l.weight_updates_gpu); if (state.delta) { if (l.binary || 
l.xnor) swap_binary(&l); // http://docs.nvidia.com/deeplearning/sdk/cudnn-developer-guide/index.html#cudnnConvolutionBackwardData // calculate delta for the next layer // convert input: l.weights_gpu (w), l.delta_gpu (dy) from fp32 to fp16 // get output: state.delta (dx) and convert it to fp32 (ONLY if it is fp16) cudnnConvolutionBackwardData(cudnn_handle(), &alpha, l.weightDesc, l.weights_gpu16, //l.weights_gpu, l.ddstTensorDesc, delta16, //l.delta_gpu, l.convDesc, l.bd_algo, state.workspace, l.workspace_size, &beta, l.dsrcTensorDesc, input16); // state.delta); cuda_convert_f16_to_f32(input16, input16_size, state.delta); if (l.binary || l.xnor) swap_binary(&l); if (l.xnor) gradient_array_ongpu(original_input, l.batch*l.c*l.h*l.w, HARDTAN, state.delta); } #else // CUDNN_HALF // calculate conv weight updates // if used: beta=1 then loss decreases faster cudnnConvolutionBackwardFilter(cudnn_handle(), &one, l.srcTensorDesc, state.input, l.ddstTensorDesc, l.delta_gpu, l.convDesc, l.bf_algo, state.workspace, l.workspace_size, &one, l.dweightDesc, l.weight_updates_gpu); if(state.delta){ if(l.binary || l.xnor) swap_binary(&l); // http://docs.nvidia.com/deeplearning/sdk/cudnn-developer-guide/index.html#cudnnConvolutionBackwardData // calculate delta for the next layer cudnnConvolutionBackwardData(cudnn_handle(), &one, l.weightDesc, l.weights_gpu, l.ddstTensorDesc, l.delta_gpu, l.convDesc, l.bd_algo, state.workspace, l.workspace_size, &one, l.dsrcTensorDesc, state.delta); if(l.binary || l.xnor) swap_binary(&l); if(l.xnor) gradient_array_ongpu(original_input, l.batch*l.c*l.h*l.w, HARDTAN, state.delta); } #endif // CUDNN_HALF #else // CUDNN int m = l.n; int n = l.size*l.size*l.c; int k = l.out_w*l.out_h; int i; for(i = 0; i < l.batch; ++i){ float * a = l.delta_gpu; float * b = state.workspace; float * c = l.weight_updates_gpu; im2col_ongpu(state.input + i*l.c*l.h*l.w, l.c, l.h, l.w, l.size, l.stride, l.pad, state.workspace); gemm_ongpu(0,1,m,n,k,1,a + i*m*k,k,b,k,1,c,n); if(state.delta){ if(l.binary || l.xnor) swap_binary(&l); float * a = l.weights_gpu; float * b = l.delta_gpu; float * c = state.workspace; gemm_ongpu(1,0,n,k,m,1,a,n,b + i*k*m,k,0,c,k); col2im_ongpu(state.workspace, l.c, l.h, l.w, l.size, l.stride, l.pad, state.delta + i*l.c*l.h*l.w); if(l.binary || l.xnor) { swap_binary(&l); } if(l.xnor) gradient_array_ongpu(original_input + i*l.c*l.h*l.w, l.c*l.h*l.w, HARDTAN, state.delta + i*l.c*l.h*l.w); } } #endif } void pull_convolutional_layer(convolutional_layer layer) { cuda_pull_array(layer.weights_gpu, layer.weights, layer.c*layer.n*layer.size*layer.size); cuda_pull_array(layer.biases_gpu, layer.biases, layer.n); cuda_pull_array(layer.weight_updates_gpu, layer.weight_updates, layer.c*layer.n*layer.size*layer.size); cuda_pull_array(layer.bias_updates_gpu, layer.bias_updates, layer.n); if (layer.batch_normalize){ cuda_pull_array(layer.scales_gpu, layer.scales, layer.n); cuda_pull_array(layer.rolling_mean_gpu, layer.rolling_mean, layer.n); cuda_pull_array(layer.rolling_variance_gpu, layer.rolling_variance, layer.n); } if (layer.adam){ cuda_pull_array(layer.m_gpu, layer.m, layer.c*layer.n*layer.size*layer.size); cuda_pull_array(layer.v_gpu, layer.v, layer.c*layer.n*layer.size*layer.size); } } void push_convolutional_layer(convolutional_layer layer) { cuda_push_array(layer.weights_gpu, layer.weights, layer.c*layer.n*layer.size*layer.size); #ifdef CUDNN_HALF cuda_convert_f32_to_f16(layer.weights_gpu, layer.c*layer.n*layer.size*layer.size, layer.weights_gpu16); #endif 
cuda_push_array(layer.biases_gpu, layer.biases, layer.n); cuda_push_array(layer.weight_updates_gpu, layer.weight_updates, layer.c*layer.n*layer.size*layer.size); cuda_push_array(layer.bias_updates_gpu, layer.bias_updates, layer.n); if (layer.batch_normalize){ cuda_push_array(layer.scales_gpu, layer.scales, layer.n); cuda_push_array(layer.rolling_mean_gpu, layer.rolling_mean, layer.n); cuda_push_array(layer.rolling_variance_gpu, layer.rolling_variance, layer.n); } if (layer.adam){ cuda_push_array(layer.m_gpu, layer.m, layer.c*layer.n*layer.size*layer.size); cuda_push_array(layer.v_gpu, layer.v, layer.c*layer.n*layer.size*layer.size); } } void update_convolutional_layer_gpu(convolutional_layer layer, int batch, float learning_rate, float momentum, float decay) { int size = layer.size*layer.size*layer.c*layer.n; axpy_ongpu(layer.n, learning_rate/batch, layer.bias_updates_gpu, 1, layer.biases_gpu, 1); scal_ongpu(layer.n, momentum, layer.bias_updates_gpu, 1); if(layer.scales_gpu){ axpy_ongpu(layer.n, learning_rate/batch, layer.scale_updates_gpu, 1, layer.scales_gpu, 1); scal_ongpu(layer.n, momentum, layer.scale_updates_gpu, 1); } if(layer.adam){ scal_ongpu(size, layer.B1, layer.m_gpu, 1); scal_ongpu(size, layer.B2, layer.v_gpu, 1); axpy_ongpu(size, -decay*batch, layer.weights_gpu, 1, layer.weight_updates_gpu, 1); axpy_ongpu(size, -(1-layer.B1), layer.weight_updates_gpu, 1, layer.m_gpu, 1); mul_ongpu(size, layer.weight_updates_gpu, 1, layer.weight_updates_gpu, 1); axpy_ongpu(size, (1-layer.B2), layer.weight_updates_gpu, 1, layer.v_gpu, 1); adam_gpu(size, layer.weights_gpu, layer.m_gpu, layer.v_gpu, layer.B1, layer.B2, learning_rate/batch, layer.eps, layer.t+1); fill_ongpu(size, 0, layer.weight_updates_gpu, 1); }else{ // update weights: // weights_gpu = weights_gpu*(1 - decay*lr) + weight_updates_gpu*lr / (batch*subdivision) = // weights_gpu*(1 - 0.0005*0.001) + weight_updates_gpu*0.001/(64*8) = // weights_gpu * 0.999 999 5 + weight_updates_gpu * 0.000 001 953125 // // weight_updates_gpu = (weight_updates_gpu - weights_gpu*decay*batch*subdivision)*momentum = // (weight_updates_gpu - weights_gpu * 0.0005 * 64 * 8) * 0.9 = // weight_updates_gpu*0.9 - weights_gpu*0.2304 axpy_ongpu(size, -decay*batch, layer.weights_gpu, 1, layer.weight_updates_gpu, 1); axpy_ongpu(size, learning_rate/batch, layer.weight_updates_gpu, 1, layer.weights_gpu, 1); scal_ongpu(size, momentum, layer.weight_updates_gpu, 1); } }
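A minimal host-side sketch (not from darknet) of the arithmetic that update_convolutional_layer_gpu performs through axpy_ongpu/scal_ongpu in its plain-SGD (non-Adam) branch; the constants mirror the example values quoted in that branch's comments (lr = 0.001, decay = 0.0005, batch*subdivisions = 64*8, momentum = 0.9), and the single weight/update values below are made-up illustrative numbers, not anything read from a config.

#include <stdio.h>

int main(void)
{
    const float lr = 0.001f, decay = 0.0005f, momentum = 0.9f;
    const int   batch = 64 * 8;              /* batch * subdivisions */

    float w        = 1.0f;                   /* one weight (illustrative value)       */
    float w_update = 0.5f;                   /* its accumulated update (illustrative) */

    /* axpy_ongpu(size, -decay*batch, weights_gpu, 1, weight_updates_gpu, 1) */
    w_update += -decay * batch * w;

    /* axpy_ongpu(size, learning_rate/batch, weight_updates_gpu, 1, weights_gpu, 1) */
    w += (lr / batch) * w_update;

    /* scal_ongpu(size, momentum, weight_updates_gpu, 1) */
    w_update *= momentum;

    /* matches the closed form in the original comments:
       w        -> w*(1 - decay*lr) + w_update*lr/batch  (~ w*0.9999995 + w_update*1.953125e-6)
       w_update -> (w_update - w*decay*batch)*momentum */
    printf("w = %.9f  w_update = %.9f\n", w, w_update);
    return 0;
}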
73cdc2995c03d893ecffaca20cad8ee816e31054.hip
// !!! This is a file automatically generated by hipify!!! // Includes {{{ #include <iostream> #include <itpp/itbase.h> #include <spinchain.cpp> #include <dev_random.cpp> #include <itpp_ext_math.cpp> #include <math.h> #include <tclap/CmdLine.h> #include <hip/device_functions.h> #include <hip/hip_runtime.h> #include "tools.cpp" #include "hip_functions.hip" #include "hip_utils.hip" #include "ev_routines.cu" #include "cfp_routines.cu" #include <time.h> // }}} // TCLAP setup {{{ TCLAP::CmdLine cmd("Command description message", ' ', "0.1"); TCLAP::ValueArg<unsigned int> seed("s","seed", "Random seed [0 for urandom]",false, 243243,"unsigned int",cmd); TCLAP::ValueArg<string> optionArg("o","option", "Option" ,false,"nichts", "string",cmd); TCLAP::ValueArg<int> nqubits("q","qubits", "Number of qubits",false, 3,"int",cmd); TCLAP::ValueArg<int> numt("","t", "Number of time iterartions",false, 1,"int",cmd); TCLAP::ValueArg<int> position("","position", "The position of something",false, 0,"int",cmd); TCLAP::ValueArg<int> whichq("","which", "Which qubits in densmat",false, 1,"int",cmd); TCLAP::ValueArg<int> x("","x", "Size of the x-dimention",false, 0,"int",cmd); // TCLAP::ValueArg<int> y("","y", "Size of the y-dimention",false, 0,"int",cmd); //TCLAP::ValueArg<int> position2("","position2", "The position of something",false, 3,"int",cmd); TCLAP::ValueArg<double> ising("","ising_z", "Ising interaction in the z-direction",false, 0,"double",cmd); TCLAP::ValueArg<double> deltav("","delta", "Some small delta",false, 1,"double",cmd); TCLAP::ValueArg<int> trotternum("","trotter", "Number of steps for trotter-suzuki algorithm",false, 1,"int",cmd); TCLAP::ValueArg<double> bx("","bx", "Magnetic field in x direction",false, 0,"double",cmd); TCLAP::ValueArg<double> by("","by", "Magnetic field in y direction",false, 0,"double",cmd); TCLAP::ValueArg<double> bz("","bz", "Magnetic field in z direction",false, 0,"double",cmd); TCLAP::ValueArg<double> beginx("","startx", "Magnetic field start in x direction",false, 0,"double",cmd); TCLAP::ValueArg<double> beginz("","startz", "Magnetic field start in z direction",false, 0,"double",cmd); TCLAP::ValueArg<double> km("","k", "Momentum of the proyector",false,0,"double",cmd); TCLAP::ValueArg<int> one_state("","one_state", "State l",false, 0,"int",cmd); TCLAP::ValueArg<int> ifrandom("","ifrandom", "0 if you dont want randstate",false,1,"int",cmd); TCLAP::ValueArg<int> dev("","dev", "Gpu to be used, 0 for k20, 1 for c20",false, 0,"int",cmd); TCLAP::SwitchArg no_general_report("","no_general_report", "Print the general report", cmd); // }}} double diffclock(clock_t clock1,clock_t clock2) // {{{ { double diffticks=clock1-clock2; double diffms=(diffticks*1000)/CLOCKS_PER_SEC; return diffms; } // }}} // }}} int main(int argc,char* argv[]) { // Setup CUDA devide, random numbers, command line parserc, and other parametrs {{{ hipSetDevice(dev.getValue()); // itpp::RNG_randomize(); double error=0; cmd.parse(argc,argv); // {{{ Set seed for random unsigned int semilla=seed.getValue(); // std::cout << "La semilla es " << semilla << endl; if (semilla == 0){ Random semilla_uran; semilla=semilla_uran.strong(); } itpp::RNG_reset(semilla); // }}} // {{{ Report on the screen if(!no_general_report.getValue()){ cout << "#linea de comando: "; for(int i=0;i<argc;i++){ cout <<argv[i]<<" " ; } cout << endl ; cout << "#semilla = " << semilla << endl; error += system("echo \\#hostname: $(hostname)"); error += system("echo \\#comenzando en: $(date)"); error += system("echo \\#uname -a: $(uname -a)"); 
error += system("echo \\#working dir: $(pwd)"); } // }}} string option=optionArg.getValue(); int l=pow(2,nqubits.getValue()); int numthreads, numblocks; choosenumblocks(l,numthreads,numblocks); int div=choosediv(nqubits.getValue()); // }}} // Create workspace in the CPU double *R=new double[l], *I=new double[l]; // Create random state {{{ double *dev_R, *dev_I; randomstate(l,R,I); if (ifrandom.getValue()!=1) { for(int i=0;i<l;i++) { R[i]=0; I[i]=0; } R[one_state.getValue()]=1; } cudaSafeCall(hipMalloc((void**)&dev_R,l*sizeof(double)),"malloc",124); cudaSafeCall(hipMalloc((void**)&dev_I,l*sizeof(double)),"malloc",125); cudaSafeCall(hipMemcpy(dev_R,R,l*sizeof(double),hipMemcpyHostToDevice),"memcpy",127); cudaSafeCall(hipMemcpy(dev_I,I,l*sizeof(double),hipMemcpyHostToDevice),"memcpy",128); // }}} if (option=="test_apply_ising") { // {{{ double mcos=cos(ising.getValue()); double msin=sin(ising.getValue()); for(int n=0;n<numt.getValue();n++) { for(int i=0;i<nqubits.getValue();i++) { hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,(i+1)%nqubits.getValue(),dev_R,dev_I,mcos,msin,l); } } hipMemcpy(R,dev_R,l*sizeof(double),hipMemcpyDeviceToHost); hipMemcpy(I,dev_I,l*sizeof(double),hipMemcpyDeviceToHost); hipFree(dev_R); hipFree(dev_I); for(int i=0;i<l;i++) { std::cout<<R[i]<<" i"<<I[i]<<endl; } } // }}} if (option=="test_apply_magnetic_kick") { // {{{ double theta=sqrt(bx.getValue()*bx.getValue()+by.getValue()*by.getValue()+bz.getValue()*bz.getValue()); double bx2=bx.getValue()/theta; double by2=by.getValue()/theta; double bz2=bz.getValue()/theta; double mcos=cos(theta); double msin=sin(theta); for(int n=0;n<numt.getValue();n++) { for(int i=0;i<nqubits.getValue();i++) { hipLaunchKernelGGL(( Uk_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,dev_R,dev_I,bx2,by2,bz2,mcos,msin,l); } } hipMemcpy(R,dev_R,l*sizeof(double),hipMemcpyDeviceToHost); hipMemcpy(I,dev_I,l*sizeof(double),hipMemcpyDeviceToHost); for(int i=0;i<l;i++) { std::cout<<R[i]<<" i"<<I[i]<<endl; } } // }}} if (option=="apply_chain") { // {{{ double icos=cos(ising.getValue()); double isin=sin(ising.getValue()); double theta=sqrt(bx.getValue()*bx.getValue()+by.getValue()*by.getValue()+bz.getValue()*bz.getValue()); double bx2=bx.getValue()/theta; double by2=by.getValue()/theta; double bz2=bz.getValue()/theta; double kcos=cos(theta); double ksin=sin(theta); for(int n=0;n<numt.getValue();n++) { for(int i=0;i<nqubits.getValue();i++) { hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,(i+1)%nqubits.getValue(),dev_R,dev_I,icos,isin,l); cudaCheckError("ising",i); } for(int i=0;i<nqubits.getValue();i++) { hipLaunchKernelGGL(( Uk_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,dev_R,dev_I,bx2,by2,bz2,kcos,ksin,l); cudaCheckError("kick",i); } } hipMemcpy(R,dev_R,l*sizeof(double),hipMemcpyDeviceToHost); hipMemcpy(I,dev_I,l*sizeof(double),hipMemcpyDeviceToHost); for(int i=0;i<l;i++) { std::cout<<R[i]<<" i"<<I[i]<<endl; } } // }}} if (option=="measure_time") { // {{{ hipEvent_t kstart, kstop; // hipEvent_t cstart, cstop, kstart, kstop; double icos=cos(ising.getValue()); double isin=sin(ising.getValue()); double theta=sqrt(bx.getValue()*bx.getValue()+by.getValue()*by.getValue()+bz.getValue()*bz.getValue()); double bx2=bx.getValue()/theta; double by2=by.getValue()/theta; double bz2=bz.getValue()/theta; double kcos=cos(theta); double ksin=sin(theta); float timek=0; hipEventCreate(&kstart); hipEventCreate(&kstop); hipEventRecord(kstart,0); clock_t begin=clock(); //system("echo \\#Comenzando a hacer 
varias iteraciones: $(date)"); //cout << "Iteraciones son " << numt.getValue() << endl; for(int n=0;n<numt.getValue();n++) { for(int i=0;i<nqubits.getValue();i++) { hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,(i+1)%nqubits.getValue(),dev_R,dev_I,icos,isin,l); cudaCheckError("ising",i); } for(int i=0;i<nqubits.getValue();i++) { hipLaunchKernelGGL(( Uk_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,dev_R,dev_I,bx2,by2,bz2,kcos,ksin,l); cudaCheckError("kick",i); } } //system("echo \\#Terminando a hacer varias iteraciones: $(date)"); hipEventRecord(kstop,0); hipEventSynchronize(kstop); hipEventElapsedTime(&timek,kstart,kstop); clock_t end=clock(); double tiempo=double(diffclock(end,begin)); std::cout<<"CUDA EVENT "<<timek/(numt.getValue()*100)<<endl; cout <<"C clock "<<tiempo/(numt.getValue()*100)<< endl; } // }}} if (option=="check_inverse") { // {{{ double icos=cos(ising.getValue()); double isin=sin(ising.getValue()); double theta=sqrt(bx.getValue()*bx.getValue()+by.getValue()*by.getValue()+bz.getValue()*bz.getValue()); double bx2=bx.getValue()/theta; double by2=by.getValue()/theta; double bz2=bz.getValue()/theta; double kcos=cos(theta); double ksin=sin(theta); itpp::cvec initialstate(l); itpp::cvec finalstate(l); for(int i=0;i<l;i++) { initialstate(i)=std::complex<double>(R[i],I[i]); } //se aplica la U for(int t=0;t<numt.getValue();t++) { for(int i=0;i<nqubits.getValue();i++) { hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,(i+1)%nqubits.getValue(),dev_R,dev_I,icos,isin,l); cudaCheckError("ising",i); } for(int i=0;i<nqubits.getValue();i++) { hipLaunchKernelGGL(( Uk_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,dev_R,dev_I,bx2,by2,bz2,kcos,ksin,l); cudaCheckError("kick",i); } } //Se aplica U^-1 for(int t=0;t<numt.getValue();t++) { for(int i=0;i<nqubits.getValue();i++) { hipLaunchKernelGGL(( Uk_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,dev_R,dev_I,-1*bx2,-1*by2,-1*bz2,kcos,ksin,l); cudaCheckError("kick",i); } for(int i=0;i<nqubits.getValue();i++) { hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,(i+1)%nqubits.getValue(),dev_R,dev_I,icos,-1*isin,l); cudaCheckError("ising",i); } } hipMemcpy(R,dev_R,l*sizeof(double),hipMemcpyDeviceToHost); hipMemcpy(I,dev_I,l*sizeof(double),hipMemcpyDeviceToHost); for(int i=0;i<l;i++) { finalstate(i)=std::complex<double>(R[i],I[i]); } cout<<std::norm(itpp::dot(itpp::conj(initialstate),finalstate))<<endl; } // }}} if (option=="check_inverse_trotter2d") { // {{{ int xlen=x.getValue(); int num_trotter=trotternum.getValue(); double delta=1./num_trotter; cout << delta << endl; int i_hor,i_ver; double icos=cos((delta/2.)*ising.getValue()); double isin=sin((delta/2.)*ising.getValue()); double theta=(delta)*sqrt(bx.getValue()*bx.getValue()+by.getValue()*by.getValue()+bz.getValue()*bz.getValue()); double bx2=delta*bx.getValue()/theta; double by2=delta*by.getValue()/theta; double bz2=delta*bz.getValue()/theta; double kcos=cos(theta); double ksin=sin(theta); itpp::cvec initialstate(l); itpp::cvec finalstate(l); for(int i=0;i<l;i++) { initialstate(i)=std::complex<double>(R[i],I[i]); } //se aplica la U for(int t=0;t<numt.getValue();t++) { for(int it=0;it<num_trotter;it++) { for(int i=0;i<nqubits.getValue();i+=2) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_hor,dev_R,dev_I,icos,isin,l); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, 
i,i_ver,dev_R,dev_I,icos,isin,l); } for(int i=0;i<nqubits.getValue();i++) { hipLaunchKernelGGL(( Uk_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,dev_R,dev_I,bx2,by2,bz2,kcos,ksin,l); } for(int i=1;i<nqubits.getValue();i+=2) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_hor,dev_R,dev_I,icos,isin,l); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_ver,dev_R,dev_I,icos,isin,l); } } } //Se aplica U^-1 for(int t=0;t<numt.getValue();t++) { for(int it=0;it<num_trotter;it++) { for(int i=1;i<nqubits.getValue();i+=2) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_hor,dev_R,dev_I,icos,-1*isin,l); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_ver,dev_R,dev_I,icos,-1*isin,l); } for(int i=0;i<nqubits.getValue();i++) { hipLaunchKernelGGL(( Uk_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,dev_R,dev_I,-1*bx2,-1*by2,-1*bz2,kcos,ksin,l); } for(int i=0;i<nqubits.getValue();i+=2) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_hor,dev_R,dev_I,icos,-1*isin,l); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_ver,dev_R,dev_I,icos,-1*isin,l); } } } hipMemcpy(R,dev_R,l*sizeof(double),hipMemcpyDeviceToHost); hipMemcpy(I,dev_I,l*sizeof(double),hipMemcpyDeviceToHost); for(int i=0;i<l;i++) { finalstate(i)=std::complex<double>(R[i],I[i]); } cout<<itpp::norm(initialstate-finalstate)<<endl; } // }}} if (option=="correlation_measure") { // {{{ double *sumdxR=new double[l]; double *sumdxI=new double[l]; double *dev_sumdxR; double *dev_sumdxI; hipMalloc((void**)&dev_sumdxR,l*sizeof(double)); hipMalloc((void**)&dev_sumdxI,l*sizeof(double)); double bx2,by2,bz2,kcos,ksin,icos,isin; itpp::vec b(3); b(0)=bx.getValue(); b(1)=by.getValue(); b(2)=bz.getValue(); set_parameters(ising.getValue(),b,icos,isin,kcos,ksin,bx2,by2,bz2); itpp::cvec finalstate(l); itpp::cvec zerostate(l); double res; // double res,norm; hipLaunchKernelGGL(( sumsigma_x), dim3(numblocks),dim3(numthreads), 0, 0, dev_R,dev_I,dev_sumdxR,dev_sumdxI,nqubits.getValue(),l); cudaCheckError("sum_dx",1); hipMemcpy(sumdxR,dev_sumdxR,l*sizeof(double),hipMemcpyDeviceToHost); hipMemcpy(sumdxI,dev_sumdxI,l*sizeof(double),hipMemcpyDeviceToHost); for(int i=0;i<l;i++) { zerostate(i)=std::complex<double>(sumdxR[i],sumdxI[i]); } for(int n=0;n<numt.getValue();n++) { //se aplica M hipLaunchKernelGGL(( sumsigma_x), dim3(numblocks),dim3(numthreads), 0, 0, dev_R,dev_I,dev_sumdxR,dev_sumdxI,nqubits.getValue(),l); //se aplica U^-1 for(int t=0;t<n;t++) { for(int i=0;i<nqubits.getValue();i++) { hipLaunchKernelGGL(( Uk_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,dev_sumdxR,dev_sumdxI,-1*bx2,-1*by2,-1*bz2,kcos,ksin,l); //cudaCheckError("kick",i); } for(int i=0;i<nqubits.getValue();i++) { hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,(i+1)%nqubits.getValue(),dev_sumdxR,dev_sumdxI,icos,-1*isin,l); //cudaCheckError("ising",i); } } //se aplica la U for(int i=0;i<nqubits.getValue();i++) { hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,(i+1)%nqubits.getValue(),dev_R,dev_I,icos,isin,l); //cudaCheckError("ising",i); } for(int i=0;i<nqubits.getValue();i++) { hipLaunchKernelGGL(( Uk_kernel), dim3(numblocks),dim3(numthreads), 0, 0, 
i,dev_R,dev_I,bx2,by2,bz2,kcos,ksin,l); //cudaCheckError("kick",i); } hipMemcpy(sumdxR,dev_sumdxR,l*sizeof(double),hipMemcpyDeviceToHost); hipMemcpy(sumdxI,dev_sumdxI,l*sizeof(double),hipMemcpyDeviceToHost); for(int i=0;i<l;i++) { finalstate(i)=std::complex<double>(sumdxR[i],sumdxI[i]); } res=std::norm(itpp::dot(itpp::conj(zerostate),finalstate)); //std::cout<<itpp::dot(itpp::conj(zerostate),finalstate); cout<<sqrt(res)/nqubits.getValue()<<endl; } } // }}} if (option=="correlation_measure_test") { // {{{ //this method computes fater correlations at the cost of having two states in global mem double *sumdxR=new double[l]; double *sumdxI=new double[l]; double *dev_sumdxR; double *dev_sumdxI; double *zeroR=new double[l]; double *zeroI=new double[l]; double *dev_zeroR; double *dev_zeroI; double *resR=new double[numt.getValue()]; double *dev_resR; double *resI=new double[numt.getValue()]; double *dev_resI; hipMalloc((void**)&dev_sumdxR,l*sizeof(double)); hipMalloc((void**)&dev_sumdxI,l*sizeof(double)); hipMalloc((void**)&dev_zeroR,l*sizeof(double)); hipMalloc((void**)&dev_zeroI,l*sizeof(double)); hipMalloc((void**)&dev_resR,numt.getValue()*sizeof(double)); hipMalloc((void**)&dev_resI,numt.getValue()*sizeof(double)); double icos=cos(ising.getValue()); double isin=sin(ising.getValue()); double theta=sqrt(bx.getValue()*bx.getValue()+by.getValue()*by.getValue()+bz.getValue()*bz.getValue()); double bx2=bx.getValue()/theta; double by2=by.getValue()/theta; double bz2=bz.getValue()/theta; double kcos=cos(theta); double ksin=sin(theta); hipLaunchKernelGGL(( sumsigma_x), dim3(numblocks),dim3(numthreads), 0, 0, dev_R,dev_I,dev_zeroR,dev_zeroI,nqubits.getValue(),l); cudaCheckError("sum_dx",1); for(int n=0;n<numt.getValue();n++) { //se aplica M hipLaunchKernelGGL(( sumsigma_x), dim3(numblocks),dim3(numthreads), 0, 0, dev_R,dev_I,dev_sumdxR,dev_sumdxI,nqubits.getValue(),l); //se aplica U^-1 for(int t=0;t<n;t++) { for(int i=0;i<nqubits.getValue();i++) { hipLaunchKernelGGL(( Uk_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,dev_sumdxR,dev_sumdxI,-1*bx2,-1*by2,-1*bz2,kcos,ksin,l); //cudaCheckError("kick",i); } for(int i=0;i<nqubits.getValue();i++) { hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,(i+1)%nqubits.getValue(),dev_sumdxR,dev_sumdxI,icos,-1*isin,l); //cudaCheckError("ising",i); } } //se aplica la U for(int i=0;i<nqubits.getValue();i++) { hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,(i+1)%nqubits.getValue(),dev_R,dev_I,icos,isin,l); //cudaCheckError("ising",i); } for(int i=0;i<nqubits.getValue();i++) { hipLaunchKernelGGL(( Uk_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,dev_R,dev_I,bx2,by2,bz2,kcos,ksin,l); //cudaCheckError("kick",i); } hipLaunchKernelGGL(( timed_dot), dim3(numblocks),dim3(numthreads), 0, 0, n,dev_zeroR,dev_zeroI,dev_sumdxR,dev_sumdxI,dev_resR,dev_resI,l); cudaCheckError("dot",0); } hipMemcpy(resR,dev_resR,numt.getValue()*sizeof(double),hipMemcpyDeviceToHost); hipMemcpy(resI,dev_resI,numt.getValue()*sizeof(double),hipMemcpyDeviceToHost); for(int i=0;i<numt.getValue();i++) { cout<<sqrt(resR[i]*resR[i]+resI[i]*resI[i])/nqubits.getValue()<<endl; } } // }}} if (option=="fidelity_measure") { // {{{ double *AR=new double[l]; double *AI=new double[l]; double *dev_AR; double *dev_AI; hipMalloc((void**)&dev_AR,l*sizeof(double)); hipMalloc((void**)&dev_AI,l*sizeof(double)); double delta=deltav.getValue(); double icos=cos(ising.getValue()); double isin=sin(ising.getValue()); double 
theta=sqrt(bx.getValue()*bx.getValue()+by.getValue()*by.getValue()+bz.getValue()*bz.getValue()); double bx2=bx.getValue()/theta; double by2=by.getValue()/theta; double bz2=bz.getValue()/theta; double kcos=cos(theta); double ksin=sin(theta); double thetadel=delta; double bx2del=1.; double by2del=0.; double bz2del=0.; double kcosdel=cos(thetadel); double ksindel=sin(thetadel); itpp::cvec leftstate(l); itpp::cvec rightstate(l); for(int i=0;i<l;i++) { leftstate(i)=std::complex<double>(R[i],I[i]); rightstate(i)=std::complex<double>(R[i],I[i]); } cout<<std::norm(itpp::dot(itpp::conj(leftstate),rightstate))<<endl; // devcpy<<<numblocks,numthreads>>>(l,dev_R,dev_I,dev_AR,dev_AI); for(int n=0;n<numt.getValue();n++) { //se aplica la U for(int i=0;i<nqubits.getValue();i++) { hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,(i+1)%nqubits.getValue(),dev_R,dev_I,icos,isin,l); // cudaCheckError("ising",i); } for(int i=0;i<nqubits.getValue();i++) { hipLaunchKernelGGL(( Uk_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,dev_R,dev_I,bx2,by2,bz2,kcos,ksin,l); //cudaCheckError("kick",i); } //Se aplica M_delta for(int i=0;i<nqubits.getValue();i++) { hipLaunchKernelGGL(( Uk_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,dev_R,dev_I,bx2del,by2del,bz2del,kcosdel,ksindel,l); //cudaCheckError("kick",i); } //Se aplica U^-1 for(int i=0;i<nqubits.getValue();i++) { hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,(i+1)%nqubits.getValue(),dev_AR,dev_AI,icos,isin,l); //cudaCheckError("kick",i); } for(int i=0;i<nqubits.getValue();i++) { hipLaunchKernelGGL(( Uk_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,dev_AR,dev_AI,bx2,by2,bz2,kcos,ksin,l); //cudaCheckError("ising",i); } hipMemcpy(AR,dev_AR,l*sizeof(double),hipMemcpyDeviceToHost); hipMemcpy(AI,dev_AI,l*sizeof(double),hipMemcpyDeviceToHost); hipMemcpy(R,dev_R,l*sizeof(double),hipMemcpyDeviceToHost); hipMemcpy(I,dev_I,l*sizeof(double),hipMemcpyDeviceToHost); for(int i=0;i<l;i++) { leftstate(i)=std::complex<double>(R[i],I[i]); rightstate(i)=std::complex<double>(AR[i],AI[i]); } cout<<std::norm(itpp::dot(itpp::conj(leftstate),rightstate))<<endl; } } // }}} if (option=="fidelity_measure2d") { // {{{ int xlen=x.getValue(); // int ylen=y.getValue(); double *AR=new double[l]; double *AI=new double[l]; double *dev_AR; double *dev_AI; hipMalloc((void**)&dev_AR,l*sizeof(double)); hipMalloc((void**)&dev_AI,l*sizeof(double)); double delta=deltav.getValue(); double icos=cos(ising.getValue()); double isin=sin(ising.getValue()); double theta=sqrt(bx.getValue()*bx.getValue()+by.getValue()*by.getValue()+bz.getValue()*bz.getValue()); double bx2=bx.getValue()/theta; double by2=by.getValue()/theta; double bz2=bz.getValue()/theta; double kcos=cos(theta); double ksin=sin(theta); double thetadel=delta; double bx2del=1.; double by2del=0.; double bz2del=0.; double kcosdel=cos(thetadel); double ksindel=sin(thetadel); int i_hor,i_ver; itpp::cvec leftstate(l); itpp::cvec rightstate(l); for(int i=0;i<l;i++) { leftstate(i)=std::complex<double>(R[i],I[i]); rightstate(i)=std::complex<double>(R[i],I[i]); } cout<<std::norm(itpp::dot(itpp::conj(leftstate),rightstate))<<endl; // devcpy<<<numblocks,numthreads>>>(l,dev_R,dev_I,dev_AR,dev_AI); for(int n=0;n<numt.getValue();n++) { //se aplica la U for(int i=0;i<nqubits.getValue();i++) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_hor,dev_R,dev_I,icos,isin,l); hipLaunchKernelGGL(( Ui_kernel), 
dim3(numblocks),dim3(numthreads), 0, 0, i,i_ver,dev_R,dev_I,icos,isin,l); // cudaCheckError("ising",i); } for(int i=0;i<nqubits.getValue();i++) { hipLaunchKernelGGL(( Uk_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,dev_R,dev_I,bx2,by2,bz2,kcos,ksin,l); //cudaCheckError("kick",i); } //Se aplica M_delta for(int i=0;i<nqubits.getValue();i++) { hipLaunchKernelGGL(( Uk_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,dev_R,dev_I,bx2del,by2del,bz2del,kcosdel,ksindel,l); //cudaCheckError("kick",i); } //Se aplica U^-1 for(int i=0;i<nqubits.getValue();i++) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_hor,dev_AR,dev_AI,icos,isin,l); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_ver,dev_AR,dev_AI,icos,isin,l); //cudaCheckError("kick",i); } for(int i=0;i<nqubits.getValue();i++) { hipLaunchKernelGGL(( Uk_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,dev_AR,dev_AI,bx2,by2,bz2,kcos,ksin,l); //cudaCheckError("ising",i); } hipMemcpy(AR,dev_AR,l*sizeof(double),hipMemcpyDeviceToHost); hipMemcpy(AI,dev_AI,l*sizeof(double),hipMemcpyDeviceToHost); hipMemcpy(R,dev_R,l*sizeof(double),hipMemcpyDeviceToHost); hipMemcpy(I,dev_I,l*sizeof(double),hipMemcpyDeviceToHost); for(int i=0;i<l;i++) { leftstate(i)=std::complex<double>(R[i],I[i]); rightstate(i)=std::complex<double>(AR[i],AI[i]); } cout<<std::norm(itpp::dot(itpp::conj(leftstate),rightstate))<<endl; } } // }}} if (option=="QFT") { // {{{ for(int n=0;n<numt.getValue();n++) { for(int i=0;i<nqubits.getValue();i++) { hipLaunchKernelGGL(( QFT), dim3(numblocks),dim3(numthreads), 0, 0, i,dev_R,dev_I,l,nqubits.getValue()); } } hipMemcpy(R,dev_R,l*sizeof(double),hipMemcpyDeviceToHost); hipMemcpy(I,dev_I,l*sizeof(double),hipMemcpyDeviceToHost); for(int i=0;i<l;i++) { std::cout<<R[i]<<" i"<<I[i]<<endl; } } // }}} if (option=="correlation_measure_carlos") { // {{{ itpp::vec magnetic_field(3); magnetic_field(0)=bx.getValue(); magnetic_field(1)=by.getValue(); magnetic_field(2)=bz.getValue(); int qubits = nqubits.getValue(); int xlen=x.getValue(); double *sumdxR=new double[l]; double *sumdxI=new double[l]; double *dev_sumdxR; double *dev_sumdxI; hipMalloc((void**)&dev_sumdxR,l*sizeof(double)); hipMalloc((void**)&dev_sumdxI,l*sizeof(double)); double icos=cos(ising.getValue()); double isin=sin(ising.getValue()); itpp::cvec finalstate(l); itpp::cvec zerostate(l); double res; hipLaunchKernelGGL(( sumsigma_x), dim3(numblocks),dim3(numthreads), 0, 0, dev_R,dev_I,dev_sumdxR,dev_sumdxI,nqubits.getValue(),l); cudaCheckError("sum_dx",1); hipMemcpy(sumdxR,dev_sumdxR,l*sizeof(double),hipMemcpyDeviceToHost); hipMemcpy(sumdxI,dev_sumdxI,l*sizeof(double),hipMemcpyDeviceToHost); for(int i=0;i<l;i++) { zerostate(i)=std::complex<double>(sumdxR[i],sumdxI[i]); } for(int n=0;n<numt.getValue();n++) { //se aplica M hipLaunchKernelGGL(( sumsigma_x), dim3(numblocks),dim3(numthreads), 0, 0, dev_R,dev_I,dev_sumdxR,dev_sumdxI,nqubits.getValue(),l); //se aplica U^-1 evcuda::apply_floquet2d(dev_sumdxR,dev_sumdxI, magnetic_field, ising.getValue() ,qubits, xlen); //se aplica U evcuda::apply_floquet2d(dev_R, dev_I, magnetic_field, ising.getValue() ,qubits, xlen); // se calcula el producto punto itppcuda::cuda2itpp(finalstate,dev_sumdxR, dev_sumdxI); res=std::norm(itpp::dot(itpp::conj(zerostate),finalstate)); cout<<sqrt(res)/nqubits.getValue()<<endl; } } // }}} if (option=="correlation_measure2d") { // {{{ int xlen=x.getValue(); // int ylen=y.getValue(); // 
cout<<square<<endl; double *sumdxR=new double[l]; double *sumdxI=new double[l]; double *dev_sumdxR; double *dev_sumdxI; hipMalloc((void**)&dev_sumdxR,l*sizeof(double)); hipMalloc((void**)&dev_sumdxI,l*sizeof(double)); double *inR=new double[l]; double *inI=new double[l]; double *dev_inR; double *dev_inI; hipMalloc((void**)&dev_inR,l*sizeof(double)); hipMalloc((void**)&dev_inI,l*sizeof(double)); double bx2,by2,bz2,kcos,ksin,icos,isin; itpp::vec b(3); b(0)=bx.getValue(); b(1)=by.getValue(); b(2)=bz.getValue(); set_parameters(ising.getValue(),b,icos,isin,kcos,ksin,bx2,by2,bz2); itpp::cvec finalstate(l); itpp::cvec zerostate(l); double res; // double res,norm; int i_hor,i_ver; itpp::vec b_obs(3); b_obs(0)=1.; b_obs(1)=0.; b_obs(2)=0.; double cos_obs,sin_obs,bx_obs,by_obs,bz_obs; set_parameters(b_obs,cos_obs,sin_obs,bx_obs,by_obs,bz_obs); //sumsigma_x<<<numblocks,numthreads>>>(dev_R,dev_I,dev_inR,dev_inI,nqubits.getValue(),l); //OBSERBABLE hipLaunchKernelGGL(( devcpy), dim3(numblocks),dim3(numthreads), 0, 0, l,dev_R,dev_I,dev_inR,dev_inI); for(int i=0;i<nqubits.getValue();i++) { hipLaunchKernelGGL(( Uk_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,dev_inR,dev_inI,bx_obs,by_obs,bz_obs,cos_obs,sin_obs,l); //cudaCheckError("kick",i); } //cudaCheckError("sum_dx",1); for(int n=0;n<numt.getValue();n++) { //se aplica M //sumsigma_x<<<numblocks,numthreads>>>(dev_R,dev_I,dev_sumdxR,dev_sumdxI,nqubits.getValue(),l); //OBSERBABLE hipLaunchKernelGGL(( devcpy), dim3(numblocks),dim3(numthreads), 0, 0, l,dev_R,dev_I,dev_sumdxR,dev_sumdxI); for(int i=0;i<nqubits.getValue();i++) { hipLaunchKernelGGL(( Uk_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,dev_sumdxR,dev_sumdxI,bx_obs,by_obs,bz_obs,cos_obs,sin_obs,l); //cudaCheckError("kick",i); } hipMemcpy(inR,dev_inR,l*sizeof(double),hipMemcpyDeviceToHost); hipMemcpy(inI,dev_inI,l*sizeof(double),hipMemcpyDeviceToHost); hipMemcpy(sumdxR,dev_sumdxR,l*sizeof(double),hipMemcpyDeviceToHost); hipMemcpy(sumdxI,dev_sumdxI,l*sizeof(double),hipMemcpyDeviceToHost); //se aplica U a in for(int i=0;i<nqubits.getValue();i++) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_hor,dev_inR,dev_inI,icos,isin,l); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_ver,dev_inR,dev_inI,icos,isin,l); //cudaCheckError("ising",i); } for(int i=0;i<nqubits.getValue();i++) { hipLaunchKernelGGL(( Uk_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,dev_inR,dev_inI,bx2,by2,bz2,kcos,ksin,l); //cudaCheckError("kick",i); } //se aplica la U for(int i=0;i<nqubits.getValue();i++) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_hor,dev_R,dev_I,icos,isin,l); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_ver,dev_R,dev_I,icos,isin,l); //cudaCheckError("ising",i); } for(int i=0;i<nqubits.getValue();i++) { hipLaunchKernelGGL(( Uk_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,dev_R,dev_I,bx2,by2,bz2,kcos,ksin,l); //cudaCheckError("kick",i); } for(int i=0;i<l;i++) { finalstate(i)=std::complex<double>(sumdxR[i],sumdxI[i]); zerostate(i)=std::complex<double>(inR[i],inI[i]); } res=std::norm(itpp::dot(itpp::conj(zerostate),finalstate)); cout<<sqrt(res)/nqubits.getValue()<<endl; } } // }}} if (option=="correlation_obsz") { // {{{ int xlen=x.getValue(); // int ylen=y.getValue(); // cout<<square<<endl; double *sumdxR=new double[l]; double *sumdxI=new 
double[l]; double *dev_sumdxR; double *dev_sumdxI; hipMalloc((void**)&dev_sumdxR,l*sizeof(double)); hipMalloc((void**)&dev_sumdxI,l*sizeof(double)); double *inR=new double[l]; double *inI=new double[l]; double *dev_inR; double *dev_inI; hipMalloc((void**)&dev_inR,l*sizeof(double)); hipMalloc((void**)&dev_inI,l*sizeof(double)); double icos=cos(ising.getValue()); double isin=sin(ising.getValue()); double theta=sqrt(bx.getValue()*bx.getValue()+by.getValue()*by.getValue()+bz.getValue()*bz.getValue()); double kcos=cos(theta); double ksin=sin(theta); if(theta==0) { theta=1.; } double bx2=bx.getValue()/theta; double by2=by.getValue()/theta; double bz2=bz.getValue()/theta; itpp::cvec finalstate(l); itpp::cvec zerostate(l); double res; // double res,norm; int i_hor,i_ver; hipLaunchKernelGGL(( sumsigma_z), dim3(numblocks),dim3(numthreads), 0, 0, dev_R,dev_I,dev_inR,dev_inI,nqubits.getValue(),l); //cudaCheckError("sum_dx",1); for(int n=0;n<numt.getValue();n++) { //se aplica M hipLaunchKernelGGL(( sumsigma_z), dim3(numblocks),dim3(numthreads), 0, 0, dev_R,dev_I,dev_sumdxR,dev_sumdxI,nqubits.getValue(),l); hipMemcpy(inR,dev_inR,l*sizeof(double),hipMemcpyDeviceToHost); hipMemcpy(inI,dev_inI,l*sizeof(double),hipMemcpyDeviceToHost); hipMemcpy(sumdxR,dev_sumdxR,l*sizeof(double),hipMemcpyDeviceToHost); hipMemcpy(sumdxI,dev_sumdxI,l*sizeof(double),hipMemcpyDeviceToHost); //se aplica U a in for(int i=0;i<nqubits.getValue();i++) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_hor,dev_inR,dev_inI,icos,isin,l); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_ver,dev_inR,dev_inI,icos,isin,l); //cudaCheckError("ising",i); } for(int i=0;i<nqubits.getValue();i++) { hipLaunchKernelGGL(( Uk_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,dev_inR,dev_inI,bx2,by2,bz2,kcos,ksin,l); //cudaCheckError("kick",i); } //se aplica la U for(int i=0;i<nqubits.getValue();i++) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_hor,dev_R,dev_I,icos,isin,l); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_ver,dev_R,dev_I,icos,isin,l); //cudaCheckError("ising",i); } for(int i=0;i<nqubits.getValue();i++) { hipLaunchKernelGGL(( Uk_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,dev_R,dev_I,bx2,by2,bz2,kcos,ksin,l); //cudaCheckError("kick",i); } for(int i=0;i<l;i++) { finalstate(i)=std::complex<double>(sumdxR[i],sumdxI[i]); zerostate(i)=std::complex<double>(inR[i],inI[i]); } res=std::norm(itpp::dot(itpp::conj(zerostate),finalstate)); cout<<sqrt(res)/nqubits.getValue()<<endl; } } // }}} if (option=="correlation_obsy") { // {{{ int xlen=x.getValue(); // int ylen=y.getValue(); // cout<<square<<endl; double *sumdxR=new double[l]; double *sumdxI=new double[l]; double *dev_sumdxR; double *dev_sumdxI; hipMalloc((void**)&dev_sumdxR,l*sizeof(double)); hipMalloc((void**)&dev_sumdxI,l*sizeof(double)); double *inR=new double[l]; double *inI=new double[l]; double *dev_inR; double *dev_inI; hipMalloc((void**)&dev_inR,l*sizeof(double)); hipMalloc((void**)&dev_inI,l*sizeof(double)); double icos=cos(ising.getValue()); double isin=sin(ising.getValue()); double theta=sqrt(bx.getValue()*bx.getValue()+by.getValue()*by.getValue()+bz.getValue()*bz.getValue()); double kcos=cos(theta); double ksin=sin(theta); if(theta==0) { theta=1.; } double bx2=bx.getValue()/theta; double by2=by.getValue()/theta; double 
bz2=bz.getValue()/theta; itpp::cvec finalstate(l); itpp::cvec zerostate(l); double res; // double res,norm; int i_hor,i_ver; hipLaunchKernelGGL(( sigma_xsigma_y), dim3(numblocks),dim3(numthreads), 0, 0, dev_R,dev_I,dev_sumdxR,dev_sumdxI,1,3,l); //cudaCheckError("sum_dx",1); for(int n=0;n<numt.getValue();n++) { //se aplica M hipLaunchKernelGGL(( sigma_xsigma_y), dim3(numblocks),dim3(numthreads), 0, 0, dev_R,dev_I,dev_sumdxR,dev_sumdxI,1,3,l); hipMemcpy(inR,dev_inR,l*sizeof(double),hipMemcpyDeviceToHost); hipMemcpy(inI,dev_inI,l*sizeof(double),hipMemcpyDeviceToHost); hipMemcpy(sumdxR,dev_sumdxR,l*sizeof(double),hipMemcpyDeviceToHost); hipMemcpy(sumdxI,dev_sumdxI,l*sizeof(double),hipMemcpyDeviceToHost); //se aplica U a in for(int i=0;i<nqubits.getValue();i++) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_hor,dev_inR,dev_inI,icos,isin,l); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_ver,dev_inR,dev_inI,icos,isin,l); //cudaCheckError("ising",i); } for(int i=0;i<nqubits.getValue();i++) { hipLaunchKernelGGL(( Uk_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,dev_inR,dev_inI,bx2,by2,bz2,kcos,ksin,l); //cudaCheckError("kick",i); } //se aplica la U for(int i=0;i<nqubits.getValue();i++) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_hor,dev_R,dev_I,icos,isin,l); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_ver,dev_R,dev_I,icos,isin,l); //cudaCheckError("ising",i); } for(int i=0;i<nqubits.getValue();i++) { hipLaunchKernelGGL(( Uk_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,dev_R,dev_I,bx2,by2,bz2,kcos,ksin,l); //cudaCheckError("kick",i); } for(int i=0;i<l;i++) { finalstate(i)=std::complex<double>(sumdxR[i],sumdxI[i]); zerostate(i)=std::complex<double>(inR[i],inI[i]); } res=std::norm(itpp::dot(itpp::conj(zerostate),finalstate)); cout<<sqrt(res)/nqubits.getValue()<<endl; } } // }}} if (option=="color_map2d_no") { // {{{ int xlen=x.getValue(); // cout<<square<<endl; double *sumdxR=new double[l]; double *sumdxI=new double[l]; double *dev_sumdxR; double *dev_sumdxI; hipMalloc((void**)&dev_sumdxR,l*sizeof(double)); hipMalloc((void**)&dev_sumdxI,l*sizeof(double)); double icos=cos(ising.getValue()); double isin=sin(ising.getValue()); itpp::cvec finalstate(l); itpp::cvec zerostate(l); double res; int i_hor,i_ver; double pass; int tgo,tback,cont; hipLaunchKernelGGL(( sumsigma_x), dim3(numblocks),dim3(numthreads), 0, 0, dev_R,dev_I,dev_sumdxR,dev_sumdxI,nqubits.getValue(),l); hipMemcpy(sumdxR,dev_sumdxR,l*sizeof(double),hipMemcpyDeviceToHost); hipMemcpy(sumdxI,dev_sumdxI,l*sizeof(double),hipMemcpyDeviceToHost); for(int i=0;i<l;i++) { zerostate(i)=std::complex<double>(sumdxR[i],sumdxI[i]); } for(double bxi=0.0;bxi<1.5;bxi+=0.05) { for(double bzi=0.0;bzi<1.5;bzi+=0.05) { pass=10.; cont=3; res=0.; double theta=sqrt((bxi*bxi)+(bzi*bzi)); double kcos=cos(theta); double ksin=sin(theta); if(theta==0) { theta=1.; } double bx2=bxi/theta; double by2=0; double bz2=bzi/theta; hipMemcpy(dev_R,R,l*sizeof(double),hipMemcpyHostToDevice); hipMemcpy(dev_I,I,l*sizeof(double),hipMemcpyHostToDevice); tgo=70; tback=70; while(abs(pass-res)>0.001) { // cout<<"pass "<<abs(pass-res)<<endl; pass=res; for(int n=0;n<tgo;n++) { for(int i=0;i<nqubits.getValue();i++) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); hipLaunchKernelGGL(( Ui_kernel), 
dim3(numblocks),dim3(numthreads), 0, 0, i,i_hor,dev_R,dev_I,icos,isin,l); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_ver,dev_R,dev_I,icos,isin,l); //cudaCheckError("ising",i); } for(int i=0;i<nqubits.getValue();i++) { hipLaunchKernelGGL(( Uk_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,dev_R,dev_I,bx2,by2,bz2,kcos,ksin,l); //cudaCheckError("kick",i); } } //se aplica M hipLaunchKernelGGL(( sumsigma_x), dim3(numblocks),dim3(numthreads), 0, 0, dev_R,dev_I,dev_sumdxR,dev_sumdxI,nqubits.getValue(),l); //cudaCheckError("kick",00); //se aplica U^-1 for(int n=0;n<tback;n++) { for(int i=0;i<nqubits.getValue();i++) { hipLaunchKernelGGL(( Uk_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,dev_sumdxR,dev_sumdxI,-1*bx2,-1*by2,-1*bz2,kcos,ksin,l); // cudaCheckError("kick",i); } for(int i=0;i<nqubits.getValue();i++) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); //cout << i << " " << i_hor << " " << i_ver << endl; hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_hor,dev_sumdxR,dev_sumdxI,icos,-1*isin,l); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_ver,dev_sumdxR,dev_sumdxI,icos,-1*isin,l); //cudaCheckError("ising",i); } } hipMemcpy(sumdxR,dev_sumdxR,l*sizeof(double),hipMemcpyDeviceToHost); hipMemcpy(sumdxI,dev_sumdxI,l*sizeof(double),hipMemcpyDeviceToHost); for(int i=0;i<l;i++) { finalstate(i)=std::complex<double>(sumdxR[i],sumdxI[i]); } res=sqrt(std::norm(itpp::dot(itpp::conj(zerostate),finalstate)))/nqubits.getValue(); tgo=cont; tback=70+cont; cont++; } cout<< bxi << " " << bzi <<" "<<res<<" "<<cont-3<<endl; } } } // }}} if (option=="color_map2d_stdev") { // {{{ int xlen=x.getValue(); // cout<<square<<endl; double *sumdxR=new double[l]; double *sumdxI=new double[l]; double *dev_sumdxR; double *dev_sumdxI; hipMalloc((void**)&dev_sumdxR,l*sizeof(double)); hipMalloc((void**)&dev_sumdxI,l*sizeof(double)); double icos=cos(ising.getValue()); double isin=sin(ising.getValue()); itpp::cvec finalstate(l); itpp::cvec zerostate(l); itpp::vec correlations(30); double res; int i_hor,i_ver; hipLaunchKernelGGL(( sumsigma_x), dim3(numblocks),dim3(numthreads), 0, 0, dev_R,dev_I,dev_sumdxR,dev_sumdxI,nqubits.getValue(),l); hipMemcpy(sumdxR,dev_sumdxR,l*sizeof(double),hipMemcpyDeviceToHost); hipMemcpy(sumdxI,dev_sumdxI,l*sizeof(double),hipMemcpyDeviceToHost); for(int i=0;i<l;i++) { zerostate(i)=std::complex<double>(sumdxR[i],sumdxI[i]); } for(double bxi=0.0;bxi<=1.5;bxi+=0.01) { for(double bzi=0.0;bzi<=0;bzi+=0.05) { double theta=sqrt((bxi*bxi)+(bzi*bzi)); double kcos=cos(theta); double ksin=sin(theta); if(theta==0) { theta=1.; } double bx2=bxi/theta; double by2=0; double bz2=bzi/theta; hipMemcpy(dev_R,R,l*sizeof(double),hipMemcpyHostToDevice); hipMemcpy(dev_I,I,l*sizeof(double),hipMemcpyHostToDevice); for(int n=0;n<70;n++) { for(int i=0;i<nqubits.getValue();i++) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_hor,dev_R,dev_I,icos,isin,l); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_ver,dev_R,dev_I,icos,isin,l); //cudaCheckError("ising",i); } for(int i=0;i<nqubits.getValue();i++) { hipLaunchKernelGGL(( Uk_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,dev_R,dev_I,bx2,by2,bz2,kcos,ksin,l); //cudaCheckError("kick",i); } } for(int in=0;in<30;in++) { for(int i=0;i<nqubits.getValue();i++) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); 
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_hor,dev_R,dev_I,icos,isin,l); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_ver,dev_R,dev_I,icos,isin,l); //cudaCheckError("ising",i); } for(int i=0;i<nqubits.getValue();i++) { hipLaunchKernelGGL(( Uk_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,dev_R,dev_I,bx2,by2,bz2,kcos,ksin,l); //cudaCheckError("kick",i); } hipLaunchKernelGGL(( sumsigma_x), dim3(numblocks),dim3(numthreads), 0, 0, dev_R,dev_I,dev_sumdxR,dev_sumdxI,nqubits.getValue(),l); for(int back=0;back<71+in;back++) { for(int i=0;i<nqubits.getValue();i++) { hipLaunchKernelGGL(( Uk_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,dev_sumdxR,dev_sumdxI,-1*bx2,-1*by2,-1*bz2,kcos,ksin,l); // cudaCheckError("kick",i); } for(int i=0;i<nqubits.getValue();i++) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); //cout << i << " " << i_hor << " " << i_ver << endl; hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_hor,dev_sumdxR,dev_sumdxI,icos,-1*isin,l); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_ver,dev_sumdxR,dev_sumdxI,icos,-1*isin,l); //cudaCheckError("ising",i); } } hipMemcpy(sumdxR,dev_sumdxR,l*sizeof(double),hipMemcpyDeviceToHost); hipMemcpy(sumdxI,dev_sumdxI,l*sizeof(double),hipMemcpyDeviceToHost); for(int i=0;i<l;i++) { finalstate(i)=std::complex<double>(sumdxR[i],sumdxI[i]); } res=sqrt(std::norm(itpp::dot(itpp::conj(zerostate),finalstate)))/nqubits.getValue(); correlations(in)=res; } cout<<bxi<<" "<<bzi<<" "<<itpp::mean(correlations)<<" "<<std::sqrt(itpp::variance(correlations))<<endl; } } } // }}} if (option=="color_map2d_stdev_fast") { // {{{ int xlen=x.getValue(); // cout<<square<<endl; double *sumdxR=new double[l]; double *sumdxI=new double[l]; double *dev_sumdxR; double *dev_sumdxI; hipMalloc((void**)&dev_sumdxR,l*sizeof(double)); hipMalloc((void**)&dev_sumdxI,l*sizeof(double)); double *inR=new double[l]; double *inI=new double[l]; double *dev_inR; double *dev_inI; hipMalloc((void**)&dev_inR,l*sizeof(double)); hipMalloc((void**)&dev_inI,l*sizeof(double)); itpp::cvec finalstate(l); itpp::cvec zerostate(l); itpp::vec correlations(30); double res,bx2,by2,bz2,kcos,ksin,icos,isin,ising; int i_hor,i_ver; itpp::vec b_obs(3); b_obs(0)=1.; b_obs(1)=0.; b_obs(2)=0.; double cos_obs,sin_obs,bx_obs,by_obs,bz_obs; set_parameters(b_obs,cos_obs,sin_obs,bx_obs,by_obs,bz_obs); double bxi,bzi; for(int bxii=0;bxii<=360;bxii+=1) { for(int bzii=0;bzii<=180;bzii+=1) { bxi=bxii*itpp::pi/720.; bzi=bzii*itpp::pi/720.; itpp::vec b(3); b(0)=bxi; b(1)=0.; b(2)=0.; ising=bzi; set_parameters(ising,b,icos,isin,kcos,ksin,bx2,by2,bz2); hipMemcpy(dev_R,R,l*sizeof(double),hipMemcpyHostToDevice); hipMemcpy(dev_I,I,l*sizeof(double),hipMemcpyHostToDevice); //OBSERBABLE hipLaunchKernelGGL(( sumsigma_x), dim3(numblocks),dim3(numthreads), 0, 0, dev_R,dev_I,dev_inR,dev_inI,nqubits.getValue(),l); // hipLaunchKernelGGL(( devcpy), dim3(numblocks),dim3(numthreads), 0, 0, l,dev_R,dev_I,dev_inR,dev_inI); // for(int i=0;i<nqubits.getValue();i++) { // hipLaunchKernelGGL(( Uk_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,dev_inR,dev_inI,bx_obs,by_obs,bz_obs,cos_obs,sin_obs,l); // //cudaCheckError("kick",i); // } // for(int n=0;n<70;n++) { //se aplica U a in for(int i=0;i<nqubits.getValue();i++) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_hor,dev_inR,dev_inI,icos,isin,l); 
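// i_hor = right neighbour of site i with periodic wrap inside its row of length xlen;
// i_ver = site one row below (i+xlen), wrapping around the lattice, so the two Ising
// couplings launched next cover every horizontal and vertical bond of the 2D torus once.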
hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_ver,dev_inR,dev_inI,icos,isin,l); //Ui_kernel<<<numblocks,numthreads>>>(i,(i+1)%nqubits.getValue(),dev_inR,dev_inI,icos,isin,l); cudaCheckError("ising",i); } for(int i=0;i<nqubits.getValue();i++) { hipLaunchKernelGGL(( Uk_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,dev_inR,dev_inI,bx2,by2,bz2,kcos,ksin,l); //cudaCheckError("kick",i); } //se aplica la U for(int i=0;i<nqubits.getValue();i++) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_hor,dev_R,dev_I,icos,isin,l); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_ver,dev_R,dev_I,icos,isin,l); //Ui_kernel<<<numblocks,numthreads>>>(i,(i+1)%nqubits.getValue(),dev_R,dev_I,icos,isin,l); //cudaCheckError("ising",i); } for(int i=0;i<nqubits.getValue();i++) { hipLaunchKernelGGL(( Uk_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,dev_R,dev_I,bx2,by2,bz2,kcos,ksin,l); //cudaCheckError("kick",i); } } for(int in=0;in<30;in++) { hipLaunchKernelGGL(( sumsigma_x), dim3(numblocks),dim3(numthreads), 0, 0, dev_R,dev_I,dev_sumdxR,dev_sumdxI,nqubits.getValue(),l); //OBSERVABLE // devcpy<<<numblocks,numthreads>>>(l,dev_R,dev_I,dev_sumdxR,dev_sumdxI); // for(int i=0;i<nqubits.getValue();i++) { // hipLaunchKernelGGL(( Uk_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,dev_sumdxR,dev_sumdxI,bx_obs,by_obs,bz_obs,cos_obs,sin_obs,l); // //cudaCheckError("kick",i); // } hipMemcpy(sumdxR,dev_sumdxR,l*sizeof(double),hipMemcpyDeviceToHost); hipMemcpy(sumdxI,dev_sumdxI,l*sizeof(double),hipMemcpyDeviceToHost); hipMemcpy(inR,dev_inR,l*sizeof(double),hipMemcpyDeviceToHost); hipMemcpy(inI,dev_inI,l*sizeof(double),hipMemcpyDeviceToHost); //se aplica U a in for(int i=0;i<nqubits.getValue();i++) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_hor,dev_inR,dev_inI,icos,isin,l); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_ver,dev_inR,dev_inI,icos,isin,l); //Ui_kernel<<<numblocks,numthreads>>>(i,(i+1)%nqubits.getValue(),dev_inR,dev_inI,icos,isin,l); //cudaCheckError("ising",i); } for(int i=0;i<nqubits.getValue();i++) { hipLaunchKernelGGL(( Uk_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,dev_inR,dev_inI,bx2,by2,bz2,kcos,ksin,l); //cudaCheckError("kick",i); } //se aplica la U for(int i=0;i<nqubits.getValue();i++) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_hor,dev_R,dev_I,icos,isin,l); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_ver,dev_R,dev_I,icos,isin,l); //Ui_kernel<<<numblocks,numthreads>>>(i,(i+1)%nqubits.getValue(),dev_R,dev_I,icos,isin,l); //cudaCheckError("ising",i); } for(int i=0;i<nqubits.getValue();i++) { hipLaunchKernelGGL(( Uk_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,dev_R,dev_I,bx2,by2,bz2,kcos,ksin,l); //cudaCheckError("kick",i); } for(int i=0;i<l;i++) { finalstate(i)=std::complex<double>(sumdxR[i],sumdxI[i]); zerostate(i)=std::complex<double>(inR[i],inI[i]); } res=sqrt(std::norm(itpp::dot(itpp::conj(zerostate),finalstate)))/nqubits.getValue(); //res=sqrt(std::norm(itpp::dot(itpp::conj(zerostate),finalstate))); correlations(in)=res; } cout<<bxi<<" "<<bzi<<" "<<itpp::mean(correlations)<<" "<<std::sqrt(itpp::variance(correlations))<<endl; } } } // }}} if 
(option=="color_map2d_stdev_block") { // {{{ int xlen=x.getValue(); itpp::cmat eigenvectors1=evcuda::invariant_vectors(nqubits.getValue(),x.getValue(),1,1,0); itpp::cmat eigenvectors2=evcuda::invariant_vectors(nqubits.getValue(),x.getValue(),1,2,0); int rcont1=eigenvectors1.rows(); int rcont2=eigenvectors2.rows(); itpp::cvec small_state=itppextmath::RandomState(rcont1); itpp::cvec state = itpp::transpose(eigenvectors1)*small_state; small_state=itppextmath::RandomState(rcont2); state=state+itpp::transpose(eigenvectors2)*small_state; state=state/itpp::norm(state); evcuda::itpp2cuda(state,dev_R,dev_I); // cout<<square<<endl; double *sumdxR=new double[l]; double *sumdxI=new double[l]; double *dev_sumdxR; double *dev_sumdxI; hipMalloc((void**)&dev_sumdxR,l*sizeof(double)); hipMalloc((void**)&dev_sumdxI,l*sizeof(double)); double *inR=new double[l]; double *inI=new double[l]; double *dev_inR; double *dev_inI; hipMalloc((void**)&dev_inR,l*sizeof(double)); hipMalloc((void**)&dev_inI,l*sizeof(double)); itpp::cvec finalstate(l); itpp::cvec zerostate(l); itpp::vec correlations(30); double res,bx2,by2,bz2,kcos,ksin,icos,isin,ising; int i_hor,i_ver; itpp::vec b_obs(3); b_obs(0)=1./sqrt(3); b_obs(1)=1./sqrt(3); b_obs(2)=1./sqrt(3); double cos_obs,sin_obs,bx_obs,by_obs,bz_obs; set_parameters(b_obs,cos_obs,sin_obs,bx_obs,by_obs,bz_obs); for(double bxi=0.0;bxi<=itpp::pi/2;bxi+=itpp::pi/80) { for(double bzi=0.0;bzi<=itpp::pi/2;bzi+=itpp::pi/80) { itpp::vec b(3); b(0)=bxi; b(1)=0.; b(2)=0.; ising=bzi; set_parameters(ising,b,icos,isin,kcos,ksin,bx2,by2,bz2); evcuda::itpp2cuda(state,dev_R,dev_I); //OBSERBABLE hipLaunchKernelGGL(( sumsigma_x), dim3(numblocks),dim3(numthreads), 0, 0, dev_R,dev_I,dev_inR,dev_inI,nqubits.getValue(),l); //devcpy<<<numblocks,numthreads>>>(l,dev_R,dev_I,dev_inR,dev_inI); // for(int i=0;i<nqubits.getValue();i++) { // hipLaunchKernelGGL(( Uk_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,dev_inR,dev_inI,bx_obs,by_obs,bz_obs,cos_obs,sin_obs,l); // //cudaCheckError("kick",i); // } for(int n=0;n<70;n++) { //se aplica U a in for(int i=0;i<nqubits.getValue();i++) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_hor,dev_inR,dev_inI,icos,isin,l); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_ver,dev_inR,dev_inI,icos,isin,l); //CHAIN //Ui_kernel<<<numblocks,numthreads>>>(i,(i+1)%nqubits.getValue(),dev_inR,dev_inI,icos,isin,l); cudaCheckError("ising",i); } for(int i=0;i<nqubits.getValue();i++) { hipLaunchKernelGGL(( Uk_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,dev_inR,dev_inI,bx2,by2,bz2,kcos,ksin,l); //cudaCheckError("kick",i); } //se aplica la U for(int i=0;i<nqubits.getValue();i++) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_hor,dev_R,dev_I,icos,isin,l); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_ver,dev_R,dev_I,icos,isin,l); //CHAIN //Ui_kernel<<<numblocks,numthreads>>>(i,(i+1)%nqubits.getValue(),dev_R,dev_I,icos,isin,l); //cudaCheckError("ising",i); } for(int i=0;i<nqubits.getValue();i++) { hipLaunchKernelGGL(( Uk_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,dev_R,dev_I,bx2,by2,bz2,kcos,ksin,l); //cudaCheckError("kick",i); } } for(int in=0;in<1;in++) { hipLaunchKernelGGL(( sumsigma_x), dim3(numblocks),dim3(numthreads), 0, 0, dev_R,dev_I,dev_sumdxR,dev_sumdxI,nqubits.getValue(),l); //OBSERVABLE // 
devcpy<<<numblocks,numthreads>>>(l,dev_R,dev_I,dev_sumdxR,dev_sumdxI); // for(int i=0;i<nqubits.getValue();i++) { // hipLaunchKernelGGL(( Uk_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,dev_sumdxR,dev_sumdxI,bx_obs,by_obs,bz_obs,cos_obs,sin_obs,l); // //cudaCheckError("kick",i); // } hipMemcpy(sumdxR,dev_sumdxR,l*sizeof(double),hipMemcpyDeviceToHost); hipMemcpy(sumdxI,dev_sumdxI,l*sizeof(double),hipMemcpyDeviceToHost); hipMemcpy(inR,dev_inR,l*sizeof(double),hipMemcpyDeviceToHost); hipMemcpy(inI,dev_inI,l*sizeof(double),hipMemcpyDeviceToHost); //se aplica U a in for(int i=0;i<nqubits.getValue();i++) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_hor,dev_inR,dev_inI,icos,isin,l); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_ver,dev_inR,dev_inI,icos,isin,l); //CHAIN //Ui_kernel<<<numblocks,numthreads>>>(i,(i+1)%nqubits.getValue(),dev_inR,dev_inI,icos,isin,l); //cudaCheckError("ising",i); } for(int i=0;i<nqubits.getValue();i++) { hipLaunchKernelGGL(( Uk_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,dev_inR,dev_inI,bx2,by2,bz2,kcos,ksin,l); //cudaCheckError("kick",i); } //se aplica la U for(int i=0;i<nqubits.getValue();i++) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_hor,dev_R,dev_I,icos,isin,l); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_ver,dev_R,dev_I,icos,isin,l); //CHAIN hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,(i+1)%nqubits.getValue(),dev_R,dev_I,icos,isin,l); //cudaCheckError("ising",i); } for(int i=0;i<nqubits.getValue();i++) { hipLaunchKernelGGL(( Uk_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,dev_R,dev_I,bx2,by2,bz2,kcos,ksin,l); //cudaCheckError("kick",i); } for(int i=0;i<l;i++) { finalstate(i)=std::complex<double>(sumdxR[i],sumdxI[i]); zerostate(i)=std::complex<double>(inR[i],inI[i]); } res=sqrt(std::norm(itpp::dot(itpp::conj(zerostate),finalstate)))/nqubits.getValue(); correlations(in)=res; } cout<<bxi<<" "<<bzi<<" "<<itpp::mean(correlations)<<" "<<std::sqrt(itpp::variance(correlations))<<endl; } } } // }}} if (option=="color_map2d_stdev_fast_obsz") { // {{{ int xlen=x.getValue(); // cout<<square<<endl; double *sumdxR=new double[l]; double *sumdxI=new double[l]; double *dev_sumdxR; double *dev_sumdxI; hipMalloc((void**)&dev_sumdxR,l*sizeof(double)); hipMalloc((void**)&dev_sumdxI,l*sizeof(double)); double *inR=new double[l]; double *inI=new double[l]; double *dev_inR; double *dev_inI; hipMalloc((void**)&dev_inR,l*sizeof(double)); hipMalloc((void**)&dev_inI,l*sizeof(double)); itpp::cvec finalstate(l); itpp::cvec zerostate(l); itpp::vec correlations(30); double res,bx2,by2,bz2,kcos,ksin,ising,icos,isin; int i_hor,i_ver; for(double bxi=0.;bxi<=3.2;bxi+=0.05) { for(double bzi=0.;bzi<=3.2;bzi+=0.05) { itpp::vec b(3); b(0)=bxi; b(1)=0.; b(2)=0.; ising=bzi; set_parameters(ising,b,icos,isin,kcos,ksin,bx2,by2,bz2); hipMemcpy(dev_R,R,l*sizeof(double),hipMemcpyHostToDevice); hipMemcpy(dev_I,I,l*sizeof(double),hipMemcpyHostToDevice); hipLaunchKernelGGL(( sumsigma_z), dim3(numblocks),dim3(numthreads), 0, 0, dev_R,dev_I,dev_inR,dev_inI,nqubits.getValue(),l); for(int n=0;n<70;n++) { //se aplica U a in for(int i=0;i<nqubits.getValue();i++) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, 
i,i_hor,dev_inR,dev_inI,icos,isin,l); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_ver,dev_inR,dev_inI,icos,isin,l); cudaCheckError("ising",i); } for(int i=0;i<nqubits.getValue();i++) { hipLaunchKernelGGL(( Uk_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,dev_inR,dev_inI,bx2,by2,bz2,kcos,ksin,l); //cudaCheckError("kick",i); } //se aplica la U for(int i=0;i<nqubits.getValue();i++) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_hor,dev_R,dev_I,icos,isin,l); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_ver,dev_R,dev_I,icos,isin,l); //cudaCheckError("ising",i); } for(int i=0;i<nqubits.getValue();i++) { hipLaunchKernelGGL(( Uk_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,dev_R,dev_I,bx2,by2,bz2,kcos,ksin,l); //cudaCheckError("kick",i); } } for(int in=0;in<30;in++) { hipLaunchKernelGGL(( sumsigma_z), dim3(numblocks),dim3(numthreads), 0, 0, dev_R,dev_I,dev_sumdxR,dev_sumdxI,nqubits.getValue(),l); hipMemcpy(sumdxR,dev_sumdxR,l*sizeof(double),hipMemcpyDeviceToHost); hipMemcpy(sumdxI,dev_sumdxI,l*sizeof(double),hipMemcpyDeviceToHost); hipMemcpy(inR,dev_inR,l*sizeof(double),hipMemcpyDeviceToHost); hipMemcpy(inI,dev_inI,l*sizeof(double),hipMemcpyDeviceToHost); //se aplica U a in for(int i=0;i<nqubits.getValue();i++) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_hor,dev_inR,dev_inI,icos,isin,l); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_ver,dev_inR,dev_inI,icos,isin,l); //cudaCheckError("ising",i); } for(int i=0;i<nqubits.getValue();i++) { hipLaunchKernelGGL(( Uk_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,dev_inR,dev_inI,bx2,by2,bz2,kcos,ksin,l); //cudaCheckError("kick",i); } //se aplica la U for(int i=0;i<nqubits.getValue();i++) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_hor,dev_R,dev_I,icos,isin,l); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_ver,dev_R,dev_I,icos,isin,l); //cudaCheckError("ising",i); } for(int i=0;i<nqubits.getValue();i++) { hipLaunchKernelGGL(( Uk_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,dev_R,dev_I,bx2,by2,bz2,kcos,ksin,l); //cudaCheckError("kick",i); } for(int i=0;i<l;i++) { finalstate(i)=std::complex<double>(sumdxR[i],sumdxI[i]); zerostate(i)=std::complex<double>(inR[i],inI[i]); } res=sqrt(std::norm(itpp::dot(itpp::conj(zerostate),finalstate)))/nqubits.getValue(); correlations(in)=res; } cout<<bxi<<" "<<bzi<<" "<<itpp::mean(correlations)<<" "<<std::sqrt(itpp::variance(correlations))<<endl; } } } // }}} if (option=="color_map2d_stdev_fast_obsy") { // {{{ int xlen=x.getValue(); // cout<<square<<endl; double *sumdxR=new double[l]; double *sumdxI=new double[l]; double *dev_sumdxR; double *dev_sumdxI; hipMalloc((void**)&dev_sumdxR,l*sizeof(double)); hipMalloc((void**)&dev_sumdxI,l*sizeof(double)); double *inR=new double[l]; double *inI=new double[l]; double *dev_inR; double *dev_inI; hipMalloc((void**)&dev_inR,l*sizeof(double)); hipMalloc((void**)&dev_inI,l*sizeof(double)); itpp::cvec finalstate(l); itpp::cvec zerostate(l); itpp::vec correlations(30); double res,bx2,by2,bz2,kcos,ksin,ising,icos,isin; int i_hor,i_ver; for(double bxi=0.;bxi<=3.2;bxi+=0.05) { for(double bzi=0.;bzi<=3.2;bzi+=0.05) { itpp::vec b(3); b(0)=bxi; b(1)=0.; 
b(2)=0.; ising=bzi; set_parameters(ising,b,icos,isin,kcos,ksin,bx2,by2,bz2); hipMemcpy(dev_R,R,l*sizeof(double),hipMemcpyHostToDevice); hipMemcpy(dev_I,I,l*sizeof(double),hipMemcpyHostToDevice); hipLaunchKernelGGL(( sumsigma_y), dim3(numblocks),dim3(numthreads), 0, 0, dev_R,dev_I,dev_inR,dev_inI,nqubits.getValue(),l); for(int n=0;n<70;n++) { //se aplica U a in for(int i=0;i<nqubits.getValue();i++) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_hor,dev_inR,dev_inI,icos,isin,l); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_ver,dev_inR,dev_inI,icos,isin,l); cudaCheckError("ising",i); } for(int i=0;i<nqubits.getValue();i++) { hipLaunchKernelGGL(( Uk_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,dev_inR,dev_inI,bx2,by2,bz2,kcos,ksin,l); //cudaCheckError("kick",i); } //se aplica la U for(int i=0;i<nqubits.getValue();i++) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_hor,dev_R,dev_I,icos,isin,l); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_ver,dev_R,dev_I,icos,isin,l); //cudaCheckError("ising",i); } for(int i=0;i<nqubits.getValue();i++) { hipLaunchKernelGGL(( Uk_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,dev_R,dev_I,bx2,by2,bz2,kcos,ksin,l); //cudaCheckError("kick",i); } } for(int in=0;in<30;in++) { hipLaunchKernelGGL(( sumsigma_y), dim3(numblocks),dim3(numthreads), 0, 0, dev_R,dev_I,dev_sumdxR,dev_sumdxI,nqubits.getValue(),l); hipMemcpy(sumdxR,dev_sumdxR,l*sizeof(double),hipMemcpyDeviceToHost); hipMemcpy(sumdxI,dev_sumdxI,l*sizeof(double),hipMemcpyDeviceToHost); hipMemcpy(inR,dev_inR,l*sizeof(double),hipMemcpyDeviceToHost); hipMemcpy(inI,dev_inI,l*sizeof(double),hipMemcpyDeviceToHost); //se aplica U a in for(int i=0;i<nqubits.getValue();i++) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_hor,dev_inR,dev_inI,icos,isin,l); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_ver,dev_inR,dev_inI,icos,isin,l); //cudaCheckError("ising",i); } for(int i=0;i<nqubits.getValue();i++) { hipLaunchKernelGGL(( Uk_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,dev_inR,dev_inI,bx2,by2,bz2,kcos,ksin,l); //cudaCheckError("kick",i); } //se aplica la U for(int i=0;i<nqubits.getValue();i++) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_hor,dev_R,dev_I,icos,isin,l); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_ver,dev_R,dev_I,icos,isin,l); //cudaCheckError("ising",i); } for(int i=0;i<nqubits.getValue();i++) { hipLaunchKernelGGL(( Uk_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,dev_R,dev_I,bx2,by2,bz2,kcos,ksin,l); //cudaCheckError("kick",i); } for(int i=0;i<l;i++) { finalstate(i)=std::complex<double>(sumdxR[i],sumdxI[i]); zerostate(i)=std::complex<double>(inR[i],inI[i]); } res=sqrt(std::norm(itpp::dot(itpp::conj(zerostate),finalstate)))/nqubits.getValue(); correlations(in)=res; } cout<<bxi<<" "<<bzi<<" "<<itpp::mean(correlations)<<" "<<std::sqrt(itpp::variance(correlations))<<endl; } } } // }}} if (option=="color_map2d_stdev_fast_in70") { // {{{ int xlen=x.getValue(); // cout<<square<<endl; double *sumdxR=new double[l]; double *sumdxI=new double[l]; double *dev_sumdxR; double 
*dev_sumdxI; hipMalloc((void**)&dev_sumdxR,l*sizeof(double)); hipMalloc((void**)&dev_sumdxI,l*sizeof(double)); double *inR=new double[l]; double *inI=new double[l]; double *dev_inR; double *dev_inI; hipMalloc((void**)&dev_inR,l*sizeof(double)); hipMalloc((void**)&dev_inI,l*sizeof(double)); itpp::cvec finalstate(l); itpp::cvec zerostate(l); itpp::vec correlations(1); double res,bx2,by2,bz2,kcos,ksin,ising,icos,isin; for(double bxi=0.0;bxi<=3.2;bxi+=0.05) { for(double bzi=0.0;bzi<=3.2;bzi+=0.05) { itpp::vec b(3); b(0)=bxi; b(1)=0.; b(2)=0.; ising=bzi; set_parameters(ising,b,icos,isin,kcos,ksin,bx2,by2,bz2); hipMemcpy(dev_R,R,l*sizeof(double),hipMemcpyHostToDevice); hipMemcpy(dev_I,I,l*sizeof(double),hipMemcpyHostToDevice); hipLaunchKernelGGL(( sumsigma_x), dim3(numblocks),dim3(numthreads), 0, 0, dev_R,dev_I,dev_inR,dev_inI,nqubits.getValue(),l); for(int n=0;n<10;n++) { //se aplica U a in for(int i=0;i<nqubits.getValue();i++) { hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,(i+1)%nqubits.getValue(),dev_inR,dev_inI,icos,isin,l); //cudaCheckError("ising",i); } for(int i=0;i<nqubits.getValue();i++) { hipLaunchKernelGGL(( Uk_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,dev_inR,dev_inI,bx2,by2,bz2,kcos,ksin,l); //cudaCheckError("kick",i); } //se aplica la U for(int i=0;i<nqubits.getValue();i++) { hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,(i+1)%nqubits.getValue(),dev_R,dev_I,icos,isin,l); //cudaCheckError("ising",i); } for(int i=0;i<nqubits.getValue();i++) { hipLaunchKernelGGL(( Uk_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,dev_R,dev_I,bx2,by2,bz2,kcos,ksin,l); //cudaCheckError("kick",i); } } for(int in=0;in<1;in++) { hipLaunchKernelGGL(( sumsigma_x), dim3(numblocks),dim3(numthreads), 0, 0, dev_R,dev_I,dev_sumdxR,dev_sumdxI,nqubits.getValue(),l); hipMemcpy(sumdxR,dev_sumdxR,l*sizeof(double),hipMemcpyDeviceToHost); hipMemcpy(sumdxI,dev_sumdxI,l*sizeof(double),hipMemcpyDeviceToHost); hipMemcpy(inR,dev_inR,l*sizeof(double),hipMemcpyDeviceToHost); hipMemcpy(inI,dev_inI,l*sizeof(double),hipMemcpyDeviceToHost); //se aplica U a in for(int i=0;i<nqubits.getValue();i++) { hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,(i+1)%nqubits.getValue(),dev_inR,dev_inI,icos,isin,l); //cudaCheckError("ising",i); } for(int i=0;i<nqubits.getValue();i++) { hipLaunchKernelGGL(( Uk_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,dev_inR,dev_inI,bx2,by2,bz2,kcos,ksin,l); //cudaCheckError("kick",i); } //se aplica la U for(int i=0;i<nqubits.getValue();i++) { hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,(i+1)%nqubits.getValue(),dev_R,dev_I,icos,isin,l); //cudaCheckError("ising",i); } for(int i=0;i<nqubits.getValue();i++) { hipLaunchKernelGGL(( Uk_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,dev_R,dev_I,bx2,by2,bz2,kcos,ksin,l); //cudaCheckError("kick",i); } for(int i=0;i<l;i++) { finalstate(i)=std::complex<double>(sumdxR[i],sumdxI[i]); zerostate(i)=std::complex<double>(inR[i],inI[i]); } res=sqrt(std::norm(itpp::dot(itpp::conj(zerostate),finalstate)/(double)nqubits.getValue())); correlations(in)=res; } cout<<bxi<<" "<<bzi<<" "<<itpp::mean(correlations)<<" "<<std::sqrt(itpp::variance(correlations))<<endl; } } } // }}} if (option=="color_map2d_stdev__trotter1g_fast") { // {{{ int xlen=x.getValue(); int num_trotter=trotternum.getValue(); double delta=1./num_trotter; // cout<<square<<endl; double *sumdxR=new double[l]; double *sumdxI=new double[l]; double *dev_sumdxR; double *dev_sumdxI; 
hipMalloc((void**)&dev_sumdxR,l*sizeof(double)); hipMalloc((void**)&dev_sumdxI,l*sizeof(double)); double icos=cos(delta*ising.getValue()); double isin=sin(delta*ising.getValue()); double *inR=new double[l]; double *inI=new double[l]; double *dev_inR; double *dev_inI; hipMalloc((void**)&dev_inR,l*sizeof(double)); hipMalloc((void**)&dev_inI,l*sizeof(double)); itpp::cvec finalstate(l); itpp::cvec zerostate(l); itpp::vec correlations(30); double res; int i_hor,i_ver; for(double bxi=0.0;bxi<=1.5;bxi+=0.05) { for(double bzi=0.0;bzi<=1.5;bzi+=0.05) { double theta=delta*sqrt((bxi*bxi)+(bzi*bzi)); double kcos=cos(theta); double ksin=sin(theta); if(theta==0) { theta=1.; } double bx2=delta*bxi/theta; double by2=0; double bz2=delta*bzi/theta; hipMemcpy(dev_R,R,l*sizeof(double),hipMemcpyHostToDevice); hipMemcpy(dev_I,I,l*sizeof(double),hipMemcpyHostToDevice); hipLaunchKernelGGL(( sumsigma_x), dim3(numblocks),dim3(numthreads), 0, 0, dev_R,dev_I,dev_inR,dev_inI,nqubits.getValue(),l); for(int n=0;n<70;n++) { //se aplica U a in for(int trot=0;trot<num_trotter;trot++) { for(int i=0;i<nqubits.getValue();i++) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_hor,dev_inR,dev_inI,icos,isin,l); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_ver,dev_inR,dev_inI,icos,isin,l); //cudaCheckError("ising",i); } for(int i=0;i<nqubits.getValue();i++) { hipLaunchKernelGGL(( Uk_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,dev_inR,dev_inI,bx2,by2,bz2,kcos,ksin,l); //cudaCheckError("kick",i); } } //se aplica la U for(int trot=0;trot<num_trotter;trot++) { for(int i=0;i<nqubits.getValue();i++) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_hor,dev_R,dev_I,icos,isin,l); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_ver,dev_R,dev_I,icos,isin,l); //cudaCheckError("ising",i); } for(int i=0;i<nqubits.getValue();i++) { hipLaunchKernelGGL(( Uk_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,dev_R,dev_I,bx2,by2,bz2,kcos,ksin,l); //cudaCheckError("kick",i); } } } for(int in=0;in<30;in++) { hipLaunchKernelGGL(( sumsigma_x), dim3(numblocks),dim3(numthreads), 0, 0, dev_R,dev_I,dev_sumdxR,dev_sumdxI,nqubits.getValue(),l); hipMemcpy(sumdxR,dev_sumdxR,l*sizeof(double),hipMemcpyDeviceToHost); hipMemcpy(sumdxI,dev_sumdxI,l*sizeof(double),hipMemcpyDeviceToHost); hipMemcpy(inR,dev_inR,l*sizeof(double),hipMemcpyDeviceToHost); hipMemcpy(inI,dev_inI,l*sizeof(double),hipMemcpyDeviceToHost); //se aplica U a in for(int trot=0;trot<num_trotter;trot++) { for(int i=0;i<nqubits.getValue();i++) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_hor,dev_inR,dev_inI,icos,isin,l); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_ver,dev_inR,dev_inI,icos,isin,l); //cudaCheckError("ising",i); } for(int i=0;i<nqubits.getValue();i++) { hipLaunchKernelGGL(( Uk_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,dev_inR,dev_inI,bx2,by2,bz2,kcos,ksin,l); //cudaCheckError("kick",i); } } //se aplica la U for(int trot=0;trot<num_trotter;trot++) { for(int i=0;i<nqubits.getValue();i++) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_hor,dev_R,dev_I,icos,isin,l); hipLaunchKernelGGL(( Ui_kernel), 
dim3(numblocks),dim3(numthreads), 0, 0, i,i_ver,dev_R,dev_I,icos,isin,l); //cudaCheckError("ising",i); } for(int i=0;i<nqubits.getValue();i++) { hipLaunchKernelGGL(( Uk_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,dev_R,dev_I,bx2,by2,bz2,kcos,ksin,l); //cudaCheckError("kick",i); } } for(int i=0;i<l;i++) { finalstate(i)=std::complex<double>(sumdxR[i],sumdxI[i]); zerostate(i)=std::complex<double>(inR[i],inI[i]); } res=sqrt(std::norm(itpp::dot(itpp::conj(zerostate),finalstate)))/nqubits.getValue(); correlations(in)=res; } cout<<bxi<<" "<<bzi<<" "<<itpp::mean(correlations)<<" "<<std::sqrt(itpp::variance(correlations))<<endl; } } } // }}} if (option=="color_map2d_stdev_trotter2g") { // {{{ int xlen=x.getValue(); int num_trotter=trotternum.getValue(); double delta=1./num_trotter; // cout<<square<<endl; double *sumdxR=new double[l]; double *sumdxI=new double[l]; double *dev_sumdxR; double *dev_sumdxI; hipMalloc((void**)&dev_sumdxR,l*sizeof(double)); hipMalloc((void**)&dev_sumdxI,l*sizeof(double)); double icos=cos((delta/2)*ising.getValue()); double isin=sin((delta/2)*ising.getValue()); itpp::cvec finalstate(l); itpp::cvec zerostate(l); itpp::vec correlations(30); double res; int i_hor,i_ver; hipLaunchKernelGGL(( sumsigma_x), dim3(numblocks),dim3(numthreads), 0, 0, dev_R,dev_I,dev_sumdxR,dev_sumdxI,nqubits.getValue(),l); hipMemcpy(sumdxR,dev_sumdxR,l*sizeof(double),hipMemcpyDeviceToHost); hipMemcpy(sumdxI,dev_sumdxI,l*sizeof(double),hipMemcpyDeviceToHost); for(int i=0;i<l;i++) { zerostate(i)=std::complex<double>(sumdxR[i],sumdxI[i]); } for(double bxi=beginx.getValue();bxi<=1.5;bxi+=0.05) { for(double bzi=0.0;bzi<=1.5;bzi+=0.05) { double theta=delta*sqrt((bxi*bxi)+(bzi*bzi)); double kcos=cos(theta); double ksin=sin(theta); if(theta==0) { theta=1.; } double bx2=delta*bxi/theta; double by2=0; double bz2=delta*bzi/theta; hipMemcpy(dev_R,R,l*sizeof(double),hipMemcpyHostToDevice); hipMemcpy(dev_I,I,l*sizeof(double),hipMemcpyHostToDevice); for(int n=0;n<70;n++) { for(int it=0;it<num_trotter;it++) { for(int i=0;i<nqubits.getValue();i++) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_hor,dev_R,dev_I,icos,isin,l); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_ver,dev_R,dev_I,icos,isin,l); } for(int i=0;i<nqubits.getValue();i++) { hipLaunchKernelGGL(( Uk_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,dev_R,dev_I,bx2,by2,bz2,kcos,ksin,l); } for(int i=0;i<nqubits.getValue();i++) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_hor,dev_R,dev_I,icos,isin,l); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_ver,dev_R,dev_I,icos,isin,l); } } } for(int in=0;in<30;in++) { for(int it=0;it<num_trotter;it++) { for(int i=0;i<nqubits.getValue();i++) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_hor,dev_R,dev_I,icos,isin,l); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_ver,dev_R,dev_I,icos,isin,l); } for(int i=0;i<nqubits.getValue();i++) { hipLaunchKernelGGL(( Uk_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,dev_R,dev_I,bx2,by2,bz2,kcos,ksin,l); } for(int i=0;i<nqubits.getValue();i++) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, 
i,i_hor,dev_R,dev_I,icos,isin,l); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_ver,dev_R,dev_I,icos,isin,l); } } hipLaunchKernelGGL(( sumsigma_x), dim3(numblocks),dim3(numthreads), 0, 0, dev_R,dev_I,dev_sumdxR,dev_sumdxI,nqubits.getValue(),l); for(int back=0;back<71+in;back++) { for(int it=0;it<num_trotter;it++) { for(int i=0;i<nqubits.getValue();i++) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_hor,dev_sumdxR,dev_sumdxI,icos,-1*isin,l); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_ver,dev_sumdxR,dev_sumdxI,icos,-1*isin,l); } for(int i=0;i<nqubits.getValue();i++) { hipLaunchKernelGGL(( Uk_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,dev_sumdxR,dev_sumdxI,-1*bx2,-1*by2,-1*bz2,kcos,ksin,l); } for(int i=0;i<nqubits.getValue();i++) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_hor,dev_sumdxR,dev_sumdxI,icos,-1*isin,l); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_ver,dev_sumdxR,dev_sumdxI,icos,-1*isin,l); } } } hipMemcpy(sumdxR,dev_sumdxR,l*sizeof(double),hipMemcpyDeviceToHost); hipMemcpy(sumdxI,dev_sumdxI,l*sizeof(double),hipMemcpyDeviceToHost); for(int i=0;i<l;i++) { finalstate(i)=std::complex<double>(sumdxR[i],sumdxI[i]); } res=sqrt(std::norm(itpp::dot(itpp::conj(zerostate),finalstate)))/nqubits.getValue(); correlations(in)=res; } cout<<bxi<<" "<<bzi<<" "<<itpp::mean(correlations)<<" "<<std::sqrt(itpp::variance(correlations))<<endl; } } } // }}} if (option=="color_map2d_stdev_trotter1g") { // {{{ int xlen=x.getValue(); int num_trotter=trotternum.getValue(); double delta=1./num_trotter; cout<<"delta " << delta<<endl; // cout<<square<<endl; double *sumdxR=new double[l]; double *sumdxI=new double[l]; double *dev_sumdxR; double *dev_sumdxI; hipMalloc((void**)&dev_sumdxR,l*sizeof(double)); hipMalloc((void**)&dev_sumdxI,l*sizeof(double)); double icos=cos((delta)*ising.getValue()); double isin=sin((delta)*ising.getValue()); itpp::cvec finalstate(l); itpp::cvec zerostate(l); itpp::vec correlations(30); double res; int i_hor,i_ver; hipLaunchKernelGGL(( sumsigma_x), dim3(numblocks),dim3(numthreads), 0, 0, dev_R,dev_I,dev_sumdxR,dev_sumdxI,nqubits.getValue(),l); hipMemcpy(sumdxR,dev_sumdxR,l*sizeof(double),hipMemcpyDeviceToHost); hipMemcpy(sumdxI,dev_sumdxI,l*sizeof(double),hipMemcpyDeviceToHost); for(int i=0;i<l;i++) { zerostate(i)=std::complex<double>(sumdxR[i],sumdxI[i]); } for(double bxi=beginx.getValue();bxi<1.5;bxi+=0.05) { for(double bzi=0.;bzi<1.5;bzi+=0.05) { double theta=delta*sqrt((bxi*bxi)+(bzi*bzi)); double kcos=cos(theta); double ksin=sin(theta); if(theta==0) { theta=1.; } double bx2=delta*bxi/theta; double by2=0; double bz2=delta*bzi/theta; hipMemcpy(dev_R,R,l*sizeof(double),hipMemcpyHostToDevice); hipMemcpy(dev_I,I,l*sizeof(double),hipMemcpyHostToDevice); for(int n=0;n<70;n++) { for(int it=0;it<num_trotter;it++) { for(int i=0;i<nqubits.getValue();i++) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_hor,dev_R,dev_I,icos,isin,l); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_ver,dev_R,dev_I,icos,isin,l); } for(int i=0;i<nqubits.getValue();i++) { hipLaunchKernelGGL(( Uk_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,dev_R,dev_I,bx2,by2,bz2,kcos,ksin,l); } } } 
for(int in=0;in<30;in++) { for(int it=0;it<num_trotter;it++) { for(int i=0;i<nqubits.getValue();i++) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_hor,dev_R,dev_I,icos,isin,l); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_ver,dev_R,dev_I,icos,isin,l); } for(int i=0;i<nqubits.getValue();i++) { hipLaunchKernelGGL(( Uk_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,dev_R,dev_I,bx2,by2,bz2,kcos,ksin,l); } } hipLaunchKernelGGL(( sumsigma_x), dim3(numblocks),dim3(numthreads), 0, 0, dev_R,dev_I,dev_sumdxR,dev_sumdxI,nqubits.getValue(),l); for(int back=0;back<71+in;back++) { for(int it=0;it<num_trotter;it++) { for(int i=0;i<nqubits.getValue();i++) { hipLaunchKernelGGL(( Uk_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,dev_sumdxR,dev_sumdxI,-1*bx2,-1*by2,-1*bz2,kcos,ksin,l); } for(int i=0;i<nqubits.getValue();i++) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_hor,dev_sumdxR,dev_sumdxI,icos,-1*isin,l); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_ver,dev_sumdxR,dev_sumdxI,icos,-1*isin,l); } } } hipMemcpy(sumdxR,dev_sumdxR,l*sizeof(double),hipMemcpyDeviceToHost); hipMemcpy(sumdxI,dev_sumdxI,l*sizeof(double),hipMemcpyDeviceToHost); for(int i=0;i<l;i++) { finalstate(i)=std::complex<double>(sumdxR[i],sumdxI[i]); } res=sqrt(std::norm(itpp::dot(itpp::conj(zerostate),finalstate)))/nqubits.getValue(); correlations(in)=res; } cout<<bxi<<" "<<bzi<<" "<<itpp::mean(correlations)<<" "<<std::sqrt(itpp::variance(correlations))<<endl; } } } // }}} if (option=="test_2d_grid") { // {{{ int xlen=x.getValue(); // int ylen=y.getValue(); //cout<<square<<endl; double icos=cos(ising.getValue()); double isin=sin(ising.getValue()); for(int n=0;n<numt.getValue();n++) { for(int i=0;i<nqubits.getValue();i++) { int i_hor=(i+1)%xlen+(i/xlen)*xlen; int i_ver=(i+xlen)%nqubits.getValue(); cout << i << " " << i_hor << " " << i_ver << endl; hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_hor,dev_R,dev_I,icos,isin,l); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_ver,dev_R,dev_I,icos,isin,l); //cudaCheckError("ising",i); } } hipMemcpy(R,dev_R,l*sizeof(double),hipMemcpyDeviceToHost); hipMemcpy(I,dev_I,l*sizeof(double),hipMemcpyDeviceToHost); hipFree(dev_R); hipFree(dev_I); for(int i=0;i<l;i++) { std::cout<<R[i]<<" i"<<I[i]<<endl; } } // }}} if (option=="exp_lattice") { // {{{ //nqubits debe corresponder a un cuadro double icos=cos(ising.getValue()); double isin=sin(ising.getValue()); double theta=sqrt(pow(bx.getValue(),2)+pow(by.getValue(),2)+pow(bz.getValue(),2)); double bx2=bx.getValue()/theta; double by2=by.getValue()/theta; double bz2=bz.getValue()/theta; double kcos=cos(theta); double ksin=sin(theta); double *dotR=new double[nqubits.getValue()]; double *dotI=new double[nqubits.getValue()]; double *dev_dotR; double *dev_dotI; hipMalloc((void**)&dev_dotR,nqubits.getValue()*sizeof(double)); hipMalloc((void**)&dev_dotI,nqubits.getValue()*sizeof(double)); //cout<<div<<endl; for(int t=0;t<numt.getValue();t++) { for(int i=0;i<nqubits.getValue();i++) { dotR[i]=0; dotI[i]=0; } cudaSafeCall(hipMemcpy(dev_dotR,dotR,nqubits.getValue()*sizeof(double),hipMemcpyHostToDevice),"hipMalloc",1); cudaSafeCall(hipMemcpy(dev_dotI,dotI,nqubits.getValue()*sizeof(double),hipMemcpyHostToDevice),"hipMalloc",2); for(int 
i=0;i<nqubits.getValue();i++) { // dot_2<<<numblocks,numthreads>>>(1,1,i,dev_R,dev_I,dev_dotR,dev_dotI,l); //hipDeviceSynchronize(); cudaCheckError("dot",i); } cudaSafeCall(hipMemcpy(dotR,dev_dotR,nqubits.getValue()*sizeof(double),hipMemcpyDeviceToHost),"hipMalloc",3); cudaSafeCall(hipMemcpy(dotI,dev_dotI,nqubits.getValue()*sizeof(double),hipMemcpyDeviceToHost),"hipMalloc",4); for(int i=0;i<nqubits.getValue();i++) { cout<<dotR[i]<<" "; } cout<<endl; for(int is=0;is<nqubits.getValue();is++) { //Ui_kernel<<<numblocks,numthreads>>>(is,(is+1)%5,dev_R,dev_I,icos,isin,l); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, is,(is+5)%nqubits.getValue(),dev_R,dev_I,icos,isin,l); cudaCheckError("ising",is); } for(int ki=0;ki<nqubits.getValue();ki++) { hipLaunchKernelGGL(( Uk_kernel), dim3(numblocks),dim3(numthreads), 0, 0, ki,dev_R,dev_I,bx2,by2,bz2,kcos,ksin,l); cudaCheckError("kick",ki); } } hipFree(dev_dotR); hipFree(dev_dotI); } // }}} if (option=="test_densmat") { // {{{ int ndens=pow(2,numbits(whichq.getValue())); double *densR=new double[ndens*ndens]; double *densI=new double[ndens*ndens]; double *dev_densR; double *dev_densI; hipMalloc((void**)&dev_densR,ndens*ndens*sizeof(double)); hipMalloc((void**)&dev_densI,ndens*ndens*sizeof(double)); for(int i=0;i<ndens*ndens;i++) { densR[i]=0; densI[i]=0; } cudaSafeCall(hipMemcpy(dev_densR,densR,ndens*ndens*sizeof(double),hipMemcpyHostToDevice),"hipMemcpy",1); cudaSafeCall(hipMemcpy(dev_densI,densI,ndens*ndens*sizeof(double),hipMemcpyHostToDevice),"hipMemcpy",2); int div=choosediv(nqubits.getValue()); // int blockdivdens,threaddivdens; // choosedivdens(l,blockdivdens,threaddivdens); cout<<numblocks<<" "<<numthreads/ndens<<endl; for(int i=0;i<ndens;i++) { for(int j=0;j<ndens;j++) { hipLaunchKernelGGL(( density_matrix), dim3(numblocks),dim3(numthreads/ndens), 0, 0, whichq.getValue(),ndens,i,j,dev_R,dev_I,dev_densR,dev_densI,l/ndens); cudaCheckError("dot",i+j); } } cudaSafeCall(hipMemcpy(densR,dev_densR,ndens*ndens*sizeof(double),hipMemcpyDeviceToHost),"hipMemcpy",3); cudaSafeCall(hipMemcpy(densI,dev_densI,ndens*ndens*sizeof(double),hipMemcpyDeviceToHost),"hipMemcpy",4); for(int i=0;i<ndens;i++) { for(int j=0;j<ndens;j++) { cout<<densR[(ndens*i)+j]<<" i"<<densI[(ndens*i)+j]<<" "; } cout<<endl; } hipFree(dev_densR); hipFree(dev_densI); } // }}} if (option=="exp_cadena_densmat") { // {{{ double icos=cos(ising.getValue()); double isin=sin(ising.getValue()); double theta=sqrt(bx.getValue()*bx.getValue()+by.getValue()*by.getValue()+bz.getValue()*bz.getValue()); double bx2=bx.getValue()/theta; double by2=by.getValue()/theta; double bz2=bz.getValue()/theta; double kcos=cos(theta); double ksin=sin(theta); int ndens=pow(2,numbits(whichq.getValue())); double *densR=new double[ndens*ndens]; double *densI=new double[ndens*ndens]; double *dev_densR; double *dev_densI; hipMalloc((void**)&dev_densR,ndens*ndens*sizeof(double)); hipMalloc((void**)&dev_densI,ndens*ndens*sizeof(double)); itpp::cmat dens(2,2); // which temp para sacar matdens todos //for(int n=0;n<nqubits.getValue();n++) { for(int n=0;n<numt.getValue();n++) { for(int qus=0;qus<nqubits.getValue();qus++) { int whichtemp=pow(2,qus); for(int i=0;i<ndens*ndens;i++) { densR[i]=0; densI[i]=0; } cudaSafeCall(hipMemcpy(dev_densR,densR,ndens*ndens*sizeof(double),hipMemcpyHostToDevice),"hipMemcpy",1); cudaSafeCall(hipMemcpy(dev_densI,densI,ndens*ndens*sizeof(double),hipMemcpyHostToDevice),"hipMemcpy",2); for(int i=0;i<ndens;i++) { for(int j=0;j<ndens;j++) { //whichtemp aqui hipLaunchKernelGGL(( 
density_matrix), dim3(numblocks),dim3(numthreads/ndens), 0, 0, whichtemp,ndens,i,j,dev_R,dev_I,dev_densR,dev_densI,l/ndens); cudaCheckError("dot",i+j); } } cudaSafeCall(hipMemcpy(densR,dev_densR,ndens*ndens*sizeof(double),hipMemcpyDeviceToHost),"hipMemcpy",3); cudaSafeCall(hipMemcpy(densI,dev_densI,ndens*ndens*sizeof(double),hipMemcpyDeviceToHost),"hipMemcpy",4); for(int i=0;i<ndens;i++) { for(int j=0;j<ndens;j++) { dens(i,j)=std::complex<double>(densR[(ndens*i)+j],densI[(ndens*i)+j]); } } cout<<real(itpp::trace(dens*itppextmath::sigma(3)))<<endl; // cout<<"-----------------otro qubit-----------------------"<<endl; } for(int i=0;i<nqubits.getValue();i++) { hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,(i+1)%nqubits.getValue(),dev_R,dev_I,icos,isin,l); cudaCheckError("ising",i); } for(int i=0;i<nqubits.getValue();i++) { hipLaunchKernelGGL(( Uk_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,dev_R,dev_I,bx2,by2,bz2,kcos,ksin,l); cudaCheckError("ising",i); } //cout<<endl; } hipFree(dev_densR); hipFree(dev_densI); } // }}} if(option=="correlation_measure2d_trotter") { // {{{ int num_trotter=trotternum.getValue(); double delta=1./num_trotter; int xlen=x.getValue(); // int ylen=y.getValue(); // cout<<square<<endl; double *sumdxR=new double[l]; double *sumdxI=new double[l]; double *dev_sumdxR; double *dev_sumdxI; hipMalloc((void**)&dev_sumdxR,l*sizeof(double)); hipMalloc((void**)&dev_sumdxI,l*sizeof(double)); double icos=cos((delta/2)*ising.getValue()); double isin=sin((delta/2)*ising.getValue()); double theta=(delta)*sqrt(bx.getValue()*bx.getValue()+by.getValue()*by.getValue()+bz.getValue()*bz.getValue()); double bx2=delta*bx.getValue()/theta; double by2=delta*by.getValue()/theta; double bz2=delta*bz.getValue()/theta; double kcos=cos(theta); double ksin=sin(theta); itpp::cvec finalstate(l); itpp::cvec zerostate(l); double res; // double res,norm; int i_hor,i_ver; hipLaunchKernelGGL(( sumsigma_x), dim3(numblocks),dim3(numthreads), 0, 0, dev_R,dev_I,dev_sumdxR,dev_sumdxI,nqubits.getValue(),l); cudaCheckError("sum_dx",1); hipMemcpy(sumdxR,dev_sumdxR,l*sizeof(double),hipMemcpyDeviceToHost); hipMemcpy(sumdxI,dev_sumdxI,l*sizeof(double),hipMemcpyDeviceToHost); for(int i=0;i<l;i++) { zerostate(i)=std::complex<double>(sumdxR[i],sumdxI[i]); } for(int n=0;n<numt.getValue();n++) { //se aplica M hipLaunchKernelGGL(( sumsigma_x), dim3(numblocks),dim3(numthreads), 0, 0, dev_R,dev_I,dev_sumdxR,dev_sumdxI,nqubits.getValue(),l); //se aplica U^-1 for(int t=0;t<n;t++) { for(int it=0;it<num_trotter;it++) { for(int i=1;i<nqubits.getValue();i++) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_hor,dev_sumdxR,dev_sumdxI,icos,-1*isin,l); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_ver,dev_sumdxR,dev_sumdxI,icos,-1*isin,l); } for(int i=0;i<nqubits.getValue();i++) { hipLaunchKernelGGL(( Uk_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,dev_sumdxR,dev_sumdxI,-1*bx2,-1*by2,-1*bz2,kcos,ksin,l); } for(int i=0;i<nqubits.getValue();i++) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_hor,dev_sumdxR,dev_sumdxI,icos,-1*isin,l); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_ver,dev_sumdxR,dev_sumdxI,icos,-1*isin,l); } } } //se aplica la U for(int it=0;it<num_trotter;it++) { for(int i=0;i<nqubits.getValue();i++) { 
i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_hor,dev_R,dev_I,icos,isin,l); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_ver,dev_R,dev_I,icos,isin,l); } for(int i=0;i<nqubits.getValue();i++) { hipLaunchKernelGGL(( Uk_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,dev_R,dev_I,bx2,by2,bz2,kcos,ksin,l); } for(int i=1;i<nqubits.getValue();i++) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_hor,dev_R,dev_I,icos,isin,l); hipLaunchKernelGGL(( Ui_kernel), dim3(numblocks),dim3(numthreads), 0, 0, i,i_ver,dev_R,dev_I,icos,isin,l); } } hipMemcpy(sumdxR,dev_sumdxR,l*sizeof(double),hipMemcpyDeviceToHost); hipMemcpy(sumdxI,dev_sumdxI,l*sizeof(double),hipMemcpyDeviceToHost); for(int i=0;i<l;i++) { finalstate(i)=std::complex<double>(sumdxR[i],sumdxI[i]); } res=std::norm(itpp::dot(itpp::conj(zerostate),finalstate)); cout<<sqrt(res)/nqubits.getValue()<<endl; } } // }}} if (option=="test_proyector_big") { // {{{ itpp::cvec proyectado(l); double *rotR=new double[l]; double *rotI=new double[l]; double *dev_rotR; double *dev_rotI; hipMalloc((void**)&dev_rotR,l*sizeof(double)); hipMalloc((void**)&dev_rotI,l*sizeof(double)); for(int i=1;i<x.getValue();i++) { hipLaunchKernelGGL(( vertical_rotation), dim3(numblocks),dim3(numthreads), 0, 0, dev_R,dev_I,dev_rotR,dev_rotI,x.getValue(),nqubits.getValue(),l,i); hipMemcpy(rotR,dev_rotR,l*sizeof(double),hipMemcpyDeviceToHost); hipMemcpy(rotI,dev_rotI,l*sizeof(double),hipMemcpyDeviceToHost); for(int j=0;j<l;j++) { R[j]=R[j]+cos(2*itpp::pi*km.getValue()*i/x.getValue())*rotR[j]-sin(2*itpp::pi*km.getValue()*i/x.getValue())*rotI[j]; I[j]=I[j]+sin(2*itpp::pi*km.getValue()*i/x.getValue())*rotR[j]+cos(2*itpp::pi*km.getValue()*i/x.getValue())*rotI[j]; } } hipMemcpy(dev_R,R,l*sizeof(double),hipMemcpyHostToDevice); hipMemcpy(dev_I,I,l*sizeof(double),hipMemcpyHostToDevice); hipLaunchKernelGGL(( vertical_rotation), dim3(numblocks),dim3(numthreads), 0, 0, dev_R,dev_I,dev_rotR,dev_rotI,x.getValue(),nqubits.getValue(),l); hipMemcpy(rotR,dev_rotR,l*sizeof(double),hipMemcpyDeviceToHost); hipMemcpy(rotI,dev_rotI,l*sizeof(double),hipMemcpyDeviceToHost); std::complex<double> fase=::exp(std::complex<double>(0,-2*itpp::pi*km.getValue()/x.getValue())); for(int i=0;i<l;i++) { //cout<<std::complex<double>(rotR[i],rotI[i])/std::complex<double>(R[i],I[i])<<endl; proyectado(i)=fase*(std::complex<double>(R[i],I[i]))-std::complex<double>(rotR[i],rotI[i]); //cout<<"---->"<<proyectado(i)<<endl; } cout<<"-----------------"<<endl; cout<<itpp::norm(proyectado)<<endl; //cout<<fase<<endl; } // }}} if (option=="test_proyector") { // {{{ itpp::cvec proyectado(l); double *rotR=new double[l]; double *rotI=new double[l]; double *dev_rotR; double *dev_rotI; hipMalloc((void**)&dev_rotR,l*sizeof(double)); hipMalloc((void**)&dev_rotI,l*sizeof(double)); itpp::cvec vector(l); for(int i=0;i<l;i++) { vector(i)=std::complex<double>(R[i],I[i]); } hipLaunchKernelGGL(( vertical_proyector), dim3(numblocks),dim3(numthreads), 0, 0, dev_R,dev_I,dev_rotR,dev_rotI,x.getValue(),nqubits.getValue(),l,km.getValue()); hipMemcpy(rotR,dev_rotR,l*sizeof(double),hipMemcpyDeviceToHost); hipMemcpy(rotI,dev_rotI,l*sizeof(double),hipMemcpyDeviceToHost); for(int i=0;i<l;i++) { proyectado(i)=std::complex<double>(rotR[i],rotI[i]); } 
evcuda::proyector_vertical_itpp(vector,dev_R,dev_I,dev_rotR,dev_rotI,x.getValue(),km.getValue()); cout<< proyectado<<endl; cout<<vector<<endl; cout<<itpp::norm(proyectado-vector)<<endl; itpp::cvec rotado=proyectado; itpp::cvec rotado2=vector; int nx = x.getValue(); evcuda::apply_vertical_rotation_itpp(rotado,dev_R,dev_I,dev_rotR,dev_rotI,nx); evcuda::apply_vertical_rotation_itpp(rotado2,dev_R,dev_I,dev_rotR,dev_rotI,nx); double error = abs(itppextmath::proportionality_test(rotado,proyectado)); double error2 = abs(itppextmath::proportionality_test(proyectado,vector)); cout << "Error en la proporcionalidad es del cuda " << error << endl; cout << "Error en la proporcionalidad es del normal-- " << error2 << endl; //cout<<proyectado<<endl; //cout<<vector<<endl; } // }}} if (option=="assemble_matrix") { // {{{ double *rotR=new double[l]; double *rotI=new double[l]; double *dev_rotR; double *dev_rotI; hipMalloc((void**)&dev_rotR,l*sizeof(double)); hipMalloc((void**)&dev_rotI,l*sizeof(double)); int *A=new int[l]; for(int i=0;i<l;i++) { A[i]=2; } find_states_horizontal(A,nqubits.getValue(),x.getValue(),km.getValue(),l); int cont=0; for(int i=0;i<l;i++) { cont+=A[i]; } itpp::cmat eigenvectors(cont,cont); for(int vec=0;vec<cont;vec++) { int flag=0; for(int i=0;i<l;i++) { if(A[i]=1 && flag==0) { R[i]=1.; flag=1; A[i]=0; } else { R[i]=0.; } I[i]=0; } hipMemcpy(dev_R,R,l*sizeof(double),hipMemcpyHostToDevice); hipMemcpy(dev_I,I,l*sizeof(double),hipMemcpyHostToDevice); for(int i=1;i<x.getValue();i++) { hipLaunchKernelGGL(( horizontal_rotation), dim3(numblocks),dim3(numthreads), 0, 0, dev_R,dev_I,dev_rotR,dev_rotI,x.getValue(),nqubits.getValue(),l,i); hipMemcpy(rotR,dev_rotR,l*sizeof(double),hipMemcpyDeviceToHost); hipMemcpy(rotI,dev_rotI,l*sizeof(double),hipMemcpyDeviceToHost); for(int j=0;j<l;j++) { R[j]=R[j]+cos(2*itpp::pi*km.getValue()*i/x.getValue())*rotR[j]-sin(2*itpp::pi*km.getValue()*i/x.getValue())*rotI[j]; I[j]=I[j]+sin(2*itpp::pi*km.getValue()*i/x.getValue())*rotR[j]+cos(2*itpp::pi*km.getValue()*i/x.getValue())*rotI[j]; } } for(int i=0;i<l;i++) { eigenvectors(vec,i)=std::complex<double>(R[i],I[i]); } } } // }}} hipFree(dev_R); hipFree(dev_I); // {{{ Final report if(!no_general_report.getValue()){ error += system("echo \\#terminando: $(date)"); } // }}} return 0; }
73cdc2995c03d893ecffaca20cad8ee816e31054.cu
// Includes {{{ #include <iostream> #include <itpp/itbase.h> #include <spinchain.cpp> #include <dev_random.cpp> #include <itpp_ext_math.cpp> #include <math.h> #include <tclap/CmdLine.h> #include <device_functions.h> #include <cuda.h> #include "tools.cpp" #include "cuda_functions.cu" #include "cuda_utils.cu" #include "ev_routines.cu" #include "cfp_routines.cu" #include <time.h> // }}} // TCLAP setup {{{ TCLAP::CmdLine cmd("Command description message", ' ', "0.1"); TCLAP::ValueArg<unsigned int> seed("s","seed", "Random seed [0 for urandom]",false, 243243,"unsigned int",cmd); TCLAP::ValueArg<string> optionArg("o","option", "Option" ,false,"nichts", "string",cmd); TCLAP::ValueArg<int> nqubits("q","qubits", "Number of qubits",false, 3,"int",cmd); TCLAP::ValueArg<int> numt("","t", "Number of time iterartions",false, 1,"int",cmd); TCLAP::ValueArg<int> position("","position", "The position of something",false, 0,"int",cmd); TCLAP::ValueArg<int> whichq("","which", "Which qubits in densmat",false, 1,"int",cmd); TCLAP::ValueArg<int> x("","x", "Size of the x-dimention",false, 0,"int",cmd); // TCLAP::ValueArg<int> y("","y", "Size of the y-dimention",false, 0,"int",cmd); //TCLAP::ValueArg<int> position2("","position2", "The position of something",false, 3,"int",cmd); TCLAP::ValueArg<double> ising("","ising_z", "Ising interaction in the z-direction",false, 0,"double",cmd); TCLAP::ValueArg<double> deltav("","delta", "Some small delta",false, 1,"double",cmd); TCLAP::ValueArg<int> trotternum("","trotter", "Number of steps for trotter-suzuki algorithm",false, 1,"int",cmd); TCLAP::ValueArg<double> bx("","bx", "Magnetic field in x direction",false, 0,"double",cmd); TCLAP::ValueArg<double> by("","by", "Magnetic field in y direction",false, 0,"double",cmd); TCLAP::ValueArg<double> bz("","bz", "Magnetic field in z direction",false, 0,"double",cmd); TCLAP::ValueArg<double> beginx("","startx", "Magnetic field start in x direction",false, 0,"double",cmd); TCLAP::ValueArg<double> beginz("","startz", "Magnetic field start in z direction",false, 0,"double",cmd); TCLAP::ValueArg<double> km("","k", "Momentum of the proyector",false,0,"double",cmd); TCLAP::ValueArg<int> one_state("","one_state", "State l",false, 0,"int",cmd); TCLAP::ValueArg<int> ifrandom("","ifrandom", "0 if you dont want randstate",false,1,"int",cmd); TCLAP::ValueArg<int> dev("","dev", "Gpu to be used, 0 for k20, 1 for c20",false, 0,"int",cmd); TCLAP::SwitchArg no_general_report("","no_general_report", "Print the general report", cmd); // }}} double diffclock(clock_t clock1,clock_t clock2) // {{{ { double diffticks=clock1-clock2; double diffms=(diffticks*1000)/CLOCKS_PER_SEC; return diffms; } // }}} // }}} int main(int argc,char* argv[]) { // Setup CUDA devide, random numbers, command line parserc, and other parametrs {{{ cudaSetDevice(dev.getValue()); // itpp::RNG_randomize(); double error=0; cmd.parse(argc,argv); // {{{ Set seed for random unsigned int semilla=seed.getValue(); // std::cout << "La semilla es " << semilla << endl; if (semilla == 0){ Random semilla_uran; semilla=semilla_uran.strong(); } itpp::RNG_reset(semilla); // }}} // {{{ Report on the screen if(!no_general_report.getValue()){ cout << "#linea de comando: "; for(int i=0;i<argc;i++){ cout <<argv[i]<<" " ; } cout << endl ; cout << "#semilla = " << semilla << endl; error += system("echo \\#hostname: $(hostname)"); error += system("echo \\#comenzando en: $(date)"); error += system("echo \\#uname -a: $(uname -a)"); error += system("echo \\#working dir: $(pwd)"); } // }}} string 
option=optionArg.getValue(); int l=pow(2,nqubits.getValue()); int numthreads, numblocks; choosenumblocks(l,numthreads,numblocks); int div=choosediv(nqubits.getValue()); // }}} // Create workspace in the CPU double *R=new double[l], *I=new double[l]; // Create random state {{{ double *dev_R, *dev_I; randomstate(l,R,I); if (ifrandom.getValue()!=1) { for(int i=0;i<l;i++) { R[i]=0; I[i]=0; } R[one_state.getValue()]=1; } cudaSafeCall(cudaMalloc((void**)&dev_R,l*sizeof(double)),"malloc",124); cudaSafeCall(cudaMalloc((void**)&dev_I,l*sizeof(double)),"malloc",125); cudaSafeCall(cudaMemcpy(dev_R,R,l*sizeof(double),cudaMemcpyHostToDevice),"memcpy",127); cudaSafeCall(cudaMemcpy(dev_I,I,l*sizeof(double),cudaMemcpyHostToDevice),"memcpy",128); // }}} if (option=="test_apply_ising") { // {{{ double mcos=cos(ising.getValue()); double msin=sin(ising.getValue()); for(int n=0;n<numt.getValue();n++) { for(int i=0;i<nqubits.getValue();i++) { Ui_kernel<<<numblocks,numthreads>>>(i,(i+1)%nqubits.getValue(),dev_R,dev_I,mcos,msin,l); } } cudaMemcpy(R,dev_R,l*sizeof(double),cudaMemcpyDeviceToHost); cudaMemcpy(I,dev_I,l*sizeof(double),cudaMemcpyDeviceToHost); cudaFree(dev_R); cudaFree(dev_I); for(int i=0;i<l;i++) { std::cout<<R[i]<<" i"<<I[i]<<endl; } } // }}} if (option=="test_apply_magnetic_kick") { // {{{ double theta=sqrt(bx.getValue()*bx.getValue()+by.getValue()*by.getValue()+bz.getValue()*bz.getValue()); double bx2=bx.getValue()/theta; double by2=by.getValue()/theta; double bz2=bz.getValue()/theta; double mcos=cos(theta); double msin=sin(theta); for(int n=0;n<numt.getValue();n++) { for(int i=0;i<nqubits.getValue();i++) { Uk_kernel<<<numblocks,numthreads>>>(i,dev_R,dev_I,bx2,by2,bz2,mcos,msin,l); } } cudaMemcpy(R,dev_R,l*sizeof(double),cudaMemcpyDeviceToHost); cudaMemcpy(I,dev_I,l*sizeof(double),cudaMemcpyDeviceToHost); for(int i=0;i<l;i++) { std::cout<<R[i]<<" i"<<I[i]<<endl; } } // }}} if (option=="apply_chain") { // {{{ double icos=cos(ising.getValue()); double isin=sin(ising.getValue()); double theta=sqrt(bx.getValue()*bx.getValue()+by.getValue()*by.getValue()+bz.getValue()*bz.getValue()); double bx2=bx.getValue()/theta; double by2=by.getValue()/theta; double bz2=bz.getValue()/theta; double kcos=cos(theta); double ksin=sin(theta); for(int n=0;n<numt.getValue();n++) { for(int i=0;i<nqubits.getValue();i++) { Ui_kernel<<<numblocks,numthreads>>>(i,(i+1)%nqubits.getValue(),dev_R,dev_I,icos,isin,l); cudaCheckError("ising",i); } for(int i=0;i<nqubits.getValue();i++) { Uk_kernel<<<numblocks,numthreads>>>(i,dev_R,dev_I,bx2,by2,bz2,kcos,ksin,l); cudaCheckError("kick",i); } } cudaMemcpy(R,dev_R,l*sizeof(double),cudaMemcpyDeviceToHost); cudaMemcpy(I,dev_I,l*sizeof(double),cudaMemcpyDeviceToHost); for(int i=0;i<l;i++) { std::cout<<R[i]<<" i"<<I[i]<<endl; } } // }}} if (option=="measure_time") { // {{{ cudaEvent_t kstart, kstop; // cudaEvent_t cstart, cstop, kstart, kstop; double icos=cos(ising.getValue()); double isin=sin(ising.getValue()); double theta=sqrt(bx.getValue()*bx.getValue()+by.getValue()*by.getValue()+bz.getValue()*bz.getValue()); double bx2=bx.getValue()/theta; double by2=by.getValue()/theta; double bz2=bz.getValue()/theta; double kcos=cos(theta); double ksin=sin(theta); float timek=0; cudaEventCreate(&kstart); cudaEventCreate(&kstop); cudaEventRecord(kstart,0); clock_t begin=clock(); //system("echo \\#Comenzando a hacer varias iteraciones: $(date)"); //cout << "Iteraciones son " << numt.getValue() << endl; for(int n=0;n<numt.getValue();n++) { for(int i=0;i<nqubits.getValue();i++) { 
Ui_kernel<<<numblocks,numthreads>>>(i,(i+1)%nqubits.getValue(),dev_R,dev_I,icos,isin,l); cudaCheckError("ising",i); } for(int i=0;i<nqubits.getValue();i++) { Uk_kernel<<<numblocks,numthreads>>>(i,dev_R,dev_I,bx2,by2,bz2,kcos,ksin,l); cudaCheckError("kick",i); } } //system("echo \\#Terminando a hacer varias iteraciones: $(date)"); cudaEventRecord(kstop,0); cudaEventSynchronize(kstop); cudaEventElapsedTime(&timek,kstart,kstop); clock_t end=clock(); double tiempo=double(diffclock(end,begin)); std::cout<<"CUDA EVENT "<<timek/(numt.getValue()*100)<<endl; cout <<"C clock "<<tiempo/(numt.getValue()*100)<< endl; } // }}} if (option=="check_inverse") { // {{{ double icos=cos(ising.getValue()); double isin=sin(ising.getValue()); double theta=sqrt(bx.getValue()*bx.getValue()+by.getValue()*by.getValue()+bz.getValue()*bz.getValue()); double bx2=bx.getValue()/theta; double by2=by.getValue()/theta; double bz2=bz.getValue()/theta; double kcos=cos(theta); double ksin=sin(theta); itpp::cvec initialstate(l); itpp::cvec finalstate(l); for(int i=0;i<l;i++) { initialstate(i)=std::complex<double>(R[i],I[i]); } //se aplica la U for(int t=0;t<numt.getValue();t++) { for(int i=0;i<nqubits.getValue();i++) { Ui_kernel<<<numblocks,numthreads>>>(i,(i+1)%nqubits.getValue(),dev_R,dev_I,icos,isin,l); cudaCheckError("ising",i); } for(int i=0;i<nqubits.getValue();i++) { Uk_kernel<<<numblocks,numthreads>>>(i,dev_R,dev_I,bx2,by2,bz2,kcos,ksin,l); cudaCheckError("kick",i); } } //Se aplica U^-1 for(int t=0;t<numt.getValue();t++) { for(int i=0;i<nqubits.getValue();i++) { Uk_kernel<<<numblocks,numthreads>>>(i,dev_R,dev_I,-1*bx2,-1*by2,-1*bz2,kcos,ksin,l); cudaCheckError("kick",i); } for(int i=0;i<nqubits.getValue();i++) { Ui_kernel<<<numblocks,numthreads>>>(i,(i+1)%nqubits.getValue(),dev_R,dev_I,icos,-1*isin,l); cudaCheckError("ising",i); } } cudaMemcpy(R,dev_R,l*sizeof(double),cudaMemcpyDeviceToHost); cudaMemcpy(I,dev_I,l*sizeof(double),cudaMemcpyDeviceToHost); for(int i=0;i<l;i++) { finalstate(i)=std::complex<double>(R[i],I[i]); } cout<<std::norm(itpp::dot(itpp::conj(initialstate),finalstate))<<endl; } // }}} if (option=="check_inverse_trotter2d") { // {{{ int xlen=x.getValue(); int num_trotter=trotternum.getValue(); double delta=1./num_trotter; cout << delta << endl; int i_hor,i_ver; double icos=cos((delta/2.)*ising.getValue()); double isin=sin((delta/2.)*ising.getValue()); double theta=(delta)*sqrt(bx.getValue()*bx.getValue()+by.getValue()*by.getValue()+bz.getValue()*bz.getValue()); double bx2=delta*bx.getValue()/theta; double by2=delta*by.getValue()/theta; double bz2=delta*bz.getValue()/theta; double kcos=cos(theta); double ksin=sin(theta); itpp::cvec initialstate(l); itpp::cvec finalstate(l); for(int i=0;i<l;i++) { initialstate(i)=std::complex<double>(R[i],I[i]); } //se aplica la U for(int t=0;t<numt.getValue();t++) { for(int it=0;it<num_trotter;it++) { for(int i=0;i<nqubits.getValue();i+=2) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); Ui_kernel<<<numblocks,numthreads>>>(i,i_hor,dev_R,dev_I,icos,isin,l); Ui_kernel<<<numblocks,numthreads>>>(i,i_ver,dev_R,dev_I,icos,isin,l); } for(int i=0;i<nqubits.getValue();i++) { Uk_kernel<<<numblocks,numthreads>>>(i,dev_R,dev_I,bx2,by2,bz2,kcos,ksin,l); } for(int i=1;i<nqubits.getValue();i+=2) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); Ui_kernel<<<numblocks,numthreads>>>(i,i_hor,dev_R,dev_I,icos,isin,l); Ui_kernel<<<numblocks,numthreads>>>(i,i_ver,dev_R,dev_I,icos,isin,l); } } } //Se aplica U^-1 for(int t=0;t<numt.getValue();t++) { for(int 
it=0;it<num_trotter;it++) { for(int i=1;i<nqubits.getValue();i+=2) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); Ui_kernel<<<numblocks,numthreads>>>(i,i_hor,dev_R,dev_I,icos,-1*isin,l); Ui_kernel<<<numblocks,numthreads>>>(i,i_ver,dev_R,dev_I,icos,-1*isin,l); } for(int i=0;i<nqubits.getValue();i++) { Uk_kernel<<<numblocks,numthreads>>>(i,dev_R,dev_I,-1*bx2,-1*by2,-1*bz2,kcos,ksin,l); } for(int i=0;i<nqubits.getValue();i+=2) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); Ui_kernel<<<numblocks,numthreads>>>(i,i_hor,dev_R,dev_I,icos,-1*isin,l); Ui_kernel<<<numblocks,numthreads>>>(i,i_ver,dev_R,dev_I,icos,-1*isin,l); } } } cudaMemcpy(R,dev_R,l*sizeof(double),cudaMemcpyDeviceToHost); cudaMemcpy(I,dev_I,l*sizeof(double),cudaMemcpyDeviceToHost); for(int i=0;i<l;i++) { finalstate(i)=std::complex<double>(R[i],I[i]); } cout<<itpp::norm(initialstate-finalstate)<<endl; } // }}} if (option=="correlation_measure") { // {{{ double *sumdxR=new double[l]; double *sumdxI=new double[l]; double *dev_sumdxR; double *dev_sumdxI; cudaMalloc((void**)&dev_sumdxR,l*sizeof(double)); cudaMalloc((void**)&dev_sumdxI,l*sizeof(double)); double bx2,by2,bz2,kcos,ksin,icos,isin; itpp::vec b(3); b(0)=bx.getValue(); b(1)=by.getValue(); b(2)=bz.getValue(); set_parameters(ising.getValue(),b,icos,isin,kcos,ksin,bx2,by2,bz2); itpp::cvec finalstate(l); itpp::cvec zerostate(l); double res; // double res,norm; sumsigma_x<<<numblocks,numthreads>>>(dev_R,dev_I,dev_sumdxR,dev_sumdxI,nqubits.getValue(),l); cudaCheckError("sum_dx",1); cudaMemcpy(sumdxR,dev_sumdxR,l*sizeof(double),cudaMemcpyDeviceToHost); cudaMemcpy(sumdxI,dev_sumdxI,l*sizeof(double),cudaMemcpyDeviceToHost); for(int i=0;i<l;i++) { zerostate(i)=std::complex<double>(sumdxR[i],sumdxI[i]); } for(int n=0;n<numt.getValue();n++) { //se aplica M sumsigma_x<<<numblocks,numthreads>>>(dev_R,dev_I,dev_sumdxR,dev_sumdxI,nqubits.getValue(),l); //se aplica U^-1 for(int t=0;t<n;t++) { for(int i=0;i<nqubits.getValue();i++) { Uk_kernel<<<numblocks,numthreads>>>(i,dev_sumdxR,dev_sumdxI,-1*bx2,-1*by2,-1*bz2,kcos,ksin,l); //cudaCheckError("kick",i); } for(int i=0;i<nqubits.getValue();i++) { Ui_kernel<<<numblocks,numthreads>>>(i,(i+1)%nqubits.getValue(),dev_sumdxR,dev_sumdxI,icos,-1*isin,l); //cudaCheckError("ising",i); } } //se aplica la U for(int i=0;i<nqubits.getValue();i++) { Ui_kernel<<<numblocks,numthreads>>>(i,(i+1)%nqubits.getValue(),dev_R,dev_I,icos,isin,l); //cudaCheckError("ising",i); } for(int i=0;i<nqubits.getValue();i++) { Uk_kernel<<<numblocks,numthreads>>>(i,dev_R,dev_I,bx2,by2,bz2,kcos,ksin,l); //cudaCheckError("kick",i); } cudaMemcpy(sumdxR,dev_sumdxR,l*sizeof(double),cudaMemcpyDeviceToHost); cudaMemcpy(sumdxI,dev_sumdxI,l*sizeof(double),cudaMemcpyDeviceToHost); for(int i=0;i<l;i++) { finalstate(i)=std::complex<double>(sumdxR[i],sumdxI[i]); } res=std::norm(itpp::dot(itpp::conj(zerostate),finalstate)); //std::cout<<itpp::dot(itpp::conj(zerostate),finalstate); cout<<sqrt(res)/nqubits.getValue()<<endl; } } // }}} if (option=="correlation_measure_test") { // {{{ //this method computes fater correlations at the cost of having two states in global mem double *sumdxR=new double[l]; double *sumdxI=new double[l]; double *dev_sumdxR; double *dev_sumdxI; double *zeroR=new double[l]; double *zeroI=new double[l]; double *dev_zeroR; double *dev_zeroI; double *resR=new double[numt.getValue()]; double *dev_resR; double *resI=new double[numt.getValue()]; double *dev_resI; cudaMalloc((void**)&dev_sumdxR,l*sizeof(double)); 
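// correlation_measure_test keeps both M|psi(0)> (dev_zeroR/dev_zeroI, with M
// apparently the sum of sigma_x over all qubits, as the sumsigma_x kernel name
// suggests) and the M-applied current state (dev_sumdxR/dev_sumdxI) resident on
// the device.  The overlap for every time step is accumulated into
// dev_resR/dev_resI by the timed_dot kernel and copied back once after the
// loop, so the per-step device-to-host transfers and host-side dot products of
// the plain correlation_measure option are avoided -- the "faster at the cost
// of two states in global memory" trade-off mentioned in the comment above.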
cudaMalloc((void**)&dev_sumdxI,l*sizeof(double)); cudaMalloc((void**)&dev_zeroR,l*sizeof(double)); cudaMalloc((void**)&dev_zeroI,l*sizeof(double)); cudaMalloc((void**)&dev_resR,numt.getValue()*sizeof(double)); cudaMalloc((void**)&dev_resI,numt.getValue()*sizeof(double)); double icos=cos(ising.getValue()); double isin=sin(ising.getValue()); double theta=sqrt(bx.getValue()*bx.getValue()+by.getValue()*by.getValue()+bz.getValue()*bz.getValue()); double bx2=bx.getValue()/theta; double by2=by.getValue()/theta; double bz2=bz.getValue()/theta; double kcos=cos(theta); double ksin=sin(theta); sumsigma_x<<<numblocks,numthreads>>>(dev_R,dev_I,dev_zeroR,dev_zeroI,nqubits.getValue(),l); cudaCheckError("sum_dx",1); for(int n=0;n<numt.getValue();n++) { //se aplica M sumsigma_x<<<numblocks,numthreads>>>(dev_R,dev_I,dev_sumdxR,dev_sumdxI,nqubits.getValue(),l); //se aplica U^-1 for(int t=0;t<n;t++) { for(int i=0;i<nqubits.getValue();i++) { Uk_kernel<<<numblocks,numthreads>>>(i,dev_sumdxR,dev_sumdxI,-1*bx2,-1*by2,-1*bz2,kcos,ksin,l); //cudaCheckError("kick",i); } for(int i=0;i<nqubits.getValue();i++) { Ui_kernel<<<numblocks,numthreads>>>(i,(i+1)%nqubits.getValue(),dev_sumdxR,dev_sumdxI,icos,-1*isin,l); //cudaCheckError("ising",i); } } //se aplica la U for(int i=0;i<nqubits.getValue();i++) { Ui_kernel<<<numblocks,numthreads>>>(i,(i+1)%nqubits.getValue(),dev_R,dev_I,icos,isin,l); //cudaCheckError("ising",i); } for(int i=0;i<nqubits.getValue();i++) { Uk_kernel<<<numblocks,numthreads>>>(i,dev_R,dev_I,bx2,by2,bz2,kcos,ksin,l); //cudaCheckError("kick",i); } timed_dot<<<numblocks,numthreads>>>(n,dev_zeroR,dev_zeroI,dev_sumdxR,dev_sumdxI,dev_resR,dev_resI,l); cudaCheckError("dot",0); } cudaMemcpy(resR,dev_resR,numt.getValue()*sizeof(double),cudaMemcpyDeviceToHost); cudaMemcpy(resI,dev_resI,numt.getValue()*sizeof(double),cudaMemcpyDeviceToHost); for(int i=0;i<numt.getValue();i++) { cout<<sqrt(resR[i]*resR[i]+resI[i]*resI[i])/nqubits.getValue()<<endl; } } // }}} if (option=="fidelity_measure") { // {{{ double *AR=new double[l]; double *AI=new double[l]; double *dev_AR; double *dev_AI; cudaMalloc((void**)&dev_AR,l*sizeof(double)); cudaMalloc((void**)&dev_AI,l*sizeof(double)); double delta=deltav.getValue(); double icos=cos(ising.getValue()); double isin=sin(ising.getValue()); double theta=sqrt(bx.getValue()*bx.getValue()+by.getValue()*by.getValue()+bz.getValue()*bz.getValue()); double bx2=bx.getValue()/theta; double by2=by.getValue()/theta; double bz2=bz.getValue()/theta; double kcos=cos(theta); double ksin=sin(theta); double thetadel=delta; double bx2del=1.; double by2del=0.; double bz2del=0.; double kcosdel=cos(thetadel); double ksindel=sin(thetadel); itpp::cvec leftstate(l); itpp::cvec rightstate(l); for(int i=0;i<l;i++) { leftstate(i)=std::complex<double>(R[i],I[i]); rightstate(i)=std::complex<double>(R[i],I[i]); } cout<<std::norm(itpp::dot(itpp::conj(leftstate),rightstate))<<endl; // devcpy<<<numblocks,numthreads>>>(l,dev_R,dev_I,dev_AR,dev_AI); for(int n=0;n<numt.getValue();n++) { //se aplica la U for(int i=0;i<nqubits.getValue();i++) { Ui_kernel<<<numblocks,numthreads>>>(i,(i+1)%nqubits.getValue(),dev_R,dev_I,icos,isin,l); // cudaCheckError("ising",i); } for(int i=0;i<nqubits.getValue();i++) { Uk_kernel<<<numblocks,numthreads>>>(i,dev_R,dev_I,bx2,by2,bz2,kcos,ksin,l); //cudaCheckError("kick",i); } //Se aplica M_delta for(int i=0;i<nqubits.getValue();i++) { Uk_kernel<<<numblocks,numthreads>>>(i,dev_R,dev_I,bx2del,by2del,bz2del,kcosdel,ksindel,l); //cudaCheckError("kick",i); } //Se aplica U^-1 for(int 
i=0;i<nqubits.getValue();i++) { Ui_kernel<<<numblocks,numthreads>>>(i,(i+1)%nqubits.getValue(),dev_AR,dev_AI,icos,isin,l); //cudaCheckError("kick",i); } for(int i=0;i<nqubits.getValue();i++) { Uk_kernel<<<numblocks,numthreads>>>(i,dev_AR,dev_AI,bx2,by2,bz2,kcos,ksin,l); //cudaCheckError("ising",i); } cudaMemcpy(AR,dev_AR,l*sizeof(double),cudaMemcpyDeviceToHost); cudaMemcpy(AI,dev_AI,l*sizeof(double),cudaMemcpyDeviceToHost); cudaMemcpy(R,dev_R,l*sizeof(double),cudaMemcpyDeviceToHost); cudaMemcpy(I,dev_I,l*sizeof(double),cudaMemcpyDeviceToHost); for(int i=0;i<l;i++) { leftstate(i)=std::complex<double>(R[i],I[i]); rightstate(i)=std::complex<double>(AR[i],AI[i]); } cout<<std::norm(itpp::dot(itpp::conj(leftstate),rightstate))<<endl; } } // }}} if (option=="fidelity_measure2d") { // {{{ int xlen=x.getValue(); // int ylen=y.getValue(); double *AR=new double[l]; double *AI=new double[l]; double *dev_AR; double *dev_AI; cudaMalloc((void**)&dev_AR,l*sizeof(double)); cudaMalloc((void**)&dev_AI,l*sizeof(double)); double delta=deltav.getValue(); double icos=cos(ising.getValue()); double isin=sin(ising.getValue()); double theta=sqrt(bx.getValue()*bx.getValue()+by.getValue()*by.getValue()+bz.getValue()*bz.getValue()); double bx2=bx.getValue()/theta; double by2=by.getValue()/theta; double bz2=bz.getValue()/theta; double kcos=cos(theta); double ksin=sin(theta); double thetadel=delta; double bx2del=1.; double by2del=0.; double bz2del=0.; double kcosdel=cos(thetadel); double ksindel=sin(thetadel); int i_hor,i_ver; itpp::cvec leftstate(l); itpp::cvec rightstate(l); for(int i=0;i<l;i++) { leftstate(i)=std::complex<double>(R[i],I[i]); rightstate(i)=std::complex<double>(R[i],I[i]); } cout<<std::norm(itpp::dot(itpp::conj(leftstate),rightstate))<<endl; // devcpy<<<numblocks,numthreads>>>(l,dev_R,dev_I,dev_AR,dev_AI); for(int n=0;n<numt.getValue();n++) { //se aplica la U for(int i=0;i<nqubits.getValue();i++) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); Ui_kernel<<<numblocks,numthreads>>>(i,i_hor,dev_R,dev_I,icos,isin,l); Ui_kernel<<<numblocks,numthreads>>>(i,i_ver,dev_R,dev_I,icos,isin,l); // cudaCheckError("ising",i); } for(int i=0;i<nqubits.getValue();i++) { Uk_kernel<<<numblocks,numthreads>>>(i,dev_R,dev_I,bx2,by2,bz2,kcos,ksin,l); //cudaCheckError("kick",i); } //Se aplica M_delta for(int i=0;i<nqubits.getValue();i++) { Uk_kernel<<<numblocks,numthreads>>>(i,dev_R,dev_I,bx2del,by2del,bz2del,kcosdel,ksindel,l); //cudaCheckError("kick",i); } //Se aplica U^-1 for(int i=0;i<nqubits.getValue();i++) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); Ui_kernel<<<numblocks,numthreads>>>(i,i_hor,dev_AR,dev_AI,icos,isin,l); Ui_kernel<<<numblocks,numthreads>>>(i,i_ver,dev_AR,dev_AI,icos,isin,l); //cudaCheckError("kick",i); } for(int i=0;i<nqubits.getValue();i++) { Uk_kernel<<<numblocks,numthreads>>>(i,dev_AR,dev_AI,bx2,by2,bz2,kcos,ksin,l); //cudaCheckError("ising",i); } cudaMemcpy(AR,dev_AR,l*sizeof(double),cudaMemcpyDeviceToHost); cudaMemcpy(AI,dev_AI,l*sizeof(double),cudaMemcpyDeviceToHost); cudaMemcpy(R,dev_R,l*sizeof(double),cudaMemcpyDeviceToHost); cudaMemcpy(I,dev_I,l*sizeof(double),cudaMemcpyDeviceToHost); for(int i=0;i<l;i++) { leftstate(i)=std::complex<double>(R[i],I[i]); rightstate(i)=std::complex<double>(AR[i],AI[i]); } cout<<std::norm(itpp::dot(itpp::conj(leftstate),rightstate))<<endl; } } // }}} if (option=="QFT") { // {{{ for(int n=0;n<numt.getValue();n++) { for(int i=0;i<nqubits.getValue();i++) { QFT<<<numblocks,numthreads>>>(i,dev_R,dev_I,l,nqubits.getValue()); } } 
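// Kernel launches are asynchronous, so nothing above is guaranteed to have
// finished yet; the blocking cudaMemcpy calls below run on the default stream
// and therefore act as the synchronization point before the amplitudes are
// read back and printed.  If per-launch error reporting is wanted for the QFT
// kernels, a check along the lines of
//   cudaDeviceSynchronize();
//   cudaCheckError("QFT", 0);
// could be added here, mirroring what the other options do after their kernels
// (shown only as a sketch, not part of the original flow).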
cudaMemcpy(R,dev_R,l*sizeof(double),cudaMemcpyDeviceToHost); cudaMemcpy(I,dev_I,l*sizeof(double),cudaMemcpyDeviceToHost); for(int i=0;i<l;i++) { std::cout<<R[i]<<" i"<<I[i]<<endl; } } // }}} if (option=="correlation_measure_carlos") { // {{{ itpp::vec magnetic_field(3); magnetic_field(0)=bx.getValue(); magnetic_field(1)=by.getValue(); magnetic_field(2)=bz.getValue(); int qubits = nqubits.getValue(); int xlen=x.getValue(); double *sumdxR=new double[l]; double *sumdxI=new double[l]; double *dev_sumdxR; double *dev_sumdxI; cudaMalloc((void**)&dev_sumdxR,l*sizeof(double)); cudaMalloc((void**)&dev_sumdxI,l*sizeof(double)); double icos=cos(ising.getValue()); double isin=sin(ising.getValue()); itpp::cvec finalstate(l); itpp::cvec zerostate(l); double res; sumsigma_x<<<numblocks,numthreads>>>(dev_R,dev_I,dev_sumdxR,dev_sumdxI,nqubits.getValue(),l); cudaCheckError("sum_dx",1); cudaMemcpy(sumdxR,dev_sumdxR,l*sizeof(double),cudaMemcpyDeviceToHost); cudaMemcpy(sumdxI,dev_sumdxI,l*sizeof(double),cudaMemcpyDeviceToHost); for(int i=0;i<l;i++) { zerostate(i)=std::complex<double>(sumdxR[i],sumdxI[i]); } for(int n=0;n<numt.getValue();n++) { //se aplica M sumsigma_x<<<numblocks,numthreads>>>(dev_R,dev_I,dev_sumdxR,dev_sumdxI,nqubits.getValue(),l); //se aplica U^-1 evcuda::apply_floquet2d(dev_sumdxR,dev_sumdxI, magnetic_field, ising.getValue() ,qubits, xlen); //se aplica U evcuda::apply_floquet2d(dev_R, dev_I, magnetic_field, ising.getValue() ,qubits, xlen); // se calcula el producto punto itppcuda::cuda2itpp(finalstate,dev_sumdxR, dev_sumdxI); res=std::norm(itpp::dot(itpp::conj(zerostate),finalstate)); cout<<sqrt(res)/nqubits.getValue()<<endl; } } // }}} if (option=="correlation_measure2d") { // {{{ int xlen=x.getValue(); // int ylen=y.getValue(); // cout<<square<<endl; double *sumdxR=new double[l]; double *sumdxI=new double[l]; double *dev_sumdxR; double *dev_sumdxI; cudaMalloc((void**)&dev_sumdxR,l*sizeof(double)); cudaMalloc((void**)&dev_sumdxI,l*sizeof(double)); double *inR=new double[l]; double *inI=new double[l]; double *dev_inR; double *dev_inI; cudaMalloc((void**)&dev_inR,l*sizeof(double)); cudaMalloc((void**)&dev_inI,l*sizeof(double)); double bx2,by2,bz2,kcos,ksin,icos,isin; itpp::vec b(3); b(0)=bx.getValue(); b(1)=by.getValue(); b(2)=bz.getValue(); set_parameters(ising.getValue(),b,icos,isin,kcos,ksin,bx2,by2,bz2); itpp::cvec finalstate(l); itpp::cvec zerostate(l); double res; // double res,norm; int i_hor,i_ver; itpp::vec b_obs(3); b_obs(0)=1.; b_obs(1)=0.; b_obs(2)=0.; double cos_obs,sin_obs,bx_obs,by_obs,bz_obs; set_parameters(b_obs,cos_obs,sin_obs,bx_obs,by_obs,bz_obs); //sumsigma_x<<<numblocks,numthreads>>>(dev_R,dev_I,dev_inR,dev_inI,nqubits.getValue(),l); //OBSERBABLE devcpy<<<numblocks,numthreads>>>(l,dev_R,dev_I,dev_inR,dev_inI); for(int i=0;i<nqubits.getValue();i++) { Uk_kernel<<<numblocks,numthreads>>>(i,dev_inR,dev_inI,bx_obs,by_obs,bz_obs,cos_obs,sin_obs,l); //cudaCheckError("kick",i); } //cudaCheckError("sum_dx",1); for(int n=0;n<numt.getValue();n++) { //se aplica M //sumsigma_x<<<numblocks,numthreads>>>(dev_R,dev_I,dev_sumdxR,dev_sumdxI,nqubits.getValue(),l); //OBSERBABLE devcpy<<<numblocks,numthreads>>>(l,dev_R,dev_I,dev_sumdxR,dev_sumdxI); for(int i=0;i<nqubits.getValue();i++) { Uk_kernel<<<numblocks,numthreads>>>(i,dev_sumdxR,dev_sumdxI,bx_obs,by_obs,bz_obs,cos_obs,sin_obs,l); //cudaCheckError("kick",i); } cudaMemcpy(inR,dev_inR,l*sizeof(double),cudaMemcpyDeviceToHost); cudaMemcpy(inI,dev_inI,l*sizeof(double),cudaMemcpyDeviceToHost); 
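// dev_inR/dev_inI (copied back just above) hold the observable applied to the
// initial state and then pushed forward with the same Floquet step as
// dev_R/dev_I, while dev_sumdxR/dev_sumdxI (copied next) hold the observable
// applied to the current state.  The host-side overlap taken at the end of each
// pass is therefore <psi(0)| A^dagger U^{-n} A U^n |psi(0)>, the two-time
// correlation of A, obtained without ever applying the inverse map explicitly;
// here A appears to be the product of single-qubit rotations generated by
// b_obs = (1,0,0).  The square-lattice bonds below use periodic boundaries:
//   i_hor = (i+1)%xlen + (i/xlen)*xlen   // right neighbour within the row
//   i_ver = (i+xlen)%nqubits             // same column, next row (torus wrap)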
cudaMemcpy(sumdxR,dev_sumdxR,l*sizeof(double),cudaMemcpyDeviceToHost); cudaMemcpy(sumdxI,dev_sumdxI,l*sizeof(double),cudaMemcpyDeviceToHost); //se aplica U a in for(int i=0;i<nqubits.getValue();i++) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); Ui_kernel<<<numblocks,numthreads>>>(i,i_hor,dev_inR,dev_inI,icos,isin,l); Ui_kernel<<<numblocks,numthreads>>>(i,i_ver,dev_inR,dev_inI,icos,isin,l); //cudaCheckError("ising",i); } for(int i=0;i<nqubits.getValue();i++) { Uk_kernel<<<numblocks,numthreads>>>(i,dev_inR,dev_inI,bx2,by2,bz2,kcos,ksin,l); //cudaCheckError("kick",i); } //se aplica la U for(int i=0;i<nqubits.getValue();i++) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); Ui_kernel<<<numblocks,numthreads>>>(i,i_hor,dev_R,dev_I,icos,isin,l); Ui_kernel<<<numblocks,numthreads>>>(i,i_ver,dev_R,dev_I,icos,isin,l); //cudaCheckError("ising",i); } for(int i=0;i<nqubits.getValue();i++) { Uk_kernel<<<numblocks,numthreads>>>(i,dev_R,dev_I,bx2,by2,bz2,kcos,ksin,l); //cudaCheckError("kick",i); } for(int i=0;i<l;i++) { finalstate(i)=std::complex<double>(sumdxR[i],sumdxI[i]); zerostate(i)=std::complex<double>(inR[i],inI[i]); } res=std::norm(itpp::dot(itpp::conj(zerostate),finalstate)); cout<<sqrt(res)/nqubits.getValue()<<endl; } } // }}} if (option=="correlation_obsz") { // {{{ int xlen=x.getValue(); // int ylen=y.getValue(); // cout<<square<<endl; double *sumdxR=new double[l]; double *sumdxI=new double[l]; double *dev_sumdxR; double *dev_sumdxI; cudaMalloc((void**)&dev_sumdxR,l*sizeof(double)); cudaMalloc((void**)&dev_sumdxI,l*sizeof(double)); double *inR=new double[l]; double *inI=new double[l]; double *dev_inR; double *dev_inI; cudaMalloc((void**)&dev_inR,l*sizeof(double)); cudaMalloc((void**)&dev_inI,l*sizeof(double)); double icos=cos(ising.getValue()); double isin=sin(ising.getValue()); double theta=sqrt(bx.getValue()*bx.getValue()+by.getValue()*by.getValue()+bz.getValue()*bz.getValue()); double kcos=cos(theta); double ksin=sin(theta); if(theta==0) { theta=1.; } double bx2=bx.getValue()/theta; double by2=by.getValue()/theta; double bz2=bz.getValue()/theta; itpp::cvec finalstate(l); itpp::cvec zerostate(l); double res; // double res,norm; int i_hor,i_ver; sumsigma_z<<<numblocks,numthreads>>>(dev_R,dev_I,dev_inR,dev_inI,nqubits.getValue(),l); //cudaCheckError("sum_dx",1); for(int n=0;n<numt.getValue();n++) { //se aplica M sumsigma_z<<<numblocks,numthreads>>>(dev_R,dev_I,dev_sumdxR,dev_sumdxI,nqubits.getValue(),l); cudaMemcpy(inR,dev_inR,l*sizeof(double),cudaMemcpyDeviceToHost); cudaMemcpy(inI,dev_inI,l*sizeof(double),cudaMemcpyDeviceToHost); cudaMemcpy(sumdxR,dev_sumdxR,l*sizeof(double),cudaMemcpyDeviceToHost); cudaMemcpy(sumdxI,dev_sumdxI,l*sizeof(double),cudaMemcpyDeviceToHost); //se aplica U a in for(int i=0;i<nqubits.getValue();i++) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); Ui_kernel<<<numblocks,numthreads>>>(i,i_hor,dev_inR,dev_inI,icos,isin,l); Ui_kernel<<<numblocks,numthreads>>>(i,i_ver,dev_inR,dev_inI,icos,isin,l); //cudaCheckError("ising",i); } for(int i=0;i<nqubits.getValue();i++) { Uk_kernel<<<numblocks,numthreads>>>(i,dev_inR,dev_inI,bx2,by2,bz2,kcos,ksin,l); //cudaCheckError("kick",i); } //se aplica la U for(int i=0;i<nqubits.getValue();i++) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); Ui_kernel<<<numblocks,numthreads>>>(i,i_hor,dev_R,dev_I,icos,isin,l); Ui_kernel<<<numblocks,numthreads>>>(i,i_ver,dev_R,dev_I,icos,isin,l); //cudaCheckError("ising",i); } for(int i=0;i<nqubits.getValue();i++) { 
Uk_kernel<<<numblocks,numthreads>>>(i,dev_R,dev_I,bx2,by2,bz2,kcos,ksin,l); //cudaCheckError("kick",i); } for(int i=0;i<l;i++) { finalstate(i)=std::complex<double>(sumdxR[i],sumdxI[i]); zerostate(i)=std::complex<double>(inR[i],inI[i]); } res=std::norm(itpp::dot(itpp::conj(zerostate),finalstate)); cout<<sqrt(res)/nqubits.getValue()<<endl; } } // }}} if (option=="correlation_obsy") { // {{{ int xlen=x.getValue(); // int ylen=y.getValue(); // cout<<square<<endl; double *sumdxR=new double[l]; double *sumdxI=new double[l]; double *dev_sumdxR; double *dev_sumdxI; cudaMalloc((void**)&dev_sumdxR,l*sizeof(double)); cudaMalloc((void**)&dev_sumdxI,l*sizeof(double)); double *inR=new double[l]; double *inI=new double[l]; double *dev_inR; double *dev_inI; cudaMalloc((void**)&dev_inR,l*sizeof(double)); cudaMalloc((void**)&dev_inI,l*sizeof(double)); double icos=cos(ising.getValue()); double isin=sin(ising.getValue()); double theta=sqrt(bx.getValue()*bx.getValue()+by.getValue()*by.getValue()+bz.getValue()*bz.getValue()); double kcos=cos(theta); double ksin=sin(theta); if(theta==0) { theta=1.; } double bx2=bx.getValue()/theta; double by2=by.getValue()/theta; double bz2=bz.getValue()/theta; itpp::cvec finalstate(l); itpp::cvec zerostate(l); double res; // double res,norm; int i_hor,i_ver; sigma_xsigma_y<<<numblocks,numthreads>>>(dev_R,dev_I,dev_sumdxR,dev_sumdxI,1,3,l); //cudaCheckError("sum_dx",1); for(int n=0;n<numt.getValue();n++) { //se aplica M sigma_xsigma_y<<<numblocks,numthreads>>>(dev_R,dev_I,dev_sumdxR,dev_sumdxI,1,3,l); cudaMemcpy(inR,dev_inR,l*sizeof(double),cudaMemcpyDeviceToHost); cudaMemcpy(inI,dev_inI,l*sizeof(double),cudaMemcpyDeviceToHost); cudaMemcpy(sumdxR,dev_sumdxR,l*sizeof(double),cudaMemcpyDeviceToHost); cudaMemcpy(sumdxI,dev_sumdxI,l*sizeof(double),cudaMemcpyDeviceToHost); //se aplica U a in for(int i=0;i<nqubits.getValue();i++) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); Ui_kernel<<<numblocks,numthreads>>>(i,i_hor,dev_inR,dev_inI,icos,isin,l); Ui_kernel<<<numblocks,numthreads>>>(i,i_ver,dev_inR,dev_inI,icos,isin,l); //cudaCheckError("ising",i); } for(int i=0;i<nqubits.getValue();i++) { Uk_kernel<<<numblocks,numthreads>>>(i,dev_inR,dev_inI,bx2,by2,bz2,kcos,ksin,l); //cudaCheckError("kick",i); } //se aplica la U for(int i=0;i<nqubits.getValue();i++) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); Ui_kernel<<<numblocks,numthreads>>>(i,i_hor,dev_R,dev_I,icos,isin,l); Ui_kernel<<<numblocks,numthreads>>>(i,i_ver,dev_R,dev_I,icos,isin,l); //cudaCheckError("ising",i); } for(int i=0;i<nqubits.getValue();i++) { Uk_kernel<<<numblocks,numthreads>>>(i,dev_R,dev_I,bx2,by2,bz2,kcos,ksin,l); //cudaCheckError("kick",i); } for(int i=0;i<l;i++) { finalstate(i)=std::complex<double>(sumdxR[i],sumdxI[i]); zerostate(i)=std::complex<double>(inR[i],inI[i]); } res=std::norm(itpp::dot(itpp::conj(zerostate),finalstate)); cout<<sqrt(res)/nqubits.getValue()<<endl; } } // }}} if (option=="color_map2d_no") { // {{{ int xlen=x.getValue(); // cout<<square<<endl; double *sumdxR=new double[l]; double *sumdxI=new double[l]; double *dev_sumdxR; double *dev_sumdxI; cudaMalloc((void**)&dev_sumdxR,l*sizeof(double)); cudaMalloc((void**)&dev_sumdxI,l*sizeof(double)); double icos=cos(ising.getValue()); double isin=sin(ising.getValue()); itpp::cvec finalstate(l); itpp::cvec zerostate(l); double res; int i_hor,i_ver; double pass; int tgo,tback,cont; sumsigma_x<<<numblocks,numthreads>>>(dev_R,dev_I,dev_sumdxR,dev_sumdxI,nqubits.getValue(),l); 
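// color_map2d_no scans the (bx, bz) plane on a 0.05 grid.  For every grid point
// the stored initial state is re-uploaded, evolved forward tgo Floquet steps,
// the sumsigma_x observable is applied, and the result is evolved backward
// tback steps before taking its overlap with the reference state in zerostate.
// The while loop keeps extending the forward time (tgo = cont, tback = 70+cont)
// until two consecutive correlator values differ by no more than 1e-3, and the
// converged value plus the number of extra iterations (cont-3) is printed.
// When both fields vanish, theta is reset to 1 only after kcos/ksin have been
// computed, so the kick reduces to the identity while bx2/bz2 avoid a 0/0.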
cudaMemcpy(sumdxR,dev_sumdxR,l*sizeof(double),cudaMemcpyDeviceToHost); cudaMemcpy(sumdxI,dev_sumdxI,l*sizeof(double),cudaMemcpyDeviceToHost); for(int i=0;i<l;i++) { zerostate(i)=std::complex<double>(sumdxR[i],sumdxI[i]); } for(double bxi=0.0;bxi<1.5;bxi+=0.05) { for(double bzi=0.0;bzi<1.5;bzi+=0.05) { pass=10.; cont=3; res=0.; double theta=sqrt((bxi*bxi)+(bzi*bzi)); double kcos=cos(theta); double ksin=sin(theta); if(theta==0) { theta=1.; } double bx2=bxi/theta; double by2=0; double bz2=bzi/theta; cudaMemcpy(dev_R,R,l*sizeof(double),cudaMemcpyHostToDevice); cudaMemcpy(dev_I,I,l*sizeof(double),cudaMemcpyHostToDevice); tgo=70; tback=70; while(abs(pass-res)>0.001) { // cout<<"pass "<<abs(pass-res)<<endl; pass=res; for(int n=0;n<tgo;n++) { for(int i=0;i<nqubits.getValue();i++) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); Ui_kernel<<<numblocks,numthreads>>>(i,i_hor,dev_R,dev_I,icos,isin,l); Ui_kernel<<<numblocks,numthreads>>>(i,i_ver,dev_R,dev_I,icos,isin,l); //cudaCheckError("ising",i); } for(int i=0;i<nqubits.getValue();i++) { Uk_kernel<<<numblocks,numthreads>>>(i,dev_R,dev_I,bx2,by2,bz2,kcos,ksin,l); //cudaCheckError("kick",i); } } //se aplica M sumsigma_x<<<numblocks,numthreads>>>(dev_R,dev_I,dev_sumdxR,dev_sumdxI,nqubits.getValue(),l); //cudaCheckError("kick",00); //se aplica U^-1 for(int n=0;n<tback;n++) { for(int i=0;i<nqubits.getValue();i++) { Uk_kernel<<<numblocks,numthreads>>>(i,dev_sumdxR,dev_sumdxI,-1*bx2,-1*by2,-1*bz2,kcos,ksin,l); // cudaCheckError("kick",i); } for(int i=0;i<nqubits.getValue();i++) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); //cout << i << " " << i_hor << " " << i_ver << endl; Ui_kernel<<<numblocks,numthreads>>>(i,i_hor,dev_sumdxR,dev_sumdxI,icos,-1*isin,l); Ui_kernel<<<numblocks,numthreads>>>(i,i_ver,dev_sumdxR,dev_sumdxI,icos,-1*isin,l); //cudaCheckError("ising",i); } } cudaMemcpy(sumdxR,dev_sumdxR,l*sizeof(double),cudaMemcpyDeviceToHost); cudaMemcpy(sumdxI,dev_sumdxI,l*sizeof(double),cudaMemcpyDeviceToHost); for(int i=0;i<l;i++) { finalstate(i)=std::complex<double>(sumdxR[i],sumdxI[i]); } res=sqrt(std::norm(itpp::dot(itpp::conj(zerostate),finalstate)))/nqubits.getValue(); tgo=cont; tback=70+cont; cont++; } cout<< bxi << " " << bzi <<" "<<res<<" "<<cont-3<<endl; } } } // }}} if (option=="color_map2d_stdev") { // {{{ int xlen=x.getValue(); // cout<<square<<endl; double *sumdxR=new double[l]; double *sumdxI=new double[l]; double *dev_sumdxR; double *dev_sumdxI; cudaMalloc((void**)&dev_sumdxR,l*sizeof(double)); cudaMalloc((void**)&dev_sumdxI,l*sizeof(double)); double icos=cos(ising.getValue()); double isin=sin(ising.getValue()); itpp::cvec finalstate(l); itpp::cvec zerostate(l); itpp::vec correlations(30); double res; int i_hor,i_ver; sumsigma_x<<<numblocks,numthreads>>>(dev_R,dev_I,dev_sumdxR,dev_sumdxI,nqubits.getValue(),l); cudaMemcpy(sumdxR,dev_sumdxR,l*sizeof(double),cudaMemcpyDeviceToHost); cudaMemcpy(sumdxI,dev_sumdxI,l*sizeof(double),cudaMemcpyDeviceToHost); for(int i=0;i<l;i++) { zerostate(i)=std::complex<double>(sumdxR[i],sumdxI[i]); } for(double bxi=0.0;bxi<=1.5;bxi+=0.01) { for(double bzi=0.0;bzi<=0;bzi+=0.05) { double theta=sqrt((bxi*bxi)+(bzi*bzi)); double kcos=cos(theta); double ksin=sin(theta); if(theta==0) { theta=1.; } double bx2=bxi/theta; double by2=0; double bz2=bzi/theta; cudaMemcpy(dev_R,R,l*sizeof(double),cudaMemcpyHostToDevice); cudaMemcpy(dev_I,I,l*sizeof(double),cudaMemcpyHostToDevice); for(int n=0;n<70;n++) { for(int i=0;i<nqubits.getValue();i++) { i_hor=(i+1)%xlen+(i/xlen)*xlen; 
i_ver=(i+xlen)%nqubits.getValue(); Ui_kernel<<<numblocks,numthreads>>>(i,i_hor,dev_R,dev_I,icos,isin,l); Ui_kernel<<<numblocks,numthreads>>>(i,i_ver,dev_R,dev_I,icos,isin,l); //cudaCheckError("ising",i); } for(int i=0;i<nqubits.getValue();i++) { Uk_kernel<<<numblocks,numthreads>>>(i,dev_R,dev_I,bx2,by2,bz2,kcos,ksin,l); //cudaCheckError("kick",i); } } for(int in=0;in<30;in++) { for(int i=0;i<nqubits.getValue();i++) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); Ui_kernel<<<numblocks,numthreads>>>(i,i_hor,dev_R,dev_I,icos,isin,l); Ui_kernel<<<numblocks,numthreads>>>(i,i_ver,dev_R,dev_I,icos,isin,l); //cudaCheckError("ising",i); } for(int i=0;i<nqubits.getValue();i++) { Uk_kernel<<<numblocks,numthreads>>>(i,dev_R,dev_I,bx2,by2,bz2,kcos,ksin,l); //cudaCheckError("kick",i); } sumsigma_x<<<numblocks,numthreads>>>(dev_R,dev_I,dev_sumdxR,dev_sumdxI,nqubits.getValue(),l); for(int back=0;back<71+in;back++) { for(int i=0;i<nqubits.getValue();i++) { Uk_kernel<<<numblocks,numthreads>>>(i,dev_sumdxR,dev_sumdxI,-1*bx2,-1*by2,-1*bz2,kcos,ksin,l); // cudaCheckError("kick",i); } for(int i=0;i<nqubits.getValue();i++) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); //cout << i << " " << i_hor << " " << i_ver << endl; Ui_kernel<<<numblocks,numthreads>>>(i,i_hor,dev_sumdxR,dev_sumdxI,icos,-1*isin,l); Ui_kernel<<<numblocks,numthreads>>>(i,i_ver,dev_sumdxR,dev_sumdxI,icos,-1*isin,l); //cudaCheckError("ising",i); } } cudaMemcpy(sumdxR,dev_sumdxR,l*sizeof(double),cudaMemcpyDeviceToHost); cudaMemcpy(sumdxI,dev_sumdxI,l*sizeof(double),cudaMemcpyDeviceToHost); for(int i=0;i<l;i++) { finalstate(i)=std::complex<double>(sumdxR[i],sumdxI[i]); } res=sqrt(std::norm(itpp::dot(itpp::conj(zerostate),finalstate)))/nqubits.getValue(); correlations(in)=res; } cout<<bxi<<" "<<bzi<<" "<<itpp::mean(correlations)<<" "<<std::sqrt(itpp::variance(correlations))<<endl; } } } // }}} if (option=="color_map2d_stdev_fast") { // {{{ int xlen=x.getValue(); // cout<<square<<endl; double *sumdxR=new double[l]; double *sumdxI=new double[l]; double *dev_sumdxR; double *dev_sumdxI; cudaMalloc((void**)&dev_sumdxR,l*sizeof(double)); cudaMalloc((void**)&dev_sumdxI,l*sizeof(double)); double *inR=new double[l]; double *inI=new double[l]; double *dev_inR; double *dev_inI; cudaMalloc((void**)&dev_inR,l*sizeof(double)); cudaMalloc((void**)&dev_inI,l*sizeof(double)); itpp::cvec finalstate(l); itpp::cvec zerostate(l); itpp::vec correlations(30); double res,bx2,by2,bz2,kcos,ksin,icos,isin,ising; int i_hor,i_ver; itpp::vec b_obs(3); b_obs(0)=1.; b_obs(1)=0.; b_obs(2)=0.; double cos_obs,sin_obs,bx_obs,by_obs,bz_obs; set_parameters(b_obs,cos_obs,sin_obs,bx_obs,by_obs,bz_obs); double bxi,bzi; for(int bxii=0;bxii<=360;bxii+=1) { for(int bzii=0;bzii<=180;bzii+=1) { bxi=bxii*itpp::pi/720.; bzi=bzii*itpp::pi/720.; itpp::vec b(3); b(0)=bxi; b(1)=0.; b(2)=0.; ising=bzi; set_parameters(ising,b,icos,isin,kcos,ksin,bx2,by2,bz2); cudaMemcpy(dev_R,R,l*sizeof(double),cudaMemcpyHostToDevice); cudaMemcpy(dev_I,I,l*sizeof(double),cudaMemcpyHostToDevice); //OBSERBABLE sumsigma_x<<<numblocks,numthreads>>>(dev_R,dev_I,dev_inR,dev_inI,nqubits.getValue(),l); // devcpy<<<numblocks,numthreads>>>(l,dev_R,dev_I,dev_inR,dev_inI); // for(int i=0;i<nqubits.getValue();i++) { // Uk_kernel<<<numblocks,numthreads>>>(i,dev_inR,dev_inI,bx_obs,by_obs,bz_obs,cos_obs,sin_obs,l); // //cudaCheckError("kick",i); // } // for(int n=0;n<70;n++) { //se aplica U a in for(int i=0;i<nqubits.getValue();i++) { i_hor=(i+1)%xlen+(i/xlen)*xlen; 
i_ver=(i+xlen)%nqubits.getValue(); Ui_kernel<<<numblocks,numthreads>>>(i,i_hor,dev_inR,dev_inI,icos,isin,l); Ui_kernel<<<numblocks,numthreads>>>(i,i_ver,dev_inR,dev_inI,icos,isin,l); //Ui_kernel<<<numblocks,numthreads>>>(i,(i+1)%nqubits.getValue(),dev_inR,dev_inI,icos,isin,l); cudaCheckError("ising",i); } for(int i=0;i<nqubits.getValue();i++) { Uk_kernel<<<numblocks,numthreads>>>(i,dev_inR,dev_inI,bx2,by2,bz2,kcos,ksin,l); //cudaCheckError("kick",i); } //se aplica la U for(int i=0;i<nqubits.getValue();i++) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); Ui_kernel<<<numblocks,numthreads>>>(i,i_hor,dev_R,dev_I,icos,isin,l); Ui_kernel<<<numblocks,numthreads>>>(i,i_ver,dev_R,dev_I,icos,isin,l); //Ui_kernel<<<numblocks,numthreads>>>(i,(i+1)%nqubits.getValue(),dev_R,dev_I,icos,isin,l); //cudaCheckError("ising",i); } for(int i=0;i<nqubits.getValue();i++) { Uk_kernel<<<numblocks,numthreads>>>(i,dev_R,dev_I,bx2,by2,bz2,kcos,ksin,l); //cudaCheckError("kick",i); } } for(int in=0;in<30;in++) { sumsigma_x<<<numblocks,numthreads>>>(dev_R,dev_I,dev_sumdxR,dev_sumdxI,nqubits.getValue(),l); //OBSERVABLE // devcpy<<<numblocks,numthreads>>>(l,dev_R,dev_I,dev_sumdxR,dev_sumdxI); // for(int i=0;i<nqubits.getValue();i++) { // Uk_kernel<<<numblocks,numthreads>>>(i,dev_sumdxR,dev_sumdxI,bx_obs,by_obs,bz_obs,cos_obs,sin_obs,l); // //cudaCheckError("kick",i); // } cudaMemcpy(sumdxR,dev_sumdxR,l*sizeof(double),cudaMemcpyDeviceToHost); cudaMemcpy(sumdxI,dev_sumdxI,l*sizeof(double),cudaMemcpyDeviceToHost); cudaMemcpy(inR,dev_inR,l*sizeof(double),cudaMemcpyDeviceToHost); cudaMemcpy(inI,dev_inI,l*sizeof(double),cudaMemcpyDeviceToHost); //se aplica U a in for(int i=0;i<nqubits.getValue();i++) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); Ui_kernel<<<numblocks,numthreads>>>(i,i_hor,dev_inR,dev_inI,icos,isin,l); Ui_kernel<<<numblocks,numthreads>>>(i,i_ver,dev_inR,dev_inI,icos,isin,l); //Ui_kernel<<<numblocks,numthreads>>>(i,(i+1)%nqubits.getValue(),dev_inR,dev_inI,icos,isin,l); //cudaCheckError("ising",i); } for(int i=0;i<nqubits.getValue();i++) { Uk_kernel<<<numblocks,numthreads>>>(i,dev_inR,dev_inI,bx2,by2,bz2,kcos,ksin,l); //cudaCheckError("kick",i); } //se aplica la U for(int i=0;i<nqubits.getValue();i++) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); Ui_kernel<<<numblocks,numthreads>>>(i,i_hor,dev_R,dev_I,icos,isin,l); Ui_kernel<<<numblocks,numthreads>>>(i,i_ver,dev_R,dev_I,icos,isin,l); //Ui_kernel<<<numblocks,numthreads>>>(i,(i+1)%nqubits.getValue(),dev_R,dev_I,icos,isin,l); //cudaCheckError("ising",i); } for(int i=0;i<nqubits.getValue();i++) { Uk_kernel<<<numblocks,numthreads>>>(i,dev_R,dev_I,bx2,by2,bz2,kcos,ksin,l); //cudaCheckError("kick",i); } for(int i=0;i<l;i++) { finalstate(i)=std::complex<double>(sumdxR[i],sumdxI[i]); zerostate(i)=std::complex<double>(inR[i],inI[i]); } res=sqrt(std::norm(itpp::dot(itpp::conj(zerostate),finalstate)))/nqubits.getValue(); //res=sqrt(std::norm(itpp::dot(itpp::conj(zerostate),finalstate))); correlations(in)=res; } cout<<bxi<<" "<<bzi<<" "<<itpp::mean(correlations)<<" "<<std::sqrt(itpp::variance(correlations))<<endl; } } } // }}} if (option=="color_map2d_stdev_block") { // {{{ int xlen=x.getValue(); itpp::cmat eigenvectors1=evcuda::invariant_vectors(nqubits.getValue(),x.getValue(),1,1,0); itpp::cmat eigenvectors2=evcuda::invariant_vectors(nqubits.getValue(),x.getValue(),1,2,0); int rcont1=eigenvectors1.rows(); int rcont2=eigenvectors2.rows(); itpp::cvec small_state=itppextmath::RandomState(rcont1); itpp::cvec state 
= itpp::transpose(eigenvectors1)*small_state; small_state=itppextmath::RandomState(rcont2); state=state+itpp::transpose(eigenvectors2)*small_state; state=state/itpp::norm(state); evcuda::itpp2cuda(state,dev_R,dev_I); // cout<<square<<endl; double *sumdxR=new double[l]; double *sumdxI=new double[l]; double *dev_sumdxR; double *dev_sumdxI; cudaMalloc((void**)&dev_sumdxR,l*sizeof(double)); cudaMalloc((void**)&dev_sumdxI,l*sizeof(double)); double *inR=new double[l]; double *inI=new double[l]; double *dev_inR; double *dev_inI; cudaMalloc((void**)&dev_inR,l*sizeof(double)); cudaMalloc((void**)&dev_inI,l*sizeof(double)); itpp::cvec finalstate(l); itpp::cvec zerostate(l); itpp::vec correlations(30); double res,bx2,by2,bz2,kcos,ksin,icos,isin,ising; int i_hor,i_ver; itpp::vec b_obs(3); b_obs(0)=1./sqrt(3); b_obs(1)=1./sqrt(3); b_obs(2)=1./sqrt(3); double cos_obs,sin_obs,bx_obs,by_obs,bz_obs; set_parameters(b_obs,cos_obs,sin_obs,bx_obs,by_obs,bz_obs); for(double bxi=0.0;bxi<=itpp::pi/2;bxi+=itpp::pi/80) { for(double bzi=0.0;bzi<=itpp::pi/2;bzi+=itpp::pi/80) { itpp::vec b(3); b(0)=bxi; b(1)=0.; b(2)=0.; ising=bzi; set_parameters(ising,b,icos,isin,kcos,ksin,bx2,by2,bz2); evcuda::itpp2cuda(state,dev_R,dev_I); //OBSERBABLE sumsigma_x<<<numblocks,numthreads>>>(dev_R,dev_I,dev_inR,dev_inI,nqubits.getValue(),l); //devcpy<<<numblocks,numthreads>>>(l,dev_R,dev_I,dev_inR,dev_inI); // for(int i=0;i<nqubits.getValue();i++) { // Uk_kernel<<<numblocks,numthreads>>>(i,dev_inR,dev_inI,bx_obs,by_obs,bz_obs,cos_obs,sin_obs,l); // //cudaCheckError("kick",i); // } for(int n=0;n<70;n++) { //se aplica U a in for(int i=0;i<nqubits.getValue();i++) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); Ui_kernel<<<numblocks,numthreads>>>(i,i_hor,dev_inR,dev_inI,icos,isin,l); Ui_kernel<<<numblocks,numthreads>>>(i,i_ver,dev_inR,dev_inI,icos,isin,l); //CHAIN //Ui_kernel<<<numblocks,numthreads>>>(i,(i+1)%nqubits.getValue(),dev_inR,dev_inI,icos,isin,l); cudaCheckError("ising",i); } for(int i=0;i<nqubits.getValue();i++) { Uk_kernel<<<numblocks,numthreads>>>(i,dev_inR,dev_inI,bx2,by2,bz2,kcos,ksin,l); //cudaCheckError("kick",i); } //se aplica la U for(int i=0;i<nqubits.getValue();i++) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); Ui_kernel<<<numblocks,numthreads>>>(i,i_hor,dev_R,dev_I,icos,isin,l); Ui_kernel<<<numblocks,numthreads>>>(i,i_ver,dev_R,dev_I,icos,isin,l); //CHAIN //Ui_kernel<<<numblocks,numthreads>>>(i,(i+1)%nqubits.getValue(),dev_R,dev_I,icos,isin,l); //cudaCheckError("ising",i); } for(int i=0;i<nqubits.getValue();i++) { Uk_kernel<<<numblocks,numthreads>>>(i,dev_R,dev_I,bx2,by2,bz2,kcos,ksin,l); //cudaCheckError("kick",i); } } for(int in=0;in<1;in++) { sumsigma_x<<<numblocks,numthreads>>>(dev_R,dev_I,dev_sumdxR,dev_sumdxI,nqubits.getValue(),l); //OBSERVABLE // devcpy<<<numblocks,numthreads>>>(l,dev_R,dev_I,dev_sumdxR,dev_sumdxI); // for(int i=0;i<nqubits.getValue();i++) { // Uk_kernel<<<numblocks,numthreads>>>(i,dev_sumdxR,dev_sumdxI,bx_obs,by_obs,bz_obs,cos_obs,sin_obs,l); // //cudaCheckError("kick",i); // } cudaMemcpy(sumdxR,dev_sumdxR,l*sizeof(double),cudaMemcpyDeviceToHost); cudaMemcpy(sumdxI,dev_sumdxI,l*sizeof(double),cudaMemcpyDeviceToHost); cudaMemcpy(inR,dev_inR,l*sizeof(double),cudaMemcpyDeviceToHost); cudaMemcpy(inI,dev_inI,l*sizeof(double),cudaMemcpyDeviceToHost); //se aplica U a in for(int i=0;i<nqubits.getValue();i++) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); Ui_kernel<<<numblocks,numthreads>>>(i,i_hor,dev_inR,dev_inI,icos,isin,l); 
Ui_kernel<<<numblocks,numthreads>>>(i,i_ver,dev_inR,dev_inI,icos,isin,l); //CHAIN //Ui_kernel<<<numblocks,numthreads>>>(i,(i+1)%nqubits.getValue(),dev_inR,dev_inI,icos,isin,l); //cudaCheckError("ising",i); } for(int i=0;i<nqubits.getValue();i++) { Uk_kernel<<<numblocks,numthreads>>>(i,dev_inR,dev_inI,bx2,by2,bz2,kcos,ksin,l); //cudaCheckError("kick",i); } //se aplica la U for(int i=0;i<nqubits.getValue();i++) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); Ui_kernel<<<numblocks,numthreads>>>(i,i_hor,dev_R,dev_I,icos,isin,l); Ui_kernel<<<numblocks,numthreads>>>(i,i_ver,dev_R,dev_I,icos,isin,l); //CHAIN Ui_kernel<<<numblocks,numthreads>>>(i,(i+1)%nqubits.getValue(),dev_R,dev_I,icos,isin,l); //cudaCheckError("ising",i); } for(int i=0;i<nqubits.getValue();i++) { Uk_kernel<<<numblocks,numthreads>>>(i,dev_R,dev_I,bx2,by2,bz2,kcos,ksin,l); //cudaCheckError("kick",i); } for(int i=0;i<l;i++) { finalstate(i)=std::complex<double>(sumdxR[i],sumdxI[i]); zerostate(i)=std::complex<double>(inR[i],inI[i]); } res=sqrt(std::norm(itpp::dot(itpp::conj(zerostate),finalstate)))/nqubits.getValue(); correlations(in)=res; } cout<<bxi<<" "<<bzi<<" "<<itpp::mean(correlations)<<" "<<std::sqrt(itpp::variance(correlations))<<endl; } } } // }}} if (option=="color_map2d_stdev_fast_obsz") { // {{{ int xlen=x.getValue(); // cout<<square<<endl; double *sumdxR=new double[l]; double *sumdxI=new double[l]; double *dev_sumdxR; double *dev_sumdxI; cudaMalloc((void**)&dev_sumdxR,l*sizeof(double)); cudaMalloc((void**)&dev_sumdxI,l*sizeof(double)); double *inR=new double[l]; double *inI=new double[l]; double *dev_inR; double *dev_inI; cudaMalloc((void**)&dev_inR,l*sizeof(double)); cudaMalloc((void**)&dev_inI,l*sizeof(double)); itpp::cvec finalstate(l); itpp::cvec zerostate(l); itpp::vec correlations(30); double res,bx2,by2,bz2,kcos,ksin,ising,icos,isin; int i_hor,i_ver; for(double bxi=0.;bxi<=3.2;bxi+=0.05) { for(double bzi=0.;bzi<=3.2;bzi+=0.05) { itpp::vec b(3); b(0)=bxi; b(1)=0.; b(2)=0.; ising=bzi; set_parameters(ising,b,icos,isin,kcos,ksin,bx2,by2,bz2); cudaMemcpy(dev_R,R,l*sizeof(double),cudaMemcpyHostToDevice); cudaMemcpy(dev_I,I,l*sizeof(double),cudaMemcpyHostToDevice); sumsigma_z<<<numblocks,numthreads>>>(dev_R,dev_I,dev_inR,dev_inI,nqubits.getValue(),l); for(int n=0;n<70;n++) { //se aplica U a in for(int i=0;i<nqubits.getValue();i++) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); Ui_kernel<<<numblocks,numthreads>>>(i,i_hor,dev_inR,dev_inI,icos,isin,l); Ui_kernel<<<numblocks,numthreads>>>(i,i_ver,dev_inR,dev_inI,icos,isin,l); cudaCheckError("ising",i); } for(int i=0;i<nqubits.getValue();i++) { Uk_kernel<<<numblocks,numthreads>>>(i,dev_inR,dev_inI,bx2,by2,bz2,kcos,ksin,l); //cudaCheckError("kick",i); } //se aplica la U for(int i=0;i<nqubits.getValue();i++) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); Ui_kernel<<<numblocks,numthreads>>>(i,i_hor,dev_R,dev_I,icos,isin,l); Ui_kernel<<<numblocks,numthreads>>>(i,i_ver,dev_R,dev_I,icos,isin,l); //cudaCheckError("ising",i); } for(int i=0;i<nqubits.getValue();i++) { Uk_kernel<<<numblocks,numthreads>>>(i,dev_R,dev_I,bx2,by2,bz2,kcos,ksin,l); //cudaCheckError("kick",i); } } for(int in=0;in<30;in++) { sumsigma_z<<<numblocks,numthreads>>>(dev_R,dev_I,dev_sumdxR,dev_sumdxI,nqubits.getValue(),l); cudaMemcpy(sumdxR,dev_sumdxR,l*sizeof(double),cudaMemcpyDeviceToHost); cudaMemcpy(sumdxI,dev_sumdxI,l*sizeof(double),cudaMemcpyDeviceToHost); cudaMemcpy(inR,dev_inR,l*sizeof(double),cudaMemcpyDeviceToHost); 
cudaMemcpy(inI,dev_inI,l*sizeof(double),cudaMemcpyDeviceToHost); //se aplica U a in for(int i=0;i<nqubits.getValue();i++) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); Ui_kernel<<<numblocks,numthreads>>>(i,i_hor,dev_inR,dev_inI,icos,isin,l); Ui_kernel<<<numblocks,numthreads>>>(i,i_ver,dev_inR,dev_inI,icos,isin,l); //cudaCheckError("ising",i); } for(int i=0;i<nqubits.getValue();i++) { Uk_kernel<<<numblocks,numthreads>>>(i,dev_inR,dev_inI,bx2,by2,bz2,kcos,ksin,l); //cudaCheckError("kick",i); } //se aplica la U for(int i=0;i<nqubits.getValue();i++) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); Ui_kernel<<<numblocks,numthreads>>>(i,i_hor,dev_R,dev_I,icos,isin,l); Ui_kernel<<<numblocks,numthreads>>>(i,i_ver,dev_R,dev_I,icos,isin,l); //cudaCheckError("ising",i); } for(int i=0;i<nqubits.getValue();i++) { Uk_kernel<<<numblocks,numthreads>>>(i,dev_R,dev_I,bx2,by2,bz2,kcos,ksin,l); //cudaCheckError("kick",i); } for(int i=0;i<l;i++) { finalstate(i)=std::complex<double>(sumdxR[i],sumdxI[i]); zerostate(i)=std::complex<double>(inR[i],inI[i]); } res=sqrt(std::norm(itpp::dot(itpp::conj(zerostate),finalstate)))/nqubits.getValue(); correlations(in)=res; } cout<<bxi<<" "<<bzi<<" "<<itpp::mean(correlations)<<" "<<std::sqrt(itpp::variance(correlations))<<endl; } } } // }}} if (option=="color_map2d_stdev_fast_obsy") { // {{{ int xlen=x.getValue(); // cout<<square<<endl; double *sumdxR=new double[l]; double *sumdxI=new double[l]; double *dev_sumdxR; double *dev_sumdxI; cudaMalloc((void**)&dev_sumdxR,l*sizeof(double)); cudaMalloc((void**)&dev_sumdxI,l*sizeof(double)); double *inR=new double[l]; double *inI=new double[l]; double *dev_inR; double *dev_inI; cudaMalloc((void**)&dev_inR,l*sizeof(double)); cudaMalloc((void**)&dev_inI,l*sizeof(double)); itpp::cvec finalstate(l); itpp::cvec zerostate(l); itpp::vec correlations(30); double res,bx2,by2,bz2,kcos,ksin,ising,icos,isin; int i_hor,i_ver; for(double bxi=0.;bxi<=3.2;bxi+=0.05) { for(double bzi=0.;bzi<=3.2;bzi+=0.05) { itpp::vec b(3); b(0)=bxi; b(1)=0.; b(2)=0.; ising=bzi; set_parameters(ising,b,icos,isin,kcos,ksin,bx2,by2,bz2); cudaMemcpy(dev_R,R,l*sizeof(double),cudaMemcpyHostToDevice); cudaMemcpy(dev_I,I,l*sizeof(double),cudaMemcpyHostToDevice); sumsigma_y<<<numblocks,numthreads>>>(dev_R,dev_I,dev_inR,dev_inI,nqubits.getValue(),l); for(int n=0;n<70;n++) { //se aplica U a in for(int i=0;i<nqubits.getValue();i++) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); Ui_kernel<<<numblocks,numthreads>>>(i,i_hor,dev_inR,dev_inI,icos,isin,l); Ui_kernel<<<numblocks,numthreads>>>(i,i_ver,dev_inR,dev_inI,icos,isin,l); cudaCheckError("ising",i); } for(int i=0;i<nqubits.getValue();i++) { Uk_kernel<<<numblocks,numthreads>>>(i,dev_inR,dev_inI,bx2,by2,bz2,kcos,ksin,l); //cudaCheckError("kick",i); } //se aplica la U for(int i=0;i<nqubits.getValue();i++) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); Ui_kernel<<<numblocks,numthreads>>>(i,i_hor,dev_R,dev_I,icos,isin,l); Ui_kernel<<<numblocks,numthreads>>>(i,i_ver,dev_R,dev_I,icos,isin,l); //cudaCheckError("ising",i); } for(int i=0;i<nqubits.getValue();i++) { Uk_kernel<<<numblocks,numthreads>>>(i,dev_R,dev_I,bx2,by2,bz2,kcos,ksin,l); //cudaCheckError("kick",i); } } for(int in=0;in<30;in++) { sumsigma_y<<<numblocks,numthreads>>>(dev_R,dev_I,dev_sumdxR,dev_sumdxI,nqubits.getValue(),l); cudaMemcpy(sumdxR,dev_sumdxR,l*sizeof(double),cudaMemcpyDeviceToHost); cudaMemcpy(sumdxI,dev_sumdxI,l*sizeof(double),cudaMemcpyDeviceToHost); 
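// As in the other color-map options, the overlap is evaluated on the host with
// itpp::dot, which costs four device-to-host transfers of size l per sampled
// time.  For large l, a device-side reduction (for example the timed_dot kernel
// already used by the correlation_measure_test option) could accumulate the
// same inner products on the GPU and copy back a single array at the end; the
// host-side version is kept here as written.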
cudaMemcpy(inR,dev_inR,l*sizeof(double),cudaMemcpyDeviceToHost); cudaMemcpy(inI,dev_inI,l*sizeof(double),cudaMemcpyDeviceToHost); //se aplica U a in for(int i=0;i<nqubits.getValue();i++) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); Ui_kernel<<<numblocks,numthreads>>>(i,i_hor,dev_inR,dev_inI,icos,isin,l); Ui_kernel<<<numblocks,numthreads>>>(i,i_ver,dev_inR,dev_inI,icos,isin,l); //cudaCheckError("ising",i); } for(int i=0;i<nqubits.getValue();i++) { Uk_kernel<<<numblocks,numthreads>>>(i,dev_inR,dev_inI,bx2,by2,bz2,kcos,ksin,l); //cudaCheckError("kick",i); } //se aplica la U for(int i=0;i<nqubits.getValue();i++) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); Ui_kernel<<<numblocks,numthreads>>>(i,i_hor,dev_R,dev_I,icos,isin,l); Ui_kernel<<<numblocks,numthreads>>>(i,i_ver,dev_R,dev_I,icos,isin,l); //cudaCheckError("ising",i); } for(int i=0;i<nqubits.getValue();i++) { Uk_kernel<<<numblocks,numthreads>>>(i,dev_R,dev_I,bx2,by2,bz2,kcos,ksin,l); //cudaCheckError("kick",i); } for(int i=0;i<l;i++) { finalstate(i)=std::complex<double>(sumdxR[i],sumdxI[i]); zerostate(i)=std::complex<double>(inR[i],inI[i]); } res=sqrt(std::norm(itpp::dot(itpp::conj(zerostate),finalstate)))/nqubits.getValue(); correlations(in)=res; } cout<<bxi<<" "<<bzi<<" "<<itpp::mean(correlations)<<" "<<std::sqrt(itpp::variance(correlations))<<endl; } } } // }}} if (option=="color_map2d_stdev_fast_in70") { // {{{ int xlen=x.getValue(); // cout<<square<<endl; double *sumdxR=new double[l]; double *sumdxI=new double[l]; double *dev_sumdxR; double *dev_sumdxI; cudaMalloc((void**)&dev_sumdxR,l*sizeof(double)); cudaMalloc((void**)&dev_sumdxI,l*sizeof(double)); double *inR=new double[l]; double *inI=new double[l]; double *dev_inR; double *dev_inI; cudaMalloc((void**)&dev_inR,l*sizeof(double)); cudaMalloc((void**)&dev_inI,l*sizeof(double)); itpp::cvec finalstate(l); itpp::cvec zerostate(l); itpp::vec correlations(1); double res,bx2,by2,bz2,kcos,ksin,ising,icos,isin; for(double bxi=0.0;bxi<=3.2;bxi+=0.05) { for(double bzi=0.0;bzi<=3.2;bzi+=0.05) { itpp::vec b(3); b(0)=bxi; b(1)=0.; b(2)=0.; ising=bzi; set_parameters(ising,b,icos,isin,kcos,ksin,bx2,by2,bz2); cudaMemcpy(dev_R,R,l*sizeof(double),cudaMemcpyHostToDevice); cudaMemcpy(dev_I,I,l*sizeof(double),cudaMemcpyHostToDevice); sumsigma_x<<<numblocks,numthreads>>>(dev_R,dev_I,dev_inR,dev_inI,nqubits.getValue(),l); for(int n=0;n<10;n++) { //se aplica U a in for(int i=0;i<nqubits.getValue();i++) { Ui_kernel<<<numblocks,numthreads>>>(i,(i+1)%nqubits.getValue(),dev_inR,dev_inI,icos,isin,l); //cudaCheckError("ising",i); } for(int i=0;i<nqubits.getValue();i++) { Uk_kernel<<<numblocks,numthreads>>>(i,dev_inR,dev_inI,bx2,by2,bz2,kcos,ksin,l); //cudaCheckError("kick",i); } //se aplica la U for(int i=0;i<nqubits.getValue();i++) { Ui_kernel<<<numblocks,numthreads>>>(i,(i+1)%nqubits.getValue(),dev_R,dev_I,icos,isin,l); //cudaCheckError("ising",i); } for(int i=0;i<nqubits.getValue();i++) { Uk_kernel<<<numblocks,numthreads>>>(i,dev_R,dev_I,bx2,by2,bz2,kcos,ksin,l); //cudaCheckError("kick",i); } } for(int in=0;in<1;in++) { sumsigma_x<<<numblocks,numthreads>>>(dev_R,dev_I,dev_sumdxR,dev_sumdxI,nqubits.getValue(),l); cudaMemcpy(sumdxR,dev_sumdxR,l*sizeof(double),cudaMemcpyDeviceToHost); cudaMemcpy(sumdxI,dev_sumdxI,l*sizeof(double),cudaMemcpyDeviceToHost); cudaMemcpy(inR,dev_inR,l*sizeof(double),cudaMemcpyDeviceToHost); cudaMemcpy(inI,dev_inI,l*sizeof(double),cudaMemcpyDeviceToHost); //se aplica U a in for(int i=0;i<nqubits.getValue();i++) { 
Ui_kernel<<<numblocks,numthreads>>>(i,(i+1)%nqubits.getValue(),dev_inR,dev_inI,icos,isin,l); //cudaCheckError("ising",i); } for(int i=0;i<nqubits.getValue();i++) { Uk_kernel<<<numblocks,numthreads>>>(i,dev_inR,dev_inI,bx2,by2,bz2,kcos,ksin,l); //cudaCheckError("kick",i); } //se aplica la U for(int i=0;i<nqubits.getValue();i++) { Ui_kernel<<<numblocks,numthreads>>>(i,(i+1)%nqubits.getValue(),dev_R,dev_I,icos,isin,l); //cudaCheckError("ising",i); } for(int i=0;i<nqubits.getValue();i++) { Uk_kernel<<<numblocks,numthreads>>>(i,dev_R,dev_I,bx2,by2,bz2,kcos,ksin,l); //cudaCheckError("kick",i); } for(int i=0;i<l;i++) { finalstate(i)=std::complex<double>(sumdxR[i],sumdxI[i]); zerostate(i)=std::complex<double>(inR[i],inI[i]); } res=sqrt(std::norm(itpp::dot(itpp::conj(zerostate),finalstate)/(double)nqubits.getValue())); correlations(in)=res; } cout<<bxi<<" "<<bzi<<" "<<itpp::mean(correlations)<<" "<<std::sqrt(itpp::variance(correlations))<<endl; } } } // }}} if (option=="color_map2d_stdev__trotter1g_fast") { // {{{ int xlen=x.getValue(); int num_trotter=trotternum.getValue(); double delta=1./num_trotter; // cout<<square<<endl; double *sumdxR=new double[l]; double *sumdxI=new double[l]; double *dev_sumdxR; double *dev_sumdxI; cudaMalloc((void**)&dev_sumdxR,l*sizeof(double)); cudaMalloc((void**)&dev_sumdxI,l*sizeof(double)); double icos=cos(delta*ising.getValue()); double isin=sin(delta*ising.getValue()); double *inR=new double[l]; double *inI=new double[l]; double *dev_inR; double *dev_inI; cudaMalloc((void**)&dev_inR,l*sizeof(double)); cudaMalloc((void**)&dev_inI,l*sizeof(double)); itpp::cvec finalstate(l); itpp::cvec zerostate(l); itpp::vec correlations(30); double res; int i_hor,i_ver; for(double bxi=0.0;bxi<=1.5;bxi+=0.05) { for(double bzi=0.0;bzi<=1.5;bzi+=0.05) { double theta=delta*sqrt((bxi*bxi)+(bzi*bzi)); double kcos=cos(theta); double ksin=sin(theta); if(theta==0) { theta=1.; } double bx2=delta*bxi/theta; double by2=0; double bz2=delta*bzi/theta; cudaMemcpy(dev_R,R,l*sizeof(double),cudaMemcpyHostToDevice); cudaMemcpy(dev_I,I,l*sizeof(double),cudaMemcpyHostToDevice); sumsigma_x<<<numblocks,numthreads>>>(dev_R,dev_I,dev_inR,dev_inI,nqubits.getValue(),l); for(int n=0;n<70;n++) { //se aplica U a in for(int trot=0;trot<num_trotter;trot++) { for(int i=0;i<nqubits.getValue();i++) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); Ui_kernel<<<numblocks,numthreads>>>(i,i_hor,dev_inR,dev_inI,icos,isin,l); Ui_kernel<<<numblocks,numthreads>>>(i,i_ver,dev_inR,dev_inI,icos,isin,l); //cudaCheckError("ising",i); } for(int i=0;i<nqubits.getValue();i++) { Uk_kernel<<<numblocks,numthreads>>>(i,dev_inR,dev_inI,bx2,by2,bz2,kcos,ksin,l); //cudaCheckError("kick",i); } } //se aplica la U for(int trot=0;trot<num_trotter;trot++) { for(int i=0;i<nqubits.getValue();i++) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); Ui_kernel<<<numblocks,numthreads>>>(i,i_hor,dev_R,dev_I,icos,isin,l); Ui_kernel<<<numblocks,numthreads>>>(i,i_ver,dev_R,dev_I,icos,isin,l); //cudaCheckError("ising",i); } for(int i=0;i<nqubits.getValue();i++) { Uk_kernel<<<numblocks,numthreads>>>(i,dev_R,dev_I,bx2,by2,bz2,kcos,ksin,l); //cudaCheckError("kick",i); } } } for(int in=0;in<30;in++) { sumsigma_x<<<numblocks,numthreads>>>(dev_R,dev_I,dev_sumdxR,dev_sumdxI,nqubits.getValue(),l); cudaMemcpy(sumdxR,dev_sumdxR,l*sizeof(double),cudaMemcpyDeviceToHost); cudaMemcpy(sumdxI,dev_sumdxI,l*sizeof(double),cudaMemcpyDeviceToHost); cudaMemcpy(inR,dev_inR,l*sizeof(double),cudaMemcpyDeviceToHost); 
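// color_map2d_stdev__trotter1g_fast approximates the evolution by num_trotter
// first-order Trotter sub-steps per unit time: each sub-step applies the Ising
// phases with angle delta*J (icos/isin) and then a kick of angle delta*|b|
// about the normalized (bx,0,bz) direction, with delta = 1/num_trotter.  This
// appears to target the continuous evolution generated by the sum of the Ising
// and kick Hamiltonians, in contrast to the single Ising-then-kick Floquet step
// used by the non-trotterized color-map options; as elsewhere, dev_in* carries
// the observable-applied state forward so no inverse evolution is needed.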
cudaMemcpy(inI,dev_inI,l*sizeof(double),cudaMemcpyDeviceToHost); //se aplica U a in for(int trot=0;trot<num_trotter;trot++) { for(int i=0;i<nqubits.getValue();i++) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); Ui_kernel<<<numblocks,numthreads>>>(i,i_hor,dev_inR,dev_inI,icos,isin,l); Ui_kernel<<<numblocks,numthreads>>>(i,i_ver,dev_inR,dev_inI,icos,isin,l); //cudaCheckError("ising",i); } for(int i=0;i<nqubits.getValue();i++) { Uk_kernel<<<numblocks,numthreads>>>(i,dev_inR,dev_inI,bx2,by2,bz2,kcos,ksin,l); //cudaCheckError("kick",i); } } //se aplica la U for(int trot=0;trot<num_trotter;trot++) { for(int i=0;i<nqubits.getValue();i++) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); Ui_kernel<<<numblocks,numthreads>>>(i,i_hor,dev_R,dev_I,icos,isin,l); Ui_kernel<<<numblocks,numthreads>>>(i,i_ver,dev_R,dev_I,icos,isin,l); //cudaCheckError("ising",i); } for(int i=0;i<nqubits.getValue();i++) { Uk_kernel<<<numblocks,numthreads>>>(i,dev_R,dev_I,bx2,by2,bz2,kcos,ksin,l); //cudaCheckError("kick",i); } } for(int i=0;i<l;i++) { finalstate(i)=std::complex<double>(sumdxR[i],sumdxI[i]); zerostate(i)=std::complex<double>(inR[i],inI[i]); } res=sqrt(std::norm(itpp::dot(itpp::conj(zerostate),finalstate)))/nqubits.getValue(); correlations(in)=res; } cout<<bxi<<" "<<bzi<<" "<<itpp::mean(correlations)<<" "<<std::sqrt(itpp::variance(correlations))<<endl; } } } // }}} if (option=="color_map2d_stdev_trotter2g") { // {{{ int xlen=x.getValue(); int num_trotter=trotternum.getValue(); double delta=1./num_trotter; // cout<<square<<endl; double *sumdxR=new double[l]; double *sumdxI=new double[l]; double *dev_sumdxR; double *dev_sumdxI; cudaMalloc((void**)&dev_sumdxR,l*sizeof(double)); cudaMalloc((void**)&dev_sumdxI,l*sizeof(double)); double icos=cos((delta/2)*ising.getValue()); double isin=sin((delta/2)*ising.getValue()); itpp::cvec finalstate(l); itpp::cvec zerostate(l); itpp::vec correlations(30); double res; int i_hor,i_ver; sumsigma_x<<<numblocks,numthreads>>>(dev_R,dev_I,dev_sumdxR,dev_sumdxI,nqubits.getValue(),l); cudaMemcpy(sumdxR,dev_sumdxR,l*sizeof(double),cudaMemcpyDeviceToHost); cudaMemcpy(sumdxI,dev_sumdxI,l*sizeof(double),cudaMemcpyDeviceToHost); for(int i=0;i<l;i++) { zerostate(i)=std::complex<double>(sumdxR[i],sumdxI[i]); } for(double bxi=beginx.getValue();bxi<=1.5;bxi+=0.05) { for(double bzi=0.0;bzi<=1.5;bzi+=0.05) { double theta=delta*sqrt((bxi*bxi)+(bzi*bzi)); double kcos=cos(theta); double ksin=sin(theta); if(theta==0) { theta=1.; } double bx2=delta*bxi/theta; double by2=0; double bz2=delta*bzi/theta; cudaMemcpy(dev_R,R,l*sizeof(double),cudaMemcpyHostToDevice); cudaMemcpy(dev_I,I,l*sizeof(double),cudaMemcpyHostToDevice); for(int n=0;n<70;n++) { for(int it=0;it<num_trotter;it++) { for(int i=0;i<nqubits.getValue();i++) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); Ui_kernel<<<numblocks,numthreads>>>(i,i_hor,dev_R,dev_I,icos,isin,l); Ui_kernel<<<numblocks,numthreads>>>(i,i_ver,dev_R,dev_I,icos,isin,l); } for(int i=0;i<nqubits.getValue();i++) { Uk_kernel<<<numblocks,numthreads>>>(i,dev_R,dev_I,bx2,by2,bz2,kcos,ksin,l); } for(int i=0;i<nqubits.getValue();i++) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); Ui_kernel<<<numblocks,numthreads>>>(i,i_hor,dev_R,dev_I,icos,isin,l); Ui_kernel<<<numblocks,numthreads>>>(i,i_ver,dev_R,dev_I,icos,isin,l); } } } for(int in=0;in<30;in++) { for(int it=0;it<num_trotter;it++) { for(int i=0;i<nqubits.getValue();i++) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); 
Ui_kernel<<<numblocks,numthreads>>>(i,i_hor,dev_R,dev_I,icos,isin,l); Ui_kernel<<<numblocks,numthreads>>>(i,i_ver,dev_R,dev_I,icos,isin,l); } for(int i=0;i<nqubits.getValue();i++) { Uk_kernel<<<numblocks,numthreads>>>(i,dev_R,dev_I,bx2,by2,bz2,kcos,ksin,l); } for(int i=0;i<nqubits.getValue();i++) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); Ui_kernel<<<numblocks,numthreads>>>(i,i_hor,dev_R,dev_I,icos,isin,l); Ui_kernel<<<numblocks,numthreads>>>(i,i_ver,dev_R,dev_I,icos,isin,l); } } sumsigma_x<<<numblocks,numthreads>>>(dev_R,dev_I,dev_sumdxR,dev_sumdxI,nqubits.getValue(),l); for(int back=0;back<71+in;back++) { for(int it=0;it<num_trotter;it++) { for(int i=0;i<nqubits.getValue();i++) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); Ui_kernel<<<numblocks,numthreads>>>(i,i_hor,dev_sumdxR,dev_sumdxI,icos,-1*isin,l); Ui_kernel<<<numblocks,numthreads>>>(i,i_ver,dev_sumdxR,dev_sumdxI,icos,-1*isin,l); } for(int i=0;i<nqubits.getValue();i++) { Uk_kernel<<<numblocks,numthreads>>>(i,dev_sumdxR,dev_sumdxI,-1*bx2,-1*by2,-1*bz2,kcos,ksin,l); } for(int i=0;i<nqubits.getValue();i++) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); Ui_kernel<<<numblocks,numthreads>>>(i,i_hor,dev_sumdxR,dev_sumdxI,icos,-1*isin,l); Ui_kernel<<<numblocks,numthreads>>>(i,i_ver,dev_sumdxR,dev_sumdxI,icos,-1*isin,l); } } } cudaMemcpy(sumdxR,dev_sumdxR,l*sizeof(double),cudaMemcpyDeviceToHost); cudaMemcpy(sumdxI,dev_sumdxI,l*sizeof(double),cudaMemcpyDeviceToHost); for(int i=0;i<l;i++) { finalstate(i)=std::complex<double>(sumdxR[i],sumdxI[i]); } res=sqrt(std::norm(itpp::dot(itpp::conj(zerostate),finalstate)))/nqubits.getValue(); correlations(in)=res; } cout<<bxi<<" "<<bzi<<" "<<itpp::mean(correlations)<<" "<<std::sqrt(itpp::variance(correlations))<<endl; } } } // }}} if (option=="color_map2d_stdev_trotter1g") { // {{{ int xlen=x.getValue(); int num_trotter=trotternum.getValue(); double delta=1./num_trotter; cout<<"delta " << delta<<endl; // cout<<square<<endl; double *sumdxR=new double[l]; double *sumdxI=new double[l]; double *dev_sumdxR; double *dev_sumdxI; cudaMalloc((void**)&dev_sumdxR,l*sizeof(double)); cudaMalloc((void**)&dev_sumdxI,l*sizeof(double)); double icos=cos((delta)*ising.getValue()); double isin=sin((delta)*ising.getValue()); itpp::cvec finalstate(l); itpp::cvec zerostate(l); itpp::vec correlations(30); double res; int i_hor,i_ver; sumsigma_x<<<numblocks,numthreads>>>(dev_R,dev_I,dev_sumdxR,dev_sumdxI,nqubits.getValue(),l); cudaMemcpy(sumdxR,dev_sumdxR,l*sizeof(double),cudaMemcpyDeviceToHost); cudaMemcpy(sumdxI,dev_sumdxI,l*sizeof(double),cudaMemcpyDeviceToHost); for(int i=0;i<l;i++) { zerostate(i)=std::complex<double>(sumdxR[i],sumdxI[i]); } for(double bxi=beginx.getValue();bxi<1.5;bxi+=0.05) { for(double bzi=0.;bzi<1.5;bzi+=0.05) { double theta=delta*sqrt((bxi*bxi)+(bzi*bzi)); double kcos=cos(theta); double ksin=sin(theta); if(theta==0) { theta=1.; } double bx2=delta*bxi/theta; double by2=0; double bz2=delta*bzi/theta; cudaMemcpy(dev_R,R,l*sizeof(double),cudaMemcpyHostToDevice); cudaMemcpy(dev_I,I,l*sizeof(double),cudaMemcpyHostToDevice); for(int n=0;n<70;n++) { for(int it=0;it<num_trotter;it++) { for(int i=0;i<nqubits.getValue();i++) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); Ui_kernel<<<numblocks,numthreads>>>(i,i_hor,dev_R,dev_I,icos,isin,l); Ui_kernel<<<numblocks,numthreads>>>(i,i_ver,dev_R,dev_I,icos,isin,l); } for(int i=0;i<nqubits.getValue();i++) { 
Uk_kernel<<<numblocks,numthreads>>>(i,dev_R,dev_I,bx2,by2,bz2,kcos,ksin,l); } } } for(int in=0;in<30;in++) { for(int it=0;it<num_trotter;it++) { for(int i=0;i<nqubits.getValue();i++) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); Ui_kernel<<<numblocks,numthreads>>>(i,i_hor,dev_R,dev_I,icos,isin,l); Ui_kernel<<<numblocks,numthreads>>>(i,i_ver,dev_R,dev_I,icos,isin,l); } for(int i=0;i<nqubits.getValue();i++) { Uk_kernel<<<numblocks,numthreads>>>(i,dev_R,dev_I,bx2,by2,bz2,kcos,ksin,l); } } sumsigma_x<<<numblocks,numthreads>>>(dev_R,dev_I,dev_sumdxR,dev_sumdxI,nqubits.getValue(),l); for(int back=0;back<71+in;back++) { for(int it=0;it<num_trotter;it++) { for(int i=0;i<nqubits.getValue();i++) { Uk_kernel<<<numblocks,numthreads>>>(i,dev_sumdxR,dev_sumdxI,-1*bx2,-1*by2,-1*bz2,kcos,ksin,l); } for(int i=0;i<nqubits.getValue();i++) { i_hor=(i+1)%xlen+(i/xlen)*xlen; i_ver=(i+xlen)%nqubits.getValue(); Ui_kernel<<<numblocks,numthreads>>>(i,i_hor,dev_sumdxR,dev_sumdxI,icos,-1*isin,l); Ui_kernel<<<numblocks,numthreads>>>(i,i_ver,dev_sumdxR,dev_sumdxI,icos,-1*isin,l); } } } cudaMemcpy(sumdxR,dev_sumdxR,l*sizeof(double),cudaMemcpyDeviceToHost); cudaMemcpy(sumdxI,dev_sumdxI,l*sizeof(double),cudaMemcpyDeviceToHost); for(int i=0;i<l;i++) { finalstate(i)=std::complex<double>(sumdxR[i],sumdxI[i]); } res=sqrt(std::norm(itpp::dot(itpp::conj(zerostate),finalstate)))/nqubits.getValue(); correlations(in)=res; } cout<<bxi<<" "<<bzi<<" "<<itpp::mean(correlations)<<" "<<std::sqrt(itpp::variance(correlations))<<endl; } } } // }}} if (option=="test_2d_grid") { // {{{ int xlen=x.getValue(); // int ylen=y.getValue(); //cout<<square<<endl; double icos=cos(ising.getValue()); double isin=sin(ising.getValue()); for(int n=0;n<numt.getValue();n++) { for(int i=0;i<nqubits.getValue();i++) { int i_hor=(i+1)%xlen+(i/xlen)*xlen; int i_ver=(i+xlen)%nqubits.getValue(); cout << i << " " << i_hor << " " << i_ver << endl; Ui_kernel<<<numblocks,numthreads>>>(i,i_hor,dev_R,dev_I,icos,isin,l); Ui_kernel<<<numblocks,numthreads>>>(i,i_ver,dev_R,dev_I,icos,isin,l); //cudaCheckError("ising",i); } } cudaMemcpy(R,dev_R,l*sizeof(double),cudaMemcpyDeviceToHost); cudaMemcpy(I,dev_I,l*sizeof(double),cudaMemcpyDeviceToHost); cudaFree(dev_R); cudaFree(dev_I); for(int i=0;i<l;i++) { std::cout<<R[i]<<" i"<<I[i]<<endl; } } // }}} if (option=="exp_lattice") { // {{{ //nqubits debe corresponder a un cuadro double icos=cos(ising.getValue()); double isin=sin(ising.getValue()); double theta=sqrt(pow(bx.getValue(),2)+pow(by.getValue(),2)+pow(bz.getValue(),2)); double bx2=bx.getValue()/theta; double by2=by.getValue()/theta; double bz2=bz.getValue()/theta; double kcos=cos(theta); double ksin=sin(theta); double *dotR=new double[nqubits.getValue()]; double *dotI=new double[nqubits.getValue()]; double *dev_dotR; double *dev_dotI; cudaMalloc((void**)&dev_dotR,nqubits.getValue()*sizeof(double)); cudaMalloc((void**)&dev_dotI,nqubits.getValue()*sizeof(double)); //cout<<div<<endl; for(int t=0;t<numt.getValue();t++) { for(int i=0;i<nqubits.getValue();i++) { dotR[i]=0; dotI[i]=0; } cudaSafeCall(cudaMemcpy(dev_dotR,dotR,nqubits.getValue()*sizeof(double),cudaMemcpyHostToDevice),"cudaMalloc",1); cudaSafeCall(cudaMemcpy(dev_dotI,dotI,nqubits.getValue()*sizeof(double),cudaMemcpyHostToDevice),"cudaMalloc",2); for(int i=0;i<nqubits.getValue();i++) { // dot_2<<<numblocks,numthreads>>>(1,1,i,dev_R,dev_I,dev_dotR,dev_dotI,l); //cudaDeviceSynchronize(); cudaCheckError("dot",i); } 
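// Note that the dot_2 launch in the loop above is commented out, so dev_dotR
// and dev_dotI still contain the zeros uploaded at the top of this time step;
// as written, the per-qubit values printed below are all zero.  Re-enabling
// dot_2 (or an equivalent per-site expectation kernel) is required for this
// option to produce meaningful lattice expectation values.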
    cudaSafeCall(cudaMemcpy(dotR,dev_dotR,nqubits.getValue()*sizeof(double),cudaMemcpyDeviceToHost),"cudaMalloc",3);
    cudaSafeCall(cudaMemcpy(dotI,dev_dotI,nqubits.getValue()*sizeof(double),cudaMemcpyDeviceToHost),"cudaMalloc",4);
    for(int i=0;i<nqubits.getValue();i++) {
      cout<<dotR[i]<<" ";
    }
    cout<<endl;
    for(int is=0;is<nqubits.getValue();is++) {
      //Ui_kernel<<<numblocks,numthreads>>>(is,(is+1)%5,dev_R,dev_I,icos,isin,l);
      Ui_kernel<<<numblocks,numthreads>>>(is,(is+5)%nqubits.getValue(),dev_R,dev_I,icos,isin,l);
      cudaCheckError("ising",is);
    }
    for(int ki=0;ki<nqubits.getValue();ki++) {
      Uk_kernel<<<numblocks,numthreads>>>(ki,dev_R,dev_I,bx2,by2,bz2,kcos,ksin,l);
      cudaCheckError("kick",ki);
    }
  }
  cudaFree(dev_dotR);
  cudaFree(dev_dotI);
} // }}}
if (option=="test_densmat") { // {{{
  int ndens=pow(2,numbits(whichq.getValue()));
  double *densR=new double[ndens*ndens];
  double *densI=new double[ndens*ndens];
  double *dev_densR;
  double *dev_densI;
  cudaMalloc((void**)&dev_densR,ndens*ndens*sizeof(double));
  cudaMalloc((void**)&dev_densI,ndens*ndens*sizeof(double));
  for(int i=0;i<ndens*ndens;i++) {
    densR[i]=0;
    densI[i]=0;
  }
  cudaSafeCall(cudaMemcpy(dev_densR,densR,ndens*ndens*sizeof(double),cudaMemcpyHostToDevice),"cudaMemcpy",1);
  cudaSafeCall(cudaMemcpy(dev_densI,densI,ndens*ndens*sizeof(double),cudaMemcpyHostToDevice),"cudaMemcpy",2);
  int div=choosediv(nqubits.getValue());
  // int blockdivdens,threaddivdens;
  // choosedivdens(l,blockdivdens,threaddivdens);
  cout<<numblocks<<" "<<numthreads/ndens<<endl;
  for(int i=0;i<ndens;i++) {
    for(int j=0;j<ndens;j++) {
      density_matrix<<<numblocks,numthreads/ndens>>>(whichq.getValue(),ndens,i,j,dev_R,dev_I,dev_densR,dev_densI,l/ndens);
      cudaCheckError("dot",i+j);
    }
  }
  cudaSafeCall(cudaMemcpy(densR,dev_densR,ndens*ndens*sizeof(double),cudaMemcpyDeviceToHost),"cudaMemcpy",3);
  cudaSafeCall(cudaMemcpy(densI,dev_densI,ndens*ndens*sizeof(double),cudaMemcpyDeviceToHost),"cudaMemcpy",4);
  for(int i=0;i<ndens;i++) {
    for(int j=0;j<ndens;j++) {
      cout<<densR[(ndens*i)+j]<<" i"<<densI[(ndens*i)+j]<<" ";
    }
    cout<<endl;
  }
  cudaFree(dev_densR);
  cudaFree(dev_densI);
} // }}}
if (option=="exp_cadena_densmat") { // {{{
  double icos=cos(ising.getValue());
  double isin=sin(ising.getValue());
  double theta=sqrt(bx.getValue()*bx.getValue()+by.getValue()*by.getValue()+bz.getValue()*bz.getValue());
  double bx2=bx.getValue()/theta;
  double by2=by.getValue()/theta;
  double bz2=bz.getValue()/theta;
  double kcos=cos(theta);
  double ksin=sin(theta);
  int ndens=pow(2,numbits(whichq.getValue()));
  double *densR=new double[ndens*ndens];
  double *densI=new double[ndens*ndens];
  double *dev_densR;
  double *dev_densI;
  cudaMalloc((void**)&dev_densR,ndens*ndens*sizeof(double));
  cudaMalloc((void**)&dev_densI,ndens*ndens*sizeof(double));
  itpp::cmat dens(2,2);
  // whichtemp is used to extract the density matrix of every qubit in turn
  //for(int n=0;n<nqubits.getValue();n++) {
  for(int n=0;n<numt.getValue();n++) {
    for(int qus=0;qus<nqubits.getValue();qus++) {
      int whichtemp=pow(2,qus);
      for(int i=0;i<ndens*ndens;i++) {
        densR[i]=0;
        densI[i]=0;
      }
      cudaSafeCall(cudaMemcpy(dev_densR,densR,ndens*ndens*sizeof(double),cudaMemcpyHostToDevice),"cudaMemcpy",1);
      cudaSafeCall(cudaMemcpy(dev_densI,densI,ndens*ndens*sizeof(double),cudaMemcpyHostToDevice),"cudaMemcpy",2);
      for(int i=0;i<ndens;i++) {
        for(int j=0;j<ndens;j++) {
          //whichtemp here
          density_matrix<<<numblocks,numthreads/ndens>>>(whichtemp,ndens,i,j,dev_R,dev_I,dev_densR,dev_densI,l/ndens);
          cudaCheckError("dot",i+j);
        }
      }
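      // The (i,j) loop above accumulates what appears to be the single-qubit
      // reduced density matrix rho of the qubit selected by whichtemp (the
      // density_matrix kernel is defined elsewhere, so this is inferred from the
      // call site). The host code below then prints
      //   <sigma_z> = Tr(rho*sigma_z) = rho(0,0) - rho(1,1),
      // assuming itppextmath::sigma(3) is the Pauli z matrix.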
      cudaSafeCall(cudaMemcpy(densR,dev_densR,ndens*ndens*sizeof(double),cudaMemcpyDeviceToHost),"cudaMemcpy",3);
      cudaSafeCall(cudaMemcpy(densI,dev_densI,ndens*ndens*sizeof(double),cudaMemcpyDeviceToHost),"cudaMemcpy",4);
      for(int i=0;i<ndens;i++) {
        for(int j=0;j<ndens;j++) {
          dens(i,j)=std::complex<double>(densR[(ndens*i)+j],densI[(ndens*i)+j]);
        }
      }
      cout<<real(itpp::trace(dens*itppextmath::sigma(3)))<<endl;
      // cout<<"-----------------next qubit-----------------------"<<endl;
    }
    for(int i=0;i<nqubits.getValue();i++) {
      Ui_kernel<<<numblocks,numthreads>>>(i,(i+1)%nqubits.getValue(),dev_R,dev_I,icos,isin,l);
      cudaCheckError("ising",i);
    }
    for(int i=0;i<nqubits.getValue();i++) {
      Uk_kernel<<<numblocks,numthreads>>>(i,dev_R,dev_I,bx2,by2,bz2,kcos,ksin,l);
      cudaCheckError("ising",i);
    }
    //cout<<endl;
  }
  cudaFree(dev_densR);
  cudaFree(dev_densI);
} // }}}
if(option=="correlation_measure2d_trotter") { // {{{
  int num_trotter=trotternum.getValue();
  double delta=1./num_trotter;
  int xlen=x.getValue();
  // int ylen=y.getValue();
  // cout<<square<<endl;
  double *sumdxR=new double[l];
  double *sumdxI=new double[l];
  double *dev_sumdxR;
  double *dev_sumdxI;
  cudaMalloc((void**)&dev_sumdxR,l*sizeof(double));
  cudaMalloc((void**)&dev_sumdxI,l*sizeof(double));
  double icos=cos((delta/2)*ising.getValue());
  double isin=sin((delta/2)*ising.getValue());
  double theta=(delta)*sqrt(bx.getValue()*bx.getValue()+by.getValue()*by.getValue()+bz.getValue()*bz.getValue());
  double bx2=delta*bx.getValue()/theta;
  double by2=delta*by.getValue()/theta;
  double bz2=delta*bz.getValue()/theta;
  double kcos=cos(theta);
  double ksin=sin(theta);
  itpp::cvec finalstate(l);
  itpp::cvec zerostate(l);
  double res;
  // double res,norm;
  int i_hor,i_ver;
  sumsigma_x<<<numblocks,numthreads>>>(dev_R,dev_I,dev_sumdxR,dev_sumdxI,nqubits.getValue(),l);
  cudaCheckError("sum_dx",1);
  cudaMemcpy(sumdxR,dev_sumdxR,l*sizeof(double),cudaMemcpyDeviceToHost);
  cudaMemcpy(sumdxI,dev_sumdxI,l*sizeof(double),cudaMemcpyDeviceToHost);
  for(int i=0;i<l;i++) {
    zerostate(i)=std::complex<double>(sumdxR[i],sumdxI[i]);
  }
  for(int n=0;n<numt.getValue();n++) {
    // apply M
    sumsigma_x<<<numblocks,numthreads>>>(dev_R,dev_I,dev_sumdxR,dev_sumdxI,nqubits.getValue(),l);
    // apply U^-1
    for(int t=0;t<n;t++) {
      for(int it=0;it<num_trotter;it++) {
        for(int i=1;i<nqubits.getValue();i++) {
          i_hor=(i+1)%xlen+(i/xlen)*xlen;
          i_ver=(i+xlen)%nqubits.getValue();
          Ui_kernel<<<numblocks,numthreads>>>(i,i_hor,dev_sumdxR,dev_sumdxI,icos,-1*isin,l);
          Ui_kernel<<<numblocks,numthreads>>>(i,i_ver,dev_sumdxR,dev_sumdxI,icos,-1*isin,l);
        }
        for(int i=0;i<nqubits.getValue();i++) {
          Uk_kernel<<<numblocks,numthreads>>>(i,dev_sumdxR,dev_sumdxI,-1*bx2,-1*by2,-1*bz2,kcos,ksin,l);
        }
        for(int i=0;i<nqubits.getValue();i++) {
          i_hor=(i+1)%xlen+(i/xlen)*xlen;
          i_ver=(i+xlen)%nqubits.getValue();
          Ui_kernel<<<numblocks,numthreads>>>(i,i_hor,dev_sumdxR,dev_sumdxI,icos,-1*isin,l);
          Ui_kernel<<<numblocks,numthreads>>>(i,i_ver,dev_sumdxR,dev_sumdxI,icos,-1*isin,l);
        }
      }
    }
    // apply U
    for(int it=0;it<num_trotter;it++) {
      for(int i=0;i<nqubits.getValue();i++) {
        i_hor=(i+1)%xlen+(i/xlen)*xlen;
        i_ver=(i+xlen)%nqubits.getValue();
        Ui_kernel<<<numblocks,numthreads>>>(i,i_hor,dev_R,dev_I,icos,isin,l);
        Ui_kernel<<<numblocks,numthreads>>>(i,i_ver,dev_R,dev_I,icos,isin,l);
      }
      for(int i=0;i<nqubits.getValue();i++) {
        Uk_kernel<<<numblocks,numthreads>>>(i,dev_R,dev_I,bx2,by2,bz2,kcos,ksin,l);
      }
      for(int i=1;i<nqubits.getValue();i++) {
        i_hor=(i+1)%xlen+(i/xlen)*xlen;
        i_ver=(i+xlen)%nqubits.getValue();
        Ui_kernel<<<numblocks,numthreads>>>(i,i_hor,dev_R,dev_I,icos,isin,l);
        Ui_kernel<<<numblocks,numthreads>>>(i,i_ver,dev_R,dev_I,icos,isin,l);
      }
    }
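    // At this point dev_R holds U^(n+1)|psi_0> and dev_sumdx holds
    // U^(-n) M U^(n) |psi_0>, where U is one trotterized kicked-Ising step and
    // M = sum_i sigma_x^(i) (assuming that is what sumsigma_x implements, as its
    // name suggests). The value printed below is therefore
    //   |<psi_0| M U^(-n) M U^(n) |psi_0>| / nqubits,
    // i.e. a normalized two-time autocorrelation of the total transverse
    // magnetization.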
    cudaMemcpy(sumdxR,dev_sumdxR,l*sizeof(double),cudaMemcpyDeviceToHost);
    cudaMemcpy(sumdxI,dev_sumdxI,l*sizeof(double),cudaMemcpyDeviceToHost);
    for(int i=0;i<l;i++) {
      finalstate(i)=std::complex<double>(sumdxR[i],sumdxI[i]);
    }
    res=std::norm(itpp::dot(itpp::conj(zerostate),finalstate));
    cout<<sqrt(res)/nqubits.getValue()<<endl;
  }
} // }}}
if (option=="test_proyector_big") { // {{{
  itpp::cvec proyectado(l);
  double *rotR=new double[l];
  double *rotI=new double[l];
  double *dev_rotR;
  double *dev_rotI;
  cudaMalloc((void**)&dev_rotR,l*sizeof(double));
  cudaMalloc((void**)&dev_rotI,l*sizeof(double));
  for(int i=1;i<x.getValue();i++) {
    vertical_rotation<<<numblocks,numthreads>>>(dev_R,dev_I,dev_rotR,dev_rotI,x.getValue(),nqubits.getValue(),l,i);
    cudaMemcpy(rotR,dev_rotR,l*sizeof(double),cudaMemcpyDeviceToHost);
    cudaMemcpy(rotI,dev_rotI,l*sizeof(double),cudaMemcpyDeviceToHost);
    for(int j=0;j<l;j++) {
      R[j]=R[j]+cos(2*itpp::pi*km.getValue()*i/x.getValue())*rotR[j]-sin(2*itpp::pi*km.getValue()*i/x.getValue())*rotI[j];
      I[j]=I[j]+sin(2*itpp::pi*km.getValue()*i/x.getValue())*rotR[j]+cos(2*itpp::pi*km.getValue()*i/x.getValue())*rotI[j];
    }
  }
  cudaMemcpy(dev_R,R,l*sizeof(double),cudaMemcpyHostToDevice);
  cudaMemcpy(dev_I,I,l*sizeof(double),cudaMemcpyHostToDevice);
  vertical_rotation<<<numblocks,numthreads>>>(dev_R,dev_I,dev_rotR,dev_rotI,x.getValue(),nqubits.getValue(),l);
  cudaMemcpy(rotR,dev_rotR,l*sizeof(double),cudaMemcpyDeviceToHost);
  cudaMemcpy(rotI,dev_rotI,l*sizeof(double),cudaMemcpyDeviceToHost);
  std::complex<double> fase=std::exp(std::complex<double>(0,-2*itpp::pi*km.getValue()/x.getValue()));
  for(int i=0;i<l;i++) {
    //cout<<std::complex<double>(rotR[i],rotI[i])/std::complex<double>(R[i],I[i])<<endl;
    proyectado(i)=fase*(std::complex<double>(R[i],I[i]))-std::complex<double>(rotR[i],rotI[i]);
    //cout<<"---->"<<proyectado(i)<<endl;
  }
  cout<<"-----------------"<<endl;
  cout<<itpp::norm(proyectado)<<endl;
  //cout<<fase<<endl;
} // }}}
if (option=="test_proyector") { // {{{
  itpp::cvec proyectado(l);
  double *rotR=new double[l];
  double *rotI=new double[l];
  double *dev_rotR;
  double *dev_rotI;
  cudaMalloc((void**)&dev_rotR,l*sizeof(double));
  cudaMalloc((void**)&dev_rotI,l*sizeof(double));
  itpp::cvec vector(l);
  for(int i=0;i<l;i++) {
    vector(i)=std::complex<double>(R[i],I[i]);
  }
  vertical_proyector<<<numblocks,numthreads>>>(dev_R,dev_I,dev_rotR,dev_rotI,x.getValue(),nqubits.getValue(),l,km.getValue());
  cudaMemcpy(rotR,dev_rotR,l*sizeof(double),cudaMemcpyDeviceToHost);
  cudaMemcpy(rotI,dev_rotI,l*sizeof(double),cudaMemcpyDeviceToHost);
  for(int i=0;i<l;i++) {
    proyectado(i)=std::complex<double>(rotR[i],rotI[i]);
  }
  evcuda::proyector_vertical_itpp(vector,dev_R,dev_I,dev_rotR,dev_rotI,x.getValue(),km.getValue());
  cout<<proyectado<<endl;
  cout<<vector<<endl;
  cout<<itpp::norm(proyectado-vector)<<endl;
  itpp::cvec rotado=proyectado;
  itpp::cvec rotado2=vector;
  int nx = x.getValue();
  evcuda::apply_vertical_rotation_itpp(rotado,dev_R,dev_I,dev_rotR,dev_rotI,nx);
  evcuda::apply_vertical_rotation_itpp(rotado2,dev_R,dev_I,dev_rotR,dev_rotI,nx);
  double error = abs(itppextmath::proportionality_test(rotado,proyectado));
  double error2 = abs(itppextmath::proportionality_test(proyectado,vector));
  cout << "Proportionality error for the CUDA projector " << error << endl;
  cout << "Proportionality error for the reference projector-- " << error2 << endl;
  //cout<<proyectado<<endl;
  //cout<<vector<<endl;
} // }}}
if (option=="assemble_matrix") { // {{{
  double *rotR=new double[l];
  double *rotI=new double[l];
  double *dev_rotR;
  double *dev_rotI;
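  // What follows builds, for each basis state flagged by find_states_horizontal,
  // the (unnormalized) momentum-km eigenvector of the horizontal translation,
  //   |v> = sum_r exp(2*pi*i*km*r/x) T_h^r |e_i>,
  // by accumulating the shifted copies returned by horizontal_rotation with the
  // corresponding cos/sin phases (the r=0 term is the starting basis state itself).
  // The kernels and find_states_horizontal are defined elsewhere, so this
  // description is inferred from the call pattern below.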
  cudaMalloc((void**)&dev_rotR,l*sizeof(double));
  cudaMalloc((void**)&dev_rotI,l*sizeof(double));
  int *A=new int[l];
  for(int i=0;i<l;i++) {
    A[i]=2;
  }
  find_states_horizontal(A,nqubits.getValue(),x.getValue(),km.getValue(),l);
  int cont=0;
  for(int i=0;i<l;i++) {
    cont+=A[i];
  }
  // note: eigenvectors is declared cont x cont, while the loop below writes l entries per row
  itpp::cmat eigenvectors(cont,cont);
  for(int vec=0;vec<cont;vec++) {
    int flag=0;
    for(int i=0;i<l;i++) {
      if(A[i]==1 && flag==0) {
        R[i]=1.;
        flag=1;
        A[i]=0;
      } else {
        R[i]=0.;
      }
      I[i]=0;
    }
    cudaMemcpy(dev_R,R,l*sizeof(double),cudaMemcpyHostToDevice);
    cudaMemcpy(dev_I,I,l*sizeof(double),cudaMemcpyHostToDevice);
    for(int i=1;i<x.getValue();i++) {
      horizontal_rotation<<<numblocks,numthreads>>>(dev_R,dev_I,dev_rotR,dev_rotI,x.getValue(),nqubits.getValue(),l,i);
      cudaMemcpy(rotR,dev_rotR,l*sizeof(double),cudaMemcpyDeviceToHost);
      cudaMemcpy(rotI,dev_rotI,l*sizeof(double),cudaMemcpyDeviceToHost);
      for(int j=0;j<l;j++) {
        R[j]=R[j]+cos(2*itpp::pi*km.getValue()*i/x.getValue())*rotR[j]-sin(2*itpp::pi*km.getValue()*i/x.getValue())*rotI[j];
        I[j]=I[j]+sin(2*itpp::pi*km.getValue()*i/x.getValue())*rotR[j]+cos(2*itpp::pi*km.getValue()*i/x.getValue())*rotI[j];
      }
    }
    for(int i=0;i<l;i++) {
      eigenvectors(vec,i)=std::complex<double>(R[i],I[i]);
    }
  }
} // }}}
cudaFree(dev_R);
cudaFree(dev_I);
// {{{ Final report
if(!no_general_report.getValue()){
  error += system("echo \\#terminando: $(date)");
}
// }}}
return 0;
}