Dataset columns (string length ranges):
  hip_filename: string, lengths 5-84
  hip_content: string, lengths 79-9.69M
  cuda_filename: string, lengths 4-83
  cuda_content: string, lengths 19-9.69M
30b4df43e4c18dccf0497fad1be6bf94943da8dd.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <math.h> #include <chrono> #include <hip/hip_runtime.h> #include "tensorAccessor.h" // A demo of packed tensor accessors in Pytorch __global__ void tensor_packed_accessor_kernel ( PackedTensorAccessor64<float, 1, RestrictPtrTraits> r, PackedTensorAccessor64<float, 2, RestrictPtrTraits> m, PackedTensorAccessor64<float, 1, RestrictPtrTraits> v) { int64_t i = blockIdx.x * blockDim.x + threadIdx.x; if (i < r.size(0)) { float val = 0.0f; for (int64_t j = 0; j < m.size(1); j++) { val += m[i][j] * v[j]; } r[i] = val; } } __global__ void raw_accessor_kernel ( const int64_t nrow, const int64_t ncol, float *__restrict__ r, const float *__restrict__ m, const float *__restrict__ v) { int64_t i = blockIdx.x * blockDim.x + threadIdx.x; if (i < nrow) { float val = 0.0f; for (int64_t j = 0; j < ncol; j++) { val += m[i * ncol + j] * v[j]; } r[i] = val; } } int main(int argc, char* argv[]) { if (argc != 4) { printf("Usage: %s <number of rows> <number of columns> <repeat>\n", argv[0]); return 1; } const int64_t nrow = atol(argv[1]); const int64_t ncol = atol(argv[2]); const int repeat = atoi(argv[3]); // tensor sizes and strides const int64_t sizes[2] = {nrow, ncol}; const int64_t strides[2] = {ncol, 1}; int64_t numel = 1; for (int i = 0; i < 2; i++) numel *= sizes[i]; // matrix vector multiply int64_t m_bytes = numel * sizeof(float); int64_t v_bytes = ncol * sizeof(float); int64_t r_bytes = nrow * sizeof(float); float *m, *v, *r, *r_ref; m = (float*) malloc (m_bytes); v = (float*) malloc (v_bytes); r = (float*) malloc (r_bytes); r_ref = (float*) malloc (r_bytes); srand(123); for (int64_t i = 0; i < numel; i++) { m[i] = rand() / (float)RAND_MAX; } for (int64_t i = 0; i < ncol; i++) { v[i] = rand() / (float)RAND_MAX; } for (int64_t i = 0; i < nrow; i++) { float val = 0.f; for (int64_t j = 0; j < ncol; j++) { val += m[i * ncol + j] * v[j]; } r_ref[i] = val; } float *d_m, *d_v, *d_r; hipMalloc((void**)&d_m, m_bytes); hipMemcpy(d_m, m, m_bytes, hipMemcpyHostToDevice); hipMalloc((void**)&d_v, v_bytes); hipMemcpy(d_v, v, v_bytes, hipMemcpyHostToDevice); hipMalloc((void**)&d_r, r_bytes); PackedTensorAccessor64<float, 2, RestrictPtrTraits> m_acc (d_m, sizes, strides); PackedTensorAccessor64<float, 1, RestrictPtrTraits> v_acc (d_v, &ncol, strides+1); PackedTensorAccessor64<float, 1, RestrictPtrTraits> r_acc (d_r, &nrow, strides+1); dim3 grid ((nrow + 255) / 256); dim3 block (256); printf("Warmup..\n"); for (int i = 0; i < repeat; i++) { hipLaunchKernelGGL(( tensor_packed_accessor_kernel), dim3(grid), dim3(block), 0, 0, r_acc, m_acc, v_acc); hipLaunchKernelGGL(( raw_accessor_kernel), dim3(grid), dim3(block), 0, 0, nrow, ncol, d_r, d_m, d_v); } hipDeviceSynchronize(); auto start = std::chrono::steady_clock::now(); for (int i = 0; i < repeat; i++) { hipLaunchKernelGGL(( raw_accessor_kernel), dim3(grid), dim3(block), 0, 0, nrow, ncol, d_r, d_m, d_v); } hipDeviceSynchronize(); auto end = std::chrono::steady_clock::now(); auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); printf("Average execution time of raw_accessor_kernel: %f (us)\n", time * 1e-3f / repeat); start = std::chrono::steady_clock::now(); for (int i = 0; i < repeat; i++) { hipLaunchKernelGGL(( tensor_packed_accessor_kernel), dim3(grid), dim3(block), 0, 0, r_acc, m_acc, v_acc); } hipDeviceSynchronize(); end = std::chrono::steady_clock::now(); time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - 
start).count(); printf("Average execution time of tensor_packed_accessor_kernel: %f (us)\n", time * 1e-3f / repeat); hipMemcpy(r, d_r, r_bytes, hipMemcpyDeviceToHost); hipFree(d_m); hipFree(d_v); hipFree(d_r); // verify (may fail due to floating-point rounding) bool ok = true; for (int64_t i = 0; i < nrow; i++) { if (fabsf(r[i] - r_ref[i]) > 1e-3f) { printf("%f %f\n", r[i], r_ref[i]); ok = false; break; } } printf("%s\n", ok ? "PASS" : "FAIL"); free(m); free(v); free(r); free(r_ref); return 0; }
30b4df43e4c18dccf0497fad1be6bf94943da8dd.cu
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <chrono> #include <hip/hip_runtime.h> #include "tensorAccessor.h" // A demo of packed tensor accessors in Pytorch __global__ void tensor_packed_accessor_kernel ( PackedTensorAccessor64<float, 1, RestrictPtrTraits> r, PackedTensorAccessor64<float, 2, RestrictPtrTraits> m, PackedTensorAccessor64<float, 1, RestrictPtrTraits> v) { int64_t i = blockIdx.x * blockDim.x + threadIdx.x; if (i < r.size(0)) { float val = 0.0f; for (int64_t j = 0; j < m.size(1); j++) { val += m[i][j] * v[j]; } r[i] = val; } } __global__ void raw_accessor_kernel ( const int64_t nrow, const int64_t ncol, float *__restrict__ r, const float *__restrict__ m, const float *__restrict__ v) { int64_t i = blockIdx.x * blockDim.x + threadIdx.x; if (i < nrow) { float val = 0.0f; for (int64_t j = 0; j < ncol; j++) { val += m[i * ncol + j] * v[j]; } r[i] = val; } } int main(int argc, char* argv[]) { if (argc != 4) { printf("Usage: %s <number of rows> <number of columns> <repeat>\n", argv[0]); return 1; } const int64_t nrow = atol(argv[1]); const int64_t ncol = atol(argv[2]); const int repeat = atoi(argv[3]); // tensor sizes and strides const int64_t sizes[2] = {nrow, ncol}; const int64_t strides[2] = {ncol, 1}; int64_t numel = 1; for (int i = 0; i < 2; i++) numel *= sizes[i]; // matrix vector multiply int64_t m_bytes = numel * sizeof(float); int64_t v_bytes = ncol * sizeof(float); int64_t r_bytes = nrow * sizeof(float); float *m, *v, *r, *r_ref; m = (float*) malloc (m_bytes); v = (float*) malloc (v_bytes); r = (float*) malloc (r_bytes); r_ref = (float*) malloc (r_bytes); srand(123); for (int64_t i = 0; i < numel; i++) { m[i] = rand() / (float)RAND_MAX; } for (int64_t i = 0; i < ncol; i++) { v[i] = rand() / (float)RAND_MAX; } for (int64_t i = 0; i < nrow; i++) { float val = 0.f; for (int64_t j = 0; j < ncol; j++) { val += m[i * ncol + j] * v[j]; } r_ref[i] = val; } float *d_m, *d_v, *d_r; hipMalloc((void**)&d_m, m_bytes); hipMemcpy(d_m, m, m_bytes, hipMemcpyHostToDevice); hipMalloc((void**)&d_v, v_bytes); hipMemcpy(d_v, v, v_bytes, hipMemcpyHostToDevice); hipMalloc((void**)&d_r, r_bytes); PackedTensorAccessor64<float, 2, RestrictPtrTraits> m_acc (d_m, sizes, strides); PackedTensorAccessor64<float, 1, RestrictPtrTraits> v_acc (d_v, &ncol, strides+1); PackedTensorAccessor64<float, 1, RestrictPtrTraits> r_acc (d_r, &nrow, strides+1); dim3 grid ((nrow + 255) / 256); dim3 block (256); printf("Warmup..\n"); for (int i = 0; i < repeat; i++) { tensor_packed_accessor_kernel<<<grid, block>>>(r_acc, m_acc, v_acc); raw_accessor_kernel<<<grid, block>>>(nrow, ncol, d_r, d_m, d_v); } hipDeviceSynchronize(); auto start = std::chrono::steady_clock::now(); for (int i = 0; i < repeat; i++) { raw_accessor_kernel<<<grid, block>>>(nrow, ncol, d_r, d_m, d_v); } hipDeviceSynchronize(); auto end = std::chrono::steady_clock::now(); auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); printf("Average execution time of raw_accessor_kernel: %f (us)\n", time * 1e-3f / repeat); start = std::chrono::steady_clock::now(); for (int i = 0; i < repeat; i++) { tensor_packed_accessor_kernel<<<grid, block>>>(r_acc, m_acc, v_acc); } hipDeviceSynchronize(); end = std::chrono::steady_clock::now(); time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); printf("Average execution time of tensor_packed_accessor_kernel: %f (us)\n", time * 1e-3f / repeat); hipMemcpy(r, d_r, r_bytes, hipMemcpyDeviceToHost); hipFree(d_m); hipFree(d_v); hipFree(d_r); // verify 
(may fail due to floating-point rounding) bool ok = true; for (int64_t i = 0; i < nrow; i++) { if (fabsf(r[i] - r_ref[i]) > 1e-3f) { printf("%f %f\n", r[i], r_ref[i]); ok = false; break; } } printf("%s\n", ok ? "PASS" : "FAIL"); free(m); free(v); free(r); free(r_ref); return 0; }
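The pair above exercises `PackedTensorAccessor64` from a local `tensorAccessor.h` that is not included in the dataset row. As a rough guide to what the demo expects from that header, here is a minimal, hypothetical stand-in mimicking the PyTorch-style interface the kernels use (`size()` plus an `operator[]` that peels one dimension); it is an illustrative sketch, not the actual header shipped with the benchmark.

```cuda
// Hypothetical minimal stand-in for the tensorAccessor.h used above.
// It mimics the PyTorch PackedTensorAccessor64 interface the demo relies on
// (sizes/strides copied by value so the accessor can be passed to a kernel),
// but it is NOT the real header.
#include <cstdint>
#include <cstddef>

template <typename T>
struct RestrictPtrTraits {
  typedef T* __restrict__ PtrType;
};

// Generic N-dimensional accessor: operator[] strides into the outermost
// dimension and returns an (N-1)-dimensional accessor over the rest.
template <typename T, size_t N, template <typename> class PtrTraits = RestrictPtrTraits>
class PackedTensorAccessor64 {
 public:
  typedef typename PtrTraits<T>::PtrType PtrType;

  __host__ __device__
  PackedTensorAccessor64(PtrType data, const int64_t* sizes, const int64_t* strides)
      : data_(data) {
    for (size_t i = 0; i < N; ++i) { sizes_[i] = sizes[i]; strides_[i] = strides[i]; }
  }

  __host__ __device__ int64_t size(size_t i) const { return sizes_[i]; }

  __host__ __device__
  PackedTensorAccessor64<T, N - 1, PtrTraits> operator[](int64_t i) const {
    return PackedTensorAccessor64<T, N - 1, PtrTraits>(
        data_ + i * strides_[0], sizes_ + 1, strides_ + 1);
  }

 private:
  PtrType data_;
  int64_t sizes_[N];
  int64_t strides_[N];
};

// 1-D specialization: operator[] returns a reference to the element itself.
template <typename T, template <typename> class PtrTraits>
class PackedTensorAccessor64<T, 1, PtrTraits> {
 public:
  typedef typename PtrTraits<T>::PtrType PtrType;

  __host__ __device__
  PackedTensorAccessor64(PtrType data, const int64_t* sizes, const int64_t* strides)
      : data_(data), size_(sizes[0]), stride_(strides[0]) {}

  __host__ __device__ int64_t size(size_t) const { return size_; }
  __host__ __device__ T& operator[](int64_t i) const { return data_[i * stride_]; }

 private:
  PtrType data_;
  int64_t size_;
  int64_t stride_;
};
```

The real PyTorch accessor adds bounds-checked variants and stricter const handling, but a definition along these lines should be enough to make the two benchmark kernels above self-contained.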
62bdbea3d4768ae6202404837dbff96766a11d92.hip
// !!! This is a file automatically generated by hipify!!! #include <cfloat> #include <hip/hip_runtime_api.h> #include <device_launch_parameters.h> #include <helper_math.h> #define _USE_MATH_DEFINES #include "kernels_hip.cuh" // Moller Trumbore algorithm for ray triangle intersection, +1 if no // intersection, -1 if intersection __device__ float intersect_triangle( float3 orig, float3 dir, float3 v0, float3 v1, float3 v2) { float3 e1 = v1 - v0; float3 e2 = v2 - v0; // calculate triangle normal vector float3 pvec = cross(dir, e2); float det = dot(e1, pvec); // ray is parallel to the plane if (det < FLT_EPSILON && det > -FLT_EPSILON) { return 1.f; } float invDet = 1.f / det; float3 tvec = orig - v0; float u = dot(tvec, pvec) * invDet; if (u < 0.f || u > 1.f) { return 1.f; } float3 qvec = cross(tvec, e1); float v = dot(dir, qvec) * invDet; if (v < 0.f || u + v > 1.f) { return 1.f; } float t = dot(e2, qvec) * invDet; if (t < 0.0) { return 1.f; } return -1.f; } // calculate unsigned distances of the dField grid points __global__ void calculateUnsignedDistancesKernel( float* dField, float dFieldOffsetX, float dFieldOffsetY, float dFieldOffsetZ, float voxelSizeX, float voxelSizeY, float voxelSizeZ, int gridSizeX, int gridSizeY, int gridSizeZ, float* meshPts, int nMeshPts, int meshStart, int step) { int gidx = threadIdx.x + blockIdx.x * blockDim.x; int gidy = threadIdx.y + blockIdx.y * blockDim.y; int gidz = threadIdx.z + blockIdx.z * blockDim.z; int gid = gridSizeX * gridSizeY * gidz + gridSizeX * gidy + gidx; if (gid < gridSizeX*gridSizeY*gridSizeZ) { // coordinates of the dField grid point float3 fPoint = make_float3(dFieldOffsetX, dFieldOffsetY, dFieldOffsetZ) + make_float3((float)gidx, (float)gidy, (float)gidz)*make_float3(voxelSizeX, voxelSizeY, voxelSizeZ); // initialize the minimum distance for the grid point float minDist = (meshStart == 0) ? FLT_MAX : dField[gid]; // check distances to all mesh points in this step float tmpDist; for(size_t i = meshStart; (i < (meshStart + step)) && i < nMeshPts; i++) { float3 mPoint = make_float3(meshPts[i * 3], meshPts[i * 3 + 1], meshPts[i * 3 + 2]); tmpDist = length(mPoint - fPoint); minDist = (tmpDist < minDist) ? 
tmpDist : minDist; } // set the dField grid point distance to the new minimum distance dField[gid] = minDist; } } // sign the distances of the dField grid points __global__ void signDistancesKernel( float* dField, float dFieldOffsetX, float dFieldOffsetY, float dFieldOffsetZ, float voxelSizeX, float voxelSizeY, float voxelSizeZ, int gridSizeX, int gridSizeY, int gridSizeZ, float* meshPts, unsigned int* meshFaces, int nMeshFaces, int faceStart, int step) { int gidx = threadIdx.x + blockIdx.x * blockDim.x; int gidy = threadIdx.y + blockIdx.y * blockDim.y; int gidz = threadIdx.z + blockIdx.z * blockDim.z; int gid = gridSizeX * gridSizeY * gidz + gridSizeX * gidy + gidx; if (gid < gridSizeX*gridSizeY*gridSizeZ) { // coordinates of the dField grid point float3 fPoint = make_float3(dFieldOffsetX, dFieldOffsetY, dFieldOffsetZ) + make_float3((float)gidx, (float)gidy, (float)gidz)*make_float3(voxelSizeX, voxelSizeY, voxelSizeZ); // direction of ray to cast (toward origin) float3 dir = -fPoint; // determine the sign of the distance by checking for intersection with // every face of the mesh - an odd number of intersections means the // dfield grid point is inside the mesh and the distance is negative float3 v0, v1, v2; float sign = 1.f; for (size_t i = faceStart; (i < (faceStart + step)) && i < nMeshFaces; i++) { // indices of the triangle vertices unsigned int ind0 = meshFaces[i * 3]; unsigned int ind1 = meshFaces[i * 3 + 1]; unsigned int ind2 = meshFaces[i * 3 + 2]; // get the triangle vertices v0.x = meshPts[ind0 * 3]; v0.y = meshPts[ind0 * 3 + 1]; v0.z = meshPts[ind0 * 3 + 2]; v1.x = meshPts[ind1 * 3]; v1.y = meshPts[ind1 * 3 + 1]; v1.z = meshPts[ind1 * 3 + 2]; v2.x = meshPts[ind2 * 3]; v2.y = meshPts[ind2 * 3 + 1]; v2.z = meshPts[ind2 * 3 + 2]; sign *= intersect_triangle(fPoint, dir, v0, v1, v2); } // set the dField grid point distance to the new minimum distance dField[gid] *= sign; } } // clear drr __global__ void clearDRRKernel( float* img, int nPix) { // X and Y positions of the pixel in the DRR image int gidx = threadIdx.x + blockIdx.x * blockDim.x; int gidy = threadIdx.y + blockIdx.y * blockDim.y; if (gidx < nPix && gidy < nPix) { img[nPix * gidy + gidx] = 0; } } // ray trace __global__ void raycastKernel( float* dField, float dFieldOffsetX, float dFieldOffsetY, float dFieldOffsetZ, float voxelSizeX, float voxelSizeY, float voxelSizeZ, int gridSizeX, int gridSizeY, int gridSizeZ, float* imv, float imPosX, float imPosY, float imPosZ, float* aabb, float* img, float pixSize, int nPix, float weight, float step, float density) { // X and Y positions of the pixel in the DRR image int gidx = threadIdx.x + blockIdx.x * blockDim.x; int gidy = threadIdx.y + blockIdx.y * blockDim.y; if (gidx < nPix && gidy < nPix) { // coordinates of the pixel and direction of the look ray in source space float u = (gidx - (nPix + 1.f) / 2.f) * pixSize; float v = (gidy - (nPix + 1.f) / 2.f) * pixSize; float3 pixPos = make_float3(u, v, 0.f) + make_float3(imPosX, imPosY, imPosZ); float3 look = step * normalize(pixPos); // ray origin in object space float3 rayOrigin = make_float3(imv[12], imv[13], imv[14]); // look ray of the pixel in object space float3 rayDirection = make_float3( dot(make_float3(imv[0], imv[4], imv[8]), look), dot(make_float3(imv[1], imv[5], imv[9]), look), dot(make_float3(imv[2], imv[6], imv[10]), look)); // min and max corners of the bounding box float3 boxMin = make_float3(aabb[0], aabb[1], aabb[2]); float3 boxMax = make_float3(aabb[3], aabb[4], aabb[5]); // compute intersection of ray with all 
six planes of the bounding box float3 tBot = (boxMin - rayOrigin) / rayDirection; float3 tTop = (boxMax - rayOrigin) / rayDirection; // re-order intersections to find smallest and largest on each axis float3 tMin = fminf(tTop, tBot); float3 tMax = fmaxf(tTop, tBot); // find the largest tMin and smallest tMax float near = fmaxf(fmaxf(tMin.x, tMin.y), tMin.z); float far = fminf(fminf(tMax.x, tMax.y), tMax.z); // if ray does not intersect the boundign box, skip it if(!(far > near)) return; // clamp the ray to the near plane if (near < 0.f) near = 0.f; // perform ray marching from back to front in uniform steps - step size was // set in the look ray // start at pixel and march backwards float t = fminf(far, length(pixPos)/step); // current point on the ray in space float3 rayPoint; // point in distance field coordinates float3 lookup; // distance field offset from the origin and voxel size float3 dFieldOffset = make_float3( dFieldOffsetX, dFieldOffsetY, dFieldOffsetZ); float3 dFieldVoxelSize = make_float3( voxelSizeX, voxelSizeY, voxelSizeZ); // parameters used in the spline interpolation float3 i1, i2; int i1x, i1y, i1z, i2x, i2y, i2z; float tmp1, tmp2; float P0, P1, P2, P3, P4, P5, P6, P7; float A, B, C, D, E, F; // distance from the point on the ray to the surface float dist; // ray intensity at pixel float intensity = 0.f; // march! while (t>near) { // point in space and in distance field rayPoint = rayOrigin + t*rayDirection; lookup = (rayPoint - dFieldOffset) / dFieldVoxelSize; // spline interpolation to get distance to surface i1 = floorf(lookup); i2 = i1 + make_float3(1.f, 1.f, 1.f); i1x = (int)i1.x; i1y = (int)i1.y; i1z = (int)i1.z; i2x = (int)i2.x; i2y = (int)i2.y; i2z = (int)i2.z; if (i2x > (gridSizeX-1) || (i2y > gridSizeY-1) || (i2z > gridSizeZ-1) || i1x < 0 || i1y < 0 || i1z < 0) { intensity += 0.f; } else { P0 = dField[i1x + gridSizeX * i1y + gridSizeX*gridSizeY * i1z]; P1 = dField[i2x + gridSizeX * i1y + gridSizeX*gridSizeY * i1z]; P2 = dField[i2x + gridSizeX * i1y + gridSizeX*gridSizeY * i2z]; P3 = dField[i1x + gridSizeX * i1y + gridSizeX*gridSizeY * i2z]; P4 = dField[i1x + gridSizeX * i2y + gridSizeX*gridSizeY * i1z]; P5 = dField[i2x + gridSizeX * i2y + gridSizeX*gridSizeY * i1z]; P6 = dField[i2x + gridSizeX * i2y + gridSizeX*gridSizeY * i2z]; P7 = dField[i1x + gridSizeX * i2y + gridSizeX*gridSizeY * i2z]; tmp1 = lookup.x - (float)i1x; tmp2 = (float)i2x - lookup.x; A = tmp1 * P2 + tmp2 * P3; B = tmp1 * P1 + tmp2 * P0; C = tmp1 * P5 + tmp2 * P4; D = tmp1 * P6 + tmp2 * P7; tmp1 = lookup.y - (float)i1y; tmp2 = (float)i2y - lookup.y; E = tmp1 * D + tmp2 * A; F = tmp1 * C + tmp2 * B; tmp1 = lookup.z - (float)i1z; tmp2 = (float)i2z - lookup.z; dist = tmp1 * E + tmp2 * F; // If point inside mesh, add to intensity. 
if (dist <= 0.0) intensity += weight; } t -= 1.f; } // clamp pixel intensity if (intensity > 0.f) { intensity *= density; img[nPix * gidy + gidx] = clamp( img[nPix * gidy + gidx] + intensity, 0.f, FLT_MAX); } } } void calculate_unsigned_distances(dim3 dimGrid, dim3 dimBlock, float* d_dField, float* dFieldOffset, float* voxelSize, int* gridSize, float* d_meshPts, int nMeshPts, int meshStart, int step) { hipLaunchKernelGGL(( calculateUnsignedDistancesKernel) , dim3(dimGrid), dim3(dimBlock) , 0, 0, d_dField, dFieldOffset[0], dFieldOffset[1], dFieldOffset[2], voxelSize[0], voxelSize[1], voxelSize[2], gridSize[0], gridSize[1], gridSize[2], d_meshPts, nMeshPts, meshStart, step); } void sign_distances(dim3 dimGrid, dim3 dimBlock, float* d_dField, float* dFieldOffset, float* voxelSize, int* gridSize, float* d_meshPts, unsigned int* d_meshFaces, int nMeshFaces, int faceStart, int step) { hipLaunchKernelGGL(( signDistancesKernel) , dim3(dimGrid), dim3(dimBlock) , 0, 0, d_dField, dFieldOffset[0], dFieldOffset[1], dFieldOffset[2], voxelSize[0], voxelSize[1], voxelSize[2], gridSize[0], gridSize[1], gridSize[2], d_meshPts, d_meshFaces, nMeshFaces, faceStart, step); } void raycast(dim3 dimGrid, dim3 dimBlock, float* d_dField, float* dFieldOffset, float* voxelSize, int* gridSize, float* d_imv, float* imPos, float* d_aabb, float* d_img, float pixSize, int nPix, float weight, float step, float density) { hipLaunchKernelGGL(( raycastKernel) , dim3(dimGrid), dim3(dimBlock) , 0, 0, d_dField, dFieldOffset[0], dFieldOffset[1], dFieldOffset[2], voxelSize[0], voxelSize[1], voxelSize[2], gridSize[0], gridSize[1], gridSize[2], d_imv, imPos[0], imPos[1], imPos[2], d_aabb, d_img, pixSize, nPix, weight, step, density); } void clearDRR(dim3 dimGrid, dim3 dimBlock, float* img, int nPix) { hipLaunchKernelGGL(( clearDRRKernel) , dim3(dimGrid), dim3(dimBlock) , 0, 0, img, nPix); }
62bdbea3d4768ae6202404837dbff96766a11d92.cu
#include <cfloat> #include <cuda_runtime_api.h> #include <device_launch_parameters.h> #include <helper_math.h> #define _USE_MATH_DEFINES #include "kernels.cuh" // Moller Trumbore algorithm for ray triangle intersection, +1 if no // intersection, -1 if intersection __device__ float intersect_triangle( float3 orig, float3 dir, float3 v0, float3 v1, float3 v2) { float3 e1 = v1 - v0; float3 e2 = v2 - v0; // calculate triangle normal vector float3 pvec = cross(dir, e2); float det = dot(e1, pvec); // ray is parallel to the plane if (det < FLT_EPSILON && det > -FLT_EPSILON) { return 1.f; } float invDet = 1.f / det; float3 tvec = orig - v0; float u = dot(tvec, pvec) * invDet; if (u < 0.f || u > 1.f) { return 1.f; } float3 qvec = cross(tvec, e1); float v = dot(dir, qvec) * invDet; if (v < 0.f || u + v > 1.f) { return 1.f; } float t = dot(e2, qvec) * invDet; if (t < 0.0) { return 1.f; } return -1.f; } // calculate unsigned distances of the dField grid points __global__ void calculateUnsignedDistancesKernel( float* dField, float dFieldOffsetX, float dFieldOffsetY, float dFieldOffsetZ, float voxelSizeX, float voxelSizeY, float voxelSizeZ, int gridSizeX, int gridSizeY, int gridSizeZ, float* meshPts, int nMeshPts, int meshStart, int step) { int gidx = threadIdx.x + blockIdx.x * blockDim.x; int gidy = threadIdx.y + blockIdx.y * blockDim.y; int gidz = threadIdx.z + blockIdx.z * blockDim.z; int gid = gridSizeX * gridSizeY * gidz + gridSizeX * gidy + gidx; if (gid < gridSizeX*gridSizeY*gridSizeZ) { // coordinates of the dField grid point float3 fPoint = make_float3(dFieldOffsetX, dFieldOffsetY, dFieldOffsetZ) + make_float3((float)gidx, (float)gidy, (float)gidz)*make_float3(voxelSizeX, voxelSizeY, voxelSizeZ); // initialize the minimum distance for the grid point float minDist = (meshStart == 0) ? FLT_MAX : dField[gid]; // check distances to all mesh points in this step float tmpDist; for(size_t i = meshStart; (i < (meshStart + step)) && i < nMeshPts; i++) { float3 mPoint = make_float3(meshPts[i * 3], meshPts[i * 3 + 1], meshPts[i * 3 + 2]); tmpDist = length(mPoint - fPoint); minDist = (tmpDist < minDist) ? 
tmpDist : minDist; } // set the dField grid point distance to the new minimum distance dField[gid] = minDist; } } // sign the distances of the dField grid points __global__ void signDistancesKernel( float* dField, float dFieldOffsetX, float dFieldOffsetY, float dFieldOffsetZ, float voxelSizeX, float voxelSizeY, float voxelSizeZ, int gridSizeX, int gridSizeY, int gridSizeZ, float* meshPts, unsigned int* meshFaces, int nMeshFaces, int faceStart, int step) { int gidx = threadIdx.x + blockIdx.x * blockDim.x; int gidy = threadIdx.y + blockIdx.y * blockDim.y; int gidz = threadIdx.z + blockIdx.z * blockDim.z; int gid = gridSizeX * gridSizeY * gidz + gridSizeX * gidy + gidx; if (gid < gridSizeX*gridSizeY*gridSizeZ) { // coordinates of the dField grid point float3 fPoint = make_float3(dFieldOffsetX, dFieldOffsetY, dFieldOffsetZ) + make_float3((float)gidx, (float)gidy, (float)gidz)*make_float3(voxelSizeX, voxelSizeY, voxelSizeZ); // direction of ray to cast (toward origin) float3 dir = -fPoint; // determine the sign of the distance by checking for intersection with // every face of the mesh - an odd number of intersections means the // dfield grid point is inside the mesh and the distance is negative float3 v0, v1, v2; float sign = 1.f; for (size_t i = faceStart; (i < (faceStart + step)) && i < nMeshFaces; i++) { // indices of the triangle vertices unsigned int ind0 = meshFaces[i * 3]; unsigned int ind1 = meshFaces[i * 3 + 1]; unsigned int ind2 = meshFaces[i * 3 + 2]; // get the triangle vertices v0.x = meshPts[ind0 * 3]; v0.y = meshPts[ind0 * 3 + 1]; v0.z = meshPts[ind0 * 3 + 2]; v1.x = meshPts[ind1 * 3]; v1.y = meshPts[ind1 * 3 + 1]; v1.z = meshPts[ind1 * 3 + 2]; v2.x = meshPts[ind2 * 3]; v2.y = meshPts[ind2 * 3 + 1]; v2.z = meshPts[ind2 * 3 + 2]; sign *= intersect_triangle(fPoint, dir, v0, v1, v2); } // set the dField grid point distance to the new minimum distance dField[gid] *= sign; } } // clear drr __global__ void clearDRRKernel( float* img, int nPix) { // X and Y positions of the pixel in the DRR image int gidx = threadIdx.x + blockIdx.x * blockDim.x; int gidy = threadIdx.y + blockIdx.y * blockDim.y; if (gidx < nPix && gidy < nPix) { img[nPix * gidy + gidx] = 0; } } // ray trace __global__ void raycastKernel( float* dField, float dFieldOffsetX, float dFieldOffsetY, float dFieldOffsetZ, float voxelSizeX, float voxelSizeY, float voxelSizeZ, int gridSizeX, int gridSizeY, int gridSizeZ, float* imv, float imPosX, float imPosY, float imPosZ, float* aabb, float* img, float pixSize, int nPix, float weight, float step, float density) { // X and Y positions of the pixel in the DRR image int gidx = threadIdx.x + blockIdx.x * blockDim.x; int gidy = threadIdx.y + blockIdx.y * blockDim.y; if (gidx < nPix && gidy < nPix) { // coordinates of the pixel and direction of the look ray in source space float u = (gidx - (nPix + 1.f) / 2.f) * pixSize; float v = (gidy - (nPix + 1.f) / 2.f) * pixSize; float3 pixPos = make_float3(u, v, 0.f) + make_float3(imPosX, imPosY, imPosZ); float3 look = step * normalize(pixPos); // ray origin in object space float3 rayOrigin = make_float3(imv[12], imv[13], imv[14]); // look ray of the pixel in object space float3 rayDirection = make_float3( dot(make_float3(imv[0], imv[4], imv[8]), look), dot(make_float3(imv[1], imv[5], imv[9]), look), dot(make_float3(imv[2], imv[6], imv[10]), look)); // min and max corners of the bounding box float3 boxMin = make_float3(aabb[0], aabb[1], aabb[2]); float3 boxMax = make_float3(aabb[3], aabb[4], aabb[5]); // compute intersection of ray with all 
six planes of the bounding box float3 tBot = (boxMin - rayOrigin) / rayDirection; float3 tTop = (boxMax - rayOrigin) / rayDirection; // re-order intersections to find smallest and largest on each axis float3 tMin = fminf(tTop, tBot); float3 tMax = fmaxf(tTop, tBot); // find the largest tMin and smallest tMax float near = fmaxf(fmaxf(tMin.x, tMin.y), tMin.z); float far = fminf(fminf(tMax.x, tMax.y), tMax.z); // if ray does not intersect the boundign box, skip it if(!(far > near)) return; // clamp the ray to the near plane if (near < 0.f) near = 0.f; // perform ray marching from back to front in uniform steps - step size was // set in the look ray // start at pixel and march backwards float t = fminf(far, length(pixPos)/step); // current point on the ray in space float3 rayPoint; // point in distance field coordinates float3 lookup; // distance field offset from the origin and voxel size float3 dFieldOffset = make_float3( dFieldOffsetX, dFieldOffsetY, dFieldOffsetZ); float3 dFieldVoxelSize = make_float3( voxelSizeX, voxelSizeY, voxelSizeZ); // parameters used in the spline interpolation float3 i1, i2; int i1x, i1y, i1z, i2x, i2y, i2z; float tmp1, tmp2; float P0, P1, P2, P3, P4, P5, P6, P7; float A, B, C, D, E, F; // distance from the point on the ray to the surface float dist; // ray intensity at pixel float intensity = 0.f; // march! while (t>near) { // point in space and in distance field rayPoint = rayOrigin + t*rayDirection; lookup = (rayPoint - dFieldOffset) / dFieldVoxelSize; // spline interpolation to get distance to surface i1 = floorf(lookup); i2 = i1 + make_float3(1.f, 1.f, 1.f); i1x = (int)i1.x; i1y = (int)i1.y; i1z = (int)i1.z; i2x = (int)i2.x; i2y = (int)i2.y; i2z = (int)i2.z; if (i2x > (gridSizeX-1) || (i2y > gridSizeY-1) || (i2z > gridSizeZ-1) || i1x < 0 || i1y < 0 || i1z < 0) { intensity += 0.f; } else { P0 = dField[i1x + gridSizeX * i1y + gridSizeX*gridSizeY * i1z]; P1 = dField[i2x + gridSizeX * i1y + gridSizeX*gridSizeY * i1z]; P2 = dField[i2x + gridSizeX * i1y + gridSizeX*gridSizeY * i2z]; P3 = dField[i1x + gridSizeX * i1y + gridSizeX*gridSizeY * i2z]; P4 = dField[i1x + gridSizeX * i2y + gridSizeX*gridSizeY * i1z]; P5 = dField[i2x + gridSizeX * i2y + gridSizeX*gridSizeY * i1z]; P6 = dField[i2x + gridSizeX * i2y + gridSizeX*gridSizeY * i2z]; P7 = dField[i1x + gridSizeX * i2y + gridSizeX*gridSizeY * i2z]; tmp1 = lookup.x - (float)i1x; tmp2 = (float)i2x - lookup.x; A = tmp1 * P2 + tmp2 * P3; B = tmp1 * P1 + tmp2 * P0; C = tmp1 * P5 + tmp2 * P4; D = tmp1 * P6 + tmp2 * P7; tmp1 = lookup.y - (float)i1y; tmp2 = (float)i2y - lookup.y; E = tmp1 * D + tmp2 * A; F = tmp1 * C + tmp2 * B; tmp1 = lookup.z - (float)i1z; tmp2 = (float)i2z - lookup.z; dist = tmp1 * E + tmp2 * F; // If point inside mesh, add to intensity. 
if (dist <= 0.0) intensity += weight; } t -= 1.f; } // clamp pixel intensity if (intensity > 0.f) { intensity *= density; img[nPix * gidy + gidx] = clamp( img[nPix * gidy + gidx] + intensity, 0.f, FLT_MAX); } } } void calculate_unsigned_distances(dim3 dimGrid, dim3 dimBlock, float* d_dField, float* dFieldOffset, float* voxelSize, int* gridSize, float* d_meshPts, int nMeshPts, int meshStart, int step) { calculateUnsignedDistancesKernel <<< dimGrid, dimBlock >>> (d_dField, dFieldOffset[0], dFieldOffset[1], dFieldOffset[2], voxelSize[0], voxelSize[1], voxelSize[2], gridSize[0], gridSize[1], gridSize[2], d_meshPts, nMeshPts, meshStart, step); } void sign_distances(dim3 dimGrid, dim3 dimBlock, float* d_dField, float* dFieldOffset, float* voxelSize, int* gridSize, float* d_meshPts, unsigned int* d_meshFaces, int nMeshFaces, int faceStart, int step) { signDistancesKernel <<< dimGrid, dimBlock >>> (d_dField, dFieldOffset[0], dFieldOffset[1], dFieldOffset[2], voxelSize[0], voxelSize[1], voxelSize[2], gridSize[0], gridSize[1], gridSize[2], d_meshPts, d_meshFaces, nMeshFaces, faceStart, step); } void raycast(dim3 dimGrid, dim3 dimBlock, float* d_dField, float* dFieldOffset, float* voxelSize, int* gridSize, float* d_imv, float* imPos, float* d_aabb, float* d_img, float pixSize, int nPix, float weight, float step, float density) { raycastKernel <<< dimGrid, dimBlock >>> (d_dField, dFieldOffset[0], dFieldOffset[1], dFieldOffset[2], voxelSize[0], voxelSize[1], voxelSize[2], gridSize[0], gridSize[1], gridSize[2], d_imv, imPos[0], imPos[1], imPos[2], d_aabb, d_img, pixSize, nPix, weight, step, density); } void clearDRR(dim3 dimGrid, dim3 dimBlock, float* img, int nPix) { clearDRRKernel <<< dimGrid, dimBlock >>> (img, nPix); }
b627a6109592e5108ecda75bc9607ddfffe3ee19.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright (c) 2019-2020, Brian Schnepp Permission is hereby granted, free of charge, to any person or organization obtaining a copy of the software and accompanying documentation covered by this license (the "Software") to use, reproduce, display, distribute, execute, and transmit the Software, and to prepare derivative works of the Software, and to permit third-parties to whom the Software is furnished to do so, all subject to the following: The copyright notices in the Software and this entire statement, including the above license grant, this restriction and the following disclaimer, must be included in all copies of the Software, in whole or in part, and all derivative works of the Software, unless such copies or derivative works are solely in the form of machine-executable object code generated by a source language processor. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include <stdio.h> #include <stdint.h> #include <stdlib.h> #include <wchar.h> #include <hip/hip_runtime.h> #include <ctype.h> #include "ngram.h" /* isspace doesn't compile for device code. * May replace this with a GPU device function later. */ #define IS_WHITESPACE(x) (x == ' ' || x == '\n' || x == '\r' || x == '\t' || \ x == '\v' || x == '\f') typedef struct GpuContext { /* Team Green calls this a "warp" Insist on calling it a "wavefront". */ uint64_t GpuIndex; uint64_t WavefrontCount; uint64_t WavefrontSize; /* Is probably 64??? */ uint64_t GpuMemoryAmt; }GpuContext; /* * Copies the entirety of a file to GPU memory. * * @return void * * @param Name The name of the file to open. * @param Dst A pointer to an unallocated void pointer which * will be updated with the result of hipMalloc and * the entire content of the file. */ void ReadFile(const char *Name, void **Dst, uint64_t *OutSize) { FILE *File = fopen(Name, "r"); if (File == NULL) { fprintf(stderr, "Could not open file %s\n", Name); *OutSize = 0; return; } fseek(File, 0, SEEK_END); uint64_t Length = ftell(File); *OutSize = Length; rewind(File); void *Buffer = malloc(Length); uint64_t ReadCount = fread(Buffer, 1, Length, File); hipMalloc(Dst, Length); hipMemcpy(*Dst, Buffer, Length, hipMemcpyHostToDevice); free(Buffer); fclose(File); } __global__ void TestFile(void *Src, void *Spaces, uint64_t Length) { uint64_t Index = 0; char *SrcC = (char*)(Src); for (Index = blockIdx.x * blockDim.y + threadIdx.x; Index < Length; Index += blockDim.x * gridDim.x) { if (IS_WHITESPACE(SrcC[Index])) { ((char*)(Spaces))[Index] = 1; } else { ((char*)(Spaces))[Index] = 0; } } } int main(int argc, char **argv) { void *TxtPtr; void *SpaceBfr; void *TxtPtrHost; /* Eat up GPU initalization time early, so sync after doing nothing. */ hipDeviceSynchronize(); if (argc < 2) { fprintf(stderr, "Error: missing file name to read.\n"); return -1; } const char *FName = argv[1]; uint64_t Length; ReadFile(FName, &TxtPtr, &Length); hipDeviceSynchronize(); printf("Got length of file %lu\n", Length); hipMallocManaged(&SpaceBfr, Length); TxtPtrHost = malloc(Length); /* This performs the best on my GP102. 
(11GB VRam) */ hipMemPrefetchAsync(SpaceBfr, Length, 0); hipLaunchKernelGGL(( TestFile), dim3(Length/256), dim3(1), 0, 0, TxtPtr, SpaceBfr, Length); hipDeviceSynchronize(); hipMemcpy(TxtPtrHost, TxtPtr, Length, hipMemcpyDeviceToHost); for (uint64_t Index = 0; Index < Length; ++Index) { if (((char*)SpaceBfr)[Index] == 0) { printf("%c", ((char*)(TxtPtrHost))[Index]); } } printf("\n"); hipFree(TxtPtr); hipFree(SpaceBfr); free(TxtPtrHost); return 0; }
b627a6109592e5108ecda75bc9607ddfffe3ee19.cu
/* Copyright (c) 2019-2020, Brian Schnepp Permission is hereby granted, free of charge, to any person or organization obtaining a copy of the software and accompanying documentation covered by this license (the "Software") to use, reproduce, display, distribute, execute, and transmit the Software, and to prepare derivative works of the Software, and to permit third-parties to whom the Software is furnished to do so, all subject to the following: The copyright notices in the Software and this entire statement, including the above license grant, this restriction and the following disclaimer, must be included in all copies of the Software, in whole or in part, and all derivative works of the Software, unless such copies or derivative works are solely in the form of machine-executable object code generated by a source language processor. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include <stdio.h> #include <stdint.h> #include <stdlib.h> #include <wchar.h> #include <cuda.h> #include <ctype.h> #include "ngram.h" /* isspace doesn't compile for device code. * May replace this with a GPU device function later. */ #define IS_WHITESPACE(x) (x == ' ' || x == '\n' || x == '\r' || x == '\t' || \ x == '\v' || x == '\f') typedef struct GpuContext { /* Team Green calls this a "warp" Insist on calling it a "wavefront". */ uint64_t GpuIndex; uint64_t WavefrontCount; uint64_t WavefrontSize; /* Is probably 64??? */ uint64_t GpuMemoryAmt; }GpuContext; /* * Copies the entirety of a file to GPU memory. * * @return void * * @param Name The name of the file to open. * @param Dst A pointer to an unallocated void pointer which * will be updated with the result of cudaMalloc and * the entire content of the file. */ void ReadFile(const char *Name, void **Dst, uint64_t *OutSize) { FILE *File = fopen(Name, "r"); if (File == NULL) { fprintf(stderr, "Could not open file %s\n", Name); *OutSize = 0; return; } fseek(File, 0, SEEK_END); uint64_t Length = ftell(File); *OutSize = Length; rewind(File); void *Buffer = malloc(Length); uint64_t ReadCount = fread(Buffer, 1, Length, File); cudaMalloc(Dst, Length); cudaMemcpy(*Dst, Buffer, Length, cudaMemcpyHostToDevice); free(Buffer); fclose(File); } __global__ void TestFile(void *Src, void *Spaces, uint64_t Length) { uint64_t Index = 0; char *SrcC = (char*)(Src); for (Index = blockIdx.x * blockDim.y + threadIdx.x; Index < Length; Index += blockDim.x * gridDim.x) { if (IS_WHITESPACE(SrcC[Index])) { ((char*)(Spaces))[Index] = 1; } else { ((char*)(Spaces))[Index] = 0; } } } int main(int argc, char **argv) { void *TxtPtr; void *SpaceBfr; void *TxtPtrHost; /* Eat up GPU initalization time early, so sync after doing nothing. */ cudaDeviceSynchronize(); if (argc < 2) { fprintf(stderr, "Error: missing file name to read.\n"); return -1; } const char *FName = argv[1]; uint64_t Length; ReadFile(FName, &TxtPtr, &Length); cudaDeviceSynchronize(); printf("Got length of file %lu\n", Length); cudaMallocManaged(&SpaceBfr, Length); TxtPtrHost = malloc(Length); /* This performs the best on my GP102. 
(11GB VRam) */ cudaMemPrefetchAsync(SpaceBfr, Length, 0); TestFile<<<Length/256, 1>>>(TxtPtr, SpaceBfr, Length); cudaDeviceSynchronize(); cudaMemcpy(TxtPtrHost, TxtPtr, Length, cudaMemcpyDeviceToHost); for (uint64_t Index = 0; Index < Length; ++Index) { if (((char*)SpaceBfr)[Index] == 0) { printf("%c", ((char*)(TxtPtrHost))[Index]); } } printf("\n"); cudaFree(TxtPtr); cudaFree(SpaceBfr); free(TxtPtrHost); return 0; }
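One thing to note about the pair above: `TestFile` computes its starting index as `blockIdx.x * blockDim.y + threadIdx.x`, which only covers the whole buffer because the launch uses a single thread per block (`<<<Length/256, 1>>>`), and that launch configuration requests zero blocks for inputs shorter than 256 bytes. A conventional grid-stride formulation indexes with `blockDim.x` and launches a full block; the sketch below is illustrative, not a correction applied to the dataset row.

```cuda
// Sketch of the same whitespace classification written as a standard
// grid-stride loop. Names mirror the TestFile kernel above, but this is an
// illustrative rewrite, not the code stored in the dataset.
#include <stdint.h>

#define IS_WHITESPACE(x) ((x) == ' ' || (x) == '\n' || (x) == '\r' || \
                          (x) == '\t' || (x) == '\v' || (x) == '\f')

__global__ void TestFileGridStride(const char *Src, char *Spaces, uint64_t Length)
{
    // Each thread starts at its global index and strides by the total number
    // of threads in the grid, so any <<<blocks, threads>>> shape works.
    for (uint64_t Index = (uint64_t)blockIdx.x * blockDim.x + threadIdx.x;
         Index < Length;
         Index += (uint64_t)blockDim.x * gridDim.x)
    {
        Spaces[Index] = IS_WHITESPACE(Src[Index]) ? 1 : 0;
    }
}

// Example launch (hypothetical): 256 threads per block and enough blocks to
// cover the file, which also guarantees at least one block for small inputs.
// TestFileGridStride<<<(Length + 255) / 256, 256>>>(SrcDev, SpacesDev, Length);
```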
3f48df67cc1c8f4a19fdc99772443c816b8dee43.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/AccumulateType.h> #include <ATen/ceil_div.h> #include <ATen/NamedTensorUtils.h> #include <ATen/NumericUtils.h> #include <ATen/native/Pool.h> #include <ATen/hip/Atomic.cuh> #include <ATen/hip/HIPContext.h> #include <ATen/hip/NumericLimits.cuh> #include <ATen/hip/detail/TensorInfo.cuh> #include <ATen/hip/detail/IndexUtils.cuh> #include <ATen/hip/detail/KernelUtils.h> #include <c10/macros/Macros.h> namespace at { namespace native { namespace { __device__ inline int min(int a, int b) { return a <= b ? a : b; } template <typename scalar_t> __global__ static void max_pool3d_with_indices_single_out_frame( scalar_t* inputData, PackedTensorAccessor64<scalar_t, 4> output, PackedTensorAccessor64<int64_t, 4> indices, int itime, int iheight, int iwidth, int kT, int kH, int kW, int dT, int dH, int dW, int pT, int pH, int pW, int dilationT, int dilationH, int dilationW, int offsetZ) { int oColumn = blockIdx.x * blockDim.x + threadIdx.x; int oRow = blockIdx.y * blockDim.y + threadIdx.y; int oFrame = (blockIdx.z + offsetZ) % output.size(1); // output frame/time int64_t slice = (blockIdx.z + offsetZ) / output.size(1); // output slice/feature // For int64_t data type, see https://github.com/pytorch/pytorch/issues/52822 if (oRow < output.size(2) && oColumn < output.size(3)) { int tStart = oFrame * dT - pT; int hStart = oRow * dH - pH; int wStart = oColumn * dW - pW; int tEnd = min(tStart + (kT - 1) * dilationT + 1, itime); int hEnd = min(hStart + (kH - 1) * dilationH + 1, iheight); int wEnd = min(wStart + (kW - 1) * dilationW + 1, iwidth); while(tStart < 0) tStart += dilationT; while(hStart < 0) hStart += dilationH; while(wStart < 0) wStart += dilationW; int maxIndex = tStart * iheight * iwidth + hStart * iwidth + wStart; inputData += slice * itime * iheight * iwidth; scalar_t max = at::numeric_limits<scalar_t>::lower_bound(); // -Infinity for (int t = tStart; t < tEnd; t += dilationT) { for (int h = hStart; h < hEnd; h += dilationH) { for (int w = wStart; w < wEnd; w += dilationW) { int index = t * iheight * iwidth + h * iwidth + w; scalar_t val = inputData[index]; if ((max < val) || at::_isnan(val)) { max = val; maxIndex = index; } } } } output[slice][oFrame][oRow][oColumn] = max; indices[slice][oFrame][oRow][oColumn] = maxIndex; } } template <typename scalar_t> void max_pool3d_with_indices_out_frame( scalar_t* input_data, const Tensor& output, const Tensor& indices, int totalZ, int itime, int iheight, int iwidth, int otime, int oheight, int owidth, int kT, int kH, int kW, int dT, int dH, int dW, int pT, int pH, int pW, int dilationT, int dilationH, int dilationW) { int offsetZ = 0; dim3 block(32, 8); while (totalZ > 0) { dim3 grid(ceil_div(owidth, static_cast<int>(block.x)), ceil_div(oheight, static_cast<int>(block.y)), totalZ > 65535 ? 
65535 : totalZ); hipLaunchKernelGGL(( max_pool3d_with_indices_single_out_frame) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), input_data, output.packed_accessor64<scalar_t, 4>(), indices.packed_accessor64<int64_t, 4>(), itime, iheight, iwidth, kT, kH, kW, dT, dH, dW, pT, pH, pW, dilationT, dilationH, dilationW, offsetZ); C10_HIP_KERNEL_LAUNCH_CHECK(); totalZ -= 65535; offsetZ += 65535; } } #undef UPDATE_OUTPUT_KERNEL_WIDTH template <typename scalar_t> __global__ static void max_pool3d_with_indices_backward_single_out_frame( scalar_t *gradInputData, PackedTensorAccessor64<scalar_t, 4> gradOutput, PackedTensorAccessor64<int64_t, 4> indices, int itime, int iheight, int iwidth, int dT, int dH, int dW, int pT, int pH, int pW, int dilationT, int dilationH, int dilationW, int offsetZ) { int oColumn = blockIdx.x * blockDim.x + threadIdx.x; int oRow = blockIdx.y * blockDim.y + threadIdx.y; int oFrame = (blockIdx.z + offsetZ) % gradOutput.size(1); // output frame/time int slice = (blockIdx.z + offsetZ) / gradOutput.size(1); // output slice/feature if (oRow < gradOutput.size(2) && oColumn < gradOutput.size(3)) { int maxIndex = indices[slice][oFrame][oRow][oColumn]; if (maxIndex != -1) { gpuAtomicAddNoReturn(&gradInputData[slice * itime * iheight * iwidth + maxIndex], gradOutput[slice][oFrame][oRow][oColumn]); } } } template <typename scalar_t> void max_pool3d_with_indices_backward_out_frame( scalar_t *gradInputData, const Tensor& gradOutput, const Tensor& indices, int64_t totalZ, int itime, int iheight, int iwidth, int oheight, int owidth, int dT, int dH, int dW, int pT, int pH, int pW, int dilationT, int dilationH, int dilationW) { int offsetZ = 0; dim3 block(32, 8); while (totalZ > 0) { dim3 grid(ceil_div(owidth, static_cast<int>(block.x)), ceil_div(oheight, static_cast<int>(block.y)), totalZ > 65535 ? 65535 : totalZ); hipLaunchKernelGGL(( max_pool3d_with_indices_backward_single_out_frame) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), gradInputData, gradOutput.packed_accessor64<scalar_t, 4>(), indices.packed_accessor64<int64_t, 4>(), itime, iheight, iwidth, dT, dH, dW, pT, pH, pW, dilationT, dilationH, dilationW, offsetZ); C10_HIP_KERNEL_LAUNCH_CHECK(); totalZ -= 65535; offsetZ += 65535; } } void max_pool3d_with_indices_out_cuda_template( Tensor& output, Tensor& indices, const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) { TensorArg output_arg{ output, "output", 1 }; TensorArg indices_arg{ indices, "indices", 2 }; TensorArg input_arg{ input, "input", 3 }; checkAllSameGPU(__func__, {output_arg, indices_arg, input_arg}); // #20866, #22032: Guarantee this for the official C++ API? TORCH_CHECK(kernel_size.size() == 1 || kernel_size.size() == 3, "max_pool3d: kernel_size must either be a single int, or a tuple of three ints") const int kT = safe_downcast<int, int64_t>(kernel_size[0]); const int kH = kernel_size.size() == 1 ? kT : safe_downcast<int, int64_t>(kernel_size[1]); const int kW = kernel_size.size() == 1 ? kT : safe_downcast<int, int64_t>(kernel_size[2]); TORCH_CHECK(stride.size() == 0 || stride.size() == 1 || stride.size() == 3, "max_pool3d: stride must either be omitted, a single int, or a tuple of three ints") const int dT = stride.empty() ? kT : safe_downcast<int, int64_t>(stride[0]); const int dH = stride.empty() ? kH : stride.size() == 1 ? dT : safe_downcast<int, int64_t>(stride[1]); const int dW = stride.empty() ? kW : stride.size() == 1 ? 
dT : safe_downcast<int, int64_t>(stride[2]); TORCH_CHECK(padding.size() == 1 || padding.size() == 3, "max_pool3d: padding must be either be a single int, or a tuple of three ints"); const int pT = safe_downcast<int, int64_t>(padding[0]); const int pH = padding.size() == 1 ? pT : safe_downcast<int, int64_t>(padding[1]); const int pW = padding.size() == 1 ? pT : safe_downcast<int, int64_t>(padding[2]); TORCH_CHECK(dilation.size() == 1 || dilation.size() == 3, "max_pool3d: dilation must be either a single int, or a tuple of three ints"); const int dilationT = safe_downcast<int, int64_t>(dilation[0]); const int dilationH = dilation.size() == 1 ? dilationT : safe_downcast<int, int64_t>(dilation[1]); const int dilationW = dilation.size() == 1 ? dilationT : safe_downcast<int, int64_t>(dilation[2]); const int64_t nbatch = input.ndimension() == 5 ? input.size(-5) : 1; const int64_t nslices = input.size(-4); const int64_t itime = input.size(-3); const int64_t iheight = input.size(-2); const int64_t iwidth = input.size(-1); const int64_t otime = pooling_output_shape<int64_t>(itime, kT, pT, dT, dilationT, ceil_mode); const int64_t oheight = pooling_output_shape<int64_t>(iheight, kH, pH, dH, dilationH, ceil_mode); const int64_t owidth = pooling_output_shape<int64_t>(iwidth, kW, pW, dW, dilationW, ceil_mode); pool3d_shape_check( input, nslices, kT, kH, kW, dT, dH, dW, pT, pH, pW, dilationT, dilationH, dilationW, itime, iheight, iwidth, otime, oheight, owidth, "max_pool3d_with_indices_out_cuda_template()"); if (input.ndimension() == 4) { output.resize_({ nslices, otime, oheight, owidth}); indices.resize_({nslices, otime, oheight, owidth}); } else { output.resize_({nbatch, nslices, otime, oheight, owidth}); indices.resize_({nbatch, nslices, otime, oheight, owidth}); } if (input.numel() == 0) { return; } Tensor work_input = input.contiguous(); Tensor work_output = output; Tensor work_indices = indices; if (input.ndimension() == 5) { // Collapse batch and feature dimensions. work_input = work_input.reshape({nbatch * nslices, itime, iheight, iwidth}); work_output = work_output.reshape({nbatch * nslices, otime, oheight, owidth}); work_indices = work_indices.reshape({nbatch * nslices, otime, oheight, owidth}); } AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "max_pool3d_with_indices_out_frame", [&]{ scalar_t *input_data = work_input.data_ptr<scalar_t>(); int64_t totalZ = otime * nslices * nbatch; max_pool3d_with_indices_out_frame( input_data, work_output, work_indices, totalZ, itime, iheight, iwidth, otime, oheight, owidth, kT, kH, kW, dT, dH, dW, pT, pH, pW, dilationT, dilationH, dilationW); } ); } void max_pool3d_with_indices_backward_out_cuda_template( Tensor& gradInput, const Tensor& gradOutput, const Tensor& input, const Tensor& indices, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) { TensorArg gradInput_arg{ gradInput, "gradInput", 1 }; TensorArg gradOutput_arg{ gradOutput, "gradOutput", 2 }; TensorArg input_arg{ input, "input", 3 }; TensorArg indices_arg{ indices, "indices", 4 }; checkAllSameGPU(__func__, {gradInput_arg, gradOutput_arg, input_arg, indices_arg}); // #20866, #22032: Guarantee this for the official C++ API? TORCH_CHECK(kernel_size.size() == 1 || kernel_size.size() == 3, "max_pool3d: kernel_size must either be a single int, or a tuple of three ints") const int kT = safe_downcast<int, int64_t>(kernel_size[0]); const int kH = kernel_size.size() == 1 ? 
kT : safe_downcast<int, int64_t>(kernel_size[1]); const int kW = kernel_size.size() == 1 ? kT : safe_downcast<int, int64_t>(kernel_size[2]); TORCH_CHECK(stride.size() == 0 || stride.size() == 1 || stride.size() == 3, "max_pool3d: stride must either be omitted, a single int, or a tuple of three ints") const int dT = stride.empty() ? kT : safe_downcast<int, int64_t>(stride[0]); const int dH = stride.empty() ? kH : stride.size() == 1 ? dT : safe_downcast<int, int64_t>(stride[1]); const int dW = stride.empty() ? kW : stride.size() == 1 ? dT : safe_downcast<int, int64_t>(stride[2]); TORCH_CHECK(padding.size() == 1 || padding.size() == 3, "max_pool3d: padding must be either be a single int, or a tuple of three ints"); const int pT = safe_downcast<int, int64_t>(padding[0]); const int pH = padding.size() == 1 ? pT : safe_downcast<int, int64_t>(padding[1]); const int pW = padding.size() == 1 ? pT : safe_downcast<int, int64_t>(padding[2]); TORCH_CHECK(dilation.size() == 1 || dilation.size() == 3, "max_pool3d: dilation must be either a single int, or a tuple of three ints"); const int dilationT = safe_downcast<int, int64_t>(dilation[0]); const int dilationH = dilation.size() == 1 ? dilationT : safe_downcast<int, int64_t>(dilation[1]); const int dilationW = dilation.size() == 1 ? dilationT : safe_downcast<int, int64_t>(dilation[2]); TORCH_CHECK((input.ndimension() == 4 || input.ndimension() == 5), "max_pool2d_with_indices_backward_out_cuda_template(): ", "Expected 4D or 5D input tensor, but got ", input.sizes()); TORCH_CHECK((gradOutput.ndimension() == 4 || gradOutput.ndimension() == 5), "max_pool2d_with_indices_backward_out_cuda_template(): ", "Expected 4D or 5D gradOutput tensor, but got ", gradOutput.sizes()); // Resize and initialize result tensor. gradInput.resize_as_(input); gradInput.zero_(); const int64_t nbatch = input.ndimension() == 5 ? input.size(-5) : 1; const int64_t nslices = input.size(-4); const int64_t otime = gradOutput.size(-3); const int64_t oheight = gradOutput.size(-2); const int64_t owidth = gradOutput.size(-1); const int64_t itime = gradInput.size(-3); const int64_t iheight = gradInput.size(-2); const int64_t iwidth = gradInput.size(-1); max_pool3d_backward_shape_check( input, gradOutput, indices, nslices, kT, kH, kW, dT, dH, dW, pT, pH, pW, dilationT, dilationH, dilationW, itime, iheight, iwidth, otime, oheight, owidth, "max_pool3d_with_indices_backward_out_cuda_template()"); if (gradOutput.numel() == 0) { return; } Tensor work_grad_input = gradInput; Tensor work_grad_output = gradOutput.contiguous(); Tensor work_indices = indices.contiguous(); if (input.ndimension() == 5) { // Collapse batch and feature dimensions. 
work_grad_input = work_grad_input.reshape({nbatch * nslices, itime, iheight, iwidth}); work_grad_output = work_grad_output.reshape({nbatch * nslices, otime, oheight, owidth}); work_indices = work_indices.reshape({nbatch * nslices, otime, oheight, owidth}); } AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "max_pool3d_with_indices_backward_out_frame", [&] { const int64_t totalZ = otime * nslices * nbatch; scalar_t *grad_input_data = work_grad_input.data_ptr<scalar_t>(); max_pool3d_with_indices_backward_out_frame( grad_input_data, work_grad_output, work_indices, totalZ, itime, iheight, iwidth, oheight, owidth, dT, dH, dW, pT, pH, pW, dilationT, dilationH, dilationW); } ); } } // namespace std::tuple<Tensor&, Tensor&> max_pool3d_with_indices_out_cuda(const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, Tensor& output, Tensor& indices) { max_pool3d_with_indices_out_cuda_template( output, indices, input, kernel_size, stride, padding, dilation, ceil_mode); return std::tuple<Tensor&, Tensor&>(output, indices); } std::tuple<Tensor, Tensor> max_pool3d_with_indices_cuda( const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) { NoNamesGuard guard; Tensor output = at::empty({0}, input.options()); Tensor indices = at::empty({0}, input.options().dtype(kLong)); max_pool3d_with_indices_out_cuda_template( output, indices, input, kernel_size, stride, padding, dilation, ceil_mode); guard.reset(); namedinference::propagate_names(output, input); namedinference::propagate_names(indices, input); return std::tuple<Tensor, Tensor>(output, indices); } Tensor& max_pool3d_with_indices_backward_out_cuda(const Tensor& gradOutput, const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, const Tensor& indices, Tensor& gradInput) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("max_pool3d_with_indices_backward_out_cuda"); max_pool3d_with_indices_backward_out_cuda_template( gradInput, gradOutput, input, indices, kernel_size, stride, padding, dilation, ceil_mode); return gradInput; } Tensor max_pool3d_with_indices_backward_cuda( const Tensor& gradOutput, const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, const Tensor& indices) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("max_pool3d_with_indices_backward_cuda"); auto gradInput = at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); max_pool3d_with_indices_backward_out_cuda_template( gradInput, gradOutput, input, indices, kernel_size, stride, padding, dilation, ceil_mode); return gradInput; } } // at::native } // at
3f48df67cc1c8f4a19fdc99772443c816b8dee43.cu
#include <ATen/AccumulateType.h> #include <ATen/ceil_div.h> #include <ATen/NamedTensorUtils.h> #include <ATen/NumericUtils.h> #include <ATen/native/Pool.h> #include <ATen/cuda/Atomic.cuh> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/NumericLimits.cuh> #include <ATen/cuda/detail/TensorInfo.cuh> #include <ATen/cuda/detail/IndexUtils.cuh> #include <ATen/cuda/detail/KernelUtils.h> #include <c10/macros/Macros.h> namespace at { namespace native { namespace { __device__ inline int min(int a, int b) { return a <= b ? a : b; } template <typename scalar_t> __global__ static void max_pool3d_with_indices_single_out_frame( scalar_t* inputData, PackedTensorAccessor64<scalar_t, 4> output, PackedTensorAccessor64<int64_t, 4> indices, int itime, int iheight, int iwidth, int kT, int kH, int kW, int dT, int dH, int dW, int pT, int pH, int pW, int dilationT, int dilationH, int dilationW, int offsetZ) { int oColumn = blockIdx.x * blockDim.x + threadIdx.x; int oRow = blockIdx.y * blockDim.y + threadIdx.y; int oFrame = (blockIdx.z + offsetZ) % output.size(1); // output frame/time int64_t slice = (blockIdx.z + offsetZ) / output.size(1); // output slice/feature // For int64_t data type, see https://github.com/pytorch/pytorch/issues/52822 if (oRow < output.size(2) && oColumn < output.size(3)) { int tStart = oFrame * dT - pT; int hStart = oRow * dH - pH; int wStart = oColumn * dW - pW; int tEnd = min(tStart + (kT - 1) * dilationT + 1, itime); int hEnd = min(hStart + (kH - 1) * dilationH + 1, iheight); int wEnd = min(wStart + (kW - 1) * dilationW + 1, iwidth); while(tStart < 0) tStart += dilationT; while(hStart < 0) hStart += dilationH; while(wStart < 0) wStart += dilationW; int maxIndex = tStart * iheight * iwidth + hStart * iwidth + wStart; inputData += slice * itime * iheight * iwidth; scalar_t max = at::numeric_limits<scalar_t>::lower_bound(); // -Infinity for (int t = tStart; t < tEnd; t += dilationT) { for (int h = hStart; h < hEnd; h += dilationH) { for (int w = wStart; w < wEnd; w += dilationW) { int index = t * iheight * iwidth + h * iwidth + w; scalar_t val = inputData[index]; if ((max < val) || at::_isnan(val)) { max = val; maxIndex = index; } } } } output[slice][oFrame][oRow][oColumn] = max; indices[slice][oFrame][oRow][oColumn] = maxIndex; } } template <typename scalar_t> void max_pool3d_with_indices_out_frame( scalar_t* input_data, const Tensor& output, const Tensor& indices, int totalZ, int itime, int iheight, int iwidth, int otime, int oheight, int owidth, int kT, int kH, int kW, int dT, int dH, int dW, int pT, int pH, int pW, int dilationT, int dilationH, int dilationW) { int offsetZ = 0; dim3 block(32, 8); while (totalZ > 0) { dim3 grid(ceil_div(owidth, static_cast<int>(block.x)), ceil_div(oheight, static_cast<int>(block.y)), totalZ > 65535 ? 
65535 : totalZ); max_pool3d_with_indices_single_out_frame <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>( input_data, output.packed_accessor64<scalar_t, 4>(), indices.packed_accessor64<int64_t, 4>(), itime, iheight, iwidth, kT, kH, kW, dT, dH, dW, pT, pH, pW, dilationT, dilationH, dilationW, offsetZ); C10_CUDA_KERNEL_LAUNCH_CHECK(); totalZ -= 65535; offsetZ += 65535; } } #undef UPDATE_OUTPUT_KERNEL_WIDTH template <typename scalar_t> __global__ static void max_pool3d_with_indices_backward_single_out_frame( scalar_t *gradInputData, PackedTensorAccessor64<scalar_t, 4> gradOutput, PackedTensorAccessor64<int64_t, 4> indices, int itime, int iheight, int iwidth, int dT, int dH, int dW, int pT, int pH, int pW, int dilationT, int dilationH, int dilationW, int offsetZ) { int oColumn = blockIdx.x * blockDim.x + threadIdx.x; int oRow = blockIdx.y * blockDim.y + threadIdx.y; int oFrame = (blockIdx.z + offsetZ) % gradOutput.size(1); // output frame/time int slice = (blockIdx.z + offsetZ) / gradOutput.size(1); // output slice/feature if (oRow < gradOutput.size(2) && oColumn < gradOutput.size(3)) { int maxIndex = indices[slice][oFrame][oRow][oColumn]; if (maxIndex != -1) { gpuAtomicAddNoReturn(&gradInputData[slice * itime * iheight * iwidth + maxIndex], gradOutput[slice][oFrame][oRow][oColumn]); } } } template <typename scalar_t> void max_pool3d_with_indices_backward_out_frame( scalar_t *gradInputData, const Tensor& gradOutput, const Tensor& indices, int64_t totalZ, int itime, int iheight, int iwidth, int oheight, int owidth, int dT, int dH, int dW, int pT, int pH, int pW, int dilationT, int dilationH, int dilationW) { int offsetZ = 0; dim3 block(32, 8); while (totalZ > 0) { dim3 grid(ceil_div(owidth, static_cast<int>(block.x)), ceil_div(oheight, static_cast<int>(block.y)), totalZ > 65535 ? 65535 : totalZ); max_pool3d_with_indices_backward_single_out_frame <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>( gradInputData, gradOutput.packed_accessor64<scalar_t, 4>(), indices.packed_accessor64<int64_t, 4>(), itime, iheight, iwidth, dT, dH, dW, pT, pH, pW, dilationT, dilationH, dilationW, offsetZ); C10_CUDA_KERNEL_LAUNCH_CHECK(); totalZ -= 65535; offsetZ += 65535; } } void max_pool3d_with_indices_out_cuda_template( Tensor& output, Tensor& indices, const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) { TensorArg output_arg{ output, "output", 1 }; TensorArg indices_arg{ indices, "indices", 2 }; TensorArg input_arg{ input, "input", 3 }; checkAllSameGPU(__func__, {output_arg, indices_arg, input_arg}); // #20866, #22032: Guarantee this for the official C++ API? TORCH_CHECK(kernel_size.size() == 1 || kernel_size.size() == 3, "max_pool3d: kernel_size must either be a single int, or a tuple of three ints") const int kT = safe_downcast<int, int64_t>(kernel_size[0]); const int kH = kernel_size.size() == 1 ? kT : safe_downcast<int, int64_t>(kernel_size[1]); const int kW = kernel_size.size() == 1 ? kT : safe_downcast<int, int64_t>(kernel_size[2]); TORCH_CHECK(stride.size() == 0 || stride.size() == 1 || stride.size() == 3, "max_pool3d: stride must either be omitted, a single int, or a tuple of three ints") const int dT = stride.empty() ? kT : safe_downcast<int, int64_t>(stride[0]); const int dH = stride.empty() ? kH : stride.size() == 1 ? dT : safe_downcast<int, int64_t>(stride[1]); const int dW = stride.empty() ? kW : stride.size() == 1 ? 
dT : safe_downcast<int, int64_t>(stride[2]); TORCH_CHECK(padding.size() == 1 || padding.size() == 3, "max_pool3d: padding must be either be a single int, or a tuple of three ints"); const int pT = safe_downcast<int, int64_t>(padding[0]); const int pH = padding.size() == 1 ? pT : safe_downcast<int, int64_t>(padding[1]); const int pW = padding.size() == 1 ? pT : safe_downcast<int, int64_t>(padding[2]); TORCH_CHECK(dilation.size() == 1 || dilation.size() == 3, "max_pool3d: dilation must be either a single int, or a tuple of three ints"); const int dilationT = safe_downcast<int, int64_t>(dilation[0]); const int dilationH = dilation.size() == 1 ? dilationT : safe_downcast<int, int64_t>(dilation[1]); const int dilationW = dilation.size() == 1 ? dilationT : safe_downcast<int, int64_t>(dilation[2]); const int64_t nbatch = input.ndimension() == 5 ? input.size(-5) : 1; const int64_t nslices = input.size(-4); const int64_t itime = input.size(-3); const int64_t iheight = input.size(-2); const int64_t iwidth = input.size(-1); const int64_t otime = pooling_output_shape<int64_t>(itime, kT, pT, dT, dilationT, ceil_mode); const int64_t oheight = pooling_output_shape<int64_t>(iheight, kH, pH, dH, dilationH, ceil_mode); const int64_t owidth = pooling_output_shape<int64_t>(iwidth, kW, pW, dW, dilationW, ceil_mode); pool3d_shape_check( input, nslices, kT, kH, kW, dT, dH, dW, pT, pH, pW, dilationT, dilationH, dilationW, itime, iheight, iwidth, otime, oheight, owidth, "max_pool3d_with_indices_out_cuda_template()"); if (input.ndimension() == 4) { output.resize_({ nslices, otime, oheight, owidth}); indices.resize_({nslices, otime, oheight, owidth}); } else { output.resize_({nbatch, nslices, otime, oheight, owidth}); indices.resize_({nbatch, nslices, otime, oheight, owidth}); } if (input.numel() == 0) { return; } Tensor work_input = input.contiguous(); Tensor work_output = output; Tensor work_indices = indices; if (input.ndimension() == 5) { // Collapse batch and feature dimensions. work_input = work_input.reshape({nbatch * nslices, itime, iheight, iwidth}); work_output = work_output.reshape({nbatch * nslices, otime, oheight, owidth}); work_indices = work_indices.reshape({nbatch * nslices, otime, oheight, owidth}); } AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "max_pool3d_with_indices_out_frame", [&]{ scalar_t *input_data = work_input.data_ptr<scalar_t>(); int64_t totalZ = otime * nslices * nbatch; max_pool3d_with_indices_out_frame( input_data, work_output, work_indices, totalZ, itime, iheight, iwidth, otime, oheight, owidth, kT, kH, kW, dT, dH, dW, pT, pH, pW, dilationT, dilationH, dilationW); } ); } void max_pool3d_with_indices_backward_out_cuda_template( Tensor& gradInput, const Tensor& gradOutput, const Tensor& input, const Tensor& indices, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) { TensorArg gradInput_arg{ gradInput, "gradInput", 1 }; TensorArg gradOutput_arg{ gradOutput, "gradOutput", 2 }; TensorArg input_arg{ input, "input", 3 }; TensorArg indices_arg{ indices, "indices", 4 }; checkAllSameGPU(__func__, {gradInput_arg, gradOutput_arg, input_arg, indices_arg}); // #20866, #22032: Guarantee this for the official C++ API? TORCH_CHECK(kernel_size.size() == 1 || kernel_size.size() == 3, "max_pool3d: kernel_size must either be a single int, or a tuple of three ints") const int kT = safe_downcast<int, int64_t>(kernel_size[0]); const int kH = kernel_size.size() == 1 ? 
kT : safe_downcast<int, int64_t>(kernel_size[1]); const int kW = kernel_size.size() == 1 ? kT : safe_downcast<int, int64_t>(kernel_size[2]); TORCH_CHECK(stride.size() == 0 || stride.size() == 1 || stride.size() == 3, "max_pool3d: stride must either be omitted, a single int, or a tuple of three ints") const int dT = stride.empty() ? kT : safe_downcast<int, int64_t>(stride[0]); const int dH = stride.empty() ? kH : stride.size() == 1 ? dT : safe_downcast<int, int64_t>(stride[1]); const int dW = stride.empty() ? kW : stride.size() == 1 ? dT : safe_downcast<int, int64_t>(stride[2]); TORCH_CHECK(padding.size() == 1 || padding.size() == 3, "max_pool3d: padding must be either be a single int, or a tuple of three ints"); const int pT = safe_downcast<int, int64_t>(padding[0]); const int pH = padding.size() == 1 ? pT : safe_downcast<int, int64_t>(padding[1]); const int pW = padding.size() == 1 ? pT : safe_downcast<int, int64_t>(padding[2]); TORCH_CHECK(dilation.size() == 1 || dilation.size() == 3, "max_pool3d: dilation must be either a single int, or a tuple of three ints"); const int dilationT = safe_downcast<int, int64_t>(dilation[0]); const int dilationH = dilation.size() == 1 ? dilationT : safe_downcast<int, int64_t>(dilation[1]); const int dilationW = dilation.size() == 1 ? dilationT : safe_downcast<int, int64_t>(dilation[2]); TORCH_CHECK((input.ndimension() == 4 || input.ndimension() == 5), "max_pool2d_with_indices_backward_out_cuda_template(): ", "Expected 4D or 5D input tensor, but got ", input.sizes()); TORCH_CHECK((gradOutput.ndimension() == 4 || gradOutput.ndimension() == 5), "max_pool2d_with_indices_backward_out_cuda_template(): ", "Expected 4D or 5D gradOutput tensor, but got ", gradOutput.sizes()); // Resize and initialize result tensor. gradInput.resize_as_(input); gradInput.zero_(); const int64_t nbatch = input.ndimension() == 5 ? input.size(-5) : 1; const int64_t nslices = input.size(-4); const int64_t otime = gradOutput.size(-3); const int64_t oheight = gradOutput.size(-2); const int64_t owidth = gradOutput.size(-1); const int64_t itime = gradInput.size(-3); const int64_t iheight = gradInput.size(-2); const int64_t iwidth = gradInput.size(-1); max_pool3d_backward_shape_check( input, gradOutput, indices, nslices, kT, kH, kW, dT, dH, dW, pT, pH, pW, dilationT, dilationH, dilationW, itime, iheight, iwidth, otime, oheight, owidth, "max_pool3d_with_indices_backward_out_cuda_template()"); if (gradOutput.numel() == 0) { return; } Tensor work_grad_input = gradInput; Tensor work_grad_output = gradOutput.contiguous(); Tensor work_indices = indices.contiguous(); if (input.ndimension() == 5) { // Collapse batch and feature dimensions. 
work_grad_input = work_grad_input.reshape({nbatch * nslices, itime, iheight, iwidth}); work_grad_output = work_grad_output.reshape({nbatch * nslices, otime, oheight, owidth}); work_indices = work_indices.reshape({nbatch * nslices, otime, oheight, owidth}); } AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "max_pool3d_with_indices_backward_out_frame", [&] { const int64_t totalZ = otime * nslices * nbatch; scalar_t *grad_input_data = work_grad_input.data_ptr<scalar_t>(); max_pool3d_with_indices_backward_out_frame( grad_input_data, work_grad_output, work_indices, totalZ, itime, iheight, iwidth, oheight, owidth, dT, dH, dW, pT, pH, pW, dilationT, dilationH, dilationW); } ); } } // namespace std::tuple<Tensor&, Tensor&> max_pool3d_with_indices_out_cuda(const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, Tensor& output, Tensor& indices) { max_pool3d_with_indices_out_cuda_template( output, indices, input, kernel_size, stride, padding, dilation, ceil_mode); return std::tuple<Tensor&, Tensor&>(output, indices); } std::tuple<Tensor, Tensor> max_pool3d_with_indices_cuda( const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) { NoNamesGuard guard; Tensor output = at::empty({0}, input.options()); Tensor indices = at::empty({0}, input.options().dtype(kLong)); max_pool3d_with_indices_out_cuda_template( output, indices, input, kernel_size, stride, padding, dilation, ceil_mode); guard.reset(); namedinference::propagate_names(output, input); namedinference::propagate_names(indices, input); return std::tuple<Tensor, Tensor>(output, indices); } Tensor& max_pool3d_with_indices_backward_out_cuda(const Tensor& gradOutput, const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, const Tensor& indices, Tensor& gradInput) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("max_pool3d_with_indices_backward_out_cuda"); max_pool3d_with_indices_backward_out_cuda_template( gradInput, gradOutput, input, indices, kernel_size, stride, padding, dilation, ceil_mode); return gradInput; } Tensor max_pool3d_with_indices_backward_cuda( const Tensor& gradOutput, const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, const Tensor& indices) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("max_pool3d_with_indices_backward_cuda"); auto gradInput = at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); max_pool3d_with_indices_backward_out_cuda_template( gradInput, gradOutput, input, indices, kernel_size, stride, padding, dilation, ceil_mode); return gradInput; } } // at::native } // at
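For context, the pooling kernels in the entry above are normally reached through the public ATen API rather than called directly. Below is a minimal, hypothetical usage sketch; the tensor shape and pooling parameters are illustrative assumptions and are not taken from the file above.

// Hypothetical caller of the max_pool3d_with_indices CUDA path (sketch, not part of the dataset entry).
#include <ATen/ATen.h>
#include <tuple>

int main() {
  // NCDHW input on the GPU; sizes are made up for illustration.
  at::Tensor input = at::randn({2, 3, 8, 16, 16}, at::kCUDA);
  at::Tensor output, indices;
  // kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=false
  std::tie(output, indices) =
      at::max_pool3d_with_indices(input, {2, 2, 2}, {2, 2, 2}, {0, 0, 0}, {1, 1, 1}, false);
  // output has shape {2, 3, 4, 8, 8}; indices stores the flattened argmax within each input plane.
  return 0;
}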
b509e0eb1106534d6a736a38e79504ede65cafa9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void kCopy(float* srcStart, float* destStart, unsigned int copyWidth, unsigned int jumpWidth, unsigned int numElements) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; if(idx < numElements) destStart[(idx / copyWidth) * jumpWidth + idx % copyWidth] = srcStart[(idx / copyWidth) * jumpWidth + idx % copyWidth]; }
b509e0eb1106534d6a736a38e79504ede65cafa9.cu
#include "includes.h" __global__ void kCopy(float* srcStart, float* destStart, unsigned int copyWidth, unsigned int jumpWidth, unsigned int numElements) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; if(idx < numElements) destStart[(idx / copyWidth) * jumpWidth + idx % copyWidth] = srcStart[(idx / copyWidth) * jumpWidth + idx % copyWidth]; }
aa831bde61b9365324d200d5ae46d3c71a903429.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void matrix_multiply_simple(float *a, float *b, float *ab, size_t width) { //TODO: write the kernel to perform matrix a times b, store results into ab. // width is the size of the square matrix along one dimension. int row = blockIdx.y*blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; if(row < width && col < width) { float pvalue = 0; for(int k = 0; k < width; k++) { pvalue += a[row * width + k] * b[k * width +col]; } ab[row * width + col] = pvalue; } }
aa831bde61b9365324d200d5ae46d3c71a903429.cu
#include "includes.h" __global__ void matrix_multiply_simple(float *a, float *b, float *ab, size_t width) { //TODO: write the kernel to perform matrix a times b, store results into ab. // width is the size of the square matrix along one dimension. int row = blockIdx.y*blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; if(row < width && col < width) { float pvalue = 0; for(int k = 0; k < width; k++) { pvalue += a[row * width + k] * b[k * width +col]; } ab[row * width + col] = pvalue; } }
90f70a615e8cd08c09001dff1110a957fc2dc9ad.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void add(int *output, int length, int *n) { int blockID = blockIdx.x; int threadID = threadIdx.x; int blockOffset = blockID * length; output[blockOffset + threadID] += n[blockID]; }
90f70a615e8cd08c09001dff1110a957fc2dc9ad.cu
#include "includes.h" __global__ void add(int *output, int length, int *n) { int blockID = blockIdx.x; int threadID = threadIdx.x; int blockOffset = blockID * length; output[blockOffset + threadID] += n[blockID]; }
19a4bae3007f353af93765ab1c88956e11bea72e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /// /// Copyright (c) 2017, Intel Corporation /// /// Redistribution and use in source and binary forms, with or without /// modification, are permitted provided that the following conditions /// are met: /// /// * Redistributions of source code must retain the above copyright /// notice, this list of conditions and the following disclaimer. /// * Redistributions in binary form must reproduce the above /// copyright notice, this list of conditions and the following /// disclaimer in the documentation and/or other materials provided /// with the distribution. /// * Neither the name of Intel Corporation nor the names of its /// contributors may be used to endorse or promote products /// derived from this software without specific prior written /// permission. /// /// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS /// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT /// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS /// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE /// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, /// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, /// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; /// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER /// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT /// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN /// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE /// POSSIBILITY OF SUCH DAMAGE. ////////////////////////////////////////////////////////////////////// /// /// NAME: nstream /// /// PURPOSE: To compute memory bandwidth when adding a vector of a given /// number of double precision values to the scalar multiple of /// another vector of the same length, and storing the result in /// a third vector. /// /// USAGE: The program takes as input the number /// of iterations to loop over the triad vectors, the length of the /// vectors, and the offset between vectors /// /// <progname> <# iterations> <vector length> <offset> /// /// The output consists of diagnostics to make sure the /// algorithm worked, and of timing statistics. /// /// NOTES: Bandwidth is determined as the number of words read, plus the /// number of words written, times the size of the words, divided /// by the execution time. For a vector length of N, the total /// number of words read and written is 4*N*sizeof(double). /// /// HISTORY: This code is loosely based on the Stream benchmark by John /// McCalpin, but does not follow all the Stream rules. Hence, /// reported results should not be associated with Stream in /// external publications /// /// Converted to C++11 by Jeff Hammond, November 2017. 
/// ////////////////////////////////////////////////////////////////////// #include "prk_util.h" #include "prk_cuda.h" __global__ void nstream(const unsigned n, const prk_float scalar, prk_float * A, const prk_float * B, const prk_float * C) { unsigned i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) { A[i] += B[i] + scalar * C[i]; } } int main(int argc, char * argv[]) { std::cout << "Parallel Research Kernels version " << PRKVERSION << std::endl; std::cout << "C++11/CUDA STREAM triad: A = B + scalar * C" << std::endl; prk::CUDA::info info; info.print(); ////////////////////////////////////////////////////////////////////// /// Read and test input parameters ////////////////////////////////////////////////////////////////////// int iterations, offset; int length; try { if (argc < 3) { throw "Usage: <# iterations> <vector length> [<offset>]"; } iterations = std::atoi(argv[1]); if (iterations < 1) { throw "ERROR: iterations must be >= 1"; } length = std::atoi(argv[2]); if (length <= 0) { throw "ERROR: vector length must be positive"; } offset = (argc>3) ? std::atoi(argv[3]) : 0; if (length <= 0) { throw "ERROR: offset must be nonnegative"; } } catch (const char * e) { std::cout << e << std::endl; return 1; } std::cout << "Number of iterations = " << iterations << std::endl; std::cout << "Vector length = " << length << std::endl; std::cout << "Offset = " << offset << std::endl; const int blockSize = 128; dim3 dimBlock(blockSize, 1, 1); dim3 dimGrid(prk::divceil(length,blockSize), 1, 1); info.checkDims(dimBlock, dimGrid); ////////////////////////////////////////////////////////////////////// // Allocate space and perform the computation ////////////////////////////////////////////////////////////////////// auto nstream_time = 0.0; const size_t bytes = length * sizeof(prk_float); prk_float * h_A; prk_float * h_B; prk_float * h_C; #ifndef __CORIANDERCC__ prk::CUDA::check( hipHostMalloc((void**)&h_A, bytes) ); prk::CUDA::check( hipHostMalloc((void**)&h_B, bytes) ); prk::CUDA::check( hipHostMalloc((void**)&h_C, bytes) ); #else h_A = new prk_float[length]; h_B = new prk_float[length]; h_C = new prk_float[length]; #endif for (auto i=0; i<length; ++i) { h_A[i] = static_cast<prk_float>(0); h_B[i] = static_cast<prk_float>(2); h_C[i] = static_cast<prk_float>(2); } prk_float * d_A; prk_float * d_B; prk_float * d_C; prk::CUDA::check( hipMalloc((void**)&d_A, bytes) ); prk::CUDA::check( hipMalloc((void**)&d_B, bytes) ); prk::CUDA::check( hipMalloc((void**)&d_C, bytes) ); prk::CUDA::check( hipMemcpy(d_A, &(h_A[0]), bytes, hipMemcpyHostToDevice) ); prk::CUDA::check( hipMemcpy(d_B, &(h_B[0]), bytes, hipMemcpyHostToDevice) ); prk::CUDA::check( hipMemcpy(d_C, &(h_C[0]), bytes, hipMemcpyHostToDevice) ); prk_float scalar(3); { for (auto iter = 0; iter<=iterations; iter++) { if (iter==1) nstream_time = prk::wtime(); hipLaunchKernelGGL(( nstream), dim3(dimGrid), dim3(dimBlock), 0, 0, static_cast<unsigned>(length), scalar, d_A, d_B, d_C); #ifndef __CORIANDERCC__ // silence "ignoring hipDeviceSynchronize for now" warning prk::CUDA::check( hipDeviceSynchronize() ); #endif } nstream_time = prk::wtime() - nstream_time; } prk::CUDA::check( hipMemcpy(&(h_A[0]), d_A, bytes, hipMemcpyDeviceToHost) ); prk::CUDA::check( hipFree(d_C) ); prk::CUDA::check( hipFree(d_B) ); prk::CUDA::check( hipFree(d_A) ); #ifndef __CORIANDERCC__ prk::CUDA::check( hipHostFree(h_B) ); prk::CUDA::check( hipHostFree(h_C) ); #endif ////////////////////////////////////////////////////////////////////// /// Analyze and output results 
////////////////////////////////////////////////////////////////////// double ar(0); double br(2); double cr(2); for (auto i=0; i<=iterations; i++) { ar += br + scalar * cr; } ar *= length; double asum(0); for (auto i=0; i<length; i++) { asum += ::fabs(h_A[i]); } #ifndef __CORIANDERCC__ prk::CUDA::check( hipHostFree(h_A) ); #endif double epsilon=1.e-8; if (::fabs(ar-asum)/asum > epsilon) { std::cout << "Failed Validation on output array\n" << " Expected checksum: " << ar << "\n" << " Observed checksum: " << asum << std::endl; std::cout << "ERROR: solution did not validate" << std::endl; return 1; } else { std::cout << "Solution validates" << std::endl; double avgtime = nstream_time/iterations; double nbytes = 4.0 * length * sizeof(prk_float); std::cout << "Rate (MB/s): " << 1.e-6*nbytes/avgtime << " Avg time (s): " << avgtime << std::endl; } return 0; }
19a4bae3007f353af93765ab1c88956e11bea72e.cu
/// /// Copyright (c) 2017, Intel Corporation /// /// Redistribution and use in source and binary forms, with or without /// modification, are permitted provided that the following conditions /// are met: /// /// * Redistributions of source code must retain the above copyright /// notice, this list of conditions and the following disclaimer. /// * Redistributions in binary form must reproduce the above /// copyright notice, this list of conditions and the following /// disclaimer in the documentation and/or other materials provided /// with the distribution. /// * Neither the name of Intel Corporation nor the names of its /// contributors may be used to endorse or promote products /// derived from this software without specific prior written /// permission. /// /// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS /// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT /// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS /// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE /// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, /// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, /// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; /// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER /// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT /// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN /// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE /// POSSIBILITY OF SUCH DAMAGE. ////////////////////////////////////////////////////////////////////// /// /// NAME: nstream /// /// PURPOSE: To compute memory bandwidth when adding a vector of a given /// number of double precision values to the scalar multiple of /// another vector of the same length, and storing the result in /// a third vector. /// /// USAGE: The program takes as input the number /// of iterations to loop over the triad vectors, the length of the /// vectors, and the offset between vectors /// /// <progname> <# iterations> <vector length> <offset> /// /// The output consists of diagnostics to make sure the /// algorithm worked, and of timing statistics. /// /// NOTES: Bandwidth is determined as the number of words read, plus the /// number of words written, times the size of the words, divided /// by the execution time. For a vector length of N, the total /// number of words read and written is 4*N*sizeof(double). /// /// HISTORY: This code is loosely based on the Stream benchmark by John /// McCalpin, but does not follow all the Stream rules. Hence, /// reported results should not be associated with Stream in /// external publications /// /// Converted to C++11 by Jeff Hammond, November 2017. 
/// ////////////////////////////////////////////////////////////////////// #include "prk_util.h" #include "prk_cuda.h" __global__ void nstream(const unsigned n, const prk_float scalar, prk_float * A, const prk_float * B, const prk_float * C) { unsigned i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) { A[i] += B[i] + scalar * C[i]; } } int main(int argc, char * argv[]) { std::cout << "Parallel Research Kernels version " << PRKVERSION << std::endl; std::cout << "C++11/CUDA STREAM triad: A = B + scalar * C" << std::endl; prk::CUDA::info info; info.print(); ////////////////////////////////////////////////////////////////////// /// Read and test input parameters ////////////////////////////////////////////////////////////////////// int iterations, offset; int length; try { if (argc < 3) { throw "Usage: <# iterations> <vector length> [<offset>]"; } iterations = std::atoi(argv[1]); if (iterations < 1) { throw "ERROR: iterations must be >= 1"; } length = std::atoi(argv[2]); if (length <= 0) { throw "ERROR: vector length must be positive"; } offset = (argc>3) ? std::atoi(argv[3]) : 0; if (length <= 0) { throw "ERROR: offset must be nonnegative"; } } catch (const char * e) { std::cout << e << std::endl; return 1; } std::cout << "Number of iterations = " << iterations << std::endl; std::cout << "Vector length = " << length << std::endl; std::cout << "Offset = " << offset << std::endl; const int blockSize = 128; dim3 dimBlock(blockSize, 1, 1); dim3 dimGrid(prk::divceil(length,blockSize), 1, 1); info.checkDims(dimBlock, dimGrid); ////////////////////////////////////////////////////////////////////// // Allocate space and perform the computation ////////////////////////////////////////////////////////////////////// auto nstream_time = 0.0; const size_t bytes = length * sizeof(prk_float); prk_float * h_A; prk_float * h_B; prk_float * h_C; #ifndef __CORIANDERCC__ prk::CUDA::check( cudaMallocHost((void**)&h_A, bytes) ); prk::CUDA::check( cudaMallocHost((void**)&h_B, bytes) ); prk::CUDA::check( cudaMallocHost((void**)&h_C, bytes) ); #else h_A = new prk_float[length]; h_B = new prk_float[length]; h_C = new prk_float[length]; #endif for (auto i=0; i<length; ++i) { h_A[i] = static_cast<prk_float>(0); h_B[i] = static_cast<prk_float>(2); h_C[i] = static_cast<prk_float>(2); } prk_float * d_A; prk_float * d_B; prk_float * d_C; prk::CUDA::check( cudaMalloc((void**)&d_A, bytes) ); prk::CUDA::check( cudaMalloc((void**)&d_B, bytes) ); prk::CUDA::check( cudaMalloc((void**)&d_C, bytes) ); prk::CUDA::check( cudaMemcpy(d_A, &(h_A[0]), bytes, cudaMemcpyHostToDevice) ); prk::CUDA::check( cudaMemcpy(d_B, &(h_B[0]), bytes, cudaMemcpyHostToDevice) ); prk::CUDA::check( cudaMemcpy(d_C, &(h_C[0]), bytes, cudaMemcpyHostToDevice) ); prk_float scalar(3); { for (auto iter = 0; iter<=iterations; iter++) { if (iter==1) nstream_time = prk::wtime(); nstream<<<dimGrid, dimBlock>>>(static_cast<unsigned>(length), scalar, d_A, d_B, d_C); #ifndef __CORIANDERCC__ // silence "ignoring cudaDeviceSynchronize for now" warning prk::CUDA::check( cudaDeviceSynchronize() ); #endif } nstream_time = prk::wtime() - nstream_time; } prk::CUDA::check( cudaMemcpy(&(h_A[0]), d_A, bytes, cudaMemcpyDeviceToHost) ); prk::CUDA::check( cudaFree(d_C) ); prk::CUDA::check( cudaFree(d_B) ); prk::CUDA::check( cudaFree(d_A) ); #ifndef __CORIANDERCC__ prk::CUDA::check( cudaFreeHost(h_B) ); prk::CUDA::check( cudaFreeHost(h_C) ); #endif ////////////////////////////////////////////////////////////////////// /// Analyze and output results 
////////////////////////////////////////////////////////////////////// double ar(0); double br(2); double cr(2); for (auto i=0; i<=iterations; i++) { ar += br + scalar * cr; } ar *= length; double asum(0); for (auto i=0; i<length; i++) { asum += std::fabs(h_A[i]); } #ifndef __CORIANDERCC__ prk::CUDA::check( cudaFreeHost(h_A) ); #endif double epsilon=1.e-8; if (std::fabs(ar-asum)/asum > epsilon) { std::cout << "Failed Validation on output array\n" << " Expected checksum: " << ar << "\n" << " Observed checksum: " << asum << std::endl; std::cout << "ERROR: solution did not validate" << std::endl; return 1; } else { std::cout << "Solution validates" << std::endl; double avgtime = nstream_time/iterations; double nbytes = 4.0 * length * sizeof(prk_float); std::cout << "Rate (MB/s): " << 1.e-6*nbytes/avgtime << " Avg time (s): " << avgtime << std::endl; } return 0; }
b74c499b9a38e3f23128775957ae62fd45c3bc6e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <random> #include <chrono> #define BATCH_SIZE 20000 typedef struct { float3 pos = {0.0}; float3 vel = {0.0}; } Particle; // Timestep for particles, f contains force to be applied to p.vel in x,y,z and w is time derivative __global__ void device_timestep(Particle* p, float4 f) { int i = blockIdx.x * blockDim.x + threadIdx.x; float dt = f.w; // Update velocity p[i].vel.x = p[i].vel.x + f.x * dt; p[i].vel.y = p[i].vel.y + f.y * dt; p[i].vel.z = p[i].vel.z + f.z * dt; // Update position p[i].pos.x = p[i].pos.x + p[i].vel.x * dt; p[i].pos.y = p[i].pos.y + p[i].vel.y * dt; p[i].pos.z = p[i].pos.z + p[i].vel.z * dt; } void host_timestep(Particle* p, float4 f, const int num_particles) { float dt = f.w; for(int i = 0; i < num_particles; i++) { // Update velocity p[i].vel.x = p[i].vel.x + f.x * dt; p[i].vel.y = p[i].vel.y + f.y * dt; p[i].vel.z = p[i].vel.z + f.z * dt; // Update position p[i].pos.x = p[i].pos.x + p[i].vel.x * dt; p[i].pos.y = p[i].pos.y + p[i].vel.y * dt; p[i].pos.z = p[i].pos.z + p[i].vel.z * dt; } } // returns if values successfully read or not. bool setValuesFromArgs(int argc, char **argv, unsigned int *block_size, unsigned int *num_iterations, unsigned int *num_particles) { if (argc < 4) { printf("Incorrect parameters!\nUsage: %s <block size> <num iterations>\ <num particles> [1 extra arg for gpu benchmark output, 2 for cpu]\n", *argv); return false; } char *s; *block_size = strtoul(argv[1], &s, 10); *num_iterations = strtoul(argv[2], &s, 10); *num_particles = strtoul(argv[3], &s, 10); return true; } int main(int argc, char **argv) { unsigned int block_size, num_iterations, num_particles; if(!setValuesFromArgs(argc, argv, &block_size, &num_iterations, &num_particles)) return 0; // Change num_threads to a multiple of block_size to prevent unexpected outcomes (memory size not matching up etc) num_particles = ((num_particles + block_size - 1) / block_size) * block_size; bool gpuBench = argc == 5; bool cpuBench = argc == 6; if (!(gpuBench || cpuBench)) printf("Starting simulation on %d particles with %d iterations, GPU set to use block size %d...\n\n", num_particles, num_iterations, block_size); Particle *particles = (Particle*)malloc(num_particles * sizeof(Particle)); Particle *d_res; hipHostMalloc((void**)&d_res, num_particles * sizeof(Particle), hipHostMallocDefault); std::default_random_engine rdmGen; std::uniform_real_distribution<float> posDist(-100.0, 100.0); std::uniform_real_distribution<float> velDist(-10.0, 10.0); for(int i = 0; i < num_particles; i++) { d_res[i].pos.x = particles[i].pos.x = posDist(rdmGen); d_res[i].pos.y = particles[i].pos.y = posDist(rdmGen); d_res[i].pos.z = particles[i].pos.z = posDist(rdmGen); d_res[i].vel.x = particles[i].vel.x = velDist(rdmGen); d_res[i].vel.y = particles[i].vel.y = velDist(rdmGen); d_res[i].vel.z = particles[i].vel.z = velDist(rdmGen); } float4 forces = { 0.0, // x 0.0, // y -9.82, // z 1.0 // dt }; /* === Example === ... int N = 3; int *arr, *d_arr; hipHostMalloc(&arr, N * sizeof(int)); hipMalloc(&d_arr, N * sizeof(int)); hipStream_t s_id; hipStreamCreate(&s_id); hipMemcpyAsync(d_arr, arr, N * sizeof(int), hipMemcpyHostToDevice, s_id); // 3rd parameter is shared device memory fun<<<block_size, blocks, 0, s_id>>>; hipStreamSynchronize(s_id); hipMemcpyAsync(arr, d_arr, N * sizeof(int), hipMemcpyDeviceToHost, s_id); hipStreamDestroy(s_id); ... === = = = = === === Lecture === ... 
for (int i = 0; i < nStreams; i++) { int offset = i * streamSize; hipMemcpyAsync(&d_a[offset], &a[offset], streamBytes, hipMemcpyHostToDevice, stream[i]); kernel<<<streamSize/blockSize, blockSize, 0, stream[i]>>>(d_a, offset); hipMemcpyAsync(&a[offset], &d_a[offset], streamBytes, hipMemcpyDeviceToHost, stream[i]); } ... === = = = = === */ // ============= START COMPUTING ON DEVICE ============== // if (!cpuBench) { if (!gpuBench) printf("Simulating on the GPU...\n"); auto start1 = std::chrono::system_clock::now(); // Create, allocate and copy array to device Particle* d_particles = 0; int num_streams = (num_particles + BATCH_SIZE - 1) / BATCH_SIZE; // Allocate memory for num_particles + potential padding hipMalloc(&d_particles, num_streams * BATCH_SIZE * sizeof(Particle)); hipStream_t *s_id = (hipStream_t*)malloc(num_streams * sizeof(hipStream_t)); for (int i = 0; i < num_streams; i++) { hipStreamCreate(&s_id[i]); } for(int i = 0; i < num_iterations; i++) { for(int j = 0; j < num_streams; j++) { int offset = j * BATCH_SIZE; hipMemcpyAsync(&d_particles[offset], &d_res[offset], BATCH_SIZE * sizeof(Particle), hipMemcpyHostToDevice, s_id[j]); hipLaunchKernelGGL(( device_timestep), dim3((BATCH_SIZE + block_size - 1) / block_size), dim3(block_size), 0, s_id[j], &d_particles[offset], forces); hipMemcpyAsync(&d_res[offset], &d_particles[offset], BATCH_SIZE * sizeof(Particle), hipMemcpyDeviceToHost, s_id[j]); } // Basically like hipDeviceSynchronize, but using this to memorize stream synchronization for(int j = 0; j < num_streams; j++) { hipStreamSynchronize(s_id[j]); } // Ready to do stuff on host... } for (int i = 0; i < num_streams; i++) { hipStreamDestroy(s_id[i]); } hipFree(d_particles); auto end1 = std::chrono::system_clock::now(); std::chrono::duration<double> device_time = end1-start1; if (!gpuBench) printf("\tDone in %f s!\n\n", device_time.count()); else printf("%f\n", device_time.count()); } if (!gpuBench) { // ============= START COMPUTING ON HOST ============== // if (!cpuBench) printf("Simulating on the CPU...\n"); auto start2 = std::chrono::system_clock::now(); for(int i = 0; i < num_iterations; i++) { host_timestep(particles, forces, num_particles); } auto end2 = std::chrono::system_clock::now(); std::chrono::duration<double> host_time = end2-start2; if (!cpuBench) { printf("\tDone in %f s!\n\n", host_time.count()); printf("All done!\n"); } else printf("%f\n", host_time.count()); } hipHostFree(d_res); free(particles); return 0; }
b74c499b9a38e3f23128775957ae62fd45c3bc6e.cu
#include <stdio.h> #include <random> #include <chrono> #define BATCH_SIZE 20000 typedef struct { float3 pos = {0.0}; float3 vel = {0.0}; } Particle; // Timestep for particles, f contains force to be applied to p.vel in x,y,z and w is time derivative __global__ void device_timestep(Particle* p, float4 f) { int i = blockIdx.x * blockDim.x + threadIdx.x; float dt = f.w; // Update velocity p[i].vel.x = p[i].vel.x + f.x * dt; p[i].vel.y = p[i].vel.y + f.y * dt; p[i].vel.z = p[i].vel.z + f.z * dt; // Update position p[i].pos.x = p[i].pos.x + p[i].vel.x * dt; p[i].pos.y = p[i].pos.y + p[i].vel.y * dt; p[i].pos.z = p[i].pos.z + p[i].vel.z * dt; } void host_timestep(Particle* p, float4 f, const int num_particles) { float dt = f.w; for(int i = 0; i < num_particles; i++) { // Update velocity p[i].vel.x = p[i].vel.x + f.x * dt; p[i].vel.y = p[i].vel.y + f.y * dt; p[i].vel.z = p[i].vel.z + f.z * dt; // Update position p[i].pos.x = p[i].pos.x + p[i].vel.x * dt; p[i].pos.y = p[i].pos.y + p[i].vel.y * dt; p[i].pos.z = p[i].pos.z + p[i].vel.z * dt; } } // returns if values successfully read or not. bool setValuesFromArgs(int argc, char **argv, unsigned int *block_size, unsigned int *num_iterations, unsigned int *num_particles) { if (argc < 4) { printf("Incorrect parameters!\nUsage: %s <block size> <num iterations>\ <num particles> [1 extra arg for gpu benchmark output, 2 for cpu]\n", *argv); return false; } char *s; *block_size = strtoul(argv[1], &s, 10); *num_iterations = strtoul(argv[2], &s, 10); *num_particles = strtoul(argv[3], &s, 10); return true; } int main(int argc, char **argv) { unsigned int block_size, num_iterations, num_particles; if(!setValuesFromArgs(argc, argv, &block_size, &num_iterations, &num_particles)) return 0; // Change num_threads to a multiple of block_size to prevent unexpected outcomes (memory size not matching up etc) num_particles = ((num_particles + block_size - 1) / block_size) * block_size; bool gpuBench = argc == 5; bool cpuBench = argc == 6; if (!(gpuBench || cpuBench)) printf("Starting simulation on %d particles with %d iterations, GPU set to use block size %d...\n\n", num_particles, num_iterations, block_size); Particle *particles = (Particle*)malloc(num_particles * sizeof(Particle)); Particle *d_res; cudaMallocHost((void**)&d_res, num_particles * sizeof(Particle), cudaHostAllocDefault); std::default_random_engine rdmGen; std::uniform_real_distribution<float> posDist(-100.0, 100.0); std::uniform_real_distribution<float> velDist(-10.0, 10.0); for(int i = 0; i < num_particles; i++) { d_res[i].pos.x = particles[i].pos.x = posDist(rdmGen); d_res[i].pos.y = particles[i].pos.y = posDist(rdmGen); d_res[i].pos.z = particles[i].pos.z = posDist(rdmGen); d_res[i].vel.x = particles[i].vel.x = velDist(rdmGen); d_res[i].vel.y = particles[i].vel.y = velDist(rdmGen); d_res[i].vel.z = particles[i].vel.z = velDist(rdmGen); } float4 forces = { 0.0, // x 0.0, // y -9.82, // z 1.0 // dt }; /* === Example === ... int N = 3; int *arr, *d_arr; cudaMallocHost(&arr, N * sizeof(int)); cudaMalloc(&d_arr, N * sizeof(int)); cudaStream_t s_id; cudaStreamCreate(&s_id); cudaMemcpyAsync(d_arr, arr, N * sizeof(int), cudaMemcpyHostToDevice, s_id); // 3rd parameter is shared device memory fun<<<block_size, blocks, 0, s_id>>>; cudaStreamSynchronize(s_id); cudaMemcpyAsync(arr, d_arr, N * sizeof(int), cudaMemcpyDeviceToHost, s_id); cudaStreamDestroy(s_id); ... === = = = = === === Lecture === ... 
for (int i = 0; i < nStreams; i++) { int offset = i * streamSize; cudaMemcpyAsync(&d_a[offset], &a[offset], streamBytes, cudaMemcpyHostToDevice, stream[i]); kernel<<<streamSize/blockSize, blockSize, 0, stream[i]>>>(d_a, offset); cudaMemcpyAsync(&a[offset], &d_a[offset], streamBytes, cudaMemcpyDeviceToHost, stream[i]); } ... === = = = = === */ // ============= START COMPUTING ON DEVICE ============== // if (!cpuBench) { if (!gpuBench) printf("Simulating on the GPU...\n"); auto start1 = std::chrono::system_clock::now(); // Create, allocate and copy array to device Particle* d_particles = 0; int num_streams = (num_particles + BATCH_SIZE - 1) / BATCH_SIZE; // Allocate memory for num_particles + potential padding cudaMalloc(&d_particles, num_streams * BATCH_SIZE * sizeof(Particle)); cudaStream_t *s_id = (cudaStream_t*)malloc(num_streams * sizeof(cudaStream_t)); for (int i = 0; i < num_streams; i++) { cudaStreamCreate(&s_id[i]); } for(int i = 0; i < num_iterations; i++) { for(int j = 0; j < num_streams; j++) { int offset = j * BATCH_SIZE; cudaMemcpyAsync(&d_particles[offset], &d_res[offset], BATCH_SIZE * sizeof(Particle), cudaMemcpyHostToDevice, s_id[j]); device_timestep<<<(BATCH_SIZE + block_size - 1) / block_size, block_size, 0, s_id[j]>>>(&d_particles[offset], forces); cudaMemcpyAsync(&d_res[offset], &d_particles[offset], BATCH_SIZE * sizeof(Particle), cudaMemcpyDeviceToHost, s_id[j]); } // Basically like cudaDeviceSynchronize, but using this to memorize stream synchronization for(int j = 0; j < num_streams; j++) { cudaStreamSynchronize(s_id[j]); } // Ready to do stuff on host... } for (int i = 0; i < num_streams; i++) { cudaStreamDestroy(s_id[i]); } cudaFree(d_particles); auto end1 = std::chrono::system_clock::now(); std::chrono::duration<double> device_time = end1-start1; if (!gpuBench) printf("\tDone in %f s!\n\n", device_time.count()); else printf("%f\n", device_time.count()); } if (!gpuBench) { // ============= START COMPUTING ON HOST ============== // if (!cpuBench) printf("Simulating on the CPU...\n"); auto start2 = std::chrono::system_clock::now(); for(int i = 0; i < num_iterations; i++) { host_timestep(particles, forces, num_particles); } auto end2 = std::chrono::system_clock::now(); std::chrono::duration<double> host_time = end2-start2; if (!cpuBench) { printf("\tDone in %f s!\n\n", host_time.count()); printf("All done!\n"); } else printf("%f\n", host_time.count()); } cudaFreeHost(d_res); free(particles); return 0; }
9e8f09b4524aa886d059092602a8e013d43dece5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "stdlib.h" #include "stdio.h" #include <iostream> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <ctime> using namespace std; const int Radius = 1000; const int N = 100000; __global__ void PiCuda(double* points, int* count) { //__shared__ int cCount[N]; //for (int i = 0; i < N; i++) //cCount[i] = 0; int tx = threadIdx.x + blockIdx.x * blockDim.x; if (points[tx * 2] * points[tx * 2] + points[tx * 2 + 1] * points[tx * 2 + 1] < Radius * Radius) atomicAdd(count, 1); //cCount[tx]++; //__syncthreads(); //count[0] += cCount[tx]; } int Pi(double* points) { int count = 0; for (int i = 0; i < N; i++) { if (points[i * 2] * points[i * 2] + points[i * 2 + 1] * points[i * 2 + 1] < Radius * Radius) count++; } return count; } void CreatePoints(double* Points) { srand(time(0)); for (int i = 0; i < N; i++) { Points[i * 2] = rand() % Radius; Points[i * 2 + 1] = rand() % Radius; } } int main() { // Variables for measuring CUDA execution time. hipEvent_t start, end; float Time = 0; double points[N * 2]; CreatePoints(points); int countPointsInCircle = 0; double* cudaPoints; int* cudaCountPointsInCircle; hipMalloc((void**)&cudaPoints, sizeof(double) * N * 2); hipMalloc((void**)&cudaCountPointsInCircle, sizeof(int)); hipMemcpy(cudaPoints, &points, sizeof(double) * N * 2, hipMemcpyHostToDevice); hipMemcpy(cudaCountPointsInCircle, &countPointsInCircle, sizeof(int), hipMemcpyHostToDevice); hipEventCreate(&start); hipEventCreate(&end); // Start the timer hipEventRecord(start); float t_start = clock(); countPointsInCircle = Pi(points); float t_end = clock(); cout << "Sequential version run time: " << t_end - t_start << endl; // Stop the timer hipEventRecord(end); hipEventSynchronize(end); hipEventElapsedTime(&Time, start, end); cout << "PI = " << 4 * double(countPointsInCircle) / N << endl; dim3 blocks = 100; dim3 threads = 1000; // Start the timer hipEventRecord(start); PiCuda << < blocks, threads >> > (cudaPoints, cudaCountPointsInCircle); // Stop the timer hipEventRecord(end); hipEventSynchronize(end); hipEventElapsedTime(&Time, start, end); cout << "GPU run time: " << Time << endl; hipDeviceSynchronize(); hipMemcpy(&countPointsInCircle, cudaCountPointsInCircle, sizeof(int), hipMemcpyDeviceToHost); cout << "PI = " << 4 * float(countPointsInCircle) / float(N) << endl; hipFree(cudaPoints); return 0; }
9e8f09b4524aa886d059092602a8e013d43dece5.cu
#include "cuda.h" #include "stdlib.h" #include "stdio.h" #include <iostream> #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <ctime> using namespace std; const int Radius = 1000; const int N = 100000; __global__ void PiCuda(double* points, int* count) { //__shared__ int cCount[N]; //for (int i = 0; i < N; i++) //cCount[i] = 0; int tx = threadIdx.x + blockIdx.x * blockDim.x; if (points[tx * 2] * points[tx * 2] + points[tx * 2 + 1] * points[tx * 2 + 1] < Radius * Radius) atomicAdd(count, 1); //cCount[tx]++; //__syncthreads(); //count[0] += cCount[tx]; } int Pi(double* points) { int count = 0; for (int i = 0; i < N; i++) { if (points[i * 2] * points[i * 2] + points[i * 2 + 1] * points[i * 2 + 1] < Radius * Radius) count++; } return count; } void CreatePoints(double* Points) { srand(time(0)); for (int i = 0; i < N; i++) { Points[i * 2] = rand() % Radius; Points[i * 2 + 1] = rand() % Radius; } } int main() { //Переменные для измерения времени выполнения CUDA. cudaEvent_t start, end; float Time = 0; double points[N * 2]; CreatePoints(points); int countPointsInCircle = 0; double* cudaPoints; int* cudaCountPointsInCircle; cudaMalloc((void**)&cudaPoints, sizeof(double) * N * 2); cudaMalloc((void**)&cudaCountPointsInCircle, sizeof(int)); cudaMemcpy(cudaPoints, &points, sizeof(double) * N * 2, cudaMemcpyHostToDevice); cudaMemcpy(cudaCountPointsInCircle, &countPointsInCircle, sizeof(int), cudaMemcpyHostToDevice); cudaEventCreate(&start); cudaEventCreate(&end); // Запуск таймера cudaEventRecord(start); float t_start = clock(); countPointsInCircle = Pi(points); float t_end = clock(); cout << "Время работы последовательной версии:" << t_end - t_start << endl; // Остановка таймера cudaEventRecord(end); cudaEventSynchronize(end); cudaEventElapsedTime(&Time, start, end); cout << "PI = " << 4 * double(countPointsInCircle) / N << endl; dim3 blocks = 100; dim3 threads = 1000; // Запуск таймера cudaEventRecord(start); PiCuda << < blocks, threads >> > (cudaPoints, cudaCountPointsInCircle); // Остановка таймера cudaEventRecord(end); cudaEventSynchronize(end); cudaEventElapsedTime(&Time, start, end); cout << "Время работы на GPU:" << Time << endl; cudaDeviceSynchronize(); cudaMemcpy(&countPointsInCircle, cudaCountPointsInCircle, sizeof(int), cudaMemcpyDeviceToHost); cout << "PI = " << 4 * float(countPointsInCircle) / float(N) << endl; cudaFree(cudaPoints); return 0; }
f59dc421f905efd3d0491cae3b5396984f7cf8dd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "DFT.cuh" using byte = unsigned char; inline void checkCudaErrors(hipError_t err, char* tag) //cuda error handle function { if (hipSuccess != err) { fprintf(stderr, "CUDA Runtime API error:%s. %s\n", hipGetErrorString(err), tag); return; } } __global__ void DFT_kernel(byte *GPU_source, byte *GPU_result, int HandleWidth, int HandleHeight, int SourceWidth, int SourceHeight, int pitch, int pixelSize) { // The (u,v) coordinates in the frequency domain are simply the (x,y) coordinates of this thread within the whole thread grid int v = blockIdx.x*blockDim.x + threadIdx.x; int u = blockIdx.y*blockDim.y + threadIdx.y; if (v >= HandleWidth || u >= HandleHeight) { return; } ComplexNumber result; double realpart=0; double imaginepart =0; double greyValue; for (int x = 0; x < SourceHeight; x++) { for (int y = 0; y < SourceWidth; y++) { greyValue = (double)GPU_source[x*SourceWidth + y]; if ((x + y) & 1) greyValue = -1.0*greyValue; double factor = (double)u*x / (double)SourceHeight + (double)v * y / (double)SourceWidth; double realpart_buf = cos(-2 * PI*(factor)); double imaginepart_buf =sin(-2 * PI*(factor)); realpart += realpart_buf * greyValue; imaginepart += imaginepart_buf * greyValue; } } double result_norm = 15 * log(std::sqrt(realpart*realpart+ imaginepart * imaginepart) + 1); result_norm = result_norm < 0.0 ? 0.0 : result_norm; result_norm = result_norm > 255.0 ? 255.0 : result_norm; GPU_result[(SourceHeight - 1 - u)*(-1)*pitch + v * pixelSize] = (byte)result_norm; GPU_result[(SourceHeight - 1 - u)*(-1)*pitch + v * pixelSize + 1] = (byte)result_norm; GPU_result[(SourceHeight - 1 - u)*(-1)*pitch + v * pixelSize + 2] = (byte)result_norm; //GPU_result[u*SourceWidth + v] = GPU_source[u*SourceWidth +v]; } extern "C" void DFT_host(byte* source, byte* result_buf, int HandleWidth, int HandleHeight, int SourceWidth, int SourceHeight, int pitch, int pixelSize) { hipDeviceSetLimit(hipLimitPrintfFifoSize, (size_t)1024 * 1024 * 1024); // Specify the GPU block/grid layout dim3 DimBlock(BlockXMaxThreadNum, BlockYMaxThreadNum); dim3 DimGrid(HandleWidth / BlockXMaxThreadNum + 1, HandleHeight / BlockYMaxThreadNum + 1); byte* result; // pointer used for operations in device memory byte* GPU_source; // allocate device memory for the source image and the work area checkCudaErrors(hipMalloc((void **)&GPU_source, sizeof(byte)*SourceWidth*SourceHeight), "a"); checkCudaErrors(hipMalloc((void **)&result, sizeof(byte)*HandleHeight*((-1)*pitch)), "b"); checkCudaErrors(hipMemcpy(GPU_source, source, sizeof(byte)*SourceHeight*SourceWidth, hipMemcpyHostToDevice), "c"); hipDeviceSynchronize(); hipLaunchKernelGGL(( DFT_kernel) , dim3(DimGrid), dim3(DimBlock) , 0, 0, GPU_source, result, HandleWidth, HandleHeight, SourceWidth, SourceHeight, pitch, pixelSize); hipDeviceSynchronize(); checkCudaErrors(hipMemcpy(result_buf, result, sizeof(byte)*HandleHeight*((-1) * pitch), hipMemcpyDeviceToHost), "d"); hipFree(GPU_source); hipFree(result); }
f59dc421f905efd3d0491cae3b5396984f7cf8dd.cu
#include "DFT.cuh" using byte = unsigned char; inline void checkCudaErrors(cudaError err, char* tag) //cuda error handle function { if (cudaSuccess != err) { fprintf(stderr, "CUDA Runtime API error:%s. %s\n", cudaGetErrorString(err), tag); return; } } __global__ void DFT_kernel(byte *GPU_source, byte *GPU_result, int HandleWidth, int HandleHeight, int SourceWidth, int SourceHeight, int pitch, int pixelSize) { //频率坐标系下的u,v坐标即为对应的线程在整个thread阵里面的x,y坐标 int v = blockIdx.x*blockDim.x + threadIdx.x; int u = blockIdx.y*blockDim.y + threadIdx.y; if (v >= HandleWidth || u >= HandleHeight) { return; } ComplexNumber result; double realpart=0; double imaginepart =0; double greyValue; for (int x = 0; x < SourceHeight; x++) { for (int y = 0; y < SourceWidth; y++) { greyValue = (double)GPU_source[x*SourceWidth + y]; if ((x + y) & 1) greyValue = -1.0*greyValue; double factor = (double)u*x / (double)SourceHeight + (double)v * y / (double)SourceWidth; double realpart_buf = cos(-2 * PI*(factor)); double imaginepart_buf =sin(-2 * PI*(factor)); realpart += realpart_buf * greyValue; imaginepart += imaginepart_buf * greyValue; } } double result_norm = 15 * log(std::sqrt(realpart*realpart+ imaginepart * imaginepart) + 1); result_norm = result_norm < 0.0 ? 0.0 : result_norm; result_norm = result_norm > 255.0 ? 255.0 : result_norm; GPU_result[(SourceHeight - 1 - u)*(-1)*pitch + v * pixelSize] = (byte)result_norm; GPU_result[(SourceHeight - 1 - u)*(-1)*pitch + v * pixelSize + 1] = (byte)result_norm; GPU_result[(SourceHeight - 1 - u)*(-1)*pitch + v * pixelSize + 2] = (byte)result_norm; //GPU_result[u*SourceWidth + v] = GPU_source[u*SourceWidth +v]; } extern "C" void DFT_host(byte* source, byte* result_buf, int HandleWidth, int HandleHeight, int SourceWidth, int SourceHeight, int pitch, int pixelSize) { cudaDeviceSetLimit(cudaLimitPrintfFifoSize, (size_t)1024 * 1024 * 1024); //指定GPU分配空间方式 dim3 DimBlock(BlockXMaxThreadNum, BlockYMaxThreadNum); dim3 DimGrid(HandleWidth / BlockXMaxThreadNum + 1, HandleHeight / BlockYMaxThreadNum + 1); byte* result; //用来在显存中进行操作的指针 byte* GPU_source; //在显存中为原图像和工作区分配空间 checkCudaErrors(cudaMalloc((void **)&GPU_source, sizeof(byte)*SourceWidth*SourceHeight), "a"); checkCudaErrors(cudaMalloc((void **)&result, sizeof(byte)*HandleHeight*((-1)*pitch)), "b"); checkCudaErrors(cudaMemcpy(GPU_source, source, sizeof(byte)*SourceHeight*SourceWidth, cudaMemcpyHostToDevice), "c"); cudaThreadSynchronize(); DFT_kernel <<< DimGrid, DimBlock >>> (GPU_source, result, HandleWidth, HandleHeight, SourceWidth, SourceHeight, pitch, pixelSize); cudaThreadSynchronize(); checkCudaErrors(cudaMemcpy(result_buf, result, sizeof(byte)*HandleHeight*((-1) * pitch), cudaMemcpyDeviceToHost), "d"); cudaFree(GPU_source); cudaFree(result); }
5aebf4083fb9a41321e6338f44625266851c09c1.hip
// !!! This is a file automatically generated by hipify!!! #ifndef _GRADIENTKL_H_ #define _GRADIENTKL_H_ #include <hip/hip_runtime.h> #include "math.h" __global__ void G_lrcache(double* dY, double* dH, double* cuda_cache, double* dmul, int cur_index, long row_num, int pitch) { // F_partial += gradientCompute(Y[i],H[i],lm)*cache[cur_index][i]; long Idx = blockIdx.x * blockDim.x + threadIdx.x; if(Idx < row_num) { dmul[Idx] = -(dY[Idx] / (1 + exp(dY[Idx] * dH[Idx])))*cuda_cache[cur_index*pitch/sizeof(double) + Idx]; //dmul[Idx] = -(dY[Idx] / (1 + exp(dY[Idx] * dH[Idx])))* (*(cuda_cache + Idx*pitch/sizeof(double)+cur_index)); } } __global__ void G_lrkl(double* dY, double* dH, double* dX, double* dmul, long row_num) { // F_partial += gradientCompute(Y[i],H[i],lm)*dX[i]; long Idx = blockIdx.x * blockDim.x + threadIdx.x; if(Idx < row_num) { dmul[Idx] = -(dY[Idx] / (1 + exp(dY[Idx] * dH[Idx])))* dX[Idx]; } } __global__ void G_lrloss(double* dY, double* dH, double* dmul, long row_num) { // F += lossCompute(Y[i],H[i],lm); long Idx = blockIdx.x * blockDim.x + threadIdx.x; if(Idx < row_num) { dmul[Idx] = log(1+exp(-dY[Idx]*dH[Idx])); } } __global__ void H_cache(double* dH, double* cuda_cache, double diff, int cur_index, int pitch, long row_num) { long Idx = blockIdx.x * blockDim.x + threadIdx.x; if(Idx < row_num) { dH[Idx] = dH[Idx] + diff * cuda_cache[cur_index*pitch/sizeof(double) + Idx]; } } __global__ void Hkl(double* dH, double* dX, double diff, long row_num) { long Idx = blockIdx.x * blockDim.x + threadIdx.x; if(Idx < row_num) { dH[Idx] = dH[Idx] + diff * dX[Idx]; } } #endif
5aebf4083fb9a41321e6338f44625266851c09c1.cu
#ifndef _GRADIENTKL_H_ #define _GRADIENTKL_H_ #include <cuda.h> #include "math.h" __global__ void G_lrcache(double* dY, double* dH, double* cuda_cache, double* dmul, int cur_index, long row_num, int pitch) { // F_partial += gradientCompute(Y[i],H[i],lm)*cache[cur_index][i]; long Idx = blockIdx.x * blockDim.x + threadIdx.x; if(Idx < row_num) { dmul[Idx] = -(dY[Idx] / (1 + exp(dY[Idx] * dH[Idx])))*cuda_cache[cur_index*pitch/sizeof(double) + Idx]; //dmul[Idx] = -(dY[Idx] / (1 + exp(dY[Idx] * dH[Idx])))* (*(cuda_cache + Idx*pitch/sizeof(double)+cur_index)); } } __global__ void G_lrkl(double* dY, double* dH, double* dX, double* dmul, long row_num) { // F_partial += gradientCompute(Y[i],H[i],lm)*dX[i]; long Idx = blockIdx.x * blockDim.x + threadIdx.x; if(Idx < row_num) { dmul[Idx] = -(dY[Idx] / (1 + exp(dY[Idx] * dH[Idx])))* dX[Idx]; } } __global__ void G_lrloss(double* dY, double* dH, double* dmul, long row_num) { // F += lossCompute(Y[i],H[i],lm); long Idx = blockIdx.x * blockDim.x + threadIdx.x; if(Idx < row_num) { dmul[Idx] = log(1+exp(-dY[Idx]*dH[Idx])); } } __global__ void H_cache(double* dH, double* cuda_cache, double diff, int cur_index, int pitch, long row_num) { long Idx = blockIdx.x * blockDim.x + threadIdx.x; if(Idx < row_num) { dH[Idx] = dH[Idx] + diff * cuda_cache[cur_index*pitch/sizeof(double) + Idx]; } } __global__ void Hkl(double* dH, double* dX, double diff, long row_num) { long Idx = blockIdx.x * blockDim.x + threadIdx.x; if(Idx < row_num) { dH[Idx] = dH[Idx] + diff * dX[Idx]; } } #endif
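The kernels above only produce per-row terms; the reduction and the actual coordinate update happen on the host. A hypothetical single-feature update step is sketched below: the learning-rate choice, the host-side reduction, the helper name update_one_feature, and all sizes are assumptions, and the kernel definitions from the header-style file above are assumed to be in scope.

// Hypothetical one-coordinate logistic-regression step using G_lrkl and Hkl (sketch).
#include <cuda_runtime.h>
#include <vector>

__global__ void G_lrkl(double* dY, double* dH, double* dX, double* dmul, long row_num); // defined above
__global__ void Hkl(double* dH, double* dX, double diff, long row_num);                 // defined above

void update_one_feature(double* dY, double* dH, double* dX, long row_num, double step) {
  double* dmul;
  cudaMalloc(&dmul, row_num * sizeof(double));
  const int block = 256;
  const int grid = (int)((row_num + block - 1) / block);
  // Per-row gradient terms: dmul[i] = -(Y[i] / (1 + exp(Y[i]*H[i]))) * X[i]
  G_lrkl<<<grid, block>>>(dY, dH, dX, dmul, row_num);
  std::vector<double> h_mul(row_num);
  cudaMemcpy(h_mul.data(), dmul, row_num * sizeof(double), cudaMemcpyDeviceToHost);
  double F_partial = 0.0;
  for (double v : h_mul) F_partial += v;               // host-side reduction of the partial derivative
  double diff = -step * F_partial;                     // assumed plain gradient step on this coefficient
  // Keep the cached margins H consistent with the updated coefficient: H[i] += diff * X[i]
  Hkl<<<grid, block>>>(dH, dX, diff, row_num);
  cudaDeviceSynchronize();
  cudaFree(dmul);
}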
94c0cda78b6c5042aedfa0629d2b2d5b39b612de.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <hipcub/hipcub.hpp> #include "caffe2/core/context_gpu.h" #include "caffe2/operators/reduce_front_back_max_ops.h" namespace caffe2 { /*** Max Ops ***/ namespace { __global__ void columnwise_max_kernel( const int rows, const int cols, const float* data, const int* lengths, float* out) { typedef hipcub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; for (int colIndex = blockIdx.x; colIndex < cols; colIndex += gridDim.x) { float mx = FLT_MIN; const int length = lengths == nullptr ? rows : lengths[colIndex]; for (int rowIndex = threadIdx.x; rowIndex < length; rowIndex += blockDim.x) { mx = fmaxf(mx, data[rowIndex * cols + colIndex]); } mx = BlockReduce(temp_storage).Reduce(mx, hipcub::Max()); if (threadIdx.x == 0) { out[colIndex] = mx; } __syncthreads(); } } __global__ void rowwise_max_kernel( const int rows, const int cols, const float* data, const int* lengths, float* out) { typedef hipcub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; for (int rowIndex = blockIdx.x; rowIndex < rows; rowIndex += gridDim.x) { float mx = FLT_MIN; const int length = lengths == nullptr ? cols : lengths[rowIndex]; for (int colIndex = threadIdx.x; colIndex < length; colIndex += blockDim.x) { mx = fmaxf(mx, data[rowIndex * cols + colIndex]); } mx = BlockReduce(temp_storage).Reduce(mx, hipcub::Max()); if (threadIdx.x == 0) { out[rowIndex] = mx; } __syncthreads(); } } __global__ void columnwise_max_grad_kernel( const int rows, const int cols, const float* dYdata, const float* Xdata, const float* Ydata, const int* lengths, float* dXdata) { CUDA_1D_KERNEL_LOOP(i, rows * cols) { int col = i % cols; int row = i / cols; if (lengths != nullptr && row >= lengths[col]) { dXdata[i] = 0.0f; } else { dXdata[i] = (Xdata[i] == Ydata[col]) * dYdata[col]; } } } __global__ void rowwise_max_grad_kernel( const int rows, const int cols, const float* dYdata, const float* Xdata, const float* Ydata, const int* lengths, float* dXdata) { CUDA_1D_KERNEL_LOOP(i, rows * cols) { int col = i % cols; int row = i / cols; if (lengths != nullptr && col >= lengths[row]) { dXdata[i] = 0.0f; } else { dXdata[i] = (Xdata[i] == Ydata[row]) * dYdata[row]; } } } } // anonymous namespace // ReduceFrontmax template <> void MaxReduceDimsOp<float, CUDAContext, true>::Compute( int rows, int cols, const float* data, const int32_t* lengths_data, float* out_data) { hipLaunchKernelGGL(( columnwise_max_kernel), dim3(::min(cols, CAFFE_MAXIMUM_NUM_BLOCKS)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), rows, cols, data, lengths_data, out_data); } // ReduceBackMax template <> void MaxReduceDimsOp<float, CUDAContext, false>::Compute( int rows, int cols, const float* data, const int32_t* lengths_data, float* out_data) { hipLaunchKernelGGL(( rowwise_max_kernel), dim3(::min(rows, CAFFE_MAXIMUM_NUM_BLOCKS)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), rows, cols, data, lengths_data, out_data); } // ReduceFrontMaxGradient template <> void MaxReduceDimsGradientOp<float, CUDAContext, true>::Compute( int rows, int cols, const float* dYdata, const float* Xdata, const float* Ydata, const int32_t* lengths_data, float* dXdata) { hipLaunchKernelGGL(( columnwise_max_grad_kernel), dim3(CAFFE_GET_BLOCKS(rows * cols)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), rows, cols, dYdata, Xdata, Ydata, lengths_data, dXdata); } // 
ReduceBackMaxGradient template <> void MaxReduceDimsGradientOp<float, CUDAContext, false>::Compute( int rows, int cols, const float* dYdata, const float* Xdata, const float* Ydata, const int* lengths_data, float* dXdata) { hipLaunchKernelGGL(( rowwise_max_grad_kernel), dim3(CAFFE_GET_BLOCKS(rows * cols)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), rows, cols, dYdata, Xdata, Ydata, lengths_data, dXdata); } REGISTER_CUDA_OPERATOR( ReduceFrontMax, MaxReduceDimsOp<float, CUDAContext, true>); REGISTER_CUDA_OPERATOR( ReduceFrontMaxGradient, MaxReduceDimsGradientOp<float, CUDAContext, true>); REGISTER_CUDA_OPERATOR( ReduceBackMax, MaxReduceDimsOp<float, CUDAContext, false>); REGISTER_CUDA_OPERATOR( ReduceBackMaxGradient, MaxReduceDimsGradientOp<float, CUDAContext, false>); } // namespace caffe2
94c0cda78b6c5042aedfa0629d2b2d5b39b612de.cu
#include <cub/block/block_reduce.cuh> #include "caffe2/core/context_gpu.h" #include "caffe2/operators/reduce_front_back_max_ops.h" namespace caffe2 { /*** Max Ops ***/ namespace { __global__ void columnwise_max_kernel( const int rows, const int cols, const float* data, const int* lengths, float* out) { typedef cub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; for (int colIndex = blockIdx.x; colIndex < cols; colIndex += gridDim.x) { float mx = FLT_MIN; const int length = lengths == nullptr ? rows : lengths[colIndex]; for (int rowIndex = threadIdx.x; rowIndex < length; rowIndex += blockDim.x) { mx = fmaxf(mx, data[rowIndex * cols + colIndex]); } mx = BlockReduce(temp_storage).Reduce(mx, cub::Max()); if (threadIdx.x == 0) { out[colIndex] = mx; } __syncthreads(); } } __global__ void rowwise_max_kernel( const int rows, const int cols, const float* data, const int* lengths, float* out) { typedef cub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; for (int rowIndex = blockIdx.x; rowIndex < rows; rowIndex += gridDim.x) { float mx = FLT_MIN; const int length = lengths == nullptr ? cols : lengths[rowIndex]; for (int colIndex = threadIdx.x; colIndex < length; colIndex += blockDim.x) { mx = fmaxf(mx, data[rowIndex * cols + colIndex]); } mx = BlockReduce(temp_storage).Reduce(mx, cub::Max()); if (threadIdx.x == 0) { out[rowIndex] = mx; } __syncthreads(); } } __global__ void columnwise_max_grad_kernel( const int rows, const int cols, const float* dYdata, const float* Xdata, const float* Ydata, const int* lengths, float* dXdata) { CUDA_1D_KERNEL_LOOP(i, rows * cols) { int col = i % cols; int row = i / cols; if (lengths != nullptr && row >= lengths[col]) { dXdata[i] = 0.0f; } else { dXdata[i] = (Xdata[i] == Ydata[col]) * dYdata[col]; } } } __global__ void rowwise_max_grad_kernel( const int rows, const int cols, const float* dYdata, const float* Xdata, const float* Ydata, const int* lengths, float* dXdata) { CUDA_1D_KERNEL_LOOP(i, rows * cols) { int col = i % cols; int row = i / cols; if (lengths != nullptr && col >= lengths[row]) { dXdata[i] = 0.0f; } else { dXdata[i] = (Xdata[i] == Ydata[row]) * dYdata[row]; } } } } // anonymous namespace // ReduceFrontmax template <> void MaxReduceDimsOp<float, CUDAContext, true>::Compute( int rows, int cols, const float* data, const int32_t* lengths_data, float* out_data) { columnwise_max_kernel<<< std::min(cols, CAFFE_MAXIMUM_NUM_BLOCKS), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(rows, cols, data, lengths_data, out_data); } // ReduceBackMax template <> void MaxReduceDimsOp<float, CUDAContext, false>::Compute( int rows, int cols, const float* data, const int32_t* lengths_data, float* out_data) { rowwise_max_kernel<<< std::min(rows, CAFFE_MAXIMUM_NUM_BLOCKS), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(rows, cols, data, lengths_data, out_data); } // ReduceFrontMaxGradient template <> void MaxReduceDimsGradientOp<float, CUDAContext, true>::Compute( int rows, int cols, const float* dYdata, const float* Xdata, const float* Ydata, const int32_t* lengths_data, float* dXdata) { columnwise_max_grad_kernel<<< CAFFE_GET_BLOCKS(rows * cols), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( rows, cols, dYdata, Xdata, Ydata, lengths_data, dXdata); } // ReduceBackMaxGradient template <> void MaxReduceDimsGradientOp<float, CUDAContext, false>::Compute( int rows, int cols, const float* dYdata, const float* Xdata, const float* 
Ydata, const int* lengths_data, float* dXdata) { rowwise_max_grad_kernel<<< CAFFE_GET_BLOCKS(rows * cols), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( rows, cols, dYdata, Xdata, Ydata, lengths_data, dXdata); } REGISTER_CUDA_OPERATOR( ReduceFrontMax, MaxReduceDimsOp<float, CUDAContext, true>); REGISTER_CUDA_OPERATOR( ReduceFrontMaxGradient, MaxReduceDimsGradientOp<float, CUDAContext, true>); REGISTER_CUDA_OPERATOR( ReduceBackMax, MaxReduceDimsOp<float, CUDAContext, false>); REGISTER_CUDA_OPERATOR( ReduceBackMaxGradient, MaxReduceDimsGradientOp<float, CUDAContext, false>); } // namespace caffe2
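// --- Illustrative sketch (not part of the Caffe2 sources above) ---
// A minimal standalone CUDA kernel showing the block-per-row reduction pattern that
// rowwise_max_kernel uses: each block owns one row, threads stride over the columns,
// and cub::BlockReduce combines the per-thread partial maxima. The sketch seeds the
// reduction with -FLT_MAX (the identity for max); the kernels above seed with FLT_MIN,
// the smallest positive normal float, which differs only for rows whose values are all
// negative. The block size of 128 here is an assumption for the sketch.
#include <cfloat>
#include <cub/block/block_reduce.cuh>

__global__ void rowwise_max_sketch(int rows, int cols, const float* data, float* out) {
  typedef cub::BlockReduce<float, 128> BlockReduce;
  __shared__ typename BlockReduce::TempStorage temp_storage;
  for (int row = blockIdx.x; row < rows; row += gridDim.x) {
    float mx = -FLT_MAX;                           // identity element for max
    for (int col = threadIdx.x; col < cols; col += blockDim.x)
      mx = fmaxf(mx, data[row * cols + col]);      // per-thread partial maximum
    mx = BlockReduce(temp_storage).Reduce(mx, cub::Max());
    if (threadIdx.x == 0) out[row] = mx;           // thread 0 holds the block result
    __syncthreads();                               // temp_storage is reused next row
  }
}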
16940be8a27eff55c486a44e912757baaf6b2132.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef _CUDADOUBLE_SOLVER_CU_ #define _CUDADOUBLE_SOLVER_CU_ #include <stdio.h> #include <math.h> #include <time.h> #include <sys/time.h> #include <stdlib.h> #include "pnpstructs.h" //#include "pnpdebug.h" #define DefClock0 clock_t time0;timeval tvtime0,tvtime1; #define StartClock0 time0=clock ();gettimeofday(&tvtime0,NULL); #define StopClock0 gettimeofday(&tvtime1,NULL);DbgPrint0("Time : %g s(CPU Time) %g s(Wall Time)\n",((double)(clock ()-time0))/CLOCKS_PER_SEC,double(tvtime1.tv_sec)+(double(tvtime1.tv_usec)/1000000.0)-double(tvtime0.tv_sec)-(double(tvtime0.tv_usec)/1000000.0)); #define StopClockWMes0(Massege) gettimeofday(&tvtime1,NULL);printf("Time for %s is %.5g s (CPU Time) %g s(Wall Time)\n",(Massege),((double)(clock ()-time0))/CLOCKS_PER_SEC,double(tvtime1.tv_sec)+(double(tvtime1.tv_usec)/1000000.0)-double(tvtime0.tv_sec)-(double(tvtime0.tv_usec)/1000000.0)); #define FMUL __dmul_rn #define FADD __dadd_rn #define FMAF __dmaf_rn __constant__ double dc_om1; __constant__ double dc_om2d6; __constant__ double* dc_P[8]; __constant__ int dc_Qnum[8]; __constant__ double* dc_Q[8]; __constant__ double* dc_Qmult[8]; __constant__ int* dc_Qpos[8]; __constant__ int dc_DBnum[8]; __constant__ double* dc_DielMult[48]; __constant__ int* dc_DBpos[8]; #define dc_lookupVirGridSIZE 1024 __constant__ int dc_lookupVirGrid[dc_lookupVirGridSIZE]; __global__ void KDLaplaceB(double* d_P0,double* d_P1,double* d_P2,double* d_P3, double* d_P4,double* d_P5,double* d_P6,double* d_P7, const double om1,const double om2d6, const int pitchX,const int pitchXY,const int pitchXY_BS_Z,const int BS_X,const int BS_XY) { // int VirXblock = (blockIdx.x%VirGridX); // int VirYblock = (blockIdx.x/VirGridX); // int VirZblock = blockIdx.y; // // int tx = VirXblock*BS_X + threadIdx.x; // int ty = VirYblock*BS_Y + threadIdx.y+1; // int tz = VirZblock*BS_Z + threadIdx.z+1; // // int i=tx+ty*pitchX+tz*pitchXY; int t=threadIdx.x+threadIdx.y*pitchX+threadIdx.z*pitchXY; int i=dc_lookupVirGrid[blockIdx.x] + blockIdx.y*pitchXY_BS_Z + t; t=threadIdx.x+threadIdx.y*BS_X+threadIdx.z*BS_XY; double xP0,xP3,xP5,xP6; double yP0,yP3,yP5,yP6; double zP0,zP3,zP5,zP6; __shared__ double shP[256]; //do over P1 shP[t]=d_P1[i]; __syncthreads(); zP5=FADD(shP[t],d_P1[i+pitchXY]); yP3=FADD(shP[t],d_P1[i+pitchX]); if(threadIdx.x!=0) xP0=FADD(shP[t],shP[t-1]); else xP0=FADD(shP[t],d_P1[i-1]); __syncthreads(); //do over P2 shP[t]=d_P2[i]; __syncthreads(); zP6=FADD(shP[t],d_P2[i+pitchXY]); yP0=FADD(shP[t],d_P2[i-pitchX]); if(threadIdx.x!=blockDim.x-1) xP3=FADD(shP[t],shP[t+1]); else xP3=FADD(shP[t],d_P2[i+1]); __syncthreads(); //do over P4 shP[t]=d_P4[i]; __syncthreads(); zP0=FADD(shP[t],d_P4[i-pitchXY]); yP6=FADD(shP[t],d_P4[i+pitchX]); if(threadIdx.x!=blockDim.x-1) xP5=FADD(shP[t],shP[t+1]); else xP5=FADD(shP[t],d_P4[i+1]); __syncthreads(); //do over P7 shP[t]=d_P7[i]; __syncthreads(); zP3=FADD(shP[t],d_P7[i-pitchXY]); yP5=FADD(shP[t],d_P7[i-pitchX]); if(threadIdx.x!=0) xP6=FADD(shP[t],shP[t-1]); else xP6=FADD(shP[t],d_P7[i-1]); d_P0[i]=FADD(FMUL(om1,d_P0[i]),FMUL(om2d6,FADD(FADD(xP0,yP0),zP0))); d_P3[i]=FADD(FMUL(om1,d_P3[i]),FMUL(om2d6,FADD(FADD(xP3,yP3),zP3))); d_P5[i]=FADD(FMUL(om1,d_P5[i]),FMUL(om2d6,FADD(FADD(xP5,yP5),zP5))); d_P6[i]=FADD(FMUL(om1,d_P6[i]),FMUL(om2d6,FADD(FADD(xP6,yP6),zP6))); } __global__ void KDLaplaceW(double* d_P0,double* d_P1,double* d_P2,double* d_P3, double* d_P4,double* d_P5,double* d_P6,double* d_P7, const double om1,const double om2d6, const 
int pitchX,const int pitchXY,const int pitchXY_BS_Z,const int BS_X,const int BS_XY) { // int VirXblock = (blockIdx.x%VirGridX); // int VirYblock = (blockIdx.x/VirGridX); // int VirZblock = blockIdx.y; // // int tx = VirXblock*BS_X + threadIdx.x; // int ty = VirYblock*BS_Y + threadIdx.y+1; // int tz = VirZblock*BS_Z + threadIdx.z+1; // // int i=tx+ty*pitchX+tz*pitchXY; //int i=dc_lookupVirGrid[blockIdx.x] + blockIdx.y*pitchXY_BS_Z + threadIdx.x+threadIdx.y*pitchX+threadIdx.z*pitchXY; int t=threadIdx.x+threadIdx.y*pitchX+threadIdx.z*pitchXY; int i=dc_lookupVirGrid[blockIdx.x] + blockIdx.y*pitchXY_BS_Z + t; t=threadIdx.x+threadIdx.y*BS_X+threadIdx.z*BS_XY; double xP1,xP2,xP4,xP7; double yP1,yP2,yP4,yP7; double zP1,zP2,zP4,zP7; __shared__ double shP[256]; //do dc_P[0] shP[t]=d_P0[i]; __syncthreads(); zP4=FADD(shP[t],d_P0[i+pitchXY]); yP2=FADD(shP[t],d_P0[i+pitchX]); if(threadIdx.x!=blockDim.x-1) xP1=FADD(shP[t],shP[t+1]); else xP1=FADD(shP[t],d_P0[i+1]); __syncthreads(); //do d_P[3] shP[t]=d_P3[i]; __syncthreads(); zP7=FADD(shP[t],d_P3[i+pitchXY]); yP1=FADD(shP[t],d_P3[i-pitchX]); if(threadIdx.x!=0) xP2=FADD(shP[t],shP[t-1]); else xP2=FADD(shP[t],d_P3[i-1]); __syncthreads(); //do d_P[5] shP[t]=d_P5[i]; __syncthreads(); zP1=FADD(shP[t],d_P5[i-pitchXY]); yP7=FADD(shP[t],d_P5[i+pitchX]); if(threadIdx.x!=0) xP4=FADD(shP[t],shP[t-1]); else xP4=FADD(shP[t],d_P5[i-1]); __syncthreads(); //do d_P6 shP[t]=d_P6[i]; __syncthreads(); zP2=FADD(shP[t],d_P6[i-pitchXY]); yP4=FADD(shP[t],d_P6[i-pitchX]); if(threadIdx.x!=blockDim.x-1) xP7=FADD(shP[t],shP[t+1]); else xP7=FADD(shP[t],d_P6[i+1]); //d_P6[i]=FADD(FMUL(om1,d_P6[i]),FMUL(om2d6,FADD(FADD(xP6,yP6),zP6))); d_P1[i]=FADD(FMUL(om1,d_P1[i]),FMUL(om2d6,FADD(FADD(xP1,yP1),zP1))); d_P2[i]=FADD(FMUL(om1,d_P2[i]),FMUL(om2d6,FADD(FADD(xP2,yP2),zP2))); d_P4[i]=FADD(FMUL(om1,d_P4[i]),FMUL(om2d6,FADD(FADD(xP4,yP4),zP4))); d_P7[i]=FADD(FMUL(om1,d_P7[i]),FMUL(om2d6,FADD(FADD(xP7,yP7),zP7))); } __global__ void KDPoissonQB() { int t=threadIdx.x+blockIdx.x*blockDim.x; int i; if(t<dc_Qnum[0]) { i=dc_Qpos[0][t]; dc_P[0][i]=FADD(dc_P[0][i],dc_Q[0][t]); } if(t<dc_Qnum[3]) { i=dc_Qpos[3][t]; dc_P[3][i]=FADD(dc_P[3][i],dc_Q[3][t]); } if(t<dc_Qnum[5]) { i=dc_Qpos[5][t]; dc_P[5][i]=FADD(dc_P[5][i],dc_Q[5][t]); } if(t<dc_Qnum[6]) { i=dc_Qpos[6][t]; dc_P[6][i]=FADD(dc_P[6][i],dc_Q[6][t]); } } __global__ void KDPoissonQW() { int t=threadIdx.x+blockIdx.x*blockDim.x; int i; if(t<dc_Qnum[1]) { i=dc_Qpos[1][t]; dc_P[1][i]=FADD(dc_P[1][i],dc_Q[1][t]); } if(t<dc_Qnum[2]) { i=dc_Qpos[2][t]; dc_P[2][i]=FADD(dc_P[2][i],dc_Q[2][t]); } if(t<dc_Qnum[4]) { i=dc_Qpos[4][t]; dc_P[4][i]=FADD(dc_P[4][i],dc_Q[4][t]); } if(t<dc_Qnum[7]) { i=dc_Qpos[7][t]; dc_P[7][i]=FADD(dc_P[7][i],dc_Q[7][t]); } } __global__ void KDPoissonDBB(double* d_P0,double* d_P1,double* d_P2,double* d_P3, double* d_P4,double* d_P5,double* d_P6,double* d_P7, const double om2d6, const int pitchX,const int pitchXY) { int t=threadIdx.x+blockIdx.x*blockDim.x; int i; double xP,xM,yP,yM,zP,zM; if(t<dc_DBnum[0]) { i=dc_DBpos[0][t]; //P0 x xP=FMUL(dc_DielMult[PlusX][t],d_P1[i]); xM=FMUL(dc_DielMult[MinusX][t],d_P1[i-1]); //P0 y yP=FMUL(dc_DielMult[PlusY][t],d_P2[i]); yM=FMUL(dc_DielMult[MinusY][t],d_P2[i-pitchX]); //P0 z zP=FMUL(dc_DielMult[PlusZ][t],d_P4[i]); zM=FMUL(dc_DielMult[MinusZ][t],d_P4[i-pitchXY]); xP=FADD(xM,xP); yP=FADD(yM,yP); zP=FADD(zM,zP); dc_P[0][i]=FADD(FADD(xP,yP),FADD(zP,dc_P[0][i])); } if(t<dc_DBnum[3]) { i=dc_DBpos[3][t]; //P3 x xM=FMUL(dc_DielMult[3*6+MinusX][t],d_P2[i]); 
xP=FMUL(dc_DielMult[3*6+PlusX][t],d_P2[i+1]); //P3 y yM=FMUL(dc_DielMult[3*6+MinusY][t],d_P1[i]); yP=FMUL(dc_DielMult[3*6+PlusY][t],d_P1[i+pitchX]); //P3 z zP=FMUL(dc_DielMult[3*6+PlusZ][t],d_P7[i]); zM=FMUL(dc_DielMult[3*6+MinusZ][t],d_P7[i-pitchXY]); xP=FADD(xM,xP); yP=FADD(yM,yP); zP=FADD(zM,zP); dc_P[3][i]=FADD(FADD(xP,yP),FADD(zP,dc_P[3][i])); } if(t<dc_DBnum[5]) { i=dc_DBpos[5][t]; //P5 x xM=FMUL(dc_DielMult[5*6+MinusX][t],d_P4[i]); xP=FMUL(dc_DielMult[5*6+PlusX][t],d_P4[i+1]); //P5 y yP=FMUL(dc_DielMult[5*6+PlusY][t],d_P7[i]); yM=FMUL(dc_DielMult[5*6+MinusY][t],d_P7[i-pitchX]); //P5 z zM=FMUL(dc_DielMult[5*6+MinusZ][t],d_P1[i]); zP=FMUL(dc_DielMult[5*6+PlusZ][t],d_P1[i+pitchXY]); xP=FADD(xM,xP); yP=FADD(yM,yP); zP=FADD(zM,zP); dc_P[5][i]=FADD(FADD(xP,yP),FADD(zP,dc_P[5][i])); } if(t<dc_DBnum[6]) { i=dc_DBpos[6][t]; //P6 x xP=FMUL(dc_DielMult[6*6+PlusX][t],d_P7[i]); xM=FMUL(dc_DielMult[6*6+MinusX][t],d_P7[i-1]); //P6 y yM=FMUL(dc_DielMult[6*6+MinusY][t],d_P4[i]); yP=FMUL(dc_DielMult[6*6+PlusY][t],d_P4[i+pitchX]); //P6 z zM=FMUL(dc_DielMult[6*6+MinusZ][t],d_P2[i]); zP=FMUL(dc_DielMult[6*6+PlusZ][t],d_P2[i+pitchXY]); xP=FADD(xM,xP); yP=FADD(yM,yP); zP=FADD(zM,zP); dc_P[6][i]=FADD(FADD(xP,yP),FADD(zP,dc_P[6][i])); } } __global__ void KDPoissonDBW(double* d_P0,double* d_P1,double* d_P2,double* d_P3, double* d_P4,double* d_P5,double* d_P6,double* d_P7, const double om2d6, const int pitchX,const int pitchXY) { int t=threadIdx.x+blockIdx.x*blockDim.x; int i; double xP,xM,yP,yM,zP,zM; if(t<dc_DBnum[1]) { i=dc_DBpos[1][t]; //P1 x xM=FMUL(dc_DielMult[1*6+MinusX][t],d_P0[i]); xP=FMUL(dc_DielMult[1*6+PlusX][t],d_P0[i+1]); //P1 y yP=FMUL(dc_DielMult[1*6+PlusY][t],d_P3[i]); yM=FMUL(dc_DielMult[1*6+MinusY][t],d_P3[i-pitchX]); //P1 z zP=FMUL(dc_DielMult[1*6+PlusZ][t],d_P5[i]); zM=FMUL(dc_DielMult[1*6+MinusZ][t],d_P5[i-pitchXY]); xP=FADD(xM,xP); yP=FADD(yM,yP); zP=FADD(zM,zP); dc_P[1][i]=FADD(FADD(xP,yP),FADD(zP,dc_P[1][i])); } if(t<dc_DBnum[2]) { i=dc_DBpos[2][t]; //P2 x xP=FMUL(dc_DielMult[2*6+PlusX][t],d_P3[i]); xM=FMUL(dc_DielMult[2*6+MinusX][t],d_P3[i-1]); //P2 y yM=FMUL(dc_DielMult[2*6+MinusY][t],d_P0[i]); yP=FMUL(dc_DielMult[2*6+PlusY][t],d_P0[i+pitchX]); //P2 z zP=FMUL(dc_DielMult[2*6+PlusZ][t],d_P6[i]); zM=FMUL(dc_DielMult[2*6+MinusZ][t],d_P6[i-pitchXY]); xP=FADD(xM,xP); yP=FADD(yM,yP); zP=FADD(zM,zP); dc_P[2][i]=FADD(FADD(xP,yP),FADD(zP,dc_P[2][i])); } if(t<dc_DBnum[4]) { i=dc_DBpos[4][t]; //P4x xP=FMUL(dc_DielMult[4*6+PlusX][t],d_P5[i]); xM=FMUL(dc_DielMult[4*6+MinusX][t],d_P5[i-1]); //P4 y yP=FMUL(dc_DielMult[4*6+PlusY][t],d_P6[i]); yM=FMUL(dc_DielMult[4*6+MinusY][t],d_P6[i-pitchX]); //P4 z zM=FMUL(dc_DielMult[4*6+MinusZ][t],d_P0[i]); zP=FMUL(dc_DielMult[4*6+PlusZ][t],d_P0[i+pitchXY]); xP=FADD(xM,xP); yP=FADD(yM,yP); zP=FADD(zM,zP); dc_P[4][i]=FADD(FADD(xP,yP),FADD(zP,dc_P[4][i])); } if(t<dc_DBnum[7]) { i=dc_DBpos[7][t]; //P7 x xM=FMUL(dc_DielMult[7*6+MinusX][t],d_P6[i]); xP=FMUL(dc_DielMult[7*6+PlusX][t],d_P6[i+1]); //P7 y yM=FMUL(dc_DielMult[7*6+MinusY][t],d_P5[i]); yP=FMUL(dc_DielMult[7*6+PlusY][t],d_P5[i+pitchX]); //P7 z zM=FMUL(dc_DielMult[7*6+MinusZ][t],d_P3[i]); zP=FMUL(dc_DielMult[7*6+PlusZ][t],d_P3[i+pitchXY]); xP=FADD(xM,xP); yP=FADD(yM,yP); zP=FADD(zM,zP); dc_P[7][i]=FADD(FADD(xP,yP),FADD(zP,dc_P[7][i])); } } __global__ void KDPoissonQnCalcEB() { int t=threadIdx.x+blockIdx.x*blockDim.x; int i; if(t<dc_Qnum[0]) { i=dc_Qpos[0][t]; dc_P[0][i]=FADD(dc_P[0][i],dc_Q[0][t]); } if(t<dc_Qnum[3]) { i=dc_Qpos[3][t]; dc_P[3][i]=FADD(dc_P[3][i],dc_Q[3][t]); } if(t<dc_Qnum[5]) { 
i=dc_Qpos[5][t]; dc_P[5][i]=FADD(dc_P[5][i],dc_Q[5][t]); } if(t<dc_Qnum[6]) { i=dc_Qpos[6][t]; dc_P[6][i]=FADD(dc_P[6][i],dc_Q[6][t]); } } __global__ void KDPoissonQnCalcW() { int t=threadIdx.x+blockIdx.x*blockDim.x; int i; if(t<dc_Qnum[1]) { i=dc_Qpos[1][t]; dc_P[1][i]=FADD(dc_P[1][i],dc_Q[1][t]); } if(t<dc_Qnum[2]) { i=dc_Qpos[2][t]; dc_P[2][i]=FADD(dc_P[2][i],dc_Q[2][t]); } if(t<dc_Qnum[4]) { i=dc_Qpos[4][t]; dc_P[4][i]=FADD(dc_P[4][i],dc_Q[4][t]); } if(t<dc_Qnum[7]) { i=dc_Qpos[7][t]; dc_P[7][i]=FADD(dc_P[7][i],dc_Q[7][t]); } } int checkCUDAError(const char* msg); int GetCUDADevStat(); extern "C" int DoPSolverOnCudaDouble(PoissonSolverOnCudaParamStruct* CudaParm,PSolverOnCudaStructDouble* PS) { GetCUDADevStat(); int i,k; printf("dimBlock [%d,%d,%d]\n",CudaParm->BS_X,CudaParm->BS_Y,CudaParm->BS_Z); DefClock0; hipEvent_t start, stop; float time; hipEventCreate(&start); hipEventCreate(&stop); // Allocate vectors in device memory int ErrorCount=0; int GS_X=PS->GS[0]; int GS_Y=PS->GS[1]; int GS_Z=PS->GS[2]; int GS_XY=GS_X*GS_Y; int GS_XYZ=GS_X*GS_Y*GS_Z; double om1 = 1.0-PS->Relaxation; double om2d6 = PS->Relaxation/6.0; hipMemcpyToSymbol(dc_om1, &om1, sizeof(double), 0, hipMemcpyHostToDevice ); hipMemcpyToSymbol(dc_om2d6, &om2d6, sizeof(double), 0, hipMemcpyHostToDevice ); printf("GS=[%d,%d,%d]=%d\n",GS_X,GS_Y,GS_Z,GS_XYZ); //Start Clock for GPU StartClock0; int BS_X=CudaParm->BS_X; int BS_Y=CudaParm->BS_Y; int BS_Z=CudaParm->BS_Z; int BS_XY=BS_X*BS_Y; int BS_XYZ=BS_X*BS_Y*BS_Z; dim3 dimBlock(BS_X,BS_Y,BS_Z); dim3 dimGridVirt(GS_X/BS_X/2, GS_Y/BS_Y/2, GS_Z/BS_Z/2); //d_P* is store in pitched array x has 16 for CUDA and Y/Z is +2 for BC dim3 spltGSWBC(PS->spltGSWBC[0],PS->spltGSWBC[1],PS->spltGSWBC[2]); dim3 dimGrid(dimGridVirt.x*dimGridVirt.y, dimGridVirt.z,1); int pitchX=spltGSWBC.x; int pitchXY=spltGSWBC.x*spltGSWBC.y; printf("dimBlock [%d,%d,%d]\n",dimBlock.x,dimBlock.y,dimBlock.z); printf("dimGrid [%d,%d,%d]\n",dimGrid.x,dimGrid.y,dimGrid.z); printf("dimGridVirt [%d,%d,%d]\n",dimGridVirt.x,dimGridVirt.y,dimGridVirt.z); printf("spltGSWBC [%d,%d,%d]\n",spltGSWBC.x,spltGSWBC.y,spltGSWBC.z); printf("Total number of threads %d\n",dimGrid.x*dimGrid.y*dimGrid.z*dimBlock.x*dimBlock.y*dimBlock.z); //fill dc_lookupVirGrid int h_lookupVirGrid[dc_lookupVirGridSIZE]; if(dc_lookupVirGridSIZE<dimGrid.x) { printf("ERROR: dc_lookupVirGridSIZE is smaller then dimGrid.x, make it at least %d\n",dimGrid.x); return 1; } int VirXblock,VirYblock; for(i=0;i<dimGrid.x;i++) { VirXblock = (i%dimGridVirt.x); VirYblock = (i/dimGridVirt.x); h_lookupVirGrid[i]=VirXblock*BS_X+(VirYblock*BS_Y + 1)*pitchX+pitchXY; } hipMemcpyToSymbol(dc_lookupVirGrid, h_lookupVirGrid, dimGrid.x*sizeof(int), 0, hipMemcpyHostToDevice ); int GS_XYZsplit = spltGSWBC.x*spltGSWBC.y*spltGSWBC.z; int sizeGS_XYZsplit = GS_XYZsplit*sizeof(double); //allocate and copy to device lin-array double* d_P[8]; for(i=0;i<8;i++) hipMalloc((void**)&d_P[i], sizeGS_XYZsplit); for(i=0;i<8;i++) hipMemcpy(d_P[i], PS->P[i], sizeGS_XYZsplit, hipMemcpyHostToDevice); hipMemcpyToSymbol(dc_P, d_P, 8*sizeof(double*), 0, hipMemcpyHostToDevice ); //charges double* d_Q[8]; int* d_Qpos[8]; double* d_Qmult[8]; int Qmax=0; for(i=0;i<8;i++) { d_Q[i]=NULL; d_Qpos[i]=NULL; d_Qmult[i]=NULL; if(PS->Qnum[i]>Qmax)Qmax=PS->Qnum[i]; if(PS->Qnum[i]>0) { hipMalloc((void**)&d_Q[i], PS->Qnum[i]*sizeof(double)); hipMalloc((void**)&d_Qpos[i], PS->Qnum[i]*sizeof(int)); hipMalloc((void**)&d_Qmult[i], PS->Qnum[i]*sizeof(int)); hipMemcpy(d_Q[i], PS->Q[i], PS->Qnum[i]*sizeof(double), 
hipMemcpyHostToDevice); hipMemcpy(d_Qpos[i], PS->Qpos[i], PS->Qnum[i]*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_Qmult[i], PS->Qmult[i], PS->Qnum[i]*sizeof(int), hipMemcpyHostToDevice); } } hipMemcpyToSymbol(dc_Q, d_Q, 8*sizeof(double*), 0, hipMemcpyHostToDevice ); hipMemcpyToSymbol(dc_Qnum, PS->Qnum, 8*sizeof(int), 0, hipMemcpyHostToDevice ); hipMemcpyToSymbol(dc_Qpos, d_Qpos, 8*sizeof(int*), 0, hipMemcpyHostToDevice ); hipMemcpyToSymbol(dc_Qmult, d_Qmult, 8*sizeof(int*), 0, hipMemcpyHostToDevice ); int Qblock=CudaParm->Qblock; int QGrid=Qmax/Qblock; if(Qmax%Qblock!=0)QGrid++; printf("Qmax=%d Qblock=%d QGrid=%d\n",Qmax,Qblock,QGrid); //Diel Border int d_DBNum[8]; double* d_DielMult[48]; int* d_DBPos[8]; int DBmax=0; for(i=0;i<8;i++) { d_DBPos[i]=NULL; for(k=0;k<6;k++) d_DielMult[i*6+k]=NULL; if(PS->DielBordNum[i]>DBmax)DBmax=PS->DielBordNum[i]; if(PS->DielBordNum[i]>0) { hipMalloc((void**)&d_DBPos[i], PS->DielBordNum[i]*sizeof(int)); hipMemcpy(d_DBPos[i], PS->DielBordPos[i], PS->DielBordNum[i]*sizeof(int), hipMemcpyHostToDevice); for(k=0;k<6;k++) { hipMalloc((void**)&d_DielMult[i*6+k], PS->DielBordNum[i]*sizeof(double)); hipMemcpy(d_DielMult[i*6+k], PS->DielMult[i][k], PS->DielBordNum[i]*sizeof(double), hipMemcpyHostToDevice); } } } hipMemcpyToSymbol(dc_DBnum, PS->DielBordNum, 8*sizeof(int), 0, hipMemcpyHostToDevice ); hipMemcpyToSymbol(dc_DBpos, d_DBPos, 8*sizeof(int*), 0, hipMemcpyHostToDevice ); hipMemcpyToSymbol(dc_DielMult, d_DielMult, 48*sizeof(double*), 0, hipMemcpyHostToDevice ); int DBblock=CudaParm->DBblock; int DBGrid=DBmax/DBblock; if(DBmax%DBblock!=0)DBGrid++; printf("DBmax=%d DBblock=%d DBGrid=%d\n",DBmax,DBblock,DBGrid); //do loop hipEventRecord( start, 0 ); int j; double totalEnergy,dtmp1; double fpoh=4.0*M_PI*PS->GridScale; GetCUDADevStat(); for(int iteration=1;iteration<=PS->MaxIterations;iteration++) {//pitchXY*BS_Z hipLaunchKernelGGL(( KDLaplaceB), dim3(dimGrid), dim3(dimBlock), 0, 0, d_P[0],d_P[1],d_P[2],d_P[3], d_P[4],d_P[5],d_P[6],d_P[7], om1,om2d6, pitchX,pitchXY,pitchXY*BS_Z,BS_X,BS_XY); hipDeviceSynchronize(); ErrorCount+=1-checkCUDAError("cuda kernel running: KDLaplaceB"); if(QGrid>0) { hipLaunchKernelGGL(( KDPoissonQB), dim3(QGrid), dim3(Qblock), 0, 0, ); hipDeviceSynchronize(); ErrorCount+=1-checkCUDAError("cuda kernel running: KDPoissonQB"); } if(DBGrid>0) { hipLaunchKernelGGL(( KDPoissonDBB), dim3(DBGrid), dim3(DBblock), 0, 0, d_P[0],d_P[1],d_P[2],d_P[3], d_P[4],d_P[5],d_P[6],d_P[7], om2d6, pitchX,pitchXY); hipDeviceSynchronize(); ErrorCount+=1-checkCUDAError("cuda kernel running: KDPoissonDBB"); } hipLaunchKernelGGL(( KDLaplaceW), dim3(dimGrid), dim3(dimBlock), 0, 0, d_P[0],d_P[1],d_P[2],d_P[3], d_P[4],d_P[5],d_P[6],d_P[7], om1,om2d6, pitchX,pitchXY,pitchXY*BS_Z,BS_X,BS_XY); hipDeviceSynchronize(); ErrorCount+=1-checkCUDAError("cuda kernel running: KDLaplaceW"); if(QGrid>0) { hipLaunchKernelGGL(( KDPoissonQW), dim3(QGrid), dim3(Qblock), 0, 0, ); hipDeviceSynchronize(); ErrorCount+=1-checkCUDAError("cuda kernel running: KDPoissonQW"); } if(DBGrid>0) { hipLaunchKernelGGL(( KDPoissonDBW), dim3(DBGrid), dim3(DBblock), 0, 0, d_P[0],d_P[1],d_P[2],d_P[3], d_P[4],d_P[5],d_P[6],d_P[7],om2d6, pitchX,pitchXY); hipDeviceSynchronize(); ErrorCount+=1-checkCUDAError("cuda kernel running: KDPoissonDBW"); } if(iteration%PS->ConvergenceCheck==0) { double OldTotalEnergy=totalEnergy; double totalChange; double relativeChange; double ConvFac; totalEnergy=0.0; for(i=0;i<8;i++) { hipMemcpy(PS->P[i], d_P[i], sizeGS_XYZsplit, hipMemcpyDeviceToHost); for(j=0;j<PS->Qnum[i];j++) 
{ dtmp1=double(PS->P[i][PS->Qpos[i][j]])*double(PS->Q[i][j])/double(PS->Qmult[i][j]); totalEnergy+=dtmp1; } } totalEnergy=totalEnergy/(fpoh*2.0); totalChange=totalEnergy-OldTotalEnergy; relativeChange=totalChange/totalEnergy; printf("<PoissonIterations Nit=\"%8d\" E=\"%20.16e\" dE=\"%.4e\" rel.E=\"%.4e\" ConvFac=\"%.4e\"/>\n", iteration, totalEnergy, totalChange, relativeChange,ConvFac); if(PS->Tolerance!=0.0) { if(fabs(relativeChange)<=PS->Tolerance) { printf("Solver has reached the requiered tolerance level\n"); break; } } } if(ErrorCount) break; } GetCUDADevStat(); ErrorCount+=1-checkCUDAError("cuda kernel running"); hipEventRecord( stop, 0 ); hipEventSynchronize( stop ); hipEventElapsedTime( &time, start, stop ); printf("Time on iterations: %e\n",time); time/=1000;//time in s printf("\tfor [%d,%d,%d] load store cycle: %f iter/s\n",GS_X,GS_Y,GS_Z,double(PS->MaxIterations)/time); //printf("block QGridQ time iter/s %d %d %g %g\n",Qblock,QGrid,time,double(PS->MaxIterations)/time); //copy from device lin-array for(i=0;i<8;i++) hipMemcpy(PS->P[i], d_P[i], sizeGS_XYZsplit, hipMemcpyDeviceToHost); //free stuff for(i=0;i<8;i++) { if(PS->DielBordNum[i]>0) { for(k=0;k<6;k++) { hipFree(d_DielMult[i*6+k]); } hipFree(d_DBPos[i]); } } for(i=0;i<8;i++) { if(PS->Qnum[i]>0) { hipFree(d_Qmult[i]); hipFree(d_Qpos[i]); hipFree(d_Q[i]); } } for(i=0;i<8;i++) hipFree(d_P[i]); StopClockWMes0("GPU"); GetCUDADevStat(); if(ErrorCount) { printf("During GPU accelerated calculations found %d errors\n",ErrorCount); return 0; } return 1; } #endif
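// --- Illustrative sketch (not part of the solver above) ---
// A simplified sketch of the successive over-relaxation (SOR) step that KDLaplaceB /
// KDLaplaceW implement: phi_new = (1 - omega) * phi_old + (omega / 6) * (sum of the
// six axis neighbours), which is exactly the om1 = 1 - Relaxation and
// om2d6 = Relaxation / 6 factors set up in DoPSolverOnCudaDouble. Assumptions for the
// sketch: a plain row-major grid with a one-cell boundary layer, no 8-way sub-lattice
// split, and no pitched padding; "color" selects the red or black checkerboard half so
// the two half-sweeps can run as alternating kernel launches, as in the loop above.
__global__ void sor_redblack_sketch(double* phi, int nx, int ny, int nz,
                                    double omega, int color) {
  int x = blockIdx.x * blockDim.x + threadIdx.x + 1;   // skip the boundary layer
  int y = blockIdx.y * blockDim.y + threadIdx.y + 1;
  int z = blockIdx.z * blockDim.z + threadIdx.z + 1;
  if (x >= nx - 1 || y >= ny - 1 || z >= nz - 1) return;
  if (((x + y + z) & 1) != color) return;              // update one checkerboard color
  int i = x + y * nx + z * nx * ny;
  double nb = phi[i - 1]       + phi[i + 1]            // x neighbours
            + phi[i - nx]      + phi[i + nx]           // y neighbours
            + phi[i - nx * ny] + phi[i + nx * ny];     // z neighbours
  phi[i] = (1.0 - omega) * phi[i] + (omega / 6.0) * nb;
}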
16940be8a27eff55c486a44e912757baaf6b2132.cu
#ifndef _CUDADOUBLE_SOLVER_CU_ #define _CUDADOUBLE_SOLVER_CU_ #include <stdio.h> #include <math.h> #include <time.h> #include <sys/time.h> #include <stdlib.h> #include "pnpstructs.h" //#include "pnpdebug.h" #define DefClock0 clock_t time0;timeval tvtime0,tvtime1; #define StartClock0 time0=clock ();gettimeofday(&tvtime0,NULL); #define StopClock0 gettimeofday(&tvtime1,NULL);DbgPrint0("Time : %g s(CPU Time) %g s(Wall Time)\n",((double)(clock ()-time0))/CLOCKS_PER_SEC,double(tvtime1.tv_sec)+(double(tvtime1.tv_usec)/1000000.0)-double(tvtime0.tv_sec)-(double(tvtime0.tv_usec)/1000000.0)); #define StopClockWMes0(Massege) gettimeofday(&tvtime1,NULL);printf("Time for %s is %.5g s (CPU Time) %g s(Wall Time)\n",(Massege),((double)(clock ()-time0))/CLOCKS_PER_SEC,double(tvtime1.tv_sec)+(double(tvtime1.tv_usec)/1000000.0)-double(tvtime0.tv_sec)-(double(tvtime0.tv_usec)/1000000.0)); #define FMUL __dmul_rn #define FADD __dadd_rn #define FMAF __dmaf_rn __constant__ double dc_om1; __constant__ double dc_om2d6; __constant__ double* dc_P[8]; __constant__ int dc_Qnum[8]; __constant__ double* dc_Q[8]; __constant__ double* dc_Qmult[8]; __constant__ int* dc_Qpos[8]; __constant__ int dc_DBnum[8]; __constant__ double* dc_DielMult[48]; __constant__ int* dc_DBpos[8]; #define dc_lookupVirGridSIZE 1024 __constant__ int dc_lookupVirGrid[dc_lookupVirGridSIZE]; __global__ void KDLaplaceB(double* d_P0,double* d_P1,double* d_P2,double* d_P3, double* d_P4,double* d_P5,double* d_P6,double* d_P7, const double om1,const double om2d6, const int pitchX,const int pitchXY,const int pitchXY_BS_Z,const int BS_X,const int BS_XY) { // int VirXblock = (blockIdx.x%VirGridX); // int VirYblock = (blockIdx.x/VirGridX); // int VirZblock = blockIdx.y; // // int tx = VirXblock*BS_X + threadIdx.x; // int ty = VirYblock*BS_Y + threadIdx.y+1; // int tz = VirZblock*BS_Z + threadIdx.z+1; // // int i=tx+ty*pitchX+tz*pitchXY; int t=threadIdx.x+threadIdx.y*pitchX+threadIdx.z*pitchXY; int i=dc_lookupVirGrid[blockIdx.x] + blockIdx.y*pitchXY_BS_Z + t; t=threadIdx.x+threadIdx.y*BS_X+threadIdx.z*BS_XY; double xP0,xP3,xP5,xP6; double yP0,yP3,yP5,yP6; double zP0,zP3,zP5,zP6; __shared__ double shP[256]; //do over P1 shP[t]=d_P1[i]; __syncthreads(); zP5=FADD(shP[t],d_P1[i+pitchXY]); yP3=FADD(shP[t],d_P1[i+pitchX]); if(threadIdx.x!=0) xP0=FADD(shP[t],shP[t-1]); else xP0=FADD(shP[t],d_P1[i-1]); __syncthreads(); //do over P2 shP[t]=d_P2[i]; __syncthreads(); zP6=FADD(shP[t],d_P2[i+pitchXY]); yP0=FADD(shP[t],d_P2[i-pitchX]); if(threadIdx.x!=blockDim.x-1) xP3=FADD(shP[t],shP[t+1]); else xP3=FADD(shP[t],d_P2[i+1]); __syncthreads(); //do over P4 shP[t]=d_P4[i]; __syncthreads(); zP0=FADD(shP[t],d_P4[i-pitchXY]); yP6=FADD(shP[t],d_P4[i+pitchX]); if(threadIdx.x!=blockDim.x-1) xP5=FADD(shP[t],shP[t+1]); else xP5=FADD(shP[t],d_P4[i+1]); __syncthreads(); //do over P7 shP[t]=d_P7[i]; __syncthreads(); zP3=FADD(shP[t],d_P7[i-pitchXY]); yP5=FADD(shP[t],d_P7[i-pitchX]); if(threadIdx.x!=0) xP6=FADD(shP[t],shP[t-1]); else xP6=FADD(shP[t],d_P7[i-1]); d_P0[i]=FADD(FMUL(om1,d_P0[i]),FMUL(om2d6,FADD(FADD(xP0,yP0),zP0))); d_P3[i]=FADD(FMUL(om1,d_P3[i]),FMUL(om2d6,FADD(FADD(xP3,yP3),zP3))); d_P5[i]=FADD(FMUL(om1,d_P5[i]),FMUL(om2d6,FADD(FADD(xP5,yP5),zP5))); d_P6[i]=FADD(FMUL(om1,d_P6[i]),FMUL(om2d6,FADD(FADD(xP6,yP6),zP6))); } __global__ void KDLaplaceW(double* d_P0,double* d_P1,double* d_P2,double* d_P3, double* d_P4,double* d_P5,double* d_P6,double* d_P7, const double om1,const double om2d6, const int pitchX,const int pitchXY,const int pitchXY_BS_Z,const int BS_X,const int BS_XY) { 
// int VirXblock = (blockIdx.x%VirGridX); // int VirYblock = (blockIdx.x/VirGridX); // int VirZblock = blockIdx.y; // // int tx = VirXblock*BS_X + threadIdx.x; // int ty = VirYblock*BS_Y + threadIdx.y+1; // int tz = VirZblock*BS_Z + threadIdx.z+1; // // int i=tx+ty*pitchX+tz*pitchXY; //int i=dc_lookupVirGrid[blockIdx.x] + blockIdx.y*pitchXY_BS_Z + threadIdx.x+threadIdx.y*pitchX+threadIdx.z*pitchXY; int t=threadIdx.x+threadIdx.y*pitchX+threadIdx.z*pitchXY; int i=dc_lookupVirGrid[blockIdx.x] + blockIdx.y*pitchXY_BS_Z + t; t=threadIdx.x+threadIdx.y*BS_X+threadIdx.z*BS_XY; double xP1,xP2,xP4,xP7; double yP1,yP2,yP4,yP7; double zP1,zP2,zP4,zP7; __shared__ double shP[256]; //do dc_P[0] shP[t]=d_P0[i]; __syncthreads(); zP4=FADD(shP[t],d_P0[i+pitchXY]); yP2=FADD(shP[t],d_P0[i+pitchX]); if(threadIdx.x!=blockDim.x-1) xP1=FADD(shP[t],shP[t+1]); else xP1=FADD(shP[t],d_P0[i+1]); __syncthreads(); //do d_P[3] shP[t]=d_P3[i]; __syncthreads(); zP7=FADD(shP[t],d_P3[i+pitchXY]); yP1=FADD(shP[t],d_P3[i-pitchX]); if(threadIdx.x!=0) xP2=FADD(shP[t],shP[t-1]); else xP2=FADD(shP[t],d_P3[i-1]); __syncthreads(); //do d_P[5] shP[t]=d_P5[i]; __syncthreads(); zP1=FADD(shP[t],d_P5[i-pitchXY]); yP7=FADD(shP[t],d_P5[i+pitchX]); if(threadIdx.x!=0) xP4=FADD(shP[t],shP[t-1]); else xP4=FADD(shP[t],d_P5[i-1]); __syncthreads(); //do d_P6 shP[t]=d_P6[i]; __syncthreads(); zP2=FADD(shP[t],d_P6[i-pitchXY]); yP4=FADD(shP[t],d_P6[i-pitchX]); if(threadIdx.x!=blockDim.x-1) xP7=FADD(shP[t],shP[t+1]); else xP7=FADD(shP[t],d_P6[i+1]); //d_P6[i]=FADD(FMUL(om1,d_P6[i]),FMUL(om2d6,FADD(FADD(xP6,yP6),zP6))); d_P1[i]=FADD(FMUL(om1,d_P1[i]),FMUL(om2d6,FADD(FADD(xP1,yP1),zP1))); d_P2[i]=FADD(FMUL(om1,d_P2[i]),FMUL(om2d6,FADD(FADD(xP2,yP2),zP2))); d_P4[i]=FADD(FMUL(om1,d_P4[i]),FMUL(om2d6,FADD(FADD(xP4,yP4),zP4))); d_P7[i]=FADD(FMUL(om1,d_P7[i]),FMUL(om2d6,FADD(FADD(xP7,yP7),zP7))); } __global__ void KDPoissonQB() { int t=threadIdx.x+blockIdx.x*blockDim.x; int i; if(t<dc_Qnum[0]) { i=dc_Qpos[0][t]; dc_P[0][i]=FADD(dc_P[0][i],dc_Q[0][t]); } if(t<dc_Qnum[3]) { i=dc_Qpos[3][t]; dc_P[3][i]=FADD(dc_P[3][i],dc_Q[3][t]); } if(t<dc_Qnum[5]) { i=dc_Qpos[5][t]; dc_P[5][i]=FADD(dc_P[5][i],dc_Q[5][t]); } if(t<dc_Qnum[6]) { i=dc_Qpos[6][t]; dc_P[6][i]=FADD(dc_P[6][i],dc_Q[6][t]); } } __global__ void KDPoissonQW() { int t=threadIdx.x+blockIdx.x*blockDim.x; int i; if(t<dc_Qnum[1]) { i=dc_Qpos[1][t]; dc_P[1][i]=FADD(dc_P[1][i],dc_Q[1][t]); } if(t<dc_Qnum[2]) { i=dc_Qpos[2][t]; dc_P[2][i]=FADD(dc_P[2][i],dc_Q[2][t]); } if(t<dc_Qnum[4]) { i=dc_Qpos[4][t]; dc_P[4][i]=FADD(dc_P[4][i],dc_Q[4][t]); } if(t<dc_Qnum[7]) { i=dc_Qpos[7][t]; dc_P[7][i]=FADD(dc_P[7][i],dc_Q[7][t]); } } __global__ void KDPoissonDBB(double* d_P0,double* d_P1,double* d_P2,double* d_P3, double* d_P4,double* d_P5,double* d_P6,double* d_P7, const double om2d6, const int pitchX,const int pitchXY) { int t=threadIdx.x+blockIdx.x*blockDim.x; int i; double xP,xM,yP,yM,zP,zM; if(t<dc_DBnum[0]) { i=dc_DBpos[0][t]; //P0 x xP=FMUL(dc_DielMult[PlusX][t],d_P1[i]); xM=FMUL(dc_DielMult[MinusX][t],d_P1[i-1]); //P0 y yP=FMUL(dc_DielMult[PlusY][t],d_P2[i]); yM=FMUL(dc_DielMult[MinusY][t],d_P2[i-pitchX]); //P0 z zP=FMUL(dc_DielMult[PlusZ][t],d_P4[i]); zM=FMUL(dc_DielMult[MinusZ][t],d_P4[i-pitchXY]); xP=FADD(xM,xP); yP=FADD(yM,yP); zP=FADD(zM,zP); dc_P[0][i]=FADD(FADD(xP,yP),FADD(zP,dc_P[0][i])); } if(t<dc_DBnum[3]) { i=dc_DBpos[3][t]; //P3 x xM=FMUL(dc_DielMult[3*6+MinusX][t],d_P2[i]); xP=FMUL(dc_DielMult[3*6+PlusX][t],d_P2[i+1]); //P3 y yM=FMUL(dc_DielMult[3*6+MinusY][t],d_P1[i]); 
yP=FMUL(dc_DielMult[3*6+PlusY][t],d_P1[i+pitchX]); //P3 z zP=FMUL(dc_DielMult[3*6+PlusZ][t],d_P7[i]); zM=FMUL(dc_DielMult[3*6+MinusZ][t],d_P7[i-pitchXY]); xP=FADD(xM,xP); yP=FADD(yM,yP); zP=FADD(zM,zP); dc_P[3][i]=FADD(FADD(xP,yP),FADD(zP,dc_P[3][i])); } if(t<dc_DBnum[5]) { i=dc_DBpos[5][t]; //P5 x xM=FMUL(dc_DielMult[5*6+MinusX][t],d_P4[i]); xP=FMUL(dc_DielMult[5*6+PlusX][t],d_P4[i+1]); //P5 y yP=FMUL(dc_DielMult[5*6+PlusY][t],d_P7[i]); yM=FMUL(dc_DielMult[5*6+MinusY][t],d_P7[i-pitchX]); //P5 z zM=FMUL(dc_DielMult[5*6+MinusZ][t],d_P1[i]); zP=FMUL(dc_DielMult[5*6+PlusZ][t],d_P1[i+pitchXY]); xP=FADD(xM,xP); yP=FADD(yM,yP); zP=FADD(zM,zP); dc_P[5][i]=FADD(FADD(xP,yP),FADD(zP,dc_P[5][i])); } if(t<dc_DBnum[6]) { i=dc_DBpos[6][t]; //P6 x xP=FMUL(dc_DielMult[6*6+PlusX][t],d_P7[i]); xM=FMUL(dc_DielMult[6*6+MinusX][t],d_P7[i-1]); //P6 y yM=FMUL(dc_DielMult[6*6+MinusY][t],d_P4[i]); yP=FMUL(dc_DielMult[6*6+PlusY][t],d_P4[i+pitchX]); //P6 z zM=FMUL(dc_DielMult[6*6+MinusZ][t],d_P2[i]); zP=FMUL(dc_DielMult[6*6+PlusZ][t],d_P2[i+pitchXY]); xP=FADD(xM,xP); yP=FADD(yM,yP); zP=FADD(zM,zP); dc_P[6][i]=FADD(FADD(xP,yP),FADD(zP,dc_P[6][i])); } } __global__ void KDPoissonDBW(double* d_P0,double* d_P1,double* d_P2,double* d_P3, double* d_P4,double* d_P5,double* d_P6,double* d_P7, const double om2d6, const int pitchX,const int pitchXY) { int t=threadIdx.x+blockIdx.x*blockDim.x; int i; double xP,xM,yP,yM,zP,zM; if(t<dc_DBnum[1]) { i=dc_DBpos[1][t]; //P1 x xM=FMUL(dc_DielMult[1*6+MinusX][t],d_P0[i]); xP=FMUL(dc_DielMult[1*6+PlusX][t],d_P0[i+1]); //P1 y yP=FMUL(dc_DielMult[1*6+PlusY][t],d_P3[i]); yM=FMUL(dc_DielMult[1*6+MinusY][t],d_P3[i-pitchX]); //P1 z zP=FMUL(dc_DielMult[1*6+PlusZ][t],d_P5[i]); zM=FMUL(dc_DielMult[1*6+MinusZ][t],d_P5[i-pitchXY]); xP=FADD(xM,xP); yP=FADD(yM,yP); zP=FADD(zM,zP); dc_P[1][i]=FADD(FADD(xP,yP),FADD(zP,dc_P[1][i])); } if(t<dc_DBnum[2]) { i=dc_DBpos[2][t]; //P2 x xP=FMUL(dc_DielMult[2*6+PlusX][t],d_P3[i]); xM=FMUL(dc_DielMult[2*6+MinusX][t],d_P3[i-1]); //P2 y yM=FMUL(dc_DielMult[2*6+MinusY][t],d_P0[i]); yP=FMUL(dc_DielMult[2*6+PlusY][t],d_P0[i+pitchX]); //P2 z zP=FMUL(dc_DielMult[2*6+PlusZ][t],d_P6[i]); zM=FMUL(dc_DielMult[2*6+MinusZ][t],d_P6[i-pitchXY]); xP=FADD(xM,xP); yP=FADD(yM,yP); zP=FADD(zM,zP); dc_P[2][i]=FADD(FADD(xP,yP),FADD(zP,dc_P[2][i])); } if(t<dc_DBnum[4]) { i=dc_DBpos[4][t]; //P4x xP=FMUL(dc_DielMult[4*6+PlusX][t],d_P5[i]); xM=FMUL(dc_DielMult[4*6+MinusX][t],d_P5[i-1]); //P4 y yP=FMUL(dc_DielMult[4*6+PlusY][t],d_P6[i]); yM=FMUL(dc_DielMult[4*6+MinusY][t],d_P6[i-pitchX]); //P4 z zM=FMUL(dc_DielMult[4*6+MinusZ][t],d_P0[i]); zP=FMUL(dc_DielMult[4*6+PlusZ][t],d_P0[i+pitchXY]); xP=FADD(xM,xP); yP=FADD(yM,yP); zP=FADD(zM,zP); dc_P[4][i]=FADD(FADD(xP,yP),FADD(zP,dc_P[4][i])); } if(t<dc_DBnum[7]) { i=dc_DBpos[7][t]; //P7 x xM=FMUL(dc_DielMult[7*6+MinusX][t],d_P6[i]); xP=FMUL(dc_DielMult[7*6+PlusX][t],d_P6[i+1]); //P7 y yM=FMUL(dc_DielMult[7*6+MinusY][t],d_P5[i]); yP=FMUL(dc_DielMult[7*6+PlusY][t],d_P5[i+pitchX]); //P7 z zM=FMUL(dc_DielMult[7*6+MinusZ][t],d_P3[i]); zP=FMUL(dc_DielMult[7*6+PlusZ][t],d_P3[i+pitchXY]); xP=FADD(xM,xP); yP=FADD(yM,yP); zP=FADD(zM,zP); dc_P[7][i]=FADD(FADD(xP,yP),FADD(zP,dc_P[7][i])); } } __global__ void KDPoissonQnCalcEB() { int t=threadIdx.x+blockIdx.x*blockDim.x; int i; if(t<dc_Qnum[0]) { i=dc_Qpos[0][t]; dc_P[0][i]=FADD(dc_P[0][i],dc_Q[0][t]); } if(t<dc_Qnum[3]) { i=dc_Qpos[3][t]; dc_P[3][i]=FADD(dc_P[3][i],dc_Q[3][t]); } if(t<dc_Qnum[5]) { i=dc_Qpos[5][t]; dc_P[5][i]=FADD(dc_P[5][i],dc_Q[5][t]); } if(t<dc_Qnum[6]) { i=dc_Qpos[6][t]; 
dc_P[6][i]=FADD(dc_P[6][i],dc_Q[6][t]); } } __global__ void KDPoissonQnCalcW() { int t=threadIdx.x+blockIdx.x*blockDim.x; int i; if(t<dc_Qnum[1]) { i=dc_Qpos[1][t]; dc_P[1][i]=FADD(dc_P[1][i],dc_Q[1][t]); } if(t<dc_Qnum[2]) { i=dc_Qpos[2][t]; dc_P[2][i]=FADD(dc_P[2][i],dc_Q[2][t]); } if(t<dc_Qnum[4]) { i=dc_Qpos[4][t]; dc_P[4][i]=FADD(dc_P[4][i],dc_Q[4][t]); } if(t<dc_Qnum[7]) { i=dc_Qpos[7][t]; dc_P[7][i]=FADD(dc_P[7][i],dc_Q[7][t]); } } int checkCUDAError(const char* msg); int GetCUDADevStat(); extern "C" int DoPSolverOnCudaDouble(PoissonSolverOnCudaParamStruct* CudaParm,PSolverOnCudaStructDouble* PS) { GetCUDADevStat(); int i,k; printf("dimBlock [%d,%d,%d]\n",CudaParm->BS_X,CudaParm->BS_Y,CudaParm->BS_Z); DefClock0; cudaEvent_t start, stop; float time; cudaEventCreate(&start); cudaEventCreate(&stop); // Allocate vectors in device memory int ErrorCount=0; int GS_X=PS->GS[0]; int GS_Y=PS->GS[1]; int GS_Z=PS->GS[2]; int GS_XY=GS_X*GS_Y; int GS_XYZ=GS_X*GS_Y*GS_Z; double om1 = 1.0-PS->Relaxation; double om2d6 = PS->Relaxation/6.0; cudaMemcpyToSymbol(dc_om1, &om1, sizeof(double), 0, cudaMemcpyHostToDevice ); cudaMemcpyToSymbol(dc_om2d6, &om2d6, sizeof(double), 0, cudaMemcpyHostToDevice ); printf("GS=[%d,%d,%d]=%d\n",GS_X,GS_Y,GS_Z,GS_XYZ); //Start Clock for GPU StartClock0; int BS_X=CudaParm->BS_X; int BS_Y=CudaParm->BS_Y; int BS_Z=CudaParm->BS_Z; int BS_XY=BS_X*BS_Y; int BS_XYZ=BS_X*BS_Y*BS_Z; dim3 dimBlock(BS_X,BS_Y,BS_Z); dim3 dimGridVirt(GS_X/BS_X/2, GS_Y/BS_Y/2, GS_Z/BS_Z/2); //d_P* is store in pitched array x has 16 for CUDA and Y/Z is +2 for BC dim3 spltGSWBC(PS->spltGSWBC[0],PS->spltGSWBC[1],PS->spltGSWBC[2]); dim3 dimGrid(dimGridVirt.x*dimGridVirt.y, dimGridVirt.z,1); int pitchX=spltGSWBC.x; int pitchXY=spltGSWBC.x*spltGSWBC.y; printf("dimBlock [%d,%d,%d]\n",dimBlock.x,dimBlock.y,dimBlock.z); printf("dimGrid [%d,%d,%d]\n",dimGrid.x,dimGrid.y,dimGrid.z); printf("dimGridVirt [%d,%d,%d]\n",dimGridVirt.x,dimGridVirt.y,dimGridVirt.z); printf("spltGSWBC [%d,%d,%d]\n",spltGSWBC.x,spltGSWBC.y,spltGSWBC.z); printf("Total number of threads %d\n",dimGrid.x*dimGrid.y*dimGrid.z*dimBlock.x*dimBlock.y*dimBlock.z); //fill dc_lookupVirGrid int h_lookupVirGrid[dc_lookupVirGridSIZE]; if(dc_lookupVirGridSIZE<dimGrid.x) { printf("ERROR: dc_lookupVirGridSIZE is smaller then dimGrid.x, make it at least %d\n",dimGrid.x); return 1; } int VirXblock,VirYblock; for(i=0;i<dimGrid.x;i++) { VirXblock = (i%dimGridVirt.x); VirYblock = (i/dimGridVirt.x); h_lookupVirGrid[i]=VirXblock*BS_X+(VirYblock*BS_Y + 1)*pitchX+pitchXY; } cudaMemcpyToSymbol(dc_lookupVirGrid, h_lookupVirGrid, dimGrid.x*sizeof(int), 0, cudaMemcpyHostToDevice ); int GS_XYZsplit = spltGSWBC.x*spltGSWBC.y*spltGSWBC.z; int sizeGS_XYZsplit = GS_XYZsplit*sizeof(double); //allocate and copy to device lin-array double* d_P[8]; for(i=0;i<8;i++) cudaMalloc((void**)&d_P[i], sizeGS_XYZsplit); for(i=0;i<8;i++) cudaMemcpy(d_P[i], PS->P[i], sizeGS_XYZsplit, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(dc_P, d_P, 8*sizeof(double*), 0, cudaMemcpyHostToDevice ); //charges double* d_Q[8]; int* d_Qpos[8]; double* d_Qmult[8]; int Qmax=0; for(i=0;i<8;i++) { d_Q[i]=NULL; d_Qpos[i]=NULL; d_Qmult[i]=NULL; if(PS->Qnum[i]>Qmax)Qmax=PS->Qnum[i]; if(PS->Qnum[i]>0) { cudaMalloc((void**)&d_Q[i], PS->Qnum[i]*sizeof(double)); cudaMalloc((void**)&d_Qpos[i], PS->Qnum[i]*sizeof(int)); cudaMalloc((void**)&d_Qmult[i], PS->Qnum[i]*sizeof(int)); cudaMemcpy(d_Q[i], PS->Q[i], PS->Qnum[i]*sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(d_Qpos[i], PS->Qpos[i], PS->Qnum[i]*sizeof(int), 
cudaMemcpyHostToDevice); cudaMemcpy(d_Qmult[i], PS->Qmult[i], PS->Qnum[i]*sizeof(int), cudaMemcpyHostToDevice); } } cudaMemcpyToSymbol(dc_Q, d_Q, 8*sizeof(double*), 0, cudaMemcpyHostToDevice ); cudaMemcpyToSymbol(dc_Qnum, PS->Qnum, 8*sizeof(int), 0, cudaMemcpyHostToDevice ); cudaMemcpyToSymbol(dc_Qpos, d_Qpos, 8*sizeof(int*), 0, cudaMemcpyHostToDevice ); cudaMemcpyToSymbol(dc_Qmult, d_Qmult, 8*sizeof(int*), 0, cudaMemcpyHostToDevice ); int Qblock=CudaParm->Qblock; int QGrid=Qmax/Qblock; if(Qmax%Qblock!=0)QGrid++; printf("Qmax=%d Qblock=%d QGrid=%d\n",Qmax,Qblock,QGrid); //Diel Border int d_DBNum[8]; double* d_DielMult[48]; int* d_DBPos[8]; int DBmax=0; for(i=0;i<8;i++) { d_DBPos[i]=NULL; for(k=0;k<6;k++) d_DielMult[i*6+k]=NULL; if(PS->DielBordNum[i]>DBmax)DBmax=PS->DielBordNum[i]; if(PS->DielBordNum[i]>0) { cudaMalloc((void**)&d_DBPos[i], PS->DielBordNum[i]*sizeof(int)); cudaMemcpy(d_DBPos[i], PS->DielBordPos[i], PS->DielBordNum[i]*sizeof(int), cudaMemcpyHostToDevice); for(k=0;k<6;k++) { cudaMalloc((void**)&d_DielMult[i*6+k], PS->DielBordNum[i]*sizeof(double)); cudaMemcpy(d_DielMult[i*6+k], PS->DielMult[i][k], PS->DielBordNum[i]*sizeof(double), cudaMemcpyHostToDevice); } } } cudaMemcpyToSymbol(dc_DBnum, PS->DielBordNum, 8*sizeof(int), 0, cudaMemcpyHostToDevice ); cudaMemcpyToSymbol(dc_DBpos, d_DBPos, 8*sizeof(int*), 0, cudaMemcpyHostToDevice ); cudaMemcpyToSymbol(dc_DielMult, d_DielMult, 48*sizeof(double*), 0, cudaMemcpyHostToDevice ); int DBblock=CudaParm->DBblock; int DBGrid=DBmax/DBblock; if(DBmax%DBblock!=0)DBGrid++; printf("DBmax=%d DBblock=%d DBGrid=%d\n",DBmax,DBblock,DBGrid); //do loop cudaEventRecord( start, 0 ); int j; double totalEnergy,dtmp1; double fpoh=4.0*M_PI*PS->GridScale; GetCUDADevStat(); for(int iteration=1;iteration<=PS->MaxIterations;iteration++) {//pitchXY*BS_Z KDLaplaceB<<<dimGrid, dimBlock>>>(d_P[0],d_P[1],d_P[2],d_P[3], d_P[4],d_P[5],d_P[6],d_P[7], om1,om2d6, pitchX,pitchXY,pitchXY*BS_Z,BS_X,BS_XY); cudaThreadSynchronize(); ErrorCount+=1-checkCUDAError("cuda kernel running: KDLaplaceB"); if(QGrid>0) { KDPoissonQB<<<QGrid, Qblock>>>(); cudaThreadSynchronize(); ErrorCount+=1-checkCUDAError("cuda kernel running: KDPoissonQB"); } if(DBGrid>0) { KDPoissonDBB<<<DBGrid, DBblock>>>(d_P[0],d_P[1],d_P[2],d_P[3], d_P[4],d_P[5],d_P[6],d_P[7], om2d6, pitchX,pitchXY); cudaThreadSynchronize(); ErrorCount+=1-checkCUDAError("cuda kernel running: KDPoissonDBB"); } KDLaplaceW<<<dimGrid, dimBlock>>>(d_P[0],d_P[1],d_P[2],d_P[3], d_P[4],d_P[5],d_P[6],d_P[7], om1,om2d6, pitchX,pitchXY,pitchXY*BS_Z,BS_X,BS_XY); cudaThreadSynchronize(); ErrorCount+=1-checkCUDAError("cuda kernel running: KDLaplaceW"); if(QGrid>0) { KDPoissonQW<<<QGrid, Qblock>>>(); cudaThreadSynchronize(); ErrorCount+=1-checkCUDAError("cuda kernel running: KDPoissonQW"); } if(DBGrid>0) { KDPoissonDBW<<<DBGrid, DBblock>>>(d_P[0],d_P[1],d_P[2],d_P[3], d_P[4],d_P[5],d_P[6],d_P[7],om2d6, pitchX,pitchXY); cudaThreadSynchronize(); ErrorCount+=1-checkCUDAError("cuda kernel running: KDPoissonDBW"); } if(iteration%PS->ConvergenceCheck==0) { double OldTotalEnergy=totalEnergy; double totalChange; double relativeChange; double ConvFac; totalEnergy=0.0; for(i=0;i<8;i++) { cudaMemcpy(PS->P[i], d_P[i], sizeGS_XYZsplit, cudaMemcpyDeviceToHost); for(j=0;j<PS->Qnum[i];j++) { dtmp1=double(PS->P[i][PS->Qpos[i][j]])*double(PS->Q[i][j])/double(PS->Qmult[i][j]); totalEnergy+=dtmp1; } } totalEnergy=totalEnergy/(fpoh*2.0); totalChange=totalEnergy-OldTotalEnergy; relativeChange=totalChange/totalEnergy; printf("<PoissonIterations Nit=\"%8d\" 
E=\"%20.16e\" dE=\"%.4e\" rel.E=\"%.4e\" ConvFac=\"%.4e\"/>\n", iteration, totalEnergy, totalChange, relativeChange,ConvFac); if(PS->Tolerance!=0.0) { if(fabs(relativeChange)<=PS->Tolerance) { printf("Solver has reached the requiered tolerance level\n"); break; } } } if(ErrorCount) break; } GetCUDADevStat(); ErrorCount+=1-checkCUDAError("cuda kernel running"); cudaEventRecord( stop, 0 ); cudaEventSynchronize( stop ); cudaEventElapsedTime( &time, start, stop ); printf("Time on iterations: %e\n",time); time/=1000;//time in s printf("\tfor [%d,%d,%d] load store cycle: %f iter/s\n",GS_X,GS_Y,GS_Z,double(PS->MaxIterations)/time); //printf("block QGridQ time iter/s %d %d %g %g\n",Qblock,QGrid,time,double(PS->MaxIterations)/time); //copy from device lin-array for(i=0;i<8;i++) cudaMemcpy(PS->P[i], d_P[i], sizeGS_XYZsplit, cudaMemcpyDeviceToHost); //free stuff for(i=0;i<8;i++) { if(PS->DielBordNum[i]>0) { for(k=0;k<6;k++) { cudaFree(d_DielMult[i*6+k]); } cudaFree(d_DBPos[i]); } } for(i=0;i<8;i++) { if(PS->Qnum[i]>0) { cudaFree(d_Qmult[i]); cudaFree(d_Qpos[i]); cudaFree(d_Q[i]); } } for(i=0;i<8;i++) cudaFree(d_P[i]); StopClockWMes0("GPU"); GetCUDADevStat(); if(ErrorCount) { printf("During GPU accelerated calculations found %d errors\n",ErrorCount); return 0; } return 1; } #endif
83a7f3545dc90eb7eb366e31dbc7f4dd202567f1.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 2011-2015 Maxim Milakov * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "absolute_layer_tester_cuda.h" #include <hip/hip_runtime.h> #include "util_cuda.h" namespace nnforge { namespace cuda { __global__ void absolute_kernel( float4 * __restrict output, const float4 * __restrict input, int elem_count) { int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x; if (elem_id < elem_count) { float4 val = input[elem_id]; val.x = fabsf(val.x); val.y = fabsf(val.y); val.z = fabsf(val.z); val.w = fabsf(val.w); output[elem_id] = val; } } absolute_layer_tester_cuda::absolute_layer_tester_cuda() { } absolute_layer_tester_cuda::~absolute_layer_tester_cuda() { } void absolute_layer_tester_cuda::enqueue_forward_propagation( hipStream_t stream_id, cuda_linear_buffer_device::ptr output_buffer, const std::vector<cuda_linear_buffer_device::const_ptr>& schema_data, const std::vector<cuda_linear_buffer_device::const_ptr>& data, const std::vector<cuda_linear_buffer_device::const_ptr>& data_custom, const std::vector<cuda_linear_buffer_device::const_ptr>& input_buffers, const std::vector<cuda_linear_buffer_device::const_ptr>& persistent_working_data, cuda_linear_buffer_device::ptr temporary_working_fixed_buffer, cuda_linear_buffer_device::ptr temporary_working_per_entry_buffer, unsigned int entry_count) { int elem_count = (output_elem_count_per_entry * entry_count + 3) / 4; std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, elem_count); hipLaunchKernelGGL(( absolute_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, *output_buffer, *input_buffers[0], elem_count); } int absolute_layer_tester_cuda::get_input_index_layer_can_write() const { return 0; } } }
83a7f3545dc90eb7eb366e31dbc7f4dd202567f1.cu
/* * Copyright 2011-2015 Maxim Milakov * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "absolute_layer_tester_cuda.h" #include <cuda_runtime.h> #include "util_cuda.h" namespace nnforge { namespace cuda { __global__ void absolute_kernel( float4 * __restrict output, const float4 * __restrict input, int elem_count) { int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x; if (elem_id < elem_count) { float4 val = input[elem_id]; val.x = fabsf(val.x); val.y = fabsf(val.y); val.z = fabsf(val.z); val.w = fabsf(val.w); output[elem_id] = val; } } absolute_layer_tester_cuda::absolute_layer_tester_cuda() { } absolute_layer_tester_cuda::~absolute_layer_tester_cuda() { } void absolute_layer_tester_cuda::enqueue_forward_propagation( cudaStream_t stream_id, cuda_linear_buffer_device::ptr output_buffer, const std::vector<cuda_linear_buffer_device::const_ptr>& schema_data, const std::vector<cuda_linear_buffer_device::const_ptr>& data, const std::vector<cuda_linear_buffer_device::const_ptr>& data_custom, const std::vector<cuda_linear_buffer_device::const_ptr>& input_buffers, const std::vector<cuda_linear_buffer_device::const_ptr>& persistent_working_data, cuda_linear_buffer_device::ptr temporary_working_fixed_buffer, cuda_linear_buffer_device::ptr temporary_working_per_entry_buffer, unsigned int entry_count) { int elem_count = (output_elem_count_per_entry * entry_count + 3) / 4; std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, elem_count); absolute_kernel<<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>( *output_buffer, *input_buffers[0], elem_count); } int absolute_layer_tester_cuda::get_input_index_layer_can_write() const { return 0; } } }
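// --- Illustrative sketch (plain CUDA, not using the nnforge cuda_util helpers above) ---
// The float4-vectorised elementwise pattern used by absolute_kernel: the element count
// is rounded up to a multiple of 4 with (N + 3) / 4, each thread processes one float4,
// and the buffers are assumed to be allocated to a 16-byte multiple so the last vector
// load stays in bounds. The block size of 256 and the launcher name are assumptions.
#include <cuda_runtime.h>
#include <math.h>

__global__ void abs_float4_sketch(float4* out, const float4* in, int vec_count) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < vec_count) {
    float4 v = in[i];
    v.x = fabsf(v.x); v.y = fabsf(v.y); v.z = fabsf(v.z); v.w = fabsf(v.w);
    out[i] = v;                                    // four elements per thread
  }
}

void launch_abs(float* d_out, const float* d_in, int elem_count, cudaStream_t stream) {
  int vec_count = (elem_count + 3) / 4;            // ceil(elem_count / 4)
  int block = 256;
  int grid = (vec_count + block - 1) / block;
  abs_float4_sketch<<<grid, block, 0, stream>>>(
      reinterpret_cast<float4*>(d_out),
      reinterpret_cast<const float4*>(d_in), vec_count);
}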
8fb83b44df19a93ca6363114519c7563d9da0100.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //********************************************************// // CUDA SIFT extractor by Marten Bjorkman aka Celebrandil // //********************************************************// #include "cudautils.h" #include "cudaSiftD.h" #include "cudaSift.h" /////////////////////////////////////////////////////////////////////////////// // Kernel configuration /////////////////////////////////////////////////////////////////////////////// __constant__ int d_MaxNumPoints; __device__ unsigned int d_PointCounter[8*2+1]; __constant__ float d_ScaleDownKernel[5]; __constant__ float d_LowPassKernel[2*LOWPASS_R+1]; __constant__ float d_LaplaceKernel[8*12*16]; /////////////////////////////////////////////////////////////////////////////// // Lowpass filter and subsample image /////////////////////////////////////////////////////////////////////////////// __global__ void ScaleDownDenseShift(float *d_Result, float *d_Data, int width, int pitch, int height, int newpitch) { #define BW (SCALEDOWN_W+4) #define BH (SCALEDOWN_H+4) #define W2 (SCALEDOWN_W/2) #define H2 (SCALEDOWN_H/2) __shared__ float brows[BH*BW]; const int tx = threadIdx.x; const int ty = threadIdx.y; const int xp = blockIdx.x*SCALEDOWN_W + tx; const int yp = blockIdx.y*SCALEDOWN_H + ty; const float k0 = d_ScaleDownKernel[0]; const float k1 = d_ScaleDownKernel[1]; const float k2 = d_ScaleDownKernel[2]; const int xl = min(width-1, max(0, xp-2)); const int yl = min(height-1, max(0, yp-2)); if (xp<(width+4) && yp<(height+4)) { float v = d_Data[yl*pitch + xl]; brows[BW*ty + tx] = k0*(v + ShiftDown(v, 4)) + k1*(ShiftDown(v, 1) + ShiftDown(v, 3)) + k2*ShiftDown(v, 2); } __syncthreads(); const int xs = blockIdx.x*W2 + tx; const int ys = blockIdx.y*H2 + ty; if (tx<W2 && ty<H2 && xs<(width/2) && ys<(height/2)) { float *ptr = &brows[BW*(ty*2) + (tx*2)]; d_Result[ys*newpitch + xs] = k0*(ptr[0] + ptr[4*BW]) + k1*(ptr[1*BW] + ptr[3*BW]) + k2*ptr[2*BW]; } } __global__ void ScaleDownDense(float *d_Result, float *d_Data, int width, int pitch, int height, int newpitch) { #define BW (SCALEDOWN_W+4) #define BH (SCALEDOWN_H+4) #define W2 (SCALEDOWN_W/2) #define H2 (SCALEDOWN_H/2) __shared__ float irows[BH*BW]; __shared__ float brows[BH*W2]; const int tx = threadIdx.x; const int ty = threadIdx.y; const int xp = blockIdx.x*SCALEDOWN_W + tx; const int yp = blockIdx.y*SCALEDOWN_H + ty; const int xl = min(width-1, max(0, xp-2)); const int yl = min(height-1, max(0, yp-2)); const float k0 = d_ScaleDownKernel[0]; const float k1 = d_ScaleDownKernel[1]; const float k2 = d_ScaleDownKernel[2]; if (xp<(width+4) && yp<(height+4)) irows[BW*ty + tx] = d_Data[yl*pitch + xl]; __syncthreads(); if (yp<(height+4) && tx<W2) { float *ptr = &irows[BW*ty + 2*tx]; brows[W2*ty + tx] = k0*(ptr[0] + ptr[4]) + k1*(ptr[1] + ptr[3]) + k2*ptr[2]; } __syncthreads(); const int xs = blockIdx.x*W2 + tx; const int ys = blockIdx.y*H2 + ty; if (tx<W2 && ty<H2 && xs<(width/2) && ys<(height/2)) { float *ptr = &brows[W2*(ty*2) + tx]; d_Result[ys*newpitch + xs] = k0*(ptr[0] + ptr[4*W2]) + k1*(ptr[1*W2] + ptr[3*W2]) + k2*ptr[2*W2]; } } __global__ void ScaleDown(float *d_Result, float *d_Data, int width, int pitch, int height, int newpitch) { __shared__ float inrow[SCALEDOWN_W+4]; __shared__ float brow[5*(SCALEDOWN_W/2)]; __shared__ int yRead[SCALEDOWN_H+4]; __shared__ int yWrite[SCALEDOWN_H+4]; #define dx2 (SCALEDOWN_W/2) const int tx = threadIdx.x; const int tx0 = tx + 0*dx2; const int tx1 = tx + 1*dx2; const int tx2 = tx 
+ 2*dx2; const int tx3 = tx + 3*dx2; const int tx4 = tx + 4*dx2; const int xStart = blockIdx.x*SCALEDOWN_W; const int yStart = blockIdx.y*SCALEDOWN_H; const int xWrite = xStart/2 + tx; float k0 = d_ScaleDownKernel[0]; float k1 = d_ScaleDownKernel[1]; float k2 = d_ScaleDownKernel[2]; if (tx<SCALEDOWN_H+4) { int y = yStart + tx - 2; y = (y<0 ? 0 : y); y = (y>=height ? height-1 : y); yRead[tx] = y*pitch; yWrite[tx] = (yStart + tx - 4)/2 * newpitch; } __syncthreads(); int xRead = xStart + tx - 2; xRead = (xRead<0 ? 0 : xRead); xRead = (xRead>=width ? width-1 : xRead); int maxtx = min(dx2, width/2 - xStart/2); for (int dy=0;dy<SCALEDOWN_H+4;dy+=5) { { inrow[tx] = d_Data[yRead[dy+0] + xRead]; __syncthreads(); if (tx<maxtx) { brow[tx4] = k0*(inrow[2*tx]+inrow[2*tx+4]) + k1*(inrow[2*tx+1]+inrow[2*tx+3]) + k2*inrow[2*tx+2]; if (dy>=4 && !(dy&1)) d_Result[yWrite[dy+0] + xWrite] = k2*brow[tx2] + k0*(brow[tx0]+brow[tx4]) + k1*(brow[tx1]+brow[tx3]); } __syncthreads(); } if (dy<(SCALEDOWN_H+3)) { inrow[tx] = d_Data[yRead[dy+1] + xRead]; __syncthreads(); if (tx<maxtx) { brow[tx0] = k0*(inrow[2*tx]+inrow[2*tx+4]) + k1*(inrow[2*tx+1]+inrow[2*tx+3]) + k2*inrow[2*tx+2]; if (dy>=3 && (dy&1)) d_Result[yWrite[dy+1] + xWrite] = k2*brow[tx3] + k0*(brow[tx1]+brow[tx0]) + k1*(brow[tx2]+brow[tx4]); } __syncthreads(); } if (dy<(SCALEDOWN_H+2)) { inrow[tx] = d_Data[yRead[dy+2] + xRead]; __syncthreads(); if (tx<maxtx) { brow[tx1] = k0*(inrow[2*tx]+inrow[2*tx+4]) + k1*(inrow[2*tx+1]+inrow[2*tx+3]) + k2*inrow[2*tx+2]; if (dy>=2 && !(dy&1)) d_Result[yWrite[dy+2] + xWrite] = k2*brow[tx4] + k0*(brow[tx2]+brow[tx1]) + k1*(brow[tx3]+brow[tx0]); } __syncthreads(); } if (dy<(SCALEDOWN_H+1)) { inrow[tx] = d_Data[yRead[dy+3] + xRead]; __syncthreads(); if (tx<maxtx) { brow[tx2] = k0*(inrow[2*tx]+inrow[2*tx+4]) + k1*(inrow[2*tx+1]+inrow[2*tx+3]) + k2*inrow[2*tx+2]; if (dy>=1 && (dy&1)) d_Result[yWrite[dy+3] + xWrite] = k2*brow[tx0] + k0*(brow[tx3]+brow[tx2]) + k1*(brow[tx4]+brow[tx1]); } __syncthreads(); } if (dy<SCALEDOWN_H) { inrow[tx] = d_Data[yRead[dy+4] + xRead]; __syncthreads(); if (tx<dx2 && xWrite<width/2) { brow[tx3] = k0*(inrow[2*tx]+inrow[2*tx+4]) + k1*(inrow[2*tx+1]+inrow[2*tx+3]) + k2*inrow[2*tx+2]; if (!(dy&1)) d_Result[yWrite[dy+4] + xWrite] = k2*brow[tx1] + k0*(brow[tx4]+brow[tx3]) + k1*(brow[tx0]+brow[tx2]); } __syncthreads(); } } } __global__ void ScaleUp(float *d_Result, float *d_Data, int width, int pitch, int height, int newpitch) { const int tx = threadIdx.x; const int ty = threadIdx.y; int x = blockIdx.x*SCALEUP_W + 2*tx; int y = blockIdx.y*SCALEUP_H + 2*ty; if (x<2*width && y<2*height) { int xl = blockIdx.x*(SCALEUP_W/2) + tx; int yu = blockIdx.y*(SCALEUP_H/2) + ty; int xr = min(xl + 1, width - 1); int yd = min(yu + 1, height - 1); float vul = d_Data[yu*pitch + xl]; float vur = d_Data[yu*pitch + xr]; float vdl = d_Data[yd*pitch + xl]; float vdr = d_Data[yd*pitch + xr]; d_Result[(y + 0)*newpitch + x + 0] = vul; d_Result[(y + 0)*newpitch + x + 1] = 0.50f*(vul + vur); d_Result[(y + 1)*newpitch + x + 0] = 0.50f*(vul + vdl); d_Result[(y + 1)*newpitch + x + 1] = 0.25f*(vul + vur + vdl + vdr); } } __global__ void ExtractSiftDescriptors(hipTextureObject_t texObj, SiftPoint *d_sift, int fstPts, float subsampling) { __shared__ float gauss[16]; __shared__ float buffer[128]; __shared__ float sums[4]; const int tx = threadIdx.x; // 0 -> 16 const int ty = threadIdx.y; // 0 -> 8 const int idx = ty*16 + tx; const int bx = blockIdx.x + fstPts; // 0 -> numPts if (ty==0) gauss[tx] = exp(-(tx-7.5f)*(tx-7.5f)/128.0f); 
buffer[idx] = 0.0f; __syncthreads(); // Compute angles and gradients float theta = 2.0f*3.1415f/360.0f*d_sift[bx].orientation; float sina = sinf(theta); // cosa -sina float cosa = cosf(theta); // sina cosa float scale = 12.0f/16.0f*d_sift[bx].scale; float ssina = scale*sina; float scosa = scale*cosa; for (int y=ty;y<16;y+=8) { float xpos = d_sift[bx].xpos + (tx-7.5f)*scosa - (y-7.5f)*ssina + 0.5f; float ypos = d_sift[bx].ypos + (tx-7.5f)*ssina + (y-7.5f)*scosa + 0.5f; float dx = tex2D<float>(texObj, xpos+cosa, ypos+sina) - tex2D<float>(texObj, xpos-cosa, ypos-sina); float dy = tex2D<float>(texObj, xpos-sina, ypos+cosa) - tex2D<float>(texObj, xpos+sina, ypos-cosa); float grad = gauss[y]*gauss[tx] * sqrtf(dx*dx + dy*dy); float angf = 4.0f/3.1415f*atan2f(dy, dx) + 4.0f; int hori = (tx + 2)/4 - 1; // Convert from (tx,y,angle) to bins float horf = (tx - 1.5f)/4.0f - hori; float ihorf = 1.0f - horf; int veri = (y + 2)/4 - 1; float verf = (y - 1.5f)/4.0f - veri; float iverf = 1.0f - verf; int angi = angf; int angp = (angi<7 ? angi+1 : 0); angf -= angi; float iangf = 1.0f - angf; int hist = 8*(4*veri + hori); // Each gradient measure is interpolated int p1 = angi + hist; // in angles, xpos and ypos -> 8 stores int p2 = angp + hist; if (tx>=2) { float grad1 = ihorf*grad; if (y>=2) { // Upper left float grad2 = iverf*grad1; atomicAdd(buffer + p1, iangf*grad2); atomicAdd(buffer + p2, angf*grad2); } if (y<=13) { // Lower left float grad2 = verf*grad1; atomicAdd(buffer + p1+32, iangf*grad2); atomicAdd(buffer + p2+32, angf*grad2); } } if (tx<=13) { float grad1 = horf*grad; if (y>=2) { // Upper right float grad2 = iverf*grad1; atomicAdd(buffer + p1+8, iangf*grad2); atomicAdd(buffer + p2+8, angf*grad2); } if (y<=13) { // Lower right float grad2 = verf*grad1; atomicAdd(buffer + p1+40, iangf*grad2); atomicAdd(buffer + p2+40, angf*grad2); } } } __syncthreads(); // Normalize twice and suppress peaks first time float sum = buffer[idx]*buffer[idx]; for (int i=16;i>0;i/=2) sum += ShiftDown(sum, i); if ((idx&31)==0) sums[idx/32] = sum; __syncthreads(); float tsum1 = sums[0] + sums[1] + sums[2] + sums[3]; tsum1 = min(buffer[idx] * rsqrtf(tsum1), 0.2f); sum = tsum1*tsum1; for (int i=16;i>0;i/=2) sum += ShiftDown(sum, i); if ((idx&31)==0) sums[idx/32] = sum; __syncthreads(); float tsum2 = sums[0] + sums[1] + sums[2] + sums[3]; float *desc = d_sift[bx].data; desc[idx] = tsum1 * rsqrtf(tsum2); if (idx==0) { d_sift[bx].xpos *= subsampling; d_sift[bx].ypos *= subsampling; d_sift[bx].scale *= subsampling; } } __device__ float FastAtan2(float y, float x) { float absx = abs(x); float absy = abs(y); float a = __fdiv_rn(min(absx, absy), max(absx, absy)); float s = a*a; float r = ((-0.0464964749f*s + 0.15931422f)*s - 0.327622764f)*s*a + a; r = (absy>absx ? 1.57079637f - r : r); r = (x<0 ? 3.14159274f - r : r); r = (y<0 ? 
-r : r); return r; } __global__ void ExtractSiftDescriptorsCONSTNew(hipTextureObject_t texObj, SiftPoint *d_sift, float subsampling, int octave) { __shared__ float gauss[16]; __shared__ float buffer[128]; __shared__ float sums[4]; const int tx = threadIdx.x; // 0 -> 16 const int ty = threadIdx.y; // 0 -> 8 const int idx = ty*16 + tx; if (ty==0) gauss[tx] = __expf(-(tx-7.5f)*(tx-7.5f)/128.0f); int fstPts = min(d_PointCounter[2*octave-1], d_MaxNumPoints); int totPts = min(d_PointCounter[2*octave+1], d_MaxNumPoints); //if (tx==0 && ty==0) // printf("%d %d %d %d\n", octave, fstPts, min(d_PointCounter[2*octave], d_MaxNumPoints), totPts); for (int bx = blockIdx.x + fstPts; bx < totPts; bx += gridDim.x) { buffer[idx] = 0.0f; __syncthreads(); // Compute angles and gradients float theta = 2.0f*3.1415f/360.0f*d_sift[bx].orientation; float sina = __sinf(theta); // cosa -sina float cosa = __cosf(theta); // sina cosa float scale = 12.0f/16.0f*d_sift[bx].scale; float ssina = scale*sina; float scosa = scale*cosa; for (int y=ty;y<16;y+=8) { float xpos = d_sift[bx].xpos + (tx-7.5f)*scosa - (y-7.5f)*ssina + 0.5f; float ypos = d_sift[bx].ypos + (tx-7.5f)*ssina + (y-7.5f)*scosa + 0.5f; float dx = tex2D<float>(texObj, xpos+cosa, ypos+sina) - tex2D<float>(texObj, xpos-cosa, ypos-sina); float dy = tex2D<float>(texObj, xpos-sina, ypos+cosa) - tex2D<float>(texObj, xpos+sina, ypos-cosa); float grad = gauss[y]*gauss[tx] * __fsqrt_rn(dx*dx + dy*dy); float angf = 4.0f/3.1415f*FastAtan2(dy, dx) + 4.0f; int hori = (tx + 2)/4 - 1; // Convert from (tx,y,angle) to bins float horf = (tx - 1.5f)/4.0f - hori; float ihorf = 1.0f - horf; int veri = (y + 2)/4 - 1; float verf = (y - 1.5f)/4.0f - veri; float iverf = 1.0f - verf; int angi = angf; int angp = (angi<7 ? angi+1 : 0); angf -= angi; float iangf = 1.0f - angf; int hist = 8*(4*veri + hori); // Each gradient measure is interpolated int p1 = angi + hist; // in angles, xpos and ypos -> 8 stores int p2 = angp + hist; if (tx>=2) { float grad1 = ihorf*grad; if (y>=2) { // Upper left float grad2 = iverf*grad1; atomicAdd(buffer + p1, iangf*grad2); atomicAdd(buffer + p2, angf*grad2); } if (y<=13) { // Lower left float grad2 = verf*grad1; atomicAdd(buffer + p1+32, iangf*grad2); atomicAdd(buffer + p2+32, angf*grad2); } } if (tx<=13) { float grad1 = horf*grad; if (y>=2) { // Upper right float grad2 = iverf*grad1; atomicAdd(buffer + p1+8, iangf*grad2); atomicAdd(buffer + p2+8, angf*grad2); } if (y<=13) { // Lower right float grad2 = verf*grad1; atomicAdd(buffer + p1+40, iangf*grad2); atomicAdd(buffer + p2+40, angf*grad2); } } } __syncthreads(); // Normalize twice and suppress peaks first time float sum = buffer[idx]*buffer[idx]; for (int i=16;i>0;i/=2) sum += ShiftDown(sum, i); if ((idx&31)==0) sums[idx/32] = sum; __syncthreads(); float tsum1 = sums[0] + sums[1] + sums[2] + sums[3]; tsum1 = min(buffer[idx] * rsqrtf(tsum1), 0.2f); sum = tsum1*tsum1; for (int i=16;i>0;i/=2) sum += ShiftDown(sum, i); if ((idx&31)==0) sums[idx/32] = sum; __syncthreads(); float tsum2 = sums[0] + sums[1] + sums[2] + sums[3]; float *desc = d_sift[bx].data; desc[idx] = tsum1 * rsqrtf(tsum2); if (idx==0) { d_sift[bx].xpos *= subsampling; d_sift[bx].ypos *= subsampling; d_sift[bx].scale *= subsampling; } __syncthreads(); } } __global__ void ExtractSiftDescriptorsCONST(hipTextureObject_t texObj, SiftPoint *d_sift, float subsampling, int octave) { __shared__ float gauss[16]; __shared__ float buffer[128]; __shared__ float sums[4]; const int tx = threadIdx.x; // 0 -> 16 const int ty = threadIdx.y; // 0 -> 8 const 
int idx = ty*16 + tx; if (ty==0) gauss[tx] = exp(-(tx-7.5f)*(tx-7.5f)/128.0f); int fstPts = min(d_PointCounter[2*octave-1], d_MaxNumPoints); int totPts = min(d_PointCounter[2*octave+1], d_MaxNumPoints); //if (tx==0 && ty==0) // printf("%d %d %d %d\n", octave, fstPts, min(d_PointCounter[2*octave], d_MaxNumPoints), totPts); for (int bx = blockIdx.x + fstPts; bx < totPts; bx += gridDim.x) { buffer[idx] = 0.0f; __syncthreads(); // Compute angles and gradients float theta = 2.0f*3.1415f/360.0f*d_sift[bx].orientation; float sina = sinf(theta); // cosa -sina float cosa = cosf(theta); // sina cosa float scale = 12.0f/16.0f*d_sift[bx].scale; float ssina = scale*sina; float scosa = scale*cosa; for (int y=ty;y<16;y+=8) { float xpos = d_sift[bx].xpos + (tx-7.5f)*scosa - (y-7.5f)*ssina + 0.5f; float ypos = d_sift[bx].ypos + (tx-7.5f)*ssina + (y-7.5f)*scosa + 0.5f; float dx = tex2D<float>(texObj, xpos+cosa, ypos+sina) - tex2D<float>(texObj, xpos-cosa, ypos-sina); float dy = tex2D<float>(texObj, xpos-sina, ypos+cosa) - tex2D<float>(texObj, xpos+sina, ypos-cosa); float grad = gauss[y]*gauss[tx] * sqrtf(dx*dx + dy*dy); float angf = 4.0f/3.1415f*atan2f(dy, dx) + 4.0f; int hori = (tx + 2)/4 - 1; // Convert from (tx,y,angle) to bins float horf = (tx - 1.5f)/4.0f - hori; float ihorf = 1.0f - horf; int veri = (y + 2)/4 - 1; float verf = (y - 1.5f)/4.0f - veri; float iverf = 1.0f - verf; int angi = angf; int angp = (angi<7 ? angi+1 : 0); angf -= angi; float iangf = 1.0f - angf; int hist = 8*(4*veri + hori); // Each gradient measure is interpolated int p1 = angi + hist; // in angles, xpos and ypos -> 8 stores int p2 = angp + hist; if (tx>=2) { float grad1 = ihorf*grad; if (y>=2) { // Upper left float grad2 = iverf*grad1; atomicAdd(buffer + p1, iangf*grad2); atomicAdd(buffer + p2, angf*grad2); } if (y<=13) { // Lower left float grad2 = verf*grad1; atomicAdd(buffer + p1+32, iangf*grad2); atomicAdd(buffer + p2+32, angf*grad2); } } if (tx<=13) { float grad1 = horf*grad; if (y>=2) { // Upper right float grad2 = iverf*grad1; atomicAdd(buffer + p1+8, iangf*grad2); atomicAdd(buffer + p2+8, angf*grad2); } if (y<=13) { // Lower right float grad2 = verf*grad1; atomicAdd(buffer + p1+40, iangf*grad2); atomicAdd(buffer + p2+40, angf*grad2); } } } __syncthreads(); // Normalize twice and suppress peaks first time float sum = buffer[idx]*buffer[idx]; for (int i=16;i>0;i/=2) sum += ShiftDown(sum, i); if ((idx&31)==0) sums[idx/32] = sum; __syncthreads(); float tsum1 = sums[0] + sums[1] + sums[2] + sums[3]; tsum1 = min(buffer[idx] * rsqrtf(tsum1), 0.2f); sum = tsum1*tsum1; for (int i=16;i>0;i/=2) sum += ShiftDown(sum, i); if ((idx&31)==0) sums[idx/32] = sum; __syncthreads(); float tsum2 = sums[0] + sums[1] + sums[2] + sums[3]; float *desc = d_sift[bx].data; desc[idx] = tsum1 * rsqrtf(tsum2); if (idx==0) { d_sift[bx].xpos *= subsampling; d_sift[bx].ypos *= subsampling; d_sift[bx].scale *= subsampling; } __syncthreads(); } } __global__ void ExtractSiftDescriptorsOld(hipTextureObject_t texObj, SiftPoint *d_sift, int fstPts, float subsampling) { __shared__ float gauss[16]; __shared__ float buffer[128]; __shared__ float sums[128]; const int tx = threadIdx.x; // 0 -> 16 const int ty = threadIdx.y; // 0 -> 8 const int idx = ty*16 + tx; const int bx = blockIdx.x + fstPts; // 0 -> numPts if (ty==0) gauss[tx] = exp(-(tx-7.5f)*(tx-7.5f)/128.0f); buffer[idx] = 0.0f; __syncthreads(); // Compute angles and gradients float theta = 2.0f*3.1415f/360.0f*d_sift[bx].orientation; float sina = sinf(theta); // cosa -sina float cosa = cosf(theta); // sina 
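// ExtractSiftDescriptorsOld (this kernel) keeps the original normalization
// path further below: a 128-entry shared array sums[] reduced with a tree of
// __syncthreads() steps. The newer variants above replace it with a per-warp
// reduction,
//   for (int i = 16; i > 0; i /= 2) sum += ShiftDown(sum, i);
// leaving one partial sum per warp in sums[0..3]. ShiftDown is assumed here to
// wrap a warp shuffle (e.g. __shfl_down_sync) defined in the included headers.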
cosa float scale = 12.0f/16.0f*d_sift[bx].scale; float ssina = scale*sina; float scosa = scale*cosa; for (int y=ty;y<16;y+=8) { float xpos = d_sift[bx].xpos + (tx-7.5f)*scosa - (y-7.5f)*ssina + 0.5f; float ypos = d_sift[bx].ypos + (tx-7.5f)*ssina + (y-7.5f)*scosa + 0.5f; float dx = tex2D<float>(texObj, xpos+cosa, ypos+sina) - tex2D<float>(texObj, xpos-cosa, ypos-sina); float dy = tex2D<float>(texObj, xpos-sina, ypos+cosa) - tex2D<float>(texObj, xpos+sina, ypos-cosa); float grad = gauss[y]*gauss[tx] * sqrtf(dx*dx + dy*dy); float angf = 4.0f/3.1415f*atan2f(dy, dx) + 4.0f; int hori = (tx + 2)/4 - 1; // Convert from (tx,y,angle) to bins float horf = (tx - 1.5f)/4.0f - hori; float ihorf = 1.0f - horf; int veri = (y + 2)/4 - 1; float verf = (y - 1.5f)/4.0f - veri; float iverf = 1.0f - verf; int angi = angf; int angp = (angi<7 ? angi+1 : 0); angf -= angi; float iangf = 1.0f - angf; int hist = 8*(4*veri + hori); // Each gradient measure is interpolated int p1 = angi + hist; // in angles, xpos and ypos -> 8 stores int p2 = angp + hist; if (tx>=2) { float grad1 = ihorf*grad; if (y>=2) { // Upper left float grad2 = iverf*grad1; atomicAdd(buffer + p1, iangf*grad2); atomicAdd(buffer + p2, angf*grad2); } if (y<=13) { // Lower left float grad2 = verf*grad1; atomicAdd(buffer + p1+32, iangf*grad2); atomicAdd(buffer + p2+32, angf*grad2); } } if (tx<=13) { float grad1 = horf*grad; if (y>=2) { // Upper right float grad2 = iverf*grad1; atomicAdd(buffer + p1+8, iangf*grad2); atomicAdd(buffer + p2+8, angf*grad2); } if (y<=13) { // Lower right float grad2 = verf*grad1; atomicAdd(buffer + p1+40, iangf*grad2); atomicAdd(buffer + p2+40, angf*grad2); } } } __syncthreads(); // Normalize twice and suppress peaks first time if (idx<64) sums[idx] = buffer[idx]*buffer[idx] + buffer[idx+64]*buffer[idx+64]; __syncthreads(); if (idx<32) sums[idx] = sums[idx] + sums[idx+32]; __syncthreads(); if (idx<16) sums[idx] = sums[idx] + sums[idx+16]; __syncthreads(); if (idx<8) sums[idx] = sums[idx] + sums[idx+8]; __syncthreads(); if (idx<4) sums[idx] = sums[idx] + sums[idx+4]; __syncthreads(); float tsum1 = sums[0] + sums[1] + sums[2] + sums[3]; buffer[idx] = buffer[idx] * rsqrtf(tsum1); if (buffer[idx]>0.2f) buffer[idx] = 0.2f; __syncthreads(); if (idx<64) sums[idx] = buffer[idx]*buffer[idx] + buffer[idx+64]*buffer[idx+64]; __syncthreads(); if (idx<32) sums[idx] = sums[idx] + sums[idx+32]; __syncthreads(); if (idx<16) sums[idx] = sums[idx] + sums[idx+16]; __syncthreads(); if (idx<8) sums[idx] = sums[idx] + sums[idx+8]; __syncthreads(); if (idx<4) sums[idx] = sums[idx] + sums[idx+4]; __syncthreads(); float tsum2 = sums[0] + sums[1] + sums[2] + sums[3]; float *desc = d_sift[bx].data; desc[idx] = buffer[idx] * rsqrtf(tsum2); if (idx==0) { d_sift[bx].xpos *= subsampling; d_sift[bx].ypos *= subsampling; d_sift[bx].scale *= subsampling; } } __device__ void ExtractSiftDescriptor(hipTextureObject_t texObj, SiftPoint *d_sift, float subsampling, int octave, int bx) { __shared__ float gauss[16]; __shared__ float buffer[128]; __shared__ float sums[4]; const int idx = threadIdx.x; const int tx = idx & 15; // 0 -> 16 const int ty = idx / 16; // 0 -> 8 if (ty==0) gauss[tx] = exp(-(tx-7.5f)*(tx-7.5f)/128.0f); buffer[idx] = 0.0f; __syncthreads(); // Compute angles and gradients float theta = 2.0f*3.1415f/360.0f*d_sift[bx].orientation; float sina = sinf(theta); // cosa -sina float cosa = cosf(theta); // sina cosa float scale = 12.0f/16.0f*d_sift[bx].scale; float ssina = scale*sina; float scosa = scale*cosa; for (int y=ty;y<16;y+=8) { float xpos = 
d_sift[bx].xpos + (tx-7.5f)*scosa - (y-7.5f)*ssina + 0.5f; float ypos = d_sift[bx].ypos + (tx-7.5f)*ssina + (y-7.5f)*scosa + 0.5f; float dx = tex2D<float>(texObj, xpos+cosa, ypos+sina) - tex2D<float>(texObj, xpos-cosa, ypos-sina); float dy = tex2D<float>(texObj, xpos-sina, ypos+cosa) - tex2D<float>(texObj, xpos+sina, ypos-cosa); float grad = gauss[y]*gauss[tx] * sqrtf(dx*dx + dy*dy); float angf = 4.0f/3.1415f*atan2f(dy, dx) + 4.0f; int hori = (tx + 2)/4 - 1; // Convert from (tx,y,angle) to bins float horf = (tx - 1.5f)/4.0f - hori; float ihorf = 1.0f - horf; int veri = (y + 2)/4 - 1; float verf = (y - 1.5f)/4.0f - veri; float iverf = 1.0f - verf; int angi = angf; int angp = (angi<7 ? angi+1 : 0); angf -= angi; float iangf = 1.0f - angf; int hist = 8*(4*veri + hori); // Each gradient measure is interpolated int p1 = angi + hist; // in angles, xpos and ypos -> 8 stores int p2 = angp + hist; if (tx>=2) { float grad1 = ihorf*grad; if (y>=2) { // Upper left float grad2 = iverf*grad1; atomicAdd(buffer + p1, iangf*grad2); atomicAdd(buffer + p2, angf*grad2); } if (y<=13) { // Lower left float grad2 = verf*grad1; atomicAdd(buffer + p1+32, iangf*grad2); atomicAdd(buffer + p2+32, angf*grad2); } } if (tx<=13) { float grad1 = horf*grad; if (y>=2) { // Upper right float grad2 = iverf*grad1; atomicAdd(buffer + p1+8, iangf*grad2); atomicAdd(buffer + p2+8, angf*grad2); } if (y<=13) { // Lower right float grad2 = verf*grad1; atomicAdd(buffer + p1+40, iangf*grad2); atomicAdd(buffer + p2+40, angf*grad2); } } } __syncthreads(); // Normalize twice and suppress peaks first time float sum = buffer[idx]*buffer[idx]; for (int i=16;i>0;i/=2) sum += ShiftDown(sum, i); if ((idx&31)==0) sums[idx/32] = sum; __syncthreads(); float tsum1 = sums[0] + sums[1] + sums[2] + sums[3]; tsum1 = min(buffer[idx] * rsqrtf(tsum1), 0.2f); sum = tsum1*tsum1; for (int i=16;i>0;i/=2) sum += ShiftDown(sum, i); if ((idx&31)==0) sums[idx/32] = sum; __syncthreads(); float tsum2 = sums[0] + sums[1] + sums[2] + sums[3]; float *desc = d_sift[bx].data; desc[idx] = tsum1 * rsqrtf(tsum2); if (idx==0) { d_sift[bx].xpos *= subsampling; d_sift[bx].ypos *= subsampling; d_sift[bx].scale *= subsampling; } __syncthreads(); } __global__ void RescalePositions(SiftPoint *d_sift, int numPts, float scale) { int num = blockIdx.x*blockDim.x + threadIdx.x; if (num<numPts) { d_sift[num].xpos *= scale; d_sift[num].ypos *= scale; d_sift[num].scale *= scale; } } __global__ void ComputeOrientations(hipTextureObject_t texObj, SiftPoint *d_Sift, int fstPts) { __shared__ float hist[64]; __shared__ float gauss[11]; const int tx = threadIdx.x; const int bx = blockIdx.x + fstPts; float i2sigma2 = -1.0f/(4.5f*d_Sift[bx].scale*d_Sift[bx].scale); if (tx<11) gauss[tx] = exp(i2sigma2*(tx-5)*(tx-5)); if (tx<64) hist[tx] = 0.0f; __syncthreads(); float xp = d_Sift[bx].xpos - 4.5f; float yp = d_Sift[bx].ypos - 4.5f; int yd = tx/11; int xd = tx - yd*11; float xf = xp + xd; float yf = yp + yd; if (yd<11) { float dx = tex2D<float>(texObj, xf+1.0, yf) - tex2D<float>(texObj, xf-1.0, yf); float dy = tex2D<float>(texObj, xf, yf+1.0) - tex2D<float>(texObj, xf, yf-1.0); int bin = 16.0f*atan2f(dy, dx)/3.1416f + 16.5f; if (bin>31) bin = 0; float grad = sqrtf(dx*dx + dy*dy); atomicAdd(&hist[bin], grad*gauss[xd]*gauss[yd]); } __syncthreads(); int x1m = (tx>=1 ? tx-1 : tx+31); int x1p = (tx<=30 ? tx+1 : tx-31); if (tx<32) { int x2m = (tx>=2 ? tx-2 : tx+30); int x2p = (tx<=29 ? 
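// ComputeOrientations: each keypoint gets a 32-bin gradient-orientation
// histogram accumulated over an 11x11 patch, weighted by gradient magnitude
// and the Gaussian in gauss[]. The wrap-around indices x1m/x1p/x2m/x2p apply a
// circular [1 4 6 4 1] smoothing into hist[32..63]; only local maxima survive
// in hist[0..31]. The dominant bin i1 is then refined with a three-point
// parabola,
//   peak = i1 + 0.5*(h[i1+1] - h[i1-1]) / (2*h[i1] - h[i1+1] - h[i1-1]),
// and converted to degrees with 11.25 = 360/32.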
tx+2 : tx-30); hist[tx+32] = 6.0f*hist[tx] + 4.0f*(hist[x1m] + hist[x1p]) + (hist[x2m] + hist[x2p]); } __syncthreads(); if (tx<32) { float v = hist[32+tx]; hist[tx] = (v>hist[32+x1m] && v>=hist[32+x1p] ? v : 0.0f); } __syncthreads(); if (tx==0) { float maxval1 = 0.0; float maxval2 = 0.0; int i1 = -1; int i2 = -1; for (int i=0;i<32;i++) { float v = hist[i]; if (v>maxval1) { maxval2 = maxval1; maxval1 = v; i2 = i1; i1 = i; } else if (v>maxval2) { maxval2 = v; i2 = i; } } float val1 = hist[32+((i1+1)&31)]; float val2 = hist[32+((i1+31)&31)]; float peak = i1 + 0.5f*(val1-val2) / (2.0f*maxval1-val1-val2); d_Sift[bx].orientation = 11.25f*(peak<0.0f ? peak+32.0f : peak); if (maxval2>0.8f*maxval1) { float val1 = hist[32+((i2+1)&31)]; float val2 = hist[32+((i2+31)&31)]; float peak = i2 + 0.5f*(val1-val2) / (2.0f*maxval2-val1-val2); unsigned int idx = atomicInc(d_PointCounter, 0x7fffffff); if (idx<d_MaxNumPoints) { d_Sift[idx].xpos = d_Sift[bx].xpos; d_Sift[idx].ypos = d_Sift[bx].ypos; d_Sift[idx].scale = d_Sift[bx].scale; d_Sift[idx].sharpness = d_Sift[bx].sharpness; d_Sift[idx].edgeness = d_Sift[bx].edgeness; d_Sift[idx].orientation = 11.25f*(peak<0.0f ? peak+32.0f : peak);; d_Sift[idx].subsampling = d_Sift[bx].subsampling; } } } } // With constant number of blocks __global__ void ComputeOrientationsCONSTNew(float *image, int w, int p, int h, SiftPoint *d_Sift, int octave) { #define RAD 9 #define WID (2*RAD + 1) #define LEN 32 //%%%% Note: Lowe suggests 36, not 32 __shared__ float img[WID][WID], tmp[WID][WID]; __shared__ float hist[2*LEN]; __shared__ float gaussx[WID], gaussy[WID]; const int tx = threadIdx.x; int fstPts = min(d_PointCounter[2*octave-1], d_MaxNumPoints); int totPts = min(d_PointCounter[2*octave+0], d_MaxNumPoints); for (int bx = blockIdx.x + fstPts; bx < totPts; bx += gridDim.x) { float sc = d_Sift[bx].scale; for (int i=tx;i<2*LEN;i+=blockDim.x) hist[i] = 0.0f; float xp = d_Sift[bx].xpos; float yp = d_Sift[bx].ypos; int xi = (int)xp; int yi = (int)yp; float xf = xp - xi; float yf = yp - yi; for (int i=tx;i<WID*WID;i+=blockDim.x) { int y = i/WID; int x = i - y*WID; int xp = max(min(x - RAD + xi, w - 1), 0); int yp = max(min(y - RAD + yi, h - 1), 0); img[y][x] = image[yp*p + xp]; } float fac[5]; fac[1] = fac[3] = (sc>0.5f ? __expf(-1.0f/(2.0f*(sc*sc - 0.25f))) : 0.0f); fac[0] = fac[4] = (sc>0.5f ? __expf(-4.0f/(2.0f*(sc*sc - 0.25f))) : 0.0f); fac[2] = 1.0f; float i2sigma2 = -1.0f/(2.0f*2.0f*2.0f*sc*sc); //%%%% Note: Lowe suggests 1.5, not 2.0 if (tx<WID) { gaussx[tx] = __expf(i2sigma2*(tx-RAD-xf)*(tx-RAD-xf)); gaussy[tx] = __expf(i2sigma2*(tx-RAD-yf)*(tx-RAD-yf)); } __syncthreads(); for (int i=tx;i<(WID-4)*WID;i+=blockDim.x) { int y = i/WID; int x = i - y*WID; y += 2; tmp[y][x] = img[y][x] + fac[1]*(img[y-1][x] + img[y+1][x]) + fac[0]*(img[y-2][x] + img[y+2][x]); } __syncthreads(); for (int i=tx;i<(WID-4)*(WID-4);i+=blockDim.x) { int y = i/(WID-4); int x = i - y*(WID-4); x += 2; y += 2; img[y][x] = tmp[y][x] + fac[1]*(tmp[y][x-1] + tmp[y][x+1]) + fac[0]*(tmp[y][x-2] + tmp[y][x+2]); } __syncthreads(); for (int i=tx;i<(WID-6)*(WID-6);i+=blockDim.x) { int y = i/(WID-6); int x = i - y*(WID-6); x += 3; y += 3; float dx = img[y][x+1] - img[y][x-1]; float dy = img[y+1][x] - img[y-1][x]; int bin = (int)((LEN/2)*atan2f(dy, dx)/3.1416f + (LEN/2) + 0.5f)%LEN; float grad = __fsqrt_rn(dx*dx + dy*dy); atomicAdd(&hist[LEN + bin], grad*gaussx[x]*gaussy[y]); } __syncthreads(); int x1m = (tx>=1 ? tx-1 : tx+LEN-1); int x1p = (tx<(LEN-1) ? tx+1 : tx-LEN+1); int x2m = (tx>=2 ? 
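// ComputeOrientationsCONSTNew avoids texture fetches: it stages a
// WID x WID = 19x19 image patch in shared memory, applies a small separable
// blur (fac[]) intended to bring the patch to the keypoint's scale, and builds
// a LEN = 32 bin histogram (the inline note observes that Lowe suggests 36).
// Like the other CONST kernels it uses a persistent grid-stride loop,
//   for (int bx = blockIdx.x + fstPts; bx < totPts; bx += gridDim.x)
// so a fixed-size launch covers however many points the octave produced.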
tx-2 : tx+LEN-2); int x2p = (tx<(LEN-2) ? tx+2 : tx-LEN+2); if (tx<LEN) { hist[tx] = 6.0f*hist[tx + LEN] + 4.0f*(hist[x1m + LEN] + hist[x1p + LEN]) + 1.0f*(hist[x2m + LEN] + hist[x2p + LEN]); hist[tx + LEN] = 8.0f*hist[tx] + 4.0f*(hist[x1m] + hist[x1p]) + 0.0f*(hist[x2m] + hist[x2p]); float val = hist[tx + LEN]; hist[tx] = (val>hist[x1m + LEN] && val>=hist[x1p + LEN] ? val : 0.0f); } __syncthreads(); if (tx==0) { float maxval1 = 0.0; float maxval2 = 0.0; int i1 = -1; int i2 = -1; for (int i=0;i<LEN;i++) { float v = hist[i]; if (v>maxval1) { maxval2 = maxval1; maxval1 = v; i2 = i1; i1 = i; } else if (v>maxval2) { maxval2 = v; i2 = i; } } float val1 = hist[LEN + ((i1 + 1)%LEN)]; float val2 = hist[LEN + ((i1 + LEN - 1)%LEN)]; float peak = i1 + 0.5f*(val1 - val2) / (2.0f*maxval1 - val1 - val2); d_Sift[bx].orientation = 360.0f*(peak<0.0f ? peak + LEN : peak)/LEN; atomicMax(&d_PointCounter[2*octave+1], d_PointCounter[2*octave+0]); if (maxval2>0.8f*maxval1 && true) { float val1 = hist[LEN + ((i2 + 1)%LEN)]; float val2 = hist[LEN + ((i2 + LEN - 1)%LEN)]; float peak = i2 + 0.5f*(val1 - val2) / (2.0f*maxval2 - val1 - val2); unsigned int idx = atomicInc(&d_PointCounter[2*octave+1], 0x7fffffff); if (idx<d_MaxNumPoints) { d_Sift[idx].xpos = d_Sift[bx].xpos; d_Sift[idx].ypos = d_Sift[bx].ypos; d_Sift[idx].scale = sc; d_Sift[idx].sharpness = d_Sift[bx].sharpness; d_Sift[idx].edgeness = d_Sift[bx].edgeness; d_Sift[idx].orientation = 360.0f*(peak<0.0f ? peak + LEN : peak)/LEN; d_Sift[idx].subsampling = d_Sift[bx].subsampling; } } } } #undef RAD #undef WID #undef LEN } // With constant number of blocks __global__ void ComputeOrientationsCONST(hipTextureObject_t texObj, SiftPoint *d_Sift, int octave) { __shared__ float hist[64]; __shared__ float gauss[11]; const int tx = threadIdx.x; int fstPts = min(d_PointCounter[2*octave-1], d_MaxNumPoints); int totPts = min(d_PointCounter[2*octave+0], d_MaxNumPoints); for (int bx = blockIdx.x + fstPts; bx < totPts; bx += gridDim.x) { float i2sigma2 = -1.0f/(2.0f*1.5f*1.5f*d_Sift[bx].scale*d_Sift[bx].scale); if (tx<11) gauss[tx] = exp(i2sigma2*(tx-5)*(tx-5)); if (tx<64) hist[tx] = 0.0f; __syncthreads(); float xp = d_Sift[bx].xpos - 4.5f; float yp = d_Sift[bx].ypos - 4.5f; int yd = tx/11; int xd = tx - yd*11; float xf = xp + xd; float yf = yp + yd; if (yd<11) { float dx = tex2D<float>(texObj, xf+1.0, yf) - tex2D<float>(texObj, xf-1.0, yf); float dy = tex2D<float>(texObj, xf, yf+1.0) - tex2D<float>(texObj, xf, yf-1.0); int bin = 16.0f*atan2f(dy, dx)/3.1416f + 16.5f; if (bin>31) bin = 0; float grad = sqrtf(dx*dx + dy*dy); atomicAdd(&hist[bin], grad*gauss[xd]*gauss[yd]); } __syncthreads(); int x1m = (tx>=1 ? tx-1 : tx+31); int x1p = (tx<=30 ? tx+1 : tx-31); if (tx<32) { int x2m = (tx>=2 ? tx-2 : tx+30); int x2p = (tx<=29 ? tx+2 : tx-30); hist[tx+32] = 6.0f*hist[tx] + 4.0f*(hist[x1m] + hist[x1p]) + (hist[x2m] + hist[x2p]); } __syncthreads(); if (tx<32) { float v = hist[32+tx]; hist[tx] = (v>hist[32+x1m] && v>=hist[32+x1p] ? v : 0.0f); } __syncthreads(); if (tx==0) { float maxval1 = 0.0; float maxval2 = 0.0; int i1 = -1; int i2 = -1; for (int i=0;i<32;i++) { float v = hist[i]; if (v>maxval1) { maxval2 = maxval1; maxval1 = v; i2 = i1; i1 = i; } else if (v>maxval2) { maxval2 = v; i2 = i; } } float val1 = hist[32+((i1+1)&31)]; float val2 = hist[32+((i1+31)&31)]; float peak = i1 + 0.5f*(val1-val2) / (2.0f*maxval1-val1-val2); d_Sift[bx].orientation = 11.25f*(peak<0.0f ? 
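// Secondary orientations: if the second-highest smoothed peak is within 80%
// of the highest (maxval2 > 0.8f*maxval1), the keypoint is duplicated with the
// secondary orientation by atomically bumping d_PointCounter[2*octave+1]; the
// copy is only written while idx < d_MaxNumPoints, so overflow points are
// silently dropped. The preceding atomicMax guarantees the "after orientation"
// counter is at least the "after detection" counter even when no duplicates
// are created.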
peak+32.0f : peak); atomicMax(&d_PointCounter[2*octave+1], d_PointCounter[2*octave+0]); if (maxval2>0.8f*maxval1 && true) { float val1 = hist[32+((i2+1)&31)]; float val2 = hist[32+((i2+31)&31)]; float peak = i2 + 0.5f*(val1-val2) / (2.0f*maxval2-val1-val2); unsigned int idx = atomicInc(&d_PointCounter[2*octave+1], 0x7fffffff); if (idx<d_MaxNumPoints) { d_Sift[idx].xpos = d_Sift[bx].xpos; d_Sift[idx].ypos = d_Sift[bx].ypos; d_Sift[idx].scale = d_Sift[bx].scale; d_Sift[idx].sharpness = d_Sift[bx].sharpness; d_Sift[idx].edgeness = d_Sift[bx].edgeness; d_Sift[idx].orientation = 11.25f*(peak<0.0f ? peak+32.0f : peak);; d_Sift[idx].subsampling = d_Sift[bx].subsampling; } } } __syncthreads(); } } // With constant number of blocks __global__ void OrientAndExtractCONST(hipTextureObject_t texObj, SiftPoint *d_Sift, float subsampling, int octave) { __shared__ float hist[64]; __shared__ float gauss[11]; __shared__ unsigned int idx; //%%%% const int tx = threadIdx.x; int fstPts = min(d_PointCounter[2*octave-1], d_MaxNumPoints); int totPts = min(d_PointCounter[2*octave+0], d_MaxNumPoints); for (int bx = blockIdx.x + fstPts; bx < totPts; bx += gridDim.x) { float i2sigma2 = -1.0f/(4.5f*d_Sift[bx].scale*d_Sift[bx].scale); if (tx<11) gauss[tx] = exp(i2sigma2*(tx-5)*(tx-5)); if (tx<64) hist[tx] = 0.0f; __syncthreads(); float xp = d_Sift[bx].xpos - 4.5f; float yp = d_Sift[bx].ypos - 4.5f; int yd = tx/11; int xd = tx - yd*11; float xf = xp + xd; float yf = yp + yd; if (yd<11) { float dx = tex2D<float>(texObj, xf+1.0, yf) - tex2D<float>(texObj, xf-1.0, yf); float dy = tex2D<float>(texObj, xf, yf+1.0) - tex2D<float>(texObj, xf, yf-1.0); int bin = 16.0f*atan2f(dy, dx)/3.1416f + 16.5f; if (bin>31) bin = 0; float grad = sqrtf(dx*dx + dy*dy); atomicAdd(&hist[bin], grad*gauss[xd]*gauss[yd]); } __syncthreads(); int x1m = (tx>=1 ? tx-1 : tx+31); int x1p = (tx<=30 ? tx+1 : tx-31); if (tx<32) { int x2m = (tx>=2 ? tx-2 : tx+30); int x2p = (tx<=29 ? tx+2 : tx-30); hist[tx+32] = 6.0f*hist[tx] + 4.0f*(hist[x1m] + hist[x1p]) + (hist[x2m] + hist[x2p]); } __syncthreads(); if (tx<32) { float v = hist[32+tx]; hist[tx] = (v>hist[32+x1m] && v>=hist[32+x1p] ? v : 0.0f); } __syncthreads(); if (tx==0) { float maxval1 = 0.0; float maxval2 = 0.0; int i1 = -1; int i2 = -1; for (int i=0;i<32;i++) { float v = hist[i]; if (v>maxval1) { maxval2 = maxval1; maxval1 = v; i2 = i1; i1 = i; } else if (v>maxval2) { maxval2 = v; i2 = i; } } float val1 = hist[32+((i1+1)&31)]; float val2 = hist[32+((i1+31)&31)]; float peak = i1 + 0.5f*(val1-val2) / (2.0f*maxval1-val1-val2); d_Sift[bx].orientation = 11.25f*(peak<0.0f ? peak+32.0f : peak); idx = 0xffffffff; //%%%% atomicMax(&d_PointCounter[2*octave+1], d_PointCounter[2*octave+0]); if (maxval2>0.8f*maxval1) { float val1 = hist[32+((i2+1)&31)]; float val2 = hist[32+((i2+31)&31)]; float peak = i2 + 0.5f*(val1-val2) / (2.0f*maxval2-val1-val2); idx = atomicInc(&d_PointCounter[2*octave+1], 0x7fffffff); //%%%% if (idx<d_MaxNumPoints) { d_Sift[idx].xpos = d_Sift[bx].xpos; d_Sift[idx].ypos = d_Sift[bx].ypos; d_Sift[idx].scale = d_Sift[bx].scale; d_Sift[idx].sharpness = d_Sift[bx].sharpness; d_Sift[idx].edgeness = d_Sift[bx].edgeness; d_Sift[idx].orientation = 11.25f*(peak<0.0f ? 
peak+32.0f : peak);; d_Sift[idx].subsampling = d_Sift[bx].subsampling; } } } __syncthreads(); ExtractSiftDescriptor(texObj, d_Sift, subsampling, octave, bx); //%%%% if (idx<d_MaxNumPoints) //%%%% ExtractSiftDescriptor(texObj, d_Sift, subsampling, octave, idx); //%%%% } } /////////////////////////////////////////////////////////////////////////////// // Subtract two images (multi-scale version) /////////////////////////////////////////////////////////////////////////////// __global__ void FindPointsMultiTest(float *d_Data0, SiftPoint *d_Sift, int width, int pitch, int height, float subsampling, float lowestScale, float thresh, float factor, float edgeLimit, int octave) { #define MEMWID (MINMAX_W + 2) __shared__ unsigned int cnt; __shared__ unsigned short points[3*MEMWID]; if (blockIdx.x==0 && blockIdx.y==0 && threadIdx.x==0 && threadIdx.y==0) { atomicMax(&d_PointCounter[2*octave+0], d_PointCounter[2*octave-1]); atomicMax(&d_PointCounter[2*octave+1], d_PointCounter[2*octave-1]); } int tx = threadIdx.x; int ty = threadIdx.y; if (tx==0 && ty==0) cnt = 0; __syncthreads(); int ypos = MINMAX_H*blockIdx.y + ty; if (ypos>=height) return; int block = blockIdx.x/NUM_SCALES; int scale = blockIdx.x - NUM_SCALES*block; int minx = block*MINMAX_W; int maxx = min(minx + MINMAX_W, width); int xpos = minx + tx; int size = pitch*height; int ptr = size*scale + max(min(xpos-1, width-1), 0); float maxv = fabs(d_Data0[ptr + ypos*pitch + 1*size]); maxv = fmaxf(maxv, ShiftDown(maxv, 16, MINMAX_W)); maxv = fmaxf(maxv, ShiftDown(maxv, 8, MINMAX_W)); maxv = fmaxf(maxv, ShiftDown(maxv, 4, MINMAX_W)); maxv = fmaxf(maxv, ShiftDown(maxv, 2, MINMAX_W)); maxv = fmaxf(maxv, ShiftDown(maxv, 1, MINMAX_W)); if (Shuffle(maxv, 0)>thresh) { int yptr1 = ptr + ypos*pitch; int yptr0 = ptr + max(0,ypos-1)*pitch; int yptr2 = ptr + min(height-1,ypos+1)*pitch; float d20 = d_Data0[yptr0 + 1*size]; float d21 = d_Data0[yptr1 + 1*size]; float d22 = d_Data0[yptr2 + 1*size]; float d31 = d_Data0[yptr1 + 2*size]; float d11 = d_Data0[yptr1]; float d10 = d_Data0[yptr0]; float d12 = d_Data0[yptr2]; float ymin1 = fminf(fminf(d10, d11), d12); float ymax1 = fmaxf(fmaxf(d10, d11), d12); float d30 = d_Data0[yptr0 + 2*size]; float d32 = d_Data0[yptr2 + 2*size]; float ymin3 = fminf(fminf(d30, d31), d32); float ymax3 = fmaxf(fmaxf(d30, d31), d32); float ymin2 = fminf(fminf(ymin1, fminf(fminf(d20, d22), d21)), ymin3); float ymax2 = fmaxf(fmaxf(ymax1, fmaxf(fmaxf(d20, d22), d21)), ymax3); float nmin2 = fminf(ShiftUp(ymin2, 1), ShiftDown(ymin2, 1)); float nmax2 = fmaxf(ShiftUp(ymax2, 1), ShiftDown(ymax2, 1)); if (tx>0 && tx<MINMAX_W+1 && xpos<=maxx) { if (d21<-thresh) { float minv = fminf(fminf(nmin2, ymin1), ymin3); minv = fminf(fminf(minv, d20), d22); if (d21<minv) { int pos = atomicInc(&cnt, MEMWID-1); points[3*pos+0] = xpos - 1; points[3*pos+1] = ypos; points[3*pos+2] = scale; } } if (d21>thresh) { float maxv = fmaxf(fmaxf(nmax2, ymax1), ymax3); maxv = fmaxf(fmaxf(maxv, d20), d22); if (d21>maxv) { int pos = atomicInc(&cnt, MEMWID-1); points[3*pos+0] = xpos - 1; points[3*pos+1] = ypos; points[3*pos+2] = scale; } } } } __syncthreads(); if (ty==0 && tx<cnt) { int xpos = points[3*tx+0]; int ypos = points[3*tx+1]; int scale = points[3*tx+2]; int ptr = xpos + (ypos + (scale+1)*height)*pitch; float val = d_Data0[ptr]; float *data1 = &d_Data0[ptr]; float dxx = 2.0f*val - data1[-1] - data1[1]; float dyy = 2.0f*val - data1[-pitch] - data1[pitch]; float dxy = 0.25f*(data1[+pitch+1] + data1[-pitch-1] - data1[-pitch+1] - data1[+pitch-1]); float tra = dxx + dyy; float 
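// Edge rejection: tra and det are the trace and determinant of the 2x2
// spatial Hessian of the DoG response. Candidates are kept only when
// tra*tra < edgeLimit*det, i.e. when the ratio of principal curvatures is
// small enough. In Lowe's formulation a curvature ratio r corresponds to
// edgeLimit = (r+1)^2/r (r = 10 gives 12.1); the actual value used here is
// whatever the host-side caller passes in.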
det = dxx*dyy - dxy*dxy; if (tra*tra<edgeLimit*det) { float edge = __fdividef(tra*tra, det); float dx = 0.5f*(data1[1] - data1[-1]); float dy = 0.5f*(data1[pitch] - data1[-pitch]); float *data0 = d_Data0 + ptr - height*pitch; float *data2 = d_Data0 + ptr + height*pitch; float ds = 0.5f*(data0[0] - data2[0]); float dss = 2.0f*val - data2[0] - data0[0]; float dxs = 0.25f*(data2[1] + data0[-1] - data0[1] - data2[-1]); float dys = 0.25f*(data2[pitch] + data0[-pitch] - data2[-pitch] - data0[pitch]); float idxx = dyy*dss - dys*dys; float idxy = dys*dxs - dxy*dss; float idxs = dxy*dys - dyy*dxs; float idet = __fdividef(1.0f, idxx*dxx + idxy*dxy + idxs*dxs); float idyy = dxx*dss - dxs*dxs; float idys = dxy*dxs - dxx*dys; float idss = dxx*dyy - dxy*dxy; float pdx = idet*(idxx*dx + idxy*dy + idxs*ds); float pdy = idet*(idxy*dx + idyy*dy + idys*ds); float pds = idet*(idxs*dx + idys*dy + idss*ds); if (pdx<-0.5f || pdx>0.5f || pdy<-0.5f || pdy>0.5f || pds<-0.5f || pds>0.5f) { pdx = __fdividef(dx, dxx); pdy = __fdividef(dy, dyy); pds = __fdividef(ds, dss); } float dval = 0.5f*(dx*pdx + dy*pdy + ds*pds); int maxPts = d_MaxNumPoints; float sc = powf(2.0f, (float)scale/NUM_SCALES) * exp2f(pds*factor); if (sc>=lowestScale) { unsigned int idx = atomicInc(&d_PointCounter[2*octave+0], 0x7fffffff); idx = (idx>=maxPts ? maxPts-1 : idx); d_Sift[idx].xpos = xpos + pdx; d_Sift[idx].ypos = ypos + pdy; d_Sift[idx].scale = sc; d_Sift[idx].sharpness = val + dval; d_Sift[idx].edgeness = edge; d_Sift[idx].subsampling = subsampling; } } } } __global__ void FindPointsMultiNew(float *d_Data0, SiftPoint *d_Sift, int width, int pitch, int height, float subsampling, float lowestScale, float thresh, float factor, float edgeLimit, int octave) { #define MEMWID (MINMAX_W + 2) __shared__ unsigned short points[2*MEMWID]; if (blockIdx.x==0 && blockIdx.y==0 && threadIdx.x==0) { atomicMax(&d_PointCounter[2*octave+0], d_PointCounter[2*octave-1]); atomicMax(&d_PointCounter[2*octave+1], d_PointCounter[2*octave-1]); } int tx = threadIdx.x; int block = blockIdx.x/NUM_SCALES; int scale = blockIdx.x - NUM_SCALES*block; int minx = block*MINMAX_W; int maxx = min(minx + MINMAX_W, width); int xpos = minx + tx; int size = pitch*height; int ptr = size*scale + max(min(xpos-1, width-1), 0); int yloops = min(height - MINMAX_H*blockIdx.y, MINMAX_H); float maxv = 0.0f; for (int y=0;y<yloops;y++) { int ypos = MINMAX_H*blockIdx.y + y; int yptr1 = ptr + ypos*pitch; float val = d_Data0[yptr1 + 1*size]; maxv = fmaxf(maxv, fabs(val)); } //if (tx==0) printf("XXX1\n"); if (!__any_sync(0xffffffff, maxv>thresh)) return; //if (tx==0) printf("XXX2\n"); int ptbits = 0; for (int y=0;y<yloops;y++) { int ypos = MINMAX_H*blockIdx.y + y; int yptr1 = ptr + ypos*pitch; float d11 = d_Data0[yptr1 + 1*size]; if (__any_sync(0xffffffff, fabs(d11)>thresh)) { int yptr0 = ptr + max(0,ypos-1)*pitch; int yptr2 = ptr + min(height-1,ypos+1)*pitch; float d01 = d_Data0[yptr1]; float d10 = d_Data0[yptr0 + 1*size]; float d12 = d_Data0[yptr2 + 1*size]; float d21 = d_Data0[yptr1 + 2*size]; float d00 = d_Data0[yptr0]; float d02 = d_Data0[yptr2]; float ymin1 = fminf(fminf(d00, d01), d02); float ymax1 = fmaxf(fmaxf(d00, d01), d02); float d20 = d_Data0[yptr0 + 2*size]; float d22 = d_Data0[yptr2 + 2*size]; float ymin3 = fminf(fminf(d20, d21), d22); float ymax3 = fmaxf(fmaxf(d20, d21), d22); float ymin2 = fminf(fminf(ymin1, fminf(fminf(d10, d12), d11)), ymin3); float ymax2 = fmaxf(fmaxf(ymax1, fmaxf(fmaxf(d10, d12), d11)), ymax3); float nmin2 = fminf(ShiftUp(ymin2, 1), ShiftDown(ymin2, 1)); float 
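// FindPointsMultiNew replaces the shared-memory candidate counter of the
// Test/Old variants with warp-level primitives: an early __any_sync() exit
// when no lane exceeds the threshold, a per-thread bitmask ptbits with one bit
// per row that passed the 3x3x3 extremum test, and then __popc() plus a
// ShiftUp-based inclusive prefix sum that gives every lane a compact write
// position into points[] without any atomics.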
nmax2 = fmaxf(ShiftUp(ymax2, 1), ShiftDown(ymax2, 1)); float minv = fminf(fminf(nmin2, ymin1), ymin3); minv = fminf(fminf(minv, d10), d12); float maxv = fmaxf(fmaxf(nmax2, ymax1), ymax3); maxv = fmaxf(fmaxf(maxv, d10), d12); if (tx>0 && tx<MINMAX_W+1 && xpos<=maxx) ptbits |= ((d11 < fminf(-thresh, minv)) | (d11 > fmaxf(thresh, maxv))) << y; } } unsigned int totbits = __popc(ptbits); unsigned int numbits = totbits; for (int d=1;d<32;d<<=1) { unsigned int num = ShiftUp(totbits, d); if (tx >= d) totbits += num; } int pos = totbits - numbits; for (int y=0;y<yloops;y++) { int ypos = MINMAX_H*blockIdx.y + y; if (ptbits & (1 << y) && pos<MEMWID) { points[2*pos + 0] = xpos - 1; points[2*pos + 1] = ypos; pos ++; } } totbits = Shuffle(totbits, 31); if (tx<totbits) { int xpos = points[2*tx + 0]; int ypos = points[2*tx + 1]; int ptr = xpos + (ypos + (scale + 1)*height)*pitch; float val = d_Data0[ptr]; float *data1 = &d_Data0[ptr]; float dxx = 2.0f*val - data1[-1] - data1[1]; float dyy = 2.0f*val - data1[-pitch] - data1[pitch]; float dxy = 0.25f*(data1[+pitch+1] + data1[-pitch-1] - data1[-pitch+1] - data1[+pitch-1]); float tra = dxx + dyy; float det = dxx*dyy - dxy*dxy; if (tra*tra<edgeLimit*det) { float edge = __fdividef(tra*tra, det); float dx = 0.5f*(data1[1] - data1[-1]); float dy = 0.5f*(data1[pitch] - data1[-pitch]); float *data0 = d_Data0 + ptr - height*pitch; float *data2 = d_Data0 + ptr + height*pitch; float ds = 0.5f*(data0[0] - data2[0]); float dss = 2.0f*val - data2[0] - data0[0]; float dxs = 0.25f*(data2[1] + data0[-1] - data0[1] - data2[-1]); float dys = 0.25f*(data2[pitch] + data0[-pitch] - data2[-pitch] - data0[pitch]); float idxx = dyy*dss - dys*dys; float idxy = dys*dxs - dxy*dss; float idxs = dxy*dys - dyy*dxs; float idet = __fdividef(1.0f, idxx*dxx + idxy*dxy + idxs*dxs); float idyy = dxx*dss - dxs*dxs; float idys = dxy*dxs - dxx*dys; float idss = dxx*dyy - dxy*dxy; float pdx = idet*(idxx*dx + idxy*dy + idxs*ds); float pdy = idet*(idxy*dx + idyy*dy + idys*ds); float pds = idet*(idxs*dx + idys*dy + idss*ds); if (pdx<-0.5f || pdx>0.5f || pdy<-0.5f || pdy>0.5f || pds<-0.5f || pds>0.5f) { pdx = __fdividef(dx, dxx); pdy = __fdividef(dy, dyy); pds = __fdividef(ds, dss); } float dval = 0.5f*(dx*pdx + dy*pdy + ds*pds); int maxPts = d_MaxNumPoints; float sc = powf(2.0f, (float)scale/NUM_SCALES) * exp2f(pds*factor); if (sc>=lowestScale) { atomicMax(&d_PointCounter[2*octave+0], d_PointCounter[2*octave-1]); unsigned int idx = atomicInc(&d_PointCounter[2*octave+0], 0x7fffffff); idx = (idx>=maxPts ? 
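// Subpixel refinement (common to all FindPointsMulti* variants): the 3x3
// quadratic fit over (x, y, scale) is solved with the cofactor/adjugate terms
// idxx..idss and a single division by the determinant (idet). If any offset
// component (pdx, pdy, pds) leaves [-0.5, 0.5], the code falls back to the
// per-axis offsets dx/dxx, dy/dyy, ds/dss. The refined scale is
//   sc = 2^(scale/NUM_SCALES) * 2^(pds*factor),
// and points below lowestScale are discarded before the counter is bumped;
// dval is the interpolated response offset stored into the sharpness field.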
maxPts-1 : idx); d_Sift[idx].xpos = xpos + pdx; d_Sift[idx].ypos = ypos + pdy; d_Sift[idx].scale = sc; d_Sift[idx].sharpness = val + dval; d_Sift[idx].edgeness = edge; d_Sift[idx].subsampling = subsampling; } } } } __global__ void FindPointsMulti(float *d_Data0, SiftPoint *d_Sift, int width, int pitch, int height, float subsampling, float lowestScale, float thresh, float factor, float edgeLimit, int octave) { #define MEMWID (MINMAX_W + 2) __shared__ unsigned int cnt; __shared__ unsigned short points[3*MEMWID]; if (blockIdx.x==0 && blockIdx.y==0 && threadIdx.x==0) { atomicMax(&d_PointCounter[2*octave+0], d_PointCounter[2*octave-1]); atomicMax(&d_PointCounter[2*octave+1], d_PointCounter[2*octave-1]); } int tx = threadIdx.x; int block = blockIdx.x/NUM_SCALES; int scale = blockIdx.x - NUM_SCALES*block; int minx = block*MINMAX_W; int maxx = min(minx + MINMAX_W, width); int xpos = minx + tx; int size = pitch*height; int ptr = size*scale + max(min(xpos-1, width-1), 0); int yloops = min(height - MINMAX_H*blockIdx.y, MINMAX_H); float maxv = 0.0f; for (int y=0;y<yloops;y++) { int ypos = MINMAX_H*blockIdx.y + y; int yptr1 = ptr + ypos*pitch; float val = d_Data0[yptr1 + 1*size]; maxv = fmaxf(maxv, fabs(val)); } maxv = fmaxf(maxv, ShiftDown(maxv, 16, MINMAX_W)); maxv = fmaxf(maxv, ShiftDown(maxv, 8, MINMAX_W)); maxv = fmaxf(maxv, ShiftDown(maxv, 4, MINMAX_W)); maxv = fmaxf(maxv, ShiftDown(maxv, 2, MINMAX_W)); maxv = fmaxf(maxv, ShiftDown(maxv, 1, MINMAX_W)); if (Shuffle(maxv, 0)<=thresh) return; if (tx==0) cnt = 0; __syncthreads(); for (int y=0;y<yloops;y++) { int ypos = MINMAX_H*blockIdx.y + y; int yptr1 = ptr + ypos*pitch; int yptr0 = ptr + max(0,ypos-1)*pitch; int yptr2 = ptr + min(height-1,ypos+1)*pitch; float d20 = d_Data0[yptr0 + 1*size]; float d21 = d_Data0[yptr1 + 1*size]; float d22 = d_Data0[yptr2 + 1*size]; float d31 = d_Data0[yptr1 + 2*size]; float d11 = d_Data0[yptr1]; float d10 = d_Data0[yptr0]; float d12 = d_Data0[yptr2]; float ymin1 = fminf(fminf(d10, d11), d12); float ymax1 = fmaxf(fmaxf(d10, d11), d12); float d30 = d_Data0[yptr0 + 2*size]; float d32 = d_Data0[yptr2 + 2*size]; float ymin3 = fminf(fminf(d30, d31), d32); float ymax3 = fmaxf(fmaxf(d30, d31), d32); float ymin2 = fminf(fminf(ymin1, fminf(fminf(d20, d22), d21)), ymin3); float ymax2 = fmaxf(fmaxf(ymax1, fmaxf(fmaxf(d20, d22), d21)), ymax3); float nmin2 = fminf(ShiftUp(ymin2, 1), ShiftDown(ymin2, 1)); float nmax2 = fmaxf(ShiftUp(ymax2, 1), ShiftDown(ymax2, 1)); if (tx>0 && tx<MINMAX_W+1 && xpos<=maxx) { if (d21<-thresh) { float minv = fminf(fminf(nmin2, ymin1), ymin3); minv = fminf(fminf(minv, d20), d22); if (d21<minv) { int pos = atomicInc(&cnt, MEMWID-1); points[3*pos+0] = xpos - 1; points[3*pos+1] = ypos; points[3*pos+2] = scale; } } if (d21>thresh) { float maxv = fmaxf(fmaxf(nmax2, ymax1), ymax3); maxv = fmaxf(fmaxf(maxv, d20), d22); if (d21>maxv) { int pos = atomicInc(&cnt, MEMWID-1); points[3*pos+0] = xpos - 1; points[3*pos+1] = ypos; points[3*pos+2] = scale; } } } } if (tx<cnt) { int xpos = points[3*tx+0]; int ypos = points[3*tx+1]; int scale = points[3*tx+2]; int ptr = xpos + (ypos + (scale+1)*height)*pitch; float val = d_Data0[ptr]; float *data1 = &d_Data0[ptr]; float dxx = 2.0f*val - data1[-1] - data1[1]; float dyy = 2.0f*val - data1[-pitch] - data1[pitch]; float dxy = 0.25f*(data1[+pitch+1] + data1[-pitch-1] - data1[-pitch+1] - data1[+pitch-1]); float tra = dxx + dyy; float det = dxx*dyy - dxy*dxy; if (tra*tra<edgeLimit*det) { float edge = __fdividef(tra*tra, det); float dx = 0.5f*(data1[1] - data1[-1]); float dy = 
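// Point-counter bookkeeping: d_PointCounter keeps two entries per octave,
// [2*octave+0] counting points after detection and [2*octave+1] after the
// orientation pass has appended secondary-orientation copies. The atomicMax
// calls at kernel start propagate the previous octave's final count, so each
// octave appends to the same global SiftPoint array, and write indices are
// clamped to d_MaxNumPoints-1 (the last slot is overwritten rather than
// writing out of bounds).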
0.5f*(data1[pitch] - data1[-pitch]); float *data0 = d_Data0 + ptr - height*pitch; float *data2 = d_Data0 + ptr + height*pitch; float ds = 0.5f*(data0[0] - data2[0]); float dss = 2.0f*val - data2[0] - data0[0]; float dxs = 0.25f*(data2[1] + data0[-1] - data0[1] - data2[-1]); float dys = 0.25f*(data2[pitch] + data0[-pitch] - data2[-pitch] - data0[pitch]); float idxx = dyy*dss - dys*dys; float idxy = dys*dxs - dxy*dss; float idxs = dxy*dys - dyy*dxs; float idet = __fdividef(1.0f, idxx*dxx + idxy*dxy + idxs*dxs); float idyy = dxx*dss - dxs*dxs; float idys = dxy*dxs - dxx*dys; float idss = dxx*dyy - dxy*dxy; float pdx = idet*(idxx*dx + idxy*dy + idxs*ds); float pdy = idet*(idxy*dx + idyy*dy + idys*ds); float pds = idet*(idxs*dx + idys*dy + idss*ds); if (pdx<-0.5f || pdx>0.5f || pdy<-0.5f || pdy>0.5f || pds<-0.5f || pds>0.5f) { pdx = __fdividef(dx, dxx); pdy = __fdividef(dy, dyy); pds = __fdividef(ds, dss); } float dval = 0.5f*(dx*pdx + dy*pdy + ds*pds); int maxPts = d_MaxNumPoints; float sc = powf(2.0f, (float)scale/NUM_SCALES) * exp2f(pds*factor); if (sc>=lowestScale) { atomicMax(&d_PointCounter[2*octave+0], d_PointCounter[2*octave-1]); unsigned int idx = atomicInc(&d_PointCounter[2*octave+0], 0x7fffffff); idx = (idx>=maxPts ? maxPts-1 : idx); d_Sift[idx].xpos = xpos + pdx; d_Sift[idx].ypos = ypos + pdy; d_Sift[idx].scale = sc; d_Sift[idx].sharpness = val + dval; d_Sift[idx].edgeness = edge; d_Sift[idx].subsampling = subsampling; } } } } __global__ void FindPointsMultiOld(float *d_Data0, SiftPoint *d_Sift, int width, int pitch, int height, float subsampling, float lowestScale, float thresh, float factor, float edgeLimit, int octave) { #define MEMWID (MINMAX_W + 2) __shared__ float ymin1[MEMWID], ymin2[MEMWID], ymin3[MEMWID]; __shared__ float ymax1[MEMWID], ymax2[MEMWID], ymax3[MEMWID]; __shared__ unsigned int cnt; __shared__ unsigned short points[3*MEMWID]; if (blockIdx.x==0 && blockIdx.y==0 && threadIdx.x==0) { atomicMax(&d_PointCounter[2*octave+0], d_PointCounter[2*octave-1]); atomicMax(&d_PointCounter[2*octave+1], d_PointCounter[2*octave-1]); } int tx = threadIdx.x; int block = blockIdx.x/NUM_SCALES; int scale = blockIdx.x - NUM_SCALES*block; int minx = block*MINMAX_W; int maxx = min(minx + MINMAX_W, width); int xpos = minx + tx; int size = pitch*height; int ptr = size*scale + max(min(xpos-1, width-1), 0); int yloops = min(height - MINMAX_H*blockIdx.y, MINMAX_H); float maxv = 0.0f; for (int y=0;y<yloops;y++) { int ypos = MINMAX_H*blockIdx.y + y; int yptr1 = ptr + ypos*pitch; float val = d_Data0[yptr1 + 1*size]; maxv = fmaxf(maxv, fabs(val)); } maxv = fmaxf(maxv, ShiftDown(maxv, 16, MINMAX_W)); maxv = fmaxf(maxv, ShiftDown(maxv, 8, MINMAX_W)); maxv = fmaxf(maxv, ShiftDown(maxv, 4, MINMAX_W)); maxv = fmaxf(maxv, ShiftDown(maxv, 2, MINMAX_W)); maxv = fmaxf(maxv, ShiftDown(maxv, 1, MINMAX_W)); if (Shuffle(maxv, 0)<=thresh) return; if (tx==0) cnt = 0; __syncthreads(); for (int y=0;y<yloops;y++) { int ypos = MINMAX_H*blockIdx.y + y; int yptr1 = ptr + ypos*pitch; int yptr0 = ptr + max(0,ypos-1)*pitch; int yptr2 = ptr + min(height-1,ypos+1)*pitch; float d20 = d_Data0[yptr0 + 1*size]; float d21 = d_Data0[yptr1 + 1*size]; float d22 = d_Data0[yptr2 + 1*size]; float d31 = d_Data0[yptr1 + 2*size]; float d11 = d_Data0[yptr1]; float d10 = d_Data0[yptr0]; float d12 = d_Data0[yptr2]; ymin1[tx] = fminf(fminf(d10, d11), d12); ymax1[tx] = fmaxf(fmaxf(d10, d11), d12); float d30 = d_Data0[yptr0 + 2*size]; float d32 = d_Data0[yptr2 + 2*size]; ymin3[tx] = fminf(fminf(d30, d31), d32); ymax3[tx] = fmaxf(fmaxf(d30, 
d31), d32); ymin2[tx] = fminf(fminf(ymin1[tx], fminf(fminf(d20, d22), d21)), ymin3[tx]); ymax2[tx] = fmaxf(fmaxf(ymax1[tx], fmaxf(fmaxf(d20, d22), d21)), ymax3[tx]); __syncthreads(); if (tx>0 && tx<MINMAX_W+1 && xpos<=maxx) { if (d21<-thresh) { float minv = fminf(fminf(fminf(ymin2[tx-1], ymin2[tx+1]), ymin1[tx]), ymin3[tx]); minv = fminf(fminf(minv, d20), d22); if (d21<minv) { int pos = atomicInc(&cnt, MEMWID-1); points[3*pos+0] = xpos - 1; points[3*pos+1] = ypos; points[3*pos+2] = scale; } } if (d21>thresh) { float maxv = fmaxf(fmaxf(fmaxf(ymax2[tx-1], ymax2[tx+1]), ymax1[tx]), ymax3[tx]); maxv = fmaxf(fmaxf(maxv, d20), d22); if (d21>maxv) { int pos = atomicInc(&cnt, MEMWID-1); points[3*pos+0] = xpos - 1; points[3*pos+1] = ypos; points[3*pos+2] = scale; } } } __syncthreads(); } if (tx<cnt) { int xpos = points[3*tx+0]; int ypos = points[3*tx+1]; int scale = points[3*tx+2]; int ptr = xpos + (ypos + (scale+1)*height)*pitch; float val = d_Data0[ptr]; float *data1 = &d_Data0[ptr]; float dxx = 2.0f*val - data1[-1] - data1[1]; float dyy = 2.0f*val - data1[-pitch] - data1[pitch]; float dxy = 0.25f*(data1[+pitch+1] + data1[-pitch-1] - data1[-pitch+1] - data1[+pitch-1]); float tra = dxx + dyy; float det = dxx*dyy - dxy*dxy; if (tra*tra<edgeLimit*det) { float edge = __fdividef(tra*tra, det); float dx = 0.5f*(data1[1] - data1[-1]); float dy = 0.5f*(data1[pitch] - data1[-pitch]); float *data0 = d_Data0 + ptr - height*pitch; float *data2 = d_Data0 + ptr + height*pitch; float ds = 0.5f*(data0[0] - data2[0]); float dss = 2.0f*val - data2[0] - data0[0]; float dxs = 0.25f*(data2[1] + data0[-1] - data0[1] - data2[-1]); float dys = 0.25f*(data2[pitch] + data0[-pitch] - data2[-pitch] - data0[pitch]); float idxx = dyy*dss - dys*dys; float idxy = dys*dxs - dxy*dss; float idxs = dxy*dys - dyy*dxs; float idet = __fdividef(1.0f, idxx*dxx + idxy*dxy + idxs*dxs); float idyy = dxx*dss - dxs*dxs; float idys = dxy*dxs - dxx*dys; float idss = dxx*dyy - dxy*dxy; float pdx = idet*(idxx*dx + idxy*dy + idxs*ds); float pdy = idet*(idxy*dx + idyy*dy + idys*ds); float pds = idet*(idxs*dx + idys*dy + idss*ds); if (pdx<-0.5f || pdx>0.5f || pdy<-0.5f || pdy>0.5f || pds<-0.5f || pds>0.5f) { pdx = __fdividef(dx, dxx); pdy = __fdividef(dy, dyy); pds = __fdividef(ds, dss); } float dval = 0.5f*(dx*pdx + dy*pdy + ds*pds); int maxPts = d_MaxNumPoints; float sc = powf(2.0f, (float)scale/NUM_SCALES) * exp2f(pds*factor); if (sc>=lowestScale) { unsigned int idx = atomicInc(&d_PointCounter[2*octave+0], 0x7fffffff); idx = (idx>=maxPts ? 
maxPts-1 : idx); d_Sift[idx].xpos = xpos + pdx; d_Sift[idx].ypos = ypos + pdy; d_Sift[idx].scale = sc; d_Sift[idx].sharpness = val + dval; d_Sift[idx].edgeness = edge; d_Sift[idx].subsampling = subsampling; } } } } __global__ void LaplaceMultiTex(hipTextureObject_t texObj, float *d_Result, int width, int pitch, int height, int octave) { __shared__ float data1[(LAPLACE_W + 2*LAPLACE_R)*LAPLACE_S]; __shared__ float data2[LAPLACE_W*LAPLACE_S]; const int tx = threadIdx.x; const int xp = blockIdx.x*LAPLACE_W + tx; const int yp = blockIdx.y; const int scale = threadIdx.y; float *kernel = d_LaplaceKernel + octave*12*16 + scale*16; float *sdata1 = data1 + (LAPLACE_W + 2*LAPLACE_R)*scale; float x = xp-3.5; float y = yp+0.5; sdata1[tx] = kernel[0]*tex2D<float>(texObj, x, y) + kernel[1]*(tex2D<float>(texObj, x, y-1.0) + tex2D<float>(texObj, x, y+1.0)) + kernel[2]*(tex2D<float>(texObj, x, y-2.0) + tex2D<float>(texObj, x, y+2.0)) + kernel[3]*(tex2D<float>(texObj, x, y-3.0) + tex2D<float>(texObj, x, y+3.0)) + kernel[4]*(tex2D<float>(texObj, x, y-4.0) + tex2D<float>(texObj, x, y+4.0)); __syncthreads(); float *sdata2 = data2 + LAPLACE_W*scale; if (tx<LAPLACE_W) { sdata2[tx] = kernel[0]*sdata1[tx+4] + kernel[1]*(sdata1[tx+3] + sdata1[tx+5]) + kernel[2]*(sdata1[tx+2] + sdata1[tx+6]) + kernel[3]*(sdata1[tx+1] + sdata1[tx+7]) + kernel[4]*(sdata1[tx+0] + sdata1[tx+8]); } __syncthreads(); if (tx<LAPLACE_W && scale<LAPLACE_S-1 && xp<width) d_Result[scale*height*pitch + yp*pitch + xp] = sdata2[tx] - sdata2[tx+LAPLACE_W]; } __global__ void LaplaceMultiMem(float *d_Image, float *d_Result, int width, int pitch, int height, int octave) { __shared__ float buff[(LAPLACE_W + 2*LAPLACE_R)*LAPLACE_S]; const int tx = threadIdx.x; const int xp = blockIdx.x*LAPLACE_W + tx; const int yp = blockIdx.y; float *data = d_Image + max(min(xp - LAPLACE_R, width-1), 0); float temp[2*LAPLACE_R + 1], kern[LAPLACE_S][LAPLACE_R + 1]; if (xp<(width + 2*LAPLACE_R)) { for (int i=0;i<=2*LAPLACE_R;i++) temp[i] = data[max(0, min(yp + i - LAPLACE_R, height - 1))*pitch]; for (int scale=0;scale<LAPLACE_S;scale++) { float *buf = buff + (LAPLACE_W + 2*LAPLACE_R)*scale; float *kernel = d_LaplaceKernel + octave*12*16 + scale*16; for (int i=0;i<=LAPLACE_R;i++) kern[scale][i] = kernel[i]; float sum = kern[scale][0]*temp[LAPLACE_R]; #pragma unroll for (int j=1;j<=LAPLACE_R;j++) sum += kern[scale][j]*(temp[LAPLACE_R - j] + temp[LAPLACE_R + j]); buf[tx] = sum; } } __syncthreads(); if (tx<LAPLACE_W && xp<width) { int scale = 0; float oldRes = kern[scale][0]*buff[tx + LAPLACE_R]; #pragma unroll for (int j=1;j<=LAPLACE_R;j++) oldRes += kern[scale][j]*(buff[tx + LAPLACE_R - j] + buff[tx + LAPLACE_R + j]); for (int scale=1;scale<LAPLACE_S;scale++) { float *buf = buff + (LAPLACE_W + 2*LAPLACE_R)*scale; float res = kern[scale][0]*buf[tx + LAPLACE_R]; #pragma unroll for (int j=1;j<=LAPLACE_R;j++) res += kern[scale][j]*(buf[tx + LAPLACE_R - j] + buf[tx + LAPLACE_R + j]); d_Result[(scale-1)*height*pitch + yp*pitch + xp] = res - oldRes; oldRes = res; } } } __global__ void LaplaceMultiMemWide(float *d_Image, float *d_Result, int width, int pitch, int height, int octave) { __shared__ float buff[(LAPLACE_W + 2*LAPLACE_R)*LAPLACE_S]; const int tx = threadIdx.x; const int xp = blockIdx.x*LAPLACE_W + tx; const int xp4 = blockIdx.x*LAPLACE_W + 4*tx; const int yp = blockIdx.y; float kern[LAPLACE_S][LAPLACE_R+1]; float *data = d_Image + max(min(xp - 4, width-1), 0); float temp[9]; if (xp<(width + 2*LAPLACE_R)) { for (int i=0;i<4;i++) temp[i] = data[max(0, min(yp+i-4, 
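// The LaplaceMulti* family builds the multi-scale DoG stack for one octave:
// each block filters a row segment with LAPLACE_S separable kernels taken from
// constant memory (d_LaplaceKernel + octave*12*16 + scale*16, i.e. 16 floats
// reserved per scale) and writes the difference between consecutive blur
// scales into d_Result. LaplaceMultiMem keeps the column pass in registers
// (temp[]) and the row pass in shared memory; LaplaceMultiMemWide additionally
// processes four output pixels per thread through float4 loads and stores.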
height-1))*pitch]; for (int i=4;i<8+1;i++) temp[i] = data[min(yp+i-4, height-1)*pitch]; for (int scale=0;scale<LAPLACE_S;scale++) { float *kernel = d_LaplaceKernel + octave*12*16 + scale*16; for (int i=0;i<=LAPLACE_R;i++) kern[scale][i] = kernel[LAPLACE_R - i]; float *buf = buff + (LAPLACE_W + 2*LAPLACE_R)*scale; buf[tx] = kern[scale][4]*temp[4] + kern[scale][3]*(temp[3] + temp[5]) + kern[scale][2]*(temp[2] + temp[6]) + kern[scale][1]*(temp[1] + temp[7]) + kern[scale][0]*(temp[0] + temp[8]); } } __syncthreads(); if (tx<LAPLACE_W/4 && xp4<width) { float4 b0 = reinterpret_cast<float4*>(buff)[tx+0]; float4 b1 = reinterpret_cast<float4*>(buff)[tx+1]; float4 b2 = reinterpret_cast<float4*>(buff)[tx+2]; float4 old4, new4, dif4; old4.x = kern[0][4]*b1.x + kern[0][3]*(b0.w + b1.y) + kern[0][2]*(b0.z + b1.z) + kern[0][1]*(b0.y + b1.w) + kern[0][0]*(b0.x + b2.x); old4.y = kern[0][4]*b1.y + kern[0][3]*(b1.x + b1.z) + kern[0][2]*(b0.w + b1.w) + kern[0][1]*(b0.z + b2.x) + kern[0][0]*(b0.y + b2.y); old4.z = kern[0][4]*b1.z + kern[0][3]*(b1.y + b1.w) + kern[0][2]*(b1.x + b2.x) + kern[0][1]*(b0.w + b2.y) + kern[0][0]*(b0.z + b2.z); old4.w = kern[0][4]*b1.w + kern[0][3]*(b1.z + b2.x) + kern[0][2]*(b1.y + b2.y) + kern[0][1]*(b1.x + b2.z) + kern[0][0]*(b0.w + b2.w); for (int scale=1;scale<LAPLACE_S;scale++) { float *buf = buff + (LAPLACE_W + 2*LAPLACE_R)*scale; float4 b0 = reinterpret_cast<float4*>(buf)[tx+0]; float4 b1 = reinterpret_cast<float4*>(buf)[tx+1]; float4 b2 = reinterpret_cast<float4*>(buf)[tx+2]; new4.x = kern[scale][4]*b1.x + kern[scale][3]*(b0.w + b1.y) + kern[scale][2]*(b0.z + b1.z) + kern[scale][1]*(b0.y + b1.w) + kern[scale][0]*(b0.x + b2.x); new4.y = kern[scale][4]*b1.y + kern[scale][3]*(b1.x + b1.z) + kern[scale][2]*(b0.w + b1.w) + kern[scale][1]*(b0.z + b2.x) + kern[scale][0]*(b0.y + b2.y); new4.z = kern[scale][4]*b1.z + kern[scale][3]*(b1.y + b1.w) + kern[scale][2]*(b1.x + b2.x) + kern[scale][1]*(b0.w + b2.y) + kern[scale][0]*(b0.z + b2.z); new4.w = kern[scale][4]*b1.w + kern[scale][3]*(b1.z + b2.x) + kern[scale][2]*(b1.y + b2.y) + kern[scale][1]*(b1.x + b2.z) + kern[scale][0]*(b0.w + b2.w); dif4.x = new4.x - old4.x; dif4.y = new4.y - old4.y; dif4.z = new4.z - old4.z; dif4.w = new4.w - old4.w; reinterpret_cast<float4*>(&d_Result[(scale-1)*height*pitch + yp*pitch + xp4])[0] = dif4; old4 = new4; } } } __global__ void LaplaceMultiMemTest(float *d_Image, float *d_Result, int width, int pitch, int height, int octave) { __shared__ float data1[(LAPLACE_W + 2*LAPLACE_R)*LAPLACE_S]; __shared__ float data2[LAPLACE_W*LAPLACE_S]; const int tx = threadIdx.x; const int xp = blockIdx.x*LAPLACE_W + tx; const int yp = LAPLACE_H*blockIdx.y; const int scale = threadIdx.y; float *kernel = d_LaplaceKernel + octave*12*16 + scale*16; float *sdata1 = data1 + (LAPLACE_W + 2*LAPLACE_R)*scale; float *data = d_Image + max(min(xp - 4, width-1), 0); int h = height-1; float temp[8+LAPLACE_H], kern[LAPLACE_R+1]; for (int i=0;i<4;i++) temp[i] = data[max(0, min(yp+i-4, h))*pitch]; for (int i=4;i<8+LAPLACE_H;i++) temp[i] = data[min(yp+i-4, h)*pitch]; for (int i=0;i<=LAPLACE_R;i++) kern[i] = kernel[LAPLACE_R - i]; for (int j=0;j<LAPLACE_H;j++) { sdata1[tx] = kern[4]*temp[4+j] + kern[3]*(temp[3+j] + temp[5+j]) + kern[2]*(temp[2+j] + temp[6+j]) + kern[1]*(temp[1+j] + temp[7+j]) + kern[0]*(temp[0+j] + temp[8+j]); __syncthreads(); float *sdata2 = data2 + LAPLACE_W*scale; if (tx<LAPLACE_W) { sdata2[tx] = kern[4]*sdata1[tx+4] + kern[3]*(sdata1[tx+3] + sdata1[tx+5]) + kern[2]*(sdata1[tx+2] + sdata1[tx+6]) + kern[1]*(sdata1[tx+1] + 
sdata1[tx+7]) + kern[0]*(sdata1[tx+0] + sdata1[tx+8]); } __syncthreads(); if (tx<LAPLACE_W && scale<LAPLACE_S-1 && xp<width && (yp+j)<height) d_Result[scale*height*pitch + (yp+j)*pitch + xp] = sdata2[tx] - sdata2[tx+LAPLACE_W]; } } __global__ void LaplaceMultiMemOld(float *d_Image, float *d_Result, int width, int pitch, int height, int octave) { __shared__ float data1[(LAPLACE_W + 2*LAPLACE_R)*LAPLACE_S]; __shared__ float data2[LAPLACE_W*LAPLACE_S]; const int tx = threadIdx.x; const int xp = blockIdx.x*LAPLACE_W + tx; const int yp = blockIdx.y; const int scale = threadIdx.y; float *kernel = d_LaplaceKernel + octave*12*16 + scale*16; float *sdata1 = data1 + (LAPLACE_W + 2*LAPLACE_R)*scale; float *data = d_Image + max(min(xp - 4, width-1), 0); int h = height-1; sdata1[tx] = kernel[0]*data[min(yp, h)*pitch] + kernel[1]*(data[max(0, min(yp-1, h))*pitch] + data[min(yp+1, h)*pitch]) + kernel[2]*(data[max(0, min(yp-2, h))*pitch] + data[min(yp+2, h)*pitch]) + kernel[3]*(data[max(0, min(yp-3, h))*pitch] + data[min(yp+3, h)*pitch]) + kernel[4]*(data[max(0, min(yp-4, h))*pitch] + data[min(yp+4, h)*pitch]); __syncthreads(); float *sdata2 = data2 + LAPLACE_W*scale; if (tx<LAPLACE_W) { sdata2[tx] = kernel[0]*sdata1[tx+4] + kernel[1]*(sdata1[tx+3] + sdata1[tx+5]) + kernel[2]*(sdata1[tx+2] + sdata1[tx+6]) + kernel[3]*(sdata1[tx+1] + sdata1[tx+7]) + kernel[4]*(sdata1[tx+0] + sdata1[tx+8]); } __syncthreads(); if (tx<LAPLACE_W && scale<LAPLACE_S-1 && xp<width) d_Result[scale*height*pitch + yp*pitch + xp] = sdata2[tx] - sdata2[tx+LAPLACE_W]; } __global__ void LowPass(float *d_Image, float *d_Result, int width, int pitch, int height) { __shared__ float buffer[(LOWPASS_W + 2*LOWPASS_R)*LOWPASS_H]; const int tx = threadIdx.x; const int ty = threadIdx.y; const int xp = blockIdx.x*LOWPASS_W + tx; const int yp = blockIdx.y*LOWPASS_H + ty; float *kernel = d_LowPassKernel; float *data = d_Image + max(min(xp - 4, width-1), 0); float *buff = buffer + ty*(LOWPASS_W + 2*LOWPASS_R); int h = height-1; if (yp<height) buff[tx] = kernel[4]*data[min(yp, h)*pitch] + kernel[3]*(data[max(0, min(yp-1, h))*pitch] + data[min(yp+1, h)*pitch]) + kernel[2]*(data[max(0, min(yp-2, h))*pitch] + data[min(yp+2, h)*pitch]) + kernel[1]*(data[max(0, min(yp-3, h))*pitch] + data[min(yp+3, h)*pitch]) + kernel[0]*(data[max(0, min(yp-4, h))*pitch] + data[min(yp+4, h)*pitch]); __syncthreads(); if (tx<LOWPASS_W && xp<width && yp<height) d_Result[yp*pitch + xp] = kernel[4]*buff[tx+4] + kernel[3]*(buff[tx+3] + buff[tx+5]) + kernel[2]*(buff[tx+2] + buff[tx+6]) + kernel[1]*(buff[tx+1] + buff[tx+7]) + kernel[0]*(buff[tx+0] + buff[tx+8]); } __global__ void LowPassBlockOld(float *d_Image, float *d_Result, int width, int pitch, int height) { __shared__ float xrows[16][32]; const int tx = threadIdx.x; const int ty = threadIdx.y; const int xp = blockIdx.x*LOWPASS_W + tx; const int yp = blockIdx.y*LOWPASS_H + ty; const int N = 16; float *k = d_LowPassKernel; int xl = max(min(xp - 4, width-1), 0); for (int l=-8;l<=LOWPASS_H;l+=4) { if (l<LOWPASS_H) { int yl = max(min(yp + l + 4, height-1), 0); float val = d_Image[yl*pitch + xl]; xrows[(l + 8 + ty)%N][tx] = k[4]*ShiftDown(val, 4) + k[3]*(ShiftDown(val, 5) + ShiftDown(val, 3)) + k[2]*(ShiftDown(val, 6) + ShiftDown(val, 2)) + k[1]*(ShiftDown(val, 7) + ShiftDown(val, 1)) + k[0]*(ShiftDown(val, 8) + val); } if (l>=4) { int ys = yp + l - 4; if (xp<width && ys<height && tx<LOWPASS_W) d_Result[ys*pitch + xp] = k[4]*xrows[(l + 0 + ty)%N][tx] + k[3]*(xrows[(l - 1 + ty)%N][tx] + xrows[(l + 1 + ty)%N][tx]) + k[2]*(xrows[(l 
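// LowPass / LowPassBlock apply the 9-tap separable filter in d_LowPassKernel
// (k[4] is the centre weight). In the LowPassBlock variants the horizontal
// pass never touches shared memory: each lane holds one pixel and fetches its
// neighbours with ShiftDown(val, n) register shuffles, e.g.
//   k[4]*ShiftDown(val,4) + k[3]*(ShiftDown(val,5) + ShiftDown(val,3)) + ...
// A 16-row ring buffer xrows[16][32] then lets the vertical pass slide down
// the tile while each image row is read only once.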
- 2 + ty)%N][tx] + xrows[(l + 2 + ty)%N][tx]) + k[1]*(xrows[(l - 3 + ty)%N][tx] + xrows[(l + 3 + ty)%N][tx]) + k[0]*(xrows[(l - 4 + ty)%N][tx] + xrows[(l + 4 + ty)%N][tx]); } if (l>=0) __syncthreads(); } } __global__ void LowPassBlock(float *d_Image, float *d_Result, int width, int pitch, int height) { __shared__ float xrows[16][32]; const int tx = threadIdx.x; const int ty = threadIdx.y; const int xp = blockIdx.x*LOWPASS_W + tx; const int yp = blockIdx.y*LOWPASS_H + ty; const int N = 16; float *k = d_LowPassKernel; int xl = max(min(xp - 4, width-1), 0); #pragma unroll for (int l=-8;l<4;l+=4) { int ly = l + ty; int yl = max(min(yp + l + 4, height-1), 0); float val = d_Image[yl*pitch + xl]; val = k[4]*ShiftDown(val, 4) + k[3]*(ShiftDown(val, 5) + ShiftDown(val, 3)) + k[2]*(ShiftDown(val, 6) + ShiftDown(val, 2)) + k[1]*(ShiftDown(val, 7) + ShiftDown(val, 1)) + k[0]*(ShiftDown(val, 8) + val); xrows[ly + 8][tx] = val; } __syncthreads(); #pragma unroll for (int l=4;l<LOWPASS_H;l+=4) { int ly = l + ty; int yl = min(yp + l + 4, height-1); float val = d_Image[yl*pitch + xl]; val = k[4]*ShiftDown(val, 4) + k[3]*(ShiftDown(val, 5) + ShiftDown(val, 3)) + k[2]*(ShiftDown(val, 6) + ShiftDown(val, 2)) + k[1]*(ShiftDown(val, 7) + ShiftDown(val, 1)) + k[0]*(ShiftDown(val, 8) + val); xrows[(ly + 8)%N][tx] = val; int ys = yp + l - 4; if (xp<width && ys<height && tx<LOWPASS_W) d_Result[ys*pitch + xp] = k[4]*xrows[(ly + 0)%N][tx] + k[3]*(xrows[(ly - 1)%N][tx] + xrows[(ly + 1)%N][tx]) + k[2]*(xrows[(ly - 2)%N][tx] + xrows[(ly + 2)%N][tx]) + k[1]*(xrows[(ly - 3)%N][tx] + xrows[(ly + 3)%N][tx]) + k[0]*(xrows[(ly - 4)%N][tx] + xrows[(ly + 4)%N][tx]); __syncthreads(); } int ly = LOWPASS_H + ty; int ys = yp + LOWPASS_H - 4; if (xp<width && ys<height && tx<LOWPASS_W) d_Result[ys*pitch + xp] = k[4]*xrows[(ly + 0)%N][tx] + k[3]*(xrows[(ly - 1)%N][tx] + xrows[(ly + 1)%N][tx]) + k[2]*(xrows[(ly - 2)%N][tx] + xrows[(ly + 2)%N][tx]) + k[1]*(xrows[(ly - 3)%N][tx] + xrows[(ly + 3)%N][tx]) + k[0]*(xrows[(ly - 4)%N][tx] + xrows[(ly + 4)%N][tx]); }
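// Host-side reference for the two-stage descriptor normalization performed at
// the end of every ExtractSiftDescriptors* kernel: L2-normalize the 128 bins,
// clamp each bin at 0.2 to suppress large peaks, then L2-normalize again.
// Illustrative sketch only, not used by the kernels above; the function name
// is an assumption and it is intended for CPU-side checking of desc[].
#include <cmath>
static inline void NormalizeDescriptorHost(float desc[128])
{
  float sum1 = 0.0f;
  for (int i = 0; i < 128; i++) sum1 += desc[i]*desc[i];
  if (sum1 <= 0.0f) return;                    // empty descriptor, nothing to do
  const float inv1 = 1.0f/std::sqrt(sum1);
  float sum2 = 0.0f;
  for (int i = 0; i < 128; i++) {
    float v = desc[i]*inv1;
    desc[i] = (v > 0.2f ? 0.2f : v);           // clamp, as in the kernels
    sum2 += desc[i]*desc[i];
  }
  const float inv2 = 1.0f/std::sqrt(sum2);
  for (int i = 0; i < 128; i++) desc[i] *= inv2;
}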
8fb83b44df19a93ca6363114519c7563d9da0100.cu
//********************************************************// // CUDA SIFT extractor by Marten Bjorkman aka Celebrandil // //********************************************************// #include "cudautils.h" #include "cudaSiftD.h" #include "cudaSift.h" /////////////////////////////////////////////////////////////////////////////// // Kernel configuration /////////////////////////////////////////////////////////////////////////////// __constant__ int d_MaxNumPoints; __device__ unsigned int d_PointCounter[8*2+1]; __constant__ float d_ScaleDownKernel[5]; __constant__ float d_LowPassKernel[2*LOWPASS_R+1]; __constant__ float d_LaplaceKernel[8*12*16]; /////////////////////////////////////////////////////////////////////////////// // Lowpass filter and subsample image /////////////////////////////////////////////////////////////////////////////// __global__ void ScaleDownDenseShift(float *d_Result, float *d_Data, int width, int pitch, int height, int newpitch) { #define BW (SCALEDOWN_W+4) #define BH (SCALEDOWN_H+4) #define W2 (SCALEDOWN_W/2) #define H2 (SCALEDOWN_H/2) __shared__ float brows[BH*BW]; const int tx = threadIdx.x; const int ty = threadIdx.y; const int xp = blockIdx.x*SCALEDOWN_W + tx; const int yp = blockIdx.y*SCALEDOWN_H + ty; const float k0 = d_ScaleDownKernel[0]; const float k1 = d_ScaleDownKernel[1]; const float k2 = d_ScaleDownKernel[2]; const int xl = min(width-1, max(0, xp-2)); const int yl = min(height-1, max(0, yp-2)); if (xp<(width+4) && yp<(height+4)) { float v = d_Data[yl*pitch + xl]; brows[BW*ty + tx] = k0*(v + ShiftDown(v, 4)) + k1*(ShiftDown(v, 1) + ShiftDown(v, 3)) + k2*ShiftDown(v, 2); } __syncthreads(); const int xs = blockIdx.x*W2 + tx; const int ys = blockIdx.y*H2 + ty; if (tx<W2 && ty<H2 && xs<(width/2) && ys<(height/2)) { float *ptr = &brows[BW*(ty*2) + (tx*2)]; d_Result[ys*newpitch + xs] = k0*(ptr[0] + ptr[4*BW]) + k1*(ptr[1*BW] + ptr[3*BW]) + k2*ptr[2*BW]; } } __global__ void ScaleDownDense(float *d_Result, float *d_Data, int width, int pitch, int height, int newpitch) { #define BW (SCALEDOWN_W+4) #define BH (SCALEDOWN_H+4) #define W2 (SCALEDOWN_W/2) #define H2 (SCALEDOWN_H/2) __shared__ float irows[BH*BW]; __shared__ float brows[BH*W2]; const int tx = threadIdx.x; const int ty = threadIdx.y; const int xp = blockIdx.x*SCALEDOWN_W + tx; const int yp = blockIdx.y*SCALEDOWN_H + ty; const int xl = min(width-1, max(0, xp-2)); const int yl = min(height-1, max(0, yp-2)); const float k0 = d_ScaleDownKernel[0]; const float k1 = d_ScaleDownKernel[1]; const float k2 = d_ScaleDownKernel[2]; if (xp<(width+4) && yp<(height+4)) irows[BW*ty + tx] = d_Data[yl*pitch + xl]; __syncthreads(); if (yp<(height+4) && tx<W2) { float *ptr = &irows[BW*ty + 2*tx]; brows[W2*ty + tx] = k0*(ptr[0] + ptr[4]) + k1*(ptr[1] + ptr[3]) + k2*ptr[2]; } __syncthreads(); const int xs = blockIdx.x*W2 + tx; const int ys = blockIdx.y*H2 + ty; if (tx<W2 && ty<H2 && xs<(width/2) && ys<(height/2)) { float *ptr = &brows[W2*(ty*2) + tx]; d_Result[ys*newpitch + xs] = k0*(ptr[0] + ptr[4*W2]) + k1*(ptr[1*W2] + ptr[3*W2]) + k2*ptr[2*W2]; } } __global__ void ScaleDown(float *d_Result, float *d_Data, int width, int pitch, int height, int newpitch) { __shared__ float inrow[SCALEDOWN_W+4]; __shared__ float brow[5*(SCALEDOWN_W/2)]; __shared__ int yRead[SCALEDOWN_H+4]; __shared__ int yWrite[SCALEDOWN_H+4]; #define dx2 (SCALEDOWN_W/2) const int tx = threadIdx.x; const int tx0 = tx + 0*dx2; const int tx1 = tx + 1*dx2; const int tx2 = tx + 2*dx2; const int tx3 = tx + 3*dx2; const int tx4 = tx + 4*dx2; const int xStart = 
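// ScaleDown / ScaleDownDense / ScaleDownDenseShift all implement the same
// octave downscaling: a symmetric 5-tap filter [k0 k1 k2 k1 k0] taken from
// d_ScaleDownKernel (k2 is the centre weight), applied separably, followed by
// 2x decimation into d_Result with its own pitch. The DenseShift variant keeps
// the horizontal pass entirely in registers via ShiftDown() shuffles,
// ScaleDownDense stages rows in shared memory, and ScaleDown (this kernel)
// rotates five row buffers in brow[] so each input row is read exactly once.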
blockIdx.x*SCALEDOWN_W; const int yStart = blockIdx.y*SCALEDOWN_H; const int xWrite = xStart/2 + tx; float k0 = d_ScaleDownKernel[0]; float k1 = d_ScaleDownKernel[1]; float k2 = d_ScaleDownKernel[2]; if (tx<SCALEDOWN_H+4) { int y = yStart + tx - 2; y = (y<0 ? 0 : y); y = (y>=height ? height-1 : y); yRead[tx] = y*pitch; yWrite[tx] = (yStart + tx - 4)/2 * newpitch; } __syncthreads(); int xRead = xStart + tx - 2; xRead = (xRead<0 ? 0 : xRead); xRead = (xRead>=width ? width-1 : xRead); int maxtx = min(dx2, width/2 - xStart/2); for (int dy=0;dy<SCALEDOWN_H+4;dy+=5) { { inrow[tx] = d_Data[yRead[dy+0] + xRead]; __syncthreads(); if (tx<maxtx) { brow[tx4] = k0*(inrow[2*tx]+inrow[2*tx+4]) + k1*(inrow[2*tx+1]+inrow[2*tx+3]) + k2*inrow[2*tx+2]; if (dy>=4 && !(dy&1)) d_Result[yWrite[dy+0] + xWrite] = k2*brow[tx2] + k0*(brow[tx0]+brow[tx4]) + k1*(brow[tx1]+brow[tx3]); } __syncthreads(); } if (dy<(SCALEDOWN_H+3)) { inrow[tx] = d_Data[yRead[dy+1] + xRead]; __syncthreads(); if (tx<maxtx) { brow[tx0] = k0*(inrow[2*tx]+inrow[2*tx+4]) + k1*(inrow[2*tx+1]+inrow[2*tx+3]) + k2*inrow[2*tx+2]; if (dy>=3 && (dy&1)) d_Result[yWrite[dy+1] + xWrite] = k2*brow[tx3] + k0*(brow[tx1]+brow[tx0]) + k1*(brow[tx2]+brow[tx4]); } __syncthreads(); } if (dy<(SCALEDOWN_H+2)) { inrow[tx] = d_Data[yRead[dy+2] + xRead]; __syncthreads(); if (tx<maxtx) { brow[tx1] = k0*(inrow[2*tx]+inrow[2*tx+4]) + k1*(inrow[2*tx+1]+inrow[2*tx+3]) + k2*inrow[2*tx+2]; if (dy>=2 && !(dy&1)) d_Result[yWrite[dy+2] + xWrite] = k2*brow[tx4] + k0*(brow[tx2]+brow[tx1]) + k1*(brow[tx3]+brow[tx0]); } __syncthreads(); } if (dy<(SCALEDOWN_H+1)) { inrow[tx] = d_Data[yRead[dy+3] + xRead]; __syncthreads(); if (tx<maxtx) { brow[tx2] = k0*(inrow[2*tx]+inrow[2*tx+4]) + k1*(inrow[2*tx+1]+inrow[2*tx+3]) + k2*inrow[2*tx+2]; if (dy>=1 && (dy&1)) d_Result[yWrite[dy+3] + xWrite] = k2*brow[tx0] + k0*(brow[tx3]+brow[tx2]) + k1*(brow[tx4]+brow[tx1]); } __syncthreads(); } if (dy<SCALEDOWN_H) { inrow[tx] = d_Data[yRead[dy+4] + xRead]; __syncthreads(); if (tx<dx2 && xWrite<width/2) { brow[tx3] = k0*(inrow[2*tx]+inrow[2*tx+4]) + k1*(inrow[2*tx+1]+inrow[2*tx+3]) + k2*inrow[2*tx+2]; if (!(dy&1)) d_Result[yWrite[dy+4] + xWrite] = k2*brow[tx1] + k0*(brow[tx4]+brow[tx3]) + k1*(brow[tx0]+brow[tx2]); } __syncthreads(); } } } __global__ void ScaleUp(float *d_Result, float *d_Data, int width, int pitch, int height, int newpitch) { const int tx = threadIdx.x; const int ty = threadIdx.y; int x = blockIdx.x*SCALEUP_W + 2*tx; int y = blockIdx.y*SCALEUP_H + 2*ty; if (x<2*width && y<2*height) { int xl = blockIdx.x*(SCALEUP_W/2) + tx; int yu = blockIdx.y*(SCALEUP_H/2) + ty; int xr = min(xl + 1, width - 1); int yd = min(yu + 1, height - 1); float vul = d_Data[yu*pitch + xl]; float vur = d_Data[yu*pitch + xr]; float vdl = d_Data[yd*pitch + xl]; float vdr = d_Data[yd*pitch + xr]; d_Result[(y + 0)*newpitch + x + 0] = vul; d_Result[(y + 0)*newpitch + x + 1] = 0.50f*(vul + vur); d_Result[(y + 1)*newpitch + x + 0] = 0.50f*(vul + vdl); d_Result[(y + 1)*newpitch + x + 1] = 0.25f*(vul + vur + vdl + vdr); } } __global__ void ExtractSiftDescriptors(cudaTextureObject_t texObj, SiftPoint *d_sift, int fstPts, float subsampling) { __shared__ float gauss[16]; __shared__ float buffer[128]; __shared__ float sums[4]; const int tx = threadIdx.x; // 0 -> 16 const int ty = threadIdx.y; // 0 -> 8 const int idx = ty*16 + tx; const int bx = blockIdx.x + fstPts; // 0 -> numPts if (ty==0) gauss[tx] = exp(-(tx-7.5f)*(tx-7.5f)/128.0f); buffer[idx] = 0.0f; __syncthreads(); // Compute angles and gradients float theta = 
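// Sampling grid: theta converts the stored orientation from degrees to
// radians (2*pi/360), and (scosa, ssina) = 12/16 * scale * (cos, sin) rotate
// and scale the 16x16 sample grid around the keypoint:
//   xpos = x0 + (tx-7.5)*scosa - (y-7.5)*ssina
//   ypos = y0 + (tx-7.5)*ssina + (y-7.5)*scosa
// The gradients dx, dy are likewise taken along the rotated axes, which is
// what makes the resulting descriptor rotation-invariant.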
2.0f*3.1415f/360.0f*d_sift[bx].orientation; float sina = sinf(theta); // cosa -sina float cosa = cosf(theta); // sina cosa float scale = 12.0f/16.0f*d_sift[bx].scale; float ssina = scale*sina; float scosa = scale*cosa; for (int y=ty;y<16;y+=8) { float xpos = d_sift[bx].xpos + (tx-7.5f)*scosa - (y-7.5f)*ssina + 0.5f; float ypos = d_sift[bx].ypos + (tx-7.5f)*ssina + (y-7.5f)*scosa + 0.5f; float dx = tex2D<float>(texObj, xpos+cosa, ypos+sina) - tex2D<float>(texObj, xpos-cosa, ypos-sina); float dy = tex2D<float>(texObj, xpos-sina, ypos+cosa) - tex2D<float>(texObj, xpos+sina, ypos-cosa); float grad = gauss[y]*gauss[tx] * sqrtf(dx*dx + dy*dy); float angf = 4.0f/3.1415f*atan2f(dy, dx) + 4.0f; int hori = (tx + 2)/4 - 1; // Convert from (tx,y,angle) to bins float horf = (tx - 1.5f)/4.0f - hori; float ihorf = 1.0f - horf; int veri = (y + 2)/4 - 1; float verf = (y - 1.5f)/4.0f - veri; float iverf = 1.0f - verf; int angi = angf; int angp = (angi<7 ? angi+1 : 0); angf -= angi; float iangf = 1.0f - angf; int hist = 8*(4*veri + hori); // Each gradient measure is interpolated int p1 = angi + hist; // in angles, xpos and ypos -> 8 stores int p2 = angp + hist; if (tx>=2) { float grad1 = ihorf*grad; if (y>=2) { // Upper left float grad2 = iverf*grad1; atomicAdd(buffer + p1, iangf*grad2); atomicAdd(buffer + p2, angf*grad2); } if (y<=13) { // Lower left float grad2 = verf*grad1; atomicAdd(buffer + p1+32, iangf*grad2); atomicAdd(buffer + p2+32, angf*grad2); } } if (tx<=13) { float grad1 = horf*grad; if (y>=2) { // Upper right float grad2 = iverf*grad1; atomicAdd(buffer + p1+8, iangf*grad2); atomicAdd(buffer + p2+8, angf*grad2); } if (y<=13) { // Lower right float grad2 = verf*grad1; atomicAdd(buffer + p1+40, iangf*grad2); atomicAdd(buffer + p2+40, angf*grad2); } } } __syncthreads(); // Normalize twice and suppress peaks first time float sum = buffer[idx]*buffer[idx]; for (int i=16;i>0;i/=2) sum += ShiftDown(sum, i); if ((idx&31)==0) sums[idx/32] = sum; __syncthreads(); float tsum1 = sums[0] + sums[1] + sums[2] + sums[3]; tsum1 = min(buffer[idx] * rsqrtf(tsum1), 0.2f); sum = tsum1*tsum1; for (int i=16;i>0;i/=2) sum += ShiftDown(sum, i); if ((idx&31)==0) sums[idx/32] = sum; __syncthreads(); float tsum2 = sums[0] + sums[1] + sums[2] + sums[3]; float *desc = d_sift[bx].data; desc[idx] = tsum1 * rsqrtf(tsum2); if (idx==0) { d_sift[bx].xpos *= subsampling; d_sift[bx].ypos *= subsampling; d_sift[bx].scale *= subsampling; } } __device__ float FastAtan2(float y, float x) { float absx = abs(x); float absy = abs(y); float a = __fdiv_rn(min(absx, absy), max(absx, absy)); float s = a*a; float r = ((-0.0464964749f*s + 0.15931422f)*s - 0.327622764f)*s*a + a; r = (absy>absx ? 1.57079637f - r : r); r = (x<0 ? 3.14159274f - r : r); r = (y<0 ? 
-r : r); return r; } __global__ void ExtractSiftDescriptorsCONSTNew(cudaTextureObject_t texObj, SiftPoint *d_sift, float subsampling, int octave) { __shared__ float gauss[16]; __shared__ float buffer[128]; __shared__ float sums[4]; const int tx = threadIdx.x; // 0 -> 16 const int ty = threadIdx.y; // 0 -> 8 const int idx = ty*16 + tx; if (ty==0) gauss[tx] = __expf(-(tx-7.5f)*(tx-7.5f)/128.0f); int fstPts = min(d_PointCounter[2*octave-1], d_MaxNumPoints); int totPts = min(d_PointCounter[2*octave+1], d_MaxNumPoints); //if (tx==0 && ty==0) // printf("%d %d %d %d\n", octave, fstPts, min(d_PointCounter[2*octave], d_MaxNumPoints), totPts); for (int bx = blockIdx.x + fstPts; bx < totPts; bx += gridDim.x) { buffer[idx] = 0.0f; __syncthreads(); // Compute angles and gradients float theta = 2.0f*3.1415f/360.0f*d_sift[bx].orientation; float sina = __sinf(theta); // cosa -sina float cosa = __cosf(theta); // sina cosa float scale = 12.0f/16.0f*d_sift[bx].scale; float ssina = scale*sina; float scosa = scale*cosa; for (int y=ty;y<16;y+=8) { float xpos = d_sift[bx].xpos + (tx-7.5f)*scosa - (y-7.5f)*ssina + 0.5f; float ypos = d_sift[bx].ypos + (tx-7.5f)*ssina + (y-7.5f)*scosa + 0.5f; float dx = tex2D<float>(texObj, xpos+cosa, ypos+sina) - tex2D<float>(texObj, xpos-cosa, ypos-sina); float dy = tex2D<float>(texObj, xpos-sina, ypos+cosa) - tex2D<float>(texObj, xpos+sina, ypos-cosa); float grad = gauss[y]*gauss[tx] * __fsqrt_rn(dx*dx + dy*dy); float angf = 4.0f/3.1415f*FastAtan2(dy, dx) + 4.0f; int hori = (tx + 2)/4 - 1; // Convert from (tx,y,angle) to bins float horf = (tx - 1.5f)/4.0f - hori; float ihorf = 1.0f - horf; int veri = (y + 2)/4 - 1; float verf = (y - 1.5f)/4.0f - veri; float iverf = 1.0f - verf; int angi = angf; int angp = (angi<7 ? angi+1 : 0); angf -= angi; float iangf = 1.0f - angf; int hist = 8*(4*veri + hori); // Each gradient measure is interpolated int p1 = angi + hist; // in angles, xpos and ypos -> 8 stores int p2 = angp + hist; if (tx>=2) { float grad1 = ihorf*grad; if (y>=2) { // Upper left float grad2 = iverf*grad1; atomicAdd(buffer + p1, iangf*grad2); atomicAdd(buffer + p2, angf*grad2); } if (y<=13) { // Lower left float grad2 = verf*grad1; atomicAdd(buffer + p1+32, iangf*grad2); atomicAdd(buffer + p2+32, angf*grad2); } } if (tx<=13) { float grad1 = horf*grad; if (y>=2) { // Upper right float grad2 = iverf*grad1; atomicAdd(buffer + p1+8, iangf*grad2); atomicAdd(buffer + p2+8, angf*grad2); } if (y<=13) { // Lower right float grad2 = verf*grad1; atomicAdd(buffer + p1+40, iangf*grad2); atomicAdd(buffer + p2+40, angf*grad2); } } } __syncthreads(); // Normalize twice and suppress peaks first time float sum = buffer[idx]*buffer[idx]; for (int i=16;i>0;i/=2) sum += ShiftDown(sum, i); if ((idx&31)==0) sums[idx/32] = sum; __syncthreads(); float tsum1 = sums[0] + sums[1] + sums[2] + sums[3]; tsum1 = min(buffer[idx] * rsqrtf(tsum1), 0.2f); sum = tsum1*tsum1; for (int i=16;i>0;i/=2) sum += ShiftDown(sum, i); if ((idx&31)==0) sums[idx/32] = sum; __syncthreads(); float tsum2 = sums[0] + sums[1] + sums[2] + sums[3]; float *desc = d_sift[bx].data; desc[idx] = tsum1 * rsqrtf(tsum2); if (idx==0) { d_sift[bx].xpos *= subsampling; d_sift[bx].ypos *= subsampling; d_sift[bx].scale *= subsampling; } __syncthreads(); } } __global__ void ExtractSiftDescriptorsCONST(cudaTextureObject_t texObj, SiftPoint *d_sift, float subsampling, int octave) { __shared__ float gauss[16]; __shared__ float buffer[128]; __shared__ float sums[4]; const int tx = threadIdx.x; // 0 -> 16 const int ty = threadIdx.y; // 0 -> 8 const 
int idx = ty*16 + tx; if (ty==0) gauss[tx] = exp(-(tx-7.5f)*(tx-7.5f)/128.0f); int fstPts = min(d_PointCounter[2*octave-1], d_MaxNumPoints); int totPts = min(d_PointCounter[2*octave+1], d_MaxNumPoints); //if (tx==0 && ty==0) // printf("%d %d %d %d\n", octave, fstPts, min(d_PointCounter[2*octave], d_MaxNumPoints), totPts); for (int bx = blockIdx.x + fstPts; bx < totPts; bx += gridDim.x) { buffer[idx] = 0.0f; __syncthreads(); // Compute angles and gradients float theta = 2.0f*3.1415f/360.0f*d_sift[bx].orientation; float sina = sinf(theta); // cosa -sina float cosa = cosf(theta); // sina cosa float scale = 12.0f/16.0f*d_sift[bx].scale; float ssina = scale*sina; float scosa = scale*cosa; for (int y=ty;y<16;y+=8) { float xpos = d_sift[bx].xpos + (tx-7.5f)*scosa - (y-7.5f)*ssina + 0.5f; float ypos = d_sift[bx].ypos + (tx-7.5f)*ssina + (y-7.5f)*scosa + 0.5f; float dx = tex2D<float>(texObj, xpos+cosa, ypos+sina) - tex2D<float>(texObj, xpos-cosa, ypos-sina); float dy = tex2D<float>(texObj, xpos-sina, ypos+cosa) - tex2D<float>(texObj, xpos+sina, ypos-cosa); float grad = gauss[y]*gauss[tx] * sqrtf(dx*dx + dy*dy); float angf = 4.0f/3.1415f*atan2f(dy, dx) + 4.0f; int hori = (tx + 2)/4 - 1; // Convert from (tx,y,angle) to bins float horf = (tx - 1.5f)/4.0f - hori; float ihorf = 1.0f - horf; int veri = (y + 2)/4 - 1; float verf = (y - 1.5f)/4.0f - veri; float iverf = 1.0f - verf; int angi = angf; int angp = (angi<7 ? angi+1 : 0); angf -= angi; float iangf = 1.0f - angf; int hist = 8*(4*veri + hori); // Each gradient measure is interpolated int p1 = angi + hist; // in angles, xpos and ypos -> 8 stores int p2 = angp + hist; if (tx>=2) { float grad1 = ihorf*grad; if (y>=2) { // Upper left float grad2 = iverf*grad1; atomicAdd(buffer + p1, iangf*grad2); atomicAdd(buffer + p2, angf*grad2); } if (y<=13) { // Lower left float grad2 = verf*grad1; atomicAdd(buffer + p1+32, iangf*grad2); atomicAdd(buffer + p2+32, angf*grad2); } } if (tx<=13) { float grad1 = horf*grad; if (y>=2) { // Upper right float grad2 = iverf*grad1; atomicAdd(buffer + p1+8, iangf*grad2); atomicAdd(buffer + p2+8, angf*grad2); } if (y<=13) { // Lower right float grad2 = verf*grad1; atomicAdd(buffer + p1+40, iangf*grad2); atomicAdd(buffer + p2+40, angf*grad2); } } } __syncthreads(); // Normalize twice and suppress peaks first time float sum = buffer[idx]*buffer[idx]; for (int i=16;i>0;i/=2) sum += ShiftDown(sum, i); if ((idx&31)==0) sums[idx/32] = sum; __syncthreads(); float tsum1 = sums[0] + sums[1] + sums[2] + sums[3]; tsum1 = min(buffer[idx] * rsqrtf(tsum1), 0.2f); sum = tsum1*tsum1; for (int i=16;i>0;i/=2) sum += ShiftDown(sum, i); if ((idx&31)==0) sums[idx/32] = sum; __syncthreads(); float tsum2 = sums[0] + sums[1] + sums[2] + sums[3]; float *desc = d_sift[bx].data; desc[idx] = tsum1 * rsqrtf(tsum2); if (idx==0) { d_sift[bx].xpos *= subsampling; d_sift[bx].ypos *= subsampling; d_sift[bx].scale *= subsampling; } __syncthreads(); } } __global__ void ExtractSiftDescriptorsOld(cudaTextureObject_t texObj, SiftPoint *d_sift, int fstPts, float subsampling) { __shared__ float gauss[16]; __shared__ float buffer[128]; __shared__ float sums[128]; const int tx = threadIdx.x; // 0 -> 16 const int ty = threadIdx.y; // 0 -> 8 const int idx = ty*16 + tx; const int bx = blockIdx.x + fstPts; // 0 -> numPts if (ty==0) gauss[tx] = exp(-(tx-7.5f)*(tx-7.5f)/128.0f); buffer[idx] = 0.0f; __syncthreads(); // Compute angles and gradients float theta = 2.0f*3.1415f/360.0f*d_sift[bx].orientation; float sina = sinf(theta); // cosa -sina float cosa = cosf(theta); // sina 
cosa float scale = 12.0f/16.0f*d_sift[bx].scale; float ssina = scale*sina; float scosa = scale*cosa; for (int y=ty;y<16;y+=8) { float xpos = d_sift[bx].xpos + (tx-7.5f)*scosa - (y-7.5f)*ssina + 0.5f; float ypos = d_sift[bx].ypos + (tx-7.5f)*ssina + (y-7.5f)*scosa + 0.5f; float dx = tex2D<float>(texObj, xpos+cosa, ypos+sina) - tex2D<float>(texObj, xpos-cosa, ypos-sina); float dy = tex2D<float>(texObj, xpos-sina, ypos+cosa) - tex2D<float>(texObj, xpos+sina, ypos-cosa); float grad = gauss[y]*gauss[tx] * sqrtf(dx*dx + dy*dy); float angf = 4.0f/3.1415f*atan2f(dy, dx) + 4.0f; int hori = (tx + 2)/4 - 1; // Convert from (tx,y,angle) to bins float horf = (tx - 1.5f)/4.0f - hori; float ihorf = 1.0f - horf; int veri = (y + 2)/4 - 1; float verf = (y - 1.5f)/4.0f - veri; float iverf = 1.0f - verf; int angi = angf; int angp = (angi<7 ? angi+1 : 0); angf -= angi; float iangf = 1.0f - angf; int hist = 8*(4*veri + hori); // Each gradient measure is interpolated int p1 = angi + hist; // in angles, xpos and ypos -> 8 stores int p2 = angp + hist; if (tx>=2) { float grad1 = ihorf*grad; if (y>=2) { // Upper left float grad2 = iverf*grad1; atomicAdd(buffer + p1, iangf*grad2); atomicAdd(buffer + p2, angf*grad2); } if (y<=13) { // Lower left float grad2 = verf*grad1; atomicAdd(buffer + p1+32, iangf*grad2); atomicAdd(buffer + p2+32, angf*grad2); } } if (tx<=13) { float grad1 = horf*grad; if (y>=2) { // Upper right float grad2 = iverf*grad1; atomicAdd(buffer + p1+8, iangf*grad2); atomicAdd(buffer + p2+8, angf*grad2); } if (y<=13) { // Lower right float grad2 = verf*grad1; atomicAdd(buffer + p1+40, iangf*grad2); atomicAdd(buffer + p2+40, angf*grad2); } } } __syncthreads(); // Normalize twice and suppress peaks first time if (idx<64) sums[idx] = buffer[idx]*buffer[idx] + buffer[idx+64]*buffer[idx+64]; __syncthreads(); if (idx<32) sums[idx] = sums[idx] + sums[idx+32]; __syncthreads(); if (idx<16) sums[idx] = sums[idx] + sums[idx+16]; __syncthreads(); if (idx<8) sums[idx] = sums[idx] + sums[idx+8]; __syncthreads(); if (idx<4) sums[idx] = sums[idx] + sums[idx+4]; __syncthreads(); float tsum1 = sums[0] + sums[1] + sums[2] + sums[3]; buffer[idx] = buffer[idx] * rsqrtf(tsum1); if (buffer[idx]>0.2f) buffer[idx] = 0.2f; __syncthreads(); if (idx<64) sums[idx] = buffer[idx]*buffer[idx] + buffer[idx+64]*buffer[idx+64]; __syncthreads(); if (idx<32) sums[idx] = sums[idx] + sums[idx+32]; __syncthreads(); if (idx<16) sums[idx] = sums[idx] + sums[idx+16]; __syncthreads(); if (idx<8) sums[idx] = sums[idx] + sums[idx+8]; __syncthreads(); if (idx<4) sums[idx] = sums[idx] + sums[idx+4]; __syncthreads(); float tsum2 = sums[0] + sums[1] + sums[2] + sums[3]; float *desc = d_sift[bx].data; desc[idx] = buffer[idx] * rsqrtf(tsum2); if (idx==0) { d_sift[bx].xpos *= subsampling; d_sift[bx].ypos *= subsampling; d_sift[bx].scale *= subsampling; } } __device__ void ExtractSiftDescriptor(cudaTextureObject_t texObj, SiftPoint *d_sift, float subsampling, int octave, int bx) { __shared__ float gauss[16]; __shared__ float buffer[128]; __shared__ float sums[4]; const int idx = threadIdx.x; const int tx = idx & 15; // 0 -> 16 const int ty = idx / 16; // 0 -> 8 if (ty==0) gauss[tx] = exp(-(tx-7.5f)*(tx-7.5f)/128.0f); buffer[idx] = 0.0f; __syncthreads(); // Compute angles and gradients float theta = 2.0f*3.1415f/360.0f*d_sift[bx].orientation; float sina = sinf(theta); // cosa -sina float cosa = cosf(theta); // sina cosa float scale = 12.0f/16.0f*d_sift[bx].scale; float ssina = scale*sina; float scosa = scale*cosa; for (int y=ty;y<16;y+=8) { float xpos = 
d_sift[bx].xpos + (tx-7.5f)*scosa - (y-7.5f)*ssina + 0.5f; float ypos = d_sift[bx].ypos + (tx-7.5f)*ssina + (y-7.5f)*scosa + 0.5f; float dx = tex2D<float>(texObj, xpos+cosa, ypos+sina) - tex2D<float>(texObj, xpos-cosa, ypos-sina); float dy = tex2D<float>(texObj, xpos-sina, ypos+cosa) - tex2D<float>(texObj, xpos+sina, ypos-cosa); float grad = gauss[y]*gauss[tx] * sqrtf(dx*dx + dy*dy); float angf = 4.0f/3.1415f*atan2f(dy, dx) + 4.0f; int hori = (tx + 2)/4 - 1; // Convert from (tx,y,angle) to bins float horf = (tx - 1.5f)/4.0f - hori; float ihorf = 1.0f - horf; int veri = (y + 2)/4 - 1; float verf = (y - 1.5f)/4.0f - veri; float iverf = 1.0f - verf; int angi = angf; int angp = (angi<7 ? angi+1 : 0); angf -= angi; float iangf = 1.0f - angf; int hist = 8*(4*veri + hori); // Each gradient measure is interpolated int p1 = angi + hist; // in angles, xpos and ypos -> 8 stores int p2 = angp + hist; if (tx>=2) { float grad1 = ihorf*grad; if (y>=2) { // Upper left float grad2 = iverf*grad1; atomicAdd(buffer + p1, iangf*grad2); atomicAdd(buffer + p2, angf*grad2); } if (y<=13) { // Lower left float grad2 = verf*grad1; atomicAdd(buffer + p1+32, iangf*grad2); atomicAdd(buffer + p2+32, angf*grad2); } } if (tx<=13) { float grad1 = horf*grad; if (y>=2) { // Upper right float grad2 = iverf*grad1; atomicAdd(buffer + p1+8, iangf*grad2); atomicAdd(buffer + p2+8, angf*grad2); } if (y<=13) { // Lower right float grad2 = verf*grad1; atomicAdd(buffer + p1+40, iangf*grad2); atomicAdd(buffer + p2+40, angf*grad2); } } } __syncthreads(); // Normalize twice and suppress peaks first time float sum = buffer[idx]*buffer[idx]; for (int i=16;i>0;i/=2) sum += ShiftDown(sum, i); if ((idx&31)==0) sums[idx/32] = sum; __syncthreads(); float tsum1 = sums[0] + sums[1] + sums[2] + sums[3]; tsum1 = min(buffer[idx] * rsqrtf(tsum1), 0.2f); sum = tsum1*tsum1; for (int i=16;i>0;i/=2) sum += ShiftDown(sum, i); if ((idx&31)==0) sums[idx/32] = sum; __syncthreads(); float tsum2 = sums[0] + sums[1] + sums[2] + sums[3]; float *desc = d_sift[bx].data; desc[idx] = tsum1 * rsqrtf(tsum2); if (idx==0) { d_sift[bx].xpos *= subsampling; d_sift[bx].ypos *= subsampling; d_sift[bx].scale *= subsampling; } __syncthreads(); } __global__ void RescalePositions(SiftPoint *d_sift, int numPts, float scale) { int num = blockIdx.x*blockDim.x + threadIdx.x; if (num<numPts) { d_sift[num].xpos *= scale; d_sift[num].ypos *= scale; d_sift[num].scale *= scale; } } __global__ void ComputeOrientations(cudaTextureObject_t texObj, SiftPoint *d_Sift, int fstPts) { __shared__ float hist[64]; __shared__ float gauss[11]; const int tx = threadIdx.x; const int bx = blockIdx.x + fstPts; float i2sigma2 = -1.0f/(4.5f*d_Sift[bx].scale*d_Sift[bx].scale); if (tx<11) gauss[tx] = exp(i2sigma2*(tx-5)*(tx-5)); if (tx<64) hist[tx] = 0.0f; __syncthreads(); float xp = d_Sift[bx].xpos - 4.5f; float yp = d_Sift[bx].ypos - 4.5f; int yd = tx/11; int xd = tx - yd*11; float xf = xp + xd; float yf = yp + yd; if (yd<11) { float dx = tex2D<float>(texObj, xf+1.0, yf) - tex2D<float>(texObj, xf-1.0, yf); float dy = tex2D<float>(texObj, xf, yf+1.0) - tex2D<float>(texObj, xf, yf-1.0); int bin = 16.0f*atan2f(dy, dx)/3.1416f + 16.5f; if (bin>31) bin = 0; float grad = sqrtf(dx*dx + dy*dy); atomicAdd(&hist[bin], grad*gauss[xd]*gauss[yd]); } __syncthreads(); int x1m = (tx>=1 ? tx-1 : tx+31); int x1p = (tx<=30 ? tx+1 : tx-31); if (tx<32) { int x2m = (tx>=2 ? tx-2 : tx+30); int x2p = (tx<=29 ? 
tx+2 : tx-30); hist[tx+32] = 6.0f*hist[tx] + 4.0f*(hist[x1m] + hist[x1p]) + (hist[x2m] + hist[x2p]); } __syncthreads(); if (tx<32) { float v = hist[32+tx]; hist[tx] = (v>hist[32+x1m] && v>=hist[32+x1p] ? v : 0.0f); } __syncthreads(); if (tx==0) { float maxval1 = 0.0; float maxval2 = 0.0; int i1 = -1; int i2 = -1; for (int i=0;i<32;i++) { float v = hist[i]; if (v>maxval1) { maxval2 = maxval1; maxval1 = v; i2 = i1; i1 = i; } else if (v>maxval2) { maxval2 = v; i2 = i; } } float val1 = hist[32+((i1+1)&31)]; float val2 = hist[32+((i1+31)&31)]; float peak = i1 + 0.5f*(val1-val2) / (2.0f*maxval1-val1-val2); d_Sift[bx].orientation = 11.25f*(peak<0.0f ? peak+32.0f : peak); if (maxval2>0.8f*maxval1) { float val1 = hist[32+((i2+1)&31)]; float val2 = hist[32+((i2+31)&31)]; float peak = i2 + 0.5f*(val1-val2) / (2.0f*maxval2-val1-val2); unsigned int idx = atomicInc(d_PointCounter, 0x7fffffff); if (idx<d_MaxNumPoints) { d_Sift[idx].xpos = d_Sift[bx].xpos; d_Sift[idx].ypos = d_Sift[bx].ypos; d_Sift[idx].scale = d_Sift[bx].scale; d_Sift[idx].sharpness = d_Sift[bx].sharpness; d_Sift[idx].edgeness = d_Sift[bx].edgeness; d_Sift[idx].orientation = 11.25f*(peak<0.0f ? peak+32.0f : peak);; d_Sift[idx].subsampling = d_Sift[bx].subsampling; } } } } // With constant number of blocks __global__ void ComputeOrientationsCONSTNew(float *image, int w, int p, int h, SiftPoint *d_Sift, int octave) { #define RAD 9 #define WID (2*RAD + 1) #define LEN 32 //%%%% Note: Lowe suggests 36, not 32 __shared__ float img[WID][WID], tmp[WID][WID]; __shared__ float hist[2*LEN]; __shared__ float gaussx[WID], gaussy[WID]; const int tx = threadIdx.x; int fstPts = min(d_PointCounter[2*octave-1], d_MaxNumPoints); int totPts = min(d_PointCounter[2*octave+0], d_MaxNumPoints); for (int bx = blockIdx.x + fstPts; bx < totPts; bx += gridDim.x) { float sc = d_Sift[bx].scale; for (int i=tx;i<2*LEN;i+=blockDim.x) hist[i] = 0.0f; float xp = d_Sift[bx].xpos; float yp = d_Sift[bx].ypos; int xi = (int)xp; int yi = (int)yp; float xf = xp - xi; float yf = yp - yi; for (int i=tx;i<WID*WID;i+=blockDim.x) { int y = i/WID; int x = i - y*WID; int xp = max(min(x - RAD + xi, w - 1), 0); int yp = max(min(y - RAD + yi, h - 1), 0); img[y][x] = image[yp*p + xp]; } float fac[5]; fac[1] = fac[3] = (sc>0.5f ? __expf(-1.0f/(2.0f*(sc*sc - 0.25f))) : 0.0f); fac[0] = fac[4] = (sc>0.5f ? __expf(-4.0f/(2.0f*(sc*sc - 0.25f))) : 0.0f); fac[2] = 1.0f; float i2sigma2 = -1.0f/(2.0f*2.0f*2.0f*sc*sc); //%%%% Note: Lowe suggests 1.5, not 2.0 if (tx<WID) { gaussx[tx] = __expf(i2sigma2*(tx-RAD-xf)*(tx-RAD-xf)); gaussy[tx] = __expf(i2sigma2*(tx-RAD-yf)*(tx-RAD-yf)); } __syncthreads(); for (int i=tx;i<(WID-4)*WID;i+=blockDim.x) { int y = i/WID; int x = i - y*WID; y += 2; tmp[y][x] = img[y][x] + fac[1]*(img[y-1][x] + img[y+1][x]) + fac[0]*(img[y-2][x] + img[y+2][x]); } __syncthreads(); for (int i=tx;i<(WID-4)*(WID-4);i+=blockDim.x) { int y = i/(WID-4); int x = i - y*(WID-4); x += 2; y += 2; img[y][x] = tmp[y][x] + fac[1]*(tmp[y][x-1] + tmp[y][x+1]) + fac[0]*(tmp[y][x-2] + tmp[y][x+2]); } __syncthreads(); for (int i=tx;i<(WID-6)*(WID-6);i+=blockDim.x) { int y = i/(WID-6); int x = i - y*(WID-6); x += 3; y += 3; float dx = img[y][x+1] - img[y][x-1]; float dy = img[y+1][x] - img[y-1][x]; int bin = (int)((LEN/2)*atan2f(dy, dx)/3.1416f + (LEN/2) + 0.5f)%LEN; float grad = __fsqrt_rn(dx*dx + dy*dy); atomicAdd(&hist[LEN + bin], grad*gaussx[x]*gaussy[y]); } __syncthreads(); int x1m = (tx>=1 ? tx-1 : tx+LEN-1); int x1p = (tx<(LEN-1) ? tx+1 : tx-LEN+1); int x2m = (tx>=2 ? 
tx-2 : tx+LEN-2); int x2p = (tx<(LEN-2) ? tx+2 : tx-LEN+2); if (tx<LEN) { hist[tx] = 6.0f*hist[tx + LEN] + 4.0f*(hist[x1m + LEN] + hist[x1p + LEN]) + 1.0f*(hist[x2m + LEN] + hist[x2p + LEN]); hist[tx + LEN] = 8.0f*hist[tx] + 4.0f*(hist[x1m] + hist[x1p]) + 0.0f*(hist[x2m] + hist[x2p]); float val = hist[tx + LEN]; hist[tx] = (val>hist[x1m + LEN] && val>=hist[x1p + LEN] ? val : 0.0f); } __syncthreads(); if (tx==0) { float maxval1 = 0.0; float maxval2 = 0.0; int i1 = -1; int i2 = -1; for (int i=0;i<LEN;i++) { float v = hist[i]; if (v>maxval1) { maxval2 = maxval1; maxval1 = v; i2 = i1; i1 = i; } else if (v>maxval2) { maxval2 = v; i2 = i; } } float val1 = hist[LEN + ((i1 + 1)%LEN)]; float val2 = hist[LEN + ((i1 + LEN - 1)%LEN)]; float peak = i1 + 0.5f*(val1 - val2) / (2.0f*maxval1 - val1 - val2); d_Sift[bx].orientation = 360.0f*(peak<0.0f ? peak + LEN : peak)/LEN; atomicMax(&d_PointCounter[2*octave+1], d_PointCounter[2*octave+0]); if (maxval2>0.8f*maxval1 && true) { float val1 = hist[LEN + ((i2 + 1)%LEN)]; float val2 = hist[LEN + ((i2 + LEN - 1)%LEN)]; float peak = i2 + 0.5f*(val1 - val2) / (2.0f*maxval2 - val1 - val2); unsigned int idx = atomicInc(&d_PointCounter[2*octave+1], 0x7fffffff); if (idx<d_MaxNumPoints) { d_Sift[idx].xpos = d_Sift[bx].xpos; d_Sift[idx].ypos = d_Sift[bx].ypos; d_Sift[idx].scale = sc; d_Sift[idx].sharpness = d_Sift[bx].sharpness; d_Sift[idx].edgeness = d_Sift[bx].edgeness; d_Sift[idx].orientation = 360.0f*(peak<0.0f ? peak + LEN : peak)/LEN; d_Sift[idx].subsampling = d_Sift[bx].subsampling; } } } } #undef RAD #undef WID #undef LEN } // With constant number of blocks __global__ void ComputeOrientationsCONST(cudaTextureObject_t texObj, SiftPoint *d_Sift, int octave) { __shared__ float hist[64]; __shared__ float gauss[11]; const int tx = threadIdx.x; int fstPts = min(d_PointCounter[2*octave-1], d_MaxNumPoints); int totPts = min(d_PointCounter[2*octave+0], d_MaxNumPoints); for (int bx = blockIdx.x + fstPts; bx < totPts; bx += gridDim.x) { float i2sigma2 = -1.0f/(2.0f*1.5f*1.5f*d_Sift[bx].scale*d_Sift[bx].scale); if (tx<11) gauss[tx] = exp(i2sigma2*(tx-5)*(tx-5)); if (tx<64) hist[tx] = 0.0f; __syncthreads(); float xp = d_Sift[bx].xpos - 4.5f; float yp = d_Sift[bx].ypos - 4.5f; int yd = tx/11; int xd = tx - yd*11; float xf = xp + xd; float yf = yp + yd; if (yd<11) { float dx = tex2D<float>(texObj, xf+1.0, yf) - tex2D<float>(texObj, xf-1.0, yf); float dy = tex2D<float>(texObj, xf, yf+1.0) - tex2D<float>(texObj, xf, yf-1.0); int bin = 16.0f*atan2f(dy, dx)/3.1416f + 16.5f; if (bin>31) bin = 0; float grad = sqrtf(dx*dx + dy*dy); atomicAdd(&hist[bin], grad*gauss[xd]*gauss[yd]); } __syncthreads(); int x1m = (tx>=1 ? tx-1 : tx+31); int x1p = (tx<=30 ? tx+1 : tx-31); if (tx<32) { int x2m = (tx>=2 ? tx-2 : tx+30); int x2p = (tx<=29 ? tx+2 : tx-30); hist[tx+32] = 6.0f*hist[tx] + 4.0f*(hist[x1m] + hist[x1p]) + (hist[x2m] + hist[x2p]); } __syncthreads(); if (tx<32) { float v = hist[32+tx]; hist[tx] = (v>hist[32+x1m] && v>=hist[32+x1p] ? v : 0.0f); } __syncthreads(); if (tx==0) { float maxval1 = 0.0; float maxval2 = 0.0; int i1 = -1; int i2 = -1; for (int i=0;i<32;i++) { float v = hist[i]; if (v>maxval1) { maxval2 = maxval1; maxval1 = v; i2 = i1; i1 = i; } else if (v>maxval2) { maxval2 = v; i2 = i; } } float val1 = hist[32+((i1+1)&31)]; float val2 = hist[32+((i1+31)&31)]; float peak = i1 + 0.5f*(val1-val2) / (2.0f*maxval1-val1-val2); d_Sift[bx].orientation = 11.25f*(peak<0.0f ? 
peak+32.0f : peak); atomicMax(&d_PointCounter[2*octave+1], d_PointCounter[2*octave+0]); if (maxval2>0.8f*maxval1 && true) { float val1 = hist[32+((i2+1)&31)]; float val2 = hist[32+((i2+31)&31)]; float peak = i2 + 0.5f*(val1-val2) / (2.0f*maxval2-val1-val2); unsigned int idx = atomicInc(&d_PointCounter[2*octave+1], 0x7fffffff); if (idx<d_MaxNumPoints) { d_Sift[idx].xpos = d_Sift[bx].xpos; d_Sift[idx].ypos = d_Sift[bx].ypos; d_Sift[idx].scale = d_Sift[bx].scale; d_Sift[idx].sharpness = d_Sift[bx].sharpness; d_Sift[idx].edgeness = d_Sift[bx].edgeness; d_Sift[idx].orientation = 11.25f*(peak<0.0f ? peak+32.0f : peak);; d_Sift[idx].subsampling = d_Sift[bx].subsampling; } } } __syncthreads(); } } // With constant number of blocks __global__ void OrientAndExtractCONST(cudaTextureObject_t texObj, SiftPoint *d_Sift, float subsampling, int octave) { __shared__ float hist[64]; __shared__ float gauss[11]; __shared__ unsigned int idx; //%%%% const int tx = threadIdx.x; int fstPts = min(d_PointCounter[2*octave-1], d_MaxNumPoints); int totPts = min(d_PointCounter[2*octave+0], d_MaxNumPoints); for (int bx = blockIdx.x + fstPts; bx < totPts; bx += gridDim.x) { float i2sigma2 = -1.0f/(4.5f*d_Sift[bx].scale*d_Sift[bx].scale); if (tx<11) gauss[tx] = exp(i2sigma2*(tx-5)*(tx-5)); if (tx<64) hist[tx] = 0.0f; __syncthreads(); float xp = d_Sift[bx].xpos - 4.5f; float yp = d_Sift[bx].ypos - 4.5f; int yd = tx/11; int xd = tx - yd*11; float xf = xp + xd; float yf = yp + yd; if (yd<11) { float dx = tex2D<float>(texObj, xf+1.0, yf) - tex2D<float>(texObj, xf-1.0, yf); float dy = tex2D<float>(texObj, xf, yf+1.0) - tex2D<float>(texObj, xf, yf-1.0); int bin = 16.0f*atan2f(dy, dx)/3.1416f + 16.5f; if (bin>31) bin = 0; float grad = sqrtf(dx*dx + dy*dy); atomicAdd(&hist[bin], grad*gauss[xd]*gauss[yd]); } __syncthreads(); int x1m = (tx>=1 ? tx-1 : tx+31); int x1p = (tx<=30 ? tx+1 : tx-31); if (tx<32) { int x2m = (tx>=2 ? tx-2 : tx+30); int x2p = (tx<=29 ? tx+2 : tx-30); hist[tx+32] = 6.0f*hist[tx] + 4.0f*(hist[x1m] + hist[x1p]) + (hist[x2m] + hist[x2p]); } __syncthreads(); if (tx<32) { float v = hist[32+tx]; hist[tx] = (v>hist[32+x1m] && v>=hist[32+x1p] ? v : 0.0f); } __syncthreads(); if (tx==0) { float maxval1 = 0.0; float maxval2 = 0.0; int i1 = -1; int i2 = -1; for (int i=0;i<32;i++) { float v = hist[i]; if (v>maxval1) { maxval2 = maxval1; maxval1 = v; i2 = i1; i1 = i; } else if (v>maxval2) { maxval2 = v; i2 = i; } } float val1 = hist[32+((i1+1)&31)]; float val2 = hist[32+((i1+31)&31)]; float peak = i1 + 0.5f*(val1-val2) / (2.0f*maxval1-val1-val2); d_Sift[bx].orientation = 11.25f*(peak<0.0f ? peak+32.0f : peak); idx = 0xffffffff; //%%%% atomicMax(&d_PointCounter[2*octave+1], d_PointCounter[2*octave+0]); if (maxval2>0.8f*maxval1) { float val1 = hist[32+((i2+1)&31)]; float val2 = hist[32+((i2+31)&31)]; float peak = i2 + 0.5f*(val1-val2) / (2.0f*maxval2-val1-val2); idx = atomicInc(&d_PointCounter[2*octave+1], 0x7fffffff); //%%%% if (idx<d_MaxNumPoints) { d_Sift[idx].xpos = d_Sift[bx].xpos; d_Sift[idx].ypos = d_Sift[bx].ypos; d_Sift[idx].scale = d_Sift[bx].scale; d_Sift[idx].sharpness = d_Sift[bx].sharpness; d_Sift[idx].edgeness = d_Sift[bx].edgeness; d_Sift[idx].orientation = 11.25f*(peak<0.0f ? 
peak+32.0f : peak);; d_Sift[idx].subsampling = d_Sift[bx].subsampling; } } } __syncthreads(); ExtractSiftDescriptor(texObj, d_Sift, subsampling, octave, bx); //%%%% if (idx<d_MaxNumPoints) //%%%% ExtractSiftDescriptor(texObj, d_Sift, subsampling, octave, idx); //%%%% } } /////////////////////////////////////////////////////////////////////////////// // Subtract two images (multi-scale version) /////////////////////////////////////////////////////////////////////////////// __global__ void FindPointsMultiTest(float *d_Data0, SiftPoint *d_Sift, int width, int pitch, int height, float subsampling, float lowestScale, float thresh, float factor, float edgeLimit, int octave) { #define MEMWID (MINMAX_W + 2) __shared__ unsigned int cnt; __shared__ unsigned short points[3*MEMWID]; if (blockIdx.x==0 && blockIdx.y==0 && threadIdx.x==0 && threadIdx.y==0) { atomicMax(&d_PointCounter[2*octave+0], d_PointCounter[2*octave-1]); atomicMax(&d_PointCounter[2*octave+1], d_PointCounter[2*octave-1]); } int tx = threadIdx.x; int ty = threadIdx.y; if (tx==0 && ty==0) cnt = 0; __syncthreads(); int ypos = MINMAX_H*blockIdx.y + ty; if (ypos>=height) return; int block = blockIdx.x/NUM_SCALES; int scale = blockIdx.x - NUM_SCALES*block; int minx = block*MINMAX_W; int maxx = min(minx + MINMAX_W, width); int xpos = minx + tx; int size = pitch*height; int ptr = size*scale + max(min(xpos-1, width-1), 0); float maxv = fabs(d_Data0[ptr + ypos*pitch + 1*size]); maxv = fmaxf(maxv, ShiftDown(maxv, 16, MINMAX_W)); maxv = fmaxf(maxv, ShiftDown(maxv, 8, MINMAX_W)); maxv = fmaxf(maxv, ShiftDown(maxv, 4, MINMAX_W)); maxv = fmaxf(maxv, ShiftDown(maxv, 2, MINMAX_W)); maxv = fmaxf(maxv, ShiftDown(maxv, 1, MINMAX_W)); if (Shuffle(maxv, 0)>thresh) { int yptr1 = ptr + ypos*pitch; int yptr0 = ptr + max(0,ypos-1)*pitch; int yptr2 = ptr + min(height-1,ypos+1)*pitch; float d20 = d_Data0[yptr0 + 1*size]; float d21 = d_Data0[yptr1 + 1*size]; float d22 = d_Data0[yptr2 + 1*size]; float d31 = d_Data0[yptr1 + 2*size]; float d11 = d_Data0[yptr1]; float d10 = d_Data0[yptr0]; float d12 = d_Data0[yptr2]; float ymin1 = fminf(fminf(d10, d11), d12); float ymax1 = fmaxf(fmaxf(d10, d11), d12); float d30 = d_Data0[yptr0 + 2*size]; float d32 = d_Data0[yptr2 + 2*size]; float ymin3 = fminf(fminf(d30, d31), d32); float ymax3 = fmaxf(fmaxf(d30, d31), d32); float ymin2 = fminf(fminf(ymin1, fminf(fminf(d20, d22), d21)), ymin3); float ymax2 = fmaxf(fmaxf(ymax1, fmaxf(fmaxf(d20, d22), d21)), ymax3); float nmin2 = fminf(ShiftUp(ymin2, 1), ShiftDown(ymin2, 1)); float nmax2 = fmaxf(ShiftUp(ymax2, 1), ShiftDown(ymax2, 1)); if (tx>0 && tx<MINMAX_W+1 && xpos<=maxx) { if (d21<-thresh) { float minv = fminf(fminf(nmin2, ymin1), ymin3); minv = fminf(fminf(minv, d20), d22); if (d21<minv) { int pos = atomicInc(&cnt, MEMWID-1); points[3*pos+0] = xpos - 1; points[3*pos+1] = ypos; points[3*pos+2] = scale; } } if (d21>thresh) { float maxv = fmaxf(fmaxf(nmax2, ymax1), ymax3); maxv = fmaxf(fmaxf(maxv, d20), d22); if (d21>maxv) { int pos = atomicInc(&cnt, MEMWID-1); points[3*pos+0] = xpos - 1; points[3*pos+1] = ypos; points[3*pos+2] = scale; } } } } __syncthreads(); if (ty==0 && tx<cnt) { int xpos = points[3*tx+0]; int ypos = points[3*tx+1]; int scale = points[3*tx+2]; int ptr = xpos + (ypos + (scale+1)*height)*pitch; float val = d_Data0[ptr]; float *data1 = &d_Data0[ptr]; float dxx = 2.0f*val - data1[-1] - data1[1]; float dyy = 2.0f*val - data1[-pitch] - data1[pitch]; float dxy = 0.25f*(data1[+pitch+1] + data1[-pitch-1] - data1[-pitch+1] - data1[+pitch-1]); float tra = dxx + dyy; float 
det = dxx*dyy - dxy*dxy; if (tra*tra<edgeLimit*det) { float edge = __fdividef(tra*tra, det); float dx = 0.5f*(data1[1] - data1[-1]); float dy = 0.5f*(data1[pitch] - data1[-pitch]); float *data0 = d_Data0 + ptr - height*pitch; float *data2 = d_Data0 + ptr + height*pitch; float ds = 0.5f*(data0[0] - data2[0]); float dss = 2.0f*val - data2[0] - data0[0]; float dxs = 0.25f*(data2[1] + data0[-1] - data0[1] - data2[-1]); float dys = 0.25f*(data2[pitch] + data0[-pitch] - data2[-pitch] - data0[pitch]); float idxx = dyy*dss - dys*dys; float idxy = dys*dxs - dxy*dss; float idxs = dxy*dys - dyy*dxs; float idet = __fdividef(1.0f, idxx*dxx + idxy*dxy + idxs*dxs); float idyy = dxx*dss - dxs*dxs; float idys = dxy*dxs - dxx*dys; float idss = dxx*dyy - dxy*dxy; float pdx = idet*(idxx*dx + idxy*dy + idxs*ds); float pdy = idet*(idxy*dx + idyy*dy + idys*ds); float pds = idet*(idxs*dx + idys*dy + idss*ds); if (pdx<-0.5f || pdx>0.5f || pdy<-0.5f || pdy>0.5f || pds<-0.5f || pds>0.5f) { pdx = __fdividef(dx, dxx); pdy = __fdividef(dy, dyy); pds = __fdividef(ds, dss); } float dval = 0.5f*(dx*pdx + dy*pdy + ds*pds); int maxPts = d_MaxNumPoints; float sc = powf(2.0f, (float)scale/NUM_SCALES) * exp2f(pds*factor); if (sc>=lowestScale) { unsigned int idx = atomicInc(&d_PointCounter[2*octave+0], 0x7fffffff); idx = (idx>=maxPts ? maxPts-1 : idx); d_Sift[idx].xpos = xpos + pdx; d_Sift[idx].ypos = ypos + pdy; d_Sift[idx].scale = sc; d_Sift[idx].sharpness = val + dval; d_Sift[idx].edgeness = edge; d_Sift[idx].subsampling = subsampling; } } } } __global__ void FindPointsMultiNew(float *d_Data0, SiftPoint *d_Sift, int width, int pitch, int height, float subsampling, float lowestScale, float thresh, float factor, float edgeLimit, int octave) { #define MEMWID (MINMAX_W + 2) __shared__ unsigned short points[2*MEMWID]; if (blockIdx.x==0 && blockIdx.y==0 && threadIdx.x==0) { atomicMax(&d_PointCounter[2*octave+0], d_PointCounter[2*octave-1]); atomicMax(&d_PointCounter[2*octave+1], d_PointCounter[2*octave-1]); } int tx = threadIdx.x; int block = blockIdx.x/NUM_SCALES; int scale = blockIdx.x - NUM_SCALES*block; int minx = block*MINMAX_W; int maxx = min(minx + MINMAX_W, width); int xpos = minx + tx; int size = pitch*height; int ptr = size*scale + max(min(xpos-1, width-1), 0); int yloops = min(height - MINMAX_H*blockIdx.y, MINMAX_H); float maxv = 0.0f; for (int y=0;y<yloops;y++) { int ypos = MINMAX_H*blockIdx.y + y; int yptr1 = ptr + ypos*pitch; float val = d_Data0[yptr1 + 1*size]; maxv = fmaxf(maxv, fabs(val)); } //if (tx==0) printf("XXX1\n"); if (!__any_sync(0xffffffff, maxv>thresh)) return; //if (tx==0) printf("XXX2\n"); int ptbits = 0; for (int y=0;y<yloops;y++) { int ypos = MINMAX_H*blockIdx.y + y; int yptr1 = ptr + ypos*pitch; float d11 = d_Data0[yptr1 + 1*size]; if (__any_sync(0xffffffff, fabs(d11)>thresh)) { int yptr0 = ptr + max(0,ypos-1)*pitch; int yptr2 = ptr + min(height-1,ypos+1)*pitch; float d01 = d_Data0[yptr1]; float d10 = d_Data0[yptr0 + 1*size]; float d12 = d_Data0[yptr2 + 1*size]; float d21 = d_Data0[yptr1 + 2*size]; float d00 = d_Data0[yptr0]; float d02 = d_Data0[yptr2]; float ymin1 = fminf(fminf(d00, d01), d02); float ymax1 = fmaxf(fmaxf(d00, d01), d02); float d20 = d_Data0[yptr0 + 2*size]; float d22 = d_Data0[yptr2 + 2*size]; float ymin3 = fminf(fminf(d20, d21), d22); float ymax3 = fmaxf(fmaxf(d20, d21), d22); float ymin2 = fminf(fminf(ymin1, fminf(fminf(d10, d12), d11)), ymin3); float ymax2 = fmaxf(fmaxf(ymax1, fmaxf(fmaxf(d10, d12), d11)), ymax3); float nmin2 = fminf(ShiftUp(ymin2, 1), ShiftDown(ymin2, 1)); float 
nmax2 = fmaxf(ShiftUp(ymax2, 1), ShiftDown(ymax2, 1)); float minv = fminf(fminf(nmin2, ymin1), ymin3); minv = fminf(fminf(minv, d10), d12); float maxv = fmaxf(fmaxf(nmax2, ymax1), ymax3); maxv = fmaxf(fmaxf(maxv, d10), d12); if (tx>0 && tx<MINMAX_W+1 && xpos<=maxx) ptbits |= ((d11 < fminf(-thresh, minv)) | (d11 > fmaxf(thresh, maxv))) << y; } } unsigned int totbits = __popc(ptbits); unsigned int numbits = totbits; for (int d=1;d<32;d<<=1) { unsigned int num = ShiftUp(totbits, d); if (tx >= d) totbits += num; } int pos = totbits - numbits; for (int y=0;y<yloops;y++) { int ypos = MINMAX_H*blockIdx.y + y; if (ptbits & (1 << y) && pos<MEMWID) { points[2*pos + 0] = xpos - 1; points[2*pos + 1] = ypos; pos ++; } } totbits = Shuffle(totbits, 31); if (tx<totbits) { int xpos = points[2*tx + 0]; int ypos = points[2*tx + 1]; int ptr = xpos + (ypos + (scale + 1)*height)*pitch; float val = d_Data0[ptr]; float *data1 = &d_Data0[ptr]; float dxx = 2.0f*val - data1[-1] - data1[1]; float dyy = 2.0f*val - data1[-pitch] - data1[pitch]; float dxy = 0.25f*(data1[+pitch+1] + data1[-pitch-1] - data1[-pitch+1] - data1[+pitch-1]); float tra = dxx + dyy; float det = dxx*dyy - dxy*dxy; if (tra*tra<edgeLimit*det) { float edge = __fdividef(tra*tra, det); float dx = 0.5f*(data1[1] - data1[-1]); float dy = 0.5f*(data1[pitch] - data1[-pitch]); float *data0 = d_Data0 + ptr - height*pitch; float *data2 = d_Data0 + ptr + height*pitch; float ds = 0.5f*(data0[0] - data2[0]); float dss = 2.0f*val - data2[0] - data0[0]; float dxs = 0.25f*(data2[1] + data0[-1] - data0[1] - data2[-1]); float dys = 0.25f*(data2[pitch] + data0[-pitch] - data2[-pitch] - data0[pitch]); float idxx = dyy*dss - dys*dys; float idxy = dys*dxs - dxy*dss; float idxs = dxy*dys - dyy*dxs; float idet = __fdividef(1.0f, idxx*dxx + idxy*dxy + idxs*dxs); float idyy = dxx*dss - dxs*dxs; float idys = dxy*dxs - dxx*dys; float idss = dxx*dyy - dxy*dxy; float pdx = idet*(idxx*dx + idxy*dy + idxs*ds); float pdy = idet*(idxy*dx + idyy*dy + idys*ds); float pds = idet*(idxs*dx + idys*dy + idss*ds); if (pdx<-0.5f || pdx>0.5f || pdy<-0.5f || pdy>0.5f || pds<-0.5f || pds>0.5f) { pdx = __fdividef(dx, dxx); pdy = __fdividef(dy, dyy); pds = __fdividef(ds, dss); } float dval = 0.5f*(dx*pdx + dy*pdy + ds*pds); int maxPts = d_MaxNumPoints; float sc = powf(2.0f, (float)scale/NUM_SCALES) * exp2f(pds*factor); if (sc>=lowestScale) { atomicMax(&d_PointCounter[2*octave+0], d_PointCounter[2*octave-1]); unsigned int idx = atomicInc(&d_PointCounter[2*octave+0], 0x7fffffff); idx = (idx>=maxPts ? 
maxPts-1 : idx); d_Sift[idx].xpos = xpos + pdx; d_Sift[idx].ypos = ypos + pdy; d_Sift[idx].scale = sc; d_Sift[idx].sharpness = val + dval; d_Sift[idx].edgeness = edge; d_Sift[idx].subsampling = subsampling; } } } } __global__ void FindPointsMulti(float *d_Data0, SiftPoint *d_Sift, int width, int pitch, int height, float subsampling, float lowestScale, float thresh, float factor, float edgeLimit, int octave) { #define MEMWID (MINMAX_W + 2) __shared__ unsigned int cnt; __shared__ unsigned short points[3*MEMWID]; if (blockIdx.x==0 && blockIdx.y==0 && threadIdx.x==0) { atomicMax(&d_PointCounter[2*octave+0], d_PointCounter[2*octave-1]); atomicMax(&d_PointCounter[2*octave+1], d_PointCounter[2*octave-1]); } int tx = threadIdx.x; int block = blockIdx.x/NUM_SCALES; int scale = blockIdx.x - NUM_SCALES*block; int minx = block*MINMAX_W; int maxx = min(minx + MINMAX_W, width); int xpos = minx + tx; int size = pitch*height; int ptr = size*scale + max(min(xpos-1, width-1), 0); int yloops = min(height - MINMAX_H*blockIdx.y, MINMAX_H); float maxv = 0.0f; for (int y=0;y<yloops;y++) { int ypos = MINMAX_H*blockIdx.y + y; int yptr1 = ptr + ypos*pitch; float val = d_Data0[yptr1 + 1*size]; maxv = fmaxf(maxv, fabs(val)); } maxv = fmaxf(maxv, ShiftDown(maxv, 16, MINMAX_W)); maxv = fmaxf(maxv, ShiftDown(maxv, 8, MINMAX_W)); maxv = fmaxf(maxv, ShiftDown(maxv, 4, MINMAX_W)); maxv = fmaxf(maxv, ShiftDown(maxv, 2, MINMAX_W)); maxv = fmaxf(maxv, ShiftDown(maxv, 1, MINMAX_W)); if (Shuffle(maxv, 0)<=thresh) return; if (tx==0) cnt = 0; __syncthreads(); for (int y=0;y<yloops;y++) { int ypos = MINMAX_H*blockIdx.y + y; int yptr1 = ptr + ypos*pitch; int yptr0 = ptr + max(0,ypos-1)*pitch; int yptr2 = ptr + min(height-1,ypos+1)*pitch; float d20 = d_Data0[yptr0 + 1*size]; float d21 = d_Data0[yptr1 + 1*size]; float d22 = d_Data0[yptr2 + 1*size]; float d31 = d_Data0[yptr1 + 2*size]; float d11 = d_Data0[yptr1]; float d10 = d_Data0[yptr0]; float d12 = d_Data0[yptr2]; float ymin1 = fminf(fminf(d10, d11), d12); float ymax1 = fmaxf(fmaxf(d10, d11), d12); float d30 = d_Data0[yptr0 + 2*size]; float d32 = d_Data0[yptr2 + 2*size]; float ymin3 = fminf(fminf(d30, d31), d32); float ymax3 = fmaxf(fmaxf(d30, d31), d32); float ymin2 = fminf(fminf(ymin1, fminf(fminf(d20, d22), d21)), ymin3); float ymax2 = fmaxf(fmaxf(ymax1, fmaxf(fmaxf(d20, d22), d21)), ymax3); float nmin2 = fminf(ShiftUp(ymin2, 1), ShiftDown(ymin2, 1)); float nmax2 = fmaxf(ShiftUp(ymax2, 1), ShiftDown(ymax2, 1)); if (tx>0 && tx<MINMAX_W+1 && xpos<=maxx) { if (d21<-thresh) { float minv = fminf(fminf(nmin2, ymin1), ymin3); minv = fminf(fminf(minv, d20), d22); if (d21<minv) { int pos = atomicInc(&cnt, MEMWID-1); points[3*pos+0] = xpos - 1; points[3*pos+1] = ypos; points[3*pos+2] = scale; } } if (d21>thresh) { float maxv = fmaxf(fmaxf(nmax2, ymax1), ymax3); maxv = fmaxf(fmaxf(maxv, d20), d22); if (d21>maxv) { int pos = atomicInc(&cnt, MEMWID-1); points[3*pos+0] = xpos - 1; points[3*pos+1] = ypos; points[3*pos+2] = scale; } } } } if (tx<cnt) { int xpos = points[3*tx+0]; int ypos = points[3*tx+1]; int scale = points[3*tx+2]; int ptr = xpos + (ypos + (scale+1)*height)*pitch; float val = d_Data0[ptr]; float *data1 = &d_Data0[ptr]; float dxx = 2.0f*val - data1[-1] - data1[1]; float dyy = 2.0f*val - data1[-pitch] - data1[pitch]; float dxy = 0.25f*(data1[+pitch+1] + data1[-pitch-1] - data1[-pitch+1] - data1[+pitch-1]); float tra = dxx + dyy; float det = dxx*dyy - dxy*dxy; if (tra*tra<edgeLimit*det) { float edge = __fdividef(tra*tra, det); float dx = 0.5f*(data1[1] - data1[-1]); float dy = 
0.5f*(data1[pitch] - data1[-pitch]); float *data0 = d_Data0 + ptr - height*pitch; float *data2 = d_Data0 + ptr + height*pitch; float ds = 0.5f*(data0[0] - data2[0]); float dss = 2.0f*val - data2[0] - data0[0]; float dxs = 0.25f*(data2[1] + data0[-1] - data0[1] - data2[-1]); float dys = 0.25f*(data2[pitch] + data0[-pitch] - data2[-pitch] - data0[pitch]); float idxx = dyy*dss - dys*dys; float idxy = dys*dxs - dxy*dss; float idxs = dxy*dys - dyy*dxs; float idet = __fdividef(1.0f, idxx*dxx + idxy*dxy + idxs*dxs); float idyy = dxx*dss - dxs*dxs; float idys = dxy*dxs - dxx*dys; float idss = dxx*dyy - dxy*dxy; float pdx = idet*(idxx*dx + idxy*dy + idxs*ds); float pdy = idet*(idxy*dx + idyy*dy + idys*ds); float pds = idet*(idxs*dx + idys*dy + idss*ds); if (pdx<-0.5f || pdx>0.5f || pdy<-0.5f || pdy>0.5f || pds<-0.5f || pds>0.5f) { pdx = __fdividef(dx, dxx); pdy = __fdividef(dy, dyy); pds = __fdividef(ds, dss); } float dval = 0.5f*(dx*pdx + dy*pdy + ds*pds); int maxPts = d_MaxNumPoints; float sc = powf(2.0f, (float)scale/NUM_SCALES) * exp2f(pds*factor); if (sc>=lowestScale) { atomicMax(&d_PointCounter[2*octave+0], d_PointCounter[2*octave-1]); unsigned int idx = atomicInc(&d_PointCounter[2*octave+0], 0x7fffffff); idx = (idx>=maxPts ? maxPts-1 : idx); d_Sift[idx].xpos = xpos + pdx; d_Sift[idx].ypos = ypos + pdy; d_Sift[idx].scale = sc; d_Sift[idx].sharpness = val + dval; d_Sift[idx].edgeness = edge; d_Sift[idx].subsampling = subsampling; } } } } __global__ void FindPointsMultiOld(float *d_Data0, SiftPoint *d_Sift, int width, int pitch, int height, float subsampling, float lowestScale, float thresh, float factor, float edgeLimit, int octave) { #define MEMWID (MINMAX_W + 2) __shared__ float ymin1[MEMWID], ymin2[MEMWID], ymin3[MEMWID]; __shared__ float ymax1[MEMWID], ymax2[MEMWID], ymax3[MEMWID]; __shared__ unsigned int cnt; __shared__ unsigned short points[3*MEMWID]; if (blockIdx.x==0 && blockIdx.y==0 && threadIdx.x==0) { atomicMax(&d_PointCounter[2*octave+0], d_PointCounter[2*octave-1]); atomicMax(&d_PointCounter[2*octave+1], d_PointCounter[2*octave-1]); } int tx = threadIdx.x; int block = blockIdx.x/NUM_SCALES; int scale = blockIdx.x - NUM_SCALES*block; int minx = block*MINMAX_W; int maxx = min(minx + MINMAX_W, width); int xpos = minx + tx; int size = pitch*height; int ptr = size*scale + max(min(xpos-1, width-1), 0); int yloops = min(height - MINMAX_H*blockIdx.y, MINMAX_H); float maxv = 0.0f; for (int y=0;y<yloops;y++) { int ypos = MINMAX_H*blockIdx.y + y; int yptr1 = ptr + ypos*pitch; float val = d_Data0[yptr1 + 1*size]; maxv = fmaxf(maxv, fabs(val)); } maxv = fmaxf(maxv, ShiftDown(maxv, 16, MINMAX_W)); maxv = fmaxf(maxv, ShiftDown(maxv, 8, MINMAX_W)); maxv = fmaxf(maxv, ShiftDown(maxv, 4, MINMAX_W)); maxv = fmaxf(maxv, ShiftDown(maxv, 2, MINMAX_W)); maxv = fmaxf(maxv, ShiftDown(maxv, 1, MINMAX_W)); if (Shuffle(maxv, 0)<=thresh) return; if (tx==0) cnt = 0; __syncthreads(); for (int y=0;y<yloops;y++) { int ypos = MINMAX_H*blockIdx.y + y; int yptr1 = ptr + ypos*pitch; int yptr0 = ptr + max(0,ypos-1)*pitch; int yptr2 = ptr + min(height-1,ypos+1)*pitch; float d20 = d_Data0[yptr0 + 1*size]; float d21 = d_Data0[yptr1 + 1*size]; float d22 = d_Data0[yptr2 + 1*size]; float d31 = d_Data0[yptr1 + 2*size]; float d11 = d_Data0[yptr1]; float d10 = d_Data0[yptr0]; float d12 = d_Data0[yptr2]; ymin1[tx] = fminf(fminf(d10, d11), d12); ymax1[tx] = fmaxf(fmaxf(d10, d11), d12); float d30 = d_Data0[yptr0 + 2*size]; float d32 = d_Data0[yptr2 + 2*size]; ymin3[tx] = fminf(fminf(d30, d31), d32); ymax3[tx] = fmaxf(fmaxf(d30, 
d31), d32); ymin2[tx] = fminf(fminf(ymin1[tx], fminf(fminf(d20, d22), d21)), ymin3[tx]); ymax2[tx] = fmaxf(fmaxf(ymax1[tx], fmaxf(fmaxf(d20, d22), d21)), ymax3[tx]); __syncthreads(); if (tx>0 && tx<MINMAX_W+1 && xpos<=maxx) { if (d21<-thresh) { float minv = fminf(fminf(fminf(ymin2[tx-1], ymin2[tx+1]), ymin1[tx]), ymin3[tx]); minv = fminf(fminf(minv, d20), d22); if (d21<minv) { int pos = atomicInc(&cnt, MEMWID-1); points[3*pos+0] = xpos - 1; points[3*pos+1] = ypos; points[3*pos+2] = scale; } } if (d21>thresh) { float maxv = fmaxf(fmaxf(fmaxf(ymax2[tx-1], ymax2[tx+1]), ymax1[tx]), ymax3[tx]); maxv = fmaxf(fmaxf(maxv, d20), d22); if (d21>maxv) { int pos = atomicInc(&cnt, MEMWID-1); points[3*pos+0] = xpos - 1; points[3*pos+1] = ypos; points[3*pos+2] = scale; } } } __syncthreads(); } if (tx<cnt) { int xpos = points[3*tx+0]; int ypos = points[3*tx+1]; int scale = points[3*tx+2]; int ptr = xpos + (ypos + (scale+1)*height)*pitch; float val = d_Data0[ptr]; float *data1 = &d_Data0[ptr]; float dxx = 2.0f*val - data1[-1] - data1[1]; float dyy = 2.0f*val - data1[-pitch] - data1[pitch]; float dxy = 0.25f*(data1[+pitch+1] + data1[-pitch-1] - data1[-pitch+1] - data1[+pitch-1]); float tra = dxx + dyy; float det = dxx*dyy - dxy*dxy; if (tra*tra<edgeLimit*det) { float edge = __fdividef(tra*tra, det); float dx = 0.5f*(data1[1] - data1[-1]); float dy = 0.5f*(data1[pitch] - data1[-pitch]); float *data0 = d_Data0 + ptr - height*pitch; float *data2 = d_Data0 + ptr + height*pitch; float ds = 0.5f*(data0[0] - data2[0]); float dss = 2.0f*val - data2[0] - data0[0]; float dxs = 0.25f*(data2[1] + data0[-1] - data0[1] - data2[-1]); float dys = 0.25f*(data2[pitch] + data0[-pitch] - data2[-pitch] - data0[pitch]); float idxx = dyy*dss - dys*dys; float idxy = dys*dxs - dxy*dss; float idxs = dxy*dys - dyy*dxs; float idet = __fdividef(1.0f, idxx*dxx + idxy*dxy + idxs*dxs); float idyy = dxx*dss - dxs*dxs; float idys = dxy*dxs - dxx*dys; float idss = dxx*dyy - dxy*dxy; float pdx = idet*(idxx*dx + idxy*dy + idxs*ds); float pdy = idet*(idxy*dx + idyy*dy + idys*ds); float pds = idet*(idxs*dx + idys*dy + idss*ds); if (pdx<-0.5f || pdx>0.5f || pdy<-0.5f || pdy>0.5f || pds<-0.5f || pds>0.5f) { pdx = __fdividef(dx, dxx); pdy = __fdividef(dy, dyy); pds = __fdividef(ds, dss); } float dval = 0.5f*(dx*pdx + dy*pdy + ds*pds); int maxPts = d_MaxNumPoints; float sc = powf(2.0f, (float)scale/NUM_SCALES) * exp2f(pds*factor); if (sc>=lowestScale) { unsigned int idx = atomicInc(&d_PointCounter[2*octave+0], 0x7fffffff); idx = (idx>=maxPts ? 
maxPts-1 : idx); d_Sift[idx].xpos = xpos + pdx; d_Sift[idx].ypos = ypos + pdy; d_Sift[idx].scale = sc; d_Sift[idx].sharpness = val + dval; d_Sift[idx].edgeness = edge; d_Sift[idx].subsampling = subsampling; } } } } __global__ void LaplaceMultiTex(cudaTextureObject_t texObj, float *d_Result, int width, int pitch, int height, int octave) { __shared__ float data1[(LAPLACE_W + 2*LAPLACE_R)*LAPLACE_S]; __shared__ float data2[LAPLACE_W*LAPLACE_S]; const int tx = threadIdx.x; const int xp = blockIdx.x*LAPLACE_W + tx; const int yp = blockIdx.y; const int scale = threadIdx.y; float *kernel = d_LaplaceKernel + octave*12*16 + scale*16; float *sdata1 = data1 + (LAPLACE_W + 2*LAPLACE_R)*scale; float x = xp-3.5; float y = yp+0.5; sdata1[tx] = kernel[0]*tex2D<float>(texObj, x, y) + kernel[1]*(tex2D<float>(texObj, x, y-1.0) + tex2D<float>(texObj, x, y+1.0)) + kernel[2]*(tex2D<float>(texObj, x, y-2.0) + tex2D<float>(texObj, x, y+2.0)) + kernel[3]*(tex2D<float>(texObj, x, y-3.0) + tex2D<float>(texObj, x, y+3.0)) + kernel[4]*(tex2D<float>(texObj, x, y-4.0) + tex2D<float>(texObj, x, y+4.0)); __syncthreads(); float *sdata2 = data2 + LAPLACE_W*scale; if (tx<LAPLACE_W) { sdata2[tx] = kernel[0]*sdata1[tx+4] + kernel[1]*(sdata1[tx+3] + sdata1[tx+5]) + kernel[2]*(sdata1[tx+2] + sdata1[tx+6]) + kernel[3]*(sdata1[tx+1] + sdata1[tx+7]) + kernel[4]*(sdata1[tx+0] + sdata1[tx+8]); } __syncthreads(); if (tx<LAPLACE_W && scale<LAPLACE_S-1 && xp<width) d_Result[scale*height*pitch + yp*pitch + xp] = sdata2[tx] - sdata2[tx+LAPLACE_W]; } __global__ void LaplaceMultiMem(float *d_Image, float *d_Result, int width, int pitch, int height, int octave) { __shared__ float buff[(LAPLACE_W + 2*LAPLACE_R)*LAPLACE_S]; const int tx = threadIdx.x; const int xp = blockIdx.x*LAPLACE_W + tx; const int yp = blockIdx.y; float *data = d_Image + max(min(xp - LAPLACE_R, width-1), 0); float temp[2*LAPLACE_R + 1], kern[LAPLACE_S][LAPLACE_R + 1]; if (xp<(width + 2*LAPLACE_R)) { for (int i=0;i<=2*LAPLACE_R;i++) temp[i] = data[max(0, min(yp + i - LAPLACE_R, height - 1))*pitch]; for (int scale=0;scale<LAPLACE_S;scale++) { float *buf = buff + (LAPLACE_W + 2*LAPLACE_R)*scale; float *kernel = d_LaplaceKernel + octave*12*16 + scale*16; for (int i=0;i<=LAPLACE_R;i++) kern[scale][i] = kernel[i]; float sum = kern[scale][0]*temp[LAPLACE_R]; #pragma unroll for (int j=1;j<=LAPLACE_R;j++) sum += kern[scale][j]*(temp[LAPLACE_R - j] + temp[LAPLACE_R + j]); buf[tx] = sum; } } __syncthreads(); if (tx<LAPLACE_W && xp<width) { int scale = 0; float oldRes = kern[scale][0]*buff[tx + LAPLACE_R]; #pragma unroll for (int j=1;j<=LAPLACE_R;j++) oldRes += kern[scale][j]*(buff[tx + LAPLACE_R - j] + buff[tx + LAPLACE_R + j]); for (int scale=1;scale<LAPLACE_S;scale++) { float *buf = buff + (LAPLACE_W + 2*LAPLACE_R)*scale; float res = kern[scale][0]*buf[tx + LAPLACE_R]; #pragma unroll for (int j=1;j<=LAPLACE_R;j++) res += kern[scale][j]*(buf[tx + LAPLACE_R - j] + buf[tx + LAPLACE_R + j]); d_Result[(scale-1)*height*pitch + yp*pitch + xp] = res - oldRes; oldRes = res; } } } __global__ void LaplaceMultiMemWide(float *d_Image, float *d_Result, int width, int pitch, int height, int octave) { __shared__ float buff[(LAPLACE_W + 2*LAPLACE_R)*LAPLACE_S]; const int tx = threadIdx.x; const int xp = blockIdx.x*LAPLACE_W + tx; const int xp4 = blockIdx.x*LAPLACE_W + 4*tx; const int yp = blockIdx.y; float kern[LAPLACE_S][LAPLACE_R+1]; float *data = d_Image + max(min(xp - 4, width-1), 0); float temp[9]; if (xp<(width + 2*LAPLACE_R)) { for (int i=0;i<4;i++) temp[i] = data[max(0, min(yp+i-4, 
height-1))*pitch]; for (int i=4;i<8+1;i++) temp[i] = data[min(yp+i-4, height-1)*pitch]; for (int scale=0;scale<LAPLACE_S;scale++) { float *kernel = d_LaplaceKernel + octave*12*16 + scale*16; for (int i=0;i<=LAPLACE_R;i++) kern[scale][i] = kernel[LAPLACE_R - i]; float *buf = buff + (LAPLACE_W + 2*LAPLACE_R)*scale; buf[tx] = kern[scale][4]*temp[4] + kern[scale][3]*(temp[3] + temp[5]) + kern[scale][2]*(temp[2] + temp[6]) + kern[scale][1]*(temp[1] + temp[7]) + kern[scale][0]*(temp[0] + temp[8]); } } __syncthreads(); if (tx<LAPLACE_W/4 && xp4<width) { float4 b0 = reinterpret_cast<float4*>(buff)[tx+0]; float4 b1 = reinterpret_cast<float4*>(buff)[tx+1]; float4 b2 = reinterpret_cast<float4*>(buff)[tx+2]; float4 old4, new4, dif4; old4.x = kern[0][4]*b1.x + kern[0][3]*(b0.w + b1.y) + kern[0][2]*(b0.z + b1.z) + kern[0][1]*(b0.y + b1.w) + kern[0][0]*(b0.x + b2.x); old4.y = kern[0][4]*b1.y + kern[0][3]*(b1.x + b1.z) + kern[0][2]*(b0.w + b1.w) + kern[0][1]*(b0.z + b2.x) + kern[0][0]*(b0.y + b2.y); old4.z = kern[0][4]*b1.z + kern[0][3]*(b1.y + b1.w) + kern[0][2]*(b1.x + b2.x) + kern[0][1]*(b0.w + b2.y) + kern[0][0]*(b0.z + b2.z); old4.w = kern[0][4]*b1.w + kern[0][3]*(b1.z + b2.x) + kern[0][2]*(b1.y + b2.y) + kern[0][1]*(b1.x + b2.z) + kern[0][0]*(b0.w + b2.w); for (int scale=1;scale<LAPLACE_S;scale++) { float *buf = buff + (LAPLACE_W + 2*LAPLACE_R)*scale; float4 b0 = reinterpret_cast<float4*>(buf)[tx+0]; float4 b1 = reinterpret_cast<float4*>(buf)[tx+1]; float4 b2 = reinterpret_cast<float4*>(buf)[tx+2]; new4.x = kern[scale][4]*b1.x + kern[scale][3]*(b0.w + b1.y) + kern[scale][2]*(b0.z + b1.z) + kern[scale][1]*(b0.y + b1.w) + kern[scale][0]*(b0.x + b2.x); new4.y = kern[scale][4]*b1.y + kern[scale][3]*(b1.x + b1.z) + kern[scale][2]*(b0.w + b1.w) + kern[scale][1]*(b0.z + b2.x) + kern[scale][0]*(b0.y + b2.y); new4.z = kern[scale][4]*b1.z + kern[scale][3]*(b1.y + b1.w) + kern[scale][2]*(b1.x + b2.x) + kern[scale][1]*(b0.w + b2.y) + kern[scale][0]*(b0.z + b2.z); new4.w = kern[scale][4]*b1.w + kern[scale][3]*(b1.z + b2.x) + kern[scale][2]*(b1.y + b2.y) + kern[scale][1]*(b1.x + b2.z) + kern[scale][0]*(b0.w + b2.w); dif4.x = new4.x - old4.x; dif4.y = new4.y - old4.y; dif4.z = new4.z - old4.z; dif4.w = new4.w - old4.w; reinterpret_cast<float4*>(&d_Result[(scale-1)*height*pitch + yp*pitch + xp4])[0] = dif4; old4 = new4; } } } __global__ void LaplaceMultiMemTest(float *d_Image, float *d_Result, int width, int pitch, int height, int octave) { __shared__ float data1[(LAPLACE_W + 2*LAPLACE_R)*LAPLACE_S]; __shared__ float data2[LAPLACE_W*LAPLACE_S]; const int tx = threadIdx.x; const int xp = blockIdx.x*LAPLACE_W + tx; const int yp = LAPLACE_H*blockIdx.y; const int scale = threadIdx.y; float *kernel = d_LaplaceKernel + octave*12*16 + scale*16; float *sdata1 = data1 + (LAPLACE_W + 2*LAPLACE_R)*scale; float *data = d_Image + max(min(xp - 4, width-1), 0); int h = height-1; float temp[8+LAPLACE_H], kern[LAPLACE_R+1]; for (int i=0;i<4;i++) temp[i] = data[max(0, min(yp+i-4, h))*pitch]; for (int i=4;i<8+LAPLACE_H;i++) temp[i] = data[min(yp+i-4, h)*pitch]; for (int i=0;i<=LAPLACE_R;i++) kern[i] = kernel[LAPLACE_R - i]; for (int j=0;j<LAPLACE_H;j++) { sdata1[tx] = kern[4]*temp[4+j] + kern[3]*(temp[3+j] + temp[5+j]) + kern[2]*(temp[2+j] + temp[6+j]) + kern[1]*(temp[1+j] + temp[7+j]) + kern[0]*(temp[0+j] + temp[8+j]); __syncthreads(); float *sdata2 = data2 + LAPLACE_W*scale; if (tx<LAPLACE_W) { sdata2[tx] = kern[4]*sdata1[tx+4] + kern[3]*(sdata1[tx+3] + sdata1[tx+5]) + kern[2]*(sdata1[tx+2] + sdata1[tx+6]) + kern[1]*(sdata1[tx+1] + 
sdata1[tx+7]) + kern[0]*(sdata1[tx+0] + sdata1[tx+8]); } __syncthreads(); if (tx<LAPLACE_W && scale<LAPLACE_S-1 && xp<width && (yp+j)<height) d_Result[scale*height*pitch + (yp+j)*pitch + xp] = sdata2[tx] - sdata2[tx+LAPLACE_W]; } } __global__ void LaplaceMultiMemOld(float *d_Image, float *d_Result, int width, int pitch, int height, int octave) { __shared__ float data1[(LAPLACE_W + 2*LAPLACE_R)*LAPLACE_S]; __shared__ float data2[LAPLACE_W*LAPLACE_S]; const int tx = threadIdx.x; const int xp = blockIdx.x*LAPLACE_W + tx; const int yp = blockIdx.y; const int scale = threadIdx.y; float *kernel = d_LaplaceKernel + octave*12*16 + scale*16; float *sdata1 = data1 + (LAPLACE_W + 2*LAPLACE_R)*scale; float *data = d_Image + max(min(xp - 4, width-1), 0); int h = height-1; sdata1[tx] = kernel[0]*data[min(yp, h)*pitch] + kernel[1]*(data[max(0, min(yp-1, h))*pitch] + data[min(yp+1, h)*pitch]) + kernel[2]*(data[max(0, min(yp-2, h))*pitch] + data[min(yp+2, h)*pitch]) + kernel[3]*(data[max(0, min(yp-3, h))*pitch] + data[min(yp+3, h)*pitch]) + kernel[4]*(data[max(0, min(yp-4, h))*pitch] + data[min(yp+4, h)*pitch]); __syncthreads(); float *sdata2 = data2 + LAPLACE_W*scale; if (tx<LAPLACE_W) { sdata2[tx] = kernel[0]*sdata1[tx+4] + kernel[1]*(sdata1[tx+3] + sdata1[tx+5]) + kernel[2]*(sdata1[tx+2] + sdata1[tx+6]) + kernel[3]*(sdata1[tx+1] + sdata1[tx+7]) + kernel[4]*(sdata1[tx+0] + sdata1[tx+8]); } __syncthreads(); if (tx<LAPLACE_W && scale<LAPLACE_S-1 && xp<width) d_Result[scale*height*pitch + yp*pitch + xp] = sdata2[tx] - sdata2[tx+LAPLACE_W]; } __global__ void LowPass(float *d_Image, float *d_Result, int width, int pitch, int height) { __shared__ float buffer[(LOWPASS_W + 2*LOWPASS_R)*LOWPASS_H]; const int tx = threadIdx.x; const int ty = threadIdx.y; const int xp = blockIdx.x*LOWPASS_W + tx; const int yp = blockIdx.y*LOWPASS_H + ty; float *kernel = d_LowPassKernel; float *data = d_Image + max(min(xp - 4, width-1), 0); float *buff = buffer + ty*(LOWPASS_W + 2*LOWPASS_R); int h = height-1; if (yp<height) buff[tx] = kernel[4]*data[min(yp, h)*pitch] + kernel[3]*(data[max(0, min(yp-1, h))*pitch] + data[min(yp+1, h)*pitch]) + kernel[2]*(data[max(0, min(yp-2, h))*pitch] + data[min(yp+2, h)*pitch]) + kernel[1]*(data[max(0, min(yp-3, h))*pitch] + data[min(yp+3, h)*pitch]) + kernel[0]*(data[max(0, min(yp-4, h))*pitch] + data[min(yp+4, h)*pitch]); __syncthreads(); if (tx<LOWPASS_W && xp<width && yp<height) d_Result[yp*pitch + xp] = kernel[4]*buff[tx+4] + kernel[3]*(buff[tx+3] + buff[tx+5]) + kernel[2]*(buff[tx+2] + buff[tx+6]) + kernel[1]*(buff[tx+1] + buff[tx+7]) + kernel[0]*(buff[tx+0] + buff[tx+8]); } __global__ void LowPassBlockOld(float *d_Image, float *d_Result, int width, int pitch, int height) { __shared__ float xrows[16][32]; const int tx = threadIdx.x; const int ty = threadIdx.y; const int xp = blockIdx.x*LOWPASS_W + tx; const int yp = blockIdx.y*LOWPASS_H + ty; const int N = 16; float *k = d_LowPassKernel; int xl = max(min(xp - 4, width-1), 0); for (int l=-8;l<=LOWPASS_H;l+=4) { if (l<LOWPASS_H) { int yl = max(min(yp + l + 4, height-1), 0); float val = d_Image[yl*pitch + xl]; xrows[(l + 8 + ty)%N][tx] = k[4]*ShiftDown(val, 4) + k[3]*(ShiftDown(val, 5) + ShiftDown(val, 3)) + k[2]*(ShiftDown(val, 6) + ShiftDown(val, 2)) + k[1]*(ShiftDown(val, 7) + ShiftDown(val, 1)) + k[0]*(ShiftDown(val, 8) + val); } if (l>=4) { int ys = yp + l - 4; if (xp<width && ys<height && tx<LOWPASS_W) d_Result[ys*pitch + xp] = k[4]*xrows[(l + 0 + ty)%N][tx] + k[3]*(xrows[(l - 1 + ty)%N][tx] + xrows[(l + 1 + ty)%N][tx]) + k[2]*(xrows[(l 
- 2 + ty)%N][tx] + xrows[(l + 2 + ty)%N][tx]) + k[1]*(xrows[(l - 3 + ty)%N][tx] + xrows[(l + 3 + ty)%N][tx]) + k[0]*(xrows[(l - 4 + ty)%N][tx] + xrows[(l + 4 + ty)%N][tx]); } if (l>=0) __syncthreads(); } } __global__ void LowPassBlock(float *d_Image, float *d_Result, int width, int pitch, int height) { __shared__ float xrows[16][32]; const int tx = threadIdx.x; const int ty = threadIdx.y; const int xp = blockIdx.x*LOWPASS_W + tx; const int yp = blockIdx.y*LOWPASS_H + ty; const int N = 16; float *k = d_LowPassKernel; int xl = max(min(xp - 4, width-1), 0); #pragma unroll for (int l=-8;l<4;l+=4) { int ly = l + ty; int yl = max(min(yp + l + 4, height-1), 0); float val = d_Image[yl*pitch + xl]; val = k[4]*ShiftDown(val, 4) + k[3]*(ShiftDown(val, 5) + ShiftDown(val, 3)) + k[2]*(ShiftDown(val, 6) + ShiftDown(val, 2)) + k[1]*(ShiftDown(val, 7) + ShiftDown(val, 1)) + k[0]*(ShiftDown(val, 8) + val); xrows[ly + 8][tx] = val; } __syncthreads(); #pragma unroll for (int l=4;l<LOWPASS_H;l+=4) { int ly = l + ty; int yl = min(yp + l + 4, height-1); float val = d_Image[yl*pitch + xl]; val = k[4]*ShiftDown(val, 4) + k[3]*(ShiftDown(val, 5) + ShiftDown(val, 3)) + k[2]*(ShiftDown(val, 6) + ShiftDown(val, 2)) + k[1]*(ShiftDown(val, 7) + ShiftDown(val, 1)) + k[0]*(ShiftDown(val, 8) + val); xrows[(ly + 8)%N][tx] = val; int ys = yp + l - 4; if (xp<width && ys<height && tx<LOWPASS_W) d_Result[ys*pitch + xp] = k[4]*xrows[(ly + 0)%N][tx] + k[3]*(xrows[(ly - 1)%N][tx] + xrows[(ly + 1)%N][tx]) + k[2]*(xrows[(ly - 2)%N][tx] + xrows[(ly + 2)%N][tx]) + k[1]*(xrows[(ly - 3)%N][tx] + xrows[(ly + 3)%N][tx]) + k[0]*(xrows[(ly - 4)%N][tx] + xrows[(ly + 4)%N][tx]); __syncthreads(); } int ly = LOWPASS_H + ty; int ys = yp + LOWPASS_H - 4; if (xp<width && ys<height && tx<LOWPASS_W) d_Result[ys*pitch + xp] = k[4]*xrows[(ly + 0)%N][tx] + k[3]*(xrows[(ly - 1)%N][tx] + xrows[(ly + 1)%N][tx]) + k[2]*(xrows[(ly - 2)%N][tx] + xrows[(ly + 2)%N][tx]) + k[1]*(xrows[(ly - 3)%N][tx] + xrows[(ly + 3)%N][tx]) + k[0]*(xrows[(ly - 4)%N][tx] + xrows[(ly + 4)%N][tx]); }
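// ---------------------------------------------------------------------------
// Editor's illustrative sketch (not part of the original file): one plausible
// host-side launch of the LowPass kernel above, inferred from how it indexes
// threadIdx/blockIdx and sizes its shared buffer. The real launch code lives
// elsewhere in cudaSift; d_LowPassKernel is assumed to be a constant-memory
// array of at least five floats, and h_kernel is assumed to hold the five
// symmetric taps k[0]..k[4] that the kernel reads.
// ---------------------------------------------------------------------------
static void LaunchLowPassSketch(float *d_Image, float *d_Result,
                                int width, int pitch, int height,
                                const float h_kernel[5])
{
  cudaMemcpyToSymbol(d_LowPassKernel, h_kernel, 5*sizeof(float));
  dim3 blocks(iDivUp(width, LOWPASS_W), iDivUp(height, LOWPASS_H));
  // LOWPASS_W output columns plus a 2*LOWPASS_R halo in x; LOWPASS_H rows per block
  dim3 threads(LOWPASS_W + 2*LOWPASS_R, LOWPASS_H);
  LowPass<<<blocks, threads>>>(d_Image, d_Result, width, pitch, height);
}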
92b37e0208e48f2c1260d1222cd9f0298731528a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "cudaSift.h" #include "cudautils.h" //================= Device matching functions =====================// __global__ void MatchSiftPoints(SiftPoint *sift1, SiftPoint *sift2, float *corrData, int numPts1, int numPts2) { __shared__ float siftPoint[128]; __shared__ float sums[16*16]; const int tx = threadIdx.x; const int ty = threadIdx.y; const int p1 = blockIdx.x; const int p2 = blockIdx.y*16 + ty; const float *ptr1 = sift1[p1].data; const float *ptr2 = sift2[p2].data; const int i = 16*ty + tx; if (ty<8) siftPoint[i] = ptr1[i]; __syncthreads(); float sum = 0.0f; if (p2<numPts2) for (int j=0;j<8;j++) sum += siftPoint[16*j+tx] * ptr2[16*j+tx]; sums[i] = sum; __syncthreads(); if (tx<8) sums[i] += sums[i+8]; __syncthreads(); if (tx<4) sums[i] += sums[i+4]; __syncthreads(); if (ty==0) { sum = sums[16*tx+0] + sums[16*tx+1] + sums[16*tx+2] + sums[16*tx+3]; corrData[p1*gridDim.y*16 + blockIdx.y*16 + tx] = sum; } __syncthreads(); } __global__ void MatchSiftPoints2(SiftPoint *sift1, SiftPoint *sift2, float *corrData, int numPts1, int numPts2) { __shared__ float siftPoints1[16*128]; __shared__ float siftPoints2[16*128]; const int tx = threadIdx.x; const int ty = threadIdx.y; const float *ptr1 = sift1[min(numPts1-1,blockIdx.x*16 + ty)].data; const float *ptr2 = sift2[min(numPts2-1,blockIdx.y*16 + ty)].data; for (int i=0;i<8;i++) { siftPoints1[128*ty+16*i+tx] = ptr1[16*i+tx]; siftPoints2[128*ty+16*i+tx] = ptr2[16*i+tx]; } __syncthreads(); const int p1 = blockIdx.x*16 + ty; const int p2 = blockIdx.y*16 + tx; const float *pt1 = &siftPoints1[ty*128]; const float *pt2 = &siftPoints2[tx*128]; float sum = 0.0f; for (int i=0;i<128;i++) { int itx = (i + tx)&127; // avoid bank conflicts sum += pt1[itx]*pt2[itx]; } if (p1<numPts1) corrData[p1*gridDim.y*16 + p2] = (p2<numPts2 ? sum : -1.0f); } __global__ void FindMaxCorr(float *corrData, SiftPoint *sift1, SiftPoint *sift2, int numPts1, int corrWidth, int siftSize) { __shared__ float maxScore[16*16]; __shared__ float maxScor2[16*16]; __shared__ int maxIndex[16*16]; const int tx = threadIdx.x; const int ty = threadIdx.y; const int idx = ty*16 + tx; int p1 = blockIdx.x*16 + threadIdx.y; p1 = (p1>=numPts1 ? 
numPts1-1 : p1); maxScore[idx] = -1.0f; maxScor2[idx] = -1.0f; maxIndex[idx] = -1; __syncthreads(); float *corrs = &corrData[p1*corrWidth]; for (int i=tx;i<corrWidth;i+=16) { float val = corrs[i]; if (val>maxScore[idx]) { maxScor2[idx] = maxScore[idx]; maxScore[idx] = val; maxIndex[idx] = i; } else if (val>maxScor2[idx]) maxScor2[idx] = val; } __syncthreads(); for (int len=8;len>0;len/=2) { if (tx<8) { float val = maxScore[idx+len]; int i = maxIndex[idx+len]; if (val>maxScore[idx]) { maxScor2[idx] = maxScore[idx]; maxScore[idx] = val; maxIndex[idx] = i; } else if (val>maxScor2[idx]) maxScor2[idx] = val; float va2 = maxScor2[idx+len]; if (va2>maxScor2[idx]) maxScor2[idx] = va2; } __syncthreads(); } if (tx==0) { sift1[p1].score = maxScore[ty*16]; sift1[p1].ambiguity = maxScor2[ty*16] / (maxScore[ty*16] + 1e-6); sift1[p1].match = maxIndex[ty*16]; sift1[p1].match_xpos = sift2[maxIndex[ty*16]].xpos; sift1[p1].match_ypos = sift2[maxIndex[ty*16]].ypos; } } // Version based on suggestion by Nicholas Lin __global__ void FindMaxCorr3(float *corrData, SiftPoint *sift1, SiftPoint *sift2, int numPts1, int numPts2) { int block_dim = blockDim.x; // blockDim.x == 16 const int tx = threadIdx.x; const int ty = threadIdx.y; const int p1 = blockIdx.x * block_dim + ty; const int idx = ty * 16 + tx; __shared__ int maxIndex[16 * 16]; maxIndex[idx] = 0; __syncthreads(); float *corrs = NULL; if (p1 < numPts1) { corrs = &corrData[p1 * block_dim * 2]; corrs[tx] = 0.0f; corrs[tx + 16] = 0.0f; const float *pt1 = sift1[p1].data; for (int p2 = tx; p2 < numPts2; p2 += 16) { float *pt2 = sift2[p2].data; float sum = 0.0f; for (int i = 0; i < 128; i++) sum += pt1[i] * pt2[i]; if (sum > corrs[tx]) { corrs[tx + 16] = corrs[tx]; corrs[tx] = sum; maxIndex[idx] = p2; } else if (sum > corrs[tx + 16]) corrs[tx + 16] = sum; } } __syncthreads(); if (p1 < numPts1) { for (int len = 8; len > 0; len /= 2) { if (tx < len) { float val = corrs[tx + len]; int i = maxIndex[idx + len]; if (val > corrs[tx]) { corrs[tx + 16] = corrs[tx]; corrs[tx] = val; maxIndex[idx] = i; } else if (val > corrs[tx + 16]) corrs[tx + 16] = val; float va2 = corrs[tx + 16 + len]; if (va2 > corrs[tx + 16]) corrs[tx + 16] = va2; } __syncthreads(); } if (tx==0) { sift1[p1].score = corrs[0]; sift1[p1].ambiguity = corrs[16] / (corrs[0] + 1e-6); sift1[p1].match = maxIndex[ty << 4]; sift1[p1].match_xpos = sift2[maxIndex[ty << 4]].xpos; sift1[p1].match_ypos = sift2[maxIndex[ty << 4]].ypos; } } } #define FMC2W 16 #define FMC2H 4 __global__ void FindMaxCorr2(SiftPoint *sift1, SiftPoint *sift2, int numPts1, int numPts2) { __shared__ float siftPoint[128]; __shared__ float maxScore[FMC2H]; __shared__ float maxScor2[FMC2H]; __shared__ int maxIndex[FMC2H]; const int p1 = blockIdx.x; if (p1>=numPts1) return; const int tx = threadIdx.x; const int ty = threadIdx.y; const int idx = ty*FMC2W + tx; if (idx<FMC2H) { maxScore[idx] = -1.0f; maxScor2[idx] = -1.0f; maxIndex[idx] = 0; } __syncthreads(); const float *pt1 = sift1[p1].data; for (int i=idx;i<128;i+=FMC2W*FMC2H) siftPoint[i] = pt1[i]; __syncthreads(); for (int p2=ty;p2<numPts2;p2+=FMC2H) { const float *pt2 = sift2[p2].data; float sum = 0.0f; for (int j=tx;j<128;j+=FMC2W) sum += siftPoint[j] * pt2[j]; for (int j=FMC2W/2;j>0;j/=2) sum += ShiftDown(sum, j); if (tx==0) { if (sum>maxScore[ty]) { maxScor2[ty] = maxScore[ty]; maxScore[ty] = sum; maxIndex[ty] = p2; } else if (sum>maxScor2[ty]) maxScor2[ty] = sum; } } __syncthreads(); for (int len=FMC2H/2;len>0;len/=2) { if (ty==0 && tx<len) { float val = maxScore[tx+len]; int p2 = 
maxIndex[tx+len]; if (val>maxScore[tx]) { maxScor2[tx] = maxScore[tx]; maxScore[tx] = val; maxIndex[tx] = p2; } else if (val>maxScor2[tx]) maxScor2[tx] = val; float va2 = maxScor2[tx+len]; if (va2>maxScor2[tx]) maxScor2[tx] = va2; } __syncthreads(); } if (ty==0 && tx==0) { sift1[p1].score = maxScore[0]; sift1[p1].ambiguity = maxScor2[0] / (maxScore[0] + 1e-6); sift1[p1].match = maxIndex[0]; sift1[p1].match_xpos = sift2[maxIndex[0]].xpos; sift1[p1].match_ypos = sift2[maxIndex[0]].ypos; } } __global__ void FindMaxCorr4(SiftPoint *sift1, SiftPoint *sift2, int numPts1, int numPts2) { __shared__ float siftPoint[128*FMC2H]; __shared__ float maxScore[FMC2H]; __shared__ float maxScor2[FMC2H]; __shared__ int maxIndex[FMC2H]; const int tx = threadIdx.x; const int ty = threadIdx.y; if (tx==0) { maxScore[ty] = -1.0f; maxScor2[ty] = -1.0f; maxIndex[ty] = 0; } const int p1 = blockIdx.x*FMC2H + ty; const float *pt1 = sift1[p1].data; for (int j=tx;j<128;j+=FMC2W) siftPoint[128*ty + j] = pt1[j]; __syncthreads(); for (int p2=0;p2<numPts2;p2++) { const float *pt2 = sift2[p2].data; float sum = 0.0f; for (int j=tx;j<128;j+=FMC2W) sum += siftPoint[128*ty + j] * pt2[j]; for (int j=FMC2W/2;j>0;j/=2) sum += ShiftDown(sum, j); if (tx==0) { if (sum>maxScore[ty]) { maxScor2[ty] = maxScore[ty]; maxScore[ty] = sum; maxIndex[ty] = p2; } else if (sum>maxScor2[ty]) maxScor2[ty] = sum; } } __syncthreads(); if (tx==0) { sift1[p1].score = maxScore[ty]; sift1[p1].ambiguity = maxScor2[ty] / (maxScore[ty] + 1e-6); sift1[p1].match = maxIndex[ty]; sift1[p1].match_xpos = sift2[maxIndex[ty]].xpos; sift1[p1].match_ypos = sift2[maxIndex[ty]].ypos; } } __global__ void CleanMatches(SiftPoint *sift1, int numPts1) { const int p1 = min(blockIdx.x*64 + threadIdx.x, numPts1-1); sift1[p1].score = 0.0f; } #define M7W 32 #define M7H 32 #define M7R 4 #define NRX 2 #define NDIM 128 __global__ void FindMaxCorr10(SiftPoint *sift1, SiftPoint *sift2, int numPts1, int numPts2) { __shared__ float4 buffer1[M7W*NDIM/4]; __shared__ float4 buffer2[M7H*NDIM/4]; int tx = threadIdx.x; int ty = threadIdx.y; int bp1 = M7W*blockIdx.x; for (int j=ty;j<M7W;j+=M7H/M7R) { int p1 = min(bp1 + j, numPts1 - 1); for (int d=tx;d<NDIM/4;d+=M7W) buffer1[j*NDIM/4 + (d + j)%(NDIM/4)] = ((float4*)&sift1[p1].data)[d]; } float max_score[NRX]; float sec_score[NRX]; int index[NRX]; for (int i=0;i<NRX;i++) { max_score[i] = 0.0f; sec_score[i] = 0.0f; index[i] = -1; } int idx = ty*M7W + tx; int ix = idx%(M7W/NRX); int iy = idx/(M7W/NRX); for (int bp2=0;bp2<numPts2 - M7H + 1;bp2+=M7H) { for (int j=ty;j<M7H;j+=M7H/M7R) { int p2 = min(bp2 + j, numPts2 - 1); for (int d=tx;d<NDIM/4;d+=M7W) buffer2[j*NDIM/4 + d] = ((float4*)&sift2[p2].data)[d]; } __syncthreads(); if (idx<M7W*M7H/M7R/NRX) { float score[M7R][NRX]; for (int dy=0;dy<M7R;dy++) for (int i=0;i<NRX;i++) score[dy][i] = 0.0f; for (int d=0;d<NDIM/4;d++) { float4 v1[NRX]; for (int i=0;i<NRX;i++) v1[i] = buffer1[((M7W/NRX)*i + ix)*NDIM/4 + (d + (M7W/NRX)*i + ix)%(NDIM/4)]; for (int dy=0;dy<M7R;dy++) { float4 v2 = buffer2[(M7R*iy + dy)*(NDIM/4) + d]; for (int i=0;i<NRX;i++) { score[dy][i] += v1[i].x*v2.x; score[dy][i] += v1[i].y*v2.y; score[dy][i] += v1[i].z*v2.z; score[dy][i] += v1[i].w*v2.w; } } } for (int dy=0;dy<M7R;dy++) { for (int i=0;i<NRX;i++) { if (score[dy][i]>max_score[i]) { sec_score[i] = max_score[i]; max_score[i] = score[dy][i]; index[i] = min(bp2 + M7R*iy + dy, numPts2-1); } else if (score[dy][i]>sec_score[i]) sec_score[i] = score[dy][i]; } } } __syncthreads(); } float *scores1 = (float*)buffer1; float *scores2 = 
&scores1[M7W*M7H/M7R]; int *indices = (int*)&scores2[M7W*M7H/M7R]; if (idx<M7W*M7H/M7R/NRX) { for (int i=0;i<NRX;i++) { scores1[iy*M7W + (M7W/NRX)*i + ix] = max_score[i]; scores2[iy*M7W + (M7W/NRX)*i + ix] = sec_score[i]; indices[iy*M7W + (M7W/NRX)*i + ix] = index[i]; } } __syncthreads(); if (ty==0) { float max_score = scores1[tx]; float sec_score = scores2[tx]; int index = indices[tx]; for (int y=0;y<M7H/M7R;y++) if (index != indices[y*M7W + tx]) { if (scores1[y*M7W + tx]>max_score) { sec_score = max(max_score, sec_score); max_score = scores1[y*M7W + tx]; index = indices[y*M7W + tx]; } else if (scores1[y*M7W + tx]>sec_score) sec_score = scores1[y*M7W + tx]; } sift1[bp1 + tx].score = max_score; sift1[bp1 + tx].match = index; sift1[bp1 + tx].match_xpos = sift2[index].xpos; sift1[bp1 + tx].match_ypos = sift2[index].ypos; sift1[bp1 + tx].ambiguity = sec_score / (max_score + 1e-6f); } } #define FMC_GH 512 #define FMC_BW 32 #define FMC_BH 32 #define FMC_BD 16 #define FMC_TW 1 #define FMC_TH 4 #define FMC_NW (FMC_BW/FMC_TW) // 32 #define FMC_NH (FMC_BH/FMC_TH) // 8 #define FMC_NT (FMC_NW*FMC_NH) // 256 = 8 warps __device__ volatile int lock = 0; __global__ void FindMaxCorr9(SiftPoint *sift1, SiftPoint *sift2, int numPts1, int numPts2) { __shared__ float4 siftParts1[FMC_BW*FMC_BD]; // 4*32*8 = 1024 __shared__ float4 siftParts2[FMC_BH*FMC_BD]; // 4*32*8 = 1024 //__shared__ float blksums[FMC_BW*FMC_BH]; // 32*32 = 1024 const int tx = threadIdx.x; const int ty = threadIdx.y; const int idx = ty*FMC_NW + tx; float4 *pts1 = 0, *pts2 = 0; if (idx<FMC_BW) { const int p1l = min(blockIdx.x*FMC_BW + idx, numPts1-1); pts1 = (float4*)sift1[p1l].data; } float maxScore = -1.0f; float maxScor2 = -1.0f; int maxIndex = 0; for (int k=0;k<min(FMC_GH, numPts2 - FMC_BH + 1);k+=FMC_BH) { if (idx<FMC_BH) { const int p2l = min(blockIdx.y*FMC_GH + k + idx, numPts2-1); pts2 = (float4*)sift2[p2l].data; } float sums[FMC_TW*FMC_TH]; for (int i=0;i<FMC_TW*FMC_TH;i++) sums[i] = 0.0f; if (idx<FMC_BW) for (int i=0;i<FMC_BD/2;i++) siftParts1[(i + 0)*FMC_BW + idx] = pts1[0 + i]; if (idx<FMC_BH) for (int i=0;i<FMC_BD/2;i++) siftParts2[(i + 0)*FMC_BH + idx] = pts2[0 + i]; __syncthreads(); int b = FMC_BD/2; for (int d=FMC_BD/2;d<32;d+=FMC_BD/2) { if (idx<FMC_BW) for (int i=0;i<FMC_BD/2;i++) siftParts1[(i + b)*FMC_BW + idx] = pts1[d + i]; if (idx<FMC_BH) for (int i=0;i<FMC_BD/2;i++) siftParts2[(i + b)*FMC_BH + idx] = pts2[d + i]; b ^= FMC_BD/2; for (int i=0;i<FMC_BD/2;i++) { float4 v1[FMC_TW]; for (int ix=0;ix<FMC_TW;ix++) v1[ix] = siftParts1[(i + b)*FMC_BW + (tx*FMC_TW + ix)]; for (int iy=0;iy<FMC_TH;iy++) { float4 v2 = siftParts2[(i + b)*FMC_BH + (ty*FMC_TH + iy)]; for (int ix=0;ix<FMC_TW;ix++) { sums[iy*FMC_TW + ix] += v1[ix].x * v2.x; sums[iy*FMC_TW + ix] += v1[ix].y * v2.y; sums[iy*FMC_TW + ix] += v1[ix].z * v2.z; sums[iy*FMC_TW + ix] += v1[ix].w * v2.w; } } } __syncthreads(); } b ^= FMC_BD/2; for (int i=0;i<FMC_BD/2;i++) { float4 v1[FMC_TW]; for (int ix=0;ix<FMC_TW;ix++) v1[ix] = siftParts1[(i + b)*FMC_BW + (tx*FMC_TW + ix)]; for (int iy=0;iy<FMC_TH;iy++) { float4 v2 = siftParts2[(i + b)*FMC_BH + (ty*FMC_TH + iy)]; for (int ix=0;ix<FMC_TW;ix++) { sums[iy*FMC_TW + ix] += v1[ix].x * v2.x; sums[iy*FMC_TW + ix] += v1[ix].y * v2.y; sums[iy*FMC_TW + ix] += v1[ix].z * v2.z; sums[iy*FMC_TW + ix] += v1[ix].w * v2.w; } } } __syncthreads(); float *blksums = (float*)siftParts1; for (int iy=0;iy<FMC_TH;iy++) for (int ix=0;ix<FMC_TW;ix++) blksums[(ty*FMC_TH + iy)*FMC_BW + (tx*FMC_TW + ix)] = sums[iy*FMC_TW + ix]; __syncthreads(); if 
(idx<FMC_BW) { for (int j=0;j<FMC_BH;j++) { float sum = blksums[j*FMC_BW + idx]; if (sum>maxScore) { maxScor2 = maxScore; maxScore = sum; maxIndex = min(blockIdx.y*FMC_GH + k + j, numPts2-1); } else if (sum>maxScor2) maxScor2 = sum; } } __syncthreads(); } const int p1 = min(blockIdx.x*FMC_BW + idx, numPts1-1); if (idx==0) while (atomicCAS((int *)&lock, 0, 1) != 0); __syncthreads(); if (idx<FMC_BW) { float maxScor2Old = sift1[p1].ambiguity*(sift1[p1].score + 1e-6f); if (maxScore>sift1[p1].score) { maxScor2 = max(sift1[p1].score, maxScor2); sift1[p1].ambiguity = maxScor2 / (maxScore + 1e-6f); sift1[p1].score = maxScore; sift1[p1].match = maxIndex; sift1[p1].match_xpos = sift2[maxIndex].xpos; sift1[p1].match_ypos = sift2[maxIndex].ypos; } else if (maxScore>maxScor2Old) sift1[p1].ambiguity = maxScore / (sift1[p1].score + 1e-6f); } __syncthreads(); if (idx==0) atomicExch((int* )&lock, 0); } __global__ void FindMaxCorr8(SiftPoint *sift1, SiftPoint *sift2, int numPts1, int numPts2) { __shared__ float4 siftParts1[FMC_BW*FMC_BD]; // 4*32*8 = 1024 __shared__ float4 siftParts2[FMC_BH*FMC_BD]; // 4*32*8 = 1024 __shared__ float blksums[FMC_BW*FMC_BH]; // 32*32 = 1024 const int tx = threadIdx.x; const int ty = threadIdx.y; const int idx = ty*FMC_NW + tx; float4 *pts1 = 0, *pts2 = 0; if (idx<FMC_BW) { const int p1l = min(blockIdx.x*FMC_BW + idx, numPts1-1); pts1 = (float4*)sift1[p1l].data; } float maxScore = -1.0f; float maxScor2 = -1.0f; int maxIndex = 0; for (int k=0;k<min(FMC_GH, numPts2 - FMC_BH + 1);k+=FMC_BH) { if (idx<FMC_BH) { const int p2l = min(blockIdx.y*FMC_GH + k + idx, numPts2-1); pts2 = (float4*)sift2[p2l].data; } float sums[FMC_TW*FMC_TH]; for (int i=0;i<FMC_TW*FMC_TH;i++) sums[i] = 0.0f; for (int d=0;d<32;d+=FMC_BD) { if (idx<FMC_BW) for (int i=0;i<FMC_BD;i++) siftParts1[i*FMC_BW + idx] = pts1[d + i]; if (idx<FMC_BH) for (int i=0;i<FMC_BD;i++) siftParts2[i*FMC_BH + idx] = pts2[d + i]; __syncthreads(); for (int i=0;i<FMC_BD;i++) { float4 v1[FMC_TW]; for (int ix=0;ix<FMC_TW;ix++) v1[ix] = siftParts1[i*FMC_BW + (tx*FMC_TW + ix)]; for (int iy=0;iy<FMC_TH;iy++) { float4 v2 = siftParts2[i*FMC_BH + (ty*FMC_TH + iy)]; for (int ix=0;ix<FMC_TW;ix++) { sums[iy*FMC_TW + ix] += v1[ix].x * v2.x; sums[iy*FMC_TW + ix] += v1[ix].y * v2.y; sums[iy*FMC_TW + ix] += v1[ix].z * v2.z; sums[iy*FMC_TW + ix] += v1[ix].w * v2.w; } } } __syncthreads(); } //float *blksums = (float*)siftParts1; for (int iy=0;iy<FMC_TH;iy++) for (int ix=0;ix<FMC_TW;ix++) blksums[(ty*FMC_TH + iy)*FMC_BW + (tx*FMC_TW + ix)] = sums[iy*FMC_TW + ix]; __syncthreads(); if (idx<FMC_BW) { for (int j=0;j<FMC_BH;j++) { float sum = blksums[j*FMC_BW + idx]; if (sum>maxScore) { maxScor2 = maxScore; maxScore = sum; maxIndex = min(blockIdx.y*FMC_GH + k + j, numPts2-1); } else if (sum>maxScor2) maxScor2 = sum; } } __syncthreads(); } const int p1 = min(blockIdx.x*FMC_BW + idx, numPts1-1); if (idx==0) while (atomicCAS((int *)&lock, 0, 1) != 0); __syncthreads(); if (idx<FMC_BW) { float maxScor2Old = sift1[p1].ambiguity*(sift1[p1].score + 1e-6f); if (maxScore>sift1[p1].score) { maxScor2 = max(sift1[p1].score, maxScor2); sift1[p1].ambiguity = maxScor2 / (maxScore + 1e-6f); sift1[p1].score = maxScore; sift1[p1].match = maxIndex; sift1[p1].match_xpos = sift2[maxIndex].xpos; sift1[p1].match_ypos = sift2[maxIndex].ypos; } else if (maxScore>maxScor2Old) sift1[p1].ambiguity = maxScore / (sift1[p1].score + 1e-6f); } __syncthreads(); if (idx==0) atomicExch((int* )&lock, 0); } __global__ void FindMaxCorr7(SiftPoint *sift1, SiftPoint *sift2, int numPts1, int 
numPts2) { __shared__ float siftParts1[17*64]; // features in columns __shared__ float siftParts2[16*64]; // one extra to avoid shared conflicts float4 *pts1 = (float4*)siftParts1; float4 *pts2 = (float4*)siftParts2; const int tx = threadIdx.x; const int ty = threadIdx.y; const int p1l = min(blockIdx.x*16 + ty, numPts1-1); const float4 *p1l4 = (float4*)sift1[p1l].data; float maxScore = -1.0f; float maxScor2 = -1.0f; int maxIndex = 0; for (int k=0;k<512/16;k++) { const int p2l = min(blockIdx.y*512 + k*16 + ty, numPts2-1); const float4 *p2l4 = (float4*)sift2[p2l].data; #define NUM 4 float sum[NUM]; if (ty<(16/NUM)) for (int l=0;l<NUM;l++) sum[l] = 0.0f; __syncthreads(); for (int i=0;i<2;i++) { pts1[17*tx + ty] = p1l4[i*16 + tx]; pts2[16*ty + tx] = p2l4[i*16 + tx]; __syncthreads(); if (ty<(16/NUM)) { #pragma unroll for (int j=0;j<16;j++) { float4 p1v = pts1[17* j + tx]; #pragma unroll for (int l=0;l<NUM;l++) { float4 p2v = pts2[16*(ty + l*(16/NUM)) + j]; sum[l] += p1v.x * p2v.x; sum[l] += p1v.y * p2v.y; sum[l] += p1v.z * p2v.z; sum[l] += p1v.w * p2v.w; } } } __syncthreads(); } float *sums = siftParts1; if (ty<(16/NUM)) for (int l=0;l<NUM;l++) sums[16*(ty + l*(16/NUM)) + tx] = sum[l]; __syncthreads(); if (ty==0) { for (int j=0;j<16;j++) { float sum = sums[16*j + tx]; if (sum>maxScore) { maxScor2 = maxScore; maxScore = sum; maxIndex = min(blockIdx.y*512 + k*16 + j, numPts2-1); } else if (sum>maxScor2) maxScor2 = sum; } } __syncthreads(); } const int p1 = min(blockIdx.x*16 + tx, numPts1-1); if (tx==0 && ty==0) while (atomicCAS((int *)&lock, 0, 1) != 0); __syncthreads(); if (ty==0) { float maxScor2Old = sift1[p1].ambiguity*(sift1[p1].score + 1e-6f); if (maxScore>sift1[p1].score) { maxScor2 = max(sift1[p1].score, maxScor2); sift1[p1].ambiguity = maxScor2 / (maxScore + 1e-6f); sift1[p1].score = maxScore; sift1[p1].match = maxIndex; sift1[p1].match_xpos = sift2[maxIndex].xpos; sift1[p1].match_ypos = sift2[maxIndex].ypos; } else if (maxScore>maxScor2Old) sift1[p1].ambiguity = maxScore / (sift1[p1].score + 1e-6f); } __syncthreads(); if (tx==0 && ty==0) atomicExch((int* )&lock, 0); } __global__ void FindMaxCorr6(SiftPoint *sift1, SiftPoint *sift2, int numPts1, int numPts2) { //__shared__ float siftParts1[128*16]; // features in columns __shared__ float siftParts2[128*16]; // one extra to avoid shared conflicts __shared__ float sums[16*16]; const int tx = threadIdx.x; const int ty = threadIdx.y; const int p1l = min(blockIdx.x*16 + ty, numPts1-1); float *pt1l = sift1[p1l].data; float4 part1 = reinterpret_cast<float4*>(pt1l)[tx]; float maxScore = -1.0f; float maxScor2 = -1.0f; int maxIndex = 0; for (int k=0;k<512;k+=16) { const int p2l = min(blockIdx.y*512 + k + ty, numPts2-1); float *pt2l = sift2[p2l].data; reinterpret_cast<float4*>(siftParts2)[32*ty + tx] = reinterpret_cast<float4*>(pt2l)[tx]; __syncthreads(); for (int i=0;i<16;i++) { float4 part2 = reinterpret_cast<float4*>(siftParts2)[32*i + tx]; float sum = part1.x*part2.x + part1.y*part2.y + part1.z*part2.z + part1.w*part2.w; sum += ShiftDown(sum, 16); sum += ShiftDown(sum, 8); sum += ShiftDown(sum, 4); sum += ShiftDown(sum, 2); sum += ShiftDown(sum, 1); if (tx==0) sums[16*i + ty] = sum; } __syncthreads(); if (ty==0 && tx<16) { for (int j=0;j<16;j++) { float sum = sums[16*j + tx]; if (sum>maxScore) { maxScor2 = maxScore; maxScore = sum; maxIndex = min(blockIdx.y*512 + k + j, numPts2-1); } else if (sum>maxScor2) maxScor2 = sum; } } __syncthreads(); } if (tx==0 && ty==0) while (atomicCAS((int *)&lock, 0, 1) != 0); __syncthreads(); if (ty==0 && tx<16) 
{ const int p1 = min(blockIdx.x*16 + tx, numPts1-1); float maxScor2Old = sift1[p1].ambiguity*(sift1[p1].score + 1e-6f); if (maxScore>sift1[p1].score) { maxScor2 = max(sift1[p1].score, maxScor2); sift1[p1].ambiguity = maxScor2 / (maxScore + 1e-6f); sift1[p1].score = maxScore; sift1[p1].match = maxIndex; sift1[p1].match_xpos = sift2[maxIndex].xpos; sift1[p1].match_ypos = sift2[maxIndex].ypos; } else if (maxScore>maxScor2Old) sift1[p1].ambiguity = maxScore / (sift1[p1].score + 1e-6f); } __syncthreads(); if (tx==0 && ty==0) atomicExch((int* )&lock, 0); } __global__ void FindMaxCorr5(SiftPoint *sift1, SiftPoint *sift2, int numPts1, int numPts2) { __shared__ float siftParts1[17*16]; // features in columns __shared__ float siftParts2[17*16]; // one extra to avoid shared conflicts const int tx = threadIdx.x; const int ty = threadIdx.y; const int p1l = min(blockIdx.x*16 + ty, numPts1-1); const float *pt1l = sift1[p1l].data; float maxScore = -1.0f; float maxScor2 = -1.0f; int maxIndex = 0; for (int k=0;k<512/16;k++) { const int p2l = min(blockIdx.y*512 + k*16 + ty, numPts2-1); const float *pt2l = sift2[p2l].data; float sum = 0.0f; for (int i=0;i<8;i++) { siftParts1[17*tx + ty] = pt1l[i*16 + tx]; // load and transpose siftParts2[17*tx + ty] = pt2l[i*16 + tx]; __syncthreads(); for (int j=0;j<16;j++) sum += siftParts1[17*j + tx] * siftParts2[17*j + ty]; __syncthreads(); } float *sums = siftParts1; sums[16*ty + tx] = sum; __syncthreads(); if (ty==0) { for (int j=0;j<16;j++) { float sum = sums[16*j + tx]; if (sum>maxScore) { maxScor2 = maxScore; maxScore = sum; maxIndex = min(blockIdx.y*512 + k*16 + j, numPts2-1); } else if (sum>maxScor2) maxScor2 = sum; } } __syncthreads(); } const int p1 = min(blockIdx.x*16 + tx, numPts1-1); if (tx==0 && ty==0) while (atomicCAS((int *)&lock, 0, 1) != 0); __syncthreads(); if (ty==0) { float maxScor2Old = sift1[p1].ambiguity*(sift1[p1].score + 1e-6f); if (maxScore>sift1[p1].score) { maxScor2 = max(sift1[p1].score, maxScor2); sift1[p1].ambiguity = maxScor2 / (maxScore + 1e-6f); sift1[p1].score = maxScore; sift1[p1].match = maxIndex; sift1[p1].match_xpos = sift2[maxIndex].xpos; sift1[p1].match_ypos = sift2[maxIndex].ypos; } else if (maxScore>maxScor2Old) sift1[p1].ambiguity = maxScore / (sift1[p1].score + 1e-6f); } __syncthreads(); if (tx==0 && ty==0) atomicExch((int* )&lock, 0); } template <int size> __device__ void InvertMatrix(float elem[size][size], float res[size][size]) { int indx[size]; float b[size]; float vv[size]; for (int i=0;i<size;i++) indx[i] = 0; int imax = 0; float d = 1.0; for (int i=0;i<size;i++) { // find biggest element for each row float big = 0.0; for (int j=0;j<size;j++) { float temp = fabs(elem[i][j]); if (temp>big) big = temp; } if (big>0.0) vv[i] = 1.0/big; else vv[i] = 1e16; } for (int j=0;j<size;j++) { for (int i=0;i<j;i++) { // i<j float sum = elem[i][j]; // i<j (lower left) for (int k=0;k<i;k++) // k<i<j sum -= elem[i][k]*elem[k][j]; // i>k (upper right), k<j (lower left) elem[i][j] = sum; // i<j (lower left) } float big = 0.0; for (int i=j;i<size;i++) { // i>=j float sum = elem[i][j]; // i>=j (upper right) for (int k=0;k<j;k++) // k<j<=i sum -= elem[i][k]*elem[k][j]; // i>k (upper right), k<j (lower left) elem[i][j] = sum; // i>=j (upper right) float dum = vv[i]*fabs(sum); if (dum>=big) { big = dum; imax = i; } } if (j!=imax) { // imax>j for (int k=0;k<size;k++) { float dum = elem[imax][k]; // upper right and lower left elem[imax][k] = elem[j][k]; elem[j][k] = dum; } d = -d; vv[imax] = vv[j]; } indx[j] = imax; if (elem[j][j]==0.0) // j==j 
(upper right) elem[j][j] = 1e-16; if (j!=(size-1)) { float dum = 1.0/elem[j][j]; for (int i=j+1;i<size;i++) // i>j elem[i][j] *= dum; // i>j (upper right) } } for (int j=0;j<size;j++) { for (int k=0;k<size;k++) b[k] = 0.0; b[j] = 1.0; int ii = -1; for (int i=0;i<size;i++) { int ip = indx[i]; float sum = b[ip]; b[ip] = b[i]; if (ii!=-1) for (int j=ii;j<i;j++) sum -= elem[i][j]*b[j]; // i>j (upper right) else if (sum!=0.0) ii = i; b[i] = sum; } for (int i=size-1;i>=0;i--) { float sum = b[i]; for (int j=i+1;j<size;j++) sum -= elem[i][j]*b[j]; // i<j (lower left) b[i] = sum/elem[i][i]; // i==i (upper right) } for (int i=0;i<size;i++) res[i][j] = b[i]; } } __global__ void ComputeHomographies(float *coord, int *randPts, float *homo, int numPts) { float a[8][8], ia[8][8]; float b[8]; const int bx = blockIdx.x; const int tx = threadIdx.x; const int idx = blockDim.x*bx + tx; const int numLoops = blockDim.x*gridDim.x; for (int i=0;i<4;i++) { int pt = randPts[i*numLoops+idx]; float x1 = coord[pt+0*numPts]; float y1 = coord[pt+1*numPts]; float x2 = coord[pt+2*numPts]; float y2 = coord[pt+3*numPts]; float *row1 = a[2*i+0]; row1[0] = x1; row1[1] = y1; row1[2] = 1.0; row1[3] = row1[4] = row1[5] = 0.0; row1[6] = -x2*x1; row1[7] = -x2*y1; float *row2 = a[2*i+1]; row2[0] = row2[1] = row2[2] = 0.0; row2[3] = x1; row2[4] = y1; row2[5] = 1.0; row2[6] = -y2*x1; row2[7] = -y2*y1; b[2*i+0] = x2; b[2*i+1] = y2; } InvertMatrix<8>(a, ia); __syncthreads(); for (int j=0;j<8;j++) { float sum = 0.0f; for (int i=0;i<8;i++) sum += ia[j][i]*b[i]; homo[j*numLoops+idx] = sum; } __syncthreads(); } #define TESTHOMO_TESTS 16 // number of tests per block, alt. 32, 32 #define TESTHOMO_LOOPS 16 // number of loops per block, alt. 8, 16 __global__ void TestHomographies(float *d_coord, float *d_homo, int *d_counts, int numPts, float thresh2) { __shared__ float homo[8*TESTHOMO_LOOPS]; __shared__ int cnts[TESTHOMO_TESTS*TESTHOMO_LOOPS]; const int tx = threadIdx.x; const int ty = threadIdx.y; const int idx = blockIdx.y*blockDim.y + tx; const int numLoops = blockDim.y*gridDim.y; if (ty<8 && tx<TESTHOMO_LOOPS) homo[tx*8+ty] = d_homo[idx+ty*numLoops]; __syncthreads(); float a[8]; for (int i=0;i<8;i++) a[i] = homo[ty*8+i]; int cnt = 0; for (int i=tx;i<numPts;i+=TESTHOMO_TESTS) { float x1 = d_coord[i+0*numPts]; float y1 = d_coord[i+1*numPts]; float x2 = d_coord[i+2*numPts]; float y2 = d_coord[i+3*numPts]; float nomx = __fmul_rz(a[0],x1) + __fmul_rz(a[1],y1) + a[2]; float nomy = __fmul_rz(a[3],x1) + __fmul_rz(a[4],y1) + a[5]; float deno = __fmul_rz(a[6],x1) + __fmul_rz(a[7],y1) + 1.0f; float errx = __fmul_rz(x2,deno) - nomx; float erry = __fmul_rz(y2,deno) - nomy; float err2 = __fmul_rz(errx,errx) + __fmul_rz(erry,erry); if (err2<__fmul_rz(thresh2,__fmul_rz(deno,deno))) cnt ++; } int kty = TESTHOMO_TESTS*ty; cnts[kty + tx] = cnt; __syncthreads(); int len = TESTHOMO_TESTS/2; while (len>0) { if (tx<len) cnts[kty + tx] += cnts[kty + tx + len]; len /= 2; __syncthreads(); } if (tx<TESTHOMO_LOOPS && ty==0) d_counts[idx] = cnts[TESTHOMO_TESTS*tx]; __syncthreads(); } //================= Host matching functions =====================// double FindHomography(SiftData &data, float *homography, int *numMatches, int numLoops, float minScore, float maxAmbiguity, float thresh) { *numMatches = 0; homography[0] = homography[4] = homography[8] = 1.0f; homography[1] = homography[2] = homography[3] = 0.0f; homography[5] = homography[6] = homography[7] = 0.0f; #ifdef MANAGEDMEM SiftPoint *d_sift = data.m_data; #else if (data.d_data==NULL) return 0.0f; SiftPoint 
*d_sift = data.d_data; #endif TimerGPU timer(0); numLoops = iDivUp(numLoops,16)*16; int numPts = data.numPts; if (numPts<8) return 0.0f; int numPtsUp = iDivUp(numPts, 16)*16; float *d_coord, *d_homo; int *d_randPts, *h_randPts; int randSize = 4*sizeof(int)*numLoops; int szFl = sizeof(float); int szPt = sizeof(SiftPoint); safeCall(hipMalloc((void **)&d_coord, 4*sizeof(float)*numPtsUp)); safeCall(hipMalloc((void **)&d_randPts, randSize)); safeCall(hipMalloc((void **)&d_homo, 8*sizeof(float)*numLoops)); h_randPts = (int*)malloc(randSize); float *h_scores = (float *)malloc(sizeof(float)*numPtsUp); float *h_ambiguities = (float *)malloc(sizeof(float)*numPtsUp); safeCall(hipMemcpy2D(h_scores, szFl, &d_sift[0].score, szPt, szFl, numPts, hipMemcpyDeviceToHost)); safeCall(hipMemcpy2D(h_ambiguities, szFl, &d_sift[0].ambiguity, szPt, szFl, numPts, hipMemcpyDeviceToHost)); int *validPts = (int *)malloc(sizeof(int)*numPts); int numValid = 0; for (int i=0;i<numPts;i++) { if (h_scores[i]>minScore && h_ambiguities[i]<maxAmbiguity) validPts[numValid++] = i; } free(h_scores); free(h_ambiguities); if (numValid>=8) { for (int i=0;i<numLoops;i++) { int p1 = rand() % numValid; int p2 = rand() % numValid; int p3 = rand() % numValid; int p4 = rand() % numValid; while (p2==p1) p2 = rand() % numValid; while (p3==p1 || p3==p2) p3 = rand() % numValid; while (p4==p1 || p4==p2 || p4==p3) p4 = rand() % numValid; h_randPts[i+0*numLoops] = validPts[p1]; h_randPts[i+1*numLoops] = validPts[p2]; h_randPts[i+2*numLoops] = validPts[p3]; h_randPts[i+3*numLoops] = validPts[p4]; } safeCall(hipMemcpy(d_randPts, h_randPts, randSize, hipMemcpyHostToDevice)); safeCall(hipMemcpy2D(&d_coord[0*numPtsUp], szFl, &d_sift[0].xpos, szPt, szFl, numPts, hipMemcpyDeviceToDevice)); safeCall(hipMemcpy2D(&d_coord[1*numPtsUp], szFl, &d_sift[0].ypos, szPt, szFl, numPts, hipMemcpyDeviceToDevice)); safeCall(hipMemcpy2D(&d_coord[2*numPtsUp], szFl, &d_sift[0].match_xpos, szPt, szFl, numPts, hipMemcpyDeviceToDevice)); safeCall(hipMemcpy2D(&d_coord[3*numPtsUp], szFl, &d_sift[0].match_ypos, szPt, szFl, numPts, hipMemcpyDeviceToDevice)); hipLaunchKernelGGL(( ComputeHomographies), dim3(numLoops/16), dim3(16), 0, 0, d_coord, d_randPts, d_homo, numPtsUp); safeCall(hipDeviceSynchronize()); checkMsg("ComputeHomographies() execution failed\n"); dim3 blocks(1, numLoops/TESTHOMO_LOOPS); dim3 threads(TESTHOMO_TESTS, TESTHOMO_LOOPS); hipLaunchKernelGGL(( TestHomographies), dim3(blocks), dim3(threads), 0, 0, d_coord, d_homo, d_randPts, numPtsUp, thresh*thresh); safeCall(hipDeviceSynchronize()); checkMsg("TestHomographies() execution failed\n"); safeCall(hipMemcpy(h_randPts, d_randPts, sizeof(int)*numLoops, hipMemcpyDeviceToHost)); int maxIndex = -1, maxCount = -1; for (int i=0;i<numLoops;i++) if (h_randPts[i]>maxCount) { maxCount = h_randPts[i]; maxIndex = i; } *numMatches = maxCount; safeCall(hipMemcpy2D(homography, szFl, &d_homo[maxIndex], sizeof(float)*numLoops, szFl, 8, hipMemcpyDeviceToHost)); } free(validPts); free(h_randPts); safeCall(hipFree(d_homo)); safeCall(hipFree(d_randPts)); safeCall(hipFree(d_coord)); double gpuTime = timer.read(); #ifdef VERBOSE printf("FindHomography time = %.2f ms\n", gpuTime); #endif return gpuTime; } double MatchSiftData(SiftData &data1, SiftData &data2, hipStream_t stream) { TimerGPU timer(0); int numPts1 = data1.numPts; int numPts2 = data2.numPts; if (!numPts1 || !numPts2) return 0.0; #ifdef MANAGEDMEM SiftPoint *sift1 = data1.m_data; SiftPoint *sift2 = data2.m_data; #else if (data1.d_data==NULL || data2.d_data==NULL) return 
0.0f; SiftPoint *sift1 = data1.d_data; SiftPoint *sift2 = data2.d_data; #endif // Original version with correlation and maximization in two different kernels // Global memory reguirement: O(N^2) #if 0 float *d_corrData; int corrWidth = iDivUp(numPts2, 16)*16; int corrSize = sizeof(float)*numPts1*corrWidth; safeCall(hipMalloc((void **)&d_corrData, corrSize)); #if 0 // K40c 10.9ms, 1080 Ti 3.8ms dim3 blocks1(numPts1, iDivUp(numPts2, 16)); dim3 threads1(16, 16); // each block: 1 points x 16 points hipLaunchKernelGGL(( MatchSiftPoints), dim3(blocks1), dim3(threads1), 0, 0, sift1, sift2, d_corrData, numPts1, numPts2); #else // K40c 7.6ms, 1080 Ti 1.4ms dim3 blocks(iDivUp(numPts1,16), iDivUp(numPts2, 16)); dim3 threads(16, 16); // each block: 16 points x 16 points hipLaunchKernelGGL(( MatchSiftPoints2), dim3(blocks), dim3(threads), 0, 0, sift1, sift2, d_corrData, numPts1, numPts2); #endif safeCall(hipDeviceSynchronize()); dim3 blocksMax(iDivUp(numPts1, 16)); dim3 threadsMax(16, 16); hipLaunchKernelGGL(( FindMaxCorr), dim3(blocksMax), dim3(threadsMax), 0, 0, d_corrData, sift1, sift2, numPts1, corrWidth, sizeof(SiftPoint)); safeCall(hipDeviceSynchronize()); checkMsg("FindMaxCorr() execution failed\n"); safeCall(hipFree(d_corrData)); #endif // Version suggested by Nicholas Lin with combined correlation and maximization // Global memory reguirement: O(N) #if 0 // K40c 51.2ms, 1080 Ti 9.6ms int block_dim = 16; float *d_corrData; int corrSize = numPts1 * block_dim * 2; safeCall(hipMalloc((void **)&d_corrData, sizeof(float) * corrSize)); dim3 blocks(iDivUp(numPts1, block_dim)); dim3 threads(block_dim, block_dim); hipLaunchKernelGGL(( FindMaxCorr3), dim3(blocks), dim3(threads) , 0, 0, d_corrData, sift1, sift2, numPts1, numPts2); safeCall(hipDeviceSynchronize()); checkMsg("FindMaxCorr3() execution failed\n"); safeCall(hipFree(d_corrData)); #endif // Combined version with no global memory requirement using one 1 point per block #if 0 // K40c 8.9ms, 1080 Ti 2.1ms, 2080 Ti 1.0ms dim3 blocksMax(numPts1); dim3 threadsMax(FMC2W, FMC2H); hipLaunchKernelGGL(( FindMaxCorr2), dim3(blocksMax), dim3(threadsMax), 0, 0, sift1, sift2, numPts1, numPts2); safeCall(hipDeviceSynchronize()); checkMsg("FindMaxCorr2() execution failed\n"); #endif // Combined version with no global memory requirement using one FMC2H points per block #if 0 // K40c 9.2ms, 1080 Ti 1.3ms, 2080 Ti 1.1ms dim3 blocksMax2(iDivUp(numPts1, FMC2H)); dim3 threadsMax2(FMC2W, FMC2H); hipLaunchKernelGGL(( FindMaxCorr4), dim3(blocksMax2), dim3(threadsMax2), 0, 0, sift1, sift2, numPts1, numPts2); safeCall(hipDeviceSynchronize()); checkMsg("FindMaxCorr4() execution failed\n"); #endif // Combined version with no global memory requirement using global locks #if 1 dim3 blocksMax3(iDivUp(numPts1, 16), iDivUp(numPts2, 512)); dim3 threadsMax3(16, 16); hipLaunchKernelGGL(( CleanMatches), dim3(iDivUp(numPts1, 64)), dim3(64), 0, stream, sift1, numPts1); int mode = 10; if (mode==5)// K40c 5.0ms, 1080 Ti 1.2ms, 2080 Ti 0.83ms hipLaunchKernelGGL(( FindMaxCorr5), dim3(blocksMax3), dim3(threadsMax3), 0, 0, sift1, sift2, numPts1, numPts2); else if (mode==6) { // 2080 Ti 0.89ms threadsMax3 = dim3(32, 16); hipLaunchKernelGGL(( FindMaxCorr6), dim3(blocksMax3), dim3(threadsMax3), 0, 0, sift1, sift2, numPts1, numPts2); } else if (mode==7) // 2080 Ti 0.50ms hipLaunchKernelGGL(( FindMaxCorr7), dim3(blocksMax3), dim3(threadsMax3), 0, 0, sift1, sift2, numPts1, numPts2); else if (mode==8) { // 2080 Ti 0.45ms blocksMax3 = dim3(iDivUp(numPts1, FMC_BW), iDivUp(numPts2, FMC_GH)); 
threadsMax3 = dim3(FMC_NW, FMC_NH); hipLaunchKernelGGL(( FindMaxCorr8), dim3(blocksMax3), dim3(threadsMax3), 0, 0, sift1, sift2, numPts1, numPts2); } else if (mode==9) { // 2080 Ti 0.46ms blocksMax3 = dim3(iDivUp(numPts1, FMC_BW), iDivUp(numPts2, FMC_GH)); threadsMax3 = dim3(FMC_NW, FMC_NH); hipLaunchKernelGGL(( FindMaxCorr9), dim3(blocksMax3), dim3(threadsMax3), 0, 0, sift1, sift2, numPts1, numPts2); } else if (mode==10) { // 2080 Ti 0.24ms blocksMax3 = dim3(iDivUp(numPts1, M7W)); threadsMax3 = dim3(M7W, M7H/M7R); hipLaunchKernelGGL(( FindMaxCorr10), dim3(blocksMax3), dim3(threadsMax3), 0, stream, sift1, sift2, numPts1, numPts2); } safeCall(hipStreamSynchronize(stream)); checkMsg("FindMaxCorr5() execution failed\n"); #endif if (data1.h_data!=NULL) { float *h_ptr = &data1.h_data[0].score; float *d_ptr = &data1.d_data[0].score; safeCall(hipMemcpy2DAsync(h_ptr, sizeof(SiftPoint), d_ptr, sizeof(SiftPoint), 5*sizeof(float), data1.numPts, hipMemcpyDeviceToHost, stream)); } double gpuTime = timer.read(); #ifdef VERBOSE printf("MatchSiftData time = %.2f ms\n", gpuTime); #endif return gpuTime; }
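// ---------------------------------------------------------------------------
// Editor's illustrative sketch (not part of the original file): how the two
// host entry points above fit together. MatchSiftData() fills the score,
// ambiguity and match fields of data1 against data2 on the given stream;
// FindHomography() then runs the random-sampling homography search over those
// matches. The SiftData structures are assumed to have been populated by the
// extraction code elsewhere in cudaSift, and the numeric thresholds below are
// example values only, not taken from this file.
// ---------------------------------------------------------------------------
static void MatchAndFitSketch(SiftData &data1, SiftData &data2, hipStream_t stream)
{
  MatchSiftData(data1, data2, stream);

  float homography[9];
  int numMatches = 0;
  // 1000 sampling loops, min score 0.85, max ambiguity 0.95, 5-pixel threshold
  FindHomography(data1, homography, &numMatches, 1000, 0.85f, 0.95f, 5.0f);
}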
92b37e0208e48f2c1260d1222cd9f0298731528a.cu
#include "cudaSift.h" #include "cudautils.h" //================= Device matching functions =====================// __global__ void MatchSiftPoints(SiftPoint *sift1, SiftPoint *sift2, float *corrData, int numPts1, int numPts2) { __shared__ float siftPoint[128]; __shared__ float sums[16*16]; const int tx = threadIdx.x; const int ty = threadIdx.y; const int p1 = blockIdx.x; const int p2 = blockIdx.y*16 + ty; const float *ptr1 = sift1[p1].data; const float *ptr2 = sift2[p2].data; const int i = 16*ty + tx; if (ty<8) siftPoint[i] = ptr1[i]; __syncthreads(); float sum = 0.0f; if (p2<numPts2) for (int j=0;j<8;j++) sum += siftPoint[16*j+tx] * ptr2[16*j+tx]; sums[i] = sum; __syncthreads(); if (tx<8) sums[i] += sums[i+8]; __syncthreads(); if (tx<4) sums[i] += sums[i+4]; __syncthreads(); if (ty==0) { sum = sums[16*tx+0] + sums[16*tx+1] + sums[16*tx+2] + sums[16*tx+3]; corrData[p1*gridDim.y*16 + blockIdx.y*16 + tx] = sum; } __syncthreads(); } __global__ void MatchSiftPoints2(SiftPoint *sift1, SiftPoint *sift2, float *corrData, int numPts1, int numPts2) { __shared__ float siftPoints1[16*128]; __shared__ float siftPoints2[16*128]; const int tx = threadIdx.x; const int ty = threadIdx.y; const float *ptr1 = sift1[min(numPts1-1,blockIdx.x*16 + ty)].data; const float *ptr2 = sift2[min(numPts2-1,blockIdx.y*16 + ty)].data; for (int i=0;i<8;i++) { siftPoints1[128*ty+16*i+tx] = ptr1[16*i+tx]; siftPoints2[128*ty+16*i+tx] = ptr2[16*i+tx]; } __syncthreads(); const int p1 = blockIdx.x*16 + ty; const int p2 = blockIdx.y*16 + tx; const float *pt1 = &siftPoints1[ty*128]; const float *pt2 = &siftPoints2[tx*128]; float sum = 0.0f; for (int i=0;i<128;i++) { int itx = (i + tx)&127; // avoid bank conflicts sum += pt1[itx]*pt2[itx]; } if (p1<numPts1) corrData[p1*gridDim.y*16 + p2] = (p2<numPts2 ? sum : -1.0f); } __global__ void FindMaxCorr(float *corrData, SiftPoint *sift1, SiftPoint *sift2, int numPts1, int corrWidth, int siftSize) { __shared__ float maxScore[16*16]; __shared__ float maxScor2[16*16]; __shared__ int maxIndex[16*16]; const int tx = threadIdx.x; const int ty = threadIdx.y; const int idx = ty*16 + tx; int p1 = blockIdx.x*16 + threadIdx.y; p1 = (p1>=numPts1 ? 
numPts1-1 : p1); maxScore[idx] = -1.0f; maxScor2[idx] = -1.0f; maxIndex[idx] = -1; __syncthreads(); float *corrs = &corrData[p1*corrWidth]; for (int i=tx;i<corrWidth;i+=16) { float val = corrs[i]; if (val>maxScore[idx]) { maxScor2[idx] = maxScore[idx]; maxScore[idx] = val; maxIndex[idx] = i; } else if (val>maxScor2[idx]) maxScor2[idx] = val; } __syncthreads(); for (int len=8;len>0;len/=2) { if (tx<8) { float val = maxScore[idx+len]; int i = maxIndex[idx+len]; if (val>maxScore[idx]) { maxScor2[idx] = maxScore[idx]; maxScore[idx] = val; maxIndex[idx] = i; } else if (val>maxScor2[idx]) maxScor2[idx] = val; float va2 = maxScor2[idx+len]; if (va2>maxScor2[idx]) maxScor2[idx] = va2; } __syncthreads(); } if (tx==0) { sift1[p1].score = maxScore[ty*16]; sift1[p1].ambiguity = maxScor2[ty*16] / (maxScore[ty*16] + 1e-6); sift1[p1].match = maxIndex[ty*16]; sift1[p1].match_xpos = sift2[maxIndex[ty*16]].xpos; sift1[p1].match_ypos = sift2[maxIndex[ty*16]].ypos; } } // Version based on suggestion by Nicholas Lin __global__ void FindMaxCorr3(float *corrData, SiftPoint *sift1, SiftPoint *sift2, int numPts1, int numPts2) { int block_dim = blockDim.x; // blockDim.x == 16 const int tx = threadIdx.x; const int ty = threadIdx.y; const int p1 = blockIdx.x * block_dim + ty; const int idx = ty * 16 + tx; __shared__ int maxIndex[16 * 16]; maxIndex[idx] = 0; __syncthreads(); float *corrs = NULL; if (p1 < numPts1) { corrs = &corrData[p1 * block_dim * 2]; corrs[tx] = 0.0f; corrs[tx + 16] = 0.0f; const float *pt1 = sift1[p1].data; for (int p2 = tx; p2 < numPts2; p2 += 16) { float *pt2 = sift2[p2].data; float sum = 0.0f; for (int i = 0; i < 128; i++) sum += pt1[i] * pt2[i]; if (sum > corrs[tx]) { corrs[tx + 16] = corrs[tx]; corrs[tx] = sum; maxIndex[idx] = p2; } else if (sum > corrs[tx + 16]) corrs[tx + 16] = sum; } } __syncthreads(); if (p1 < numPts1) { for (int len = 8; len > 0; len /= 2) { if (tx < len) { float val = corrs[tx + len]; int i = maxIndex[idx + len]; if (val > corrs[tx]) { corrs[tx + 16] = corrs[tx]; corrs[tx] = val; maxIndex[idx] = i; } else if (val > corrs[tx + 16]) corrs[tx + 16] = val; float va2 = corrs[tx + 16 + len]; if (va2 > corrs[tx + 16]) corrs[tx + 16] = va2; } __syncthreads(); } if (tx==0) { sift1[p1].score = corrs[0]; sift1[p1].ambiguity = corrs[16] / (corrs[0] + 1e-6); sift1[p1].match = maxIndex[ty << 4]; sift1[p1].match_xpos = sift2[maxIndex[ty << 4]].xpos; sift1[p1].match_ypos = sift2[maxIndex[ty << 4]].ypos; } } } #define FMC2W 16 #define FMC2H 4 __global__ void FindMaxCorr2(SiftPoint *sift1, SiftPoint *sift2, int numPts1, int numPts2) { __shared__ float siftPoint[128]; __shared__ float maxScore[FMC2H]; __shared__ float maxScor2[FMC2H]; __shared__ int maxIndex[FMC2H]; const int p1 = blockIdx.x; if (p1>=numPts1) return; const int tx = threadIdx.x; const int ty = threadIdx.y; const int idx = ty*FMC2W + tx; if (idx<FMC2H) { maxScore[idx] = -1.0f; maxScor2[idx] = -1.0f; maxIndex[idx] = 0; } __syncthreads(); const float *pt1 = sift1[p1].data; for (int i=idx;i<128;i+=FMC2W*FMC2H) siftPoint[i] = pt1[i]; __syncthreads(); for (int p2=ty;p2<numPts2;p2+=FMC2H) { const float *pt2 = sift2[p2].data; float sum = 0.0f; for (int j=tx;j<128;j+=FMC2W) sum += siftPoint[j] * pt2[j]; for (int j=FMC2W/2;j>0;j/=2) sum += ShiftDown(sum, j); if (tx==0) { if (sum>maxScore[ty]) { maxScor2[ty] = maxScore[ty]; maxScore[ty] = sum; maxIndex[ty] = p2; } else if (sum>maxScor2[ty]) maxScor2[ty] = sum; } } __syncthreads(); for (int len=FMC2H/2;len>0;len/=2) { if (ty==0 && tx<len) { float val = maxScore[tx+len]; int p2 = 
maxIndex[tx+len]; if (val>maxScore[tx]) { maxScor2[tx] = maxScore[tx]; maxScore[tx] = val; maxIndex[tx] = p2; } else if (val>maxScor2[tx]) maxScor2[tx] = val; float va2 = maxScor2[tx+len]; if (va2>maxScor2[tx]) maxScor2[tx] = va2; } __syncthreads(); } if (ty==0 && tx==0) { sift1[p1].score = maxScore[0]; sift1[p1].ambiguity = maxScor2[0] / (maxScore[0] + 1e-6); sift1[p1].match = maxIndex[0]; sift1[p1].match_xpos = sift2[maxIndex[0]].xpos; sift1[p1].match_ypos = sift2[maxIndex[0]].ypos; } } __global__ void FindMaxCorr4(SiftPoint *sift1, SiftPoint *sift2, int numPts1, int numPts2) { __shared__ float siftPoint[128*FMC2H]; __shared__ float maxScore[FMC2H]; __shared__ float maxScor2[FMC2H]; __shared__ int maxIndex[FMC2H]; const int tx = threadIdx.x; const int ty = threadIdx.y; if (tx==0) { maxScore[ty] = -1.0f; maxScor2[ty] = -1.0f; maxIndex[ty] = 0; } const int p1 = blockIdx.x*FMC2H + ty; const float *pt1 = sift1[p1].data; for (int j=tx;j<128;j+=FMC2W) siftPoint[128*ty + j] = pt1[j]; __syncthreads(); for (int p2=0;p2<numPts2;p2++) { const float *pt2 = sift2[p2].data; float sum = 0.0f; for (int j=tx;j<128;j+=FMC2W) sum += siftPoint[128*ty + j] * pt2[j]; for (int j=FMC2W/2;j>0;j/=2) sum += ShiftDown(sum, j); if (tx==0) { if (sum>maxScore[ty]) { maxScor2[ty] = maxScore[ty]; maxScore[ty] = sum; maxIndex[ty] = p2; } else if (sum>maxScor2[ty]) maxScor2[ty] = sum; } } __syncthreads(); if (tx==0) { sift1[p1].score = maxScore[ty]; sift1[p1].ambiguity = maxScor2[ty] / (maxScore[ty] + 1e-6); sift1[p1].match = maxIndex[ty]; sift1[p1].match_xpos = sift2[maxIndex[ty]].xpos; sift1[p1].match_ypos = sift2[maxIndex[ty]].ypos; } } __global__ void CleanMatches(SiftPoint *sift1, int numPts1) { const int p1 = min(blockIdx.x*64 + threadIdx.x, numPts1-1); sift1[p1].score = 0.0f; } #define M7W 32 #define M7H 32 #define M7R 4 #define NRX 2 #define NDIM 128 __global__ void FindMaxCorr10(SiftPoint *sift1, SiftPoint *sift2, int numPts1, int numPts2) { __shared__ float4 buffer1[M7W*NDIM/4]; __shared__ float4 buffer2[M7H*NDIM/4]; int tx = threadIdx.x; int ty = threadIdx.y; int bp1 = M7W*blockIdx.x; for (int j=ty;j<M7W;j+=M7H/M7R) { int p1 = min(bp1 + j, numPts1 - 1); for (int d=tx;d<NDIM/4;d+=M7W) buffer1[j*NDIM/4 + (d + j)%(NDIM/4)] = ((float4*)&sift1[p1].data)[d]; } float max_score[NRX]; float sec_score[NRX]; int index[NRX]; for (int i=0;i<NRX;i++) { max_score[i] = 0.0f; sec_score[i] = 0.0f; index[i] = -1; } int idx = ty*M7W + tx; int ix = idx%(M7W/NRX); int iy = idx/(M7W/NRX); for (int bp2=0;bp2<numPts2 - M7H + 1;bp2+=M7H) { for (int j=ty;j<M7H;j+=M7H/M7R) { int p2 = min(bp2 + j, numPts2 - 1); for (int d=tx;d<NDIM/4;d+=M7W) buffer2[j*NDIM/4 + d] = ((float4*)&sift2[p2].data)[d]; } __syncthreads(); if (idx<M7W*M7H/M7R/NRX) { float score[M7R][NRX]; for (int dy=0;dy<M7R;dy++) for (int i=0;i<NRX;i++) score[dy][i] = 0.0f; for (int d=0;d<NDIM/4;d++) { float4 v1[NRX]; for (int i=0;i<NRX;i++) v1[i] = buffer1[((M7W/NRX)*i + ix)*NDIM/4 + (d + (M7W/NRX)*i + ix)%(NDIM/4)]; for (int dy=0;dy<M7R;dy++) { float4 v2 = buffer2[(M7R*iy + dy)*(NDIM/4) + d]; for (int i=0;i<NRX;i++) { score[dy][i] += v1[i].x*v2.x; score[dy][i] += v1[i].y*v2.y; score[dy][i] += v1[i].z*v2.z; score[dy][i] += v1[i].w*v2.w; } } } for (int dy=0;dy<M7R;dy++) { for (int i=0;i<NRX;i++) { if (score[dy][i]>max_score[i]) { sec_score[i] = max_score[i]; max_score[i] = score[dy][i]; index[i] = min(bp2 + M7R*iy + dy, numPts2-1); } else if (score[dy][i]>sec_score[i]) sec_score[i] = score[dy][i]; } } } __syncthreads(); } float *scores1 = (float*)buffer1; float *scores2 = 
&scores1[M7W*M7H/M7R]; int *indices = (int*)&scores2[M7W*M7H/M7R]; if (idx<M7W*M7H/M7R/NRX) { for (int i=0;i<NRX;i++) { scores1[iy*M7W + (M7W/NRX)*i + ix] = max_score[i]; scores2[iy*M7W + (M7W/NRX)*i + ix] = sec_score[i]; indices[iy*M7W + (M7W/NRX)*i + ix] = index[i]; } } __syncthreads(); if (ty==0) { float max_score = scores1[tx]; float sec_score = scores2[tx]; int index = indices[tx]; for (int y=0;y<M7H/M7R;y++) if (index != indices[y*M7W + tx]) { if (scores1[y*M7W + tx]>max_score) { sec_score = max(max_score, sec_score); max_score = scores1[y*M7W + tx]; index = indices[y*M7W + tx]; } else if (scores1[y*M7W + tx]>sec_score) sec_score = scores1[y*M7W + tx]; } sift1[bp1 + tx].score = max_score; sift1[bp1 + tx].match = index; sift1[bp1 + tx].match_xpos = sift2[index].xpos; sift1[bp1 + tx].match_ypos = sift2[index].ypos; sift1[bp1 + tx].ambiguity = sec_score / (max_score + 1e-6f); } } #define FMC_GH 512 #define FMC_BW 32 #define FMC_BH 32 #define FMC_BD 16 #define FMC_TW 1 #define FMC_TH 4 #define FMC_NW (FMC_BW/FMC_TW) // 32 #define FMC_NH (FMC_BH/FMC_TH) // 8 #define FMC_NT (FMC_NW*FMC_NH) // 256 = 8 warps __device__ volatile int lock = 0; __global__ void FindMaxCorr9(SiftPoint *sift1, SiftPoint *sift2, int numPts1, int numPts2) { __shared__ float4 siftParts1[FMC_BW*FMC_BD]; // 4*32*8 = 1024 __shared__ float4 siftParts2[FMC_BH*FMC_BD]; // 4*32*8 = 1024 //__shared__ float blksums[FMC_BW*FMC_BH]; // 32*32 = 1024 const int tx = threadIdx.x; const int ty = threadIdx.y; const int idx = ty*FMC_NW + tx; float4 *pts1 = 0, *pts2 = 0; if (idx<FMC_BW) { const int p1l = min(blockIdx.x*FMC_BW + idx, numPts1-1); pts1 = (float4*)sift1[p1l].data; } float maxScore = -1.0f; float maxScor2 = -1.0f; int maxIndex = 0; for (int k=0;k<min(FMC_GH, numPts2 - FMC_BH + 1);k+=FMC_BH) { if (idx<FMC_BH) { const int p2l = min(blockIdx.y*FMC_GH + k + idx, numPts2-1); pts2 = (float4*)sift2[p2l].data; } float sums[FMC_TW*FMC_TH]; for (int i=0;i<FMC_TW*FMC_TH;i++) sums[i] = 0.0f; if (idx<FMC_BW) for (int i=0;i<FMC_BD/2;i++) siftParts1[(i + 0)*FMC_BW + idx] = pts1[0 + i]; if (idx<FMC_BH) for (int i=0;i<FMC_BD/2;i++) siftParts2[(i + 0)*FMC_BH + idx] = pts2[0 + i]; __syncthreads(); int b = FMC_BD/2; for (int d=FMC_BD/2;d<32;d+=FMC_BD/2) { if (idx<FMC_BW) for (int i=0;i<FMC_BD/2;i++) siftParts1[(i + b)*FMC_BW + idx] = pts1[d + i]; if (idx<FMC_BH) for (int i=0;i<FMC_BD/2;i++) siftParts2[(i + b)*FMC_BH + idx] = pts2[d + i]; b ^= FMC_BD/2; for (int i=0;i<FMC_BD/2;i++) { float4 v1[FMC_TW]; for (int ix=0;ix<FMC_TW;ix++) v1[ix] = siftParts1[(i + b)*FMC_BW + (tx*FMC_TW + ix)]; for (int iy=0;iy<FMC_TH;iy++) { float4 v2 = siftParts2[(i + b)*FMC_BH + (ty*FMC_TH + iy)]; for (int ix=0;ix<FMC_TW;ix++) { sums[iy*FMC_TW + ix] += v1[ix].x * v2.x; sums[iy*FMC_TW + ix] += v1[ix].y * v2.y; sums[iy*FMC_TW + ix] += v1[ix].z * v2.z; sums[iy*FMC_TW + ix] += v1[ix].w * v2.w; } } } __syncthreads(); } b ^= FMC_BD/2; for (int i=0;i<FMC_BD/2;i++) { float4 v1[FMC_TW]; for (int ix=0;ix<FMC_TW;ix++) v1[ix] = siftParts1[(i + b)*FMC_BW + (tx*FMC_TW + ix)]; for (int iy=0;iy<FMC_TH;iy++) { float4 v2 = siftParts2[(i + b)*FMC_BH + (ty*FMC_TH + iy)]; for (int ix=0;ix<FMC_TW;ix++) { sums[iy*FMC_TW + ix] += v1[ix].x * v2.x; sums[iy*FMC_TW + ix] += v1[ix].y * v2.y; sums[iy*FMC_TW + ix] += v1[ix].z * v2.z; sums[iy*FMC_TW + ix] += v1[ix].w * v2.w; } } } __syncthreads(); float *blksums = (float*)siftParts1; for (int iy=0;iy<FMC_TH;iy++) for (int ix=0;ix<FMC_TW;ix++) blksums[(ty*FMC_TH + iy)*FMC_BW + (tx*FMC_TW + ix)] = sums[iy*FMC_TW + ix]; __syncthreads(); if 
(idx<FMC_BW) { for (int j=0;j<FMC_BH;j++) { float sum = blksums[j*FMC_BW + idx]; if (sum>maxScore) { maxScor2 = maxScore; maxScore = sum; maxIndex = min(blockIdx.y*FMC_GH + k + j, numPts2-1); } else if (sum>maxScor2) maxScor2 = sum; } } __syncthreads(); } const int p1 = min(blockIdx.x*FMC_BW + idx, numPts1-1); if (idx==0) while (atomicCAS((int *)&lock, 0, 1) != 0); __syncthreads(); if (idx<FMC_BW) { float maxScor2Old = sift1[p1].ambiguity*(sift1[p1].score + 1e-6f); if (maxScore>sift1[p1].score) { maxScor2 = max(sift1[p1].score, maxScor2); sift1[p1].ambiguity = maxScor2 / (maxScore + 1e-6f); sift1[p1].score = maxScore; sift1[p1].match = maxIndex; sift1[p1].match_xpos = sift2[maxIndex].xpos; sift1[p1].match_ypos = sift2[maxIndex].ypos; } else if (maxScore>maxScor2Old) sift1[p1].ambiguity = maxScore / (sift1[p1].score + 1e-6f); } __syncthreads(); if (idx==0) atomicExch((int* )&lock, 0); } __global__ void FindMaxCorr8(SiftPoint *sift1, SiftPoint *sift2, int numPts1, int numPts2) { __shared__ float4 siftParts1[FMC_BW*FMC_BD]; // 4*32*8 = 1024 __shared__ float4 siftParts2[FMC_BH*FMC_BD]; // 4*32*8 = 1024 __shared__ float blksums[FMC_BW*FMC_BH]; // 32*32 = 1024 const int tx = threadIdx.x; const int ty = threadIdx.y; const int idx = ty*FMC_NW + tx; float4 *pts1 = 0, *pts2 = 0; if (idx<FMC_BW) { const int p1l = min(blockIdx.x*FMC_BW + idx, numPts1-1); pts1 = (float4*)sift1[p1l].data; } float maxScore = -1.0f; float maxScor2 = -1.0f; int maxIndex = 0; for (int k=0;k<min(FMC_GH, numPts2 - FMC_BH + 1);k+=FMC_BH) { if (idx<FMC_BH) { const int p2l = min(blockIdx.y*FMC_GH + k + idx, numPts2-1); pts2 = (float4*)sift2[p2l].data; } float sums[FMC_TW*FMC_TH]; for (int i=0;i<FMC_TW*FMC_TH;i++) sums[i] = 0.0f; for (int d=0;d<32;d+=FMC_BD) { if (idx<FMC_BW) for (int i=0;i<FMC_BD;i++) siftParts1[i*FMC_BW + idx] = pts1[d + i]; if (idx<FMC_BH) for (int i=0;i<FMC_BD;i++) siftParts2[i*FMC_BH + idx] = pts2[d + i]; __syncthreads(); for (int i=0;i<FMC_BD;i++) { float4 v1[FMC_TW]; for (int ix=0;ix<FMC_TW;ix++) v1[ix] = siftParts1[i*FMC_BW + (tx*FMC_TW + ix)]; for (int iy=0;iy<FMC_TH;iy++) { float4 v2 = siftParts2[i*FMC_BH + (ty*FMC_TH + iy)]; for (int ix=0;ix<FMC_TW;ix++) { sums[iy*FMC_TW + ix] += v1[ix].x * v2.x; sums[iy*FMC_TW + ix] += v1[ix].y * v2.y; sums[iy*FMC_TW + ix] += v1[ix].z * v2.z; sums[iy*FMC_TW + ix] += v1[ix].w * v2.w; } } } __syncthreads(); } //float *blksums = (float*)siftParts1; for (int iy=0;iy<FMC_TH;iy++) for (int ix=0;ix<FMC_TW;ix++) blksums[(ty*FMC_TH + iy)*FMC_BW + (tx*FMC_TW + ix)] = sums[iy*FMC_TW + ix]; __syncthreads(); if (idx<FMC_BW) { for (int j=0;j<FMC_BH;j++) { float sum = blksums[j*FMC_BW + idx]; if (sum>maxScore) { maxScor2 = maxScore; maxScore = sum; maxIndex = min(blockIdx.y*FMC_GH + k + j, numPts2-1); } else if (sum>maxScor2) maxScor2 = sum; } } __syncthreads(); } const int p1 = min(blockIdx.x*FMC_BW + idx, numPts1-1); if (idx==0) while (atomicCAS((int *)&lock, 0, 1) != 0); __syncthreads(); if (idx<FMC_BW) { float maxScor2Old = sift1[p1].ambiguity*(sift1[p1].score + 1e-6f); if (maxScore>sift1[p1].score) { maxScor2 = max(sift1[p1].score, maxScor2); sift1[p1].ambiguity = maxScor2 / (maxScore + 1e-6f); sift1[p1].score = maxScore; sift1[p1].match = maxIndex; sift1[p1].match_xpos = sift2[maxIndex].xpos; sift1[p1].match_ypos = sift2[maxIndex].ypos; } else if (maxScore>maxScor2Old) sift1[p1].ambiguity = maxScore / (sift1[p1].score + 1e-6f); } __syncthreads(); if (idx==0) atomicExch((int* )&lock, 0); } __global__ void FindMaxCorr7(SiftPoint *sift1, SiftPoint *sift2, int numPts1, int 
numPts2) { __shared__ float siftParts1[17*64]; // features in columns __shared__ float siftParts2[16*64]; // one extra to avoid shared conflicts float4 *pts1 = (float4*)siftParts1; float4 *pts2 = (float4*)siftParts2; const int tx = threadIdx.x; const int ty = threadIdx.y; const int p1l = min(blockIdx.x*16 + ty, numPts1-1); const float4 *p1l4 = (float4*)sift1[p1l].data; float maxScore = -1.0f; float maxScor2 = -1.0f; int maxIndex = 0; for (int k=0;k<512/16;k++) { const int p2l = min(blockIdx.y*512 + k*16 + ty, numPts2-1); const float4 *p2l4 = (float4*)sift2[p2l].data; #define NUM 4 float sum[NUM]; if (ty<(16/NUM)) for (int l=0;l<NUM;l++) sum[l] = 0.0f; __syncthreads(); for (int i=0;i<2;i++) { pts1[17*tx + ty] = p1l4[i*16 + tx]; pts2[16*ty + tx] = p2l4[i*16 + tx]; __syncthreads(); if (ty<(16/NUM)) { #pragma unroll for (int j=0;j<16;j++) { float4 p1v = pts1[17* j + tx]; #pragma unroll for (int l=0;l<NUM;l++) { float4 p2v = pts2[16*(ty + l*(16/NUM)) + j]; sum[l] += p1v.x * p2v.x; sum[l] += p1v.y * p2v.y; sum[l] += p1v.z * p2v.z; sum[l] += p1v.w * p2v.w; } } } __syncthreads(); } float *sums = siftParts1; if (ty<(16/NUM)) for (int l=0;l<NUM;l++) sums[16*(ty + l*(16/NUM)) + tx] = sum[l]; __syncthreads(); if (ty==0) { for (int j=0;j<16;j++) { float sum = sums[16*j + tx]; if (sum>maxScore) { maxScor2 = maxScore; maxScore = sum; maxIndex = min(blockIdx.y*512 + k*16 + j, numPts2-1); } else if (sum>maxScor2) maxScor2 = sum; } } __syncthreads(); } const int p1 = min(blockIdx.x*16 + tx, numPts1-1); if (tx==0 && ty==0) while (atomicCAS((int *)&lock, 0, 1) != 0); __syncthreads(); if (ty==0) { float maxScor2Old = sift1[p1].ambiguity*(sift1[p1].score + 1e-6f); if (maxScore>sift1[p1].score) { maxScor2 = max(sift1[p1].score, maxScor2); sift1[p1].ambiguity = maxScor2 / (maxScore + 1e-6f); sift1[p1].score = maxScore; sift1[p1].match = maxIndex; sift1[p1].match_xpos = sift2[maxIndex].xpos; sift1[p1].match_ypos = sift2[maxIndex].ypos; } else if (maxScore>maxScor2Old) sift1[p1].ambiguity = maxScore / (sift1[p1].score + 1e-6f); } __syncthreads(); if (tx==0 && ty==0) atomicExch((int* )&lock, 0); } __global__ void FindMaxCorr6(SiftPoint *sift1, SiftPoint *sift2, int numPts1, int numPts2) { //__shared__ float siftParts1[128*16]; // features in columns __shared__ float siftParts2[128*16]; // one extra to avoid shared conflicts __shared__ float sums[16*16]; const int tx = threadIdx.x; const int ty = threadIdx.y; const int p1l = min(blockIdx.x*16 + ty, numPts1-1); float *pt1l = sift1[p1l].data; float4 part1 = reinterpret_cast<float4*>(pt1l)[tx]; float maxScore = -1.0f; float maxScor2 = -1.0f; int maxIndex = 0; for (int k=0;k<512;k+=16) { const int p2l = min(blockIdx.y*512 + k + ty, numPts2-1); float *pt2l = sift2[p2l].data; reinterpret_cast<float4*>(siftParts2)[32*ty + tx] = reinterpret_cast<float4*>(pt2l)[tx]; __syncthreads(); for (int i=0;i<16;i++) { float4 part2 = reinterpret_cast<float4*>(siftParts2)[32*i + tx]; float sum = part1.x*part2.x + part1.y*part2.y + part1.z*part2.z + part1.w*part2.w; sum += ShiftDown(sum, 16); sum += ShiftDown(sum, 8); sum += ShiftDown(sum, 4); sum += ShiftDown(sum, 2); sum += ShiftDown(sum, 1); if (tx==0) sums[16*i + ty] = sum; } __syncthreads(); if (ty==0 && tx<16) { for (int j=0;j<16;j++) { float sum = sums[16*j + tx]; if (sum>maxScore) { maxScor2 = maxScore; maxScore = sum; maxIndex = min(blockIdx.y*512 + k + j, numPts2-1); } else if (sum>maxScor2) maxScor2 = sum; } } __syncthreads(); } if (tx==0 && ty==0) while (atomicCAS((int *)&lock, 0, 1) != 0); __syncthreads(); if (ty==0 && tx<16) 
{ const int p1 = min(blockIdx.x*16 + tx, numPts1-1); float maxScor2Old = sift1[p1].ambiguity*(sift1[p1].score + 1e-6f); if (maxScore>sift1[p1].score) { maxScor2 = max(sift1[p1].score, maxScor2); sift1[p1].ambiguity = maxScor2 / (maxScore + 1e-6f); sift1[p1].score = maxScore; sift1[p1].match = maxIndex; sift1[p1].match_xpos = sift2[maxIndex].xpos; sift1[p1].match_ypos = sift2[maxIndex].ypos; } else if (maxScore>maxScor2Old) sift1[p1].ambiguity = maxScore / (sift1[p1].score + 1e-6f); } __syncthreads(); if (tx==0 && ty==0) atomicExch((int* )&lock, 0); } __global__ void FindMaxCorr5(SiftPoint *sift1, SiftPoint *sift2, int numPts1, int numPts2) { __shared__ float siftParts1[17*16]; // features in columns __shared__ float siftParts2[17*16]; // one extra to avoid shared conflicts const int tx = threadIdx.x; const int ty = threadIdx.y; const int p1l = min(blockIdx.x*16 + ty, numPts1-1); const float *pt1l = sift1[p1l].data; float maxScore = -1.0f; float maxScor2 = -1.0f; int maxIndex = 0; for (int k=0;k<512/16;k++) { const int p2l = min(blockIdx.y*512 + k*16 + ty, numPts2-1); const float *pt2l = sift2[p2l].data; float sum = 0.0f; for (int i=0;i<8;i++) { siftParts1[17*tx + ty] = pt1l[i*16 + tx]; // load and transpose siftParts2[17*tx + ty] = pt2l[i*16 + tx]; __syncthreads(); for (int j=0;j<16;j++) sum += siftParts1[17*j + tx] * siftParts2[17*j + ty]; __syncthreads(); } float *sums = siftParts1; sums[16*ty + tx] = sum; __syncthreads(); if (ty==0) { for (int j=0;j<16;j++) { float sum = sums[16*j + tx]; if (sum>maxScore) { maxScor2 = maxScore; maxScore = sum; maxIndex = min(blockIdx.y*512 + k*16 + j, numPts2-1); } else if (sum>maxScor2) maxScor2 = sum; } } __syncthreads(); } const int p1 = min(blockIdx.x*16 + tx, numPts1-1); if (tx==0 && ty==0) while (atomicCAS((int *)&lock, 0, 1) != 0); __syncthreads(); if (ty==0) { float maxScor2Old = sift1[p1].ambiguity*(sift1[p1].score + 1e-6f); if (maxScore>sift1[p1].score) { maxScor2 = max(sift1[p1].score, maxScor2); sift1[p1].ambiguity = maxScor2 / (maxScore + 1e-6f); sift1[p1].score = maxScore; sift1[p1].match = maxIndex; sift1[p1].match_xpos = sift2[maxIndex].xpos; sift1[p1].match_ypos = sift2[maxIndex].ypos; } else if (maxScore>maxScor2Old) sift1[p1].ambiguity = maxScore / (sift1[p1].score + 1e-6f); } __syncthreads(); if (tx==0 && ty==0) atomicExch((int* )&lock, 0); } template <int size> __device__ void InvertMatrix(float elem[size][size], float res[size][size]) { int indx[size]; float b[size]; float vv[size]; for (int i=0;i<size;i++) indx[i] = 0; int imax = 0; float d = 1.0; for (int i=0;i<size;i++) { // find biggest element for each row float big = 0.0; for (int j=0;j<size;j++) { float temp = fabs(elem[i][j]); if (temp>big) big = temp; } if (big>0.0) vv[i] = 1.0/big; else vv[i] = 1e16; } for (int j=0;j<size;j++) { for (int i=0;i<j;i++) { // i<j float sum = elem[i][j]; // i<j (lower left) for (int k=0;k<i;k++) // k<i<j sum -= elem[i][k]*elem[k][j]; // i>k (upper right), k<j (lower left) elem[i][j] = sum; // i<j (lower left) } float big = 0.0; for (int i=j;i<size;i++) { // i>=j float sum = elem[i][j]; // i>=j (upper right) for (int k=0;k<j;k++) // k<j<=i sum -= elem[i][k]*elem[k][j]; // i>k (upper right), k<j (lower left) elem[i][j] = sum; // i>=j (upper right) float dum = vv[i]*fabs(sum); if (dum>=big) { big = dum; imax = i; } } if (j!=imax) { // imax>j for (int k=0;k<size;k++) { float dum = elem[imax][k]; // upper right and lower left elem[imax][k] = elem[j][k]; elem[j][k] = dum; } d = -d; vv[imax] = vv[j]; } indx[j] = imax; if (elem[j][j]==0.0) // j==j 
(upper right) elem[j][j] = 1e-16; if (j!=(size-1)) { float dum = 1.0/elem[j][j]; for (int i=j+1;i<size;i++) // i>j elem[i][j] *= dum; // i>j (upper right) } } for (int j=0;j<size;j++) { for (int k=0;k<size;k++) b[k] = 0.0; b[j] = 1.0; int ii = -1; for (int i=0;i<size;i++) { int ip = indx[i]; float sum = b[ip]; b[ip] = b[i]; if (ii!=-1) for (int j=ii;j<i;j++) sum -= elem[i][j]*b[j]; // i>j (upper right) else if (sum!=0.0) ii = i; b[i] = sum; } for (int i=size-1;i>=0;i--) { float sum = b[i]; for (int j=i+1;j<size;j++) sum -= elem[i][j]*b[j]; // i<j (lower left) b[i] = sum/elem[i][i]; // i==i (upper right) } for (int i=0;i<size;i++) res[i][j] = b[i]; } } __global__ void ComputeHomographies(float *coord, int *randPts, float *homo, int numPts) { float a[8][8], ia[8][8]; float b[8]; const int bx = blockIdx.x; const int tx = threadIdx.x; const int idx = blockDim.x*bx + tx; const int numLoops = blockDim.x*gridDim.x; for (int i=0;i<4;i++) { int pt = randPts[i*numLoops+idx]; float x1 = coord[pt+0*numPts]; float y1 = coord[pt+1*numPts]; float x2 = coord[pt+2*numPts]; float y2 = coord[pt+3*numPts]; float *row1 = a[2*i+0]; row1[0] = x1; row1[1] = y1; row1[2] = 1.0; row1[3] = row1[4] = row1[5] = 0.0; row1[6] = -x2*x1; row1[7] = -x2*y1; float *row2 = a[2*i+1]; row2[0] = row2[1] = row2[2] = 0.0; row2[3] = x1; row2[4] = y1; row2[5] = 1.0; row2[6] = -y2*x1; row2[7] = -y2*y1; b[2*i+0] = x2; b[2*i+1] = y2; } InvertMatrix<8>(a, ia); __syncthreads(); for (int j=0;j<8;j++) { float sum = 0.0f; for (int i=0;i<8;i++) sum += ia[j][i]*b[i]; homo[j*numLoops+idx] = sum; } __syncthreads(); } #define TESTHOMO_TESTS 16 // number of tests per block, alt. 32, 32 #define TESTHOMO_LOOPS 16 // number of loops per block, alt. 8, 16 __global__ void TestHomographies(float *d_coord, float *d_homo, int *d_counts, int numPts, float thresh2) { __shared__ float homo[8*TESTHOMO_LOOPS]; __shared__ int cnts[TESTHOMO_TESTS*TESTHOMO_LOOPS]; const int tx = threadIdx.x; const int ty = threadIdx.y; const int idx = blockIdx.y*blockDim.y + tx; const int numLoops = blockDim.y*gridDim.y; if (ty<8 && tx<TESTHOMO_LOOPS) homo[tx*8+ty] = d_homo[idx+ty*numLoops]; __syncthreads(); float a[8]; for (int i=0;i<8;i++) a[i] = homo[ty*8+i]; int cnt = 0; for (int i=tx;i<numPts;i+=TESTHOMO_TESTS) { float x1 = d_coord[i+0*numPts]; float y1 = d_coord[i+1*numPts]; float x2 = d_coord[i+2*numPts]; float y2 = d_coord[i+3*numPts]; float nomx = __fmul_rz(a[0],x1) + __fmul_rz(a[1],y1) + a[2]; float nomy = __fmul_rz(a[3],x1) + __fmul_rz(a[4],y1) + a[5]; float deno = __fmul_rz(a[6],x1) + __fmul_rz(a[7],y1) + 1.0f; float errx = __fmul_rz(x2,deno) - nomx; float erry = __fmul_rz(y2,deno) - nomy; float err2 = __fmul_rz(errx,errx) + __fmul_rz(erry,erry); if (err2<__fmul_rz(thresh2,__fmul_rz(deno,deno))) cnt ++; } int kty = TESTHOMO_TESTS*ty; cnts[kty + tx] = cnt; __syncthreads(); int len = TESTHOMO_TESTS/2; while (len>0) { if (tx<len) cnts[kty + tx] += cnts[kty + tx + len]; len /= 2; __syncthreads(); } if (tx<TESTHOMO_LOOPS && ty==0) d_counts[idx] = cnts[TESTHOMO_TESTS*tx]; __syncthreads(); } //================= Host matching functions =====================// double FindHomography(SiftData &data, float *homography, int *numMatches, int numLoops, float minScore, float maxAmbiguity, float thresh) { *numMatches = 0; homography[0] = homography[4] = homography[8] = 1.0f; homography[1] = homography[2] = homography[3] = 0.0f; homography[5] = homography[6] = homography[7] = 0.0f; #ifdef MANAGEDMEM SiftPoint *d_sift = data.m_data; #else if (data.d_data==NULL) return 0.0f; SiftPoint 
*d_sift = data.d_data; #endif TimerGPU timer(0); numLoops = iDivUp(numLoops,16)*16; int numPts = data.numPts; if (numPts<8) return 0.0f; int numPtsUp = iDivUp(numPts, 16)*16; float *d_coord, *d_homo; int *d_randPts, *h_randPts; int randSize = 4*sizeof(int)*numLoops; int szFl = sizeof(float); int szPt = sizeof(SiftPoint); safeCall(cudaMalloc((void **)&d_coord, 4*sizeof(float)*numPtsUp)); safeCall(cudaMalloc((void **)&d_randPts, randSize)); safeCall(cudaMalloc((void **)&d_homo, 8*sizeof(float)*numLoops)); h_randPts = (int*)malloc(randSize); float *h_scores = (float *)malloc(sizeof(float)*numPtsUp); float *h_ambiguities = (float *)malloc(sizeof(float)*numPtsUp); safeCall(cudaMemcpy2D(h_scores, szFl, &d_sift[0].score, szPt, szFl, numPts, cudaMemcpyDeviceToHost)); safeCall(cudaMemcpy2D(h_ambiguities, szFl, &d_sift[0].ambiguity, szPt, szFl, numPts, cudaMemcpyDeviceToHost)); int *validPts = (int *)malloc(sizeof(int)*numPts); int numValid = 0; for (int i=0;i<numPts;i++) { if (h_scores[i]>minScore && h_ambiguities[i]<maxAmbiguity) validPts[numValid++] = i; } free(h_scores); free(h_ambiguities); if (numValid>=8) { for (int i=0;i<numLoops;i++) { int p1 = rand() % numValid; int p2 = rand() % numValid; int p3 = rand() % numValid; int p4 = rand() % numValid; while (p2==p1) p2 = rand() % numValid; while (p3==p1 || p3==p2) p3 = rand() % numValid; while (p4==p1 || p4==p2 || p4==p3) p4 = rand() % numValid; h_randPts[i+0*numLoops] = validPts[p1]; h_randPts[i+1*numLoops] = validPts[p2]; h_randPts[i+2*numLoops] = validPts[p3]; h_randPts[i+3*numLoops] = validPts[p4]; } safeCall(cudaMemcpy(d_randPts, h_randPts, randSize, cudaMemcpyHostToDevice)); safeCall(cudaMemcpy2D(&d_coord[0*numPtsUp], szFl, &d_sift[0].xpos, szPt, szFl, numPts, cudaMemcpyDeviceToDevice)); safeCall(cudaMemcpy2D(&d_coord[1*numPtsUp], szFl, &d_sift[0].ypos, szPt, szFl, numPts, cudaMemcpyDeviceToDevice)); safeCall(cudaMemcpy2D(&d_coord[2*numPtsUp], szFl, &d_sift[0].match_xpos, szPt, szFl, numPts, cudaMemcpyDeviceToDevice)); safeCall(cudaMemcpy2D(&d_coord[3*numPtsUp], szFl, &d_sift[0].match_ypos, szPt, szFl, numPts, cudaMemcpyDeviceToDevice)); ComputeHomographies<<<numLoops/16, 16>>>(d_coord, d_randPts, d_homo, numPtsUp); safeCall(cudaDeviceSynchronize()); checkMsg("ComputeHomographies() execution failed\n"); dim3 blocks(1, numLoops/TESTHOMO_LOOPS); dim3 threads(TESTHOMO_TESTS, TESTHOMO_LOOPS); TestHomographies<<<blocks, threads>>>(d_coord, d_homo, d_randPts, numPtsUp, thresh*thresh); safeCall(cudaDeviceSynchronize()); checkMsg("TestHomographies() execution failed\n"); safeCall(cudaMemcpy(h_randPts, d_randPts, sizeof(int)*numLoops, cudaMemcpyDeviceToHost)); int maxIndex = -1, maxCount = -1; for (int i=0;i<numLoops;i++) if (h_randPts[i]>maxCount) { maxCount = h_randPts[i]; maxIndex = i; } *numMatches = maxCount; safeCall(cudaMemcpy2D(homography, szFl, &d_homo[maxIndex], sizeof(float)*numLoops, szFl, 8, cudaMemcpyDeviceToHost)); } free(validPts); free(h_randPts); safeCall(cudaFree(d_homo)); safeCall(cudaFree(d_randPts)); safeCall(cudaFree(d_coord)); double gpuTime = timer.read(); #ifdef VERBOSE printf("FindHomography time = %.2f ms\n", gpuTime); #endif return gpuTime; } double MatchSiftData(SiftData &data1, SiftData &data2, cudaStream_t stream) { TimerGPU timer(0); int numPts1 = data1.numPts; int numPts2 = data2.numPts; if (!numPts1 || !numPts2) return 0.0; #ifdef MANAGEDMEM SiftPoint *sift1 = data1.m_data; SiftPoint *sift2 = data2.m_data; #else if (data1.d_data==NULL || data2.d_data==NULL) return 0.0f; SiftPoint *sift1 = data1.d_data; SiftPoint 
*sift2 = data2.d_data; #endif // Original version with correlation and maximization in two different kernels // Global memory reguirement: O(N^2) #if 0 float *d_corrData; int corrWidth = iDivUp(numPts2, 16)*16; int corrSize = sizeof(float)*numPts1*corrWidth; safeCall(cudaMalloc((void **)&d_corrData, corrSize)); #if 0 // K40c 10.9ms, 1080 Ti 3.8ms dim3 blocks1(numPts1, iDivUp(numPts2, 16)); dim3 threads1(16, 16); // each block: 1 points x 16 points MatchSiftPoints<<<blocks1, threads1>>>(sift1, sift2, d_corrData, numPts1, numPts2); #else // K40c 7.6ms, 1080 Ti 1.4ms dim3 blocks(iDivUp(numPts1,16), iDivUp(numPts2, 16)); dim3 threads(16, 16); // each block: 16 points x 16 points MatchSiftPoints2<<<blocks, threads>>>(sift1, sift2, d_corrData, numPts1, numPts2); #endif safeCall(cudaDeviceSynchronize()); dim3 blocksMax(iDivUp(numPts1, 16)); dim3 threadsMax(16, 16); FindMaxCorr<<<blocksMax, threadsMax>>>(d_corrData, sift1, sift2, numPts1, corrWidth, sizeof(SiftPoint)); safeCall(cudaDeviceSynchronize()); checkMsg("FindMaxCorr() execution failed\n"); safeCall(cudaFree(d_corrData)); #endif // Version suggested by Nicholas Lin with combined correlation and maximization // Global memory reguirement: O(N) #if 0 // K40c 51.2ms, 1080 Ti 9.6ms int block_dim = 16; float *d_corrData; int corrSize = numPts1 * block_dim * 2; safeCall(cudaMalloc((void **)&d_corrData, sizeof(float) * corrSize)); dim3 blocks(iDivUp(numPts1, block_dim)); dim3 threads(block_dim, block_dim); FindMaxCorr3<<<blocks, threads >>>(d_corrData, sift1, sift2, numPts1, numPts2); safeCall(cudaDeviceSynchronize()); checkMsg("FindMaxCorr3() execution failed\n"); safeCall(cudaFree(d_corrData)); #endif // Combined version with no global memory requirement using one 1 point per block #if 0 // K40c 8.9ms, 1080 Ti 2.1ms, 2080 Ti 1.0ms dim3 blocksMax(numPts1); dim3 threadsMax(FMC2W, FMC2H); FindMaxCorr2<<<blocksMax, threadsMax>>>(sift1, sift2, numPts1, numPts2); safeCall(cudaDeviceSynchronize()); checkMsg("FindMaxCorr2() execution failed\n"); #endif // Combined version with no global memory requirement using one FMC2H points per block #if 0 // K40c 9.2ms, 1080 Ti 1.3ms, 2080 Ti 1.1ms dim3 blocksMax2(iDivUp(numPts1, FMC2H)); dim3 threadsMax2(FMC2W, FMC2H); FindMaxCorr4<<<blocksMax2, threadsMax2>>>(sift1, sift2, numPts1, numPts2); safeCall(cudaDeviceSynchronize()); checkMsg("FindMaxCorr4() execution failed\n"); #endif // Combined version with no global memory requirement using global locks #if 1 dim3 blocksMax3(iDivUp(numPts1, 16), iDivUp(numPts2, 512)); dim3 threadsMax3(16, 16); CleanMatches<<<iDivUp(numPts1, 64), 64, 0, stream>>>(sift1, numPts1); int mode = 10; if (mode==5)// K40c 5.0ms, 1080 Ti 1.2ms, 2080 Ti 0.83ms FindMaxCorr5<<<blocksMax3, threadsMax3>>>(sift1, sift2, numPts1, numPts2); else if (mode==6) { // 2080 Ti 0.89ms threadsMax3 = dim3(32, 16); FindMaxCorr6<<<blocksMax3, threadsMax3>>>(sift1, sift2, numPts1, numPts2); } else if (mode==7) // 2080 Ti 0.50ms FindMaxCorr7<<<blocksMax3, threadsMax3>>>(sift1, sift2, numPts1, numPts2); else if (mode==8) { // 2080 Ti 0.45ms blocksMax3 = dim3(iDivUp(numPts1, FMC_BW), iDivUp(numPts2, FMC_GH)); threadsMax3 = dim3(FMC_NW, FMC_NH); FindMaxCorr8<<<blocksMax3, threadsMax3>>>(sift1, sift2, numPts1, numPts2); } else if (mode==9) { // 2080 Ti 0.46ms blocksMax3 = dim3(iDivUp(numPts1, FMC_BW), iDivUp(numPts2, FMC_GH)); threadsMax3 = dim3(FMC_NW, FMC_NH); FindMaxCorr9<<<blocksMax3, threadsMax3>>>(sift1, sift2, numPts1, numPts2); } else if (mode==10) { // 2080 Ti 0.24ms blocksMax3 = dim3(iDivUp(numPts1, M7W)); 
threadsMax3 = dim3(M7W, M7H/M7R); FindMaxCorr10<<<blocksMax3, threadsMax3, 0, stream>>>(sift1, sift2, numPts1, numPts2); } safeCall(cudaStreamSynchronize(stream)); checkMsg("FindMaxCorr5() execution failed\n"); #endif if (data1.h_data!=NULL) { float *h_ptr = &data1.h_data[0].score; float *d_ptr = &data1.d_data[0].score; safeCall(cudaMemcpy2DAsync(h_ptr, sizeof(SiftPoint), d_ptr, sizeof(SiftPoint), 5*sizeof(float), data1.numPts, cudaMemcpyDeviceToHost, stream)); } double gpuTime = timer.read(); #ifdef VERBOSE printf("MatchSiftData time = %.2f ms\n", gpuTime); #endif return gpuTime; }
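The FindMaxCorr6 kernel above folds each 128-dimensional descriptor dot product into a single value with repeated ShiftDown() calls. A minimal sketch of that warp-shuffle reduction, assuming ShiftDown() is a thin wrapper around __shfl_down_sync() (the wrapper itself is not shown in this excerpt); the kernel name and sizes below are illustrative only:

// Warp-level sum reduction: after the loop, lane 0 holds the sum over all 32 lanes.
__inline__ __device__ float WarpReduceSum(float val)
{
  for (int offset = 16; offset > 0; offset >>= 1)
    val += __shfl_down_sync(0xffffffff, val, offset);
  return val;
}

// One warp computes a 128-dimensional dot product, four elements per lane.
__global__ void DotProduct128(const float *a, const float *b, float *result)
{
  const int lane = threadIdx.x & 31;
  float sum = 0.0f;
  for (int i = lane; i < 128; i += 32)
    sum += a[i] * b[i];
  sum = WarpReduceSum(sum);
  if (lane == 0)
    *result = sum;   // only lane 0 holds the complete sum
}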
b4e2f5d1078a1ddb3a20f89c66ed4da3cdc7f5c9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // File: predict.cu // // GPU Coder version : 1.2 // CUDA/C/C++ source code generated on : 02-Nov-2018 17:52:01 // // Include Files #include "MWCudaDimUtility.h" #include "segnet_predict.h" #include "predict.h" #include "DeepLearningNetwork.h" // Function Declarations static __global__ void c_DeepLearningNetwork_predict_k(const uint8_T inputdata [518400], uint8_T b_inputdata[518400]); static __global__ void d_DeepLearningNetwork_predict_k(uint8_T inputdata[518400], real32_T inputT[518400]); static __global__ void e_DeepLearningNetwork_predict_k(real32_T out[1900800], real32_T outT[1900800]); // Function Definitions // // Arguments : dim3 blockArg // dim3 gridArg // const uint8_T inputdata[518400] // uint8_T b_inputdata[518400] // Return Type : void // static __global__ __launch_bounds__(512, 1) void c_DeepLearningNetwork_predict_k (const uint8_T inputdata[518400], uint8_T b_inputdata[518400]) { uint32_T threadId; int32_T i0; threadId = (uint32_T)mwGetGlobalThreadIndex(); i0 = (int32_T)threadId; if (i0 < 518400) { b_inputdata[i0] = inputdata[i0]; } } // // Arguments : dim3 blockArg // dim3 gridArg // uint8_T inputdata[518400] // real32_T inputT[518400] // Return Type : void // static __global__ __launch_bounds__(512, 1) void d_DeepLearningNetwork_predict_k (uint8_T inputdata[518400], real32_T inputT[518400]) { uint32_T threadId; int32_T i0; int32_T i1; int32_T p; uint32_T tmpIndex; threadId = (uint32_T)mwGetGlobalThreadIndex(); i0 = (int32_T)(threadId % 480U); tmpIndex = (threadId - (uint32_T)i0) / 480U; i1 = (int32_T)(tmpIndex % 360U); tmpIndex = (tmpIndex - (uint32_T)i1) / 360U; p = (int32_T)tmpIndex; if (p < 3) { inputT[(i0 + 480 * i1) + 172800 * p] = (real32_T)inputdata[(i1 + 360 * i0) + 172800 * p]; } } // // Arguments : dim3 blockArg // dim3 gridArg // real32_T out[1900800] // real32_T outT[1900800] // Return Type : void // static __global__ __launch_bounds__(512, 1) void e_DeepLearningNetwork_predict_k (real32_T out[1900800], real32_T outT[1900800]) { uint32_T threadId; int32_T i0; int32_T i1; int32_T p; uint32_T tmpIndex; threadId = (uint32_T)mwGetGlobalThreadIndex(); i0 = (int32_T)(threadId % 360U); tmpIndex = (threadId - (uint32_T)i0) / 360U; i1 = (int32_T)(tmpIndex % 480U); tmpIndex = (tmpIndex - (uint32_T)i1) / 480U; p = (int32_T)tmpIndex; if (p < 11) { outT[(i0 + 360 * i1) + 172800 * p] = out[(i1 + 480 * i0) + 172800 * p]; } } // // Arguments : b_SegNet_0 *obj // const uint8_T inputdata[518400] // real32_T outT[1900800] // Return Type : void // void DeepLearningNetwork_predict(b_SegNet_0 *obj, const uint8_T inputdata[518400], real32_T outT[1900800]) { real32_T (*gpu_inputT)[518400]; real32_T (*gpu_out)[1900800]; uint8_T (*gpu_inputdata)[518400]; uint8_T (*b_gpu_inputdata)[518400]; real32_T (*gpu_outT)[1900800]; hipMalloc(&gpu_outT, 7603200UL); hipMalloc(&gpu_out, 7603200UL); hipMalloc(&gpu_inputT, 2073600UL); hipMalloc(&b_gpu_inputdata, 518400UL); hipMalloc(&gpu_inputdata, 518400UL); hipMemcpy(gpu_inputdata, (void *)&inputdata[0], 518400UL, hipMemcpyHostToDevice); hipLaunchKernelGGL(( c_DeepLearningNetwork_predict_k), dim3(dim3(1013U, 1U, 1U)), dim3(dim3(512U, 1U, 1U)), 0, 0, *gpu_inputdata, *b_gpu_inputdata); hipLaunchKernelGGL(( d_DeepLearningNetwork_predict_k), dim3(dim3(1013U, 1U, 1U)), dim3(dim3(512U, 1U, 1U)), 0, 0, *b_gpu_inputdata, *gpu_inputT); hipMemcpy(obj->inputData, *gpu_inputT, 518400UL * sizeof(real32_T), hipMemcpyDeviceToDevice); obj->predict(); hipMemcpy(*gpu_out, 
obj->outputData, 1900800UL * sizeof(real32_T), hipMemcpyDeviceToDevice); hipLaunchKernelGGL(( e_DeepLearningNetwork_predict_k), dim3(dim3(3713U, 1U, 1U)), dim3(dim3(512U, 1U, 1U)), 0, 0, *gpu_out, *gpu_outT); hipMemcpy(&outT[0], gpu_outT, 7603200UL, hipMemcpyDeviceToHost); hipFree(*gpu_inputdata); hipFree(*b_gpu_inputdata); hipFree(*gpu_inputT); hipFree(*gpu_out); hipFree(*gpu_outT); } // // File trailer for predict.cu // // [EOF] //
b4e2f5d1078a1ddb3a20f89c66ed4da3cdc7f5c9.cu
// // File: predict.cu // // GPU Coder version : 1.2 // CUDA/C/C++ source code generated on : 02-Nov-2018 17:52:01 // // Include Files #include "MWCudaDimUtility.h" #include "segnet_predict.h" #include "predict.h" #include "DeepLearningNetwork.h" // Function Declarations static __global__ void c_DeepLearningNetwork_predict_k(const uint8_T inputdata [518400], uint8_T b_inputdata[518400]); static __global__ void d_DeepLearningNetwork_predict_k(uint8_T inputdata[518400], real32_T inputT[518400]); static __global__ void e_DeepLearningNetwork_predict_k(real32_T out[1900800], real32_T outT[1900800]); // Function Definitions // // Arguments : dim3 blockArg // dim3 gridArg // const uint8_T inputdata[518400] // uint8_T b_inputdata[518400] // Return Type : void // static __global__ __launch_bounds__(512, 1) void c_DeepLearningNetwork_predict_k (const uint8_T inputdata[518400], uint8_T b_inputdata[518400]) { uint32_T threadId; int32_T i0; threadId = (uint32_T)mwGetGlobalThreadIndex(); i0 = (int32_T)threadId; if (i0 < 518400) { b_inputdata[i0] = inputdata[i0]; } } // // Arguments : dim3 blockArg // dim3 gridArg // uint8_T inputdata[518400] // real32_T inputT[518400] // Return Type : void // static __global__ __launch_bounds__(512, 1) void d_DeepLearningNetwork_predict_k (uint8_T inputdata[518400], real32_T inputT[518400]) { uint32_T threadId; int32_T i0; int32_T i1; int32_T p; uint32_T tmpIndex; threadId = (uint32_T)mwGetGlobalThreadIndex(); i0 = (int32_T)(threadId % 480U); tmpIndex = (threadId - (uint32_T)i0) / 480U; i1 = (int32_T)(tmpIndex % 360U); tmpIndex = (tmpIndex - (uint32_T)i1) / 360U; p = (int32_T)tmpIndex; if (p < 3) { inputT[(i0 + 480 * i1) + 172800 * p] = (real32_T)inputdata[(i1 + 360 * i0) + 172800 * p]; } } // // Arguments : dim3 blockArg // dim3 gridArg // real32_T out[1900800] // real32_T outT[1900800] // Return Type : void // static __global__ __launch_bounds__(512, 1) void e_DeepLearningNetwork_predict_k (real32_T out[1900800], real32_T outT[1900800]) { uint32_T threadId; int32_T i0; int32_T i1; int32_T p; uint32_T tmpIndex; threadId = (uint32_T)mwGetGlobalThreadIndex(); i0 = (int32_T)(threadId % 360U); tmpIndex = (threadId - (uint32_T)i0) / 360U; i1 = (int32_T)(tmpIndex % 480U); tmpIndex = (tmpIndex - (uint32_T)i1) / 480U; p = (int32_T)tmpIndex; if (p < 11) { outT[(i0 + 360 * i1) + 172800 * p] = out[(i1 + 480 * i0) + 172800 * p]; } } // // Arguments : b_SegNet_0 *obj // const uint8_T inputdata[518400] // real32_T outT[1900800] // Return Type : void // void DeepLearningNetwork_predict(b_SegNet_0 *obj, const uint8_T inputdata[518400], real32_T outT[1900800]) { real32_T (*gpu_inputT)[518400]; real32_T (*gpu_out)[1900800]; uint8_T (*gpu_inputdata)[518400]; uint8_T (*b_gpu_inputdata)[518400]; real32_T (*gpu_outT)[1900800]; cudaMalloc(&gpu_outT, 7603200UL); cudaMalloc(&gpu_out, 7603200UL); cudaMalloc(&gpu_inputT, 2073600UL); cudaMalloc(&b_gpu_inputdata, 518400UL); cudaMalloc(&gpu_inputdata, 518400UL); cudaMemcpy(gpu_inputdata, (void *)&inputdata[0], 518400UL, cudaMemcpyHostToDevice); c_DeepLearningNetwork_predict_k<<<dim3(1013U, 1U, 1U), dim3(512U, 1U, 1U)>>> (*gpu_inputdata, *b_gpu_inputdata); d_DeepLearningNetwork_predict_k<<<dim3(1013U, 1U, 1U), dim3(512U, 1U, 1U)>>> (*b_gpu_inputdata, *gpu_inputT); cudaMemcpy(obj->inputData, *gpu_inputT, 518400UL * sizeof(real32_T), cudaMemcpyDeviceToDevice); obj->predict(); cudaMemcpy(*gpu_out, obj->outputData, 1900800UL * sizeof(real32_T), cudaMemcpyDeviceToDevice); e_DeepLearningNetwork_predict_k<<<dim3(3713U, 1U, 1U), dim3(512U, 1U, 1U)>>> 
(*gpu_out, *gpu_outT); cudaMemcpy(&outT[0], gpu_outT, 7603200UL, cudaMemcpyDeviceToHost); cudaFree(*gpu_inputdata); cudaFree(*b_gpu_inputdata); cudaFree(*gpu_inputT); cudaFree(*gpu_out); cudaFree(*gpu_outT); } // // File trailer for predict.cu // // [EOF] //
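The d_ and e_ kernels above recover (row, column, channel) coordinates from a flat thread index with successive modulo and divide steps, then swap the roles of the first two coordinates to permute the layout. A generic sketch of that pattern for a hypothetical H x W x C buffer (kernel name and parameters are illustrative, not part of the generated code):

// Gather-style transpose of the first two dimensions of an (H, W, C) buffer.
// Launch with at least H*W*C threads; the guard handles the padded tail of the grid.
__global__ void permuteHWC(const float *in, float *out, int H, int W, int C)
{
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  int w = tid % W;           // fastest-varying output coordinate
  int h = (tid / W) % H;
  int c = tid / (W * H);     // slowest-varying coordinate (channel plane)
  if (c < C)
    out[w + W * h + W * H * c] = in[h + H * w + W * H * c];
}

A matching launch would use ceil(H*W*C / 512.0) blocks of 512 threads, which is how the generated code arrives at grids such as dim3(1013U, 1U, 1U) for 518400 elements.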
4ad194144f8067a16f43b47d2660bce52683b82b.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "bboxUtils.h" #include "hip/hip_runtime_api.h" #include "gatherNMSOutputs.h" #include "kernel.h" #include "nmsUtils.h" pluginStatus_t nmsInference(hipStream_t stream, const int N, const int perBatchBoxesSize, const int perBatchScoresSize, const bool shareLocation, const int backgroundLabelId, const int numPredsPerClass, const int numClasses, const int topK, const int keepTopK, const float scoreThreshold, const float iouThreshold, const DataType DT_BBOX, const void* locData, const DataType DT_SCORE, const void* confData, void* keepCount, void* nmsedBoxes, void* nmsedScores, void* nmsedClasses, void* workspace, bool isNormalized, bool confSigmoid, bool clipBoxes) { // locCount = batch_size * number_boxes_per_sample * 4 const int locCount = N * perBatchBoxesSize; /* * shareLocation * Bounding box are shared among all classes, i.e., a bounding box could be classified as any candidate class. * Otherwise * Bounding box are designed for specific classes, i.e., a bounding box could be classified as one certain class or * not (binary classification). */ const int numLocClasses = shareLocation ? 
1 : numClasses; size_t bboxDataSize = detectionForwardBBoxDataSize(N, perBatchBoxesSize, DataType::kFLOAT); void* bboxDataRaw = workspace; hipMemcpyAsync(bboxDataRaw, locData, bboxDataSize, hipMemcpyDeviceToDevice, stream); pluginStatus_t status; /* * bboxDataRaw format: * [batch size, numPriors (per sample), numLocClasses, 4] */ // float for now void* bboxData; size_t bboxPermuteSize = detectionForwardBBoxPermuteSize(shareLocation, N, perBatchBoxesSize, DataType::kFLOAT); void* bboxPermute = nextWorkspacePtr((int8_t*) bboxDataRaw, bboxDataSize); /* * After permutation, bboxData format: * [batch_size, numLocClasses, numPriors (per sample) (numPredsPerClass), 4] * This is equivalent to swapping axis */ if (!shareLocation) { status = permuteData( stream, locCount, numLocClasses, numPredsPerClass, 4, DataType::kFLOAT, false, bboxDataRaw, bboxPermute); ASSERT_FAILURE(status == STATUS_SUCCESS); bboxData = bboxPermute; } /* * If shareLocation, numLocClasses = 1 * No need to permute data on linear memory */ else { bboxData = bboxDataRaw; } /* * Conf data format * [batch size, numPriors * param.numClasses, 1, 1] */ const int numScores = N * perBatchScoresSize; size_t totalScoresSize = detectionForwardPreNMSSize(N, perBatchScoresSize); void* scores = nextWorkspacePtr((int8_t*) bboxPermute, bboxPermuteSize); // need a conf_scores /* * After permutation, bboxData format: * [batch_size, numClasses, numPredsPerClass, 1] */ status = permuteData( stream, numScores, numClasses, numPredsPerClass, 1, DataType::kFLOAT, confSigmoid, confData, scores); ASSERT_FAILURE(status == STATUS_SUCCESS); size_t indicesSize = detectionForwardPreNMSSize(N, perBatchScoresSize); void* indices = nextWorkspacePtr((int8_t*) scores, totalScoresSize); size_t postNMSScoresSize = detectionForwardPostNMSSize(N, numClasses, topK); size_t postNMSIndicesSize = detectionForwardPostNMSSize(N, numClasses, topK); void* postNMSScores = nextWorkspacePtr((int8_t*) indices, indicesSize); void* postNMSIndices = nextWorkspacePtr((int8_t*) postNMSScores, postNMSScoresSize); void* sortingWorkspace = nextWorkspacePtr((int8_t*) postNMSIndices, postNMSIndicesSize); // Sort the scores so that the following NMS could be applied. status = sortScoresPerClass(stream, N, numClasses, numPredsPerClass, backgroundLabelId, scoreThreshold, DataType::kFLOAT, scores, indices, sortingWorkspace); ASSERT_FAILURE(status == STATUS_SUCCESS); // This is set to true as the input bounding boxes are of the format [ymin, // xmin, ymax, xmax]. The default implementation assumes [xmin, ymin, xmax, ymax] bool flipXY = true; // NMS status = allClassNMS(stream, N, numClasses, numPredsPerClass, topK, iouThreshold, shareLocation, isNormalized, DataType::kFLOAT, DataType::kFLOAT, bboxData, scores, indices, postNMSScores, postNMSIndices, flipXY); ASSERT_FAILURE(status == STATUS_SUCCESS); // Sort the bounding boxes after NMS using scores status = sortScoresPerImage(stream, N, numClasses * topK, DataType::kFLOAT, postNMSScores, postNMSIndices, scores, indices, sortingWorkspace); ASSERT_FAILURE(status == STATUS_SUCCESS); // Gather data from the sorted bounding boxes after NMS status = gatherNMSOutputs(stream, shareLocation, N, numPredsPerClass, numClasses, topK, keepTopK, DataType::kFLOAT, DataType::kFLOAT, indices, scores, bboxData, keepCount, nmsedBoxes, nmsedScores, nmsedClasses, clipBoxes); ASSERT_FAILURE(status == STATUS_SUCCESS); return STATUS_SUCCESS; }
4ad194144f8067a16f43b47d2660bce52683b82b.cu
/* * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "bboxUtils.h" #include "cuda_runtime_api.h" #include "gatherNMSOutputs.h" #include "kernel.h" #include "nmsUtils.h" pluginStatus_t nmsInference(cudaStream_t stream, const int N, const int perBatchBoxesSize, const int perBatchScoresSize, const bool shareLocation, const int backgroundLabelId, const int numPredsPerClass, const int numClasses, const int topK, const int keepTopK, const float scoreThreshold, const float iouThreshold, const DataType DT_BBOX, const void* locData, const DataType DT_SCORE, const void* confData, void* keepCount, void* nmsedBoxes, void* nmsedScores, void* nmsedClasses, void* workspace, bool isNormalized, bool confSigmoid, bool clipBoxes) { // locCount = batch_size * number_boxes_per_sample * 4 const int locCount = N * perBatchBoxesSize; /* * shareLocation * Bounding box are shared among all classes, i.e., a bounding box could be classified as any candidate class. * Otherwise * Bounding box are designed for specific classes, i.e., a bounding box could be classified as one certain class or * not (binary classification). */ const int numLocClasses = shareLocation ? 1 : numClasses; size_t bboxDataSize = detectionForwardBBoxDataSize(N, perBatchBoxesSize, DataType::kFLOAT); void* bboxDataRaw = workspace; cudaMemcpyAsync(bboxDataRaw, locData, bboxDataSize, cudaMemcpyDeviceToDevice, stream); pluginStatus_t status; /* * bboxDataRaw format: * [batch size, numPriors (per sample), numLocClasses, 4] */ // float for now void* bboxData; size_t bboxPermuteSize = detectionForwardBBoxPermuteSize(shareLocation, N, perBatchBoxesSize, DataType::kFLOAT); void* bboxPermute = nextWorkspacePtr((int8_t*) bboxDataRaw, bboxDataSize); /* * After permutation, bboxData format: * [batch_size, numLocClasses, numPriors (per sample) (numPredsPerClass), 4] * This is equivalent to swapping axis */ if (!shareLocation) { status = permuteData( stream, locCount, numLocClasses, numPredsPerClass, 4, DataType::kFLOAT, false, bboxDataRaw, bboxPermute); ASSERT_FAILURE(status == STATUS_SUCCESS); bboxData = bboxPermute; } /* * If shareLocation, numLocClasses = 1 * No need to permute data on linear memory */ else { bboxData = bboxDataRaw; } /* * Conf data format * [batch size, numPriors * param.numClasses, 1, 1] */ const int numScores = N * perBatchScoresSize; size_t totalScoresSize = detectionForwardPreNMSSize(N, perBatchScoresSize); void* scores = nextWorkspacePtr((int8_t*) bboxPermute, bboxPermuteSize); // need a conf_scores /* * After permutation, bboxData format: * [batch_size, numClasses, numPredsPerClass, 1] */ status = permuteData( stream, numScores, numClasses, numPredsPerClass, 1, DataType::kFLOAT, confSigmoid, confData, scores); ASSERT_FAILURE(status == STATUS_SUCCESS); size_t indicesSize = detectionForwardPreNMSSize(N, perBatchScoresSize); void* indices = nextWorkspacePtr((int8_t*) scores, totalScoresSize); size_t postNMSScoresSize = detectionForwardPostNMSSize(N, numClasses, topK); size_t 
postNMSIndicesSize = detectionForwardPostNMSSize(N, numClasses, topK); void* postNMSScores = nextWorkspacePtr((int8_t*) indices, indicesSize); void* postNMSIndices = nextWorkspacePtr((int8_t*) postNMSScores, postNMSScoresSize); void* sortingWorkspace = nextWorkspacePtr((int8_t*) postNMSIndices, postNMSIndicesSize); // Sort the scores so that the following NMS could be applied. status = sortScoresPerClass(stream, N, numClasses, numPredsPerClass, backgroundLabelId, scoreThreshold, DataType::kFLOAT, scores, indices, sortingWorkspace); ASSERT_FAILURE(status == STATUS_SUCCESS); // This is set to true as the input bounding boxes are of the format [ymin, // xmin, ymax, xmax]. The default implementation assumes [xmin, ymin, xmax, ymax] bool flipXY = true; // NMS status = allClassNMS(stream, N, numClasses, numPredsPerClass, topK, iouThreshold, shareLocation, isNormalized, DataType::kFLOAT, DataType::kFLOAT, bboxData, scores, indices, postNMSScores, postNMSIndices, flipXY); ASSERT_FAILURE(status == STATUS_SUCCESS); // Sort the bounding boxes after NMS using scores status = sortScoresPerImage(stream, N, numClasses * topK, DataType::kFLOAT, postNMSScores, postNMSIndices, scores, indices, sortingWorkspace); ASSERT_FAILURE(status == STATUS_SUCCESS); // Gather data from the sorted bounding boxes after NMS status = gatherNMSOutputs(stream, shareLocation, N, numPredsPerClass, numClasses, topK, keepTopK, DataType::kFLOAT, DataType::kFLOAT, indices, scores, bboxData, keepCount, nmsedBoxes, nmsedScores, nmsedClasses, clipBoxes); ASSERT_FAILURE(status == STATUS_SUCCESS); return STATUS_SUCCESS; }
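nmsInference above carves every intermediate buffer out of one pre-allocated workspace by chaining nextWorkspacePtr() calls, so only a single device allocation is needed. A minimal sketch of that idiom — assuming only that each region starts after the previous one at an aligned address; the real helper's alignment policy may differ — with illustrative helper and function names:

#include <cstdint>
#include <cstddef>

// Advance past a sub-buffer and round up to a 16-byte boundary so the next
// region is safely aligned (16 bytes is an illustrative choice).
static int8_t* nextRegion(int8_t* base, size_t prevSize)
{
  uintptr_t addr = reinterpret_cast<uintptr_t>(base) + prevSize;
  const uintptr_t kAlign = 16;
  addr = (addr + kAlign - 1) & ~(kAlign - 1);
  return reinterpret_cast<int8_t*>(addr);
}

// Slice one workspace allocation into three regions of known sizes.
void sliceWorkspace(void* workspace, size_t sizeA, size_t sizeB, size_t sizeC)
{
  int8_t* a = static_cast<int8_t*>(workspace);
  int8_t* b = nextRegion(a, sizeA);
  int8_t* c = nextRegion(b, sizeB);
  (void)sizeC; (void)c;  // a, b, c can now be handed to the individual kernels
}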
39120adcf426cc79cd059eff798cd07427653f81.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Matrix multiplication by parts // Elements stored in row-major order using namespace std; #include <stdio.h> #include <iostream> #include <fstream> #include "Common/helper_timer.h" #define BLOCK_SIZE 32 #define GRID_SIZE 16 typedef struct { int width; int height; float *elements; } Matrix; // Forward declaration of matrix mult __global__ void MatMulKernel(const Matrix, const Matrix, Matrix); // Host code void MatMul(const Matrix A, const Matrix B, Matrix C) { // Load matrices A and B to device memory Matrix d_A; d_A.width = A.width; d_A.height = A.height; size_t size = A.width * A.height * sizeof(float); hipMalloc((void **) &d_A.elements, size); hipMemcpy(d_A.elements, A.elements, size, hipMemcpyHostToDevice); Matrix d_B; d_B.width = B.width; d_B.height = B.height; size = B.width * B.height * sizeof(float); hipMalloc((void **) &d_B.elements, size); hipMemcpy(d_B.elements, B.elements, size, hipMemcpyHostToDevice); // allocate C in device Matrix d_C; d_C.width = C.width; d_C.height = C.height; size = d_C.width * d_C.height * sizeof(float); hipMalloc((void **) &d_C.elements, size); // call kernel dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); // define the block size (what is the best value?) dim3 dimGrid(C.width / BLOCK_SIZE, C.height / BLOCK_SIZE); // choose grid size depending on problem size hipLaunchKernelGGL(( MatMulKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_A, d_B, d_C); // copy C to host hipMemcpy(C.elements, d_C.elements, size, hipMemcpyDeviceToHost); // free device memory hipFree(d_A.elements); hipFree(d_B.elements); hipFree(d_C.elements); } //matrix multiplication kernel __global__ void MatMulKernel(Matrix A, Matrix B, Matrix C) { int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; float tmpSum = 0; if (row < A.height && col < B.width) { // each thread computes one element of the block sub-matrix for (int i = 0; i < A.width; i++) { tmpSum += A.elements[row * A.width + i] * B.elements[i * B.width + col]; } C.elements[row * C.width + col] = tmpSum; } } //CPU matrix multiplication{ void CPUMatMull(Matrix A, Matrix B, Matrix C) { float sum; for (int row = 0; row < A.height; row++) { for (int col = 0; col < B.width; col++) { sum = 0.f; for (int n = 0; n < A.width; n++) { sum += A.elements[row * A.width + n] * B.elements[n * B.width + col]; } C.elements[row * C.width + col] = sum; } } } // Check the result void check(const Matrix A, const Matrix B) { double err = 0; // Check the result and make sure it is correct for (int row = 0; row < A.height; row++) { for (int col = 0; col < A.width; col++) { err += A.elements[row * A.height + col] - B.elements[row * B.width + col]; } } cout << "Error: " << err << endl; } void printPerformance(float timeGPU, float timeCPU, double flopsPerMatrixMul, double gigaFlops, double gigaFlopsGPU) { printf( "CPU Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops\n", gigaFlops, timeCPU, flopsPerMatrixMul); printf( "GPU Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops\n", gigaFlopsGPU, timeGPU, flopsPerMatrixMul); } void expand(int Height, Matrix &A, Matrix &B) { int fileDim = 16; for (int i = 1; i <= (Height / fileDim) * (Height / fileDim); i++) { for (int j = 0; j < fileDim * fileDim; j++) { A.elements[i * fileDim * fileDim + j] = A.elements[j]; B.elements[i * fileDim * fileDim + j] = B.elements[j]; } } // for (int i = 0; i < Width; i++) { // for (int j = 0; j < Width; j++) // cout << A.elements[i * Width + j] 
<< "\t"; // cout << endl; // } } int main() { FILE *fp; if ((fp = fopen("times_N.csv", "a")) == NULL) { printf("Can't open .csv in append mode!\n"); exit(1); } fprintf(fp, "n,blocksPerGrid,threadsPerBlock,timeCPU,timeGPU,gflopsCPU,gflopsGPU\n"); int Height = 512; int Width = Height; Matrix A; Matrix B; Matrix C; Matrix D; A.width = Width; B.width = Width; C.width = Width; D.width = Width; A.height = Height; B.height = Height; C.height = Width; D.height = Width; A.elements = new float[Width * Width]; B.elements = new float[Width * Width]; C.elements = new float[Width * Width]; D.elements = new float[Width * Width]; //fill matrices std::ifstream A_input; std::ifstream B_input; A_input.open("A.txt"); B_input.open("B.txt"); float a, b; A_input >> a; B_input >> b; int i = 0; while (!A_input.eof()) { A.elements[i] = a; B.elements[i] = b; A_input >> a; B_input >> b; i += 1; } A_input.close(); B_input.close(); expand(Height, A, B); //przygotowanie i start timera StopWatchInterface *timer = NULL; sdkCreateTimer(&timer); sdkResetTimer(&timer); sdkStartTimer(&timer); MatMul(A, B, C); //synchronizacja wtkw i zatrzymanie timera hipDeviceSynchronize(); sdkStopTimer(&timer); float timeGPU = sdkGetTimerValue(&timer); sdkDeleteTimer(&timer); std::ofstream C_output; C_output.open("C.txt"); for (int i = 0; i < Width; i++) { for (int j = 0; j < Width; j++) C_output << C.elements[i * Width + j] << "\t"; C_output << endl; } clock_t tStart = clock(); CPUMatMull(A, B, D); clock_t tim = (clock() - tStart); float timeCPU = (float) tim / CLOCKS_PER_SEC * 1000; double flopsPerMatrixMul = 2.0 * (double) A.width * (double) A.height * (double) B.width; double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (timeCPU / 1000.0f); double gigaFlopsGPU = (flopsPerMatrixMul * 1.0e-9f) / (timeGPU / 1000.0f); printPerformance(timeGPU, timeCPU, flopsPerMatrixMul, gigaFlops, gigaFlopsGPU); int grid = C.width / BLOCK_SIZE; fprintf(fp, "%i,%i,%i,%f,%f,%f,%f\n", Width * Width, grid, BLOCK_SIZE, timeCPU, timeGPU, gigaFlops, gigaFlopsGPU ); check(C, D); }
39120adcf426cc79cd059eff798cd07427653f81.cu
// Matrix multiplication by parts // Elements stored in row-major order using namespace std; #include <stdio.h> #include <iostream> #include <fstream> #include "Common/helper_timer.h" #define BLOCK_SIZE 32 #define GRID_SIZE 16 typedef struct { int width; int height; float *elements; } Matrix; // Forward declaration of matrix mult __global__ void MatMulKernel(const Matrix, const Matrix, Matrix); // Host code void MatMul(const Matrix A, const Matrix B, Matrix C) { // Load matrices A and B to device memory Matrix d_A; d_A.width = A.width; d_A.height = A.height; size_t size = A.width * A.height * sizeof(float); cudaMalloc((void **) &d_A.elements, size); cudaMemcpy(d_A.elements, A.elements, size, cudaMemcpyHostToDevice); Matrix d_B; d_B.width = B.width; d_B.height = B.height; size = B.width * B.height * sizeof(float); cudaMalloc((void **) &d_B.elements, size); cudaMemcpy(d_B.elements, B.elements, size, cudaMemcpyHostToDevice); // allocate C in device Matrix d_C; d_C.width = C.width; d_C.height = C.height; size = d_C.width * d_C.height * sizeof(float); cudaMalloc((void **) &d_C.elements, size); // call kernel dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); // define the block size (what is the best value?) dim3 dimGrid(C.width / BLOCK_SIZE, C.height / BLOCK_SIZE); // choose grid size depending on problem size MatMulKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C); // copy C to host cudaMemcpy(C.elements, d_C.elements, size, cudaMemcpyDeviceToHost); // free device memory cudaFree(d_A.elements); cudaFree(d_B.elements); cudaFree(d_C.elements); } //matrix multiplication kernel __global__ void MatMulKernel(Matrix A, Matrix B, Matrix C) { int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; float tmpSum = 0; if (row < A.height && col < B.width) { // each thread computes one element of the block sub-matrix for (int i = 0; i < A.width; i++) { tmpSum += A.elements[row * A.width + i] * B.elements[i * B.width + col]; } C.elements[row * C.width + col] = tmpSum; } } //CPU matrix multiplication{ void CPUMatMull(Matrix A, Matrix B, Matrix C) { float sum; for (int row = 0; row < A.height; row++) { for (int col = 0; col < B.width; col++) { sum = 0.f; for (int n = 0; n < A.width; n++) { sum += A.elements[row * A.width + n] * B.elements[n * B.width + col]; } C.elements[row * C.width + col] = sum; } } } // Check the result void check(const Matrix A, const Matrix B) { double err = 0; // Check the result and make sure it is correct for (int row = 0; row < A.height; row++) { for (int col = 0; col < A.width; col++) { err += A.elements[row * A.height + col] - B.elements[row * B.width + col]; } } cout << "Error: " << err << endl; } void printPerformance(float timeGPU, float timeCPU, double flopsPerMatrixMul, double gigaFlops, double gigaFlopsGPU) { printf( "CPU Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops\n", gigaFlops, timeCPU, flopsPerMatrixMul); printf( "GPU Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops\n", gigaFlopsGPU, timeGPU, flopsPerMatrixMul); } void expand(int Height, Matrix &A, Matrix &B) { int fileDim = 16; for (int i = 1; i <= (Height / fileDim) * (Height / fileDim); i++) { for (int j = 0; j < fileDim * fileDim; j++) { A.elements[i * fileDim * fileDim + j] = A.elements[j]; B.elements[i * fileDim * fileDim + j] = B.elements[j]; } } // for (int i = 0; i < Width; i++) { // for (int j = 0; j < Width; j++) // cout << A.elements[i * Width + j] << "\t"; // cout << endl; // } } int main() { FILE *fp; if ((fp = fopen("times_N.csv", "a")) == NULL) { 
printf("Can't open .csv in append mode!\n"); exit(1); } fprintf(fp, "n,blocksPerGrid,threadsPerBlock,timeCPU,timeGPU,gflopsCPU,gflopsGPU\n"); int Height = 512; int Width = Height; Matrix A; Matrix B; Matrix C; Matrix D; A.width = Width; B.width = Width; C.width = Width; D.width = Width; A.height = Height; B.height = Height; C.height = Width; D.height = Width; A.elements = new float[Width * Width]; B.elements = new float[Width * Width]; C.elements = new float[Width * Width]; D.elements = new float[Width * Width]; //fill matrices std::ifstream A_input; std::ifstream B_input; A_input.open("A.txt"); B_input.open("B.txt"); float a, b; A_input >> a; B_input >> b; int i = 0; while (!A_input.eof()) { A.elements[i] = a; B.elements[i] = b; A_input >> a; B_input >> b; i += 1; } A_input.close(); B_input.close(); expand(Height, A, B); //przygotowanie i start timera StopWatchInterface *timer = NULL; sdkCreateTimer(&timer); sdkResetTimer(&timer); sdkStartTimer(&timer); MatMul(A, B, C); //synchronizacja wątków i zatrzymanie timera cudaThreadSynchronize(); sdkStopTimer(&timer); float timeGPU = sdkGetTimerValue(&timer); sdkDeleteTimer(&timer); std::ofstream C_output; C_output.open("C.txt"); for (int i = 0; i < Width; i++) { for (int j = 0; j < Width; j++) C_output << C.elements[i * Width + j] << "\t"; C_output << endl; } clock_t tStart = clock(); CPUMatMull(A, B, D); clock_t tim = (clock() - tStart); float timeCPU = (float) tim / CLOCKS_PER_SEC * 1000; double flopsPerMatrixMul = 2.0 * (double) A.width * (double) A.height * (double) B.width; double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (timeCPU / 1000.0f); double gigaFlopsGPU = (flopsPerMatrixMul * 1.0e-9f) / (timeGPU / 1000.0f); printPerformance(timeGPU, timeCPU, flopsPerMatrixMul, gigaFlops, gigaFlopsGPU); int grid = C.width / BLOCK_SIZE; fprintf(fp, "%i,%i,%i,%f,%f,%f,%f\n", Width * Width, grid, BLOCK_SIZE, timeCPU, timeGPU, gigaFlops, gigaFlopsGPU ); check(C, D); }
2e607f20da1daa4f1a40786613be5bb5b11b8896.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <cstdio> #include <hip/hip_runtime.h> #include <device_launch_parameters.h> #include <cmath> using namespace std; const int TILE_WIDTH = 16; __global__ void MatrixMulKernel(int *d_M,int *d_N,int *d_P,int m,int n,int k) { __shared__ int ds_M[TILE_WIDTH][TILE_WIDTH]; __shared__ int ds_N[TILE_WIDTH][TILE_WIDTH]; int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; //Identify the row and column of the Pd element to work on int row = by * TILE_WIDTH + ty; int col = bx * TILE_WIDTH + tx; int pValue = 0; //loop over the Md and Nd tiles required to comput the Pd element for(int t = 0; t < (n-1) / TILE_WIDTH + 1; ++t) { if(row < m && t * TILE_WIDTH + tx < n) ds_M[ty][tx] = d_M[row * n + t * TILE_WIDTH + tx]; else ds_M[ty][tx] = 0; if(col < k && t * TILE_WIDTH + ty < n) ds_N[ty][tx] = d_N[(t * TILE_WIDTH + ty) * k + col]; else ds_N[ty][tx] = 0; __syncthreads(); for(int i = 0; i < TILE_WIDTH; ++i) pValue += ds_M[ty][i] * ds_N[i][tx]; __syncthreads(); } if(row < m && col < k) d_P[row * k + col] = pValue; } int main() { //freopen("out","w",stdout); int m = 600, n = 700, k = 1000; int *h_M, *h_N, *d_M, *d_N; int *h_P, *d_P; size_t sizeM = m * n * sizeof(int); size_t sizeN = n * k * sizeof(int); size_t sizeP = m * k * sizeof(int); h_M = (int *) malloc(sizeM); h_N = (int *) malloc(sizeN); h_P = (int *) malloc(sizeP); hipMalloc(&d_M,sizeM); hipMalloc(&d_N,sizeN); hipMalloc(&d_P,sizeP); for(int i = 0; i < m * n; ++i) { if(i % 2 == 0) h_M[i] = 1; else h_M[i] = 0; } for(int i = 0;i < n * k; ++i) { if(i % 2 == 0) h_N[i] = 0; else h_N[i] = 1; } hipMemcpy(d_M,h_M,sizeM,hipMemcpyHostToDevice); hipMemcpy(d_N,h_N,sizeN,hipMemcpyHostToDevice); hipEvent_t start,stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start,0); dim3 grid((int)ceil(k*1.0 / TILE_WIDTH), (int)ceil(m*1.0/ TILE_WIDTH)); dim3 block(TILE_WIDTH,TILE_WIDTH); hipLaunchKernelGGL(( MatrixMulKernel), dim3(grid),dim3(block), 0, 0, d_M,d_N,d_P,m,n,k); hipEventRecord(stop,0); //hipDeviceSynchronize(); hipEventSynchronize(stop); float ElapsedTime; hipEventElapsedTime(&ElapsedTime,start,stop); printf("Kernel Elpased Time: %.3f ms\n",ElapsedTime); hipMemcpy(h_P,d_P,sizeP,hipMemcpyDeviceToHost); /* for(int i = 0; i < m * k; ++i) printf("%d\n",h_P[i]); printf("\n"); */ free(h_P); free(h_M); free(h_N); hipFree(d_P); hipFree(d_M); hipFree(d_N); return 0; }
2e607f20da1daa4f1a40786613be5bb5b11b8896.cu
#include <iostream> #include <cstdio> #include <cuda_runtime.h> #include <device_launch_parameters.h> #include <cmath> using namespace std; const int TILE_WIDTH = 16; __global__ void MatrixMulKernel(int *d_M,int *d_N,int *d_P,int m,int n,int k) { __shared__ int ds_M[TILE_WIDTH][TILE_WIDTH]; __shared__ int ds_N[TILE_WIDTH][TILE_WIDTH]; int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; //Identify the row and column of the Pd element to work on int row = by * TILE_WIDTH + ty; int col = bx * TILE_WIDTH + tx; int pValue = 0; //loop over the Md and Nd tiles required to comput the Pd element for(int t = 0; t < (n-1) / TILE_WIDTH + 1; ++t) { if(row < m && t * TILE_WIDTH + tx < n) ds_M[ty][tx] = d_M[row * n + t * TILE_WIDTH + tx]; else ds_M[ty][tx] = 0; if(col < k && t * TILE_WIDTH + ty < n) ds_N[ty][tx] = d_N[(t * TILE_WIDTH + ty) * k + col]; else ds_N[ty][tx] = 0; __syncthreads(); for(int i = 0; i < TILE_WIDTH; ++i) pValue += ds_M[ty][i] * ds_N[i][tx]; __syncthreads(); } if(row < m && col < k) d_P[row * k + col] = pValue; } int main() { //freopen("out","w",stdout); int m = 600, n = 700, k = 1000; int *h_M, *h_N, *d_M, *d_N; int *h_P, *d_P; size_t sizeM = m * n * sizeof(int); size_t sizeN = n * k * sizeof(int); size_t sizeP = m * k * sizeof(int); h_M = (int *) malloc(sizeM); h_N = (int *) malloc(sizeN); h_P = (int *) malloc(sizeP); cudaMalloc(&d_M,sizeM); cudaMalloc(&d_N,sizeN); cudaMalloc(&d_P,sizeP); for(int i = 0; i < m * n; ++i) { if(i % 2 == 0) h_M[i] = 1; else h_M[i] = 0; } for(int i = 0;i < n * k; ++i) { if(i % 2 == 0) h_N[i] = 0; else h_N[i] = 1; } cudaMemcpy(d_M,h_M,sizeM,cudaMemcpyHostToDevice); cudaMemcpy(d_N,h_N,sizeN,cudaMemcpyHostToDevice); cudaEvent_t start,stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start,0); dim3 grid((int)ceil(k*1.0 / TILE_WIDTH), (int)ceil(m*1.0/ TILE_WIDTH)); dim3 block(TILE_WIDTH,TILE_WIDTH); MatrixMulKernel<<<grid,block>>>(d_M,d_N,d_P,m,n,k); cudaEventRecord(stop,0); //cudaDeviceSynchronize(); cudaEventSynchronize(stop); float ElapsedTime; cudaEventElapsedTime(&ElapsedTime,start,stop); printf("Kernel Elpased Time: %.3f ms\n",ElapsedTime); cudaMemcpy(h_P,d_P,sizeP,cudaMemcpyDeviceToHost); /* for(int i = 0; i < m * k; ++i) printf("%d\n",h_P[i]); printf("\n"); */ free(h_P); free(h_M); free(h_N); cudaFree(d_P); cudaFree(d_M); cudaFree(d_N); return 0; }
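The tiled kernel above allocates two TILE_WIDTH x TILE_WIDTH integer tiles in shared memory and runs TILE_WIDTH * TILE_WIDTH threads per block, so a candidate tile size has to respect the device limits. A small host-side sketch (illustrative, not from the file) that checks those limits before launching:

#include <cstdio>
#include <cuda_runtime.h>

// Returns true if a tileWidth x tileWidth block with two int tiles in shared
// memory fits the limits reported by the given device.
bool tileFitsDevice(int tileWidth, int device)
{
  cudaDeviceProp prop;
  if (cudaGetDeviceProperties(&prop, device) != cudaSuccess)
    return false;
  size_t sharedNeeded = 2u * tileWidth * tileWidth * sizeof(int);
  int threadsNeeded   = tileWidth * tileWidth;
  printf("tile %d: %zu bytes shared (limit %zu), %d threads (limit %d)\n",
         tileWidth, sharedNeeded, prop.sharedMemPerBlock,
         threadsNeeded, prop.maxThreadsPerBlock);
  return sharedNeeded <= prop.sharedMemPerBlock &&
         threadsNeeded <= prop.maxThreadsPerBlock;
}

With TILE_WIDTH = 16 this is 2 * 16 * 16 * 4 = 2048 bytes of shared memory and 256 threads per block, comfortably within the limits of current devices.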
ec3a1a7d597be913fad7d77657cacd1f2a50d2fb.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <assert.h> #include <hip/hip_runtime.h> #include <device_launch_parameters.h> /** * Matrix multiplication (CUDA Kernel) on the device: C = A * B */ #define TILE_WIDTH 16 __global__ void matrixMulCUDA(float * C, float * A, float * B, int n) { int start_row = blockDim.y * blockIdx.y * TILE_WIDTH + threadIdx.y * TILE_WIDTH; int end_row = start_row + TILE_WIDTH; int start_col = blockDim.x * blockIdx.x * TILE_WIDTH + threadIdx.x * TILE_WIDTH; int end_col = start_col + TILE_WIDTH; for (int row = start_row; row < end_row; row++) { for (int col = start_col; col < end_col; col++) { float C_val = 0; for (int k = 0; k < n; ++k) { float A_elem = A[row * n + k]; float B_elem = B[k * n + col]; C_val += A_elem * B_elem; } C[row * n + col] = C_val; } } } void constantInit(float * data, int size, float val) { for (int i = 0; i < size; ++i) { data[i] = val; } } /** * Run a simple test of matrix multiplication using CUDA */ int matrixMultiply(int argc, char ** argv, int n) { // Allocate host memory for matrices A and B unsigned int size_A = n * n; unsigned int mem_size_A = sizeof(float) * size_A; float * h_A = (float * ) malloc(mem_size_A); unsigned int size_B = n * n; unsigned int mem_size_B = sizeof(float) * size_B; float * h_B = (float * ) malloc(mem_size_B); // Initialize host memory const float valB = 0.01 f; constantInit(h_A, size_A, 1.0 f); constantInit(h_B, size_B, valB); // Allocate device memory float * d_A, * d_B, * d_C; // Allocate host matrix C unsigned int mem_size_C = n * n * sizeof(float); float * h_C = (float * ) malloc(mem_size_C); if (h_C == NULL) { fprintf(stderr, "Failed to allocate host matrix C!\n"); exit(EXIT_FAILURE); } hipError_t error; error = hipMalloc((void ** ) & d_A, mem_size_A); if (error != hipSuccess) { printf("hipMalloc d_A returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__); exit(EXIT_FAILURE); } error = hipMalloc((void ** ) & d_B, mem_size_B); if (error != hipSuccess) { printf("hipMalloc d_B returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__); exit(EXIT_FAILURE); } error = hipMalloc((void ** ) & d_C, mem_size_C); if (error != hipSuccess) { printf("hipMalloc d_C returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__); exit(EXIT_FAILURE); } // copy host memory to device error = hipMemcpy(d_A, h_A, mem_size_A, hipMemcpyHostToDevice); if (error != hipSuccess) { printf("hipMemcpy (d_A,h_A) returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__); exit(EXIT_FAILURE); } error = hipMemcpy(d_B, h_B, mem_size_B, hipMemcpyHostToDevice); if (error != hipSuccess) { printf("hipMemcpy (d_B,h_B) returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__); exit(EXIT_FAILURE); } // Setup execution parameters dim3 threads(16, 16, 1); dim3 grid(32, 32, 1); // Create and start timer printf("Computing result using CUDA Kernel...\n"); // Allocate CUDA events that we'll use for timing hipEvent_t start; error = hipEventCreate( & start); if (error != hipSuccess) { fprintf(stderr, "Failed to create start event (error code %s)!\n", hipGetErrorString(error)); exit(EXIT_FAILURE); } hipEvent_t stop; error = hipEventCreate( & stop); if (error != hipSuccess) { fprintf(stderr, "Failed to create stop event (error code %s)!\n", hipGetErrorString(error)); exit(EXIT_FAILURE); } // Record the start event error = hipEventRecord(start, NULL); if (error != 
hipSuccess) { fprintf(stderr, "Failed to record start event (error code %s)!\n", hipGetErrorString(error)); exit(EXIT_FAILURE); } // Execute the kernel matrixMulCUDA << < grid, threads >> > (d_C, d_A, d_B, n); error = hipGetLastError(); if (error != hipSuccess) { fprintf(stderr, "Failed to launch kernel!\n", hipGetErrorString(error)); exit(EXIT_FAILURE); } // Record the stop event error = hipEventRecord(stop, NULL); if (error != hipSuccess) { fprintf(stderr, "Failed to record stop event (error code %s)!\n", hipGetErrorString(error)); exit(EXIT_FAILURE); } // Wait for the stop event to complete error = hipEventSynchronize(stop); if (error != hipSuccess) { fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", hipGetErrorString(error)); exit(EXIT_FAILURE); } float msecTotal = 0.0 f; error = hipEventElapsedTime( & msecTotal, start, stop); printf("Elapsed time in msec = %f\n", msecTotal); if (error != hipSuccess) { fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", hipGetErrorString(error)); exit(EXIT_FAILURE); } // Copy result from device to host error = hipMemcpy(h_C, d_C, mem_size_C, hipMemcpyDeviceToHost); int i = 0; printf(""); // printf("A: "); // for (i = 0; i < n; i++) { // printf("%f \n", h_A[i]); // } // printf("B: "); // for (i = 0; i < n; i++) { // printf("%f \n", h_B[i]); // } printf("C: "); for (i = 4090; i < 4100; i++) { printf("%f \n", h_C[i]); } if (error != hipSuccess) { printf("hipMemcpy (h_C,d_C) returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__); exit(EXIT_FAILURE); } // Clean up memory free(h_A); free(h_B); free(h_C); hipFree(d_A); hipFree(d_B); hipFree(d_C); return EXIT_SUCCESS; } /** * Program main */ int main(int argc, char ** argv) { printf("[Matrix Multiply Using CUDA] - Starting...\n"); // By default, we use device 0 int devID = 0; hipSetDevice(devID); hipError_t error; hipDeviceProp_t deviceProp; error = hipGetDevice( & devID); if (error != hipSuccess) { printf("hipGetDevice returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__); } error = hipGetDeviceProperties( & deviceProp, devID); if (deviceProp.computeMode == hipComputeModeProhibited) { fprintf(stderr, "Error: device is running in <Compute Mode Prohibited>, no threads can use ::hipSetDevice().\n"); exit(EXIT_SUCCESS); } if (error != hipSuccess) { printf("hipGetDeviceProperties returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__); } else { printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor); } // Size of square matrices size_t n = 0; printf("[-] N = "); scanf("%u", & n); printf("MatrixA(%d,%d), MatrixB(%d,%d)\n", n, n, n, n); int matrix_result = matrixMultiply(argc, argv, n); exit(matrix_result); }
ec3a1a7d597be913fad7d77657cacd1f2a50d2fb.cu
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>

/**
 * Matrix multiplication (CUDA Kernel) on the device: C = A * B
 * Each thread computes one TILE_WIDTH x TILE_WIDTH sub-block of C.
 */
#define TILE_WIDTH 16

__global__ void matrixMulCUDA(float *C, float *A, float *B, int n)
{
    int start_row = blockDim.y * blockIdx.y * TILE_WIDTH + threadIdx.y * TILE_WIDTH;
    int end_row   = start_row + TILE_WIDTH;
    int start_col = blockDim.x * blockIdx.x * TILE_WIDTH + threadIdx.x * TILE_WIDTH;
    int end_col   = start_col + TILE_WIDTH;

    for (int row = start_row; row < end_row; row++) {
        for (int col = start_col; col < end_col; col++) {
            float C_val = 0;
            for (int k = 0; k < n; ++k) {
                float A_elem = A[row * n + k];
                float B_elem = B[k * n + col];
                C_val += A_elem * B_elem;
            }
            C[row * n + col] = C_val;
        }
    }
}

void constantInit(float *data, int size, float val)
{
    for (int i = 0; i < size; ++i) {
        data[i] = val;
    }
}

/**
 * Run a simple test of matrix multiplication using CUDA
 */
int matrixMultiply(int argc, char **argv, int n)
{
    // Allocate host memory for matrices A and B
    unsigned int size_A = n * n;
    unsigned int mem_size_A = sizeof(float) * size_A;
    float *h_A = (float *) malloc(mem_size_A);
    unsigned int size_B = n * n;
    unsigned int mem_size_B = sizeof(float) * size_B;
    float *h_B = (float *) malloc(mem_size_B);

    // Initialize host memory
    const float valB = 0.01f;
    constantInit(h_A, size_A, 1.0f);
    constantInit(h_B, size_B, valB);

    // Allocate device memory
    float *d_A, *d_B, *d_C;

    // Allocate host matrix C
    unsigned int mem_size_C = n * n * sizeof(float);
    float *h_C = (float *) malloc(mem_size_C);
    if (h_C == NULL) {
        fprintf(stderr, "Failed to allocate host matrix C!\n");
        exit(EXIT_FAILURE);
    }

    cudaError_t error;

    error = cudaMalloc((void **) &d_A, mem_size_A);
    if (error != cudaSuccess) {
        printf("cudaMalloc d_A returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
        exit(EXIT_FAILURE);
    }

    error = cudaMalloc((void **) &d_B, mem_size_B);
    if (error != cudaSuccess) {
        printf("cudaMalloc d_B returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
        exit(EXIT_FAILURE);
    }

    error = cudaMalloc((void **) &d_C, mem_size_C);
    if (error != cudaSuccess) {
        printf("cudaMalloc d_C returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
        exit(EXIT_FAILURE);
    }

    // copy host memory to device
    error = cudaMemcpy(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice);
    if (error != cudaSuccess) {
        printf("cudaMemcpy (d_A,h_A) returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
        exit(EXIT_FAILURE);
    }

    error = cudaMemcpy(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice);
    if (error != cudaSuccess) {
        printf("cudaMemcpy (d_B,h_B) returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
        exit(EXIT_FAILURE);
    }

    // Setup execution parameters
    dim3 threads(16, 16, 1);
    dim3 grid(32, 32, 1);

    // Create and start timer
    printf("Computing result using CUDA Kernel...\n");

    // Allocate CUDA events that we'll use for timing
    cudaEvent_t start;
    error = cudaEventCreate(&start);
    if (error != cudaSuccess) {
        fprintf(stderr, "Failed to create start event (error code %s)!\n", cudaGetErrorString(error));
        exit(EXIT_FAILURE);
    }

    cudaEvent_t stop;
    error = cudaEventCreate(&stop);
    if (error != cudaSuccess) {
        fprintf(stderr, "Failed to create stop event (error code %s)!\n", cudaGetErrorString(error));
        exit(EXIT_FAILURE);
    }

    // Record the start event
    error = cudaEventRecord(start, NULL);
    if (error != cudaSuccess) {
        fprintf(stderr, "Failed to record start event (error code %s)!\n", cudaGetErrorString(error));
        exit(EXIT_FAILURE);
    }

    // Execute the kernel
    matrixMulCUDA<<<grid, threads>>>(d_C, d_A, d_B, n);
    error = cudaGetLastError();
    if (error != cudaSuccess) {
        fprintf(stderr, "Failed to launch kernel (error code %s)!\n", cudaGetErrorString(error));
        exit(EXIT_FAILURE);
    }

    // Record the stop event
    error = cudaEventRecord(stop, NULL);
    if (error != cudaSuccess) {
        fprintf(stderr, "Failed to record stop event (error code %s)!\n", cudaGetErrorString(error));
        exit(EXIT_FAILURE);
    }

    // Wait for the stop event to complete
    error = cudaEventSynchronize(stop);
    if (error != cudaSuccess) {
        fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", cudaGetErrorString(error));
        exit(EXIT_FAILURE);
    }

    float msecTotal = 0.0f;
    error = cudaEventElapsedTime(&msecTotal, start, stop);
    printf("Elapsed time in msec = %f\n", msecTotal);
    if (error != cudaSuccess) {
        fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", cudaGetErrorString(error));
        exit(EXIT_FAILURE);
    }

    // Copy result from device to host
    error = cudaMemcpy(h_C, d_C, mem_size_C, cudaMemcpyDeviceToHost);

    int i = 0;
    // printf("A: ");
    // for (i = 0; i < n; i++) {
    //     printf("%f \n", h_A[i]);
    // }
    // printf("B: ");
    // for (i = 0; i < n; i++) {
    //     printf("%f \n", h_B[i]);
    // }
    // Print a small window of the result as a quick sanity check
    printf("C: ");
    for (i = 4090; i < 4100; i++) {
        printf("%f \n", h_C[i]);
    }

    if (error != cudaSuccess) {
        printf("cudaMemcpy (h_C,d_C) returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
        exit(EXIT_FAILURE);
    }

    // Clean up memory
    free(h_A);
    free(h_B);
    free(h_C);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);

    return EXIT_SUCCESS;
}

/**
 * Program main
 */
int main(int argc, char **argv)
{
    printf("[Matrix Multiply Using CUDA] - Starting...\n");

    // By default, we use device 0
    int devID = 0;
    cudaSetDevice(devID);

    cudaError_t error;
    cudaDeviceProp deviceProp;
    error = cudaGetDevice(&devID);
    if (error != cudaSuccess) {
        printf("cudaGetDevice returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
    }

    error = cudaGetDeviceProperties(&deviceProp, devID);
    if (deviceProp.computeMode == cudaComputeModeProhibited) {
        fprintf(stderr, "Error: device is running in <Compute Mode Prohibited>, no threads can use ::cudaSetDevice().\n");
        exit(EXIT_SUCCESS);
    }

    if (error != cudaSuccess) {
        printf("cudaGetDeviceProperties returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
    } else {
        printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor);
    }

    // Size of square matrices
    size_t n = 0;
    printf("[-] N = ");
    scanf("%zu", &n);
    printf("MatrixA(%zu,%zu), MatrixB(%zu,%zu)\n", n, n, n, n);

    int matrix_result = matrixMultiply(argc, argv, (int)n);

    exit(matrix_result);
}
7414c4e74259201cad1a010dd14a66c5ca90b7fc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2009-2019 The Regents of the University of Michigan // This file is part of the HOOMD-blue project, released under the BSD 3-Clause License. #include "CosineSqAngleForceGPU.cuh" #include "hoomd/TextureTools.h" #include <assert.h> // SMALL a relatively small number #define SMALL Scalar(0.001) /*! \file CosineSqAngleForceGPU.cu \brief Defines GPU kernel code for calculating the cosine squared angle forces. Used by CosineSqAngleForceComputeGPU. */ //! Texture for reading angle parameters scalar2_tex_t angle_params_tex; //! Kernel for calculating cosine squared angle forces on the GPU /*! \param d_force Device memory to write computed forces \param d_virial Device memory to write computed virials \param virial_pitch Pitch of 2D virial array \param N number of particles \param d_pos device array of particle positions \param d_params Parameters for the angle force \param box Box dimensions for periodic boundary condition handling \param alist Angle data to use in calculating the forces \param pitch Pitch of 2D angles list \param n_angles_list List of numbers of angles stored on the GPU */ extern "C" __global__ void gpu_compute_cosinesq_angle_forces_kernel(Scalar4* d_force, Scalar* d_virial, const unsigned int virial_pitch, const unsigned int N, const Scalar4 *d_pos, const Scalar2 *d_params, BoxDim box, const group_storage<3> *alist, const unsigned int *apos_list, const unsigned int pitch, const unsigned int *n_angles_list) { // start by identifying which particle we are to handle int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= N) return; // load in the length of the list for this thread (MEM TRANSFER: 4 bytes) int n_angles = n_angles_list[idx]; // read in the position of our b-particle from the a-b-c triplet. 
(MEM TRANSFER: 16 bytes) Scalar4 idx_postype = d_pos[idx]; // we can be either a, b, or c in the a-b-c triplet Scalar3 idx_pos = make_scalar3(idx_postype.x, idx_postype.y, idx_postype.z); Scalar3 a_pos,b_pos,c_pos; // allocate space for the a,b, and c atom in the a-b-c triplet // initialize the force to 0 Scalar4 force_idx = make_scalar4(Scalar(0.0), Scalar(0.0), Scalar(0.0), Scalar(0.0)); Scalar fab[3], fcb[3]; // initialize the virial to 0 Scalar virial[6]; for (int i = 0; i < 6; i++) virial[i] = Scalar(0.0); // loop over all angles for (int angle_idx = 0; angle_idx < n_angles; angle_idx++) { group_storage<3> cur_angle = alist[pitch*angle_idx + idx]; int cur_angle_x_idx = cur_angle.idx[0]; int cur_angle_y_idx = cur_angle.idx[1]; int cur_angle_type = cur_angle.idx[2]; int cur_angle_abc = apos_list[pitch*angle_idx + idx]; // get the a-particle's position (MEM TRANSFER: 16 bytes) Scalar4 x_postype = d_pos[cur_angle_x_idx]; Scalar3 x_pos = make_scalar3(x_postype.x, x_postype.y, x_postype.z); // get the c-particle's position (MEM TRANSFER: 16 bytes) Scalar4 y_postype = d_pos[cur_angle_y_idx]; Scalar3 y_pos = make_scalar3(y_postype.x, y_postype.y, y_postype.z); if (cur_angle_abc == 0) { a_pos = idx_pos; b_pos = x_pos; c_pos = y_pos; } if (cur_angle_abc == 1) { b_pos = idx_pos; a_pos = x_pos; c_pos = y_pos; } if (cur_angle_abc == 2) { c_pos = idx_pos; a_pos = x_pos; b_pos = y_pos; } // calculate dr for a-b,c-b,and a-c Scalar3 dab = a_pos - b_pos; Scalar3 dcb = c_pos - b_pos; Scalar3 dac = a_pos - c_pos; // apply periodic boundary conditions dab = box.minImage(dab); dcb = box.minImage(dcb); dac = box.minImage(dac); // get the angle parameters (MEM TRANSFER: 8 bytes) Scalar2 params = texFetchScalar2(d_params, angle_params_tex, cur_angle_type); Scalar K = params.x; Scalar t_0 = params.y; Scalar rsqab = dot(dab, dab); Scalar rab = fast::sqrt(rsqab); Scalar rsqcb = dot(dcb, dcb); Scalar rcb = fast::sqrt(rsqcb); Scalar c_abbc = dot(dab, dcb); c_abbc /= rab*rcb; // cos(t) if (c_abbc > Scalar(1.0)) c_abbc = Scalar(1.0); if (c_abbc < -Scalar(1.0)) c_abbc = -Scalar(1.0); // actually calculate the force // should the user pass cos(t_0) so that it's not calculated each time for each angle? 
Scalar dcosth = c_abbc - fast::cos(t_0); Scalar tk = K*dcosth; Scalar a = Scalar(1.0) * tk; Scalar a11 = a * c_abbc / rsqab; Scalar a12 = -a / (rab * rcb); Scalar a22 = a * c_abbc / rsqcb; fab[0] = a11*dab.x + a12*dcb.x; fab[1] = a11*dab.y + a12*dcb.y; fab[2] = a11*dab.z + a12*dcb.z; fcb[0] = a22*dcb.x + a12*dab.x; fcb[1] = a22*dcb.y + a12*dab.y; fcb[2] = a22*dcb.z + a12*dab.z; // the rest should be the same as for the harmonic bond // compute 1/3 of the energy, 1/3 for each atom in the angle Scalar angle_eng = tk*dcosth*Scalar(Scalar(1.0)/Scalar(6.0)); // upper triangular version of virial tensor Scalar angle_virial[6]; angle_virial[0] = Scalar(1./3.)*(dab.x*fab[0] + dcb.x*fcb[0]); angle_virial[1] = Scalar(1./3.)*(dab.y*fab[0] + dcb.y*fcb[0]); angle_virial[2] = Scalar(1./3.)*(dab.z*fab[0] + dcb.z*fcb[0]); angle_virial[3] = Scalar(1./3.)*(dab.y*fab[1] + dcb.y*fcb[1]); angle_virial[4] = Scalar(1./3.)*(dab.z*fab[1] + dcb.z*fcb[1]); angle_virial[5] = Scalar(1./3.)*(dab.z*fab[2] + dcb.z*fcb[2]); if (cur_angle_abc == 0) { force_idx.x += fab[0]; force_idx.y += fab[1]; force_idx.z += fab[2]; } if (cur_angle_abc == 1) { force_idx.x -= fab[0] + fcb[0]; force_idx.y -= fab[1] + fcb[1]; force_idx.z -= fab[2] + fcb[2]; } if (cur_angle_abc == 2) { force_idx.x += fcb[0]; force_idx.y += fcb[1]; force_idx.z += fcb[2]; } force_idx.w += angle_eng; for (int i = 0; i < 6; i++) virial[i] += angle_virial[i]; } // now that the force calculation is complete, write out the result (MEM TRANSFER: 20 bytes) d_force[idx] = force_idx; for (int i = 0; i < 6; i++) d_virial[i*virial_pitch+idx] = virial[i]; } /*! \param d_force Device memory to write computed forces \param d_virial Device memory to write computed virials \param virial_pitch pitch of 2D virial array \param N number of particles \param d_pos device array of particle positions \param box Box dimensions (in GPU format) to use for periodic boundary conditions \param atable List of angles stored on the GPU \param pitch Pitch of 2D angles list \param n_angles_list List of numbers of angles stored on the GPU \param d_params K and t_0 params packed as Scalar2 variables \param n_angle_types Number of angle types in d_params \param block_size Block size to use when performing calculations \param compute_capability Device compute capability (200, 300, 350, ...) \returns Any error code resulting from the kernel launch \note Always returns hipSuccess in release builds to avoid the hipDeviceSynchronize() \a d_params should include one Scalar2 element per angle type. The x component contains K the spring constant and the y component contains t_0 the equilibrium angle. 
*/ hipError_t gpu_compute_cosinesq_angle_forces(Scalar4* d_force, Scalar* d_virial, const unsigned int virial_pitch, const unsigned int N, const Scalar4 *d_pos, const BoxDim& box, const group_storage<3> *atable, const unsigned int *apos_list, const unsigned int pitch, const unsigned int *n_angles_list, Scalar2 *d_params, unsigned int n_angle_types, int block_size, const unsigned int compute_capability) { assert(d_params); static unsigned int max_block_size = UINT_MAX; if (max_block_size == UINT_MAX) { hipFuncAttributes attr; hipFuncGetAttributes(&attr, (const void *)gpu_compute_cosinesq_angle_forces_kernel); max_block_size = attr.maxThreadsPerBlock; } unsigned int run_block_size = min(block_size, max_block_size); // setup the grid to run the kernel dim3 grid( N / run_block_size + 1, 1, 1); dim3 threads(run_block_size, 1, 1); // bind the texture on pre sm 35 arches if (compute_capability < 350) { hipError_t error = hipBindTexture(0, angle_params_tex, d_params, sizeof(Scalar2) * n_angle_types); if (error != hipSuccess) return error; } // run the kernel hipLaunchKernelGGL(( gpu_compute_cosinesq_angle_forces_kernel), dim3(grid), dim3(threads), 0, 0, d_force, d_virial, virial_pitch, N, d_pos, d_params, box, atable, apos_list, pitch, n_angles_list); return hipSuccess; }
7414c4e74259201cad1a010dd14a66c5ca90b7fc.cu
// Copyright (c) 2009-2019 The Regents of the University of Michigan // This file is part of the HOOMD-blue project, released under the BSD 3-Clause License. #include "CosineSqAngleForceGPU.cuh" #include "hoomd/TextureTools.h" #include <assert.h> // SMALL a relatively small number #define SMALL Scalar(0.001) /*! \file CosineSqAngleForceGPU.cu \brief Defines GPU kernel code for calculating the cosine squared angle forces. Used by CosineSqAngleForceComputeGPU. */ //! Texture for reading angle parameters scalar2_tex_t angle_params_tex; //! Kernel for calculating cosine squared angle forces on the GPU /*! \param d_force Device memory to write computed forces \param d_virial Device memory to write computed virials \param virial_pitch Pitch of 2D virial array \param N number of particles \param d_pos device array of particle positions \param d_params Parameters for the angle force \param box Box dimensions for periodic boundary condition handling \param alist Angle data to use in calculating the forces \param pitch Pitch of 2D angles list \param n_angles_list List of numbers of angles stored on the GPU */ extern "C" __global__ void gpu_compute_cosinesq_angle_forces_kernel(Scalar4* d_force, Scalar* d_virial, const unsigned int virial_pitch, const unsigned int N, const Scalar4 *d_pos, const Scalar2 *d_params, BoxDim box, const group_storage<3> *alist, const unsigned int *apos_list, const unsigned int pitch, const unsigned int *n_angles_list) { // start by identifying which particle we are to handle int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= N) return; // load in the length of the list for this thread (MEM TRANSFER: 4 bytes) int n_angles = n_angles_list[idx]; // read in the position of our b-particle from the a-b-c triplet. (MEM TRANSFER: 16 bytes) Scalar4 idx_postype = d_pos[idx]; // we can be either a, b, or c in the a-b-c triplet Scalar3 idx_pos = make_scalar3(idx_postype.x, idx_postype.y, idx_postype.z); Scalar3 a_pos,b_pos,c_pos; // allocate space for the a,b, and c atom in the a-b-c triplet // initialize the force to 0 Scalar4 force_idx = make_scalar4(Scalar(0.0), Scalar(0.0), Scalar(0.0), Scalar(0.0)); Scalar fab[3], fcb[3]; // initialize the virial to 0 Scalar virial[6]; for (int i = 0; i < 6; i++) virial[i] = Scalar(0.0); // loop over all angles for (int angle_idx = 0; angle_idx < n_angles; angle_idx++) { group_storage<3> cur_angle = alist[pitch*angle_idx + idx]; int cur_angle_x_idx = cur_angle.idx[0]; int cur_angle_y_idx = cur_angle.idx[1]; int cur_angle_type = cur_angle.idx[2]; int cur_angle_abc = apos_list[pitch*angle_idx + idx]; // get the a-particle's position (MEM TRANSFER: 16 bytes) Scalar4 x_postype = d_pos[cur_angle_x_idx]; Scalar3 x_pos = make_scalar3(x_postype.x, x_postype.y, x_postype.z); // get the c-particle's position (MEM TRANSFER: 16 bytes) Scalar4 y_postype = d_pos[cur_angle_y_idx]; Scalar3 y_pos = make_scalar3(y_postype.x, y_postype.y, y_postype.z); if (cur_angle_abc == 0) { a_pos = idx_pos; b_pos = x_pos; c_pos = y_pos; } if (cur_angle_abc == 1) { b_pos = idx_pos; a_pos = x_pos; c_pos = y_pos; } if (cur_angle_abc == 2) { c_pos = idx_pos; a_pos = x_pos; b_pos = y_pos; } // calculate dr for a-b,c-b,and a-c Scalar3 dab = a_pos - b_pos; Scalar3 dcb = c_pos - b_pos; Scalar3 dac = a_pos - c_pos; // apply periodic boundary conditions dab = box.minImage(dab); dcb = box.minImage(dcb); dac = box.minImage(dac); // get the angle parameters (MEM TRANSFER: 8 bytes) Scalar2 params = texFetchScalar2(d_params, angle_params_tex, cur_angle_type); Scalar K = params.x; 
Scalar t_0 = params.y; Scalar rsqab = dot(dab, dab); Scalar rab = fast::sqrt(rsqab); Scalar rsqcb = dot(dcb, dcb); Scalar rcb = fast::sqrt(rsqcb); Scalar c_abbc = dot(dab, dcb); c_abbc /= rab*rcb; // cos(t) if (c_abbc > Scalar(1.0)) c_abbc = Scalar(1.0); if (c_abbc < -Scalar(1.0)) c_abbc = -Scalar(1.0); // actually calculate the force // should the user pass cos(t_0) so that it's not calculated each time for each angle? Scalar dcosth = c_abbc - fast::cos(t_0); Scalar tk = K*dcosth; Scalar a = Scalar(1.0) * tk; Scalar a11 = a * c_abbc / rsqab; Scalar a12 = -a / (rab * rcb); Scalar a22 = a * c_abbc / rsqcb; fab[0] = a11*dab.x + a12*dcb.x; fab[1] = a11*dab.y + a12*dcb.y; fab[2] = a11*dab.z + a12*dcb.z; fcb[0] = a22*dcb.x + a12*dab.x; fcb[1] = a22*dcb.y + a12*dab.y; fcb[2] = a22*dcb.z + a12*dab.z; // the rest should be the same as for the harmonic bond // compute 1/3 of the energy, 1/3 for each atom in the angle Scalar angle_eng = tk*dcosth*Scalar(Scalar(1.0)/Scalar(6.0)); // upper triangular version of virial tensor Scalar angle_virial[6]; angle_virial[0] = Scalar(1./3.)*(dab.x*fab[0] + dcb.x*fcb[0]); angle_virial[1] = Scalar(1./3.)*(dab.y*fab[0] + dcb.y*fcb[0]); angle_virial[2] = Scalar(1./3.)*(dab.z*fab[0] + dcb.z*fcb[0]); angle_virial[3] = Scalar(1./3.)*(dab.y*fab[1] + dcb.y*fcb[1]); angle_virial[4] = Scalar(1./3.)*(dab.z*fab[1] + dcb.z*fcb[1]); angle_virial[5] = Scalar(1./3.)*(dab.z*fab[2] + dcb.z*fcb[2]); if (cur_angle_abc == 0) { force_idx.x += fab[0]; force_idx.y += fab[1]; force_idx.z += fab[2]; } if (cur_angle_abc == 1) { force_idx.x -= fab[0] + fcb[0]; force_idx.y -= fab[1] + fcb[1]; force_idx.z -= fab[2] + fcb[2]; } if (cur_angle_abc == 2) { force_idx.x += fcb[0]; force_idx.y += fcb[1]; force_idx.z += fcb[2]; } force_idx.w += angle_eng; for (int i = 0; i < 6; i++) virial[i] += angle_virial[i]; } // now that the force calculation is complete, write out the result (MEM TRANSFER: 20 bytes) d_force[idx] = force_idx; for (int i = 0; i < 6; i++) d_virial[i*virial_pitch+idx] = virial[i]; } /*! \param d_force Device memory to write computed forces \param d_virial Device memory to write computed virials \param virial_pitch pitch of 2D virial array \param N number of particles \param d_pos device array of particle positions \param box Box dimensions (in GPU format) to use for periodic boundary conditions \param atable List of angles stored on the GPU \param pitch Pitch of 2D angles list \param n_angles_list List of numbers of angles stored on the GPU \param d_params K and t_0 params packed as Scalar2 variables \param n_angle_types Number of angle types in d_params \param block_size Block size to use when performing calculations \param compute_capability Device compute capability (200, 300, 350, ...) \returns Any error code resulting from the kernel launch \note Always returns cudaSuccess in release builds to avoid the cudaThreadSynchronize() \a d_params should include one Scalar2 element per angle type. The x component contains K the spring constant and the y component contains t_0 the equilibrium angle. 
*/ cudaError_t gpu_compute_cosinesq_angle_forces(Scalar4* d_force, Scalar* d_virial, const unsigned int virial_pitch, const unsigned int N, const Scalar4 *d_pos, const BoxDim& box, const group_storage<3> *atable, const unsigned int *apos_list, const unsigned int pitch, const unsigned int *n_angles_list, Scalar2 *d_params, unsigned int n_angle_types, int block_size, const unsigned int compute_capability) { assert(d_params); static unsigned int max_block_size = UINT_MAX; if (max_block_size == UINT_MAX) { cudaFuncAttributes attr; cudaFuncGetAttributes(&attr, (const void *)gpu_compute_cosinesq_angle_forces_kernel); max_block_size = attr.maxThreadsPerBlock; } unsigned int run_block_size = min(block_size, max_block_size); // setup the grid to run the kernel dim3 grid( N / run_block_size + 1, 1, 1); dim3 threads(run_block_size, 1, 1); // bind the texture on pre sm 35 arches if (compute_capability < 350) { cudaError_t error = cudaBindTexture(0, angle_params_tex, d_params, sizeof(Scalar2) * n_angle_types); if (error != cudaSuccess) return error; } // run the kernel gpu_compute_cosinesq_angle_forces_kernel<<< grid, threads>>>( d_force, d_virial, virial_pitch, N, d_pos, d_params, box, atable, apos_list, pitch, n_angles_list); return cudaSuccess; }
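For reference, this is the algebra behind the a11, a12 and a22 coefficients that appear in both copies of the angle kernel above, reconstructed from the code; the potential is the standard cosine-squared form.

V(\theta) = \frac{K}{2}\left(\cos\theta - \cos\theta_0\right)^2,
\qquad
\cos\theta = \frac{\vec{d}_{ab}\cdot\vec{d}_{cb}}{r_{ab}\,r_{cb}}

a \equiv \frac{\partial V}{\partial\cos\theta} = K\left(\cos\theta - \cos\theta_0\right)
\quad\text{(the kernel's \texttt{tk})}

\vec{F}_a = -\frac{\partial V}{\partial\vec{r}_a} = a_{11}\,\vec{d}_{ab} + a_{12}\,\vec{d}_{cb},
\qquad
\vec{F}_c = a_{22}\,\vec{d}_{cb} + a_{12}\,\vec{d}_{ab},
\qquad
\vec{F}_b = -\left(\vec{F}_a + \vec{F}_c\right)

a_{11} = \frac{a\cos\theta}{r_{ab}^2},
\qquad
a_{12} = -\frac{a}{r_{ab}\,r_{cb}},
\qquad
a_{22} = \frac{a\cos\theta}{r_{cb}^2}

Each of the three atoms in the triplet is credited with one third of V, which is where the factor Scalar(1.0)/Scalar(6.0) in angle_eng comes from: V/3 = K(\cos\theta - \cos\theta_0)^2/6.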
02a52c89515941531384c84bd55b7f185454f36b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /******************************************************* * Copyright (c) 2014, ArrayFire * All rights reserved. * * This file is distributed under 3-clause BSD license. * The complete license agreement can be obtained at: * http://arrayfire.com/licenses/BSD-3-Clause ********************************************************/ #include <af/defines.h> #include <backend.hpp> #include <dispatch.hpp> #include <Param.hpp> #include <debug_cuda.hpp> #include <math.hpp> #include "shared.hpp" #include <convolve.hpp> namespace cuda { namespace kernel { static const dim_type THREADS = 256; static const dim_type THREADS_X = 16; static const dim_type THREADS_Y = 16; static const dim_type CUBE_X = 8; static const dim_type CUBE_Y = 8; static const dim_type CUBE_Z = 4; // below shared MAX_*_LEN's are calculated based on // a maximum shared memory configuration of 48KB per block // considering complex types as well static const dim_type MAX_CONV1_FILTER_LEN = 129; static const dim_type MAX_CONV2_FILTER_LEN = 11; static const dim_type MAX_CONV3_FILTER_LEN = 5; // we shall declare the maximum size required of above all three cases // and re-use the same constant memory locations for every case __constant__ char cFilter[2*(2*(MAX_CONV1_FILTER_LEN-1)+THREADS)*sizeof(double)]; __inline__ __device__ dim_type index(dim_type i, dim_type j, dim_type k, dim_type jstride, dim_type kstride) { return i+j*jstride+k*kstride; } template<typename T, typename accType, bool expand> __global__ void convolve1(Param<T> out, CParam<T> signal, dim_type fLen, dim_type nBBS, dim_type oStep, dim_type sStep) { SharedMemory<T> shared; T * shrdMem = shared.getPointer(); const dim_type padding = fLen-1; const dim_type shrdLen = blockDim.x + 2*padding; const unsigned batchId = blockIdx.x/nBBS; T *dst = (T *)out.ptr + oStep +(batchId*out.strides[1]); const T *src = (const T *)signal.ptr + sStep +(batchId*signal.strides[1]); const accType *impulse = (const accType *)cFilter; dim_type gx = blockDim.x*(blockIdx.x-batchId*nBBS); dim_type s0 = signal.strides[0]; dim_type d0 = signal.dims[0]; for (dim_type i=threadIdx.x; i<shrdLen; i+=blockDim.x) { dim_type idx= gx-padding + i; shrdMem[i] = (idx>=0 && idx<d0) ? src[idx*s0] : scalar<T>(0); } __syncthreads(); gx += threadIdx.x; if (gx<out.dims[0]) { dim_type lx = threadIdx.x + padding + (expand ? 
0 : fLen>>1); accType accum = scalar<accType>(0); for(dim_type f=0; f<fLen; ++f) { accum = accum + (shrdMem[lx-f]*impulse[f]); } dst[gx] = (T)accum; } } template<typename T, typename accType, bool expand, dim_type fLen0, dim_type fLen1> __global__ void convolve2(Param<T> out, CParam<T> signal, dim_type nBBS, dim_type oStep, dim_type sStep) { const size_t C_SIZE = (THREADS_X+2*(fLen0-1))* (THREADS_Y+2*(fLen1-1)); __shared__ T shrdMem[C_SIZE]; const dim_type radius0 = fLen0-1; const dim_type radius1 = fLen1-1; const dim_type padding0 = 2*radius0; const dim_type padding1 = 2*radius1; const dim_type shrdLen0 = THREADS_X + padding0; const dim_type shrdLen1 = THREADS_Y + padding1; unsigned batchId = blockIdx.x/nBBS; T *dst = (T *)out.ptr + oStep + (batchId*out.strides[2]); const T *src = (const T *)signal.ptr + sStep + (batchId*signal.strides[2]); const accType *impulse = (const accType *)cFilter; dim_type lx = threadIdx.x; dim_type ly = threadIdx.y; dim_type gx = THREADS_X * (blockIdx.x-batchId*nBBS) + lx; dim_type gy = THREADS_Y * blockIdx.y + ly; dim_type s0 = signal.strides[0]; dim_type s1 = signal.strides[1]; dim_type d0 = signal.dims[0]; dim_type d1 = signal.dims[1]; // below loops are traditional loops, they only run multiple // times filter length is more than launch size #pragma unroll for (dim_type b=ly, gy2=gy; b<shrdLen1; b+=THREADS_Y, gy2+=THREADS_Y) { dim_type j = gy2-radius1; bool is_j = j>=0 && j<d1; // move row_set THREADS_Y along coloumns #pragma unroll for (dim_type a=lx, gx2=gx; a<shrdLen0; a+=THREADS_X, gx2+=THREADS_X) { dim_type i = gx2-radius0; bool is_i = i>=0 && i<d0; shrdMem[b*shrdLen0+a] = (is_i && is_j ? src[i*s0+j*s1] : scalar<T>(0)); } } __syncthreads(); if (gx<out.dims[0] && gy<out.dims[1]) { dim_type ci = lx + radius0 + (expand ? 0 : fLen0>>1); dim_type cj = ly + radius1 + (expand ? 
0 : fLen1>>1); accType accum = scalar<accType>(0); #pragma unroll for(dim_type fj=0; fj<fLen1; ++fj) { #pragma unroll for(dim_type fi=0; fi<fLen0; ++fi) { accType f_val = impulse[fj*fLen0+fi]; T s_val = shrdMem[(cj-fj)*shrdLen0 + (ci-fi)]; accum = accum + s_val*f_val; } } dst[gy*out.strides[1]+gx] = (T)accum; } } template<typename T> __device__ T readSrc(T const *src, dim_type i, dim_type j, dim_type k, dim_type dims[], dim_type strides[]) { bool is_i = i>=0 && i<dims[0]; bool is_j = j>=0 && j<dims[1]; bool is_k = k>=0 && k<dims[2]; if (is_i && is_j && is_k) return src[(i*strides[0] + j*strides[1] + k*strides[2])]; else return scalar<T>(0); } template<typename T, typename accType, bool expand> __global__ void convolve3(Param<T> out, CParam<T> signal, dim_type fLen0, dim_type fLen1, dim_type fLen2, dim_type nBBS, dim_type oStep, dim_type sStep) { SharedMemory<T> shared; T * shrdMem = shared.getPointer(); dim_type radius0 = fLen0-1; dim_type radius1 = fLen1-1; dim_type radius2 = fLen2-1; dim_type padding0 = 2*radius0; dim_type padding1 = 2*radius1; dim_type padding2 = 2*radius2; dim_type shrdLen0 = blockDim.x + padding0; dim_type skStride = shrdLen0 * (blockDim.y + padding1); dim_type fStride = fLen0 * fLen1; unsigned batchId = blockIdx.x/nBBS; T *dst = (T *)out.ptr + oStep + (batchId*out.strides[3]); const T *src = (const T *)signal.ptr + sStep + (batchId*signal.strides[3]); const accType *impulse = (const accType *)cFilter; dim_type lx = threadIdx.x; dim_type ly = threadIdx.y; dim_type lz = threadIdx.z; dim_type gx = blockDim.x * (blockIdx.x-batchId*nBBS) + lx; dim_type gy = blockDim.y * blockIdx.y + ly; dim_type gz = blockDim.z * blockIdx.z + lz; dim_type lx2 = lx + blockDim.x; dim_type ly2 = ly + blockDim.y; dim_type lz2 = lz + blockDim.z; dim_type gx2 = gx + blockDim.x; dim_type gy2 = gy + blockDim.y; dim_type gz2 = gz + blockDim.z; shrdMem[index(lx, ly, lz, shrdLen0, skStride)] = readSrc(src, gx-radius0, gy-radius1, gz-radius2, signal.dims, signal.strides); if (lx < padding0) { shrdMem[index(lx2, ly, lz, shrdLen0, skStride)] = readSrc(src, gx2-radius0, gy-radius1, gz-radius2, signal.dims, signal.strides); } if (ly < padding1) { shrdMem[index(lx, ly2, lz, shrdLen0, skStride)] = readSrc(src, gx-radius0, gy2-radius1, gz-radius2, signal.dims, signal.strides); } if (lz < padding2) { shrdMem[index(lx, ly, lz2, shrdLen0, skStride)] = readSrc(src, gx-radius0, gy-radius1, gz2-radius2, signal.dims, signal.strides); } if (lx < padding0 && ly < padding1) { shrdMem[index(lx2, ly2, lz, shrdLen0, skStride)] = readSrc(src, gx2-radius0, gy2-radius1, gz-radius2, signal.dims, signal.strides); } if (ly < padding1 && lz < padding2) { shrdMem[index(lx, ly2, lz2, shrdLen0, skStride)] = readSrc(src, gx-radius0, gy2-radius1, gz2-radius2, signal.dims, signal.strides); } if (lz < padding2 && lx < padding0) { shrdMem[index(lx2, ly, lz2, shrdLen0, skStride)] = readSrc(src, gx2-radius0, gy-radius1, gz2-radius2, signal.dims, signal.strides); } if (lx < padding0 && ly < padding1 && lz < padding2) { shrdMem[index(lx2, ly2, lz2, shrdLen0, skStride)] = readSrc(src, gx2-radius0, gy2-radius1, gz2-radius2, signal.dims, signal.strides); } __syncthreads(); if (gx<out.dims[0] && gy<out.dims[1] && gz<out.dims[2]) { dim_type ci = lx + radius0 + (expand ? 0 : fLen0>>1); dim_type cj = ly + radius1 + (expand ? 0 : fLen1>>1); dim_type ck = lz + radius2 + (expand ? 
0 : fLen2>>1); accType accum = scalar<accType>(0); #pragma unroll for(dim_type fk=0; fk<fLen2; ++fk) { #pragma unroll for(dim_type fj=0; fj<fLen1; ++fj) { #pragma unroll for(dim_type fi=0; fi<fLen0; ++fi) { accType f_val = impulse[index(fi, fj, fk, fLen0, fStride)]; T s_val = shrdMem[index(ci-fi, cj-fj, ck-fk, shrdLen0, skStride)]; accum = accum + s_val*f_val; } } } dst[index(gx, gy, gz, out.strides[1], out.strides[2])] = (T)accum; } } template<typename T, dim_type baseDim> void prepareKernelArgs(dim3 &blocks, dim3 &threads, size_t &sharedSize, dim_type &blk_x, ConvolveBatchKind kind, dim_type oDims[], dim_type sDims[], dim_type fDims[]) { dim_type blk_y, blk_z; if (baseDim==1) { threads = dim3(THREADS, 1); blk_x = divup(oDims[0], threads.x); blocks = dim3(blk_x, 1); if (kind==MANY2ONE) blocks.x *= sDims[1]; sharedSize = (threads.x+2*(fDims[0]-1)) * sizeof(T); } else if (baseDim==2) { threads = dim3(THREADS_X, THREADS_Y); blk_x = divup(oDims[0], threads.x); blk_y = divup(oDims[1], threads.y); blocks = dim3(blk_x, blk_y); if (kind==MANY2ONE) blocks.x *= sDims[2]; } else if (baseDim==3) { threads = dim3(CUBE_X, CUBE_Y, CUBE_Z); blk_x = divup(oDims[0], threads.x); blk_y = divup(oDims[1], threads.y); blk_z = divup(oDims[2], threads.z); blocks = dim3(blk_x, blk_y, blk_z); if (kind==MANY2ONE) blocks.x *= sDims[3]; sharedSize = (threads.x+2*(fDims[0]-1)) * (threads.y+2*(fDims[1]-1)) * (threads.z+2*(fDims[2]-1)) * sizeof(T); } } template<typename T, typename aT, bool expand, dim_type f0, dim_type f1> void conv2Helper(dim3 blks, dim3 thrds, Param<T> out, CParam<T> sig, dim_type nBBS, dim_type oStp, dim_type sStp) { (convolve2<T, aT, expand, f0, f1hipLaunchKernelGGL((>)), dim3(blks), dim3(thrds), 0, 0, out, sig, nBBS, oStp, sStp); } template<typename T, typename aT, bool expand, dim_type f0> void conv2Helper(dim3 blks, dim3 thrds, Param<T> out, CParam<T> sig, dim_type f1, dim_type nBBS, dim_type oStp, dim_type sStp) { switch(f1) { case 1: conv2Helper<T, aT, expand, f0, 1>(blks, thrds, out, sig, nBBS, oStp, sStp); break; case 2: conv2Helper<T, aT, expand, f0, 2>(blks, thrds, out, sig, nBBS, oStp, sStp); break; case 3: conv2Helper<T, aT, expand, f0, 3>(blks, thrds, out, sig, nBBS, oStp, sStp); break; case 4: conv2Helper<T, aT, expand, f0, 4>(blks, thrds, out, sig, nBBS, oStp, sStp); break; case 5: conv2Helper<T, aT, expand, f0, 5>(blks, thrds, out, sig, nBBS, oStp, sStp); break; default: CUDA_NOT_SUPPORTED(); } } template<typename T, typename aT, bool expand> void conv2Helper(dim3 blks, dim3 thrds, Param<T> out, CParam<T> sig, dim_type f0, dim_type f1, dim_type nBBS, dim_type oStp, dim_type sStp) { switch(f0) { case 1: conv2Helper<T, aT, expand, 1>(blks, thrds, out, sig, f1, nBBS, oStp, sStp); break; case 2: conv2Helper<T, aT, expand, 2>(blks, thrds, out, sig, f1, nBBS, oStp, sStp); break; case 3: conv2Helper<T, aT, expand, 3>(blks, thrds, out, sig, f1, nBBS, oStp, sStp); break; case 4: conv2Helper<T, aT, expand, 4>(blks, thrds, out, sig, f1, nBBS, oStp, sStp); break; case 5: conv2Helper<T, aT, expand, 5>(blks, thrds, out, sig, f1, nBBS, oStp, sStp); break; default: { if (f0==f1) { switch(f1) { case 6: conv2Helper<T, aT, expand, 6, 6>(blks, thrds, out, sig, nBBS, oStp, sStp); break; case 7: conv2Helper<T, aT, expand, 7, 7>(blks, thrds, out, sig, nBBS, oStp, sStp); break; case 8: conv2Helper<T, aT, expand, 8, 8>(blks, thrds, out, sig, nBBS, oStp, sStp); break; case 9: conv2Helper<T, aT, expand, 9, 9>(blks, thrds, out, sig, nBBS, oStp, sStp); break; case 10: conv2Helper<T, aT, expand, 10, 10>(blks, 
thrds, out, sig, nBBS, oStp, sStp); break; case 11: conv2Helper<T, aT, expand, 11, 11>(blks, thrds, out, sig, nBBS, oStp, sStp); break; default: CUDA_NOT_SUPPORTED(); } } else CUDA_NOT_SUPPORTED(); } break; } } template<typename T, typename accType, dim_type baseDim, bool expand> void convolve_nd(Param<T> out, CParam<T> signal, CParam<accType> filter, ConvolveBatchKind kind) { bool callKernel = true; dim_type MCFL2 = kernel::MAX_CONV2_FILTER_LEN; dim_type MCFL3 = kernel::MAX_CONV3_FILTER_LEN; switch(baseDim) { case 1: if (filter.dims[0]>kernel::MAX_CONV1_FILTER_LEN) callKernel = false; break; case 2: if ((filter.dims[0]*filter.dims[1]) > (MCFL2 * MCFL2)) callKernel = false; break; case 3: if ((filter.dims[0]*filter.dims[1]*filter.dims[2]) > (MCFL3 * MCFL3 * MCFL3)) callKernel = false; break; } if (!callKernel) { CUDA_NOT_SUPPORTED(); } dim_type bCount = 1; dim_type steps[3] = { 0, 0, 0 }; // [0] - output step, [1] - signal step, [2] - filter step if (kind==MANY2MANY) { steps[0] = out.strides[baseDim]; steps[1] = signal.strides[baseDim]; steps[2] = filter.strides[baseDim]; bCount = signal.dims[baseDim]; } else if (kind==ONE2ALL) { steps[0] = out.strides[baseDim]; steps[2] = filter.strides[baseDim]; bCount = filter.dims[baseDim]; } dim3 blocks, threads; dim_type blk_x; size_t sharedSize; prepareKernelArgs<T, baseDim>(blocks, threads, sharedSize, blk_x, kind, out.dims, signal.dims, filter.dims); dim_type filterLen = filter.dims[0]; for(int i=1; i<baseDim; ++i) filterLen *= filter.dims[i]; for (dim_type b=0; b<bCount; ++b) { // FIXME: if the filter array is strided, direct copy of symbols // might cause issues CUDA_CHECK(hipMemcpyToSymbol(kernel::cFilter, filter.ptr+b*steps[2], filterLen*sizeof(accType), 0, hipMemcpyDeviceToDevice)); switch(baseDim) { case 1: (convolve1<T, accType, expandhipLaunchKernelGGL((>)) , dim3(blocks), dim3(threads), sharedSize, 0, out, signal, filter.dims[0], blk_x, b*steps[0], b*steps[1]); break; case 2: conv2Helper<T, accType, expand>(blocks, threads, out, signal, filter.dims[0], filter.dims[1], blk_x, b*steps[0], b*steps[1]); break; case 3: (convolve3<T, accType, expandhipLaunchKernelGGL((>)) , dim3(blocks), dim3(threads), sharedSize, 0, out, signal, filter.dims[0], filter.dims[1], filter.dims[2], blk_x, b*steps[0], b*steps[1]); break; } } POST_LAUNCH_CHECK(); } #define INSTANTIATE(T, accType) \ template void convolve_nd<T, accType, 1, true >(Param<T> out, CParam<T> signal, CParam<accType> filter, ConvolveBatchKind kind);\ template void convolve_nd<T, accType, 1, false>(Param<T> out, CParam<T> signal, CParam<accType> filter, ConvolveBatchKind kind);\ template void convolve_nd<T, accType, 2, true >(Param<T> out, CParam<T> signal, CParam<accType> filter, ConvolveBatchKind kind);\ template void convolve_nd<T, accType, 2, false>(Param<T> out, CParam<T> signal, CParam<accType> filter, ConvolveBatchKind kind);\ template void convolve_nd<T, accType, 3, true >(Param<T> out, CParam<T> signal, CParam<accType> filter, ConvolveBatchKind kind);\ template void convolve_nd<T, accType, 3, false>(Param<T> out, CParam<T> signal, CParam<accType> filter, ConvolveBatchKind kind);\ INSTANTIATE(cdouble, cdouble) INSTANTIATE(cfloat , cfloat) INSTANTIATE(double , double) INSTANTIATE(float , float) INSTANTIATE(uint , float) INSTANTIATE(int , float) INSTANTIATE(uchar , float) INSTANTIATE(char , float) } }
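One note on the hipified convolve file above: the templated kernel launches appear to have been mangled during conversion (the hipLaunchKernelGGL macro is spliced into the template argument lists, e.g. "f0, f1hipLaunchKernelGGL((>))"), so those statements will not compile as written. A hedged sketch of how the convolve2 launch inside conv2Helper is normally expressed in HIP, wrapping the template instantiation in HIP_KERNEL_NAME so the commas survive the preprocessor:

// Sketch only: launch of a templated kernel under HIP.
hipLaunchKernelGGL(HIP_KERNEL_NAME(convolve2<T, aT, expand, f0, f1>),
                   dim3(blks), dim3(thrds), 0, 0,
                   out, sig, nBBS, oStp, sStp);

The convolve1 and convolve3 launches in convolve_nd would take the same form, with sharedSize passed as the dynamic shared-memory argument.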
02a52c89515941531384c84bd55b7f185454f36b.cu
/******************************************************* * Copyright (c) 2014, ArrayFire * All rights reserved. * * This file is distributed under 3-clause BSD license. * The complete license agreement can be obtained at: * http://arrayfire.com/licenses/BSD-3-Clause ********************************************************/ #include <af/defines.h> #include <backend.hpp> #include <dispatch.hpp> #include <Param.hpp> #include <debug_cuda.hpp> #include <math.hpp> #include "shared.hpp" #include <convolve.hpp> namespace cuda { namespace kernel { static const dim_type THREADS = 256; static const dim_type THREADS_X = 16; static const dim_type THREADS_Y = 16; static const dim_type CUBE_X = 8; static const dim_type CUBE_Y = 8; static const dim_type CUBE_Z = 4; // below shared MAX_*_LEN's are calculated based on // a maximum shared memory configuration of 48KB per block // considering complex types as well static const dim_type MAX_CONV1_FILTER_LEN = 129; static const dim_type MAX_CONV2_FILTER_LEN = 11; static const dim_type MAX_CONV3_FILTER_LEN = 5; // we shall declare the maximum size required of above all three cases // and re-use the same constant memory locations for every case __constant__ char cFilter[2*(2*(MAX_CONV1_FILTER_LEN-1)+THREADS)*sizeof(double)]; __inline__ __device__ dim_type index(dim_type i, dim_type j, dim_type k, dim_type jstride, dim_type kstride) { return i+j*jstride+k*kstride; } template<typename T, typename accType, bool expand> __global__ void convolve1(Param<T> out, CParam<T> signal, dim_type fLen, dim_type nBBS, dim_type oStep, dim_type sStep) { SharedMemory<T> shared; T * shrdMem = shared.getPointer(); const dim_type padding = fLen-1; const dim_type shrdLen = blockDim.x + 2*padding; const unsigned batchId = blockIdx.x/nBBS; T *dst = (T *)out.ptr + oStep +(batchId*out.strides[1]); const T *src = (const T *)signal.ptr + sStep +(batchId*signal.strides[1]); const accType *impulse = (const accType *)cFilter; dim_type gx = blockDim.x*(blockIdx.x-batchId*nBBS); dim_type s0 = signal.strides[0]; dim_type d0 = signal.dims[0]; for (dim_type i=threadIdx.x; i<shrdLen; i+=blockDim.x) { dim_type idx= gx-padding + i; shrdMem[i] = (idx>=0 && idx<d0) ? src[idx*s0] : scalar<T>(0); } __syncthreads(); gx += threadIdx.x; if (gx<out.dims[0]) { dim_type lx = threadIdx.x + padding + (expand ? 
0 : fLen>>1); accType accum = scalar<accType>(0); for(dim_type f=0; f<fLen; ++f) { accum = accum + (shrdMem[lx-f]*impulse[f]); } dst[gx] = (T)accum; } } template<typename T, typename accType, bool expand, dim_type fLen0, dim_type fLen1> __global__ void convolve2(Param<T> out, CParam<T> signal, dim_type nBBS, dim_type oStep, dim_type sStep) { const size_t C_SIZE = (THREADS_X+2*(fLen0-1))* (THREADS_Y+2*(fLen1-1)); __shared__ T shrdMem[C_SIZE]; const dim_type radius0 = fLen0-1; const dim_type radius1 = fLen1-1; const dim_type padding0 = 2*radius0; const dim_type padding1 = 2*radius1; const dim_type shrdLen0 = THREADS_X + padding0; const dim_type shrdLen1 = THREADS_Y + padding1; unsigned batchId = blockIdx.x/nBBS; T *dst = (T *)out.ptr + oStep + (batchId*out.strides[2]); const T *src = (const T *)signal.ptr + sStep + (batchId*signal.strides[2]); const accType *impulse = (const accType *)cFilter; dim_type lx = threadIdx.x; dim_type ly = threadIdx.y; dim_type gx = THREADS_X * (blockIdx.x-batchId*nBBS) + lx; dim_type gy = THREADS_Y * blockIdx.y + ly; dim_type s0 = signal.strides[0]; dim_type s1 = signal.strides[1]; dim_type d0 = signal.dims[0]; dim_type d1 = signal.dims[1]; // below loops are traditional loops, they only run multiple // times filter length is more than launch size #pragma unroll for (dim_type b=ly, gy2=gy; b<shrdLen1; b+=THREADS_Y, gy2+=THREADS_Y) { dim_type j = gy2-radius1; bool is_j = j>=0 && j<d1; // move row_set THREADS_Y along coloumns #pragma unroll for (dim_type a=lx, gx2=gx; a<shrdLen0; a+=THREADS_X, gx2+=THREADS_X) { dim_type i = gx2-radius0; bool is_i = i>=0 && i<d0; shrdMem[b*shrdLen0+a] = (is_i && is_j ? src[i*s0+j*s1] : scalar<T>(0)); } } __syncthreads(); if (gx<out.dims[0] && gy<out.dims[1]) { dim_type ci = lx + radius0 + (expand ? 0 : fLen0>>1); dim_type cj = ly + radius1 + (expand ? 
0 : fLen1>>1); accType accum = scalar<accType>(0); #pragma unroll for(dim_type fj=0; fj<fLen1; ++fj) { #pragma unroll for(dim_type fi=0; fi<fLen0; ++fi) { accType f_val = impulse[fj*fLen0+fi]; T s_val = shrdMem[(cj-fj)*shrdLen0 + (ci-fi)]; accum = accum + s_val*f_val; } } dst[gy*out.strides[1]+gx] = (T)accum; } } template<typename T> __device__ T readSrc(T const *src, dim_type i, dim_type j, dim_type k, dim_type dims[], dim_type strides[]) { bool is_i = i>=0 && i<dims[0]; bool is_j = j>=0 && j<dims[1]; bool is_k = k>=0 && k<dims[2]; if (is_i && is_j && is_k) return src[(i*strides[0] + j*strides[1] + k*strides[2])]; else return scalar<T>(0); } template<typename T, typename accType, bool expand> __global__ void convolve3(Param<T> out, CParam<T> signal, dim_type fLen0, dim_type fLen1, dim_type fLen2, dim_type nBBS, dim_type oStep, dim_type sStep) { SharedMemory<T> shared; T * shrdMem = shared.getPointer(); dim_type radius0 = fLen0-1; dim_type radius1 = fLen1-1; dim_type radius2 = fLen2-1; dim_type padding0 = 2*radius0; dim_type padding1 = 2*radius1; dim_type padding2 = 2*radius2; dim_type shrdLen0 = blockDim.x + padding0; dim_type skStride = shrdLen0 * (blockDim.y + padding1); dim_type fStride = fLen0 * fLen1; unsigned batchId = blockIdx.x/nBBS; T *dst = (T *)out.ptr + oStep + (batchId*out.strides[3]); const T *src = (const T *)signal.ptr + sStep + (batchId*signal.strides[3]); const accType *impulse = (const accType *)cFilter; dim_type lx = threadIdx.x; dim_type ly = threadIdx.y; dim_type lz = threadIdx.z; dim_type gx = blockDim.x * (blockIdx.x-batchId*nBBS) + lx; dim_type gy = blockDim.y * blockIdx.y + ly; dim_type gz = blockDim.z * blockIdx.z + lz; dim_type lx2 = lx + blockDim.x; dim_type ly2 = ly + blockDim.y; dim_type lz2 = lz + blockDim.z; dim_type gx2 = gx + blockDim.x; dim_type gy2 = gy + blockDim.y; dim_type gz2 = gz + blockDim.z; shrdMem[index(lx, ly, lz, shrdLen0, skStride)] = readSrc(src, gx-radius0, gy-radius1, gz-radius2, signal.dims, signal.strides); if (lx < padding0) { shrdMem[index(lx2, ly, lz, shrdLen0, skStride)] = readSrc(src, gx2-radius0, gy-radius1, gz-radius2, signal.dims, signal.strides); } if (ly < padding1) { shrdMem[index(lx, ly2, lz, shrdLen0, skStride)] = readSrc(src, gx-radius0, gy2-radius1, gz-radius2, signal.dims, signal.strides); } if (lz < padding2) { shrdMem[index(lx, ly, lz2, shrdLen0, skStride)] = readSrc(src, gx-radius0, gy-radius1, gz2-radius2, signal.dims, signal.strides); } if (lx < padding0 && ly < padding1) { shrdMem[index(lx2, ly2, lz, shrdLen0, skStride)] = readSrc(src, gx2-radius0, gy2-radius1, gz-radius2, signal.dims, signal.strides); } if (ly < padding1 && lz < padding2) { shrdMem[index(lx, ly2, lz2, shrdLen0, skStride)] = readSrc(src, gx-radius0, gy2-radius1, gz2-radius2, signal.dims, signal.strides); } if (lz < padding2 && lx < padding0) { shrdMem[index(lx2, ly, lz2, shrdLen0, skStride)] = readSrc(src, gx2-radius0, gy-radius1, gz2-radius2, signal.dims, signal.strides); } if (lx < padding0 && ly < padding1 && lz < padding2) { shrdMem[index(lx2, ly2, lz2, shrdLen0, skStride)] = readSrc(src, gx2-radius0, gy2-radius1, gz2-radius2, signal.dims, signal.strides); } __syncthreads(); if (gx<out.dims[0] && gy<out.dims[1] && gz<out.dims[2]) { dim_type ci = lx + radius0 + (expand ? 0 : fLen0>>1); dim_type cj = ly + radius1 + (expand ? 0 : fLen1>>1); dim_type ck = lz + radius2 + (expand ? 
0 : fLen2>>1); accType accum = scalar<accType>(0); #pragma unroll for(dim_type fk=0; fk<fLen2; ++fk) { #pragma unroll for(dim_type fj=0; fj<fLen1; ++fj) { #pragma unroll for(dim_type fi=0; fi<fLen0; ++fi) { accType f_val = impulse[index(fi, fj, fk, fLen0, fStride)]; T s_val = shrdMem[index(ci-fi, cj-fj, ck-fk, shrdLen0, skStride)]; accum = accum + s_val*f_val; } } } dst[index(gx, gy, gz, out.strides[1], out.strides[2])] = (T)accum; } } template<typename T, dim_type baseDim> void prepareKernelArgs(dim3 &blocks, dim3 &threads, size_t &sharedSize, dim_type &blk_x, ConvolveBatchKind kind, dim_type oDims[], dim_type sDims[], dim_type fDims[]) { dim_type blk_y, blk_z; if (baseDim==1) { threads = dim3(THREADS, 1); blk_x = divup(oDims[0], threads.x); blocks = dim3(blk_x, 1); if (kind==MANY2ONE) blocks.x *= sDims[1]; sharedSize = (threads.x+2*(fDims[0]-1)) * sizeof(T); } else if (baseDim==2) { threads = dim3(THREADS_X, THREADS_Y); blk_x = divup(oDims[0], threads.x); blk_y = divup(oDims[1], threads.y); blocks = dim3(blk_x, blk_y); if (kind==MANY2ONE) blocks.x *= sDims[2]; } else if (baseDim==3) { threads = dim3(CUBE_X, CUBE_Y, CUBE_Z); blk_x = divup(oDims[0], threads.x); blk_y = divup(oDims[1], threads.y); blk_z = divup(oDims[2], threads.z); blocks = dim3(blk_x, blk_y, blk_z); if (kind==MANY2ONE) blocks.x *= sDims[3]; sharedSize = (threads.x+2*(fDims[0]-1)) * (threads.y+2*(fDims[1]-1)) * (threads.z+2*(fDims[2]-1)) * sizeof(T); } } template<typename T, typename aT, bool expand, dim_type f0, dim_type f1> void conv2Helper(dim3 blks, dim3 thrds, Param<T> out, CParam<T> sig, dim_type nBBS, dim_type oStp, dim_type sStp) { (convolve2<T, aT, expand, f0, f1>)<<<blks, thrds>>>(out, sig, nBBS, oStp, sStp); } template<typename T, typename aT, bool expand, dim_type f0> void conv2Helper(dim3 blks, dim3 thrds, Param<T> out, CParam<T> sig, dim_type f1, dim_type nBBS, dim_type oStp, dim_type sStp) { switch(f1) { case 1: conv2Helper<T, aT, expand, f0, 1>(blks, thrds, out, sig, nBBS, oStp, sStp); break; case 2: conv2Helper<T, aT, expand, f0, 2>(blks, thrds, out, sig, nBBS, oStp, sStp); break; case 3: conv2Helper<T, aT, expand, f0, 3>(blks, thrds, out, sig, nBBS, oStp, sStp); break; case 4: conv2Helper<T, aT, expand, f0, 4>(blks, thrds, out, sig, nBBS, oStp, sStp); break; case 5: conv2Helper<T, aT, expand, f0, 5>(blks, thrds, out, sig, nBBS, oStp, sStp); break; default: CUDA_NOT_SUPPORTED(); } } template<typename T, typename aT, bool expand> void conv2Helper(dim3 blks, dim3 thrds, Param<T> out, CParam<T> sig, dim_type f0, dim_type f1, dim_type nBBS, dim_type oStp, dim_type sStp) { switch(f0) { case 1: conv2Helper<T, aT, expand, 1>(blks, thrds, out, sig, f1, nBBS, oStp, sStp); break; case 2: conv2Helper<T, aT, expand, 2>(blks, thrds, out, sig, f1, nBBS, oStp, sStp); break; case 3: conv2Helper<T, aT, expand, 3>(blks, thrds, out, sig, f1, nBBS, oStp, sStp); break; case 4: conv2Helper<T, aT, expand, 4>(blks, thrds, out, sig, f1, nBBS, oStp, sStp); break; case 5: conv2Helper<T, aT, expand, 5>(blks, thrds, out, sig, f1, nBBS, oStp, sStp); break; default: { if (f0==f1) { switch(f1) { case 6: conv2Helper<T, aT, expand, 6, 6>(blks, thrds, out, sig, nBBS, oStp, sStp); break; case 7: conv2Helper<T, aT, expand, 7, 7>(blks, thrds, out, sig, nBBS, oStp, sStp); break; case 8: conv2Helper<T, aT, expand, 8, 8>(blks, thrds, out, sig, nBBS, oStp, sStp); break; case 9: conv2Helper<T, aT, expand, 9, 9>(blks, thrds, out, sig, nBBS, oStp, sStp); break; case 10: conv2Helper<T, aT, expand, 10, 10>(blks, thrds, out, sig, nBBS, oStp, sStp); 
break; case 11: conv2Helper<T, aT, expand, 11, 11>(blks, thrds, out, sig, nBBS, oStp, sStp); break; default: CUDA_NOT_SUPPORTED(); } } else CUDA_NOT_SUPPORTED(); } break; } } template<typename T, typename accType, dim_type baseDim, bool expand> void convolve_nd(Param<T> out, CParam<T> signal, CParam<accType> filter, ConvolveBatchKind kind) { bool callKernel = true; dim_type MCFL2 = kernel::MAX_CONV2_FILTER_LEN; dim_type MCFL3 = kernel::MAX_CONV3_FILTER_LEN; switch(baseDim) { case 1: if (filter.dims[0]>kernel::MAX_CONV1_FILTER_LEN) callKernel = false; break; case 2: if ((filter.dims[0]*filter.dims[1]) > (MCFL2 * MCFL2)) callKernel = false; break; case 3: if ((filter.dims[0]*filter.dims[1]*filter.dims[2]) > (MCFL3 * MCFL3 * MCFL3)) callKernel = false; break; } if (!callKernel) { CUDA_NOT_SUPPORTED(); } dim_type bCount = 1; dim_type steps[3] = { 0, 0, 0 }; // [0] - output step, [1] - signal step, [2] - filter step if (kind==MANY2MANY) { steps[0] = out.strides[baseDim]; steps[1] = signal.strides[baseDim]; steps[2] = filter.strides[baseDim]; bCount = signal.dims[baseDim]; } else if (kind==ONE2ALL) { steps[0] = out.strides[baseDim]; steps[2] = filter.strides[baseDim]; bCount = filter.dims[baseDim]; } dim3 blocks, threads; dim_type blk_x; size_t sharedSize; prepareKernelArgs<T, baseDim>(blocks, threads, sharedSize, blk_x, kind, out.dims, signal.dims, filter.dims); dim_type filterLen = filter.dims[0]; for(int i=1; i<baseDim; ++i) filterLen *= filter.dims[i]; for (dim_type b=0; b<bCount; ++b) { // FIXME: if the filter array is strided, direct copy of symbols // might cause issues CUDA_CHECK(cudaMemcpyToSymbol(kernel::cFilter, filter.ptr+b*steps[2], filterLen*sizeof(accType), 0, cudaMemcpyDeviceToDevice)); switch(baseDim) { case 1: (convolve1<T, accType, expand>) <<<blocks, threads, sharedSize>>>(out, signal, filter.dims[0], blk_x, b*steps[0], b*steps[1]); break; case 2: conv2Helper<T, accType, expand>(blocks, threads, out, signal, filter.dims[0], filter.dims[1], blk_x, b*steps[0], b*steps[1]); break; case 3: (convolve3<T, accType, expand>) <<<blocks, threads, sharedSize>>>(out, signal, filter.dims[0], filter.dims[1], filter.dims[2], blk_x, b*steps[0], b*steps[1]); break; } } POST_LAUNCH_CHECK(); } #define INSTANTIATE(T, accType) \ template void convolve_nd<T, accType, 1, true >(Param<T> out, CParam<T> signal, CParam<accType> filter, ConvolveBatchKind kind);\ template void convolve_nd<T, accType, 1, false>(Param<T> out, CParam<T> signal, CParam<accType> filter, ConvolveBatchKind kind);\ template void convolve_nd<T, accType, 2, true >(Param<T> out, CParam<T> signal, CParam<accType> filter, ConvolveBatchKind kind);\ template void convolve_nd<T, accType, 2, false>(Param<T> out, CParam<T> signal, CParam<accType> filter, ConvolveBatchKind kind);\ template void convolve_nd<T, accType, 3, true >(Param<T> out, CParam<T> signal, CParam<accType> filter, ConvolveBatchKind kind);\ template void convolve_nd<T, accType, 3, false>(Param<T> out, CParam<T> signal, CParam<accType> filter, ConvolveBatchKind kind);\ INSTANTIATE(cdouble, cdouble) INSTANTIATE(cfloat , cfloat) INSTANTIATE(double , double) INSTANTIATE(float , float) INSTANTIATE(uint , float) INSTANTIATE(int , float) INSTANTIATE(uchar , float) INSTANTIATE(char , float) } }
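As a small host-side cross-check of the indexing used by convolve1 in both files above, this is what the non-expand ("same") path computes per output element as I read it, with samples outside the signal treated as zero; the standalone function and its name are mine, and it uses plain float in place of the templated T/accType.

// Reference for one 1D column: out[x] = sum_f filter[f] * signal[x + fLen/2 - f],
// zero-padded outside [0, n).
static void convolve1_same_ref(float *out, const float *signal, int n,
                               const float *filter, int fLen)
{
    for (int x = 0; x < n; ++x) {
        float acc = 0.0f;
        for (int f = 0; f < fLen; ++f) {
            int s = x + (fLen >> 1) - f;
            if (s >= 0 && s < n)
                acc += signal[s] * filter[f];
        }
        out[x] = acc;
    }
}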
b90fb637d47e00fe7640c8e555f55f259bb7af37.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //Compilar com: nvcc -gencode arch=compute_50,code=[sm_50,compute_50] mandelbrot_cuda.cu -o mandelbrot -Wno-deprecated-gpu-targets #include <stdio.h> #include <stdlib.h> #include <math.h> #include <time.h> #include <sys/time.h> struct timer_info { clock_t c_start; clock_t c_end; struct timespec t_start; struct timespec t_end; struct timeval v_start; struct timeval v_end; }; struct timer_info timer; double c_x_min; double c_x_max; double c_y_min; double c_y_max; double pixel_width; double pixel_height; int iteration_max = 200; int image_size; unsigned char *image_buffer_red; unsigned char *image_buffer_green; unsigned char *image_buffer_blue; unsigned char *dev_image_buffer_red; unsigned char *dev_image_buffer_blue; unsigned char *dev_image_buffer_green; unsigned char **pixels; int block_dim_x; int block_dim_y; int i_x_max; int i_y_max; int image_buffer_size; int gradient_size = 16; int color_red[17] = {66, 25, 9, 4, 0, 12, 24, 57, 134, 211, 241, 248, 255, 204, 153, 106, 16}; int color_green[17] = {30, 7, 1, 4, 7, 44, 82, 125, 181, 236, 233, 201, 170, 128, 87, 52, 16}; int color_blue[17] = {15, 26, 47, 73, 100, 138, 177, 209, 229, 248, 191, 95, 0, 0, 0, 3, 16}; int *dev_color_red; int *dev_color_green; int *dev_color_blue; void allocate_image_buffer() { //int rgb_size = 3; image_buffer_red = (unsigned char *)malloc(sizeof(unsigned char) * image_buffer_size); hipMalloc((void**)&dev_image_buffer_red, image_buffer_size * sizeof(unsigned char)); image_buffer_green = (unsigned char *)malloc(sizeof(unsigned char) * image_buffer_size); hipMalloc((void**)&dev_image_buffer_green, image_buffer_size * sizeof(unsigned char)); image_buffer_blue = (unsigned char *)malloc(sizeof(unsigned char) * image_buffer_size); hipMalloc((void**)&dev_image_buffer_blue, image_buffer_size * sizeof(unsigned char)); }; void init(int argc, char *argv[]) { if (argc < 8) { printf("usage: ./mandelbrot_seq c_x_min c_x_max c_y_min c_y_max image_size dimX dimY\n"); printf("examples with image_size = 11500:\n"); printf(" Full Picture: ./mandelbrot_seq -2.5 1.5 -2.0 2.0 4096 8 64\n"); printf(" Seahorse Valley: ./mandelbrot_seq -0.8 -0.7 0.05 0.15 4096 32 32\n"); printf(" Elephant Valley: ./mandelbrot_seq 0.175 0.375 -0.1 0.1 4096 16 64\n"); printf(" Triple Spiral Valley: ./mandelbrot_seq -0.188 -0.012 0.554 0.754 4096 1 32\n"); exit(0); } else { sscanf(argv[1], "%lf", &c_x_min); sscanf(argv[2], "%lf", &c_x_max); sscanf(argv[3], "%lf", &c_y_min); sscanf(argv[4], "%lf", &c_y_max); sscanf(argv[5], "%d", &image_size); sscanf(argv[6], "%d", &block_dim_x); sscanf(argv[7], "%d", &block_dim_y); i_x_max = image_size; i_y_max = image_size; image_buffer_size = image_size * image_size; pixel_width = (c_x_max - c_x_min) / i_x_max; pixel_height = (c_y_max - c_y_min) / i_y_max; }; }; void init_colors(){ int color_size = 17 * sizeof(int); hipMalloc((void**)&dev_color_red, color_size); hipMalloc((void**)&dev_color_green, color_size); hipMalloc((void**)&dev_color_blue, color_size); hipMemcpy(dev_color_red, color_red, color_size, hipMemcpyHostToDevice); hipMemcpy(dev_color_green, color_green, color_size, hipMemcpyHostToDevice); hipMemcpy(dev_color_blue, color_blue, color_size, hipMemcpyHostToDevice); } void allocate_pixels(){ int rgb_size = 3; pixels = (unsigned char **) malloc(sizeof(unsigned char *) * image_buffer_size); for(int i = 0; i < image_buffer_size; i++){ pixels[i] = (unsigned char *) malloc(sizeof(unsigned char) * rgb_size); }; } void set_pixels(){ 
for(int i = 0; i < image_buffer_size; i++){ pixels[i][0] = image_buffer_red[i]; pixels[i][1] = image_buffer_green[i]; pixels[i][2] = image_buffer_blue[i]; } } void write_to_file() { FILE *file; const char *filename = "output.ppm"; const char *comment = "# "; int max_color_component_value = 255; file = fopen(filename, "wb"); fprintf(file, "P6\n %s\n %d\n %d\n %d\n", comment, i_x_max, i_y_max, max_color_component_value); for(int i = 0; i < image_buffer_size; i++){ fwrite(pixels[i], 1 , 3, file); }; fclose(file); }; __device__ void update_rgb_buffer(int iteration, int x, int y, int image_size, unsigned char *image_buffer_red, unsigned char *image_buffer_green, unsigned char *image_buffer_blue, int *color_red, int *color_green, int *color_blue) { int gradient_size = 16; int iteration_max = 200; int color; if (iteration == iteration_max) { image_buffer_red[(image_size * y) + x] = color_red[gradient_size]; image_buffer_green[(image_size * y) + x] = color_green[gradient_size]; image_buffer_blue[(image_size * y) + x] = color_blue[gradient_size]; } else { color = iteration % gradient_size; image_buffer_red[(image_size * y) + x] = color_red[color]; image_buffer_green[(image_size * y) + x] = color_green[color]; image_buffer_blue[(image_size * y) + x] = color_blue[color]; }; }; __device__ int mandelbrot(double c_x, double c_y) { double z_x = 0; double z_y = 0; double z_x_squared = 0; double z_y_squared = 0; double escape_radius_squared = 4; int iteration_max = 200; int iteration; for (iteration = 0; iteration < iteration_max && ((z_x_squared + z_y_squared) < escape_radius_squared); iteration++) { z_y = 2 * z_x * z_y + c_y; z_x = z_x_squared - z_y_squared + c_x; z_x_squared = z_x * z_x; z_y_squared = z_y * z_y; }; return iteration; } __global__ void compute_mandelbrot(unsigned char *image_buffer_red, unsigned char *image_buffer_green, unsigned char *image_buffer_blue, double c_x_min, double c_y_min, double pixel_width, double pixel_height, int image_size, int *color_red, int *color_green, int *color_blue){ int i_x; int i_y; int iteration; i_x = blockIdx.x*blockDim.x+threadIdx.x; i_y = blockIdx.y*blockDim.y+threadIdx.y; double c_y = c_y_min + i_y * pixel_height; if(fabs(c_y) < pixel_height / 2){ c_y = 0.0; }; double c_x = c_x_min + i_x * pixel_width; iteration = mandelbrot(c_x, c_y); update_rgb_buffer(iteration, i_x, i_y, image_size, image_buffer_red, image_buffer_green, image_buffer_blue, color_red, color_green, color_blue); } int main(int argc, char *argv[]) { init(argc, argv); allocate_image_buffer(); init_colors(); int time; dim3 dimBlock(block_dim_x, block_dim_y); dim3 dimGrid((int)ceil(image_size/dimBlock.x),(int)ceil(image_size/dimBlock.y)); //MEDICAO DE TEMPO timer.c_start = clock(); clock_gettime(CLOCK_MONOTONIC, &timer.t_start); gettimeofday(&timer.v_start, NULL); hipLaunchKernelGGL(( compute_mandelbrot) , dim3(dimGrid), dim3(dimBlock), 0, 0, dev_image_buffer_red, dev_image_buffer_green, dev_image_buffer_blue, c_x_min, c_y_min, pixel_width, pixel_height, image_size, dev_color_red, dev_color_green, dev_color_blue); hipMemcpy(image_buffer_red, dev_image_buffer_red, sizeof(unsigned char) * image_buffer_size, hipMemcpyDeviceToHost); hipMemcpy(image_buffer_green, dev_image_buffer_green, sizeof(unsigned char) * image_buffer_size, hipMemcpyDeviceToHost); hipMemcpy(image_buffer_blue, dev_image_buffer_blue, sizeof(unsigned char) * image_buffer_size, hipMemcpyDeviceToHost); timer.c_end = clock(); clock_gettime(CLOCK_MONOTONIC, &timer.t_end); gettimeofday(&timer.v_end, NULL); //FIM DA MEDICAO 
hipFree(dev_color_red); hipFree(dev_color_green); hipFree(dev_color_blue); allocate_pixels(); set_pixels(); hipFree(dev_image_buffer_red); hipFree(dev_image_buffer_green); hipFree(dev_image_buffer_blue); write_to_file(); printf("%f\n", (double) (timer.t_end.tv_sec - timer.t_start.tv_sec) + (double) (timer.t_end.tv_nsec - timer.t_start.tv_nsec) / 1000000000.0); time = (double) (timer.t_end.tv_sec - timer.t_start.tv_sec) + (double) (timer.t_end.tv_nsec - timer.t_start.tv_nsec) / 1000000000.0; return time; };
b90fb637d47e00fe7640c8e555f55f259bb7af37.cu
//Compilar com: nvcc -gencode arch=compute_50,code=[sm_50,compute_50] mandelbrot_cuda.cu -o mandelbrot -Wno-deprecated-gpu-targets #include <stdio.h> #include <stdlib.h> #include <math.h> #include <time.h> #include <sys/time.h> struct timer_info { clock_t c_start; clock_t c_end; struct timespec t_start; struct timespec t_end; struct timeval v_start; struct timeval v_end; }; struct timer_info timer; double c_x_min; double c_x_max; double c_y_min; double c_y_max; double pixel_width; double pixel_height; int iteration_max = 200; int image_size; unsigned char *image_buffer_red; unsigned char *image_buffer_green; unsigned char *image_buffer_blue; unsigned char *dev_image_buffer_red; unsigned char *dev_image_buffer_blue; unsigned char *dev_image_buffer_green; unsigned char **pixels; int block_dim_x; int block_dim_y; int i_x_max; int i_y_max; int image_buffer_size; int gradient_size = 16; int color_red[17] = {66, 25, 9, 4, 0, 12, 24, 57, 134, 211, 241, 248, 255, 204, 153, 106, 16}; int color_green[17] = {30, 7, 1, 4, 7, 44, 82, 125, 181, 236, 233, 201, 170, 128, 87, 52, 16}; int color_blue[17] = {15, 26, 47, 73, 100, 138, 177, 209, 229, 248, 191, 95, 0, 0, 0, 3, 16}; int *dev_color_red; int *dev_color_green; int *dev_color_blue; void allocate_image_buffer() { //int rgb_size = 3; image_buffer_red = (unsigned char *)malloc(sizeof(unsigned char) * image_buffer_size); cudaMalloc((void**)&dev_image_buffer_red, image_buffer_size * sizeof(unsigned char)); image_buffer_green = (unsigned char *)malloc(sizeof(unsigned char) * image_buffer_size); cudaMalloc((void**)&dev_image_buffer_green, image_buffer_size * sizeof(unsigned char)); image_buffer_blue = (unsigned char *)malloc(sizeof(unsigned char) * image_buffer_size); cudaMalloc((void**)&dev_image_buffer_blue, image_buffer_size * sizeof(unsigned char)); }; void init(int argc, char *argv[]) { if (argc < 8) { printf("usage: ./mandelbrot_seq c_x_min c_x_max c_y_min c_y_max image_size dimX dimY\n"); printf("examples with image_size = 11500:\n"); printf(" Full Picture: ./mandelbrot_seq -2.5 1.5 -2.0 2.0 4096 8 64\n"); printf(" Seahorse Valley: ./mandelbrot_seq -0.8 -0.7 0.05 0.15 4096 32 32\n"); printf(" Elephant Valley: ./mandelbrot_seq 0.175 0.375 -0.1 0.1 4096 16 64\n"); printf(" Triple Spiral Valley: ./mandelbrot_seq -0.188 -0.012 0.554 0.754 4096 1 32\n"); exit(0); } else { sscanf(argv[1], "%lf", &c_x_min); sscanf(argv[2], "%lf", &c_x_max); sscanf(argv[3], "%lf", &c_y_min); sscanf(argv[4], "%lf", &c_y_max); sscanf(argv[5], "%d", &image_size); sscanf(argv[6], "%d", &block_dim_x); sscanf(argv[7], "%d", &block_dim_y); i_x_max = image_size; i_y_max = image_size; image_buffer_size = image_size * image_size; pixel_width = (c_x_max - c_x_min) / i_x_max; pixel_height = (c_y_max - c_y_min) / i_y_max; }; }; void init_colors(){ int color_size = 17 * sizeof(int); cudaMalloc((void**)&dev_color_red, color_size); cudaMalloc((void**)&dev_color_green, color_size); cudaMalloc((void**)&dev_color_blue, color_size); cudaMemcpy(dev_color_red, color_red, color_size, cudaMemcpyHostToDevice); cudaMemcpy(dev_color_green, color_green, color_size, cudaMemcpyHostToDevice); cudaMemcpy(dev_color_blue, color_blue, color_size, cudaMemcpyHostToDevice); } void allocate_pixels(){ int rgb_size = 3; pixels = (unsigned char **) malloc(sizeof(unsigned char *) * image_buffer_size); for(int i = 0; i < image_buffer_size; i++){ pixels[i] = (unsigned char *) malloc(sizeof(unsigned char) * rgb_size); }; } void set_pixels(){ for(int i = 0; i < image_buffer_size; i++){ pixels[i][0] = 
image_buffer_red[i]; pixels[i][1] = image_buffer_green[i]; pixels[i][2] = image_buffer_blue[i]; } } void write_to_file() { FILE *file; const char *filename = "output.ppm"; const char *comment = "# "; int max_color_component_value = 255; file = fopen(filename, "wb"); fprintf(file, "P6\n %s\n %d\n %d\n %d\n", comment, i_x_max, i_y_max, max_color_component_value); for(int i = 0; i < image_buffer_size; i++){ fwrite(pixels[i], 1 , 3, file); }; fclose(file); }; __device__ void update_rgb_buffer(int iteration, int x, int y, int image_size, unsigned char *image_buffer_red, unsigned char *image_buffer_green, unsigned char *image_buffer_blue, int *color_red, int *color_green, int *color_blue) { int gradient_size = 16; int iteration_max = 200; int color; if (iteration == iteration_max) { image_buffer_red[(image_size * y) + x] = color_red[gradient_size]; image_buffer_green[(image_size * y) + x] = color_green[gradient_size]; image_buffer_blue[(image_size * y) + x] = color_blue[gradient_size]; } else { color = iteration % gradient_size; image_buffer_red[(image_size * y) + x] = color_red[color]; image_buffer_green[(image_size * y) + x] = color_green[color]; image_buffer_blue[(image_size * y) + x] = color_blue[color]; }; }; __device__ int mandelbrot(double c_x, double c_y) { double z_x = 0; double z_y = 0; double z_x_squared = 0; double z_y_squared = 0; double escape_radius_squared = 4; int iteration_max = 200; int iteration; for (iteration = 0; iteration < iteration_max && ((z_x_squared + z_y_squared) < escape_radius_squared); iteration++) { z_y = 2 * z_x * z_y + c_y; z_x = z_x_squared - z_y_squared + c_x; z_x_squared = z_x * z_x; z_y_squared = z_y * z_y; }; return iteration; } __global__ void compute_mandelbrot(unsigned char *image_buffer_red, unsigned char *image_buffer_green, unsigned char *image_buffer_blue, double c_x_min, double c_y_min, double pixel_width, double pixel_height, int image_size, int *color_red, int *color_green, int *color_blue){ int i_x; int i_y; int iteration; i_x = blockIdx.x*blockDim.x+threadIdx.x; i_y = blockIdx.y*blockDim.y+threadIdx.y; double c_y = c_y_min + i_y * pixel_height; if(fabs(c_y) < pixel_height / 2){ c_y = 0.0; }; double c_x = c_x_min + i_x * pixel_width; iteration = mandelbrot(c_x, c_y); update_rgb_buffer(iteration, i_x, i_y, image_size, image_buffer_red, image_buffer_green, image_buffer_blue, color_red, color_green, color_blue); } int main(int argc, char *argv[]) { init(argc, argv); allocate_image_buffer(); init_colors(); int time; dim3 dimBlock(block_dim_x, block_dim_y); dim3 dimGrid((int)ceil(image_size/dimBlock.x),(int)ceil(image_size/dimBlock.y)); //MEDICAO DE TEMPO timer.c_start = clock(); clock_gettime(CLOCK_MONOTONIC, &timer.t_start); gettimeofday(&timer.v_start, NULL); compute_mandelbrot <<<dimGrid, dimBlock>>>(dev_image_buffer_red, dev_image_buffer_green, dev_image_buffer_blue, c_x_min, c_y_min, pixel_width, pixel_height, image_size, dev_color_red, dev_color_green, dev_color_blue); cudaMemcpy(image_buffer_red, dev_image_buffer_red, sizeof(unsigned char) * image_buffer_size, cudaMemcpyDeviceToHost); cudaMemcpy(image_buffer_green, dev_image_buffer_green, sizeof(unsigned char) * image_buffer_size, cudaMemcpyDeviceToHost); cudaMemcpy(image_buffer_blue, dev_image_buffer_blue, sizeof(unsigned char) * image_buffer_size, cudaMemcpyDeviceToHost); timer.c_end = clock(); clock_gettime(CLOCK_MONOTONIC, &timer.t_end); gettimeofday(&timer.v_end, NULL); //FIM DA MEDICAO cudaFree(dev_color_red); cudaFree(dev_color_green); cudaFree(dev_color_blue); allocate_pixels(); 
set_pixels(); cudaFree(dev_image_buffer_red); cudaFree(dev_image_buffer_green); cudaFree(dev_image_buffer_blue); write_to_file(); printf("%f\n", (double) (timer.t_end.tv_sec - timer.t_start.tv_sec) + (double) (timer.t_end.tv_nsec - timer.t_start.tv_nsec) / 1000000000.0); time = (double) (timer.t_end.tv_sec - timer.t_start.tv_sec) + (double) (timer.t_end.tv_nsec - timer.t_start.tv_nsec) / 1000000000.0; return time; };
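The launch geometry above truncates before it rounds: ceil(image_size/dimBlock.x) divides two integers first, so any image_size that is not a multiple of the block dimensions loses its last partial tile, and compute_mandelbrot has no bounds guard for the extra threads a rounded-up grid would create. A minimal sketch of the usual pattern, reusing the file's variable names; only div_up is new and hypothetical:

// Round the grid up instead of truncating, then guard the kernel so the
// threads of the last partial tile do not write past the image buffers.
__host__ __device__ static inline unsigned int div_up(unsigned int a, unsigned int b)
{
    return (a + b - 1) / b;
}

// host launch:
//   dim3 dimBlock(block_dim_x, block_dim_y);
//   dim3 dimGrid(div_up(image_size, dimBlock.x), div_up(image_size, dimBlock.y));
//
// first lines of compute_mandelbrot:
//   int i_x = blockIdx.x * blockDim.x + threadIdx.x;
//   int i_y = blockIdx.y * blockDim.y + threadIdx.y;
//   if (i_x >= image_size || i_y >= image_size) return;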
ef6f0cf259ea186ef967f149ce6c052cc9e7c8bd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <gtest/gtest.h> #include "linalg/add.h" #include "random/rng.h" #include "test_utils.h" namespace MLCommon { namespace LinAlg { template <typename Type> __global__ void naiveAddElemKernel(Type* out, const Type* in1, const Type* in2, int len) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if(idx < len) { out[idx] = in1[idx] + in2[idx]; } } template <typename Type> void naiveAddElem(Type* out, const Type* in1, const Type* in2, int len) { static const int TPB = 64; int nblks = ceildiv(len, TPB); hipLaunchKernelGGL(( naiveAddElemKernel<Type>), dim3(nblks),dim3(TPB), 0, 0, out, in1, in2, len); CUDA_CHECK(hipPeekAtLastError()); } template <typename T> struct AddInputs { T tolerance; int len; unsigned long long int seed; }; template <typename T> ::std::ostream& operator<<(::std::ostream& os, const AddInputs<T>& dims) { return os; } template <typename T> class AddTest: public ::testing::TestWithParam<AddInputs<T> > { protected: void SetUp() override { params = ::testing::TestWithParam<AddInputs<T>>::GetParam(); Random::Rng<T> r(params.seed); int len = params.len; allocate(in1, len); allocate(in2, len); allocate(out_ref, len); allocate(out, len); r.uniform(in1, len, T(-1.0), T(1.0)); r.uniform(in2, len, T(-1.0), T(1.0)); naiveAddElem(out_ref, in1, in2, len); add(out, in1, in2, len); add(in1, in1, in2, len); } void TearDown() override { CUDA_CHECK(hipFree(in1)); CUDA_CHECK(hipFree(in2)); CUDA_CHECK(hipFree(out_ref)); CUDA_CHECK(hipFree(out)); } protected: AddInputs<T> params; T *in1, *in2, *out_ref, *out; }; const std::vector<AddInputs<float> > inputsf2 = { {0.000001f, 1024*1024, 1234ULL} }; const std::vector<AddInputs<double> > inputsd2 = { {0.00000001, 1024*1024, 1234ULL} }; typedef AddTest<float> AddTestF; TEST_P(AddTestF, Result) { ASSERT_TRUE(devArrMatch(out_ref, out, params.len, CompareApprox<float>(params.tolerance))); ASSERT_TRUE(devArrMatch(out_ref, in1, params.len, CompareApprox<float>(params.tolerance))); } typedef AddTest<double> AddTestD; TEST_P(AddTestD, Result){ ASSERT_TRUE(devArrMatch(out_ref, out, params.len, CompareApprox<double>(params.tolerance))); ASSERT_TRUE(devArrMatch(out_ref, in1, params.len, CompareApprox<double>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(AddTests, AddTestF, ::testing::ValuesIn(inputsf2)); INSTANTIATE_TEST_CASE_P(AddTests, AddTestD, ::testing::ValuesIn(inputsd2)); } // end namespace LinAlg } // end namespace MLCommon
ef6f0cf259ea186ef967f149ce6c052cc9e7c8bd.cu
#include <gtest/gtest.h> #include "linalg/add.h" #include "random/rng.h" #include "test_utils.h" namespace MLCommon { namespace LinAlg { template <typename Type> __global__ void naiveAddElemKernel(Type* out, const Type* in1, const Type* in2, int len) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if(idx < len) { out[idx] = in1[idx] + in2[idx]; } } template <typename Type> void naiveAddElem(Type* out, const Type* in1, const Type* in2, int len) { static const int TPB = 64; int nblks = ceildiv(len, TPB); naiveAddElemKernel<Type><<<nblks,TPB>>>(out, in1, in2, len); CUDA_CHECK(cudaPeekAtLastError()); } template <typename T> struct AddInputs { T tolerance; int len; unsigned long long int seed; }; template <typename T> ::std::ostream& operator<<(::std::ostream& os, const AddInputs<T>& dims) { return os; } template <typename T> class AddTest: public ::testing::TestWithParam<AddInputs<T> > { protected: void SetUp() override { params = ::testing::TestWithParam<AddInputs<T>>::GetParam(); Random::Rng<T> r(params.seed); int len = params.len; allocate(in1, len); allocate(in2, len); allocate(out_ref, len); allocate(out, len); r.uniform(in1, len, T(-1.0), T(1.0)); r.uniform(in2, len, T(-1.0), T(1.0)); naiveAddElem(out_ref, in1, in2, len); add(out, in1, in2, len); add(in1, in1, in2, len); } void TearDown() override { CUDA_CHECK(cudaFree(in1)); CUDA_CHECK(cudaFree(in2)); CUDA_CHECK(cudaFree(out_ref)); CUDA_CHECK(cudaFree(out)); } protected: AddInputs<T> params; T *in1, *in2, *out_ref, *out; }; const std::vector<AddInputs<float> > inputsf2 = { {0.000001f, 1024*1024, 1234ULL} }; const std::vector<AddInputs<double> > inputsd2 = { {0.00000001, 1024*1024, 1234ULL} }; typedef AddTest<float> AddTestF; TEST_P(AddTestF, Result) { ASSERT_TRUE(devArrMatch(out_ref, out, params.len, CompareApprox<float>(params.tolerance))); ASSERT_TRUE(devArrMatch(out_ref, in1, params.len, CompareApprox<float>(params.tolerance))); } typedef AddTest<double> AddTestD; TEST_P(AddTestD, Result){ ASSERT_TRUE(devArrMatch(out_ref, out, params.len, CompareApprox<double>(params.tolerance))); ASSERT_TRUE(devArrMatch(out_ref, in1, params.len, CompareApprox<double>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(AddTests, AddTestF, ::testing::ValuesIn(inputsf2)); INSTANTIATE_TEST_CASE_P(AddTests, AddTestD, ::testing::ValuesIn(inputsd2)); } // end namespace LinAlg } // end namespace MLCommon
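The test above leans on small MLCommon helpers (allocate, ceildiv, devArrMatch, CUDA_CHECK) whose definitions live in the included headers rather than in this file. A sketch of the round-up division that naiveAddElem's launch arithmetic assumes; the real MLCommon signature may differ:

// Assumed shape of ceildiv: integer round-up so every element gets a thread.
template <typename IntType>
constexpr IntType ceildiv(IntType a, IntType b)
{
    return (a + b - 1) / b;
}
// With len = 1024*1024 and TPB = 64 this yields 16384 blocks, matching the
// idx < len guard inside naiveAddElemKernel.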
af5b35a4be878bfc34f0427e2fa7e85329efe3ce.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "cellularautomata_kernal.h" __global__ void kernal(int* g_data, int* DIM) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; //calculates number of live neighbour cells int neighLiveCells = liveCellsInNeigh(g_data, x * *DIM, y, *DIM); //set new cell state. g_data[(x * *DIM) + y] = applyRules(g_data[x * *DIM + y],neighLiveCells); } //probably a much better way to figure out the moores neighbourhood __device__ int liveCellsInNeigh(int* g_data, int x, int y, int xDIM) { //get neighbours for cell x,y int numlivecells = 0; // [-1,-1] if (x != 0 && y != 0) if (g_data[x - (xDIM * 1) + y - 1] & 1 == 1) ++numlivecells; // [0,-1] if ( y != 0) if (g_data[x + y - 1] & 1 == 1) ++numlivecells; // [1,-1] if (x != xDIM - 1 && y != 0 ) if (g_data[x + (xDIM * 1) + y - 1] & 1 == 1) ++numlivecells; // [-1,0] if (x != 0) if (g_data[x - (xDIM * 1) + y] & 1 == 1) ++numlivecells; // [1,0] if (x != xDIM - 1) if (g_data[x + (xDIM * 1) + y] & 1 == 1) ++numlivecells; // [-1,1] if (x != 0 && y != xDIM -1 ) if (g_data[x - (xDIM * 1) + y + 1] & 1 == 1) ++numlivecells; // [0,1] if (y != xDIM -1 ) if (g_data[x + y + 1] & 1 == 1) ++numlivecells; // [1,1] if (x != xDIM -1 && y != xDIM -1 ) if (g_data[x + (xDIM * 1) + y + 1] & 1 == 1) ++numlivecells; return numlivecells; } __device__ int applyRules(int state, int liveCells) { //Any live cell with fewer than two live neighbours dies, as if caused by under-population. if (state && liveCells < 2) return state; //Any live cell with two or three live neighbours lives on to the next generation. if (state && liveCells > 1 && liveCells < 4) return state | 2; //Any live cell with more than three live neighbours dies, as if by overcrowding. if (state && liveCells > 3) return state; //Any dead cell with exactly three live neighbours becomes a live cell, as if by reproduction. if (!state && liveCells == 3) return state | 2; //default return 0; }
af5b35a4be878bfc34f0427e2fa7e85329efe3ce.cu
#include "cellularautomata_kernal.h" __global__ void kernal(int* g_data, int* DIM) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; //calculates number of live neighbour cells int neighLiveCells = liveCellsInNeigh(g_data, x * *DIM, y, *DIM); //set new cell state. g_data[(x * *DIM) + y] = applyRules(g_data[x * *DIM + y],neighLiveCells); } //probably a much better way to figure out the moores neighbourhood __device__ int liveCellsInNeigh(int* g_data, int x, int y, int xDIM) { //get neighbours for cell x,y int numlivecells = 0; // [-1,-1] if (x != 0 && y != 0) if (g_data[x - (xDIM * 1) + y - 1] & 1 == 1) ++numlivecells; // [0,-1] if ( y != 0) if (g_data[x + y - 1] & 1 == 1) ++numlivecells; // [1,-1] if (x != xDIM - 1 && y != 0 ) if (g_data[x + (xDIM * 1) + y - 1] & 1 == 1) ++numlivecells; // [-1,0] if (x != 0) if (g_data[x - (xDIM * 1) + y] & 1 == 1) ++numlivecells; // [1,0] if (x != xDIM - 1) if (g_data[x + (xDIM * 1) + y] & 1 == 1) ++numlivecells; // [-1,1] if (x != 0 && y != xDIM -1 ) if (g_data[x - (xDIM * 1) + y + 1] & 1 == 1) ++numlivecells; // [0,1] if (y != xDIM -1 ) if (g_data[x + y + 1] & 1 == 1) ++numlivecells; // [1,1] if (x != xDIM -1 && y != xDIM -1 ) if (g_data[x + (xDIM * 1) + y + 1] & 1 == 1) ++numlivecells; return numlivecells; } __device__ int applyRules(int state, int liveCells) { //Any live cell with fewer than two live neighbours dies, as if caused by under-population. if (state && liveCells < 2) return state; //Any live cell with two or three live neighbours lives on to the next generation. if (state && liveCells > 1 && liveCells < 4) return state | 2; //Any live cell with more than three live neighbours dies, as if by overcrowding. if (state && liveCells > 3) return state; //Any dead cell with exactly three live neighbours becomes a live cell, as if by reproduction. if (!state && liveCells == 3) return state | 2; //default return 0; }
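Two details in the kernel pair above are worth flagging: the caller passes x * *DIM as the x argument, so the last-row test x != xDIM - 1 compares a row offset against the grid width, never fires, and lets the bottom row read past the end of the grid; and g_data[...] & 1 == 1 parses as g_data[...] & (1 == 1), which yields the intended value only because the literal happens to be 1. A bounds-safe sketch of the neighbour count, assuming the same bit-0 "currently alive" encoding used by applyRules; it is not the interface declared in cellularautomata_kernal.h:

__device__ int live_neighbours(const int* grid, int row, int col, int dim)
{
    int count = 0;
    for (int dr = -1; dr <= 1; ++dr) {
        for (int dc = -1; dc <= 1; ++dc) {
            if (dr == 0 && dc == 0) continue;                         // skip the cell itself
            int r = row + dr;
            int c = col + dc;
            if (r < 0 || r >= dim || c < 0 || c >= dim) continue;     // stay inside the grid
            if ((grid[r * dim + c] & 1) == 1)                         // parenthesised: == binds tighter than &
                ++count;
        }
    }
    return count;
}
// Call-site sketch: live_neighbours(g_data, x, y, *DIM) with the un-multiplied
// row index, plus an early return in kernal() when x or y reaches *DIM.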
8f50ecab7b6ecbb63762793a0bec384a19b405d8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cstdio> #include <cstdlib> #include <math.h> #include <time.h> #define MINVAL 0.00 #define MAXVAL 10.0 #define TOL 1e-5 #define NUM_THREADS 16 double CPS = 2.9e9; int LEN; // to be defined via cmd args //////////////////////////// CUDA RELATED //////////////////////////////////// // Assertion to check for errors #define CUDA_SAFE_CALL(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, char *file, int line, bool abort=true) { if (code != hipSuccess) { fprintf(stderr,"CUDA_SAFE_CALL: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } __global__ void MMM_kernel(float* A, float* B, float* dst, int len) { const int row = threadIdx.x + blockDim.x * blockIdx.x; const int col = threadIdx.y + blockDim.y * blockIdx.y; if(((row >= 0) && (row < len)) && ((col >= 0) && (col < len))) { int k; for(k = 0; k < len; k++) dst[row * len + col] = A[row * len + k] * B[k * len + col]; } } ////////////////////////////// MATRIX ///////////////////////////////////////// float* matrix_create(int len); int matrix_init(float* mat, int len); int matrix_zero(float* mat, int len); int matrix_copy(float* src, float* dst, int len); void MMM_CPU(float* A, float* B, float* dst, int len); ///////////////// Time related ////////////////////////////// //rdtsc related typedef union { unsigned long long int64; struct {unsigned int lo, hi;} int32; } mcps_tctr; #define MCPS_RDTSC(cpu_c) __asm__ __volatile__ ("rdtsc" : \ "=a" ((cpu_c).int32.lo), "=d"((cpu_c).int32.hi)) int clock_gettime(clockid_t clk_id, struct timespec *tp); struct timespec diff(struct timespec start, struct timespec end); double ts_ms(struct timespec ts); struct timespec ts_diff(struct timespec start, struct timespec end); double measure_cps(void); //////////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////// int main(int argc, char *argv[]) { if(argc != 2) { printf("\nPlease pass a length in.\n"); return 0; } LEN = strtol(argv[1], NULL, 10); if(LEN <= 0) { printf("\nLength must be greater than zero\n"); return 0; } int size = LEN * LEN * sizeof(float); int NUM_BLOCKS = LEN / NUM_THREADS; if(LEN % NUM_THREADS != 0) // die if not a good fit { printf("\nOdd Numbr of blocks\n"); return 0; } // CUDA Timing hipEvent_t start_full, start_mmm, stop_full, stop_mmm; float d_time_full, d_time_mmm; // CPU Timing struct timespec time1, time2; double h_time; // CPU set up float *h_A, *h_B, *h_dst_gpu, *h_dst_cpu, *d_A, *d_B, *d_dst; measure_cps(); h_A = matrix_create(LEN); if(!h_A) return 0; if(!matrix_init(h_A, LEN)) return 0; h_B = matrix_create(LEN); if(!h_B) return 0; if(!matrix_init(h_B, LEN)) return 0; h_dst_cpu = matrix_create(LEN); // cpu result if(!h_dst_cpu) return 0; if(!matrix_zero(h_dst_cpu, LEN)) return 0; h_dst_gpu = matrix_create(LEN); // gpu result if(!h_dst_gpu) return 0; if(!matrix_zero(h_dst_gpu, LEN)) return 0; // GPU Set up d_A = NULL; d_B = NULL; d_dst = NULL; CUDA_SAFE_CALL(hipSetDevice(0)); CUDA_SAFE_CALL(hipMalloc((void**)&d_A, size)); CUDA_SAFE_CALL(hipMalloc((void**)&d_B, size)); CUDA_SAFE_CALL(hipMalloc((void**)&d_dst, size)); hipEventCreate(&start_full); hipEventCreate(&start_mmm); hipEventCreate(&stop_full); hipEventCreate(&stop_mmm); // start the GPU calculations dim3 dimBlock(NUM_THREADS, NUM_THREADS, 1); dim3 dimGrid(NUM_BLOCKS, NUM_BLOCKS, 1); 
hipEventRecord(start_full,0); CUDA_SAFE_CALL(hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice)); hipEventRecord(start_mmm,0); hipLaunchKernelGGL(( MMM_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_A, d_B, d_dst, LEN); hipEventRecord(stop_mmm,0); hipEventSynchronize(stop_mmm); CUDA_SAFE_CALL(hipPeekAtLastError()); CUDA_SAFE_CALL(hipDeviceSynchronize()); CUDA_SAFE_CALL(hipMemcpy(h_dst_gpu, d_dst, size, hipMemcpyDeviceToHost)); hipEventRecord(stop_full, 0); hipEventSynchronize(stop_full); hipEventElapsedTime(&d_time_mmm, start_mmm, stop_mmm); hipEventElapsedTime(&d_time_full, start_full, stop_full); printf("\nGPU MMM Time: %f ms", d_time_mmm); printf("\nGPU FUll Time: %f ms", d_time_full); hipEventDestroy(start_full); hipEventDestroy(stop_full); //CPU calculation clock_gettime(CLOCK_REALTIME, &time1); MMM_CPU(h_A, h_B, h_dst_cpu, LEN); clock_gettime(CLOCK_REALTIME, &time2); h_time = ts_ms(ts_diff(time1, time2)); printf("\nCPU Time: %lf ms\n", h_time); int i, num_elements; num_elements = LEN * LEN; for(i = 0; i < num_elements; i++) { if((h_dst_cpu - h_dst_gpu) > (float) TOL) { printf("\nResult verification issue at element %d | CPU: %f | GPU: %f\n", i, h_dst_cpu, h_dst_gpu); return 0; } } // Free stuff CUDA_SAFE_CALL(hipFree(d_A)); CUDA_SAFE_CALL(hipFree(d_B)); CUDA_SAFE_CALL(hipFree(d_dst)); free(h_A); free(h_B); free(h_dst_gpu); free(h_dst_cpu); printf("\nDone\n"); return 0; } ///////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////// MATRIX IMPLEMENTATIONS //////////////////////////////////////// float float_rand(float min, float max) { float f = (float)random()/RAND_MAX; return min + f * (max - min); } float* matrix_create(int len) { float* arr; if(len > 0) { arr = (float*) calloc(len*len, sizeof(float)); if(!arr) { printf("\n\tFailed to allocate array\n"); return NULL; } } else return NULL; return arr; } int matrix_init(float* mat, int len) { int len_sq, i; if(len > 0) { len_sq = len * len; for (i = 0; i < len_sq; i++) { mat[i] = float_rand(MINVAL, MAXVAL); } return 1; } printf("\nError in initializing matrix\n"); return 0; } int matrix_zero(float* mat, int len) { int len_sq, i; if(len > 0) { len_sq = len * len; for(i = 0; i < len_sq; i++) { mat[i] = 0; } return 1; } printf("\nFailed to zero matrix\n"); return 0; } int matrix_copy(float* src, float* dst, int len) { int len_sq, i; if(len > 0) { len_sq = len * len; for(i = 0; i < len_sq; i++) { dst[i] = src[i]; } return 1; } printf("\nFailed to copy matrix\n"); return 0; } void MMM_CPU(float* A, float* B, float* dst, int len) { int i, j, k; for (i = 0; i < len; i++) { for(j = 0; j < len; j++) { for(k = 0; k < len; k++) dst[i * len + j] += A[i * len + k] * B[k * len + j]; } } } ///////////////////////////// Timing related /////////////////////////////// double ts_ms(struct timespec ts) { return ((((double)(ts.tv_sec))*1.0e9) + ((double)(ts.tv_nsec)))/(1.0e6); } /* --------------------------------------------------------------------------- | Make the CPU busy, and measure CPS (cycles per second). 
| | Explanation: | If tests are very fast, they can run so quickly that the SpeedStep control | (in kernel and/or on-chip) doesn't notice in time, and the first few tests | might finish while the CPU is still in its sleep state (about 800 MHz, | judging from my measurements) | A simple way to get around this is to run some kind of busy-loop that | forces the OS and/or CPU to notice it needs to go to full clock speed. | We print out the results of the computation so the loop won't get optimised | away. | | Copy this code into other programs as desired. It provides three entry | points: | | double ts_sec(ts): converts a timespec into seconds | timespec ts_diff(ts1, ts2): computes interval between two timespecs | measure_cps(): Does the busy loop and prints out measured CPS (cycles/sec) --------------------------------------------------------------------------- */ struct timespec ts_diff(struct timespec start, struct timespec end) { struct timespec temp; if ((end.tv_nsec-start.tv_nsec)<0) { temp.tv_sec = end.tv_sec-start.tv_sec-1; temp.tv_nsec = 1000000000+end.tv_nsec-start.tv_nsec; } else { temp.tv_sec = end.tv_sec-start.tv_sec; temp.tv_nsec = end.tv_nsec-start.tv_nsec; } return temp; } double measure_cps() { struct timespec cal_start, cal_end; mcps_tctr tsc_start, tsc_end; double total_time; double total_cycles; /* We perform a chaotic iteration and print the result, to defeat compiler optimisation */ double chaosC = -1.8464323952913974; double z = 0.0; long int i, ilim, j; /* Do it twice and throw away results from the first time; this ensures the * OS and CPU will notice it's busy and set the clock speed. */ for(j=0; j<2; j++) { clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &cal_start); MCPS_RDTSC(tsc_start); ilim = 50*1000*1000; for (i=0; i<ilim; i++) z = z * z + chaosC; clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &cal_end); MCPS_RDTSC(tsc_end); } total_time = ts_ms(ts_diff(cal_start, cal_end)); total_cycles = (double)(tsc_end.int64-tsc_start.int64); CPS = total_cycles / total_time; printf("z == %f, CPS == %g\n", z, CPS); return CPS; } /* --------------------------------------------------------------------------- | End of measure_cps code --------------------------------------------------------------------------- */ struct timespec diff(struct timespec start, struct timespec end) { struct timespec temp; if ((end.tv_nsec-start.tv_nsec)<0) { temp.tv_sec = end.tv_sec-start.tv_sec-1; temp.tv_nsec = 1000000000+end.tv_nsec-start.tv_nsec; } else { temp.tv_sec = end.tv_sec-start.tv_sec; temp.tv_nsec = end.tv_nsec-start.tv_nsec; } return temp; }
8f50ecab7b6ecbb63762793a0bec384a19b405d8.cu
#include <cstdio> #include <cstdlib> #include <math.h> #include <time.h> #define MINVAL 0.00 #define MAXVAL 10.0 #define TOL 1e-5 #define NUM_THREADS 16 double CPS = 2.9e9; int LEN; // to be defined via cmd args //////////////////////////// CUDA RELATED //////////////////////////////////// // Assertion to check for errors #define CUDA_SAFE_CALL(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, char *file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr,"CUDA_SAFE_CALL: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } __global__ void MMM_kernel(float* A, float* B, float* dst, int len) { const int row = threadIdx.x + blockDim.x * blockIdx.x; const int col = threadIdx.y + blockDim.y * blockIdx.y; if(((row >= 0) && (row < len)) && ((col >= 0) && (col < len))) { int k; for(k = 0; k < len; k++) dst[row * len + col] = A[row * len + k] * B[k * len + col]; } } ////////////////////////////// MATRIX ///////////////////////////////////////// float* matrix_create(int len); int matrix_init(float* mat, int len); int matrix_zero(float* mat, int len); int matrix_copy(float* src, float* dst, int len); void MMM_CPU(float* A, float* B, float* dst, int len); ///////////////// Time related ////////////////////////////// //rdtsc related typedef union { unsigned long long int64; struct {unsigned int lo, hi;} int32; } mcps_tctr; #define MCPS_RDTSC(cpu_c) __asm__ __volatile__ ("rdtsc" : \ "=a" ((cpu_c).int32.lo), "=d"((cpu_c).int32.hi)) int clock_gettime(clockid_t clk_id, struct timespec *tp); struct timespec diff(struct timespec start, struct timespec end); double ts_ms(struct timespec ts); struct timespec ts_diff(struct timespec start, struct timespec end); double measure_cps(void); //////////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////// int main(int argc, char *argv[]) { if(argc != 2) { printf("\nPlease pass a length in.\n"); return 0; } LEN = strtol(argv[1], NULL, 10); if(LEN <= 0) { printf("\nLength must be greater than zero\n"); return 0; } int size = LEN * LEN * sizeof(float); int NUM_BLOCKS = LEN / NUM_THREADS; if(LEN % NUM_THREADS != 0) // die if not a good fit { printf("\nOdd Numbr of blocks\n"); return 0; } // CUDA Timing cudaEvent_t start_full, start_mmm, stop_full, stop_mmm; float d_time_full, d_time_mmm; // CPU Timing struct timespec time1, time2; double h_time; // CPU set up float *h_A, *h_B, *h_dst_gpu, *h_dst_cpu, *d_A, *d_B, *d_dst; measure_cps(); h_A = matrix_create(LEN); if(!h_A) return 0; if(!matrix_init(h_A, LEN)) return 0; h_B = matrix_create(LEN); if(!h_B) return 0; if(!matrix_init(h_B, LEN)) return 0; h_dst_cpu = matrix_create(LEN); // cpu result if(!h_dst_cpu) return 0; if(!matrix_zero(h_dst_cpu, LEN)) return 0; h_dst_gpu = matrix_create(LEN); // gpu result if(!h_dst_gpu) return 0; if(!matrix_zero(h_dst_gpu, LEN)) return 0; // GPU Set up d_A = NULL; d_B = NULL; d_dst = NULL; CUDA_SAFE_CALL(cudaSetDevice(0)); CUDA_SAFE_CALL(cudaMalloc((void**)&d_A, size)); CUDA_SAFE_CALL(cudaMalloc((void**)&d_B, size)); CUDA_SAFE_CALL(cudaMalloc((void**)&d_dst, size)); cudaEventCreate(&start_full); cudaEventCreate(&start_mmm); cudaEventCreate(&stop_full); cudaEventCreate(&stop_mmm); // start the GPU calculations dim3 dimBlock(NUM_THREADS, NUM_THREADS, 1); dim3 dimGrid(NUM_BLOCKS, NUM_BLOCKS, 1); cudaEventRecord(start_full,0); CUDA_SAFE_CALL(cudaMemcpy(d_A, h_A, size, 
cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice)); cudaEventRecord(start_mmm,0); MMM_kernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_dst, LEN); cudaEventRecord(stop_mmm,0); cudaEventSynchronize(stop_mmm); CUDA_SAFE_CALL(cudaPeekAtLastError()); CUDA_SAFE_CALL(cudaThreadSynchronize()); CUDA_SAFE_CALL(cudaMemcpy(h_dst_gpu, d_dst, size, cudaMemcpyDeviceToHost)); cudaEventRecord(stop_full, 0); cudaEventSynchronize(stop_full); cudaEventElapsedTime(&d_time_mmm, start_mmm, stop_mmm); cudaEventElapsedTime(&d_time_full, start_full, stop_full); printf("\nGPU MMM Time: %f ms", d_time_mmm); printf("\nGPU FUll Time: %f ms", d_time_full); cudaEventDestroy(start_full); cudaEventDestroy(stop_full); //CPU calculation clock_gettime(CLOCK_REALTIME, &time1); MMM_CPU(h_A, h_B, h_dst_cpu, LEN); clock_gettime(CLOCK_REALTIME, &time2); h_time = ts_ms(ts_diff(time1, time2)); printf("\nCPU Time: %lf ms\n", h_time); int i, num_elements; num_elements = LEN * LEN; for(i = 0; i < num_elements; i++) { if((h_dst_cpu - h_dst_gpu) > (float) TOL) { printf("\nResult verification issue at element %d | CPU: %f | GPU: %f\n", i, h_dst_cpu, h_dst_gpu); return 0; } } // Free stuff CUDA_SAFE_CALL(cudaFree(d_A)); CUDA_SAFE_CALL(cudaFree(d_B)); CUDA_SAFE_CALL(cudaFree(d_dst)); free(h_A); free(h_B); free(h_dst_gpu); free(h_dst_cpu); printf("\nDone\n"); return 0; } ///////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////// MATRIX IMPLEMENTATIONS //////////////////////////////////////// float float_rand(float min, float max) { float f = (float)random()/RAND_MAX; return min + f * (max - min); } float* matrix_create(int len) { float* arr; if(len > 0) { arr = (float*) calloc(len*len, sizeof(float)); if(!arr) { printf("\n\tFailed to allocate array\n"); return NULL; } } else return NULL; return arr; } int matrix_init(float* mat, int len) { int len_sq, i; if(len > 0) { len_sq = len * len; for (i = 0; i < len_sq; i++) { mat[i] = float_rand(MINVAL, MAXVAL); } return 1; } printf("\nError in initializing matrix\n"); return 0; } int matrix_zero(float* mat, int len) { int len_sq, i; if(len > 0) { len_sq = len * len; for(i = 0; i < len_sq; i++) { mat[i] = 0; } return 1; } printf("\nFailed to zero matrix\n"); return 0; } int matrix_copy(float* src, float* dst, int len) { int len_sq, i; if(len > 0) { len_sq = len * len; for(i = 0; i < len_sq; i++) { dst[i] = src[i]; } return 1; } printf("\nFailed to copy matrix\n"); return 0; } void MMM_CPU(float* A, float* B, float* dst, int len) { int i, j, k; for (i = 0; i < len; i++) { for(j = 0; j < len; j++) { for(k = 0; k < len; k++) dst[i * len + j] += A[i * len + k] * B[k * len + j]; } } } ///////////////////////////// Timing related /////////////////////////////// double ts_ms(struct timespec ts) { return ((((double)(ts.tv_sec))*1.0e9) + ((double)(ts.tv_nsec)))/(1.0e6); } /* --------------------------------------------------------------------------- | Make the CPU busy, and measure CPS (cycles per second). | | Explanation: | If tests are very fast, they can run so quickly that the SpeedStep control | (in kernel and/or on-chip) doesn't notice in time, and the first few tests | might finish while the CPU is still in its sleep state (about 800 MHz, | judging from my measurements) | A simple way to get around this is to run some kind of busy-loop that | forces the OS and/or CPU to notice it needs to go to full clock speed. | We print out the results of the computation so the loop won't get optimised | away. 
| | Copy this code into other programs as desired. It provides three entry | points: | | double ts_sec(ts): converts a timespec into seconds | timespec ts_diff(ts1, ts2): computes interval between two timespecs | measure_cps(): Does the busy loop and prints out measured CPS (cycles/sec) --------------------------------------------------------------------------- */ struct timespec ts_diff(struct timespec start, struct timespec end) { struct timespec temp; if ((end.tv_nsec-start.tv_nsec)<0) { temp.tv_sec = end.tv_sec-start.tv_sec-1; temp.tv_nsec = 1000000000+end.tv_nsec-start.tv_nsec; } else { temp.tv_sec = end.tv_sec-start.tv_sec; temp.tv_nsec = end.tv_nsec-start.tv_nsec; } return temp; } double measure_cps() { struct timespec cal_start, cal_end; mcps_tctr tsc_start, tsc_end; double total_time; double total_cycles; /* We perform a chaotic iteration and print the result, to defeat compiler optimisation */ double chaosC = -1.8464323952913974; double z = 0.0; long int i, ilim, j; /* Do it twice and throw away results from the first time; this ensures the * OS and CPU will notice it's busy and set the clock speed. */ for(j=0; j<2; j++) { clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &cal_start); MCPS_RDTSC(tsc_start); ilim = 50*1000*1000; for (i=0; i<ilim; i++) z = z * z + chaosC; clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &cal_end); MCPS_RDTSC(tsc_end); } total_time = ts_ms(ts_diff(cal_start, cal_end)); total_cycles = (double)(tsc_end.int64-tsc_start.int64); CPS = total_cycles / total_time; printf("z == %f, CPS == %g\n", z, CPS); return CPS; } /* --------------------------------------------------------------------------- | End of measure_cps code --------------------------------------------------------------------------- */ struct timespec diff(struct timespec start, struct timespec end) { struct timespec temp; if ((end.tv_nsec-start.tv_nsec)<0) { temp.tv_sec = end.tv_sec-start.tv_sec-1; temp.tv_nsec = 1000000000+end.tv_nsec-start.tv_nsec; } else { temp.tv_sec = end.tv_sec-start.tv_sec; temp.tv_nsec = end.tv_nsec-start.tv_nsec; } return temp; }
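MMM_kernel above overwrites dst[row * len + col] on every k iteration, so after the loop it holds only the k = len - 1 product rather than the dot product MMM_CPU computes, and the verification loop subtracts the two host pointers (h_dst_cpu - h_dst_gpu) instead of the values at index i. A corrected sketch with the same row-major layout; names are reused from the file, nothing else is assumed:

__global__ void MMM_kernel_acc(const float* A, const float* B, float* dst, int len)
{
    const int row = threadIdx.x + blockDim.x * blockIdx.x;
    const int col = threadIdx.y + blockDim.y * blockIdx.y;
    if (row >= len || col >= len) return;

    float acc = 0.0f;
    for (int k = 0; k < len; k++)
        acc += A[row * len + k] * B[k * len + col];   // accumulate, do not overwrite
    dst[row * len + col] = acc;
}

// Element-wise check to pair with it:
//   for (int i = 0; i < LEN * LEN; i++)
//       if (fabsf(h_dst_cpu[i] - h_dst_gpu[i]) > (float)TOL) { /* report mismatch at i */ }

The .cu variant also calls the deprecated cudaThreadSynchronize(); cudaDeviceSynchronize() is the current spelling, which is what the hipified file maps to.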
8f7e1803ce6b532af0cba79972b85c1be14bb072.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "../high_performance_timer/High_performance_timer.h" #include <iostream> #include <string> #include <stdlib.h> #include <time.h> #include<omp.h> using namespace std; bool mem_alloc(int **a, int **b, int **c, int size); void clean_up(int *a, int *b, int *c); void fill_arrays(int *a, int *b, int *c, int size); void add_vec_serial_CPU(int * a, int * b, int * c, int size); void cuda_malloc_add(int* cpu_a, int* cpu_b, int* cpu_c, int size); __global__ void add_kernel(int *c, const int *a, const int *b, int size) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < size) { c[i] = a[i] + b[i]; } } int main(int argc, char * argv[]) { //seed random with time srand((unsigned)time(NULL)); //set up the timer HighPrecisionTime htp; //set the second argument to be 1000 by default int size = 1000; //set the iterations to be 100 by default int iter = 100; //declare the variables int *a = nullptr; int *b = nullptr; int *c = nullptr; //check to see the user added a second argument //if they did, then change the size to that new argument if (argc > 1) { size = stoi(argv[1]); } cout << "the size of the array is: " << size << endl; //check to see if the user added a third argument //if they did, then change the size to that new argument if (argc > 2) { iter = stoi(argv[2]); } cout << "the number of iterations is: " << iter << endl; //try to allocate the memory try { if (!mem_alloc(&a, &b, &c, size)) { throw("did not correctly allocate!"); } cout << "memory has been allocated!" << endl; } //if it doesn't work print out the error message and continue //to clean up catch(char * err_message) { cout << err_message << endl; } fill_arrays(a, b, c, size); double t = 0; htp.TimeSinceLastCall(); for (int i = 0; i < iter; i++) { add_vec_serial_CPU(a, b, c, size); t = t + htp.TimeSinceLastCall(); } cout << "\ntotal time to add vec serial CPU took: " << t << " seconds!" << endl; t = t / iter; cout << "Add vec serial CPU took: " << t << " seconds!" << endl; //=====================test cuda code============================= //cuda_malloc_add(a, b, c, size); hipError_t cuda_status; int * gpu_a = nullptr; int * gpu_b = nullptr; int * gpu_c = nullptr; try { //choose which GPU to run on, change this on a multi-GPU system. cuda_status = hipSetDevice(0); if (cuda_status != hipSuccess) { throw("cudaSetDeice failed!"); } //allocate GPU buffers for 3 arrays cuda_status = hipMalloc((void**)&gpu_a, size * sizeof(int)); if (cuda_status != hipSuccess) { throw("hipMalloc of array a failed!"); } cuda_status = hipMalloc((void**)&gpu_b, size * sizeof(int)); if (cuda_status != hipSuccess) { throw("hipMalloc of array b failed!"); } cuda_status = hipMalloc((void**)&gpu_c, size * sizeof(int)); if (cuda_status != hipSuccess) { throw("hipMalloc of array c failed!"); } //copy the vectors over to the GPU buffers //only copy over a & b cause they are the only ones with any real data htp.TimeSinceLastCall(); cuda_status = hipMemcpy(gpu_a, a, size * sizeof(int), hipMemcpyHostToDevice); if (cuda_status != hipSuccess) { throw("hipMemcpy of array a failed!"); } cuda_status = hipMemcpy(gpu_b, b, size * sizeof(int), hipMemcpyHostToDevice); if (cuda_status != hipSuccess) { throw("hipMemcpy of array a failed!"); } t =htp.TimeSinceLastCall(); cout << "\ncuda mem copy took: " << t << "seconds!" 
<< endl; } catch (char * err_message) { cout << err_message << endl; goto Error; } //re-initialize t to be 0 for the new timing t = 0; //now add the vectors together for (int i = 0; i < iter; i++) { htp.TimeSinceLastCall(); hipLaunchKernelGGL(( add_kernel) , dim3(1), dim3(size) , 0, 0, gpu_c, gpu_a, gpu_b, size); t = t + htp.TimeSinceLastCall(); } cout << "cuda add arrays total took: " << t << " seconds!" << endl; t = t / iter; cout << "cuda add arrays took: " << t << " seconds!" << endl; Error: hipFree(gpu_c); hipFree(gpu_b); hipFree(gpu_a); clean_up(a, b, c); return 0; } //--------------------------------------------------------------------------- //function to allocate memory bool mem_alloc(int **a, int **b, int **c, int size) { //set up the return value to be false bool retval = false; //allocate memory for all the arrays and size *a = (int *)malloc(sizeof(int) * size); *b = (int *)malloc(sizeof(int) * size); *c = (int *)malloc(sizeof(int) * size); //check to make sure they properly allocated //if they were then change retval to true if (*a != NULL || *b != NULL || *c != NULL) { retval = true; } return retval; } //--------------------------------------------------------------------------- //function for cleaning up and freeing the data void clean_up(int *a, int *b, int *c) { free(a); free(b); free(c); if (a != nullptr) { a = nullptr; } if (a != nullptr) { b = nullptr; } if (a != nullptr) { c = nullptr; } } //--------------------------------------------------------------------------- void fill_arrays(int *a, int *b, int *c, int size) { //fill in the arrays for (int i = 0; i < size; i++) { a[i] = rand() % 20 + 1; b[i] = rand() % 20 + 1; c[i] = 0; } } //--------------------------------------------------------------------------- void add_vec_serial_CPU(int * a, int * b, int * c, int size) { //add a and b and save it into c for (int i = 0; i < size; i++) { c[i] = a[i] + b[i]; } } //=========CUDA CODE=================== //--------------------------------------------------------------------------- void cuda_malloc_add(int * cpu_a, int * cpu_b, int * cpu_c, int size) { hipError_t cuda_status; int * gpu_a = nullptr; int * gpu_b = nullptr; int * gpu_c = nullptr; int malloc_size = size * sizeof(int); try { //choose which GPU to run on, change this on a multi-GPU system. cuda_status = hipSetDevice(0); if (cuda_status != hipSuccess) { throw("cudaSetDeice failed!"); } //allocate GPU buffers for 3 arrays cuda_status = hipMalloc((void**)&gpu_a, malloc_size); if (cuda_status != hipSuccess) { throw("hipMalloc of array a failed!"); } cuda_status = hipMalloc((void**)&gpu_b, malloc_size); if (cuda_status != hipSuccess) { throw("hipMalloc of array b failed!"); } cuda_status = hipMalloc((void**)&gpu_c, malloc_size); if (cuda_status != hipSuccess) { throw("hipMalloc of array c failed!"); } //copy the vectors over to the GPU buffers //only copy over a & b cause they are the only ones with any real data cuda_status = hipMemcpy(gpu_a, cpu_a, malloc_size, hipMemcpyHostToDevice); if (cuda_status != hipSuccess) { throw("hipMemcpy of array a failed!"); } cuda_status = hipMemcpy(gpu_b, cpu_b, malloc_size, hipMemcpyHostToDevice); if (cuda_status != hipSuccess) { throw("hipMemcpy of array a failed!"); } } catch (char * err_message) { cout << err_message << endl; goto Error; } //now add the vectors together add_kernel <<<1, size >> > (gpu_c, gpu_a, gpu_b, size); cout << gpu_a[0] << endl; Error: hipFree(gpu_c); hipFree(gpu_b); hipFree(gpu_a); }
8f7e1803ce6b532af0cba79972b85c1be14bb072.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include "../high_performance_timer/High_performance_timer.h" #include <iostream> #include <string> #include <stdlib.h> #include <time.h> #include<omp.h> using namespace std; bool mem_alloc(int **a, int **b, int **c, int size); void clean_up(int *a, int *b, int *c); void fill_arrays(int *a, int *b, int *c, int size); void add_vec_serial_CPU(int * a, int * b, int * c, int size); void cuda_malloc_add(int* cpu_a, int* cpu_b, int* cpu_c, int size); __global__ void add_kernel(int *c, const int *a, const int *b, int size) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < size) { c[i] = a[i] + b[i]; } } int main(int argc, char * argv[]) { //seed random with time srand((unsigned)time(NULL)); //set up the timer HighPrecisionTime htp; //set the second argument to be 1000 by default int size = 1000; //set the iterations to be 100 by default int iter = 100; //declare the variables int *a = nullptr; int *b = nullptr; int *c = nullptr; //check to see the user added a second argument //if they did, then change the size to that new argument if (argc > 1) { size = stoi(argv[1]); } cout << "the size of the array is: " << size << endl; //check to see if the user added a third argument //if they did, then change the size to that new argument if (argc > 2) { iter = stoi(argv[2]); } cout << "the number of iterations is: " << iter << endl; //try to allocate the memory try { if (!mem_alloc(&a, &b, &c, size)) { throw("did not correctly allocate!"); } cout << "memory has been allocated!" << endl; } //if it doesn't work print out the error message and continue //to clean up catch(char * err_message) { cout << err_message << endl; } fill_arrays(a, b, c, size); double t = 0; htp.TimeSinceLastCall(); for (int i = 0; i < iter; i++) { add_vec_serial_CPU(a, b, c, size); t = t + htp.TimeSinceLastCall(); } cout << "\ntotal time to add vec serial CPU took: " << t << " seconds!" << endl; t = t / iter; cout << "Add vec serial CPU took: " << t << " seconds!" << endl; //=====================test cuda code============================= //cuda_malloc_add(a, b, c, size); cudaError cuda_status; int * gpu_a = nullptr; int * gpu_b = nullptr; int * gpu_c = nullptr; try { //choose which GPU to run on, change this on a multi-GPU system. cuda_status = cudaSetDevice(0); if (cuda_status != cudaSuccess) { throw("cudaSetDeice failed!"); } //allocate GPU buffers for 3 arrays cuda_status = cudaMalloc((void**)&gpu_a, size * sizeof(int)); if (cuda_status != cudaSuccess) { throw("cudaMalloc of array a failed!"); } cuda_status = cudaMalloc((void**)&gpu_b, size * sizeof(int)); if (cuda_status != cudaSuccess) { throw("cudaMalloc of array b failed!"); } cuda_status = cudaMalloc((void**)&gpu_c, size * sizeof(int)); if (cuda_status != cudaSuccess) { throw("cudaMalloc of array c failed!"); } //copy the vectors over to the GPU buffers //only copy over a & b cause they are the only ones with any real data htp.TimeSinceLastCall(); cuda_status = cudaMemcpy(gpu_a, a, size * sizeof(int), cudaMemcpyHostToDevice); if (cuda_status != cudaSuccess) { throw("cudaMemcpy of array a failed!"); } cuda_status = cudaMemcpy(gpu_b, b, size * sizeof(int), cudaMemcpyHostToDevice); if (cuda_status != cudaSuccess) { throw("cudaMemcpy of array a failed!"); } t =htp.TimeSinceLastCall(); cout << "\ncuda mem copy took: " << t << "seconds!" 
<< endl; } catch (char * err_message) { cout << err_message << endl; goto Error; } //re-initialize t to be 0 for the new timing t = 0; //now add the vectors together for (int i = 0; i < iter; i++) { htp.TimeSinceLastCall(); add_kernel <<<1, size >>> (gpu_c, gpu_a, gpu_b, size); t = t + htp.TimeSinceLastCall(); } cout << "cuda add arrays total took: " << t << " seconds!" << endl; t = t / iter; cout << "cuda add arrays took: " << t << " seconds!" << endl; Error: cudaFree(gpu_c); cudaFree(gpu_b); cudaFree(gpu_a); clean_up(a, b, c); return 0; } //--------------------------------------------------------------------------- //function to allocate memory bool mem_alloc(int **a, int **b, int **c, int size) { //set up the return value to be false bool retval = false; //allocate memory for all the arrays and size *a = (int *)malloc(sizeof(int) * size); *b = (int *)malloc(sizeof(int) * size); *c = (int *)malloc(sizeof(int) * size); //check to make sure they properly allocated //if they were then change retval to true if (*a != NULL || *b != NULL || *c != NULL) { retval = true; } return retval; } //--------------------------------------------------------------------------- //function for cleaning up and freeing the data void clean_up(int *a, int *b, int *c) { free(a); free(b); free(c); if (a != nullptr) { a = nullptr; } if (a != nullptr) { b = nullptr; } if (a != nullptr) { c = nullptr; } } //--------------------------------------------------------------------------- void fill_arrays(int *a, int *b, int *c, int size) { //fill in the arrays for (int i = 0; i < size; i++) { a[i] = rand() % 20 + 1; b[i] = rand() % 20 + 1; c[i] = 0; } } //--------------------------------------------------------------------------- void add_vec_serial_CPU(int * a, int * b, int * c, int size) { //add a and b and save it into c for (int i = 0; i < size; i++) { c[i] = a[i] + b[i]; } } //=========CUDA CODE=================== //--------------------------------------------------------------------------- void cuda_malloc_add(int * cpu_a, int * cpu_b, int * cpu_c, int size) { cudaError cuda_status; int * gpu_a = nullptr; int * gpu_b = nullptr; int * gpu_c = nullptr; int malloc_size = size * sizeof(int); try { //choose which GPU to run on, change this on a multi-GPU system. cuda_status = cudaSetDevice(0); if (cuda_status != cudaSuccess) { throw("cudaSetDeice failed!"); } //allocate GPU buffers for 3 arrays cuda_status = cudaMalloc((void**)&gpu_a, malloc_size); if (cuda_status != cudaSuccess) { throw("cudaMalloc of array a failed!"); } cuda_status = cudaMalloc((void**)&gpu_b, malloc_size); if (cuda_status != cudaSuccess) { throw("cudaMalloc of array b failed!"); } cuda_status = cudaMalloc((void**)&gpu_c, malloc_size); if (cuda_status != cudaSuccess) { throw("cudaMalloc of array c failed!"); } //copy the vectors over to the GPU buffers //only copy over a & b cause they are the only ones with any real data cuda_status = cudaMemcpy(gpu_a, cpu_a, malloc_size, cudaMemcpyHostToDevice); if (cuda_status != cudaSuccess) { throw("cudaMemcpy of array a failed!"); } cuda_status = cudaMemcpy(gpu_b, cpu_b, malloc_size, cudaMemcpyHostToDevice); if (cuda_status != cudaSuccess) { throw("cudaMemcpy of array a failed!"); } } catch (char * err_message) { cout << err_message << endl; goto Error; } //now add the vectors together add_kernel <<<1, size >> > (gpu_c, gpu_a, gpu_b, size); cout << gpu_a[0] << endl; Error: cudaFree(gpu_c); cudaFree(gpu_b); cudaFree(gpu_a); }
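The timed loop above launches a single block of size threads, which stops working once size exceeds the 1024-threads-per-block limit, and it reads the timer immediately after an asynchronous launch; cuda_malloc_add additionally spells the launch as <<<1, size >> > (a typo for >>>) and prints gpu_a[0], a device pointer dereferenced on the host. A sketch of the multi-block, synchronised version, reusing the file's names; threads_per_block is an assumed choice:

const int threads_per_block = 256;
const int blocks = (size + threads_per_block - 1) / threads_per_block;

htp.TimeSinceLastCall();
add_kernel<<<blocks, threads_per_block>>>(gpu_c, gpu_a, gpu_b, size);
cudaDeviceSynchronize();                     // time the kernel, not just the launch
t = t + htp.TimeSinceLastCall();

// Inspecting a result means copying it back first:
//   int first = 0;
//   cudaMemcpy(&first, gpu_c, sizeof(int), cudaMemcpyDeviceToHost);
//   cout << first << endl;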
877c06edf5d3e0ed082128fef6d2ed6c64f00444.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "layers/addN.h" namespace graphdl { namespace core { namespace layers { namespace cuda { namespace { __global__ void addNKernel(int n, size_t size, float** xs, float* y) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size) { y[id] = 0; for (int i = 0; i < n; ++i) y[id] += xs[i][id]; } } __global__ void addNGradientKernel(int n, size_t size, const float* yG, float** xGs) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size) { for (int i = 0; i < n; ++i) xGs[i][id] = yG[id]; } } } // namespace void runAddNDevice(int n, size_t size, float** xs, float* y) { const int BLOCK_SIZE = 256; const int NUM_BLOCKS = (size + BLOCK_SIZE - 1) / BLOCK_SIZE; float** xsDevice; hipMalloc((void**)&xsDevice, n * sizeof(float*)); hipMemcpy(xsDevice, xs, n * sizeof(float*), hipMemcpyHostToDevice); hipLaunchKernelGGL(( addNKernel), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, 0, n, size, xsDevice, y); hipDeviceSynchronize(); hipFree(xsDevice); } void runAddNGradientDevice(int n, size_t size, float* yGrad, float** xGrads) { const int BLOCK_SIZE = 256; const int NUM_BLOCKS = (size + BLOCK_SIZE - 1) / BLOCK_SIZE; float** xGradsDevice; hipMalloc((void**)&xGradsDevice, n * sizeof(float*)); hipMemcpy(xGradsDevice, xGrads, n * sizeof(float*), hipMemcpyHostToDevice); hipLaunchKernelGGL(( addNGradientKernel), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, 0, n, size, yGrad, xGradsDevice); hipDeviceSynchronize(); hipFree(xGradsDevice); } } // namespace cuda } // namespace layers } // namespace core } // namespace graphdl
877c06edf5d3e0ed082128fef6d2ed6c64f00444.cu
#include "layers/addN.h" namespace graphdl { namespace core { namespace layers { namespace cuda { namespace { __global__ void addNKernel(int n, size_t size, float** xs, float* y) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size) { y[id] = 0; for (int i = 0; i < n; ++i) y[id] += xs[i][id]; } } __global__ void addNGradientKernel(int n, size_t size, const float* yG, float** xGs) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size) { for (int i = 0; i < n; ++i) xGs[i][id] = yG[id]; } } } // namespace void runAddNDevice(int n, size_t size, float** xs, float* y) { const int BLOCK_SIZE = 256; const int NUM_BLOCKS = (size + BLOCK_SIZE - 1) / BLOCK_SIZE; float** xsDevice; cudaMalloc((void**)&xsDevice, n * sizeof(float*)); cudaMemcpy(xsDevice, xs, n * sizeof(float*), cudaMemcpyHostToDevice); addNKernel<<<NUM_BLOCKS, BLOCK_SIZE>>>(n, size, xsDevice, y); cudaDeviceSynchronize(); cudaFree(xsDevice); } void runAddNGradientDevice(int n, size_t size, float* yGrad, float** xGrads) { const int BLOCK_SIZE = 256; const int NUM_BLOCKS = (size + BLOCK_SIZE - 1) / BLOCK_SIZE; float** xGradsDevice; cudaMalloc((void**)&xGradsDevice, n * sizeof(float*)); cudaMemcpy(xGradsDevice, xGrads, n * sizeof(float*), cudaMemcpyHostToDevice); addNGradientKernel<<<NUM_BLOCKS, BLOCK_SIZE>>>(n, size, yGrad, xGradsDevice); cudaDeviceSynchronize(); cudaFree(xGradsDevice); } } // namespace cuda } // namespace layers } // namespace core } // namespace graphdl
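runAddNDevice expects xs to be a host-side array whose n entries are device pointers; the function stages that array onto the device, launches, synchronises, and frees the staging copy itself. A usage sketch; d_x0, d_x1 and d_y stand for already cudaMalloc'd float buffers of length size and are not part of the file:

float* xs_host[2] = { d_x0, d_x1 };          // host array of device pointers
graphdl::core::layers::cuda::runAddNDevice(2, size, xs_host, d_y);
// After the call returns, d_y[i] == d_x0[i] + d_x1[i] for every i < size.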
bbd5a444abc34880035c929a8fcc2397f56769c7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2015, Andrew Delong and Babak Alipanahi All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation and/or // other materials provided with the distribution. // // 3. Neither the name of the copyright holder nor the names of its contributors // may be used to endorse or promote products derived from this software without // specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON // ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Author's note: // This file was distributed as part of the Nature Biotechnology // supplementary software release for DeepBind. Users of DeepBind // are encouraged to instead use the latest source code and binaries // for scoring sequences at // http://tools.genes.toronto.edu/deepbind/ // #include <smat_cuda/cuda_errors.h> #include <smat_cuda/cuda_context.h> #include <smat_cuda/launch_util.h> #include <smat/vm/instruction_db.h> using namespace sm; template <typename T> __global__ void kernel_madd_bcast(const T* A, const T* b, T* dst, usize_t n, usize_t m, usize_t k) { DECL_KERNEL_VARS for (usize_t i = (usize_t)bdx*bx+tx; i < n; i += bdx*gdx) dst[i] += A[i]*b[(i/k) % m]; } void launch_madd_bcast(hipStream_t stream, dtype_t dtype, const void* A, const void* b, void* dst, usize_t n, usize_t m, usize_t k) { launchcfg cfg = make_elemwise_launchcfg(n); if (dtype == f32) hipLaunchKernelGGL(( kernel_madd_bcast), dim3(cfg.gdim),dim3(cfg.bdim),cfg.smem,cfg.stream, (const float*)A,(const float*)b,(float*)dst,n,m,k); else hipLaunchKernelGGL(( kernel_madd_bcast), dim3(cfg.gdim),dim3(cfg.bdim),cfg.smem,cfg.stream, (const double*)A,(const double*)b,(double*)dst,n,m,k); }
bbd5a444abc34880035c929a8fcc2397f56769c7.cu
// Copyright (c) 2015, Andrew Delong and Babak Alipanahi All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation and/or // other materials provided with the distribution. // // 3. Neither the name of the copyright holder nor the names of its contributors // may be used to endorse or promote products derived from this software without // specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON // ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Author's note: // This file was distributed as part of the Nature Biotechnology // supplementary software release for DeepBind. Users of DeepBind // are encouraged to instead use the latest source code and binaries // for scoring sequences at // http://tools.genes.toronto.edu/deepbind/ // #include <smat_cuda/cuda_errors.h> #include <smat_cuda/cuda_context.h> #include <smat_cuda/launch_util.h> #include <smat/vm/instruction_db.h> using namespace sm; template <typename T> __global__ void kernel_madd_bcast(const T* A, const T* b, T* dst, usize_t n, usize_t m, usize_t k) { DECL_KERNEL_VARS for (usize_t i = (usize_t)bdx*bx+tx; i < n; i += bdx*gdx) dst[i] += A[i]*b[(i/k) % m]; } void launch_madd_bcast(cudaStream_t stream, dtype_t dtype, const void* A, const void* b, void* dst, usize_t n, usize_t m, usize_t k) { launchcfg cfg = make_elemwise_launchcfg(n); if (dtype == f32) kernel_madd_bcast<<<cfg.gdim,cfg.bdim,cfg.smem,cfg.stream>>>((const float*)A,(const float*)b,(float*)dst,n,m,k); else kernel_madd_bcast<<<cfg.gdim,cfg.bdim,cfg.smem,cfg.stream>>>((const double*)A,(const double*)b,(double*)dst,n,m,k); }
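The b[(i / k) % m] index in kernel_madd_bcast treats the length-n array A as a row-major (n / (m * k), m, k) block and broadcasts the length-m vector b along the outer and inner dimensions; note also that launch_madd_bcast receives a stream argument but launches on cfg.stream from make_elemwise_launchcfg. A plain host reference of the same arithmetic, for illustration only:

// Host-side mirror of the broadcast multiply-add, same indexing as the kernel.
void madd_bcast_ref(const float* A, const float* b, float* dst,
                    size_t n, size_t m, size_t k)
{
    for (size_t i = 0; i < n; ++i)
        dst[i] += A[i] * b[(i / k) % m];
}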
855730ae1a5f5b83483c2850dcf5c0a472aed7dc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Software License Agreement (BSD License) * * Point Cloud Library (PCL) - www.pointclouds.org * Copyright (c) 2011, Willow Garage, Inc. * * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of Willow Garage, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * */ #include "internal.h" #include "device.h" #include <limits> #include <pcl/gpu/utils/device/limits.hpp> #include <pcl/gpu/utils/device/algorithm.hpp> #include <pcl/gpu/utils/device/warp.hpp> #include <pcl/gpu/utils/device/static_check.hpp> //#include <pcl/gpu/utils/device/funcattrib.hpp> #include <pcl/gpu/utils/safe_call.hpp> #include <thrust/tuple.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/iterator/zip_iterator.h> #include <thrust/transform_reduce.h> #include <thrust/functional.h> #include <thrust/sequence.h> #include "thrust/device_ptr.h" #include <thrust/transform.h> #include <thrust/sort.h> #include <thrust/transform_scan.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/unique.h> #include <thrust/gather.h> using namespace thrust; using namespace std; namespace pcl { namespace device { __global__ void size_check() { Static<sizeof(uint64_type) == 8>::check(); }; template<bool use_max> struct IndOp { __device__ __forceinline__ thrust::tuple<float, int> operator()(const thrust::tuple<float, int>& e1, const thrust::tuple<float, int>& e2) const { thrust::tuple<float, int> res; if (use_max) res.get<0>() = fmax(e1.get<0>(), e2.get<0>()); else res.get<0>() = fmin(e1.get<0>(), e2.get<0>()); res.get<1>() = (res.get<0>() == e1.get<0>()) ? 
e1.get<1>() : e2.get<1>(); return res; } }; struct X { __device__ __forceinline__ thrust::tuple<float, int> operator()(const thrust::tuple<PointType, int>& in) const { return thrust::tuple<float, int>(in.get<0>().x, in.get<1>()); } }; struct Y { __device__ __forceinline__ float operator()(const PointType& in) const { return in.y; } }; struct Z { __device__ __forceinline__ float operator()(const PointType& in) const { return in.z; } }; struct LineDist { float3 x1, x2; LineDist(const PointType& p1, const PointType& p2) : x1(tr(p1)), x2(tr(p2)) {} __device__ __forceinline__ thrust::tuple<float, int> operator()(const thrust::tuple<PointType, int>& in) const { float3 x0 = tr(in.get<0>()); float dist = norm(cross(x0 - x1, x0 - x2))/norm(x1 - x2); return thrust::tuple<float, int>(dist, in.get<1>()); } }; struct PlaneDist { float3 x1, n; PlaneDist(const PointType& p1, const PointType& p2, const PointType& p3) : x1(tr(p1)) { float3 x2 = tr(p2), x3 = tr(p3); n = normalized(cross(x2 - x1, x3 - x1)); } __device__ __forceinline__ thrust::tuple<float, int> operator()(const thrust::tuple<PointType, int>& in) const { float3 x0 = tr(in.get<0>()); float dist = fabs(dot(n, x0 - x1)); return thrust::tuple<float, int>(dist, in.get<1>()); } }; template<typename It, typename Unary, typename Init, typename Binary> int transform_reduce_index(It beg, It end, Unary unop, Init init, Binary binary) { counting_iterator<int> cbeg(0); counting_iterator<int> cend = cbeg + thrust::distance(beg, end); thrust::tuple<float, int> t = transform_reduce( make_zip_iterator(thrust::make_tuple(beg, cbeg)), make_zip_iterator(thrust::make_tuple(end, cend)), unop, init, binary); return t.get<1>(); } template<typename It, typename Unary> int transform_reduce_min_index(It beg, It end, Unary unop) { thrust::tuple<float, int> min_tuple(std::numeric_limits<float>::max(), 0); return transform_reduce_index(beg, end, unop, min_tuple, IndOp<false>()); } template<typename It, typename Unary> int transform_reduce_max_index(It beg, It end, Unary unop) { thrust::tuple<float, int> max_tuple(std::numeric_limits<float>::min(), 0); return transform_reduce_index(beg, end, unop, max_tuple, IndOp<true>()); } } } pcl::device::PointStream::PointStream(const Cloud& cloud_) : cloud(cloud_) { cloud_size = cloud.size(); facets_dists.create(cloud_size); perm.create(cloud_size); device_ptr<int> pbeg(perm.ptr()); thrust::sequence(pbeg, pbeg + cloud_size); } void pcl::device::PointStream::computeInitalSimplex() { device_ptr<const PointType> beg(cloud.ptr()); device_ptr<const PointType> end = beg + cloud_size; int minx = transform_reduce_min_index(beg, end, X()); int maxx = transform_reduce_max_index(beg, end, X()); PointType p1 = *(beg + minx); PointType p2 = *(beg + maxx); int maxl = transform_reduce_max_index(beg, end, LineDist(p1, p2)); PointType p3 = *(beg + maxl); int maxp = transform_reduce_max_index(beg, end, PlaneDist(p1, p2, p3)); PointType p4 = *(beg + maxp); simplex.x1 = tr(p1); simplex.x2 = tr(p2); simplex.x3 = tr(p3); simplex.x4 = tr(p4); simplex.i1 = minx; simplex.i2 = maxx; simplex.i3 = maxl; simplex.i4 = maxp; float maxy = transform_reduce(beg, end, Y(), std::numeric_limits<float>::min(), maximum<float>()); float miny = transform_reduce(beg, end, Y(), std::numeric_limits<float>::max(), minimum<float>()); float maxz = transform_reduce(beg, end, Z(), std::numeric_limits<float>::min(), maximum<float>()); float minz = transform_reduce(beg, end, Z(), std::numeric_limits<float>::max(), minimum<float>()); float dx = (p2.x - p1.x); float dy = (maxy - miny); 
float dz = (maxz - minz); cloud_diag = sqrt(dx*dx + dy*dy + dz*dz); simplex.p1 = compute_plane(simplex.x4, simplex.x2, simplex.x3, simplex.x1); simplex.p2 = compute_plane(simplex.x3, simplex.x1, simplex.x4, simplex.x2); simplex.p3 = compute_plane(simplex.x2, simplex.x1, simplex.x4, simplex.x3); simplex.p4 = compute_plane(simplex.x1, simplex.x2, simplex.x3, simplex.x4); } namespace pcl { namespace device { __global__ void init_fs(int i1, int i2, int i3, int i4, PtrStep<int> verts_inds) { *(int4*)verts_inds.ptr(0) = make_int4(i2, i1, i1, i1); *(int4*)verts_inds.ptr(1) = make_int4(i3, i3, i2, i2); *(int4*)verts_inds.ptr(2) = make_int4(i4, i4, i4, i3); } } } void pcl::device::FacetStream::setInitialFacets(const InitalSimplex& s) { hipLaunchKernelGGL(( init_fs), dim3(1), dim3(1), 0, 0, s.i1, s.i2, s.i3, s.i4, verts_inds); cudaSafeCall( hipGetLastError() ); cudaSafeCall( hipDeviceSynchronize() ); facet_count = 4; } /////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////// namespace pcl { namespace device { struct InitalClassify { float diag; float4 pl1, pl2, pl3, pl4; InitalClassify(const float4& p1, const float4& p2, const float4& p3, const float4& p4, float diagonal) : diag(diagonal), pl1(p1), pl2(p2), pl3(p3), pl4(p4) { pl1 *= compue_inv_normal_norm(pl1); pl2 *= compue_inv_normal_norm(pl2); pl3 *= compue_inv_normal_norm(pl3); pl4 *= compue_inv_normal_norm(pl4); } __device__ __forceinline__ uint64_type operator()(const PointType& p) const { float4 x = p; x.w = 1; float d0 = dot(pl1, x); float d1 = dot(pl2, x); float d2 = dot(pl3, x); float d3 = dot(pl4, x); float dists[] = { d0, d1, d2, d3 }; int negs_inds[4]; int neg_count = 0; int idx = numeric_limits<int>::max(); float dist = 0; #pragma unroll for(int i = 0; i < 4; ++i) if (dists[i] < 0) negs_inds[neg_count++] = i; if (neg_count == 3) { int i1 = negs_inds[1]; int i2 = negs_inds[2]; int ir = fabs(dists[i1]) < fabs(dists[i2]) ? i2 : i1; negs_inds[1] = ir; --neg_count; } if (neg_count == 2) { int i1 = negs_inds[0]; int i2 = negs_inds[1]; int ir = fabs(dists[i1]) < fabs(dists[i2]) ? i2 : i1; negs_inds[0] = ir; --neg_count; } if (neg_count == 1) { idx = negs_inds[0]; dist = diag - fabs(dists[idx]); // to ensure that sorting order is inverse, i.e. 
distant points go first } //if (neg_count == 0) // then internal point ==>> idx = INT_MAX uint64_type res = idx; res <<= 32; return res + *reinterpret_cast<unsigned int*>(&dist); } }; __global__ void initalClassifyKernel(const InitalClassify ic, const PointType* points, int cloud_size, uint64_type* output) { int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < cloud_size) output[index] = ic(points[index]); } } } void pcl::device::PointStream::initalClassify() { //thrust::device_ptr<const PointType> beg(cloud.ptr()); //thrust::device_ptr<const PointType> end = beg + cloud_size; thrust::device_ptr<uint64_type> out(facets_dists.ptr()); InitalClassify ic(simplex.p1, simplex.p2, simplex.p3, simplex.p4, cloud_diag); //thrust::transform(beg, end, out, ic); //printFuncAttrib(initalClassifyKernel); hipLaunchKernelGGL(( initalClassifyKernel), dim3(divUp(cloud_size, 256)), dim3(256), 0, 0, ic, cloud, cloud_size, facets_dists); cudaSafeCall( hipGetLastError() ); cudaSafeCall( hipDeviceSynchronize() ); thrust::device_ptr<int> pbeg(perm.ptr()); thrust::sort_by_key(out, out + cloud_size, pbeg); } /////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////// namespace pcl { namespace device { __device__ int new_cloud_size; struct SearchFacetHeads { uint64_type *facets_dists; int cloud_size; int facet_count; int *perm; const PointType* points; mutable int* head_points; //bool logger; __device__ __forceinline__ void operator()(int facet) const { const uint64_type* b = facets_dists; const uint64_type* e = b + cloud_size; bool last_thread = facet == facet_count; int search_value = !last_thread ? facet : numeric_limits<int>::max(); int index = lower_bound(b, e, search_value, LessThanByFacet()) - b; if (last_thread) new_cloud_size = index; else { bool not_found = index == cloud_size || (facet != (facets_dists[index] >> 32)); head_points[facet] = not_found ? -1 : perm[index]; } } }; __global__ void searchFacetHeadsKernel(const SearchFacetHeads sfh) { int facet = threadIdx.x + blockDim.x * blockIdx.x; if (facet <= sfh.facet_count) sfh(facet); } } } int pcl::device::PointStream::searchFacetHeads(size_t facet_count, DeviceArray<int>& head_points) { SearchFacetHeads sfh; sfh.facets_dists = facets_dists; sfh.cloud_size = (int)cloud_size; sfh.facet_count = (int)facet_count; sfh.perm = perm; sfh.points = cloud.ptr(); sfh.head_points = head_points; //thrust::counting_iterator<int> b(0); //thrust::counting_iterator<int> e = b + facet_count + 1; //thrust::for_each(b, e, sfh); hipLaunchKernelGGL(( searchFacetHeadsKernel), dim3(divUp(facet_count+1, 256)), dim3(256), 0, 0, sfh); cudaSafeCall( hipGetLastError() ); cudaSafeCall( hipDeviceSynchronize() ); int new_size; cudaSafeCall( hipMemcpyFromSymbol( (void*)&new_size, pcl::device::new_cloud_size, sizeof(new_size)) ); return new_size; } /////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////// namespace pcl { namespace device { struct NotMinus1 { __device__ __forceinline__ int operator()(const int& v) const { return (v == -1) ? 
0 : 1; } }; struct Compaction { enum { CTA_SIZE = 256, WARPS = CTA_SIZE/ Warp::WARP_SIZE }; int* head_points_in; PtrStep<int> verts_inds_in; int *scan_buffer; int facet_count; mutable int* head_points_out; mutable PtrStep<int> verts_inds_out; mutable PtrStep<int> empty_facets; mutable int *empty_count; __device__ __forceinline__ void operator()() const { int idx = threadIdx.x + blockIdx.x * blockDim.x; #if CUDART_VERSION >= 9000 if (__all_sync (__activemask (), idx >= facet_count)) return; #else if (__all (idx >= facet_count)) return; #endif int empty = 0; if(idx < facet_count) { int head_idx = head_points_in[idx]; if (head_idx != -1) { int offset = scan_buffer[idx]; head_points_out[offset] = head_idx; verts_inds_out.ptr(0)[offset] = verts_inds_in.ptr(0)[idx]; verts_inds_out.ptr(1)[offset] = verts_inds_in.ptr(1)[idx]; verts_inds_out.ptr(2)[offset] = verts_inds_in.ptr(2)[idx]; } else empty = 1; } #if CUDART_VERSION >= 9000 int total = __popc (__ballot_sync (__activemask (), empty)); #else int total = __popc (__ballot (empty)); #endif if (total > 0) { #if CUDART_VERSION >= 9000 int offset = Warp::binaryExclScan (__ballot_sync (__activemask (), empty)); #else int offset = Warp::binaryExclScan (__ballot (empty)); #endif volatile __shared__ int wapr_buffer[WARPS]; int laneid = Warp::laneId(); int warpid = Warp::id(); if (laneid == 0) { int old = atomicAdd(empty_count, total); wapr_buffer[warpid] = old; } int old = wapr_buffer[warpid]; if (empty) { empty_facets.ptr(0)[old + offset] = verts_inds_in.ptr(0)[idx]; empty_facets.ptr(1)[old + offset] = verts_inds_in.ptr(1)[idx]; empty_facets.ptr(2)[old + offset] = verts_inds_in.ptr(2)[idx]; int a1 = verts_inds_in.ptr(0)[idx], a2 = verts_inds_in.ptr(1)[idx], a3 = verts_inds_in.ptr(2)[idx]; } } } }; __global__ void compactionKernel( const Compaction c ) { c(); } } } void pcl::device::FacetStream::compactFacets() { int old_empty_count; empty_count.download(&old_empty_count); thrust::device_ptr<int> b(head_points.ptr()); thrust::device_ptr<int> e = b + facet_count; thrust::device_ptr<int> o(scan_buffer.ptr()); thrust::transform_exclusive_scan(b, e, o, NotMinus1(), 0, thrust::plus<int>()); Compaction c; c.verts_inds_in = verts_inds; c.head_points_in = head_points; c.scan_buffer = scan_buffer; c.facet_count = facet_count; c.head_points_out = head_points2; c.verts_inds_out = verts_inds2; c.empty_facets = empty_facets; c.empty_count = empty_count; int block = Compaction::CTA_SIZE; int grid = divUp(facet_count, block); hipLaunchKernelGGL(( compactionKernel), dim3(grid), dim3(block), 0, 0, c); cudaSafeCall( hipGetLastError() ); cudaSafeCall( hipDeviceSynchronize() ); verts_inds.swap(verts_inds2); head_points.swap(head_points2); int new_empty_count; empty_count.download(&new_empty_count); facet_count -= new_empty_count - old_empty_count; } /////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////// namespace pcl { namespace device { struct Classify { uint64_type* facets_dists; int* scan_buffer; int* head_points; int* perm; PtrStep<int> verts_inds; const PointType *points; float diag; int facet_count; __device__ __forceinline__ void operator()(int point_idx) const { int perm_index = perm[point_idx]; int facet = facets_dists[point_idx] >> 32; facet = scan_buffer[facet]; int hi = head_points[facet]; if (hi == perm_index) { uint64_type res = numeric_limits<int>::max(); res <<= 32; facets_dists[point_idx] = res; } else { int i1 = verts_inds.ptr(0)[facet]; int i2 
= verts_inds.ptr(1)[facet]; int i3 = verts_inds.ptr(2)[facet]; float3 hp = tr( points[ hi ] ); float3 v1 = tr( points[ i1 ] ); float3 v2 = tr( points[ i2 ] ); float3 v3 = tr( points[ i3 ] ); float4 p0 = compute_plane(hp, v1, v2, /*opposite*/v3); // j float4 p1 = compute_plane(hp, v2, v3, /*opposite*/v1); // facet_count + j float4 p2 = compute_plane(hp, v3, v1, /*opposite*/v2); // facet_count + j*2 p0 *= compue_inv_normal_norm(p0); p1 *= compue_inv_normal_norm(p1); p2 *= compue_inv_normal_norm(p2); float4 p = points[perm_index]; p.w = 1; float d0 = dot(p, p0); float d1 = dot(p, p1); float d2 = dot(p, p2); float dists[] = { d0, d1, d2 }; int negs_inds[3]; int neg_count = 0; int new_idx = numeric_limits<int>::max(); float dist = 0; int indeces[] = { facet, facet + facet_count, facet + facet_count * 2 }; #pragma unroll for(int i = 0; i < 3; ++i) if (dists[i] < 0) negs_inds[neg_count++] = i; if (neg_count == 3) { int i1 = negs_inds[1]; int i2 = negs_inds[2]; int ir = fabs(dists[i1]) < fabs(dists[i2]) ? i2 : i1; negs_inds[1] = ir; --neg_count; } if (neg_count == 2) { int i1 = negs_inds[0]; int i2 = negs_inds[1]; int ir = fabs(dists[i1]) < fabs(dists[i2]) ? i2 : i1; negs_inds[0] = ir; --neg_count; } if (neg_count == 1) { new_idx = negs_inds[0]; dist = diag - fabs(dists[new_idx]); // to ensure that sorting order is inverse, i.e. distant points go first new_idx = indeces[new_idx]; } // if (neg_count == 0) // new_idx = INT_MAX ==>> internal point uint64_type res = new_idx; res <<= 32; res += *reinterpret_cast<unsigned int*>(&dist); facets_dists[point_idx] = res; } /* if (hi == perm_index) */ } }; __global__ void classifyKernel(const Classify c, int cloud_size) { int point_idx = threadIdx.x + blockIdx.x * blockDim.x; if ( point_idx < cloud_size ) c(point_idx); } } } void pcl::device::PointStream::classify(FacetStream& fs) { Classify c; c.facets_dists = facets_dists; c.scan_buffer = fs.scan_buffer; c.head_points = fs.head_points; c.perm = perm; c.verts_inds = fs.verts_inds; c.points = cloud; c.diag = cloud_diag; c.facet_count = fs.facet_count; //thrust::counting_iterator<int> b(0); //thrust::for_each(b, b + cloud_size, c); hipLaunchKernelGGL(( classifyKernel), dim3(divUp(cloud_size, 256)), dim3(256), 0, 0, c, cloud_size); cudaSafeCall( hipGetLastError() ); cudaSafeCall( hipDeviceSynchronize() ); thrust::device_ptr<uint64_type> beg(facets_dists.ptr()); thrust::device_ptr<uint64_type> end = beg + cloud_size; thrust::device_ptr<int> pbeg(perm.ptr()); thrust::sort_by_key(beg, end, pbeg); } namespace pcl { namespace device { struct SplitFacets { int* head_points; int facet_count; mutable PtrStep<int> verts_inds; __device__ __forceinline__ void operator()(int facet) const { int hi = head_points[facet]; int i1 = verts_inds.ptr(0)[facet]; int i2 = verts_inds.ptr(1)[facet]; int i3 = verts_inds.ptr(2)[facet]; make_facet(hi, i1, i2, facet); make_facet(hi, i2, i3, facet + facet_count); make_facet(hi, i3, i1, facet + facet_count * 2); } __device__ __forceinline__ void make_facet(int i1, int i2, int i3, int out_idx) const { verts_inds.ptr(0)[out_idx] = i1; verts_inds.ptr(1)[out_idx] = i2; verts_inds.ptr(2)[out_idx] = i3; } }; __global__ void splitFacetsKernel(const SplitFacets sf) { int facet = threadIdx.x + blockIdx.x * blockDim.x; if (facet < sf.facet_count) sf(facet); } } } void pcl::device::FacetStream::splitFacets() { SplitFacets sf; sf.head_points = head_points; sf.verts_inds = verts_inds; sf.facet_count = facet_count; //thrust::counting_iterator<int> b(0); //thrust::for_each(b, b + facet_count, sf); 
hipLaunchKernelGGL(( splitFacetsKernel), dim3(divUp(facet_count, 256)), dim3(256), 0, 0, sf); cudaSafeCall( hipGetLastError() ); cudaSafeCall( hipDeviceSynchronize() ); facet_count *= 3; } size_t pcl::device::remove_duplicates(DeviceArray<int>& indeces) { thrust::device_ptr<int> beg(indeces.ptr()); thrust::device_ptr<int> end = beg + indeces.size(); thrust::sort(beg, end); return (size_t)(thrust::unique(beg, end) - beg); } namespace pcl { namespace device { __global__ void gatherKernel(const PtrSz<int> indeces, const PointType* src, PointType* dst) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < indeces.size) dst[idx] = src[indeces.data[idx]]; } } } void pcl::device::pack_hull(const DeviceArray<PointType>& points, const DeviceArray<int>& indeces, DeviceArray<PointType>& output) { output.create(indeces.size()); //device_ptr<const PointType> in(points.ptr()); //thrust::device_ptr<const int> mb(indeces.ptr()); //thrust::device_ptr<const int> me = mb + indeces.size(); //device_ptr<PointType> out(output.ptr()); //thrust::gather(mb, me, in, out); hipLaunchKernelGGL(( gatherKernel), dim3(divUp(indeces.size(), 256)), dim3(256), 0, 0, indeces, points, output); cudaSafeCall( hipGetLastError() ); cudaSafeCall( hipDeviceSynchronize() ); }
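// A host-side sketch (helper names are mine; uint64_type is written out as
// unsigned long long) of the packed sort key built by InitalClassify and
// Classify above: the facet index occupies the high 32 bits and the bit pattern
// of dist = diag - fabs(d) the low 32 bits, so an ascending sort groups points
// by facet and places each facet's farthest point first; interior points get
// facet INT_MAX and therefore sort to the tail, which searchFacetHeadsKernel
// exploits with lower_bound.
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <cstring>
#include <vector>

static unsigned long long pack_key(int facet, float dist)
{
  unsigned int bits;
  std::memcpy(&bits, &dist, sizeof bits);   // same effect as *reinterpret_cast<unsigned int*>(&dist)
  return ((unsigned long long)facet << 32) + bits;
}

static int facet_of(unsigned long long key) { return (int)(key >> 32); }

int main()
{
  const float diag = 100.f;                 // stands in for cloud_diag
  struct Sample { int facet; float d; };    // d = signed distance to the facet plane
  std::vector<Sample> pts = { {1, -7.f}, {0, -2.f}, {1, -1.f}, {0, -9.f} };

  std::vector<unsigned long long> keys;
  for (const Sample& s : pts)
    keys.push_back(pack_key(s.facet, diag - std::fabs(s.d)));

  std::sort(keys.begin(), keys.end());      // thrust::sort_by_key plays this role on the GPU

  for (unsigned long long k : keys)
    printf("facet %d  dist-bits 0x%08llx\n", facet_of(k), k & 0xffffffffULL);
  // Prints facet 0 twice (|d| = 9 before |d| = 2), then facet 1 (|d| = 7 before |d| = 1).
  return 0;
}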
855730ae1a5f5b83483c2850dcf5c0a472aed7dc.cu
/* * Software License Agreement (BSD License) * * Point Cloud Library (PCL) - www.pointclouds.org * Copyright (c) 2011, Willow Garage, Inc. * * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of Willow Garage, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * */ #include "internal.h" #include "device.h" #include <limits> #include <pcl/gpu/utils/device/limits.hpp> #include <pcl/gpu/utils/device/algorithm.hpp> #include <pcl/gpu/utils/device/warp.hpp> #include <pcl/gpu/utils/device/static_check.hpp> //#include <pcl/gpu/utils/device/funcattrib.hpp> #include <pcl/gpu/utils/safe_call.hpp> #include <thrust/tuple.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/iterator/zip_iterator.h> #include <thrust/transform_reduce.h> #include <thrust/functional.h> #include <thrust/sequence.h> #include "thrust/device_ptr.h" #include <thrust/transform.h> #include <thrust/sort.h> #include <thrust/transform_scan.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/unique.h> #include <thrust/gather.h> using namespace thrust; using namespace std; namespace pcl { namespace device { __global__ void size_check() { Static<sizeof(uint64_type) == 8>::check(); }; template<bool use_max> struct IndOp { __device__ __forceinline__ thrust::tuple<float, int> operator()(const thrust::tuple<float, int>& e1, const thrust::tuple<float, int>& e2) const { thrust::tuple<float, int> res; if (use_max) res.get<0>() = fmax(e1.get<0>(), e2.get<0>()); else res.get<0>() = fmin(e1.get<0>(), e2.get<0>()); res.get<1>() = (res.get<0>() == e1.get<0>()) ? 
e1.get<1>() : e2.get<1>(); return res; } }; struct X { __device__ __forceinline__ thrust::tuple<float, int> operator()(const thrust::tuple<PointType, int>& in) const { return thrust::tuple<float, int>(in.get<0>().x, in.get<1>()); } }; struct Y { __device__ __forceinline__ float operator()(const PointType& in) const { return in.y; } }; struct Z { __device__ __forceinline__ float operator()(const PointType& in) const { return in.z; } }; struct LineDist { float3 x1, x2; LineDist(const PointType& p1, const PointType& p2) : x1(tr(p1)), x2(tr(p2)) {} __device__ __forceinline__ thrust::tuple<float, int> operator()(const thrust::tuple<PointType, int>& in) const { float3 x0 = tr(in.get<0>()); float dist = norm(cross(x0 - x1, x0 - x2))/norm(x1 - x2); return thrust::tuple<float, int>(dist, in.get<1>()); } }; struct PlaneDist { float3 x1, n; PlaneDist(const PointType& p1, const PointType& p2, const PointType& p3) : x1(tr(p1)) { float3 x2 = tr(p2), x3 = tr(p3); n = normalized(cross(x2 - x1, x3 - x1)); } __device__ __forceinline__ thrust::tuple<float, int> operator()(const thrust::tuple<PointType, int>& in) const { float3 x0 = tr(in.get<0>()); float dist = fabs(dot(n, x0 - x1)); return thrust::tuple<float, int>(dist, in.get<1>()); } }; template<typename It, typename Unary, typename Init, typename Binary> int transform_reduce_index(It beg, It end, Unary unop, Init init, Binary binary) { counting_iterator<int> cbeg(0); counting_iterator<int> cend = cbeg + thrust::distance(beg, end); thrust::tuple<float, int> t = transform_reduce( make_zip_iterator(thrust::make_tuple(beg, cbeg)), make_zip_iterator(thrust::make_tuple(end, cend)), unop, init, binary); return t.get<1>(); } template<typename It, typename Unary> int transform_reduce_min_index(It beg, It end, Unary unop) { thrust::tuple<float, int> min_tuple(std::numeric_limits<float>::max(), 0); return transform_reduce_index(beg, end, unop, min_tuple, IndOp<false>()); } template<typename It, typename Unary> int transform_reduce_max_index(It beg, It end, Unary unop) { thrust::tuple<float, int> max_tuple(std::numeric_limits<float>::min(), 0); return transform_reduce_index(beg, end, unop, max_tuple, IndOp<true>()); } } } pcl::device::PointStream::PointStream(const Cloud& cloud_) : cloud(cloud_) { cloud_size = cloud.size(); facets_dists.create(cloud_size); perm.create(cloud_size); device_ptr<int> pbeg(perm.ptr()); thrust::sequence(pbeg, pbeg + cloud_size); } void pcl::device::PointStream::computeInitalSimplex() { device_ptr<const PointType> beg(cloud.ptr()); device_ptr<const PointType> end = beg + cloud_size; int minx = transform_reduce_min_index(beg, end, X()); int maxx = transform_reduce_max_index(beg, end, X()); PointType p1 = *(beg + minx); PointType p2 = *(beg + maxx); int maxl = transform_reduce_max_index(beg, end, LineDist(p1, p2)); PointType p3 = *(beg + maxl); int maxp = transform_reduce_max_index(beg, end, PlaneDist(p1, p2, p3)); PointType p4 = *(beg + maxp); simplex.x1 = tr(p1); simplex.x2 = tr(p2); simplex.x3 = tr(p3); simplex.x4 = tr(p4); simplex.i1 = minx; simplex.i2 = maxx; simplex.i3 = maxl; simplex.i4 = maxp; float maxy = transform_reduce(beg, end, Y(), std::numeric_limits<float>::min(), maximum<float>()); float miny = transform_reduce(beg, end, Y(), std::numeric_limits<float>::max(), minimum<float>()); float maxz = transform_reduce(beg, end, Z(), std::numeric_limits<float>::min(), maximum<float>()); float minz = transform_reduce(beg, end, Z(), std::numeric_limits<float>::max(), minimum<float>()); float dx = (p2.x - p1.x); float dy = (maxy - miny); 
float dz = (maxz - minz); cloud_diag = sqrt(dx*dx + dy*dy + dz*dz); simplex.p1 = compute_plane(simplex.x4, simplex.x2, simplex.x3, simplex.x1); simplex.p2 = compute_plane(simplex.x3, simplex.x1, simplex.x4, simplex.x2); simplex.p3 = compute_plane(simplex.x2, simplex.x1, simplex.x4, simplex.x3); simplex.p4 = compute_plane(simplex.x1, simplex.x2, simplex.x3, simplex.x4); } namespace pcl { namespace device { __global__ void init_fs(int i1, int i2, int i3, int i4, PtrStep<int> verts_inds) { *(int4*)verts_inds.ptr(0) = make_int4(i2, i1, i1, i1); *(int4*)verts_inds.ptr(1) = make_int4(i3, i3, i2, i2); *(int4*)verts_inds.ptr(2) = make_int4(i4, i4, i4, i3); } } } void pcl::device::FacetStream::setInitialFacets(const InitalSimplex& s) { init_fs<<<1, 1>>>(s.i1, s.i2, s.i3, s.i4, verts_inds); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); facet_count = 4; } /////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////// namespace pcl { namespace device { struct InitalClassify { float diag; float4 pl1, pl2, pl3, pl4; InitalClassify(const float4& p1, const float4& p2, const float4& p3, const float4& p4, float diagonal) : diag(diagonal), pl1(p1), pl2(p2), pl3(p3), pl4(p4) { pl1 *= compue_inv_normal_norm(pl1); pl2 *= compue_inv_normal_norm(pl2); pl3 *= compue_inv_normal_norm(pl3); pl4 *= compue_inv_normal_norm(pl4); } __device__ __forceinline__ uint64_type operator()(const PointType& p) const { float4 x = p; x.w = 1; float d0 = dot(pl1, x); float d1 = dot(pl2, x); float d2 = dot(pl3, x); float d3 = dot(pl4, x); float dists[] = { d0, d1, d2, d3 }; int negs_inds[4]; int neg_count = 0; int idx = numeric_limits<int>::max(); float dist = 0; #pragma unroll for(int i = 0; i < 4; ++i) if (dists[i] < 0) negs_inds[neg_count++] = i; if (neg_count == 3) { int i1 = negs_inds[1]; int i2 = negs_inds[2]; int ir = fabs(dists[i1]) < fabs(dists[i2]) ? i2 : i1; negs_inds[1] = ir; --neg_count; } if (neg_count == 2) { int i1 = negs_inds[0]; int i2 = negs_inds[1]; int ir = fabs(dists[i1]) < fabs(dists[i2]) ? i2 : i1; negs_inds[0] = ir; --neg_count; } if (neg_count == 1) { idx = negs_inds[0]; dist = diag - fabs(dists[idx]); // to ensure that sorting order is inverse, i.e. 
distant points go first } //if (neg_count == 0) // then internal point ==>> idx = INT_MAX uint64_type res = idx; res <<= 32; return res + *reinterpret_cast<unsigned int*>(&dist); } }; __global__ void initalClassifyKernel(const InitalClassify ic, const PointType* points, int cloud_size, uint64_type* output) { int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < cloud_size) output[index] = ic(points[index]); } } } void pcl::device::PointStream::initalClassify() { //thrust::device_ptr<const PointType> beg(cloud.ptr()); //thrust::device_ptr<const PointType> end = beg + cloud_size; thrust::device_ptr<uint64_type> out(facets_dists.ptr()); InitalClassify ic(simplex.p1, simplex.p2, simplex.p3, simplex.p4, cloud_diag); //thrust::transform(beg, end, out, ic); //printFuncAttrib(initalClassifyKernel); initalClassifyKernel<<<divUp(cloud_size, 256), 256>>>(ic, cloud, cloud_size, facets_dists); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); thrust::device_ptr<int> pbeg(perm.ptr()); thrust::sort_by_key(out, out + cloud_size, pbeg); } /////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////// namespace pcl { namespace device { __device__ int new_cloud_size; struct SearchFacetHeads { uint64_type *facets_dists; int cloud_size; int facet_count; int *perm; const PointType* points; mutable int* head_points; //bool logger; __device__ __forceinline__ void operator()(int facet) const { const uint64_type* b = facets_dists; const uint64_type* e = b + cloud_size; bool last_thread = facet == facet_count; int search_value = !last_thread ? facet : numeric_limits<int>::max(); int index = lower_bound(b, e, search_value, LessThanByFacet()) - b; if (last_thread) new_cloud_size = index; else { bool not_found = index == cloud_size || (facet != (facets_dists[index] >> 32)); head_points[facet] = not_found ? -1 : perm[index]; } } }; __global__ void searchFacetHeadsKernel(const SearchFacetHeads sfh) { int facet = threadIdx.x + blockDim.x * blockIdx.x; if (facet <= sfh.facet_count) sfh(facet); } } } int pcl::device::PointStream::searchFacetHeads(size_t facet_count, DeviceArray<int>& head_points) { SearchFacetHeads sfh; sfh.facets_dists = facets_dists; sfh.cloud_size = (int)cloud_size; sfh.facet_count = (int)facet_count; sfh.perm = perm; sfh.points = cloud.ptr(); sfh.head_points = head_points; //thrust::counting_iterator<int> b(0); //thrust::counting_iterator<int> e = b + facet_count + 1; //thrust::for_each(b, e, sfh); searchFacetHeadsKernel<<<divUp(facet_count+1, 256), 256>>>(sfh); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); int new_size; cudaSafeCall( cudaMemcpyFromSymbol( (void*)&new_size, pcl::device::new_cloud_size, sizeof(new_size)) ); return new_size; } /////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////// namespace pcl { namespace device { struct NotMinus1 { __device__ __forceinline__ int operator()(const int& v) const { return (v == -1) ? 
0 : 1; } }; struct Compaction { enum { CTA_SIZE = 256, WARPS = CTA_SIZE/ Warp::WARP_SIZE }; int* head_points_in; PtrStep<int> verts_inds_in; int *scan_buffer; int facet_count; mutable int* head_points_out; mutable PtrStep<int> verts_inds_out; mutable PtrStep<int> empty_facets; mutable int *empty_count; __device__ __forceinline__ void operator()() const { int idx = threadIdx.x + blockIdx.x * blockDim.x; #if CUDART_VERSION >= 9000 if (__all_sync (__activemask (), idx >= facet_count)) return; #else if (__all (idx >= facet_count)) return; #endif int empty = 0; if(idx < facet_count) { int head_idx = head_points_in[idx]; if (head_idx != -1) { int offset = scan_buffer[idx]; head_points_out[offset] = head_idx; verts_inds_out.ptr(0)[offset] = verts_inds_in.ptr(0)[idx]; verts_inds_out.ptr(1)[offset] = verts_inds_in.ptr(1)[idx]; verts_inds_out.ptr(2)[offset] = verts_inds_in.ptr(2)[idx]; } else empty = 1; } #if CUDART_VERSION >= 9000 int total = __popc (__ballot_sync (__activemask (), empty)); #else int total = __popc (__ballot (empty)); #endif if (total > 0) { #if CUDART_VERSION >= 9000 int offset = Warp::binaryExclScan (__ballot_sync (__activemask (), empty)); #else int offset = Warp::binaryExclScan (__ballot (empty)); #endif volatile __shared__ int wapr_buffer[WARPS]; int laneid = Warp::laneId(); int warpid = Warp::id(); if (laneid == 0) { int old = atomicAdd(empty_count, total); wapr_buffer[warpid] = old; } int old = wapr_buffer[warpid]; if (empty) { empty_facets.ptr(0)[old + offset] = verts_inds_in.ptr(0)[idx]; empty_facets.ptr(1)[old + offset] = verts_inds_in.ptr(1)[idx]; empty_facets.ptr(2)[old + offset] = verts_inds_in.ptr(2)[idx]; int a1 = verts_inds_in.ptr(0)[idx], a2 = verts_inds_in.ptr(1)[idx], a3 = verts_inds_in.ptr(2)[idx]; } } } }; __global__ void compactionKernel( const Compaction c ) { c(); } } } void pcl::device::FacetStream::compactFacets() { int old_empty_count; empty_count.download(&old_empty_count); thrust::device_ptr<int> b(head_points.ptr()); thrust::device_ptr<int> e = b + facet_count; thrust::device_ptr<int> o(scan_buffer.ptr()); thrust::transform_exclusive_scan(b, e, o, NotMinus1(), 0, thrust::plus<int>()); Compaction c; c.verts_inds_in = verts_inds; c.head_points_in = head_points; c.scan_buffer = scan_buffer; c.facet_count = facet_count; c.head_points_out = head_points2; c.verts_inds_out = verts_inds2; c.empty_facets = empty_facets; c.empty_count = empty_count; int block = Compaction::CTA_SIZE; int grid = divUp(facet_count, block); compactionKernel<<<grid, block>>>(c); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); verts_inds.swap(verts_inds2); head_points.swap(head_points2); int new_empty_count; empty_count.download(&new_empty_count); facet_count -= new_empty_count - old_empty_count; } /////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////// namespace pcl { namespace device { struct Classify { uint64_type* facets_dists; int* scan_buffer; int* head_points; int* perm; PtrStep<int> verts_inds; const PointType *points; float diag; int facet_count; __device__ __forceinline__ void operator()(int point_idx) const { int perm_index = perm[point_idx]; int facet = facets_dists[point_idx] >> 32; facet = scan_buffer[facet]; int hi = head_points[facet]; if (hi == perm_index) { uint64_type res = numeric_limits<int>::max(); res <<= 32; facets_dists[point_idx] = res; } else { int i1 = verts_inds.ptr(0)[facet]; int i2 = verts_inds.ptr(1)[facet]; int i3 
= verts_inds.ptr(2)[facet]; float3 hp = tr( points[ hi ] ); float3 v1 = tr( points[ i1 ] ); float3 v2 = tr( points[ i2 ] ); float3 v3 = tr( points[ i3 ] ); float4 p0 = compute_plane(hp, v1, v2, /*opposite*/v3); // j float4 p1 = compute_plane(hp, v2, v3, /*opposite*/v1); // facet_count + j float4 p2 = compute_plane(hp, v3, v1, /*opposite*/v2); // facet_count + j*2 p0 *= compue_inv_normal_norm(p0); p1 *= compue_inv_normal_norm(p1); p2 *= compue_inv_normal_norm(p2); float4 p = points[perm_index]; p.w = 1; float d0 = dot(p, p0); float d1 = dot(p, p1); float d2 = dot(p, p2); float dists[] = { d0, d1, d2 }; int negs_inds[3]; int neg_count = 0; int new_idx = numeric_limits<int>::max(); float dist = 0; int indeces[] = { facet, facet + facet_count, facet + facet_count * 2 }; #pragma unroll for(int i = 0; i < 3; ++i) if (dists[i] < 0) negs_inds[neg_count++] = i; if (neg_count == 3) { int i1 = negs_inds[1]; int i2 = negs_inds[2]; int ir = fabs(dists[i1]) < fabs(dists[i2]) ? i2 : i1; negs_inds[1] = ir; --neg_count; } if (neg_count == 2) { int i1 = negs_inds[0]; int i2 = negs_inds[1]; int ir = fabs(dists[i1]) < fabs(dists[i2]) ? i2 : i1; negs_inds[0] = ir; --neg_count; } if (neg_count == 1) { new_idx = negs_inds[0]; dist = diag - fabs(dists[new_idx]); // to ensure that sorting order is inverse, i.e. distant points go first new_idx = indeces[new_idx]; } // if (neg_count == 0) // new_idx = INT_MAX ==>> internal point uint64_type res = new_idx; res <<= 32; res += *reinterpret_cast<unsigned int*>(&dist); facets_dists[point_idx] = res; } /* if (hi == perm_index) */ } }; __global__ void classifyKernel(const Classify c, int cloud_size) { int point_idx = threadIdx.x + blockIdx.x * blockDim.x; if ( point_idx < cloud_size ) c(point_idx); } } } void pcl::device::PointStream::classify(FacetStream& fs) { Classify c; c.facets_dists = facets_dists; c.scan_buffer = fs.scan_buffer; c.head_points = fs.head_points; c.perm = perm; c.verts_inds = fs.verts_inds; c.points = cloud; c.diag = cloud_diag; c.facet_count = fs.facet_count; //thrust::counting_iterator<int> b(0); //thrust::for_each(b, b + cloud_size, c); classifyKernel<<<divUp(cloud_size, 256), 256>>>(c, cloud_size); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); thrust::device_ptr<uint64_type> beg(facets_dists.ptr()); thrust::device_ptr<uint64_type> end = beg + cloud_size; thrust::device_ptr<int> pbeg(perm.ptr()); thrust::sort_by_key(beg, end, pbeg); } namespace pcl { namespace device { struct SplitFacets { int* head_points; int facet_count; mutable PtrStep<int> verts_inds; __device__ __forceinline__ void operator()(int facet) const { int hi = head_points[facet]; int i1 = verts_inds.ptr(0)[facet]; int i2 = verts_inds.ptr(1)[facet]; int i3 = verts_inds.ptr(2)[facet]; make_facet(hi, i1, i2, facet); make_facet(hi, i2, i3, facet + facet_count); make_facet(hi, i3, i1, facet + facet_count * 2); } __device__ __forceinline__ void make_facet(int i1, int i2, int i3, int out_idx) const { verts_inds.ptr(0)[out_idx] = i1; verts_inds.ptr(1)[out_idx] = i2; verts_inds.ptr(2)[out_idx] = i3; } }; __global__ void splitFacetsKernel(const SplitFacets sf) { int facet = threadIdx.x + blockIdx.x * blockDim.x; if (facet < sf.facet_count) sf(facet); } } } void pcl::device::FacetStream::splitFacets() { SplitFacets sf; sf.head_points = head_points; sf.verts_inds = verts_inds; sf.facet_count = facet_count; //thrust::counting_iterator<int> b(0); //thrust::for_each(b, b + facet_count, sf); splitFacetsKernel<<<divUp(facet_count, 256), 256>>>(sf); cudaSafeCall( 
cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); facet_count *= 3; } size_t pcl::device::remove_duplicates(DeviceArray<int>& indeces) { thrust::device_ptr<int> beg(indeces.ptr()); thrust::device_ptr<int> end = beg + indeces.size(); thrust::sort(beg, end); return (size_t)(thrust::unique(beg, end) - beg); } namespace pcl { namespace device { __global__ void gatherKernel(const PtrSz<int> indeces, const PointType* src, PointType* dst) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < indeces.size) dst[idx] = src[indeces.data[idx]]; } } } void pcl::device::pack_hull(const DeviceArray<PointType>& points, const DeviceArray<int>& indeces, DeviceArray<PointType>& output) { output.create(indeces.size()); //device_ptr<const PointType> in(points.ptr()); //thrust::device_ptr<const int> mb(indeces.ptr()); //thrust::device_ptr<const int> me = mb + indeces.size(); //device_ptr<PointType> out(output.ptr()); //thrust::gather(mb, me, in, out); gatherKernel<<<divUp(indeces.size(), 256), 256>>>(indeces, points, output); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); }
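// A standalone sketch (example data and functor names are my own) of the
// argmax-with-index pattern behind transform_reduce_index above: zip the values
// with a counting_iterator, map each (value, index) pair to a (score, index)
// tuple, and reduce with an operator that keeps the tuple carrying the larger
// score; the real code plugs in X, LineDist or PlaneDist as the scoring functor
// and IndOp<true/false> as the reduction.
#include <cstdio>
#include <thrust/device_vector.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/transform_reduce.h>
#include <thrust/tuple.h>

struct Score      // stands in for X / LineDist / PlaneDist
{
  __host__ __device__
  thrust::tuple<float, int> operator()(const thrust::tuple<float, int>& in) const
  {
    // Here the score is the value itself; the originals compute a coordinate or
    // a point-to-line / point-to-plane distance instead.
    return thrust::make_tuple(thrust::get<0>(in), thrust::get<1>(in));
  }
};

struct MaxByScore // stands in for IndOp<true>
{
  __host__ __device__
  thrust::tuple<float, int> operator()(const thrust::tuple<float, int>& a,
                                       const thrust::tuple<float, int>& b) const
  {
    return thrust::get<0>(a) >= thrust::get<0>(b) ? a : b;
  }
};

int main()
{
  float h[] = { 3.f, 9.f, 1.f, 7.f };
  thrust::device_vector<float> d(h, h + 4);

  thrust::counting_iterator<int> cbeg(0);
  thrust::tuple<float, int> init(-1e30f, -1);

  thrust::tuple<float, int> best = thrust::transform_reduce(
      thrust::make_zip_iterator(thrust::make_tuple(d.begin(), cbeg)),
      thrust::make_zip_iterator(thrust::make_tuple(d.end(), cbeg + 4)),
      Score(), init, MaxByScore());

  printf("max value %.1f at index %d\n",
         thrust::get<0>(best), thrust::get<1>(best));   // 9.0 at index 1
  return 0;
}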
55729391ae7befdde47d1eb969755227b145d17d.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <fstream> #include <algorithm> #include "timer.h" #include "GDALRead.h" #include <hip/device_functions.h> #include <hip/hip_runtime_api.h> #include "utils.h" #include "basestruct.h" #include "operator.h" #include <unistd.h> void loadBlockData(int width, int data_height, int data_start, int** h_subData, CGDALRead* pread); __device__ int find(int * localLabel, int p) { if (localLabel[p] != -1) { while (p != localLabel[p]) { p = localLabel[p]; } return p; } else return -1; } __device__ void findAndUnion(int* buf, int g1, int g2) { bool done; do { g1 = find(buf, g1); g2 = find(buf, g2); // it should hold that g1 == buf[g1] and g2 == buf[g2] now if (g1 < g2) { int old = atomicMin(&buf[g2], g1); done = (old == g2); g2 = old; } else if (g2 < g1) { int old = atomicMin(&buf[g1], g2); done = (old == g1); g1 = old; } else { done = true; } } while (!done); } __global__ void gpuLineLocal(int* devSrcData, int * devLabelMap, int width, int task_height, int nodata) { //int id = threadIdx.x + threadIdx.y * blockDim.x + (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x * blockDim.y; //if (id > imgDimension.x * imgDimension.y) return; //int tid = threadIdx.x + threadIdx.y * blockDim.x; int tid = threadIdx.x; int x = threadIdx.x + blockDim.y * blockDim.x * blockIdx.x; int y = blockIdx.y; bool limits = x < width && y < task_height; int id = x + y * width; __shared__ int localLabel[32 * 16]; if (limits) { localLabel[tid] = tid; __syncthreads(); int focusP = devSrcData[x + y * width]; if (focusP != nodata && threadIdx.x > 0 && focusP == devSrcData[x - 1 + y * width]) localLabel[tid] = localLabel[tid - 1]; __syncthreads(); int buf = tid; while (buf != localLabel[buf]) { buf = localLabel[buf]; localLabel[tid] = buf; } int globalL = (blockIdx.x * blockDim.x + buf) + (blockIdx.y) * width; devLabelMap[id] = globalL; if (focusP == nodata) devLabelMap[id] = -1; } } __global__ void gpuLineUfGlobal(int* devSrcData, int * devLabelMap, int width, int task_height, int nodata) { int x = threadIdx.x + blockDim.y * blockDim.x * blockIdx.x; int y = blockIdx.y; int gid = x + y * width; bool in_limits = x < width && y < task_height; if (in_limits) { int center = devSrcData[gid]; if (center != nodata) { // search neighbour, left and up //if (x > 0 && threadIdx.x == 0 && center == devSrcData[x - 1 + y * imgDimension.x]) // findAndUnion(devLabelMap, gid, x - 1 + y * imgDimension.x); // left //if (y > 0 && threadIdx.y == 0 && center == devSrcData[x + (y - 1) * imgDimension.x]) // findAndUnion(devLabelMap, gid, x + (y - 1) * imgDimension.x); // up if (x > 0 && threadIdx.x == 0)//&& center == left { if (center == devSrcData[x - 1 + y * width]) findAndUnion(devLabelMap, gid, x - 1 + y * width); // left } if (y > 0 && threadIdx.y == 0)//&& center == up { if (center == devSrcData[x + (y - 1) * width]) findAndUnion(devLabelMap, gid, x + (y - 1) * width); // up } if (y > 0 && x > 0 && threadIdx.y == 0)// && center == leftup { if (center == devSrcData[x - 1 + (y - 1) * width]) findAndUnion(devLabelMap, gid, x - 1 + (y - 1) * width); // up-left } if (y > 0 && x < width - 1 && threadIdx.y == 0)// && center == rightup { if (center == devSrcData[x + 1 + (y - 1) * width]) findAndUnion(devLabelMap, gid, x + 1 + (y - 1) * width); // up-right } } } } __global__ void gpuLineUfFinal(int * labelMap, int width, int task_height) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + 
threadIdx.y; bool limits = x < width && y < task_height; int gid = x + y * width; if (limits) labelMap[gid] = find(labelMap, gid); } __global__ void getEachPixelPeriTop1(int *dev_iData, int *dev_iData_last,int*dev_PixelPerimeter, int width, int task_height, int data_start, int data_end, int task_start, int task_end) { int x = threadIdx.x + blockDim.y * blockDim.x * blockIdx.x; int y = blockIdx.y; int gid = x + y * width; bool in_limits = x < width && y < task_height;//task if (in_limits) { int center = dev_iData[gid]; if (x == 0) { dev_PixelPerimeter[gid] += 1; } if (x == width - 1) { dev_PixelPerimeter[gid] += 1; } if (y == 0) { dev_PixelPerimeter[gid] += 1; } if(y == task_height - 1) { if(center!=dev_iData_last[x]) { dev_PixelPerimeter[gid] += 1; } } if (x>0) { if (center != dev_iData[gid - 1]) { dev_PixelPerimeter[gid] += 1; } } if (x < width - 1) { if (center != dev_iData[gid + 1]) { dev_PixelPerimeter[gid] += 1; } } if (y > 0) { if (center != dev_iData[gid - width]) { dev_PixelPerimeter[gid] += 1; } } if(y < task_height - 1) { if (center != dev_iData[gid + width])//down { dev_PixelPerimeter[gid] += 1; } } } } __global__ void getEachPixelPeriMid2(int *dev_iData, int*data_startValue,int *dev_dataLastValue, int*dev_PixelPerimeter, int width, int task_height, int data_start, int data_end, int task_start, int task_end) { int x = threadIdx.x + blockDim.y * blockDim.x * blockIdx.x; int y = blockIdx.y; int gid = x + y * width; bool in_limits = x < width && y < task_height;//task if (in_limits) { int center = dev_iData[gid]; if (x == 0) { dev_PixelPerimeter[gid] += 1; } if (x == width - 1) { dev_PixelPerimeter[gid] += 1; } if (x>0) { if (center != dev_iData[gid - 1]) { dev_PixelPerimeter[gid] += 1; } } if (x < width - 1) { if (center != dev_iData[gid + 1]) { dev_PixelPerimeter[gid] += 1; } } if (y>0) { if (center != dev_iData[gid - width]) { dev_PixelPerimeter[gid] += 1; } } if (y == 0) { if (center != data_startValue[x]) { dev_PixelPerimeter[gid] += 1; } } if(y == task_height - 1) { if(center != dev_dataLastValue[x]) { dev_PixelPerimeter[gid] += 1; } } if(y < task_height-1) { if (center != dev_iData[gid + width])//down { dev_PixelPerimeter[gid] += 1; } } } } __global__ void getEachPixelPeriBottom(int *dev_iData, int*data_startValue, int*dev_PixelPerimeter, int width, int task_height, int data_start, int data_end, int task_start, int task_end) { int x = threadIdx.x + blockDim.y * blockDim.x * blockIdx.x; int y = blockIdx.y; int gid = x + y * width; bool in_limits = x < width && y < task_height;//task if (in_limits) { int center = dev_iData[gid]; if (x == 0) { dev_PixelPerimeter[gid] += 1; } if (x == width - 1) { dev_PixelPerimeter[gid] += 1; } if (y == task_height - 1) { dev_PixelPerimeter[gid] += 1; } if (x > 0) { if (center != dev_iData[gid - 1]) { dev_PixelPerimeter[gid] += 1; } } if (x < width - 1) { if (center != dev_iData[gid + 1]) { dev_PixelPerimeter[gid] += 1; } } if (y > 0) { if (center != dev_iData[gid - width]) { dev_PixelPerimeter[gid] += 1; } } if (y == 0) { if (center != data_startValue[x]) { dev_PixelPerimeter[gid] += 1; } } if (y < task_height - 1) { if (center != dev_iData[gid + width]) { dev_PixelPerimeter[gid] += 1; } } } } __global__ void getEachPixelPeriNoSplit(int *dev_iData, int*dev_PixelPerimeter, int width, int height) { int x = threadIdx.x + blockDim.y * blockDim.x * blockIdx.x; int y = blockIdx.y; int gid = x + y * width; bool in_limits = x < width && y < height;//task if (in_limits) { int center = dev_iData[gid]; if (x == 0) { dev_PixelPerimeter[gid] += 1; } if (x 
== width - 1) { dev_PixelPerimeter[gid] += 1; } if (y == 0) { dev_PixelPerimeter[gid] += 1; } if (y == height - 1) { dev_PixelPerimeter[gid] += 1; } if (x > 0) { if (center != dev_iData[gid - 1]) { dev_PixelPerimeter[gid] += 1; } } if (x < width - 1) { if (center != dev_iData[gid + 1]) { dev_PixelPerimeter[gid] += 1; } } if (y > 0) { if (center != dev_iData[gid - width]) { dev_PixelPerimeter[gid] += 1; } } if (y < height - 1) { if (center != dev_iData[gid + width]) { dev_PixelPerimeter[gid] += 1; } } } } __global__ void getPixNumAndPeri(int* dOutPixNum, int* dOutPeri, int* dev_labelMap, int *dev_pixelPerimeter, int width, int task_height) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int gid = x + y * width;//global 1D index; bool limits = x < width && y < task_height; if (limits) { int regLabel = dev_labelMap[gid];//get labeled val,if the labled value != -1 than calculate its area and primeter ; if (regLabel >= 0) { atomicAdd(dOutPixNum + regLabel, 1);//get area atomicAdd(dOutPeri + regLabel, dev_pixelPerimeter[gid]); } } } __global__ void updateDevLabel(int * dev_labelMap, int labelStart, int task_height, int width) { //heightdev_labelMap int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int gid = x + y * width;//global 1D index; bool limits = x < width && y < task_height; if (limits) { dev_labelMap[gid] += labelStart; } } double LineCCLNoSplit(int *allData, int width, int height, dim3 blockSize, dim3 gridSize, int * labelMap, int *pixNum, int *perimeter, int nodata) { cout << "LineCCLNoSplit" << endl; int2 imgSize; imgSize.x = width; imgSize.y = height; // device data int * dev_iData; int * dev_labelMap; int * dev_pixNum; int * dev_perimeter; // int * dev_PixelPerimeter; //(task_start task_end) checkCudaErrors(hipMalloc((void**)&dev_iData, sizeof(int)* width * height)); checkCudaErrors(hipMalloc((void**)&dev_labelMap, sizeof(int)* width * height)); checkCudaErrors(hipMalloc((void**)&dev_pixNum, sizeof(int)* width * height)); checkCudaErrors(hipMalloc((void**)&dev_perimeter, sizeof(int)* width * height)); checkCudaErrors(hipMalloc((void**)&dev_PixelPerimeter, sizeof(int)* width * height)); // copy data checkCudaErrors(hipMemcpy(dev_iData, allData, sizeof(int)* width * height, hipMemcpyHostToDevice)); // set data checkCudaErrors(hipMemset(dev_pixNum, 0, sizeof(int)* width * height)); checkCudaErrors(hipMemset(dev_perimeter, 0, sizeof(int)* width * height)); checkCudaErrors(hipMemset(dev_PixelPerimeter, 0, sizeof(int)* width * height)); // reconfigue the dimension of block and grid const int blockSizeX = blockSize.x * blockSize.y; const int blockSizeY = 1; dim3 blockSizeLine(blockSizeX, blockSizeY, 1); dim3 gridSizeLine((imgSize.x + blockSizeX - 1) / blockSizeX, (imgSize.y + blockSizeY - 1) / blockSizeY, 1); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); gpuLineLocal << < gridSizeLine, blockSizeLine >> > (dev_iData, dev_labelMap, width, height, nodata); gpuLineUfGlobal << <gridSizeLine, blockSizeLine >> > (dev_iData, dev_labelMap, width, height, nodata); gpuLineUfFinal << < gridSizeLine, blockSizeLine >> > (dev_labelMap, width, height); getEachPixelPeriNoSplit << <gridSizeLine, blockSizeLine >> >(dev_iData, dev_PixelPerimeter, width, height); getPixNumAndPeri << <gridSizeLine, blockSizeLine >> >(dev_pixNum, dev_perimeter, dev_labelMap, dev_PixelPerimeter, width, height); hipEventRecord(stop); checkCudaErrors(hipMemcpy(labelMap, dev_labelMap, 
sizeof(int)* width * height, hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(perimeter, dev_perimeter, sizeof(int)* width * height, hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(pixNum, dev_pixNum, sizeof(int)* width * height, hipMemcpyDeviceToHost)); hipFree(dev_iData); hipFree(dev_labelMap); hipFree(dev_perimeter); hipFree(dev_pixNum); hipFree(dev_PixelPerimeter); hipEventSynchronize(stop); float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); //std::cout << "milliseconds = " << milliseconds << std::endl; return milliseconds; } double LineSplitCCL(int** h_subDataNextBlock, dataBlock &dataBlockNext, CGDALRead* pread, int *allData, int width, int data_height, int task_height, dim3 blockSize, dim3 gridSize, int *h_labelMap, int *pixNum, int *perimeter, int nodata, int labelStart, int data_start, int data_end, int task_start, int task_end) { /* input: allData: all data from data_start to data_end width: width height: data_end-data_start kernel configuration: blockSize gridSize output: nextTaskStartLabel: the label of data_end row */ cout << "LineSplitCCL" << endl; // device data //input int * dev_iData; //(task_start task_end) int * dev_iData_last; // // int * dev_PixelPerimeter; //(task_start task_end) //subPatch int * dev_labelMap; //(task_start task_end) int * dev_pixNum; //(task_start task_end) int * dev_perimeter; //(task_start task_end) //allocate size checkCudaErrors(hipMalloc((void**)&dev_iData, sizeof(int)* width * task_height)); checkCudaErrors(hipMalloc((void**)&dev_iData_last, sizeof(int)* width)); checkCudaErrors(hipMalloc((void**)&dev_PixelPerimeter, sizeof(int)* width * (task_height))); checkCudaErrors(hipMalloc((void**)&dev_labelMap, sizeof(int)* width * task_height)); checkCudaErrors(hipMalloc((void**)&dev_pixNum, sizeof(int)* width * (task_height))); checkCudaErrors(hipMalloc((void**)&dev_perimeter, sizeof(int)* width * (task_height))); // copy data checkCudaErrors(hipMemcpyAsync(dev_iData, allData, sizeof(int)* width * task_height, hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpyAsync(dev_iData_last, allData + width * task_height, sizeof(int)* width, hipMemcpyHostToDevice)); // checkCudaErrors(hipMemcpy(dev_iData, allData, sizeof(int)* width * task_height, hipMemcpyHostToDevice)); // checkCudaErrors(hipMemcpy(dev_iData_last, allData + width * task_height, sizeof(int)* width, hipMemcpyHostToDevice)); // set data checkCudaErrors(hipMemset(dev_PixelPerimeter, 0, sizeof(int)* width * task_height)); checkCudaErrors(hipMemset(dev_pixNum, 0, sizeof(int)* width * task_height)); checkCudaErrors(hipMemset(dev_perimeter, 0, sizeof(int)* width * task_height)); // reconfigue the dimension of block and grid const int blockSizeX = blockSize.x * blockSize.y; const int blockSizeY = 1; dim3 blockSizeLine(blockSizeX, blockSizeY, 1); dim3 gridSizeLine((width + blockSizeX - 1) / blockSizeX, (task_height + blockSizeY - 1) / blockSizeY, 1); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); gpuLineLocal << < gridSizeLine, blockSizeLine >> > (dev_iData, dev_labelMap, width, task_height, nodata); gpuLineUfGlobal << <gridSizeLine, blockSizeLine >> > (dev_iData, dev_labelMap, width, task_height, nodata); gpuLineUfFinal << < gridSizeLine, blockSizeLine >> > (dev_labelMap, width, task_height); cout << "--------------label---finish---------------------------------" << endl; if (task_start == data_start) getEachPixelPeriTop1 << < gridSizeLine, blockSizeLine >> >(dev_iData,dev_iData_last,dev_PixelPerimeter, width, task_height, 
data_start, data_end, task_start, task_end); cout << "--------------getEachPixelPeriTop----finish------------------" << endl; hipFree(dev_iData); getPixNumAndPeri << <gridSizeLine, blockSizeLine >> >(dev_pixNum, dev_perimeter, dev_labelMap, dev_PixelPerimeter, width, task_height); cout << "--------------getPixNumAndPeri----finish--------------------" << endl; // checkCudaErrors(hipMemcpy(h_labelMap, dev_labelMap, sizeof(int)* task_height * width, hipMemcpyDeviceToHost)); // cout << "--------------hipMemcpyDeviceToHost--h_labelMap--finish--------------------" << endl; // checkCudaErrors(hipMemcpy(perimeter, dev_perimeter, sizeof(int)* task_height * width, hipMemcpyDeviceToHost)); // cout << "--------------hipMemcpyDeviceToHost--perimeter--finish--------------------" << endl; // checkCudaErrors(hipMemcpy(pixNum, dev_pixNum, sizeof(int)* task_height * width, hipMemcpyDeviceToHost)); // cout << "--------------hipMemcpyDeviceToHost--pixNum--finish--------------------" << endl; checkCudaErrors(hipMemcpyAsync(h_labelMap, dev_labelMap, sizeof(int)* task_height * width, hipMemcpyDeviceToHost)); cout << "--------------hipMemcpyDeviceToHost--h_labelMap--finish--------------------" << endl; checkCudaErrors(hipMemcpyAsync(perimeter, dev_perimeter, sizeof(int)* task_height * width, hipMemcpyDeviceToHost)); cout << "--------------hipMemcpyDeviceToHost--perimeter--finish--------------------" << endl; checkCudaErrors(hipMemcpyAsync(pixNum, dev_pixNum, sizeof(int)* task_height * width, hipMemcpyDeviceToHost)); cout << "--------------hipMemcpyDeviceToHost--pixNum--finish--------------------" << endl; hipEventRecord(stop); unsigned long int counter = 0; while (hipEventQuery(stop) == hipErrorNotReady) { counter++; } loadBlockData(width, dataBlockNext.subDataHeight, dataBlockNext.dataStart, h_subDataNextBlock, pread); printf("CPU executed %lu iterations while waiting for GPU to finish\n", counter); hipFree(dev_PixelPerimeter); hipFree(dev_labelMap); hipFree(dev_perimeter); hipFree(dev_pixNum); hipEventSynchronize(stop); float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); std::cout << "milliseconds = " << milliseconds << std::endl; checkCudaErrors(hipEventDestroy(start)); checkCudaErrors(hipEventDestroy(stop)); return milliseconds; } double LineSplitCCL2(int** h_subDataNextBlock, dataBlock* dataBlockArray, int iBlock, CGDALRead* pread, int *allData, int width, int data_height, int task_height, dim3 blockSize, dim3 gridSize, int *h_labelMap, int *pixNum, int *perimeter, int nodata, int labelStart, int data_start, int data_end, int task_start, int task_end) { /* input: allData: all data from data_start to data_end firstRowLabel: width: width height: data_end-data_start kernel configuration: blockSize gridSize output: nextTaskStartLabel: the label of data_end row */ cout << "LineSplitCCL2" << endl; // device data //input int * dev_iData; //(task_start task_end) int * dev_dataStartValue;//data_start int * dev_dataLastValue;//data_end // int * dev_PixelPerimeter; //(task_start task_end) //subPatch int * dev_labelMap; //(task_start task_end) int * dev_pixNum; //(task_start task_end) int * dev_perimeter; //(task_start task_end) //allocate size checkCudaErrors(hipMalloc((void**)&dev_iData, sizeof(int)* width * task_height)); checkCudaErrors(hipMalloc((void**)&dev_dataStartValue, sizeof(int)* width)); checkCudaErrors(hipMalloc((void**)&dev_PixelPerimeter, sizeof(int)* width * task_height)); checkCudaErrors(hipMalloc((void**)&dev_labelMap, sizeof(int)* width * task_height)); 
checkCudaErrors(hipMalloc((void**)&dev_pixNum, sizeof(int)* width * task_height)); checkCudaErrors(hipMalloc((void**)&dev_perimeter, sizeof(int)* width * task_height)); // copy data // checkCudaErrors(hipMemcpy(dev_dataStartValue, allData, sizeof(int)* width, hipMemcpyHostToDevice)); // checkCudaErrors(hipMemcpy(dev_iData, allData + width, sizeof(int)* width * task_height, hipMemcpyHostToDevice)); // if(task_end != data_end) // { // checkCudaErrors(hipMalloc((void**)&dev_dataLastValue, sizeof(int)* width)); // checkCudaErrors(hipMemcpy(dev_dataLastValue, allData+width*(task_height+1), sizeof(int)* width, hipMemcpyHostToDevice)); // } checkCudaErrors(hipMemcpyAsync(dev_dataStartValue, allData, sizeof(int)* width, hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpyAsync(dev_iData, allData + width, sizeof(int)* width * task_height, hipMemcpyHostToDevice)); if(task_end != data_end) { checkCudaErrors(hipMalloc((void**)&dev_dataLastValue, sizeof(int)* width)); checkCudaErrors(hipMemcpyAsync(dev_dataLastValue, allData+width*(task_height+1), sizeof(int)* width, hipMemcpyHostToDevice)); } // set data checkCudaErrors(hipMemset(dev_PixelPerimeter, 0, sizeof(int)* width *task_height)); checkCudaErrors(hipMemset(dev_pixNum, 0, sizeof(int)* width * task_height)); checkCudaErrors(hipMemset(dev_perimeter, 0, sizeof(int)* width * task_height)); // reconfigue the dimension of block and grid const int blockSizeX = blockSize.x * blockSize.y; const int blockSizeY = 1; dim3 blockSizeLine(blockSizeX, blockSizeY, 1); dim3 gridSizeLine((width + blockSizeX - 1) / blockSizeX, (task_height + blockSizeY - 1) / blockSizeY, 1); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); gpuLineLocal << < gridSizeLine, blockSizeLine >> > (dev_iData, dev_labelMap, width, task_height, nodata); gpuLineUfGlobal << <gridSizeLine, blockSizeLine >> > (dev_iData, dev_labelMap, width, task_height, nodata); gpuLineUfFinal << < gridSizeLine, blockSizeLine >> > (dev_labelMap, width, task_height); if (task_end == data_end) getEachPixelPeriBottom << < gridSizeLine, blockSizeLine >> >(dev_iData, dev_dataStartValue, dev_PixelPerimeter, width, task_height, data_start, data_end, task_start, task_end); else getEachPixelPeriMid2 << < gridSizeLine, blockSizeLine >> >(dev_iData, dev_dataStartValue, dev_dataLastValue, dev_PixelPerimeter, width, task_height, data_start, data_end, task_start, task_end); hipFree(dev_iData); getPixNumAndPeri << <gridSizeLine, blockSizeLine >> >(dev_pixNum, dev_perimeter, dev_labelMap, dev_PixelPerimeter, width, task_height); updateDevLabel << <gridSizeLine, blockSizeLine >> > (dev_labelMap, labelStart, task_height, width); cout << "--------------updateDevLabeling----finish--------------------" << endl; // have CPU do some work while waiting for stage 1 to finish // checkCudaErrors(hipMemcpy(h_labelMap, dev_labelMap, sizeof(int)* width* task_height, hipMemcpyDeviceToHost)); // cout << "--------------hipMemcpyDeviceToHost--dev_labelMap--finish--------------------" << endl; // checkCudaErrors(hipMemcpy(perimeter, dev_perimeter, sizeof(int)* width* task_height, hipMemcpyDeviceToHost)); // cout << "--------------hipMemcpyDeviceToHost--dev_perimeter--finish--------------------" << endl; // checkCudaErrors(hipMemcpy(pixNum, dev_pixNum, sizeof(int)* width* task_height, hipMemcpyDeviceToHost)); // cout << "--------------hipMemcpyDeviceToHost--pixNum--finish--------------------" << endl; checkCudaErrors(hipMemcpyAsync(h_labelMap, dev_labelMap, sizeof(int)* width* task_height, 
hipMemcpyDeviceToHost)); cout << "--------------hipMemcpyDeviceToHost--dev_labelMap--finish--------------------" << endl; checkCudaErrors(hipMemcpyAsync(perimeter, dev_perimeter, sizeof(int)* width* task_height, hipMemcpyDeviceToHost)); cout << "--------------hipMemcpyDeviceToHost--dev_perimeter--finish--------------------" << endl; checkCudaErrors(hipMemcpyAsync(pixNum, dev_pixNum, sizeof(int)* width* task_height, hipMemcpyDeviceToHost)); cout << "--------------hipMemcpyDeviceToHost--pixNum--finish--------------------" << endl; hipEventRecord(stop); unsigned long int counter = 0; while (hipEventQuery(stop) == hipErrorNotReady) { counter++; } if (task_end != data_end) loadBlockData(width, dataBlockArray[iBlock+1].subDataHeight, dataBlockArray[iBlock+1].dataStart, h_subDataNextBlock, pread); printf("CPU executed %lu iterations while waiting for GPU to finish\n", counter); hipFree(dev_dataStartValue); if(task_end != data_end) hipFree(dev_dataLastValue); hipFree(dev_PixelPerimeter); hipFree(dev_labelMap); hipFree(dev_perimeter); hipFree(dev_pixNum); hipEventSynchronize(stop); float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); checkCudaErrors(hipEventDestroy(start)); checkCudaErrors(hipEventDestroy(stop)); std::cout << "milliseconds = " << milliseconds << std::endl; return milliseconds; }
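Both split variants above end the same way: the device-to-host copies are issued with hipMemcpyAsync, an event is recorded behind them, and the host spins on hipEventQuery, loading the next data block while the GPU drains its queue. The standalone CUDA sketch below shows just that pattern (the hipified file above uses the hip-prefixed equivalents); the fill kernel, the buffer size, and the counter standing in for loadBlockData are illustrative only, not part of the files in this row. Pinned host memory is used because cudaMemcpyAsync only overlaps with host work when the host buffer is page-locked.

#include <cstdio>
#include <cuda_runtime.h>

// Stand-in for the labelling/perimeter kernels launched above.
__global__ void fillKernel(int *d, int n, int v) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) d[i] = v;
}

int main() {
    const int n = 1 << 20;
    int *h_buf, *d_buf;
    // Page-locked host buffer so the async copy can really run in the background.
    cudaMallocHost((void**)&h_buf, n * sizeof(int));
    cudaMalloc((void**)&d_buf, n * sizeof(int));

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start);
    fillKernel<<<(n + 255) / 256, 256>>>(d_buf, n, 42);
    cudaMemcpyAsync(h_buf, d_buf, n * sizeof(int), cudaMemcpyDeviceToHost);
    cudaEventRecord(stop);               // marks the end of the GPU stage

    // Keep the CPU busy until the event reports completion,
    // mirroring the counter loop / loadBlockData call above.
    unsigned long int counter = 0;
    while (cudaEventQuery(stop) == cudaErrorNotReady) {
        counter++;                       // stand-in for useful host work, e.g. reading the next block
    }
    cudaEventSynchronize(stop);

    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    printf("GPU stage: %f ms, CPU spun %lu times, h_buf[0] = %d\n",
           milliseconds, counter, h_buf[0]);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_buf);
    cudaFreeHost(h_buf);
    return 0;
}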
55729391ae7befdde47d1eb969755227b145d17d.cu
#include <iostream> #include <cuda.h> #include <cuda_runtime.h> #include <fstream> #include <algorithm> #include "timer.h" #include "GDALRead.h" #include <device_functions.h> #include <cuda_runtime_api.h> #include "utils.h" #include "basestruct.h" #include "operator.h" #include <unistd.h> void loadBlockData(int width, int data_height, int data_start, int** h_subData, CGDALRead* pread); __device__ int find(int * localLabel, int p) { if (localLabel[p] != -1) { while (p != localLabel[p]) { p = localLabel[p]; } return p; } else return -1; } __device__ void findAndUnion(int* buf, int g1, int g2) { bool done; do { g1 = find(buf, g1); g2 = find(buf, g2); // it should hold that g1 == buf[g1] and g2 == buf[g2] now if (g1 < g2) { int old = atomicMin(&buf[g2], g1); done = (old == g2); g2 = old; } else if (g2 < g1) { int old = atomicMin(&buf[g1], g2); done = (old == g1); g1 = old; } else { done = true; } } while (!done); } __global__ void gpuLineLocal(int* devSrcData, int * devLabelMap, int width, int task_height, int nodata) { //int id = threadIdx.x + threadIdx.y * blockDim.x + (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x * blockDim.y; //if (id > imgDimension.x * imgDimension.y) return; //int tid = threadIdx.x + threadIdx.y * blockDim.x; int tid = threadIdx.x; int x = threadIdx.x + blockDim.y * blockDim.x * blockIdx.x; int y = blockIdx.y; bool limits = x < width && y < task_height; int id = x + y * width; __shared__ int localLabel[32 * 16]; if (limits) { localLabel[tid] = tid; __syncthreads(); int focusP = devSrcData[x + y * width]; if (focusP != nodata && threadIdx.x > 0 && focusP == devSrcData[x - 1 + y * width]) localLabel[tid] = localLabel[tid - 1]; __syncthreads(); int buf = tid; while (buf != localLabel[buf]) { buf = localLabel[buf]; localLabel[tid] = buf; } int globalL = (blockIdx.x * blockDim.x + buf) + (blockIdx.y) * width; devLabelMap[id] = globalL; if (focusP == nodata) devLabelMap[id] = -1; } } __global__ void gpuLineUfGlobal(int* devSrcData, int * devLabelMap, int width, int task_height, int nodata) { int x = threadIdx.x + blockDim.y * blockDim.x * blockIdx.x; int y = blockIdx.y; int gid = x + y * width; bool in_limits = x < width && y < task_height; if (in_limits) { int center = devSrcData[gid]; if (center != nodata) { // search neighbour, left and up //if (x > 0 && threadIdx.x == 0 && center == devSrcData[x - 1 + y * imgDimension.x]) // findAndUnion(devLabelMap, gid, x - 1 + y * imgDimension.x); // left //if (y > 0 && threadIdx.y == 0 && center == devSrcData[x + (y - 1) * imgDimension.x]) // findAndUnion(devLabelMap, gid, x + (y - 1) * imgDimension.x); // up if (x > 0 && threadIdx.x == 0)//&& center == left { if (center == devSrcData[x - 1 + y * width]) findAndUnion(devLabelMap, gid, x - 1 + y * width); // left } if (y > 0 && threadIdx.y == 0)//&& center == up { if (center == devSrcData[x + (y - 1) * width]) findAndUnion(devLabelMap, gid, x + (y - 1) * width); // up } if (y > 0 && x > 0 && threadIdx.y == 0)// && center == leftup { if (center == devSrcData[x - 1 + (y - 1) * width]) findAndUnion(devLabelMap, gid, x - 1 + (y - 1) * width); // up-left } if (y > 0 && x < width - 1 && threadIdx.y == 0)// && center == rightup { if (center == devSrcData[x + 1 + (y - 1) * width]) findAndUnion(devLabelMap, gid, x + 1 + (y - 1) * width); // up-right } } } } __global__ void gpuLineUfFinal(int * labelMap, int width, int task_height) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; bool limits = x < width && y < task_height; int gid = x + y * 
width; if (limits) labelMap[gid] = find(labelMap, gid); } __global__ void getEachPixelPeriTop1(int *dev_iData, int *dev_iData_last,int*dev_PixelPerimeter, int width, int task_height, int data_start, int data_end, int task_start, int task_end) { int x = threadIdx.x + blockDim.y * blockDim.x * blockIdx.x; int y = blockIdx.y; int gid = x + y * width; bool in_limits = x < width && y < task_height;//×îºóÒ»Ðв»ÊôÓÚµ±Ç°¿éµÄtaskÇøÓò£¬²»×ö¼ÆËã if (in_limits) { int center = dev_iData[gid]; if (x == 0) { dev_PixelPerimeter[gid] += 1; } if (x == width - 1) { dev_PixelPerimeter[gid] += 1; } if (y == 0) { dev_PixelPerimeter[gid] += 1; } if(y == task_height - 1) { if(center!=dev_iData_last[x]) { dev_PixelPerimeter[gid] += 1; } } if (x>0) { if (center != dev_iData[gid - 1]) { dev_PixelPerimeter[gid] += 1; } } if (x < width - 1) { if (center != dev_iData[gid + 1]) { dev_PixelPerimeter[gid] += 1; } } if (y > 0) { if (center != dev_iData[gid - width]) { dev_PixelPerimeter[gid] += 1; } } if(y < task_height - 1) { if (center != dev_iData[gid + width])//down { dev_PixelPerimeter[gid] += 1; } } } } __global__ void getEachPixelPeriMid2(int *dev_iData, int*data_startValue,int *dev_dataLastValue, int*dev_PixelPerimeter, int width, int task_height, int data_start, int data_end, int task_start, int task_end) { int x = threadIdx.x + blockDim.y * blockDim.x * blockIdx.x; int y = blockIdx.y; int gid = x + y * width; bool in_limits = x < width && y < task_height;//×îºóÒ»Ðв»ÊôÓÚµ±Ç°¿éµÄtaskÇøÓò£¬²»×ö¼ÆËã if (in_limits) { int center = dev_iData[gid]; if (x == 0) { dev_PixelPerimeter[gid] += 1; } if (x == width - 1) { dev_PixelPerimeter[gid] += 1; } if (x>0) { if (center != dev_iData[gid - 1]) { dev_PixelPerimeter[gid] += 1; } } if (x < width - 1) { if (center != dev_iData[gid + 1]) { dev_PixelPerimeter[gid] += 1; } } if (y>0) { if (center != dev_iData[gid - width]) { dev_PixelPerimeter[gid] += 1; } } if (y == 0) { if (center != data_startValue[x]) { dev_PixelPerimeter[gid] += 1; } } if(y == task_height - 1) { if(center != dev_dataLastValue[x]) { dev_PixelPerimeter[gid] += 1; } } if(y < task_height-1) { if (center != dev_iData[gid + width])//down { dev_PixelPerimeter[gid] += 1; } } } } __global__ void getEachPixelPeriBottom(int *dev_iData, int*data_startValue, int*dev_PixelPerimeter, int width, int task_height, int data_start, int data_end, int task_start, int task_end) { int x = threadIdx.x + blockDim.y * blockDim.x * blockIdx.x; int y = blockIdx.y; int gid = x + y * width; bool in_limits = x < width && y < task_height;//×îºóÒ»ÐÐÊôÓÚµ±Ç°¿éµÄtaskÇøÓò£¬ÐèÒª×ö¼ÆËã if (in_limits) { int center = dev_iData[gid]; if (x == 0) { dev_PixelPerimeter[gid] += 1; } if (x == width - 1) { dev_PixelPerimeter[gid] += 1; } if (y == task_height - 1) { dev_PixelPerimeter[gid] += 1; } if (x > 0) { if (center != dev_iData[gid - 1]) { dev_PixelPerimeter[gid] += 1; } } if (x < width - 1) { if (center != dev_iData[gid + 1]) { dev_PixelPerimeter[gid] += 1; } } if (y > 0) { if (center != dev_iData[gid - width]) { dev_PixelPerimeter[gid] += 1; } } if (y == 0) { if (center != data_startValue[x]) { dev_PixelPerimeter[gid] += 1; } } if (y < task_height - 1) { if (center != dev_iData[gid + width]) { dev_PixelPerimeter[gid] += 1; } } } } __global__ void getEachPixelPeriNoSplit(int *dev_iData, int*dev_PixelPerimeter, int width, int height) { int x = threadIdx.x + blockDim.y * blockDim.x * blockIdx.x; int y = blockIdx.y; int gid = x + y * width; bool in_limits = x < width && y < height;//×îºóÒ»ÐÐÊôÓÚµ±Ç°¿éµÄtaskÇøÓò£¬ÐèÒª×ö¼ÆËã if (in_limits) { int center = 
dev_iData[gid]; if (x == 0) { dev_PixelPerimeter[gid] += 1; } if (x == width - 1) { dev_PixelPerimeter[gid] += 1; } if (y == 0) { dev_PixelPerimeter[gid] += 1; } if (y == height - 1) { dev_PixelPerimeter[gid] += 1; } if (x > 0) { if (center != dev_iData[gid - 1]) { dev_PixelPerimeter[gid] += 1; } } if (x < width - 1) { if (center != dev_iData[gid + 1]) { dev_PixelPerimeter[gid] += 1; } } if (y > 0) { if (center != dev_iData[gid - width]) { dev_PixelPerimeter[gid] += 1; } } if (y < height - 1) { if (center != dev_iData[gid + width]) { dev_PixelPerimeter[gid] += 1; } } } } __global__ void getPixNumAndPeri(int* dOutPixNum, int* dOutPeri, int* dev_labelMap, int *dev_pixelPerimeter, int width, int task_height) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int gid = x + y * width;//global 1D index; bool limits = x < width && y < task_height; if (limits) { int regLabel = dev_labelMap[gid];//get labeled val,if the labled value != -1 than calculate its area and primeter ; if (regLabel >= 0) { atomicAdd(dOutPixNum + regLabel, 1);//get area atomicAdd(dOutPeri + regLabel, dev_pixelPerimeter[gid]); } } } __global__ void updateDevLabel(int * dev_labelMap, int labelStart, int task_height, int width) { //heightÊÇdev_labelMapµÄ¸ß¶È int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int gid = x + y * width;//global 1D index; bool limits = x < width && y < task_height; if (limits) { dev_labelMap[gid] += labelStart; } } double LineCCLNoSplit(int *allData, int width, int height, dim3 blockSize, dim3 gridSize, int * labelMap, int *pixNum, int *perimeter, int nodata) { cout << "LineCCLNoSplit" << endl; int2 imgSize; imgSize.x = width; imgSize.y = height; // device data int * dev_iData; int * dev_labelMap; int * dev_pixNum; int * dev_perimeter; //Öмä±äÁ¿ int * dev_PixelPerimeter; //É豸¶ËÔÝ´æÖܳ¤(task_start task_end) checkCudaErrors(cudaMalloc((void**)&dev_iData, sizeof(int)* width * height)); checkCudaErrors(cudaMalloc((void**)&dev_labelMap, sizeof(int)* width * height)); checkCudaErrors(cudaMalloc((void**)&dev_pixNum, sizeof(int)* width * height)); checkCudaErrors(cudaMalloc((void**)&dev_perimeter, sizeof(int)* width * height)); checkCudaErrors(cudaMalloc((void**)&dev_PixelPerimeter, sizeof(int)* width * height)); // copy data checkCudaErrors(cudaMemcpy(dev_iData, allData, sizeof(int)* width * height, cudaMemcpyHostToDevice)); // set data checkCudaErrors(cudaMemset(dev_pixNum, 0, sizeof(int)* width * height)); checkCudaErrors(cudaMemset(dev_perimeter, 0, sizeof(int)* width * height)); checkCudaErrors(cudaMemset(dev_PixelPerimeter, 0, sizeof(int)* width * height)); // reconfigue the dimension of block and grid const int blockSizeX = blockSize.x * blockSize.y; const int blockSizeY = 1; dim3 blockSizeLine(blockSizeX, blockSizeY, 1); dim3 gridSizeLine((imgSize.x + blockSizeX - 1) / blockSizeX, (imgSize.y + blockSizeY - 1) / blockSizeY, 1); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); gpuLineLocal << < gridSizeLine, blockSizeLine >> > (dev_iData, dev_labelMap, width, height, nodata); gpuLineUfGlobal << <gridSizeLine, blockSizeLine >> > (dev_iData, dev_labelMap, width, height, nodata); gpuLineUfFinal << < gridSizeLine, blockSizeLine >> > (dev_labelMap, width, height); getEachPixelPeriNoSplit << <gridSizeLine, blockSizeLine >> >(dev_iData, dev_PixelPerimeter, width, height); getPixNumAndPeri << <gridSizeLine, blockSizeLine >> >(dev_pixNum, dev_perimeter, dev_labelMap, 
dev_PixelPerimeter, width, height); cudaEventRecord(stop); checkCudaErrors(cudaMemcpy(labelMap, dev_labelMap, sizeof(int)* width * height, cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(perimeter, dev_perimeter, sizeof(int)* width * height, cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(pixNum, dev_pixNum, sizeof(int)* width * height, cudaMemcpyDeviceToHost)); cudaFree(dev_iData); cudaFree(dev_labelMap); cudaFree(dev_perimeter); cudaFree(dev_pixNum); cudaFree(dev_PixelPerimeter); cudaEventSynchronize(stop); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); //std::cout << "milliseconds = " << milliseconds << std::endl; return milliseconds; } double LineSplitCCL(int** h_subDataNextBlock, dataBlock &dataBlockNext, CGDALRead* pread, int *allData, int width, int data_height, int task_height, dim3 blockSize, dim3 gridSize, int *h_labelMap, int *pixNum, int *perimeter, int nodata, int labelStart, int data_start, int data_end, int task_start, int task_end) { /* input: allData: all data from data_start to data_end width: width height: data_end-data_start kernel configuration: blockSize gridSize output: nextTaskStartLabel: the label of data_end row */ cout << "LineSplitCCL" << endl; // device data //input int * dev_iData; //receives the original image values (task_start to task_end) int * dev_iData_last; //receives the last row of the original image //intermediate buffer int * dev_PixelPerimeter; //device-side temporary perimeter (task_start to task_end) //arrays used to build the final subPatch int * dev_labelMap; //device-side local labels (task_start to task_end) int * dev_pixNum; //device-side temporary area (task_start to task_end) int * dev_perimeter; //device-side temporary perimeter (task_start to task_end) //allocate size checkCudaErrors(cudaMalloc((void**)&dev_iData, sizeof(int)* width * task_height)); checkCudaErrors(cudaMalloc((void**)&dev_iData_last, sizeof(int)* width)); checkCudaErrors(cudaMalloc((void**)&dev_PixelPerimeter, sizeof(int)* width * (task_height))); checkCudaErrors(cudaMalloc((void**)&dev_labelMap, sizeof(int)* width * task_height)); checkCudaErrors(cudaMalloc((void**)&dev_pixNum, sizeof(int)* width * (task_height))); checkCudaErrors(cudaMalloc((void**)&dev_perimeter, sizeof(int)* width * (task_height))); // copy data checkCudaErrors(cudaMemcpyAsync(dev_iData, allData, sizeof(int)* width * task_height, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpyAsync(dev_iData_last, allData + width * task_height, sizeof(int)* width, cudaMemcpyHostToDevice)); // checkCudaErrors(cudaMemcpy(dev_iData, allData, sizeof(int)* width * task_height, cudaMemcpyHostToDevice)); // checkCudaErrors(cudaMemcpy(dev_iData_last, allData + width * task_height, sizeof(int)* width, cudaMemcpyHostToDevice)); // set data checkCudaErrors(cudaMemset(dev_PixelPerimeter, 0, sizeof(int)* width * task_height)); checkCudaErrors(cudaMemset(dev_pixNum, 0, sizeof(int)* width * task_height)); checkCudaErrors(cudaMemset(dev_perimeter, 0, sizeof(int)* width * task_height)); // reconfigure the dimension of block and grid const int blockSizeX = blockSize.x * blockSize.y; const int blockSizeY = 1; dim3 blockSizeLine(blockSizeX, blockSizeY, 1); dim3 gridSizeLine((width + blockSizeX - 1) / blockSizeX, (task_height + blockSizeY - 1) / blockSizeY, 1); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); gpuLineLocal << < gridSizeLine, blockSizeLine >> > (dev_iData, dev_labelMap, width, task_height, nodata); gpuLineUfGlobal << <gridSizeLine, blockSizeLine >> > (dev_iData, dev_labelMap, width, task_height, nodata); gpuLineUfFinal << < gridSizeLine, blockSizeLine >> > (dev_labelMap, width, 
task_height); cout << "--------------label---finish---------------------------------" << endl; if (task_start == data_start) getEachPixelPeriTop1 << < gridSizeLine, blockSizeLine >> >(dev_iData,dev_iData_last,dev_PixelPerimeter, width, task_height, data_start, data_end, task_start, task_end); cout << "--------------getEachPixelPeriTop----finish------------------" << endl; cudaFree(dev_iData); getPixNumAndPeri << <gridSizeLine, blockSizeLine >> >(dev_pixNum, dev_perimeter, dev_labelMap, dev_PixelPerimeter, width, task_height); cout << "--------------getPixNumAndPeri----finish--------------------" << endl; // checkCudaErrors(cudaMemcpy(h_labelMap, dev_labelMap, sizeof(int)* task_height * width, cudaMemcpyDeviceToHost)); // cout << "--------------cudaMemcpyDeviceToHost--h_labelMap--finish--------------------" << endl; // checkCudaErrors(cudaMemcpy(perimeter, dev_perimeter, sizeof(int)* task_height * width, cudaMemcpyDeviceToHost)); // cout << "--------------cudaMemcpyDeviceToHost--perimeter--finish--------------------" << endl; // checkCudaErrors(cudaMemcpy(pixNum, dev_pixNum, sizeof(int)* task_height * width, cudaMemcpyDeviceToHost)); // cout << "--------------cudaMemcpyDeviceToHost--pixNum--finish--------------------" << endl; checkCudaErrors(cudaMemcpyAsync(h_labelMap, dev_labelMap, sizeof(int)* task_height * width, cudaMemcpyDeviceToHost)); cout << "--------------cudaMemcpyDeviceToHost--h_labelMap--finish--------------------" << endl; checkCudaErrors(cudaMemcpyAsync(perimeter, dev_perimeter, sizeof(int)* task_height * width, cudaMemcpyDeviceToHost)); cout << "--------------cudaMemcpyDeviceToHost--perimeter--finish--------------------" << endl; checkCudaErrors(cudaMemcpyAsync(pixNum, dev_pixNum, sizeof(int)* task_height * width, cudaMemcpyDeviceToHost)); cout << "--------------cudaMemcpyDeviceToHost--pixNum--finish--------------------" << endl; cudaEventRecord(stop); unsigned long int counter = 0; while (cudaEventQuery(stop) == cudaErrorNotReady) { counter++; } loadBlockData(width, dataBlockNext.subDataHeight, dataBlockNext.dataStart, h_subDataNextBlock, pread); printf("CPU executed %lu iterations while waiting for GPU to finish\n", counter); cudaFree(dev_PixelPerimeter); cudaFree(dev_labelMap); cudaFree(dev_perimeter); cudaFree(dev_pixNum); cudaEventSynchronize(stop); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); std::cout << "milliseconds = " << milliseconds << std::endl; checkCudaErrors(cudaEventDestroy(start)); checkCudaErrors(cudaEventDestroy(stop)); return milliseconds; } double LineSplitCCL2(int** h_subDataNextBlock, dataBlock* dataBlockArray, int iBlock, CGDALRead* pread, int *allData, int width, int data_height, int task_height, dim3 blockSize, dim3 gridSize, int *h_labelMap, int *pixNum, int *perimeter, int nodata, int labelStart, int data_start, int data_end, int task_start, int task_end) { /* input: allData: all data from data_start to data_end firstRowLabel:ÓÃÓÚ¸üе±Ç°¿éµÄ±êǩΪȫ¾Ö±êÇ© width: width height: data_end-data_start kernel configuration: blockSize gridSize output: nextTaskStartLabel: the label of data_end row */ cout << "LineSplitCCL2" << endl; // device data //input int * dev_iData; //½ÓÊÕԭʼͼÏñÖµ(task_start task_end) int * dev_dataStartValue;//½ÓÊÕԭʼͼÏñÖµdata_start int * dev_dataLastValue;//½ÓÊÕԭʼͼÏñÖµdata_end //Öмä±äÁ¿ int * dev_PixelPerimeter; //É豸¶ËÔÝ´æÖܳ¤(task_start task_end) //ÓÃÓÚÉú³É×îºósubPatchµÄÊý×é int * dev_labelMap; //É豸¶Ë¾Ö²¿±ê¼Ç(task_start task_end) int * dev_pixNum; //É豸¶ËÔÝ´æÃæ»ý(task_start task_end) int * dev_perimeter; 
//É豸¶ËÔÝ´æÖܳ¤(task_start task_end) //allocate size checkCudaErrors(cudaMalloc((void**)&dev_iData, sizeof(int)* width * task_height)); checkCudaErrors(cudaMalloc((void**)&dev_dataStartValue, sizeof(int)* width)); checkCudaErrors(cudaMalloc((void**)&dev_PixelPerimeter, sizeof(int)* width * task_height)); checkCudaErrors(cudaMalloc((void**)&dev_labelMap, sizeof(int)* width * task_height)); checkCudaErrors(cudaMalloc((void**)&dev_pixNum, sizeof(int)* width * task_height)); checkCudaErrors(cudaMalloc((void**)&dev_perimeter, sizeof(int)* width * task_height)); // copy data // checkCudaErrors(cudaMemcpy(dev_dataStartValue, allData, sizeof(int)* width, cudaMemcpyHostToDevice)); // checkCudaErrors(cudaMemcpy(dev_iData, allData + width, sizeof(int)* width * task_height, cudaMemcpyHostToDevice)); // if(task_end != data_end) // { // checkCudaErrors(cudaMalloc((void**)&dev_dataLastValue, sizeof(int)* width)); // checkCudaErrors(cudaMemcpy(dev_dataLastValue, allData+width*(task_height+1), sizeof(int)* width, cudaMemcpyHostToDevice)); // } checkCudaErrors(cudaMemcpyAsync(dev_dataStartValue, allData, sizeof(int)* width, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpyAsync(dev_iData, allData + width, sizeof(int)* width * task_height, cudaMemcpyHostToDevice)); if(task_end != data_end) { checkCudaErrors(cudaMalloc((void**)&dev_dataLastValue, sizeof(int)* width)); checkCudaErrors(cudaMemcpyAsync(dev_dataLastValue, allData+width*(task_height+1), sizeof(int)* width, cudaMemcpyHostToDevice)); } // set data checkCudaErrors(cudaMemset(dev_PixelPerimeter, 0, sizeof(int)* width *task_height)); checkCudaErrors(cudaMemset(dev_pixNum, 0, sizeof(int)* width * task_height)); checkCudaErrors(cudaMemset(dev_perimeter, 0, sizeof(int)* width * task_height)); // reconfigue the dimension of block and grid const int blockSizeX = blockSize.x * blockSize.y; const int blockSizeY = 1; dim3 blockSizeLine(blockSizeX, blockSizeY, 1); dim3 gridSizeLine((width + blockSizeX - 1) / blockSizeX, (task_height + blockSizeY - 1) / blockSizeY, 1); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); gpuLineLocal << < gridSizeLine, blockSizeLine >> > (dev_iData, dev_labelMap, width, task_height, nodata); gpuLineUfGlobal << <gridSizeLine, blockSizeLine >> > (dev_iData, dev_labelMap, width, task_height, nodata); gpuLineUfFinal << < gridSizeLine, blockSizeLine >> > (dev_labelMap, width, task_height); if (task_end == data_end) getEachPixelPeriBottom << < gridSizeLine, blockSizeLine >> >(dev_iData, dev_dataStartValue, dev_PixelPerimeter, width, task_height, data_start, data_end, task_start, task_end); else getEachPixelPeriMid2 << < gridSizeLine, blockSizeLine >> >(dev_iData, dev_dataStartValue, dev_dataLastValue, dev_PixelPerimeter, width, task_height, data_start, data_end, task_start, task_end); cudaFree(dev_iData); getPixNumAndPeri << <gridSizeLine, blockSizeLine >> >(dev_pixNum, dev_perimeter, dev_labelMap, dev_PixelPerimeter, width, task_height); updateDevLabel << <gridSizeLine, blockSizeLine >> > (dev_labelMap, labelStart, task_height, width); cout << "--------------updateDevLabeling----finish--------------------" << endl; // have CPU do some work while waiting for stage 1 to finish // checkCudaErrors(cudaMemcpy(h_labelMap, dev_labelMap, sizeof(int)* width* task_height, cudaMemcpyDeviceToHost)); // cout << "--------------cudaMemcpyDeviceToHost--dev_labelMap--finish--------------------" << endl; // checkCudaErrors(cudaMemcpy(perimeter, dev_perimeter, sizeof(int)* width* task_height, 
cudaMemcpyDeviceToHost)); // cout << "--------------cudaMemcpyDeviceToHost--dev_perimeter--finish--------------------" << endl; // checkCudaErrors(cudaMemcpy(pixNum, dev_pixNum, sizeof(int)* width* task_height, cudaMemcpyDeviceToHost)); // cout << "--------------cudaMemcpyDeviceToHost--pixNum--finish--------------------" << endl; checkCudaErrors(cudaMemcpyAsync(h_labelMap, dev_labelMap, sizeof(int)* width* task_height, cudaMemcpyDeviceToHost)); cout << "--------------cudaMemcpyDeviceToHost--dev_labelMap--finish--------------------" << endl; checkCudaErrors(cudaMemcpyAsync(perimeter, dev_perimeter, sizeof(int)* width* task_height, cudaMemcpyDeviceToHost)); cout << "--------------cudaMemcpyDeviceToHost--dev_perimeter--finish--------------------" << endl; checkCudaErrors(cudaMemcpyAsync(pixNum, dev_pixNum, sizeof(int)* width* task_height, cudaMemcpyDeviceToHost)); cout << "--------------cudaMemcpyDeviceToHost--pixNum--finish--------------------" << endl; cudaEventRecord(stop); unsigned long int counter = 0; while (cudaEventQuery(stop) == cudaErrorNotReady) { counter++; } if (task_end != data_end) loadBlockData(width, dataBlockArray[iBlock+1].subDataHeight, dataBlockArray[iBlock+1].dataStart, h_subDataNextBlock, pread); printf("CPU executed %lu iterations while waiting for GPU to finish\n", counter); cudaFree(dev_dataStartValue); if(task_end != data_end) cudaFree(dev_dataLastValue); cudaFree(dev_PixelPerimeter); cudaFree(dev_labelMap); cudaFree(dev_perimeter); cudaFree(dev_pixNum); cudaEventSynchronize(stop); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); checkCudaErrors(cudaEventDestroy(start)); checkCudaErrors(cudaEventDestroy(stop)); std::cout << "milliseconds = " << milliseconds << std::endl; return milliseconds; }
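Both the hipified and the original CUDA version of this labelling code resolve cross-block merges with a lock-free union-find: find chases parent pointers to a root, and findAndUnion links the larger root under the smaller one with atomicMin, retrying until the link sticks. The minimal standalone CUDA sketch below shows that merge on a 1-D array; it drops the nodata handling and the 2-D neighbourhood, and the data values, sizes, and kernel names are illustrative only.

#include <cstdio>
#include <cuda_runtime.h>

__device__ int uf_find(int *labels, int p) {
    while (p != labels[p]) p = labels[p];   // follow parent pointers to the root
    return p;
}

__device__ void uf_union(int *labels, int a, int b) {
    bool done;
    do {                                    // same atomicMin retry loop as findAndUnion
        a = uf_find(labels, a);
        b = uf_find(labels, b);
        if (a < b)      { int old = atomicMin(&labels[b], a); done = (old == b); b = old; }
        else if (b < a) { int old = atomicMin(&labels[a], b); done = (old == a); a = old; }
        else            done = true;
    } while (!done);
}

__global__ void initLabels(int *labels, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) labels[i] = i;               // every element starts as its own root
}

__global__ void mergeNeighbours(const int *data, int *labels, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i > 0 && i < n && data[i] == data[i - 1])
        uf_union(labels, i, i - 1);         // merge equal-valued neighbours
}

__global__ void flattenLabels(int *labels, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) labels[i] = uf_find(labels, i);
}

int main() {
    const int n = 8;
    const int h_data[n] = {5, 5, 7, 7, 7, 2, 2, 5};   // made-up test values
    int *d_data, *d_labels;
    cudaMalloc((void**)&d_data, n * sizeof(int));
    cudaMalloc((void**)&d_labels, n * sizeof(int));
    cudaMemcpy(d_data, h_data, n * sizeof(int), cudaMemcpyHostToDevice);
    initLabels<<<1, 64>>>(d_labels, n);
    mergeNeighbours<<<1, 64>>>(d_data, d_labels, n);
    flattenLabels<<<1, 64>>>(d_labels, n);
    int h_labels[n];
    cudaMemcpy(h_labels, d_labels, n * sizeof(int), cudaMemcpyDeviceToHost);
    for (int i = 0; i < n; i++) printf("%d ", h_labels[i]);   // expected: 0 0 2 2 2 5 5 7
    printf("\n");
    cudaFree(d_data);
    cudaFree(d_labels);
    return 0;
}

Because every conflicting update goes through atomicMin on a root, concurrent merges from different threads converge to the smallest index in each component, which is how gpuLineUfGlobal stitches labels across block boundaries.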
a3d23a87ee8943fc4ab2d7f3ea30a092af557a32.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "include/global_const.h" __global__ void NormKernel(double *d, size_t ferm_offset, float2 *vec) { unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int grid_length = blockDim.x * gridDim.x; __shared__ double norm[128]; //Allocates shared mem float2 f_0, f_1, f_2; norm[threadIdx.x] = 1.0; //First block of sites if(idx < size_dev) { f_0 = tex1Dfetch(fermion_texRef, idx + ferm_offset); f_1 = tex1Dfetch(fermion_texRef, size_dev + idx + ferm_offset); f_2 = tex1Dfetch(fermion_texRef, 2*size_dev + idx + ferm_offset); #ifdef USE_INTRINSIC norm[threadIdx.x] = __dmul_rn((double)f_0.x, (double)f_0.x); norm[threadIdx.x] += __dmul_rn((double)f_0.y, (double)f_0.y); norm[threadIdx.x] += __dmul_rn((double)f_1.x, (double)f_1.x); norm[threadIdx.x] += __dmul_rn((double)f_1.y, (double)f_1.y); norm[threadIdx.x] += __dmul_rn((double)f_2.x, (double)f_2.x); norm[threadIdx.x] += __dmul_rn((double)f_2.y, (double)f_2.y); #else norm[threadIdx.x] = (double)f_0.x*(double)f_0.x+(double)f_0.y*(double)f_0.y+ (double)f_1.x*(double)f_1.x+(double)f_1.y*(double)f_1.y+ (double)f_2.x*(double)f_2.x+(double)f_2.y*(double)f_2.y; #endif idx += grid_length; } //Other blocks of sites while (idx < size_dev) { f_0 = tex1Dfetch(fermion_texRef, idx + ferm_offset); f_1 = tex1Dfetch(fermion_texRef, size_dev + idx + ferm_offset); f_2 = tex1Dfetch(fermion_texRef, 2*size_dev + idx + ferm_offset); #ifdef USE_INTRINSIC norm[threadIdx.x] += __dmul_rn((double)f_0.x, (double)f_0.x); norm[threadIdx.x] += __dmul_rn((double)f_0.y, (double)f_0.y); norm[threadIdx.x] += __dmul_rn((double)f_1.x, (double)f_1.x); norm[threadIdx.x] += __dmul_rn((double)f_1.y, (double)f_1.y); norm[threadIdx.x] += __dmul_rn((double)f_2.x, (double)f_2.x); norm[threadIdx.x] += __dmul_rn((double)f_2.y, (double)f_2.y); #else norm[threadIdx.x] += (double)f_0.x*(double)f_0.x+(double)f_0.y*(double)f_0.y+ (double)f_1.x*(double)f_1.x+(double)f_1.y*(double)f_1.y+ (double)f_2.x*(double)f_2.x+(double)f_2.y*(double)f_2.y; #endif idx += grid_length; } __syncthreads(); //Performs first reduction if (threadIdx.x < 64) { norm[threadIdx.x] += norm[threadIdx.x+64]; } __syncthreads(); if (threadIdx.x < 32 ) //Inside a warp - no syncthreads() needed { volatile double *smem = norm; smem[threadIdx.x] += smem[threadIdx.x + 32]; smem[threadIdx.x] += smem[threadIdx.x + 16]; smem[threadIdx.x] += smem[threadIdx.x + 8]; smem[threadIdx.x] += smem[threadIdx.x + 4]; smem[threadIdx.x] += smem[threadIdx.x + 2]; smem[threadIdx.x] += smem[threadIdx.x + 1]; } if (threadIdx.x == 0) d[blockIdx.x] = norm[0]; //Outputs gridDim.x numbers to be further reduced } __global__ void NormKernelD(double *d, double2 *vec) { unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int grid_length = blockDim.x * gridDim.x; __shared__ double norm[128]; //Allocates shared mem double2 f_0, f_1, f_2; norm[threadIdx.x] = 0.0; //First block of sites if(idx < size_dev) { f_0 = vec[ idx]; f_1 = vec[ size_dev + idx]; f_2 = vec[ 2*size_dev + idx]; #ifdef USE_INTRINSIC norm[threadIdx.x] = __dmul_rn(f_0.x, f_0.x); norm[threadIdx.x] += __dmul_rn(f_0.y, f_0.y); norm[threadIdx.x] += __dmul_rn(f_1.x, f_1.x); norm[threadIdx.x] += __dmul_rn(f_1.y, f_1.y); norm[threadIdx.x] += __dmul_rn(f_2.x, f_2.x); norm[threadIdx.x] += __dmul_rn(f_2.y, f_2.y); #else norm[threadIdx.x] = f_0.x*f_0.x+f_0.y*f_0.y+ f_1.x*f_1.x+f_1.y*f_1.y+ f_2.x*f_2.x+f_2.y*f_2.y; #endif idx += grid_length; } //Other blocks of sites while (idx < 
size_dev) { f_0 = vec[ idx]; f_1 = vec[ size_dev + idx]; f_2 = vec[ 2*size_dev + idx]; #ifdef USE_INTRINSIC norm[threadIdx.x] += __dmul_rn(f_0.x, f_0.x); norm[threadIdx.x] += __dmul_rn(f_0.y, f_0.y); norm[threadIdx.x] += __dmul_rn(f_1.x, f_1.x); norm[threadIdx.x] += __dmul_rn(f_1.y, f_1.y); norm[threadIdx.x] += __dmul_rn(f_2.x, f_2.x); norm[threadIdx.x] += __dmul_rn(f_2.y, f_2.y); #else norm[threadIdx.x] += f_0.x*f_0.x+f_0.y*f_0.y+ f_1.x*f_1.x+f_1.y*f_1.y+ f_2.x*f_2.x+f_2.y*f_2.y; #endif idx += grid_length; } __syncthreads(); //Performs first reduction if (threadIdx.x < 64) { norm[threadIdx.x] += norm[threadIdx.x+64]; } __syncthreads(); if (threadIdx.x < 32 ) //Inside a warp - no syncthreads() needed { volatile double *smem = norm; smem[threadIdx.x] += smem[threadIdx.x + 32]; smem[threadIdx.x] += smem[threadIdx.x + 16]; smem[threadIdx.x] += smem[threadIdx.x + 8]; smem[threadIdx.x] += smem[threadIdx.x + 4]; smem[threadIdx.x] += smem[threadIdx.x + 2]; smem[threadIdx.x] += smem[threadIdx.x + 1]; } if (threadIdx.x == 0) d[blockIdx.x] = norm[0]; //Outputs gridDim.x numbers to be further reduced } __global__ void IpdotNormKernel(double *d, float4 *ipdot) { unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int grid_length = blockDim.x * gridDim.x; __shared__ double norm[128]; //Allocates shared mem float4 f_0; norm[threadIdx.x] = 0.0; //First block of sites if(idx < 8*size_dev) { f_0 = ipdot[idx]; #ifdef USE_INTRINSIC norm[threadIdx.x] = __dmul_rn((double)f_0.x, (double)f_0.x); norm[threadIdx.x] += __dmul_rn((double)f_0.y, (double)f_0.y); norm[threadIdx.x] += __dmul_rn((double)f_0.z, (double)f_0.z); norm[threadIdx.x] += __dmul_rn((double)f_0.w, (double)f_0.w); #else norm[threadIdx.x] = (double)f_0.x*(double)f_0.x+(double)f_0.y*(double)f_0.y+ (double)f_0.z*(double)f_0.z+(double)f_0.w*(double)f_0.w; #endif idx += grid_length; } //Other blocks of sites while (idx < 8*size_dev) { f_0 = ipdot[idx]; #ifdef USE_INTRINSIC norm[threadIdx.x] += __dmul_rn((double)f_0.x, (double)f_0.x); norm[threadIdx.x] += __dmul_rn((double)f_0.y, (double)f_0.y); norm[threadIdx.x] += __dmul_rn((double)f_0.z, (double)f_0.z); norm[threadIdx.x] += __dmul_rn((double)f_0.w, (double)f_0.w); #else norm[threadIdx.x] += (double)f_0.x*(double)f_0.x+(double)f_0.y*(double)f_0.y+ (double)f_0.z*(double)f_0.z+(double)f_0.w*(double)f_0.w; #endif idx += grid_length; } __syncthreads(); //Performs first reduction if (threadIdx.x < 64) { norm[threadIdx.x] += norm[threadIdx.x+64]; } __syncthreads(); if (threadIdx.x < 32 ) //Inside a warp - no syncthreads() needed { volatile double *smem = norm; smem[threadIdx.x] += smem[threadIdx.x + 32]; smem[threadIdx.x] += smem[threadIdx.x + 16]; smem[threadIdx.x] += smem[threadIdx.x + 8]; smem[threadIdx.x] += smem[threadIdx.x + 4]; smem[threadIdx.x] += smem[threadIdx.x + 2]; smem[threadIdx.x] += smem[threadIdx.x + 1]; } if (threadIdx.x == 0) d[blockIdx.x] = norm[0]; //Outputs gridDim.x numbers to be further reduced } template <unsigned int blockSize>__global__ void ReduceGPU(float *input, float *output) { __shared__ float sdata[blockSize]; // perform first level of reduction, // reading from global memory, writing to shared memory unsigned int tid = threadIdx.x; unsigned int ix = blockIdx.x * (blockSize*2) + threadIdx.x; sdata[tid] = input[ix]+ input[ix + blockSize]; __syncthreads(); // do reduction in shared mem if (blockSize >= 512) { if (tid < 256) { sdata[tid] += sdata[tid + 256]; } __syncthreads(); } if (blockSize >= 256) { if (tid < 128) { sdata[tid] += sdata[tid + 128]; } 
__syncthreads(); } if (blockSize >= 128) { if (tid < 64) { sdata[tid] += sdata[tid + 64]; } __syncthreads(); } if(tid < 32) { volatile float * smem = sdata; if (blockSize >= 64) { smem[tid] += smem[tid + 32]; } if (blockSize >= 32) { smem[tid] += smem[tid + 16]; } if (blockSize >= 16) { smem[tid] += smem[tid + 8]; } if (blockSize >= 8) { smem[tid] += smem[tid + 4]; } if (blockSize >= 4) { smem[tid] += smem[tid + 2]; } if (blockSize >= 2) { smem[tid] += smem[tid + 1]; } // if (blockSize >= 64) { sdata[tid] += sdata[tid + 32]; } // if (blockSize >= 32) { sdata[tid] += sdata[tid + 16]; } // if (blockSize >= 16) { sdata[tid] += sdata[tid + 8]; } // if (blockSize >= 8) { sdata[tid] += sdata[tid + 4]; } // if (blockSize >= 4) { sdata[tid] += sdata[tid + 2]; } // if (blockSize >= 2) { sdata[tid] += sdata[tid + 1]; } } // write result for this block to global mem if (tid == 0) output[blockIdx.x] = sdata[0]; } template <unsigned int blockSize>__global__ void ReduceDGPU(double *input, double *output) { __shared__ double sdata[blockSize]; // perform first level of reduction, // reading from global memory, writing to shared memory unsigned int tid = threadIdx.x; unsigned int ix = blockIdx.x * (blockSize*2) + threadIdx.x; sdata[tid] = input[ix]+ input[ix + blockSize]; __syncthreads(); // do reduction in shared mem if(blockSize >= 512) { if (tid < 256) { sdata[tid] += sdata[tid + 256]; } __syncthreads(); } if(blockSize >= 256) { if (tid < 128) { sdata[tid] += sdata[tid + 128]; } __syncthreads(); } if(blockSize >= 128) { if (tid < 64) { sdata[tid] += sdata[tid + 64]; } __syncthreads(); } if(tid < 32) { volatile double * smem = sdata; if (blockSize >= 64) { smem[tid] += smem[tid + 32]; } if (blockSize >= 32) { smem[tid] += smem[tid + 16]; } if (blockSize >= 16) { smem[tid] += smem[tid + 8]; } if (blockSize >= 8) { smem[tid] += smem[tid + 4]; } if (blockSize >= 4) { smem[tid] += smem[tid + 2]; } if (blockSize >= 2) { smem[tid] += smem[tid + 1]; } } // write result for this block to global mem if (tid == 0) output[blockIdx.x] = sdata[0]; } template <unsigned int blockSize>__global__ void ReduceSingleDGPU(double *input, double *output) { __shared__ double sdata[blockSize]; // perform first level of reduction, // reading from global memory, writing to shared memory unsigned int tid = threadIdx.x; sdata[tid] = input[tid]; __syncthreads(); // do reduction in shared mem if (blockSize >= 64) { if (threadIdx.x < 32 && threadIdx.x + 32 < blockDim.x) { sdata[tid] += sdata[tid + 32]; } __syncthreads();} if (blockSize >= 32) { if (threadIdx.x < 16 && threadIdx.x + 16 < blockDim.x) { sdata[tid] += sdata[tid + 16]; } __syncthreads();} if (blockSize >= 16) { if (threadIdx.x < 8 && threadIdx.x + 8 < blockDim.x) { sdata[tid] += sdata[tid + 8]; } __syncthreads();} if (blockSize >= 8) { if (threadIdx.x < 4 && threadIdx.x + 4 < blockDim.x) { sdata[tid] += sdata[tid + 4]; } __syncthreads();} if (blockSize >= 4) { if (threadIdx.x < 2 && threadIdx.x + 2 < blockDim.x) { sdata[tid] += sdata[tid + 2]; } __syncthreads();} if (blockSize >= 2) { if (threadIdx.x < 1 && threadIdx.x + 1 < blockDim.x) { sdata[tid] += sdata[tid + 1]; } } // write result for this block to global mem if (tid == 0) output[blockIdx.x] = sdata[0]; } void Reduce(float *in, float *out, int blocks, int threads, int sharedmem) { #ifdef DEBUG_MODE_2 printf("\033[32mDEBUG: inside Reduce ...\33[0m\n"); #endif switch(threads) { case 512: hipLaunchKernelGGL(( ReduceGPU<512>), dim3(blocks), dim3(threads), sharedmem, 0, in, out); cudaCheckError(AT,"ReduceGPU<512>"); 
break; case 256: hipLaunchKernelGGL(( ReduceGPU<256>), dim3(blocks), dim3(threads), sharedmem, 0, in, out); cudaCheckError(AT,"ReduceGPU<256>"); break; case 128: hipLaunchKernelGGL(( ReduceGPU<128>), dim3(blocks), dim3(threads), sharedmem, 0, in, out); cudaCheckError(AT,"ReduceGPU<128>"); break; case 64: hipLaunchKernelGGL(( ReduceGPU<64>), dim3(blocks), dim3(threads), sharedmem, 0, in, out); cudaCheckError(AT,"ReduceGPU<64>"); break; case 32: hipLaunchKernelGGL(( ReduceGPU<32>), dim3(blocks), dim3(threads), sharedmem, 0, in, out); cudaCheckError(AT,"ReduceGPU<32>"); break; case 16: hipLaunchKernelGGL(( ReduceGPU<16>), dim3(blocks), dim3(threads), sharedmem, 0, in, out); cudaCheckError(AT,"ReduceGPU<16>"); break; case 8: hipLaunchKernelGGL(( ReduceGPU<8>), dim3(blocks), dim3(threads), sharedmem, 0, in, out); cudaCheckError(AT,"ReduceGPU<8>"); break; case 4: hipLaunchKernelGGL(( ReduceGPU<4>), dim3(blocks), dim3(threads), sharedmem, 0, in, out); cudaCheckError(AT,"ReduceGPU<4>"); break; case 2: hipLaunchKernelGGL(( ReduceGPU<2>), dim3(blocks), dim3(threads), sharedmem, 0, in, out); cudaCheckError(AT,"ReduceGPU<2>"); break; case 1: hipLaunchKernelGGL(( ReduceGPU<1>), dim3(blocks), dim3(threads), sharedmem, 0, in, out); cudaCheckError(AT,"ReduceGPU<1>"); break; } #ifdef DEBUG_MODE_2 printf("\033[32m\tterminated Reduce\033[0m\n"); #endif } void ReduceDouble(double *in, double *out, int blocks, int threads, int sharedmem) { #ifdef DEBUG_MODE_2 printf("\033[32mDEBUG: inside ReduceDouble ...\033[0m\n"); #endif switch(threads) { case 512: hipLaunchKernelGGL(( ReduceDGPU<512>), dim3(blocks), dim3(threads), sharedmem, 0, in, out); cudaCheckError(AT,"ReduceDGPU<512>"); break; case 256: hipLaunchKernelGGL(( ReduceDGPU<256>), dim3(blocks), dim3(threads), sharedmem, 0, in, out); cudaCheckError(AT,"ReduceDGPU<256>"); break; case 128: hipLaunchKernelGGL(( ReduceDGPU<128>), dim3(blocks), dim3(threads), sharedmem, 0, in, out); cudaCheckError(AT,"ReduceDGPU<128>"); break; case 64: hipLaunchKernelGGL(( ReduceDGPU<64>), dim3(blocks), dim3(threads), sharedmem, 0, in, out); cudaCheckError(AT,"ReduceDGPU<64>"); break; case 32: hipLaunchKernelGGL(( ReduceDGPU<32>), dim3(blocks), dim3(threads), sharedmem, 0, in, out); cudaCheckError(AT,"ReduceDGPU<32>"); break; case 16: hipLaunchKernelGGL(( ReduceDGPU<16>), dim3(blocks), dim3(threads), sharedmem, 0, in, out); cudaCheckError(AT,"ReduceDGPU<16>"); break; case 8: hipLaunchKernelGGL(( ReduceDGPU<8>), dim3(blocks), dim3(threads), sharedmem, 0, in, out); cudaCheckError(AT,"ReduceDGPU<8>"); break; case 4: hipLaunchKernelGGL(( ReduceDGPU<4>), dim3(blocks), dim3(threads), sharedmem, 0, in, out); cudaCheckError(AT,"ReduceDGPU<4>"); break; case 2: hipLaunchKernelGGL(( ReduceDGPU<2>), dim3(blocks), dim3(threads), sharedmem, 0, in, out); cudaCheckError(AT,"ReduceDGPU<2>"); break; case 1: hipLaunchKernelGGL(( ReduceDGPU<1>), dim3(blocks), dim3(threads), sharedmem, 0, in, out); cudaCheckError(AT,"ReduceDGPU<1>"); break; } #ifdef DEBUG_MODE_2 printf("\033[32m\tterminated ReduceDouble\033[0m\n"); #endif } void Norm2(double *d, float2 *vector) { #ifdef DEBUG_MODE_2 printf("\033[32mDEBUG: inside Norm2 ...\033[0m\n"); #endif //Create and destroy here the variables for accumulation //They need to be of dimension NormGrid.x unsigned int threads, sharedmem; dim3 NormBlock(128); // here 128 is needed (see NormKernel) const unsigned int grid_size_limit = 1 << (int)ceil(log2((double)size/(double)NormBlock.x)); //Number of blocks const unsigned int grid_size = (grid_size_limit < 64) ? 
grid_size_limit : 64; dim3 NormGrid(grid_size); size_t vector_size=3*size*sizeof(float2); #ifdef DEBUG_MODE_2 float2 loc_vec; cudaSafe(AT,hipMemcpy(&loc_vec, vector, sizeof(float2), hipMemcpyDeviceToHost), "hipMemcpy"); printf("\033[32mDEBUG: Input vector size : %d\033[0m\n",(int)(vector_size)); printf("\033[32mDEBUG: Parameters : Grid size %d\033[0m\n",grid_size); printf("\033[32mDEBUG: Vector[0] : %f , %f \033[0m\n",loc_vec.x, loc_vec.y); #endif double *temp_d; cudaSafe(AT,hipMalloc((void**)&temp_d, sizeof(double)*grid_size), "hipMalloc"); size_t offset_f; cudaSafe(AT,hipBindTexture(&offset_f, fermion_texRef, vector, vector_size), "hipBindTexture"); offset_f/=sizeof(float2); hipLaunchKernelGGL(( NormKernel), dim3(NormGrid), dim3(NormBlock), 0, 0, temp_d, offset_f, vector); cudaCheckError(AT,"NormKernel"); //Accumulates moving a window of grid_size elements //Outputs a vector of grid_size elements (<=64 and power of 2) #ifdef DEBUG_MODE_2 double local_t[64]; int k; cudaSafe(AT,hipMemcpy(&local_t, temp_d, grid_size*sizeof(double), hipMemcpyDeviceToHost), "hipMemcpy"); for (k = 0; k < grid_size; k++) { printf("\033[32m\tTemporary vector [%d] : %f\033[0m\n", k, local_t[k]); } #endif cudaSafe(AT,hipUnbindTexture(fermion_texRef), "hipUnbindTexture"); //Further reduction threads = NormGrid.x; sharedmem = threads*sizeof(double); switch(threads) { case 64: hipLaunchKernelGGL(( ReduceSingleDGPU<64>), dim3(1), dim3(threads), sharedmem, 0, temp_d, temp_d); cudaCheckError(AT,"ReduceSingleDGPU<64>"); break; case 32: hipLaunchKernelGGL(( ReduceSingleDGPU<32>), dim3(1), dim3(threads), sharedmem, 0, temp_d, temp_d); cudaCheckError(AT,"ReduceSingleDGPU<32>"); break; case 16: hipLaunchKernelGGL(( ReduceSingleDGPU<16>), dim3(1), dim3(threads), sharedmem, 0, temp_d, temp_d); cudaCheckError(AT,"ReduceSingleDGPU<16>"); break; case 8: hipLaunchKernelGGL(( ReduceSingleDGPU<8>), dim3(1), dim3(threads), sharedmem, 0, temp_d, temp_d); cudaCheckError(AT,"ReduceSingleDGPU<8>"); break; case 4: hipLaunchKernelGGL(( ReduceSingleDGPU<4>), dim3(1), dim3(threads), sharedmem, 0, temp_d, temp_d); cudaCheckError(AT,"ReduceSingleDGPU<4>"); break; case 2: hipLaunchKernelGGL(( ReduceSingleDGPU<2>), dim3(1), dim3(threads), sharedmem, 0, temp_d, temp_d); cudaCheckError(AT,"ReduceSingleDGPU<2>"); break; case 1: hipLaunchKernelGGL(( ReduceSingleDGPU<1>), dim3(1), dim3(threads), sharedmem, 0, temp_d, temp_d); cudaCheckError(AT,"ReduceSingleDGPU<1>"); break; } cudaSafe(AT,hipMemcpy(d, temp_d, sizeof(double), hipMemcpyDeviceToDevice), "hipMemcpy"); cudaSafe(AT,hipFree(temp_d), "hipFree"); #ifdef DEBUG_MODE_2 double local; cudaSafe(AT,hipMemcpy(&local, d, sizeof(double), hipMemcpyDeviceToHost), "hipMemcpy"); printf("\033[32m\tNorm2 result : %f\033[0m\n", local); printf("\033[32m\tterminated Norm2 \033[0m\n"); exit(1); #endif } void Norm2D(double *d, double2 *vector) { #ifdef DEBUG_MODE_2 printf("\033[32mDEBUG: inside Norm2D ...\033[0m\n"); #endif //Create and destroy here the variables for accumulation //They need to be of dimension NormGrid.x unsigned int threads, sharedmem; dim3 NormBlock(128); // here 128 is needed (see NormKernel) const unsigned int grid_size_limit = 1 << (int)ceil(log2((double)size/(double)NormBlock.x)); //Number of blocks const unsigned int grid_size = (grid_size_limit < 64) ? 
grid_size_limit : 64; dim3 NormGrid(grid_size); double *temp_d; cudaSafe(AT,hipMalloc((void**)&temp_d, sizeof(double)*grid_size), "hipMalloc"); hipLaunchKernelGGL(( NormKernelD), dim3(NormGrid), dim3(NormBlock), 0, 0, temp_d, vector); cudaCheckError(AT,"NormKernelD"); //Accumulates moving a window of grid_size elements //Outputs a vector of grid_size elements (<=64 and power of 2) //Further reduction threads = NormGrid.x; sharedmem = threads*sizeof(double); switch(threads) { case 64: hipLaunchKernelGGL(( ReduceSingleDGPU<64>), dim3(1), dim3(threads), sharedmem, 0, temp_d, temp_d); cudaCheckError(AT,"ReduceSingleDGPU<64>"); break; case 32: hipLaunchKernelGGL(( ReduceSingleDGPU<32>), dim3(1), dim3(threads), sharedmem, 0, temp_d, temp_d); cudaCheckError(AT,"ReduceSingleDGPU<32>"); break; case 16: hipLaunchKernelGGL(( ReduceSingleDGPU<16>), dim3(1), dim3(threads), sharedmem, 0, temp_d, temp_d); cudaCheckError(AT,"ReduceSingleDGPU<16>"); break; case 8: hipLaunchKernelGGL(( ReduceSingleDGPU<8>), dim3(1), dim3(threads), sharedmem, 0, temp_d, temp_d); cudaCheckError(AT,"ReduceSingleDGPU<8>"); break; case 4: hipLaunchKernelGGL(( ReduceSingleDGPU<4>), dim3(1), dim3(threads), sharedmem, 0, temp_d, temp_d); cudaCheckError(AT,"ReduceSingleDGPU<4>"); break; case 2: hipLaunchKernelGGL(( ReduceSingleDGPU<2>), dim3(1), dim3(threads), sharedmem, 0, temp_d, temp_d); cudaCheckError(AT,"ReduceSingleDGPU<2>"); break; case 1: hipLaunchKernelGGL(( ReduceSingleDGPU<1>), dim3(1), dim3(threads), sharedmem, 0, temp_d, temp_d); cudaCheckError(AT,"ReduceSingleDGPU<1>"); break; } cudaSafe(AT,hipMemcpy(d, temp_d, sizeof(double), hipMemcpyDeviceToDevice), "hipMemcpy"); cudaSafe(AT,hipFree(temp_d), "hipFree"); #ifdef DEBUG_MODE_2 printf("\033[32m\tterminated Norm2D \033[0m\n"); #endif } void IpdotNorm2(double *d, float4 *ipdot) // ipdot has 2*no_links=8*size float4 elements { #ifdef DEBUG_MODE_2 printf("\033[32mDEBUG: inside IpdotNorm2 ...\033[0m\n"); #endif //Create and destroy here the variables for accumulation //They need to be of dimension NormGrid.x unsigned int threads, sharedmem; dim3 NormBlock(128); // here 128 is needed (see NormKernel) const unsigned int grid_size_limit = 1 << (int)ceil(log2((8.0*(double)size)/(double)NormBlock.x)); //Number of blocks const unsigned int grid_size = (grid_size_limit < 64) ? 
grid_size_limit : 64; dim3 NormGrid(grid_size); double *temp_d; cudaSafe(AT,hipMalloc((void**)&temp_d, sizeof(double)*grid_size), "hipMalloc"); hipLaunchKernelGGL(( IpdotNormKernel), dim3(NormGrid), dim3(NormBlock), 0, 0, temp_d, ipdot); cudaCheckError(AT,"IpdotNormKernel"); //Accumulates moving a window of grid_size elements //Outputs a vector of grid_size elements (<=64 and power of 2) //Further reduction threads = NormGrid.x; sharedmem = threads*sizeof(double); switch(threads) { case 64: hipLaunchKernelGGL(( ReduceSingleDGPU<64>), dim3(1), dim3(threads), sharedmem, 0, temp_d, temp_d); cudaCheckError(AT,"ReduceSingleDGPU<64>"); break; case 32: hipLaunchKernelGGL(( ReduceSingleDGPU<32>), dim3(1), dim3(threads), sharedmem, 0, temp_d, temp_d); cudaCheckError(AT,"ReduceSingleDGPU<32>"); break; case 16: hipLaunchKernelGGL(( ReduceSingleDGPU<16>), dim3(1), dim3(threads), sharedmem, 0, temp_d, temp_d); cudaCheckError(AT,"ReduceSingleDGPU<16>"); break; case 8: hipLaunchKernelGGL(( ReduceSingleDGPU<8>), dim3(1), dim3(threads), sharedmem, 0, temp_d, temp_d); cudaCheckError(AT,"ReduceSingleDGPU<8>"); break; case 4: hipLaunchKernelGGL(( ReduceSingleDGPU<4>), dim3(1), dim3(threads), sharedmem, 0, temp_d, temp_d); cudaCheckError(AT,"ReduceSingleDGPU<4>"); break; case 2: hipLaunchKernelGGL(( ReduceSingleDGPU<2>), dim3(1), dim3(threads), sharedmem, 0, temp_d, temp_d); cudaCheckError(AT,"ReduceSingleDGPU<2>"); break; case 1: hipLaunchKernelGGL(( ReduceSingleDGPU<1>), dim3(1), dim3(threads), sharedmem, 0, temp_d, temp_d); cudaCheckError(AT,"ReduceSingleDGPU<1>"); break; } cudaSafe(AT,hipMemcpy(d, temp_d, sizeof(double), hipMemcpyDeviceToDevice), "hipMemcpy"); cudaSafe(AT,hipFree(temp_d), "hipFree"); #ifdef DEBUG_MODE_2 printf("\033[32m\tterminated IpdotNorm2 \033[0m\n"); #endif }
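The norm routines above reduce in two stages: NormKernel/NormKernelD have each 128-thread block accumulate a grid-stride partial sum of squares in shared memory and write one double per block, and ReduceSingleDGPU then folds those at most 64 partial sums within a single block. The CUDA sketch below shows that two-stage structure in minimal form, without the texture fetch, the __dmul_rn intrinsic path, or the unrolled warp stage; buffer sizes and kernel names are illustrative only.

#include <cstdio>
#include <cuda_runtime.h>

// Stage 1: each block accumulates a grid-stride partial sum of x[i]*x[i]
// in shared memory and reduces it to one double per block.
__global__ void partialNorm2(const float *x, double *blockSums, int n) {
    __shared__ double s[128];                       // blockDim.x is assumed to be 128
    double acc = 0.0;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x)
        acc += (double)x[i] * (double)x[i];
    s[threadIdx.x] = acc;
    __syncthreads();
    for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
        if (threadIdx.x < stride) s[threadIdx.x] += s[threadIdx.x + stride];
        __syncthreads();
    }
    if (threadIdx.x == 0) blockSums[blockIdx.x] = s[0];
}

// Stage 2: one block folds the per-block partial sums into a single value
// (assumes m <= blockDim.x, 64 here, like the <=64 grid cap above).
__global__ void finalReduce(double *blockSums, int m) {
    __shared__ double s[64];
    s[threadIdx.x] = (threadIdx.x < m) ? blockSums[threadIdx.x] : 0.0;
    __syncthreads();
    for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
        if (threadIdx.x < stride) s[threadIdx.x] += s[threadIdx.x + stride];
        __syncthreads();
    }
    if (threadIdx.x == 0) blockSums[0] = s[0];
}

int main() {
    const int n = 1 << 16, blocks = 64, threads = 128;
    float *h_x = (float *)malloc(n * sizeof(float));
    for (int i = 0; i < n; i++) h_x[i] = 1.0f;      // norm^2 should come out as n
    float *d_x;
    double *d_sums;
    cudaMalloc((void**)&d_x, n * sizeof(float));
    cudaMalloc((void**)&d_sums, blocks * sizeof(double));
    cudaMemcpy(d_x, h_x, n * sizeof(float), cudaMemcpyHostToDevice);
    partialNorm2<<<blocks, threads>>>(d_x, d_sums, n);
    finalReduce<<<1, 64>>>(d_sums, blocks);
    double result = 0.0;
    cudaMemcpy(&result, d_sums, sizeof(double), cudaMemcpyDeviceToHost);
    printf("norm^2 = %f (expected %d)\n", result, n);
    cudaFree(d_x);
    cudaFree(d_sums);
    free(h_x);
    return 0;
}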
a3d23a87ee8943fc4ab2d7f3ea30a092af557a32.cu
#include "include/global_const.h" __global__ void NormKernel(double *d, size_t ferm_offset, float2 *vec) { unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int grid_length = blockDim.x * gridDim.x; __shared__ double norm[128]; //Allocates shared mem float2 f_0, f_1, f_2; norm[threadIdx.x] = 1.0; //First block of sites if(idx < size_dev) { f_0 = tex1Dfetch(fermion_texRef, idx + ferm_offset); f_1 = tex1Dfetch(fermion_texRef, size_dev + idx + ferm_offset); f_2 = tex1Dfetch(fermion_texRef, 2*size_dev + idx + ferm_offset); #ifdef USE_INTRINSIC norm[threadIdx.x] = __dmul_rn((double)f_0.x, (double)f_0.x); norm[threadIdx.x] += __dmul_rn((double)f_0.y, (double)f_0.y); norm[threadIdx.x] += __dmul_rn((double)f_1.x, (double)f_1.x); norm[threadIdx.x] += __dmul_rn((double)f_1.y, (double)f_1.y); norm[threadIdx.x] += __dmul_rn((double)f_2.x, (double)f_2.x); norm[threadIdx.x] += __dmul_rn((double)f_2.y, (double)f_2.y); #else norm[threadIdx.x] = (double)f_0.x*(double)f_0.x+(double)f_0.y*(double)f_0.y+ (double)f_1.x*(double)f_1.x+(double)f_1.y*(double)f_1.y+ (double)f_2.x*(double)f_2.x+(double)f_2.y*(double)f_2.y; #endif idx += grid_length; } //Other blocks of sites while (idx < size_dev) { f_0 = tex1Dfetch(fermion_texRef, idx + ferm_offset); f_1 = tex1Dfetch(fermion_texRef, size_dev + idx + ferm_offset); f_2 = tex1Dfetch(fermion_texRef, 2*size_dev + idx + ferm_offset); #ifdef USE_INTRINSIC norm[threadIdx.x] += __dmul_rn((double)f_0.x, (double)f_0.x); norm[threadIdx.x] += __dmul_rn((double)f_0.y, (double)f_0.y); norm[threadIdx.x] += __dmul_rn((double)f_1.x, (double)f_1.x); norm[threadIdx.x] += __dmul_rn((double)f_1.y, (double)f_1.y); norm[threadIdx.x] += __dmul_rn((double)f_2.x, (double)f_2.x); norm[threadIdx.x] += __dmul_rn((double)f_2.y, (double)f_2.y); #else norm[threadIdx.x] += (double)f_0.x*(double)f_0.x+(double)f_0.y*(double)f_0.y+ (double)f_1.x*(double)f_1.x+(double)f_1.y*(double)f_1.y+ (double)f_2.x*(double)f_2.x+(double)f_2.y*(double)f_2.y; #endif idx += grid_length; } __syncthreads(); //Performs first reduction if (threadIdx.x < 64) { norm[threadIdx.x] += norm[threadIdx.x+64]; } __syncthreads(); if (threadIdx.x < 32 ) //Inside a warp - no syncthreads() needed { volatile double *smem = norm; smem[threadIdx.x] += smem[threadIdx.x + 32]; smem[threadIdx.x] += smem[threadIdx.x + 16]; smem[threadIdx.x] += smem[threadIdx.x + 8]; smem[threadIdx.x] += smem[threadIdx.x + 4]; smem[threadIdx.x] += smem[threadIdx.x + 2]; smem[threadIdx.x] += smem[threadIdx.x + 1]; } if (threadIdx.x == 0) d[blockIdx.x] = norm[0]; //Outputs gridDim.x numbers to be further reduced } __global__ void NormKernelD(double *d, double2 *vec) { unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int grid_length = blockDim.x * gridDim.x; __shared__ double norm[128]; //Allocates shared mem double2 f_0, f_1, f_2; norm[threadIdx.x] = 0.0; //First block of sites if(idx < size_dev) { f_0 = vec[ idx]; f_1 = vec[ size_dev + idx]; f_2 = vec[ 2*size_dev + idx]; #ifdef USE_INTRINSIC norm[threadIdx.x] = __dmul_rn(f_0.x, f_0.x); norm[threadIdx.x] += __dmul_rn(f_0.y, f_0.y); norm[threadIdx.x] += __dmul_rn(f_1.x, f_1.x); norm[threadIdx.x] += __dmul_rn(f_1.y, f_1.y); norm[threadIdx.x] += __dmul_rn(f_2.x, f_2.x); norm[threadIdx.x] += __dmul_rn(f_2.y, f_2.y); #else norm[threadIdx.x] = f_0.x*f_0.x+f_0.y*f_0.y+ f_1.x*f_1.x+f_1.y*f_1.y+ f_2.x*f_2.x+f_2.y*f_2.y; #endif idx += grid_length; } //Other blocks of sites while (idx < size_dev) { f_0 = vec[ idx]; f_1 = vec[ size_dev + idx]; f_2 = vec[ 2*size_dev + idx]; 
#ifdef USE_INTRINSIC norm[threadIdx.x] += __dmul_rn(f_0.x, f_0.x); norm[threadIdx.x] += __dmul_rn(f_0.y, f_0.y); norm[threadIdx.x] += __dmul_rn(f_1.x, f_1.x); norm[threadIdx.x] += __dmul_rn(f_1.y, f_1.y); norm[threadIdx.x] += __dmul_rn(f_2.x, f_2.x); norm[threadIdx.x] += __dmul_rn(f_2.y, f_2.y); #else norm[threadIdx.x] += f_0.x*f_0.x+f_0.y*f_0.y+ f_1.x*f_1.x+f_1.y*f_1.y+ f_2.x*f_2.x+f_2.y*f_2.y; #endif idx += grid_length; } __syncthreads(); //Performs first reduction if (threadIdx.x < 64) { norm[threadIdx.x] += norm[threadIdx.x+64]; } __syncthreads(); if (threadIdx.x < 32 ) //Inside a warp - no syncthreads() needed { volatile double *smem = norm; smem[threadIdx.x] += smem[threadIdx.x + 32]; smem[threadIdx.x] += smem[threadIdx.x + 16]; smem[threadIdx.x] += smem[threadIdx.x + 8]; smem[threadIdx.x] += smem[threadIdx.x + 4]; smem[threadIdx.x] += smem[threadIdx.x + 2]; smem[threadIdx.x] += smem[threadIdx.x + 1]; } if (threadIdx.x == 0) d[blockIdx.x] = norm[0]; //Outputs gridDim.x numbers to be further reduced } __global__ void IpdotNormKernel(double *d, float4 *ipdot) { unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int grid_length = blockDim.x * gridDim.x; __shared__ double norm[128]; //Allocates shared mem float4 f_0; norm[threadIdx.x] = 0.0; //First block of sites if(idx < 8*size_dev) { f_0 = ipdot[idx]; #ifdef USE_INTRINSIC norm[threadIdx.x] = __dmul_rn((double)f_0.x, (double)f_0.x); norm[threadIdx.x] += __dmul_rn((double)f_0.y, (double)f_0.y); norm[threadIdx.x] += __dmul_rn((double)f_0.z, (double)f_0.z); norm[threadIdx.x] += __dmul_rn((double)f_0.w, (double)f_0.w); #else norm[threadIdx.x] = (double)f_0.x*(double)f_0.x+(double)f_0.y*(double)f_0.y+ (double)f_0.z*(double)f_0.z+(double)f_0.w*(double)f_0.w; #endif idx += grid_length; } //Other blocks of sites while (idx < 8*size_dev) { f_0 = ipdot[idx]; #ifdef USE_INTRINSIC norm[threadIdx.x] += __dmul_rn((double)f_0.x, (double)f_0.x); norm[threadIdx.x] += __dmul_rn((double)f_0.y, (double)f_0.y); norm[threadIdx.x] += __dmul_rn((double)f_0.z, (double)f_0.z); norm[threadIdx.x] += __dmul_rn((double)f_0.w, (double)f_0.w); #else norm[threadIdx.x] += (double)f_0.x*(double)f_0.x+(double)f_0.y*(double)f_0.y+ (double)f_0.z*(double)f_0.z+(double)f_0.w*(double)f_0.w; #endif idx += grid_length; } __syncthreads(); //Performs first reduction if (threadIdx.x < 64) { norm[threadIdx.x] += norm[threadIdx.x+64]; } __syncthreads(); if (threadIdx.x < 32 ) //Inside a warp - no syncthreads() needed { volatile double *smem = norm; smem[threadIdx.x] += smem[threadIdx.x + 32]; smem[threadIdx.x] += smem[threadIdx.x + 16]; smem[threadIdx.x] += smem[threadIdx.x + 8]; smem[threadIdx.x] += smem[threadIdx.x + 4]; smem[threadIdx.x] += smem[threadIdx.x + 2]; smem[threadIdx.x] += smem[threadIdx.x + 1]; } if (threadIdx.x == 0) d[blockIdx.x] = norm[0]; //Outputs gridDim.x numbers to be further reduced } template <unsigned int blockSize>__global__ void ReduceGPU(float *input, float *output) { __shared__ float sdata[blockSize]; // perform first level of reduction, // reading from global memory, writing to shared memory unsigned int tid = threadIdx.x; unsigned int ix = blockIdx.x * (blockSize*2) + threadIdx.x; sdata[tid] = input[ix]+ input[ix + blockSize]; __syncthreads(); // do reduction in shared mem if (blockSize >= 512) { if (tid < 256) { sdata[tid] += sdata[tid + 256]; } __syncthreads(); } if (blockSize >= 256) { if (tid < 128) { sdata[tid] += sdata[tid + 128]; } __syncthreads(); } if (blockSize >= 128) { if (tid < 64) { sdata[tid] += sdata[tid + 64]; } 
__syncthreads(); } if(tid < 32) { volatile float * smem = sdata; if (blockSize >= 64) { smem[tid] += smem[tid + 32]; } if (blockSize >= 32) { smem[tid] += smem[tid + 16]; } if (blockSize >= 16) { smem[tid] += smem[tid + 8]; } if (blockSize >= 8) { smem[tid] += smem[tid + 4]; } if (blockSize >= 4) { smem[tid] += smem[tid + 2]; } if (blockSize >= 2) { smem[tid] += smem[tid + 1]; } // if (blockSize >= 64) { sdata[tid] += sdata[tid + 32]; } // if (blockSize >= 32) { sdata[tid] += sdata[tid + 16]; } // if (blockSize >= 16) { sdata[tid] += sdata[tid + 8]; } // if (blockSize >= 8) { sdata[tid] += sdata[tid + 4]; } // if (blockSize >= 4) { sdata[tid] += sdata[tid + 2]; } // if (blockSize >= 2) { sdata[tid] += sdata[tid + 1]; } } // write result for this block to global mem if (tid == 0) output[blockIdx.x] = sdata[0]; } template <unsigned int blockSize>__global__ void ReduceDGPU(double *input, double *output) { __shared__ double sdata[blockSize]; // perform first level of reduction, // reading from global memory, writing to shared memory unsigned int tid = threadIdx.x; unsigned int ix = blockIdx.x * (blockSize*2) + threadIdx.x; sdata[tid] = input[ix]+ input[ix + blockSize]; __syncthreads(); // do reduction in shared mem if(blockSize >= 512) { if (tid < 256) { sdata[tid] += sdata[tid + 256]; } __syncthreads(); } if(blockSize >= 256) { if (tid < 128) { sdata[tid] += sdata[tid + 128]; } __syncthreads(); } if(blockSize >= 128) { if (tid < 64) { sdata[tid] += sdata[tid + 64]; } __syncthreads(); } if(tid < 32) { volatile double * smem = sdata; if (blockSize >= 64) { smem[tid] += smem[tid + 32]; } if (blockSize >= 32) { smem[tid] += smem[tid + 16]; } if (blockSize >= 16) { smem[tid] += smem[tid + 8]; } if (blockSize >= 8) { smem[tid] += smem[tid + 4]; } if (blockSize >= 4) { smem[tid] += smem[tid + 2]; } if (blockSize >= 2) { smem[tid] += smem[tid + 1]; } } // write result for this block to global mem if (tid == 0) output[blockIdx.x] = sdata[0]; } template <unsigned int blockSize>__global__ void ReduceSingleDGPU(double *input, double *output) { __shared__ double sdata[blockSize]; // perform first level of reduction, // reading from global memory, writing to shared memory unsigned int tid = threadIdx.x; sdata[tid] = input[tid]; __syncthreads(); // do reduction in shared mem if (blockSize >= 64) { if (threadIdx.x < 32 && threadIdx.x + 32 < blockDim.x) { sdata[tid] += sdata[tid + 32]; } __syncthreads();} if (blockSize >= 32) { if (threadIdx.x < 16 && threadIdx.x + 16 < blockDim.x) { sdata[tid] += sdata[tid + 16]; } __syncthreads();} if (blockSize >= 16) { if (threadIdx.x < 8 && threadIdx.x + 8 < blockDim.x) { sdata[tid] += sdata[tid + 8]; } __syncthreads();} if (blockSize >= 8) { if (threadIdx.x < 4 && threadIdx.x + 4 < blockDim.x) { sdata[tid] += sdata[tid + 4]; } __syncthreads();} if (blockSize >= 4) { if (threadIdx.x < 2 && threadIdx.x + 2 < blockDim.x) { sdata[tid] += sdata[tid + 2]; } __syncthreads();} if (blockSize >= 2) { if (threadIdx.x < 1 && threadIdx.x + 1 < blockDim.x) { sdata[tid] += sdata[tid + 1]; } } // write result for this block to global mem if (tid == 0) output[blockIdx.x] = sdata[0]; } void Reduce(float *in, float *out, int blocks, int threads, int sharedmem) { #ifdef DEBUG_MODE_2 printf("\033[32mDEBUG: inside Reduce ...\33[0m\n"); #endif switch(threads) { case 512: ReduceGPU<512><<<blocks, threads, sharedmem>>>(in, out); cudaCheckError(AT,"ReduceGPU<512>"); break; case 256: ReduceGPU<256><<<blocks, threads, sharedmem>>>(in, out); cudaCheckError(AT,"ReduceGPU<256>"); break; case 128: 
ReduceGPU<128><<<blocks, threads, sharedmem>>>(in, out); cudaCheckError(AT,"ReduceGPU<128>"); break; case 64: ReduceGPU<64><<<blocks, threads, sharedmem>>>(in, out); cudaCheckError(AT,"ReduceGPU<64>"); break; case 32: ReduceGPU<32><<<blocks, threads, sharedmem>>>(in, out); cudaCheckError(AT,"ReduceGPU<32>"); break; case 16: ReduceGPU<16><<<blocks, threads, sharedmem>>>(in, out); cudaCheckError(AT,"ReduceGPU<16>"); break; case 8: ReduceGPU<8><<<blocks, threads, sharedmem>>>(in, out); cudaCheckError(AT,"ReduceGPU<8>"); break; case 4: ReduceGPU<4><<<blocks, threads, sharedmem>>>(in, out); cudaCheckError(AT,"ReduceGPU<4>"); break; case 2: ReduceGPU<2><<<blocks, threads, sharedmem>>>(in, out); cudaCheckError(AT,"ReduceGPU<2>"); break; case 1: ReduceGPU<1><<<blocks, threads, sharedmem>>>(in, out); cudaCheckError(AT,"ReduceGPU<1>"); break; } #ifdef DEBUG_MODE_2 printf("\033[32m\tterminated Reduce\033[0m\n"); #endif } void ReduceDouble(double *in, double *out, int blocks, int threads, int sharedmem) { #ifdef DEBUG_MODE_2 printf("\033[32mDEBUG: inside ReduceDouble ...\033[0m\n"); #endif switch(threads) { case 512: ReduceDGPU<512><<<blocks, threads, sharedmem>>>(in, out); cudaCheckError(AT,"ReduceDGPU<512>"); break; case 256: ReduceDGPU<256><<<blocks, threads, sharedmem>>>(in, out); cudaCheckError(AT,"ReduceDGPU<256>"); break; case 128: ReduceDGPU<128><<<blocks, threads, sharedmem>>>(in, out); cudaCheckError(AT,"ReduceDGPU<128>"); break; case 64: ReduceDGPU<64><<<blocks, threads, sharedmem>>>(in, out); cudaCheckError(AT,"ReduceDGPU<64>"); break; case 32: ReduceDGPU<32><<<blocks, threads, sharedmem>>>(in, out); cudaCheckError(AT,"ReduceDGPU<32>"); break; case 16: ReduceDGPU<16><<<blocks, threads, sharedmem>>>(in, out); cudaCheckError(AT,"ReduceDGPU<16>"); break; case 8: ReduceDGPU<8><<<blocks, threads, sharedmem>>>(in, out); cudaCheckError(AT,"ReduceDGPU<8>"); break; case 4: ReduceDGPU<4><<<blocks, threads, sharedmem>>>(in, out); cudaCheckError(AT,"ReduceDGPU<4>"); break; case 2: ReduceDGPU<2><<<blocks, threads, sharedmem>>>(in, out); cudaCheckError(AT,"ReduceDGPU<2>"); break; case 1: ReduceDGPU<1><<<blocks, threads, sharedmem>>>(in, out); cudaCheckError(AT,"ReduceDGPU<1>"); break; } #ifdef DEBUG_MODE_2 printf("\033[32m\tterminated ReduceDouble\033[0m\n"); #endif } void Norm2(double *d, float2 *vector) { #ifdef DEBUG_MODE_2 printf("\033[32mDEBUG: inside Norm2 ...\033[0m\n"); #endif //Create and destroy here the variables for accumulation //They need to be of dimension NormGrid.x unsigned int threads, sharedmem; dim3 NormBlock(128); // here 128 is needed (see NormKernel) const unsigned int grid_size_limit = 1 << (int)ceil(log2((double)size/(double)NormBlock.x)); //Number of blocks const unsigned int grid_size = (grid_size_limit < 64) ? 
grid_size_limit : 64; dim3 NormGrid(grid_size); size_t vector_size=3*size*sizeof(float2); #ifdef DEBUG_MODE_2 float2 loc_vec; cudaSafe(AT,cudaMemcpy(&loc_vec, vector, sizeof(float2), cudaMemcpyDeviceToHost), "cudaMemcpy"); printf("\033[32mDEBUG: Input vector size : %d\033[0m\n",(int)(vector_size)); printf("\033[32mDEBUG: Parameters : Grid size %d\033[0m\n",grid_size); printf("\033[32mDEBUG: Vector[0] : %f , %f \033[0m\n",loc_vec.x, loc_vec.y); #endif double *temp_d; cudaSafe(AT,cudaMalloc((void**)&temp_d, sizeof(double)*grid_size), "cudaMalloc"); size_t offset_f; cudaSafe(AT,cudaBindTexture(&offset_f, fermion_texRef, vector, vector_size), "cudaBindTexture"); offset_f/=sizeof(float2); NormKernel<<<NormGrid, NormBlock>>>(temp_d, offset_f, vector); cudaCheckError(AT,"NormKernel"); //Accumulates moving a window of grid_size elements //Outputs a vector of grid_size elements (<=64 and power of 2) #ifdef DEBUG_MODE_2 double local_t[64]; int k; cudaSafe(AT,cudaMemcpy(&local_t, temp_d, grid_size*sizeof(double), cudaMemcpyDeviceToHost), "cudaMemcpy"); for (k = 0; k < grid_size; k++) { printf("\033[32m\tTemporary vector [%d] : %f\033[0m\n", k, local_t[k]); } #endif cudaSafe(AT,cudaUnbindTexture(fermion_texRef), "cudaUnbindTexture"); //Further reduction threads = NormGrid.x; sharedmem = threads*sizeof(double); switch(threads) { case 64: ReduceSingleDGPU<64><<<1, threads, sharedmem>>>(temp_d, temp_d); cudaCheckError(AT,"ReduceSingleDGPU<64>"); break; case 32: ReduceSingleDGPU<32><<<1, threads, sharedmem>>>(temp_d, temp_d); cudaCheckError(AT,"ReduceSingleDGPU<32>"); break; case 16: ReduceSingleDGPU<16><<<1, threads, sharedmem>>>(temp_d, temp_d); cudaCheckError(AT,"ReduceSingleDGPU<16>"); break; case 8: ReduceSingleDGPU<8><<<1, threads, sharedmem>>>(temp_d, temp_d); cudaCheckError(AT,"ReduceSingleDGPU<8>"); break; case 4: ReduceSingleDGPU<4><<<1, threads, sharedmem>>>(temp_d, temp_d); cudaCheckError(AT,"ReduceSingleDGPU<4>"); break; case 2: ReduceSingleDGPU<2><<<1, threads, sharedmem>>>(temp_d, temp_d); cudaCheckError(AT,"ReduceSingleDGPU<2>"); break; case 1: ReduceSingleDGPU<1><<<1, threads, sharedmem>>>(temp_d, temp_d); cudaCheckError(AT,"ReduceSingleDGPU<1>"); break; } cudaSafe(AT,cudaMemcpy(d, temp_d, sizeof(double), cudaMemcpyDeviceToDevice), "cudaMemcpy"); cudaSafe(AT,cudaFree(temp_d), "cudaFree"); #ifdef DEBUG_MODE_2 double local; cudaSafe(AT,cudaMemcpy(&local, d, sizeof(double), cudaMemcpyDeviceToHost), "cudaMemcpy"); printf("\033[32m\tNorm2 result : %f\033[0m\n", local); printf("\033[32m\tterminated Norm2 \033[0m\n"); exit(1); #endif } void Norm2D(double *d, double2 *vector) { #ifdef DEBUG_MODE_2 printf("\033[32mDEBUG: inside Norm2D ...\033[0m\n"); #endif //Create and destroy here the variables for accumulation //They need to be of dimension NormGrid.x unsigned int threads, sharedmem; dim3 NormBlock(128); // here 128 is needed (see NormKernel) const unsigned int grid_size_limit = 1 << (int)ceil(log2((double)size/(double)NormBlock.x)); //Number of blocks const unsigned int grid_size = (grid_size_limit < 64) ? 
grid_size_limit : 64; dim3 NormGrid(grid_size); double *temp_d; cudaSafe(AT,cudaMalloc((void**)&temp_d, sizeof(double)*grid_size), "cudaMalloc"); NormKernelD<<<NormGrid, NormBlock>>>(temp_d, vector); cudaCheckError(AT,"NormKernelD"); //Accumulates moving a window of grid_size elements //Outputs a vector of grid_size elements (<=64 and power of 2) //Further reduction threads = NormGrid.x; sharedmem = threads*sizeof(double); switch(threads) { case 64: ReduceSingleDGPU<64><<<1, threads, sharedmem>>>(temp_d, temp_d); cudaCheckError(AT,"ReduceSingleDGPU<64>"); break; case 32: ReduceSingleDGPU<32><<<1, threads, sharedmem>>>(temp_d, temp_d); cudaCheckError(AT,"ReduceSingleDGPU<32>"); break; case 16: ReduceSingleDGPU<16><<<1, threads, sharedmem>>>(temp_d, temp_d); cudaCheckError(AT,"ReduceSingleDGPU<16>"); break; case 8: ReduceSingleDGPU<8><<<1, threads, sharedmem>>>(temp_d, temp_d); cudaCheckError(AT,"ReduceSingleDGPU<8>"); break; case 4: ReduceSingleDGPU<4><<<1, threads, sharedmem>>>(temp_d, temp_d); cudaCheckError(AT,"ReduceSingleDGPU<4>"); break; case 2: ReduceSingleDGPU<2><<<1, threads, sharedmem>>>(temp_d, temp_d); cudaCheckError(AT,"ReduceSingleDGPU<2>"); break; case 1: ReduceSingleDGPU<1><<<1, threads, sharedmem>>>(temp_d, temp_d); cudaCheckError(AT,"ReduceSingleDGPU<1>"); break; } cudaSafe(AT,cudaMemcpy(d, temp_d, sizeof(double), cudaMemcpyDeviceToDevice), "cudaMemcpy"); cudaSafe(AT,cudaFree(temp_d), "cudaFree"); #ifdef DEBUG_MODE_2 printf("\033[32m\tterminated Norm2D \033[0m\n"); #endif } void IpdotNorm2(double *d, float4 *ipdot) // ipdot has 2*no_links=8*size float4 elements { #ifdef DEBUG_MODE_2 printf("\033[32mDEBUG: inside IpdotNorm2 ...\033[0m\n"); #endif //Create and destroy here the variables for accumulation //They need to be of dimension NormGrid.x unsigned int threads, sharedmem; dim3 NormBlock(128); // here 128 is needed (see NormKernel) const unsigned int grid_size_limit = 1 << (int)ceil(log2((8.0*(double)size)/(double)NormBlock.x)); //Number of blocks const unsigned int grid_size = (grid_size_limit < 64) ? 
grid_size_limit : 64; dim3 NormGrid(grid_size); double *temp_d; cudaSafe(AT,cudaMalloc((void**)&temp_d, sizeof(double)*grid_size), "cudaMalloc"); IpdotNormKernel<<<NormGrid, NormBlock>>>(temp_d, ipdot); cudaCheckError(AT,"IpdotNormKernel"); //Accumulates moving a window of grid_size elements //Outputs a vector of grid_size elements (<=64 and power of 2) //Further reduction threads = NormGrid.x; sharedmem = threads*sizeof(double); switch(threads) { case 64: ReduceSingleDGPU<64><<<1, threads, sharedmem>>>(temp_d, temp_d); cudaCheckError(AT,"ReduceSingleDGPU<64>"); break; case 32: ReduceSingleDGPU<32><<<1, threads, sharedmem>>>(temp_d, temp_d); cudaCheckError(AT,"ReduceSingleDGPU<32>"); break; case 16: ReduceSingleDGPU<16><<<1, threads, sharedmem>>>(temp_d, temp_d); cudaCheckError(AT,"ReduceSingleDGPU<16>"); break; case 8: ReduceSingleDGPU<8><<<1, threads, sharedmem>>>(temp_d, temp_d); cudaCheckError(AT,"ReduceSingleDGPU<8>"); break; case 4: ReduceSingleDGPU<4><<<1, threads, sharedmem>>>(temp_d, temp_d); cudaCheckError(AT,"ReduceSingleDGPU<4>"); break; case 2: ReduceSingleDGPU<2><<<1, threads, sharedmem>>>(temp_d, temp_d); cudaCheckError(AT,"ReduceSingleDGPU<2>"); break; case 1: ReduceSingleDGPU<1><<<1, threads, sharedmem>>>(temp_d, temp_d); cudaCheckError(AT,"ReduceSingleDGPU<1>"); break; } cudaSafe(AT,cudaMemcpy(d, temp_d, sizeof(double), cudaMemcpyDeviceToDevice), "cudaMemcpy"); cudaSafe(AT,cudaFree(temp_d), "cudaFree"); #ifdef DEBUG_MODE_2 printf("\033[32m\tterminated IpdotNorm2 \033[0m\n"); #endif }
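// Hypothetical usage sketch (not part of the original sources): driving the Reduce()
// wrapper above from host code to sum 2*blocks*threads floats, assuming the float
// kernel follows the same two-elements-per-thread pattern as ReduceDGPU. The sizes,
// the plain CUDA calls and the final host-side accumulation are illustrative; the
// original code wraps every call in the cudaSafe/cudaCheckError helpers defined elsewhere.
void SumExample()
{
  const int threads = 256;                    // must match one of the template sizes in Reduce()
  const int blocks  = 64;
  const int n       = 2 * blocks * threads;   // each thread folds two input elements

  float *d_in, *d_partial;
  cudaMalloc((void**)&d_in, n * sizeof(float));
  cudaMalloc((void**)&d_partial, blocks * sizeof(float));
  // ... fill d_in with data ...

  // one partial sum per block is written to d_partial
  Reduce(d_in, d_partial, blocks, threads, threads * sizeof(float));

  // finish the reduction on the host (Norm2/Norm2D above instead feed their
  // partial sums to ReduceSingleDGPU on the device)
  float partial[blocks];
  float sum = 0.0f;
  cudaMemcpy(partial, d_partial, blocks * sizeof(float), cudaMemcpyDeviceToHost);
  for (int i = 0; i < blocks; i++) sum += partial[i];

  cudaFree(d_in);
  cudaFree(d_partial);
}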
44601eec1287ff948766446eb17c96d3b80ae8d6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <gauge_field_order.h> namespace quda { /** Kernel argument struct */ template <typename OutOrder, typename InOrder> struct CopyGaugeExArg { OutOrder out; const InOrder in; int Xin[QUDA_MAX_DIM]; int Xout[QUDA_MAX_DIM]; int volume; int volumeEx; int nDim; int geometry; int faceVolumeCB[QUDA_MAX_DIM]; bool regularToextended; CopyGaugeExArg(const OutOrder &out, const InOrder &in, const int *Xout, const int *Xin, const int *faceVolumeCB, int nDim, int geometry) : out(out), in(in), nDim(nDim), geometry(geometry) { for (int d=0; d<nDim; d++) { this->Xout[d] = Xout[d]; this->Xin[d] = Xin[d]; this->faceVolumeCB[d] = faceVolumeCB[d]; } if(out.volumeCB > in.volumeCB){ this->volume = 2*in.volumeCB; this->volumeEx = 2*out.volumeCB; this->regularToextended = true; } else{ this->volume = 2*out.volumeCB; this->volumeEx = 2*in.volumeCB; this->regularToextended = false; } } }; /** Copy a regular/extended gauge field into an extended/regular gauge field */ template <typename FloatOut, typename FloatIn, int length, typename OutOrder, typename InOrder, bool regularToextended> __device__ __host__ void copyGaugeEx(CopyGaugeExArg<OutOrder,InOrder> &arg, int X, int parity) { typedef typename mapper<FloatIn>::type RegTypeIn; typedef typename mapper<FloatOut>::type RegTypeOut; int x[4]; int R[4]; int xin, xout; if(regularToextended){ //regular to extended for (int d=0; d<4; d++) R[d] = (arg.Xout[d] - arg.Xin[d]) >> 1; int za = X/(arg.Xin[0]/2); int x0h = X - za*(arg.Xin[0]/2); int zb = za/arg.Xin[1]; x[1] = za - zb*arg.Xin[1]; x[3] = zb / arg.Xin[2]; x[2] = zb - x[3]*arg.Xin[2]; x[0] = 2*x0h + ((x[1] + x[2] + x[3] + parity) & 1); // Y is the cb spatial index into the extended gauge field xout = ((((x[3]+R[3])*arg.Xout[2] + (x[2]+R[2]))*arg.Xout[1] + (x[1]+R[1]))*arg.Xout[0]+(x[0]+R[0])) >> 1; xin = X; } else{ //extended to regular gauge for (int d=0; d<4; d++) R[d] = (arg.Xin[d] - arg.Xout[d]) >> 1; int za = X/(arg.Xout[0]/2); int x0h = X - za*(arg.Xout[0]/2); int zb = za/arg.Xout[1]; x[1] = za - zb*arg.Xout[1]; x[3] = zb / arg.Xout[2]; x[2] = zb - x[3]*arg.Xout[2]; x[0] = 2*x0h + ((x[1] + x[2] + x[3] + parity) & 1); // Y is the cb spatial index into the extended gauge field xin = ((((x[3]+R[3])*arg.Xin[2] + (x[2]+R[2]))*arg.Xin[1] + (x[1]+R[1]))*arg.Xin[0]+(x[0]+R[0])) >> 1; xout = X; } for(int d=0; d<arg.geometry; d++){ RegTypeIn in[length]; RegTypeOut out[length]; arg.in.load(in, xin, d, parity); for (int i=0; i<length; i++) out[i] = in[i]; arg.out.save(out, xout, d, parity); }//dir } template <typename FloatOut, typename FloatIn, int length, typename OutOrder, typename InOrder, bool regularToextended> void copyGaugeEx(CopyGaugeExArg<OutOrder,InOrder> arg) { for (int parity=0; parity<2; parity++) { for(int X=0; X<arg.volume/2; X++){ copyGaugeEx<FloatOut, FloatIn, length, OutOrder, InOrder, regularToextended>(arg, X, parity); } } } template <typename FloatOut, typename FloatIn, int length, typename OutOrder, typename InOrder, bool regularToextended> __global__ void copyGaugeExKernel(CopyGaugeExArg<OutOrder,InOrder> arg) { for (int parity=0; parity<2; parity++) { int X = blockIdx.x * blockDim.x + threadIdx.x; if (X >= arg.volume/2) return; copyGaugeEx<FloatOut, FloatIn, length, OutOrder, InOrder, regularToextended>(arg, X, parity); } } template <typename FloatOut, typename FloatIn, int length, typename OutOrder, typename InOrder> class CopyGaugeEx : Tunable { CopyGaugeExArg<OutOrder,InOrder> arg; const GaugeField 
&meta; // use for metadata QudaFieldLocation location; private: unsigned int sharedBytesPerThread() const { return 0; } unsigned int sharedBytesPerBlock(const TuneParam &param) const { return 0 ;} bool tuneGridDim() const { return false; } // Don't tune the grid dimensions. unsigned int minThreads() const { return arg.volume/2; } public: CopyGaugeEx(CopyGaugeExArg<OutOrder,InOrder> &arg, const GaugeField &meta, QudaFieldLocation location) : arg(arg), meta(meta), location(location) { writeAuxString("out_stride=%d,in_stride=%d,geometery=%d",arg.out.stride,arg.in.stride,arg.geometry); } virtual ~CopyGaugeEx() { ; } void apply(const hipStream_t &stream) { TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); if (location == QUDA_CPU_FIELD_LOCATION) { if(arg.regularToextended) copyGaugeEx<FloatOut, FloatIn, length, OutOrder, InOrder, true>(arg); else copyGaugeEx<FloatOut, FloatIn, length, OutOrder, InOrder, false>(arg); } else if (location == QUDA_CUDA_FIELD_LOCATION) { if(arg.regularToextended)hipLaunchKernelGGL(( copyGaugeExKernel<FloatOut, FloatIn, length, OutOrder, InOrder, true>) , dim3(tp.grid), dim3(tp.block), tp.shared_bytes, stream, arg); elsehipLaunchKernelGGL(( copyGaugeExKernel<FloatOut, FloatIn, length, OutOrder, InOrder, false>) , dim3(tp.grid), dim3(tp.block), tp.shared_bytes, stream, arg); } } TuneKey tuneKey() const { return TuneKey(meta.VolString(), typeid(*this).name(), aux); } std::string paramString(const TuneParam &param) const { // Don't bother printing the grid dim. std::stringstream ps; ps << "block=(" << param.block.x << "," << param.block.y << "," << param.block.z << "), "; ps << "shared=" << param.shared_bytes; return ps.str(); } long long flops() const { return 0; } long long bytes() const { int sites = 4*arg.volume/2; return 2 * sites * ( arg.in.Bytes() + arg.in.hasPhase*sizeof(FloatIn) + arg.out.Bytes() + arg.out.hasPhase*sizeof(FloatOut) ); } }; template <typename FloatOut, typename FloatIn, int length, typename OutOrder, typename InOrder> void copyGaugeEx(OutOrder outOrder, const InOrder inOrder, const int *E, const int *X, const int *faceVolumeCB, const GaugeField &meta, QudaFieldLocation location) { CopyGaugeExArg<OutOrder,InOrder> arg(outOrder, inOrder, E, X, faceVolumeCB, meta.Ndim(), meta.Geometry()); CopyGaugeEx<FloatOut, FloatIn, length, OutOrder, InOrder> copier(arg, meta, location); copier.apply(0); if (location == QUDA_CUDA_FIELD_LOCATION) checkCudaError(); } template <typename FloatOut, typename FloatIn, int length, typename InOrder> void copyGaugeEx(const InOrder &inOrder, const int *X, GaugeField &out, QudaFieldLocation location, FloatOut *Out) { int faceVolumeCB[QUDA_MAX_DIM]; for (int i=0; i<4; i++) faceVolumeCB[i] = out.SurfaceCB(i) * out.Nface(); if (out.isNative()) { if (out.Reconstruct() == QUDA_RECONSTRUCT_NO) { if (typeid(FloatOut)==typeid(short) && out.LinkType() == QUDA_ASQTAD_FAT_LINKS) { copyGaugeEx<short,FloatIn,length> (FloatNOrder<short,length,2,19>(out, (short*)Out), inOrder, out.X(), X, faceVolumeCB, out, location); } else { typedef typename gauge_mapper<FloatOut,QUDA_RECONSTRUCT_NO>::type G; copyGaugeEx<FloatOut,FloatIn,length> (G(out, Out), inOrder, out.X(), X, faceVolumeCB, out, location); } } else if (out.Reconstruct() == QUDA_RECONSTRUCT_12) { typedef typename gauge_mapper<FloatOut,QUDA_RECONSTRUCT_12>::type G; copyGaugeEx<FloatOut,FloatIn,length> (G(out, Out), inOrder, out.X(), X, faceVolumeCB, out, location); } else if (out.Reconstruct() == QUDA_RECONSTRUCT_8) { typedef typename 
gauge_mapper<FloatOut,QUDA_RECONSTRUCT_8>::type G; copyGaugeEx<FloatOut,FloatIn,length> (G(out, Out), inOrder, out.X(), X, faceVolumeCB, out, location); #ifdef GPU_STAGGERED_DIRAC } else if (out.Reconstruct() == QUDA_RECONSTRUCT_13) { typedef typename gauge_mapper<FloatOut,QUDA_RECONSTRUCT_13>::type G; copyGaugeEx<FloatOut,FloatIn,length> (G(out, Out), inOrder, out.X(), X, faceVolumeCB, out, location); } else if (out.Reconstruct() == QUDA_RECONSTRUCT_9) { typedef typename gauge_mapper<FloatOut,QUDA_RECONSTRUCT_9>::type G; copyGaugeEx<FloatOut,FloatIn,length> (G(out, Out), inOrder, out.X(), X, faceVolumeCB, out, location); #endif } else { errorQuda("Reconstruction %d and order %d not supported", out.Reconstruct(), out.Order()); } } else if (out.Order() == QUDA_QDP_GAUGE_ORDER) { #ifdef BUILD_QDP_INTERFACE copyGaugeEx<FloatOut,FloatIn,length> (QDPOrder<FloatOut,length>(out, Out), inOrder, out.X(), X, faceVolumeCB, out, location); #else errorQuda("QDP interface has not been built\n"); #endif } else if (out.Order() == QUDA_MILC_GAUGE_ORDER) { #ifdef BUILD_MILC_INTERFACE copyGaugeEx<FloatOut,FloatIn,length> (MILCOrder<FloatOut,length>(out, Out), inOrder, out.X(), X, faceVolumeCB, out, location); #else errorQuda("MILC interface has not been built\n"); #endif } else if (out.Order() == QUDA_TIFR_GAUGE_ORDER) { #ifdef BUILD_TIFR_INTERFACE copyGaugeEx<FloatOut,FloatIn,length> (TIFROrder<FloatOut,length>(out, Out), inOrder, out.X(), X, faceVolumeCB, out, location); #else errorQuda("TIFR interface has not been built\n"); #endif } else { errorQuda("Gauge field %d order not supported", out.Order()); } } template <typename FloatOut, typename FloatIn, int length> void copyGaugeEx(GaugeField &out, const GaugeField &in, QudaFieldLocation location, FloatOut *Out, FloatIn *In) { if (in.isNative()) { if (in.Reconstruct() == QUDA_RECONSTRUCT_NO) { if (typeid(FloatIn)==typeid(short) && in.LinkType() == QUDA_ASQTAD_FAT_LINKS) { copyGaugeEx<FloatOut,short,length> (FloatNOrder<short,length,2,19>(in, (short*)In), in.X(), out, location, Out); } else { typedef typename gauge_mapper<FloatIn,QUDA_RECONSTRUCT_NO>::type G; copyGaugeEx<FloatOut,FloatIn,length> (G(in, In), in.X(), out, location, Out); } } else if (in.Reconstruct() == QUDA_RECONSTRUCT_12) { typedef typename gauge_mapper<FloatIn,QUDA_RECONSTRUCT_12>::type G; copyGaugeEx<FloatOut,FloatIn,length> (G(in, In), in.X(), out, location, Out); } else if (in.Reconstruct() == QUDA_RECONSTRUCT_8) { typedef typename gauge_mapper<FloatIn,QUDA_RECONSTRUCT_8>::type G; copyGaugeEx<FloatOut,FloatIn,length> (G(in, In), in.X(), out, location, Out); #ifdef GPU_STAGGERED_DIRAC } else if (in.Reconstruct() == QUDA_RECONSTRUCT_13) { typedef typename gauge_mapper<FloatIn,QUDA_RECONSTRUCT_13>::type G; copyGaugeEx<FloatOut,FloatIn,length> (G(in, In), in.X(), out, location, Out); } else if (in.Reconstruct() == QUDA_RECONSTRUCT_9) { typedef typename gauge_mapper<FloatIn,QUDA_RECONSTRUCT_9>::type G; copyGaugeEx<FloatOut,FloatIn,length> (G(in, In), in.X(), out, location, Out); #endif } else { errorQuda("Reconstruction %d and order %d not supported", in.Reconstruct(), in.Order()); } } else if (in.Order() == QUDA_QDP_GAUGE_ORDER) { #ifdef BUILD_QDP_INTERFACE copyGaugeEx<FloatOut,FloatIn,length>(QDPOrder<FloatIn,length>(in, In), in.X(), out, location, Out); #else errorQuda("QDP interface has not been built\n"); #endif } else if (in.Order() == QUDA_MILC_GAUGE_ORDER) { #ifdef BUILD_MILC_INTERFACE copyGaugeEx<FloatOut,FloatIn,length>(MILCOrder<FloatIn,length>(in, In), in.X(), out, location, Out); 
#else errorQuda("MILC interface has not been built\n"); #endif } else if (in.Order() == QUDA_TIFR_GAUGE_ORDER) { #ifdef BUILD_TIFR_INTERFACE copyGaugeEx<FloatOut,FloatIn,length>(TIFROrder<FloatIn,length>(in, In), in.X(), out, location, Out); #else errorQuda("TIFR interface has not been built\n"); #endif } else { errorQuda("Gauge field %d order not supported", in.Order()); } } template <typename FloatOut, typename FloatIn> void copyGaugeEx(GaugeField &out, const GaugeField &in, QudaFieldLocation location, FloatOut *Out, FloatIn *In) { if (in.Ncolor() != 3 && out.Ncolor() != 3) { errorQuda("Unsupported number of colors; out.Nc=%d, in.Nc=%d", out.Ncolor(), in.Ncolor()); } if (out.Geometry() != in.Geometry()) { errorQuda("Field geometries %d %d do not match", out.Geometry(), in.Geometry()); } if (in.LinkType() != QUDA_ASQTAD_MOM_LINKS && out.LinkType() != QUDA_ASQTAD_MOM_LINKS) { // we are doing gauge field packing copyGaugeEx<FloatOut,FloatIn,18>(out, in, location, Out, In); } else { errorQuda("Not supported"); } } void copyExtendedGauge(GaugeField &out, const GaugeField &in, QudaFieldLocation location, void *Out, void *In) { for (int d=0; d<in.Ndim(); d++) { if ( (out.X()[d] - in.X()[d]) % 2 != 0) errorQuda("Cannot copy into an asymmetrically extended gauge field"); } if (out.Precision() == QUDA_DOUBLE_PRECISION) { if (in.Precision() == QUDA_DOUBLE_PRECISION) { copyGaugeEx(out, in, location, (double*)Out, (double*)In); } else if (in.Precision() == QUDA_SINGLE_PRECISION) { copyGaugeEx(out, in, location, (double*)Out, (float*)In); } } else if (out.Precision() == QUDA_SINGLE_PRECISION) { if (in.Precision() == QUDA_DOUBLE_PRECISION) { copyGaugeEx(out, in, location, (float*)Out, (double*)In); } else if (in.Precision() == QUDA_SINGLE_PRECISION) { copyGaugeEx(out, in, location, (float*)Out, (float*)In); } } } } // namespace quda
44601eec1287ff948766446eb17c96d3b80ae8d6.cu
#include <gauge_field_order.h> namespace quda { /** Kernel argument struct */ template <typename OutOrder, typename InOrder> struct CopyGaugeExArg { OutOrder out; const InOrder in; int Xin[QUDA_MAX_DIM]; int Xout[QUDA_MAX_DIM]; int volume; int volumeEx; int nDim; int geometry; int faceVolumeCB[QUDA_MAX_DIM]; bool regularToextended; CopyGaugeExArg(const OutOrder &out, const InOrder &in, const int *Xout, const int *Xin, const int *faceVolumeCB, int nDim, int geometry) : out(out), in(in), nDim(nDim), geometry(geometry) { for (int d=0; d<nDim; d++) { this->Xout[d] = Xout[d]; this->Xin[d] = Xin[d]; this->faceVolumeCB[d] = faceVolumeCB[d]; } if(out.volumeCB > in.volumeCB){ this->volume = 2*in.volumeCB; this->volumeEx = 2*out.volumeCB; this->regularToextended = true; } else{ this->volume = 2*out.volumeCB; this->volumeEx = 2*in.volumeCB; this->regularToextended = false; } } }; /** Copy a regular/extended gauge field into an extended/regular gauge field */ template <typename FloatOut, typename FloatIn, int length, typename OutOrder, typename InOrder, bool regularToextended> __device__ __host__ void copyGaugeEx(CopyGaugeExArg<OutOrder,InOrder> &arg, int X, int parity) { typedef typename mapper<FloatIn>::type RegTypeIn; typedef typename mapper<FloatOut>::type RegTypeOut; int x[4]; int R[4]; int xin, xout; if(regularToextended){ //regular to extended for (int d=0; d<4; d++) R[d] = (arg.Xout[d] - arg.Xin[d]) >> 1; int za = X/(arg.Xin[0]/2); int x0h = X - za*(arg.Xin[0]/2); int zb = za/arg.Xin[1]; x[1] = za - zb*arg.Xin[1]; x[3] = zb / arg.Xin[2]; x[2] = zb - x[3]*arg.Xin[2]; x[0] = 2*x0h + ((x[1] + x[2] + x[3] + parity) & 1); // Y is the cb spatial index into the extended gauge field xout = ((((x[3]+R[3])*arg.Xout[2] + (x[2]+R[2]))*arg.Xout[1] + (x[1]+R[1]))*arg.Xout[0]+(x[0]+R[0])) >> 1; xin = X; } else{ //extended to regular gauge for (int d=0; d<4; d++) R[d] = (arg.Xin[d] - arg.Xout[d]) >> 1; int za = X/(arg.Xout[0]/2); int x0h = X - za*(arg.Xout[0]/2); int zb = za/arg.Xout[1]; x[1] = za - zb*arg.Xout[1]; x[3] = zb / arg.Xout[2]; x[2] = zb - x[3]*arg.Xout[2]; x[0] = 2*x0h + ((x[1] + x[2] + x[3] + parity) & 1); // Y is the cb spatial index into the extended gauge field xin = ((((x[3]+R[3])*arg.Xin[2] + (x[2]+R[2]))*arg.Xin[1] + (x[1]+R[1]))*arg.Xin[0]+(x[0]+R[0])) >> 1; xout = X; } for(int d=0; d<arg.geometry; d++){ RegTypeIn in[length]; RegTypeOut out[length]; arg.in.load(in, xin, d, parity); for (int i=0; i<length; i++) out[i] = in[i]; arg.out.save(out, xout, d, parity); }//dir } template <typename FloatOut, typename FloatIn, int length, typename OutOrder, typename InOrder, bool regularToextended> void copyGaugeEx(CopyGaugeExArg<OutOrder,InOrder> arg) { for (int parity=0; parity<2; parity++) { for(int X=0; X<arg.volume/2; X++){ copyGaugeEx<FloatOut, FloatIn, length, OutOrder, InOrder, regularToextended>(arg, X, parity); } } } template <typename FloatOut, typename FloatIn, int length, typename OutOrder, typename InOrder, bool regularToextended> __global__ void copyGaugeExKernel(CopyGaugeExArg<OutOrder,InOrder> arg) { for (int parity=0; parity<2; parity++) { int X = blockIdx.x * blockDim.x + threadIdx.x; if (X >= arg.volume/2) return; copyGaugeEx<FloatOut, FloatIn, length, OutOrder, InOrder, regularToextended>(arg, X, parity); } } template <typename FloatOut, typename FloatIn, int length, typename OutOrder, typename InOrder> class CopyGaugeEx : Tunable { CopyGaugeExArg<OutOrder,InOrder> arg; const GaugeField &meta; // use for metadata QudaFieldLocation location; private: unsigned int 
sharedBytesPerThread() const { return 0; } unsigned int sharedBytesPerBlock(const TuneParam &param) const { return 0 ;} bool tuneGridDim() const { return false; } // Don't tune the grid dimensions. unsigned int minThreads() const { return arg.volume/2; } public: CopyGaugeEx(CopyGaugeExArg<OutOrder,InOrder> &arg, const GaugeField &meta, QudaFieldLocation location) : arg(arg), meta(meta), location(location) { writeAuxString("out_stride=%d,in_stride=%d,geometery=%d",arg.out.stride,arg.in.stride,arg.geometry); } virtual ~CopyGaugeEx() { ; } void apply(const cudaStream_t &stream) { TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); if (location == QUDA_CPU_FIELD_LOCATION) { if(arg.regularToextended) copyGaugeEx<FloatOut, FloatIn, length, OutOrder, InOrder, true>(arg); else copyGaugeEx<FloatOut, FloatIn, length, OutOrder, InOrder, false>(arg); } else if (location == QUDA_CUDA_FIELD_LOCATION) { if(arg.regularToextended) copyGaugeExKernel<FloatOut, FloatIn, length, OutOrder, InOrder, true> <<<tp.grid, tp.block, tp.shared_bytes, stream>>>(arg); else copyGaugeExKernel<FloatOut, FloatIn, length, OutOrder, InOrder, false> <<<tp.grid, tp.block, tp.shared_bytes, stream>>>(arg); } } TuneKey tuneKey() const { return TuneKey(meta.VolString(), typeid(*this).name(), aux); } std::string paramString(const TuneParam &param) const { // Don't bother printing the grid dim. std::stringstream ps; ps << "block=(" << param.block.x << "," << param.block.y << "," << param.block.z << "), "; ps << "shared=" << param.shared_bytes; return ps.str(); } long long flops() const { return 0; } long long bytes() const { int sites = 4*arg.volume/2; return 2 * sites * ( arg.in.Bytes() + arg.in.hasPhase*sizeof(FloatIn) + arg.out.Bytes() + arg.out.hasPhase*sizeof(FloatOut) ); } }; template <typename FloatOut, typename FloatIn, int length, typename OutOrder, typename InOrder> void copyGaugeEx(OutOrder outOrder, const InOrder inOrder, const int *E, const int *X, const int *faceVolumeCB, const GaugeField &meta, QudaFieldLocation location) { CopyGaugeExArg<OutOrder,InOrder> arg(outOrder, inOrder, E, X, faceVolumeCB, meta.Ndim(), meta.Geometry()); CopyGaugeEx<FloatOut, FloatIn, length, OutOrder, InOrder> copier(arg, meta, location); copier.apply(0); if (location == QUDA_CUDA_FIELD_LOCATION) checkCudaError(); } template <typename FloatOut, typename FloatIn, int length, typename InOrder> void copyGaugeEx(const InOrder &inOrder, const int *X, GaugeField &out, QudaFieldLocation location, FloatOut *Out) { int faceVolumeCB[QUDA_MAX_DIM]; for (int i=0; i<4; i++) faceVolumeCB[i] = out.SurfaceCB(i) * out.Nface(); if (out.isNative()) { if (out.Reconstruct() == QUDA_RECONSTRUCT_NO) { if (typeid(FloatOut)==typeid(short) && out.LinkType() == QUDA_ASQTAD_FAT_LINKS) { copyGaugeEx<short,FloatIn,length> (FloatNOrder<short,length,2,19>(out, (short*)Out), inOrder, out.X(), X, faceVolumeCB, out, location); } else { typedef typename gauge_mapper<FloatOut,QUDA_RECONSTRUCT_NO>::type G; copyGaugeEx<FloatOut,FloatIn,length> (G(out, Out), inOrder, out.X(), X, faceVolumeCB, out, location); } } else if (out.Reconstruct() == QUDA_RECONSTRUCT_12) { typedef typename gauge_mapper<FloatOut,QUDA_RECONSTRUCT_12>::type G; copyGaugeEx<FloatOut,FloatIn,length> (G(out, Out), inOrder, out.X(), X, faceVolumeCB, out, location); } else if (out.Reconstruct() == QUDA_RECONSTRUCT_8) { typedef typename gauge_mapper<FloatOut,QUDA_RECONSTRUCT_8>::type G; copyGaugeEx<FloatOut,FloatIn,length> (G(out, Out), inOrder, out.X(), X, faceVolumeCB, out, location); #ifdef 
GPU_STAGGERED_DIRAC } else if (out.Reconstruct() == QUDA_RECONSTRUCT_13) { typedef typename gauge_mapper<FloatOut,QUDA_RECONSTRUCT_13>::type G; copyGaugeEx<FloatOut,FloatIn,length> (G(out, Out), inOrder, out.X(), X, faceVolumeCB, out, location); } else if (out.Reconstruct() == QUDA_RECONSTRUCT_9) { typedef typename gauge_mapper<FloatOut,QUDA_RECONSTRUCT_9>::type G; copyGaugeEx<FloatOut,FloatIn,length> (G(out, Out), inOrder, out.X(), X, faceVolumeCB, out, location); #endif } else { errorQuda("Reconstruction %d and order %d not supported", out.Reconstruct(), out.Order()); } } else if (out.Order() == QUDA_QDP_GAUGE_ORDER) { #ifdef BUILD_QDP_INTERFACE copyGaugeEx<FloatOut,FloatIn,length> (QDPOrder<FloatOut,length>(out, Out), inOrder, out.X(), X, faceVolumeCB, out, location); #else errorQuda("QDP interface has not been built\n"); #endif } else if (out.Order() == QUDA_MILC_GAUGE_ORDER) { #ifdef BUILD_MILC_INTERFACE copyGaugeEx<FloatOut,FloatIn,length> (MILCOrder<FloatOut,length>(out, Out), inOrder, out.X(), X, faceVolumeCB, out, location); #else errorQuda("MILC interface has not been built\n"); #endif } else if (out.Order() == QUDA_TIFR_GAUGE_ORDER) { #ifdef BUILD_TIFR_INTERFACE copyGaugeEx<FloatOut,FloatIn,length> (TIFROrder<FloatOut,length>(out, Out), inOrder, out.X(), X, faceVolumeCB, out, location); #else errorQuda("TIFR interface has not been built\n"); #endif } else { errorQuda("Gauge field %d order not supported", out.Order()); } } template <typename FloatOut, typename FloatIn, int length> void copyGaugeEx(GaugeField &out, const GaugeField &in, QudaFieldLocation location, FloatOut *Out, FloatIn *In) { if (in.isNative()) { if (in.Reconstruct() == QUDA_RECONSTRUCT_NO) { if (typeid(FloatIn)==typeid(short) && in.LinkType() == QUDA_ASQTAD_FAT_LINKS) { copyGaugeEx<FloatOut,short,length> (FloatNOrder<short,length,2,19>(in, (short*)In), in.X(), out, location, Out); } else { typedef typename gauge_mapper<FloatIn,QUDA_RECONSTRUCT_NO>::type G; copyGaugeEx<FloatOut,FloatIn,length> (G(in, In), in.X(), out, location, Out); } } else if (in.Reconstruct() == QUDA_RECONSTRUCT_12) { typedef typename gauge_mapper<FloatIn,QUDA_RECONSTRUCT_12>::type G; copyGaugeEx<FloatOut,FloatIn,length> (G(in, In), in.X(), out, location, Out); } else if (in.Reconstruct() == QUDA_RECONSTRUCT_8) { typedef typename gauge_mapper<FloatIn,QUDA_RECONSTRUCT_8>::type G; copyGaugeEx<FloatOut,FloatIn,length> (G(in, In), in.X(), out, location, Out); #ifdef GPU_STAGGERED_DIRAC } else if (in.Reconstruct() == QUDA_RECONSTRUCT_13) { typedef typename gauge_mapper<FloatIn,QUDA_RECONSTRUCT_13>::type G; copyGaugeEx<FloatOut,FloatIn,length> (G(in, In), in.X(), out, location, Out); } else if (in.Reconstruct() == QUDA_RECONSTRUCT_9) { typedef typename gauge_mapper<FloatIn,QUDA_RECONSTRUCT_9>::type G; copyGaugeEx<FloatOut,FloatIn,length> (G(in, In), in.X(), out, location, Out); #endif } else { errorQuda("Reconstruction %d and order %d not supported", in.Reconstruct(), in.Order()); } } else if (in.Order() == QUDA_QDP_GAUGE_ORDER) { #ifdef BUILD_QDP_INTERFACE copyGaugeEx<FloatOut,FloatIn,length>(QDPOrder<FloatIn,length>(in, In), in.X(), out, location, Out); #else errorQuda("QDP interface has not been built\n"); #endif } else if (in.Order() == QUDA_MILC_GAUGE_ORDER) { #ifdef BUILD_MILC_INTERFACE copyGaugeEx<FloatOut,FloatIn,length>(MILCOrder<FloatIn,length>(in, In), in.X(), out, location, Out); #else errorQuda("MILC interface has not been built\n"); #endif } else if (in.Order() == QUDA_TIFR_GAUGE_ORDER) { #ifdef BUILD_TIFR_INTERFACE 
copyGaugeEx<FloatOut,FloatIn,length>(TIFROrder<FloatIn,length>(in, In), in.X(), out, location, Out); #else errorQuda("TIFR interface has not been built\n"); #endif } else { errorQuda("Gauge field %d order not supported", in.Order()); } } template <typename FloatOut, typename FloatIn> void copyGaugeEx(GaugeField &out, const GaugeField &in, QudaFieldLocation location, FloatOut *Out, FloatIn *In) { if (in.Ncolor() != 3 && out.Ncolor() != 3) { errorQuda("Unsupported number of colors; out.Nc=%d, in.Nc=%d", out.Ncolor(), in.Ncolor()); } if (out.Geometry() != in.Geometry()) { errorQuda("Field geometries %d %d do not match", out.Geometry(), in.Geometry()); } if (in.LinkType() != QUDA_ASQTAD_MOM_LINKS && out.LinkType() != QUDA_ASQTAD_MOM_LINKS) { // we are doing gauge field packing copyGaugeEx<FloatOut,FloatIn,18>(out, in, location, Out, In); } else { errorQuda("Not supported"); } } void copyExtendedGauge(GaugeField &out, const GaugeField &in, QudaFieldLocation location, void *Out, void *In) { for (int d=0; d<in.Ndim(); d++) { if ( (out.X()[d] - in.X()[d]) % 2 != 0) errorQuda("Cannot copy into an asymmetrically extended gauge field"); } if (out.Precision() == QUDA_DOUBLE_PRECISION) { if (in.Precision() == QUDA_DOUBLE_PRECISION) { copyGaugeEx(out, in, location, (double*)Out, (double*)In); } else if (in.Precision() == QUDA_SINGLE_PRECISION) { copyGaugeEx(out, in, location, (double*)Out, (float*)In); } } else if (out.Precision() == QUDA_SINGLE_PRECISION) { if (in.Precision() == QUDA_DOUBLE_PRECISION) { copyGaugeEx(out, in, location, (float*)Out, (double*)In); } else if (in.Precision() == QUDA_SINGLE_PRECISION) { copyGaugeEx(out, in, location, (float*)Out, (float*)In); } } } } // namespace quda
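// Hypothetical standalone illustration (not part of QUDA): the index arithmetic used by
// copyGaugeEx above, pulled out into a host function. Given a checkerboard site index X
// and a parity on the regular lattice of extents Xin[4], it returns the checkerboard
// index of the same site inside the extended lattice of extents Xout[4], whose border
// width per dimension is R[d] = (Xout[d] - Xin[d]) / 2. This mirrors the
// regularToextended branch of the kernel; the function name is illustrative.
static int RegularToExtendedIndex(int X, int parity, const int Xin[4], const int Xout[4])
{
  int R[4], x[4];
  for (int d = 0; d < 4; d++) R[d] = (Xout[d] - Xin[d]) / 2;

  // decompose the checkerboard index into 4-d coordinates; x[0] is reconstructed
  // from the parity exactly as in the kernel
  int za  = X / (Xin[0] / 2);
  int x0h = X - za * (Xin[0] / 2);
  int zb  = za / Xin[1];
  x[1] = za - zb * Xin[1];
  x[3] = zb / Xin[2];
  x[2] = zb - x[3] * Xin[2];
  x[0] = 2 * x0h + ((x[1] + x[2] + x[3] + parity) & 1);

  // shift the site into the interior of the extended volume and re-linearize
  return ((((x[3] + R[3]) * Xout[2] + (x[2] + R[2])) * Xout[1]
            + (x[1] + R[1])) * Xout[0] + (x[0] + R[0])) / 2;
}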
55e2323b85198b2f3a3014be7bc660a000eb9caf.hip
// !!! This is a file automatically generated by hipify!!! #include<iostream> #include<stdio.h> #include<string.h> #include <opencv2/core/core.hpp> #include <opencv2/highgui/highgui.hpp> using namespace std; using namespace cv; int main() { int i,j,k; cv::Mat input; input = cv::imread("input.jpg", IMREAD_COLOR); unsigned char *temp = (unsigned char*)(input.data); int rows = input.rows , cols = input.cols; int process[rows][cols]; int swap[rows][cols]; k=0; for(i=0;i<rows;i++){ for(j=0;j<cols;j++){ process[i][j] = temp[k]; k++; } } int size_input = sizeof(char) * 3 * input.rows * input.cols; unsigned char *dev_input; hipMalloc( (void**)&dev_input, size_input); hipMemcpy( dev_input, temp, size_input, hipMemcpyHostToDevice); /*int colsTemp; for(i=0;i<rows;i++){ k=0; colsTemp = cols-1; for(j=0;j<cols;j++){ swap[i][k] = process[i][colsTemp]; colsTemp--; k++; } }*/ k=0; for(i=0;i<rows;i++){ for(j=0;j<cols;j++){ temp[k] = swap[i][j]; k++; } } Mat output = Mat(rows, cols, CV_8UC3 , temp); cv::imwrite("output.jpg",output); }
55e2323b85198b2f3a3014be7bc660a000eb9caf.cu
#include<iostream> #include<stdio.h> #include<string.h> #include <opencv2/core/core.hpp> #include <opencv2/highgui/highgui.hpp> using namespace std; using namespace cv; int main() { int i,j,k; cv::Mat input; input = cv::imread("input.jpg", IMREAD_COLOR); unsigned char *temp = (unsigned char*)(input.data); int rows = input.rows , cols = input.cols; int process[rows][cols]; int swap[rows][cols]; k=0; for(i=0;i<rows;i++){ for(j=0;j<cols;j++){ process[i][j] = temp[k]; k++; } } int size_input = sizeof(char) * 3 * input.rows * input.cols; unsigned char *dev_input; cudaMalloc( (void**)&dev_input, size_input); cudaMemcpy( dev_input, temp, size_input, cudaMemcpyHostToDevice); /*int colsTemp; for(i=0;i<rows;i++){ k=0; colsTemp = cols-1; for(j=0;j<cols;j++){ swap[i][k] = process[i][colsTemp]; colsTemp--; k++; } }*/ k=0; for(i=0;i<rows;i++){ for(j=0;j<cols;j++){ temp[k] = swap[i][j]; k++; } } Mat output = Mat(rows, cols, CV_8UC3 , temp); cv::imwrite("output.jpg",output); }
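// Hypothetical sketch (not part of the original file): the program above copies the image
// to dev_input but never launches a kernel, and the commented-out host loop suggests a
// horizontal mirror was intended. A device-side version of that mirror might look like the
// kernel below; the name, the one-thread-per-pixel mapping and the launch configuration are
// all illustrative assumptions.
__global__ void MirrorRows(const unsigned char* in, unsigned char* out, int rows, int cols)
{
  int x = blockIdx.x * blockDim.x + threadIdx.x;   // column
  int y = blockIdx.y * blockDim.y + threadIdx.y;   // row
  if (x < cols && y < rows) {
    for (int c = 0; c < 3; ++c)                    // three interleaved channels (CV_8UC3)
      out[(y * cols + x) * 3 + c] = in[(y * cols + (cols - 1 - x)) * 3 + c];
  }
}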
37004d43062db2d79ed90d03513c5ed04679442c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void add(int n, float a, float *x, float *y){ int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < n) y[i] = a*x[i] + y[i]; }
37004d43062db2d79ed90d03513c5ed04679442c.cu
#include "includes.h" __global__ void add(int n, float a, float *x, float *y){ int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < n) y[i] = a*x[i] + y[i]; }
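// Hypothetical launch sketch (not part of the original file): how the add() kernel above
// can be driven from the host. The block size is an assumption; x and y are assumed to be
// device pointers that already hold data.
void LaunchAddExample(int n, float a, float *x, float *y)
{
  const int block = 256;
  const int grid  = (n + block - 1) / block;   // round up so every element gets a thread
  add<<<grid, block>>>(n, a, x, y);            // y[i] = a * x[i] + y[i]
  cudaDeviceSynchronize();
}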
e5a3b53d53371e451a37a505cad6ee4750709d59.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "sys/time.h" #include <stdio.h> double getTimeStamp() { struct timeval tv; gettimeofday(&tv, NULL); return (double)tv.tv_usec / 1000000 + tv.tv_sec; } // device-side matrix addition /*__global__ void f_addmat(float* A, float* B, float* C, int nx, int ny) { // kernel code might look something like this // but you may want to pad the matrices and index into them accordingly int ix = threadId.x + bloackId.x * blockDim.x; int iy = threadId.y + bloackId.y * blockDim.y; int idx = iy * ny + ix; if ((ix < nx) && (iy < ny)) C[idx] = A[idx] + B[idx]; }*/ __global__ void f_addmat(float* A, float* B, float* C, int nx, int ny) { // kernel code might look something like this // but you may want to pad the matrices and index into them accordingly int ix = threadIdx.x + blockIdx.x * blockDim.x; int iy = threadIdx.y + blockIdx.y * blockDim.y; //int numElem=nx*ny; long long idx = iy * nx + ix; if ((ix < nx) && (iy < ny)){ C[idx] = A[idx] + B[idx]; } } void matrixSumHost(float* A, float* B, float* C, int nx, int ny) { float* ia = A, * ib = B, * ic = C; for (int iy = 0; iy < ny; iy++) { for (int ix = 0; ix < nx; ix++) ic[ix] = ia[ix] + ib[ix]; ia += nx; ib += nx; ic += nx; } } void initDataA(float *h_A, int nx, int ny) { int xcoord = 0; int ycoord = 0; for (int i = 0; i < ny*nx; i++) { h_A[i]= (float)(xcoord + ycoord) / 3.0; if (xcoord == nx - 1) { xcoord = 0; ycoord += 1; } else { xcoord += 1; } } } void initDataB(float* h_B, int nx, int ny) { int xcoord = 0; int ycoord = 0; for (int i = 0; i < ny * nx; i++) { h_B[i] = (float)(xcoord + ycoord)* 3.14; if (xcoord == nx - 1) { xcoord = 0; ycoord += 1; } else { xcoord += 1; } } } //int argc, char* argv[] int main(int argc, char* argv[]) { if (argc != 3) { printf("Error: wrong number of args\n"); //exit(); } int nx = atoi(argv[1]); // should check validity int ny = atoi(argv[2]); // should check validity int noElems = nx * ny; int bytes = noElems * sizeof(float); //printf("my name"); float* h_A = (float*)malloc(bytes); float* h_B = (float*)malloc(bytes); float* h_hC = (float*)malloc(bytes); // host result initDataA(h_A, nx,ny); initDataB(h_B, nx, ny); matrixSumHost(h_A, h_B, h_hC, nx, ny); /* device side*/ float* d_A, * d_B, * d_C; hipMalloc((void**)& d_A, bytes); hipMalloc((void**)& d_B, bytes); hipMalloc((void**)& d_C, bytes); double timeStampA = getTimeStamp(); float* h_dC = (float*)malloc(bytes); hipMemcpy(d_A, h_A, bytes, hipMemcpyHostToDevice); hipMemcpy(d_B, h_B, bytes, hipMemcpyHostToDevice); double timeStampB = getTimeStamp(); int blockx=32; int blocky=32; int marker=0; while (nx>blockx*65535){ marker=1; blockx=2*blockx; } while (ny>blocky*65535){ marker=2; blocky=2*blocky; } if (marker==1){ blocky=1024/blockx; } dim3 block(blockx, blocky); // you will want to configure this int gridSizeX=(nx + block.x - 1) / block.x; if (gridSizeX>=65535){ gridSizeX=65535; } int gridSizeY=(ny + block.y - 1) / block.y; if (gridSizeY>=65535){ gridSizeY=65535; } dim3 grid(gridSizeX, gridSizeY); hipLaunchKernelGGL(( f_addmat) , dim3(grid), dim3(block) , 0, 0, d_A, d_B, d_C, nx, ny); hipDeviceSynchronize(); double timeStampC = getTimeStamp(); hipMemcpy(h_dC, d_C, bytes, hipMemcpyDeviceToHost); double timeStampD = getTimeStamp(); hipFree(d_A); hipFree(d_B); hipFree(d_C); hipDeviceReset(); /*for (int j=0;j<10;j++){ printf("%f %f\n",h_dC[j],h_hC[j]); }*/ for (int i = 0; i < noElems; i++) { if (h_dC[i]!=h_hC[i]){ printf("wrong %d\n",i); for (int j=i;j<i+10;j++){ printf("%f 
%f\n",h_dC[j],h_hC[j]); } break; } } printf("total_time:%4f",timeStampD-timeStampA); printf("CPU_GPU_time:%4f",timeStampB-timeStampA); printf("kernel_time:%4f",timeStampC-timeStampB); printf("GPU_CPU_time:%4f",timeStampD-timeStampC); //............................................................................... /*for (int i = 0; i < noElems; i++) { cout << h_A[i]; } cout << " " << endl; for (int i = 0; i < noElems; i++) { cout << h_B[i]; } cout << " " << endl; matrixSumHost(h_A, h_B, h_hC, nx, ny); for (int i = 0; i < noElems; i++) { cout << h_hC[i]; } cout << " " << endl;*/ //cout << h_A[900]<<endl; /* // get program arguments if (argc != 3) { printf("Error: wrong number of args\n"); //exit(); } int nx = atoi(argv[2]); // should check validity int ny = atoi(argv[3]); // should check validity int noElems = nx * ny; int bytes = noElems * sizeof(float); // but you may want to pad the matrices // alloc memory host-side float* h_A = (float*)malloc(bytes); float* h_B = (float*)malloc(bytes); float* h_hC = (float*)malloc(bytes); // host result float* h_dC = (float*)malloc(bytes); // gpu result // init matrices with random data //initData(h_A, noElems); initData(h_B, noElems); // alloc memory dev-side float* d_A, * d_B, * d_C; hipMalloc((void**)& d_A, bytes); hipMalloc((void**)& d_B, bytes); hipMalloc((void**)& d_C, bytes); double timeStampA = getTimeStamp(); hipMemcpy(d_A, h_A, bytes, cudaMemCpyHostToDevice); hipMemcpy(d_B, h_B, bytes, cudaMemCpyHostToDevice); // note that the transfers would be twice as fast if h_A and h_B // matrices are pinned double timeStampB = getTimeStamp(); // invoke Kernel dim3 block(32, 32); // you will want to configure this dim3 grid((nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y); f_addmat << <grid, block >> > (d_A, d_B, d_C, nx, ny); hipDeviceSynchronize(); double timeStampC = getTimeStamp(); //copy data back cudaMemCpy(h_dC, d_C, bytes, cudaMemCpyDeviceToHost); double timeStampD = getTimeStamp(); // free GPU resources hipFree(d_A); hipFree(d_B); hipFree(d_C); hipDeviceReset(); // check result h_addmat(h_A, h_B, h_hC, nx, ny); h_dC == h+hC??? // print out results */ }
e5a3b53d53371e451a37a505cad6ee4750709d59.cu
#include "sys/time.h" #include <stdio.h> double getTimeStamp() { struct timeval tv; gettimeofday(&tv, NULL); return (double)tv.tv_usec / 1000000 + tv.tv_sec; } // device-side matrix addition /*__global__ void f_addmat(float* A, float* B, float* C, int nx, int ny) { // kernel code might look something like this // but you may want to pad the matrices and index into them accordingly int ix = threadId.x + bloackId.x * blockDim.x; int iy = threadId.y + bloackId.y * blockDim.y; int idx = iy * ny + ix; if ((ix < nx) && (iy < ny)) C[idx] = A[idx] + B[idx]; }*/ __global__ void f_addmat(float* A, float* B, float* C, int nx, int ny) { // kernel code might look something like this // but you may want to pad the matrices and index into them accordingly int ix = threadIdx.x + blockIdx.x * blockDim.x; int iy = threadIdx.y + blockIdx.y * blockDim.y; //int numElem=nx*ny; long long idx = iy * nx + ix; if ((ix < nx) && (iy < ny)){ C[idx] = A[idx] + B[idx]; } } void matrixSumHost(float* A, float* B, float* C, int nx, int ny) { float* ia = A, * ib = B, * ic = C; for (int iy = 0; iy < ny; iy++) { for (int ix = 0; ix < nx; ix++) ic[ix] = ia[ix] + ib[ix]; ia += nx; ib += nx; ic += nx; } } void initDataA(float *h_A, int nx, int ny) { int xcoord = 0; int ycoord = 0; for (int i = 0; i < ny*nx; i++) { h_A[i]= (float)(xcoord + ycoord) / 3.0; if (xcoord == nx - 1) { xcoord = 0; ycoord += 1; } else { xcoord += 1; } } } void initDataB(float* h_B, int nx, int ny) { int xcoord = 0; int ycoord = 0; for (int i = 0; i < ny * nx; i++) { h_B[i] = (float)(xcoord + ycoord)* 3.14; if (xcoord == nx - 1) { xcoord = 0; ycoord += 1; } else { xcoord += 1; } } } //int argc, char* argv[] int main(int argc, char* argv[]) { if (argc != 3) { printf("Error: wrong number of args\n"); //exit(); } int nx = atoi(argv[1]); // should check validity int ny = atoi(argv[2]); // should check validity int noElems = nx * ny; int bytes = noElems * sizeof(float); //printf("my name"); float* h_A = (float*)malloc(bytes); float* h_B = (float*)malloc(bytes); float* h_hC = (float*)malloc(bytes); // host result initDataA(h_A, nx,ny); initDataB(h_B, nx, ny); matrixSumHost(h_A, h_B, h_hC, nx, ny); /* device side*/ float* d_A, * d_B, * d_C; cudaMalloc((void**)& d_A, bytes); cudaMalloc((void**)& d_B, bytes); cudaMalloc((void**)& d_C, bytes); double timeStampA = getTimeStamp(); float* h_dC = (float*)malloc(bytes); cudaMemcpy(d_A, h_A, bytes, cudaMemcpyHostToDevice); cudaMemcpy(d_B, h_B, bytes, cudaMemcpyHostToDevice); double timeStampB = getTimeStamp(); int blockx=32; int blocky=32; int marker=0; while (nx>blockx*65535){ marker=1; blockx=2*blockx; } while (ny>blocky*65535){ marker=2; blocky=2*blocky; } if (marker==1){ blocky=1024/blockx; } dim3 block(blockx, blocky); // you will want to configure this int gridSizeX=(nx + block.x - 1) / block.x; if (gridSizeX>=65535){ gridSizeX=65535; } int gridSizeY=(ny + block.y - 1) / block.y; if (gridSizeY>=65535){ gridSizeY=65535; } dim3 grid(gridSizeX, gridSizeY); f_addmat <<<grid, block >>> (d_A, d_B, d_C, nx, ny); cudaDeviceSynchronize(); double timeStampC = getTimeStamp(); cudaMemcpy(h_dC, d_C, bytes, cudaMemcpyDeviceToHost); double timeStampD = getTimeStamp(); cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); cudaDeviceReset(); /*for (int j=0;j<10;j++){ printf("%f %f\n",h_dC[j],h_hC[j]); }*/ for (int i = 0; i < noElems; i++) { if (h_dC[i]!=h_hC[i]){ printf("wrong %d\n",i); for (int j=i;j<i+10;j++){ printf("%f %f\n",h_dC[j],h_hC[j]); } break; } } printf("total_time:%4f",timeStampD-timeStampA); 
printf("CPU_GPU_time:%4f",timeStampB-timeStampA); printf("kernel_time:%4f",timeStampC-timeStampB); printf("GPU_CPU_time:%4f",timeStampD-timeStampC); //............................................................................... /*for (int i = 0; i < noElems; i++) { cout << h_A[i]; } cout << " " << endl; for (int i = 0; i < noElems; i++) { cout << h_B[i]; } cout << " " << endl; matrixSumHost(h_A, h_B, h_hC, nx, ny); for (int i = 0; i < noElems; i++) { cout << h_hC[i]; } cout << " " << endl;*/ //cout << h_A[900]<<endl; /* // get program arguments if (argc != 3) { printf("Error: wrong number of args\n"); //exit(); } int nx = atoi(argv[2]); // should check validity int ny = atoi(argv[3]); // should check validity int noElems = nx * ny; int bytes = noElems * sizeof(float); // but you may want to pad the matrices¡­ // alloc memory host-side float* h_A = (float*)malloc(bytes); float* h_B = (float*)malloc(bytes); float* h_hC = (float*)malloc(bytes); // host result float* h_dC = (float*)malloc(bytes); // gpu result // init matrices with random data //initData(h_A, noElems); initData(h_B, noElems); // alloc memory dev-side float* d_A, * d_B, * d_C; cudaMalloc((void**)& d_A, bytes); cudaMalloc((void**)& d_B, bytes); cudaMalloc((void**)& d_C, bytes); double timeStampA = getTimeStamp(); cudaMemcpy(d_A, h_A, bytes, cudaMemCpyHostToDevice); cudaMemcpy(d_B, h_B, bytes, cudaMemCpyHostToDevice); // note that the transfers would be twice as fast if h_A and h_B // matrices are pinned double timeStampB = getTimeStamp(); // invoke Kernel dim3 block(32, 32); // you will want to configure this dim3 grid((nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y); f_addmat << <grid, block >> > (d_A, d_B, d_C, nx, ny); cudaDeviceSynchronize(); double timeStampC = getTimeStamp(); //copy data back cudaMemCpy(h_dC, d_C, bytes, cudaMemCpyDeviceToHost); double timeStampD = getTimeStamp(); // free GPU resources cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); cudaDeviceReset(); // check result h_addmat(h_A, h_B, h_hC, nx, ny); h_dC == h+hC??? // print out results */ }
51522eafc98464573e057552d72f980645ead304.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "Float2Byte.h" __host__ __device__ float Range(float a, float Small, float Big) { if (a < Small)a = Small; else if (a > Big)a = Big; return a; } __global__ void Float2Byte(int width, int sampled, int spp, float* in, GLbyte* out) { const int x = blockIdx.x * 16 + threadIdx.x, y = blockIdx.y * 16 + threadIdx.y; const auto index = width * 4 * y + x * 4; //if (x == 1 && y == 1)printf("Convert %f,%f,%f,%f -- %d,%f\n", in[index], in[index + 1], in[index + 2], in[index + 3],sampled,in[index]/sampled); for (auto i = 0; i < 4; i++)out[index + i] = Range((in[index + i] / sampled), 0, 1) * 255; //if (x == 1 && y == 1)printf("pixel %f,%f,%f,%f\n",out[index], out[index + 1], out[index + 2], out[index + 3]); }
51522eafc98464573e057552d72f980645ead304.cu
#include "Float2Byte.h" __host__ __device__ float Range(float a, float Small, float Big) { if (a < Small)a = Small; else if (a > Big)a = Big; return a; } __global__ void Float2Byte(int width, int sampled, int spp, float* in, GLbyte* out) { const int x = blockIdx.x * 16 + threadIdx.x, y = blockIdx.y * 16 + threadIdx.y; const auto index = width * 4 * y + x * 4; //if (x == 1 && y == 1)printf("Convert %f,%f,%f,%f -- %d,%f\n", in[index], in[index + 1], in[index + 2], in[index + 3],sampled,in[index]/sampled); for (auto i = 0; i < 4; i++)out[index + i] = Range((in[index + i] / sampled), 0, 1) * 255; //if (x == 1 && y == 1)printf("pixel %f,%f,%f,%f\n",out[index], out[index + 1], out[index + 2], out[index + 3]); }
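// Hypothetical launch sketch (not part of the original file): Float2Byte above hard-codes a
// 16x16 tile (blockIdx.x * 16 + threadIdx.x), so it must be launched with 16x16 thread blocks
// and a grid of (width/16, height/16); width and height are assumed to be multiples of 16
// because the kernel performs no bounds check. "accum" and "pixels" are illustrative names
// for the per-pixel float accumulation buffer and the 4-channel GLbyte output buffer.
void RunFloat2Byte(int width, int height, int sampled, int spp, float* accum, GLbyte* pixels)
{
  dim3 block(16, 16);
  dim3 grid(width / 16, height / 16);
  Float2Byte<<<grid, block>>>(width, sampled, spp, accum, pixels);
  cudaDeviceSynchronize();
}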
913034d3d12ae8313d38c8dc4b375163dd78f3ed.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.5.0-beta3) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date July 2014 @generated from zlacpy_batched.cu normal z -> c, Fri Jul 18 17:34:12 2014 @author Mark Gates */ #include "common_magma.h" #include <assert.h> #define NB 64 /* ===================================================================== Batches clacpy of multiple arrays; y-dimension of grid is different arrays, x-dimension of grid is blocks for each array. Matrix is m x n, and is divided into block rows, each NB x n. Each CUDA block has NB threads to handle one block row. Each thread copies one row, iterating across all columns. The bottom block of rows may be partially outside the matrix; if so, rows outside the matrix (i >= m) are disabled. */ __global__ void clacpy_batched_kernel( int m, int n, const magmaFloatComplex * const *dAarray, int ldda, magmaFloatComplex **dBarray, int lddb ) { // dA and dB iterate across row i const magmaFloatComplex *dA = dAarray[ blockIdx.y ]; magmaFloatComplex *dB = dBarray[ blockIdx.y ]; int i = blockIdx.x*blockDim.x + threadIdx.x; if ( i < m ) { dA += i; dB += i; const magmaFloatComplex *dAend = dA + n*ldda; while( dA < dAend ) { *dB = *dA; dA += ldda; dB += lddb; } } } /* ===================================================================== */ /** Note -------- - UPLO Parameter is disabled - Do we want to provide a generic function to the user with all the options? Purpose ------- CLACPY copies all or part of a set of two-dimensional matrices dAarray[i] to another set of matrices dBarray[i], for i = 0, ..., batchCount-1. Arguments --------- @param[in] uplo magma_uplo_t Specifies the part of each matrix dAarray[i] to be copied to dBarray[i]. - = MagmaUpper: Upper triangular part - = MagmaLower: Lower triangular part Otherwise: All of each matrix dAarray[i] @param[in] m INTEGER The number of rows of each matrix dAarray[i]. M >= 0. @param[in] n INTEGER The number of columns of each matrix dAarray[i]. N >= 0. @param[in] dAarray array on GPU, dimension(batchCount), of pointers to arrays, with each array a COMPLEX array, dimension (LDDA,N) The m by n matrices dAarray[i]. If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed; if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed. @param[in] ldda INTEGER The leading dimension of each array dAarray[i]. LDDA >= max(1,M). @param[out] dBarray array on GPU, dimension(batchCount), of pointers to arrays, with each array a COMPLEX array, dimension (LDDB,N) The m by n matrices dBarray[i]. On exit, matrix dBarray[i] = matrix dAarray[i] in the locations specified by UPLO. @param[in] lddb INTEGER The leading dimension of each array dBarray[i]. LDDB >= max(1,M). @param[in] batchCount INTEGER The number of matrices to add; length of dAarray and dBarray. batchCount >= 0. 
@ingroup magma_caux2 ********************************************************************/ extern "C" void magmablas_clacpy_batched( magma_uplo_t uplo, magma_int_t m, magma_int_t n, const magmaFloatComplex * const *dAarray, magma_int_t ldda, magmaFloatComplex **dBarray, magma_int_t lddb, magma_int_t batchCount ) { magma_int_t info = 0; if ( m < 0 ) info = -2; else if ( n < 0 ) info = -3; else if ( ldda < max(1,m)) info = -5; else if ( lddb < max(1,m)) info = -7; else if ( batchCount < 0 ) info = -8; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; } if ( m == 0 || n == 0 || batchCount == 0 ) return; dim3 threads( NB ); dim3 grid( (m + NB - 1)/NB, batchCount ); if ( uplo == MagmaUpper ) { fprintf(stderr, "lacpy upper is not implemented\n"); } else if ( uplo == MagmaLower ) { fprintf(stderr, "lacpy lower is not implemented\n"); } else { hipLaunchKernelGGL(( clacpy_batched_kernel), dim3(grid), dim3(threads), 0, magma_stream , m, n, dAarray, ldda, dBarray, lddb ); } }
913034d3d12ae8313d38c8dc4b375163dd78f3ed.cu
/* -- MAGMA (version 1.5.0-beta3) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date July 2014 @generated from zlacpy_batched.cu normal z -> c, Fri Jul 18 17:34:12 2014 @author Mark Gates */ #include "common_magma.h" #include <assert.h> #define NB 64 /* ===================================================================== Batches clacpy of multiple arrays; y-dimension of grid is different arrays, x-dimension of grid is blocks for each array. Matrix is m x n, and is divided into block rows, each NB x n. Each CUDA block has NB threads to handle one block row. Each thread copies one row, iterating across all columns. The bottom block of rows may be partially outside the matrix; if so, rows outside the matrix (i >= m) are disabled. */ __global__ void clacpy_batched_kernel( int m, int n, const magmaFloatComplex * const *dAarray, int ldda, magmaFloatComplex **dBarray, int lddb ) { // dA and dB iterate across row i const magmaFloatComplex *dA = dAarray[ blockIdx.y ]; magmaFloatComplex *dB = dBarray[ blockIdx.y ]; int i = blockIdx.x*blockDim.x + threadIdx.x; if ( i < m ) { dA += i; dB += i; const magmaFloatComplex *dAend = dA + n*ldda; while( dA < dAend ) { *dB = *dA; dA += ldda; dB += lddb; } } } /* ===================================================================== */ /** Note -------- - UPLO Parameter is disabled - Do we want to provide a generic function to the user with all the options? Purpose ------- CLACPY copies all or part of a set of two-dimensional matrices dAarray[i] to another set of matrices dBarray[i], for i = 0, ..., batchCount-1. Arguments --------- @param[in] uplo magma_uplo_t Specifies the part of each matrix dAarray[i] to be copied to dBarray[i]. - = MagmaUpper: Upper triangular part - = MagmaLower: Lower triangular part Otherwise: All of each matrix dAarray[i] @param[in] m INTEGER The number of rows of each matrix dAarray[i]. M >= 0. @param[in] n INTEGER The number of columns of each matrix dAarray[i]. N >= 0. @param[in] dAarray array on GPU, dimension(batchCount), of pointers to arrays, with each array a COMPLEX array, dimension (LDDA,N) The m by n matrices dAarray[i]. If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed; if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed. @param[in] ldda INTEGER The leading dimension of each array dAarray[i]. LDDA >= max(1,M). @param[out] dBarray array on GPU, dimension(batchCount), of pointers to arrays, with each array a COMPLEX array, dimension (LDDB,N) The m by n matrices dBarray[i]. On exit, matrix dBarray[i] = matrix dAarray[i] in the locations specified by UPLO. @param[in] lddb INTEGER The leading dimension of each array dBarray[i]. LDDB >= max(1,M). @param[in] batchCount INTEGER The number of matrices to add; length of dAarray and dBarray. batchCount >= 0. 
@ingroup magma_caux2 ********************************************************************/ extern "C" void magmablas_clacpy_batched( magma_uplo_t uplo, magma_int_t m, magma_int_t n, const magmaFloatComplex * const *dAarray, magma_int_t ldda, magmaFloatComplex **dBarray, magma_int_t lddb, magma_int_t batchCount ) { magma_int_t info = 0; if ( m < 0 ) info = -2; else if ( n < 0 ) info = -3; else if ( ldda < max(1,m)) info = -5; else if ( lddb < max(1,m)) info = -7; else if ( batchCount < 0 ) info = -8; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; } if ( m == 0 || n == 0 || batchCount == 0 ) return; dim3 threads( NB ); dim3 grid( (m + NB - 1)/NB, batchCount ); if ( uplo == MagmaUpper ) { fprintf(stderr, "lacpy upper is not implemented\n"); } else if ( uplo == MagmaLower ) { fprintf(stderr, "lacpy lower is not implemented\n"); } else { clacpy_batched_kernel<<< grid, threads, 0, magma_stream >>>( m, n, dAarray, ldda, dBarray, lddb ); } }
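// Hypothetical usage sketch (not part of the original file): copying a batch of m x n
// single-precision complex matrices with magmablas_clacpy_batched. dAarray and dBarray are
// arrays of device pointers that must themselves live in device memory, because the kernel
// dereferences dAarray[blockIdx.y] on the GPU. MagmaFull is assumed here as the
// "copy the whole matrix" uplo value (upper/lower are reported as not implemented above);
// the allocation pattern and the use of plain cudaMalloc/cudaMemcpy are illustrative.
void CopyBatchExample(magma_int_t m, magma_int_t n, magma_int_t batchCount)
{
  magma_int_t ldda = m, lddb = m;

  // allocate each matrix on the device, keeping the pointers on the host for now
  magmaFloatComplex **hA = (magmaFloatComplex**) malloc(batchCount * sizeof(magmaFloatComplex*));
  magmaFloatComplex **hB = (magmaFloatComplex**) malloc(batchCount * sizeof(magmaFloatComplex*));
  for (magma_int_t i = 0; i < batchCount; ++i) {
    cudaMalloc((void**)&hA[i], ldda * n * sizeof(magmaFloatComplex));
    cudaMalloc((void**)&hB[i], lddb * n * sizeof(magmaFloatComplex));
  }

  // copy the pointer arrays themselves to the device
  magmaFloatComplex **dAarray, **dBarray;
  cudaMalloc((void**)&dAarray, batchCount * sizeof(magmaFloatComplex*));
  cudaMalloc((void**)&dBarray, batchCount * sizeof(magmaFloatComplex*));
  cudaMemcpy(dAarray, hA, batchCount * sizeof(magmaFloatComplex*), cudaMemcpyHostToDevice);
  cudaMemcpy(dBarray, hB, batchCount * sizeof(magmaFloatComplex*), cudaMemcpyHostToDevice);

  // ... fill the dAarray matrices, then copy the whole batch in one call ...
  magmablas_clacpy_batched(MagmaFull, m, n, dAarray, ldda, dBarray, lddb, batchCount);

  // cleanup of the device matrices, the pointer arrays and the host arrays is omitted here
}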
bd3894b46a37315dd2c26149023a8f768582325e.hip
// !!! This is a file automatically generated by hipify!!! #include "chainerx/cuda/cuda_device.h" #include <algorithm> #include <cstdint> #include <mutex> #include <numeric> #include <type_traits> #include <gsl/gsl> #include <hip/hip_runtime.h> #include "chainerx/array.h" #include "chainerx/axes.h" #include "chainerx/cuda/cuda.h" #include "chainerx/cuda/cuda_runtime.h" #include "chainerx/cuda/cuda_set_device_scope.h" #include "chainerx/cuda/data_type.cuh" #include "chainerx/cuda/elementwise.cuh" #include "chainerx/cuda/kernel_regist.h" #include "chainerx/device.h" #include "chainerx/dtype.h" #include "chainerx/indexable_array.h" #include "chainerx/indexer.h" #include "chainerx/kernels/indexing.h" #include "chainerx/macro.h" #include "chainerx/routines/indexing.h" #include "chainerx/shape.h" namespace chainerx { namespace cuda { namespace { // Makes axes for permutation that moves [first_axis, last_axis) to the head. Axes MakeRollingPermutation(int8_t first_axis, int8_t last_axis, int8_t ndim) { CHAINERX_ASSERT(0 <= first_axis); CHAINERX_ASSERT(first_axis < last_axis); CHAINERX_ASSERT(last_axis <= ndim); Axes permutation{}; permutation.resize(ndim); auto head_end = permutation.begin() + (last_axis - first_axis); auto last = permutation.begin() + last_axis; std::iota(permutation.begin(), head_end, first_axis); std::iota(head_end, last, int8_t{0}); std::iota(last, permutation.end(), last_axis); return permutation; } template <typename T, typename TIndex> __global__ void TakeCudaKernel( IndexableArray<const T> a_iarray, IndexableArray<T> out_iarray, IndexableArray<const TIndex> indices_iarray, Indexer<> a_indexer, Indexer<> out_indexer, Indexer<> indices_indexer, TIndex common_total_size, TIndex axis_dim) { static_assert(std::is_same<TIndex, int64_t>::value || std::is_same<TIndex, int32_t>::value, ""); for (auto it = out_indexer.It(blockIdx.x * blockDim.x + threadIdx.x, blockDim.x * gridDim.x); it; ++it) { TIndex indices_pos = static_cast<TIndex>(it.raw_index()) / common_total_size; TIndex common_pos = static_cast<TIndex>(it.raw_index()) % common_total_size; TIndex index = indices_iarray[indices_indexer.It(indices_pos)]; if (index < 0) { index = axis_dim - ((-index + axis_dim - 1) % axis_dim + 1); } else { index = index % axis_dim; } CHAINERX_ASSERT(0 <= index); CHAINERX_ASSERT(index < axis_dim); out_iarray[it] = a_iarray[a_indexer.It(index * common_total_size + common_pos)]; } } template <typename T, typename TIndex> __global__ void AddAtCudaKernel( IndexableArray<const T> a_iarray, IndexableArray<const T> b_iarray, IndexableArray<T> out_iarray, IndexableArray<const TIndex> indices_iarray, Indexer<> b_indexer, Indexer<> out_indexer, Indexer<> indices_indexer, TIndex common_total_size, TIndex axis_dim) { static_assert(std::is_same<TIndex, int64_t>::value || std::is_same<TIndex, int32_t>::value, ""); for (auto it = out_indexer.It(blockIdx.x * blockDim.x + threadIdx.x, blockDim.x * gridDim.x); it; ++it) { TIndex axis_pos = static_cast<TIndex>(it.raw_index()) / common_total_size; TIndex common_pos = static_cast<TIndex>(it.raw_index()) % common_total_size; cuda_internal::DataType<T> out_value = cuda_internal::StorageToDataType<const T>(a_iarray[it]); for (auto it_indices = indices_indexer.It(0); it_indices; ++it_indices) { TIndex index = indices_iarray[it_indices]; if (index < 0) { index = axis_dim - ((-index + axis_dim - 1) % axis_dim + 1); } else { index = index % axis_dim; } CHAINERX_ASSERT(0 <= index); CHAINERX_ASSERT(index < axis_dim); if (index == axis_pos) { out_value += 
cuda_internal::StorageToDataType<const T>( b_iarray[b_indexer.It(it_indices.raw_index() * common_total_size + common_pos)]); } } out_iarray[it] = cuda_internal::DataToStorageType<T>(out_value); } } template <typename TIndex> void TakeImpl(Device& device, const Array& a, const Array& indices, int8_t axis, const Array& out) { static_assert(std::is_same<TIndex, int64_t>::value || std::is_same<TIndex, int32_t>::value, ""); CHAINERX_ASSERT( (std::is_same<TIndex, int64_t>::value && indices.dtype() == Dtype::kInt64) || (std::is_same<TIndex, int32_t>::value && indices.dtype() == Dtype::kInt32)); device.CheckDevicesCompatible(a, indices, out); CudaSetDeviceScope scope{device.index()}; VisitDtype(out.dtype(), [&a, &indices, axis, &out](auto pt) { using T = typename decltype(pt)::type; // a and out are transposed as follows. // a: (Ni..., N, Nj...) => (N, Ni..., Nj...) // out: (Ni..., Nk..., Nj...) => (Nk..., Ni..., Nj...) // // indices is used as is. // indices: (Nk...) IndexableArray<const T> a_iarray{a}; Axes a_perm = MakeRollingPermutation(axis, axis + 1, a.ndim()); a_iarray.Permute(a_perm); Shape a_shape = internal::TransposeShape(a.shape(), a_perm); Indexer<> a_indexer{a_shape}; IndexableArray<T> out_iarray{out}; Axes out_perm = MakeRollingPermutation(axis, axis + indices.ndim(), out.ndim()); out_iarray.Permute(out_perm); Shape out_shape = internal::TransposeShape(out.shape(), out_perm); Indexer<> out_indexer{out_shape}; IndexableArray<const TIndex> indices_iarray{indices}; Indexer<> indices_indexer{indices.shape()}; // size of (Ni..., Nj...) part TIndex common_total_size = gsl::narrow<TIndex>(a_indexer.total_size() / a_shape[0]); TIndex axis_dim = gsl::narrow<TIndex>(a_shape[0]); // TODO(niboshi): Calculate kMaxBlockSize per device std::lock_guard<std::mutex> lock{*cuda_internal::g_mutex}; static const int kMaxBlockSize = CudaOccupancyMaxPotentialBlockSize(&TakeCudaKernel<T, TIndex>).block_size; int64_t total_size = out_indexer.total_size(); int64_t grid_size = (total_size + kMaxBlockSize - 1) / kMaxBlockSize; int64_t block_size = std::min<TIndex>(total_size, kMaxBlockSize); hipLaunchKernelGGL(( TakeCudaKernel), dim3(grid_size), dim3(block_size), 0, 0, a_iarray, out_iarray, indices_iarray, a_indexer, out_indexer, indices_indexer, common_total_size, axis_dim); }); } template <typename TIndex> void AddAtImpl(Device& device, const Array& a, const Array& indices, int8_t axis, const Array& b, const Array& out) { // TODO(niboshi): Current implementation only distributes output elements in respective threads. Summation on the indices is performed // serially in each thread. This implementation can be improved by distributing indices as well, possibly using atomicAdd. static_assert(std::is_same<TIndex, int64_t>::value || std::is_same<TIndex, int32_t>::value, ""); CHAINERX_ASSERT( (std::is_same<TIndex, int64_t>::value && indices.dtype() == Dtype::kInt64) || (std::is_same<TIndex, int32_t>::value && indices.dtype() == Dtype::kInt32)); CHAINERX_ASSERT(a.shape() == out.shape()); device.CheckDevicesCompatible(a, indices, out); CudaSetDeviceScope scope{device.index()}; VisitDtype(out.dtype(), [&a, &indices, axis, &b, &out](auto pt) { using T = typename decltype(pt)::type; // b and out are transposed as follows. // a: (Ni..., N, Nj...) => (N, Ni..., Nj...) // b: (Ni..., Nk..., Nj...) => (Nk..., Ni..., Nj...) // out: (Ni..., N , Nj...) => (N , Ni..., Nj...) // // indices is used as is. // indices: (Nk...) 
IndexableArray<const T> a_iarray{a}; Axes a_perm = MakeRollingPermutation(axis, axis + 1, a.ndim()); a_iarray.Permute(a_perm); Shape a_shape = internal::TransposeShape(a.shape(), a_perm); Indexer<> a_indexer{a_shape}; IndexableArray<const T> b_iarray{b}; Axes b_perm = MakeRollingPermutation(axis, axis + indices.ndim(), b.ndim()); b_iarray.Permute(b_perm); Shape b_shape = internal::TransposeShape(b.shape(), b_perm); Indexer<> b_indexer{b_shape}; IndexableArray<T> out_iarray{out}; Axes out_perm = MakeRollingPermutation(axis, axis + 1, out.ndim()); out_iarray.Permute(out_perm); Shape out_shape = internal::TransposeShape(out.shape(), out_perm); Indexer<> out_indexer{out_shape}; IndexableArray<const TIndex> indices_iarray{indices}; Indexer<> indices_indexer{indices.shape()}; // size of (Ni..., Nj...) part TIndex common_total_size = gsl::narrow<TIndex>(a_indexer.total_size() / a_shape[0]); TIndex axis_dim = gsl::narrow<TIndex>(a_shape[0]); static const int kMaxBlockSize = CudaOccupancyMaxPotentialBlockSize(&AddAtCudaKernel<T, TIndex>).block_size; int64_t total_size = out_indexer.total_size(); int64_t grid_size = (total_size + kMaxBlockSize - 1) / kMaxBlockSize; int64_t block_size = std::min<int64_t>(total_size, kMaxBlockSize); hipLaunchKernelGGL(( AddAtCudaKernel), dim3(grid_size), dim3(block_size), 0, 0, a_iarray, b_iarray, out_iarray, indices_iarray, b_indexer, out_indexer, indices_indexer, common_total_size, axis_dim); }); } class CudaTakeKernel : public TakeKernel { public: void Call(const Array& a, const Array& indices, int8_t axis, const Array& out) override { Device& device = a.device(); CHAINERX_ASSERT(GetKind(indices.dtype()) == DtypeKind::kInt || GetKind(indices.dtype()) == DtypeKind::kUInt); device.CheckDevicesCompatible(a, indices, out); CudaSetDeviceScope scope{device.index()}; if (indices.dtype() == Dtype::kInt64) { TakeImpl<int64_t>(device, a, indices, axis, out); } else { const Array& indices_cast = indices.dtype() == Dtype::kInt32 ? indices : indices.AsType(Dtype::kInt32); TakeImpl<int32_t>(device, a, indices_cast, axis, out); } } }; CHAINERX_CUDA_REGISTER_KERNEL(TakeKernel, CudaTakeKernel); class CudaAddAtKernel : public AddAtKernel { public: void Call(const Array& a, const Array& indices, int8_t axis, const Array& b, const Array& out) override { Device& device = a.device(); CHAINERX_ASSERT(GetKind(indices.dtype()) == DtypeKind::kInt || GetKind(indices.dtype()) == DtypeKind::kUInt); device.CheckDevicesCompatible(a, indices, out); CudaSetDeviceScope scope{device.index()}; if (indices.dtype() == Dtype::kInt64) { AddAtImpl<int64_t>(device, a, indices, axis, b, out); } else { const Array& indices_cast = indices.dtype() == Dtype::kInt32 ? indices : indices.AsType(Dtype::kInt32); AddAtImpl<int32_t>(device, a, indices_cast, axis, b, out); } } }; CHAINERX_CUDA_REGISTER_KERNEL(AddAtKernel, CudaAddAtKernel); template <typename T> struct WhereImpl { using CudaType = cuda_internal::DataType<T>; __device__ void operator()(int64_t /*i*/, bool condition, CudaType x, CudaType y, CudaType& out) { out = condition ? x : y; } }; class CudaWhereKernel : public WhereKernel { public: void Call(const Array& condition, const Array& x, const Array& y, const Array& out) override { Device& device = condition.device(); device.CheckDevicesCompatible(condition, x, y, out); const Array& condition_cast = condition.dtype() != Dtype::kBool ? condition.AsType(Dtype::kBool) : condition; Dtype out_dtype = out.dtype(); const Array& x_cast = x.dtype() != out_dtype ? 
x.AsType(out_dtype) : x; const Array& y_cast = y.dtype() != out_dtype ? y.AsType(out_dtype) : y; CudaSetDeviceScope scope{device.index()}; VisitDtype(out.dtype(), [&](auto x_pt) { using T = typename decltype(x_pt)::type; Elementwise<const bool, const T, const T, T>(WhereImpl<T>{}, condition_cast, x_cast, y_cast, out); }); } }; CHAINERX_CUDA_REGISTER_KERNEL(WhereKernel, CudaWhereKernel); template <typename T> struct WhereAASImpl { using CudaType = cuda_internal::DataType<T>; CudaType y; __device__ void operator()(int64_t /*i*/, bool condition, CudaType x, CudaType& out) { out = condition ? x : y; } }; class CudaWhereAASKernel : public WhereAASKernel { public: void Call(const Array& condition, const Array& x, Scalar y, const Array& out) override { Device& device = condition.device(); device.CheckDevicesCompatible(condition, x, out); const Array& condition_cast = condition.dtype() != Dtype::kBool ? condition.AsType(Dtype::kBool) : condition; Dtype out_dtype = out.dtype(); const Array& x_cast = x.dtype() != out_dtype ? x.AsType(out_dtype) : x; CudaSetDeviceScope scope{device.index()}; VisitDtype(out.dtype(), [&](auto x_pt) { using T = typename decltype(x_pt)::type; using CudaType = cuda_internal::DataType<T>; Elementwise<const bool, const T, T>(WhereAASImpl<T>{static_cast<CudaType>(y)}, condition_cast, x_cast, out); }); } }; CHAINERX_CUDA_REGISTER_KERNEL(WhereAASKernel, CudaWhereAASKernel); template <typename T> struct WhereASAImpl { using CudaType = cuda_internal::DataType<T>; CudaType x; __device__ void operator()(int64_t /*i*/, bool condition, CudaType y, CudaType& out) { out = condition ? x : y; } }; class CudaWhereASAKernel : public WhereASAKernel { public: void Call(const Array& condition, Scalar x, const Array& y, const Array& out) override { Device& device = condition.device(); device.CheckDevicesCompatible(condition, y, out); const Array& condition_cast = condition.dtype() != Dtype::kBool ? condition.AsType(Dtype::kBool) : condition; Dtype out_dtype = out.dtype(); const Array& y_cast = y.dtype() != out_dtype ? y.AsType(out_dtype) : y; CudaSetDeviceScope scope{device.index()}; VisitDtype(out.dtype(), [&](auto x_pt) { using T = typename decltype(x_pt)::type; using CudaType = cuda_internal::DataType<T>; Elementwise<const bool, const T, T>(WhereASAImpl<T>{static_cast<CudaType>(x)}, condition_cast, y_cast, out); }); } }; CHAINERX_CUDA_REGISTER_KERNEL(WhereASAKernel, CudaWhereASAKernel); template <typename T> struct WhereASSImpl { using CudaType = cuda_internal::DataType<T>; CudaType x; CudaType y; __device__ void operator()(int64_t /*i*/, bool condition, CudaType& out) { out = condition ? x : y; } }; class CudaWhereASSKernel : public WhereASSKernel { public: void Call(const Array& condition, Scalar x, Scalar y, const Array& out) override { Device& device = condition.device(); device.CheckDevicesCompatible(condition, out); const Array& condition_cast = condition.dtype() != Dtype::kBool ? condition.AsType(Dtype::kBool) : condition; CudaSetDeviceScope scope{device.index()}; VisitDtype(out.dtype(), [&](auto x_pt) { using T = typename decltype(x_pt)::type; using CudaType = cuda_internal::DataType<T>; Elementwise<const bool, T>(WhereASSImpl<T>{static_cast<CudaType>(x), static_cast<CudaType>(y)}, condition_cast, out); }); } }; CHAINERX_CUDA_REGISTER_KERNEL(WhereASSKernel, CudaWhereASSKernel); } // namespace } // namespace cuda } // namespace chainerx
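The negative-index handling inside TakeCudaKernel and AddAtCudaKernel above is compact but easy to misread. The short host-only sketch below is not part of ChainerX; WrapIndex is a hypothetical helper introduced only for illustration, showing that the formula wraps out-of-range and negative indices onto [0, axis_dim) the way Python-style indexing does.

// Host-only illustration of the index normalization used by the kernels above.
// WrapIndex is a made-up name for this sketch, not a ChainerX function.
#include <cassert>
#include <cstdint>

int64_t WrapIndex(int64_t index, int64_t axis_dim) {
    if (index < 0) {
        // Same expression as in TakeCudaKernel/AddAtCudaKernel.
        return axis_dim - ((-index + axis_dim - 1) % axis_dim + 1);
    }
    return index % axis_dim;
}

int main() {
    // axis_dim == 5: -1 -> 4, -5 -> 0, -6 -> 4, 7 -> 2
    assert(WrapIndex(-1, 5) == 4);
    assert(WrapIndex(-5, 5) == 0);
    assert(WrapIndex(-6, 5) == 4);
    assert(WrapIndex(7, 5) == 2);
    return 0;
}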
bd3894b46a37315dd2c26149023a8f768582325e.cu
#include "chainerx/cuda/cuda_device.h" #include <algorithm> #include <cstdint> #include <mutex> #include <numeric> #include <type_traits> #include <gsl/gsl> #include <cuda_runtime.h> #include "chainerx/array.h" #include "chainerx/axes.h" #include "chainerx/cuda/cuda.h" #include "chainerx/cuda/cuda_runtime.h" #include "chainerx/cuda/cuda_set_device_scope.h" #include "chainerx/cuda/data_type.cuh" #include "chainerx/cuda/elementwise.cuh" #include "chainerx/cuda/kernel_regist.h" #include "chainerx/device.h" #include "chainerx/dtype.h" #include "chainerx/indexable_array.h" #include "chainerx/indexer.h" #include "chainerx/kernels/indexing.h" #include "chainerx/macro.h" #include "chainerx/routines/indexing.h" #include "chainerx/shape.h" namespace chainerx { namespace cuda { namespace { // Makes axes for permutation that moves [first_axis, last_axis) to the head. Axes MakeRollingPermutation(int8_t first_axis, int8_t last_axis, int8_t ndim) { CHAINERX_ASSERT(0 <= first_axis); CHAINERX_ASSERT(first_axis < last_axis); CHAINERX_ASSERT(last_axis <= ndim); Axes permutation{}; permutation.resize(ndim); auto head_end = permutation.begin() + (last_axis - first_axis); auto last = permutation.begin() + last_axis; std::iota(permutation.begin(), head_end, first_axis); std::iota(head_end, last, int8_t{0}); std::iota(last, permutation.end(), last_axis); return permutation; } template <typename T, typename TIndex> __global__ void TakeCudaKernel( IndexableArray<const T> a_iarray, IndexableArray<T> out_iarray, IndexableArray<const TIndex> indices_iarray, Indexer<> a_indexer, Indexer<> out_indexer, Indexer<> indices_indexer, TIndex common_total_size, TIndex axis_dim) { static_assert(std::is_same<TIndex, int64_t>::value || std::is_same<TIndex, int32_t>::value, ""); for (auto it = out_indexer.It(blockIdx.x * blockDim.x + threadIdx.x, blockDim.x * gridDim.x); it; ++it) { TIndex indices_pos = static_cast<TIndex>(it.raw_index()) / common_total_size; TIndex common_pos = static_cast<TIndex>(it.raw_index()) % common_total_size; TIndex index = indices_iarray[indices_indexer.It(indices_pos)]; if (index < 0) { index = axis_dim - ((-index + axis_dim - 1) % axis_dim + 1); } else { index = index % axis_dim; } CHAINERX_ASSERT(0 <= index); CHAINERX_ASSERT(index < axis_dim); out_iarray[it] = a_iarray[a_indexer.It(index * common_total_size + common_pos)]; } } template <typename T, typename TIndex> __global__ void AddAtCudaKernel( IndexableArray<const T> a_iarray, IndexableArray<const T> b_iarray, IndexableArray<T> out_iarray, IndexableArray<const TIndex> indices_iarray, Indexer<> b_indexer, Indexer<> out_indexer, Indexer<> indices_indexer, TIndex common_total_size, TIndex axis_dim) { static_assert(std::is_same<TIndex, int64_t>::value || std::is_same<TIndex, int32_t>::value, ""); for (auto it = out_indexer.It(blockIdx.x * blockDim.x + threadIdx.x, blockDim.x * gridDim.x); it; ++it) { TIndex axis_pos = static_cast<TIndex>(it.raw_index()) / common_total_size; TIndex common_pos = static_cast<TIndex>(it.raw_index()) % common_total_size; cuda_internal::DataType<T> out_value = cuda_internal::StorageToDataType<const T>(a_iarray[it]); for (auto it_indices = indices_indexer.It(0); it_indices; ++it_indices) { TIndex index = indices_iarray[it_indices]; if (index < 0) { index = axis_dim - ((-index + axis_dim - 1) % axis_dim + 1); } else { index = index % axis_dim; } CHAINERX_ASSERT(0 <= index); CHAINERX_ASSERT(index < axis_dim); if (index == axis_pos) { out_value += cuda_internal::StorageToDataType<const T>( 
b_iarray[b_indexer.It(it_indices.raw_index() * common_total_size + common_pos)]); } } out_iarray[it] = cuda_internal::DataToStorageType<T>(out_value); } } template <typename TIndex> void TakeImpl(Device& device, const Array& a, const Array& indices, int8_t axis, const Array& out) { static_assert(std::is_same<TIndex, int64_t>::value || std::is_same<TIndex, int32_t>::value, ""); CHAINERX_ASSERT( (std::is_same<TIndex, int64_t>::value && indices.dtype() == Dtype::kInt64) || (std::is_same<TIndex, int32_t>::value && indices.dtype() == Dtype::kInt32)); device.CheckDevicesCompatible(a, indices, out); CudaSetDeviceScope scope{device.index()}; VisitDtype(out.dtype(), [&a, &indices, axis, &out](auto pt) { using T = typename decltype(pt)::type; // a and out are transposed as follows. // a: (Ni..., N, Nj...) => (N, Ni..., Nj...) // out: (Ni..., Nk..., Nj...) => (Nk..., Ni..., Nj...) // // indices is used as is. // indices: (Nk...) IndexableArray<const T> a_iarray{a}; Axes a_perm = MakeRollingPermutation(axis, axis + 1, a.ndim()); a_iarray.Permute(a_perm); Shape a_shape = internal::TransposeShape(a.shape(), a_perm); Indexer<> a_indexer{a_shape}; IndexableArray<T> out_iarray{out}; Axes out_perm = MakeRollingPermutation(axis, axis + indices.ndim(), out.ndim()); out_iarray.Permute(out_perm); Shape out_shape = internal::TransposeShape(out.shape(), out_perm); Indexer<> out_indexer{out_shape}; IndexableArray<const TIndex> indices_iarray{indices}; Indexer<> indices_indexer{indices.shape()}; // size of (Ni..., Nj...) part TIndex common_total_size = gsl::narrow<TIndex>(a_indexer.total_size() / a_shape[0]); TIndex axis_dim = gsl::narrow<TIndex>(a_shape[0]); // TODO(niboshi): Calculate kMaxBlockSize per device std::lock_guard<std::mutex> lock{*cuda_internal::g_mutex}; static const int kMaxBlockSize = CudaOccupancyMaxPotentialBlockSize(&TakeCudaKernel<T, TIndex>).block_size; int64_t total_size = out_indexer.total_size(); int64_t grid_size = (total_size + kMaxBlockSize - 1) / kMaxBlockSize; int64_t block_size = std::min<TIndex>(total_size, kMaxBlockSize); TakeCudaKernel<<<grid_size, block_size>>>( a_iarray, out_iarray, indices_iarray, a_indexer, out_indexer, indices_indexer, common_total_size, axis_dim); }); } template <typename TIndex> void AddAtImpl(Device& device, const Array& a, const Array& indices, int8_t axis, const Array& b, const Array& out) { // TODO(niboshi): Current implementation only distributes output elements in respective threads. Summation on the indices is performed // serially in each thread. This implementation can be improved by distributing indices as well, possibly using atomicAdd. static_assert(std::is_same<TIndex, int64_t>::value || std::is_same<TIndex, int32_t>::value, ""); CHAINERX_ASSERT( (std::is_same<TIndex, int64_t>::value && indices.dtype() == Dtype::kInt64) || (std::is_same<TIndex, int32_t>::value && indices.dtype() == Dtype::kInt32)); CHAINERX_ASSERT(a.shape() == out.shape()); device.CheckDevicesCompatible(a, indices, out); CudaSetDeviceScope scope{device.index()}; VisitDtype(out.dtype(), [&a, &indices, axis, &b, &out](auto pt) { using T = typename decltype(pt)::type; // b and out are transposed as follows. // a: (Ni..., N, Nj...) => (N, Ni..., Nj...) // b: (Ni..., Nk..., Nj...) => (Nk..., Ni..., Nj...) // out: (Ni..., N , Nj...) => (N , Ni..., Nj...) // // indices is used as is. // indices: (Nk...) 
IndexableArray<const T> a_iarray{a}; Axes a_perm = MakeRollingPermutation(axis, axis + 1, a.ndim()); a_iarray.Permute(a_perm); Shape a_shape = internal::TransposeShape(a.shape(), a_perm); Indexer<> a_indexer{a_shape}; IndexableArray<const T> b_iarray{b}; Axes b_perm = MakeRollingPermutation(axis, axis + indices.ndim(), b.ndim()); b_iarray.Permute(b_perm); Shape b_shape = internal::TransposeShape(b.shape(), b_perm); Indexer<> b_indexer{b_shape}; IndexableArray<T> out_iarray{out}; Axes out_perm = MakeRollingPermutation(axis, axis + 1, out.ndim()); out_iarray.Permute(out_perm); Shape out_shape = internal::TransposeShape(out.shape(), out_perm); Indexer<> out_indexer{out_shape}; IndexableArray<const TIndex> indices_iarray{indices}; Indexer<> indices_indexer{indices.shape()}; // size of (Ni..., Nj...) part TIndex common_total_size = gsl::narrow<TIndex>(a_indexer.total_size() / a_shape[0]); TIndex axis_dim = gsl::narrow<TIndex>(a_shape[0]); static const int kMaxBlockSize = CudaOccupancyMaxPotentialBlockSize(&AddAtCudaKernel<T, TIndex>).block_size; int64_t total_size = out_indexer.total_size(); int64_t grid_size = (total_size + kMaxBlockSize - 1) / kMaxBlockSize; int64_t block_size = std::min<int64_t>(total_size, kMaxBlockSize); AddAtCudaKernel<<<grid_size, block_size>>>( a_iarray, b_iarray, out_iarray, indices_iarray, b_indexer, out_indexer, indices_indexer, common_total_size, axis_dim); }); } class CudaTakeKernel : public TakeKernel { public: void Call(const Array& a, const Array& indices, int8_t axis, const Array& out) override { Device& device = a.device(); CHAINERX_ASSERT(GetKind(indices.dtype()) == DtypeKind::kInt || GetKind(indices.dtype()) == DtypeKind::kUInt); device.CheckDevicesCompatible(a, indices, out); CudaSetDeviceScope scope{device.index()}; if (indices.dtype() == Dtype::kInt64) { TakeImpl<int64_t>(device, a, indices, axis, out); } else { const Array& indices_cast = indices.dtype() == Dtype::kInt32 ? indices : indices.AsType(Dtype::kInt32); TakeImpl<int32_t>(device, a, indices_cast, axis, out); } } }; CHAINERX_CUDA_REGISTER_KERNEL(TakeKernel, CudaTakeKernel); class CudaAddAtKernel : public AddAtKernel { public: void Call(const Array& a, const Array& indices, int8_t axis, const Array& b, const Array& out) override { Device& device = a.device(); CHAINERX_ASSERT(GetKind(indices.dtype()) == DtypeKind::kInt || GetKind(indices.dtype()) == DtypeKind::kUInt); device.CheckDevicesCompatible(a, indices, out); CudaSetDeviceScope scope{device.index()}; if (indices.dtype() == Dtype::kInt64) { AddAtImpl<int64_t>(device, a, indices, axis, b, out); } else { const Array& indices_cast = indices.dtype() == Dtype::kInt32 ? indices : indices.AsType(Dtype::kInt32); AddAtImpl<int32_t>(device, a, indices_cast, axis, b, out); } } }; CHAINERX_CUDA_REGISTER_KERNEL(AddAtKernel, CudaAddAtKernel); template <typename T> struct WhereImpl { using CudaType = cuda_internal::DataType<T>; __device__ void operator()(int64_t /*i*/, bool condition, CudaType x, CudaType y, CudaType& out) { out = condition ? x : y; } }; class CudaWhereKernel : public WhereKernel { public: void Call(const Array& condition, const Array& x, const Array& y, const Array& out) override { Device& device = condition.device(); device.CheckDevicesCompatible(condition, x, y, out); const Array& condition_cast = condition.dtype() != Dtype::kBool ? condition.AsType(Dtype::kBool) : condition; Dtype out_dtype = out.dtype(); const Array& x_cast = x.dtype() != out_dtype ? x.AsType(out_dtype) : x; const Array& y_cast = y.dtype() != out_dtype ? 
y.AsType(out_dtype) : y; CudaSetDeviceScope scope{device.index()}; VisitDtype(out.dtype(), [&](auto x_pt) { using T = typename decltype(x_pt)::type; Elementwise<const bool, const T, const T, T>(WhereImpl<T>{}, condition_cast, x_cast, y_cast, out); }); } }; CHAINERX_CUDA_REGISTER_KERNEL(WhereKernel, CudaWhereKernel); template <typename T> struct WhereAASImpl { using CudaType = cuda_internal::DataType<T>; CudaType y; __device__ void operator()(int64_t /*i*/, bool condition, CudaType x, CudaType& out) { out = condition ? x : y; } }; class CudaWhereAASKernel : public WhereAASKernel { public: void Call(const Array& condition, const Array& x, Scalar y, const Array& out) override { Device& device = condition.device(); device.CheckDevicesCompatible(condition, x, out); const Array& condition_cast = condition.dtype() != Dtype::kBool ? condition.AsType(Dtype::kBool) : condition; Dtype out_dtype = out.dtype(); const Array& x_cast = x.dtype() != out_dtype ? x.AsType(out_dtype) : x; CudaSetDeviceScope scope{device.index()}; VisitDtype(out.dtype(), [&](auto x_pt) { using T = typename decltype(x_pt)::type; using CudaType = cuda_internal::DataType<T>; Elementwise<const bool, const T, T>(WhereAASImpl<T>{static_cast<CudaType>(y)}, condition_cast, x_cast, out); }); } }; CHAINERX_CUDA_REGISTER_KERNEL(WhereAASKernel, CudaWhereAASKernel); template <typename T> struct WhereASAImpl { using CudaType = cuda_internal::DataType<T>; CudaType x; __device__ void operator()(int64_t /*i*/, bool condition, CudaType y, CudaType& out) { out = condition ? x : y; } }; class CudaWhereASAKernel : public WhereASAKernel { public: void Call(const Array& condition, Scalar x, const Array& y, const Array& out) override { Device& device = condition.device(); device.CheckDevicesCompatible(condition, y, out); const Array& condition_cast = condition.dtype() != Dtype::kBool ? condition.AsType(Dtype::kBool) : condition; Dtype out_dtype = out.dtype(); const Array& y_cast = y.dtype() != out_dtype ? y.AsType(out_dtype) : y; CudaSetDeviceScope scope{device.index()}; VisitDtype(out.dtype(), [&](auto x_pt) { using T = typename decltype(x_pt)::type; using CudaType = cuda_internal::DataType<T>; Elementwise<const bool, const T, T>(WhereASAImpl<T>{static_cast<CudaType>(x)}, condition_cast, y_cast, out); }); } }; CHAINERX_CUDA_REGISTER_KERNEL(WhereASAKernel, CudaWhereASAKernel); template <typename T> struct WhereASSImpl { using CudaType = cuda_internal::DataType<T>; CudaType x; CudaType y; __device__ void operator()(int64_t /*i*/, bool condition, CudaType& out) { out = condition ? x : y; } }; class CudaWhereASSKernel : public WhereASSKernel { public: void Call(const Array& condition, Scalar x, Scalar y, const Array& out) override { Device& device = condition.device(); device.CheckDevicesCompatible(condition, out); const Array& condition_cast = condition.dtype() != Dtype::kBool ? condition.AsType(Dtype::kBool) : condition; CudaSetDeviceScope scope{device.index()}; VisitDtype(out.dtype(), [&](auto x_pt) { using T = typename decltype(x_pt)::type; using CudaType = cuda_internal::DataType<T>; Elementwise<const bool, T>(WhereASSImpl<T>{static_cast<CudaType>(x), static_cast<CudaType>(y)}, condition_cast, out); }); } }; CHAINERX_CUDA_REGISTER_KERNEL(WhereASSKernel, CudaWhereASSKernel); } // namespace } // namespace cuda } // namespace chainerx
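TakeImpl and AddAtImpl above size their launches from CudaOccupancyMaxPotentialBlockSize, a ChainerX wrapper. The sketch below shows roughly how a similar configuration can be obtained from the stock CUDA runtime occupancy API; DummyKernel and the problem size are placeholders, and the grid/block arithmetic mirrors the pattern used above rather than anything prescribed by the API.

// Rough sketch of an occupancy-driven launch configuration; assumptions noted in comments.
#include <cuda_runtime.h>
#include <algorithm>
#include <cstdint>
#include <cstdio>

__global__ void DummyKernel(const float* in, float* out, int64_t n) {
    int64_t i = blockIdx.x * (int64_t)blockDim.x + threadIdx.x;
    if (i < n) out[i] = in[i];
}

int main() {
    int min_grid_size = 0;
    int max_block_size = 0;
    // Ask the runtime for the block size that maximizes occupancy for this kernel.
    cudaOccupancyMaxPotentialBlockSize(&min_grid_size, &max_block_size, DummyKernel);

    int64_t total_size = 1 << 20;  // assumed problem size, for illustration only
    int64_t block_size = std::min<int64_t>(total_size, max_block_size);
    int64_t grid_size = (total_size + block_size - 1) / block_size;
    std::printf("block=%lld grid=%lld\n", (long long)block_size, (long long)grid_size);
    return 0;
}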
9686dfab47f3426e34279942438fa3cff0f38be3.hip
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/scan.h>

#include "common.h"
#include "thrust.h"

namespace StreamCompaction {
namespace Thrust {

#define SHOW_TIMING 0

/**
 * Performs prefix-sum (aka scan) on idata, storing the result into odata.
 */
void scan(int n, int *odata, const int *idata) {
    // TODO use `thrust::exclusive_scan`
    // example: for device_vectors dv_in and dv_out:
    // thrust::exclusive_scan(dv_in.begin(), dv_in.end(), dv_out.begin());
    thrust::host_vector<int> thrustHst_idata(idata, idata+n);
    thrust::device_vector<int> thrustDev_idata(thrustHst_idata);
    thrust::device_vector<int> thrustDev_odata(n);

    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);

    hipEventRecord(start);
    thrust::exclusive_scan(thrustDev_idata.begin(), thrustDev_idata.end(), thrustDev_odata.begin());
    hipEventRecord(stop);
    hipEventSynchronize(stop);

    float milliseconds = 0;
    hipEventElapsedTime(&milliseconds, start, stop);
    if (SHOW_TIMING) std::cout << "Total time in milliseconds : " << milliseconds << std::endl;

    thrust::copy(thrustDev_odata.begin(), thrustDev_odata.end(), odata);
}
}
}
9686dfab47f3426e34279942438fa3cff0f38be3.cu
#include <cuda.h>
#include <cuda_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/scan.h>

#include "common.h"
#include "thrust.h"

namespace StreamCompaction {
namespace Thrust {

#define SHOW_TIMING 0

/**
 * Performs prefix-sum (aka scan) on idata, storing the result into odata.
 */
void scan(int n, int *odata, const int *idata) {
    // TODO use `thrust::exclusive_scan`
    // example: for device_vectors dv_in and dv_out:
    // thrust::exclusive_scan(dv_in.begin(), dv_in.end(), dv_out.begin());
    thrust::host_vector<int> thrustHst_idata(idata, idata+n);
    thrust::device_vector<int> thrustDev_idata(thrustHst_idata);
    thrust::device_vector<int> thrustDev_odata(n);

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start);
    thrust::exclusive_scan(thrustDev_idata.begin(), thrustDev_idata.end(), thrustDev_odata.begin());
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);

    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    if (SHOW_TIMING) std::cout << "Total time in milliseconds : " << milliseconds << std::endl;

    thrust::copy(thrustDev_odata.begin(), thrustDev_odata.end(), odata);
}
}
}
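For reference, a minimal standalone driver for the scan routine above might look like the following sketch; the input values are made up for illustration and the expected exclusive-scan output is noted in a comment.

// Standalone usage sketch of thrust::exclusive_scan, mirroring scan() above.
#include <cstdio>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/scan.h>

int main() {
    const int n = 8;
    int idata[n] = {3, 1, 7, 0, 4, 1, 6, 3};

    thrust::device_vector<int> d_in(idata, idata + n);
    thrust::device_vector<int> d_out(n);

    // Exclusive scan: out[0] = 0, out[i] = sum of in[0..i-1].
    thrust::exclusive_scan(d_in.begin(), d_in.end(), d_out.begin());

    thrust::host_vector<int> h_out = d_out;
    for (int i = 0; i < n; ++i) std::printf("%d ", h_out[i]);  // 0 3 4 11 11 15 16 22
    std::printf("\n");
    return 0;
}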
48964469728012d294037ab60ad68197cb1719f7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/native/layer_norm.h> #include <type_traits> #include <thrust/tuple.h> #include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/Dispatch.h> #include <ATen/NativeFunctions.h> #include <ATen/hip/HIPContext.h> #include <ATen/hip/detail/IndexUtils.cuh> #include <ATen/native/hip/block_reduce.cuh> #include <c10/hip/HIPMathCompat.h> namespace at { namespace native { namespace { constexpr int kCUDANumThreads = 256; constexpr int kColwiseReduceTileSize = 32; constexpr int vec_size = 4; //we could make it dependent on dtype, but that would lead to different results between float and low-p types // aligned vector generates vectorized load/store on CUDA (copy-pasted from MemoryAccess.cuh) template<typename scalar_t, int vec_size> struct alignas(sizeof(scalar_t) * vec_size) aligned_vector { scalar_t val[vec_size]; }; template <typename T, typename T_ACC> __global__ void RowwiseMomentsCUDAKernel( int64_t N, T_ACC eps, const T* X, T_ACC* mean, T_ACC* rstd) { using WelfordType = WelfordData<T_ACC, int64_t, T_ACC>; using WelfordOp = WelfordOps<T_ACC, T_ACC, int64_t, T_ACC, thrust::pair<T_ACC, T_ACC>>; __shared__ typename std::aligned_storage<sizeof(WelfordType), alignof(WelfordType)>:: type val_shared[C10_WARP_SIZE]; WelfordType* val_shared_ptr = reinterpret_cast<WelfordType*>(val_shared); const int64_t i = blockIdx.x; WelfordOp welford_op = {/*correction=*/0, /*take_sqrt=*/false}; WelfordType val(0, 0, 0, 0); for (int64_t j = threadIdx.x; j < N; j += blockDim.x) { const int64_t index = i * N + j; val = welford_op.reduce(val, static_cast<T_ACC>(X[index]), index); } val = cuda_utils::BlockReduce( val, welford_op, /*identity_element=*/WelfordType(0, 0, 0, 0), val_shared_ptr); if (threadIdx.x == 0) { T_ACC m1; T_ACC m2; thrust::tie(m2, m1) = welford_op.project(val); mean[i] = m1; rstd[i] = c10::hip::compat::rsqrt(m2 + eps); } } template <typename T, typename T_ACC> __global__ void LayerNormForwardCUDAKernel( int64_t N, const T* X, const T_ACC* mean, const T_ACC* rstd, const T* gamma, const T* beta, T* Y) { const int64_t i = blockIdx.x; for (int64_t j = threadIdx.x; j < N; j += blockDim.x) { const int64_t index = i * N + j; const T_ACC gamma_v = gamma == nullptr ? T_ACC(1) : static_cast<T_ACC>(gamma[j]); const T_ACC beta_v = beta == nullptr ? 
T_ACC(0) : static_cast<T_ACC>(beta[j]); Y[index] = (static_cast<T_ACC>(X[index]) - static_cast<T_ACC>(mean[i])) * static_cast<T_ACC>(rstd[i]) * gamma_v + beta_v; } } struct WelfordDataLN{ float mean; float sigma2; float count; C10_HOST_DEVICE WelfordDataLN(): mean(0.f), sigma2(0.f), count(0.f){} C10_HOST_DEVICE WelfordDataLN(float mean, float sigma2, float count): mean(mean), sigma2(sigma2), count(count) {} }; template<typename U> __device__ WelfordDataLN cuWelfordOnlineSum( const U val, const WelfordDataLN& curr_sum) { U delta = val - curr_sum.mean; U new_count = curr_sum.count + 1.f; U new_mean = curr_sum.mean + delta * (1.f/new_count); //proper division is slow, this is less accurate but noticeably faster return {new_mean, curr_sum.sigma2 + delta * (val - new_mean), new_count}; } __device__ WelfordDataLN cuWelfordCombine( const WelfordDataLN dataB, const WelfordDataLN dataA ) { using U = decltype(dataB.count); U delta = dataB.mean - dataA.mean; U count = dataA.count + dataB.count; U mean, sigma2; if (count > decltype(dataB.count){0}) { auto coef = 1.f/count; //NB we don't use --use_fast_math, but this is emulation, 1./count goes to intrinsic, `* coef` is multiplication, instead of slow fp division auto nA = dataA.count * coef; auto nB = dataB.count * coef; mean = nA*dataA.mean + nB*dataB.mean; sigma2 = dataA.sigma2 + dataB.sigma2 + delta * delta * dataA.count * nB; } else { mean = U(0); sigma2 = U(0); } return {mean, sigma2, count}; } template<typename T, int alignment=16> __device__ WelfordDataLN compute_stats( const T* __restrict__ X, const int N, float * buf ) { //X points to the row to read using vec_t = aligned_vector<T, vec_size>; using acc_t = acc_type<T, true>; const vec_t * X_vec = reinterpret_cast<const vec_t*>(X); const int numx = blockDim.x * blockDim.y; const int thrx = threadIdx.x + threadIdx.y * blockDim.x; const int n_vec_to_read = N/vec_size; WelfordDataLN wd(0.f, 0.f, 0.f); //no tail, we check that N is multiple of vec_size for (int i = thrx; i < n_vec_to_read; i += numx) { vec_t data = X_vec[i]; #pragma unroll for (int ii=0; ii < vec_size; ii++){ wd = cuWelfordOnlineSum(static_cast<acc_t>(data.val[ii]), wd); } } // intra-warp reduction for (int offset = (C10_WARP_SIZE >> 1); offset > 0; offset >>= 1) { WelfordDataLN wdB{WARP_SHFL_DOWN(wd.mean, offset), WARP_SHFL_DOWN(wd.sigma2, offset), WARP_SHFL_DOWN(wd.count, offset)}; wd = cuWelfordCombine(wd, wdB); } // threadIdx.x == 0 has correct values for each warp // inter-warp reductions if (blockDim.y > 1) { float * meansigmabuf = buf; float * countbuf = buf + blockDim.y; for (int offset = blockDim.y/2; offset > 0; offset /= 2) { // upper half of warps write to shared if (threadIdx.x == 0 && threadIdx.y >= offset && threadIdx.y < 2*offset) { const int wrt_y = threadIdx.y - offset; meansigmabuf[2*wrt_y] = wd.mean; meansigmabuf[2*wrt_y+1] = wd.sigma2; countbuf[wrt_y] = wd.count; } __syncthreads(); // lower half merges if (threadIdx.x == 0 && threadIdx.y < offset) { WelfordDataLN wdB{meansigmabuf[2*threadIdx.y], meansigmabuf[2*threadIdx.y+1], countbuf[threadIdx.y]}; wd = cuWelfordCombine(wd, wdB); } __syncthreads(); } if (threadIdx.x == 0 && threadIdx.y ==0) { meansigmabuf[0] = wd.mean; meansigmabuf[1] = wd.sigma2/float(N); } __syncthreads(); return WelfordDataLN{meansigmabuf[0], meansigmabuf[1],0.f}; } else { return WelfordDataLN{WARP_SHFL(wd.mean,0), WARP_SHFL(wd.sigma2,0)/float(N), 0.f}; } } template <typename T, typename T_ACC, typename std::enable_if<!std::is_same<T, double>::value, int>::type = 0> __device__ __inline__ 
void vectorized_layer_norm_kernel_impl( const int N, T_ACC eps, const T* __restrict__ X, const T* gamma, const T* beta, T_ACC* mean, T_ACC* rstd, T* Y){ extern __shared__ float s_data[]; //if we made smem WelfordDataLN type, there would be bank conflicts, //as one thread would have to write 3 consecutive floats auto i1 = blockIdx.x; const T * block_row = X + i1 * N; WelfordDataLN wd = compute_stats(block_row, N, s_data); using vec_t = aligned_vector<T, vec_size>; const vec_t * X_vec = reinterpret_cast<const vec_t*>(block_row); vec_t * Y_vec = reinterpret_cast<vec_t*>(Y + i1 * N); const int numx = blockDim.x * blockDim.y; const int thrx = threadIdx.x + threadIdx.y * blockDim.x; const int n_vec_to_read = N/vec_size; T_ACC rstd_val = c10::hip::compat::rsqrt(wd.sigma2 + eps); //no tail, N is guaranteed to be multiple of vec size for (int i = thrx; i < n_vec_to_read; i += numx) { vec_t data = X_vec[i]; vec_t out; //computation is performed in T_ACC, X is cast to T_ACC and result is implicitly cast to T if (gamma != nullptr && beta != nullptr) { #pragma unroll for (int ii=0; ii < vec_size; ii++){ out.val[ii] = static_cast<T_ACC>(gamma[i*vec_size + ii]) * (rstd_val * (static_cast<T_ACC>(data.val[ii]) - wd.mean)) + static_cast<T_ACC>(beta[i*vec_size + ii]); } } else { #pragma unroll for (int ii=0; ii < vec_size; ii++){ out.val[ii] = rstd_val * (static_cast<T_ACC>(data.val[ii]) - wd.mean); } } Y_vec[i] = out; } if (thrx == 0) { mean[i1] = wd.mean; rstd[i1] = rstd_val; } } template <typename T, typename T_ACC, typename std::enable_if<std::is_same<T, double>::value, int>::type = 0> __device__ __inline__ void vectorized_layer_norm_kernel_impl( const int N, T_ACC eps, const T* __restrict__ X, const T* gamma, const T* beta, T_ACC* mean, T_ACC* rstd, T* Y){ CUDA_KERNEL_ASSERT("doesn't work with double"); } //to avoid windows SFINAE errors template <typename T, typename T_ACC> __global__ __inline__ void vectorized_layer_norm_kernel( const int N, T_ACC eps, const T* __restrict__ X, const T* gamma, const T* beta, T_ACC* mean, T_ACC* rstd, T* Y){ vectorized_layer_norm_kernel_impl(N, eps, X, gamma, beta, mean, rstd, Y); } template <typename T> __global__ void ComputeInternalGradientsCUDAKernel( int64_t N, const T* dY, const T* X, const T* gamma, acc_type<T, true>* ds, acc_type<T, true>* db) { using T_ACC = acc_type<T, true>; __shared__ T_ACC ds_shared[C10_WARP_SIZE]; __shared__ T_ACC db_shared[C10_WARP_SIZE]; const int64_t i = blockIdx.x; T_ACC sum1 = 0; T_ACC sum2 = 0; for (int64_t j = threadIdx.x; j < N; j += blockDim.x) { const int64_t index = i * N + j; const T_ACC gamma_v = gamma == nullptr ? 
T_ACC(1) : static_cast<T_ACC>(gamma[j]); sum1 += static_cast<T_ACC>(dY[index]) * static_cast<T_ACC>(X[index]) * gamma_v; sum2 += static_cast<T_ACC>(dY[index]) * gamma_v; } sum1 = cuda_utils::BlockReduceSum<T_ACC>(sum1, ds_shared); sum2 = cuda_utils::BlockReduceSum<T_ACC>(sum2, db_shared); if (threadIdx.x == 0) { ds[i] = sum1; db[i] = sum2; } } template <typename T, typename T_ACC> __global__ void ComputeGradientFusedParamsCUDAKernel( int64_t M, int64_t N, const T_ACC* mean, const T_ACC* rstd, const acc_type<T, true>* ds, const acc_type<T, true>* db, acc_type<T, true>* c1, acc_type<T, true>* c2) { const int64_t index = blockIdx.x * blockDim.x + threadIdx.x; if (index < M) { const T_ACC s = T_ACC(1) / static_cast<T_ACC>(N); const T_ACC a = (db[index] * static_cast<T_ACC>(mean[index]) - ds[index]) * static_cast<T_ACC>(rstd[index]) * static_cast<T_ACC>(rstd[index]) * static_cast<T_ACC>(rstd[index]) * s; c1[index] = a; c2[index] = -(a * static_cast<T_ACC>(mean[index]) + db[index] * static_cast<T_ACC>(rstd[index]) * s); } } template <typename T, typename T_ACC> __global__ void LayerNormBackwardCUDAKernel( int64_t N, const T* dY, const T* X, const T* gamma, const T_ACC* a, const acc_type<T, true>* b, const acc_type<T, true>* c, T* dX) { const int64_t i = blockIdx.x; for (int64_t j = threadIdx.x; j < N; j += blockDim.x) { const int64_t index = i * N + j; const T_ACC gamma_v = gamma == nullptr ? T_ACC(1) : static_cast<T_ACC>(gamma[j]); dX[index] = static_cast<T_ACC>(a[i]) * static_cast<T_ACC>(dY[index]) * gamma_v + b[i] * static_cast<T_ACC>(X[index]) + c[i]; } } template <typename T, typename T_ACC> __global__ void GammaBetaBackwardSimpleCUDAKernel( int64_t M, int64_t N, const T* dY, const T* X, const T_ACC* mean, const T_ACC* rstd, T* dg, T* db) { const int64_t j = blockIdx.x * blockDim.x + threadIdx.x; if (j < N) { T_ACC sum1 = 0; T_ACC sum2 = 0; for (int64_t i = 0; i < M; ++i) { const int64_t index = i * N + j; sum1 += dg == nullptr ? T_ACC(0) : static_cast<T_ACC>(dY[index]) * (static_cast<T_ACC>(X[index]) - static_cast<T_ACC>(mean[i])) * static_cast<T_ACC>(rstd[i]); sum2 += db == nullptr ? T_ACC(0) : static_cast<T_ACC>(dY[index]); } if (dg != nullptr) { dg[j] = sum1; } if (db != nullptr) { db[j] = sum2; } } } template <typename T, typename T_ACC> __global__ void GammaBetaBackwardCUDAKernel( int64_t M, int64_t N, const T* dY, const T* X, const T_ACC* mean, const T_ACC* rstd, T* dg, T* db) { __shared__ T_ACC g_shared[kColwiseReduceTileSize][kColwiseReduceTileSize + 1]; __shared__ T_ACC b_shared[kColwiseReduceTileSize][kColwiseReduceTileSize + 1]; const int64_t j = blockIdx.x * blockDim.x + threadIdx.x; T_ACC dg_sum1 = 0; T_ACC dg_sum2 = 0; T_ACC db_sum1 = 0; T_ACC db_sum2 = 0; if (j < N) { for (int64_t i = threadIdx.y; i < M; i += blockDim.y * 2) { const int64_t i1 = i; const int64_t i2 = i + blockDim.y; const int64_t index1 = i1 * N + j; const int64_t index2 = i2 * N + j; dg_sum1 += dg == nullptr ? T_ACC(0) : static_cast<T_ACC>(dY[index1]) * (static_cast<T_ACC>(X[index1]) - static_cast<T_ACC>(mean[i1])) * static_cast<T_ACC>(rstd[i1]); db_sum1 += db == nullptr ? T_ACC(0) : static_cast<T_ACC>(dY[index1]); if (i2 < M) { dg_sum2 += dg == nullptr ? T_ACC(0) : static_cast<T_ACC>(dY[index2]) * (static_cast<T_ACC>(X[index2]) - static_cast<T_ACC>(mean[i2])) * static_cast<T_ACC>(rstd[i2]); db_sum2 += db == nullptr ? 
T_ACC(0) : static_cast<T_ACC>(dY[index2]); } } } g_shared[threadIdx.y][threadIdx.x] = dg_sum1; g_shared[threadIdx.y + blockDim.y][threadIdx.x] = dg_sum2; b_shared[threadIdx.y][threadIdx.x] = db_sum1; b_shared[threadIdx.y + blockDim.y][threadIdx.x] = db_sum2; __syncthreads(); T_ACC sum1 = g_shared[threadIdx.x][threadIdx.y]; T_ACC sum2 = b_shared[threadIdx.x][threadIdx.y]; sum1 = cuda_utils::WarpReduceSum(sum1); sum2 = cuda_utils::WarpReduceSum(sum2); if (threadIdx.x == 0) { const int64_t j = blockIdx.x * blockDim.x + threadIdx.y; if (j < N) { if (dg != nullptr) { dg[j] = sum1; } if (db != nullptr) { db[j] = sum2; } } } sum1 = g_shared[threadIdx.x][threadIdx.y + blockDim.y]; sum2 = b_shared[threadIdx.x][threadIdx.y + blockDim.y]; sum1 = cuda_utils::WarpReduceSum(sum1); sum2 = cuda_utils::WarpReduceSum(sum2); if (threadIdx.x == 0) { const int64_t j = blockIdx.x * blockDim.x + threadIdx.y + blockDim.y; if (j < N) { if (dg != nullptr) { dg[j] = sum1; } if (db != nullptr) { db[j] = sum2; } } } } template <typename T, typename T_ACC> //typename std::enable_if<!std::is_same<>::type* = nullptr> void launch_vectorized_layer_norm_kernel( int N, int64_t M, T_ACC eps, const T* X_data, const T* gamma_data, const T* beta_data, T* Y_data, T_ACC* mean_data, T_ACC* rstd_data ) { //constexpr int alignment = 16; //currently unused to make sure float and half results are bw accurate auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA().stream(); const int num_threads = 128; const dim3 threads(C10_WARP_SIZE,num_threads/C10_WARP_SIZE,1); const dim3 blocks(M); TORCH_INTERNAL_ASSERT_DEBUG_ONLY(threads.y % 2 == 0 || threads.y == 1); int nshared = threads.y > 1 ? threads.y * 3/2 *sizeof(T_ACC) : 0; hipLaunchKernelGGL(( vectorized_layer_norm_kernel), dim3(blocks), dim3(threads), nshared, stream, N, eps, X_data, gamma_data, beta_data, mean_data, rstd_data, Y_data); C10_HIP_KERNEL_LAUNCH_CHECK(); } template <typename T, typename T_ACC> void LayerNormKernelImplInternal( const Tensor& X, const Tensor& gamma, const Tensor& beta, int64_t M, int64_t N, T_ACC eps, Tensor* Y, Tensor* mean, Tensor* rstd) { // assumes input, gamma and beta are of proper shape, this was checked in _check_layer_norm_inputs // assumes all tensors are contiguous const T* X_data = X.data_ptr<T>(); const T* gamma_data = gamma.defined() ? gamma.data_ptr<T>() : nullptr; const T* beta_data = beta.defined() ? 
beta.data_ptr<T>() : nullptr; T* Y_data = Y->data_ptr<T>(); T_ACC* mean_data = mean->data_ptr<T_ACC>(); T_ACC* rstd_data = rstd->data_ptr<T_ACC>(); // check if can take fast path - all tensors are properly aligned, N is less than 2^24 (to use float count), // N is multiple of vec_size (so that all rows are aligned if tensor is aligned) auto can_vectorize = [&](const T * ptr, int alignment){uint64_t addr = reinterpret_cast<uint64_t>(ptr); return addr % alignment == 0;}; constexpr int num_vec_elems = vec_size; constexpr int alignment = num_vec_elems * sizeof(T); if ((std::is_same<T, float>::value || std::is_same<T, at::Half>::value) && N <= 1ULL << std::numeric_limits<float>::digits && N % num_vec_elems == 0 && can_vectorize(X_data, alignment) && can_vectorize(Y_data, alignment)) { launch_vectorized_layer_norm_kernel(static_cast<int>(N), M, eps, X_data, gamma_data, beta_data, Y_data, mean_data, rstd_data); } else { hipStream_t cuda_stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); hipLaunchKernelGGL(( RowwiseMomentsCUDAKernel<T, T_ACC>) , dim3(M), dim3(cuda_utils::kCUDABlockReduceNumThreads), 0, cuda_stream, N, eps, X_data, mean_data, rstd_data); C10_HIP_KERNEL_LAUNCH_CHECK(); hipLaunchKernelGGL(( LayerNormForwardCUDAKernel<T, T_ACC>), dim3(M), dim3(kCUDANumThreads), 0, cuda_stream, N, X_data, mean_data, rstd_data, gamma_data, beta_data, Y_data); C10_HIP_KERNEL_LAUNCH_CHECK(); } } void LayerNormKernelImpl( const Tensor& X, const Tensor& gamma, const Tensor& beta, int64_t M, int64_t N, double eps, Tensor* Y, Tensor* mean, Tensor* rstd) { AT_DISPATCH_FLOATING_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, X.scalar_type(), "LayerNormKernelImpl", [&]() { using acc_t = acc_type<scalar_t, true>; LayerNormKernelImplInternal<scalar_t, acc_t>( X, gamma, beta, M, N, static_cast<acc_t>(eps), Y, mean, rstd); }); } template <typename T> void LayerNormBackwardKernelImplInternal( const Tensor& dY, const Tensor& X, const Tensor& mean, const Tensor& rstd, const Tensor& gamma, int64_t M, int64_t N, Tensor* dX, Tensor* dgamma, Tensor* dbeta) { using T_ACC = acc_type<T, true>; DCHECK_EQ(dY.numel(), M * N); DCHECK_EQ(X.numel(), M * N); DCHECK_EQ(mean.numel(), M); DCHECK_EQ(rstd.numel(), M); DCHECK(!gamma.defined() || gamma.numel() == N); const T* dY_data = dY.template data_ptr<T>(); const T* X_data = X.template data_ptr<T>(); const T_ACC* mean_data = mean.template data_ptr<T_ACC>(); const T_ACC* rstd_data = rstd.template data_ptr<T_ACC>(); const T* gamma_data = gamma.defined() ? gamma.template data_ptr<T>() : nullptr; T* dX_data = dX->defined() ? dX->template data_ptr<T>() : nullptr; hipStream_t cuda_stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); if (dX_data != nullptr) { const auto kAccType = (X.scalar_type() == kHalf || X.scalar_type() == kBFloat16) ? 
kFloat : X.scalar_type(); Tensor ds = at::empty({M}, X.options().dtype(kAccType)); Tensor db = at::empty({M}, X.options().dtype(kAccType)); Tensor scale = at::empty({M}, X.options().dtype(kAccType)); Tensor bias = at::empty({M}, X.options().dtype(kAccType)); T_ACC* ds_data = ds.template data_ptr<T_ACC>(); T_ACC* db_data = db.template data_ptr<T_ACC>(); T_ACC* scale_data = scale.template data_ptr<T_ACC>(); T_ACC* bias_data = bias.template data_ptr<T_ACC>(); hipLaunchKernelGGL(( ComputeInternalGradientsCUDAKernel<T>) , dim3(M), dim3(cuda_utils::kCUDABlockReduceNumThreads), 0, cuda_stream, N, dY_data, X_data, gamma_data, ds_data, db_data); C10_HIP_KERNEL_LAUNCH_CHECK(); const int64_t B = (M + kCUDANumThreads - 1) / kCUDANumThreads; hipLaunchKernelGGL(( ComputeGradientFusedParamsCUDAKernel<T, T_ACC>) , dim3(B), dim3(kCUDANumThreads), 0, cuda_stream, M, N, mean_data, rstd_data, ds_data, db_data, scale_data, bias_data); C10_HIP_KERNEL_LAUNCH_CHECK(); hipLaunchKernelGGL(( LayerNormBackwardCUDAKernel<T>), dim3(M), dim3(kCUDANumThreads), 0, cuda_stream, N, dY_data, X_data, gamma_data, rstd_data, scale_data, bias_data, dX_data); C10_HIP_KERNEL_LAUNCH_CHECK(); } if (dgamma->defined() || dbeta->defined()) { T* dgamma_data = dgamma->defined() ? dgamma->template data_ptr<T>() : nullptr; T* dbeta_data = dbeta->defined() ? dbeta->template data_ptr<T>() : nullptr; if (M < 512) { // For small batch size, do colwise reduce directly. const int64_t B = (N + kCUDANumThreads - 1) / kCUDANumThreads; hipLaunchKernelGGL(( GammaBetaBackwardSimpleCUDAKernel<T, T_ACC>) , dim3(B), dim3(kCUDANumThreads), 0, cuda_stream, M, N, dY_data, X_data, mean_data, rstd_data, dgamma_data, dbeta_data); C10_HIP_KERNEL_LAUNCH_CHECK(); } else { const int64_t B = (N + kColwiseReduceTileSize - 1) / kColwiseReduceTileSize; constexpr int kThreadX = kColwiseReduceTileSize; constexpr int kThreadY = kColwiseReduceTileSize / 2; hipLaunchKernelGGL(( GammaBetaBackwardCUDAKernel<T, T_ACC>) , dim3(B), dim3(dim3(kThreadX, kThreadY)), 0, cuda_stream, M, N, dY_data, X_data, mean_data, rstd_data, dgamma_data, dbeta_data); C10_HIP_KERNEL_LAUNCH_CHECK(); } } } void LayerNormBackwardKernelImpl( const Tensor& dY, const Tensor& X, const Tensor& mean, const Tensor& rstd, const Tensor& gamma, int64_t M, int64_t N, Tensor* dX, Tensor* dgamma, Tensor* dbeta) { AT_DISPATCH_FLOATING_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, X.scalar_type(), "LayerNormBackwardKernelImpl", [&]() { LayerNormBackwardKernelImplInternal<scalar_t>( dY.contiguous(), X, mean, rstd, gamma, M, N, dX, dgamma, dbeta); }); } } // namespace std::tuple<Tensor, Tensor, Tensor> layer_norm_cuda( const Tensor& input, IntArrayRef normalized_shape, const c10::optional<Tensor>& weight_opt /* optional */, const c10::optional<Tensor>& bias_opt /* optional */, double eps) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt); const Tensor& weight = *weight_maybe_owned; c10::MaybeOwned<Tensor> bias_maybe_owned = at::borrow_from_optional_tensor(bias_opt); const Tensor& bias = *bias_maybe_owned; auto M_N = _check_layer_norm_inputs(input, normalized_shape, weight, bias); auto M = M_N.first; auto N = M_N.second; auto X = input.expect_contiguous(); auto gamma = weight.expect_contiguous(); auto beta = bias.expect_contiguous(); Tensor Y = at::native::empty_like( *X, c10::nullopt /* dtype */, c10::nullopt /* layout */, c10::nullopt /* device */, c10::nullopt /* pin_memory */, 
LEGACY_CONTIGUOUS_MEMORY_FORMAT); auto acc_type = at::toAccumulateType(input.scalar_type(), /*is_cuda=*/true); Tensor mean = at::empty({M}, X->options().dtype(acc_type)); Tensor rstd = at::empty({M}, X->options().dtype(acc_type)); if (M > 0) { LayerNormKernelImpl(*X, *gamma, *beta, M, N, eps, &Y, &mean, &rstd); const auto input_shape = input.sizes(); const size_t axis = input.dim() - normalized_shape.size(); std::vector<int64_t> stat_shape; for (size_t idx = 0; idx < axis; ++idx) { stat_shape.push_back(input_shape[idx]); } for (size_t idx = axis; idx < input.dim(); ++idx) { stat_shape.push_back(1); } mean = mean.view(stat_shape); rstd = rstd.view(stat_shape); } return std::make_tuple(std::move(Y), std::move(mean), std::move(rstd)); } std::tuple<Tensor, Tensor, Tensor> layer_norm_backward_cuda( const Tensor& dY, const Tensor& input, IntArrayRef normalized_shape, const Tensor& mean, const Tensor& rstd, const c10::optional<Tensor>& weight_opt /* optional */, const c10::optional<Tensor>& bias_opt /* optional */, std::array<bool, 3> grad_input_mask) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt); const Tensor& weight = *weight_maybe_owned; c10::MaybeOwned<Tensor> bias_maybe_owned = at::borrow_from_optional_tensor(bias_opt); const Tensor& bias = *bias_maybe_owned; auto M_N = _check_layer_norm_inputs(input, normalized_shape, weight, bias); auto M = M_N.first; auto N = M_N.second; auto X = input.expect_contiguous(); auto gamma = weight.expect_contiguous(); auto beta = bias.expect_contiguous(); Tensor dX; Tensor dgamma; Tensor dbeta; if (grad_input_mask[0]) { dX = at::native::empty_like( *X, c10::nullopt /* dtype */, c10::nullopt /* layout */, c10::nullopt /* device */, c10::nullopt /* pin_memory */, LEGACY_CONTIGUOUS_MEMORY_FORMAT); } if (grad_input_mask[1]) { dgamma = M > 0 ? at::native::empty_like( *gamma, c10::nullopt /* dtype */, c10::nullopt /* layout */, c10::nullopt /* device */, c10::nullopt /* pin_memory */, LEGACY_CONTIGUOUS_MEMORY_FORMAT) : at::native::zeros_like( *gamma, c10::nullopt /* dtype */, c10::nullopt /* layout */, c10::nullopt /* device */, c10::nullopt /* pin_memory */, LEGACY_CONTIGUOUS_MEMORY_FORMAT); } if (grad_input_mask[2]) { dbeta = M > 0 ? at::native::empty_like( *beta, c10::nullopt /* dtype */, c10::nullopt /* layout */, c10::nullopt /* device */, c10::nullopt /* pin_memory */, LEGACY_CONTIGUOUS_MEMORY_FORMAT) : at::native::zeros_like( *beta, c10::nullopt /* dtype */, c10::nullopt /* layout */, c10::nullopt /* device */, c10::nullopt /* pin_memory */, LEGACY_CONTIGUOUS_MEMORY_FORMAT); } if (M > 0) { LayerNormBackwardKernelImpl( dY, *X, mean, rstd, *gamma, M, N, &dX, &dgamma, &dbeta); } return std::make_tuple(std::move(dX), std::move(dgamma), std::move(dbeta)); } } // namespace native } // namespace at
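The forward path above relies on cuWelfordOnlineSum and cuWelfordCombine to accumulate mean and variance in a single pass and then merge per-thread partials. The host-only sketch below (illustrative struct and function names, not the kernel code) reproduces the same update and merge arithmetic so it can be checked on the CPU.

// CPU illustration of the Welford update/merge used by the kernels above.
#include <cstdio>

struct Welford { float mean = 0.f, m2 = 0.f, count = 0.f; };

Welford OnlineSum(float val, Welford w) {
    float delta = val - w.mean;
    w.count += 1.f;
    w.mean += delta / w.count;
    w.m2 += delta * (val - w.mean);  // running sum of squared deviations
    return w;
}

Welford Combine(Welford a, Welford b) {
    Welford out;
    out.count = a.count + b.count;
    if (out.count > 0.f) {
        float delta = b.mean - a.mean;
        float nb = b.count / out.count;
        out.mean = a.mean + delta * nb;  // equivalent to nA*a.mean + nB*b.mean
        out.m2 = a.m2 + b.m2 + delta * delta * a.count * nb;
    }
    return out;
}

int main() {
    float x[6] = {1.f, 2.f, 3.f, 4.f, 5.f, 6.f};
    Welford left, right;
    for (int i = 0; i < 3; ++i) left = OnlineSum(x[i], left);
    for (int i = 3; i < 6; ++i) right = OnlineSum(x[i], right);
    Welford all = Combine(left, right);
    // Expected: mean = 3.5, biased variance = m2 / count = 17.5 / 6 ~= 2.9167
    std::printf("mean=%f var=%f\n", all.mean, all.m2 / all.count);
    return 0;
}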
48964469728012d294037ab60ad68197cb1719f7.cu
#include <ATen/native/layer_norm.h> #include <type_traits> #include <thrust/tuple.h> #include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/Dispatch.h> #include <ATen/NativeFunctions.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/detail/IndexUtils.cuh> #include <ATen/native/cuda/block_reduce.cuh> #include <c10/cuda/CUDAMathCompat.h> namespace at { namespace native { namespace { constexpr int kCUDANumThreads = 256; constexpr int kColwiseReduceTileSize = 32; constexpr int vec_size = 4; //we could make it dependent on dtype, but that would lead to different results between float and low-p types // aligned vector generates vectorized load/store on CUDA (copy-pasted from MemoryAccess.cuh) template<typename scalar_t, int vec_size> struct alignas(sizeof(scalar_t) * vec_size) aligned_vector { scalar_t val[vec_size]; }; template <typename T, typename T_ACC> __global__ void RowwiseMomentsCUDAKernel( int64_t N, T_ACC eps, const T* X, T_ACC* mean, T_ACC* rstd) { using WelfordType = WelfordData<T_ACC, int64_t, T_ACC>; using WelfordOp = WelfordOps<T_ACC, T_ACC, int64_t, T_ACC, thrust::pair<T_ACC, T_ACC>>; __shared__ typename std::aligned_storage<sizeof(WelfordType), alignof(WelfordType)>:: type val_shared[C10_WARP_SIZE]; WelfordType* val_shared_ptr = reinterpret_cast<WelfordType*>(val_shared); const int64_t i = blockIdx.x; WelfordOp welford_op = {/*correction=*/0, /*take_sqrt=*/false}; WelfordType val(0, 0, 0, 0); for (int64_t j = threadIdx.x; j < N; j += blockDim.x) { const int64_t index = i * N + j; val = welford_op.reduce(val, static_cast<T_ACC>(X[index]), index); } val = cuda_utils::BlockReduce( val, welford_op, /*identity_element=*/WelfordType(0, 0, 0, 0), val_shared_ptr); if (threadIdx.x == 0) { T_ACC m1; T_ACC m2; thrust::tie(m2, m1) = welford_op.project(val); mean[i] = m1; rstd[i] = c10::cuda::compat::rsqrt(m2 + eps); } } template <typename T, typename T_ACC> __global__ void LayerNormForwardCUDAKernel( int64_t N, const T* X, const T_ACC* mean, const T_ACC* rstd, const T* gamma, const T* beta, T* Y) { const int64_t i = blockIdx.x; for (int64_t j = threadIdx.x; j < N; j += blockDim.x) { const int64_t index = i * N + j; const T_ACC gamma_v = gamma == nullptr ? T_ACC(1) : static_cast<T_ACC>(gamma[j]); const T_ACC beta_v = beta == nullptr ? 
T_ACC(0) : static_cast<T_ACC>(beta[j]); Y[index] = (static_cast<T_ACC>(X[index]) - static_cast<T_ACC>(mean[i])) * static_cast<T_ACC>(rstd[i]) * gamma_v + beta_v; } } struct WelfordDataLN{ float mean; float sigma2; float count; C10_HOST_DEVICE WelfordDataLN(): mean(0.f), sigma2(0.f), count(0.f){} C10_HOST_DEVICE WelfordDataLN(float mean, float sigma2, float count): mean(mean), sigma2(sigma2), count(count) {} }; template<typename U> __device__ WelfordDataLN cuWelfordOnlineSum( const U val, const WelfordDataLN& curr_sum) { U delta = val - curr_sum.mean; U new_count = curr_sum.count + 1.f; U new_mean = curr_sum.mean + delta * (1.f/new_count); //proper division is slow, this is less accurate but noticeably faster return {new_mean, curr_sum.sigma2 + delta * (val - new_mean), new_count}; } __device__ WelfordDataLN cuWelfordCombine( const WelfordDataLN dataB, const WelfordDataLN dataA ) { using U = decltype(dataB.count); U delta = dataB.mean - dataA.mean; U count = dataA.count + dataB.count; U mean, sigma2; if (count > decltype(dataB.count){0}) { auto coef = 1.f/count; //NB we don't use --use_fast_math, but this is emulation, 1./count goes to intrinsic, `* coef` is multiplication, instead of slow fp division auto nA = dataA.count * coef; auto nB = dataB.count * coef; mean = nA*dataA.mean + nB*dataB.mean; sigma2 = dataA.sigma2 + dataB.sigma2 + delta * delta * dataA.count * nB; } else { mean = U(0); sigma2 = U(0); } return {mean, sigma2, count}; } template<typename T, int alignment=16> __device__ WelfordDataLN compute_stats( const T* __restrict__ X, const int N, float * buf ) { //X points to the row to read using vec_t = aligned_vector<T, vec_size>; using acc_t = acc_type<T, true>; const vec_t * X_vec = reinterpret_cast<const vec_t*>(X); const int numx = blockDim.x * blockDim.y; const int thrx = threadIdx.x + threadIdx.y * blockDim.x; const int n_vec_to_read = N/vec_size; WelfordDataLN wd(0.f, 0.f, 0.f); //no tail, we check that N is multiple of vec_size for (int i = thrx; i < n_vec_to_read; i += numx) { vec_t data = X_vec[i]; #pragma unroll for (int ii=0; ii < vec_size; ii++){ wd = cuWelfordOnlineSum(static_cast<acc_t>(data.val[ii]), wd); } } // intra-warp reduction for (int offset = (C10_WARP_SIZE >> 1); offset > 0; offset >>= 1) { WelfordDataLN wdB{WARP_SHFL_DOWN(wd.mean, offset), WARP_SHFL_DOWN(wd.sigma2, offset), WARP_SHFL_DOWN(wd.count, offset)}; wd = cuWelfordCombine(wd, wdB); } // threadIdx.x == 0 has correct values for each warp // inter-warp reductions if (blockDim.y > 1) { float * meansigmabuf = buf; float * countbuf = buf + blockDim.y; for (int offset = blockDim.y/2; offset > 0; offset /= 2) { // upper half of warps write to shared if (threadIdx.x == 0 && threadIdx.y >= offset && threadIdx.y < 2*offset) { const int wrt_y = threadIdx.y - offset; meansigmabuf[2*wrt_y] = wd.mean; meansigmabuf[2*wrt_y+1] = wd.sigma2; countbuf[wrt_y] = wd.count; } __syncthreads(); // lower half merges if (threadIdx.x == 0 && threadIdx.y < offset) { WelfordDataLN wdB{meansigmabuf[2*threadIdx.y], meansigmabuf[2*threadIdx.y+1], countbuf[threadIdx.y]}; wd = cuWelfordCombine(wd, wdB); } __syncthreads(); } if (threadIdx.x == 0 && threadIdx.y ==0) { meansigmabuf[0] = wd.mean; meansigmabuf[1] = wd.sigma2/float(N); } __syncthreads(); return WelfordDataLN{meansigmabuf[0], meansigmabuf[1],0.f}; } else { return WelfordDataLN{WARP_SHFL(wd.mean,0), WARP_SHFL(wd.sigma2,0)/float(N), 0.f}; } } template <typename T, typename T_ACC, typename std::enable_if<!std::is_same<T, double>::value, int>::type = 0> __device__ __inline__ 
void vectorized_layer_norm_kernel_impl( const int N, T_ACC eps, const T* __restrict__ X, const T* gamma, const T* beta, T_ACC* mean, T_ACC* rstd, T* Y){ extern __shared__ float s_data[]; //if we made smem WelfordDataLN type, there would be bank conflicts, //as one thread would have to write 3 consecutive floats auto i1 = blockIdx.x; const T * block_row = X + i1 * N; WelfordDataLN wd = compute_stats(block_row, N, s_data); using vec_t = aligned_vector<T, vec_size>; const vec_t * X_vec = reinterpret_cast<const vec_t*>(block_row); vec_t * Y_vec = reinterpret_cast<vec_t*>(Y + i1 * N); const int numx = blockDim.x * blockDim.y; const int thrx = threadIdx.x + threadIdx.y * blockDim.x; const int n_vec_to_read = N/vec_size; T_ACC rstd_val = c10::cuda::compat::rsqrt(wd.sigma2 + eps); //no tail, N is guaranteed to be multiple of vec size for (int i = thrx; i < n_vec_to_read; i += numx) { vec_t data = X_vec[i]; vec_t out; //computation is performed in T_ACC, X is cast to T_ACC and result is implicitly cast to T if (gamma != nullptr && beta != nullptr) { #pragma unroll for (int ii=0; ii < vec_size; ii++){ out.val[ii] = static_cast<T_ACC>(gamma[i*vec_size + ii]) * (rstd_val * (static_cast<T_ACC>(data.val[ii]) - wd.mean)) + static_cast<T_ACC>(beta[i*vec_size + ii]); } } else { #pragma unroll for (int ii=0; ii < vec_size; ii++){ out.val[ii] = rstd_val * (static_cast<T_ACC>(data.val[ii]) - wd.mean); } } Y_vec[i] = out; } if (thrx == 0) { mean[i1] = wd.mean; rstd[i1] = rstd_val; } } template <typename T, typename T_ACC, typename std::enable_if<std::is_same<T, double>::value, int>::type = 0> __device__ __inline__ void vectorized_layer_norm_kernel_impl( const int N, T_ACC eps, const T* __restrict__ X, const T* gamma, const T* beta, T_ACC* mean, T_ACC* rstd, T* Y){ CUDA_KERNEL_ASSERT("doesn't work with double"); } //to avoid windows SFINAE errors template <typename T, typename T_ACC> __global__ __inline__ void vectorized_layer_norm_kernel( const int N, T_ACC eps, const T* __restrict__ X, const T* gamma, const T* beta, T_ACC* mean, T_ACC* rstd, T* Y){ vectorized_layer_norm_kernel_impl(N, eps, X, gamma, beta, mean, rstd, Y); } template <typename T> __global__ void ComputeInternalGradientsCUDAKernel( int64_t N, const T* dY, const T* X, const T* gamma, acc_type<T, true>* ds, acc_type<T, true>* db) { using T_ACC = acc_type<T, true>; __shared__ T_ACC ds_shared[C10_WARP_SIZE]; __shared__ T_ACC db_shared[C10_WARP_SIZE]; const int64_t i = blockIdx.x; T_ACC sum1 = 0; T_ACC sum2 = 0; for (int64_t j = threadIdx.x; j < N; j += blockDim.x) { const int64_t index = i * N + j; const T_ACC gamma_v = gamma == nullptr ? 
T_ACC(1) : static_cast<T_ACC>(gamma[j]); sum1 += static_cast<T_ACC>(dY[index]) * static_cast<T_ACC>(X[index]) * gamma_v; sum2 += static_cast<T_ACC>(dY[index]) * gamma_v; } sum1 = cuda_utils::BlockReduceSum<T_ACC>(sum1, ds_shared); sum2 = cuda_utils::BlockReduceSum<T_ACC>(sum2, db_shared); if (threadIdx.x == 0) { ds[i] = sum1; db[i] = sum2; } } template <typename T, typename T_ACC> __global__ void ComputeGradientFusedParamsCUDAKernel( int64_t M, int64_t N, const T_ACC* mean, const T_ACC* rstd, const acc_type<T, true>* ds, const acc_type<T, true>* db, acc_type<T, true>* c1, acc_type<T, true>* c2) { const int64_t index = blockIdx.x * blockDim.x + threadIdx.x; if (index < M) { const T_ACC s = T_ACC(1) / static_cast<T_ACC>(N); const T_ACC a = (db[index] * static_cast<T_ACC>(mean[index]) - ds[index]) * static_cast<T_ACC>(rstd[index]) * static_cast<T_ACC>(rstd[index]) * static_cast<T_ACC>(rstd[index]) * s; c1[index] = a; c2[index] = -(a * static_cast<T_ACC>(mean[index]) + db[index] * static_cast<T_ACC>(rstd[index]) * s); } } template <typename T, typename T_ACC> __global__ void LayerNormBackwardCUDAKernel( int64_t N, const T* dY, const T* X, const T* gamma, const T_ACC* a, const acc_type<T, true>* b, const acc_type<T, true>* c, T* dX) { const int64_t i = blockIdx.x; for (int64_t j = threadIdx.x; j < N; j += blockDim.x) { const int64_t index = i * N + j; const T_ACC gamma_v = gamma == nullptr ? T_ACC(1) : static_cast<T_ACC>(gamma[j]); dX[index] = static_cast<T_ACC>(a[i]) * static_cast<T_ACC>(dY[index]) * gamma_v + b[i] * static_cast<T_ACC>(X[index]) + c[i]; } } template <typename T, typename T_ACC> __global__ void GammaBetaBackwardSimpleCUDAKernel( int64_t M, int64_t N, const T* dY, const T* X, const T_ACC* mean, const T_ACC* rstd, T* dg, T* db) { const int64_t j = blockIdx.x * blockDim.x + threadIdx.x; if (j < N) { T_ACC sum1 = 0; T_ACC sum2 = 0; for (int64_t i = 0; i < M; ++i) { const int64_t index = i * N + j; sum1 += dg == nullptr ? T_ACC(0) : static_cast<T_ACC>(dY[index]) * (static_cast<T_ACC>(X[index]) - static_cast<T_ACC>(mean[i])) * static_cast<T_ACC>(rstd[i]); sum2 += db == nullptr ? T_ACC(0) : static_cast<T_ACC>(dY[index]); } if (dg != nullptr) { dg[j] = sum1; } if (db != nullptr) { db[j] = sum2; } } } template <typename T, typename T_ACC> __global__ void GammaBetaBackwardCUDAKernel( int64_t M, int64_t N, const T* dY, const T* X, const T_ACC* mean, const T_ACC* rstd, T* dg, T* db) { __shared__ T_ACC g_shared[kColwiseReduceTileSize][kColwiseReduceTileSize + 1]; __shared__ T_ACC b_shared[kColwiseReduceTileSize][kColwiseReduceTileSize + 1]; const int64_t j = blockIdx.x * blockDim.x + threadIdx.x; T_ACC dg_sum1 = 0; T_ACC dg_sum2 = 0; T_ACC db_sum1 = 0; T_ACC db_sum2 = 0; if (j < N) { for (int64_t i = threadIdx.y; i < M; i += blockDim.y * 2) { const int64_t i1 = i; const int64_t i2 = i + blockDim.y; const int64_t index1 = i1 * N + j; const int64_t index2 = i2 * N + j; dg_sum1 += dg == nullptr ? T_ACC(0) : static_cast<T_ACC>(dY[index1]) * (static_cast<T_ACC>(X[index1]) - static_cast<T_ACC>(mean[i1])) * static_cast<T_ACC>(rstd[i1]); db_sum1 += db == nullptr ? T_ACC(0) : static_cast<T_ACC>(dY[index1]); if (i2 < M) { dg_sum2 += dg == nullptr ? T_ACC(0) : static_cast<T_ACC>(dY[index2]) * (static_cast<T_ACC>(X[index2]) - static_cast<T_ACC>(mean[i2])) * static_cast<T_ACC>(rstd[i2]); db_sum2 += db == nullptr ? 
T_ACC(0) : static_cast<T_ACC>(dY[index2]); } } } g_shared[threadIdx.y][threadIdx.x] = dg_sum1; g_shared[threadIdx.y + blockDim.y][threadIdx.x] = dg_sum2; b_shared[threadIdx.y][threadIdx.x] = db_sum1; b_shared[threadIdx.y + blockDim.y][threadIdx.x] = db_sum2; __syncthreads(); T_ACC sum1 = g_shared[threadIdx.x][threadIdx.y]; T_ACC sum2 = b_shared[threadIdx.x][threadIdx.y]; sum1 = cuda_utils::WarpReduceSum(sum1); sum2 = cuda_utils::WarpReduceSum(sum2); if (threadIdx.x == 0) { const int64_t j = blockIdx.x * blockDim.x + threadIdx.y; if (j < N) { if (dg != nullptr) { dg[j] = sum1; } if (db != nullptr) { db[j] = sum2; } } } sum1 = g_shared[threadIdx.x][threadIdx.y + blockDim.y]; sum2 = b_shared[threadIdx.x][threadIdx.y + blockDim.y]; sum1 = cuda_utils::WarpReduceSum(sum1); sum2 = cuda_utils::WarpReduceSum(sum2); if (threadIdx.x == 0) { const int64_t j = blockIdx.x * blockDim.x + threadIdx.y + blockDim.y; if (j < N) { if (dg != nullptr) { dg[j] = sum1; } if (db != nullptr) { db[j] = sum2; } } } } template <typename T, typename T_ACC> //typename std::enable_if<!std::is_same<>::type* = nullptr> void launch_vectorized_layer_norm_kernel( int N, int64_t M, T_ACC eps, const T* X_data, const T* gamma_data, const T* beta_data, T* Y_data, T_ACC* mean_data, T_ACC* rstd_data ) { //constexpr int alignment = 16; //currently unused to make sure float and half results are bw accurate auto stream = at::cuda::getCurrentCUDAStream().stream(); const int num_threads = 128; const dim3 threads(C10_WARP_SIZE,num_threads/C10_WARP_SIZE,1); const dim3 blocks(M); TORCH_INTERNAL_ASSERT_DEBUG_ONLY(threads.y % 2 == 0 || threads.y == 1); int nshared = threads.y > 1 ? threads.y * 3/2 *sizeof(T_ACC) : 0; vectorized_layer_norm_kernel<<<blocks, threads, nshared, stream>>>(N, eps, X_data, gamma_data, beta_data, mean_data, rstd_data, Y_data); C10_CUDA_KERNEL_LAUNCH_CHECK(); } template <typename T, typename T_ACC> void LayerNormKernelImplInternal( const Tensor& X, const Tensor& gamma, const Tensor& beta, int64_t M, int64_t N, T_ACC eps, Tensor* Y, Tensor* mean, Tensor* rstd) { // assumes input, gamma and beta are of proper shape, this was checked in _check_layer_norm_inputs // assumes all tensors are contiguous const T* X_data = X.data_ptr<T>(); const T* gamma_data = gamma.defined() ? gamma.data_ptr<T>() : nullptr; const T* beta_data = beta.defined() ? 
beta.data_ptr<T>() : nullptr; T* Y_data = Y->data_ptr<T>(); T_ACC* mean_data = mean->data_ptr<T_ACC>(); T_ACC* rstd_data = rstd->data_ptr<T_ACC>(); // check if can take fast path - all tensors are properly aligned, N is less than 2^24 (to use float count), // N is multiple of vec_size (so that all rows are aligned if tensor is aligned) auto can_vectorize = [&](const T * ptr, int alignment){uint64_t addr = reinterpret_cast<uint64_t>(ptr); return addr % alignment == 0;}; constexpr int num_vec_elems = vec_size; constexpr int alignment = num_vec_elems * sizeof(T); if ((std::is_same<T, float>::value || std::is_same<T, at::Half>::value) && N <= 1ULL << std::numeric_limits<float>::digits && N % num_vec_elems == 0 && can_vectorize(X_data, alignment) && can_vectorize(Y_data, alignment)) { launch_vectorized_layer_norm_kernel(static_cast<int>(N), M, eps, X_data, gamma_data, beta_data, Y_data, mean_data, rstd_data); } else { cudaStream_t cuda_stream = at::cuda::getCurrentCUDAStream(); RowwiseMomentsCUDAKernel<T, T_ACC> <<<M, cuda_utils::kCUDABlockReduceNumThreads, 0, cuda_stream>>>( N, eps, X_data, mean_data, rstd_data); C10_CUDA_KERNEL_LAUNCH_CHECK(); LayerNormForwardCUDAKernel<T, T_ACC><<<M, kCUDANumThreads, 0, cuda_stream>>>( N, X_data, mean_data, rstd_data, gamma_data, beta_data, Y_data); C10_CUDA_KERNEL_LAUNCH_CHECK(); } } void LayerNormKernelImpl( const Tensor& X, const Tensor& gamma, const Tensor& beta, int64_t M, int64_t N, double eps, Tensor* Y, Tensor* mean, Tensor* rstd) { AT_DISPATCH_FLOATING_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, X.scalar_type(), "LayerNormKernelImpl", [&]() { using acc_t = acc_type<scalar_t, true>; LayerNormKernelImplInternal<scalar_t, acc_t>( X, gamma, beta, M, N, static_cast<acc_t>(eps), Y, mean, rstd); }); } template <typename T> void LayerNormBackwardKernelImplInternal( const Tensor& dY, const Tensor& X, const Tensor& mean, const Tensor& rstd, const Tensor& gamma, int64_t M, int64_t N, Tensor* dX, Tensor* dgamma, Tensor* dbeta) { using T_ACC = acc_type<T, true>; DCHECK_EQ(dY.numel(), M * N); DCHECK_EQ(X.numel(), M * N); DCHECK_EQ(mean.numel(), M); DCHECK_EQ(rstd.numel(), M); DCHECK(!gamma.defined() || gamma.numel() == N); const T* dY_data = dY.template data_ptr<T>(); const T* X_data = X.template data_ptr<T>(); const T_ACC* mean_data = mean.template data_ptr<T_ACC>(); const T_ACC* rstd_data = rstd.template data_ptr<T_ACC>(); const T* gamma_data = gamma.defined() ? gamma.template data_ptr<T>() : nullptr; T* dX_data = dX->defined() ? dX->template data_ptr<T>() : nullptr; cudaStream_t cuda_stream = at::cuda::getCurrentCUDAStream(); if (dX_data != nullptr) { const auto kAccType = (X.scalar_type() == kHalf || X.scalar_type() == kBFloat16) ? 
kFloat : X.scalar_type(); Tensor ds = at::empty({M}, X.options().dtype(kAccType)); Tensor db = at::empty({M}, X.options().dtype(kAccType)); Tensor scale = at::empty({M}, X.options().dtype(kAccType)); Tensor bias = at::empty({M}, X.options().dtype(kAccType)); T_ACC* ds_data = ds.template data_ptr<T_ACC>(); T_ACC* db_data = db.template data_ptr<T_ACC>(); T_ACC* scale_data = scale.template data_ptr<T_ACC>(); T_ACC* bias_data = bias.template data_ptr<T_ACC>(); ComputeInternalGradientsCUDAKernel<T> <<<M, cuda_utils::kCUDABlockReduceNumThreads, 0, cuda_stream>>>( N, dY_data, X_data, gamma_data, ds_data, db_data); C10_CUDA_KERNEL_LAUNCH_CHECK(); const int64_t B = (M + kCUDANumThreads - 1) / kCUDANumThreads; ComputeGradientFusedParamsCUDAKernel<T, T_ACC> <<<B, kCUDANumThreads, 0, cuda_stream>>>( M, N, mean_data, rstd_data, ds_data, db_data, scale_data, bias_data); C10_CUDA_KERNEL_LAUNCH_CHECK(); LayerNormBackwardCUDAKernel<T><<<M, kCUDANumThreads, 0, cuda_stream>>>( N, dY_data, X_data, gamma_data, rstd_data, scale_data, bias_data, dX_data); C10_CUDA_KERNEL_LAUNCH_CHECK(); } if (dgamma->defined() || dbeta->defined()) { T* dgamma_data = dgamma->defined() ? dgamma->template data_ptr<T>() : nullptr; T* dbeta_data = dbeta->defined() ? dbeta->template data_ptr<T>() : nullptr; if (M < 512) { // For small batch size, do colwise reduce directly. const int64_t B = (N + kCUDANumThreads - 1) / kCUDANumThreads; GammaBetaBackwardSimpleCUDAKernel<T, T_ACC> <<<B, kCUDANumThreads, 0, cuda_stream>>>( M, N, dY_data, X_data, mean_data, rstd_data, dgamma_data, dbeta_data); C10_CUDA_KERNEL_LAUNCH_CHECK(); } else { const int64_t B = (N + kColwiseReduceTileSize - 1) / kColwiseReduceTileSize; constexpr int kThreadX = kColwiseReduceTileSize; constexpr int kThreadY = kColwiseReduceTileSize / 2; GammaBetaBackwardCUDAKernel<T, T_ACC> <<<B, dim3(kThreadX, kThreadY), 0, cuda_stream>>>( M, N, dY_data, X_data, mean_data, rstd_data, dgamma_data, dbeta_data); C10_CUDA_KERNEL_LAUNCH_CHECK(); } } } void LayerNormBackwardKernelImpl( const Tensor& dY, const Tensor& X, const Tensor& mean, const Tensor& rstd, const Tensor& gamma, int64_t M, int64_t N, Tensor* dX, Tensor* dgamma, Tensor* dbeta) { AT_DISPATCH_FLOATING_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, X.scalar_type(), "LayerNormBackwardKernelImpl", [&]() { LayerNormBackwardKernelImplInternal<scalar_t>( dY.contiguous(), X, mean, rstd, gamma, M, N, dX, dgamma, dbeta); }); } } // namespace std::tuple<Tensor, Tensor, Tensor> layer_norm_cuda( const Tensor& input, IntArrayRef normalized_shape, const c10::optional<Tensor>& weight_opt /* optional */, const c10::optional<Tensor>& bias_opt /* optional */, double eps) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt); const Tensor& weight = *weight_maybe_owned; c10::MaybeOwned<Tensor> bias_maybe_owned = at::borrow_from_optional_tensor(bias_opt); const Tensor& bias = *bias_maybe_owned; auto M_N = _check_layer_norm_inputs(input, normalized_shape, weight, bias); auto M = M_N.first; auto N = M_N.second; auto X = input.expect_contiguous(); auto gamma = weight.expect_contiguous(); auto beta = bias.expect_contiguous(); Tensor Y = at::native::empty_like( *X, c10::nullopt /* dtype */, c10::nullopt /* layout */, c10::nullopt /* device */, c10::nullopt /* pin_memory */, LEGACY_CONTIGUOUS_MEMORY_FORMAT); auto acc_type = at::toAccumulateType(input.scalar_type(), /*is_cuda=*/true); Tensor mean = at::empty({M}, X->options().dtype(acc_type)); 
Tensor rstd = at::empty({M}, X->options().dtype(acc_type)); if (M > 0) { LayerNormKernelImpl(*X, *gamma, *beta, M, N, eps, &Y, &mean, &rstd); const auto input_shape = input.sizes(); const size_t axis = input.dim() - normalized_shape.size(); std::vector<int64_t> stat_shape; for (size_t idx = 0; idx < axis; ++idx) { stat_shape.push_back(input_shape[idx]); } for (size_t idx = axis; idx < input.dim(); ++idx) { stat_shape.push_back(1); } mean = mean.view(stat_shape); rstd = rstd.view(stat_shape); } return std::make_tuple(std::move(Y), std::move(mean), std::move(rstd)); } std::tuple<Tensor, Tensor, Tensor> layer_norm_backward_cuda( const Tensor& dY, const Tensor& input, IntArrayRef normalized_shape, const Tensor& mean, const Tensor& rstd, const c10::optional<Tensor>& weight_opt /* optional */, const c10::optional<Tensor>& bias_opt /* optional */, std::array<bool, 3> grad_input_mask) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt); const Tensor& weight = *weight_maybe_owned; c10::MaybeOwned<Tensor> bias_maybe_owned = at::borrow_from_optional_tensor(bias_opt); const Tensor& bias = *bias_maybe_owned; auto M_N = _check_layer_norm_inputs(input, normalized_shape, weight, bias); auto M = M_N.first; auto N = M_N.second; auto X = input.expect_contiguous(); auto gamma = weight.expect_contiguous(); auto beta = bias.expect_contiguous(); Tensor dX; Tensor dgamma; Tensor dbeta; if (grad_input_mask[0]) { dX = at::native::empty_like( *X, c10::nullopt /* dtype */, c10::nullopt /* layout */, c10::nullopt /* device */, c10::nullopt /* pin_memory */, LEGACY_CONTIGUOUS_MEMORY_FORMAT); } if (grad_input_mask[1]) { dgamma = M > 0 ? at::native::empty_like( *gamma, c10::nullopt /* dtype */, c10::nullopt /* layout */, c10::nullopt /* device */, c10::nullopt /* pin_memory */, LEGACY_CONTIGUOUS_MEMORY_FORMAT) : at::native::zeros_like( *gamma, c10::nullopt /* dtype */, c10::nullopt /* layout */, c10::nullopt /* device */, c10::nullopt /* pin_memory */, LEGACY_CONTIGUOUS_MEMORY_FORMAT); } if (grad_input_mask[2]) { dbeta = M > 0 ? at::native::empty_like( *beta, c10::nullopt /* dtype */, c10::nullopt /* layout */, c10::nullopt /* device */, c10::nullopt /* pin_memory */, LEGACY_CONTIGUOUS_MEMORY_FORMAT) : at::native::zeros_like( *beta, c10::nullopt /* dtype */, c10::nullopt /* layout */, c10::nullopt /* device */, c10::nullopt /* pin_memory */, LEGACY_CONTIGUOUS_MEMORY_FORMAT); } if (M > 0) { LayerNormBackwardKernelImpl( dY, *X, mean, rstd, *gamma, M, N, &dX, &dgamma, &dbeta); } return std::make_tuple(std::move(dX), std::move(dgamma), std::move(dbeta)); } } // namespace native } // namespace at
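// ---------------------------------------------------------------------------
// Standalone sketch (not part of the ATen kernels above): the pairwise Welford
// combine used by cuWelfordCombine / compute_stats merges two partial
// summaries (mean, M2 = sum of squared deviations, count) as
//   count = nA + nB,
//   mean  = (nA*meanA + nB*meanB) / count,
//   M2    = M2A + M2B + (meanB - meanA)^2 * nA * nB / count,
// with the single 1/count division shared, as in the kernel. The host-side
// check below illustrates that rule; the names Welford, combine and
// accumulate are illustrative and not part of PyTorch.
// ---------------------------------------------------------------------------
#include <cassert>
#include <cmath>
#include <vector>

struct Welford { double mean = 0.0, m2 = 0.0, count = 0.0; };

// Merge two partial summaries exactly as cuWelfordCombine does.
static Welford combine(const Welford& a, const Welford& b) {
  Welford out;
  out.count = a.count + b.count;
  if (out.count > 0.0) {
    const double coef  = 1.0 / out.count;      // one division, reused below
    const double delta = b.mean - a.mean;
    out.mean = (a.count * a.mean + b.count * b.mean) * coef;
    out.m2   = a.m2 + b.m2 + delta * delta * a.count * (b.count * coef);
  }
  return out;
}

// Serial Welford update, mirroring cuWelfordOnlineSum.
static Welford accumulate(const std::vector<double>& x) {
  Welford w;
  for (double v : x) {
    w.count += 1.0;
    const double delta = v - w.mean;
    w.mean += delta / w.count;
    w.m2   += delta * (v - w.mean);
  }
  return w;
}

int main() {
  const std::vector<double> row = {1.0, 2.0, 4.0, 8.0, 16.0, 32.0};
  // Reduce two halves independently, then merge -- the same shape as the
  // warp/block reduction tree in compute_stats.
  const Welford left   = accumulate({row.begin(), row.begin() + 3});
  const Welford right  = accumulate({row.begin() + 3, row.end()});
  const Welford merged = combine(left, right);
  const Welford serial = accumulate(row);
  assert(std::fabs(merged.mean - serial.mean) < 1e-9);   // both give 10.5
  assert(std::fabs(merged.m2   - serial.m2)   < 1e-9);   // both give 703.5
  return 0;
}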
5672061bd7bee74b575c406da49ec801e89758e2.hip
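// ---------------------------------------------------------------------------
// GPU helpers for a pulsar-timing noise-model likelihood (this file is the
// hipify-generated counterpart of the CUDA source listed after it).
// NewLRedMarginGPUWrapper_ fills the device "T" matrix (make_fmatrix: timing
// design columns plus red-noise, DM, band- and group-noise Fourier terms),
// scales it by the per-observation noise vector (MatMulKernel), forms T'NT
// and NT'r with CULA Dgemm/Dgemv, adds 1/powercoeff to the diagonal
// (addCoeffsKernel), and obtains the log-determinant and the solve from a
// Cholesky factorisation (Dpotrf, calc_det, Dpotrs). copy_staticTmat_
// uploads the persistent matrix into GlobalTotalMatrix_d. This summary is
// read off the code below, not from external documentation.
// ---------------------------------------------------------------------------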
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <gsl/gsl_sf_gamma.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include "dgesvd.h" #include <cula_lapack_device.h> #include <cula_blas_device.h> #define BLOCK_SIZE 16 //double *GlobalGmat_d; //double *GlobalStaticGmat_d; //double *GlobalStaticUGmat_d; //float *GlobalGmatFloat_d; //double *GlobalStaticDmat_d; double *GlobalTotalMatrix_d; // Matrices are stored in row-major order: // M(row, col) = *(M.elements + row * M.width + col) double iter_factorialGPU(unsigned int n) { double ret = 1; for(unsigned int i = 1; i <= n; ++i) ret *= i; return ret; } void checkStatus(culaStatus status) { char buf[256]; if(!status) return; culaGetErrorInfoString(status, culaGetErrorInfo(), buf, sizeof(buf)); printf("%s\n", buf); culaShutdown(); exit(EXIT_FAILURE); } void checkStatusCarryOn(culaStatus status, int &CarryOn) { char buf[256]; if(!status){ CarryOn=0; return; } culaGetErrorInfoString(status, culaGetErrorInfo(), buf, sizeof(buf)); //printf("%s\n", buf); CarryOn=1; culaShutdown(); culaStatus status2; status2 = culaInitialize(); } void checkCudaError(hipError_t err) { if(!err) return; printf("%s\n", hipGetErrorString(err)); culaShutdown(); exit(EXIT_FAILURE); } // simple kernel function that adds two vectors __global__ void vect_add(double *a, double *b, int N) { int Bidx = blockIdx.x; //a[Bidx*N+Tidx] = a[Bidx*N+Tidx] + b[Bidx*N+Tidx]; for(int i =0; i < N; i++){ a[Bidx*N+i] = a[Bidx*N+i] + b[Bidx*N+i]; } } // simple kernel function that calcs det of a matrix __global__ void calc_det(double *a, double *det, int N) { det[0]=0; for(int i =0; i < N; i++){ det[0]+=log(a[i*N+i]); } det[0]=det[0]*2; } // simple kernel function that calcs det of a matrix __global__ void Floatcalc_det(float *a, double *det, int N) { det[0]=0; for(int i =0; i < N; i++){ det[0]+=log(a[i*N+i]); } det[0]=det[0]*2; } // simple kernel function that calcs det of a matrix __global__ void calc_detFloat(float *a, double *det, int N) { det[0]=0; for(int i =0; i < N; i++){ det[0]+=log(a[i*N+i]); } det[0]=det[0]*2; } // simple kernel function that calcs det of a matrix __global__ void calc_DiagLike(double *Vec, double *Noise, int N, double *val) { val[0]=0; for(int i =0; i < N; i++){ val[0]+=Vec[i]*Vec[i]*Noise[i]; } } // simple kernel function that calcs det of a matrix __global__ void calc_DotLike(double *Vec1, double *Vec2, int N, double *val) { val[0]=0; for(int i =0; i < N; i++){ val[0]+=Vec1[i]*Vec2[i]; //printf("GPU copy %i %g %g\n", i,Vec1[i],Vec2[i]); } } // simple kernel function that calcs det of a matrix __global__ void copyvec(double *Vec1, double *Vec2, int N) { int Bidx = blockIdx.x; __syncthreads(); Vec1[Bidx]=Vec2[Bidx]; //printf("copy: %i %g\n",Bidx, Vec1[Bidx]); } /* __global__ void Makecov(double *A_d, double *BATvec, double *NoiseVec, double *SpecParm, int Aheight, int Awidth) { // Each thread computes one element of C // by accumulating results into Cvalue double LongestPeriod=1.0/pow(10.0,-5); // double flo=1.0/LongestPeriod; double modelalpha=SpecParm[1]; double gwampsquared=SpecParm[0]; double covconst=SpecParm[2]; int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; __syncthreads(); if(row >= Aheight || col >= Awidth) return; double timdiff= BATvec[row] - BATvec[col]; double tau=2.0*M_PI*fabs(timdiff); double covsum=0; for(int k=0; k <=10; k++){ double ret = 1; for(unsigned int i = 1; i <= 2*k; ++i){ ret *= (double)i; // printf("Ret: %i %g 
\n",i,ret); } covsum=covsum+pow(-1.0,k)*(pow(flo*tau,2*k))/(ret*(2*k+1-modelalpha)); //printf("covsum: %i %i %i %g \n",row,col,k,covsum); } A_d[row * Awidth + col]=gwampsquared*(covconst*pow((flo*tau),(modelalpha-1)) - covsum); if(row==col){ A_d[row * Awidth + col] += NoiseVec[row]; } } __global__ void MakeDMcov(double *A_d, double *BATvec, double *NoiseVec, double *DMVec, double *SpecParm, int Aheight, int Awidth) { // Each thread computes one element of C // by accumulating results into Cvalue double LongestPeriod=1.0/pow(10.0,-5); double flo=1.0/LongestPeriod; double gwampsquared=SpecParm[0]; double modelalpha=SpecParm[1]; double covconst=SpecParm[2]; double dmampsquared=SpecParm[3]; double dmmodelalpha=SpecParm[4]; double dmcovconst=SpecParm[5]; int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; __syncthreads(); if(row >= Aheight || col >= Awidth) return; double timdiff= BATvec[row] - BATvec[col]; double tau=2.0*M_PI*fabs(timdiff); double covsum=0; double dmcovsum=0; for(int k=0; k <=5; k++){ double ret = 1; for(unsigned int i = 1; i <= 2*k; ++i){ ret *= (double)i; } covsum=covsum+pow(-1.0,k)*(pow(flo*tau,2*k))/(ret*(2*k+1-modelalpha)); dmcovsum=dmcovsum+pow(-1.0,k)*(pow(flo*tau,2*k))/(ret*(2*k+1-dmmodelalpha)); } double gwpart=0; if(SpecParm[0] !=0 )gwpart=gwampsquared*(covconst*pow((flo*tau),(modelalpha-1)) - covsum); double dmpart=0; if(SpecParm[3] !=0 )dmpart=dmampsquared*(dmcovconst*pow((flo*tau),(dmmodelalpha-1)) - dmcovsum)*DMVec[row]*DMVec[col]; A_d[row * Awidth + col]= gwpart+dmpart; if(row==col){ A_d[row * Awidth + col] += NoiseVec[row]; } //printf("%i %i %g\n",row,col,A_d[row * Awidth + col]); } */ __global__ void MatMulKernel(int Arow,int Acol,int Brow, int Bcol,double *A,double *B,double *C) { int Crow=Arow; int Ccol=Bcol; double Ctemp = 0.0; int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; __syncthreads(); if(row < Arow && col < Bcol) { //if(row<32)printf("GPUNT: %i %i %g %g \n", row, col, B[col * Brow + row], A[row] ); Ctemp = A[row] * B[col * Brow + row]; //GGTest[col*N + row] C[col*Crow+row] = Ctemp; } // } __global__ void SimpleDiagMatMulKernel(int N,int T,double *Noise_d,double *TMatrix_d,double *NTMatrix_d) { __syncthreads(); for(int i=0;i<T; i++){ for(int j=0;j<N; j++){ //if(i ==28)printf("GPU SDMMK %i %i %g %g \n",i,j,TMatrix_d[i*N + j],Noise_d[j]); NTMatrix_d[i*N + j]=TMatrix_d[i*N + j]*Noise_d[j]; } } } /* extern "C" void WhiteMarginGPUWrapper_(void *context, double *TNDMVec, double *resvec, double *Noise, int N, int D, int NTime, int NJumps, double *likevals){ double *resvec_d; double *Noise_d; double *DMatrix_d; double *NT_d; double *TNT_d; double *NTd_d; hipError_t err; culaStatus status; // allocate memory on GPU err = hipMalloc( (void **)&resvec_d, sizeof(double)*N ); checkCudaError(err); err = hipMalloc( (void **)&Noise_d, sizeof(double)*N ); checkCudaError(err); err = hipMalloc( (void **)&DMatrix_d, sizeof(double)*N*D ); checkCudaError(err); err = hipMalloc( (void **)&NT_d, sizeof(double)*N*D ); checkCudaError(err); err = hipMalloc( (void **)&TNT_d, sizeof(double)*D*D ); checkCudaError(err); err = hipMalloc( (void **)&NTd_d, sizeof(double)*D); checkCudaError(err); // copy vectors from CPU to GPU err = hipMemcpy(resvec_d, resvec, sizeof(double)*N, hipMemcpyHostToDevice ); checkCudaError(err); err = hipMemcpy( Noise_d, Noise, sizeof(double)*N, hipMemcpyHostToDevice ); checkCudaError(err); dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 dimGrid; 
///////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////Form the Design Matrix//////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////// if(D != NTime+NJumps){ err = hipMemcpy( DMatrix_d, TNDMVec, sizeof(double)*D*N, hipMemcpyHostToDevice ); checkCudaError(err); double *U_d; double *V_d; double *S_d; err = hipMalloc( (void **)&U_d, sizeof(double)*N*N ); checkCudaError(err); err = hipMalloc( (void **)&V_d, sizeof(double)*D*D ); checkCudaError(err); err = hipMalloc( (void **)&S_d, sizeof(double)*D ); checkCudaError(err); culaDeviceDgesvd('O','N', N, D, DMatrix_d, N, S_d, U_d, N, V_d, D); hipFree(V_d); hipFree(S_d); hipFree(U_d); hipDeviceSynchronize(); } else{ DMatrix_d=GlobalStaticDmat_d; hipDeviceSynchronize(); } /////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////Do the Algebra/////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////// //printf("entered 5 %i %i\n",T,N); dimGrid.x=(D + dimBlock.x - 1)/dimBlock.x; dimGrid.y = (N + dimBlock.y - 1)/dimBlock.y; hipLaunchKernelGGL(( MatMulKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, N,N,N, D,Noise_d,DMatrix_d,NT_d); //SimpleDiagMatMulKernel<<<1,1>>>(N, T, Noise_d, TMatrix_d, NT_d); hipDeviceSynchronize(); double alpha=1.0; double beta=0.0; status = culaDeviceDgemm('T', 'N', D, D, N, alpha, DMatrix_d, N, NT_d, N, beta, TNT_d, D); checkStatus(status); hipDeviceSynchronize(); status = culaDeviceDgemv('T', N, D, alpha, NT_d, N, resvec_d, 1, beta, NTd_d, 1); checkStatus(status); hipDeviceSynchronize(); //printf("entered 6: %i \n", T); double *dettemp_d; double *tempval=new double[1]; err = hipMalloc( (void **)&dettemp_d, sizeof(double) ); checkCudaError(err); int carryOn=0; status = culaDeviceDpotrf('L', D, TNT_d, D); hipDeviceSynchronize(); //printf("entered 6.25\n"); checkStatusCarryOn(status,carryOn); hipDeviceSynchronize(); //printf("entered 6.5\n"); hipLaunchKernelGGL(( calc_det), dim3(1), dim3(1) , 0, 0, TNT_d, dettemp_d, D); err = hipMemcpy( tempval, dettemp_d, sizeof(double), hipMemcpyDeviceToHost); checkCudaError(err); likevals[0]=tempval[0]; hipDeviceSynchronize(); if(carryOn == 1){ hipFree(resvec_d); hipFree(Noise_d); hipFree(DMatrix_d); hipFree(NT_d); hipFree(TNT_d); hipFree(NTd_d); hipFree(dettemp_d); delete(tempval); return; } //printf("entered 7\n"); double *WorkVec_d; err = hipMalloc( (void **)&WorkVec_d, sizeof(double)*D ); checkCudaError(err); copyvec<<< D, 1 >>>(WorkVec_d, NTd_d, D); hipDeviceSynchronize(); status=culaDeviceDpotrs('L', D, 1, TNT_d, D, WorkVec_d, D); checkStatus(status); hipDeviceSynchronize(); double *freqlike_d; err = hipMalloc( (void **)&freqlike_d, sizeof(double)); checkCudaError(err); hipLaunchKernelGGL(( calc_DotLike), dim3(1), dim3(1) , 0, 0, WorkVec_d, NTd_d, D, freqlike_d); hipDeviceSynchronize(); err = hipMemcpy( tempval, freqlike_d, sizeof(double), hipMemcpyDeviceToHost); checkCudaError(err); likevals[1]=tempval[0]; hipFree(resvec_d); hipFree(Noise_d); hipFree(DMatrix_d); hipFree(NT_d); hipFree(TNT_d); hipFree(NTd_d); hipFree(WorkVec_d); hipFree(dettemp_d); delete(tempval); } */ // simple kernel function that calculates the FMatrix __global__ void make_fmatrix(double *TMatrix_d,double *Freqs_d, double *ObsFreqs_d, double *BATvec_d, double *DMVec_d, int *SysGroups_d, int *BandFreqs, int N,int RF, int DMF, int 
BandNoiseCoeff, int GroupNoiseCoeff, int incRED, int incDM, int incBandNoise, int incGroupNoise, int ReplaceTMatrix, int TimetoMargin, int numTime, int numJumps, double *DMatrix_d) { int Bidx = blockIdx.x; if(TimetoMargin != numJumps + numTime){ for(int i=0;i<TimetoMargin;i++){ TMatrix_d[i*N + Bidx]=DMatrix_d[i*N + Bidx]; } } int startpos=0; if(incRED !=0){ if(ReplaceTMatrix==0){ for(int i=0;i<RF/2;i++){ TMatrix_d[(TimetoMargin+i)*N + Bidx]=cos(2*M_PI*Freqs_d[i]*BATvec_d[Bidx]); TMatrix_d[(TimetoMargin+i+RF/2)*N + Bidx]=sin(2*M_PI*Freqs_d[i]*BATvec_d[Bidx]); } } startpos=RF; } if(incDM !=0){ if(ReplaceTMatrix==0){ for(int i=0;i<DMF/2;i++){ //if(Bidx==0)printf("D: %i %i %g %g \n", Bidx,i,1.0/Freqs_d[i], DMVec_d[Bidx]); TMatrix_d[(TimetoMargin+startpos+i)*N + Bidx]=cos(2*M_PI*Freqs_d[startpos+i]*BATvec_d[Bidx])*DMVec_d[Bidx]; TMatrix_d[(TimetoMargin+startpos+i+DMF/2)*N + Bidx]=sin(2*M_PI*Freqs_d[startpos+i]*BATvec_d[Bidx])*DMVec_d[Bidx]; } } startpos=startpos+DMF; } if(incBandNoise > 0){ for(int b = 0; b < incBandNoise; b++){ if(ReplaceTMatrix==0){ int startfreq = BandFreqs[b*3+0]; int stopfreq = BandFreqs[b*3+1]; int BandScale = BandFreqs[b*3+2]; for(int i=0;i<BandNoiseCoeff/2;i++){ if(ObsFreqs_d[Bidx] > startfreq && ObsFreqs_d[Bidx] < stopfreq){ TMatrix_d[(TimetoMargin+startpos+i)*N + Bidx]=cos(2*M_PI*Freqs_d[startpos+i]*BATvec_d[Bidx]); TMatrix_d[(TimetoMargin+startpos+i+BandNoiseCoeff/2)*N + Bidx]=sin(2*M_PI*Freqs_d[startpos+i]*BATvec_d[Bidx]); } else{ TMatrix_d[(TimetoMargin+startpos+i)*N + Bidx]=0; TMatrix_d[(TimetoMargin+startpos+i+BandNoiseCoeff/2)*N + Bidx]=0; } } } startpos=startpos+BandNoiseCoeff; } } //printf("In GPU : %i \n", incGroupNoise); if(incGroupNoise > 0){ for(int g = 0; g < incGroupNoise; g++){ for(int i=0;i<GroupNoiseCoeff/2;i++){ //printf("GPU Groups %i %i \n", Bidx, SysGroups_d[Bidx]); if(SysGroups_d[Bidx] == g+1){ TMatrix_d[(TimetoMargin+startpos+i)*N + Bidx]=cos(2*M_PI*Freqs_d[startpos+i]*BATvec_d[Bidx]); TMatrix_d[(TimetoMargin+startpos+i+GroupNoiseCoeff/2)*N + Bidx]=sin(2*M_PI*Freqs_d[startpos+i]*BATvec_d[Bidx]); } else{ TMatrix_d[(TimetoMargin+startpos+i)*N + Bidx]=0; TMatrix_d[(TimetoMargin+startpos+i+GroupNoiseCoeff/2)*N + Bidx]=0; } } startpos=startpos+GroupNoiseCoeff; } } } /* // simple kernel function that calculates the FMatrix __global__ void add_EcorrToFMatrix(double *FMatrix_d, double *EMatrix_d, int Nobs, int FSize, int EpochSize) { int Bidx = blockIdx.x; for(int i=0;i<EpochSize;i++){ FMatrix_d[(FSize+i)*Nobs + Bidx]= EMatrix_d[i*Nobs + Bidx]; } } */ /* // simple kernel function that calculates the FMatrix __global__ void make_DMfmatrix(double *FMatrix_d,double *Freqs_d, double *BATvec_d, double *DMVec_d, int N,int F) { int Bidx = blockIdx.x; for(int i=0;i<F/4;i++){ FMatrix_d[i*N + Bidx]=cos(2*M_PI*Freqs_d[i]*BATvec_d[Bidx]); FMatrix_d[(i+F/4)*N + Bidx]=cos(2*M_PI*Freqs_d[i]*BATvec_d[Bidx])*DMVec_d[Bidx]; FMatrix_d[(i+F/2)*N + Bidx]=sin(2*M_PI*Freqs_d[i]*BATvec_d[Bidx]); FMatrix_d[(i+3*F/4)*N + Bidx]=sin(2*M_PI*Freqs_d[i]*BATvec_d[Bidx])*DMVec_d[Bidx]; } } __global__ void fastmake_fmatrix(double *FMatrix_d,double *Freqs_d, double *BATvec_d, int Aheight,int Awidth) { // Each thread computes one element of F // by accumulating results into Cvalue int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; __syncthreads(); if(row >= Aheight || col >= Awidth) return; FMatrix_d[row * Awidth + col]=cos(2*M_PI*Freqs_d[col]*BATvec_d[row]); FMatrix_d[row*Awidth + col + Awidth/2]=sin(2*M_PI*Freqs_d[col]*BATvec_d[row]); } */ 
/* extern "C" void LRedGPUWrapper_(double *Freqs, double *resvec, double *BATvec, double *DMVec, double *Noise, double **FNF, double *NFd, int N, int RF,int DMF, int F, int incRED, int incDM){ double *Freqs_d; double *resvec_d; double *BATvec_d; double *Noise_d; double *DMVec_d; double *FMatrix_d; double *NF_d; double *FNF_d; double *NFd_d; double *FNFvec; FNFvec = (double*)malloc(sizeof(double)*F*F); hipError_t err; culaStatus status; err = hipMalloc( (void **)&Freqs_d, sizeof(double)*F ); checkCudaError(err); err = hipMalloc( (void **)&resvec_d, sizeof(double)*N ); checkCudaError(err); err = hipMalloc( (void **)&BATvec_d, sizeof(double)*N ); checkCudaError(err); err = hipMalloc( (void **)&Noise_d, sizeof(double)*N ); checkCudaError(err); err = hipMalloc( (void **)&DMVec_d, sizeof(double)*N ); checkCudaError(err); err = hipMalloc( (void **)&FMatrix_d, sizeof(double)*N*F ); checkCudaError(err); err = hipMalloc( (void **)&NF_d, sizeof(double)*N*F ); checkCudaError(err); err = hipMalloc( (void **)&FNF_d, sizeof(double)*F*F ); checkCudaError(err); err = hipMalloc( (void **)&NFd_d, sizeof(double)*F); checkCudaError(err); // copy vectors from CPU to GPU err = hipMemcpy( Freqs_d, Freqs, sizeof(double)*F, hipMemcpyHostToDevice ); checkCudaError(err); err = hipMemcpy(resvec_d, resvec, sizeof(double)*N, hipMemcpyHostToDevice ); checkCudaError(err); err = hipMemcpy(BATvec_d, BATvec, sizeof(double)*N, hipMemcpyHostToDevice ); checkCudaError(err); err = hipMemcpy( Noise_d, Noise, sizeof(double)*N, hipMemcpyHostToDevice ); checkCudaError(err); err = hipMemcpy( DMVec_d, DMVec, sizeof(double)*N, hipMemcpyHostToDevice ); checkCudaError(err); // make_fmatrix<<< N, 1 >>>(FMatrix_d,Freqs_d,BATvec_d,N,F); dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 dimGrid; dimGrid.x=(F + dimBlock.x - 1)/dimBlock.x; dimGrid.y = (N + dimBlock.y - 1)/dimBlock.y; //fastmake_fmatrix<<<dimGrid, dimBlock>>>(FMatrix_d,Freqs_d,BATvec_d,N,F); //make_fmatrix<<< N, 1 >>>(FMatrix_d,Freqs_d,BATvec_d,DMVec_d,N,RF,DMF, 0, incRED, incDM, 0); MatMulKernel<<<dimGrid, dimBlock>>>(N,N,N, F,Noise_d,FMatrix_d,NF_d); double alpha=1.0; double beta=0.0; status = culaDeviceDgemm('T', 'N', F, F, N, alpha, FMatrix_d, N, NF_d, N, beta, FNF_d, F); checkStatus(status); status = culaDeviceDgemv('T', N, F, alpha, NF_d, N, resvec_d, 1, beta, NFd_d, 1); checkStatus(status); err = hipMemcpy(FNFvec, FNF_d, sizeof(double)*F*F, hipMemcpyDeviceToHost); checkCudaError(err); err = hipMemcpy(NFd, NFd_d, sizeof(double)*F, hipMemcpyDeviceToHost); checkCudaError(err); for(int f1=0;f1<F; f1++){ for(int f2=0;f2<F; f2++){ FNF[f2][f1]=FNFvec[f1*F + f2]; } } hipFree(Freqs_d); hipFree(BATvec_d); hipFree(Noise_d); hipFree(FMatrix_d); hipFree(NF_d); hipFree(FNF_d); hipFree(resvec_d); hipFree(NFd_d); hipFree(DMVec_d); free(FNFvec); } */ /* // simple kernel function that calculates the TMatrix __global__ void make_Tmatrix(double *TMatrix_d, double *DMatrix_d, double *FMatrix_d, int N, int T, int D, int F) { // int row = blockIdx.y * blockDim.y + threadIdx.y; // int col = blockIdx.x * blockDim.x + threadIdx.x; __syncthreads(); // int Bidx = blockIdx.x; int Bidx = blockIdx.y * blockDim.y + threadIdx.y; int i = blockIdx.x * blockDim.x + threadIdx.x; // for(int i=0;i<T;i++){ if(i<D){ TMatrix_d[i*N + Bidx]=DMatrix_d[i*N + Bidx]; } else{ if(i==D)printf("i==D %i TMatrix_d[i*N + Bidx]=FMatrix_d[(i-D)*N + Bidx]; } // } int Bidx = blockIdx.x; for(int i=0;i<T;i++){ if(i<D){ TMatrix_d[i*N + Bidx]=DMatrix_d[i*N + Bidx]; } else{ //if(i==D)printf("i==D %i %g \n", Bidx, FMatrix_d[(i-D)*N + 
Bidx]); TMatrix_d[i*N + Bidx]=FMatrix_d[(i-D)*N + Bidx]; } } } */ // simple kernel function that adds powercoeff to TNT __global__ void addCoeffsKernel(int T, int D,int F,double *TNT_d, double *powercoeffs_d) { __syncthreads(); for(int i =0; i < F; i++){ TNT_d[(D+i)*T+D+i]+=1.0/powercoeffs_d[i]; } } extern "C" void NewLRedMarginGPUWrapper_(void *context, double *TNDMVec, double *Freqs, double *ObsFreqs, double *powercoeff, double *resvec, double *BATvec, double *DMVec, double *Noise, int *SysGroups, int N, int RF,int DMF, int BandNoiseCoeff, int GroupNoiseCoeff, int D, int F, int T, int incRED, int incDM, int incBandNoise, int incGroupNoise, int NTime, int NJumps, double *likevals, int incNGJitter, int numNGJitterEpochs, int *BandInfo, int ReplaceTMatrix){ //printf("entered 1\n"); double *Freqs_d; double *ObsFreqs_d; double *powercoeff_d; double *resvec_d; double *BATvec_d; double *Noise_d; double *DMVec_d; int *SysGroups_d; int *BandInfo_d; // double *FMatrix_d; double *DMatrix_d; // double *TMatrix_d; double *NT_d; double *TNT_d; double *NTd_d; hipError_t err; culaStatus status; // allocate memory on GPU err = hipMalloc( (void **)&Freqs_d, sizeof(double)*F ); checkCudaError(err); err = hipMalloc( (void **)&ObsFreqs_d, sizeof(double)*N ); checkCudaError(err); err = hipMalloc( (void **)&powercoeff_d, sizeof(double)*F ); checkCudaError(err); err = hipMalloc( (void **)&resvec_d, sizeof(double)*N ); checkCudaError(err); err = hipMalloc( (void **)&BATvec_d, sizeof(double)*N ); checkCudaError(err); err = hipMalloc( (void **)&Noise_d, sizeof(double)*N ); checkCudaError(err); err = hipMalloc( (void **)&DMVec_d, sizeof(double)*N ); checkCudaError(err); err = hipMalloc( (void **)&SysGroups_d, sizeof(int)*N ); checkCudaError(err); err = hipMalloc( (void **)&BandInfo_d, sizeof(int)*3*incBandNoise); checkCudaError(err); // err = hipMalloc( (void **)&FMatrix_d, sizeof(double)*N*F ); // checkCudaError(err); // err = hipMalloc( (void **)&TMatrix_d, sizeof(double)*N*T ); // checkCudaError(err); err = hipMalloc( (void **)&NT_d, sizeof(double)*N*T ); checkCudaError(err); err = hipMalloc( (void **)&TNT_d, sizeof(double)*T*T ); checkCudaError(err); err = hipMalloc( (void **)&NTd_d, sizeof(double)*T); checkCudaError(err); // copy vectors from CPU to GPU err = hipMemcpy( Freqs_d, Freqs, sizeof(double)*F, hipMemcpyHostToDevice ); checkCudaError(err); err = hipMemcpy( ObsFreqs_d, ObsFreqs, sizeof(double)*N, hipMemcpyHostToDevice ); checkCudaError(err); err = hipMemcpy( powercoeff_d, powercoeff, sizeof(double)*F, hipMemcpyHostToDevice ); checkCudaError(err); err = hipMemcpy(resvec_d, resvec, sizeof(double)*N, hipMemcpyHostToDevice ); checkCudaError(err); err = hipMemcpy(BATvec_d, BATvec, sizeof(double)*N, hipMemcpyHostToDevice ); checkCudaError(err); err = hipMemcpy( Noise_d, Noise, sizeof(double)*N, hipMemcpyHostToDevice ); checkCudaError(err); err = hipMemcpy( DMVec_d, DMVec, sizeof(double)*N, hipMemcpyHostToDevice ); checkCudaError(err); err = hipMemcpy( SysGroups_d, SysGroups, sizeof(int)*N, hipMemcpyHostToDevice ); checkCudaError(err); err = hipMemcpy( BandInfo_d, BandInfo, sizeof(int)*3*incBandNoise, hipMemcpyHostToDevice ); checkCudaError(err); dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 dimGrid; ///////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////Form the Design Matrix//////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////// //printf("entered 2\n"); if(D != 
NTime+NJumps){ err = hipMalloc( (void **)&DMatrix_d, sizeof(double)*N*D ); checkCudaError(err); err = hipMemcpy( DMatrix_d, TNDMVec, sizeof(double)*D*N, hipMemcpyHostToDevice ); checkCudaError(err); double *U_d; double *V_d; double *S_d; err = hipMalloc( (void **)&U_d, sizeof(double)*N*N ); checkCudaError(err); err = hipMalloc( (void **)&V_d, sizeof(double)*D*D ); checkCudaError(err); err = hipMalloc( (void **)&S_d, sizeof(double)*D ); checkCudaError(err); culaDeviceDgesvd('O','N', N, D, DMatrix_d, N, S_d, U_d, N, V_d, D); hipFree(V_d); hipFree(S_d); hipFree(U_d); hipDeviceSynchronize(); } /////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////Form the F Matrix///////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////// //printf("entered 3\n"); hipLaunchKernelGGL(( make_fmatrix), dim3(N), dim3(1) , 0, 0, GlobalTotalMatrix_d,Freqs_d,ObsFreqs_d, BATvec_d,DMVec_d,SysGroups_d, BandInfo_d, N,RF,DMF, BandNoiseCoeff, GroupNoiseCoeff, incRED, incDM, incBandNoise, incGroupNoise, ReplaceTMatrix, D, NTime,NJumps,DMatrix_d); hipDeviceSynchronize(); /////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////Add ECORR Matrix///////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////// /* if(incNGJitter > 0){ int NGCoeffStartPoint = RF+DMF+incGroupNoise*GroupNoiseCoeff+incBandNoise*BandNoiseCoeff; add_EcorrToFMatrix<<< N, 1 >>>(FMatrix_d, GlobalEMatrix_d, N, NGCoeffStartPoint, numNGJitterEpochs); hipDeviceSynchronize(); } */ /* /////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////Form the T Matrix///////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////// //printf("entered 4\n"); dimGrid.x=(T + dimBlock.x - 1)/dimBlock.x; dimGrid.y = (N + dimBlock.y - 1)/dimBlock.y; if(D != NTime+NJumps){ make_Tmatrix<<<N,1>>>(TMatrix_d, DMatrix_d, FMatrix_d, N, T, D, F); } else{ make_Tmatrix<<<N,1>>>(TMatrix_d, GlobalStaticDmat_d, FMatrix_d, N, T, D, F); } hipDeviceSynchronize(); */ /////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////Do the Algebra/////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////// //printf("entered 5 %i %i\n",T,N); dimGrid.x=(T + dimBlock.x - 1)/dimBlock.x; dimGrid.y = (N + dimBlock.y - 1)/dimBlock.y; hipLaunchKernelGGL(( MatMulKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, N,N,N, T,Noise_d,GlobalTotalMatrix_d,NT_d); //SimpleDiagMatMulKernel<<<1,1>>>(N, T, Noise_d, TMatrix_d, NT_d); hipDeviceSynchronize(); double alpha=1.0; double beta=0.0; status = culaDeviceDgemm('T', 'N', T, T, N, alpha, GlobalTotalMatrix_d, N, NT_d, N, beta, TNT_d, T); checkStatus(status); hipDeviceSynchronize(); status = culaDeviceDgemv('T', N, T, alpha, NT_d, N, resvec_d, 1, beta, NTd_d, 1); checkStatus(status); hipDeviceSynchronize(); dimGrid.x=(T + dimBlock.x - 1)/dimBlock.x; dimGrid.y = (T + dimBlock.y - 1)/dimBlock.y; hipLaunchKernelGGL(( addCoeffsKernel), dim3(1),dim3(1), 0, 0, T,D,F,TNT_d,powercoeff_d); hipDeviceSynchronize(); //printf("entered 6: %i \n", T); double *dettemp_d; double *tempval=new double[1]; err = hipMalloc( (void **)&dettemp_d, 
sizeof(double) ); checkCudaError(err); int carryOn=0; status = culaDeviceDpotrf('L', T, TNT_d, T); hipDeviceSynchronize(); //printf("entered 6.25\n"); checkStatusCarryOn(status,carryOn); hipDeviceSynchronize(); //printf("entered 6.5\n"); hipLaunchKernelGGL(( calc_det), dim3(1), dim3(1) , 0, 0, TNT_d, dettemp_d, T); err = hipMemcpy( tempval, dettemp_d, sizeof(double), hipMemcpyDeviceToHost); checkCudaError(err); likevals[0]=tempval[0]; hipDeviceSynchronize(); if(carryOn == 1){ hipFree(Freqs_d); hipFree(ObsFreqs_d); hipFree(powercoeff_d); hipFree(resvec_d); hipFree(BATvec_d); hipFree(Noise_d); hipFree(DMVec_d); hipFree(SysGroups_d); // hipFree(FMatrix_d); if(D != NTime+NJumps){hipFree(DMatrix_d);} // hipFree(TMatrix_d); hipFree(NT_d); hipFree(TNT_d); hipFree(NTd_d); hipFree(BandInfo_d); hipFree(dettemp_d); delete(tempval); return; } //printf("entered 7\n"); double *WorkVec_d; err = hipMalloc( (void **)&WorkVec_d, sizeof(double)*T ); checkCudaError(err); hipLaunchKernelGGL(( copyvec), dim3(T), dim3(1) , 0, 0, WorkVec_d, NTd_d, T); hipDeviceSynchronize(); status=culaDeviceDpotrs('L', T, 1, TNT_d, T, WorkVec_d, T); checkStatus(status); hipDeviceSynchronize(); double *freqlike_d; err = hipMalloc( (void **)&freqlike_d, sizeof(double)); checkCudaError(err); hipLaunchKernelGGL(( calc_DotLike), dim3(1), dim3(1) , 0, 0, WorkVec_d, NTd_d, T, freqlike_d); hipDeviceSynchronize(); err = hipMemcpy( tempval, freqlike_d, sizeof(double), hipMemcpyDeviceToHost); checkCudaError(err); likevals[1]=tempval[0]; //printf("entered 8\n"); hipFree(Freqs_d); hipFree(ObsFreqs_d); hipFree(powercoeff_d); hipFree(resvec_d); hipFree(BATvec_d); hipFree(Noise_d); hipFree(DMVec_d); hipFree(SysGroups_d); // hipFree(FMatrix_d); if(D != NTime+NJumps){hipFree(DMatrix_d);} // hipFree(TMatrix_d); hipFree(NT_d); hipFree(TNT_d); hipFree(NTd_d); hipFree(BandInfo_d); hipFree(WorkVec_d); hipFree(dettemp_d); hipFree(freqlike_d); delete(tempval); //printf("entered 9\n"); } extern "C" void copy_staticTmat_(double *T, int totalsize, int Nobs){ hipError_t err; err = hipMalloc( (void **)&GlobalTotalMatrix_d, sizeof(double)*totalsize*Nobs ); checkCudaError(err); // copy vectors from CPU to GPU err = hipMemcpy( GlobalTotalMatrix_d, T, sizeof(double)*totalsize*Nobs, hipMemcpyHostToDevice ); checkCudaError(err); return; } /* extern "C" void copy_floatgmat_(float *G, int N){ hipError_t err; // Allocate memory on GPU //printf("copying G\n"); err = hipMalloc( (void **)&GlobalGmatFloat_d, sizeof(float)*N ); checkCudaError(err); // copy vectors from CPU to GPU err = hipMemcpy( GlobalGmatFloat_d, G, sizeof(float)*N, hipMemcpyHostToDevice ); checkCudaError(err); return; } extern "C" void copy_gmat_(double *G, int N){ hipError_t err; // Allocate memory on GPU //printf("copying G\n"); err = hipMalloc( (void **)&GlobalGmat_d, sizeof(double)*N ); checkCudaError(err); // copy vectors from CPU to GPU err = hipMemcpy( GlobalGmat_d, G, sizeof(double)*N, hipMemcpyHostToDevice ); checkCudaError(err); return; } extern "C" void copy_staticgmat_(double *G, int M, int N){ hipError_t err; // Allocate memory on GPU //printf("copying G\n"); err = hipMalloc( (void **)&GlobalStaticGmat_d, sizeof(double)*N*M ); checkCudaError(err); // copy vectors from CPU to GPU err = hipMemcpy( GlobalStaticGmat_d, G, sizeof(double)*N*M, hipMemcpyHostToDevice ); checkCudaError(err); return; } extern "C" void copy_staticumat_(double *G, int M, int N){ hipError_t err; // Allocate memory on GPU //printf("copying G\n"); err = hipMalloc( (void **)&GlobalStaticUGmat_d, sizeof(double)*M*N ); 
checkCudaError(err); // copy vectors from CPU to GPU err = hipMemcpy( GlobalStaticUGmat_d, G, sizeof(double)*M*N, hipMemcpyHostToDevice ); checkCudaError(err); return; } extern "C" void copy_staticdmat_(double **TNDM, double *TNDMVec, int N, int D){ hipError_t err; // Allocate memory on GPU //printf("copying G\n"); err = hipMalloc( (void **)&GlobalStaticDmat_d, sizeof(double)*N*D ); checkCudaError(err); err = hipMemcpy(GlobalStaticDmat_d, TNDMVec, sizeof(double)*D*N, hipMemcpyHostToDevice ); checkCudaError(err); double *U_d; double *V_d; double *S_d; err = hipMalloc( (void **)&U_d, sizeof(double)*N*N ); checkCudaError(err); err = hipMalloc( (void **)&V_d, sizeof(double)*D*D ); checkCudaError(err); err = hipMalloc( (void **)&S_d, sizeof(double)*D ); checkCudaError(err); culaDeviceDgesvd('O','N', N, D, GlobalStaticDmat_d, N, S_d, U_d, N, V_d, D); hipFree(V_d); hipFree(S_d); hipFree(U_d); hipDeviceSynchronize(); } extern "C" void copy_staticECorrmat_(double *E, int EcorrSize, int Nobs){ hipError_t err; err = hipMalloc( (void **)&GlobalEMatrix_d, sizeof(double)*EcorrSize*Nobs ); checkCudaError(err); // copy vectors from CPU to GPU err = hipMemcpy( GlobalEMatrix_d, E, sizeof(double)*EcorrSize*Nobs, hipMemcpyHostToDevice ); checkCudaError(err); return; } */
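// ---------------------------------------------------------------------------
// Standalone sketch (not part of the file above): why calc_det sums
// 2*log(a[i*N+i]) after culaDeviceDpotrf. Dpotrf overwrites TNT with its
// Cholesky factor L (TNT = L L^T), so det(TNT) = prod_i L_ii^2 and
// log det(TNT) = 2 * sum_i log(L_ii), which is exactly what calc_det
// accumulates on the diagonal of the factored matrix. The 2x2 host-side
// check below illustrates that identity only; the variable names are ours.
// ---------------------------------------------------------------------------
#include <cassert>
#include <cmath>

int main() {
  // Symmetric positive-definite 2x2 example: A = [[4, 2], [2, 3]], det(A) = 8.
  const double a11 = 4.0, a12 = 2.0, a22 = 3.0;

  // Textbook lower-triangular Cholesky factor L of A.
  const double l11 = std::sqrt(a11);             // 2
  const double l21 = a12 / l11;                  // 1
  const double l22 = std::sqrt(a22 - l21 * l21); // sqrt(2)

  // Same accumulation as calc_det: twice the sum of log-diagonal entries.
  const double logdet_from_chol = 2.0 * (std::log(l11) + std::log(l22));
  const double logdet_direct    = std::log(a11 * a22 - a12 * a12);

  assert(std::fabs(logdet_from_chol - logdet_direct) < 1e-12);
  return 0;
}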
5672061bd7bee74b575c406da49ec801e89758e2.cu
#include <stdio.h> #include <stdlib.h> #include <gsl/gsl_sf_gamma.h> #include <cuda.h> #include <cuda_runtime.h> #include "dgesvd.h" #include <cula_lapack_device.h> #include <cula_blas_device.h> #define BLOCK_SIZE 16 //double *GlobalGmat_d; //double *GlobalStaticGmat_d; //double *GlobalStaticUGmat_d; //float *GlobalGmatFloat_d; //double *GlobalStaticDmat_d; double *GlobalTotalMatrix_d; // Matrices are stored in row-major order: // M(row, col) = *(M.elements + row * M.width + col) double iter_factorialGPU(unsigned int n) { double ret = 1; for(unsigned int i = 1; i <= n; ++i) ret *= i; return ret; } void checkStatus(culaStatus status) { char buf[256]; if(!status) return; culaGetErrorInfoString(status, culaGetErrorInfo(), buf, sizeof(buf)); printf("%s\n", buf); culaShutdown(); exit(EXIT_FAILURE); } void checkStatusCarryOn(culaStatus status, int &CarryOn) { char buf[256]; if(!status){ CarryOn=0; return; } culaGetErrorInfoString(status, culaGetErrorInfo(), buf, sizeof(buf)); //printf("%s\n", buf); CarryOn=1; culaShutdown(); culaStatus status2; status2 = culaInitialize(); } void checkCudaError(cudaError_t err) { if(!err) return; printf("%s\n", cudaGetErrorString(err)); culaShutdown(); exit(EXIT_FAILURE); } // simple kernel function that adds two vectors __global__ void vect_add(double *a, double *b, int N) { int Bidx = blockIdx.x; //a[Bidx*N+Tidx] = a[Bidx*N+Tidx] + b[Bidx*N+Tidx]; for(int i =0; i < N; i++){ a[Bidx*N+i] = a[Bidx*N+i] + b[Bidx*N+i]; } } // simple kernel function that calcs det of a matrix __global__ void calc_det(double *a, double *det, int N) { det[0]=0; for(int i =0; i < N; i++){ det[0]+=log(a[i*N+i]); } det[0]=det[0]*2; } // simple kernel function that calcs det of a matrix __global__ void Floatcalc_det(float *a, double *det, int N) { det[0]=0; for(int i =0; i < N; i++){ det[0]+=log(a[i*N+i]); } det[0]=det[0]*2; } // simple kernel function that calcs det of a matrix __global__ void calc_detFloat(float *a, double *det, int N) { det[0]=0; for(int i =0; i < N; i++){ det[0]+=log(a[i*N+i]); } det[0]=det[0]*2; } // simple kernel function that calcs det of a matrix __global__ void calc_DiagLike(double *Vec, double *Noise, int N, double *val) { val[0]=0; for(int i =0; i < N; i++){ val[0]+=Vec[i]*Vec[i]*Noise[i]; } } // simple kernel function that calcs det of a matrix __global__ void calc_DotLike(double *Vec1, double *Vec2, int N, double *val) { val[0]=0; for(int i =0; i < N; i++){ val[0]+=Vec1[i]*Vec2[i]; //printf("GPU copy %i %g %g\n", i,Vec1[i],Vec2[i]); } } // simple kernel function that calcs det of a matrix __global__ void copyvec(double *Vec1, double *Vec2, int N) { int Bidx = blockIdx.x; __syncthreads(); Vec1[Bidx]=Vec2[Bidx]; //printf("copy: %i %g\n",Bidx, Vec1[Bidx]); } /* __global__ void Makecov(double *A_d, double *BATvec, double *NoiseVec, double *SpecParm, int Aheight, int Awidth) { // Each thread computes one element of C // by accumulating results into Cvalue double LongestPeriod=1.0/pow(10.0,-5); // double flo=1.0/LongestPeriod; double modelalpha=SpecParm[1]; double gwampsquared=SpecParm[0]; double covconst=SpecParm[2]; int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; __syncthreads(); if(row >= Aheight || col >= Awidth) return; double timdiff= BATvec[row] - BATvec[col]; double tau=2.0*M_PI*fabs(timdiff); double covsum=0; for(int k=0; k <=10; k++){ double ret = 1; for(unsigned int i = 1; i <= 2*k; ++i){ ret *= (double)i; // printf("Ret: %i %g \n",i,ret); } 
covsum=covsum+pow(-1.0,k)*(pow(flo*tau,2*k))/(ret*(2*k+1-modelalpha)); //printf("covsum: %i %i %i %g \n",row,col,k,covsum); } A_d[row * Awidth + col]=gwampsquared*(covconst*pow((flo*tau),(modelalpha-1)) - covsum); if(row==col){ A_d[row * Awidth + col] += NoiseVec[row]; } } __global__ void MakeDMcov(double *A_d, double *BATvec, double *NoiseVec, double *DMVec, double *SpecParm, int Aheight, int Awidth) { // Each thread computes one element of C // by accumulating results into Cvalue double LongestPeriod=1.0/pow(10.0,-5); double flo=1.0/LongestPeriod; double gwampsquared=SpecParm[0]; double modelalpha=SpecParm[1]; double covconst=SpecParm[2]; double dmampsquared=SpecParm[3]; double dmmodelalpha=SpecParm[4]; double dmcovconst=SpecParm[5]; int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; __syncthreads(); if(row >= Aheight || col >= Awidth) return; double timdiff= BATvec[row] - BATvec[col]; double tau=2.0*M_PI*fabs(timdiff); double covsum=0; double dmcovsum=0; for(int k=0; k <=5; k++){ double ret = 1; for(unsigned int i = 1; i <= 2*k; ++i){ ret *= (double)i; } covsum=covsum+pow(-1.0,k)*(pow(flo*tau,2*k))/(ret*(2*k+1-modelalpha)); dmcovsum=dmcovsum+pow(-1.0,k)*(pow(flo*tau,2*k))/(ret*(2*k+1-dmmodelalpha)); } double gwpart=0; if(SpecParm[0] !=0 )gwpart=gwampsquared*(covconst*pow((flo*tau),(modelalpha-1)) - covsum); double dmpart=0; if(SpecParm[3] !=0 )dmpart=dmampsquared*(dmcovconst*pow((flo*tau),(dmmodelalpha-1)) - dmcovsum)*DMVec[row]*DMVec[col]; A_d[row * Awidth + col]= gwpart+dmpart; if(row==col){ A_d[row * Awidth + col] += NoiseVec[row]; } //printf("%i %i %g\n",row,col,A_d[row * Awidth + col]); } */ __global__ void MatMulKernel(int Arow,int Acol,int Brow, int Bcol,double *A,double *B,double *C) { int Crow=Arow; int Ccol=Bcol; double Ctemp = 0.0; int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; __syncthreads(); if(row < Arow && col < Bcol) { //if(row<32)printf("GPUNT: %i %i %g %g \n", row, col, B[col * Brow + row], A[row] ); Ctemp = A[row] * B[col * Brow + row]; //GGTest[col*N + row] C[col*Crow+row] = Ctemp; } // } __global__ void SimpleDiagMatMulKernel(int N,int T,double *Noise_d,double *TMatrix_d,double *NTMatrix_d) { __syncthreads(); for(int i=0;i<T; i++){ for(int j=0;j<N; j++){ //if(i ==28)printf("GPU SDMMK %i %i %g %g \n",i,j,TMatrix_d[i*N + j],Noise_d[j]); NTMatrix_d[i*N + j]=TMatrix_d[i*N + j]*Noise_d[j]; } } } /* extern "C" void WhiteMarginGPUWrapper_(void *context, double *TNDMVec, double *resvec, double *Noise, int N, int D, int NTime, int NJumps, double *likevals){ double *resvec_d; double *Noise_d; double *DMatrix_d; double *NT_d; double *TNT_d; double *NTd_d; cudaError_t err; culaStatus status; // allocate memory on GPU err = cudaMalloc( (void **)&resvec_d, sizeof(double)*N ); checkCudaError(err); err = cudaMalloc( (void **)&Noise_d, sizeof(double)*N ); checkCudaError(err); err = cudaMalloc( (void **)&DMatrix_d, sizeof(double)*N*D ); checkCudaError(err); err = cudaMalloc( (void **)&NT_d, sizeof(double)*N*D ); checkCudaError(err); err = cudaMalloc( (void **)&TNT_d, sizeof(double)*D*D ); checkCudaError(err); err = cudaMalloc( (void **)&NTd_d, sizeof(double)*D); checkCudaError(err); // copy vectors from CPU to GPU err = cudaMemcpy(resvec_d, resvec, sizeof(double)*N, cudaMemcpyHostToDevice ); checkCudaError(err); err = cudaMemcpy( Noise_d, Noise, sizeof(double)*N, cudaMemcpyHostToDevice ); checkCudaError(err); dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 dimGrid; 
///////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////Form the Design Matrix//////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////// if(D != NTime+NJumps){ err = cudaMemcpy( DMatrix_d, TNDMVec, sizeof(double)*D*N, cudaMemcpyHostToDevice ); checkCudaError(err); double *U_d; double *V_d; double *S_d; err = cudaMalloc( (void **)&U_d, sizeof(double)*N*N ); checkCudaError(err); err = cudaMalloc( (void **)&V_d, sizeof(double)*D*D ); checkCudaError(err); err = cudaMalloc( (void **)&S_d, sizeof(double)*D ); checkCudaError(err); culaDeviceDgesvd('O','N', N, D, DMatrix_d, N, S_d, U_d, N, V_d, D); cudaFree(V_d); cudaFree(S_d); cudaFree(U_d); cudaDeviceSynchronize(); } else{ DMatrix_d=GlobalStaticDmat_d; cudaDeviceSynchronize(); } /////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////Do the Algebra/////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////// //printf("entered 5 %i %i\n",T,N); dimGrid.x=(D + dimBlock.x - 1)/dimBlock.x; dimGrid.y = (N + dimBlock.y - 1)/dimBlock.y; MatMulKernel<<<dimGrid, dimBlock>>>(N,N,N, D,Noise_d,DMatrix_d,NT_d); //SimpleDiagMatMulKernel<<<1,1>>>(N, T, Noise_d, TMatrix_d, NT_d); cudaDeviceSynchronize(); double alpha=1.0; double beta=0.0; status = culaDeviceDgemm('T', 'N', D, D, N, alpha, DMatrix_d, N, NT_d, N, beta, TNT_d, D); checkStatus(status); cudaDeviceSynchronize(); status = culaDeviceDgemv('T', N, D, alpha, NT_d, N, resvec_d, 1, beta, NTd_d, 1); checkStatus(status); cudaDeviceSynchronize(); //printf("entered 6: %i \n", T); double *dettemp_d; double *tempval=new double[1]; err = cudaMalloc( (void **)&dettemp_d, sizeof(double) ); checkCudaError(err); int carryOn=0; status = culaDeviceDpotrf('L', D, TNT_d, D); cudaDeviceSynchronize(); //printf("entered 6.25\n"); checkStatusCarryOn(status,carryOn); cudaDeviceSynchronize(); //printf("entered 6.5\n"); calc_det<<< 1, 1 >>>( TNT_d, dettemp_d, D); err = cudaMemcpy( tempval, dettemp_d, sizeof(double), cudaMemcpyDeviceToHost); checkCudaError(err); likevals[0]=tempval[0]; cudaDeviceSynchronize(); if(carryOn == 1){ cudaFree(resvec_d); cudaFree(Noise_d); cudaFree(DMatrix_d); cudaFree(NT_d); cudaFree(TNT_d); cudaFree(NTd_d); cudaFree(dettemp_d); delete(tempval); return; } //printf("entered 7\n"); double *WorkVec_d; err = cudaMalloc( (void **)&WorkVec_d, sizeof(double)*D ); checkCudaError(err); copyvec<<< D, 1 >>>(WorkVec_d, NTd_d, D); cudaDeviceSynchronize(); status=culaDeviceDpotrs('L', D, 1, TNT_d, D, WorkVec_d, D); checkStatus(status); cudaDeviceSynchronize(); double *freqlike_d; err = cudaMalloc( (void **)&freqlike_d, sizeof(double)); checkCudaError(err); calc_DotLike<<< 1, 1 >>>(WorkVec_d, NTd_d, D, freqlike_d); cudaDeviceSynchronize(); err = cudaMemcpy( tempval, freqlike_d, sizeof(double), cudaMemcpyDeviceToHost); checkCudaError(err); likevals[1]=tempval[0]; cudaFree(resvec_d); cudaFree(Noise_d); cudaFree(DMatrix_d); cudaFree(NT_d); cudaFree(TNT_d); cudaFree(NTd_d); cudaFree(WorkVec_d); cudaFree(dettemp_d); delete(tempval); } */ // simple kernel function that calculates the FMatrix __global__ void make_fmatrix(double *TMatrix_d,double *Freqs_d, double *ObsFreqs_d, double *BATvec_d, double *DMVec_d, int *SysGroups_d, int *BandFreqs, int N,int RF, int DMF, int BandNoiseCoeff, int GroupNoiseCoeff, int incRED, int incDM, int incBandNoise, 
int incGroupNoise, int ReplaceTMatrix, int TimetoMargin, int numTime, int numJumps, double *DMatrix_d) { int Bidx = blockIdx.x; if(TimetoMargin != numJumps + numTime){ for(int i=0;i<TimetoMargin;i++){ TMatrix_d[i*N + Bidx]=DMatrix_d[i*N + Bidx]; } } int startpos=0; if(incRED !=0){ if(ReplaceTMatrix==0){ for(int i=0;i<RF/2;i++){ TMatrix_d[(TimetoMargin+i)*N + Bidx]=cos(2*M_PI*Freqs_d[i]*BATvec_d[Bidx]); TMatrix_d[(TimetoMargin+i+RF/2)*N + Bidx]=sin(2*M_PI*Freqs_d[i]*BATvec_d[Bidx]); } } startpos=RF; } if(incDM !=0){ if(ReplaceTMatrix==0){ for(int i=0;i<DMF/2;i++){ //if(Bidx==0)printf("D: %i %i %g %g \n", Bidx,i,1.0/Freqs_d[i], DMVec_d[Bidx]); TMatrix_d[(TimetoMargin+startpos+i)*N + Bidx]=cos(2*M_PI*Freqs_d[startpos+i]*BATvec_d[Bidx])*DMVec_d[Bidx]; TMatrix_d[(TimetoMargin+startpos+i+DMF/2)*N + Bidx]=sin(2*M_PI*Freqs_d[startpos+i]*BATvec_d[Bidx])*DMVec_d[Bidx]; } } startpos=startpos+DMF; } if(incBandNoise > 0){ for(int b = 0; b < incBandNoise; b++){ if(ReplaceTMatrix==0){ int startfreq = BandFreqs[b*3+0]; int stopfreq = BandFreqs[b*3+1]; int BandScale = BandFreqs[b*3+2]; for(int i=0;i<BandNoiseCoeff/2;i++){ if(ObsFreqs_d[Bidx] > startfreq && ObsFreqs_d[Bidx] < stopfreq){ TMatrix_d[(TimetoMargin+startpos+i)*N + Bidx]=cos(2*M_PI*Freqs_d[startpos+i]*BATvec_d[Bidx]); TMatrix_d[(TimetoMargin+startpos+i+BandNoiseCoeff/2)*N + Bidx]=sin(2*M_PI*Freqs_d[startpos+i]*BATvec_d[Bidx]); } else{ TMatrix_d[(TimetoMargin+startpos+i)*N + Bidx]=0; TMatrix_d[(TimetoMargin+startpos+i+BandNoiseCoeff/2)*N + Bidx]=0; } } } startpos=startpos+BandNoiseCoeff; } } //printf("In GPU : %i \n", incGroupNoise); if(incGroupNoise > 0){ for(int g = 0; g < incGroupNoise; g++){ for(int i=0;i<GroupNoiseCoeff/2;i++){ //printf("GPU Groups %i %i \n", Bidx, SysGroups_d[Bidx]); if(SysGroups_d[Bidx] == g+1){ TMatrix_d[(TimetoMargin+startpos+i)*N + Bidx]=cos(2*M_PI*Freqs_d[startpos+i]*BATvec_d[Bidx]); TMatrix_d[(TimetoMargin+startpos+i+GroupNoiseCoeff/2)*N + Bidx]=sin(2*M_PI*Freqs_d[startpos+i]*BATvec_d[Bidx]); } else{ TMatrix_d[(TimetoMargin+startpos+i)*N + Bidx]=0; TMatrix_d[(TimetoMargin+startpos+i+GroupNoiseCoeff/2)*N + Bidx]=0; } } startpos=startpos+GroupNoiseCoeff; } } } /* // simple kernel function that calculates the FMatrix __global__ void add_EcorrToFMatrix(double *FMatrix_d, double *EMatrix_d, int Nobs, int FSize, int EpochSize) { int Bidx = blockIdx.x; for(int i=0;i<EpochSize;i++){ FMatrix_d[(FSize+i)*Nobs + Bidx]= EMatrix_d[i*Nobs + Bidx]; } } */ /* // simple kernel function that calculates the FMatrix __global__ void make_DMfmatrix(double *FMatrix_d,double *Freqs_d, double *BATvec_d, double *DMVec_d, int N,int F) { int Bidx = blockIdx.x; for(int i=0;i<F/4;i++){ FMatrix_d[i*N + Bidx]=cos(2*M_PI*Freqs_d[i]*BATvec_d[Bidx]); FMatrix_d[(i+F/4)*N + Bidx]=cos(2*M_PI*Freqs_d[i]*BATvec_d[Bidx])*DMVec_d[Bidx]; FMatrix_d[(i+F/2)*N + Bidx]=sin(2*M_PI*Freqs_d[i]*BATvec_d[Bidx]); FMatrix_d[(i+3*F/4)*N + Bidx]=sin(2*M_PI*Freqs_d[i]*BATvec_d[Bidx])*DMVec_d[Bidx]; } } __global__ void fastmake_fmatrix(double *FMatrix_d,double *Freqs_d, double *BATvec_d, int Aheight,int Awidth) { // Each thread computes one element of F // by accumulating results into Cvalue int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; __syncthreads(); if(row >= Aheight || col >= Awidth) return; FMatrix_d[row * Awidth + col]=cos(2*M_PI*Freqs_d[col]*BATvec_d[row]); FMatrix_d[row*Awidth + col + Awidth/2]=sin(2*M_PI*Freqs_d[col]*BATvec_d[row]); } */ /* extern "C" void LRedGPUWrapper_(double *Freqs, double *resvec, double 
*BATvec, double *DMVec, double *Noise, double **FNF, double *NFd, int N, int RF,int DMF, int F, int incRED, int incDM){ double *Freqs_d; double *resvec_d; double *BATvec_d; double *Noise_d; double *DMVec_d; double *FMatrix_d; double *NF_d; double *FNF_d; double *NFd_d; double *FNFvec; FNFvec = (double*)malloc(sizeof(double)*F*F); cudaError_t err; culaStatus status; err = cudaMalloc( (void **)&Freqs_d, sizeof(double)*F ); checkCudaError(err); err = cudaMalloc( (void **)&resvec_d, sizeof(double)*N ); checkCudaError(err); err = cudaMalloc( (void **)&BATvec_d, sizeof(double)*N ); checkCudaError(err); err = cudaMalloc( (void **)&Noise_d, sizeof(double)*N ); checkCudaError(err); err = cudaMalloc( (void **)&DMVec_d, sizeof(double)*N ); checkCudaError(err); err = cudaMalloc( (void **)&FMatrix_d, sizeof(double)*N*F ); checkCudaError(err); err = cudaMalloc( (void **)&NF_d, sizeof(double)*N*F ); checkCudaError(err); err = cudaMalloc( (void **)&FNF_d, sizeof(double)*F*F ); checkCudaError(err); err = cudaMalloc( (void **)&NFd_d, sizeof(double)*F); checkCudaError(err); // copy vectors from CPU to GPU err = cudaMemcpy( Freqs_d, Freqs, sizeof(double)*F, cudaMemcpyHostToDevice ); checkCudaError(err); err = cudaMemcpy(resvec_d, resvec, sizeof(double)*N, cudaMemcpyHostToDevice ); checkCudaError(err); err = cudaMemcpy(BATvec_d, BATvec, sizeof(double)*N, cudaMemcpyHostToDevice ); checkCudaError(err); err = cudaMemcpy( Noise_d, Noise, sizeof(double)*N, cudaMemcpyHostToDevice ); checkCudaError(err); err = cudaMemcpy( DMVec_d, DMVec, sizeof(double)*N, cudaMemcpyHostToDevice ); checkCudaError(err); // make_fmatrix<<< N, 1 >>>(FMatrix_d,Freqs_d,BATvec_d,N,F); dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 dimGrid; dimGrid.x=(F + dimBlock.x - 1)/dimBlock.x; dimGrid.y = (N + dimBlock.y - 1)/dimBlock.y; //fastmake_fmatrix<<<dimGrid, dimBlock>>>(FMatrix_d,Freqs_d,BATvec_d,N,F); //make_fmatrix<<< N, 1 >>>(FMatrix_d,Freqs_d,BATvec_d,DMVec_d,N,RF,DMF, 0, incRED, incDM, 0); MatMulKernel<<<dimGrid, dimBlock>>>(N,N,N, F,Noise_d,FMatrix_d,NF_d); double alpha=1.0; double beta=0.0; status = culaDeviceDgemm('T', 'N', F, F, N, alpha, FMatrix_d, N, NF_d, N, beta, FNF_d, F); checkStatus(status); status = culaDeviceDgemv('T', N, F, alpha, NF_d, N, resvec_d, 1, beta, NFd_d, 1); checkStatus(status); err = cudaMemcpy(FNFvec, FNF_d, sizeof(double)*F*F, cudaMemcpyDeviceToHost); checkCudaError(err); err = cudaMemcpy(NFd, NFd_d, sizeof(double)*F, cudaMemcpyDeviceToHost); checkCudaError(err); for(int f1=0;f1<F; f1++){ for(int f2=0;f2<F; f2++){ FNF[f2][f1]=FNFvec[f1*F + f2]; } } cudaFree(Freqs_d); cudaFree(BATvec_d); cudaFree(Noise_d); cudaFree(FMatrix_d); cudaFree(NF_d); cudaFree(FNF_d); cudaFree(resvec_d); cudaFree(NFd_d); cudaFree(DMVec_d); free(FNFvec); } */ /* // simple kernel function that calculates the TMatrix __global__ void make_Tmatrix(double *TMatrix_d, double *DMatrix_d, double *FMatrix_d, int N, int T, int D, int F) { // int row = blockIdx.y * blockDim.y + threadIdx.y; // int col = blockIdx.x * blockDim.x + threadIdx.x; __syncthreads(); // int Bidx = blockIdx.x; int Bidx = blockIdx.y * blockDim.y + threadIdx.y; int i = blockIdx.x * blockDim.x + threadIdx.x; // for(int i=0;i<T;i++){ if(i<D){ TMatrix_d[i*N + Bidx]=DMatrix_d[i*N + Bidx]; } else{ if(i==D)printf("i==D %i TMatrix_d[i*N + Bidx]=FMatrix_d[(i-D)*N + Bidx]; } // } int Bidx = blockIdx.x; for(int i=0;i<T;i++){ if(i<D){ TMatrix_d[i*N + Bidx]=DMatrix_d[i*N + Bidx]; } else{ //if(i==D)printf("i==D %i %g \n", Bidx, FMatrix_d[(i-D)*N + Bidx]); TMatrix_d[i*N + 
Bidx]=FMatrix_d[(i-D)*N + Bidx]; } } } */ // simple kernel function that adds powercoeff to TNT __global__ void addCoeffsKernel(int T, int D,int F,double *TNT_d, double *powercoeffs_d) { __syncthreads(); for(int i =0; i < F; i++){ TNT_d[(D+i)*T+D+i]+=1.0/powercoeffs_d[i]; } } extern "C" void NewLRedMarginGPUWrapper_(void *context, double *TNDMVec, double *Freqs, double *ObsFreqs, double *powercoeff, double *resvec, double *BATvec, double *DMVec, double *Noise, int *SysGroups, int N, int RF,int DMF, int BandNoiseCoeff, int GroupNoiseCoeff, int D, int F, int T, int incRED, int incDM, int incBandNoise, int incGroupNoise, int NTime, int NJumps, double *likevals, int incNGJitter, int numNGJitterEpochs, int *BandInfo, int ReplaceTMatrix){ //printf("entered 1\n"); double *Freqs_d; double *ObsFreqs_d; double *powercoeff_d; double *resvec_d; double *BATvec_d; double *Noise_d; double *DMVec_d; int *SysGroups_d; int *BandInfo_d; // double *FMatrix_d; double *DMatrix_d; // double *TMatrix_d; double *NT_d; double *TNT_d; double *NTd_d; cudaError_t err; culaStatus status; // allocate memory on GPU err = cudaMalloc( (void **)&Freqs_d, sizeof(double)*F ); checkCudaError(err); err = cudaMalloc( (void **)&ObsFreqs_d, sizeof(double)*N ); checkCudaError(err); err = cudaMalloc( (void **)&powercoeff_d, sizeof(double)*F ); checkCudaError(err); err = cudaMalloc( (void **)&resvec_d, sizeof(double)*N ); checkCudaError(err); err = cudaMalloc( (void **)&BATvec_d, sizeof(double)*N ); checkCudaError(err); err = cudaMalloc( (void **)&Noise_d, sizeof(double)*N ); checkCudaError(err); err = cudaMalloc( (void **)&DMVec_d, sizeof(double)*N ); checkCudaError(err); err = cudaMalloc( (void **)&SysGroups_d, sizeof(int)*N ); checkCudaError(err); err = cudaMalloc( (void **)&BandInfo_d, sizeof(int)*3*incBandNoise); checkCudaError(err); // err = cudaMalloc( (void **)&FMatrix_d, sizeof(double)*N*F ); // checkCudaError(err); // err = cudaMalloc( (void **)&TMatrix_d, sizeof(double)*N*T ); // checkCudaError(err); err = cudaMalloc( (void **)&NT_d, sizeof(double)*N*T ); checkCudaError(err); err = cudaMalloc( (void **)&TNT_d, sizeof(double)*T*T ); checkCudaError(err); err = cudaMalloc( (void **)&NTd_d, sizeof(double)*T); checkCudaError(err); // copy vectors from CPU to GPU err = cudaMemcpy( Freqs_d, Freqs, sizeof(double)*F, cudaMemcpyHostToDevice ); checkCudaError(err); err = cudaMemcpy( ObsFreqs_d, ObsFreqs, sizeof(double)*N, cudaMemcpyHostToDevice ); checkCudaError(err); err = cudaMemcpy( powercoeff_d, powercoeff, sizeof(double)*F, cudaMemcpyHostToDevice ); checkCudaError(err); err = cudaMemcpy(resvec_d, resvec, sizeof(double)*N, cudaMemcpyHostToDevice ); checkCudaError(err); err = cudaMemcpy(BATvec_d, BATvec, sizeof(double)*N, cudaMemcpyHostToDevice ); checkCudaError(err); err = cudaMemcpy( Noise_d, Noise, sizeof(double)*N, cudaMemcpyHostToDevice ); checkCudaError(err); err = cudaMemcpy( DMVec_d, DMVec, sizeof(double)*N, cudaMemcpyHostToDevice ); checkCudaError(err); err = cudaMemcpy( SysGroups_d, SysGroups, sizeof(int)*N, cudaMemcpyHostToDevice ); checkCudaError(err); err = cudaMemcpy( BandInfo_d, BandInfo, sizeof(int)*3*incBandNoise, cudaMemcpyHostToDevice ); checkCudaError(err); dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 dimGrid; ///////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////Form the Design Matrix//////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////// //printf("entered 2\n"); if(D 
!= NTime+NJumps){ err = cudaMalloc( (void **)&DMatrix_d, sizeof(double)*N*D ); checkCudaError(err); err = cudaMemcpy( DMatrix_d, TNDMVec, sizeof(double)*D*N, cudaMemcpyHostToDevice ); checkCudaError(err); double *U_d; double *V_d; double *S_d; err = cudaMalloc( (void **)&U_d, sizeof(double)*N*N ); checkCudaError(err); err = cudaMalloc( (void **)&V_d, sizeof(double)*D*D ); checkCudaError(err); err = cudaMalloc( (void **)&S_d, sizeof(double)*D ); checkCudaError(err); culaDeviceDgesvd('O','N', N, D, DMatrix_d, N, S_d, U_d, N, V_d, D); cudaFree(V_d); cudaFree(S_d); cudaFree(U_d); cudaDeviceSynchronize(); } /////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////Form the F Matrix///////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////// //printf("entered 3\n"); make_fmatrix<<< N, 1 >>>(GlobalTotalMatrix_d,Freqs_d,ObsFreqs_d, BATvec_d,DMVec_d,SysGroups_d, BandInfo_d, N,RF,DMF, BandNoiseCoeff, GroupNoiseCoeff, incRED, incDM, incBandNoise, incGroupNoise, ReplaceTMatrix, D, NTime,NJumps,DMatrix_d); cudaDeviceSynchronize(); /////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////Add ECORR Matrix///////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////// /* if(incNGJitter > 0){ int NGCoeffStartPoint = RF+DMF+incGroupNoise*GroupNoiseCoeff+incBandNoise*BandNoiseCoeff; add_EcorrToFMatrix<<< N, 1 >>>(FMatrix_d, GlobalEMatrix_d, N, NGCoeffStartPoint, numNGJitterEpochs); cudaDeviceSynchronize(); } */ /* /////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////Form the T Matrix///////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////// //printf("entered 4\n"); dimGrid.x=(T + dimBlock.x - 1)/dimBlock.x; dimGrid.y = (N + dimBlock.y - 1)/dimBlock.y; if(D != NTime+NJumps){ make_Tmatrix<<<N,1>>>(TMatrix_d, DMatrix_d, FMatrix_d, N, T, D, F); } else{ make_Tmatrix<<<N,1>>>(TMatrix_d, GlobalStaticDmat_d, FMatrix_d, N, T, D, F); } cudaDeviceSynchronize(); */ /////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////Do the Algebra/////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////// //printf("entered 5 %i %i\n",T,N); dimGrid.x=(T + dimBlock.x - 1)/dimBlock.x; dimGrid.y = (N + dimBlock.y - 1)/dimBlock.y; MatMulKernel<<<dimGrid, dimBlock>>>(N,N,N, T,Noise_d,GlobalTotalMatrix_d,NT_d); //SimpleDiagMatMulKernel<<<1,1>>>(N, T, Noise_d, TMatrix_d, NT_d); cudaDeviceSynchronize(); double alpha=1.0; double beta=0.0; status = culaDeviceDgemm('T', 'N', T, T, N, alpha, GlobalTotalMatrix_d, N, NT_d, N, beta, TNT_d, T); checkStatus(status); cudaDeviceSynchronize(); status = culaDeviceDgemv('T', N, T, alpha, NT_d, N, resvec_d, 1, beta, NTd_d, 1); checkStatus(status); cudaDeviceSynchronize(); dimGrid.x=(T + dimBlock.x - 1)/dimBlock.x; dimGrid.y = (T + dimBlock.y - 1)/dimBlock.y; addCoeffsKernel<<<1,1>>>(T,D,F,TNT_d,powercoeff_d); cudaDeviceSynchronize(); //printf("entered 6: %i \n", T); double *dettemp_d; double *tempval=new double[1]; err = cudaMalloc( (void **)&dettemp_d, sizeof(double) ); checkCudaError(err); int carryOn=0; status = culaDeviceDpotrf('L', T, TNT_d, T); 
cudaDeviceSynchronize(); //printf("entered 6.25\n"); checkStatusCarryOn(status,carryOn); cudaDeviceSynchronize(); //printf("entered 6.5\n"); calc_det<<< 1, 1 >>>( TNT_d, dettemp_d, T); err = cudaMemcpy( tempval, dettemp_d, sizeof(double), cudaMemcpyDeviceToHost); checkCudaError(err); likevals[0]=tempval[0]; cudaDeviceSynchronize(); if(carryOn == 1){ cudaFree(Freqs_d); cudaFree(ObsFreqs_d); cudaFree(powercoeff_d); cudaFree(resvec_d); cudaFree(BATvec_d); cudaFree(Noise_d); cudaFree(DMVec_d); cudaFree(SysGroups_d); // cudaFree(FMatrix_d); if(D != NTime+NJumps){cudaFree(DMatrix_d);} // cudaFree(TMatrix_d); cudaFree(NT_d); cudaFree(TNT_d); cudaFree(NTd_d); cudaFree(BandInfo_d); cudaFree(dettemp_d); delete(tempval); return; } //printf("entered 7\n"); double *WorkVec_d; err = cudaMalloc( (void **)&WorkVec_d, sizeof(double)*T ); checkCudaError(err); copyvec<<< T, 1 >>>(WorkVec_d, NTd_d, T); cudaDeviceSynchronize(); status=culaDeviceDpotrs('L', T, 1, TNT_d, T, WorkVec_d, T); checkStatus(status); cudaDeviceSynchronize(); double *freqlike_d; err = cudaMalloc( (void **)&freqlike_d, sizeof(double)); checkCudaError(err); calc_DotLike<<< 1, 1 >>>(WorkVec_d, NTd_d, T, freqlike_d); cudaDeviceSynchronize(); err = cudaMemcpy( tempval, freqlike_d, sizeof(double), cudaMemcpyDeviceToHost); checkCudaError(err); likevals[1]=tempval[0]; //printf("entered 8\n"); cudaFree(Freqs_d); cudaFree(ObsFreqs_d); cudaFree(powercoeff_d); cudaFree(resvec_d); cudaFree(BATvec_d); cudaFree(Noise_d); cudaFree(DMVec_d); cudaFree(SysGroups_d); // cudaFree(FMatrix_d); if(D != NTime+NJumps){cudaFree(DMatrix_d);} // cudaFree(TMatrix_d); cudaFree(NT_d); cudaFree(TNT_d); cudaFree(NTd_d); cudaFree(BandInfo_d); cudaFree(WorkVec_d); cudaFree(dettemp_d); cudaFree(freqlike_d); delete(tempval); //printf("entered 9\n"); } extern "C" void copy_staticTmat_(double *T, int totalsize, int Nobs){ cudaError_t err; err = cudaMalloc( (void **)&GlobalTotalMatrix_d, sizeof(double)*totalsize*Nobs ); checkCudaError(err); // copy vectors from CPU to GPU err = cudaMemcpy( GlobalTotalMatrix_d, T, sizeof(double)*totalsize*Nobs, cudaMemcpyHostToDevice ); checkCudaError(err); return; } /* extern "C" void copy_floatgmat_(float *G, int N){ cudaError_t err; // Allocate memory on GPU //printf("copying G\n"); err = cudaMalloc( (void **)&GlobalGmatFloat_d, sizeof(float)*N ); checkCudaError(err); // copy vectors from CPU to GPU err = cudaMemcpy( GlobalGmatFloat_d, G, sizeof(float)*N, cudaMemcpyHostToDevice ); checkCudaError(err); return; } extern "C" void copy_gmat_(double *G, int N){ cudaError_t err; // Allocate memory on GPU //printf("copying G\n"); err = cudaMalloc( (void **)&GlobalGmat_d, sizeof(double)*N ); checkCudaError(err); // copy vectors from CPU to GPU err = cudaMemcpy( GlobalGmat_d, G, sizeof(double)*N, cudaMemcpyHostToDevice ); checkCudaError(err); return; } extern "C" void copy_staticgmat_(double *G, int M, int N){ cudaError_t err; // Allocate memory on GPU //printf("copying G\n"); err = cudaMalloc( (void **)&GlobalStaticGmat_d, sizeof(double)*N*M ); checkCudaError(err); // copy vectors from CPU to GPU err = cudaMemcpy( GlobalStaticGmat_d, G, sizeof(double)*N*M, cudaMemcpyHostToDevice ); checkCudaError(err); return; } extern "C" void copy_staticumat_(double *G, int M, int N){ cudaError_t err; // Allocate memory on GPU //printf("copying G\n"); err = cudaMalloc( (void **)&GlobalStaticUGmat_d, sizeof(double)*M*N ); checkCudaError(err); // copy vectors from CPU to GPU err = cudaMemcpy( GlobalStaticUGmat_d, G, sizeof(double)*M*N, cudaMemcpyHostToDevice ); 
checkCudaError(err); return; } extern "C" void copy_staticdmat_(double **TNDM, double *TNDMVec, int N, int D){ cudaError_t err; // Allocate memory on GPU //printf("copying G\n"); err = cudaMalloc( (void **)&GlobalStaticDmat_d, sizeof(double)*N*D ); checkCudaError(err); err = cudaMemcpy(GlobalStaticDmat_d, TNDMVec, sizeof(double)*D*N, cudaMemcpyHostToDevice ); checkCudaError(err); double *U_d; double *V_d; double *S_d; err = cudaMalloc( (void **)&U_d, sizeof(double)*N*N ); checkCudaError(err); err = cudaMalloc( (void **)&V_d, sizeof(double)*D*D ); checkCudaError(err); err = cudaMalloc( (void **)&S_d, sizeof(double)*D ); checkCudaError(err); culaDeviceDgesvd('O','N', N, D, GlobalStaticDmat_d, N, S_d, U_d, N, V_d, D); cudaFree(V_d); cudaFree(S_d); cudaFree(U_d); cudaDeviceSynchronize(); } extern "C" void copy_staticECorrmat_(double *E, int EcorrSize, int Nobs){ cudaError_t err; err = cudaMalloc( (void **)&GlobalEMatrix_d, sizeof(double)*EcorrSize*Nobs ); checkCudaError(err); // copy vectors from CPU to GPU err = cudaMemcpy( GlobalEMatrix_d, E, sizeof(double)*EcorrSize*Nobs, cudaMemcpyHostToDevice ); checkCudaError(err); return; } */
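// Editor's note: calc_det, copyvec and calc_DotLike are called above but defined elsewhere in
// this file (not shown in this excerpt). The two single-thread kernels below are a minimal,
// hedged sketch of what such helpers typically compute after culaDeviceDpotrf: the
// log-determinant accumulated from the diagonal of the lower-triangular Cholesky factor, and
// the dot product that forms the quadratic term of the likelihood. They are illustrations,
// not the original implementations; the sketch_ prefix marks them as assumptions.
#include <math.h>

// L: N x N Cholesky factor with leading dimension N, as left in place by *potrf.
// On exit, *logdet = sum_i 2*log(L[i][i]) = log det(A), matching the <<<1,1>>> launch
// style of calc_det in the wrapper above.
__global__ void sketch_calc_det(const double *L, double *logdet, int N)
{
    double acc = 0.0;
    for (int i = 0; i < N; i++)
        acc += 2.0 * log(L[i * N + i]);
    *logdet = acc;
}

// Dot product of two length-N device vectors, e.g. WorkVec . NTd in the wrapper above.
__global__ void sketch_calc_DotLike(const double *a, const double *b, int N, double *out)
{
    double acc = 0.0;
    for (int i = 0; i < N; i++)
        acc += a[i] * b[i];
    *out = acc;
}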
f35f79650f10c2045da423d8e07a9da975dd9d76.hip
// !!! This is a file automatically generated by hipify!!! /* * nvbio * Copyright (c) 2011-2014, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ // sample_kmers.h // #include "sample_kmers.h" #include "utils.h" #include <nvbio/basic/pipeline_context.h> #include <nvbio/basic/numbers.h> #include <nvbio/basic/bloom_filter.h> #include <nvbio/basic/primitives.h> #include <nvbio/basic/console.h> #include <nvbio/basic/timer.h> #include <nvbio/basic/threads.h> #include <nvbio/basic/system.h> #include <nvbio/basic/exceptions.h> #include <nvbio/basic/cuda/ldg.h> #include <nvbio/basic/cuda/arch.h> #include <nvbio/io/sequence/sequence.h> #include <nvbio/strings/prefetcher.h> #include <stdio.h> #include <stdlib.h> using namespace nvbio; /// /// A functor to sample kmers and insert them in a Bloom filter /// template <typename string_set_type, typename filter_type> struct SampleKmersFunctor { /// constructor /// ///\param _k kmer length ///\param _alpha the sampling frequency ///\param _string_set the input string set to sample ///\param _filter the kmer Bloom filter /// NVBIO_HOST_DEVICE SampleKmersFunctor( const uint32 _k, const float _alpha, const string_set_type _string_set, filter_type _filter) : k(_k), kmask( (uint64(1u) << (k*2))-1u ), alpha( _alpha ), string_set( _string_set ), filter(_filter) {} /// functor operator /// ///\param i input string index /// NVBIO_HOST_DEVICE void operator() (const uint32 i) const { typedef typename string_set_type::string_type string_type; typedef typename string_traits<string_type>::forward_iterator forward_iterator; // fetch the i-th string const string_type string = string_set[i]; const uint32 len = length( string ); if (len < k) return; // build a forward string iterator forward_iterator it( string.begin() ); // start with an empty kmer uint64 kmer = 0u; uint32 kmer_len = 0u; // initialie a random number generator LCG_random random( hash(i) ); for (uint32 j = 0; j < len; ++j) { // fetch the next character const uint8 c = *it; ++it; if (c < 4) // make sure this is not an N { kmer |= c; // insert the new character at the end of the kmer (in a 
big-endian encoding) if (kmer_len < k) kmer_len++; if (kmer_len >= k) // check whether we have an actual 'k'-mer { if (float( random.next() ) / float(LCG_random::MAX) < alpha) { // insert the kmer filter.insert( kmer ); } } // shift the kmer to the right, dropping the last symbol kmer <<= 2; kmer &= kmask; } else { // an N, skip all k-mers containing it it += k-1; j += k-1; // and reset the kmer kmer = 0u; kmer_len = 0u; } } } const uint32 k; const uint64 kmask; const float alpha; string_set_type string_set; mutable filter_type filter; }; // process the next batch // bool SampleKmersStage::process(PipelineContext& context) { typedef nvbio::io::SequenceDataAccess<DNA_N>::sequence_string_set_type string_set_type; // declare the Bloom filter type typedef nvbio::blocked_bloom_filter<hash_functor1, hash_functor2, uint64_2*> filter_type; typedef SampleKmersFunctor<string_set_type,filter_type> functor_type; // fetch the input nvbio::io::SequenceDataHost* h_read_data = context.input<nvbio::io::SequenceDataHost>( 0 ); float time = 0.0f; // introduce a timing scope try { const nvbio::ScopedTimer<float> timer( &time ); if (device >= 0) { // // Device (GPU) path // // set the device hipSetDevice( device ); // copy it to the device nvbio::io::SequenceDataDevice d_read_data( *h_read_data ); // build a view const nvbio::io::SequenceDataAccess<DNA_N> d_read_view( d_read_data ); // build the Bloom filter filter_type filter( SAMPLED_KMERS_FILTER_K, filter_size, (uint64_2*)filter_storage ); //filter_type filter( filter_size, filter_storage ); // build the kmer sampling functor const functor_type kmer_filter( k, alpha, d_read_view.sequence_string_set(), filter ); device_for_each( d_read_view.size(), kmer_filter ); hipDeviceSynchronize(); cuda::check_error("sample-kmers"); } else { // // Host (CPU) path // omp_set_num_threads( -device ); // build a view const io::SequenceDataAccess<DNA_N> h_read_view( *h_read_data ); // build the Bloom filter filter_type filter( SAMPLED_KMERS_FILTER_K, filter_size, (uint64_2*)filter_storage ); // build the kmer sampling functor const functor_type kmer_filter( k, alpha, h_read_view.sequence_string_set(), filter ); host_for_each( h_read_view.size(), kmer_filter ); } } catch (nvbio::cuda_error &e) { log_error(stderr, "[SampleKmersStage] caught a nvbio::cuda_error exception:\n"); log_error(stderr, " %s\n", e.what()); exit(1); } catch (nvbio::bad_alloc &e) { log_error(stderr, "[SampleKmersStage] caught a nvbio::bad_alloc exception:\n"); log_error(stderr, " %s\n", e.what()); exit(1); } catch (nvbio::logic_error &e) { log_error(stderr, "[SampleKmersStage] caught a nvbio::logic_error exception:\n"); log_error(stderr, " %s\n", e.what()); exit(1); } catch (nvbio::runtime_error &e) { log_error(stderr, "[SampleKmersStage] caught a nvbio::runtime_error exception:\n"); log_error(stderr, " %s\n", e.what()); exit(1); } catch (thrust::system::system_error &e) { log_error(stderr, "[SampleKmersStage] caught a thrust::system_error exception:\n"); log_error(stderr, " %s\n", e.what()); exit(1); } catch (std::bad_alloc &e) { log_error(stderr, "[SampleKmersStage] caught a std::bad_alloc exception:\n"); log_error(stderr, " %s\n", e.what()); exit(1); } catch (std::logic_error &e) { log_error(stderr, "[SampleKmersStage] caught a std::logic_error exception:\n"); log_error(stderr, " %s\n", e.what()); exit(1); } catch (std::runtime_error &e) { log_error(stderr, "[SampleKmersStage] caught a std::runtime_error exception:\n"); log_error(stderr, " %s\n", e.what()); exit(1); } catch (...) 
{ log_error(stderr, "[SampleKmersStage] caught an unknown exception!\n"); exit(1); } // update the time stats stats->m_mutex.lock(); stats->m_time += time; log_info(stderr, "\r processed reads [%llu, %llu] (%.1fM / %.2fG bps, %.1fK reads/s, %.1fM bps/s - %s<%d>) ", stats->m_reads, stats->m_reads + h_read_data->size(), 1.0e-6f * (h_read_data->bps()), 1.0e-9f * (stats->m_bps + h_read_data->bps()), stats->m_time ? (1.0e-3f * (stats->m_reads + h_read_data->size())) / stats->m_time : 0.0f, stats->m_time ? (1.0e-6f * (stats->m_bps + h_read_data->bps() )) / stats->m_time : 0.0f, device >= 0 ? "gpu" : "cpu", device >= 0 ? device : -device ); log_debug_cont(stderr, "\n"); log_debug(stderr," peak memory : %.1f GB\n", float( peak_resident_memory() ) / float(1024*1024*1024)); stats->m_reads += h_read_data->size(); stats->m_bps += h_read_data->bps(); stats->m_mutex.unlock(); return true; } /// /// A functor to sample kmers and insert them in a Bloom filter /// template <typename string_set_type, typename sampled_filter_type, typename trusted_filter_type, typename threshold_type> struct TrustedKmersFunctor { /// constructor /// ///\param _k kmer length ///\param _alpha the sampling frequency ///\param _string_set the input string set to sample ///\param _filter the kmer Bloom filter /// NVBIO_HOST_DEVICE TrustedKmersFunctor( const uint32 _k, const string_set_type _string_set, const sampled_filter_type _sampled_filter, trusted_filter_type _trusted_filter, const threshold_type _threshold) : k(_k), kmask( (uint64(1u) << (k*2))-1u ), string_set( _string_set ), sampled_filter(_sampled_filter), trusted_filter(_trusted_filter), threshold(_threshold) {} /// functor operator /// ///\param i input string index /// NVBIO_HOST_DEVICE void operator() (const uint32 i) const { typedef typename string_set_type::string_type string_type; typedef nvbio::StringPrefetcher< string_type, nvbio::lmem_cache_tag<MAX_READ_LENGTH> > string_prefetcher_type; typedef typename string_prefetcher_type::string_type local_string_type; typedef typename nvbio::string_traits<local_string_type>::forward_iterator forward_iterator; //bool occur[MAX_READ_LENGTH]; uint32 occur_storage[MAX_READ_LENGTH/32]; nvbio::PackedStream<uint32*,uint8,1u,false> occur( occur_storage ); // instantiate a prefetcher string_prefetcher_type string_prefetcher; // fetch the i-th string //const string_type string = string_set[i]; const local_string_type string = string_prefetcher.load( string_set[i] ); const uint32 len = length( string ); if (len < k) return; // build a forward string iterator forward_iterator it( string.begin() ); // start with an empty kmer uint64 kmer = 0u; uint32 kmer_len = 0u; const uint32 occur_cnt = len - k + 1; // initialize all to false for (uint32 j = 0; j < (occur_cnt+31)/32; ++j) occur_storage[j] = 0u; // mark occurring kmers for (uint32 j = 0; j < len; ++j) { // fetch the next character const uint8 c = *it; ++it; if (c < 4) // make sure this is not an N { kmer |= c; // insert the new character at the end of the kmer (in a big-endian encoding) if (kmer_len < k) kmer_len++; if (kmer_len >= k) // check whether we have an actual 'k'-mer { if (sampled_filter[ kmer ]) occur[j - k + 1] = true; } // shift the kmer to the right, dropping the last symbol kmer <<= 2; kmer &= kmask; } else { // an N, skip all kmers containing it it += k-1; j += k-1; // and reset the kmer kmer = 0u; kmer_len = 0u; } } // mark trusted kmers int32 zero_cnt = 0; int32 one_cnt = 0; // reset the forward iterator it = forward_iterator( string.begin() ); // start with an 
empty kmer kmer = 0u; kmer_len = 0u; // keep a k-bits mask of trusted positions const uint64 trusted_mask = (uint64(1u) << k) - 1u; uint64 trusted = 0u; for (uint32 j = 0; j < len; ++j) { if (j >= k) { if (occur[j - k]) --one_cnt; else --zero_cnt; } if (j < occur_cnt) { if (occur[j]) ++one_cnt; else ++zero_cnt; } const int32 sum = one_cnt + zero_cnt; //if (qual[j] <= bad_quality) //{ // trusted[j] = false; // continue ; //} trusted |= (one_cnt > threshold[sum]) ? 1u : 0u; // fetch the next character const uint8 c = *it; ++it; if (c < 4) // if an N, skip it (the kmers containing it will be marked as untrusted and skipped as well) { kmer |= c; // insert the new character at the end of the kmer (in a big-endian encoding) if (popc( trusted ) == k) // check whether we have an actual 'k'-mer - i.e. k trusted positions in a row trusted_filter.insert( kmer ); } // shift the kmer to the right, dropping the last symbol kmer <<= 2; kmer &= kmask; // shift the trusted bits by one to the right, dropping the last symbol trusted <<= 1; trusted &= trusted_mask; } } const uint32 k; const uint64 kmask; string_set_type string_set; const sampled_filter_type sampled_filter; mutable trusted_filter_type trusted_filter; const threshold_type threshold; }; // process the next batch // bool TrustedKmersStage::process(PipelineContext& context) { typedef nvbio::io::SequenceDataAccess<DNA_N>::sequence_string_set_type string_set_type; // fetch the input nvbio::io::SequenceDataHost* h_read_data = context.input<nvbio::io::SequenceDataHost>( 0 ); float time = 0.0f; // introduce a timing scope try { const nvbio::ScopedTimer<float> timer( &time ); if (device >= 0) { // // Device (GPU) path // // declare the Bloom filter types typedef nvbio::blocked_bloom_filter<hash_functor1, hash_functor2, nvbio::cuda::ldg_pointer<uint4> > sampled_filter_type; typedef nvbio::blocked_bloom_filter<hash_functor1, hash_functor2, uint64_2*> trusted_filter_type; typedef TrustedKmersFunctor<string_set_type,sampled_filter_type,trusted_filter_type, cuda::ldg_pointer<uint32> > functor_type; // set the device hipSetDevice( device ); // copy it to the device io::SequenceDataDevice d_read_data( *h_read_data ); // build a view const io::SequenceDataAccess<DNA_N> d_read_view( d_read_data ); // build the Bloom filter sampled_filter_type sampled_filter( SAMPLED_KMERS_FILTER_K, sampled_filter_size, (const uint4*)sampled_filter_storage ); trusted_filter_type trusted_filter( TRUSTED_KMERS_FILTER_K, trusted_filter_size, (uint64_2*)trusted_filter_storage ); // build the kmer sampling functor const functor_type kmer_filter( k, d_read_view.sequence_string_set(), sampled_filter, trusted_filter, cuda::make_ldg_pointer(threshold) ); // and apply the functor to all reads in the batch device_for_each( d_read_view.size(), kmer_filter ); hipDeviceSynchronize(); cuda::check_error("mark-trusted-kmers"); } else { // // Host (CPU) path // omp_set_num_threads( -device ); // declare the Bloom filter types typedef nvbio::blocked_bloom_filter<hash_functor1, hash_functor2, const uint64_2*> sampled_filter_type; typedef nvbio::blocked_bloom_filter<hash_functor1, hash_functor2, uint64_2*> trusted_filter_type; typedef TrustedKmersFunctor<string_set_type,sampled_filter_type,trusted_filter_type,const uint32*> functor_type; // build a view const nvbio::io::SequenceDataAccess<DNA_N> h_read_view( *h_read_data ); // build the Bloom filter sampled_filter_type sampled_filter( SAMPLED_KMERS_FILTER_K, sampled_filter_size, (const uint64_2*)sampled_filter_storage ); trusted_filter_type 
trusted_filter( TRUSTED_KMERS_FILTER_K, trusted_filter_size, (uint64_2*)trusted_filter_storage ); // build the kmer sampling functor const TrustedKmersFunctor<string_set_type,sampled_filter_type,trusted_filter_type,const uint32*> kmer_filter( k, h_read_view.sequence_string_set(), sampled_filter, trusted_filter, threshold ); // and apply the functor to all reads in the batch host_for_each( h_read_view.size(), kmer_filter ); } } catch (nvbio::cuda_error &e) { log_error(stderr, "[TrustedKmersStage] caught a nvbio::cuda_error exception:\n"); log_error(stderr, " %s\n", e.what()); exit(1); } catch (nvbio::bad_alloc &e) { log_error(stderr, "[TrustedKmersStage] caught a nvbio::bad_alloc exception:\n"); log_error(stderr, " %s\n", e.what()); exit(1); } catch (nvbio::logic_error &e) { log_error(stderr, "[TrustedKmersStage] caught a nvbio::logic_error exception:\n"); log_error(stderr, " %s\n", e.what()); exit(1); } catch (nvbio::runtime_error &e) { log_error(stderr, "[TrustedKmersStage] caught a nvbio::runtime_error exception:\n"); log_error(stderr, " %s\n", e.what()); exit(1); } catch (thrust::system::system_error &e) { log_error(stderr, "[TrustedKmersStage] caught a thrust::system_error exception:\n"); log_error(stderr, " %s\n", e.what()); exit(1); } catch (std::bad_alloc &e) { log_error(stderr, "[TrustedKmersStage] caught a std::bad_alloc exception:\n"); log_error(stderr, " %s\n", e.what()); exit(1); } catch (std::logic_error &e) { log_error(stderr, "[TrustedKmersStage] caught a std::logic_error exception:\n"); log_error(stderr, " %s\n", e.what()); exit(1); } catch (std::runtime_error &e) { log_error(stderr, "[TrustedKmersStage] caught a std::runtime_error exception:\n"); log_error(stderr, " %s\n", e.what()); exit(1); } catch (...) { log_error(stderr, "[TrustedKmersStage] caught an unknown exception!\n"); exit(1); } // update the time stats stats->m_mutex.lock(); stats->m_time += time; log_info(stderr, "\r processed reads [%llu, %llu] (%.1fM / %.2fG bps, %.1fK reads/s, %.1fM bps/s - %s<%d>) ", stats->m_reads, stats->m_reads + h_read_data->size(), 1.0e-6f * (h_read_data->bps()), 1.0e-9f * (stats->m_bps + h_read_data->bps()), stats->m_time ? (1.0e-3f * (stats->m_reads + h_read_data->size())) / stats->m_time : 0.0f, stats->m_time ? (1.0e-6f * (stats->m_bps + h_read_data->bps() )) / stats->m_time : 0.0f, device >= 0 ? "gpu" : "cpu", device >= 0 ? device : -device ); log_debug_cont(stderr, "\n"); log_debug(stderr," peak memory : %.1f GB\n", float( peak_resident_memory() ) / float(1024*1024*1024)); stats->m_reads += h_read_data->size(); stats->m_bps += h_read_data->bps(); stats->m_mutex.unlock(); return true; }
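// Editor's note: a self-contained sketch of the 2-bit rolling k-mer update that
// SampleKmersFunctor::operator() performs inline (kmer |= c; ... kmer <<= 2; kmer &= kmask).
// The helper name and the main() driver are illustrative only and are not part of nvbio.
#include <stdint.h>
#include <stdio.h>

// Append one 2-bit symbol (0..3) to a rolling k-mer window. kmask == (1 << 2k) - 1, so after
// the shift-and-mask the window keeps only the most recent k symbols, oldest in the high bits.
__host__ __device__ inline uint64_t push_symbol(uint64_t kmer, uint8_t c, uint64_t kmask)
{
    kmer |= c;      // new symbol enters the low two bits
    // ... the functor checks kmer_len and inserts the k-mer into the Bloom filter here ...
    kmer <<= 2;     // make room for the next symbol
    kmer &= kmask;  // drop the symbol that is now older than k positions
    return kmer;
}

int main()
{
    const uint32_t k = 4;
    const uint64_t kmask = (uint64_t(1) << (2 * k)) - 1;
    const uint8_t read[6] = {0, 1, 2, 3, 0, 1};   // a toy 2-bit encoded read (A=0..T=3 assumed)
    uint64_t kmer = 0;
    for (int j = 0; j < 6; j++)
        kmer = push_symbol(kmer, read[j], kmask);
    printf("rolling k-mer state after the read: 0x%llx\n", (unsigned long long)kmer);
    return 0;
}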
f35f79650f10c2045da423d8e07a9da975dd9d76.cu
/* * nvbio * Copyright (c) 2011-2014, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ // sample_kmers.h // #include "sample_kmers.h" #include "utils.h" #include <nvbio/basic/pipeline_context.h> #include <nvbio/basic/numbers.h> #include <nvbio/basic/bloom_filter.h> #include <nvbio/basic/primitives.h> #include <nvbio/basic/console.h> #include <nvbio/basic/timer.h> #include <nvbio/basic/threads.h> #include <nvbio/basic/system.h> #include <nvbio/basic/exceptions.h> #include <nvbio/basic/cuda/ldg.h> #include <nvbio/basic/cuda/arch.h> #include <nvbio/io/sequence/sequence.h> #include <nvbio/strings/prefetcher.h> #include <stdio.h> #include <stdlib.h> using namespace nvbio; /// /// A functor to sample kmers and insert them in a Bloom filter /// template <typename string_set_type, typename filter_type> struct SampleKmersFunctor { /// constructor /// ///\param _k kmer length ///\param _alpha the sampling frequency ///\param _string_set the input string set to sample ///\param _filter the kmer Bloom filter /// NVBIO_HOST_DEVICE SampleKmersFunctor( const uint32 _k, const float _alpha, const string_set_type _string_set, filter_type _filter) : k(_k), kmask( (uint64(1u) << (k*2))-1u ), alpha( _alpha ), string_set( _string_set ), filter(_filter) {} /// functor operator /// ///\param i input string index /// NVBIO_HOST_DEVICE void operator() (const uint32 i) const { typedef typename string_set_type::string_type string_type; typedef typename string_traits<string_type>::forward_iterator forward_iterator; // fetch the i-th string const string_type string = string_set[i]; const uint32 len = length( string ); if (len < k) return; // build a forward string iterator forward_iterator it( string.begin() ); // start with an empty kmer uint64 kmer = 0u; uint32 kmer_len = 0u; // initialie a random number generator LCG_random random( hash(i) ); for (uint32 j = 0; j < len; ++j) { // fetch the next character const uint8 c = *it; ++it; if (c < 4) // make sure this is not an N { kmer |= c; // insert the new character at the end of the kmer (in a big-endian encoding) if (kmer_len < k) kmer_len++; if 
(kmer_len >= k) // check whether we have an actual 'k'-mer { if (float( random.next() ) / float(LCG_random::MAX) < alpha) { // insert the kmer filter.insert( kmer ); } } // shift the kmer to the right, dropping the last symbol kmer <<= 2; kmer &= kmask; } else { // an N, skip all k-mers containing it it += k-1; j += k-1; // and reset the kmer kmer = 0u; kmer_len = 0u; } } } const uint32 k; const uint64 kmask; const float alpha; string_set_type string_set; mutable filter_type filter; }; // process the next batch // bool SampleKmersStage::process(PipelineContext& context) { typedef nvbio::io::SequenceDataAccess<DNA_N>::sequence_string_set_type string_set_type; // declare the Bloom filter type typedef nvbio::blocked_bloom_filter<hash_functor1, hash_functor2, uint64_2*> filter_type; typedef SampleKmersFunctor<string_set_type,filter_type> functor_type; // fetch the input nvbio::io::SequenceDataHost* h_read_data = context.input<nvbio::io::SequenceDataHost>( 0 ); float time = 0.0f; // introduce a timing scope try { const nvbio::ScopedTimer<float> timer( &time ); if (device >= 0) { // // Device (GPU) path // // set the device cudaSetDevice( device ); // copy it to the device nvbio::io::SequenceDataDevice d_read_data( *h_read_data ); // build a view const nvbio::io::SequenceDataAccess<DNA_N> d_read_view( d_read_data ); // build the Bloom filter filter_type filter( SAMPLED_KMERS_FILTER_K, filter_size, (uint64_2*)filter_storage ); //filter_type filter( filter_size, filter_storage ); // build the kmer sampling functor const functor_type kmer_filter( k, alpha, d_read_view.sequence_string_set(), filter ); device_for_each( d_read_view.size(), kmer_filter ); cudaDeviceSynchronize(); cuda::check_error("sample-kmers"); } else { // // Host (CPU) path // omp_set_num_threads( -device ); // build a view const io::SequenceDataAccess<DNA_N> h_read_view( *h_read_data ); // build the Bloom filter filter_type filter( SAMPLED_KMERS_FILTER_K, filter_size, (uint64_2*)filter_storage ); // build the kmer sampling functor const functor_type kmer_filter( k, alpha, h_read_view.sequence_string_set(), filter ); host_for_each( h_read_view.size(), kmer_filter ); } } catch (nvbio::cuda_error &e) { log_error(stderr, "[SampleKmersStage] caught a nvbio::cuda_error exception:\n"); log_error(stderr, " %s\n", e.what()); exit(1); } catch (nvbio::bad_alloc &e) { log_error(stderr, "[SampleKmersStage] caught a nvbio::bad_alloc exception:\n"); log_error(stderr, " %s\n", e.what()); exit(1); } catch (nvbio::logic_error &e) { log_error(stderr, "[SampleKmersStage] caught a nvbio::logic_error exception:\n"); log_error(stderr, " %s\n", e.what()); exit(1); } catch (nvbio::runtime_error &e) { log_error(stderr, "[SampleKmersStage] caught a nvbio::runtime_error exception:\n"); log_error(stderr, " %s\n", e.what()); exit(1); } catch (thrust::system::system_error &e) { log_error(stderr, "[SampleKmersStage] caught a thrust::system_error exception:\n"); log_error(stderr, " %s\n", e.what()); exit(1); } catch (std::bad_alloc &e) { log_error(stderr, "[SampleKmersStage] caught a std::bad_alloc exception:\n"); log_error(stderr, " %s\n", e.what()); exit(1); } catch (std::logic_error &e) { log_error(stderr, "[SampleKmersStage] caught a std::logic_error exception:\n"); log_error(stderr, " %s\n", e.what()); exit(1); } catch (std::runtime_error &e) { log_error(stderr, "[SampleKmersStage] caught a std::runtime_error exception:\n"); log_error(stderr, " %s\n", e.what()); exit(1); } catch (...) 
{ log_error(stderr, "[SampleKmersStage] caught an unknown exception!\n"); exit(1); } // update the time stats stats->m_mutex.lock(); stats->m_time += time; log_info(stderr, "\r processed reads [%llu, %llu] (%.1fM / %.2fG bps, %.1fK reads/s, %.1fM bps/s - %s<%d>) ", stats->m_reads, stats->m_reads + h_read_data->size(), 1.0e-6f * (h_read_data->bps()), 1.0e-9f * (stats->m_bps + h_read_data->bps()), stats->m_time ? (1.0e-3f * (stats->m_reads + h_read_data->size())) / stats->m_time : 0.0f, stats->m_time ? (1.0e-6f * (stats->m_bps + h_read_data->bps() )) / stats->m_time : 0.0f, device >= 0 ? "gpu" : "cpu", device >= 0 ? device : -device ); log_debug_cont(stderr, "\n"); log_debug(stderr," peak memory : %.1f GB\n", float( peak_resident_memory() ) / float(1024*1024*1024)); stats->m_reads += h_read_data->size(); stats->m_bps += h_read_data->bps(); stats->m_mutex.unlock(); return true; } /// /// A functor to sample kmers and insert them in a Bloom filter /// template <typename string_set_type, typename sampled_filter_type, typename trusted_filter_type, typename threshold_type> struct TrustedKmersFunctor { /// constructor /// ///\param _k kmer length ///\param _alpha the sampling frequency ///\param _string_set the input string set to sample ///\param _filter the kmer Bloom filter /// NVBIO_HOST_DEVICE TrustedKmersFunctor( const uint32 _k, const string_set_type _string_set, const sampled_filter_type _sampled_filter, trusted_filter_type _trusted_filter, const threshold_type _threshold) : k(_k), kmask( (uint64(1u) << (k*2))-1u ), string_set( _string_set ), sampled_filter(_sampled_filter), trusted_filter(_trusted_filter), threshold(_threshold) {} /// functor operator /// ///\param i input string index /// NVBIO_HOST_DEVICE void operator() (const uint32 i) const { typedef typename string_set_type::string_type string_type; typedef nvbio::StringPrefetcher< string_type, nvbio::lmem_cache_tag<MAX_READ_LENGTH> > string_prefetcher_type; typedef typename string_prefetcher_type::string_type local_string_type; typedef typename nvbio::string_traits<local_string_type>::forward_iterator forward_iterator; //bool occur[MAX_READ_LENGTH]; uint32 occur_storage[MAX_READ_LENGTH/32]; nvbio::PackedStream<uint32*,uint8,1u,false> occur( occur_storage ); // instantiate a prefetcher string_prefetcher_type string_prefetcher; // fetch the i-th string //const string_type string = string_set[i]; const local_string_type string = string_prefetcher.load( string_set[i] ); const uint32 len = length( string ); if (len < k) return; // build a forward string iterator forward_iterator it( string.begin() ); // start with an empty kmer uint64 kmer = 0u; uint32 kmer_len = 0u; const uint32 occur_cnt = len - k + 1; // initialize all to false for (uint32 j = 0; j < (occur_cnt+31)/32; ++j) occur_storage[j] = 0u; // mark occurring kmers for (uint32 j = 0; j < len; ++j) { // fetch the next character const uint8 c = *it; ++it; if (c < 4) // make sure this is not an N { kmer |= c; // insert the new character at the end of the kmer (in a big-endian encoding) if (kmer_len < k) kmer_len++; if (kmer_len >= k) // check whether we have an actual 'k'-mer { if (sampled_filter[ kmer ]) occur[j - k + 1] = true; } // shift the kmer to the right, dropping the last symbol kmer <<= 2; kmer &= kmask; } else { // an N, skip all kmers containing it it += k-1; j += k-1; // and reset the kmer kmer = 0u; kmer_len = 0u; } } // mark trusted kmers int32 zero_cnt = 0; int32 one_cnt = 0; // reset the forward iterator it = forward_iterator( string.begin() ); // start with an 
empty kmer kmer = 0u; kmer_len = 0u; // keep a k-bits mask of trusted positions const uint64 trusted_mask = (uint64(1u) << k) - 1u; uint64 trusted = 0u; for (uint32 j = 0; j < len; ++j) { if (j >= k) { if (occur[j - k]) --one_cnt; else --zero_cnt; } if (j < occur_cnt) { if (occur[j]) ++one_cnt; else ++zero_cnt; } const int32 sum = one_cnt + zero_cnt; //if (qual[j] <= bad_quality) //{ // trusted[j] = false; // continue ; //} trusted |= (one_cnt > threshold[sum]) ? 1u : 0u; // fetch the next character const uint8 c = *it; ++it; if (c < 4) // if an N, skip it (the kmers containing it will be marked as untrusted and skipped as well) { kmer |= c; // insert the new character at the end of the kmer (in a big-endian encoding) if (popc( trusted ) == k) // check whether we have an actual 'k'-mer - i.e. k trusted positions in a row trusted_filter.insert( kmer ); } // shift the kmer to the right, dropping the last symbol kmer <<= 2; kmer &= kmask; // shift the trusted bits by one to the right, dropping the last symbol trusted <<= 1; trusted &= trusted_mask; } } const uint32 k; const uint64 kmask; string_set_type string_set; const sampled_filter_type sampled_filter; mutable trusted_filter_type trusted_filter; const threshold_type threshold; }; // process the next batch // bool TrustedKmersStage::process(PipelineContext& context) { typedef nvbio::io::SequenceDataAccess<DNA_N>::sequence_string_set_type string_set_type; // fetch the input nvbio::io::SequenceDataHost* h_read_data = context.input<nvbio::io::SequenceDataHost>( 0 ); float time = 0.0f; // introduce a timing scope try { const nvbio::ScopedTimer<float> timer( &time ); if (device >= 0) { // // Device (GPU) path // // declare the Bloom filter types typedef nvbio::blocked_bloom_filter<hash_functor1, hash_functor2, nvbio::cuda::ldg_pointer<uint4> > sampled_filter_type; typedef nvbio::blocked_bloom_filter<hash_functor1, hash_functor2, uint64_2*> trusted_filter_type; typedef TrustedKmersFunctor<string_set_type,sampled_filter_type,trusted_filter_type, cuda::ldg_pointer<uint32> > functor_type; // set the device cudaSetDevice( device ); // copy it to the device io::SequenceDataDevice d_read_data( *h_read_data ); // build a view const io::SequenceDataAccess<DNA_N> d_read_view( d_read_data ); // build the Bloom filter sampled_filter_type sampled_filter( SAMPLED_KMERS_FILTER_K, sampled_filter_size, (const uint4*)sampled_filter_storage ); trusted_filter_type trusted_filter( TRUSTED_KMERS_FILTER_K, trusted_filter_size, (uint64_2*)trusted_filter_storage ); // build the kmer sampling functor const functor_type kmer_filter( k, d_read_view.sequence_string_set(), sampled_filter, trusted_filter, cuda::make_ldg_pointer(threshold) ); // and apply the functor to all reads in the batch device_for_each( d_read_view.size(), kmer_filter ); cudaDeviceSynchronize(); cuda::check_error("mark-trusted-kmers"); } else { // // Host (CPU) path // omp_set_num_threads( -device ); // declare the Bloom filter types typedef nvbio::blocked_bloom_filter<hash_functor1, hash_functor2, const uint64_2*> sampled_filter_type; typedef nvbio::blocked_bloom_filter<hash_functor1, hash_functor2, uint64_2*> trusted_filter_type; typedef TrustedKmersFunctor<string_set_type,sampled_filter_type,trusted_filter_type,const uint32*> functor_type; // build a view const nvbio::io::SequenceDataAccess<DNA_N> h_read_view( *h_read_data ); // build the Bloom filter sampled_filter_type sampled_filter( SAMPLED_KMERS_FILTER_K, sampled_filter_size, (const uint64_2*)sampled_filter_storage ); trusted_filter_type 
trusted_filter( TRUSTED_KMERS_FILTER_K, trusted_filter_size, (uint64_2*)trusted_filter_storage ); // build the kmer sampling functor const TrustedKmersFunctor<string_set_type,sampled_filter_type,trusted_filter_type,const uint32*> kmer_filter( k, h_read_view.sequence_string_set(), sampled_filter, trusted_filter, threshold ); // and apply the functor to all reads in the batch host_for_each( h_read_view.size(), kmer_filter ); } } catch (nvbio::cuda_error &e) { log_error(stderr, "[TrustedKmersStage] caught a nvbio::cuda_error exception:\n"); log_error(stderr, " %s\n", e.what()); exit(1); } catch (nvbio::bad_alloc &e) { log_error(stderr, "[TrustedKmersStage] caught a nvbio::bad_alloc exception:\n"); log_error(stderr, " %s\n", e.what()); exit(1); } catch (nvbio::logic_error &e) { log_error(stderr, "[TrustedKmersStage] caught a nvbio::logic_error exception:\n"); log_error(stderr, " %s\n", e.what()); exit(1); } catch (nvbio::runtime_error &e) { log_error(stderr, "[TrustedKmersStage] caught a nvbio::runtime_error exception:\n"); log_error(stderr, " %s\n", e.what()); exit(1); } catch (thrust::system::system_error &e) { log_error(stderr, "[TrustedKmersStage] caught a thrust::system_error exception:\n"); log_error(stderr, " %s\n", e.what()); exit(1); } catch (std::bad_alloc &e) { log_error(stderr, "[TrustedKmersStage] caught a std::bad_alloc exception:\n"); log_error(stderr, " %s\n", e.what()); exit(1); } catch (std::logic_error &e) { log_error(stderr, "[TrustedKmersStage] caught a std::logic_error exception:\n"); log_error(stderr, " %s\n", e.what()); exit(1); } catch (std::runtime_error &e) { log_error(stderr, "[TrustedKmersStage] caught a std::runtime_error exception:\n"); log_error(stderr, " %s\n", e.what()); exit(1); } catch (...) { log_error(stderr, "[TrustedKmersStage] caught an unknown exception!\n"); exit(1); } // update the time stats stats->m_mutex.lock(); stats->m_time += time; log_info(stderr, "\r processed reads [%llu, %llu] (%.1fM / %.2fG bps, %.1fK reads/s, %.1fM bps/s - %s<%d>) ", stats->m_reads, stats->m_reads + h_read_data->size(), 1.0e-6f * (h_read_data->bps()), 1.0e-9f * (stats->m_bps + h_read_data->bps()), stats->m_time ? (1.0e-3f * (stats->m_reads + h_read_data->size())) / stats->m_time : 0.0f, stats->m_time ? (1.0e-6f * (stats->m_bps + h_read_data->bps() )) / stats->m_time : 0.0f, device >= 0 ? "gpu" : "cpu", device >= 0 ? device : -device ); log_debug_cont(stderr, "\n"); log_debug(stderr," peak memory : %.1f GB\n", float( peak_resident_memory() ) / float(1024*1024*1024)); stats->m_reads += h_read_data->size(); stats->m_bps += h_read_data->bps(); stats->m_mutex.unlock(); return true; }
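// Editor's note: both stages above treat nvbio::blocked_bloom_filter as a black box with
// insert() and operator[]. The struct below is NOT that class; it is a generic double-hashing
// Bloom filter sketch added only to illustrate the idea (k probe positions derived from two
// hashes, set with atomicOr, queried read-only). Hash constants and layout are arbitrary choices.
#include <stdint.h>

struct ToyBloomFilter
{
    uint32_t *bits;    // device bit array, n_bits/32 words, zero-initialized by the caller
    uint64_t  n_bits;  // total number of bits
    uint32_t  k;       // number of probes per key

    // splitmix64-style finalizer; any reasonable 64-bit mixer would do here.
    __device__ static uint64_t mix(uint64_t x)
    {
        x += 0x9e3779b97f4a7c15ull;
        x = (x ^ (x >> 30)) * 0xbf58476d1ce4e5b9ull;
        x = (x ^ (x >> 27)) * 0x94d049bb133111ebull;
        return x ^ (x >> 31);
    }

    __device__ void insert(uint64_t key) const
    {
        const uint64_t h1 = mix(key);
        const uint64_t h2 = mix(key ^ 0x9e3779b97f4a7c15ull) | 1ull;   // odd stride
        for (uint32_t i = 0; i < k; ++i)
        {
            const uint64_t bit = (h1 + i * h2) % n_bits;
            atomicOr(&bits[bit >> 5], 1u << (bit & 31u));
        }
    }

    __device__ bool contains(uint64_t key) const
    {
        const uint64_t h1 = mix(key);
        const uint64_t h2 = mix(key ^ 0x9e3779b97f4a7c15ull) | 1ull;
        for (uint32_t i = 0; i < k; ++i)
        {
            const uint64_t bit = (h1 + i * h2) % n_bits;
            if ((bits[bit >> 5] & (1u << (bit & 31u))) == 0u)
                return false;   // definitely not inserted
        }
        return true;            // possibly inserted (false positives are expected)
    }
};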
7b98aa3914d3a0adadaf9fae04e208b7ca58e4f1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "caffe2/operators/abs_op.h" #include <algorithm> #include <functional> #include "caffe2/core/context_gpu.h" namespace caffe2 { namespace { template <typename T> __global__ void AbsGradientCUDAKernel(const int N, const T* dY, const T* X, T* dX) { CUDA_1D_KERNEL_LOOP(i, N) { #if __CUDA_ARCH__ >= 350 dX[i] = __ldg(X + i) == T(0) ? T(0) : (__ldg(X + i) > T(0) ? __ldg(dY + i) : -__ldg(dY + i)); #else dX[i] = X[i] == T(0) ? T(0) : (X[i] > T(0) ? dY[i] : -dY[i]); #endif } } } // namespace template <> template <typename T> bool AbsGradientFunctor<CUDAContext>::Forward( const std::vector<int>& X_dims, const std::vector<int>& /* dY_dims */, const T* X, const T* dY, T* dX, CUDAContext* context) const { const int size = std::accumulate( X_dims.cbegin(), X_dims.cend(), 1, std::multiplies<int>()); hipLaunchKernelGGL(( AbsGradientCUDAKernel<T>) , dim3(CAFFE_GET_BLOCKS(size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), size, dY, X, dX); return true; } REGISTER_CUDA_OPERATOR( Abs, UnaryElementwiseOp< TensorTypes<float>, CUDAContext, AbsFunctor<CUDAContext>>); REGISTER_CUDA_OPERATOR( AbsGradient, BinaryElementwiseOp< TensorTypes<float>, CUDAContext, AbsGradientFunctor<CUDAContext>>); } // namespace caffe2
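// Editor's note: this .hip file and its .cu counterpart below differ essentially only in the
// runtime header and in the kernel-launch syntax that hipify rewrites. The toy example below
// shows that one-to-one mapping on a self-contained kernel; it is illustrative only, and uses
// the __HIPCC__ macro (defined by the HIP compiler) to select between the two launch forms.
#if defined(__HIPCC__)
#include <hip/hip_runtime.h>
#else
#include <cuda_runtime.h>
#endif

__global__ void scale_kernel(const int n, const float a, float *x)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        x[i] *= a;
}

void launch_scale(const int n, const float a, float *d_x)
{
    const dim3 block(256);
    const dim3 grid((n + block.x - 1) / block.x);
#if defined(__HIPCC__)
    // hipify-generated form: kernel, grid, block, shared-memory bytes, stream, then arguments.
    hipLaunchKernelGGL(scale_kernel, grid, block, 0, 0, n, a, d_x);
#else
    // original CUDA form, as in the .cu version of this file pair.
    scale_kernel<<<grid, block, 0, 0>>>(n, a, d_x);
#endif
}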
7b98aa3914d3a0adadaf9fae04e208b7ca58e4f1.cu
#include "caffe2/operators/abs_op.h" #include <algorithm> #include <functional> #include "caffe2/core/context_gpu.h" namespace caffe2 { namespace { template <typename T> __global__ void AbsGradientCUDAKernel(const int N, const T* dY, const T* X, T* dX) { CUDA_1D_KERNEL_LOOP(i, N) { #if __CUDA_ARCH__ >= 350 dX[i] = __ldg(X + i) == T(0) ? T(0) : (__ldg(X + i) > T(0) ? __ldg(dY + i) : -__ldg(dY + i)); #else dX[i] = X[i] == T(0) ? T(0) : (X[i] > T(0) ? dY[i] : -dY[i]); #endif } } } // namespace template <> template <typename T> bool AbsGradientFunctor<CUDAContext>::Forward( const std::vector<int>& X_dims, const std::vector<int>& /* dY_dims */, const T* X, const T* dY, T* dX, CUDAContext* context) const { const int size = std::accumulate( X_dims.cbegin(), X_dims.cend(), 1, std::multiplies<int>()); AbsGradientCUDAKernel<T> <<<CAFFE_GET_BLOCKS(size), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(size, dY, X, dX); return true; } REGISTER_CUDA_OPERATOR( Abs, UnaryElementwiseOp< TensorTypes<float>, CUDAContext, AbsFunctor<CUDAContext>>); REGISTER_CUDA_OPERATOR( AbsGradient, BinaryElementwiseOp< TensorTypes<float>, CUDAContext, AbsGradientFunctor<CUDAContext>>); } // namespace caffe2
f7f6af70a1edb921fadfdf9f62e5c4a417ad0a6b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "radius_cuda.h" #include <ATen/hip/HIPContext.h> #include "utils.cuh" #define THREADS 256 template <typename scalar_t> __global__ void radius_kernel(const scalar_t *__restrict__ x, const scalar_t *__restrict__ y, const int64_t *__restrict__ ptr_x, const int64_t *__restrict__ ptr_y, int64_t *__restrict__ row, int64_t *__restrict__ col, const scalar_t r, const int64_t n, const int64_t m, const int64_t dim, const int64_t num_examples, const int64_t max_num_neighbors) { const int64_t n_y = blockIdx.x * blockDim.x + threadIdx.x; if (n_y >= m) return; int64_t count = 0; const int64_t example_idx = get_example_idx(n_y, ptr_y, num_examples); for (int64_t n_x = ptr_x[example_idx]; n_x < ptr_x[example_idx + 1]; n_x++) { scalar_t dist = 0; for (int64_t d = 0; d < dim; d++) { dist += (x[n_x * dim + d] - y[n_y * dim + d]) * (x[n_x * dim + d] - y[n_y * dim + d]); } if (dist < r) { row[n_y * max_num_neighbors + count] = n_y; col[n_y * max_num_neighbors + count] = n_x; count++; } if (count >= max_num_neighbors) break; } } torch::Tensor radius_cuda(const torch::Tensor x, const torch::Tensor y, torch::optional<torch::Tensor> ptr_x, torch::optional<torch::Tensor> ptr_y, const double r, const int64_t max_num_neighbors) { CHECK_CUDA(x); CHECK_CONTIGUOUS(x); CHECK_INPUT(x.dim() == 2); CHECK_CUDA(y); CHECK_CONTIGUOUS(y); CHECK_INPUT(y.dim() == 2); CHECK_INPUT(x.size(1) == y.size(1)); hipSetDevice(x.get_device()); if (ptr_x.has_value()) { CHECK_CUDA(ptr_x.value()); CHECK_INPUT(ptr_x.value().dim() == 1); } else ptr_x = torch::arange(0, x.size(0) + 1, x.size(0), x.options().dtype(torch::kLong)); if (ptr_y.has_value()) { CHECK_CUDA(ptr_y.value()); CHECK_INPUT(ptr_y.value().dim() == 1); } else ptr_y = torch::arange(0, y.size(0) + 1, y.size(0), y.options().dtype(torch::kLong)); CHECK_INPUT(ptr_x.value().numel() == ptr_y.value().numel()); hipSetDevice(x.get_device()); auto row = torch::full(y.size(0) * max_num_neighbors, -1, ptr_y.value().options()); auto col = torch::full(y.size(0) * max_num_neighbors, -1, ptr_y.value().options()); dim3 BLOCKS((y.size(0) + THREADS - 1) / THREADS); auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); auto scalar_type = x.scalar_type(); AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half, scalar_type, "_", [&] { hipLaunchKernelGGL(( radius_kernel<scalar_t>), dim3(BLOCKS), dim3(THREADS), 0, stream, x.data_ptr<scalar_t>(), y.data_ptr<scalar_t>(), ptr_x.value().data_ptr<int64_t>(), ptr_y.value().data_ptr<int64_t>(), row.data_ptr<int64_t>(), col.data_ptr<int64_t>(), r * r, x.size(0), y.size(0), x.size(1), ptr_x.value().numel() - 1, max_num_neighbors); }); auto mask = row != -1; return torch::stack({row.masked_select(mask), col.masked_select(mask)}, 0); }
f7f6af70a1edb921fadfdf9f62e5c4a417ad0a6b.cu
#include "radius_cuda.h" #include <ATen/cuda/CUDAContext.h> #include "utils.cuh" #define THREADS 256 template <typename scalar_t> __global__ void radius_kernel(const scalar_t *__restrict__ x, const scalar_t *__restrict__ y, const int64_t *__restrict__ ptr_x, const int64_t *__restrict__ ptr_y, int64_t *__restrict__ row, int64_t *__restrict__ col, const scalar_t r, const int64_t n, const int64_t m, const int64_t dim, const int64_t num_examples, const int64_t max_num_neighbors) { const int64_t n_y = blockIdx.x * blockDim.x + threadIdx.x; if (n_y >= m) return; int64_t count = 0; const int64_t example_idx = get_example_idx(n_y, ptr_y, num_examples); for (int64_t n_x = ptr_x[example_idx]; n_x < ptr_x[example_idx + 1]; n_x++) { scalar_t dist = 0; for (int64_t d = 0; d < dim; d++) { dist += (x[n_x * dim + d] - y[n_y * dim + d]) * (x[n_x * dim + d] - y[n_y * dim + d]); } if (dist < r) { row[n_y * max_num_neighbors + count] = n_y; col[n_y * max_num_neighbors + count] = n_x; count++; } if (count >= max_num_neighbors) break; } } torch::Tensor radius_cuda(const torch::Tensor x, const torch::Tensor y, torch::optional<torch::Tensor> ptr_x, torch::optional<torch::Tensor> ptr_y, const double r, const int64_t max_num_neighbors) { CHECK_CUDA(x); CHECK_CONTIGUOUS(x); CHECK_INPUT(x.dim() == 2); CHECK_CUDA(y); CHECK_CONTIGUOUS(y); CHECK_INPUT(y.dim() == 2); CHECK_INPUT(x.size(1) == y.size(1)); cudaSetDevice(x.get_device()); if (ptr_x.has_value()) { CHECK_CUDA(ptr_x.value()); CHECK_INPUT(ptr_x.value().dim() == 1); } else ptr_x = torch::arange(0, x.size(0) + 1, x.size(0), x.options().dtype(torch::kLong)); if (ptr_y.has_value()) { CHECK_CUDA(ptr_y.value()); CHECK_INPUT(ptr_y.value().dim() == 1); } else ptr_y = torch::arange(0, y.size(0) + 1, y.size(0), y.options().dtype(torch::kLong)); CHECK_INPUT(ptr_x.value().numel() == ptr_y.value().numel()); cudaSetDevice(x.get_device()); auto row = torch::full(y.size(0) * max_num_neighbors, -1, ptr_y.value().options()); auto col = torch::full(y.size(0) * max_num_neighbors, -1, ptr_y.value().options()); dim3 BLOCKS((y.size(0) + THREADS - 1) / THREADS); auto stream = at::cuda::getCurrentCUDAStream(); auto scalar_type = x.scalar_type(); AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half, scalar_type, "_", [&] { radius_kernel<scalar_t><<<BLOCKS, THREADS, 0, stream>>>( x.data_ptr<scalar_t>(), y.data_ptr<scalar_t>(), ptr_x.value().data_ptr<int64_t>(), ptr_y.value().data_ptr<int64_t>(), row.data_ptr<int64_t>(), col.data_ptr<int64_t>(), r * r, x.size(0), y.size(0), x.size(1), ptr_x.value().numel() - 1, max_num_neighbors); }); auto mask = row != -1; return torch::stack({row.masked_select(mask), col.masked_select(mask)}, 0); }
05b33a02436a62638b71a0ad9d499e47d3757e21.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /******************************************************************************* ********************************* BLUEBOTTLE ********************************** ******************************************************************************* * * Copyright 2012 - 2016 Adam Sierakowski, The Johns Hopkins University * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * Please contact the Johns Hopkins University to use Bluebottle for * commercial and/or for-profit applications. ******************************************************************************/ #include "cuda_bluebottle.h" // pressure; west; periodic __global__ void BC_p_W_P(real *p, dom_struct *dom) { int tj = blockDim.x*blockIdx.x + threadIdx.x; int tk = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gcc._s1b; int s2b = dom->Gcc._s2b; if((tj < dom->Gcc._jnb) && (tk < dom->Gcc._knb)) p[dom->Gcc._isb + tj*s1b + tk*s2b] = p[(dom->Gcc._ie-1) + tj*s1b + tk*s2b]; } // pressure; west; Neumann __global__ void BC_p_W_N(real *p, dom_struct *dom) { int tj = blockDim.x*blockIdx.x + threadIdx.x; int tk = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gcc._s1b; int s2b = dom->Gcc._s2b; if((tj < dom->Gcc._jnb) && (tk < dom->Gcc._knb)) p[dom->Gcc._isb + tj*s1b + tk*s2b] = p[dom->Gcc._is + tj*s1b + tk*s2b]; } // pressure; east; periodic __global__ void BC_p_E_P(real *p, dom_struct *dom) { int tj = blockDim.x*blockIdx.x + threadIdx.x; int tk = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gcc._s1b; int s2b = dom->Gcc._s2b; if((tj < dom->Gcc._jnb) && (tk < dom->Gcc._knb)) p[(dom->Gcc._ieb-1) + tj*s1b + tk*s2b] = p[dom->Gcc._is + tj*s1b + tk*s2b]; } // pressure; east; Neumann __global__ void BC_p_E_N(real *p, dom_struct *dom) { int tj = blockDim.x*blockIdx.x + threadIdx.x; int tk = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gcc._s1b; int s2b = dom->Gcc._s2b; if((tj < dom->Gcc._jnb) && (tk < dom->Gcc._knb)) p[(dom->Gcc._ieb-1) + tj*s1b + tk*s2b] = p[(dom->Gcc._ie-1) + tj*s1b + tk*s2b]; } // pressure; south; periodic __global__ void BC_p_S_P(real *p, dom_struct *dom) { int tk = blockDim.x*blockIdx.x + threadIdx.x; int ti = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gcc._s1b; int s2b = dom->Gcc._s2b; if((ti < dom->Gcc._inb) && (tk < dom->Gcc._knb)) p[ti + dom->Gcc._jsb*s1b + tk*s2b] = p[ti + (dom->Gcc._je-1)*s1b + tk*s2b]; } // pressure; south; Neumann __global__ void BC_p_S_N(real *p, dom_struct *dom) { int tk = blockDim.x*blockIdx.x + threadIdx.x; int ti = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gcc._s1b; int s2b = dom->Gcc._s2b; if((ti < dom->Gcc._inb) && (tk < dom->Gcc._knb)) p[ti + dom->Gcc._jsb*s1b + tk*s2b] = p[ti + dom->Gcc._js*s1b + tk*s2b]; } // pressure; north; periodic __global__ void BC_p_N_P(real *p, dom_struct *dom) { int tk = blockDim.x*blockIdx.x + threadIdx.x; int ti = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gcc._s1b; int s2b = dom->Gcc._s2b; if((ti < dom->Gcc._inb) && (tk < dom->Gcc._knb)) p[ti + 
(dom->Gcc._jeb-1)*s1b + tk*s2b] = p[ti + dom->Gcc._js*s1b + tk*s2b]; } // pressure; north; Neumann __global__ void BC_p_N_N(real *p, dom_struct *dom) { int tk = blockDim.x*blockIdx.x + threadIdx.x; int ti = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gcc._s1b; int s2b = dom->Gcc._s2b; if((ti < dom->Gcc._inb) && (tk < dom->Gcc._knb)) p[ti + (dom->Gcc._jeb-1)*s1b + tk*s2b] = p[ti + (dom->Gcc._je-1)*s1b + tk*s2b]; } // pressure; bottom; periodic __global__ void BC_p_B_P(real *p, dom_struct *dom) { int ti = blockDim.x*blockIdx.x + threadIdx.x; int tj = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gcc._s1b; int s2b = dom->Gcc._s2b; if((ti < dom->Gcc._inb) && (tj < dom->Gcc._jnb)) p[ti + tj*s1b + dom->Gcc._ksb*s2b] = p[ti + tj*s1b + (dom->Gcc._ke-1)*s2b]; } // pressure; bottom; Neumann __global__ void BC_p_B_N(real *p, dom_struct *dom) { int ti = blockDim.x*blockIdx.x + threadIdx.x; int tj = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gcc._s1b; int s2b = dom->Gcc._s2b; if((ti < dom->Gcc._inb) && (tj < dom->Gcc._jnb)) p[ti + tj*s1b + dom->Gcc._ksb*s2b] = p[ti + tj*s1b + dom->Gcc._ks*s2b]; } // pressure; top; periodic __global__ void BC_p_T_P(real *p, dom_struct *dom) { int ti = blockDim.x*blockIdx.x + threadIdx.x; int tj = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gcc._s1b; int s2b = dom->Gcc._s2b; if((ti < dom->Gcc._inb) && (tj < dom->Gcc._jnb)) p[ti + tj*s1b + (dom->Gcc._keb-1)*s2b] = p[ti + tj*s1b + dom->Gcc._ks*s2b]; } // pressure; top; Neumann __global__ void BC_p_T_N(real *p, dom_struct *dom) { int ti = blockDim.x*blockIdx.x + threadIdx.x; int tj = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gcc._s1b; int s2b = dom->Gcc._s2b; if((ti < dom->Gcc._inb) && (tj < dom->Gcc._jnb)) p[ti + tj*s1b + (dom->Gcc._keb-1)*s2b] = p[ti + tj*s1b + (dom->Gcc._ke-1)*s2b]; } // u-velocity; west; periodic __global__ void BC_u_W_P(real *u, dom_struct *dom) { int tj = blockDim.x*blockIdx.x + threadIdx.x; int tk = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfx._s1b; int s2b = dom->Gfx._s2b; if((tj < dom->Gfx._jnb) && (tk < dom->Gfx._knb)) { u[dom->Gfx._isb + tj*s1b + tk*s2b] = u[(dom->Gfx._ie-2) + tj*s1b + tk*s2b]; u[dom->Gfx._is + tj*s1b + tk*s2b] = u[(dom->Gfx._ie-1) + tj*s1b + tk*s2b]; } } // u-velocity; west; Dirichlet __global__ void BC_u_W_D(real *u, dom_struct *dom, real bc) { int tj = blockDim.x*blockIdx.x + threadIdx.x; int tk = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfx._s1b; int s2b = dom->Gfx._s2b; if((tj < dom->Gfx._jnb) && (tk < dom->Gfx._knb)) { u[dom->Gfx._isb + tj*s1b + tk*s2b] = 2. * bc - u[(dom->Gfx._is+1) + tj*s1b + tk*s2b]; u[dom->Gfx._is + tj*s1b + tk*s2b] = bc; } } // u-velocity; west; Neumann __global__ void BC_u_W_N(real *u, dom_struct *dom) { int tj = blockDim.x*blockIdx.x + threadIdx.x; int tk = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfx._s1b; int s2b = dom->Gfx._s2b; if((tj < dom->Gfx._jnb) && (tk < dom->Gfx._knb)) u[dom->Gfx._isb + tj*s1b + tk*s2b] = u[dom->Gfx._is + tj*s1b + tk*s2b]; } // u-velocity; west; Turbulent precursor __global__ void BC_u_W_T(real *u, dom_struct *dom, real* bc) { int tj = blockDim.x*blockIdx.x + threadIdx.x; int tk = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfx._s1b; int s2b = dom->Gfx._s2b; if((tj < dom->Gfx._jnb) && (tk < dom->Gfx._knb)) { u[dom->Gfx._isb + tj*s1b + tk*s2b] = 2. 
* bc[tj + tk*dom->Gfx.jnb] - u[(dom->Gfx._is+1) + tj*s1b + tk*s2b]; u[dom->Gfx._is + tj*s1b + tk*s2b] = bc[tj + tk*dom->Gfx.jnb]; } } // u-velocity; east; periodic __global__ void BC_u_E_P(real *u, dom_struct *dom) { int tj = blockDim.x*blockIdx.x + threadIdx.x; int tk = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfx._s1b; int s2b = dom->Gfx._s2b; if((tj < dom->Gfx._jnb) && (tk < dom->Gfx._knb)) { u[(dom->Gfx._ieb-1) + tj*s1b + tk*s2b] = u[(dom->Gfx._is+1) + tj*s1b + tk*s2b]; u[(dom->Gfx._ie-1) + tj*s1b + tk*s2b] = u[dom->Gfx._is + tj*s1b + tk*s2b]; } } // u-velocity; east; Dirichlet __global__ void BC_u_E_D(real *u, dom_struct *dom, real bc) { int tj = blockDim.x*blockIdx.x + threadIdx.x; int tk = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfx._s1b; int s2b = dom->Gfx._s2b; if((tj < dom->Gfx._jnb) && (tk < dom->Gfx._knb)) { u[(dom->Gfx._ieb-1) + tj*s1b + tk*s2b] = 2. * bc - u[(dom->Gfx._ie-2) + tj*s1b + tk*s2b]; u[(dom->Gfx._ie-1) + tj*s1b + tk*s2b] = bc; } } // u-velocity; east; Neumann __global__ void BC_u_E_N(real *u, dom_struct *dom) { int tj = blockDim.x*blockIdx.x + threadIdx.x; int tk = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfx._s1b; int s2b = dom->Gfx._s2b; if((tj < dom->Gfx._jnb) && (tk < dom->Gfx._knb)) u[(dom->Gfx._ieb-1) + tj*s1b + tk*s2b] = u[(dom->Gfx._ie-1) + tj*s1b + tk*s2b]; } // u-velocity; east; Turbulent precursor __global__ void BC_u_E_T(real *u, dom_struct *dom, real* bc) { int tj = blockDim.x*blockIdx.x + threadIdx.x; int tk = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfx._s1b; int s2b = dom->Gfx._s2b; if((tj < dom->Gfx._jnb) && (tk < dom->Gfx._knb)) { u[(dom->Gfx._ieb-1) + tj*s1b + tk*s2b] = 2. * bc[tj + tk*dom->Gfx.jnb] - u[(dom->Gfx._ie-2) + tj*s1b + tk*s2b]; u[(dom->Gfx._ie-1) + tj*s1b + tk*s2b] = bc[tj + tk*dom->Gfx.jnb]; } } // u-velocity; south; periodic __global__ void BC_u_S_P(real *u, dom_struct *dom) { int tk = blockDim.x*blockIdx.x + threadIdx.x; int ti = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfx._s1b; int s2b = dom->Gfx._s2b; if((ti < dom->Gfx._inb) && (tk < dom->Gfx._knb)) { u[ti + dom->Gfx._jsb*s1b + tk*s2b] = u[ti + (dom->Gfx._je-1)*s1b + tk*s2b]; } } // u-velocity; south; Dirichlet __global__ void BC_u_S_D(real *u, dom_struct *dom, real bc) { int tk = blockDim.x*blockIdx.x + threadIdx.x; int ti = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfx._s1b; int s2b = dom->Gfx._s2b; if((ti < dom->Gfx._inb) && (tk < dom->Gfx._knb)) { u[ti + dom->Gfx._jsb*s1b + tk*s2b] = 8./3. * bc - 2. * u[ti + dom->Gfx._js*s1b + tk*s2b] + 1./3. 
* u[ti + (dom->Gfx._js+1)*s1b + tk*s2b]; } } // u-velocity; south; Neumann __global__ void BC_u_S_N(real *u, dom_struct *dom) { int tk = blockDim.x*blockIdx.x + threadIdx.x; int ti = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfx._s1b; int s2b = dom->Gfx._s2b; if((ti < dom->Gfx._inb) && (tk < dom->Gfx._knb)) u[ti + dom->Gfx._jsb*s1b + tk*s2b] = u[ti + dom->Gfx._js*s1b + tk*s2b]; } // u-velocity; south; Turbulent precursor __global__ void BC_u_S_T(real *u, dom_struct *dom, real* bc_s, real* bc_n) { int tk = blockDim.x*blockIdx.x + threadIdx.x; int ti = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfx._s1b; int s2b = dom->Gfx._s2b; if((ti < dom->Gfx._inb) && (tk < dom->Gfx._knb)) { u[ti + dom->Gfx._jsb*s1b + tk*s2b] = bc_s[tk + ti*dom->Gfx.knb]; u[ti + dom->Gfx._js*s1b + tk*s2b] = bc_n[tk + ti*dom->Gfx.knb]; } } // u-velocity; north; periodic __global__ void BC_u_N_P(real *u, dom_struct *dom) { int tk = blockDim.x*blockIdx.x + threadIdx.x; int ti = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfx._s1b; int s2b = dom->Gfx._s2b; if((ti < dom->Gfx._inb) && (tk < dom->Gfx._knb)) u[ti + (dom->Gfx._jeb-1)*s1b + tk*s2b] = u[ti + dom->Gfx._js*s1b + tk*s2b]; } // u-velocity; north; Dirichlet __global__ void BC_u_N_D(real *u, dom_struct *dom, real bc) { int tk = blockDim.x*blockIdx.x + threadIdx.x; int ti = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfx._s1b; int s2b = dom->Gfx._s2b; if((ti < dom->Gfx._inb) && (tk < dom->Gfx._knb)) { u[ti + (dom->Gfx._jeb-1)*s1b + tk*s2b] = 8./3. * bc - 2. * u[ti + (dom->Gfx._je-1)*s1b + tk*s2b] + 1./3. * u[ti + (dom->Gfx._je-2)*s1b + tk*s2b]; } } // u-velocity; north; Neumann __global__ void BC_u_N_N(real *u, dom_struct *dom) { int tk = blockDim.x*blockIdx.x + threadIdx.x; int ti = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfx._s1b; int s2b = dom->Gfx._s2b; if((ti < dom->Gfx._inb) && (tk < dom->Gfx._knb)) u[ti + (dom->Gfx._jeb-1)*s1b + tk*s2b] = u[ti + (dom->Gfx._je-1)*s1b + tk*s2b]; } // u-velocity; north; Turbulent precursor __global__ void BC_u_N_T(real *u, dom_struct *dom, real* bc_s, real* bc_n) { int tk = blockDim.x*blockIdx.x + threadIdx.x; int ti = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfx._s1b; int s2b = dom->Gfx._s2b; if((ti < dom->Gfx._inb) && (tk < dom->Gfx._knb)) { // velocity within computational domain, near the boundary u[ti + (dom->Gfx._je-1)*s1b + tk*s2b] = bc_s[tk + ti*dom->Gfx.knb]; //velocity on ghost cells u[ti + (dom->Gfx._jeb-1)*s1b + tk*s2b] = bc_n[tk + ti*dom->Gfx.knb]; } } // u-velocity; bottom; periodic __global__ void BC_u_B_P(real *u, dom_struct *dom) { int ti = blockDim.x*blockIdx.x + threadIdx.x; int tj = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfx._s1b; int s2b = dom->Gfx._s2b; if((ti < dom->Gfx._inb) && (tj < dom->Gfx._jnb)) u[ti + tj*s1b + dom->Gfx._ksb*s2b] = u[ti + tj*s1b + (dom->Gfx._ke-1)*s2b]; } // u-velocity; bottom; Dirichlet __global__ void BC_u_B_D(real *u, dom_struct *dom, real bc) { int ti = blockDim.x*blockIdx.x + threadIdx.x; int tj = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfx._s1b; int s2b = dom->Gfx._s2b; if((ti < dom->Gfx._inb) && (tj < dom->Gfx._jnb)) u[ti + tj*s1b + dom->Gfx._ksb*s2b] = 8./3. * bc - 2. * u[ti + tj*s1b + dom->Gfx._ks*s2b] + 1./3. 
* u[ti + tj*s1b + (dom->Gfx._ks+1)*s2b]; } // u-velocity; bottom; Neumann __global__ void BC_u_B_N(real *u, dom_struct *dom) { int ti = blockDim.x*blockIdx.x + threadIdx.x; int tj = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfx._s1b; int s2b = dom->Gfx._s2b; if((ti < dom->Gfx._inb) && (tj < dom->Gfx._jnb)) u[ti + tj*s1b + dom->Gfx._ksb*s2b] = u[ti + tj*s1b + dom->Gfx._ks*s2b]; } // u-velocity; bottom; Turbulent precursor __global__ void BC_u_B_T(real *u, dom_struct *dom, real* bc_b, real* bc_t) { int ti = blockDim.x*blockIdx.x + threadIdx.x; int tj = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfx._s1b; int s2b = dom->Gfx._s2b; if((ti < dom->Gfx._inb) && (tj < dom->Gfx._jnb)) { // copy u velocity on bottom from precursor to ghost point and first layer u[ti + tj*s1b + dom->Gfx._ksb*s2b] = bc_b[ti + tj*dom->Gfx.inb]; u[ti + tj*s1b + dom->Gfx._ks*s2b] = bc_t[ti + tj*dom->Gfx.inb]; } } // u-velocity; top; periodic __global__ void BC_u_T_P(real *u, dom_struct *dom) { int ti = blockDim.x*blockIdx.x + threadIdx.x; int tj = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfx._s1b; int s2b = dom->Gfx._s2b; if((ti < dom->Gfx._inb) && (tj < dom->Gfx._jnb)) u[ti + tj*s1b + (dom->Gfx._keb-1)*s2b] = u[ti + tj*s1b + dom->Gfx._ks*s2b]; } // u-velocity; top; Dirichlet __global__ void BC_u_T_D(real *u, dom_struct *dom, real bc) { int ti = blockDim.x*blockIdx.x + threadIdx.x; int tj = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfx._s1b; int s2b = dom->Gfx._s2b; if((ti < dom->Gfx._inb) && (tj < dom->Gfx._jnb)) u[ti + tj*s1b + (dom->Gfx._keb-1)*s2b] = 8./3. * bc - 2. * u[ti + tj*s1b + (dom->Gfx._ke-1)*s2b] + 1./3. * u[ti + tj*s1b + (dom->Gfx._ke-2)*s2b]; } // u-velocity; top; Neumann __global__ void BC_u_T_N(real *u, dom_struct *dom) { int ti = blockDim.x*blockIdx.x + threadIdx.x; int tj = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfx._s1b; int s2b = dom->Gfx._s2b; if((ti < dom->Gfx._inb) && (tj < dom->Gfx._jnb)) u[ti + tj*s1b + (dom->Gfx._keb-1)*s2b] = u[ti + tj*s1b + (dom->Gfx._ke-1)*s2b]; } // u-velocity; top; Turbulent precursor __global__ void BC_u_T_T(real *u, dom_struct *dom, real* bc_b, real* bc_t) { int ti = blockDim.x*blockIdx.x + threadIdx.x; int tj = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfx._s1b; int s2b = dom->Gfx._s2b; if((ti < dom->Gfx._inb) && (tj < dom->Gfx._jnb)) { u[ti + tj*s1b + (dom->Gfx._ke-1)*s2b] = bc_b[ti + tj*dom->Gfx.inb]; // velocity on ghost cell u[ti + tj*s1b + (dom->Gfx._keb-1)*s2b] = bc_t[ti + tj*dom->Gfx.inb]; } } // v-velocity; west; periodic __global__ void BC_v_W_P(real *v, dom_struct *dom) { int tj = blockDim.x*blockIdx.x + threadIdx.x; int tk = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfy._s1b; int s2b = dom->Gfy._s2b; if((tj < dom->Gfy._jnb) && (tk < dom->Gfy._knb)) v[dom->Gfy._isb + tj*s1b + tk*s2b] = v[(dom->Gfy._ie-1) + tj*s1b + tk*s2b]; } // v-velocity; west; Dirichlet __global__ void BC_v_W_D(real *v, dom_struct *dom, real bc) { int tj = blockDim.x*blockIdx.x + threadIdx.x; int tk = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfy._s1b; int s2b = dom->Gfy._s2b; if((tj < dom->Gfy._jnb) && (tk < dom->Gfy._knb)) { v[dom->Gfy._isb + tj*s1b + tk*s2b] = 8./3. * bc - 2. * v[dom->Gfy._is + tj*s1b + tk*s2b] + 1./3. 
* v[(dom->Gfy._is+1) + tj*s1b + tk*s2b]; } } // v-velocity; west; Neumann __global__ void BC_v_W_N(real *v, dom_struct *dom) { int tj = blockDim.x*blockIdx.x + threadIdx.x; int tk = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfy._s1b; int s2b = dom->Gfy._s2b; if((tj < dom->Gfy._jnb) && (tk < dom->Gfy._knb)) v[dom->Gfy._isb + tj*s1b + tk*s2b] = v[dom->Gfy._is + tj*s1b + tk*s2b]; } // v-velocity; west; Turbulent precursor __global__ void BC_v_W_T(real *v, dom_struct *dom, real* bc_w, real* bc_e) { int tj = blockDim.x*blockIdx.x + threadIdx.x; int tk = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfy._s1b; int s2b = dom->Gfy._s2b; if((tj < dom->Gfy._jnb) && (tk < dom->Gfy._knb)) { v[dom->Gfy._isb + tj*s1b + tk*s2b] = bc_w[tj + tk*dom->Gfy.jnb]; v[dom->Gfy._is + tj*s1b + tk*s2b] = bc_e[tj + tk*dom->Gfy.jnb]; } } // v-velocity; east; periodic __global__ void BC_v_E_P(real *v, dom_struct *dom) { int tj = blockDim.x*blockIdx.x + threadIdx.x; int tk = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfy._s1b; int s2b = dom->Gfy._s2b; if((tj < dom->Gfy._jnb) && (tk < dom->Gfy._knb)) v[(dom->Gfy._ieb-1) + tj*s1b + tk*s2b] = v[dom->Gfy._is + tj*s1b + tk*s2b]; } // v-velocity; east; Dirichlet __global__ void BC_v_E_D(real *v, dom_struct *dom, real bc) { int tj = blockDim.x*blockIdx.x + threadIdx.x; int tk = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfy._s1b; int s2b = dom->Gfy._s2b; if((tj < dom->Gfy._jnb) && (tk < dom->Gfy._knb)) { v[(dom->Gfy._ieb-1) + tj*s1b + tk*s2b] = 8./3. * bc - 2. * v[(dom->Gfy._ie-1) + tj*s1b + tk*s2b] + 1./3. * v[(dom->Gfy._ie-2) + tj*s1b + tk*s2b]; } } // v-velocity; east; Neumann __global__ void BC_v_E_N(real *v, dom_struct *dom) { int tj = blockDim.x*blockIdx.x + threadIdx.x; int tk = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfy._s1b; int s2b = dom->Gfy._s2b; if((tj < dom->Gfy._jnb) && (tk < dom->Gfy._knb)) v[(dom->Gfy._ieb-1) + tj*s1b + tk*s2b] = v[(dom->Gfy._ie-1) + tj*s1b + tk*s2b]; } // v-velocity; east; Turbulent precursor __global__ void BC_v_E_T(real *v, dom_struct *dom, real* bc_w, real* bc_e) { int tj = blockDim.x*blockIdx.x + threadIdx.x; int tk = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfy._s1b; int s2b = dom->Gfy._s2b; if((tj < dom->Gfy._jnb) && (tk < dom->Gfy._knb)) { v[(dom->Gfy._ie-1) + tj*s1b + tk*s2b] = bc_w[tj + tk*dom->Gfy.jnb]; //velocity on ghost cell v[dom->Gfy._ieb-1 + tj*s1b + tk*s2b] = bc_e[tj + tk*dom->Gfy.jnb]; } } // v-velocity; south; periodic __global__ void BC_v_S_P(real *v, dom_struct *dom) { int tk = blockDim.x*blockIdx.x + threadIdx.x; int ti = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfy._s1b; int s2b = dom->Gfy._s2b; if((ti < dom->Gfy._inb) && (tk < dom->Gfy._knb)) { v[ti + dom->Gfy._jsb*s1b + tk*s2b] = v[ti + (dom->Gfy._je-2)*s1b + tk*s2b]; v[ti + dom->Gfy._js*s1b + tk*s2b] = v[ti + (dom->Gfy._je-1)*s1b + tk*s2b]; } } // v-velocity; south; Dirichlet __global__ void BC_v_S_D(real *v, dom_struct *dom, real bc) { int tk = blockDim.x*blockIdx.x + threadIdx.x; int ti = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfy._s1b; int s2b = dom->Gfy._s2b; if((ti < dom->Gfy._inb) && (tk < dom->Gfy._knb)) { v[ti + dom->Gfy._jsb*s1b + tk*s2b] = 2. 
* bc - v[ti + (dom->Gfy._js+1)*s1b + tk*s2b]; v[ti + dom->Gfy._js*s1b + tk*s2b] = bc; } } // v-velocity; south; Neumann __global__ void BC_v_S_N(real *v, dom_struct *dom) { int tk = blockDim.x*blockIdx.x + threadIdx.x; int ti = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfy._s1b; int s2b = dom->Gfy._s2b; if((ti < dom->Gfy._inb) && (tk < dom->Gfy._knb)) v[ti + dom->Gfy._jsb*s1b + tk*s2b] = v[ti + dom->Gfy._js*s1b + tk*s2b]; } // v-velocity; south; Turbulent precursor __global__ void BC_v_S_T(real *v, dom_struct *dom, real* bc) { int tk = blockDim.x*blockIdx.x + threadIdx.x; int ti = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfy._s1b; int s2b = dom->Gfy._s2b; if((ti < dom->Gfy._inb) && (tk < dom->Gfy._knb)) { v[ti + dom->Gfy._jsb*s1b + tk*s2b] = 2. * bc[tk + ti*dom->Gfy.knb] - v[ti + (dom->Gfy._js+1)*s1b + tk*s2b]; v[ti + dom->Gfy._js*s1b + tk*s2b] = bc[tk + ti*dom->Gfy.knb]; } } // v-velocity; north; periodic __global__ void BC_v_N_P(real *v, dom_struct *dom) { int tk = blockDim.x*blockIdx.x + threadIdx.x; int ti = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfy._s1b; int s2b = dom->Gfy._s2b; if((ti < dom->Gfy._inb) && (tk < dom->Gfy._knb)) { v[ti + (dom->Gfy._jeb-1)*s1b + tk*s2b] = v[ti + (dom->Gfy._js+1)*s1b + tk*s2b]; v[ti + (dom->Gfy._je-1)*s1b + tk*s2b] = v[ti + dom->Gfy._js*s1b + tk*s2b]; } } // v-velocity; north; Dirichlet __global__ void BC_v_N_D(real *v, dom_struct *dom, real bc) { int tk = blockDim.x*blockIdx.x + threadIdx.x; int ti = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfy._s1b; int s2b = dom->Gfy._s2b; if((ti < dom->Gfy._inb) && (tk < dom->Gfy._knb)) { v[ti + (dom->Gfy._jeb-1)*s1b + tk*s2b] = 2. * bc - v[ti + (dom->Gfy._je-2)*s1b + tk*s2b]; v[ti + (dom->Gfy._je-1)*s1b + tk*s2b] = bc; } } // v-velocity; north; Neumann __global__ void BC_v_N_N(real *v, dom_struct *dom) { int tk = blockDim.x*blockIdx.x + threadIdx.x; int ti = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfy._s1b; int s2b = dom->Gfy._s2b; if((ti < dom->Gfy._inb) && (tk < dom->Gfy._knb)) v[ti + (dom->Gfy._jeb-1)*s1b + tk*s2b] = v[ti + (dom->Gfy._je-1)*s1b + tk*s2b]; } // v-velocity; north; Turbulent precursor __global__ void BC_v_N_T(real *v, dom_struct *dom, real* bc) { int tk = blockDim.x*blockIdx.x + threadIdx.x; int ti = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfy._s1b; int s2b = dom->Gfy._s2b; if((ti < dom->Gfy._inb) && (tk < dom->Gfy._knb)) { v[ti + (dom->Gfy._jeb-1)*s1b + tk*s2b] = 2. * bc[tk + ti*dom->Gfy.knb] - v[ti + (dom->Gfy._je-2)*s1b + tk*s2b]; v[ti + (dom->Gfy._je-1)*s1b + tk*s2b] = bc[tk + ti*dom->Gfy.knb]; } } // v-velocity; bottom; periodic __global__ void BC_v_B_P(real *v, dom_struct *dom) { int ti = blockDim.x*blockIdx.x + threadIdx.x; int tj = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfy._s1b; int s2b = dom->Gfy._s2b; if((ti < dom->Gfy._inb) && (tj < dom->Gfy._jnb)) v[ti + tj*s1b + dom->Gfy._ksb*s2b] = v[ti + tj*s1b + (dom->Gfy._ke-1)*s2b]; } // v-velocity; bottom; Dirichlet __global__ void BC_v_B_D(real *v, dom_struct *dom, real bc) { int ti = blockDim.x*blockIdx.x + threadIdx.x; int tj = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfy._s1b; int s2b = dom->Gfy._s2b; if((ti < dom->Gfy._inb) && (tj < dom->Gfy._jnb)) v[ti + tj*s1b + dom->Gfy._ksb*s2b] = 8./3. * bc - 2. * v[ti + tj*s1b + dom->Gfy._ks*s2b] + 1./3. 
* v[ti + tj*s1b + (dom->Gfy._ks+1)*s2b]; } // v-velocity; bottom; Neumann __global__ void BC_v_B_N(real *v, dom_struct *dom) { int ti = blockDim.x*blockIdx.x + threadIdx.x; int tj = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfy._s1b; int s2b = dom->Gfy._s2b; if((ti < dom->Gfy._inb) && (tj < dom->Gfy._jnb)) v[ti + tj*s1b + dom->Gfy._ksb*s2b] = v[ti + tj*s1b + dom->Gfy._ks*s2b]; } // v-velocity; bottom; Turbulent precursor __global__ void BC_v_B_T(real *v, dom_struct *dom, real* bc_b, real* bc_t) { int ti = blockDim.x*blockIdx.x + threadIdx.x; int tj = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfy._s1b; int s2b = dom->Gfy._s2b; if((ti < dom->Gfy._inb) && (tj < dom->Gfy._jnb)) { v[ti + tj*s1b + dom->Gfy._ksb*s2b] = bc_b[ti + tj*dom->Gfy.inb]; v[ti + tj*s1b + dom->Gfy._ks*s2b] = bc_t[ti + tj*dom->Gfy.inb]; } } // v-velocity; top; periodic __global__ void BC_v_T_P(real *v, dom_struct *dom) { int ti = blockDim.x*blockIdx.x + threadIdx.x; int tj = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfy._s1b; int s2b = dom->Gfy._s2b; if((ti < dom->Gfy._inb) && (tj < dom->Gfy._jnb)) v[ti + tj*s1b + (dom->Gfy._keb-1)*s2b] = v[ti + tj*s1b + dom->Gfy._ks*s2b]; } // v-velocity; top; Dirichlet __global__ void BC_v_T_D(real *v, dom_struct *dom, real bc) { int ti = blockDim.x*blockIdx.x + threadIdx.x; int tj = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfy._s1b; int s2b = dom->Gfy._s2b; if((ti < dom->Gfy._inb) && (tj < dom->Gfy._jnb)) v[ti + tj*s1b + (dom->Gfy._keb-1)*s2b] = 8./3. * bc - 2. * v[ti + tj*s1b + (dom->Gfy._ke-1)*s2b] + 1./3. * v[ti + tj*s1b + (dom->Gfy._ke-2)*s2b]; } // v-velocity; top; Neumann __global__ void BC_v_T_N(real *v, dom_struct *dom) { int ti = blockDim.x*blockIdx.x + threadIdx.x; int tj = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfy._s1b; int s2b = dom->Gfy._s2b; if((ti < dom->Gfy._inb) && (tj < dom->Gfy._jnb)) v[ti + tj*s1b + (dom->Gfy._keb-1)*s2b] = v[ti + tj*s1b + (dom->Gfy._ke-1)*s2b]; } // v-velocity; top; Turbulent precursor __global__ void BC_v_T_T(real *v, dom_struct *dom, real* bc_b, real* bc_t) { int ti = blockDim.x*blockIdx.x + threadIdx.x; int tj = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfy._s1b; int s2b = dom->Gfy._s2b; if((ti < dom->Gfy._inb) && (tj < dom->Gfy._jnb)) { v[ti + tj*s1b + (dom->Gfy._ke-1)*s2b] = bc_b[ti + tj*dom->Gfy.inb]; // velocity on ghost cell v[ti + tj*s1b + (dom->Gfy._keb-1)*s2b] = bc_t[ti + tj*dom->Gfy.inb]; } } // w-velocity; west; periodic __global__ void BC_w_W_P(real *w, dom_struct *dom) { int tj = blockDim.x*blockIdx.x + threadIdx.x; int tk = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfz._s1b; int s2b = dom->Gfz._s2b; if((tj < dom->Gfz._jnb) && (tk < dom->Gfz._knb)) w[dom->Gfz._isb + tj*s1b + tk*s2b] = w[(dom->Gfz._ie-1) + tj*s1b + tk*s2b]; } // w-velocity; west; Dirichlet __global__ void BC_w_W_D(real *w, dom_struct *dom, real bc) { int tj = blockDim.x*blockIdx.x + threadIdx.x; int tk = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfz._s1b; int s2b = dom->Gfz._s2b; if((tj < dom->Gfz._jnb) && (tk < dom->Gfz._knb)) w[dom->Gfz._isb + tj*s1b + tk*s2b] = 8./3. * bc - 2. * w[dom->Gfz._is + tj*s1b + tk*s2b] + 1./3. 
* w[(dom->Gfz._is+1) + tj*s1b + tk*s2b]; } // w-velocity; west; Neumann __global__ void BC_w_W_N(real *w, dom_struct *dom) { int tj = blockDim.x*blockIdx.x + threadIdx.x; int tk = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfz._s1b; int s2b = dom->Gfz._s2b; if((tj < dom->Gfz._jnb) && (tk < dom->Gfz._knb)) w[dom->Gfz._isb + tj*s1b + tk*s2b] = w[dom->Gfz._is + tj*s1b + tk*s2b]; } // w-velocity; west; Turbulent precursor __global__ void BC_w_W_T(real *w, dom_struct *dom, real* bc_w, real* bc_e) { int tj = blockDim.x*blockIdx.x + threadIdx.x; int tk = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfz._s1b; int s2b = dom->Gfz._s2b; if((tj < dom->Gfz._jnb) && (tk < dom->Gfz._knb)) { w[dom->Gfz._isb + tj*s1b + tk*s2b] = bc_w[tj + tk*dom->Gfz.jnb]; w[dom->Gfz._is + tj*s1b + tk*s2b] = bc_e[tj + tk*dom->Gfz.jnb]; } } // w-velocity; east; periodic __global__ void BC_w_E_P(real *w, dom_struct *dom) { int tj = blockDim.x*blockIdx.x + threadIdx.x; int tk = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfz._s1b; int s2b = dom->Gfz._s2b; if((tj < dom->Gfz._jnb) && (tk < dom->Gfz._knb)) w[(dom->Gfz._ieb-1) + tj*s1b + tk*s2b] = w[dom->Gfz._is + tj*s1b + tk*s2b]; } // w-velocity; east; Dirichlet __global__ void BC_w_E_D(real *w, dom_struct *dom, real bc) { int tj = blockDim.x*blockIdx.x + threadIdx.x; int tk = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfz._s1b; int s2b = dom->Gfz._s2b; if((tj < dom->Gfz._jnb) && (tk < dom->Gfz._knb)) w[(dom->Gfz._ieb-1) + tj*s1b + tk*s2b] = 8./3. * bc - 2. * w[(dom->Gfz._ie-1) + tj*s1b + tk*s2b] + 1./3. * w[(dom->Gfz._ie-2) + tj*s1b + tk*s2b]; } // w-velocity; east; Neumann __global__ void BC_w_E_N(real *w, dom_struct *dom) { int tj = blockDim.x*blockIdx.x + threadIdx.x; int tk = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfz._s1b; int s2b = dom->Gfz._s2b; if((tj < dom->Gfz._jnb) && (tk < dom->Gfz._knb)) w[(dom->Gfz._ieb-1) + tj*s1b + tk*s2b] = w[(dom->Gfz._ie-1) + tj*s1b + tk*s2b]; } // w-velocity; east; Turbulent precursor __global__ void BC_w_E_T(real *w, dom_struct *dom, real* bc_w, real* bc_e) { int tj = blockDim.x*blockIdx.x + threadIdx.x; int tk = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfz._s1b; int s2b = dom->Gfz._s2b; if((tj < dom->Gfz._jnb) && (tk < dom->Gfz._knb)) { w[(dom->Gfz._ie-1) + tj*s1b + tk*s2b] = bc_w[tj + tk*dom->Gfz.jnb]; //velocity on ghost cell w[(dom->Gfz._ieb-1) + tj*s1b + tk*s2b] = bc_e[tj + tk*dom->Gfz.jnb]; } } // w-velocity; south; periodic __global__ void BC_w_S_P(real *w, dom_struct *dom) { int tk = blockDim.x*blockIdx.x + threadIdx.x; int ti = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfz._s1b; int s2b = dom->Gfz._s2b; if((ti < dom->Gfz._inb) && (tk < dom->Gfz._knb)) { w[ti + dom->Gfz._jsb*s1b + tk*s2b] = w[ti + (dom->Gfz._je-1)*s1b + tk*s2b]; } } // w-velocity; south; Dirichlet __global__ void BC_w_S_D(real *w, dom_struct *dom, real bc) { int tk = blockDim.x*blockIdx.x + threadIdx.x; int ti = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfz._s1b; int s2b = dom->Gfz._s2b; if((ti < dom->Gfz._inb) && (tk < dom->Gfz._knb)) w[ti + dom->Gfz._jsb*s1b + tk*s2b] = 8./3. * bc - 2. * w[ti + dom->Gfz._js*s1b + tk*s2b] + 1./3. 
* w[ti + (dom->Gfz._js+1)*s1b + tk*s2b]; } // w-velocity; south; Neumann __global__ void BC_w_S_N(real *w, dom_struct *dom) { int tk = blockDim.x*blockIdx.x + threadIdx.x; int ti = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfz._s1b; int s2b = dom->Gfz._s2b; if((ti < dom->Gfz._inb) && (tk < dom->Gfz._knb)) w[ti + dom->Gfz._jsb*s1b + tk*s2b] = w[ti + dom->Gfz._js*s1b + tk*s2b]; } // w-velocity; south; Turbulent precursor __global__ void BC_w_S_T(real *w, dom_struct *dom, real* bc_s, real* bc_n) { int tk = blockDim.x*blockIdx.x + threadIdx.x; int ti = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfz._s1b; int s2b = dom->Gfz._s2b; if((ti < dom->Gfz._inb) && (tk < dom->Gfz._knb)) { w[ti + dom->Gfz._jsb*s1b + tk*s2b] = bc_s[tk + ti*dom->Gfz.knb]; w[ti + dom->Gfz._js*s1b + tk*s2b] = bc_n[tk + ti*dom->Gfz.knb]; } } // w-velocity; north; periodic __global__ void BC_w_N_P(real *w, dom_struct *dom) { int tk = blockDim.x*blockIdx.x + threadIdx.x; int ti = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfz._s1b; int s2b = dom->Gfz._s2b; if((ti < dom->Gfz._inb) && (tk < dom->Gfz._knb)) w[ti + (dom->Gfz._jeb-1)*s1b + tk*s2b] = w[ti + dom->Gfz._js*s1b + tk*s2b]; } // w-velocity; north; Dirichlet __global__ void BC_w_N_D(real *w, dom_struct *dom, real bc) { int tk = blockDim.x*blockIdx.x + threadIdx.x; int ti = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfz._s1b; int s2b = dom->Gfz._s2b; if((ti < dom->Gfz._inb) && (tk < dom->Gfz._knb)) w[ti + (dom->Gfz._jeb-1)*s1b + tk*s2b] = 8./3. * bc - 2. * w[ti + (dom->Gfz._je-1)*s1b + tk*s2b] + 1./3. * w[ti + (dom->Gfz._je-2)*s1b + tk*s2b]; } // w-velocity; north; Neumann __global__ void BC_w_N_N(real *w, dom_struct *dom) { int tk = blockDim.x*blockIdx.x + threadIdx.x; int ti = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfz._s1b; int s2b = dom->Gfz._s2b; if((ti < dom->Gfz._inb) && (tk < dom->Gfz._knb)) w[ti + (dom->Gfz._jeb-1)*s1b + tk*s2b] = w[ti + (dom->Gfz._je-1)*s1b + tk*s2b]; } // w-velocity; north; Turbulent precursor __global__ void BC_w_N_T(real *w, dom_struct *dom, real* bc_s, real* bc_n) { int tk = blockDim.x*blockIdx.x + threadIdx.x; int ti = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfz._s1b; int s2b = dom->Gfz._s2b; if((ti < dom->Gfz._inb) && (tk < dom->Gfz._knb)) { w[ti + (dom->Gfz._je-1)*s1b + tk*s2b] = bc_s[tk + ti*dom->Gfz.knb]; //velocity on ghost cell w[ti + (dom->Gfz._jeb-1)*s1b + tk*s2b] = bc_n[tk + ti*dom->Gfz.knb]; } } // w-velocity; bottom; periodic __global__ void BC_w_B_P(real *w, dom_struct *dom) { int ti = blockDim.x*blockIdx.x + threadIdx.x; int tj = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfz._s1b; int s2b = dom->Gfz._s2b; if((ti < dom->Gfz._inb) && (tj < dom->Gfz._jnb)) { w[ti + tj*s1b + dom->Gfz._ksb*s2b] = w[ti + tj*s1b + (dom->Gfz._ke-2)*s2b]; w[ti + tj*s1b + dom->Gfz._ks*s2b] = w[ti + tj*s1b + (dom->Gfz._ke-1)*s2b]; } } // w-velocity; bottom; Dirichlet __global__ void BC_w_B_D(real *w, dom_struct *dom, real bc) { int ti = blockDim.x*blockIdx.x + threadIdx.x; int tj = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfz._s1b; int s2b = dom->Gfz._s2b; if((ti < dom->Gfz._inb) && (tj < dom->Gfz._jnb)) { w[ti + tj*s1b + dom->Gfz._ksb*s2b] = 2. 
* bc - w[ti + tj*s1b + (dom->Gfz._ks+1)*s2b]; w[ti + tj*s1b + dom->Gfz._ks*s2b] = bc; } } // w-velocity; bottom; Neumann __global__ void BC_w_B_N(real *w, dom_struct *dom) { int ti = blockDim.x*blockIdx.x + threadIdx.x; int tj = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfz._s1b; int s2b = dom->Gfz._s2b; if((ti < dom->Gfz._inb) && (tj < dom->Gfz._jnb)) w[ti + tj*s1b + dom->Gfz._ksb*s2b] = w[ti + tj*s1b + dom->Gfz._ks*s2b]; } // w-velocity; bottom; Turbulent precursor __global__ void BC_w_B_T(real *w, dom_struct *dom, real* bc) { int ti = blockDim.x*blockIdx.x + threadIdx.x; int tj = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfz._s1b; int s2b = dom->Gfz._s2b; if((ti < dom->Gfz._inb) && (tj < dom->Gfz._jnb)) { w[ti + tj*s1b + dom->Gfz._ksb*s2b] = 2. * bc[ti + tj*dom->Gfz.inb] - w[ti + tj*s1b + (dom->Gfz._ks+1)*s2b]; w[ti + tj*s1b + dom->Gfz._ks*s2b] = bc[ti + tj*dom->Gfz.inb]; } } // w-velocity; top; periodic __global__ void BC_w_T_P(real *w, dom_struct *dom) { int ti = blockDim.x*blockIdx.x + threadIdx.x; int tj = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfz._s1b; int s2b = dom->Gfz._s2b; if((ti < dom->Gfz._inb) && (tj < dom->Gfz._jnb)) { w[ti + tj*s1b + (dom->Gfz._keb-1)*s2b] = w[ti + tj*s1b + (dom->Gfz._ks+1)*s2b]; w[ti + tj*s1b + (dom->Gfz._ke-1)*s2b] = w[ti + tj*s1b + dom->Gfz._ks*s2b]; } } // w-velocity; top; Dirichlet __global__ void BC_w_T_D(real *w, dom_struct *dom, real bc) { int ti = blockDim.x*blockIdx.x + threadIdx.x; int tj = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfz._s1b; int s2b = dom->Gfz._s2b; if((ti < dom->Gfz._inb) && (tj < dom->Gfz._jnb)) { w[ti + tj*s1b + (dom->Gfz._keb-1)*s2b] = 2. * bc - w[ti + tj*s1b + (dom->Gfz._ke-2)*s2b]; w[ti + tj*s1b + (dom->Gfz._ke-1)*s2b] = bc; } } // w-velocity; top; Neumann __global__ void BC_w_T_N(real *w, dom_struct *dom) { int ti = blockDim.x*blockIdx.x + threadIdx.x; int tj = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfz._s1b; int s2b = dom->Gfz._s2b; if((ti < dom->Gfz._inb) && (tj < dom->Gfz._jnb)) w[ti + tj*s1b + (dom->Gfz._keb-1)*s2b] = w[ti + tj*s1b + (dom->Gfz._ke-1)*s2b]; } // w-velocity; top; Turbulent precursor __global__ void BC_w_T_T(real *w, dom_struct *dom, real* bc) { int ti = blockDim.x*blockIdx.x + threadIdx.x; int tj = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfz._s1b; int s2b = dom->Gfz._s2b; if((ti < dom->Gfz._inb) && (tj < dom->Gfz._jnb)) { w[ti + tj*s1b + (dom->Gfz._keb-1)*s2b] = 2. 
* bc[ti + tj*dom->Gfz.inb] - w[ti + tj*s1b + (dom->Gfz._ke-2)*s2b]; w[ti + tj*s1b + (dom->Gfz._ke-1)*s2b] = bc[ti + tj*dom->Gfz.inb]; } } __global__ void project_u(real *u_star, real *p, real rho_f, real dt, real *u, dom_struct *dom, real ddx, int *flag_u, int *phase) { int tj = blockIdx.x * blockDim.x + threadIdx.x + DOM_BUF; int tk = blockIdx.y * blockDim.y + threadIdx.y + DOM_BUF; if(tj < dom->Gfx._je && tk < dom->Gfx._ke) { for(int i = dom->Gfx._is; i < dom->Gfx._ie; i++) { real gradPhi = abs(flag_u[i + tj*dom->Gfx._s1b + tk*dom->Gfx._s2b]) * ddx * (p[i + tj*dom->Gcc._s1b + tk*dom->Gcc._s2b] - p[(i-1) + tj*dom->Gcc._s1b + tk*dom->Gcc._s2b]); u[i + tj*dom->Gfx._s1b + tk*dom->Gfx._s2b] = (u_star[i + tj*dom->Gfx._s1b + tk*dom->Gfx._s2b] - dt / rho_f * gradPhi); } } } __global__ void project_v(real *v_star, real *p, real rho_f, real dt, real *v, dom_struct *dom, real ddy, int *flag_v, int *phase) { int tk = blockIdx.x * blockDim.x + threadIdx.x + DOM_BUF; int ti = blockIdx.y * blockDim.y + threadIdx.y + DOM_BUF; if(tk < dom->Gfy._ke && ti < dom->Gfy._ie) { for(int j = dom->Gfy._js; j < dom->Gfy._je; j++) { real gradPhi = abs(flag_v[ti + j*dom->Gfy._s1b + tk*dom->Gfy._s2b]) * ddy * (p[ti + j*dom->Gcc._s1b + tk*dom->Gcc._s2b] - p[ti + (j-1)*dom->Gcc._s1b + tk*dom->Gcc._s2b]); v[ti + j*dom->Gfy._s1b + tk*dom->Gfy._s2b] = (v_star[ti + j*dom->Gfy._s1b + tk*dom->Gfy._s2b] - dt / rho_f * gradPhi); } } } __global__ void project_w(real *w_star, real *p, real rho_f, real dt, real *w, dom_struct *dom, real ddz, int *flag_w, int *phase) { int ti = blockIdx.x * blockDim.x + threadIdx.x + DOM_BUF; int tj = blockIdx.y * blockDim.y + threadIdx.y + DOM_BUF; if(ti < dom->Gfz._ie && tj < dom->Gfz._je) { for(int k = dom->Gfz._ks; k < dom->Gfz._ke; k++) { real gradPhi = abs(flag_w[ti + tj*dom->Gfz._s1b + k*dom->Gfz._s2b]) * ddz * (p[ti + tj*dom->Gcc._s1b + k*dom->Gcc._s2b] - p[ti + tj*dom->Gcc._s1b + (k-1)*dom->Gcc._s2b]); w[ti + tj*dom->Gfz._s1b + k*dom->Gfz._s2b] = (w_star[ti + tj*dom->Gfz._s1b + k*dom->Gfz._s2b] - dt / rho_f * gradPhi); } } } __global__ void update_p_laplacian(real *Lp, real *p, dom_struct *dom) { int tj = blockIdx.x * blockDim.x + threadIdx.x + DOM_BUF; int tk = blockIdx.y * blockDim.y + threadIdx.y + DOM_BUF; if(tj < dom->Gcc._je && tk < dom->Gcc._ke) { for(int i = dom->Gcc._is; i < dom->Gcc._ie; i++) { int C = i + tj*dom->Gcc._s1b + tk*dom->Gcc._s2b; int W = (i-1) + tj*dom->Gcc._s1b + tk*dom->Gcc._s2b; int E = (i+1) + tj*dom->Gcc._s1b + tk*dom->Gcc._s2b; int S = i + (tj-1)*dom->Gcc._s1b + tk*dom->Gcc._s2b; int N = i + (tj+1)*dom->Gcc._s1b + tk*dom->Gcc._s2b; int B = i + tj*dom->Gcc._s1b + (tk-1)*dom->Gcc._s2b; int T = i + tj*dom->Gcc._s1b + (tk+1)*dom->Gcc._s2b; real ddpdxx = (p[E]-2.*p[C]+p[W])/dom->dx/dom->dx; real ddpdyy = (p[N]-2.*p[C]+p[S])/dom->dy/dom->dy; real ddpdzz = (p[T]-2.*p[C]+p[B])/dom->dz/dom->dz; Lp[C] = ddpdxx+ddpdyy+ddpdzz; } } } __global__ void update_p(real *Lp, real *p0, real *p, real *phi, dom_struct *dom, real nu, real dt, int *phase) { int tj = blockIdx.x * blockDim.x + threadIdx.x + DOM_BUF; int tk = blockIdx.y * blockDim.y + threadIdx.y + DOM_BUF; if(tj < dom->Gcc._je && tk < dom->Gcc._ke) { for(int i = dom->Gcc._is; i < dom->Gcc._ie; i++) { int C = i + tj*dom->Gcc._s1b + tk*dom->Gcc._s2b; p[C] = (phase[C] < 0) * (p0[C] + phi[C]);// - 0.5*nu*dt*Lp[C]); } } } __global__ void copy_p_ghost(real *p, real *p_tmp, dom_struct *dom) { int tj = blockIdx.x * blockDim.x + threadIdx.x; int tk = blockIdx.y * blockDim.y + threadIdx.y; if(tj < dom->Gcc.je-DOM_BUF && tk < 
dom->Gcc.ke-DOM_BUF) { for(int i = dom->Gcc.is-DOM_BUF; i < dom->Gcc.ie-DOM_BUF; i++) { p[(i+DOM_BUF) + (tj+DOM_BUF)*dom->Gcc.s1b + (tk+DOM_BUF)*dom->Gcc.s2b] = p_tmp[i + tj*dom->Gcc.s1 + tk*dom->Gcc.s2]; } } } __global__ void copy_p_noghost(real *p_noghost, real *p_ghost, dom_struct *dom) { int tj = blockIdx.x * blockDim.x + threadIdx.x; int tk = blockIdx.y * blockDim.y + threadIdx.y; if(tj < dom->Gcc.je-DOM_BUF && tk < dom->Gcc.ke-DOM_BUF) { for(int i = dom->Gcc.is-DOM_BUF; i < dom->Gcc.ie-DOM_BUF; i++) { p_noghost[i + tj*dom->Gcc._s1 + tk*dom->Gcc._s2] = p_ghost[(i+DOM_BUF) + (tj+DOM_BUF)*dom->Gcc._s1b + (tk+DOM_BUF)*dom->Gcc._s2b]; } } } __global__ void copy_u_ghost(real *u_ghost, real *u_noghost, dom_struct *dom) { int tj = blockIdx.x * blockDim.x + threadIdx.x; int tk = blockIdx.y * blockDim.y + threadIdx.y; if(tj < dom->Gfx.je-DOM_BUF && tk < dom->Gfx.ke-DOM_BUF) { for(int i = dom->Gfx.is-DOM_BUF; i < dom->Gfx.ie-DOM_BUF; i++) { u_ghost[(i+DOM_BUF) + (tj+DOM_BUF)*dom->Gfx._s1b + (tk+DOM_BUF)*dom->Gfx._s2b] = u_noghost[i + tj*dom->Gfx._s1 + tk*dom->Gfx._s2]; } } } __global__ void copy_u_noghost(real *u_noghost, real *u_ghost, dom_struct *dom) { int tj = blockIdx.x * blockDim.x + threadIdx.x; int tk = blockIdx.y * blockDim.y + threadIdx.y; if(tj < dom->Gfx.je-DOM_BUF && tk < dom->Gfx.ke-DOM_BUF) { for(int i = dom->Gfx.is-DOM_BUF; i < dom->Gfx.ie-DOM_BUF; i++) { u_noghost[i + tj*dom->Gfx._s1 + tk*dom->Gfx._s2] = u_ghost[(i+DOM_BUF) + (tj+DOM_BUF)*dom->Gfx._s1b + (tk+DOM_BUF)*dom->Gfx._s2b]; } } } __global__ void copy_v_ghost(real *v_ghost, real *v_noghost, dom_struct *dom) { int tk = blockIdx.x * blockDim.x + threadIdx.x; int ti = blockIdx.y * blockDim.y + threadIdx.y; if(tk < dom->Gfy.ke-DOM_BUF && ti < dom->Gfy.ie-DOM_BUF) { for(int j = dom->Gfy.js-DOM_BUF; j < dom->Gfy.je-DOM_BUF; j++) { v_ghost[(ti+DOM_BUF) + (j+DOM_BUF)*dom->Gfy._s1b + (tk+DOM_BUF)*dom->Gfy._s2b] = v_noghost[ti + j*dom->Gfy._s1 + tk*dom->Gfy._s2]; } } } __global__ void copy_v_noghost(real *v_noghost, real *v_ghost, dom_struct *dom) { int tk = blockIdx.x * blockDim.x + threadIdx.x; int ti = blockIdx.y * blockDim.y + threadIdx.y; if(tk < dom->Gfy.ke-DOM_BUF && ti < dom->Gfy.ie-DOM_BUF) { for(int j = dom->Gfy.js-DOM_BUF; j < dom->Gfy.je-DOM_BUF; j++) { v_noghost[ti + j*dom->Gfy._s1 + tk*dom->Gfy._s2] = v_ghost[(ti+DOM_BUF) + (j+DOM_BUF)*dom->Gfy._s1b + (tk+DOM_BUF)*dom->Gfy._s2b]; } } } __global__ void copy_w_ghost(real *w_ghost, real *w_noghost, dom_struct *dom) { int ti = blockIdx.x * blockDim.x + threadIdx.x; int tj = blockIdx.y * blockDim.y + threadIdx.y; if(ti < dom->Gfz.ie-DOM_BUF && tj < dom->Gfz.je-DOM_BUF) { for(int k = dom->Gfz.ks-DOM_BUF; k < dom->Gfz.ke-DOM_BUF; k++) { w_ghost[(ti+DOM_BUF) + (tj+DOM_BUF)*dom->Gfz._s1b + (k+DOM_BUF)*dom->Gfz._s2b] = w_noghost[ti + tj*dom->Gfz._s1 + k*dom->Gfz._s2]; } } } __global__ void copy_w_noghost(real *w_noghost, real *w_ghost, dom_struct *dom) { int ti = blockIdx.x * blockDim.x + threadIdx.x; int tj = blockIdx.y * blockDim.y + threadIdx.y; if(ti < dom->Gfz.ie-DOM_BUF && tj < dom->Gfz.je-DOM_BUF) { for(int k = dom->Gfz.ks-DOM_BUF; k < dom->Gfz.ke-DOM_BUF; k++) { w_noghost[ti + tj*dom->Gfz._s1 + k*dom->Gfz._s2] = w_ghost[(ti+DOM_BUF) + (tj+DOM_BUF)*dom->Gfz._s1b + (k+DOM_BUF)*dom->Gfz._s2b]; } } } __global__ void copy_u_fluid(real *u_noghost, real *u_ghost, int *phase, dom_struct *dom) { int tj = blockIdx.x * blockDim.x + threadIdx.x; int tk = blockIdx.y * blockDim.y + threadIdx.y; if(tj < dom->Gfx.je-DOM_BUF && tk < dom->Gfx.ke-DOM_BUF) { for(int i = 
dom->Gfx.is-DOM_BUF; i < dom->Gfx.ie-DOM_BUF; i++) { int boo = 1; if(phase[(i+DOM_BUF-1) + (tj+DOM_BUF)*dom->Gcc._s1b + (tk+DOM_BUF)*dom->Gcc._s2b] > -1) boo = 0; else if(phase[(i+DOM_BUF) + (tj+DOM_BUF)*dom->Gcc._s1b + (tk+DOM_BUF)*dom->Gcc._s2b] > -1) boo = 0; u_noghost[i + tj*dom->Gfx._s1 + tk*dom->Gfx._s2] = boo * u_ghost[(i+DOM_BUF) + (tj+DOM_BUF)*dom->Gfx._s1b + (tk+DOM_BUF)*dom->Gfx._s2b]; } } } __global__ void copy_v_fluid(real *v_noghost, real *v_ghost, int *phase, dom_struct *dom) { int tk = blockIdx.x * blockDim.x + threadIdx.x; int ti = blockIdx.y * blockDim.y + threadIdx.y; if(tk < dom->Gfy.ke-DOM_BUF && ti < dom->Gfy.ie-DOM_BUF) { for(int j = dom->Gfy.js-DOM_BUF; j < dom->Gfy.je-DOM_BUF; j++) { int boo = 1; if(phase[(ti+DOM_BUF) + (j+DOM_BUF-1)*dom->Gcc._s1b + (tk+DOM_BUF)*dom->Gcc._s2b] > -1) boo = 0; else if(phase[(ti+DOM_BUF) + (j+DOM_BUF)*dom->Gcc._s1b + (tk+DOM_BUF)*dom->Gcc._s2b] > -1) boo = 0; v_noghost[ti + j*dom->Gfy._s1 + tk*dom->Gfy._s2] = boo * v_ghost[(ti+DOM_BUF) + (j+DOM_BUF)*dom->Gfy._s1b + (tk+DOM_BUF)*dom->Gfy._s2b]; } } } __global__ void copy_w_fluid(real *w_noghost, real *w_ghost, int *phase, dom_struct *dom) { int ti = blockIdx.x * blockDim.x + threadIdx.x; int tj = blockIdx.y * blockDim.y + threadIdx.y; if(ti < dom->Gfz.ie-DOM_BUF && tj < dom->Gfz.je-DOM_BUF) { for(int k = dom->Gfz.ks-DOM_BUF; k < dom->Gfz.ke-DOM_BUF; k++) { int boo = 1; if(phase[(ti+DOM_BUF) + (tj+DOM_BUF)*dom->Gcc._s1b + (k+DOM_BUF-1)*dom->Gcc._s2b] > -1) boo = 0; else if(phase[(ti+DOM_BUF) + (tj+DOM_BUF)*dom->Gcc._s1b + (k+DOM_BUF)*dom->Gcc._s2b] > -1) boo = 0; w_noghost[ti + tj*dom->Gfz._s1 + k*dom->Gfz._s2] = boo * w_ghost[(ti+DOM_BUF) + (tj+DOM_BUF)*dom->Gfz._s1b + (k+DOM_BUF)*dom->Gfz._s2b]; } } } #ifndef IMPLICIT __global__ void u_star_2(real rho_f, real nu, real *u0, real *v0, real *w0, real *p, real *f, real *diff0, real *conv0, real *diff, real *conv, real *u_star, dom_struct *dom, real dt0, real dt, int *phase) { // create shared memory // no reason to load pressure into shared memory, but leaving it in global // will require additional if statements, so keep it in shared __shared__ real s_u0[MAX_THREADS_DIM * MAX_THREADS_DIM]; // u back __shared__ real s_u1[MAX_THREADS_DIM * MAX_THREADS_DIM]; // u center __shared__ real s_u2[MAX_THREADS_DIM * MAX_THREADS_DIM]; // u forward __shared__ real s_v01[MAX_THREADS_DIM * MAX_THREADS_DIM]; // v back __shared__ real s_v12[MAX_THREADS_DIM * MAX_THREADS_DIM]; // v forward __shared__ real s_w01[MAX_THREADS_DIM * MAX_THREADS_DIM]; // w back __shared__ real s_w12[MAX_THREADS_DIM * MAX_THREADS_DIM]; // w forward __shared__ real s_d[MAX_THREADS_DIM * MAX_THREADS_DIM]; // diff __shared__ real s_c[MAX_THREADS_DIM * MAX_THREADS_DIM]; // conv __shared__ real s_u_star[MAX_THREADS_DIM * MAX_THREADS_DIM]; // solution // working constants real ab0 = 0.5 * dt / dt0; // for Adams-Bashforth stepping real ab = 1. + ab0; // for Adams-Bashforth stepping real ddx = 1. / dom->dx; // to limit the number of divisions needed real ddy = 1. / dom->dy; // to limit the number of divisions needed real ddz = 1. 
/ dom->dz; // to limit the number of divisions needed // loop over u-planes for(int i = dom->Gfx._is; i < dom->Gfx._ie; i++) { // subdomain indices // the extra 2*blockIdx.X terms implement the necessary overlapping of // shared memory blocks in the subdomain int j = blockIdx.x*blockDim.x + threadIdx.x - 2*blockIdx.x; int k = blockIdx.y*blockDim.y + threadIdx.y - 2*blockIdx.y; // shared memory indices int tj = threadIdx.x; int tk = threadIdx.y; // load shared memory // TODO: look into the effect of removing these if statements and simply // allowing memory overruns for threads that don't matter for particular // discretizations // TODO: THIS CAN BE FIXED BY PADDING ALL OF THESE ARRAYS WHEN COPYING FROM // HOST TO DEVICE if((k >= dom->Gfx._ksb && k < dom->Gfx._keb) && (j >= dom->Gfx._jsb && j < dom->Gfx._jeb)) { s_u0[tj + tk*blockDim.x] = u0[(i-1) + j*dom->Gfx._s1b + k*dom->Gfx._s2b]; s_u1[tj + tk*blockDim.x] = u0[i + j*dom->Gfx._s1b + k*dom->Gfx._s2b]; s_u2[tj + tk*blockDim.x] = u0[(i+1) + j*dom->Gfx._s1b + k*dom->Gfx._s2b]; } if((k >= dom->Gfy._ksb && k < dom->Gfy._keb) && (j >= dom->Gfy._jsb && j < dom->Gfy._jeb)) { s_v01[tj + tk*blockDim.x] = v0[(i-1) + j*dom->Gfy._s1b + k*dom->Gfy._s2b]; s_v12[tj + tk*blockDim.x] = v0[i + j*dom->Gfy._s1b + k*dom->Gfy._s2b]; } if((k >= dom->Gfz._ksb && k < dom->Gfz._keb) && (j >= dom->Gfz._jsb && j < dom->Gfz._jeb)) { s_w01[tj + tk*blockDim.x] = w0[(i-1) + j*dom->Gfz._s1b + k*dom->Gfz._s2b]; s_w12[tj + tk*blockDim.x] = w0[i + j*dom->Gfz._s1b + k*dom->Gfz._s2b]; } s_u_star[tj + tk*blockDim.x] = 0.0; // make sure all threads complete shared memory copy __syncthreads(); // compute right-hand side // if off the shared memory block boundary if((tj > 0 && tj < blockDim.x-1) && (tk > 0 && tk < blockDim.y-1) && j < dom->Gfx.jeb && k < dom->Gfx.keb) { // pressure gradient s_u_star[tj + tk*blockDim.x] = (p[(i-1) + j*dom->Gcc._s1b + k*dom->Gcc._s2b] - p[i + j*dom->Gcc._s1b + k*dom->Gcc._s2b]) * ddx / rho_f; // grab the required data points for calculations real u011 = s_u0[tj + tk*blockDim.x]; real u111 = s_u1[tj + tk*blockDim.x]; real u211 = s_u2[tj + tk*blockDim.x]; real u101 = s_u1[(tj-1) + tk*blockDim.x]; real u121 = s_u1[(tj+1) + tk*blockDim.x]; real v011 = s_v01[tj + tk*blockDim.x]; real v111 = s_v12[tj + tk*blockDim.x]; real v021 = s_v01[(tj+1) + tk*blockDim.x]; real v121 = s_v12[(tj+1) + tk*blockDim.x]; real u110 = s_u1[tj + (tk-1)*blockDim.x]; real u112 = s_u1[tj + (tk+1)*blockDim.x]; real w011 = s_w01[tj + tk*blockDim.x]; real w111 = s_w12[tj + tk*blockDim.x]; real w012 = s_w01[tj + (tk+1)*blockDim.x]; real w112 = s_w12[tj + (tk+1)*blockDim.x]; // compute convection term (Adams-Bashforth stepping) real duudx = (u211 + u111)*(u211 + u111) - (u111 + u011)*(u111 + u011); duudx *= 0.25 * ddx; real duvdy = (u121 + u111)*(v121 + v021) - (u111 + u101)*(v111 + v011); duvdy *= 0.25 * ddy; real duwdz = (u112 + u111)*(w112 + w012) - (u111 + u110)*(w111 + w011); duwdz *= 0.25 * ddz; s_c[tj + tk*blockDim.x] = duudx + duvdy + duwdz; // convection term sums into right-hand side #ifndef STOKESFLOW if(dt0 > 0) // Adams-Bashforth s_u_star[tj + tk*blockDim.x] += (-ab * s_c[tj + tk*blockDim.x] + ab0 * conv0[i + j*dom->Gfx._s1b + k*dom->Gfx._s2b]); else // forward Euler s_u_star[tj + tk*blockDim.x] += -s_c[tj + tk*blockDim.x]; #endif // compute diffusion term (Adams-Bashforth stepping) real dud1 = (u211 - u111) * ddx; real dud0 = (u111 - u011) * ddx; real ddudxx = (dud1 - dud0) * ddx; dud1 = (u121 - u111) * ddy; dud0 = (u111 - u101) * ddy; real ddudyy = (dud1 - dud0) * ddy; 
dud1 = (u112 - u111) * ddz; dud0 = (u111 - u110) * ddz; real ddudzz = (dud1 - dud0) * ddz; s_d[tj + tk*blockDim.x] = nu * (ddudxx + ddudyy + ddudzz); // diffusive term sums into right-hand side if(dt0 > 0) // Adams-Bashforth s_u_star[tj + tk*blockDim.x] += (ab * s_d[tj + tk*blockDim.x] - ab0 * diff0[i + j*dom->Gfx._s1b + k*dom->Gfx._s2b]); else s_u_star[tj + tk*blockDim.x] += s_d[tj + tk*blockDim.x]; // add on imposed pressure gradient s_u_star[tj + tk*blockDim.x] += f[i + j*dom->Gfx._s1b + k*dom->Gfx._s2b]; // multiply by dt s_u_star[tj + tk*blockDim.x] *= dt; // velocity term sums into right-hand side s_u_star[tj + tk*blockDim.x] += u111; // zero contribution inside particles s_u_star[tj + tk*blockDim.x] *= (phase[(i-1) + j*dom->Gcc._s1b + k*dom->Gcc._s2b] < 0 && phase[i + j*dom->Gcc._s1b + k*dom->Gcc._s2b] < 0); } // make sure all threads complete computations __syncthreads(); // copy shared memory back to global if((k >= dom->Gfx._ks && k < dom->Gfx._ke) && (j >= dom->Gfx._js && j < dom->Gfx._je) && (tj > 0 && tj < (blockDim.x-1)) && (tk > 0 && tk < (blockDim.y-1))) { u_star[i + j*dom->Gfx._s1b + k*dom->Gfx._s2b] = s_u_star[tj + tk*blockDim.x]; conv[i + j*dom->Gfx._s1b + k*dom->Gfx._s2b] = s_c[tj + tk*blockDim.x]; diff[i + j*dom->Gfx._s1b + k*dom->Gfx._s2b] = s_d[tj + tk*blockDim.x]; } } } #endif #ifndef IMPLICIT __global__ void v_star_2(real rho_f, real nu, real *u0, real *v0, real *w0, real *p, real *f, real *diff0, real *conv0, real *diff, real *conv, real *v_star, dom_struct *dom, real dt0, real dt, int *phase) { // create shared memory // no reason to load pressure into shared memory, but leaving it in global // will require additional if statements, so keep it in shared __shared__ real s_v0[MAX_THREADS_DIM * MAX_THREADS_DIM]; // v back __shared__ real s_v1[MAX_THREADS_DIM * MAX_THREADS_DIM]; // v center __shared__ real s_v2[MAX_THREADS_DIM * MAX_THREADS_DIM]; // v forward __shared__ real s_w01[MAX_THREADS_DIM * MAX_THREADS_DIM]; // w back __shared__ real s_w12[MAX_THREADS_DIM * MAX_THREADS_DIM]; // w forward __shared__ real s_u01[MAX_THREADS_DIM * MAX_THREADS_DIM]; // u back __shared__ real s_u12[MAX_THREADS_DIM * MAX_THREADS_DIM]; // u forward __shared__ real s_d[MAX_THREADS_DIM * MAX_THREADS_DIM]; // diff __shared__ real s_c[MAX_THREADS_DIM * MAX_THREADS_DIM]; // conv __shared__ real s_v_star[MAX_THREADS_DIM * MAX_THREADS_DIM]; // solution // working constants real ab0 = 0.5 * dt / dt0; // for Adams-Bashforth stepping real ab = 1. + ab0; // for Adams-Bashforth stepping real ddx = 1. / dom->dx; // to limit the number of divisions needed real ddy = 1. / dom->dy; // to limit the number of divisions needed real ddz = 1. 
/ dom->dz; // to limit the number of divisions needed // loop over v-planes for(int j = dom->Gfy._js; j < dom->Gfy._je; j++) { // subdomain indices // the extra 2*blockIdx.X terms implement the necessary overlapping of // shared memory blocks in the subdomain int k = blockIdx.x*blockDim.x + threadIdx.x - 2*blockIdx.x; int i = blockIdx.y*blockDim.y + threadIdx.y - 2*blockIdx.y; // shared memory indices int tk = threadIdx.x; int ti = threadIdx.y; // load shared memory // TODO: look into the effect of removing these if statements and simply // allowing memory overruns for threads that don't matter for particular // discretizations if((i >= dom->Gfy._isb && i < dom->Gfy._ieb) && (k >= dom->Gfy._ksb && k < dom->Gfy._keb)) { s_v0[tk + ti*blockDim.x] = v0[i + (j-1)*dom->Gfy._s1b + k*dom->Gfy._s2b]; s_v1[tk + ti*blockDim.x] = v0[i + j*dom->Gfy._s1b + k*dom->Gfy._s2b]; s_v2[tk + ti*blockDim.x] = v0[i + (j+1)*dom->Gfy._s1b + k*dom->Gfy._s2b]; } if((i >= dom->Gfz._isb && i < dom->Gfz._ieb) && (k >= dom->Gfz._ksb && k < dom->Gfz._keb)) { s_w01[tk + ti*blockDim.x] = w0[i + (j-1)*dom->Gfz._s1b + k*dom->Gfz._s2b]; s_w12[tk + ti*blockDim.x] = w0[i + j*dom->Gfz._s1b + k*dom->Gfz._s2b]; } if((i >= dom->Gfx._isb && i < dom->Gfx._ieb) && (k >= dom->Gfx._ksb && k < dom->Gfx._keb)) { s_u01[tk + ti*blockDim.x] = u0[i + (j-1)*dom->Gfx._s1b + k*dom->Gfx._s2b]; s_u12[tk + ti*blockDim.x] = u0[i + j*dom->Gfx._s1b + k*dom->Gfx._s2b]; } s_v_star[tk + ti*blockDim.x] = 0.0; // make sure all threads complete shared memory copy __syncthreads(); // compute right-hand side // if off the shared memory block boundary if((tk > 0 && tk < blockDim.x-1) && (ti > 0 && ti < blockDim.y-1) && k < dom->Gfy.keb && i < dom->Gfy.ieb) { // pressure gradient s_v_star[tk + ti*blockDim.x] = (p[i + (j-1)*dom->Gcc._s1b + k*dom->Gcc._s2b] - p[i + j*dom->Gcc._s1b + k*dom->Gcc._s2b]) * ddy / rho_f; // grab the required data points for calculations real v101 = s_v0[tk + ti*blockDim.x]; real v111 = s_v1[tk + ti*blockDim.x]; real v121 = s_v2[tk + ti*blockDim.x]; real v110 = s_v1[(tk-1) + ti*blockDim.x]; real v112 = s_v1[(tk+1) + ti*blockDim.x]; real w101 = s_w01[tk + ti*blockDim.x]; real w111 = s_w12[tk + ti*blockDim.x]; real w102 = s_w01[(tk+1) + ti*blockDim.x]; real w112 = s_w12[(tk+1) + ti*blockDim.x]; real v011 = s_v1[tk + (ti-1)*blockDim.x]; real v211 = s_v1[tk + (ti+1)*blockDim.x]; real u101 = s_u01[tk + ti*blockDim.x]; real u111 = s_u12[tk + ti*blockDim.x]; real u201 = s_u01[tk + (ti+1)*blockDim.x]; real u211 = s_u12[tk + (ti+1)*blockDim.x]; // compute convection term (Adams-Bashforth stepping) real dvudx = (v211 + v111)*(u211 + u201) - (v111 + v011)*(u111 + u101); dvudx *= 0.25 * ddx; real dvvdy = (v121 + v111)*(v121 + v111) - (v111 + v101)*(v111 + v101); dvvdy *= 0.25 * ddy; real dvwdz = (v112 + v111)*(w112 + w102) - (v111 + v110)*(w111 + w101); dvwdz *= 0.25 * ddz; s_c[tk + ti*blockDim.x] = dvudx + dvvdy + dvwdz; // convection term sums into right-hand side #ifndef STOKESFLOW if(dt0 > 0) // Adams-Bashforth s_v_star[tk + ti*blockDim.x] += (-ab * s_c[tk + ti*blockDim.x] + ab0 * conv0[i + j*dom->Gfy._s1b + k*dom->Gfy._s2b]); else s_v_star[tk + ti*blockDim.x] += -s_c[tk + ti*blockDim.x]; #endif // compute diffusive term real dvd1 = (v211 - v111) * ddx; real dvd0 = (v111 - v011) * ddx; real ddvdxx = (dvd1 - dvd0) * ddx; dvd1 = (v121 - v111) * ddy; dvd0 = (v111 - v101) * ddy; real ddvdyy = (dvd1 - dvd0) * ddy; dvd1 = (v112 - v111) * ddz; dvd0 = (v111 - v110) * ddz; real ddvdzz = (dvd1 - dvd0) * ddz; s_d[tk + ti*blockDim.x] = nu * (ddvdxx + ddvdyy 
+ ddvdzz); // diffusive term sums into right-hand side if(dt0 > 0) // Adams-Bashforth s_v_star[tk + ti*blockDim.x] += (ab * s_d[tk + ti*blockDim.x] - ab0 * diff0[i + j*dom->Gfy._s1b + k*dom->Gfy._s2b]); else s_v_star[tk + ti*blockDim.x] += s_d[tk + ti*blockDim.x]; // add on imposed pressure gradient s_v_star[tk + ti*blockDim.x] += f[i + j*dom->Gfy._s1b + k*dom->Gfy._s2b]; // multiply by dt s_v_star[tk + ti*blockDim.x] *= dt; // velocity term sums into right-hand side s_v_star[tk + ti*blockDim.x] += v111; // zero contribution inside particles s_v_star[tk + ti*blockDim.x] *= (phase[i + (j-1)*dom->Gcc._s1b + k*dom->Gcc._s2b] < 0 && phase[i + j*dom->Gcc._s1b + k*dom->Gcc._s2b] < 0); } // make sure all threads complete computations __syncthreads(); // copy shared memory back to global if((i >= dom->Gfy._is && i < dom->Gfy._ie) && (k >= dom->Gfy._ks && k < dom->Gfy._ke) && (tk > 0 && tk < (blockDim.x-1)) && (ti > 0 && ti < (blockDim.y-1))) { v_star[i+ j*dom->Gfy._s1b + k*dom->Gfy._s2b] = s_v_star[tk + ti*blockDim.x]; conv[i + j*dom->Gfy._s1b + k*dom->Gfy._s2b] = s_c[tk + ti*blockDim.x]; diff[i + j*dom->Gfy._s1b + k*dom->Gfy._s2b] = s_d[tk + ti*blockDim.x]; } } } #endif #ifndef IMPLICIT __global__ void w_star_2(real rho_f, real nu, real *u0, real *v0, real *w0, real *p, real *f, real *diff0, real *conv0, real *diff, real *conv, real *w_star, dom_struct *dom, real dt0, real dt, int *phase) { // create shared memory // no reason to load pressure into shared memory, but leaving it in global // will require additional if statements, so keep it in shared __shared__ real s_w0[MAX_THREADS_DIM * MAX_THREADS_DIM]; // w back __shared__ real s_w1[MAX_THREADS_DIM * MAX_THREADS_DIM]; // w center __shared__ real s_w2[MAX_THREADS_DIM * MAX_THREADS_DIM]; // w forward __shared__ real s_u01[MAX_THREADS_DIM * MAX_THREADS_DIM]; // u back __shared__ real s_u12[MAX_THREADS_DIM * MAX_THREADS_DIM]; // u forward __shared__ real s_v01[MAX_THREADS_DIM * MAX_THREADS_DIM]; // v back __shared__ real s_v12[MAX_THREADS_DIM * MAX_THREADS_DIM]; // v forward __shared__ real s_d[MAX_THREADS_DIM * MAX_THREADS_DIM]; // diff0 __shared__ real s_c[MAX_THREADS_DIM * MAX_THREADS_DIM]; // conv0 __shared__ real s_w_star[MAX_THREADS_DIM * MAX_THREADS_DIM]; // solution // working constants real ab0 = 0.5 * dt / dt0; // for Adams-Bashforth stepping real ab = 1. + ab0; // for Adams-Bashforth stepping real ddx = 1. / dom->dx; // to limit the number of divisions needed real ddy = 1. / dom->dy; // to limit the number of divisions needed real ddz = 1. 
/ dom->dz; // to limit the number of divisions needed // loop over w-planes for(int k = dom->Gfz._ks; k < dom->Gfz._ke; k++) { // subdomain indices // the extra 2*blockIdx.X terms implement the necessary overlapping of // shared memory blocks in the subdomain int i = blockIdx.x*blockDim.x + threadIdx.x - 2*blockIdx.x; int j = blockIdx.y*blockDim.y + threadIdx.y - 2*blockIdx.y; // shared memory indices int ti = threadIdx.x; int tj = threadIdx.y; // load shared memory // TODO: look into the effect of removing these if statements and simply // allowing memory overruns for threads that don't matter for particular // discretizations if((j >= dom->Gfz._jsb && j < dom->Gfz._jeb) && (i >= dom->Gfz._isb && i < dom->Gfz._ieb)) { s_w0[ti + tj*blockDim.x] = w0[i + j*dom->Gfz._s1b + (k-1)*dom->Gfz._s2b]; s_w1[ti + tj*blockDim.x] = w0[i + j*dom->Gfz._s1b + k*dom->Gfz._s2b]; s_w2[ti + tj*blockDim.x] = w0[i + j*dom->Gfz._s1b + (k+1)*dom->Gfz._s2b]; } if((j >= dom->Gfx._jsb && j < dom->Gfx._jeb) && (i >= dom->Gfx._isb && i < dom->Gfx._ieb)) { s_u01[ti + tj*blockDim.x] = u0[i + j*dom->Gfx._s1b + (k-1)*dom->Gfx._s2b]; s_u12[ti + tj*blockDim.x] = u0[i + j*dom->Gfx._s1b + k*dom->Gfx._s2b]; } if((j >= dom->Gfy._jsb && j < dom->Gfy._jeb) && (i >= dom->Gfy._isb && i < dom->Gfy._ieb)) { s_v01[ti + tj*blockDim.x] = v0[i + j*dom->Gfy._s1b + (k-1)*dom->Gfy._s2b]; s_v12[ti + tj*blockDim.x] = v0[i + j*dom->Gfy._s1b + k*dom->Gfy._s2b]; } s_w_star[ti + tj*blockDim.x] = 0.0; // make sure all threads complete shared memory copy __syncthreads(); // compute right-hand side // if off the shared memory block boundary if((ti > 0 && ti < blockDim.x-1) && (tj > 0 && tj < blockDim.y-1) && i < dom->Gfz.ieb && j < dom->Gfz.jeb) { // pressure gradient s_w_star[ti + tj*blockDim.x] = (p[i + j*dom->Gcc._s1b + (k-1)*dom->Gcc._s2b] - p[i + j*dom->Gcc._s1b + k*dom->Gcc._s2b]) * ddz / rho_f; // grab the required data points for calculations real w110 = s_w0[ti + tj*blockDim.x]; real w111 = s_w1[ti + tj*blockDim.x]; real w112 = s_w2[ti + tj*blockDim.x]; real w011 = s_w1[(ti-1) + tj*blockDim.x]; real w211 = s_w1[(ti+1) + tj*blockDim.x]; real u110 = s_u01[ti + tj*blockDim.x]; real u111 = s_u12[ti + tj*blockDim.x]; real u210 = s_u01[(ti+1) + tj*blockDim.x]; real u211 = s_u12[(ti+1) + tj*blockDim.x]; real w101 = s_w1[ti + (tj-1)*blockDim.x]; real w121 = s_w1[ti + (tj+1)*blockDim.x]; real v110 = s_v01[ti + tj*blockDim.x]; real v111 = s_v12[ti + tj*blockDim.x]; real v120 = s_v01[ti + (tj+1)*blockDim.x]; real v121 = s_v12[ti + (tj+1)*blockDim.x]; // compute convection term (Adams-Bashforth stepping) real dwudx = (w211 + w111)*(u211 + u210) - (w111 + w011)*(u111 + u110); dwudx *= 0.25 * ddx; real dwvdy = (w121 + w111)*(v121 + v120) - (w111 + w101)*(v111 + v110); dwvdy *= 0.25 * ddy; real dwwdz = (w112 + w111)*(w112 + w111) - (w111 + w110)*(w111 + w110); dwwdz *= 0.25 * ddz; s_c[ti + tj*blockDim.x] = dwudx + dwvdy + dwwdz; // convection term sums into right-hand side #ifndef STOKESFLOW if(dt0 > 0) // Adams-Bashforth s_w_star[ti + tj*blockDim.x] += (-ab * s_c[ti + tj*blockDim.x] + ab0 * conv0[i + j*dom->Gfz._s1b + k*dom->Gfz._s2b]); else // forward Euler s_w_star[ti + tj*blockDim.x] += -s_c[ti + tj*blockDim.x]; #endif // compute diffusive term real dwd1 = (w211 - w111) * ddx; real dwd0 = (w111 - w011) * ddx; real ddwdxx = (dwd1 - dwd0) * ddx; dwd1 = (w121 - w111) * ddy; dwd0 = (w111 - w101) * ddy; real ddwdyy = (dwd1 - dwd0) * ddy; dwd1 = (w112 - w111) * ddz; dwd0 = (w111 - w110) * ddz; real ddwdzz = (dwd1 - dwd0) * ddz; s_d[ti + tj*blockDim.x] = nu * 
(ddwdxx + ddwdyy + ddwdzz); // diffusive term sums into right-hand side if(dt0 > 0) // Adams-Bashforth s_w_star[ti + tj*blockDim.x] += (ab * s_d[ti + tj*blockDim.x] - ab0 * diff0[i + j*dom->Gfz._s1b + k*dom->Gfz._s2b]); else // forward Euler s_w_star[ti + tj*blockDim.x] += s_d[ti + tj*blockDim.x]; // add on imposed pressure gradient s_w_star[ti + tj*blockDim.x] += f[i + j*dom->Gfz._s1b + k*dom->Gfz._s2b]; // multiply by dt s_w_star[ti + tj*blockDim.x] *= dt; // velocity term sums into right-hand side s_w_star[ti + tj*blockDim.x] += w111; // zero contribution inside particles s_w_star[ti + tj*blockDim.x] *= (phase[i + j*dom->Gcc._s1b + (k-1)*dom->Gcc._s2b] < 0 && phase[i + j*dom->Gcc._s1b + k*dom->Gcc._s2b] < 0); } // make sure all threads complete computations __syncthreads(); // copy shared memory back to global if((j >= dom->Gfz._js && j < dom->Gfz._je) && (i >= dom->Gfz._is && i < dom->Gfz._ie) && (ti > 0 && ti < (blockDim.x-1)) && (tj > 0 && tj < (blockDim.y-1))) { w_star[i+ j*dom->Gfz._s1b + k*dom->Gfz._s2b] = s_w_star[ti + tj*blockDim.x]; conv[i + j*dom->Gfz._s1b + k*dom->Gfz._s2b] = s_c[ti + tj*blockDim.x]; diff[i + j*dom->Gfz._s1b + k*dom->Gfz._s2b] = s_d[ti + tj*blockDim.x]; } } } #endif __global__ void forcing_reset_x(real *fx, dom_struct *dom) { int tj = blockIdx.x * blockDim.x + threadIdx.x; int tk = blockIdx.y * blockDim.y + threadIdx.y; for(int i = dom->Gfx._isb; i < dom->Gfx._ieb; i++) { if(tj < dom->Gfx._jnb && tk < dom->Gfx._knb) { fx[i + tj*dom->Gfx._s1b + tk*dom->Gfx._s2b] = 0.; } } } __global__ void forcing_reset_y(real *fy, dom_struct *dom) { int tk = blockIdx.x * blockDim.x + threadIdx.x; int ti = blockIdx.y * blockDim.y + threadIdx.y; for(int j = dom->Gfy._jsb; j < dom->Gfy._jeb; j++) { if(tk < dom->Gfy._knb && ti < dom->Gfy._inb) { fy[ti + j*dom->Gfy._s1b + tk*dom->Gfy._s2b] = 0.; } } } __global__ void forcing_reset_z(real *fz, dom_struct *dom) { int ti = blockIdx.x * blockDim.x + threadIdx.x; int tj = blockIdx.y * blockDim.y + threadIdx.y; for(int k = dom->Gfz._ksb; k < dom->Gfz._keb; k++) { if(ti < dom->Gfz._inb && tj < dom->Gfz._jnb) { fz[ti + tj*dom->Gfz._s1b + k*dom->Gfz._s2b] = 0.; } } } __global__ void forcing_add_c_const(real val, real *cc, dom_struct *dom) { int tj = blockIdx.x * blockDim.x + threadIdx.x + DOM_BUF; int tk = blockIdx.y * blockDim.y + threadIdx.y + DOM_BUF; for(int i = dom->Gcc._isb; i < dom->Gcc._ieb; i++) { if(tj < dom->Gcc._jnb && tk < dom->Gcc._knb) { cc[i + tj*dom->Gcc._s1b + tk*dom->Gcc._s2b] += val; } } } __global__ void forcing_add_x_const(real val, real *fx, dom_struct *dom) { int tj = blockIdx.x * blockDim.x + threadIdx.x; int tk = blockIdx.y * blockDim.y + threadIdx.y; for(int i = dom->Gfx._isb; i < dom->Gfx._ieb; i++) { if(tj < dom->Gfx._jnb && tk < dom->Gfx._knb) { fx[i + tj*dom->Gfx._s1b + tk*dom->Gfx._s2b] += val; } } } __global__ void forcing_add_y_const(real val, real *fy, dom_struct *dom) { int tk = blockIdx.x * blockDim.x + threadIdx.x; int ti = blockIdx.y * blockDim.y + threadIdx.y; for(int j = dom->Gfy._jsb; j < dom->Gfy._jeb; j++) { if(tk < dom->Gfy._knb && ti < dom->Gfy._inb) { fy[ti + j*dom->Gfy._s1b + tk*dom->Gfy._s2b] += val; } } } __global__ void forcing_add_z_const(real val, real *fz, dom_struct *dom) { int ti = blockIdx.x * blockDim.x + threadIdx.x; int tj = blockIdx.y * blockDim.y + threadIdx.y; for(int k = dom->Gfz._ksb; k < dom->Gfz._keb; k++) { if(ti < dom->Gfz._inb && tj < dom->Gfz._jnb) { fz[ti + tj*dom->Gfz._s1b + k*dom->Gfz._s2b] += val; } } } __global__ void forcing_add_x_field(real scale, real *val, real 
*fx, dom_struct *dom, int *phase) { int tj = blockIdx.x * blockDim.x + threadIdx.x; int tk = blockIdx.y * blockDim.y + threadIdx.y; for(int i = dom->Gfx._isb; i < dom->Gfx._ieb; i++) { if(tj < dom->Gfx._jnb && tk < dom->Gfx._knb) { fx[i + tj*dom->Gfx._s1b + tk*dom->Gfx._s2b] += scale * val[i + tj*dom->Gfx._s1b + tk*dom->Gfx._s2b]; } } } __global__ void forcing_add_y_field(real scale, real *val, real *fy, dom_struct *dom, int *phase) { int tk = blockIdx.x * blockDim.x + threadIdx.x; int ti = blockIdx.y * blockDim.y + threadIdx.y; for(int j = dom->Gfy._jsb; j < dom->Gfy._jeb; j++) { if(tk < dom->Gfy._knb && ti < dom->Gfy._inb) { fy[ti + j*dom->Gfy._s1b + tk*dom->Gfy._s2b] += scale * val[ti + j*dom->Gfy._s1b + tk*dom->Gfy._s2b]; } } } __global__ void forcing_add_z_field(real scale, real *val, real *fz, dom_struct *dom, int *phase) { int ti = blockIdx.x * blockDim.x + threadIdx.x; int tj = blockIdx.y * blockDim.y + threadIdx.y; for(int k = dom->Gfz._ksb; k < dom->Gfz._keb; k++) { if(ti < dom->Gfz._inb && tj < dom->Gfz._jnb) { fz[ti + tj*dom->Gfz._s1b + k*dom->Gfz._s2b] += scale * val[ti + tj*dom->Gfz._s1b + k*dom->Gfz._s2b]; } } } __global__ void surf_int_x_copy(real *u_star, real *u_star_tmp, dom_struct *dom) { int tj = blockIdx.x * blockDim.x + threadIdx.x; int tk = blockIdx.y * blockDim.y + threadIdx.y; if(tj < dom->Gfx.jn && tk < dom->Gfx.kn) { int C = dom->Gfx.is + (tj+DOM_BUF)*dom->Gfx._s1b + (tk+DOM_BUF)*dom->Gfx._s2b; int CC = 0 + tj + tk*dom->Gfx.jn; u_star_tmp[CC] = -u_star[C]; C = dom->Gfx.ie-1 + (tj+DOM_BUF)*dom->Gfx._s1b + (tk+DOM_BUF)*dom->Gfx._s2b; CC = dom->Gfx.jn*dom->Gfx.kn + tj + tk*dom->Gfx.jn; u_star_tmp[CC] = u_star[C]; } } __global__ void surf_int_y_copy(real *v_star, real *v_star_tmp, dom_struct *dom) { int tk = blockIdx.x * blockDim.x + threadIdx.x; int ti = blockIdx.y * blockDim.y + threadIdx.y; if(tk < dom->Gfy.kn && ti < dom->Gfy.in) { int C = (ti+DOM_BUF) + dom->Gfy.js*dom->Gfy._s1b + (tk+DOM_BUF)*dom->Gfy._s2b; int CC = ti + 0 + tk*dom->Gfy.in; v_star_tmp[CC] = -v_star[C]; C = (ti+DOM_BUF) + (dom->Gfy.je-1)*dom->Gfy._s1b + (tk+DOM_BUF)*dom->Gfy._s2b; CC = ti + dom->Gfy.in*dom->Gfy.kn + tk*dom->Gfy.in; v_star_tmp[CC] = v_star[C]; } } __global__ void surf_int_z_copy(real *w_star, real *w_star_tmp, dom_struct *dom) { int ti = blockIdx.x * blockDim.x + threadIdx.x; int tj = blockIdx.y * blockDim.y + threadIdx.y; if(ti < dom->Gfz.in && tj < dom->Gfz.jn) { int C = (ti+DOM_BUF) + (tj+DOM_BUF)*dom->Gfz._s1b + dom->Gfz.ks*dom->Gfz._s2b; int CC = ti + tj*dom->Gfz.in + 0; w_star_tmp[CC] = -w_star[C]; C = (ti+DOM_BUF) + (tj+DOM_BUF)*dom->Gfz._s1b + (dom->Gfz.ke-1)*dom->Gfz._s2b; CC = ti + tj*dom->Gfz.in + dom->Gfz.in*dom->Gfz.jn; w_star_tmp[CC] = w_star[C]; } } __global__ void plane_eps_x_W(real eps, real *u_star, dom_struct *dom) { int tj = blockIdx.x * blockDim.x + threadIdx.x; int tk = blockIdx.y * blockDim.y + threadIdx.y; if(tj < dom->Gfx.jn && tk < dom->Gfx.kn) { int C = dom->Gfx.is + (tj+DOM_BUF)*dom->Gfx._s1b + (tk+DOM_BUF)*dom->Gfx._s2b; u_star[C] = u_star[C] + eps; } } __global__ void plane_eps_x_E(real eps, real *u_star, dom_struct *dom) { int tj = blockIdx.x * blockDim.x + threadIdx.x; int tk = blockIdx.y * blockDim.y + threadIdx.y; if(tj < dom->Gfx.jn && tk < dom->Gfx.kn) { int C = dom->Gfx.ie-1 + (tj+DOM_BUF)*dom->Gfx._s1b + (tk+DOM_BUF)*dom->Gfx._s2b; u_star[C] = u_star[C] - eps; } } __global__ void plane_eps_y_S(real eps, real *v_star, dom_struct *dom) { int tk = blockIdx.x * blockDim.x + threadIdx.x; int ti = blockIdx.y * blockDim.y + threadIdx.y; if(tk < 
dom->Gfy.kn && ti < dom->Gfy.in) { int C = (ti+DOM_BUF) + (dom->Gfy.js)*dom->Gfy._s1b + (tk+DOM_BUF)*dom->Gfy._s2b; v_star[C] = v_star[C] + eps; } } __global__ void plane_eps_y_N(real eps, real *v_star, dom_struct *dom) { int tk = blockIdx.x * blockDim.x + threadIdx.x; int ti = blockIdx.y * blockDim.y + threadIdx.y; if(tk < dom->Gfy.kn && ti < dom->Gfy.in) { int C = (ti+DOM_BUF) + (dom->Gfy.je-1)*dom->Gfy._s1b + (tk+DOM_BUF)*dom->Gfy._s2b; v_star[C] = v_star[C] - eps; } } __global__ void plane_eps_z_B(real eps, real *w_star, dom_struct *dom) { int ti = blockIdx.x * blockDim.x + threadIdx.x; int tj = blockIdx.y * blockDim.y + threadIdx.y; if(ti < dom->Gfz.in && tj < dom->Gfz.jn) { int C = (ti+DOM_BUF) + (tj+DOM_BUF)*dom->Gfz._s1b + (dom->Gfz.ks)*dom->Gfz._s2b; w_star[C] = w_star[C] + eps; } } __global__ void plane_eps_z_T(real eps, real *w_star, dom_struct *dom) { int ti = blockIdx.x * blockDim.x + threadIdx.x; int tj = blockIdx.y * blockDim.y + threadIdx.y; if(ti < dom->Gfz.in && tj < dom->Gfz.jn) { int C = (ti+DOM_BUF) + (tj+DOM_BUF)*dom->Gfz._s1b + (dom->Gfz.ke-1)*dom->Gfz._s2b; w_star[C] = w_star[C] - eps; } } __global__ void move_parts_a(dom_struct *dom, part_struct *parts, int nparts, real dt, real dt0, g_struct g, gradP_struct gradP, real rho_f, real ttime) { int pp = threadIdx.x + blockIdx.x*blockDim.x; // particle number real vol = 4./3. * PI * parts[pp].r*parts[pp].r*parts[pp].r; real m = vol * parts[pp].rho; if(pp < nparts) { if(parts[pp].translating) { // update linear accelerations parts[pp].udot = (parts[pp].Fx + parts[pp].kFx + parts[pp].iFx + parts[pp].aFx - vol*gradP.x) / m + (parts[pp].rho - rho_f) / parts[pp].rho * g.x; parts[pp].vdot = (parts[pp].Fy + parts[pp].kFy + parts[pp].iFy + parts[pp].aFy - vol*gradP.y) / m + (parts[pp].rho - rho_f) / parts[pp].rho * g.y; parts[pp].wdot = (parts[pp].Fz + parts[pp].kFz + parts[pp].iFz + parts[pp].aFz - vol*gradP.z) / m + (parts[pp].rho - rho_f) / parts[pp].rho * g.z; // update linear velocities parts[pp].u = parts[pp].u0 + 0.5*dt*(parts[pp].udot + parts[pp].udot0); parts[pp].v = parts[pp].v0 + 0.5*dt*(parts[pp].vdot + parts[pp].vdot0); parts[pp].w = parts[pp].w0 + 0.5*dt*(parts[pp].wdot + parts[pp].wdot0); // do not update position } if(parts[pp].rotating) { // update angular accelerations real I = 0.4 * m * parts[pp].r*parts[pp].r; parts[pp].oxdot = (parts[pp].Lx + parts[pp].iLx + parts[pp].aLx) / I; parts[pp].oydot = (parts[pp].Ly + parts[pp].iLy + parts[pp].aLy) / I; parts[pp].ozdot = (parts[pp].Lz + parts[pp].iLz + parts[pp].aLz) / I; // update angular velocities parts[pp].ox = parts[pp].ox0 + 0.5*dt*(parts[pp].oxdot + parts[pp].oxdot0); parts[pp].oy = parts[pp].oy0 + 0.5*dt*(parts[pp].oydot + parts[pp].oydot0); parts[pp].oz = parts[pp].oz0 + 0.5*dt*(parts[pp].ozdot + parts[pp].ozdot0); } } } __global__ void move_parts_b(dom_struct *dom, part_struct *parts, int nparts, real dt, real dt0, g_struct g, gradP_struct gradP, real rho_f, real ttime) { int pp = threadIdx.x + blockIdx.x*blockDim.x; // particle number real vol = 4./3. 
* PI * parts[pp].r*parts[pp].r*parts[pp].r; real m = vol * parts[pp].rho; if(pp < nparts) { if(parts[pp].translating) { // update linear accelerations parts[pp].udot = (parts[pp].Fx + parts[pp].kFx + parts[pp].iFx + parts[pp].aFx - vol*gradP.x) / m + (parts[pp].rho - rho_f) / parts[pp].rho * g.x; parts[pp].vdot = (parts[pp].Fy + parts[pp].kFy + parts[pp].iFy + parts[pp].aFy - vol*gradP.y) / m + (parts[pp].rho - rho_f) / parts[pp].rho * g.y; parts[pp].wdot = (parts[pp].Fz + parts[pp].kFz + parts[pp].iFz + parts[pp].aFz - vol*gradP.z) / m + (parts[pp].rho - rho_f) / parts[pp].rho * g.z; // update linear velocities parts[pp].u = parts[pp].u0 + 0.5*dt*(parts[pp].udot + parts[pp].udot0); parts[pp].v = parts[pp].v0 + 0.5*dt*(parts[pp].vdot + parts[pp].vdot0); parts[pp].w = parts[pp].w0 + 0.5*dt*(parts[pp].wdot + parts[pp].wdot0); // update position (trapezoidal rule) parts[pp].x = parts[pp].x0 + 0.5*dt*(parts[pp].u + parts[pp].u0); if(parts[pp].x < dom->xs) parts[pp].x = parts[pp].x + dom->xl; else if(parts[pp].x > dom->xe) parts[pp].x = parts[pp].x - dom->xl; parts[pp].y = parts[pp].y0 + 0.5*dt*(parts[pp].v + parts[pp].v0); if(parts[pp].y < dom->ys) parts[pp].y = parts[pp].y + dom->yl; else if(parts[pp].y > dom->ye) parts[pp].y = parts[pp].y - dom->yl; parts[pp].z = parts[pp].z0 + 0.5*dt*(parts[pp].w + parts[pp].w0); if(parts[pp].z < dom->zs) parts[pp].z = parts[pp].z + dom->zl; else if(parts[pp].z > dom->ze) parts[pp].z = parts[pp].z - dom->zl; // store for next time step parts[pp].x0 = parts[pp].x; parts[pp].y0 = parts[pp].y; parts[pp].z0 = parts[pp].z; parts[pp].u0 = parts[pp].u; parts[pp].v0 = parts[pp].v; parts[pp].w0 = parts[pp].w; parts[pp].udot0 = parts[pp].udot; parts[pp].vdot0 = parts[pp].vdot; parts[pp].wdot0 = parts[pp].wdot; } if(parts[pp].rotating) { // update angular accelerations real I = 0.4 * m * parts[pp].r*parts[pp].r; parts[pp].oxdot = (parts[pp].Lx + parts[pp].iLx + parts[pp].aLx) / I; parts[pp].oydot = (parts[pp].Ly + parts[pp].iLy + parts[pp].aLy) / I; parts[pp].ozdot = (parts[pp].Lz + parts[pp].iLz + parts[pp].aLz) / I; // update angular velocities parts[pp].ox = parts[pp].ox0 + 0.5*dt*(parts[pp].oxdot + parts[pp].oxdot0); parts[pp].oy = parts[pp].oy0 + 0.5*dt*(parts[pp].oydot + parts[pp].oydot0); parts[pp].oz = parts[pp].oz0 + 0.5*dt*(parts[pp].ozdot + parts[pp].ozdot0); /* update basis vectors */ // calculate rotation magnitude (trapezoidal rule) real mag = 0.5*sqrt(parts[pp].ox*parts[pp].ox + parts[pp].oy*parts[pp].oy + parts[pp].oz*parts[pp].oz); mag += 0.5*sqrt(parts[pp].ox0*parts[pp].ox0 + parts[pp].oy0*parts[pp].oy0 + parts[pp].oz0*parts[pp].oz0); // calculate normalized rotation axis real X = 0; real Y = 0; real Z = 0; if(mag > 0) { X = 0.5 * (parts[pp].ox + parts[pp].ox0) / mag; Y = 0.5 * (parts[pp].oy + parts[pp].oy0) / mag; Z = 0.5 * (parts[pp].oz + parts[pp].oz0) / mag; } // calculate rotation quaternion real theta = mag * dt; real qr = cos(0.5*theta); real qi = X * sin(0.5*theta); real qj = Y * sin(0.5*theta); real qk = Z * sin(0.5*theta); // compute quaternion conjugation to apply rotation to basis vectors rotate(qr, qi, qj, qk, &parts[pp].axx, &parts[pp].axy, &parts[pp].axz); rotate(qr, qi, qj, qk, &parts[pp].ayx, &parts[pp].ayy, &parts[pp].ayz); rotate(qr, qi, qj, qk, &parts[pp].azx, &parts[pp].azy, &parts[pp].azz); // store for next time step parts[pp].ox0 = parts[pp].ox; parts[pp].oy0 = parts[pp].oy; parts[pp].oz0 = parts[pp].oz; parts[pp].oxdot0 = parts[pp].oxdot; parts[pp].oydot0 = parts[pp].oydot; parts[pp].ozdot0 = parts[pp].ozdot; } } } __device__ 
void rotate(real qr, real qi, real qj, real qk, real *pi, real *pj, real *pk) { real Pr = *pi*qi + *pj*qj + *pk*qk; real Pi = *pi*qr - *pj*qk + *pk*qj; real Pj = *pi*qk + *pj*qr - *pk*qi; real Pk = -*pi*qj + *pj*qi + *pk*qr; *pi = qr*Pi + qi*Pr + qj*Pk - qk*Pj; *pj = qr*Pj - qi*Pk + qj*Pr + qk*Pi; *pk = qr*Pk + qi*Pj - qj*Pi + qk*Pr; } __global__ void collision_init(part_struct *parts, int nparts) { int j = threadIdx.x + blockIdx.x*blockDim.x; if(j < nparts) { parts[j].iFx = 0.; parts[j].iFy = 0.; parts[j].iFz = 0.; parts[j].iLx = 0.; parts[j].iLy = 0.; parts[j].iLz = 0.; } } __global__ void init(int *vector, int N, int val) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < N) { vector[i] = val; } } __global__ void bin_fill(int *partInd, int *partBin, int nparts, part_struct *parts, dom_struct *binDom, BC bc) { int pp = threadIdx.x + blockIdx.x*blockDim.x; int c; int ibin, jbin, kbin; // find the correct bin index for each part and store it if (pp < nparts) { ibin = floor((parts[pp].x - binDom->xs)/binDom->dx); jbin = floor((parts[pp].y - binDom->ys)/binDom->dy); kbin = floor((parts[pp].z - binDom->zs)/binDom->dz); c = ibin + jbin*binDom->Gcc.s1 + kbin*binDom->Gcc.s2; partInd[pp] = pp; // index of particle partBin[pp] = c; // bin index parts[pp].bin = c; // bin index (stored in particle) } } __global__ void bin_partCount(int *binCount, int *binStart, int *binEnd, dom_struct *binDom, BC bc, int nBins) { int bin = threadIdx.x + blockIdx.x*blockDim.x; // fill binCount if (bin < nBins) { binCount[bin] = binEnd[bin] - binStart[bin]; } } __global__ void bin_start(int *binStart, int *binEnd, int *partBin, int nparts) { // This kernel function was adapted from NVIDIA CUDA 5.5 Examples // This software contains source code provided by NVIDIA Corporation extern __shared__ int sharedBin[]; //blockSize + 1 int index = threadIdx.x + blockIdx.x*blockDim.x; int bin; // for a given bin index, the previous bins's index is stored in sharedBin if (index < nparts) { bin = partBin[index]; // Load bin data into shared memory so that we can look // at neighboring particle's hash value without loading // two bin values per thread sharedBin[threadIdx.x + 1] = bin; if (index > 0 && threadIdx.x == 0) { // first thread in block must load neighbor particle bin sharedBin[0] = partBin[index - 1]; } } __syncthreads(); if (index < nparts) { // If this particle has a different cell index to the previous // particle then it must be the first particle in the cell, // so store the index of this particle in the cell. 
// As it isn't the first particle, it must also be the cell end of // the previous particle's cell bin = partBin[index]; if (index == 0 || bin != sharedBin[threadIdx.x]) { binStart[bin] = index; if (index > 0) binEnd[sharedBin[threadIdx.x]] = index; } if (index == nparts - 1) { binEnd[bin] = index + 1; } } } __global__ void collision_parts(part_struct *parts, int nparts, dom_struct *dom, real eps, real mu, real rhof, real nu, BC bc, int *binStart, int *binEnd, int *partBin, int *partInd, dom_struct *binDom, int interactionLengthRatio, real dt) { int index = threadIdx.x + blockIdx.x*blockDim.x; if (index < nparts) { int i = partInd[index]; int bin = partBin[index]; int kbin = floorf(bin/binDom->Gcc.s2); int jbin = floorf((bin - kbin*binDom->Gcc.s2)/binDom->Gcc.s1); int ibin = bin - kbin*binDom->Gcc.s2 - jbin*binDom->Gcc.s1; int l, m, n; // adjacent bin iterators int target, j; // target indices int adjBin, adjStart, adjEnd; // adjacent bin stuff int iStride, kStride, jStride; // how to get to adjacent bin int q; // iterator // predefine face locations // -1, -2 due to local vs global indexing and defiinition of dom_struct int fW = binDom->Gcc.is - 1; int fE = binDom->Gcc.ie - 2; int fS = binDom->Gcc.js - 1; int fN = binDom->Gcc.je - 2; int fB = binDom->Gcc.ks - 1; int fT = binDom->Gcc.ke - 2; // size checks int xnBin = (binDom->xn > 2); int ynBin = (binDom->yn > 2); int znBin = (binDom->zn > 2); // loop over adjacent bins and take care of periodic conditions for (n = -1; n <= 1; n++) { // if on a face and not periodic, continue // if on a face and periodic but only 2 bins, continue if ((n == -1 && kbin == fB && bc.uB != PERIODIC) || (n == 1 && kbin == fT && bc.uT != PERIODIC) || (n == -1 && kbin == fB && bc.uB == PERIODIC && znBin == 0) || (n == 1 && kbin == fT && bc.uT == PERIODIC && znBin == 0)) { continue; // if on a face and periodic, flip to other side } else if (n == -1 && kbin == fB && bc.uB == PERIODIC) { kStride = fT*binDom->Gcc.s2; } else if (n == 1 && kbin == fT && bc.uT == PERIODIC) { kStride = fB*binDom->Gcc.s2; // else, we are in the middle, do nothing special } else { kStride = (kbin + n)*binDom->Gcc.s2; } for (m = -1; m <= 1; m++) { if ((m == -1 && jbin == fS && bc.uS != PERIODIC) || (m == 1 && jbin == fN && bc.uN != PERIODIC) || (m == -1 && jbin == fS && bc.uS == PERIODIC && ynBin == 0) || (m == 1 && jbin == fN && bc.uN == PERIODIC && ynBin == 0)) { continue; } else if (m == -1 && jbin == fS && bc.uS == PERIODIC) { jStride = fN*binDom->Gcc.s1; } else if (m == 1 && jbin == fN && bc.uN == PERIODIC) { jStride = fS*binDom->Gcc.s1; } else { jStride = (jbin + m)*binDom->Gcc.s1; } for (l = -1; l <= 1; l++) { if ((l == -1 && ibin == fW && bc.uW != PERIODIC) || (l == 1 && ibin == fE && bc.uE != PERIODIC) || (l == -1 && ibin == fW && bc.uW == PERIODIC && xnBin == 0) || (l == 1 && ibin == fE && bc.uE == PERIODIC && xnBin == 0)) { continue; } else if (l == -1 && ibin == fW && bc.uW == PERIODIC) { iStride = fE; } else if (l == 1 && ibin == fE && bc.uE == PERIODIC) { iStride = fW; } else { iStride = ibin + l; } adjBin = iStride + jStride + kStride; adjStart = binStart[adjBin]; // find start and end of bins adjEnd = binEnd[adjBin]; if (adjStart != -1) { // if bin is not empty for (target = adjStart; target < adjEnd; target++) { j = partInd[target]; if (j != i) { // if its not original part // calculate forces real ai = parts[i].r; real aj = parts[j].r; real B = aj / ai; real hN = interactionLengthRatio * parts[i].r; real ux, uy, uz; real rx, rx1, rx2, ry, ry1, ry2, rz, rz1, rz2, r; real 
h, ah, lnah; real nx, ny, nz, udotn; real unx, uny, unz, utx, uty, utz, ut; real tx, ty, tz, t, bx, by, bz, b; real omegax, omegay, omegaz, omega; real ocrossnx, ocrossny, ocrossnz; real utcrossnx, utcrossny, utcrossnz; real opB; real Fnx, Fny, Fnz, Ftx, Fty, Ftz, Lox, Loy, Loz; real xi = parts[i].x; real xj = parts[j].x; // check for neighbors across the domain when using periodic // boundaries rx = xi - xj; rx1 = xi - (xj + dom->xl); rx2 = xi - (xj - dom->xl); if(rx1*rx1 < rx*rx) rx = rx1; if(rx2*rx2 < rx*rx) rx = rx2; rx = (bc.uW == PERIODIC) * rx + (bc.uW != PERIODIC) * (xi - xj); real yi = parts[i].y; real yj = parts[j].y; // check for neighbors across the domain when using periodic // boundaries ry = yi - yj; ry1 = yi - (yj + dom->yl); ry2 = yi - (yj - dom->yl); if(ry1*ry1 < ry*ry) ry = ry1; if(ry2*ry2 < ry*ry) ry = ry2; ry = (bc.vS == PERIODIC) * ry + (bc.vS != PERIODIC) * (yi - yj); real zi = parts[i].z; real zj = parts[j].z; // check for neighbors across the domain when using periodic // boundaries rz = zi - zj; rz1 = zi - (zj + dom->zl); rz2 = zi - (zj - dom->zl); if(rz1*rz1 < rz*rz) rz = rz1; if(rz2*rz2 < rz*rz) rz = rz2; rz = (bc.wB == PERIODIC) * rz + (bc.wB != PERIODIC) * (zi - zj); ux = 0.5*((parts[i].u - parts[j].u) + (parts[i].u0 - parts[j].u0)); uy = 0.5*((parts[i].v - parts[j].v) + (parts[i].v0 - parts[j].v0)); uz = 0.5*((parts[i].w - parts[j].w) + (parts[i].w0 - parts[j].w0)); r = sqrt(rx*rx + ry*ry + rz*rz); omegax = 0.5*((parts[i].ox + parts[j].ox) + (parts[i].ox0 + parts[j].ox0)); omegay = 0.5*((parts[i].oy + parts[j].oy) + (parts[i].oy0 + parts[j].oy0)); omegaz = 0.5*((parts[i].oz + parts[j].oz) + (parts[i].oz0 + parts[j].oz0)); omega = sqrt(omegax*omegax + omegay*omegay + omegaz*omegaz); h = r - ai - aj; nx = rx / r; ny = ry / r; nz = rz / r; udotn = ux * nx + uy * ny + uz * nz; unx = udotn * nx; uny = udotn * ny; unz = udotn * nz; utx = ux - unx; uty = uy - uny; utz = uz - unz; ut = sqrt(utx*utx + uty*uty + utz*utz); if(ut > 0) { tx = utx / ut; ty = uty / ut; tz = utz / ut; bx = ny*tz - nz*ty; by = -nx*tz + nz*tx; bz = nx*ty - ny*tx; b = sqrt(bx*bx + by*by + bz*bz); bx = bx / b; by = by / b; bz = bz / b; } else if(omega > 0) { bx = omegax / omega; by = omegay / omega; bz = omegaz / omega; tx = by*nz - bz*ny; ty = -bx*nz + bz*nx; tz = bx*ny - by*nx; t = sqrt(tx*tx + ty*ty + tz*tz); tx = tx / t; ty = ty / t; tz = tz / t; } else { tx = 1.; ty = 0.; tz = 0.; bx = ny*tz - nz*ty; by = -nx*tz + nz*tx; bz = nx*ty - ny*tx; b = sqrt(bx*bx + by*by + bz*bz); bx = bx / b; by = by / b; bz = bz / b; } opB = 1 + B; ocrossnx = omegay*nz - omegaz*ny; ocrossny = -omegax*nz + omegaz*nx; ocrossnz = omegax*ny - omegay*nx; utcrossnx = uty*nz - utz*ny; utcrossny = -utx*nz + utz*nx; utcrossnz = utx*ny - uty*nx; if(h < hN && h > 0) { // remove contact from list if it is there q = 0; while(parts[i].iSt[q] != j && q < MAX_NEIGHBORS) { q++; } if(parts[i].iSt[q] == j) { parts[i].iSt[q] = -1; parts[i].St[q] = 0.; } if(h < eps*parts[i].r) h = eps*parts[i].r; ah = ai/h - ai/hN; lnah = log(hN/h); Fnx = -1. 
* B*B / (opB*opB) * ah - B*(1.+7.*B+B*B)/(5.*opB*opB*opB)*lnah; Fny = Fnx; Fnz = Fnx; Fnx *= 6.*PI*mu*ai*unx; Fny *= 6.*PI*mu*ai*uny; Fnz *= 6.*PI*mu*ai*unz; Ftx = -6.*PI*mu*ai*utx*4.*B*(2.+B+2.*B*B) /(15.*opB*opB*opB)*lnah; Fty = -6.*PI*mu*ai*uty*4.*B*(2.+B+2.*B*B) /(15.*opB*opB*opB)*lnah; Ftz = -6.*PI*mu*ai*utz*4.*B*(2.+B+2.*B*B) /(15.*opB*opB*opB)*lnah; Ftx += 8.*PI*mu*ai*ai*ocrossnx*B*(4.+B)/(10.*opB*opB)*lnah; Fty += 8.*PI*mu*ai*ai*ocrossny*B*(4.+B)/(10.*opB*opB)*lnah; Ftz += 8.*PI*mu*ai*ai*ocrossnz*B*(4.+B)/(10.*opB*opB)*lnah; Lox = -8.*PI*mu*ai*ai*utcrossnx*B*(4.+B)/(10.*opB*opB)*lnah; Loy = -8.*PI*mu*ai*ai*utcrossny*B*(4.+B)/(10.*opB*opB)*lnah; Loz = -8.*PI*mu*ai*ai*utcrossnz*B*(4.+B)/(10.*opB*opB)*lnah; Lox += -8.*PI*mu*ai*ai*ai*omegax*2.*B/(5.*opB)*lnah; Loy += -8.*PI*mu*ai*ai*ai*omegay*2.*B/(5.*opB)*lnah; Loz += -8.*PI*mu*ai*ai*ai*omegaz*2.*B/(5.*opB)*lnah; } else { ah = 0; lnah = 0; Fnx = 0; Fny = 0; Fnz = 0; Ftx = 0; Fty = 0; Ftz = 0; Lox = 0; Loy = 0; Loz = 0; } if(h < 0) { // determine whether this is a new contact q = 0; while(parts[i].iSt[q] != j && q < MAX_NEIGHBORS) { q++; } if(q == MAX_NEIGHBORS) { q = 0; while(parts[i].iSt[q] != -1) { q++; } parts[i].iSt[q] = j; parts[i].St[q] = 1./9.*parts[i].rho/rhof*2.*parts[i].r *fabs(udotn)/nu; } real Vx = -utx + 0.5*(ai + aj + h)*ocrossnx; real Vy = -uty + 0.5*(ai + aj + h)*ocrossny; real Vz = -utz + 0.5*(ai + aj + h)*ocrossnz; real Hi = 0.5*parts[i].E/(1.+parts[i].sigma); real kt = 8./((1.-parts[i].sigma*parts[i].sigma)/Hi +(1.-parts[i].sigma*parts[i].sigma)/Hi)/sqrt(1./ai) *sqrt(-h); real sx = (Vx - Vx * nx) * dt; real sy = (Vy - Vy * ny) * dt; real sz = (Vz - Vz * nz) * dt; ah = 0; lnah = 0; real k = 4./3./((1.-parts[i].sigma*parts[i].sigma)/parts[i].E + (1.-parts[j].sigma*parts[j].sigma)/parts[j].E) /sqrt(1./ai + 1./aj); // estimate damping coefficient real xcx0 = 1.e-4; real e = parts[i].e_dry + (1.+parts[i].e_dry)/parts[i].St[q] *log(xcx0); if(e < 0) e = 0; real alpha = -2.263*pow(e,0.3948)+2.22; real eta = alpha*sqrt(4./3.*PI*ai*ai*ai*parts[i].rho *k*sqrt(-h)); // normal contact forces Fnx = (sqrt(-h*h*h)*k - eta*udotn)*nx; Fny = (sqrt(-h*h*h)*k - eta*udotn)*ny; Fnz = (sqrt(-h*h*h)*k - eta*udotn)*nz; // tangential contact forces real coeff_fric = 0.5 * (parts[i].coeff_fric + parts[j].coeff_fric); Ftx = -kt * sx; Fty = -kt * sy; Ftz = -kt * sz; Ftx = Ftx - Ftx * nx; Fty = Fty - Fty * ny; Ftz = Ftz - Ftz * nz; real Ft = sqrt(Ftx*Ftx + Fty*Fty + Ftz*Ftz); real Fn = sqrt(Fnx*Fnx + Fny*Fny + Fnz*Fnz); if(Ft > coeff_fric * Fn) { Ftx = coeff_fric * Fn * Ftx / Ft; Fty = coeff_fric * Fn * Fty / Ft; Ftz = coeff_fric * Fn * Ftz / Ft; } Lox = -(ai+0.5*h)*((Fny+Fty)*nz-(Fnz+Ftz)*ny); Loy = (ai+0.5*h)*((Fnx+Ftx)*nz-(Fnz+Ftz)*nx); Loz = -(ai+0.5*h)*((Fnx+Ftx)*ny-(Fny+Fty)*nx); } // assign forces parts[i].iFx += Fnx + Ftx; parts[i].iFy += Fny + Fty; parts[i].iFz += Fnz + Ftz; parts[i].iLx += Lox; parts[i].iLy += Loy; parts[i].iLz += Loz; } } } } } } } } __global__ void collision_walls(dom_struct *dom, part_struct *parts, int nparts, BC bc, real eps, real mu, real rhof, real nu, int interactionLengthRatio, real dt) { int i = threadIdx.x + blockIdx.x*blockDim.x; /**** parallelize this further by using a CUDA block for each wall ****/ int q; // iterator if(i < nparts) { real dx = 0; real dy = 0; real dz = 0; real Un, Utx, Uty, Utz; real omx, omy, omz; real ai = parts[i].r; real h = 0; real hN = interactionLengthRatio * parts[i].r; real ah, lnah; real Fnx, Fny, Fnz, Ftx, Fty, Ftz; real Lox, Loy, Loz; int isTrue = 0; // west wall dx = 
fabs(parts[i].x - (dom->xs + bc.dsW)); h = dx - ai; isTrue = (bc.pW == NEUMANN); // collision force applied ifTrue if(h < hN && h > 0) { // remove from contact list if it is there q = 0; while(parts[i].iSt[q] != -10 && q < MAX_NEIGHBORS) { q++; } if(parts[i].iSt[q] == -10) { parts[i].iSt[q] = -1; parts[i].St[q] = 0.; } if(h < eps*parts[i].r) h = eps*parts[i].r; ah = ai/h - ai/hN; lnah = log(hN/h); Un = parts[i].u - bc.uWD; Utx = 0.; Uty = parts[i].v - bc.vWD; Utz = parts[i].w - bc.wWD; omx = parts[i].ox; omy = parts[i].oy; omz = parts[i].oz; Fnx = -6.*PI*mu*ai*Un*ah; Fny = 0.; Fnz = 0.; Ftx = 0.; Fty = -6.*PI*mu*ai*Uty*8./15.*lnah; Ftz = -6.*PI*mu*ai*Utz*8./15.*lnah; Ftx += 0.; Fty += 8.*PI*mu*ai*ai*omz*1./10.*lnah; Ftz += -8.*PI*mu*ai*ai*omy*1./10.*lnah; Lox = 0.; Loy = -8.*PI*mu*ai*ai*Utz*1./10.*lnah; Loz = 8.*PI*mu*ai*ai*Uty*1./10.*lnah; Lox += 0.; Loy += -8.*PI*mu*ai*ai*ai*omy*2./5.*lnah; Loz += -8.*PI*mu*ai*ai*ai*omz*2./5.*lnah; parts[i].iFx += isTrue * (Fnx + Ftx); parts[i].iFy += isTrue * (Fny + Fty); parts[i].iFz += isTrue * (Fnz + Ftz); parts[i].iLx += isTrue * Lox; parts[i].iLy += isTrue * Loy; parts[i].iLz += isTrue * Loz; } if(h < 0) { Un = parts[i].u - bc.uWD; real Uty = 0.5*(parts[i].v+parts[i].v0) - bc.vSD; real Utz = 0.5*(parts[i].w+parts[i].w0) - bc.wSD; // determine whether this is a new contact q = 0; while(parts[i].iSt[q] != -10 && q < MAX_NEIGHBORS) { q++; } if(q == MAX_NEIGHBORS) { q = 0; while(parts[i].iSt[q] != -1) { q++; } parts[i].iSt[q] = -10; parts[i].St[q] = 1./9.*parts[i].rho/rhof*2.*parts[i].r*fabs(Un)/nu; } omx = 0.5*(parts[i].ox+parts[i].ox0); omy = 0.5*(parts[i].oy+parts[i].oy0); omz = 0.5*(parts[i].oz+parts[i].oz0); real Vy = Uty - (ai + 0.5*h)*omz; real Vz = Utz + (ai + 0.5*h)*omx; real Hi = 0.5*parts[i].E/(1.+parts[i].sigma); real kt = 8./((1.-parts[i].sigma*parts[i].sigma)/Hi +(1.-parts[i].sigma*parts[i].sigma)/Hi)/sqrt(1./ai)*sqrt(-h); real sy = Vy * dt; real sz = Vz * dt; lnah = 0; real k = 4./3./((1.-parts[i].sigma*parts[i].sigma)/parts[i].E + (1.-parts[i].sigma*parts[i].sigma)/parts[i].E)/sqrt(1./ai); // estimate damping coefficient real xcx0 = 1.e-4; real e = parts[i].e_dry + (1.+parts[i].e_dry)/parts[i].St[q]*log(xcx0); if(e < 0) e = 0; real alpha = -2.263*pow(e,0.3948)+2.22; real eta = alpha*sqrt(4./3.*PI*ai*ai*ai*parts[i].rho*k*sqrt(-h)); // use the same coeff_fric for particle and wall real coeff_fric = parts[i].coeff_fric; real Fty = -kt * sy; real Ftz = -kt * sz; real Ft = sqrt(Fty*Fty + Ftz*Ftz); if(Ft > fabs(coeff_fric * (sqrt(-h*h*h)*k - eta*Un))) { Fty = coeff_fric * (sqrt(-h*h*h)*k - eta*Un) * Fty / Ft; Ftz = coeff_fric * (sqrt(-h*h*h)*k - eta*Un) * Ftz / Ft; } parts[i].iFx += isTrue * (sqrt(-h*h*h)*k - eta*Un); parts[i].iFy += isTrue * Fty; parts[i].iFz += isTrue * Ftz; parts[i].iLy += isTrue * (ai+0.5*h) * Ftz; parts[i].iLz -= isTrue * (ai+0.5*h) * Fty; } // east wall dx = fabs(parts[i].x - (dom->xe - bc.dsE)); h = dx - ai; isTrue = (bc.pE == NEUMANN); if(h < hN && h > 0) { // remove from contact list if it is there q = 0; while(parts[i].iSt[q] != -11 && q < MAX_NEIGHBORS) { q++; } if(parts[i].iSt[q] == -11) { parts[i].iSt[q] = -1; parts[i].St[q] = 0.; } if(h < eps*parts[i].r) h = eps*parts[i].r; ah = ai/h - ai/hN; lnah = log(hN/h); Un = parts[i].u - bc.uED; Utx = 0.; Uty = parts[i].v - bc.vED; Utz = parts[i].w - bc.wED; omx = parts[i].ox; omy = parts[i].oy; omz = parts[i].oz; Fnx = -6.*PI*mu*ai*Un*ah; Fny = 0.; Fnz = 0.; Ftx = 0.; Fty = -6.*PI*mu*ai*Uty*8./15.*lnah; Ftz = -6.*PI*mu*ai*Utz*8./15.*lnah; Ftx += 0.; Fty += 
-8.*PI*mu*ai*ai*omz*1./10.*lnah; Ftz += 8.*PI*mu*ai*ai*omy*1./10.*lnah; Lox = 0.; Loy = 8.*PI*mu*ai*ai*Utz*1./10.*lnah; Loz = -8.*PI*mu*ai*ai*Uty*1./10.*lnah; Lox += 0.; Loy += -8.*PI*mu*ai*ai*ai*omy*2./5.*lnah; Loz += -8.*PI*mu*ai*ai*ai*omz*2./5.*lnah; parts[i].iFx += isTrue * (Fnx + Ftx); parts[i].iFy += isTrue * (Fny + Fty); parts[i].iFz += isTrue * (Fnz + Ftz); parts[i].iLx += isTrue * Lox; parts[i].iLy += isTrue * Loy; parts[i].iLz += isTrue * Loz; } if(h < 0) { Un = -(parts[i].u - bc.uED); real Uty = 0.5*(parts[i].v+parts[i].v0) - bc.vSD; real Utz = 0.5*(parts[i].w+parts[i].w0) - bc.wSD; // determine whether this is a new contact q = 0; while(parts[i].iSt[q] != -11 && q < MAX_NEIGHBORS) { q++; } if(q == MAX_NEIGHBORS) { q = 0; while(parts[i].iSt[q] != -1) { q++; } parts[i].iSt[q] = -11; parts[i].St[q] = 1./9.*parts[i].rho/rhof*2.*parts[i].r*fabs(Un)/nu; } omx = 0.5*(parts[i].ox+parts[i].ox0); omy = 0.5*(parts[i].oy+parts[i].oy0); omz = 0.5*(parts[i].oz+parts[i].oz0); real Vy = -(Uty - (ai + 0.5*h)*omz); real Vz = -(Utz + (ai + 0.5*h)*omx); real Hi = 0.5*parts[i].E/(1.+parts[i].sigma); real kt = 8./((1.-parts[i].sigma*parts[i].sigma)/Hi +(1.-parts[i].sigma*parts[i].sigma)/Hi)/sqrt(1./ai)*sqrt(-h); real sy = Vy * dt; real sz = Vz * dt; lnah = 0; real k = 4./3./((1.-parts[i].sigma*parts[i].sigma)/parts[i].E + (1.-parts[i].sigma*parts[i].sigma)/parts[i].E)/sqrt(1./ai); // estimate damping coefficient real xcx0 = 1.e-4; real e = parts[i].e_dry + (1.+parts[i].e_dry)/parts[i].St[q]*log(xcx0); if(e < 0) e = 0; real alpha = -2.263*pow(e,0.3948)+2.22; real eta = alpha*sqrt(4./3.*PI*ai*ai*ai*parts[i].rho*k*sqrt(-h)); // use the same coeff_fric for particle and wall real coeff_fric = parts[i].coeff_fric; real Fty = -kt * sy; real Ftz = -kt * sz; real Ft = sqrt(Fty*Fty + Ftz*Ftz); if(Ft > fabs(coeff_fric * (sqrt(-h*h*h)*k - eta*Un))) { Fty = coeff_fric * (sqrt(-h*h*h)*k - eta*Un) * Fty / Ft; Ftz = coeff_fric * (sqrt(-h*h*h)*k - eta*Un) * Ftz / Ft; } parts[i].iFx -= isTrue * (sqrt(-h*h*h)*k - eta*Un); parts[i].iFy += isTrue * Fty; parts[i].iFz += isTrue * Ftz; parts[i].iLy -= isTrue * (ai+0.5*h) * Ftz; parts[i].iLz += isTrue * (ai+0.5*h) * Ftx; } // south wall dy = fabs(parts[i].y - (dom->ys + bc.dsS)); h = dy - ai; isTrue = (bc.pS == NEUMANN); if(h < hN && h > 0) { // remove from contact list if it is there q = 0; while(parts[i].iSt[q] != -12 && q < MAX_NEIGHBORS) { q++; } if(parts[i].iSt[q] == -12) { parts[i].iSt[q] = -1; parts[i].St[q] = 0.; } if(h < eps*parts[i].r) h = eps*parts[i].r; ah = ai/h - ai/hN; lnah = log(hN/h); Un = parts[i].v - bc.vSD; Utx = parts[i].u - bc.uSD; Uty = 0.; Utz = parts[i].w - bc.wSD; omx = parts[i].ox; omy = parts[i].oy; omz = parts[i].oz; Fnx = 0.; Fny = -6.*PI*mu*ai*Un*ah; Fnz = 0.; Ftx = -6.*PI*mu*ai*Utx*8./15.*lnah; Fty = 0.; Ftz = -6.*PI*mu*ai*Utz*8./15.*lnah; Ftx += -8.*PI*mu*ai*ai*omz*1./10.*lnah; Fty += 0.; Ftz += 8.*PI*mu*ai*ai*omx*1./10.*lnah; Lox = 8.*PI*mu*ai*ai*Utz*1./10.*lnah; Loy = 0.; Loz = -8.*PI*mu*ai*ai*Utx*1./10.*lnah; Lox += -8.*PI*mu*ai*ai*ai*omx*2./5.*lnah; Loy += 0.; Loz += -8.*PI*mu*ai*ai*ai*omz*2./5.*lnah; parts[i].iFx += isTrue * (Fnx + Ftx); parts[i].iFy += isTrue * (Fny + Fty); parts[i].iFz += isTrue * (Fnz + Ftz); parts[i].iLx += isTrue * Lox; parts[i].iLy += isTrue * Loy; parts[i].iLz += isTrue * Loz; } if(h < 0) { Un = parts[i].v - bc.vSD; real Utx = 0.5*(parts[i].u+parts[i].u0) - bc.uSD; real Utz = 0.5*(parts[i].w+parts[i].w0) - bc.wSD; // determine whether this is a new contact q = 0; while(parts[i].iSt[q] != -12 && q < MAX_NEIGHBORS) 
{ q++; } if(q == MAX_NEIGHBORS) { q = 0; while(parts[i].iSt[q] != -1) { q++; } parts[i].iSt[q] = -12; parts[i].St[q] = 1./9.*parts[i].rho/rhof*2.*parts[i].r*fabs(Un)/nu; } omx = 0.5*(parts[i].ox+parts[i].ox0); omy = 0.5*(parts[i].oy+parts[i].oy0); omz = 0.5*(parts[i].oz+parts[i].oz0); real Vx = Utx + (ai + 0.5*h)*omz; real Vz = Utz - (ai + 0.5*h)*omx; real Hi = 0.5*parts[i].E/(1.+parts[i].sigma); real kt = 8./((1.-parts[i].sigma*parts[i].sigma)/Hi +(1.-parts[i].sigma*parts[i].sigma)/Hi)/sqrt(1./ai)*sqrt(-h); real sx = Vx * dt; real sz = Vz * dt; lnah = 0; real k = 4./3./((1.-parts[i].sigma*parts[i].sigma)/parts[i].E + (1.-parts[i].sigma*parts[i].sigma)/parts[i].E)/sqrt(1./ai); // estimate damping coefficient real xcx0 = 1.e-4; real e = parts[i].e_dry + (1.+parts[i].e_dry)/parts[i].St[q]*log(xcx0); if(e < 0) e = 0; real alpha = -2.263*pow(e,0.3948)+2.22; real eta = alpha*sqrt(4./3.*PI*ai*ai*ai*parts[i].rho*k*sqrt(-h)); // use the same coeff_fric for particle and wall real coeff_fric = parts[i].coeff_fric; real Ftx = -kt * sx; real Ftz = -kt * sz; real Ft = sqrt(Ftx*Ftx + Ftz*Ftz); if(Ft > fabs(coeff_fric * (sqrt(-h*h*h)*k - eta*Un))) { Ftx = coeff_fric * (sqrt(-h*h*h)*k - eta*Un) * Ftx / Ft; Ftz = coeff_fric * (sqrt(-h*h*h)*k - eta*Un) * Ftz / Ft; } parts[i].iFx += isTrue * Ftx; parts[i].iFy += isTrue * (sqrt(-h*h*h)*k - eta*Un); parts[i].iFz += isTrue * Ftz; parts[i].iLx -= isTrue * (ai+0.5*h) * Ftz; parts[i].iLz += isTrue * (ai+0.5*h) * Ftx; } // north wall dy = fabs(parts[i].y - (dom->ye - bc.dsN)); h = dy - ai; isTrue = (bc.pN == NEUMANN); if(h < hN && h > 0) { // remove from contact list if it is there q = 0; while(parts[i].iSt[q] != -13 && q < MAX_NEIGHBORS) { q++; } if(parts[i].iSt[q] == -13) { parts[i].iSt[q] = -1; parts[i].St[q] = 0.; } if(h < eps*parts[i].r) h = eps*parts[i].r; ah = ai/h - ai/hN; lnah = log(hN/h); Un = parts[i].v - bc.vND; Utx = parts[i].u - bc.uND; Uty = 0.; Utz = parts[i].w - bc.wND; omx = parts[i].ox; omy = parts[i].oy; omz = parts[i].oz; Fnx = 0.; Fny = -6.*PI*mu*ai*Un*ah; Fnz = 0.; Ftx = -6.*PI*mu*ai*Utx*8./15.*lnah; Fty = 0.; Ftz = -6.*PI*mu*ai*Utz*8./15.*lnah; Ftx += 8.*PI*mu*ai*ai*omz*1./10.*lnah; Fty += 0.; Ftz += -8.*PI*mu*ai*ai*omx*1./10.*lnah; Lox = -8.*PI*mu*ai*ai*Utz*1./10.*lnah; Loy = 0.; Loz = 8.*PI*mu*ai*ai*Utx*1./10.*lnah; Lox += -8.*PI*mu*ai*ai*ai*omx*2./5.*lnah; Loy += 0.; Loz += -8.*PI*mu*ai*ai*ai*omz*2./5.*lnah; parts[i].iFx += isTrue * (Fnx + Ftx); parts[i].iFy += isTrue * (Fny + Fty); parts[i].iFz += isTrue * (Fnz + Ftz); parts[i].iLx += isTrue * Lox; parts[i].iLy += isTrue * Loy; parts[i].iLz += isTrue * Loz; } if(h < 0) { Un = -(parts[i].v - bc.vND); real Utx = 0.5*(parts[i].u+parts[i].u0) - bc.uSD; real Utz = 0.5*(parts[i].w+parts[i].w0) - bc.wSD; // determine whether this is a new contact q = 0; while(parts[i].iSt[q] != -13 && q < MAX_NEIGHBORS) { q++; } if(q == MAX_NEIGHBORS) { q = 0; while(parts[i].iSt[q] != -1) { q++; } parts[i].iSt[q] = -13; parts[i].St[q] = 1./9.*parts[i].rho/rhof*2.*parts[i].r*fabs(Un)/nu; } omx = 0.5*(parts[i].ox+parts[i].ox0); omy = 0.5*(parts[i].oy+parts[i].oy0); omz = 0.5*(parts[i].oz+parts[i].oz0); real Vx = -(Utx + (ai + 0.5*h)*omz); real Vz = -(Utz - (ai + 0.5*h)*omx); real Hi = 0.5*parts[i].E/(1.+parts[i].sigma); real kt = 8./((1.-parts[i].sigma*parts[i].sigma)/Hi +(1.-parts[i].sigma*parts[i].sigma)/Hi)/sqrt(1./ai)*sqrt(-h); real sx = Vx * dt; real sz = Vz * dt; lnah = 0; real k = 4./3./((1.-parts[i].sigma*parts[i].sigma)/parts[i].E + (1.-parts[i].sigma*parts[i].sigma)/parts[i].E)/sqrt(1./ai); // estimate 
damping coefficient real xcx0 = 1.e-4; real e = parts[i].e_dry + (1.+parts[i].e_dry)/parts[i].St[q]*log(xcx0); if(e < 0) e = 0; real alpha = -2.263*pow(e,0.3948)+2.22; real eta = alpha*sqrt(4./3.*PI*ai*ai*ai*parts[i].rho*k*sqrt(-h)); // use the same coeff_fric for particle and wall real coeff_fric = parts[i].coeff_fric; real Ftx = -kt * sx; real Ftz = -kt * sz; real Ft = sqrt(Ftx*Ftx + Ftz*Ftz); if(Ft > fabs(coeff_fric * (sqrt(-h*h*h)*k - eta*Un))) { Ftx = coeff_fric * (sqrt(-h*h*h)*k - eta*Un) * Ftx / Ft; Ftz = coeff_fric * (sqrt(-h*h*h)*k - eta*Un) * Ftz / Ft; } parts[i].iFx += isTrue * Ftx; parts[i].iFy -= isTrue * (sqrt(-h*h*h)*k - eta*Un); parts[i].iFz += isTrue * Ftz; parts[i].iLx += isTrue * (ai+0.5*h) * Ftz; parts[i].iLz -= isTrue * (ai+0.5*h) * Ftx; } // bottom wall dz = fabs(parts[i].z - (dom->zs + bc.dsB)); h = dz - ai; isTrue = (bc.pB == NEUMANN); if(h < hN && h > 0) { // remove from contact list if it is there q = 0; while(parts[i].iSt[q] != -14 && q < MAX_NEIGHBORS) { q++; } if(parts[i].iSt[q] == -14) { parts[i].iSt[q] = -1; parts[i].St[q] = 0.; } if(h < eps*parts[i].r) h = eps*parts[i].r; ah = ai/h - ai/hN; lnah = log(hN/h); Un = parts[i].w - bc.wBD; Utx = parts[i].u - bc.uBD; Uty = parts[i].v - bc.vBD; Utz = 0.; omx = parts[i].ox; omy = parts[i].oy; omz = parts[i].oz; Fnx = 0.; Fny = 0.; Fnz = -6.*PI*mu*ai*Un*ah; Ftx = -6.*PI*mu*ai*Utx*8./15.*lnah; Fty = -6.*PI*mu*ai*Uty*8./15.*lnah; Ftz = 0.; Ftx += 8.*PI*mu*ai*ai*omy*1./10.*lnah; Fty += -8.*PI*mu*ai*ai*omx*1./10.*lnah; Ftz += 0.; Lox = -8.*PI*mu*ai*ai*Uty*1./10.*lnah; Loy = 8.*PI*mu*ai*ai*Utx*1./10.*lnah; Loz = 0.; Lox += -8.*PI*mu*ai*ai*ai*omx*2./5.*lnah; Loy += -8.*PI*mu*ai*ai*ai*omy*2./5.*lnah; Loz += 0.; parts[i].iFx += isTrue * (Fnx + Ftx); parts[i].iFy += isTrue * (Fny + Fty); parts[i].iFz += isTrue * (Fnz + Ftz); parts[i].iLx += isTrue * Lox; parts[i].iLy += isTrue * Loy; parts[i].iLz += isTrue * Loz; } if(h < 0) { Un = parts[i].w - bc.wBD; real Utx = 0.5*(parts[i].u+parts[i].u0) - bc.uSD; real Uty = 0.5*(parts[i].v+parts[i].v0) - bc.vSD; // determine whether this is a new contact q = 0; while(parts[i].iSt[q] != -14 && q < MAX_NEIGHBORS) { q++; } if(q == MAX_NEIGHBORS) { q = 0; while(parts[i].iSt[q] != -1) { q++; } parts[i].iSt[q] = -14; parts[i].St[q] = 1./9.*parts[i].rho/rhof*2.*parts[i].r*fabs(Un)/nu; } omx = 0.5*(parts[i].ox+parts[i].ox0); omy = 0.5*(parts[i].oy+parts[i].oy0); omz = 0.5*(parts[i].oz+parts[i].oz0); real Vx = Utx - (ai + 0.5*h)*omy; real Vy = Uty + (ai + 0.5*h)*omx; real Hi = 0.5*parts[i].E/(1.+parts[i].sigma); real kt = 8./((1.-parts[i].sigma*parts[i].sigma)/Hi +(1.-parts[i].sigma*parts[i].sigma)/Hi)/sqrt(1./ai)*sqrt(-h); real sx = Vx * dt; real sy = Vy * dt; lnah = 0; real k = 4./3./((1.-parts[i].sigma*parts[i].sigma)/parts[i].E + (1.-parts[i].sigma*parts[i].sigma)/parts[i].E)/sqrt(1./ai); // estimate damping coefficient real xcx0 = 1.e-4; real e = parts[i].e_dry + (1.+parts[i].e_dry)/parts[i].St[q]*log(xcx0); if(e < 0) e = 0; real alpha = -2.263*pow(e,0.3948)+2.22; real eta = alpha*sqrt(4./3.*PI*ai*ai*ai*parts[i].rho*k*sqrt(-h)); // use the same coeff_fric for particle and wall real coeff_fric = parts[i].coeff_fric; real Ftx = -kt * sx; real Fty = -kt * sy; real Ft = sqrt(Ftx*Ftx + Fty*Fty); if(Ft > fabs(coeff_fric * (sqrt(-h*h*h)*k - eta*Un))) { Ftx = coeff_fric * (sqrt(-h*h*h)*k - eta*Un) * Ftx / Ft; Fty = coeff_fric * (sqrt(-h*h*h)*k - eta*Un) * Fty / Ft; } parts[i].iFx += isTrue * Ftx; parts[i].iFy += isTrue * Fty; parts[i].iFz += isTrue * (sqrt(-h*h*h)*k - eta*Un); parts[i].iLx += isTrue 
* (ai+0.5*h) * Fty; parts[i].iLy -= isTrue * (ai+0.5*h) * Ftx; } // top wall dz = fabs(parts[i].z - (dom->ze - bc.dsT)); h = dz - ai; isTrue = (bc.pT == NEUMANN); if(h < hN && h > 0) { // remove from contact list if it is there q = 0; while(parts[i].iSt[q] != -15 && q < MAX_NEIGHBORS) { q++; } if(parts[i].iSt[q] == -15) { parts[i].iSt[q] = -1; parts[i].St[q] = 0.; } if(h < eps*parts[i].r) h = eps*parts[i].r; ah = ai/h - ai/hN; lnah = log(hN/h); Un = parts[i].w - bc.wTD; Utx = parts[i].u - bc.uTD; Uty = parts[i].v - bc.vTD; Utz = 0.; omx = parts[i].ox; omy = parts[i].oy; omz = parts[i].oz; Fnx = 0.; Fny = 0.; Fnz = -6.*PI*mu*ai*Un*ah; Ftx = -6.*PI*mu*ai*Utx*8./15.*lnah; Fty = -6.*PI*mu*ai*Uty*8./15.*lnah; Ftz = 0.; Ftx += -8.*PI*mu*ai*ai*omy*1./10.*lnah; Fty += 8.*PI*mu*ai*ai*omx*1./10.*lnah; Ftz += 0.; Lox = 8.*PI*mu*ai*ai*Uty*1./10.*lnah; Loy = -8.*PI*mu*ai*ai*Utx*1./10.*lnah; Loz = 0.; Lox += -8.*PI*mu*ai*ai*ai*omx*2./5.*lnah; Loy += -8.*PI*mu*ai*ai*ai*omy*2./5.*lnah; Loz += 0.; parts[i].iFx += isTrue * (Fnx + Ftx); parts[i].iFy += isTrue * (Fny + Fty); parts[i].iFz += isTrue * (Fnz + Ftz); parts[i].iLx += isTrue * Lox; parts[i].iLy += isTrue * Loy; parts[i].iLz += isTrue * Loz; } if(h < 0) { Un = -(parts[i].w - bc.wTD); real Utx = 0.5*(parts[i].u+parts[i].u0) - bc.uSD; real Uty = 0.5*(parts[i].v+parts[i].v0) - bc.vSD; // determine whether this is a new contact q = 0; while(parts[i].iSt[q] != -15 && q < MAX_NEIGHBORS) { q++; } if(q == MAX_NEIGHBORS) { q = 0; while(parts[i].iSt[q] != -1) { q++; } parts[i].iSt[q] = -15; parts[i].St[q] = 1./9.*parts[i].rho/rhof*2.*parts[i].r*fabs(Un)/nu; } omx = 0.5*(parts[i].ox+parts[i].ox0); omy = 0.5*(parts[i].oy+parts[i].oy0); omz = 0.5*(parts[i].oz+parts[i].oz0); real Vx = -(Utx - (ai + 0.5*h)*omy); real Vy = -(Uty + (ai + 0.5*h)*omx); real Hi = 0.5*parts[i].E/(1.+parts[i].sigma); real kt = 8./((1.-parts[i].sigma*parts[i].sigma)/Hi +(1.-parts[i].sigma*parts[i].sigma)/Hi)/sqrt(1./ai)*sqrt(-h); real sx = Vx * dt; real sy = Vy * dt; lnah = 0; real k = 4./3./((1.-parts[i].sigma*parts[i].sigma)/parts[i].E + (1.-parts[i].sigma*parts[i].sigma)/parts[i].E)/sqrt(1./ai); // estimate damping coefficient real xcx0 = 1.e-4; real e = parts[i].e_dry + (1.+parts[i].e_dry)/parts[i].St[q]*log(xcx0); if(e < 0) e = 0; real alpha = -2.263*pow(e,0.3948)+2.22; real eta = alpha*sqrt(4./3.*PI*ai*ai*ai*parts[i].rho*k*sqrt(-h)); // use the same coeff_fric for particle and wall real coeff_fric = parts[i].coeff_fric; real Ftx = -kt * sx; real Fty = -kt * sy; real Ft = sqrt(Ftx*Ftx + Fty*Fty); if(Ft > fabs(coeff_fric * (sqrt(-h*h*h)*k - eta*Un))) { Ftx = coeff_fric * (sqrt(-h*h*h)*k - eta*Un) * Ftx / Ft; Fty = coeff_fric * (sqrt(-h*h*h)*k - eta*Un) * Fty / Ft; } parts[i].iFx += isTrue * Ftx; parts[i].iFy += isTrue * Fty; parts[i].iFz -= isTrue * (sqrt(-h*h*h)*k - eta*Un); parts[i].iLx -= isTrue * (ai+0.5*h) * Fty; parts[i].iLy += isTrue * (ai+0.5*h) * Ftx; } } } __global__ void spring_parts(part_struct *parts, int nparts) { int i = threadIdx.x + blockIdx.x*blockDim.x; if(i < nparts && parts[i].spring_k > 0.) 
{ real nx = parts[i].x-parts[i].spring_x; real ny = parts[i].y-parts[i].spring_y; real nz = parts[i].z-parts[i].spring_z; real n = sqrt(nx*nx+ny*ny+nz*nz); real nhatx = nx / n; real nhaty = ny / n; real nhatz = nz / n; real lx = parts[i].spring_l*nhatx; real ly = parts[i].spring_l*nhaty; real lz = parts[i].spring_l*nhatz; real l = sqrt(lx*lx+ly*ly+lz*lz); real dx = parts[i].x-parts[i].spring_x-lx; real dy = parts[i].y-parts[i].spring_y-ly; real dz = parts[i].z-parts[i].spring_z-lz; parts[i].kFx = - parts[i].spring_k * dx; parts[i].kFy = - parts[i].spring_k * dy; parts[i].kFz = - parts[i].spring_k * dz; } } __global__ void yank_u_WE(real *u, dom_struct *dom, real *plane, real xpos, real vel) { int tj = blockDim.x*blockIdx.x + threadIdx.x; int tk = blockDim.y*blockIdx.y + threadIdx.y; real ddx = 1. / dom->dx; if((tj < dom->Gfx._jnb) && (tk < dom->Gfx._knb)) { // find index of node // for now, ignore motion tangential to plane int i = floor((xpos - dom->xs) * ddx) + DOM_BUF; if(i < dom->Gfx.is) i += dom->Gfx.inb; if(i > dom->Gfx.ie-1) i -= dom->Gfx.inb; real xx = (i-DOM_BUF) * dom->dx + dom->xs; int W = i + tj*dom->Gfx.s1b + tk*dom->Gfx.s2b; int E = (i+1) + tj*dom->Gfx.s1b + tk*dom->Gfx.s2b; real dudx = (u[E] - u[W]) * ddx; plane[tj + tk*dom->Gfx.jnb] = u[W] + dudx * (xpos - xx) + vel; } } __global__ void yank_v_WE(real *v, dom_struct *dom, real *plane_w, real *plane_e, real xpos, real vel) { int tj = blockDim.x*blockIdx.x + threadIdx.x; int tk = blockDim.y*blockIdx.y + threadIdx.y; real ddx = 1. / dom->dx; if((tj < dom->Gfy._jnb) && (tk < dom->Gfy._knb)) { // find index of node // for now, ignore motion tangential to plane int i = floor((xpos - dom->xs) * ddx) + DOM_BUF; if(i < dom->Gfy.is) i += dom->Gfy.inb; if(i > dom->Gfy.ie-1) i -= dom->Gfy.inb; real xx_w = (i-DOM_BUF-0.5) * dom->dx + dom->xs; real xx_e = (i-DOM_BUF+0.5) * dom->dx + dom->xs; int W = (i-1) + tj*dom->Gfy.s1b + tk*dom->Gfy.s2b; int M = i + tj*dom->Gfy.s1b + tk*dom->Gfy.s2b; int E = (i+1) + tj*dom->Gfy.s1b + tk*dom->Gfy.s2b; real dvdx_w = (v[M] - v[W]) * ddx; real dvdx_e = (v[E] - v[M]) * ddx; plane_w[tj + tk*dom->Gfy.jnb] = v[W] + dvdx_w * (xpos - 0.5*dom->dx - xx_w); plane_e[tj + tk*dom->Gfy.jnb] = v[M] + dvdx_e * (xpos + 0.5*dom->dx - xx_e); } } __global__ void yank_w_WE(real *w, dom_struct *dom, real *plane_w, real *plane_e, real xpos, real vel) { int tj = blockDim.x*blockIdx.x + threadIdx.x; int tk = blockDim.y*blockIdx.y + threadIdx.y; real ddx = 1. / dom->dx; if((tj < dom->Gfz._jnb) && (tk < dom->Gfz._knb)) { // find index of node // for now, ignore motion tangential to plane int i = floor((xpos - dom->xs) * ddx) + DOM_BUF; if(i < dom->Gfz.is) i += dom->Gfz.inb; if(i > dom->Gfz.ie-1) i -= dom->Gfz.inb; real xx_w = (i-DOM_BUF - 0.5) * dom->dx + dom->xs; real xx_e = (i-DOM_BUF + 0.5) * dom->dx + dom->xs; int W = (i-1) + tj*dom->Gfz.s1b + tk*dom->Gfz.s2b; int M = i + tj*dom->Gfz.s1b + tk*dom->Gfz.s2b; int E = (i+1) + tj*dom->Gfz.s1b + tk*dom->Gfz.s2b; real dwdx_w = (w[M] - w[W]) * ddx; real dwdx_e = (w[E] - w[M]) * ddx; plane_w[tj + tk*dom->Gfz.jnb] = w[W] + dwdx_w * (xpos -0.5*dom->dx - xx_w); plane_e[tj + tk*dom->Gfz.jnb] = w[M] + dwdx_e * (xpos +0.5*dom->dx - xx_e); } } __global__ void yank_u_SN(real *u, dom_struct *dom, real *plane_s, real *plane_n, real ypos, real vel) { int tk = blockDim.x*blockIdx.x + threadIdx.x; int ti = blockDim.y*blockIdx.y + threadIdx.y; real ddy = 1. 
/ dom->dy;

  if((tk < dom->Gfx._knb) && (ti < dom->Gfx._inb)) {
    // find index of node
    // for now, ignore motion tangential to plane
    int j = floor((ypos - dom->ys) * ddy) + DOM_BUF;
    if(j < dom->Gfx.js) j += dom->Gfx.jnb;
    if(j > dom->Gfx.je-1) j -= dom->Gfx.jnb;
    real yy_s = (j-DOM_BUF - 0.5) * dom->dy + dom->ys;
    real yy_n = (j-DOM_BUF + 0.5) * dom->dy + dom->ys;
    int S = ti + (j-1)*dom->Gfx.s1b + tk*dom->Gfx.s2b;
    int M = ti + j*dom->Gfx.s1b + tk*dom->Gfx.s2b;
    int N = ti + (j+1)*dom->Gfx.s1b + tk*dom->Gfx.s2b;
    real dudy_s = (u[M] - u[S]) * ddy;
    real dudy_n = (u[N] - u[M]) * ddy;
    plane_s[tk + ti*dom->Gfx.knb] = u[S] + dudy_s * (ypos - 0.5*dom->dy - yy_s);
    plane_n[tk + ti*dom->Gfx.knb] = u[M] + dudy_n * (ypos + 0.5*dom->dy - yy_n);
  }
}

__global__ void yank_v_SN(real *v, dom_struct *dom, real *plane, real ypos,
  real vel)
{
  int tk = blockDim.x*blockIdx.x + threadIdx.x;
  int ti = blockDim.y*blockIdx.y + threadIdx.y;

  real ddy = 1. / dom->dy;

  if((ti < dom->Gfy._inb) && (tk < dom->Gfy._knb)) {
    // find index of node
    // for now, ignore motion tangential to plane
    int j = floor((ypos - dom->ys) * ddy) + DOM_BUF;
    if(j < dom->Gfy.js) j += dom->Gfy.jnb;
    if(j > dom->Gfy.je-1) j -= dom->Gfy.jnb;
    real yy = (j-DOM_BUF) * dom->dy + dom->ys;
    int S = ti + j*dom->Gfy.s1b + tk*dom->Gfy.s2b;
    int N = ti + (j+1)*dom->Gfy.s1b + tk*dom->Gfy.s2b;
    real dvdy = (v[N] - v[S]) * ddy;
    plane[tk + ti*dom->Gfy.knb] = v[S] + dvdy * (ypos - yy) + vel;
  }
}

__global__ void yank_w_SN(real *w, dom_struct *dom, real *plane_s, real *plane_n,
  real ypos, real vel)
{
  int tk = blockDim.x*blockIdx.x + threadIdx.x;
  int ti = blockDim.y*blockIdx.y + threadIdx.y;

  real ddy = 1. / dom->dy;

  if((ti < dom->Gfz._inb) && (tk < dom->Gfz._knb)) {
    // find index of node
    // for now, ignore motion tangential to plane
    int j = floor((ypos - dom->ys) * ddy) + DOM_BUF;
    if(j < dom->Gfz.js) j += dom->Gfz.jnb;
    if(j > dom->Gfz.je-1) j -= dom->Gfz.jnb;
    real yy_s = (j-DOM_BUF - 0.5) * dom->dy + dom->ys;
    real yy_n = (j-DOM_BUF + 0.5) * dom->dy + dom->ys;
    int S = ti + (j-1)*dom->Gfz.s1b + tk*dom->Gfz.s2b;
    int M = ti + j*dom->Gfz.s1b + tk*dom->Gfz.s2b;
    int N = ti + (j+1)*dom->Gfz.s1b + tk*dom->Gfz.s2b;
    real dwdy_s = (w[M] - w[S]) * ddy;
    real dwdy_n = (w[N] - w[M]) * ddy;
    plane_s[tk + ti*dom->Gfz.knb] = w[S] + dwdy_s * (ypos - 0.5*dom->dy - yy_s);
    plane_n[tk + ti*dom->Gfz.knb] = w[M] + dwdy_n * (ypos + 0.5*dom->dy - yy_n);
  }
}

__global__ void yank_u_BT(real *u, dom_struct *dom, real *plane_b, real *plane_t,
  real zpos, real vel)
{
  int ti = blockDim.x*blockIdx.x + threadIdx.x;
  int tj = blockDim.y*blockIdx.y + threadIdx.y;

  real ddz = 1.
/ dom->dz; // this is not the ideal situation, try to change to better interpolation if((ti < dom->Gfx._inb) && (tj < dom->Gfx._jnb)) { // find index of node // for now, ignore motion tangential to plane int k = floor((zpos - dom->zs) * ddz) + DOM_BUF; if(k < dom->Gfx.ks) k += dom->Gfx.knb; if(k > dom->Gfx.ke-1) k -= dom->Gfx.knb; real zz_b = (k-DOM_BUF - 0.5) * dom->dz + dom->zs; real zz_t = (k-DOM_BUF + 0.5) * dom->dz + dom->zs; int B = ti + tj*dom->Gfx.s1b + (k-1)*dom->Gfx.s2b; int M = ti + tj*dom->Gfx.s1b + k*dom->Gfx.s2b; int T = ti + tj*dom->Gfx.s1b + (k+1)*dom->Gfx.s2b; real dudz_b = (u[M] - u[B]) * ddz; real dudz_t = (u[T] - u[M]) * ddz; plane_b[ti + tj*dom->Gfx.inb] = u[B] + dudz_b * (zpos - dom->dz*0.5 - zz_b); plane_t[ti + tj*dom->Gfx.inb] = u[M] + dudz_t * (zpos + dom->dz*0.5 - zz_t); } } __global__ void yank_v_BT(real *v, dom_struct *dom, real *plane_b, real *plane_t, real zpos, real vel) { int ti = blockDim.x*blockIdx.x + threadIdx.x; int tj = blockDim.y*blockIdx.y + threadIdx.y; real ddz = 1. / dom->dz; if((ti < dom->Gfy._inb) && (tj < dom->Gfy._jnb)) { // find index of node // for now, ignore motion tangential to plane int k = floor((zpos - dom->zs) * ddz) + DOM_BUF; if(k < dom->Gfy.ks) k += dom->Gfy.knb; if(k > dom->Gfy.ke-1) k -= dom->Gfy.knb; real zz_b = (k-DOM_BUF - 0.5) * dom->dz + dom->zs; real zz_t = (k-DOM_BUF + 0.5) * dom->dz + dom->zs; int B = ti + tj*dom->Gfy.s1b + (k-1)*dom->Gfy.s2b; int M = ti + tj*dom->Gfy.s1b + k*dom->Gfy.s2b; int T = ti + tj*dom->Gfy.s1b + (k+1)*dom->Gfy.s2b; real dvdz_b = (v[M] - v[B]) * ddz; real dvdz_t = (v[T] - v[M]) * ddz; plane_b[ti + tj*dom->Gfy.inb] = v[B] + dvdz_b * (zpos - dom->dz*0.5 - zz_b); plane_t[ti + tj*dom->Gfy.inb] = v[M] + dvdz_t * (zpos + dom->dz*0.5 - zz_t); } } __global__ void yank_w_BT(real *w, dom_struct *dom, real *plane, real zpos, real vel) { int ti = blockDim.x*blockIdx.x + threadIdx.x; int tj = blockDim.y*blockIdx.y + threadIdx.y; real ddz = 1. 
/ dom->dz;

  if((ti < dom->Gfz._inb) && (tj < dom->Gfz._jnb)) {
    // find index of node
    // for now, ignore motion tangential to plane
    int k = floor((zpos - dom->zs) * ddz) + DOM_BUF;
    if(k < dom->Gfz.ks) k += dom->Gfz.knb;
    if(k > dom->Gfz.ke-1) k -= dom->Gfz.knb;
    real zz = (k-DOM_BUF) * dom->dz + dom->zs;
    int B = ti + tj*dom->Gfz.s1b + k*dom->Gfz.s2b;
    int T = ti + tj*dom->Gfz.s1b + (k+1)*dom->Gfz.s2b;
    real dwdz = (w[T] - w[B]) * ddz;
    plane[ti + tj*dom->Gfz.inb] = w[B] + dwdz * (zpos - zz) + vel;
  }
}

__global__ void colocate_Gfx(real *u, real *u_co, dom_struct *dom)
{
  int tj = blockDim.x*blockIdx.x + threadIdx.x + DOM_BUF;
  int tk = blockDim.y*blockIdx.y + threadIdx.y + DOM_BUF;

  if((tj < dom->Gfx.jnb-1) && (tk < dom->Gfx.knb-1)) {
    for(int i = dom->Gfx.is; i < dom->Gfx.ie-1; i++) {
      u_co[(i-DOM_BUF) + (tj-DOM_BUF)*dom->Gcc.s1 + (tk-DOM_BUF)*dom->Gcc.s2] =
        0.5 * (u[i + tj*dom->Gfx.s1b + tk*dom->Gfx.s2b]
        + u[(i+1) + tj*dom->Gfx.s1b + tk*dom->Gfx.s2b]);
    }
  }
}

__global__ void colocate_Gfy(real *v, real *v_co, dom_struct *dom)
{
  int tk = blockDim.x*blockIdx.x + threadIdx.x + DOM_BUF;
  int ti = blockDim.y*blockIdx.y + threadIdx.y + DOM_BUF;

  if((tk < dom->Gfy.knb-1) && (ti < dom->Gfy.inb-1)) {
    for(int j = dom->Gfy.js; j < dom->Gfy.je-1; j++) {
      v_co[(ti-DOM_BUF) + (j-DOM_BUF)*dom->Gcc.s1 + (tk-DOM_BUF)*dom->Gcc.s2] =
        0.5 * (v[ti + j*dom->Gfy.s1b + tk*dom->Gfy.s2b]
        + v[ti + (j+1)*dom->Gfy.s1b + tk*dom->Gfy.s2b]);
    }
  }
}

__global__ void colocate_Gfz(real *w, real *w_co, dom_struct *dom)
{
  int ti = blockDim.x*blockIdx.x + threadIdx.x + DOM_BUF;
  int tj = blockDim.y*blockIdx.y + threadIdx.y + DOM_BUF;

  if((ti < dom->Gfz.inb-1) && (tj < dom->Gfz.jnb-1)) {
    for(int k = dom->Gfz.ks; k < dom->Gfz.ke-1; k++) {
      w_co[(ti-DOM_BUF) + (tj-DOM_BUF)*dom->Gcc.s1 + (k-DOM_BUF)*dom->Gcc.s2] =
        0.5 * (w[ti + tj*dom->Gfz.s1b + k*dom->Gfz.s2b]
        + w[ti + tj*dom->Gfz.s1b + (k+1)*dom->Gfz.s2b]);
    }
  }
}

__global__ void energy_multiply(real *u_co, real *v_co, real *w_co, real *co,
  dom_struct *dom)
{
  int tj = blockDim.x*blockIdx.x + threadIdx.x;
  int tk = blockDim.y*blockIdx.y + threadIdx.y;
  int C;  // memory location

  if((tj < dom->Gcc.jn) && (tk < dom->Gcc.kn)) {
    for(int i = dom->Gcc.is-DOM_BUF; i < dom->Gcc.ie-DOM_BUF; i++) {
      C = i + tj*dom->Gcc.s1 + tk*dom->Gcc.s2;
      co[C] = u_co[C]*u_co[C] + v_co[C]*v_co[C] + w_co[C]*w_co[C];
    }
  }
}

__device__ real ab_int(real dt0, real dt, real f0, real df0, real df)
{
  real DT = dt/dt0;
  if(dt0 < 0) return f0 + df*dt;
  else return f0 + ((1+0.5*DT)*df - 0.5*DT*df0)*dt;
}

__global__ void internal_u(real *u, part_struct *parts, dom_struct *dom,
  int *flag_u, int *phase)
{
  int tj = blockIdx.x * blockDim.x + threadIdx.x + DOM_BUF;
  int tk = blockIdx.y * blockDim.y + threadIdx.y + DOM_BUF;

  if(tj < dom->Gfx._je && tk < dom->Gfx._ke) {
    for(int i = dom->Gfx._is; i < dom->Gfx._ie; i++) {
      int C = i + tj*dom->Gfx._s1b + tk*dom->Gfx._s2b;
      int W = (i-1) + tj*dom->Gcc._s1b + tk*dom->Gcc._s2b;
      int E = i + tj*dom->Gcc._s1b + tk*dom->Gcc._s2b;
      int pw = phase[W];
      int pe = phase[E];
      int f = flag_u[C];
      int p = (pw > -1 && pe > -1) * phase[E];
      real rx = (i - DOM_BUF) * dom->dx + dom->xs - parts[p].x;
      if(rx <= 2.*parts[p].r-dom->xl) rx += dom->xl;
      if(rx >= dom->xl-2.*parts[p].r) rx -= dom->xl;
      real ry = (tj - 0.5) * dom->dy + dom->ys - parts[p].y;
      if(ry <= 2.*parts[p].r-dom->yl) ry += dom->yl;
      if(ry >= dom->yl-2.*parts[p].r) ry -= dom->yl;
      real rz = (tk - 0.5) * dom->dz + dom->zs - parts[p].z;
      if(rz <= 2.*parts[p].r-dom->zl) rz += dom->zl;
      if(rz >= dom->zl-2.*parts[p].r) rz -= dom->zl;
      real ocrossr_x = parts[p].oy*rz - parts[p].oz*ry;
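      // select without branching: keep the existing u[C] when either adjacent
      // cell is outside a particle (phase == -1) or the face is flagged
      // (flag_u == -1); otherwise impose the rigid-body velocity of particle p,
      // parts[p].u + (omega x r)_x, at this face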
u[C] = (pw == -1 || pe == -1 || f == -1) * u[C] + (pw > -1 && pe > -1 && f != -1) * (ocrossr_x + parts[p].u); } } } __global__ void internal_v(real *v, part_struct *parts, dom_struct *dom, int *flag_v, int *phase) { int tk = blockIdx.x * blockDim.x + threadIdx.x + DOM_BUF; int ti = blockIdx.y * blockDim.y + threadIdx.y + DOM_BUF; if(tk < dom->Gfy._ke && ti < dom->Gfy._ie) { for(int j = dom->Gfy._js; j < dom->Gfy._je; j++) { int C = ti + j*dom->Gfy._s1b + tk*dom->Gfy._s2b; int S = ti + (j-1)*dom->Gcc._s1b + tk*dom->Gcc._s2b; int N = ti + j*dom->Gcc._s1b + tk*dom->Gcc._s2b; int ps = phase[S]; int pn = phase[N]; int f = flag_v[C]; int p = (ps > -1 && pn > -1) * phase[N]; real rx = (ti - 0.5) * dom->dx + dom->xs - parts[p].x; if(rx <= 2.*parts[p].r-dom->xl) rx += dom->xl; if(rx >= dom->xl-2.*parts[p].r) rx -= dom->xl; real ry = (j - DOM_BUF) * dom->dy + dom->ys - parts[p].y; if(ry <= 2.*parts[p].r-dom->yl) ry += dom->yl; if(ry >= dom->yl-2.*parts[p].r) ry -= dom->yl; real rz = (tk - 0.5) * dom->dz + dom->zs - parts[p].z; if(rz <= 2.*parts[p].r-dom->zl) rz += dom->zl; if(rz >= dom->zl-2.*parts[p].r) rz -= dom->zl; real ocrossr_y = parts[p].oz*rx - parts[p].ox*rz; v[C] = (ps == -1 || pn == -1 || f == -1) * v[C] + (ps > -1 && pn > -1 && f != -1) * (ocrossr_y + parts[p].v); } } } __global__ void internal_w(real *w, part_struct *parts, dom_struct *dom, int *flag_w, int *phase) { int ti = blockIdx.x * blockDim.x + threadIdx.x + DOM_BUF; int tj = blockIdx.y * blockDim.y + threadIdx.y + DOM_BUF; if(ti < dom->Gfz._ie && tj < dom->Gfz._je) { for(int k = dom->Gfz._ks; k < dom->Gfz._ke; k++) { int C = ti + tj*dom->Gfz._s1b + k*dom->Gfz._s2b; int B = ti + tj*dom->Gcc._s1b + (k-1)*dom->Gcc._s2b; int T = ti + tj*dom->Gcc._s1b + k*dom->Gcc._s2b; int pb = phase[B]; int pt = phase[T]; int f = flag_w[C]; int p = (pb > -1 && pt > -1) * phase[T]; real rx = (ti - 0.5) * dom->dx + dom->xs - parts[p].x; if(rx <= 2.*parts[p].r-dom->xl) rx += dom->xl; if(rx >= dom->xl-2.*parts[p].r) rx -= dom->xl; real ry = (tj - 0.5) * dom->dy + dom->ys - parts[p].y; if(ry <= 2.*parts[p].r-dom->yl) ry += dom->yl; if(ry >= dom->yl-2.*parts[p].r) ry -= dom->yl; real rz = (k - DOM_BUF) * dom->dz + dom->zs - parts[p].z; if(rz <= 2.*parts[p].r-dom->zl) rz += dom->zl; if(rz >= dom->zl-2.*parts[p].r) rz -= dom->zl; real ocrossr_z = parts[p].ox*ry - parts[p].oy*rx; w[C] = (pb == -1 || pt == -1 || f == -1) * w[C] + (pb > -1 && pt > -1 && f != -1) * (ocrossr_z + parts[p].w); } } }
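/* -------------------------------------------------------------------------
 * Editorial sketch (not part of Bluebottle): the internal_u/v/w kernels above
 * overwrite the fluid velocity at faces covered by a particle with the
 * rigid-body value  u = U_p + omega_p x r,  where r runs from the particle
 * center to the face location and is wrapped periodically so it refers to the
 * nearest periodic image (the "rx <= 2*r - xl" / "rx >= xl - 2*r" branches).
 * The standalone host-side example below reproduces just the x-component of
 * that assignment for a single u-face, mirroring ocrossr_x = oy*rz - oz*ry
 * from internal_u. The point3/demo_part structs and the wrap() helper are
 * illustrative assumptions for this sketch, not Bluebottle data structures;
 * the block is guarded with #if 0 so it does not affect compilation of this
 * source file.
 * ------------------------------------------------------------------------- */
#if 0   /* illustration only */
#include <stdio.h>

typedef struct { double x, y, z; } point3;
typedef struct {
  point3 X;   /* center position        */
  point3 U;   /* translational velocity */
  point3 O;   /* angular velocity       */
  double r;   /* radius                 */
} demo_part;

/* wrap a separation component onto the periodic image nearest the particle,
 * using the same 2*radius margin as internal_u/v/w */
static double wrap(double r, double L, double a)
{
  if(r <= 2.*a - L) r += L;
  if(r >= L - 2.*a) r -= L;
  return r;
}

int main(void)
{
  demo_part p = {{0.5, 0.5, 0.5}, {1., 0., 0.}, {0., 0., 2.}, 0.1};
  point3 face = {0.55, 0.45, 0.5};      /* a u-face location inside the particle */
  double Lx = 1., Ly = 1., Lz = 1.;     /* periodic domain lengths (assumed)     */

  double rx = wrap(face.x - p.X.x, Lx, p.r);
  double ry = wrap(face.y - p.X.y, Ly, p.r);
  double rz = wrap(face.z - p.X.z, Lz, p.r);

  /* x-component of omega x r, as computed in internal_u */
  double ocrossr_x = p.O.y*rz - p.O.z*ry;
  printf("u imposed inside particle = %f\n", p.U.x + ocrossr_x);
  return 0;
}
#endif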
05b33a02436a62638b71a0ad9d499e47d3757e21.cu
/******************************************************************************* ********************************* BLUEBOTTLE ********************************** ******************************************************************************* * * Copyright 2012 - 2016 Adam Sierakowski, The Johns Hopkins University * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * Please contact the Johns Hopkins University to use Bluebottle for * commercial and/or for-profit applications. ******************************************************************************/ #include "cuda_bluebottle.h" // pressure; west; periodic __global__ void BC_p_W_P(real *p, dom_struct *dom) { int tj = blockDim.x*blockIdx.x + threadIdx.x; int tk = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gcc._s1b; int s2b = dom->Gcc._s2b; if((tj < dom->Gcc._jnb) && (tk < dom->Gcc._knb)) p[dom->Gcc._isb + tj*s1b + tk*s2b] = p[(dom->Gcc._ie-1) + tj*s1b + tk*s2b]; } // pressure; west; Neumann __global__ void BC_p_W_N(real *p, dom_struct *dom) { int tj = blockDim.x*blockIdx.x + threadIdx.x; int tk = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gcc._s1b; int s2b = dom->Gcc._s2b; if((tj < dom->Gcc._jnb) && (tk < dom->Gcc._knb)) p[dom->Gcc._isb + tj*s1b + tk*s2b] = p[dom->Gcc._is + tj*s1b + tk*s2b]; } // pressure; east; periodic __global__ void BC_p_E_P(real *p, dom_struct *dom) { int tj = blockDim.x*blockIdx.x + threadIdx.x; int tk = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gcc._s1b; int s2b = dom->Gcc._s2b; if((tj < dom->Gcc._jnb) && (tk < dom->Gcc._knb)) p[(dom->Gcc._ieb-1) + tj*s1b + tk*s2b] = p[dom->Gcc._is + tj*s1b + tk*s2b]; } // pressure; east; Neumann __global__ void BC_p_E_N(real *p, dom_struct *dom) { int tj = blockDim.x*blockIdx.x + threadIdx.x; int tk = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gcc._s1b; int s2b = dom->Gcc._s2b; if((tj < dom->Gcc._jnb) && (tk < dom->Gcc._knb)) p[(dom->Gcc._ieb-1) + tj*s1b + tk*s2b] = p[(dom->Gcc._ie-1) + tj*s1b + tk*s2b]; } // pressure; south; periodic __global__ void BC_p_S_P(real *p, dom_struct *dom) { int tk = blockDim.x*blockIdx.x + threadIdx.x; int ti = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gcc._s1b; int s2b = dom->Gcc._s2b; if((ti < dom->Gcc._inb) && (tk < dom->Gcc._knb)) p[ti + dom->Gcc._jsb*s1b + tk*s2b] = p[ti + (dom->Gcc._je-1)*s1b + tk*s2b]; } // pressure; south; Neumann __global__ void BC_p_S_N(real *p, dom_struct *dom) { int tk = blockDim.x*blockIdx.x + threadIdx.x; int ti = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gcc._s1b; int s2b = dom->Gcc._s2b; if((ti < dom->Gcc._inb) && (tk < dom->Gcc._knb)) p[ti + dom->Gcc._jsb*s1b + tk*s2b] = p[ti + dom->Gcc._js*s1b + tk*s2b]; } // pressure; north; periodic __global__ void BC_p_N_P(real *p, dom_struct *dom) { int tk = blockDim.x*blockIdx.x + threadIdx.x; int ti = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gcc._s1b; int s2b = dom->Gcc._s2b; if((ti < dom->Gcc._inb) && (tk < dom->Gcc._knb)) p[ti + (dom->Gcc._jeb-1)*s1b + tk*s2b] = p[ti + dom->Gcc._js*s1b + tk*s2b]; } // pressure; north; Neumann 
__global__ void BC_p_N_N(real *p, dom_struct *dom) { int tk = blockDim.x*blockIdx.x + threadIdx.x; int ti = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gcc._s1b; int s2b = dom->Gcc._s2b; if((ti < dom->Gcc._inb) && (tk < dom->Gcc._knb)) p[ti + (dom->Gcc._jeb-1)*s1b + tk*s2b] = p[ti + (dom->Gcc._je-1)*s1b + tk*s2b]; } // pressure; bottom; periodic __global__ void BC_p_B_P(real *p, dom_struct *dom) { int ti = blockDim.x*blockIdx.x + threadIdx.x; int tj = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gcc._s1b; int s2b = dom->Gcc._s2b; if((ti < dom->Gcc._inb) && (tj < dom->Gcc._jnb)) p[ti + tj*s1b + dom->Gcc._ksb*s2b] = p[ti + tj*s1b + (dom->Gcc._ke-1)*s2b]; } // pressure; bottom; Neumann __global__ void BC_p_B_N(real *p, dom_struct *dom) { int ti = blockDim.x*blockIdx.x + threadIdx.x; int tj = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gcc._s1b; int s2b = dom->Gcc._s2b; if((ti < dom->Gcc._inb) && (tj < dom->Gcc._jnb)) p[ti + tj*s1b + dom->Gcc._ksb*s2b] = p[ti + tj*s1b + dom->Gcc._ks*s2b]; } // pressure; top; periodic __global__ void BC_p_T_P(real *p, dom_struct *dom) { int ti = blockDim.x*blockIdx.x + threadIdx.x; int tj = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gcc._s1b; int s2b = dom->Gcc._s2b; if((ti < dom->Gcc._inb) && (tj < dom->Gcc._jnb)) p[ti + tj*s1b + (dom->Gcc._keb-1)*s2b] = p[ti + tj*s1b + dom->Gcc._ks*s2b]; } // pressure; top; Neumann __global__ void BC_p_T_N(real *p, dom_struct *dom) { int ti = blockDim.x*blockIdx.x + threadIdx.x; int tj = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gcc._s1b; int s2b = dom->Gcc._s2b; if((ti < dom->Gcc._inb) && (tj < dom->Gcc._jnb)) p[ti + tj*s1b + (dom->Gcc._keb-1)*s2b] = p[ti + tj*s1b + (dom->Gcc._ke-1)*s2b]; } // u-velocity; west; periodic __global__ void BC_u_W_P(real *u, dom_struct *dom) { int tj = blockDim.x*blockIdx.x + threadIdx.x; int tk = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfx._s1b; int s2b = dom->Gfx._s2b; if((tj < dom->Gfx._jnb) && (tk < dom->Gfx._knb)) { u[dom->Gfx._isb + tj*s1b + tk*s2b] = u[(dom->Gfx._ie-2) + tj*s1b + tk*s2b]; u[dom->Gfx._is + tj*s1b + tk*s2b] = u[(dom->Gfx._ie-1) + tj*s1b + tk*s2b]; } } // u-velocity; west; Dirichlet __global__ void BC_u_W_D(real *u, dom_struct *dom, real bc) { int tj = blockDim.x*blockIdx.x + threadIdx.x; int tk = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfx._s1b; int s2b = dom->Gfx._s2b; if((tj < dom->Gfx._jnb) && (tk < dom->Gfx._knb)) { u[dom->Gfx._isb + tj*s1b + tk*s2b] = 2. * bc - u[(dom->Gfx._is+1) + tj*s1b + tk*s2b]; u[dom->Gfx._is + tj*s1b + tk*s2b] = bc; } } // u-velocity; west; Neumann __global__ void BC_u_W_N(real *u, dom_struct *dom) { int tj = blockDim.x*blockIdx.x + threadIdx.x; int tk = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfx._s1b; int s2b = dom->Gfx._s2b; if((tj < dom->Gfx._jnb) && (tk < dom->Gfx._knb)) u[dom->Gfx._isb + tj*s1b + tk*s2b] = u[dom->Gfx._is + tj*s1b + tk*s2b]; } // u-velocity; west; Turbulent precursor __global__ void BC_u_W_T(real *u, dom_struct *dom, real* bc) { int tj = blockDim.x*blockIdx.x + threadIdx.x; int tk = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfx._s1b; int s2b = dom->Gfx._s2b; if((tj < dom->Gfx._jnb) && (tk < dom->Gfx._knb)) { u[dom->Gfx._isb + tj*s1b + tk*s2b] = 2. 
* bc[tj + tk*dom->Gfx.jnb] - u[(dom->Gfx._is+1) + tj*s1b + tk*s2b]; u[dom->Gfx._is + tj*s1b + tk*s2b] = bc[tj + tk*dom->Gfx.jnb]; } } // u-velocity; east; periodic __global__ void BC_u_E_P(real *u, dom_struct *dom) { int tj = blockDim.x*blockIdx.x + threadIdx.x; int tk = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfx._s1b; int s2b = dom->Gfx._s2b; if((tj < dom->Gfx._jnb) && (tk < dom->Gfx._knb)) { u[(dom->Gfx._ieb-1) + tj*s1b + tk*s2b] = u[(dom->Gfx._is+1) + tj*s1b + tk*s2b]; u[(dom->Gfx._ie-1) + tj*s1b + tk*s2b] = u[dom->Gfx._is + tj*s1b + tk*s2b]; } } // u-velocity; east; Dirichlet __global__ void BC_u_E_D(real *u, dom_struct *dom, real bc) { int tj = blockDim.x*blockIdx.x + threadIdx.x; int tk = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfx._s1b; int s2b = dom->Gfx._s2b; if((tj < dom->Gfx._jnb) && (tk < dom->Gfx._knb)) { u[(dom->Gfx._ieb-1) + tj*s1b + tk*s2b] = 2. * bc - u[(dom->Gfx._ie-2) + tj*s1b + tk*s2b]; u[(dom->Gfx._ie-1) + tj*s1b + tk*s2b] = bc; } } // u-velocity; east; Neumann __global__ void BC_u_E_N(real *u, dom_struct *dom) { int tj = blockDim.x*blockIdx.x + threadIdx.x; int tk = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfx._s1b; int s2b = dom->Gfx._s2b; if((tj < dom->Gfx._jnb) && (tk < dom->Gfx._knb)) u[(dom->Gfx._ieb-1) + tj*s1b + tk*s2b] = u[(dom->Gfx._ie-1) + tj*s1b + tk*s2b]; } // u-velocity; east; Turbulent precursor __global__ void BC_u_E_T(real *u, dom_struct *dom, real* bc) { int tj = blockDim.x*blockIdx.x + threadIdx.x; int tk = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfx._s1b; int s2b = dom->Gfx._s2b; if((tj < dom->Gfx._jnb) && (tk < dom->Gfx._knb)) { u[(dom->Gfx._ieb-1) + tj*s1b + tk*s2b] = 2. * bc[tj + tk*dom->Gfx.jnb] - u[(dom->Gfx._ie-2) + tj*s1b + tk*s2b]; u[(dom->Gfx._ie-1) + tj*s1b + tk*s2b] = bc[tj + tk*dom->Gfx.jnb]; } } // u-velocity; south; periodic __global__ void BC_u_S_P(real *u, dom_struct *dom) { int tk = blockDim.x*blockIdx.x + threadIdx.x; int ti = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfx._s1b; int s2b = dom->Gfx._s2b; if((ti < dom->Gfx._inb) && (tk < dom->Gfx._knb)) { u[ti + dom->Gfx._jsb*s1b + tk*s2b] = u[ti + (dom->Gfx._je-1)*s1b + tk*s2b]; } } // u-velocity; south; Dirichlet __global__ void BC_u_S_D(real *u, dom_struct *dom, real bc) { int tk = blockDim.x*blockIdx.x + threadIdx.x; int ti = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfx._s1b; int s2b = dom->Gfx._s2b; if((ti < dom->Gfx._inb) && (tk < dom->Gfx._knb)) { u[ti + dom->Gfx._jsb*s1b + tk*s2b] = 8./3. * bc - 2. * u[ti + dom->Gfx._js*s1b + tk*s2b] + 1./3. 
* u[ti + (dom->Gfx._js+1)*s1b + tk*s2b]; } } // u-velocity; south; Neumann __global__ void BC_u_S_N(real *u, dom_struct *dom) { int tk = blockDim.x*blockIdx.x + threadIdx.x; int ti = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfx._s1b; int s2b = dom->Gfx._s2b; if((ti < dom->Gfx._inb) && (tk < dom->Gfx._knb)) u[ti + dom->Gfx._jsb*s1b + tk*s2b] = u[ti + dom->Gfx._js*s1b + tk*s2b]; } // u-velocity; south; Turbulent precursor __global__ void BC_u_S_T(real *u, dom_struct *dom, real* bc_s, real* bc_n) { int tk = blockDim.x*blockIdx.x + threadIdx.x; int ti = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfx._s1b; int s2b = dom->Gfx._s2b; if((ti < dom->Gfx._inb) && (tk < dom->Gfx._knb)) { u[ti + dom->Gfx._jsb*s1b + tk*s2b] = bc_s[tk + ti*dom->Gfx.knb]; u[ti + dom->Gfx._js*s1b + tk*s2b] = bc_n[tk + ti*dom->Gfx.knb]; } } // u-velocity; north; periodic __global__ void BC_u_N_P(real *u, dom_struct *dom) { int tk = blockDim.x*blockIdx.x + threadIdx.x; int ti = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfx._s1b; int s2b = dom->Gfx._s2b; if((ti < dom->Gfx._inb) && (tk < dom->Gfx._knb)) u[ti + (dom->Gfx._jeb-1)*s1b + tk*s2b] = u[ti + dom->Gfx._js*s1b + tk*s2b]; } // u-velocity; north; Dirichlet __global__ void BC_u_N_D(real *u, dom_struct *dom, real bc) { int tk = blockDim.x*blockIdx.x + threadIdx.x; int ti = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfx._s1b; int s2b = dom->Gfx._s2b; if((ti < dom->Gfx._inb) && (tk < dom->Gfx._knb)) { u[ti + (dom->Gfx._jeb-1)*s1b + tk*s2b] = 8./3. * bc - 2. * u[ti + (dom->Gfx._je-1)*s1b + tk*s2b] + 1./3. * u[ti + (dom->Gfx._je-2)*s1b + tk*s2b]; } } // u-velocity; north; Neumann __global__ void BC_u_N_N(real *u, dom_struct *dom) { int tk = blockDim.x*blockIdx.x + threadIdx.x; int ti = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfx._s1b; int s2b = dom->Gfx._s2b; if((ti < dom->Gfx._inb) && (tk < dom->Gfx._knb)) u[ti + (dom->Gfx._jeb-1)*s1b + tk*s2b] = u[ti + (dom->Gfx._je-1)*s1b + tk*s2b]; } // u-velocity; north; Turbulent precursor __global__ void BC_u_N_T(real *u, dom_struct *dom, real* bc_s, real* bc_n) { int tk = blockDim.x*blockIdx.x + threadIdx.x; int ti = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfx._s1b; int s2b = dom->Gfx._s2b; if((ti < dom->Gfx._inb) && (tk < dom->Gfx._knb)) { // velocity within computational domain, near the boundary u[ti + (dom->Gfx._je-1)*s1b + tk*s2b] = bc_s[tk + ti*dom->Gfx.knb]; //velocity on ghost cells u[ti + (dom->Gfx._jeb-1)*s1b + tk*s2b] = bc_n[tk + ti*dom->Gfx.knb]; } } // u-velocity; bottom; periodic __global__ void BC_u_B_P(real *u, dom_struct *dom) { int ti = blockDim.x*blockIdx.x + threadIdx.x; int tj = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfx._s1b; int s2b = dom->Gfx._s2b; if((ti < dom->Gfx._inb) && (tj < dom->Gfx._jnb)) u[ti + tj*s1b + dom->Gfx._ksb*s2b] = u[ti + tj*s1b + (dom->Gfx._ke-1)*s2b]; } // u-velocity; bottom; Dirichlet __global__ void BC_u_B_D(real *u, dom_struct *dom, real bc) { int ti = blockDim.x*blockIdx.x + threadIdx.x; int tj = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfx._s1b; int s2b = dom->Gfx._s2b; if((ti < dom->Gfx._inb) && (tj < dom->Gfx._jnb)) u[ti + tj*s1b + dom->Gfx._ksb*s2b] = 8./3. * bc - 2. * u[ti + tj*s1b + dom->Gfx._ks*s2b] + 1./3. 
* u[ti + tj*s1b + (dom->Gfx._ks+1)*s2b]; } // u-velocity; bottom; Neumann __global__ void BC_u_B_N(real *u, dom_struct *dom) { int ti = blockDim.x*blockIdx.x + threadIdx.x; int tj = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfx._s1b; int s2b = dom->Gfx._s2b; if((ti < dom->Gfx._inb) && (tj < dom->Gfx._jnb)) u[ti + tj*s1b + dom->Gfx._ksb*s2b] = u[ti + tj*s1b + dom->Gfx._ks*s2b]; } // u-velocity; bottom; Turbulent precursor __global__ void BC_u_B_T(real *u, dom_struct *dom, real* bc_b, real* bc_t) { int ti = blockDim.x*blockIdx.x + threadIdx.x; int tj = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfx._s1b; int s2b = dom->Gfx._s2b; if((ti < dom->Gfx._inb) && (tj < dom->Gfx._jnb)) { // copy u velocity on bottom from precursor to ghost point and first layer u[ti + tj*s1b + dom->Gfx._ksb*s2b] = bc_b[ti + tj*dom->Gfx.inb]; u[ti + tj*s1b + dom->Gfx._ks*s2b] = bc_t[ti + tj*dom->Gfx.inb]; } } // u-velocity; top; periodic __global__ void BC_u_T_P(real *u, dom_struct *dom) { int ti = blockDim.x*blockIdx.x + threadIdx.x; int tj = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfx._s1b; int s2b = dom->Gfx._s2b; if((ti < dom->Gfx._inb) && (tj < dom->Gfx._jnb)) u[ti + tj*s1b + (dom->Gfx._keb-1)*s2b] = u[ti + tj*s1b + dom->Gfx._ks*s2b]; } // u-velocity; top; Dirichlet __global__ void BC_u_T_D(real *u, dom_struct *dom, real bc) { int ti = blockDim.x*blockIdx.x + threadIdx.x; int tj = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfx._s1b; int s2b = dom->Gfx._s2b; if((ti < dom->Gfx._inb) && (tj < dom->Gfx._jnb)) u[ti + tj*s1b + (dom->Gfx._keb-1)*s2b] = 8./3. * bc - 2. * u[ti + tj*s1b + (dom->Gfx._ke-1)*s2b] + 1./3. * u[ti + tj*s1b + (dom->Gfx._ke-2)*s2b]; } // u-velocity; top; Neumann __global__ void BC_u_T_N(real *u, dom_struct *dom) { int ti = blockDim.x*blockIdx.x + threadIdx.x; int tj = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfx._s1b; int s2b = dom->Gfx._s2b; if((ti < dom->Gfx._inb) && (tj < dom->Gfx._jnb)) u[ti + tj*s1b + (dom->Gfx._keb-1)*s2b] = u[ti + tj*s1b + (dom->Gfx._ke-1)*s2b]; } // u-velocity; top; Turbulent precursor __global__ void BC_u_T_T(real *u, dom_struct *dom, real* bc_b, real* bc_t) { int ti = blockDim.x*blockIdx.x + threadIdx.x; int tj = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfx._s1b; int s2b = dom->Gfx._s2b; if((ti < dom->Gfx._inb) && (tj < dom->Gfx._jnb)) { u[ti + tj*s1b + (dom->Gfx._ke-1)*s2b] = bc_b[ti + tj*dom->Gfx.inb]; // velocity on ghost cell u[ti + tj*s1b + (dom->Gfx._keb-1)*s2b] = bc_t[ti + tj*dom->Gfx.inb]; } } // v-velocity; west; periodic __global__ void BC_v_W_P(real *v, dom_struct *dom) { int tj = blockDim.x*blockIdx.x + threadIdx.x; int tk = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfy._s1b; int s2b = dom->Gfy._s2b; if((tj < dom->Gfy._jnb) && (tk < dom->Gfy._knb)) v[dom->Gfy._isb + tj*s1b + tk*s2b] = v[(dom->Gfy._ie-1) + tj*s1b + tk*s2b]; } // v-velocity; west; Dirichlet __global__ void BC_v_W_D(real *v, dom_struct *dom, real bc) { int tj = blockDim.x*blockIdx.x + threadIdx.x; int tk = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfy._s1b; int s2b = dom->Gfy._s2b; if((tj < dom->Gfy._jnb) && (tk < dom->Gfy._knb)) { v[dom->Gfy._isb + tj*s1b + tk*s2b] = 8./3. * bc - 2. * v[dom->Gfy._is + tj*s1b + tk*s2b] + 1./3. 
* v[(dom->Gfy._is+1) + tj*s1b + tk*s2b]; } } // v-velocity; west; Neumann __global__ void BC_v_W_N(real *v, dom_struct *dom) { int tj = blockDim.x*blockIdx.x + threadIdx.x; int tk = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfy._s1b; int s2b = dom->Gfy._s2b; if((tj < dom->Gfy._jnb) && (tk < dom->Gfy._knb)) v[dom->Gfy._isb + tj*s1b + tk*s2b] = v[dom->Gfy._is + tj*s1b + tk*s2b]; } // v-velocity; west; Turbulent precursor __global__ void BC_v_W_T(real *v, dom_struct *dom, real* bc_w, real* bc_e) { int tj = blockDim.x*blockIdx.x + threadIdx.x; int tk = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfy._s1b; int s2b = dom->Gfy._s2b; if((tj < dom->Gfy._jnb) && (tk < dom->Gfy._knb)) { v[dom->Gfy._isb + tj*s1b + tk*s2b] = bc_w[tj + tk*dom->Gfy.jnb]; v[dom->Gfy._is + tj*s1b + tk*s2b] = bc_e[tj + tk*dom->Gfy.jnb]; } } // v-velocity; east; periodic __global__ void BC_v_E_P(real *v, dom_struct *dom) { int tj = blockDim.x*blockIdx.x + threadIdx.x; int tk = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfy._s1b; int s2b = dom->Gfy._s2b; if((tj < dom->Gfy._jnb) && (tk < dom->Gfy._knb)) v[(dom->Gfy._ieb-1) + tj*s1b + tk*s2b] = v[dom->Gfy._is + tj*s1b + tk*s2b]; } // v-velocity; east; Dirichlet __global__ void BC_v_E_D(real *v, dom_struct *dom, real bc) { int tj = blockDim.x*blockIdx.x + threadIdx.x; int tk = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfy._s1b; int s2b = dom->Gfy._s2b; if((tj < dom->Gfy._jnb) && (tk < dom->Gfy._knb)) { v[(dom->Gfy._ieb-1) + tj*s1b + tk*s2b] = 8./3. * bc - 2. * v[(dom->Gfy._ie-1) + tj*s1b + tk*s2b] + 1./3. * v[(dom->Gfy._ie-2) + tj*s1b + tk*s2b]; } } // v-velocity; east; Neumann __global__ void BC_v_E_N(real *v, dom_struct *dom) { int tj = blockDim.x*blockIdx.x + threadIdx.x; int tk = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfy._s1b; int s2b = dom->Gfy._s2b; if((tj < dom->Gfy._jnb) && (tk < dom->Gfy._knb)) v[(dom->Gfy._ieb-1) + tj*s1b + tk*s2b] = v[(dom->Gfy._ie-1) + tj*s1b + tk*s2b]; } // v-velocity; east; Turbulent precursor __global__ void BC_v_E_T(real *v, dom_struct *dom, real* bc_w, real* bc_e) { int tj = blockDim.x*blockIdx.x + threadIdx.x; int tk = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfy._s1b; int s2b = dom->Gfy._s2b; if((tj < dom->Gfy._jnb) && (tk < dom->Gfy._knb)) { v[(dom->Gfy._ie-1) + tj*s1b + tk*s2b] = bc_w[tj + tk*dom->Gfy.jnb]; //velocity on ghost cell v[dom->Gfy._ieb-1 + tj*s1b + tk*s2b] = bc_e[tj + tk*dom->Gfy.jnb]; } } // v-velocity; south; periodic __global__ void BC_v_S_P(real *v, dom_struct *dom) { int tk = blockDim.x*blockIdx.x + threadIdx.x; int ti = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfy._s1b; int s2b = dom->Gfy._s2b; if((ti < dom->Gfy._inb) && (tk < dom->Gfy._knb)) { v[ti + dom->Gfy._jsb*s1b + tk*s2b] = v[ti + (dom->Gfy._je-2)*s1b + tk*s2b]; v[ti + dom->Gfy._js*s1b + tk*s2b] = v[ti + (dom->Gfy._je-1)*s1b + tk*s2b]; } } // v-velocity; south; Dirichlet __global__ void BC_v_S_D(real *v, dom_struct *dom, real bc) { int tk = blockDim.x*blockIdx.x + threadIdx.x; int ti = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfy._s1b; int s2b = dom->Gfy._s2b; if((ti < dom->Gfy._inb) && (tk < dom->Gfy._knb)) { v[ti + dom->Gfy._jsb*s1b + tk*s2b] = 2. 
* bc - v[ti + (dom->Gfy._js+1)*s1b + tk*s2b]; v[ti + dom->Gfy._js*s1b + tk*s2b] = bc; } } // v-velocity; south; Neumann __global__ void BC_v_S_N(real *v, dom_struct *dom) { int tk = blockDim.x*blockIdx.x + threadIdx.x; int ti = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfy._s1b; int s2b = dom->Gfy._s2b; if((ti < dom->Gfy._inb) && (tk < dom->Gfy._knb)) v[ti + dom->Gfy._jsb*s1b + tk*s2b] = v[ti + dom->Gfy._js*s1b + tk*s2b]; } // v-velocity; south; Turbulent precursor __global__ void BC_v_S_T(real *v, dom_struct *dom, real* bc) { int tk = blockDim.x*blockIdx.x + threadIdx.x; int ti = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfy._s1b; int s2b = dom->Gfy._s2b; if((ti < dom->Gfy._inb) && (tk < dom->Gfy._knb)) { v[ti + dom->Gfy._jsb*s1b + tk*s2b] = 2. * bc[tk + ti*dom->Gfy.knb] - v[ti + (dom->Gfy._js+1)*s1b + tk*s2b]; v[ti + dom->Gfy._js*s1b + tk*s2b] = bc[tk + ti*dom->Gfy.knb]; } } // v-velocity; north; periodic __global__ void BC_v_N_P(real *v, dom_struct *dom) { int tk = blockDim.x*blockIdx.x + threadIdx.x; int ti = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfy._s1b; int s2b = dom->Gfy._s2b; if((ti < dom->Gfy._inb) && (tk < dom->Gfy._knb)) { v[ti + (dom->Gfy._jeb-1)*s1b + tk*s2b] = v[ti + (dom->Gfy._js+1)*s1b + tk*s2b]; v[ti + (dom->Gfy._je-1)*s1b + tk*s2b] = v[ti + dom->Gfy._js*s1b + tk*s2b]; } } // v-velocity; north; Dirichlet __global__ void BC_v_N_D(real *v, dom_struct *dom, real bc) { int tk = blockDim.x*blockIdx.x + threadIdx.x; int ti = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfy._s1b; int s2b = dom->Gfy._s2b; if((ti < dom->Gfy._inb) && (tk < dom->Gfy._knb)) { v[ti + (dom->Gfy._jeb-1)*s1b + tk*s2b] = 2. * bc - v[ti + (dom->Gfy._je-2)*s1b + tk*s2b]; v[ti + (dom->Gfy._je-1)*s1b + tk*s2b] = bc; } } // v-velocity; north; Neumann __global__ void BC_v_N_N(real *v, dom_struct *dom) { int tk = blockDim.x*blockIdx.x + threadIdx.x; int ti = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfy._s1b; int s2b = dom->Gfy._s2b; if((ti < dom->Gfy._inb) && (tk < dom->Gfy._knb)) v[ti + (dom->Gfy._jeb-1)*s1b + tk*s2b] = v[ti + (dom->Gfy._je-1)*s1b + tk*s2b]; } // v-velocity; north; Turbulent precursor __global__ void BC_v_N_T(real *v, dom_struct *dom, real* bc) { int tk = blockDim.x*blockIdx.x + threadIdx.x; int ti = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfy._s1b; int s2b = dom->Gfy._s2b; if((ti < dom->Gfy._inb) && (tk < dom->Gfy._knb)) { v[ti + (dom->Gfy._jeb-1)*s1b + tk*s2b] = 2. * bc[tk + ti*dom->Gfy.knb] - v[ti + (dom->Gfy._je-2)*s1b + tk*s2b]; v[ti + (dom->Gfy._je-1)*s1b + tk*s2b] = bc[tk + ti*dom->Gfy.knb]; } } // v-velocity; bottom; periodic __global__ void BC_v_B_P(real *v, dom_struct *dom) { int ti = blockDim.x*blockIdx.x + threadIdx.x; int tj = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfy._s1b; int s2b = dom->Gfy._s2b; if((ti < dom->Gfy._inb) && (tj < dom->Gfy._jnb)) v[ti + tj*s1b + dom->Gfy._ksb*s2b] = v[ti + tj*s1b + (dom->Gfy._ke-1)*s2b]; } // v-velocity; bottom; Dirichlet __global__ void BC_v_B_D(real *v, dom_struct *dom, real bc) { int ti = blockDim.x*blockIdx.x + threadIdx.x; int tj = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfy._s1b; int s2b = dom->Gfy._s2b; if((ti < dom->Gfy._inb) && (tj < dom->Gfy._jnb)) v[ti + tj*s1b + dom->Gfy._ksb*s2b] = 8./3. * bc - 2. * v[ti + tj*s1b + dom->Gfy._ks*s2b] + 1./3. 
* v[ti + tj*s1b + (dom->Gfy._ks+1)*s2b]; } // v-velocity; bottom; Neumann __global__ void BC_v_B_N(real *v, dom_struct *dom) { int ti = blockDim.x*blockIdx.x + threadIdx.x; int tj = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfy._s1b; int s2b = dom->Gfy._s2b; if((ti < dom->Gfy._inb) && (tj < dom->Gfy._jnb)) v[ti + tj*s1b + dom->Gfy._ksb*s2b] = v[ti + tj*s1b + dom->Gfy._ks*s2b]; } // v-velocity; bottom; Turbulent precursor __global__ void BC_v_B_T(real *v, dom_struct *dom, real* bc_b, real* bc_t) { int ti = blockDim.x*blockIdx.x + threadIdx.x; int tj = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfy._s1b; int s2b = dom->Gfy._s2b; if((ti < dom->Gfy._inb) && (tj < dom->Gfy._jnb)) { v[ti + tj*s1b + dom->Gfy._ksb*s2b] = bc_b[ti + tj*dom->Gfy.inb]; v[ti + tj*s1b + dom->Gfy._ks*s2b] = bc_t[ti + tj*dom->Gfy.inb]; } } // v-velocity; top; periodic __global__ void BC_v_T_P(real *v, dom_struct *dom) { int ti = blockDim.x*blockIdx.x + threadIdx.x; int tj = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfy._s1b; int s2b = dom->Gfy._s2b; if((ti < dom->Gfy._inb) && (tj < dom->Gfy._jnb)) v[ti + tj*s1b + (dom->Gfy._keb-1)*s2b] = v[ti + tj*s1b + dom->Gfy._ks*s2b]; } // v-velocity; top; Dirichlet __global__ void BC_v_T_D(real *v, dom_struct *dom, real bc) { int ti = blockDim.x*blockIdx.x + threadIdx.x; int tj = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfy._s1b; int s2b = dom->Gfy._s2b; if((ti < dom->Gfy._inb) && (tj < dom->Gfy._jnb)) v[ti + tj*s1b + (dom->Gfy._keb-1)*s2b] = 8./3. * bc - 2. * v[ti + tj*s1b + (dom->Gfy._ke-1)*s2b] + 1./3. * v[ti + tj*s1b + (dom->Gfy._ke-2)*s2b]; } // v-velocity; top; Neumann __global__ void BC_v_T_N(real *v, dom_struct *dom) { int ti = blockDim.x*blockIdx.x + threadIdx.x; int tj = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfy._s1b; int s2b = dom->Gfy._s2b; if((ti < dom->Gfy._inb) && (tj < dom->Gfy._jnb)) v[ti + tj*s1b + (dom->Gfy._keb-1)*s2b] = v[ti + tj*s1b + (dom->Gfy._ke-1)*s2b]; } // v-velocity; top; Turbulent precursor __global__ void BC_v_T_T(real *v, dom_struct *dom, real* bc_b, real* bc_t) { int ti = blockDim.x*blockIdx.x + threadIdx.x; int tj = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfy._s1b; int s2b = dom->Gfy._s2b; if((ti < dom->Gfy._inb) && (tj < dom->Gfy._jnb)) { v[ti + tj*s1b + (dom->Gfy._ke-1)*s2b] = bc_b[ti + tj*dom->Gfy.inb]; // velocity on ghost cell v[ti + tj*s1b + (dom->Gfy._keb-1)*s2b] = bc_t[ti + tj*dom->Gfy.inb]; } } // w-velocity; west; periodic __global__ void BC_w_W_P(real *w, dom_struct *dom) { int tj = blockDim.x*blockIdx.x + threadIdx.x; int tk = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfz._s1b; int s2b = dom->Gfz._s2b; if((tj < dom->Gfz._jnb) && (tk < dom->Gfz._knb)) w[dom->Gfz._isb + tj*s1b + tk*s2b] = w[(dom->Gfz._ie-1) + tj*s1b + tk*s2b]; } // w-velocity; west; Dirichlet __global__ void BC_w_W_D(real *w, dom_struct *dom, real bc) { int tj = blockDim.x*blockIdx.x + threadIdx.x; int tk = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfz._s1b; int s2b = dom->Gfz._s2b; if((tj < dom->Gfz._jnb) && (tk < dom->Gfz._knb)) w[dom->Gfz._isb + tj*s1b + tk*s2b] = 8./3. * bc - 2. * w[dom->Gfz._is + tj*s1b + tk*s2b] + 1./3. 
* w[(dom->Gfz._is+1) + tj*s1b + tk*s2b]; } // w-velocity; west; Neumann __global__ void BC_w_W_N(real *w, dom_struct *dom) { int tj = blockDim.x*blockIdx.x + threadIdx.x; int tk = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfz._s1b; int s2b = dom->Gfz._s2b; if((tj < dom->Gfz._jnb) && (tk < dom->Gfz._knb)) w[dom->Gfz._isb + tj*s1b + tk*s2b] = w[dom->Gfz._is + tj*s1b + tk*s2b]; } // w-velocity; west; Turbulent precursor __global__ void BC_w_W_T(real *w, dom_struct *dom, real* bc_w, real* bc_e) { int tj = blockDim.x*blockIdx.x + threadIdx.x; int tk = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfz._s1b; int s2b = dom->Gfz._s2b; if((tj < dom->Gfz._jnb) && (tk < dom->Gfz._knb)) { w[dom->Gfz._isb + tj*s1b + tk*s2b] = bc_w[tj + tk*dom->Gfz.jnb]; w[dom->Gfz._is + tj*s1b + tk*s2b] = bc_e[tj + tk*dom->Gfz.jnb]; } } // w-velocity; east; periodic __global__ void BC_w_E_P(real *w, dom_struct *dom) { int tj = blockDim.x*blockIdx.x + threadIdx.x; int tk = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfz._s1b; int s2b = dom->Gfz._s2b; if((tj < dom->Gfz._jnb) && (tk < dom->Gfz._knb)) w[(dom->Gfz._ieb-1) + tj*s1b + tk*s2b] = w[dom->Gfz._is + tj*s1b + tk*s2b]; } // w-velocity; east; Dirichlet __global__ void BC_w_E_D(real *w, dom_struct *dom, real bc) { int tj = blockDim.x*blockIdx.x + threadIdx.x; int tk = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfz._s1b; int s2b = dom->Gfz._s2b; if((tj < dom->Gfz._jnb) && (tk < dom->Gfz._knb)) w[(dom->Gfz._ieb-1) + tj*s1b + tk*s2b] = 8./3. * bc - 2. * w[(dom->Gfz._ie-1) + tj*s1b + tk*s2b] + 1./3. * w[(dom->Gfz._ie-2) + tj*s1b + tk*s2b]; } // w-velocity; east; Neumann __global__ void BC_w_E_N(real *w, dom_struct *dom) { int tj = blockDim.x*blockIdx.x + threadIdx.x; int tk = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfz._s1b; int s2b = dom->Gfz._s2b; if((tj < dom->Gfz._jnb) && (tk < dom->Gfz._knb)) w[(dom->Gfz._ieb-1) + tj*s1b + tk*s2b] = w[(dom->Gfz._ie-1) + tj*s1b + tk*s2b]; } // w-velocity; east; Turbulent precursor __global__ void BC_w_E_T(real *w, dom_struct *dom, real* bc_w, real* bc_e) { int tj = blockDim.x*blockIdx.x + threadIdx.x; int tk = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfz._s1b; int s2b = dom->Gfz._s2b; if((tj < dom->Gfz._jnb) && (tk < dom->Gfz._knb)) { w[(dom->Gfz._ie-1) + tj*s1b + tk*s2b] = bc_w[tj + tk*dom->Gfz.jnb]; //velocity on ghost cell w[(dom->Gfz._ieb-1) + tj*s1b + tk*s2b] = bc_e[tj + tk*dom->Gfz.jnb]; } } // w-velocity; south; periodic __global__ void BC_w_S_P(real *w, dom_struct *dom) { int tk = blockDim.x*blockIdx.x + threadIdx.x; int ti = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfz._s1b; int s2b = dom->Gfz._s2b; if((ti < dom->Gfz._inb) && (tk < dom->Gfz._knb)) { w[ti + dom->Gfz._jsb*s1b + tk*s2b] = w[ti + (dom->Gfz._je-1)*s1b + tk*s2b]; } } // w-velocity; south; Dirichlet __global__ void BC_w_S_D(real *w, dom_struct *dom, real bc) { int tk = blockDim.x*blockIdx.x + threadIdx.x; int ti = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfz._s1b; int s2b = dom->Gfz._s2b; if((ti < dom->Gfz._inb) && (tk < dom->Gfz._knb)) w[ti + dom->Gfz._jsb*s1b + tk*s2b] = 8./3. * bc - 2. * w[ti + dom->Gfz._js*s1b + tk*s2b] + 1./3. 
* w[ti + (dom->Gfz._js+1)*s1b + tk*s2b]; } // w-velocity; south; Neumann __global__ void BC_w_S_N(real *w, dom_struct *dom) { int tk = blockDim.x*blockIdx.x + threadIdx.x; int ti = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfz._s1b; int s2b = dom->Gfz._s2b; if((ti < dom->Gfz._inb) && (tk < dom->Gfz._knb)) w[ti + dom->Gfz._jsb*s1b + tk*s2b] = w[ti + dom->Gfz._js*s1b + tk*s2b]; } // w-velocity; south; Turbulent precursor __global__ void BC_w_S_T(real *w, dom_struct *dom, real* bc_s, real* bc_n) { int tk = blockDim.x*blockIdx.x + threadIdx.x; int ti = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfz._s1b; int s2b = dom->Gfz._s2b; if((ti < dom->Gfz._inb) && (tk < dom->Gfz._knb)) { w[ti + dom->Gfz._jsb*s1b + tk*s2b] = bc_s[tk + ti*dom->Gfz.knb]; w[ti + dom->Gfz._js*s1b + tk*s2b] = bc_n[tk + ti*dom->Gfz.knb]; } } // w-velocity; north; periodic __global__ void BC_w_N_P(real *w, dom_struct *dom) { int tk = blockDim.x*blockIdx.x + threadIdx.x; int ti = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfz._s1b; int s2b = dom->Gfz._s2b; if((ti < dom->Gfz._inb) && (tk < dom->Gfz._knb)) w[ti + (dom->Gfz._jeb-1)*s1b + tk*s2b] = w[ti + dom->Gfz._js*s1b + tk*s2b]; } // w-velocity; north; Dirichlet __global__ void BC_w_N_D(real *w, dom_struct *dom, real bc) { int tk = blockDim.x*blockIdx.x + threadIdx.x; int ti = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfz._s1b; int s2b = dom->Gfz._s2b; if((ti < dom->Gfz._inb) && (tk < dom->Gfz._knb)) w[ti + (dom->Gfz._jeb-1)*s1b + tk*s2b] = 8./3. * bc - 2. * w[ti + (dom->Gfz._je-1)*s1b + tk*s2b] + 1./3. * w[ti + (dom->Gfz._je-2)*s1b + tk*s2b]; } // w-velocity; north; Neumann __global__ void BC_w_N_N(real *w, dom_struct *dom) { int tk = blockDim.x*blockIdx.x + threadIdx.x; int ti = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfz._s1b; int s2b = dom->Gfz._s2b; if((ti < dom->Gfz._inb) && (tk < dom->Gfz._knb)) w[ti + (dom->Gfz._jeb-1)*s1b + tk*s2b] = w[ti + (dom->Gfz._je-1)*s1b + tk*s2b]; } // w-velocity; north; Turbulent precursor __global__ void BC_w_N_T(real *w, dom_struct *dom, real* bc_s, real* bc_n) { int tk = blockDim.x*blockIdx.x + threadIdx.x; int ti = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfz._s1b; int s2b = dom->Gfz._s2b; if((ti < dom->Gfz._inb) && (tk < dom->Gfz._knb)) { w[ti + (dom->Gfz._je-1)*s1b + tk*s2b] = bc_s[tk + ti*dom->Gfz.knb]; //velocity on ghost cell w[ti + (dom->Gfz._jeb-1)*s1b + tk*s2b] = bc_n[tk + ti*dom->Gfz.knb]; } } // w-velocity; bottom; periodic __global__ void BC_w_B_P(real *w, dom_struct *dom) { int ti = blockDim.x*blockIdx.x + threadIdx.x; int tj = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfz._s1b; int s2b = dom->Gfz._s2b; if((ti < dom->Gfz._inb) && (tj < dom->Gfz._jnb)) { w[ti + tj*s1b + dom->Gfz._ksb*s2b] = w[ti + tj*s1b + (dom->Gfz._ke-2)*s2b]; w[ti + tj*s1b + dom->Gfz._ks*s2b] = w[ti + tj*s1b + (dom->Gfz._ke-1)*s2b]; } } // w-velocity; bottom; Dirichlet __global__ void BC_w_B_D(real *w, dom_struct *dom, real bc) { int ti = blockDim.x*blockIdx.x + threadIdx.x; int tj = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfz._s1b; int s2b = dom->Gfz._s2b; if((ti < dom->Gfz._inb) && (tj < dom->Gfz._jnb)) { w[ti + tj*s1b + dom->Gfz._ksb*s2b] = 2. 
* bc - w[ti + tj*s1b + (dom->Gfz._ks+1)*s2b]; w[ti + tj*s1b + dom->Gfz._ks*s2b] = bc; } } // w-velocity; bottom; Neumann __global__ void BC_w_B_N(real *w, dom_struct *dom) { int ti = blockDim.x*blockIdx.x + threadIdx.x; int tj = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfz._s1b; int s2b = dom->Gfz._s2b; if((ti < dom->Gfz._inb) && (tj < dom->Gfz._jnb)) w[ti + tj*s1b + dom->Gfz._ksb*s2b] = w[ti + tj*s1b + dom->Gfz._ks*s2b]; } // w-velocity; bottom; Turbulent precursor __global__ void BC_w_B_T(real *w, dom_struct *dom, real* bc) { int ti = blockDim.x*blockIdx.x + threadIdx.x; int tj = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfz._s1b; int s2b = dom->Gfz._s2b; if((ti < dom->Gfz._inb) && (tj < dom->Gfz._jnb)) { w[ti + tj*s1b + dom->Gfz._ksb*s2b] = 2. * bc[ti + tj*dom->Gfz.inb] - w[ti + tj*s1b + (dom->Gfz._ks+1)*s2b]; w[ti + tj*s1b + dom->Gfz._ks*s2b] = bc[ti + tj*dom->Gfz.inb]; } } // w-velocity; top; periodic __global__ void BC_w_T_P(real *w, dom_struct *dom) { int ti = blockDim.x*blockIdx.x + threadIdx.x; int tj = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfz._s1b; int s2b = dom->Gfz._s2b; if((ti < dom->Gfz._inb) && (tj < dom->Gfz._jnb)) { w[ti + tj*s1b + (dom->Gfz._keb-1)*s2b] = w[ti + tj*s1b + (dom->Gfz._ks+1)*s2b]; w[ti + tj*s1b + (dom->Gfz._ke-1)*s2b] = w[ti + tj*s1b + dom->Gfz._ks*s2b]; } } // w-velocity; top; Dirichlet __global__ void BC_w_T_D(real *w, dom_struct *dom, real bc) { int ti = blockDim.x*blockIdx.x + threadIdx.x; int tj = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfz._s1b; int s2b = dom->Gfz._s2b; if((ti < dom->Gfz._inb) && (tj < dom->Gfz._jnb)) { w[ti + tj*s1b + (dom->Gfz._keb-1)*s2b] = 2. * bc - w[ti + tj*s1b + (dom->Gfz._ke-2)*s2b]; w[ti + tj*s1b + (dom->Gfz._ke-1)*s2b] = bc; } } // w-velocity; top; Neumann __global__ void BC_w_T_N(real *w, dom_struct *dom) { int ti = blockDim.x*blockIdx.x + threadIdx.x; int tj = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfz._s1b; int s2b = dom->Gfz._s2b; if((ti < dom->Gfz._inb) && (tj < dom->Gfz._jnb)) w[ti + tj*s1b + (dom->Gfz._keb-1)*s2b] = w[ti + tj*s1b + (dom->Gfz._ke-1)*s2b]; } // w-velocity; top; Turbulent precursor __global__ void BC_w_T_T(real *w, dom_struct *dom, real* bc) { int ti = blockDim.x*blockIdx.x + threadIdx.x; int tj = blockDim.y*blockIdx.y + threadIdx.y; int s1b = dom->Gfz._s1b; int s2b = dom->Gfz._s2b; if((ti < dom->Gfz._inb) && (tj < dom->Gfz._jnb)) { w[ti + tj*s1b + (dom->Gfz._keb-1)*s2b] = 2. 
* bc[ti + tj*dom->Gfz.inb] - w[ti + tj*s1b + (dom->Gfz._ke-2)*s2b]; w[ti + tj*s1b + (dom->Gfz._ke-1)*s2b] = bc[ti + tj*dom->Gfz.inb]; } } __global__ void project_u(real *u_star, real *p, real rho_f, real dt, real *u, dom_struct *dom, real ddx, int *flag_u, int *phase) { int tj = blockIdx.x * blockDim.x + threadIdx.x + DOM_BUF; int tk = blockIdx.y * blockDim.y + threadIdx.y + DOM_BUF; if(tj < dom->Gfx._je && tk < dom->Gfx._ke) { for(int i = dom->Gfx._is; i < dom->Gfx._ie; i++) { real gradPhi = abs(flag_u[i + tj*dom->Gfx._s1b + tk*dom->Gfx._s2b]) * ddx * (p[i + tj*dom->Gcc._s1b + tk*dom->Gcc._s2b] - p[(i-1) + tj*dom->Gcc._s1b + tk*dom->Gcc._s2b]); u[i + tj*dom->Gfx._s1b + tk*dom->Gfx._s2b] = (u_star[i + tj*dom->Gfx._s1b + tk*dom->Gfx._s2b] - dt / rho_f * gradPhi); } } } __global__ void project_v(real *v_star, real *p, real rho_f, real dt, real *v, dom_struct *dom, real ddy, int *flag_v, int *phase) { int tk = blockIdx.x * blockDim.x + threadIdx.x + DOM_BUF; int ti = blockIdx.y * blockDim.y + threadIdx.y + DOM_BUF; if(tk < dom->Gfy._ke && ti < dom->Gfy._ie) { for(int j = dom->Gfy._js; j < dom->Gfy._je; j++) { real gradPhi = abs(flag_v[ti + j*dom->Gfy._s1b + tk*dom->Gfy._s2b]) * ddy * (p[ti + j*dom->Gcc._s1b + tk*dom->Gcc._s2b] - p[ti + (j-1)*dom->Gcc._s1b + tk*dom->Gcc._s2b]); v[ti + j*dom->Gfy._s1b + tk*dom->Gfy._s2b] = (v_star[ti + j*dom->Gfy._s1b + tk*dom->Gfy._s2b] - dt / rho_f * gradPhi); } } } __global__ void project_w(real *w_star, real *p, real rho_f, real dt, real *w, dom_struct *dom, real ddz, int *flag_w, int *phase) { int ti = blockIdx.x * blockDim.x + threadIdx.x + DOM_BUF; int tj = blockIdx.y * blockDim.y + threadIdx.y + DOM_BUF; if(ti < dom->Gfz._ie && tj < dom->Gfz._je) { for(int k = dom->Gfz._ks; k < dom->Gfz._ke; k++) { real gradPhi = abs(flag_w[ti + tj*dom->Gfz._s1b + k*dom->Gfz._s2b]) * ddz * (p[ti + tj*dom->Gcc._s1b + k*dom->Gcc._s2b] - p[ti + tj*dom->Gcc._s1b + (k-1)*dom->Gcc._s2b]); w[ti + tj*dom->Gfz._s1b + k*dom->Gfz._s2b] = (w_star[ti + tj*dom->Gfz._s1b + k*dom->Gfz._s2b] - dt / rho_f * gradPhi); } } } __global__ void update_p_laplacian(real *Lp, real *p, dom_struct *dom) { int tj = blockIdx.x * blockDim.x + threadIdx.x + DOM_BUF; int tk = blockIdx.y * blockDim.y + threadIdx.y + DOM_BUF; if(tj < dom->Gcc._je && tk < dom->Gcc._ke) { for(int i = dom->Gcc._is; i < dom->Gcc._ie; i++) { int C = i + tj*dom->Gcc._s1b + tk*dom->Gcc._s2b; int W = (i-1) + tj*dom->Gcc._s1b + tk*dom->Gcc._s2b; int E = (i+1) + tj*dom->Gcc._s1b + tk*dom->Gcc._s2b; int S = i + (tj-1)*dom->Gcc._s1b + tk*dom->Gcc._s2b; int N = i + (tj+1)*dom->Gcc._s1b + tk*dom->Gcc._s2b; int B = i + tj*dom->Gcc._s1b + (tk-1)*dom->Gcc._s2b; int T = i + tj*dom->Gcc._s1b + (tk+1)*dom->Gcc._s2b; real ddpdxx = (p[E]-2.*p[C]+p[W])/dom->dx/dom->dx; real ddpdyy = (p[N]-2.*p[C]+p[S])/dom->dy/dom->dy; real ddpdzz = (p[T]-2.*p[C]+p[B])/dom->dz/dom->dz; Lp[C] = ddpdxx+ddpdyy+ddpdzz; } } } __global__ void update_p(real *Lp, real *p0, real *p, real *phi, dom_struct *dom, real nu, real dt, int *phase) { int tj = blockIdx.x * blockDim.x + threadIdx.x + DOM_BUF; int tk = blockIdx.y * blockDim.y + threadIdx.y + DOM_BUF; if(tj < dom->Gcc._je && tk < dom->Gcc._ke) { for(int i = dom->Gcc._is; i < dom->Gcc._ie; i++) { int C = i + tj*dom->Gcc._s1b + tk*dom->Gcc._s2b; p[C] = (phase[C] < 0) * (p0[C] + phi[C]);// - 0.5*nu*dt*Lp[C]); } } } __global__ void copy_p_ghost(real *p, real *p_tmp, dom_struct *dom) { int tj = blockIdx.x * blockDim.x + threadIdx.x; int tk = blockIdx.y * blockDim.y + threadIdx.y; if(tj < dom->Gcc.je-DOM_BUF && tk < 
dom->Gcc.ke-DOM_BUF) { for(int i = dom->Gcc.is-DOM_BUF; i < dom->Gcc.ie-DOM_BUF; i++) { p[(i+DOM_BUF) + (tj+DOM_BUF)*dom->Gcc.s1b + (tk+DOM_BUF)*dom->Gcc.s2b] = p_tmp[i + tj*dom->Gcc.s1 + tk*dom->Gcc.s2]; } } } __global__ void copy_p_noghost(real *p_noghost, real *p_ghost, dom_struct *dom) { int tj = blockIdx.x * blockDim.x + threadIdx.x; int tk = blockIdx.y * blockDim.y + threadIdx.y; if(tj < dom->Gcc.je-DOM_BUF && tk < dom->Gcc.ke-DOM_BUF) { for(int i = dom->Gcc.is-DOM_BUF; i < dom->Gcc.ie-DOM_BUF; i++) { p_noghost[i + tj*dom->Gcc._s1 + tk*dom->Gcc._s2] = p_ghost[(i+DOM_BUF) + (tj+DOM_BUF)*dom->Gcc._s1b + (tk+DOM_BUF)*dom->Gcc._s2b]; } } } __global__ void copy_u_ghost(real *u_ghost, real *u_noghost, dom_struct *dom) { int tj = blockIdx.x * blockDim.x + threadIdx.x; int tk = blockIdx.y * blockDim.y + threadIdx.y; if(tj < dom->Gfx.je-DOM_BUF && tk < dom->Gfx.ke-DOM_BUF) { for(int i = dom->Gfx.is-DOM_BUF; i < dom->Gfx.ie-DOM_BUF; i++) { u_ghost[(i+DOM_BUF) + (tj+DOM_BUF)*dom->Gfx._s1b + (tk+DOM_BUF)*dom->Gfx._s2b] = u_noghost[i + tj*dom->Gfx._s1 + tk*dom->Gfx._s2]; } } } __global__ void copy_u_noghost(real *u_noghost, real *u_ghost, dom_struct *dom) { int tj = blockIdx.x * blockDim.x + threadIdx.x; int tk = blockIdx.y * blockDim.y + threadIdx.y; if(tj < dom->Gfx.je-DOM_BUF && tk < dom->Gfx.ke-DOM_BUF) { for(int i = dom->Gfx.is-DOM_BUF; i < dom->Gfx.ie-DOM_BUF; i++) { u_noghost[i + tj*dom->Gfx._s1 + tk*dom->Gfx._s2] = u_ghost[(i+DOM_BUF) + (tj+DOM_BUF)*dom->Gfx._s1b + (tk+DOM_BUF)*dom->Gfx._s2b]; } } } __global__ void copy_v_ghost(real *v_ghost, real *v_noghost, dom_struct *dom) { int tk = blockIdx.x * blockDim.x + threadIdx.x; int ti = blockIdx.y * blockDim.y + threadIdx.y; if(tk < dom->Gfy.ke-DOM_BUF && ti < dom->Gfy.ie-DOM_BUF) { for(int j = dom->Gfy.js-DOM_BUF; j < dom->Gfy.je-DOM_BUF; j++) { v_ghost[(ti+DOM_BUF) + (j+DOM_BUF)*dom->Gfy._s1b + (tk+DOM_BUF)*dom->Gfy._s2b] = v_noghost[ti + j*dom->Gfy._s1 + tk*dom->Gfy._s2]; } } } __global__ void copy_v_noghost(real *v_noghost, real *v_ghost, dom_struct *dom) { int tk = blockIdx.x * blockDim.x + threadIdx.x; int ti = blockIdx.y * blockDim.y + threadIdx.y; if(tk < dom->Gfy.ke-DOM_BUF && ti < dom->Gfy.ie-DOM_BUF) { for(int j = dom->Gfy.js-DOM_BUF; j < dom->Gfy.je-DOM_BUF; j++) { v_noghost[ti + j*dom->Gfy._s1 + tk*dom->Gfy._s2] = v_ghost[(ti+DOM_BUF) + (j+DOM_BUF)*dom->Gfy._s1b + (tk+DOM_BUF)*dom->Gfy._s2b]; } } } __global__ void copy_w_ghost(real *w_ghost, real *w_noghost, dom_struct *dom) { int ti = blockIdx.x * blockDim.x + threadIdx.x; int tj = blockIdx.y * blockDim.y + threadIdx.y; if(ti < dom->Gfz.ie-DOM_BUF && tj < dom->Gfz.je-DOM_BUF) { for(int k = dom->Gfz.ks-DOM_BUF; k < dom->Gfz.ke-DOM_BUF; k++) { w_ghost[(ti+DOM_BUF) + (tj+DOM_BUF)*dom->Gfz._s1b + (k+DOM_BUF)*dom->Gfz._s2b] = w_noghost[ti + tj*dom->Gfz._s1 + k*dom->Gfz._s2]; } } } __global__ void copy_w_noghost(real *w_noghost, real *w_ghost, dom_struct *dom) { int ti = blockIdx.x * blockDim.x + threadIdx.x; int tj = blockIdx.y * blockDim.y + threadIdx.y; if(ti < dom->Gfz.ie-DOM_BUF && tj < dom->Gfz.je-DOM_BUF) { for(int k = dom->Gfz.ks-DOM_BUF; k < dom->Gfz.ke-DOM_BUF; k++) { w_noghost[ti + tj*dom->Gfz._s1 + k*dom->Gfz._s2] = w_ghost[(ti+DOM_BUF) + (tj+DOM_BUF)*dom->Gfz._s1b + (k+DOM_BUF)*dom->Gfz._s2b]; } } } __global__ void copy_u_fluid(real *u_noghost, real *u_ghost, int *phase, dom_struct *dom) { int tj = blockIdx.x * blockDim.x + threadIdx.x; int tk = blockIdx.y * blockDim.y + threadIdx.y; if(tj < dom->Gfx.je-DOM_BUF && tk < dom->Gfx.ke-DOM_BUF) { for(int i = 
dom->Gfx.is-DOM_BUF; i < dom->Gfx.ie-DOM_BUF; i++) { int boo = 1; if(phase[(i+DOM_BUF-1) + (tj+DOM_BUF)*dom->Gcc._s1b + (tk+DOM_BUF)*dom->Gcc._s2b] > -1) boo = 0; else if(phase[(i+DOM_BUF) + (tj+DOM_BUF)*dom->Gcc._s1b + (tk+DOM_BUF)*dom->Gcc._s2b] > -1) boo = 0; u_noghost[i + tj*dom->Gfx._s1 + tk*dom->Gfx._s2] = boo * u_ghost[(i+DOM_BUF) + (tj+DOM_BUF)*dom->Gfx._s1b + (tk+DOM_BUF)*dom->Gfx._s2b]; } } } __global__ void copy_v_fluid(real *v_noghost, real *v_ghost, int *phase, dom_struct *dom) { int tk = blockIdx.x * blockDim.x + threadIdx.x; int ti = blockIdx.y * blockDim.y + threadIdx.y; if(tk < dom->Gfy.ke-DOM_BUF && ti < dom->Gfy.ie-DOM_BUF) { for(int j = dom->Gfy.js-DOM_BUF; j < dom->Gfy.je-DOM_BUF; j++) { int boo = 1; if(phase[(ti+DOM_BUF) + (j+DOM_BUF-1)*dom->Gcc._s1b + (tk+DOM_BUF)*dom->Gcc._s2b] > -1) boo = 0; else if(phase[(ti+DOM_BUF) + (j+DOM_BUF)*dom->Gcc._s1b + (tk+DOM_BUF)*dom->Gcc._s2b] > -1) boo = 0; v_noghost[ti + j*dom->Gfy._s1 + tk*dom->Gfy._s2] = boo * v_ghost[(ti+DOM_BUF) + (j+DOM_BUF)*dom->Gfy._s1b + (tk+DOM_BUF)*dom->Gfy._s2b]; } } } __global__ void copy_w_fluid(real *w_noghost, real *w_ghost, int *phase, dom_struct *dom) { int ti = blockIdx.x * blockDim.x + threadIdx.x; int tj = blockIdx.y * blockDim.y + threadIdx.y; if(ti < dom->Gfz.ie-DOM_BUF && tj < dom->Gfz.je-DOM_BUF) { for(int k = dom->Gfz.ks-DOM_BUF; k < dom->Gfz.ke-DOM_BUF; k++) { int boo = 1; if(phase[(ti+DOM_BUF) + (tj+DOM_BUF)*dom->Gcc._s1b + (k+DOM_BUF-1)*dom->Gcc._s2b] > -1) boo = 0; else if(phase[(ti+DOM_BUF) + (tj+DOM_BUF)*dom->Gcc._s1b + (k+DOM_BUF)*dom->Gcc._s2b] > -1) boo = 0; w_noghost[ti + tj*dom->Gfz._s1 + k*dom->Gfz._s2] = boo * w_ghost[(ti+DOM_BUF) + (tj+DOM_BUF)*dom->Gfz._s1b + (k+DOM_BUF)*dom->Gfz._s2b]; } } } #ifndef IMPLICIT __global__ void u_star_2(real rho_f, real nu, real *u0, real *v0, real *w0, real *p, real *f, real *diff0, real *conv0, real *diff, real *conv, real *u_star, dom_struct *dom, real dt0, real dt, int *phase) { // create shared memory // no reason to load pressure into shared memory, but leaving it in global // will require additional if statements, so keep it in shared __shared__ real s_u0[MAX_THREADS_DIM * MAX_THREADS_DIM]; // u back __shared__ real s_u1[MAX_THREADS_DIM * MAX_THREADS_DIM]; // u center __shared__ real s_u2[MAX_THREADS_DIM * MAX_THREADS_DIM]; // u forward __shared__ real s_v01[MAX_THREADS_DIM * MAX_THREADS_DIM]; // v back __shared__ real s_v12[MAX_THREADS_DIM * MAX_THREADS_DIM]; // v forward __shared__ real s_w01[MAX_THREADS_DIM * MAX_THREADS_DIM]; // w back __shared__ real s_w12[MAX_THREADS_DIM * MAX_THREADS_DIM]; // w forward __shared__ real s_d[MAX_THREADS_DIM * MAX_THREADS_DIM]; // diff __shared__ real s_c[MAX_THREADS_DIM * MAX_THREADS_DIM]; // conv __shared__ real s_u_star[MAX_THREADS_DIM * MAX_THREADS_DIM]; // solution // working constants real ab0 = 0.5 * dt / dt0; // for Adams-Bashforth stepping real ab = 1. + ab0; // for Adams-Bashforth stepping real ddx = 1. / dom->dx; // to limit the number of divisions needed real ddy = 1. / dom->dy; // to limit the number of divisions needed real ddz = 1. 
/ dom->dz; // to limit the number of divisions needed // loop over u-planes for(int i = dom->Gfx._is; i < dom->Gfx._ie; i++) { // subdomain indices // the extra 2*blockIdx.X terms implement the necessary overlapping of // shared memory blocks in the subdomain int j = blockIdx.x*blockDim.x + threadIdx.x - 2*blockIdx.x; int k = blockIdx.y*blockDim.y + threadIdx.y - 2*blockIdx.y; // shared memory indices int tj = threadIdx.x; int tk = threadIdx.y; // load shared memory // TODO: look into the effect of removing these if statements and simply // allowing memory overruns for threads that don't matter for particular // discretizations // TODO: THIS CAN BE FIXED BY PADDING ALL OF THESE ARRAYS WHEN COPYING FROM // HOST TO DEVICE if((k >= dom->Gfx._ksb && k < dom->Gfx._keb) && (j >= dom->Gfx._jsb && j < dom->Gfx._jeb)) { s_u0[tj + tk*blockDim.x] = u0[(i-1) + j*dom->Gfx._s1b + k*dom->Gfx._s2b]; s_u1[tj + tk*blockDim.x] = u0[i + j*dom->Gfx._s1b + k*dom->Gfx._s2b]; s_u2[tj + tk*blockDim.x] = u0[(i+1) + j*dom->Gfx._s1b + k*dom->Gfx._s2b]; } if((k >= dom->Gfy._ksb && k < dom->Gfy._keb) && (j >= dom->Gfy._jsb && j < dom->Gfy._jeb)) { s_v01[tj + tk*blockDim.x] = v0[(i-1) + j*dom->Gfy._s1b + k*dom->Gfy._s2b]; s_v12[tj + tk*blockDim.x] = v0[i + j*dom->Gfy._s1b + k*dom->Gfy._s2b]; } if((k >= dom->Gfz._ksb && k < dom->Gfz._keb) && (j >= dom->Gfz._jsb && j < dom->Gfz._jeb)) { s_w01[tj + tk*blockDim.x] = w0[(i-1) + j*dom->Gfz._s1b + k*dom->Gfz._s2b]; s_w12[tj + tk*blockDim.x] = w0[i + j*dom->Gfz._s1b + k*dom->Gfz._s2b]; } s_u_star[tj + tk*blockDim.x] = 0.0; // make sure all threads complete shared memory copy __syncthreads(); // compute right-hand side // if off the shared memory block boundary if((tj > 0 && tj < blockDim.x-1) && (tk > 0 && tk < blockDim.y-1) && j < dom->Gfx.jeb && k < dom->Gfx.keb) { // pressure gradient s_u_star[tj + tk*blockDim.x] = (p[(i-1) + j*dom->Gcc._s1b + k*dom->Gcc._s2b] - p[i + j*dom->Gcc._s1b + k*dom->Gcc._s2b]) * ddx / rho_f; // grab the required data points for calculations real u011 = s_u0[tj + tk*blockDim.x]; real u111 = s_u1[tj + tk*blockDim.x]; real u211 = s_u2[tj + tk*blockDim.x]; real u101 = s_u1[(tj-1) + tk*blockDim.x]; real u121 = s_u1[(tj+1) + tk*blockDim.x]; real v011 = s_v01[tj + tk*blockDim.x]; real v111 = s_v12[tj + tk*blockDim.x]; real v021 = s_v01[(tj+1) + tk*blockDim.x]; real v121 = s_v12[(tj+1) + tk*blockDim.x]; real u110 = s_u1[tj + (tk-1)*blockDim.x]; real u112 = s_u1[tj + (tk+1)*blockDim.x]; real w011 = s_w01[tj + tk*blockDim.x]; real w111 = s_w12[tj + tk*blockDim.x]; real w012 = s_w01[tj + (tk+1)*blockDim.x]; real w112 = s_w12[tj + (tk+1)*blockDim.x]; // compute convection term (Adams-Bashforth stepping) real duudx = (u211 + u111)*(u211 + u111) - (u111 + u011)*(u111 + u011); duudx *= 0.25 * ddx; real duvdy = (u121 + u111)*(v121 + v021) - (u111 + u101)*(v111 + v011); duvdy *= 0.25 * ddy; real duwdz = (u112 + u111)*(w112 + w012) - (u111 + u110)*(w111 + w011); duwdz *= 0.25 * ddz; s_c[tj + tk*blockDim.x] = duudx + duvdy + duwdz; // convection term sums into right-hand side #ifndef STOKESFLOW if(dt0 > 0) // Adams-Bashforth s_u_star[tj + tk*blockDim.x] += (-ab * s_c[tj + tk*blockDim.x] + ab0 * conv0[i + j*dom->Gfx._s1b + k*dom->Gfx._s2b]); else // forward Euler s_u_star[tj + tk*blockDim.x] += -s_c[tj + tk*blockDim.x]; #endif // compute diffusion term (Adams-Bashforth stepping) real dud1 = (u211 - u111) * ddx; real dud0 = (u111 - u011) * ddx; real ddudxx = (dud1 - dud0) * ddx; dud1 = (u121 - u111) * ddy; dud0 = (u111 - u101) * ddy; real ddudyy = (dud1 - dud0) * ddy; 
dud1 = (u112 - u111) * ddz; dud0 = (u111 - u110) * ddz; real ddudzz = (dud1 - dud0) * ddz; s_d[tj + tk*blockDim.x] = nu * (ddudxx + ddudyy + ddudzz); // diffusive term sums into right-hand side if(dt0 > 0) // Adams-Bashforth s_u_star[tj + tk*blockDim.x] += (ab * s_d[tj + tk*blockDim.x] - ab0 * diff0[i + j*dom->Gfx._s1b + k*dom->Gfx._s2b]); else s_u_star[tj + tk*blockDim.x] += s_d[tj + tk*blockDim.x]; // add on imposed pressure gradient s_u_star[tj + tk*blockDim.x] += f[i + j*dom->Gfx._s1b + k*dom->Gfx._s2b]; // multiply by dt s_u_star[tj + tk*blockDim.x] *= dt; // velocity term sums into right-hand side s_u_star[tj + tk*blockDim.x] += u111; // zero contribution inside particles s_u_star[tj + tk*blockDim.x] *= (phase[(i-1) + j*dom->Gcc._s1b + k*dom->Gcc._s2b] < 0 && phase[i + j*dom->Gcc._s1b + k*dom->Gcc._s2b] < 0); } // make sure all threads complete computations __syncthreads(); // copy shared memory back to global if((k >= dom->Gfx._ks && k < dom->Gfx._ke) && (j >= dom->Gfx._js && j < dom->Gfx._je) && (tj > 0 && tj < (blockDim.x-1)) && (tk > 0 && tk < (blockDim.y-1))) { u_star[i + j*dom->Gfx._s1b + k*dom->Gfx._s2b] = s_u_star[tj + tk*blockDim.x]; conv[i + j*dom->Gfx._s1b + k*dom->Gfx._s2b] = s_c[tj + tk*blockDim.x]; diff[i + j*dom->Gfx._s1b + k*dom->Gfx._s2b] = s_d[tj + tk*blockDim.x]; } } } #endif #ifndef IMPLICIT __global__ void v_star_2(real rho_f, real nu, real *u0, real *v0, real *w0, real *p, real *f, real *diff0, real *conv0, real *diff, real *conv, real *v_star, dom_struct *dom, real dt0, real dt, int *phase) { // create shared memory // no reason to load pressure into shared memory, but leaving it in global // will require additional if statements, so keep it in shared __shared__ real s_v0[MAX_THREADS_DIM * MAX_THREADS_DIM]; // v back __shared__ real s_v1[MAX_THREADS_DIM * MAX_THREADS_DIM]; // v center __shared__ real s_v2[MAX_THREADS_DIM * MAX_THREADS_DIM]; // v forward __shared__ real s_w01[MAX_THREADS_DIM * MAX_THREADS_DIM]; // w back __shared__ real s_w12[MAX_THREADS_DIM * MAX_THREADS_DIM]; // w forward __shared__ real s_u01[MAX_THREADS_DIM * MAX_THREADS_DIM]; // u back __shared__ real s_u12[MAX_THREADS_DIM * MAX_THREADS_DIM]; // u forward __shared__ real s_d[MAX_THREADS_DIM * MAX_THREADS_DIM]; // diff __shared__ real s_c[MAX_THREADS_DIM * MAX_THREADS_DIM]; // conv __shared__ real s_v_star[MAX_THREADS_DIM * MAX_THREADS_DIM]; // solution // working constants real ab0 = 0.5 * dt / dt0; // for Adams-Bashforth stepping real ab = 1. + ab0; // for Adams-Bashforth stepping real ddx = 1. / dom->dx; // to limit the number of divisions needed real ddy = 1. / dom->dy; // to limit the number of divisions needed real ddz = 1. 
/ dom->dz; // to limit the number of divisions needed // loop over v-planes for(int j = dom->Gfy._js; j < dom->Gfy._je; j++) { // subdomain indices // the extra 2*blockIdx.X terms implement the necessary overlapping of // shared memory blocks in the subdomain int k = blockIdx.x*blockDim.x + threadIdx.x - 2*blockIdx.x; int i = blockIdx.y*blockDim.y + threadIdx.y - 2*blockIdx.y; // shared memory indices int tk = threadIdx.x; int ti = threadIdx.y; // load shared memory // TODO: look into the effect of removing these if statements and simply // allowing memory overruns for threads that don't matter for particular // discretizations if((i >= dom->Gfy._isb && i < dom->Gfy._ieb) && (k >= dom->Gfy._ksb && k < dom->Gfy._keb)) { s_v0[tk + ti*blockDim.x] = v0[i + (j-1)*dom->Gfy._s1b + k*dom->Gfy._s2b]; s_v1[tk + ti*blockDim.x] = v0[i + j*dom->Gfy._s1b + k*dom->Gfy._s2b]; s_v2[tk + ti*blockDim.x] = v0[i + (j+1)*dom->Gfy._s1b + k*dom->Gfy._s2b]; } if((i >= dom->Gfz._isb && i < dom->Gfz._ieb) && (k >= dom->Gfz._ksb && k < dom->Gfz._keb)) { s_w01[tk + ti*blockDim.x] = w0[i + (j-1)*dom->Gfz._s1b + k*dom->Gfz._s2b]; s_w12[tk + ti*blockDim.x] = w0[i + j*dom->Gfz._s1b + k*dom->Gfz._s2b]; } if((i >= dom->Gfx._isb && i < dom->Gfx._ieb) && (k >= dom->Gfx._ksb && k < dom->Gfx._keb)) { s_u01[tk + ti*blockDim.x] = u0[i + (j-1)*dom->Gfx._s1b + k*dom->Gfx._s2b]; s_u12[tk + ti*blockDim.x] = u0[i + j*dom->Gfx._s1b + k*dom->Gfx._s2b]; } s_v_star[tk + ti*blockDim.x] = 0.0; // make sure all threads complete shared memory copy __syncthreads(); // compute right-hand side // if off the shared memory block boundary if((tk > 0 && tk < blockDim.x-1) && (ti > 0 && ti < blockDim.y-1) && k < dom->Gfy.keb && i < dom->Gfy.ieb) { // pressure gradient s_v_star[tk + ti*blockDim.x] = (p[i + (j-1)*dom->Gcc._s1b + k*dom->Gcc._s2b] - p[i + j*dom->Gcc._s1b + k*dom->Gcc._s2b]) * ddy / rho_f; // grab the required data points for calculations real v101 = s_v0[tk + ti*blockDim.x]; real v111 = s_v1[tk + ti*blockDim.x]; real v121 = s_v2[tk + ti*blockDim.x]; real v110 = s_v1[(tk-1) + ti*blockDim.x]; real v112 = s_v1[(tk+1) + ti*blockDim.x]; real w101 = s_w01[tk + ti*blockDim.x]; real w111 = s_w12[tk + ti*blockDim.x]; real w102 = s_w01[(tk+1) + ti*blockDim.x]; real w112 = s_w12[(tk+1) + ti*blockDim.x]; real v011 = s_v1[tk + (ti-1)*blockDim.x]; real v211 = s_v1[tk + (ti+1)*blockDim.x]; real u101 = s_u01[tk + ti*blockDim.x]; real u111 = s_u12[tk + ti*blockDim.x]; real u201 = s_u01[tk + (ti+1)*blockDim.x]; real u211 = s_u12[tk + (ti+1)*blockDim.x]; // compute convection term (Adams-Bashforth stepping) real dvudx = (v211 + v111)*(u211 + u201) - (v111 + v011)*(u111 + u101); dvudx *= 0.25 * ddx; real dvvdy = (v121 + v111)*(v121 + v111) - (v111 + v101)*(v111 + v101); dvvdy *= 0.25 * ddy; real dvwdz = (v112 + v111)*(w112 + w102) - (v111 + v110)*(w111 + w101); dvwdz *= 0.25 * ddz; s_c[tk + ti*blockDim.x] = dvudx + dvvdy + dvwdz; // convection term sums into right-hand side #ifndef STOKESFLOW if(dt0 > 0) // Adams-Bashforth s_v_star[tk + ti*blockDim.x] += (-ab * s_c[tk + ti*blockDim.x] + ab0 * conv0[i + j*dom->Gfy._s1b + k*dom->Gfy._s2b]); else s_v_star[tk + ti*blockDim.x] += -s_c[tk + ti*blockDim.x]; #endif // compute diffusive term real dvd1 = (v211 - v111) * ddx; real dvd0 = (v111 - v011) * ddx; real ddvdxx = (dvd1 - dvd0) * ddx; dvd1 = (v121 - v111) * ddy; dvd0 = (v111 - v101) * ddy; real ddvdyy = (dvd1 - dvd0) * ddy; dvd1 = (v112 - v111) * ddz; dvd0 = (v111 - v110) * ddz; real ddvdzz = (dvd1 - dvd0) * ddz; s_d[tk + ti*blockDim.x] = nu * (ddvdxx + ddvdyy 
+ ddvdzz); // diffusive term sums into right-hand side if(dt0 > 0) // Adams-Bashforth s_v_star[tk + ti*blockDim.x] += (ab * s_d[tk + ti*blockDim.x] - ab0 * diff0[i + j*dom->Gfy._s1b + k*dom->Gfy._s2b]); else s_v_star[tk + ti*blockDim.x] += s_d[tk + ti*blockDim.x]; // add on imposed pressure gradient s_v_star[tk + ti*blockDim.x] += f[i + j*dom->Gfy._s1b + k*dom->Gfy._s2b]; // multiply by dt s_v_star[tk + ti*blockDim.x] *= dt; // velocity term sums into right-hand side s_v_star[tk + ti*blockDim.x] += v111; // zero contribution inside particles s_v_star[tk + ti*blockDim.x] *= (phase[i + (j-1)*dom->Gcc._s1b + k*dom->Gcc._s2b] < 0 && phase[i + j*dom->Gcc._s1b + k*dom->Gcc._s2b] < 0); } // make sure all threads complete computations __syncthreads(); // copy shared memory back to global if((i >= dom->Gfy._is && i < dom->Gfy._ie) && (k >= dom->Gfy._ks && k < dom->Gfy._ke) && (tk > 0 && tk < (blockDim.x-1)) && (ti > 0 && ti < (blockDim.y-1))) { v_star[i+ j*dom->Gfy._s1b + k*dom->Gfy._s2b] = s_v_star[tk + ti*blockDim.x]; conv[i + j*dom->Gfy._s1b + k*dom->Gfy._s2b] = s_c[tk + ti*blockDim.x]; diff[i + j*dom->Gfy._s1b + k*dom->Gfy._s2b] = s_d[tk + ti*blockDim.x]; } } } #endif #ifndef IMPLICIT __global__ void w_star_2(real rho_f, real nu, real *u0, real *v0, real *w0, real *p, real *f, real *diff0, real *conv0, real *diff, real *conv, real *w_star, dom_struct *dom, real dt0, real dt, int *phase) { // create shared memory // no reason to load pressure into shared memory, but leaving it in global // will require additional if statements, so keep it in shared __shared__ real s_w0[MAX_THREADS_DIM * MAX_THREADS_DIM]; // w back __shared__ real s_w1[MAX_THREADS_DIM * MAX_THREADS_DIM]; // w center __shared__ real s_w2[MAX_THREADS_DIM * MAX_THREADS_DIM]; // w forward __shared__ real s_u01[MAX_THREADS_DIM * MAX_THREADS_DIM]; // u back __shared__ real s_u12[MAX_THREADS_DIM * MAX_THREADS_DIM]; // u forward __shared__ real s_v01[MAX_THREADS_DIM * MAX_THREADS_DIM]; // v back __shared__ real s_v12[MAX_THREADS_DIM * MAX_THREADS_DIM]; // v forward __shared__ real s_d[MAX_THREADS_DIM * MAX_THREADS_DIM]; // diff0 __shared__ real s_c[MAX_THREADS_DIM * MAX_THREADS_DIM]; // conv0 __shared__ real s_w_star[MAX_THREADS_DIM * MAX_THREADS_DIM]; // solution // working constants real ab0 = 0.5 * dt / dt0; // for Adams-Bashforth stepping real ab = 1. + ab0; // for Adams-Bashforth stepping real ddx = 1. / dom->dx; // to limit the number of divisions needed real ddy = 1. / dom->dy; // to limit the number of divisions needed real ddz = 1. 
/ dom->dz; // to limit the number of divisions needed // loop over w-planes for(int k = dom->Gfz._ks; k < dom->Gfz._ke; k++) { // subdomain indices // the extra 2*blockIdx.X terms implement the necessary overlapping of // shared memory blocks in the subdomain int i = blockIdx.x*blockDim.x + threadIdx.x - 2*blockIdx.x; int j = blockIdx.y*blockDim.y + threadIdx.y - 2*blockIdx.y; // shared memory indices int ti = threadIdx.x; int tj = threadIdx.y; // load shared memory // TODO: look into the effect of removing these if statements and simply // allowing memory overruns for threads that don't matter for particular // discretizations if((j >= dom->Gfz._jsb && j < dom->Gfz._jeb) && (i >= dom->Gfz._isb && i < dom->Gfz._ieb)) { s_w0[ti + tj*blockDim.x] = w0[i + j*dom->Gfz._s1b + (k-1)*dom->Gfz._s2b]; s_w1[ti + tj*blockDim.x] = w0[i + j*dom->Gfz._s1b + k*dom->Gfz._s2b]; s_w2[ti + tj*blockDim.x] = w0[i + j*dom->Gfz._s1b + (k+1)*dom->Gfz._s2b]; } if((j >= dom->Gfx._jsb && j < dom->Gfx._jeb) && (i >= dom->Gfx._isb && i < dom->Gfx._ieb)) { s_u01[ti + tj*blockDim.x] = u0[i + j*dom->Gfx._s1b + (k-1)*dom->Gfx._s2b]; s_u12[ti + tj*blockDim.x] = u0[i + j*dom->Gfx._s1b + k*dom->Gfx._s2b]; } if((j >= dom->Gfy._jsb && j < dom->Gfy._jeb) && (i >= dom->Gfy._isb && i < dom->Gfy._ieb)) { s_v01[ti + tj*blockDim.x] = v0[i + j*dom->Gfy._s1b + (k-1)*dom->Gfy._s2b]; s_v12[ti + tj*blockDim.x] = v0[i + j*dom->Gfy._s1b + k*dom->Gfy._s2b]; } s_w_star[ti + tj*blockDim.x] = 0.0; // make sure all threads complete shared memory copy __syncthreads(); // compute right-hand side // if off the shared memory block boundary if((ti > 0 && ti < blockDim.x-1) && (tj > 0 && tj < blockDim.y-1) && i < dom->Gfz.ieb && j < dom->Gfz.jeb) { // pressure gradient s_w_star[ti + tj*blockDim.x] = (p[i + j*dom->Gcc._s1b + (k-1)*dom->Gcc._s2b] - p[i + j*dom->Gcc._s1b + k*dom->Gcc._s2b]) * ddz / rho_f; // grab the required data points for calculations real w110 = s_w0[ti + tj*blockDim.x]; real w111 = s_w1[ti + tj*blockDim.x]; real w112 = s_w2[ti + tj*blockDim.x]; real w011 = s_w1[(ti-1) + tj*blockDim.x]; real w211 = s_w1[(ti+1) + tj*blockDim.x]; real u110 = s_u01[ti + tj*blockDim.x]; real u111 = s_u12[ti + tj*blockDim.x]; real u210 = s_u01[(ti+1) + tj*blockDim.x]; real u211 = s_u12[(ti+1) + tj*blockDim.x]; real w101 = s_w1[ti + (tj-1)*blockDim.x]; real w121 = s_w1[ti + (tj+1)*blockDim.x]; real v110 = s_v01[ti + tj*blockDim.x]; real v111 = s_v12[ti + tj*blockDim.x]; real v120 = s_v01[ti + (tj+1)*blockDim.x]; real v121 = s_v12[ti + (tj+1)*blockDim.x]; // compute convection term (Adams-Bashforth stepping) real dwudx = (w211 + w111)*(u211 + u210) - (w111 + w011)*(u111 + u110); dwudx *= 0.25 * ddx; real dwvdy = (w121 + w111)*(v121 + v120) - (w111 + w101)*(v111 + v110); dwvdy *= 0.25 * ddy; real dwwdz = (w112 + w111)*(w112 + w111) - (w111 + w110)*(w111 + w110); dwwdz *= 0.25 * ddz; s_c[ti + tj*blockDim.x] = dwudx + dwvdy + dwwdz; // convection term sums into right-hand side #ifndef STOKESFLOW if(dt0 > 0) // Adams-Bashforth s_w_star[ti + tj*blockDim.x] += (-ab * s_c[ti + tj*blockDim.x] + ab0 * conv0[i + j*dom->Gfz._s1b + k*dom->Gfz._s2b]); else // forward Euler s_w_star[ti + tj*blockDim.x] += -s_c[ti + tj*blockDim.x]; #endif // compute diffusive term real dwd1 = (w211 - w111) * ddx; real dwd0 = (w111 - w011) * ddx; real ddwdxx = (dwd1 - dwd0) * ddx; dwd1 = (w121 - w111) * ddy; dwd0 = (w111 - w101) * ddy; real ddwdyy = (dwd1 - dwd0) * ddy; dwd1 = (w112 - w111) * ddz; dwd0 = (w111 - w110) * ddz; real ddwdzz = (dwd1 - dwd0) * ddz; s_d[ti + tj*blockDim.x] = nu * 
(ddwdxx + ddwdyy + ddwdzz); // diffusive term sums into right-hand side if(dt0 > 0) // Adams-Bashforth s_w_star[ti + tj*blockDim.x] += (ab * s_d[ti + tj*blockDim.x] - ab0 * diff0[i + j*dom->Gfz._s1b + k*dom->Gfz._s2b]); else // forward Euler s_w_star[ti + tj*blockDim.x] += s_d[ti + tj*blockDim.x]; // add on imposed pressure gradient s_w_star[ti + tj*blockDim.x] += f[i + j*dom->Gfz._s1b + k*dom->Gfz._s2b]; // multiply by dt s_w_star[ti + tj*blockDim.x] *= dt; // velocity term sums into right-hand side s_w_star[ti + tj*blockDim.x] += w111; // zero contribution inside particles s_w_star[ti + tj*blockDim.x] *= (phase[i + j*dom->Gcc._s1b + (k-1)*dom->Gcc._s2b] < 0 && phase[i + j*dom->Gcc._s1b + k*dom->Gcc._s2b] < 0); } // make sure all threads complete computations __syncthreads(); // copy shared memory back to global if((j >= dom->Gfz._js && j < dom->Gfz._je) && (i >= dom->Gfz._is && i < dom->Gfz._ie) && (ti > 0 && ti < (blockDim.x-1)) && (tj > 0 && tj < (blockDim.y-1))) { w_star[i+ j*dom->Gfz._s1b + k*dom->Gfz._s2b] = s_w_star[ti + tj*blockDim.x]; conv[i + j*dom->Gfz._s1b + k*dom->Gfz._s2b] = s_c[ti + tj*blockDim.x]; diff[i + j*dom->Gfz._s1b + k*dom->Gfz._s2b] = s_d[ti + tj*blockDim.x]; } } } #endif __global__ void forcing_reset_x(real *fx, dom_struct *dom) { int tj = blockIdx.x * blockDim.x + threadIdx.x; int tk = blockIdx.y * blockDim.y + threadIdx.y; for(int i = dom->Gfx._isb; i < dom->Gfx._ieb; i++) { if(tj < dom->Gfx._jnb && tk < dom->Gfx._knb) { fx[i + tj*dom->Gfx._s1b + tk*dom->Gfx._s2b] = 0.; } } } __global__ void forcing_reset_y(real *fy, dom_struct *dom) { int tk = blockIdx.x * blockDim.x + threadIdx.x; int ti = blockIdx.y * blockDim.y + threadIdx.y; for(int j = dom->Gfy._jsb; j < dom->Gfy._jeb; j++) { if(tk < dom->Gfy._knb && ti < dom->Gfy._inb) { fy[ti + j*dom->Gfy._s1b + tk*dom->Gfy._s2b] = 0.; } } } __global__ void forcing_reset_z(real *fz, dom_struct *dom) { int ti = blockIdx.x * blockDim.x + threadIdx.x; int tj = blockIdx.y * blockDim.y + threadIdx.y; for(int k = dom->Gfz._ksb; k < dom->Gfz._keb; k++) { if(ti < dom->Gfz._inb && tj < dom->Gfz._jnb) { fz[ti + tj*dom->Gfz._s1b + k*dom->Gfz._s2b] = 0.; } } } __global__ void forcing_add_c_const(real val, real *cc, dom_struct *dom) { int tj = blockIdx.x * blockDim.x + threadIdx.x + DOM_BUF; int tk = blockIdx.y * blockDim.y + threadIdx.y + DOM_BUF; for(int i = dom->Gcc._isb; i < dom->Gcc._ieb; i++) { if(tj < dom->Gcc._jnb && tk < dom->Gcc._knb) { cc[i + tj*dom->Gcc._s1b + tk*dom->Gcc._s2b] += val; } } } __global__ void forcing_add_x_const(real val, real *fx, dom_struct *dom) { int tj = blockIdx.x * blockDim.x + threadIdx.x; int tk = blockIdx.y * blockDim.y + threadIdx.y; for(int i = dom->Gfx._isb; i < dom->Gfx._ieb; i++) { if(tj < dom->Gfx._jnb && tk < dom->Gfx._knb) { fx[i + tj*dom->Gfx._s1b + tk*dom->Gfx._s2b] += val; } } } __global__ void forcing_add_y_const(real val, real *fy, dom_struct *dom) { int tk = blockIdx.x * blockDim.x + threadIdx.x; int ti = blockIdx.y * blockDim.y + threadIdx.y; for(int j = dom->Gfy._jsb; j < dom->Gfy._jeb; j++) { if(tk < dom->Gfy._knb && ti < dom->Gfy._inb) { fy[ti + j*dom->Gfy._s1b + tk*dom->Gfy._s2b] += val; } } } __global__ void forcing_add_z_const(real val, real *fz, dom_struct *dom) { int ti = blockIdx.x * blockDim.x + threadIdx.x; int tj = blockIdx.y * blockDim.y + threadIdx.y; for(int k = dom->Gfz._ksb; k < dom->Gfz._keb; k++) { if(ti < dom->Gfz._inb && tj < dom->Gfz._jnb) { fz[ti + tj*dom->Gfz._s1b + k*dom->Gfz._s2b] += val; } } } __global__ void forcing_add_x_field(real scale, real *val, real 
*fx, dom_struct *dom, int *phase) { int tj = blockIdx.x * blockDim.x + threadIdx.x; int tk = blockIdx.y * blockDim.y + threadIdx.y; for(int i = dom->Gfx._isb; i < dom->Gfx._ieb; i++) { if(tj < dom->Gfx._jnb && tk < dom->Gfx._knb) { fx[i + tj*dom->Gfx._s1b + tk*dom->Gfx._s2b] += scale * val[i + tj*dom->Gfx._s1b + tk*dom->Gfx._s2b]; } } } __global__ void forcing_add_y_field(real scale, real *val, real *fy, dom_struct *dom, int *phase) { int tk = blockIdx.x * blockDim.x + threadIdx.x; int ti = blockIdx.y * blockDim.y + threadIdx.y; for(int j = dom->Gfy._jsb; j < dom->Gfy._jeb; j++) { if(tk < dom->Gfy._knb && ti < dom->Gfy._inb) { fy[ti + j*dom->Gfy._s1b + tk*dom->Gfy._s2b] += scale * val[ti + j*dom->Gfy._s1b + tk*dom->Gfy._s2b]; } } } __global__ void forcing_add_z_field(real scale, real *val, real *fz, dom_struct *dom, int *phase) { int ti = blockIdx.x * blockDim.x + threadIdx.x; int tj = blockIdx.y * blockDim.y + threadIdx.y; for(int k = dom->Gfz._ksb; k < dom->Gfz._keb; k++) { if(ti < dom->Gfz._inb && tj < dom->Gfz._jnb) { fz[ti + tj*dom->Gfz._s1b + k*dom->Gfz._s2b] += scale * val[ti + tj*dom->Gfz._s1b + k*dom->Gfz._s2b]; } } } __global__ void surf_int_x_copy(real *u_star, real *u_star_tmp, dom_struct *dom) { int tj = blockIdx.x * blockDim.x + threadIdx.x; int tk = blockIdx.y * blockDim.y + threadIdx.y; if(tj < dom->Gfx.jn && tk < dom->Gfx.kn) { int C = dom->Gfx.is + (tj+DOM_BUF)*dom->Gfx._s1b + (tk+DOM_BUF)*dom->Gfx._s2b; int CC = 0 + tj + tk*dom->Gfx.jn; u_star_tmp[CC] = -u_star[C]; C = dom->Gfx.ie-1 + (tj+DOM_BUF)*dom->Gfx._s1b + (tk+DOM_BUF)*dom->Gfx._s2b; CC = dom->Gfx.jn*dom->Gfx.kn + tj + tk*dom->Gfx.jn; u_star_tmp[CC] = u_star[C]; } } __global__ void surf_int_y_copy(real *v_star, real *v_star_tmp, dom_struct *dom) { int tk = blockIdx.x * blockDim.x + threadIdx.x; int ti = blockIdx.y * blockDim.y + threadIdx.y; if(tk < dom->Gfy.kn && ti < dom->Gfy.in) { int C = (ti+DOM_BUF) + dom->Gfy.js*dom->Gfy._s1b + (tk+DOM_BUF)*dom->Gfy._s2b; int CC = ti + 0 + tk*dom->Gfy.in; v_star_tmp[CC] = -v_star[C]; C = (ti+DOM_BUF) + (dom->Gfy.je-1)*dom->Gfy._s1b + (tk+DOM_BUF)*dom->Gfy._s2b; CC = ti + dom->Gfy.in*dom->Gfy.kn + tk*dom->Gfy.in; v_star_tmp[CC] = v_star[C]; } } __global__ void surf_int_z_copy(real *w_star, real *w_star_tmp, dom_struct *dom) { int ti = blockIdx.x * blockDim.x + threadIdx.x; int tj = blockIdx.y * blockDim.y + threadIdx.y; if(ti < dom->Gfz.in && tj < dom->Gfz.jn) { int C = (ti+DOM_BUF) + (tj+DOM_BUF)*dom->Gfz._s1b + dom->Gfz.ks*dom->Gfz._s2b; int CC = ti + tj*dom->Gfz.in + 0; w_star_tmp[CC] = -w_star[C]; C = (ti+DOM_BUF) + (tj+DOM_BUF)*dom->Gfz._s1b + (dom->Gfz.ke-1)*dom->Gfz._s2b; CC = ti + tj*dom->Gfz.in + dom->Gfz.in*dom->Gfz.jn; w_star_tmp[CC] = w_star[C]; } } __global__ void plane_eps_x_W(real eps, real *u_star, dom_struct *dom) { int tj = blockIdx.x * blockDim.x + threadIdx.x; int tk = blockIdx.y * blockDim.y + threadIdx.y; if(tj < dom->Gfx.jn && tk < dom->Gfx.kn) { int C = dom->Gfx.is + (tj+DOM_BUF)*dom->Gfx._s1b + (tk+DOM_BUF)*dom->Gfx._s2b; u_star[C] = u_star[C] + eps; } } __global__ void plane_eps_x_E(real eps, real *u_star, dom_struct *dom) { int tj = blockIdx.x * blockDim.x + threadIdx.x; int tk = blockIdx.y * blockDim.y + threadIdx.y; if(tj < dom->Gfx.jn && tk < dom->Gfx.kn) { int C = dom->Gfx.ie-1 + (tj+DOM_BUF)*dom->Gfx._s1b + (tk+DOM_BUF)*dom->Gfx._s2b; u_star[C] = u_star[C] - eps; } } __global__ void plane_eps_y_S(real eps, real *v_star, dom_struct *dom) { int tk = blockIdx.x * blockDim.x + threadIdx.x; int ti = blockIdx.y * blockDim.y + threadIdx.y; if(tk < 
dom->Gfy.kn && ti < dom->Gfy.in) { int C = (ti+DOM_BUF) + (dom->Gfy.js)*dom->Gfy._s1b + (tk+DOM_BUF)*dom->Gfy._s2b; v_star[C] = v_star[C] + eps; } } __global__ void plane_eps_y_N(real eps, real *v_star, dom_struct *dom) { int tk = blockIdx.x * blockDim.x + threadIdx.x; int ti = blockIdx.y * blockDim.y + threadIdx.y; if(tk < dom->Gfy.kn && ti < dom->Gfy.in) { int C = (ti+DOM_BUF) + (dom->Gfy.je-1)*dom->Gfy._s1b + (tk+DOM_BUF)*dom->Gfy._s2b; v_star[C] = v_star[C] - eps; } } __global__ void plane_eps_z_B(real eps, real *w_star, dom_struct *dom) { int ti = blockIdx.x * blockDim.x + threadIdx.x; int tj = blockIdx.y * blockDim.y + threadIdx.y; if(ti < dom->Gfz.in && tj < dom->Gfz.jn) { int C = (ti+DOM_BUF) + (tj+DOM_BUF)*dom->Gfz._s1b + (dom->Gfz.ks)*dom->Gfz._s2b; w_star[C] = w_star[C] + eps; } } __global__ void plane_eps_z_T(real eps, real *w_star, dom_struct *dom) { int ti = blockIdx.x * blockDim.x + threadIdx.x; int tj = blockIdx.y * blockDim.y + threadIdx.y; if(ti < dom->Gfz.in && tj < dom->Gfz.jn) { int C = (ti+DOM_BUF) + (tj+DOM_BUF)*dom->Gfz._s1b + (dom->Gfz.ke-1)*dom->Gfz._s2b; w_star[C] = w_star[C] - eps; } } __global__ void move_parts_a(dom_struct *dom, part_struct *parts, int nparts, real dt, real dt0, g_struct g, gradP_struct gradP, real rho_f, real ttime) { int pp = threadIdx.x + blockIdx.x*blockDim.x; // particle number real vol = 4./3. * PI * parts[pp].r*parts[pp].r*parts[pp].r; real m = vol * parts[pp].rho; if(pp < nparts) { if(parts[pp].translating) { // update linear accelerations parts[pp].udot = (parts[pp].Fx + parts[pp].kFx + parts[pp].iFx + parts[pp].aFx - vol*gradP.x) / m + (parts[pp].rho - rho_f) / parts[pp].rho * g.x; parts[pp].vdot = (parts[pp].Fy + parts[pp].kFy + parts[pp].iFy + parts[pp].aFy - vol*gradP.y) / m + (parts[pp].rho - rho_f) / parts[pp].rho * g.y; parts[pp].wdot = (parts[pp].Fz + parts[pp].kFz + parts[pp].iFz + parts[pp].aFz - vol*gradP.z) / m + (parts[pp].rho - rho_f) / parts[pp].rho * g.z; // update linear velocities parts[pp].u = parts[pp].u0 + 0.5*dt*(parts[pp].udot + parts[pp].udot0); parts[pp].v = parts[pp].v0 + 0.5*dt*(parts[pp].vdot + parts[pp].vdot0); parts[pp].w = parts[pp].w0 + 0.5*dt*(parts[pp].wdot + parts[pp].wdot0); // do not update position } if(parts[pp].rotating) { // update angular accelerations real I = 0.4 * m * parts[pp].r*parts[pp].r; parts[pp].oxdot = (parts[pp].Lx + parts[pp].iLx + parts[pp].aLx) / I; parts[pp].oydot = (parts[pp].Ly + parts[pp].iLy + parts[pp].aLy) / I; parts[pp].ozdot = (parts[pp].Lz + parts[pp].iLz + parts[pp].aLz) / I; // update angular velocities parts[pp].ox = parts[pp].ox0 + 0.5*dt*(parts[pp].oxdot + parts[pp].oxdot0); parts[pp].oy = parts[pp].oy0 + 0.5*dt*(parts[pp].oydot + parts[pp].oydot0); parts[pp].oz = parts[pp].oz0 + 0.5*dt*(parts[pp].ozdot + parts[pp].ozdot0); } } } __global__ void move_parts_b(dom_struct *dom, part_struct *parts, int nparts, real dt, real dt0, g_struct g, gradP_struct gradP, real rho_f, real ttime) { int pp = threadIdx.x + blockIdx.x*blockDim.x; // particle number real vol = 4./3. 
* PI * parts[pp].r*parts[pp].r*parts[pp].r; real m = vol * parts[pp].rho; if(pp < nparts) { if(parts[pp].translating) { // update linear accelerations parts[pp].udot = (parts[pp].Fx + parts[pp].kFx + parts[pp].iFx + parts[pp].aFx - vol*gradP.x) / m + (parts[pp].rho - rho_f) / parts[pp].rho * g.x; parts[pp].vdot = (parts[pp].Fy + parts[pp].kFy + parts[pp].iFy + parts[pp].aFy - vol*gradP.y) / m + (parts[pp].rho - rho_f) / parts[pp].rho * g.y; parts[pp].wdot = (parts[pp].Fz + parts[pp].kFz + parts[pp].iFz + parts[pp].aFz - vol*gradP.z) / m + (parts[pp].rho - rho_f) / parts[pp].rho * g.z; // update linear velocities parts[pp].u = parts[pp].u0 + 0.5*dt*(parts[pp].udot + parts[pp].udot0); parts[pp].v = parts[pp].v0 + 0.5*dt*(parts[pp].vdot + parts[pp].vdot0); parts[pp].w = parts[pp].w0 + 0.5*dt*(parts[pp].wdot + parts[pp].wdot0); // update position (trapezoidal rule) parts[pp].x = parts[pp].x0 + 0.5*dt*(parts[pp].u + parts[pp].u0); if(parts[pp].x < dom->xs) parts[pp].x = parts[pp].x + dom->xl; else if(parts[pp].x > dom->xe) parts[pp].x = parts[pp].x - dom->xl; parts[pp].y = parts[pp].y0 + 0.5*dt*(parts[pp].v + parts[pp].v0); if(parts[pp].y < dom->ys) parts[pp].y = parts[pp].y + dom->yl; else if(parts[pp].y > dom->ye) parts[pp].y = parts[pp].y - dom->yl; parts[pp].z = parts[pp].z0 + 0.5*dt*(parts[pp].w + parts[pp].w0); if(parts[pp].z < dom->zs) parts[pp].z = parts[pp].z + dom->zl; else if(parts[pp].z > dom->ze) parts[pp].z = parts[pp].z - dom->zl; // store for next time step parts[pp].x0 = parts[pp].x; parts[pp].y0 = parts[pp].y; parts[pp].z0 = parts[pp].z; parts[pp].u0 = parts[pp].u; parts[pp].v0 = parts[pp].v; parts[pp].w0 = parts[pp].w; parts[pp].udot0 = parts[pp].udot; parts[pp].vdot0 = parts[pp].vdot; parts[pp].wdot0 = parts[pp].wdot; } if(parts[pp].rotating) { // update angular accelerations real I = 0.4 * m * parts[pp].r*parts[pp].r; parts[pp].oxdot = (parts[pp].Lx + parts[pp].iLx + parts[pp].aLx) / I; parts[pp].oydot = (parts[pp].Ly + parts[pp].iLy + parts[pp].aLy) / I; parts[pp].ozdot = (parts[pp].Lz + parts[pp].iLz + parts[pp].aLz) / I; // update angular velocities parts[pp].ox = parts[pp].ox0 + 0.5*dt*(parts[pp].oxdot + parts[pp].oxdot0); parts[pp].oy = parts[pp].oy0 + 0.5*dt*(parts[pp].oydot + parts[pp].oydot0); parts[pp].oz = parts[pp].oz0 + 0.5*dt*(parts[pp].ozdot + parts[pp].ozdot0); /* update basis vectors */ // calculate rotation magnitude (trapezoidal rule) real mag = 0.5*sqrt(parts[pp].ox*parts[pp].ox + parts[pp].oy*parts[pp].oy + parts[pp].oz*parts[pp].oz); mag += 0.5*sqrt(parts[pp].ox0*parts[pp].ox0 + parts[pp].oy0*parts[pp].oy0 + parts[pp].oz0*parts[pp].oz0); // calculate normalized rotation axis real X = 0; real Y = 0; real Z = 0; if(mag > 0) { X = 0.5 * (parts[pp].ox + parts[pp].ox0) / mag; Y = 0.5 * (parts[pp].oy + parts[pp].oy0) / mag; Z = 0.5 * (parts[pp].oz + parts[pp].oz0) / mag; } // calculate rotation quaternion real theta = mag * dt; real qr = cos(0.5*theta); real qi = X * sin(0.5*theta); real qj = Y * sin(0.5*theta); real qk = Z * sin(0.5*theta); // compute quaternion conjugation to apply rotation to basis vectors rotate(qr, qi, qj, qk, &parts[pp].axx, &parts[pp].axy, &parts[pp].axz); rotate(qr, qi, qj, qk, &parts[pp].ayx, &parts[pp].ayy, &parts[pp].ayz); rotate(qr, qi, qj, qk, &parts[pp].azx, &parts[pp].azy, &parts[pp].azz); // store for next time step parts[pp].ox0 = parts[pp].ox; parts[pp].oy0 = parts[pp].oy; parts[pp].oz0 = parts[pp].oz; parts[pp].oxdot0 = parts[pp].oxdot; parts[pp].oydot0 = parts[pp].oydot; parts[pp].ozdot0 = parts[pp].ozdot; } } } __device__ 
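// Rotate the vector p = (*pi, *pj, *pk) in place by the unit quaternion
// q = (qr, qi, qj, qk) via the conjugation p' = q p q*, as used above to
// update the particle basis vectors by the incremental rotation over dt.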
void rotate(real qr, real qi, real qj, real qk, real *pi, real *pj, real *pk) { real Pr = *pi*qi + *pj*qj + *pk*qk; real Pi = *pi*qr - *pj*qk + *pk*qj; real Pj = *pi*qk + *pj*qr - *pk*qi; real Pk = -*pi*qj + *pj*qi + *pk*qr; *pi = qr*Pi + qi*Pr + qj*Pk - qk*Pj; *pj = qr*Pj - qi*Pk + qj*Pr + qk*Pi; *pk = qr*Pk + qi*Pj - qj*Pi + qk*Pr; } __global__ void collision_init(part_struct *parts, int nparts) { int j = threadIdx.x + blockIdx.x*blockDim.x; if(j < nparts) { parts[j].iFx = 0.; parts[j].iFy = 0.; parts[j].iFz = 0.; parts[j].iLx = 0.; parts[j].iLy = 0.; parts[j].iLz = 0.; } } __global__ void init(int *vector, int N, int val) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < N) { vector[i] = val; } } __global__ void bin_fill(int *partInd, int *partBin, int nparts, part_struct *parts, dom_struct *binDom, BC bc) { int pp = threadIdx.x + blockIdx.x*blockDim.x; int c; int ibin, jbin, kbin; // find the correct bin index for each part and store it if (pp < nparts) { ibin = floor((parts[pp].x - binDom->xs)/binDom->dx); jbin = floor((parts[pp].y - binDom->ys)/binDom->dy); kbin = floor((parts[pp].z - binDom->zs)/binDom->dz); c = ibin + jbin*binDom->Gcc.s1 + kbin*binDom->Gcc.s2; partInd[pp] = pp; // index of particle partBin[pp] = c; // bin index parts[pp].bin = c; // bin index (stored in particle) } } __global__ void bin_partCount(int *binCount, int *binStart, int *binEnd, dom_struct *binDom, BC bc, int nBins) { int bin = threadIdx.x + blockIdx.x*blockDim.x; // fill binCount if (bin < nBins) { binCount[bin] = binEnd[bin] - binStart[bin]; } } __global__ void bin_start(int *binStart, int *binEnd, int *partBin, int nparts) { // This kernel function was adapted from NVIDIA CUDA 5.5 Examples // This software contains source code provided by NVIDIA Corporation extern __shared__ int sharedBin[]; //blockSize + 1 int index = threadIdx.x + blockIdx.x*blockDim.x; int bin; // for a given bin index, the previous bins's index is stored in sharedBin if (index < nparts) { bin = partBin[index]; // Load bin data into shared memory so that we can look // at neighboring particle's hash value without loading // two bin values per thread sharedBin[threadIdx.x + 1] = bin; if (index > 0 && threadIdx.x == 0) { // first thread in block must load neighbor particle bin sharedBin[0] = partBin[index - 1]; } } __syncthreads(); if (index < nparts) { // If this particle has a different cell index to the previous // particle then it must be the first particle in the cell, // so store the index of this particle in the cell. 
// As it isn't the first particle, it must also be the cell end of // the previous particle's cell bin = partBin[index]; if (index == 0 || bin != sharedBin[threadIdx.x]) { binStart[bin] = index; if (index > 0) binEnd[sharedBin[threadIdx.x]] = index; } if (index == nparts - 1) { binEnd[bin] = index + 1; } } } __global__ void collision_parts(part_struct *parts, int nparts, dom_struct *dom, real eps, real mu, real rhof, real nu, BC bc, int *binStart, int *binEnd, int *partBin, int *partInd, dom_struct *binDom, int interactionLengthRatio, real dt) { int index = threadIdx.x + blockIdx.x*blockDim.x; if (index < nparts) { int i = partInd[index]; int bin = partBin[index]; int kbin = floorf(bin/binDom->Gcc.s2); int jbin = floorf((bin - kbin*binDom->Gcc.s2)/binDom->Gcc.s1); int ibin = bin - kbin*binDom->Gcc.s2 - jbin*binDom->Gcc.s1; int l, m, n; // adjacent bin iterators int target, j; // target indices int adjBin, adjStart, adjEnd; // adjacent bin stuff int iStride, kStride, jStride; // how to get to adjacent bin int q; // iterator // predefine face locations // -1, -2 due to local vs global indexing and defiinition of dom_struct int fW = binDom->Gcc.is - 1; int fE = binDom->Gcc.ie - 2; int fS = binDom->Gcc.js - 1; int fN = binDom->Gcc.je - 2; int fB = binDom->Gcc.ks - 1; int fT = binDom->Gcc.ke - 2; // size checks int xnBin = (binDom->xn > 2); int ynBin = (binDom->yn > 2); int znBin = (binDom->zn > 2); // loop over adjacent bins and take care of periodic conditions for (n = -1; n <= 1; n++) { // if on a face and not periodic, continue // if on a face and periodic but only 2 bins, continue if ((n == -1 && kbin == fB && bc.uB != PERIODIC) || (n == 1 && kbin == fT && bc.uT != PERIODIC) || (n == -1 && kbin == fB && bc.uB == PERIODIC && znBin == 0) || (n == 1 && kbin == fT && bc.uT == PERIODIC && znBin == 0)) { continue; // if on a face and periodic, flip to other side } else if (n == -1 && kbin == fB && bc.uB == PERIODIC) { kStride = fT*binDom->Gcc.s2; } else if (n == 1 && kbin == fT && bc.uT == PERIODIC) { kStride = fB*binDom->Gcc.s2; // else, we are in the middle, do nothing special } else { kStride = (kbin + n)*binDom->Gcc.s2; } for (m = -1; m <= 1; m++) { if ((m == -1 && jbin == fS && bc.uS != PERIODIC) || (m == 1 && jbin == fN && bc.uN != PERIODIC) || (m == -1 && jbin == fS && bc.uS == PERIODIC && ynBin == 0) || (m == 1 && jbin == fN && bc.uN == PERIODIC && ynBin == 0)) { continue; } else if (m == -1 && jbin == fS && bc.uS == PERIODIC) { jStride = fN*binDom->Gcc.s1; } else if (m == 1 && jbin == fN && bc.uN == PERIODIC) { jStride = fS*binDom->Gcc.s1; } else { jStride = (jbin + m)*binDom->Gcc.s1; } for (l = -1; l <= 1; l++) { if ((l == -1 && ibin == fW && bc.uW != PERIODIC) || (l == 1 && ibin == fE && bc.uE != PERIODIC) || (l == -1 && ibin == fW && bc.uW == PERIODIC && xnBin == 0) || (l == 1 && ibin == fE && bc.uE == PERIODIC && xnBin == 0)) { continue; } else if (l == -1 && ibin == fW && bc.uW == PERIODIC) { iStride = fE; } else if (l == 1 && ibin == fE && bc.uE == PERIODIC) { iStride = fW; } else { iStride = ibin + l; } adjBin = iStride + jStride + kStride; adjStart = binStart[adjBin]; // find start and end of bins adjEnd = binEnd[adjBin]; if (adjStart != -1) { // if bin is not empty for (target = adjStart; target < adjEnd; target++) { j = partInd[target]; if (j != i) { // if its not original part // calculate forces real ai = parts[i].r; real aj = parts[j].r; real B = aj / ai; real hN = interactionLengthRatio * parts[i].r; real ux, uy, uz; real rx, rx1, rx2, ry, ry1, ry2, rz, rz1, rz2, r; real 
h, ah, lnah; real nx, ny, nz, udotn; real unx, uny, unz, utx, uty, utz, ut; real tx, ty, tz, t, bx, by, bz, b; real omegax, omegay, omegaz, omega; real ocrossnx, ocrossny, ocrossnz; real utcrossnx, utcrossny, utcrossnz; real opB; real Fnx, Fny, Fnz, Ftx, Fty, Ftz, Lox, Loy, Loz; real xi = parts[i].x; real xj = parts[j].x; // check for neighbors across the domain when using periodic // boundaries rx = xi - xj; rx1 = xi - (xj + dom->xl); rx2 = xi - (xj - dom->xl); if(rx1*rx1 < rx*rx) rx = rx1; if(rx2*rx2 < rx*rx) rx = rx2; rx = (bc.uW == PERIODIC) * rx + (bc.uW != PERIODIC) * (xi - xj); real yi = parts[i].y; real yj = parts[j].y; // check for neighbors across the domain when using periodic // boundaries ry = yi - yj; ry1 = yi - (yj + dom->yl); ry2 = yi - (yj - dom->yl); if(ry1*ry1 < ry*ry) ry = ry1; if(ry2*ry2 < ry*ry) ry = ry2; ry = (bc.vS == PERIODIC) * ry + (bc.vS != PERIODIC) * (yi - yj); real zi = parts[i].z; real zj = parts[j].z; // check for neighbors across the domain when using periodic // boundaries rz = zi - zj; rz1 = zi - (zj + dom->zl); rz2 = zi - (zj - dom->zl); if(rz1*rz1 < rz*rz) rz = rz1; if(rz2*rz2 < rz*rz) rz = rz2; rz = (bc.wB == PERIODIC) * rz + (bc.wB != PERIODIC) * (zi - zj); ux = 0.5*((parts[i].u - parts[j].u) + (parts[i].u0 - parts[j].u0)); uy = 0.5*((parts[i].v - parts[j].v) + (parts[i].v0 - parts[j].v0)); uz = 0.5*((parts[i].w - parts[j].w) + (parts[i].w0 - parts[j].w0)); r = sqrt(rx*rx + ry*ry + rz*rz); omegax = 0.5*((parts[i].ox + parts[j].ox) + (parts[i].ox0 + parts[j].ox0)); omegay = 0.5*((parts[i].oy + parts[j].oy) + (parts[i].oy0 + parts[j].oy0)); omegaz = 0.5*((parts[i].oz + parts[j].oz) + (parts[i].oz0 + parts[j].oz0)); omega = sqrt(omegax*omegax + omegay*omegay + omegaz*omegaz); h = r - ai - aj; nx = rx / r; ny = ry / r; nz = rz / r; udotn = ux * nx + uy * ny + uz * nz; unx = udotn * nx; uny = udotn * ny; unz = udotn * nz; utx = ux - unx; uty = uy - uny; utz = uz - unz; ut = sqrt(utx*utx + uty*uty + utz*utz); if(ut > 0) { tx = utx / ut; ty = uty / ut; tz = utz / ut; bx = ny*tz - nz*ty; by = -nx*tz + nz*tx; bz = nx*ty - ny*tx; b = sqrt(bx*bx + by*by + bz*bz); bx = bx / b; by = by / b; bz = bz / b; } else if(omega > 0) { bx = omegax / omega; by = omegay / omega; bz = omegaz / omega; tx = by*nz - bz*ny; ty = -bx*nz + bz*nx; tz = bx*ny - by*nx; t = sqrt(tx*tx + ty*ty + tz*tz); tx = tx / t; ty = ty / t; tz = tz / t; } else { tx = 1.; ty = 0.; tz = 0.; bx = ny*tz - nz*ty; by = -nx*tz + nz*tx; bz = nx*ty - ny*tx; b = sqrt(bx*bx + by*by + bz*bz); bx = bx / b; by = by / b; bz = bz / b; } opB = 1 + B; ocrossnx = omegay*nz - omegaz*ny; ocrossny = -omegax*nz + omegaz*nx; ocrossnz = omegax*ny - omegay*nx; utcrossnx = uty*nz - utz*ny; utcrossny = -utx*nz + utz*nx; utcrossnz = utx*ny - uty*nx; if(h < hN && h > 0) { // remove contact from list if it is there q = 0; while(parts[i].iSt[q] != j && q < MAX_NEIGHBORS) { q++; } if(parts[i].iSt[q] == j) { parts[i].iSt[q] = -1; parts[i].St[q] = 0.; } if(h < eps*parts[i].r) h = eps*parts[i].r; ah = ai/h - ai/hN; lnah = log(hN/h); Fnx = -1. 
* B*B / (opB*opB) * ah - B*(1.+7.*B+B*B)/(5.*opB*opB*opB)*lnah; Fny = Fnx; Fnz = Fnx; Fnx *= 6.*PI*mu*ai*unx; Fny *= 6.*PI*mu*ai*uny; Fnz *= 6.*PI*mu*ai*unz; Ftx = -6.*PI*mu*ai*utx*4.*B*(2.+B+2.*B*B) /(15.*opB*opB*opB)*lnah; Fty = -6.*PI*mu*ai*uty*4.*B*(2.+B+2.*B*B) /(15.*opB*opB*opB)*lnah; Ftz = -6.*PI*mu*ai*utz*4.*B*(2.+B+2.*B*B) /(15.*opB*opB*opB)*lnah; Ftx += 8.*PI*mu*ai*ai*ocrossnx*B*(4.+B)/(10.*opB*opB)*lnah; Fty += 8.*PI*mu*ai*ai*ocrossny*B*(4.+B)/(10.*opB*opB)*lnah; Ftz += 8.*PI*mu*ai*ai*ocrossnz*B*(4.+B)/(10.*opB*opB)*lnah; Lox = -8.*PI*mu*ai*ai*utcrossnx*B*(4.+B)/(10.*opB*opB)*lnah; Loy = -8.*PI*mu*ai*ai*utcrossny*B*(4.+B)/(10.*opB*opB)*lnah; Loz = -8.*PI*mu*ai*ai*utcrossnz*B*(4.+B)/(10.*opB*opB)*lnah; Lox += -8.*PI*mu*ai*ai*ai*omegax*2.*B/(5.*opB)*lnah; Loy += -8.*PI*mu*ai*ai*ai*omegay*2.*B/(5.*opB)*lnah; Loz += -8.*PI*mu*ai*ai*ai*omegaz*2.*B/(5.*opB)*lnah; } else { ah = 0; lnah = 0; Fnx = 0; Fny = 0; Fnz = 0; Ftx = 0; Fty = 0; Ftz = 0; Lox = 0; Loy = 0; Loz = 0; } if(h < 0) { // determine whether this is a new contact q = 0; while(parts[i].iSt[q] != j && q < MAX_NEIGHBORS) { q++; } if(q == MAX_NEIGHBORS) { q = 0; while(parts[i].iSt[q] != -1) { q++; } parts[i].iSt[q] = j; parts[i].St[q] = 1./9.*parts[i].rho/rhof*2.*parts[i].r *fabs(udotn)/nu; } real Vx = -utx + 0.5*(ai + aj + h)*ocrossnx; real Vy = -uty + 0.5*(ai + aj + h)*ocrossny; real Vz = -utz + 0.5*(ai + aj + h)*ocrossnz; real Hi = 0.5*parts[i].E/(1.+parts[i].sigma); real kt = 8./((1.-parts[i].sigma*parts[i].sigma)/Hi +(1.-parts[i].sigma*parts[i].sigma)/Hi)/sqrt(1./ai) *sqrt(-h); real sx = (Vx - Vx * nx) * dt; real sy = (Vy - Vy * ny) * dt; real sz = (Vz - Vz * nz) * dt; ah = 0; lnah = 0; real k = 4./3./((1.-parts[i].sigma*parts[i].sigma)/parts[i].E + (1.-parts[j].sigma*parts[j].sigma)/parts[j].E) /sqrt(1./ai + 1./aj); // estimate damping coefficient real xcx0 = 1.e-4; real e = parts[i].e_dry + (1.+parts[i].e_dry)/parts[i].St[q] *log(xcx0); if(e < 0) e = 0; real alpha = -2.263*pow(e,0.3948)+2.22; real eta = alpha*sqrt(4./3.*PI*ai*ai*ai*parts[i].rho *k*sqrt(-h)); // normal contact forces Fnx = (sqrt(-h*h*h)*k - eta*udotn)*nx; Fny = (sqrt(-h*h*h)*k - eta*udotn)*ny; Fnz = (sqrt(-h*h*h)*k - eta*udotn)*nz; // tangential contact forces real coeff_fric = 0.5 * (parts[i].coeff_fric + parts[j].coeff_fric); Ftx = -kt * sx; Fty = -kt * sy; Ftz = -kt * sz; Ftx = Ftx - Ftx * nx; Fty = Fty - Fty * ny; Ftz = Ftz - Ftz * nz; real Ft = sqrt(Ftx*Ftx + Fty*Fty + Ftz*Ftz); real Fn = sqrt(Fnx*Fnx + Fny*Fny + Fnz*Fnz); if(Ft > coeff_fric * Fn) { Ftx = coeff_fric * Fn * Ftx / Ft; Fty = coeff_fric * Fn * Fty / Ft; Ftz = coeff_fric * Fn * Ftz / Ft; } Lox = -(ai+0.5*h)*((Fny+Fty)*nz-(Fnz+Ftz)*ny); Loy = (ai+0.5*h)*((Fnx+Ftx)*nz-(Fnz+Ftz)*nx); Loz = -(ai+0.5*h)*((Fnx+Ftx)*ny-(Fny+Fty)*nx); } // assign forces parts[i].iFx += Fnx + Ftx; parts[i].iFy += Fny + Fty; parts[i].iFz += Fnz + Ftz; parts[i].iLx += Lox; parts[i].iLy += Loy; parts[i].iLz += Loz; } } } } } } } } __global__ void collision_walls(dom_struct *dom, part_struct *parts, int nparts, BC bc, real eps, real mu, real rhof, real nu, int interactionLengthRatio, real dt) { int i = threadIdx.x + blockIdx.x*blockDim.x; /**** parallelize this further by using a CUDA block for each wall ****/ int q; // iterator if(i < nparts) { real dx = 0; real dy = 0; real dz = 0; real Un, Utx, Uty, Utz; real omx, omy, omz; real ai = parts[i].r; real h = 0; real hN = interactionLengthRatio * parts[i].r; real ah, lnah; real Fnx, Fny, Fnz, Ftx, Fty, Ftz; real Lox, Loy, Loz; int isTrue = 0; // west wall dx = 
fabs(parts[i].x - (dom->xs + bc.dsW)); h = dx - ai; isTrue = (bc.pW == NEUMANN); // collision force applied ifTrue if(h < hN && h > 0) { // remove from contact list if it is there q = 0; while(parts[i].iSt[q] != -10 && q < MAX_NEIGHBORS) { q++; } if(parts[i].iSt[q] == -10) { parts[i].iSt[q] = -1; parts[i].St[q] = 0.; } if(h < eps*parts[i].r) h = eps*parts[i].r; ah = ai/h - ai/hN; lnah = log(hN/h); Un = parts[i].u - bc.uWD; Utx = 0.; Uty = parts[i].v - bc.vWD; Utz = parts[i].w - bc.wWD; omx = parts[i].ox; omy = parts[i].oy; omz = parts[i].oz; Fnx = -6.*PI*mu*ai*Un*ah; Fny = 0.; Fnz = 0.; Ftx = 0.; Fty = -6.*PI*mu*ai*Uty*8./15.*lnah; Ftz = -6.*PI*mu*ai*Utz*8./15.*lnah; Ftx += 0.; Fty += 8.*PI*mu*ai*ai*omz*1./10.*lnah; Ftz += -8.*PI*mu*ai*ai*omy*1./10.*lnah; Lox = 0.; Loy = -8.*PI*mu*ai*ai*Utz*1./10.*lnah; Loz = 8.*PI*mu*ai*ai*Uty*1./10.*lnah; Lox += 0.; Loy += -8.*PI*mu*ai*ai*ai*omy*2./5.*lnah; Loz += -8.*PI*mu*ai*ai*ai*omz*2./5.*lnah; parts[i].iFx += isTrue * (Fnx + Ftx); parts[i].iFy += isTrue * (Fny + Fty); parts[i].iFz += isTrue * (Fnz + Ftz); parts[i].iLx += isTrue * Lox; parts[i].iLy += isTrue * Loy; parts[i].iLz += isTrue * Loz; } if(h < 0) { Un = parts[i].u - bc.uWD; real Uty = 0.5*(parts[i].v+parts[i].v0) - bc.vSD; real Utz = 0.5*(parts[i].w+parts[i].w0) - bc.wSD; // determine whether this is a new contact q = 0; while(parts[i].iSt[q] != -10 && q < MAX_NEIGHBORS) { q++; } if(q == MAX_NEIGHBORS) { q = 0; while(parts[i].iSt[q] != -1) { q++; } parts[i].iSt[q] = -10; parts[i].St[q] = 1./9.*parts[i].rho/rhof*2.*parts[i].r*fabs(Un)/nu; } omx = 0.5*(parts[i].ox+parts[i].ox0); omy = 0.5*(parts[i].oy+parts[i].oy0); omz = 0.5*(parts[i].oz+parts[i].oz0); real Vy = Uty - (ai + 0.5*h)*omz; real Vz = Utz + (ai + 0.5*h)*omx; real Hi = 0.5*parts[i].E/(1.+parts[i].sigma); real kt = 8./((1.-parts[i].sigma*parts[i].sigma)/Hi +(1.-parts[i].sigma*parts[i].sigma)/Hi)/sqrt(1./ai)*sqrt(-h); real sy = Vy * dt; real sz = Vz * dt; lnah = 0; real k = 4./3./((1.-parts[i].sigma*parts[i].sigma)/parts[i].E + (1.-parts[i].sigma*parts[i].sigma)/parts[i].E)/sqrt(1./ai); // estimate damping coefficient real xcx0 = 1.e-4; real e = parts[i].e_dry + (1.+parts[i].e_dry)/parts[i].St[q]*log(xcx0); if(e < 0) e = 0; real alpha = -2.263*pow(e,0.3948)+2.22; real eta = alpha*sqrt(4./3.*PI*ai*ai*ai*parts[i].rho*k*sqrt(-h)); // use the same coeff_fric for particle and wall real coeff_fric = parts[i].coeff_fric; real Fty = -kt * sy; real Ftz = -kt * sz; real Ft = sqrt(Fty*Fty + Ftz*Ftz); if(Ft > fabs(coeff_fric * (sqrt(-h*h*h)*k - eta*Un))) { Fty = coeff_fric * (sqrt(-h*h*h)*k - eta*Un) * Fty / Ft; Ftz = coeff_fric * (sqrt(-h*h*h)*k - eta*Un) * Ftz / Ft; } parts[i].iFx += isTrue * (sqrt(-h*h*h)*k - eta*Un); parts[i].iFy += isTrue * Fty; parts[i].iFz += isTrue * Ftz; parts[i].iLy += isTrue * (ai+0.5*h) * Ftz; parts[i].iLz -= isTrue * (ai+0.5*h) * Fty; } // east wall dx = fabs(parts[i].x - (dom->xe - bc.dsE)); h = dx - ai; isTrue = (bc.pE == NEUMANN); if(h < hN && h > 0) { // remove from contact list if it is there q = 0; while(parts[i].iSt[q] != -11 && q < MAX_NEIGHBORS) { q++; } if(parts[i].iSt[q] == -11) { parts[i].iSt[q] = -1; parts[i].St[q] = 0.; } if(h < eps*parts[i].r) h = eps*parts[i].r; ah = ai/h - ai/hN; lnah = log(hN/h); Un = parts[i].u - bc.uED; Utx = 0.; Uty = parts[i].v - bc.vED; Utz = parts[i].w - bc.wED; omx = parts[i].ox; omy = parts[i].oy; omz = parts[i].oz; Fnx = -6.*PI*mu*ai*Un*ah; Fny = 0.; Fnz = 0.; Ftx = 0.; Fty = -6.*PI*mu*ai*Uty*8./15.*lnah; Ftz = -6.*PI*mu*ai*Utz*8./15.*lnah; Ftx += 0.; Fty += 
-8.*PI*mu*ai*ai*omz*1./10.*lnah; Ftz += 8.*PI*mu*ai*ai*omy*1./10.*lnah; Lox = 0.; Loy = 8.*PI*mu*ai*ai*Utz*1./10.*lnah; Loz = -8.*PI*mu*ai*ai*Uty*1./10.*lnah; Lox += 0.; Loy += -8.*PI*mu*ai*ai*ai*omy*2./5.*lnah; Loz += -8.*PI*mu*ai*ai*ai*omz*2./5.*lnah; parts[i].iFx += isTrue * (Fnx + Ftx); parts[i].iFy += isTrue * (Fny + Fty); parts[i].iFz += isTrue * (Fnz + Ftz); parts[i].iLx += isTrue * Lox; parts[i].iLy += isTrue * Loy; parts[i].iLz += isTrue * Loz; } if(h < 0) { Un = -(parts[i].u - bc.uED); real Uty = 0.5*(parts[i].v+parts[i].v0) - bc.vSD; real Utz = 0.5*(parts[i].w+parts[i].w0) - bc.wSD; // determine whether this is a new contact q = 0; while(parts[i].iSt[q] != -11 && q < MAX_NEIGHBORS) { q++; } if(q == MAX_NEIGHBORS) { q = 0; while(parts[i].iSt[q] != -1) { q++; } parts[i].iSt[q] = -11; parts[i].St[q] = 1./9.*parts[i].rho/rhof*2.*parts[i].r*fabs(Un)/nu; } omx = 0.5*(parts[i].ox+parts[i].ox0); omy = 0.5*(parts[i].oy+parts[i].oy0); omz = 0.5*(parts[i].oz+parts[i].oz0); real Vy = -(Uty - (ai + 0.5*h)*omz); real Vz = -(Utz + (ai + 0.5*h)*omx); real Hi = 0.5*parts[i].E/(1.+parts[i].sigma); real kt = 8./((1.-parts[i].sigma*parts[i].sigma)/Hi +(1.-parts[i].sigma*parts[i].sigma)/Hi)/sqrt(1./ai)*sqrt(-h); real sy = Vy * dt; real sz = Vz * dt; lnah = 0; real k = 4./3./((1.-parts[i].sigma*parts[i].sigma)/parts[i].E + (1.-parts[i].sigma*parts[i].sigma)/parts[i].E)/sqrt(1./ai); // estimate damping coefficient real xcx0 = 1.e-4; real e = parts[i].e_dry + (1.+parts[i].e_dry)/parts[i].St[q]*log(xcx0); if(e < 0) e = 0; real alpha = -2.263*pow(e,0.3948)+2.22; real eta = alpha*sqrt(4./3.*PI*ai*ai*ai*parts[i].rho*k*sqrt(-h)); // use the same coeff_fric for particle and wall real coeff_fric = parts[i].coeff_fric; real Fty = -kt * sy; real Ftz = -kt * sz; real Ft = sqrt(Fty*Fty + Ftz*Ftz); if(Ft > fabs(coeff_fric * (sqrt(-h*h*h)*k - eta*Un))) { Fty = coeff_fric * (sqrt(-h*h*h)*k - eta*Un) * Fty / Ft; Ftz = coeff_fric * (sqrt(-h*h*h)*k - eta*Un) * Ftz / Ft; } parts[i].iFx -= isTrue * (sqrt(-h*h*h)*k - eta*Un); parts[i].iFy += isTrue * Fty; parts[i].iFz += isTrue * Ftz; parts[i].iLy -= isTrue * (ai+0.5*h) * Ftz; parts[i].iLz += isTrue * (ai+0.5*h) * Ftx; } // south wall dy = fabs(parts[i].y - (dom->ys + bc.dsS)); h = dy - ai; isTrue = (bc.pS == NEUMANN); if(h < hN && h > 0) { // remove from contact list if it is there q = 0; while(parts[i].iSt[q] != -12 && q < MAX_NEIGHBORS) { q++; } if(parts[i].iSt[q] == -12) { parts[i].iSt[q] = -1; parts[i].St[q] = 0.; } if(h < eps*parts[i].r) h = eps*parts[i].r; ah = ai/h - ai/hN; lnah = log(hN/h); Un = parts[i].v - bc.vSD; Utx = parts[i].u - bc.uSD; Uty = 0.; Utz = parts[i].w - bc.wSD; omx = parts[i].ox; omy = parts[i].oy; omz = parts[i].oz; Fnx = 0.; Fny = -6.*PI*mu*ai*Un*ah; Fnz = 0.; Ftx = -6.*PI*mu*ai*Utx*8./15.*lnah; Fty = 0.; Ftz = -6.*PI*mu*ai*Utz*8./15.*lnah; Ftx += -8.*PI*mu*ai*ai*omz*1./10.*lnah; Fty += 0.; Ftz += 8.*PI*mu*ai*ai*omx*1./10.*lnah; Lox = 8.*PI*mu*ai*ai*Utz*1./10.*lnah; Loy = 0.; Loz = -8.*PI*mu*ai*ai*Utx*1./10.*lnah; Lox += -8.*PI*mu*ai*ai*ai*omx*2./5.*lnah; Loy += 0.; Loz += -8.*PI*mu*ai*ai*ai*omz*2./5.*lnah; parts[i].iFx += isTrue * (Fnx + Ftx); parts[i].iFy += isTrue * (Fny + Fty); parts[i].iFz += isTrue * (Fnz + Ftz); parts[i].iLx += isTrue * Lox; parts[i].iLy += isTrue * Loy; parts[i].iLz += isTrue * Loz; } if(h < 0) { Un = parts[i].v - bc.vSD; real Utx = 0.5*(parts[i].u+parts[i].u0) - bc.uSD; real Utz = 0.5*(parts[i].w+parts[i].w0) - bc.wSD; // determine whether this is a new contact q = 0; while(parts[i].iSt[q] != -12 && q < MAX_NEIGHBORS) 
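        // walk the contact-history list for an existing entry for this wall
        // (walls use negative tags; the south wall is tagged -12)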
{ q++; } if(q == MAX_NEIGHBORS) { q = 0; while(parts[i].iSt[q] != -1) { q++; } parts[i].iSt[q] = -12; parts[i].St[q] = 1./9.*parts[i].rho/rhof*2.*parts[i].r*fabs(Un)/nu; } omx = 0.5*(parts[i].ox+parts[i].ox0); omy = 0.5*(parts[i].oy+parts[i].oy0); omz = 0.5*(parts[i].oz+parts[i].oz0); real Vx = Utx + (ai + 0.5*h)*omz; real Vz = Utz - (ai + 0.5*h)*omx; real Hi = 0.5*parts[i].E/(1.+parts[i].sigma); real kt = 8./((1.-parts[i].sigma*parts[i].sigma)/Hi +(1.-parts[i].sigma*parts[i].sigma)/Hi)/sqrt(1./ai)*sqrt(-h); real sx = Vx * dt; real sz = Vz * dt; lnah = 0; real k = 4./3./((1.-parts[i].sigma*parts[i].sigma)/parts[i].E + (1.-parts[i].sigma*parts[i].sigma)/parts[i].E)/sqrt(1./ai); // estimate damping coefficient real xcx0 = 1.e-4; real e = parts[i].e_dry + (1.+parts[i].e_dry)/parts[i].St[q]*log(xcx0); if(e < 0) e = 0; real alpha = -2.263*pow(e,0.3948)+2.22; real eta = alpha*sqrt(4./3.*PI*ai*ai*ai*parts[i].rho*k*sqrt(-h)); // use the same coeff_fric for particle and wall real coeff_fric = parts[i].coeff_fric; real Ftx = -kt * sx; real Ftz = -kt * sz; real Ft = sqrt(Ftx*Ftx + Ftz*Ftz); if(Ft > fabs(coeff_fric * (sqrt(-h*h*h)*k - eta*Un))) { Ftx = coeff_fric * (sqrt(-h*h*h)*k - eta*Un) * Ftx / Ft; Ftz = coeff_fric * (sqrt(-h*h*h)*k - eta*Un) * Ftz / Ft; } parts[i].iFx += isTrue * Ftx; parts[i].iFy += isTrue * (sqrt(-h*h*h)*k - eta*Un); parts[i].iFz += isTrue * Ftz; parts[i].iLx -= isTrue * (ai+0.5*h) * Ftz; parts[i].iLz += isTrue * (ai+0.5*h) * Ftx; } // north wall dy = fabs(parts[i].y - (dom->ye - bc.dsN)); h = dy - ai; isTrue = (bc.pN == NEUMANN); if(h < hN && h > 0) { // remove from contact list if it is there q = 0; while(parts[i].iSt[q] != -13 && q < MAX_NEIGHBORS) { q++; } if(parts[i].iSt[q] == -13) { parts[i].iSt[q] = -1; parts[i].St[q] = 0.; } if(h < eps*parts[i].r) h = eps*parts[i].r; ah = ai/h - ai/hN; lnah = log(hN/h); Un = parts[i].v - bc.vND; Utx = parts[i].u - bc.uND; Uty = 0.; Utz = parts[i].w - bc.wND; omx = parts[i].ox; omy = parts[i].oy; omz = parts[i].oz; Fnx = 0.; Fny = -6.*PI*mu*ai*Un*ah; Fnz = 0.; Ftx = -6.*PI*mu*ai*Utx*8./15.*lnah; Fty = 0.; Ftz = -6.*PI*mu*ai*Utz*8./15.*lnah; Ftx += 8.*PI*mu*ai*ai*omz*1./10.*lnah; Fty += 0.; Ftz += -8.*PI*mu*ai*ai*omx*1./10.*lnah; Lox = -8.*PI*mu*ai*ai*Utz*1./10.*lnah; Loy = 0.; Loz = 8.*PI*mu*ai*ai*Utx*1./10.*lnah; Lox += -8.*PI*mu*ai*ai*ai*omx*2./5.*lnah; Loy += 0.; Loz += -8.*PI*mu*ai*ai*ai*omz*2./5.*lnah; parts[i].iFx += isTrue * (Fnx + Ftx); parts[i].iFy += isTrue * (Fny + Fty); parts[i].iFz += isTrue * (Fnz + Ftz); parts[i].iLx += isTrue * Lox; parts[i].iLy += isTrue * Loy; parts[i].iLz += isTrue * Loz; } if(h < 0) { Un = -(parts[i].v - bc.vND); real Utx = 0.5*(parts[i].u+parts[i].u0) - bc.uSD; real Utz = 0.5*(parts[i].w+parts[i].w0) - bc.wSD; // determine whether this is a new contact q = 0; while(parts[i].iSt[q] != -13 && q < MAX_NEIGHBORS) { q++; } if(q == MAX_NEIGHBORS) { q = 0; while(parts[i].iSt[q] != -1) { q++; } parts[i].iSt[q] = -13; parts[i].St[q] = 1./9.*parts[i].rho/rhof*2.*parts[i].r*fabs(Un)/nu; } omx = 0.5*(parts[i].ox+parts[i].ox0); omy = 0.5*(parts[i].oy+parts[i].oy0); omz = 0.5*(parts[i].oz+parts[i].oz0); real Vx = -(Utx + (ai + 0.5*h)*omz); real Vz = -(Utz - (ai + 0.5*h)*omx); real Hi = 0.5*parts[i].E/(1.+parts[i].sigma); real kt = 8./((1.-parts[i].sigma*parts[i].sigma)/Hi +(1.-parts[i].sigma*parts[i].sigma)/Hi)/sqrt(1./ai)*sqrt(-h); real sx = Vx * dt; real sz = Vz * dt; lnah = 0; real k = 4./3./((1.-parts[i].sigma*parts[i].sigma)/parts[i].E + (1.-parts[i].sigma*parts[i].sigma)/parts[i].E)/sqrt(1./ai); // estimate 
damping coefficient real xcx0 = 1.e-4; real e = parts[i].e_dry + (1.+parts[i].e_dry)/parts[i].St[q]*log(xcx0); if(e < 0) e = 0; real alpha = -2.263*pow(e,0.3948)+2.22; real eta = alpha*sqrt(4./3.*PI*ai*ai*ai*parts[i].rho*k*sqrt(-h)); // use the same coeff_fric for particle and wall real coeff_fric = parts[i].coeff_fric; real Ftx = -kt * sx; real Ftz = -kt * sz; real Ft = sqrt(Ftx*Ftx + Ftz*Ftz); if(Ft > fabs(coeff_fric * (sqrt(-h*h*h)*k - eta*Un))) { Ftx = coeff_fric * (sqrt(-h*h*h)*k - eta*Un) * Ftx / Ft; Ftz = coeff_fric * (sqrt(-h*h*h)*k - eta*Un) * Ftz / Ft; } parts[i].iFx += isTrue * Ftx; parts[i].iFy -= isTrue * (sqrt(-h*h*h)*k - eta*Un); parts[i].iFz += isTrue * Ftz; parts[i].iLx += isTrue * (ai+0.5*h) * Ftz; parts[i].iLz -= isTrue * (ai+0.5*h) * Ftx; } // bottom wall dz = fabs(parts[i].z - (dom->zs + bc.dsB)); h = dz - ai; isTrue = (bc.pB == NEUMANN); if(h < hN && h > 0) { // remove from contact list if it is there q = 0; while(parts[i].iSt[q] != -14 && q < MAX_NEIGHBORS) { q++; } if(parts[i].iSt[q] == -14) { parts[i].iSt[q] = -1; parts[i].St[q] = 0.; } if(h < eps*parts[i].r) h = eps*parts[i].r; ah = ai/h - ai/hN; lnah = log(hN/h); Un = parts[i].w - bc.wBD; Utx = parts[i].u - bc.uBD; Uty = parts[i].v - bc.vBD; Utz = 0.; omx = parts[i].ox; omy = parts[i].oy; omz = parts[i].oz; Fnx = 0.; Fny = 0.; Fnz = -6.*PI*mu*ai*Un*ah; Ftx = -6.*PI*mu*ai*Utx*8./15.*lnah; Fty = -6.*PI*mu*ai*Uty*8./15.*lnah; Ftz = 0.; Ftx += 8.*PI*mu*ai*ai*omy*1./10.*lnah; Fty += -8.*PI*mu*ai*ai*omx*1./10.*lnah; Ftz += 0.; Lox = -8.*PI*mu*ai*ai*Uty*1./10.*lnah; Loy = 8.*PI*mu*ai*ai*Utx*1./10.*lnah; Loz = 0.; Lox += -8.*PI*mu*ai*ai*ai*omx*2./5.*lnah; Loy += -8.*PI*mu*ai*ai*ai*omy*2./5.*lnah; Loz += 0.; parts[i].iFx += isTrue * (Fnx + Ftx); parts[i].iFy += isTrue * (Fny + Fty); parts[i].iFz += isTrue * (Fnz + Ftz); parts[i].iLx += isTrue * Lox; parts[i].iLy += isTrue * Loy; parts[i].iLz += isTrue * Loz; } if(h < 0) { Un = parts[i].w - bc.wBD; real Utx = 0.5*(parts[i].u+parts[i].u0) - bc.uSD; real Uty = 0.5*(parts[i].v+parts[i].v0) - bc.vSD; // determine whether this is a new contact q = 0; while(parts[i].iSt[q] != -14 && q < MAX_NEIGHBORS) { q++; } if(q == MAX_NEIGHBORS) { q = 0; while(parts[i].iSt[q] != -1) { q++; } parts[i].iSt[q] = -14; parts[i].St[q] = 1./9.*parts[i].rho/rhof*2.*parts[i].r*fabs(Un)/nu; } omx = 0.5*(parts[i].ox+parts[i].ox0); omy = 0.5*(parts[i].oy+parts[i].oy0); omz = 0.5*(parts[i].oz+parts[i].oz0); real Vx = Utx - (ai + 0.5*h)*omy; real Vy = Uty + (ai + 0.5*h)*omx; real Hi = 0.5*parts[i].E/(1.+parts[i].sigma); real kt = 8./((1.-parts[i].sigma*parts[i].sigma)/Hi +(1.-parts[i].sigma*parts[i].sigma)/Hi)/sqrt(1./ai)*sqrt(-h); real sx = Vx * dt; real sy = Vy * dt; lnah = 0; real k = 4./3./((1.-parts[i].sigma*parts[i].sigma)/parts[i].E + (1.-parts[i].sigma*parts[i].sigma)/parts[i].E)/sqrt(1./ai); // estimate damping coefficient real xcx0 = 1.e-4; real e = parts[i].e_dry + (1.+parts[i].e_dry)/parts[i].St[q]*log(xcx0); if(e < 0) e = 0; real alpha = -2.263*pow(e,0.3948)+2.22; real eta = alpha*sqrt(4./3.*PI*ai*ai*ai*parts[i].rho*k*sqrt(-h)); // use the same coeff_fric for particle and wall real coeff_fric = parts[i].coeff_fric; real Ftx = -kt * sx; real Fty = -kt * sy; real Ft = sqrt(Ftx*Ftx + Fty*Fty); if(Ft > fabs(coeff_fric * (sqrt(-h*h*h)*k - eta*Un))) { Ftx = coeff_fric * (sqrt(-h*h*h)*k - eta*Un) * Ftx / Ft; Fty = coeff_fric * (sqrt(-h*h*h)*k - eta*Un) * Fty / Ft; } parts[i].iFx += isTrue * Ftx; parts[i].iFy += isTrue * Fty; parts[i].iFz += isTrue * (sqrt(-h*h*h)*k - eta*Un); parts[i].iLx += isTrue 
* (ai+0.5*h) * Fty; parts[i].iLy -= isTrue * (ai+0.5*h) * Ftx; } // top wall dz = fabs(parts[i].z - (dom->ze - bc.dsT)); h = dz - ai; isTrue = (bc.pT == NEUMANN); if(h < hN && h > 0) { // remove from contact list if it is there q = 0; while(parts[i].iSt[q] != -15 && q < MAX_NEIGHBORS) { q++; } if(parts[i].iSt[q] == -15) { parts[i].iSt[q] = -1; parts[i].St[q] = 0.; } if(h < eps*parts[i].r) h = eps*parts[i].r; ah = ai/h - ai/hN; lnah = log(hN/h); Un = parts[i].w - bc.wTD; Utx = parts[i].u - bc.uTD; Uty = parts[i].v - bc.vTD; Utz = 0.; omx = parts[i].ox; omy = parts[i].oy; omz = parts[i].oz; Fnx = 0.; Fny = 0.; Fnz = -6.*PI*mu*ai*Un*ah; Ftx = -6.*PI*mu*ai*Utx*8./15.*lnah; Fty = -6.*PI*mu*ai*Uty*8./15.*lnah; Ftz = 0.; Ftx += -8.*PI*mu*ai*ai*omy*1./10.*lnah; Fty += 8.*PI*mu*ai*ai*omx*1./10.*lnah; Ftz += 0.; Lox = 8.*PI*mu*ai*ai*Uty*1./10.*lnah; Loy = -8.*PI*mu*ai*ai*Utx*1./10.*lnah; Loz = 0.; Lox += -8.*PI*mu*ai*ai*ai*omx*2./5.*lnah; Loy += -8.*PI*mu*ai*ai*ai*omy*2./5.*lnah; Loz += 0.; parts[i].iFx += isTrue * (Fnx + Ftx); parts[i].iFy += isTrue * (Fny + Fty); parts[i].iFz += isTrue * (Fnz + Ftz); parts[i].iLx += isTrue * Lox; parts[i].iLy += isTrue * Loy; parts[i].iLz += isTrue * Loz; } if(h < 0) { Un = -(parts[i].w - bc.wTD); real Utx = 0.5*(parts[i].u+parts[i].u0) - bc.uSD; real Uty = 0.5*(parts[i].v+parts[i].v0) - bc.vSD; // determine whether this is a new contact q = 0; while(parts[i].iSt[q] != -15 && q < MAX_NEIGHBORS) { q++; } if(q == MAX_NEIGHBORS) { q = 0; while(parts[i].iSt[q] != -1) { q++; } parts[i].iSt[q] = -15; parts[i].St[q] = 1./9.*parts[i].rho/rhof*2.*parts[i].r*fabs(Un)/nu; } omx = 0.5*(parts[i].ox+parts[i].ox0); omy = 0.5*(parts[i].oy+parts[i].oy0); omz = 0.5*(parts[i].oz+parts[i].oz0); real Vx = -(Utx - (ai + 0.5*h)*omy); real Vy = -(Uty + (ai + 0.5*h)*omx); real Hi = 0.5*parts[i].E/(1.+parts[i].sigma); real kt = 8./((1.-parts[i].sigma*parts[i].sigma)/Hi +(1.-parts[i].sigma*parts[i].sigma)/Hi)/sqrt(1./ai)*sqrt(-h); real sx = Vx * dt; real sy = Vy * dt; lnah = 0; real k = 4./3./((1.-parts[i].sigma*parts[i].sigma)/parts[i].E + (1.-parts[i].sigma*parts[i].sigma)/parts[i].E)/sqrt(1./ai); // estimate damping coefficient real xcx0 = 1.e-4; real e = parts[i].e_dry + (1.+parts[i].e_dry)/parts[i].St[q]*log(xcx0); if(e < 0) e = 0; real alpha = -2.263*pow(e,0.3948)+2.22; real eta = alpha*sqrt(4./3.*PI*ai*ai*ai*parts[i].rho*k*sqrt(-h)); // use the same coeff_fric for particle and wall real coeff_fric = parts[i].coeff_fric; real Ftx = -kt * sx; real Fty = -kt * sy; real Ft = sqrt(Ftx*Ftx + Fty*Fty); if(Ft > fabs(coeff_fric * (sqrt(-h*h*h)*k - eta*Un))) { Ftx = coeff_fric * (sqrt(-h*h*h)*k - eta*Un) * Ftx / Ft; Fty = coeff_fric * (sqrt(-h*h*h)*k - eta*Un) * Fty / Ft; } parts[i].iFx += isTrue * Ftx; parts[i].iFy += isTrue * Fty; parts[i].iFz -= isTrue * (sqrt(-h*h*h)*k - eta*Un); parts[i].iLx -= isTrue * (ai+0.5*h) * Fty; parts[i].iLy += isTrue * (ai+0.5*h) * Ftx; } } } __global__ void spring_parts(part_struct *parts, int nparts) { int i = threadIdx.x + blockIdx.x*blockDim.x; if(i < nparts && parts[i].spring_k > 0.) 
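  // linear spring tether: measure the particle's displacement from its anchor
  // (spring_x, spring_y, spring_z), subtract the rest length spring_l along
  // the line of centers, and apply the restoring force kF = -spring_k * d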
{ real nx = parts[i].x-parts[i].spring_x; real ny = parts[i].y-parts[i].spring_y; real nz = parts[i].z-parts[i].spring_z; real n = sqrt(nx*nx+ny*ny+nz*nz); real nhatx = nx / n; real nhaty = ny / n; real nhatz = nz / n; real lx = parts[i].spring_l*nhatx; real ly = parts[i].spring_l*nhaty; real lz = parts[i].spring_l*nhatz; real l = sqrt(lx*lx+ly*ly+lz*lz); real dx = parts[i].x-parts[i].spring_x-lx; real dy = parts[i].y-parts[i].spring_y-ly; real dz = parts[i].z-parts[i].spring_z-lz; parts[i].kFx = - parts[i].spring_k * dx; parts[i].kFy = - parts[i].spring_k * dy; parts[i].kFz = - parts[i].spring_k * dz; } } __global__ void yank_u_WE(real *u, dom_struct *dom, real *plane, real xpos, real vel) { int tj = blockDim.x*blockIdx.x + threadIdx.x; int tk = blockDim.y*blockIdx.y + threadIdx.y; real ddx = 1. / dom->dx; if((tj < dom->Gfx._jnb) && (tk < dom->Gfx._knb)) { // find index of node // for now, ignore motion tangential to plane int i = floor((xpos - dom->xs) * ddx) + DOM_BUF; if(i < dom->Gfx.is) i += dom->Gfx.inb; if(i > dom->Gfx.ie-1) i -= dom->Gfx.inb; real xx = (i-DOM_BUF) * dom->dx + dom->xs; int W = i + tj*dom->Gfx.s1b + tk*dom->Gfx.s2b; int E = (i+1) + tj*dom->Gfx.s1b + tk*dom->Gfx.s2b; real dudx = (u[E] - u[W]) * ddx; plane[tj + tk*dom->Gfx.jnb] = u[W] + dudx * (xpos - xx) + vel; } } __global__ void yank_v_WE(real *v, dom_struct *dom, real *plane_w, real *plane_e, real xpos, real vel) { int tj = blockDim.x*blockIdx.x + threadIdx.x; int tk = blockDim.y*blockIdx.y + threadIdx.y; real ddx = 1. / dom->dx; if((tj < dom->Gfy._jnb) && (tk < dom->Gfy._knb)) { // find index of node // for now, ignore motion tangential to plane int i = floor((xpos - dom->xs) * ddx) + DOM_BUF; if(i < dom->Gfy.is) i += dom->Gfy.inb; if(i > dom->Gfy.ie-1) i -= dom->Gfy.inb; real xx_w = (i-DOM_BUF-0.5) * dom->dx + dom->xs; real xx_e = (i-DOM_BUF+0.5) * dom->dx + dom->xs; int W = (i-1) + tj*dom->Gfy.s1b + tk*dom->Gfy.s2b; int M = i + tj*dom->Gfy.s1b + tk*dom->Gfy.s2b; int E = (i+1) + tj*dom->Gfy.s1b + tk*dom->Gfy.s2b; real dvdx_w = (v[M] - v[W]) * ddx; real dvdx_e = (v[E] - v[M]) * ddx; plane_w[tj + tk*dom->Gfy.jnb] = v[W] + dvdx_w * (xpos - 0.5*dom->dx - xx_w); plane_e[tj + tk*dom->Gfy.jnb] = v[M] + dvdx_e * (xpos + 0.5*dom->dx - xx_e); } } __global__ void yank_w_WE(real *w, dom_struct *dom, real *plane_w, real *plane_e, real xpos, real vel) { int tj = blockDim.x*blockIdx.x + threadIdx.x; int tk = blockDim.y*blockIdx.y + threadIdx.y; real ddx = 1. / dom->dx; if((tj < dom->Gfz._jnb) && (tk < dom->Gfz._knb)) { // find index of node // for now, ignore motion tangential to plane int i = floor((xpos - dom->xs) * ddx) + DOM_BUF; if(i < dom->Gfz.is) i += dom->Gfz.inb; if(i > dom->Gfz.ie-1) i -= dom->Gfz.inb; real xx_w = (i-DOM_BUF - 0.5) * dom->dx + dom->xs; real xx_e = (i-DOM_BUF + 0.5) * dom->dx + dom->xs; int W = (i-1) + tj*dom->Gfz.s1b + tk*dom->Gfz.s2b; int M = i + tj*dom->Gfz.s1b + tk*dom->Gfz.s2b; int E = (i+1) + tj*dom->Gfz.s1b + tk*dom->Gfz.s2b; real dwdx_w = (w[M] - w[W]) * ddx; real dwdx_e = (w[E] - w[M]) * ddx; plane_w[tj + tk*dom->Gfz.jnb] = w[W] + dwdx_w * (xpos -0.5*dom->dx - xx_w); plane_e[tj + tk*dom->Gfz.jnb] = w[M] + dwdx_e * (xpos +0.5*dom->dx - xx_e); } } __global__ void yank_u_SN(real *u, dom_struct *dom, real *plane_s, real *plane_n, real ypos, real vel) { int tk = blockDim.x*blockIdx.x + threadIdx.x; int ti = blockDim.y*blockIdx.y + threadIdx.y; real ddy = 1. 
    / dom->dy;

  if((tk < dom->Gfx._knb) && (ti < dom->Gfx._inb)) {
    // find index of node
    // for now, ignore motion tangential to plane
    int j = floor((ypos - dom->ys) * ddy) + DOM_BUF;
    if(j < dom->Gfx.js) j += dom->Gfx.jnb;
    if(j > dom->Gfx.je-1) j -= dom->Gfx.jnb;
    real yy_s = (j-DOM_BUF - 0.5) * dom->dy + dom->ys;
    real yy_n = (j-DOM_BUF + 0.5) * dom->dy + dom->ys;
    int S = ti + (j-1)*dom->Gfx.s1b + tk*dom->Gfx.s2b;
    int M = ti + j*dom->Gfx.s1b + tk*dom->Gfx.s2b;
    int N = ti + (j+1)*dom->Gfx.s1b + tk*dom->Gfx.s2b;
    real dudy_s = (u[M] - u[S]) * ddy;
    real dudy_n = (u[N] - u[M]) * ddy;
    plane_s[tk + ti*dom->Gfx.knb] = u[S] + dudy_s * (ypos - 0.5*dom->dy - yy_s);
    plane_n[tk + ti*dom->Gfx.knb] = u[M] + dudy_n * (ypos + 0.5*dom->dy - yy_n);
  }
}

__global__ void yank_v_SN(real *v, dom_struct *dom, real *plane, real ypos,
  real vel)
{
  int tk = blockDim.x*blockIdx.x + threadIdx.x;
  int ti = blockDim.y*blockIdx.y + threadIdx.y;

  real ddy = 1. / dom->dy;

  if((ti < dom->Gfy._inb) && (tk < dom->Gfy._knb)) {
    // find index of node
    // for now, ignore motion tangential to plane
    int j = floor((ypos - dom->ys) * ddy) + DOM_BUF;
    if(j < dom->Gfy.js) j += dom->Gfy.jnb;
    if(j > dom->Gfy.je-1) j -= dom->Gfy.jnb;
    real yy = (j-DOM_BUF) * dom->dy + dom->ys;
    int S = ti + j*dom->Gfy.s1b + tk*dom->Gfy.s2b;
    int N = ti + (j+1)*dom->Gfy.s1b + tk*dom->Gfy.s2b;
    real dvdy = (v[N] - v[S]) * ddy;
    plane[tk + ti*dom->Gfy.knb] = v[S] + dvdy * (ypos - yy) + vel;
  }
}

__global__ void yank_w_SN(real *w, dom_struct *dom, real *plane_s,
  real *plane_n, real ypos, real vel)
{
  int tk = blockDim.x*blockIdx.x + threadIdx.x;
  int ti = blockDim.y*blockIdx.y + threadIdx.y;

  real ddy = 1. / dom->dy;

  if((ti < dom->Gfz._inb) && (tk < dom->Gfz._knb)) {
    // find index of node
    // for now, ignore motion tangential to plane
    int j = floor((ypos - dom->ys) * ddy) + DOM_BUF;
    if(j < dom->Gfz.js) j += dom->Gfz.jnb;
    if(j > dom->Gfz.je-1) j -= dom->Gfz.jnb;
    real yy_s = (j-DOM_BUF - 0.5) * dom->dy + dom->ys;
    real yy_n = (j-DOM_BUF + 0.5) * dom->dy + dom->ys;
    int S = ti + (j-1)*dom->Gfz.s1b + tk*dom->Gfz.s2b;
    int M = ti + j*dom->Gfz.s1b + tk*dom->Gfz.s2b;
    int N = ti + (j+1)*dom->Gfz.s1b + tk*dom->Gfz.s2b;
    real dwdy_s = (w[M] - w[S]) * ddy;
    real dwdy_n = (w[N] - w[M]) * ddy;
    plane_s[tk + ti*dom->Gfz.knb] = w[S] + dwdy_s * (ypos - 0.5*dom->dy - yy_s);
    plane_n[tk + ti*dom->Gfz.knb] = w[M] + dwdy_n * (ypos + 0.5*dom->dy - yy_n);
  }
}

__global__ void yank_u_BT(real *u, dom_struct *dom, real *plane_b,
  real *plane_t, real zpos, real vel)
{
  int ti = blockDim.x*blockIdx.x + threadIdx.x;
  int tj = blockDim.y*blockIdx.y + threadIdx.y;

  real ddz = 1.
/ dom->dz; // this is not the ideal situation, try to change to better interpolation if((ti < dom->Gfx._inb) && (tj < dom->Gfx._jnb)) { // find index of node // for now, ignore motion tangential to plane int k = floor((zpos - dom->zs) * ddz) + DOM_BUF; if(k < dom->Gfx.ks) k += dom->Gfx.knb; if(k > dom->Gfx.ke-1) k -= dom->Gfx.knb; real zz_b = (k-DOM_BUF - 0.5) * dom->dz + dom->zs; real zz_t = (k-DOM_BUF + 0.5) * dom->dz + dom->zs; int B = ti + tj*dom->Gfx.s1b + (k-1)*dom->Gfx.s2b; int M = ti + tj*dom->Gfx.s1b + k*dom->Gfx.s2b; int T = ti + tj*dom->Gfx.s1b + (k+1)*dom->Gfx.s2b; real dudz_b = (u[M] - u[B]) * ddz; real dudz_t = (u[T] - u[M]) * ddz; plane_b[ti + tj*dom->Gfx.inb] = u[B] + dudz_b * (zpos - dom->dz*0.5 - zz_b); plane_t[ti + tj*dom->Gfx.inb] = u[M] + dudz_t * (zpos + dom->dz*0.5 - zz_t); } } __global__ void yank_v_BT(real *v, dom_struct *dom, real *plane_b, real *plane_t, real zpos, real vel) { int ti = blockDim.x*blockIdx.x + threadIdx.x; int tj = blockDim.y*blockIdx.y + threadIdx.y; real ddz = 1. / dom->dz; if((ti < dom->Gfy._inb) && (tj < dom->Gfy._jnb)) { // find index of node // for now, ignore motion tangential to plane int k = floor((zpos - dom->zs) * ddz) + DOM_BUF; if(k < dom->Gfy.ks) k += dom->Gfy.knb; if(k > dom->Gfy.ke-1) k -= dom->Gfy.knb; real zz_b = (k-DOM_BUF - 0.5) * dom->dz + dom->zs; real zz_t = (k-DOM_BUF + 0.5) * dom->dz + dom->zs; int B = ti + tj*dom->Gfy.s1b + (k-1)*dom->Gfy.s2b; int M = ti + tj*dom->Gfy.s1b + k*dom->Gfy.s2b; int T = ti + tj*dom->Gfy.s1b + (k+1)*dom->Gfy.s2b; real dvdz_b = (v[M] - v[B]) * ddz; real dvdz_t = (v[T] - v[M]) * ddz; plane_b[ti + tj*dom->Gfy.inb] = v[B] + dvdz_b * (zpos - dom->dz*0.5 - zz_b); plane_t[ti + tj*dom->Gfy.inb] = v[M] + dvdz_t * (zpos + dom->dz*0.5 - zz_t); } } __global__ void yank_w_BT(real *w, dom_struct *dom, real *plane, real zpos, real vel) { int ti = blockDim.x*blockIdx.x + threadIdx.x; int tj = blockDim.y*blockIdx.y + threadIdx.y; real ddz = 1. 
/ dom->dz; if((ti < dom->Gfz._inb) && (tj < dom->Gfz._jnb)) { // find index of node // for now, ignore motion tangential to plane int k = floor((zpos - dom->zs) * ddz) + DOM_BUF; if(k < dom->Gfz.ks) k += dom->Gfz.knb; if(k > dom->Gfz.ke-1) k -= dom->Gfz.knb; real zz = (k-DOM_BUF) * dom->dz + dom->zs; int B = ti + tj*dom->Gfz.s1b + k*dom->Gfz.s2b; int T = ti + tj*dom->Gfz.s1b + (k+1)*dom->Gfz.s2b; real dwdz = (w[T] - w[B]) * ddz; plane[ti + tj*dom->Gfz.inb] = w[B] + dwdz * (zpos - zz) + vel; } } __global__ void colocate_Gfx(real *u, real *u_co, dom_struct *dom) { int tj = blockDim.x*blockIdx.x + threadIdx.x + DOM_BUF; int tk = blockDim.y*blockIdx.y + threadIdx.y + DOM_BUF; if((tj < dom->Gfx.jnb-1) && (tk < dom->Gfx.knb-1)) { for(int i = dom->Gfx.is; i < dom->Gfx.ie-1; i++) { u_co[(i-DOM_BUF) + (tj-DOM_BUF)*dom->Gcc.s1 + (tk-DOM_BUF)*dom->Gcc.s2] = 0.5 * (u[i + tj*dom->Gfx.s1b + tk*dom->Gfx.s2b] + u[(i+1) + tj*dom->Gfx.s1b + tk*dom->Gfx.s2b]); } } } __global__ void colocate_Gfy(real *v, real *v_co, dom_struct *dom) { int tk = blockDim.x*blockIdx.x + threadIdx.x + DOM_BUF; int ti = blockDim.y*blockIdx.y + threadIdx.y + DOM_BUF; if((tk < dom->Gfy.knb-1) && (ti < dom->Gfy.inb-1)) { for(int j = dom->Gfy.js; j < dom->Gfy.je-1; j++) { v_co[(ti-DOM_BUF) + (j-DOM_BUF)*dom->Gcc.s1 + (tk-DOM_BUF)*dom->Gcc.s2] = 0.5 * (v[ti + j*dom->Gfy.s1b + tk*dom->Gfy.s2b] + v[ti + (j+1)*dom->Gfy.s1b + tk*dom->Gfy.s2b]); } } } __global__ void colocate_Gfz(real *w, real *w_co, dom_struct *dom) { int ti = blockDim.x*blockIdx.x + threadIdx.x + DOM_BUF; int tj = blockDim.y*blockIdx.y + threadIdx.y + DOM_BUF; if((ti < dom->Gfz.inb-1) && (tj < dom->Gfz.jnb-1)) { for(int k = dom->Gfz.ks; k < dom->Gfz.ke-1; k++) { w_co[(ti-DOM_BUF) + (tj-DOM_BUF)*dom->Gcc.s1 + (k-DOM_BUF)*dom->Gcc.s2] = 0.5 * (w[ti + tj*dom->Gfz.s1b + k*dom->Gfz.s2b] + w[ti + tj*dom->Gfz.s1b + (k+1)*dom->Gfz.s2b]); } } } __global__ void energy_multiply(real *u_co, real *v_co, real *w_co, real *co, dom_struct *dom) { int tj = blockDim.x*blockIdx.x + threadIdx.x; int tk = blockDim.y*blockIdx.y + threadIdx.y; int C; // memory location if((tj < dom->Gcc.jn) && (tk < dom->Gcc.kn)) { for(int i = dom->Gcc.is-DOM_BUF; i < dom->Gcc.ie-DOM_BUF; i++) { C = i + tj*dom->Gcc.s1 + tk*dom->Gcc.s2; co[C] = u_co[C]*u_co[C] + v_co[C]*v_co[C] + w_co[C]*w_co[C]; } } } __device__ real ab_int(real dt0, real dt, real f0, real df0, real df) { real DT = dt/dt0; if(dt0 < 0) return f0 + df*dt; else return f0 + ((1+0.5*DT)*df - 0.5*DT*df0)*dt; } __global__ void internal_u(real *u, part_struct *parts, dom_struct *dom, int *flag_u, int *phase) { int tj = blockIdx.x * blockDim.x + threadIdx.x + DOM_BUF; int tk = blockIdx.y * blockDim.y + threadIdx.y + DOM_BUF; if(tj < dom->Gfx._je && tk < dom->Gfx._ke) { for(int i = dom->Gfx._is; i < dom->Gfx._ie; i++) { int C = i + tj*dom->Gfx._s1b + tk*dom->Gfx._s2b; int W = (i-1) + tj*dom->Gcc._s1b + tk*dom->Gcc._s2b; int E = i + tj*dom->Gcc._s1b + tk*dom->Gcc._s2b; int pw = phase[W]; int pe = phase[E]; int f = flag_u[C]; int p = (pw > -1 && pe > -1) * phase[E]; real rx = (i - DOM_BUF) * dom->dx + dom->xs - parts[p].x; if(rx <= 2.*parts[p].r-dom->xl) rx += dom->xl; if(rx >= dom->xl-2.*parts[p].r) rx -= dom->xl; real ry = (tj - 0.5) * dom->dy + dom->ys - parts[p].y; if(ry <= 2.*parts[p].r-dom->yl) ry += dom->yl; if(ry >= dom->yl-2.*parts[p].r) ry -= dom->yl; real rz = (tk - 0.5) * dom->dz + dom->zs - parts[p].z; if(rz <= 2.*parts[p].r-dom->zl) rz += dom->zl; if(rz >= dom->zl-2.*parts[p].r) rz -= dom->zl; real ocrossr_x = parts[p].oy*rz - parts[p].oz*ry;
u[C] = (pw == -1 || pe == -1 || f == -1) * u[C] + (pw > -1 && pe > -1 && f != -1) * (ocrossr_x + parts[p].u); } } } __global__ void internal_v(real *v, part_struct *parts, dom_struct *dom, int *flag_v, int *phase) { int tk = blockIdx.x * blockDim.x + threadIdx.x + DOM_BUF; int ti = blockIdx.y * blockDim.y + threadIdx.y + DOM_BUF; if(tk < dom->Gfy._ke && ti < dom->Gfy._ie) { for(int j = dom->Gfy._js; j < dom->Gfy._je; j++) { int C = ti + j*dom->Gfy._s1b + tk*dom->Gfy._s2b; int S = ti + (j-1)*dom->Gcc._s1b + tk*dom->Gcc._s2b; int N = ti + j*dom->Gcc._s1b + tk*dom->Gcc._s2b; int ps = phase[S]; int pn = phase[N]; int f = flag_v[C]; int p = (ps > -1 && pn > -1) * phase[N]; real rx = (ti - 0.5) * dom->dx + dom->xs - parts[p].x; if(rx <= 2.*parts[p].r-dom->xl) rx += dom->xl; if(rx >= dom->xl-2.*parts[p].r) rx -= dom->xl; real ry = (j - DOM_BUF) * dom->dy + dom->ys - parts[p].y; if(ry <= 2.*parts[p].r-dom->yl) ry += dom->yl; if(ry >= dom->yl-2.*parts[p].r) ry -= dom->yl; real rz = (tk - 0.5) * dom->dz + dom->zs - parts[p].z; if(rz <= 2.*parts[p].r-dom->zl) rz += dom->zl; if(rz >= dom->zl-2.*parts[p].r) rz -= dom->zl; real ocrossr_y = parts[p].oz*rx - parts[p].ox*rz; v[C] = (ps == -1 || pn == -1 || f == -1) * v[C] + (ps > -1 && pn > -1 && f != -1) * (ocrossr_y + parts[p].v); } } } __global__ void internal_w(real *w, part_struct *parts, dom_struct *dom, int *flag_w, int *phase) { int ti = blockIdx.x * blockDim.x + threadIdx.x + DOM_BUF; int tj = blockIdx.y * blockDim.y + threadIdx.y + DOM_BUF; if(ti < dom->Gfz._ie && tj < dom->Gfz._je) { for(int k = dom->Gfz._ks; k < dom->Gfz._ke; k++) { int C = ti + tj*dom->Gfz._s1b + k*dom->Gfz._s2b; int B = ti + tj*dom->Gcc._s1b + (k-1)*dom->Gcc._s2b; int T = ti + tj*dom->Gcc._s1b + k*dom->Gcc._s2b; int pb = phase[B]; int pt = phase[T]; int f = flag_w[C]; int p = (pb > -1 && pt > -1) * phase[T]; real rx = (ti - 0.5) * dom->dx + dom->xs - parts[p].x; if(rx <= 2.*parts[p].r-dom->xl) rx += dom->xl; if(rx >= dom->xl-2.*parts[p].r) rx -= dom->xl; real ry = (tj - 0.5) * dom->dy + dom->ys - parts[p].y; if(ry <= 2.*parts[p].r-dom->yl) ry += dom->yl; if(ry >= dom->yl-2.*parts[p].r) ry -= dom->yl; real rz = (k - DOM_BUF) * dom->dz + dom->zs - parts[p].z; if(rz <= 2.*parts[p].r-dom->zl) rz += dom->zl; if(rz >= dom->zl-2.*parts[p].r) rz -= dom->zl; real ocrossr_z = parts[p].ox*ry - parts[p].oy*rx; w[C] = (pb == -1 || pt == -1 || f == -1) * w[C] + (pb > -1 && pt > -1 && f != -1) * (ocrossr_z + parts[p].w); } } }
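The ab_int device function in the file above is a variable-step Adams-Bashforth update: with DT = dt/dt0 it returns f0 + ((1 + 0.5*DT)*df - 0.5*DT*df0)*dt, and falls back to a forward-Euler step f0 + df*dt when there is no previous step (dt0 < 0). The following minimal host-side sketch is not part of the original file; it assumes "real" is double and only checks that for equal steps the update reduces to the classic AB2 coefficients 1.5 and -0.5.

#include <stdio.h>

typedef double real;   /* assumption: the solver's "real" is double here */

static real ab_int_host(real dt0, real dt, real f0, real df0, real df)
{
  real DT = dt / dt0;
  if(dt0 < 0) return f0 + df * dt;                           /* first step: forward Euler */
  else return f0 + ((1 + 0.5*DT) * df - 0.5*DT * df0) * dt;  /* 2nd-order Adams-Bashforth */
}

int main(void)
{
  real dt = 0.1, f0 = 1.0, df0 = 2.0, df = 3.0;
  /* equal steps => DT = 1, so the update equals f0 + (1.5*df - 0.5*df0)*dt */
  printf("%f %f\n", ab_int_host(dt, dt, f0, df0, df),
                    f0 + (1.5*df - 0.5*df0)*dt);   /* both print 1.350000 */
  return 0;
}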
c03aaa767c6eb4371196f61debf201e629ed026c.hip
// !!! This is a file automatically generated by hipify!!! /* multiplication table using CUDA refer : http://blog.daum.net/heoly/7 (Thank you) */ #include <stdio.h> #include <malloc.h> #include <hip/hip_runtime.h> #define BLOCK_SIZE 8 #define THREAD_SIZE 9 // Device code __global__ void test(int *result) { int tidx, bidx; tidx = threadIdx.x; //x-coordinate of thread bidx = blockIdx.x; //x-coordinate of block result[THREAD_SIZE * bidx + tidx] = (bidx + 2) * (tidx + 1); } // Host code int main() { int *host_Result; //Save result data of host int *device_Result; //Save result data of device int i=0, j=0; //Allocate host memory host_Result = (int *)malloc( BLOCK_SIZE * THREAD_SIZE * sizeof(int) ); //Allocate device memory hipMalloc( (void**) &device_Result, sizeof(int) * BLOCK_SIZE * THREAD_SIZE); //Function name <<BLOCK_SIZE, THREAD_SIZE>>> parameters hipLaunchKernelGGL(( test) , dim3(BLOCK_SIZE), dim3(THREAD_SIZE), 0, 0, device_Result); //Execute Device code //Copy device result to host result hipMemcpy( host_Result, device_Result, sizeof(int) * BLOCK_SIZE * THREAD_SIZE, hipMemcpyDeviceToHost ); //Print result for(j=0; j<BLOCK_SIZE; j++) { printf("%3d step\n", (j + 2)); for(i=0; i<THREAD_SIZE; i++) { printf("%3d X %3d = %3d\n", j+2, i+1, host_Result[j * THREAD_SIZE + i]); } printf("\n"); } free(host_Result); //Free host memory hipFree(device_Result); //Free device memory return 1; }
c03aaa767c6eb4371196f61debf201e629ed026c.cu
/* multiplication table using CUDA refer : http://blog.daum.net/heoly/7 (Thank you) */ #include <stdio.h> #include <malloc.h> #include <cuda_runtime.h> #define BLOCK_SIZE 8 #define THREAD_SIZE 9 // Device code __global__ void test(int *result) { int tidx, bidx; tidx = threadIdx.x; //x-coordinate of thread bidx = blockIdx.x; //x-coordinate of block result[THREAD_SIZE * bidx + tidx] = (bidx + 2) * (tidx + 1); } // Host code int main() { int *host_Result; //Save result data of host int *device_Result; //Save result data of device int i=0, j=0; //Allocate host memory host_Result = (int *)malloc( BLOCK_SIZE * THREAD_SIZE * sizeof(int) ); //Allocate device memory cudaMalloc( (void**) &device_Result, sizeof(int) * BLOCK_SIZE * THREAD_SIZE); //Function name <<BLOCK_SIZE, THREAD_SIZE>>> parameters test <<<BLOCK_SIZE, THREAD_SIZE>>>(device_Result); //Execute Device code //Copy device result to host result cudaMemcpy( host_Result, device_Result, sizeof(int) * BLOCK_SIZE * THREAD_SIZE, cudaMemcpyDeviceToHost ); //Print result for(j=0; j<BLOCK_SIZE; j++) { printf("%3d step\n", (j + 2)); for(i=0; i<THREAD_SIZE; i++) { printf("%3d X %3d = %3d\n", j+2, i+1, host_Result[j * THREAD_SIZE + i]); } printf("\n"); } free(host_Result); //Free host memory cudaFree(device_Result); //Free device memory return 1; }
1ef2166ec0d621f6cf60c48c6c90b7bc56742d17.hip
// !!! This is a file automatically generated by hipify!!! #include "costVolumeFilter_box.h" #include "helper.h" #include "opencv2/ximgproc/edge_filter.hpp" #include "opencv2/cudafilters.hpp" #include <npp.h> using namespace std; using namespace cv; void costVolumeFilter_box_gpu(struct cost_volume_t& vol, int ksize){ int nrows = vol.nrows; int ncols = vol.ncols; int ndisp = vol.ndisp; int stride = vol.stride; // output volume float* d_output; hipMalloc(&d_output, ndisp*nrows*stride*sizeof(float)); struct timespec timer; check_timer(NULL,&timer); for(int disp = 0; disp < ndisp; disp++){ float* src_data = &(vol.volume[disp*nrows*stride]); float* out_data = &(d_output[disp*nrows*stride]); int src_pitch = stride*sizeof(float); int out_pitch = stride*sizeof(float); NppiSize size = {ncols , nrows }; NppiSize sizeROI = {ncols , nrows }; NppiSize kernel = {ksize , ksize }; NppiPoint offset = {0 , 0 }; NppiPoint anchor = {ksize/2 , ksize/2 }; nppiFilterBoxBorder_32f_C1R( src_data, src_pitch, size, offset, out_data, out_pitch, sizeROI, kernel, anchor, NPP_BORDER_REPLICATE); } check_timer("costVolumeFilter_box_gpu time",&timer); // shuffle pointers hipFree(vol.volume); vol.volume = d_output; } void costVolumeFilter_box(struct cost_volume_t& cost_volume, int kernelSize){ int nrows = cost_volume.nrows; int ncols = cost_volume.ncols; int ndisp = cost_volume.ndisp; float* vin = cost_volume.volume; // doesn't do in-place editing... need second float* float* vout = (float*)malloc(nrows*ncols*ndisp*sizeof(float)); struct timespec timer; check_timer(NULL,&timer); for(int disp = 0; disp < ndisp; disp++){ Mat slicein(nrows,ncols,CV_32F,&(vin[nrows*ncols*disp])); Mat sliceout(nrows,ncols,CV_32F,&(vout[nrows*ncols*disp])); boxFilter(slicein, sliceout, -1, Size(kernelSize,kernelSize)); } check_timer("costVolumeFilter_box time",&timer); // free old cost_volume float* free(cost_volume.volume); // replace with new cost_volume float* cost_volume.volume = vout; }
1ef2166ec0d621f6cf60c48c6c90b7bc56742d17.cu
#include "costVolumeFilter_box.h" #include "helper.h" #include "opencv2/ximgproc/edge_filter.hpp" #include "opencv2/cudafilters.hpp" #include <npp.h> using namespace std; using namespace cv; void costVolumeFilter_box_gpu(struct cost_volume_t& vol, int ksize){ int nrows = vol.nrows; int ncols = vol.ncols; int ndisp = vol.ndisp; int stride = vol.stride; // output volume float* d_output; cudaMalloc(&d_output, ndisp*nrows*stride*sizeof(float)); struct timespec timer; check_timer(NULL,&timer); for(int disp = 0; disp < ndisp; disp++){ float* src_data = &(vol.volume[disp*nrows*stride]); float* out_data = &(d_output[disp*nrows*stride]); int src_pitch = stride*sizeof(float); int out_pitch = stride*sizeof(float); NppiSize size = {ncols , nrows }; NppiSize sizeROI = {ncols , nrows }; NppiSize kernel = {ksize , ksize }; NppiPoint offset = {0 , 0 }; NppiPoint anchor = {ksize/2 , ksize/2 }; nppiFilterBoxBorder_32f_C1R( src_data, src_pitch, size, offset, out_data, out_pitch, sizeROI, kernel, anchor, NPP_BORDER_REPLICATE); } check_timer("costVolumeFilter_box_gpu time",&timer); // shuffle pointers cudaFree(vol.volume); vol.volume = d_output; } void costVolumeFilter_box(struct cost_volume_t& cost_volume, int kernelSize){ int nrows = cost_volume.nrows; int ncols = cost_volume.ncols; int ndisp = cost_volume.ndisp; float* vin = cost_volume.volume; // doesn't do in-place editing... need second float* float* vout = (float*)malloc(nrows*ncols*ndisp*sizeof(float)); struct timespec timer; check_timer(NULL,&timer); for(int disp = 0; disp < ndisp; disp++){ Mat slicein(nrows,ncols,CV_32F,&(vin[nrows*ncols*disp])); Mat sliceout(nrows,ncols,CV_32F,&(vout[nrows*ncols*disp])); boxFilter(slicein, sliceout, -1, Size(kernelSize,kernelSize)); } check_timer("costVolumeFilter_box time",&timer); // free old cost_volume float* free(cost_volume.volume); // replace with new cost_volume float* cost_volume.volume = vout; }
baf4cc3605750e99dc9ea5e4eb639fc52440d689.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "../gtest.h" #include <backends/gpu/stack.hpp> #include <backends/gpu/stack_cu.hpp> #include <backends/gpu/managed_ptr.hpp> #include <arbor/execution_context.hpp> using namespace arb; TEST(stack, construction) { using T = int; execution_context context; gpu::stack<T> s(10, context.gpu); EXPECT_EQ(0u, s.size()); EXPECT_EQ(10u, s.capacity()); } // kernel and functors for testing push_back functionality namespace kernels { template <typename F> __global__ void push_back(gpu::stack_storage<int>& s, F f) { if (f(threadIdx.x)) { arb::gpu::push_back(s, int(threadIdx.x)); } } struct all_ftor { __host__ __device__ bool operator() (int i) { return true; } }; struct even_ftor { __host__ __device__ bool operator() (int i) { return (i%2)==0; } }; struct odd_ftor { __host__ __device__ bool operator() (int i) { return i%2; } }; } TEST(stack, push_back) { using T = int; using stack = gpu::stack<T>; execution_context context; const unsigned n = 10; EXPECT_TRUE(n%2 == 0); // require n is even for tests to work auto s = stack(n, context.gpu); auto& sstorage = s.storage(); hipLaunchKernelGGL(( kernels::push_back), dim3(1), dim3(n), 0, 0, sstorage, kernels::all_ftor()); hipDeviceSynchronize(); EXPECT_EQ(n, s.size()); for (auto i=0; i<int(s.size()); ++i) { EXPECT_EQ(i, s[i]); } s.clear(); hipLaunchKernelGGL(( kernels::push_back), dim3(1), dim3(n), 0, 0, sstorage, kernels::even_ftor()); hipDeviceSynchronize(); EXPECT_EQ(n/2, s.size()); for (auto i=0; i<int(s.size())/2; ++i) { EXPECT_EQ(2*i, s[i]); } s.clear(); hipLaunchKernelGGL(( kernels::push_back), dim3(1), dim3(n), 0, 0, sstorage, kernels::odd_ftor()); hipDeviceSynchronize(); EXPECT_EQ(n/2, s.size()); for (auto i=0; i<int(s.size())/2; ++i) { EXPECT_EQ(2*i+1, s[i]); } } TEST(stack, overflow) { using T = int; using stack = gpu::stack<T>; execution_context context; const unsigned n = 10; auto s = stack(n, context.gpu); auto& sstorage = s.storage(); EXPECT_FALSE(s.overflow()); // push 2n items into a stack of size n hipLaunchKernelGGL(( kernels::push_back), dim3(1), dim3(2*n), 0, 0, sstorage, kernels::all_ftor()); hipDeviceSynchronize(); EXPECT_EQ(n, s.size()); EXPECT_EQ(2*n, s.pushes()); EXPECT_TRUE(s.overflow()); } TEST(stack, empty) { using T = int; using stack = gpu::stack<T>; execution_context context; stack s(0u, context.gpu); EXPECT_EQ(s.size(), 0u); EXPECT_EQ(s.capacity(), 0u); EXPECT_EQ(s.storage().data, nullptr); }
baf4cc3605750e99dc9ea5e4eb639fc52440d689.cu
#include "../gtest.h" #include <backends/gpu/stack.hpp> #include <backends/gpu/stack_cu.hpp> #include <backends/gpu/managed_ptr.hpp> #include <arbor/execution_context.hpp> using namespace arb; TEST(stack, construction) { using T = int; execution_context context; gpu::stack<T> s(10, context.gpu); EXPECT_EQ(0u, s.size()); EXPECT_EQ(10u, s.capacity()); } // kernel and functors for testing push_back functionality namespace kernels { template <typename F> __global__ void push_back(gpu::stack_storage<int>& s, F f) { if (f(threadIdx.x)) { arb::gpu::push_back(s, int(threadIdx.x)); } } struct all_ftor { __host__ __device__ bool operator() (int i) { return true; } }; struct even_ftor { __host__ __device__ bool operator() (int i) { return (i%2)==0; } }; struct odd_ftor { __host__ __device__ bool operator() (int i) { return i%2; } }; } TEST(stack, push_back) { using T = int; using stack = gpu::stack<T>; execution_context context; const unsigned n = 10; EXPECT_TRUE(n%2 == 0); // require n is even for tests to work auto s = stack(n, context.gpu); auto& sstorage = s.storage(); kernels::push_back<<<1, n>>>(sstorage, kernels::all_ftor()); cudaDeviceSynchronize(); EXPECT_EQ(n, s.size()); for (auto i=0; i<int(s.size()); ++i) { EXPECT_EQ(i, s[i]); } s.clear(); kernels::push_back<<<1, n>>>(sstorage, kernels::even_ftor()); cudaDeviceSynchronize(); EXPECT_EQ(n/2, s.size()); for (auto i=0; i<int(s.size())/2; ++i) { EXPECT_EQ(2*i, s[i]); } s.clear(); kernels::push_back<<<1, n>>>(sstorage, kernels::odd_ftor()); cudaDeviceSynchronize(); EXPECT_EQ(n/2, s.size()); for (auto i=0; i<int(s.size())/2; ++i) { EXPECT_EQ(2*i+1, s[i]); } } TEST(stack, overflow) { using T = int; using stack = gpu::stack<T>; execution_context context; const unsigned n = 10; auto s = stack(n, context.gpu); auto& sstorage = s.storage(); EXPECT_FALSE(s.overflow()); // push 2n items into a stack of size n kernels::push_back<<<1, 2*n>>>(sstorage, kernels::all_ftor()); cudaDeviceSynchronize(); EXPECT_EQ(n, s.size()); EXPECT_EQ(2*n, s.pushes()); EXPECT_TRUE(s.overflow()); } TEST(stack, empty) { using T = int; using stack = gpu::stack<T>; execution_context context; stack s(0u, context.gpu); EXPECT_EQ(s.size(), 0u); EXPECT_EQ(s.capacity(), 0u); EXPECT_EQ(s.storage().data, nullptr); }
ee81ba7da0e95e0a898364d1d32b5e65f69efb92.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // map.cu // OpenCL-Primitives/cuda // // Created by Zhuohang Lai on 01/19/16. // Copyright (c) 2015-2016 Zhuohang Lai. All rights reserved. // #include "kernels.h" //mapping function 1: template<class T> __device__ T floorOfPower2(T a) { int base = 1; while (base < (int)a) { base <<= 1; } return base>>1; } template<class T> __global__ void map_kernel( #ifdef RECORDS int *d_source_keys, int *d_dest_keys, #endif T *d_source_values, T *d_dest_values, const int r_len #ifdef RECORDS ,bool isRecord #endif ) { int threadId = blockIdx.x * blockDim.x + threadIdx.x; int threadNum = gridDim.x * blockDim.x; while (threadId < r_len) { #ifdef RECORDS if (isRecord) d_dest_keys[threadId] = d_source_keys[threadId]; #endif d_dest_values[threadId] = floorOfPower2<T>(d_source_values[threadId]); threadId += threadNum; } } template<class T> float map( #ifdef RECORDS int *d_source_keys, int *d_dest_keys, #endif T *d_source_values, T *d_dest_values, int r_len, int blockSize, int gridSize #ifdef RECORDS ,bool isRecord #endif ) { dim3 grid(gridSize); dim3 block(blockSize); float totalTime = 0.0f; hipEvent_t start, end; hipEventCreate(&start); hipEventCreate(&end); hipEventRecord(start); hipLaunchKernelGGL(( map_kernel<T>), dim3(grid), dim3(block), 0, 0, #ifdef RECORDS d_source_keys, d_dest_keys, #endif d_source_values, d_dest_values, r_len #ifdef RECORDS ,isRecord #endif ); hipEventRecord(end); hipEventSynchronize(end); hipEventElapsedTime(&totalTime, start, end); return totalTime; } template float map<int>( #ifdef RECORDS int *d_source_keys, int *d_dest_keys, #endif int *d_source_values, int *d_dest_values, int r_len, int blockSize, int gridSize #ifdef RECORDS ,bool isRecord #endif ); template float map<long>( #ifdef RECORDS int *d_source_keys, int *d_dest_keys, #endif long *d_source_values, long *d_dest_values, int r_len, int blockSize, int gridSize #ifdef RECORDS ,bool isRecord #endif ); template float map<float>( #ifdef RECORDS int *d_source_keys, int *d_dest_keys, #endif float *d_source_values, float *d_dest_values, int r_len, int blockSize, int gridSize #ifdef RECORDS ,bool isRecord #endif ); template float map<double>( #ifdef RECORDS int *d_source_keys, int *d_dest_keys, #endif double *d_source_values, double *d_dest_values, int r_len, int blockSize, int gridSize #ifdef RECORDS ,bool isRecord #endif );
ee81ba7da0e95e0a898364d1d32b5e65f69efb92.cu
// // map.cu // OpenCL-Primitives/cuda // // Created by Zhuohang Lai on 01/19/16. // Copyright (c) 2015-2016 Zhuohang Lai. All rights reserved. // #include "kernels.h" //mapping function 1: template<class T> __device__ T floorOfPower2(T a) { int base = 1; while (base < (int)a) { base <<= 1; } return base>>1; } template<class T> __global__ void map_kernel( #ifdef RECORDS int *d_source_keys, int *d_dest_keys, #endif T *d_source_values, T *d_dest_values, const int r_len #ifdef RECORDS ,bool isRecord #endif ) { int threadId = blockIdx.x * blockDim.x + threadIdx.x; int threadNum = gridDim.x * blockDim.x; while (threadId < r_len) { #ifdef RECORDS if (isRecord) d_dest_keys[threadId] = d_source_keys[threadId]; #endif d_dest_values[threadId] = floorOfPower2<T>(d_source_values[threadId]); threadId += threadNum; } } template<class T> float map( #ifdef RECORDS int *d_source_keys, int *d_dest_keys, #endif T *d_source_values, T *d_dest_values, int r_len, int blockSize, int gridSize #ifdef RECORDS ,bool isRecord #endif ) { dim3 grid(gridSize); dim3 block(blockSize); float totalTime = 0.0f; cudaEvent_t start, end; cudaEventCreate(&start); cudaEventCreate(&end); cudaEventRecord(start); map_kernel<T><<<grid, block>>>( #ifdef RECORDS d_source_keys, d_dest_keys, #endif d_source_values, d_dest_values, r_len #ifdef RECORDS ,isRecord #endif ); cudaEventRecord(end); cudaEventSynchronize(end); cudaEventElapsedTime(&totalTime, start, end); return totalTime; } template float map<int>( #ifdef RECORDS int *d_source_keys, int *d_dest_keys, #endif int *d_source_values, int *d_dest_values, int r_len, int blockSize, int gridSize #ifdef RECORDS ,bool isRecord #endif ); template float map<long>( #ifdef RECORDS int *d_source_keys, int *d_dest_keys, #endif long *d_source_values, long *d_dest_values, int r_len, int blockSize, int gridSize #ifdef RECORDS ,bool isRecord #endif ); template float map<float>( #ifdef RECORDS int *d_source_keys, int *d_dest_keys, #endif float *d_source_values, float *d_dest_values, int r_len, int blockSize, int gridSize #ifdef RECORDS ,bool isRecord #endif ); template float map<double>( #ifdef RECORDS int *d_source_keys, int *d_dest_keys, #endif double *d_source_values, double *d_dest_values, int r_len, int blockSize, int gridSize #ifdef RECORDS ,bool isRecord #endif );
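A hypothetical driver for the map primitive above, written as a sketch rather than part of the original pair: it assumes RECORDS is not defined (so map<T> takes only the value buffers, the element count, and the launch configuration), the buffer names and sizes are illustrative, and the returned float is the kernel time in milliseconds measured with CUDA events inside map.

#include <cstdio>
#include <cuda_runtime.h>

// forward declaration matching the template above (normally provided by kernels.h)
template<class T>
float map(T *d_source_values, T *d_dest_values, int r_len, int blockSize, int gridSize);

int main() {
  const int r_len = 1 << 20;                 // illustrative element count
  float *d_src = nullptr, *d_dst = nullptr;
  cudaMalloc(&d_src, r_len * sizeof(float));
  cudaMalloc(&d_dst, r_len * sizeof(float));
  // ... fill d_src, e.g. with cudaMemcpy from a host array ...
  float ms = map<float>(d_src, d_dst, r_len, 256, 1024);  // grid-stride kernel, timed internally
  printf("map<float> took %f ms\n", ms);
  cudaFree(d_src);
  cudaFree(d_dst);
  return 0;
}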
ab1333b0ab3df5b560a156b92fbf3088dabeb17e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" __global__ void count_newlines(char *arr, long n, int *result) { int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; long chars_per_thread = (n+stride-1) / stride; long start = index * chars_per_thread; long end = start + chars_per_thread; int count = 0; for (long i = start; i < end && i < n; i += 1) { if (arr[i] == '\n') { count += 1; } } result[index] = count; }
ab1333b0ab3df5b560a156b92fbf3088dabeb17e.cu
__global__ void count_newlines(char *arr, long n, int *result) { int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; long chars_per_thread = (n+stride-1) / stride; long start = index * chars_per_thread; long end = start + chars_per_thread; int count = 0; for (long i = start; i < end && i < n; i += 1) { if (arr[i] == '\n') { count += 1; } } result[index] = count; }
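A hypothetical host-side wrapper, not part of the original pair, showing how the per-thread partial counts written by count_newlines might be reduced into a single total. The launch configuration and helper name are assumptions, and the sketch expects to be compiled together with the kernel above.

#include <numeric>
#include <vector>
#include <cuda_runtime.h>

__global__ void count_newlines(char *arr, long n, int *result);  // defined in the file above

int count_newlines_host(const char *h_text, long n) {
  const int threads = 256, blocks = 64;
  const int slots = threads * blocks;        // the kernel writes one partial count per thread
  char *d_text = nullptr;
  int *d_counts = nullptr;
  cudaMalloc(&d_text, n);
  cudaMalloc(&d_counts, slots * sizeof(int));
  cudaMemcpy(d_text, h_text, n, cudaMemcpyHostToDevice);
  count_newlines<<<blocks, threads>>>(d_text, n, d_counts);
  std::vector<int> h_counts(slots);
  cudaMemcpy(h_counts.data(), d_counts, slots * sizeof(int), cudaMemcpyDeviceToHost);  // implicit sync
  cudaFree(d_text);
  cudaFree(d_counts);
  return std::accumulate(h_counts.begin(), h_counts.end(), 0);   // total number of '\n'
}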
871a9c30d92a7d754e8dc6bfc95a7e74701e4916.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* EEG parsing application for 5SIA0 * * Code by Glenn Bergmans ([email protected]) * Code is based on various sources, most notably * the TU/e ES group code base and a Matlab * implementation by Mohammad Tahghighi */ #include "eeg.h" __global__ void doNothingFucker() { //Chillin'' } int32_t randint(int32_t vmin, int32_t vmax) { return (vmin + (int32_t) (rand() / (RAND_MAX / ((uint32_t) (vmax - vmin + 1)) + 1))); } int main(int argc, char *argv[]) { float features[CHANNELS][FEATURE_LENGTH]; clock_t times[CHANNELS][FEATURE_TIME_LENGTH]; clock_t ttot; char *timenames[FEATURE_TIME_LENGTH]; float favg[FEATURE_LENGTH] = {0}; int32_t x[CHANNELS][DATAPOINTS]; uint32_t i, j; int devnum; hipDeviceProp_t prop; int blocksize; cudaCheckError(hipGetDevice(&devnum)); cudaCheckError(hipGetDeviceProperties(&prop, devnum)); blocksize = (prop.maxThreadsPerBlock > prop.maxThreadsDim[0]) ? prop.maxThreadsDim[0] : prop.maxThreadsPerBlock; //printf("Blocksize: %d\n", blocksize); hipLaunchKernelGGL(( doNothingFucker), dim3(1), dim3(1), 0, 0, ); read_data(x, CHANNELS, DATAPOINTS); for (i = 0; i < CHANNELS; i++) { #ifdef VERBOSE printf("Running channel %d...\n", i); #endif run_channel(DATAPOINTS, x[i], features[i], times[i], timenames, blocksize); } // Averaging channels for (i = 0; i < CHANNELS; i++) { for (j = 0; j < FEATURE_LENGTH; j++) { favg[j] += features[i][j] / FEATURE_LENGTH; } } printf("\n"); for (i=0; i<FEATURE_LENGTH; i++) fprintf(stderr,"Feature %d: %.6f\n", i, favg[i]); for(i = 0; i < FEATURE_TIME_LENGTH; i++){ ttot = 0; for(j = 0; j < CHANNELS; j++){ ttot += times[j][i]; } printf("Clock ticks for '%s': %ld, %f sec\n", timenames[i], ttot, (double)ttot / (double)CLOCKS_PER_SEC); } return 0; } void read_data(int32_t x[CHANNELS][DATAPOINTS], int nc, int np) { FILE *fp; char * line = NULL; size_t len = 0; int l, c; fp = fopen("EEG.csv", "r"); if (fp == NULL) { printf("Error opening EEG.csv\n"); exit(EXIT_FAILURE); } // Skip the first line getline(&line, &len, fp); l = 0; while ((l < np) && (getline(&line, &len, fp)) != -1) { char *tok; tok = strtok(line, ","); float v; for (c = 0; c < nc; c++) { sscanf(tok, "%f", &v); x[c][l] = (int32_t) round(v); tok = strtok(NULL, ","); } l++; } } void run_channel(int np, int32_t *x, float *features, clock_t *times, char **timenames, int blocksize) { // Butterworth returns np + 1 samples int32_t *X = (int32_t *) malloc((np + 1) * sizeof(int32_t)); clock_t clk; // Clean signal using butterworth #ifdef VERBOSE printf(" Butterworth filter...\n"); #endif bw0_int(np, x, X); // 4 features: mean, std dev, abs sum, mean crossings #ifdef VERBOSE printf(" Standard features...\n"); #endif clk = clock(); stafeature(np, X, &features[0]); times[0] = clock() - clk; timenames[0] = (char*)"standard"; // 2 features: mean p2p, std dev p2p #ifdef VERBOSE printf(" Peak 2 peak features...\n"); #endif clk = clock(); p2p(np, X, &features[4], 7); times[1] = clock() - clk; timenames[1] = (char*)"peak 2 peak"; // 1 feature: approximate entropy #ifdef VERBOSE printf(" Approximate Entropy feature...\n"); #endif clk = clock(); printf("##########\n"); apen(np, X, &features[6], 3, 0.2, blocksize); printf("##########\n"); times[2] = clock() - clk; timenames[2] = (char*)"approximate entropy"; // 1 feature: hurst coefficient #ifdef VERBOSE printf(" Hurst Coefficient feature...\n"); #endif clk = clock(); hurst(np, X, &features[7]); times[3] = clock() - clk; timenames[3] = (char*)"hurst"; // 6 features: power in 5 
frequency bands & total power #ifdef VERBOSE printf(" Power Spectral Density features...\n"); #endif clk = clock(); power_per_band(np, X, &features[8]); times[4] = clock() - clk; timenames[4] = (char*)"power spectral density"; #ifdef VERBOSE printf("Channel done\n"); #endif free(X); }
871a9c30d92a7d754e8dc6bfc95a7e74701e4916.cu
/* EEG parsing application for 5SIA0 * * Code by Glenn Bergmans ([email protected]) * Code is based on various sources, most notably * the TU/e ES group code base and a Matlab * implementation by Mohammad Tahghighi */ #include "eeg.h" __global__ void doNothingFucker() { //Chillin'' } int32_t randint(int32_t vmin, int32_t vmax) { return (vmin + (int32_t) (rand() / (RAND_MAX / ((uint32_t) (vmax - vmin + 1)) + 1))); } int main(int argc, char *argv[]) { float features[CHANNELS][FEATURE_LENGTH]; clock_t times[CHANNELS][FEATURE_TIME_LENGTH]; clock_t ttot; char *timenames[FEATURE_TIME_LENGTH]; float favg[FEATURE_LENGTH] = {0}; int32_t x[CHANNELS][DATAPOINTS]; uint32_t i, j; int devnum; cudaDeviceProp prop; int blocksize; cudaCheckError(cudaGetDevice(&devnum)); cudaCheckError(cudaGetDeviceProperties(&prop, devnum)); blocksize = (prop.maxThreadsPerBlock > prop.maxThreadsDim[0]) ? prop.maxThreadsDim[0] : prop.maxThreadsPerBlock; //printf("Blocksize: %d\n", blocksize); doNothingFucker<<<1, 1>>>(); read_data(x, CHANNELS, DATAPOINTS); for (i = 0; i < CHANNELS; i++) { #ifdef VERBOSE printf("Running channel %d...\n", i); #endif run_channel(DATAPOINTS, x[i], features[i], times[i], timenames, blocksize); } // Averaging channels for (i = 0; i < CHANNELS; i++) { for (j = 0; j < FEATURE_LENGTH; j++) { favg[j] += features[i][j] / FEATURE_LENGTH; } } printf("\n"); for (i=0; i<FEATURE_LENGTH; i++) fprintf(stderr,"Feature %d: %.6f\n", i, favg[i]); for(i = 0; i < FEATURE_TIME_LENGTH; i++){ ttot = 0; for(j = 0; j < CHANNELS; j++){ ttot += times[j][i]; } printf("Clock ticks for '%s': %ld, %f sec\n", timenames[i], ttot, (double)ttot / (double)CLOCKS_PER_SEC); } return 0; } void read_data(int32_t x[CHANNELS][DATAPOINTS], int nc, int np) { FILE *fp; char * line = NULL; size_t len = 0; int l, c; fp = fopen("EEG.csv", "r"); if (fp == NULL) { printf("Error opening EEG.csv\n"); exit(EXIT_FAILURE); } // Skip the first line getline(&line, &len, fp); l = 0; while ((l < np) && (getline(&line, &len, fp)) != -1) { char *tok; tok = strtok(line, ","); float v; for (c = 0; c < nc; c++) { sscanf(tok, "%f", &v); x[c][l] = (int32_t) round(v); tok = strtok(NULL, ","); } l++; } } void run_channel(int np, int32_t *x, float *features, clock_t *times, char **timenames, int blocksize) { // Butterworth returns np + 1 samples int32_t *X = (int32_t *) malloc((np + 1) * sizeof(int32_t)); clock_t clk; // Clean signal using butterworth #ifdef VERBOSE printf(" Butterworth filter...\n"); #endif bw0_int(np, x, X); // 4 features: mean, std dev, abs sum, mean crossings #ifdef VERBOSE printf(" Standard features...\n"); #endif clk = clock(); stafeature(np, X, &features[0]); times[0] = clock() - clk; timenames[0] = (char*)"standard"; // 2 features: mean p2p, std dev p2p #ifdef VERBOSE printf(" Peak 2 peak features...\n"); #endif clk = clock(); p2p(np, X, &features[4], 7); times[1] = clock() - clk; timenames[1] = (char*)"peak 2 peak"; // 1 feature: approximate entropy #ifdef VERBOSE printf(" Approximate Entropy feature...\n"); #endif clk = clock(); printf("##########\n"); apen(np, X, &features[6], 3, 0.2, blocksize); printf("##########\n"); times[2] = clock() - clk; timenames[2] = (char*)"approximate entropy"; // 1 feature: hurst coefficient #ifdef VERBOSE printf(" Hurst Coefficient feature...\n"); #endif clk = clock(); hurst(np, X, &features[7]); times[3] = clock() - clk; timenames[3] = (char*)"hurst"; // 6 features: power in 5 frequency bands & total power #ifdef VERBOSE printf(" Power Spectral Density features...\n"); #endif clk = clock(); 
power_per_band(np, X, &features[8]); times[4] = clock() - clk; timenames[4] = (char*)"power spectral density"; #ifdef VERBOSE printf("Channel done\n"); #endif free(X); }
8de6b4768559f690afa50700ec5475ee5ebe3ef5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/THCTensorMathMagma.cu" #else #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) #ifdef USE_MAGMA static void THCTensor_(copyArray1d)(THCState *state, THCTensor *self, real *src, int k) { long size[1] = { k }; long stride[1] = { 1 }; THCTensor_(resizeNd)(state, self, 1, size, stride); size_t len = k * sizeof(real); THCudaCheck(hipMemcpy(self->storage->data + self->storageOffset, src, len, hipMemcpyHostToDevice)); } static void THCTensor_(copyArray2d)(THCState *state, THCTensor *self, real *src, int m, int n) { long size[2] = { m, n }; long stride[2] = { 1, m }; THCTensor_(resizeNd)(state, self, 2, size, stride); size_t len = m * n * sizeof(real); THCudaCheck(hipMemcpy(self->storage->data + self->storageOffset, src, len, hipMemcpyHostToDevice)); } static void THCTensor_(copyTensor2d)(THCState *state, real *dst, THCTensor *self) { THAssert(self->nDimension == 2); size_t len = THCTensor_(nElement)(state, self)*sizeof(real); THCTensor *temp = THCTensor_(newTranspose)(state, self, 0, 1); THCTensor *selfc = THCTensor_(newContiguous)(state, temp); THCudaCheck(hipMemcpy(dst, selfc->storage->data + selfc->storageOffset, len, hipMemcpyDeviceToHost)); THCTensor_(free)(state, temp); THCTensor_(free)(state, selfc); } #endif // USE_MAGMA static THCTensor* THCTensor_(newColumnMajor)(THCState *state, THCTensor *self, THCTensor *src) { THAssert(src->nDimension == 2); if (self == src && self->stride[0] == 1 && self->stride[1] == self->size[0]) { THCTensor_(retain)(state, self); return self; } if (self == src) self = THCTensor_(new)(state); else THCTensor_(retain)(state, self); long size[2] = { src->size[0], src->size[1] }; long stride[2] = { 1, src->size[0] }; THCTensor_(resizeNd)(state, self, 2, size, stride); THCTensor_(copy)(state, self, src); return self; } THC_API void THCTensor_(gesv)(THCState *state, THCTensor *rb_, THCTensor *ra_, THCTensor *b_, THCTensor *a_) { #ifdef USE_MAGMA THArgCheck(a_->nDimension == 2, 1, "A should be 2 dimensional"); THArgCheck(b_->nDimension == 2, 2, "b should be 2 dimensional"); THArgCheck(a_->size[0] == a_->size[1], 1, "A should be square"); THArgCheck(b_->size[0] == a_->size[0], 2, "A,b size incompatible"); int n = a_->size[0]; int nrhs = b_->size[1]; THCTensor *a = THCTensor_(newColumnMajor)(state, ra_, a_); THCTensor *b = THCTensor_(newColumnMajor)(state, rb_, b_); real *a_data = THCTensor_(data)(state, a); real *b_data = THCTensor_(data)(state, b); int *ipiv = th_magma_malloc_pinned<int>(n); int info; #if defined(THC_REAL_IS_FLOAT) magma_sgesv_gpu(n, nrhs, a_data, n, ipiv, b_data, n, &info); #else magma_dgesv_gpu(n, nrhs, a_data, n, ipiv, b_data, n, &info); #endif if (info < 0) THError("MAGMA gesv : Argument %d : illegal value", -info); else if (info > 0) THError("MAGMA gesv : U(%d,%d) is zero, singular U.", info, info); magma_free_pinned(ipiv); THCTensor_(freeCopyTo)(state, a, ra_); THCTensor_(freeCopyTo)(state, b, rb_); #else THError(NoMagma(gesv)); #endif } THC_API void THCTensor_(gels)(THCState *state, THCTensor *rb_, THCTensor *ra_, THCTensor *b_, THCTensor *a_) { #ifdef USE_MAGMA THArgCheck(a_->nDimension == 2, 1, "A should be 2 dimensional"); THArgCheck(b_->nDimension == 2, 1, "b should be 2 dimensional"); THArgCheck(a_->size[0] == b_->size[0], 2, "size incompatible A,b"); THArgCheck(a_->size[0] >= a_->size[1], 2, "A should have m >= n"); THCTensor *a = THCTensor_(newColumnMajor)(state, ra_, a_); THCTensor 
*b = THCTensor_(newColumnMajor)(state, rb_, b_); real *a_data = THCTensor_(data)(state, a); real *b_data = THCTensor_(data)(state, b); int m = a->size[0]; int n = a->size[1]; int nrhs = b->size[1]; real wkopt; int info; #if defined(THC_REAL_IS_FLOAT) magma_sgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, &wkopt, -1, &info); #else magma_dgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, &wkopt, -1, &info); #endif real *hwork = th_magma_malloc_pinned<real>((size_t)wkopt); #if defined(THC_REAL_IS_FLOAT) magma_sgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, hwork, (int)wkopt, &info); #else magma_dgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, hwork, (int)wkopt, &info); #endif magma_free_pinned(hwork); if (info != 0) THError("MAGMA gels : Argument %d : illegal value", -info); THCTensor_(freeCopyTo)(state, a, ra_); THCTensor_(freeCopyTo)(state, b, rb_); #else THError(NoMagma(gels)); #endif } THC_API void THCTensor_(syev)(THCState *state, THCTensor *re_, THCTensor *rv_, THCTensor *a, const char *jobzs, const char *uplos) { #ifdef USE_MAGMA int n = a->size[0]; int lda = n; magma_uplo_t uplo = uplos[0] == 'U' ? MagmaUpper : MagmaLower; magma_vec_t jobz = jobzs[0] == 'N' ? MagmaNoVec : MagmaVec; THCTensor *input = THCTensor_(newColumnMajor)(state, rv_, a); real *input_data = THCTensor_(data)(state, input); // eigen values and workspace real *w = th_magma_malloc_pinned<real>(n); real *wA = th_magma_malloc_pinned<real>(lda); // compute optimal size of work array int info; real lwork; int liwork; #if defined(THC_REAL_IS_FLOAT) magma_ssyevd_gpu(jobz, uplo, n, input_data, lda, w, wA, n, &lwork, -1, &liwork, -1, &info); #else magma_dsyevd_gpu(jobz, uplo, n, input_data, lda, w, wA, n, &lwork, -1, &liwork, -1, &info); #endif real *work = th_magma_malloc_pinned<real>((size_t)lwork); int *iwork = th_magma_malloc_pinned<int>(liwork); // compute eigenvalues and, optionally, eigenvectors #if defined(THC_REAL_IS_FLOAT) magma_ssyevd_gpu(jobz, uplo, n, input_data, lda, w, wA, n, work, (int) lwork, iwork, liwork, &info); #else magma_dsyevd_gpu(jobz, uplo, n, input_data, lda, w, wA, n, work, (int) lwork, iwork, liwork, &info); #endif // copy eigen values from w to re_ if (info == 0) THCTensor_(copyArray1d)(state, re_, w, n); magma_free_pinned(iwork); magma_free_pinned(work); magma_free_pinned(wA); magma_free_pinned(w); // check error value if (info > 0) THError("MAGMA syev : Failed to converge. %d off-diagonal elements of an didn't converge to zero", info); else if (info < 0) THError("MAGMA syev : Argument %d : illegal value", -info); THCTensor_(freeCopyTo)(state, input, rv_); #else THError(NoMagma(syev)); #endif } THC_API void THCTensor_(geev)(THCState *state, THCTensor *re_, THCTensor *rv_, THCTensor *a_, const char *jobvrs) { #ifdef USE_MAGMA THArgCheck(a_->nDimension == 2, 3, "A should be 2 dimensional"); THArgCheck(a_->size[0] == a_->size[1], 3, "A should be square"); magma_vec_t jobvr = jobvrs[0] == 'N' ? 
MagmaNoVec : MagmaVec; int n = a_->size[0]; real *a_data = th_magma_malloc_pinned<real>(n * n); THCTensor_(copyTensor2d)(state, a_data, a_); real *wr = th_magma_malloc_pinned<real>(n); real *wi = th_magma_malloc_pinned<real>(n); real *vr_data = NULL; int ldvr = 1; if (jobvr == MagmaVec) { vr_data = th_magma_malloc_pinned<real>(n * n); ldvr = n; } real wkopt; int info; #if defined(THC_REAL_IS_FLOAT) magma_sgeev(MagmaNoVec, jobvr, n, a_data, n, wr, wi, NULL, 1, vr_data, ldvr, &wkopt, -1, &info); #else magma_dgeev(MagmaNoVec, jobvr, n, a_data, n, wr, wi, NULL, 1, vr_data, ldvr, &wkopt, -1, &info); #endif int lwork = (int) wkopt; real *work_data = th_magma_malloc_pinned<real>(lwork); #if defined(THC_REAL_IS_FLOAT) magma_sgeev(MagmaNoVec, jobvr, n, a_data, n, wr, wi, NULL, 1, vr_data, ldvr, work_data, lwork, &info); #else magma_dgeev(MagmaNoVec, jobvr, n, a_data, n, wr, wi, NULL, 1, vr_data, ldvr, work_data, lwork, &info); #endif if (info > 0) THError("MAGMA geev : Failed to converge. %d off-diagonal elements of an didn't converge to zero", info); else if (info < 0) THError("MAGMA geev : Argument %d : illegal value", -info); { THCTensor_(resize2d)(state, re_, 2, n); THCTensor *re = THCTensor_(newContiguous)(state, re_); THCudaCheck(hipMemcpy(re->storage->data + re->storageOffset, wr, n*sizeof(real), hipMemcpyHostToDevice)); THCudaCheck(hipMemcpy(re->storage->data + re->storageOffset + n, wi, n*sizeof(real), hipMemcpyHostToDevice)); THCTensor_(freeCopyTo)(state, re, re_); THCTensor_(transpose)(state, re_, NULL, 0, 1); } if (jobvr == MagmaVec) THCTensor_(copyArray2d)(state, rv_, vr_data, n, n); magma_free_pinned(work_data); magma_free_pinned(vr_data); magma_free_pinned(wi); magma_free_pinned(wr); magma_free_pinned(a_data); #else THError(NoMagma(geev)); #endif } THC_API void THCTensor_(gesvd)(THCState *state, THCTensor *ru_, THCTensor *rs_, THCTensor *rv_, THCTensor *a, const char *jobu) { #ifdef USE_MAGMA THCTensor *ra_ = THCTensor_(new)(state); THCTensor_(gesvd2)(state, ru_, rs_, rv_, ra_, a, jobu); THCTensor_(free)(state, ra_); #else THError(NoMagma(gesvd)); #endif } THC_API void THCTensor_(gesvd2)(THCState *state, THCTensor *ru_, THCTensor *rs_, THCTensor *rv_, THCTensor *ra_, THCTensor *a, const char *jobus) { #ifdef USE_MAGMA THArgCheck(a->nDimension == 2, 2, "A should be 2 dimensional"); magma_vec_t jobz = jobus[0] == 'A' ? MagmaAllVec : jobus[0] == 'S' ? MagmaSomeVec : jobus[0] == 'O' ? MagmaOverwriteVec : MagmaNoVec; int iunused[1]; int m = a->size[0]; int n = a->size[1]; int k = m < n ? m : n; int j = (jobz == MagmaAllVec) ? m : k; int jv = (jobz == MagmaAllVec) ? 
n : k; real *a_data = th_magma_malloc_pinned<real>(m * n); THCTensor_(copyTensor2d)(state, a_data, a); real *rs_data = th_magma_malloc_pinned<real>(k); real *ru_data = th_magma_malloc_pinned<real>(m * j); real *rv_data = th_magma_malloc_pinned<real>(n * n); real wkopt; int info; #if defined(THC_REAL_IS_FLOAT) magma_sgesdd(jobz, m, n, a_data, m, rs_data, ru_data, m, rv_data, n, &wkopt, -1, iunused, &info); #else magma_dgesdd(jobz, m, n, a_data, m, rs_data, ru_data, m, rv_data, n, &wkopt, -1, iunused, &info); #endif int lwork = (int) wkopt; real *work_data = th_magma_malloc_pinned<real>(lwork); int *iwork = th_magma_malloc_pinned<int>(8 * k); #if defined(THC_REAL_IS_FLOAT) magma_sgesdd(jobz, m, n, a_data, m, rs_data, ru_data, m, rv_data, n, work_data, lwork, iwork, &info); #else magma_dgesdd(jobz, m, n, a_data, m, rs_data, ru_data, m, rv_data, n, work_data, lwork, iwork, &info); #endif if (info > 0) THError("MAGMA gesdd : the updating process of SBDSDC did not converge (error: %d)", info); else if (info < 0) THError("MAGMA gesdd : Argument %d : illegal value", -info); THCTensor_(copyArray2d)(state, rv_, rv_data, n, n); THCTensor_(transpose)(state, rv_, NULL, 0, 1); if (jobz != MagmaAllVec) THCTensor_(narrow)(state, rv_, rv_, 1, 0, jv); THCTensor_(copyArray2d)(state, ru_, ru_data, m, j); THCTensor_(copyArray1d)(state, rs_, rs_data, k); THCTensor_(copyArray2d)(state, ra_, a_data, m, n); magma_free_pinned(work_data); magma_free_pinned(iwork); magma_free_pinned(rv_data); magma_free_pinned(ru_data); magma_free_pinned(rs_data); magma_free_pinned(a_data); #else THError(NoMagma(gesvd2)); #endif } THC_API void THCTensor_(getri)(THCState *state, THCTensor *ra_, THCTensor *a) { THArgCheck(a->nDimension == 2, 2, "A should be 2 dimensional"); THArgCheck(a->size[0] == a->size[1], 2, "A should be square"); #ifdef USE_MAGMA int info; int n = a->size[0]; int lwork = n * magma_get_sgetri_nb(n); THCTensor *input = THCTensor_(newColumnMajor)(state, ra_, a); real *input_data = THCTensor_(data)(state, input); int *ipiv = th_magma_malloc_pinned<int>(n); THCTensor *work = THCTensor_(newWithSize1d)(state, lwork); real *work_data = THCTensor_(data)(state, work); // Run LU #if defined(THC_REAL_IS_FLOAT) magma_sgetrf_gpu(n, n, input_data, n, ipiv, &info); #else magma_dgetrf_gpu(n, n, input_data, n, ipiv, &info); #endif if (info > 0) THError("MAGMA getrf : U(%d,%d) is 0, U is singular", info, info); else if (info < 0) THError("MAGMA getrf : Argument %d : illegal value", -info); // Inverse #if defined(THC_REAL_IS_FLOAT) magma_sgetri_gpu(n, input_data, n, ipiv, work_data, lwork, &info); #else magma_dgetri_gpu(n, input_data, n, ipiv, work_data, lwork, &info); #endif if (info > 0) THError("MAGMA getri : U(%d,%d) is 0, U is singular", info, info); else if (info < 0) THError("MAGMA getri : Argument %d : illegal value", -info); THCTensor_(free)(state, work); magma_free_pinned(ipiv); THCTensor_(freeCopyTo)(state, input, ra_); #else int n = a->size[0]; // input THCTensor *input = THCTensor_(newColumnMajor)(state, a, a); THCTensor_(resizeNd)(state, ra_, 2, input->size, input->stride); real *matrices1[1] = { THCTensor_(data)(state, input) }; real *matrices2[1] = { THCTensor_(data)(state, ra_) }; // Copy pointers to device. 
real **d_matrices1, **d_matrices2; THCudaCheck(THCudaMalloc(state, (void**)&d_matrices1, sizeof(real*))); THCudaCheck(THCudaMalloc(state, (void**)&d_matrices2, sizeof(real*))); THCudaCheck(hipMemcpyAsync(d_matrices1, matrices1, sizeof(real*), hipMemcpyHostToDevice, THCState_getCurrentStream(state))); THCudaCheck(hipMemcpyAsync(d_matrices2, matrices2, sizeof(real*), hipMemcpyHostToDevice, THCState_getCurrentStream(state))); int info; int *info_gpu; THCudaCheck(THCudaMalloc(state, (void**)&info_gpu, sizeof(int))); int *ipiv_gpu; THCudaCheck(THCudaMalloc(state, (void**)&ipiv_gpu, n * sizeof(int))); // Run LU #if defined(THC_REAL_IS_FLOAT) THCudaBlas_Sgetrf(state, n, d_matrices1, n, ipiv_gpu, info_gpu, 1); #else THCudaBlas_Dgetrf(state, n, d_matrices1, n, ipiv_gpu, info_gpu, 1); #endif THCudaCheck(hipMemcpy(&info, info_gpu, sizeof(int), hipMemcpyDeviceToHost)); if (info > 0) THError("CUBLAS getrf : U(%d,%d) is 0, U is singular", info, info); else if (info < 0) THError("CUBLAS getrf : Argument %d : illegal value", -info); // Inverse #if defined(THC_REAL_IS_FLOAT) THCudaBlas_Sgetri(state, n, (const real**)d_matrices1, n, ipiv_gpu, d_matrices2, n, info_gpu, 1); #else THCudaBlas_Dgetri(state, n, (const real**)d_matrices1, n, ipiv_gpu, d_matrices2, n, info_gpu, 1); #endif THCudaCheck(hipMemcpy(&info, info_gpu, sizeof(int), hipMemcpyDeviceToHost)); if (info > 0) THError("CUBLAS getri : U(%d,%d) is 0, U is singular", info, info); else if (info < 0) THError("CUBLAS getri : Argument %d : illegal value", -info); THCudaCheck(THCudaFree(state, ipiv_gpu)); THCudaCheck(THCudaFree(state, info_gpu)); THCudaCheck(THCudaFree(state, d_matrices1)); THCudaCheck(THCudaFree(state, d_matrices2)); THCTensor_(free)(state, input); #endif } __global__ void THCTensor_(copyUpperSymmetric)(real *input, int n, int len) { for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < len; idx += 65535) { const int r = idx % n; const int c = idx / n; if (r > c) { input[idx] = input[r*n + c]; } } } __global__ void THCTensor_(copyLowerSymmetric)(real *input, int n, int len) { for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < len; idx += 65535) { const int r = idx % n; const int c = idx / n; if (r < c) { input[idx] = input[r*n + c]; } } } THC_API void THCTensor_(potri)(THCState *state, THCTensor *ra_, THCTensor *a, const char *uplo) { #ifdef USE_MAGMA THArgCheck(a->nDimension == 2, 2, "A should be 2 dimensional"); THArgCheck(a->size[0] == a->size[1], 2, "A should be square"); int n = a->size[0]; magma_uplo_t ul = uplo[0] == 'U' ? 
MagmaUpper : MagmaLower; THCTensor *input = THCTensor_(newColumnMajor)(state, ra_, a); real *input_data = THCTensor_(data)(state, input); int info; #if defined(THC_REAL_IS_FLOAT) magma_spotri_gpu(ul, n, input_data, n, &info); #else magma_dpotri_gpu(ul, n, input_data, n, &info); #endif if (info > 0) THError("MAGMA potri : A(%d,%d) is 0, A cannot be factorized", info, info); else if (info < 0) THError("MAGMA potri : Argument %d : illegal value", -info); hipStream_t stream = THCState_getCurrentStream(state); const int len = n*n; dim3 blocks(::min(DIVUP(len, 128), 65535)); dim3 threads(128); if (uplo[0] == 'U') { hipLaunchKernelGGL(( THCTensor_(copyUpperSymmetric)), dim3(blocks), dim3(threads), 0, stream, input_data, n, len); } else { hipLaunchKernelGGL(( THCTensor_(copyLowerSymmetric)), dim3(blocks), dim3(threads), 0, stream, input_data, n, len); } THCTensor_(freeCopyTo)(state, input, ra_); #else THError(NoMagma(potri)); #endif } THC_API void THCTensor_(potrf)(THCState *state, THCTensor *ra_, THCTensor *a, const char *uplo) { #ifdef USE_MAGMA THArgCheck(a->nDimension == 2, 2, "A should be 2 dimensional"); THArgCheck(a->size[0] == a->size[1], 2, "A should be square"); int n = a->size[0]; magma_uplo_t ul = uplo[0] == 'U' ? MagmaUpper : MagmaLower; THCTensor *input = THCTensor_(newColumnMajor)(state, ra_, a); real *input_data = THCTensor_(data)(state, input); int info; #if defined(THC_REAL_IS_FLOAT) magma_spotrf_gpu(ul, n, input_data, n, &info); #else magma_dpotrf_gpu(ul, n, input_data, n, &info); #endif // check error value if (info > 0) THError("MAGMA potrf : A(%d,%d) is 0, A cannot be factorized", info, info); else if (info < 0) THError("MAGMA potrf : Argument %d : illegal value", -info); if (uplo[0] == 'U') { THCTensor_(triu)(state, ra_, input, 0); } else { THCTensor_(tril)(state, ra_, input, 0); } THCTensor_(free)(state, input); #else THError(NoMagma(potrf)); #endif } THC_API void THCTensor_(potrs)(THCState *state, THCTensor *rb_, THCTensor *b, THCTensor *a, const char *uplo) { #ifdef USE_MAGMA THArgCheck(a->size[0] == a->size[1], 2, "A should be square"); int n = a->size[0]; int nrhs = b->size[1]; magma_uplo_t ul = uplo[0] == 'U' ? MagmaUpper : MagmaLower; THCTensor *b_ = THCTensor_(newColumnMajor)(state, rb_, b); real *b_data = THCTensor_(data)(state, b_); THCTensor *a_ = THCTensor_(newColumnMajor)(state, a, a); real *a_data = THCTensor_(data)(state, a_); int info; #if defined(THC_REAL_IS_FLOAT) magma_spotrs_gpu(ul, n, nrhs, a_data, n, b_data, n, &info); #else magma_dpotrs_gpu(ul, n, nrhs, a_data, n, b_data, n, &info); #endif // check error value if (info < 0) THError("MAGMA potrs : Argument %d : illegal value", -info); THCTensor_(freeCopyTo)(state, b_, rb_); THCTensor_(free)(state, a_); #else THError(NoMagma(potrs)); #endif } THC_API void THCTensor_(qr)(THCState *state, THCTensor *rq_, THCTensor *rr_, THCTensor *a_) { #ifdef USE_MAGMA THArgCheck(a_->nDimension == 2, 2, "A should be 2 dimensional"); THCTensor *a = THCTensor_(newColumnMajor)(state, rr_, a_); int m = a->size[0]; int n = a->size[1]; int k = (m < n ? 
m : n); #ifdef MAGMA_V2 #if defined(THC_REAL_IS_FLOAT) int nb = magma_get_sgeqrf_nb(m, n); #else int nb = magma_get_dgeqrf_nb(m, n); #endif #else #if defined(THC_REAL_IS_FLOAT) int nb = magma_get_sgeqrf_nb(m); #else int nb = magma_get_dgeqrf_nb(m); #endif #endif real *a_data = THCTensor_(data)(state, a); real *tau_data = th_magma_malloc_pinned<real>(k); THCTensor *work = THCTensor_(newWithSize1d)(state, (2*k + magma_roundup(n, 32))*nb); real *work_data = THCTensor_(data)(state, work); int info; #if defined(THC_REAL_IS_FLOAT) magma_sgeqrf2_gpu(m, n, a_data, m, tau_data, &info); #else magma_dgeqrf2_gpu(m, n, a_data, m, tau_data, &info); #endif if (info != 0) THError("MAGMA geqrf2 : Argument %d : illegal value.", -info); THCTensor_(narrow)(state, a, a, 0, 0, k); THCTensor_(triu)(state, rr_, a, 0); THCTensor_(free)(state, a); a = THCTensor_(newColumnMajor)(state, rq_, a_); a_data = THCTensor_(data)(state, a); #if defined(THC_REAL_IS_FLOAT) magma_sgeqrf_gpu(m, n, a_data, m, tau_data, work_data, &info); #else magma_dgeqrf_gpu(m, n, a_data, m, tau_data, work_data, &info); #endif if (info != 0) THError("MAGMA geqrf : Argument %d : illegal value.", -info); THCTensor *q = THCTensor_(newColumnMajor)(state, rq_, a); real *q_data = THCTensor_(data)(state, q); #if defined(THC_REAL_IS_FLOAT) magma_sorgqr_gpu(m, k, k, q_data, m, tau_data, work_data, nb, &info); #else magma_dorgqr_gpu(m, k, k, q_data, m, tau_data, work_data, nb, &info); #endif if (info != 0) THError("MAGMA orgqr : Argument %d : illegal value.", -info); THCTensor_(free)(state, work); magma_free_pinned(tau_data); THCTensor_(narrow)(state, q, q, 1, 0, k); THCTensor_(freeCopyTo)(state, q, rq_); #else THError(NoMagma(qr)); #endif } #endif #endif
8de6b4768559f690afa50700ec5475ee5ebe3ef5.cu
#ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/THCTensorMathMagma.cu" #else #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) #ifdef USE_MAGMA static void THCTensor_(copyArray1d)(THCState *state, THCTensor *self, real *src, int k) { long size[1] = { k }; long stride[1] = { 1 }; THCTensor_(resizeNd)(state, self, 1, size, stride); size_t len = k * sizeof(real); THCudaCheck(cudaMemcpy(self->storage->data + self->storageOffset, src, len, cudaMemcpyHostToDevice)); } static void THCTensor_(copyArray2d)(THCState *state, THCTensor *self, real *src, int m, int n) { long size[2] = { m, n }; long stride[2] = { 1, m }; THCTensor_(resizeNd)(state, self, 2, size, stride); size_t len = m * n * sizeof(real); THCudaCheck(cudaMemcpy(self->storage->data + self->storageOffset, src, len, cudaMemcpyHostToDevice)); } static void THCTensor_(copyTensor2d)(THCState *state, real *dst, THCTensor *self) { THAssert(self->nDimension == 2); size_t len = THCTensor_(nElement)(state, self)*sizeof(real); THCTensor *temp = THCTensor_(newTranspose)(state, self, 0, 1); THCTensor *selfc = THCTensor_(newContiguous)(state, temp); THCudaCheck(cudaMemcpy(dst, selfc->storage->data + selfc->storageOffset, len, cudaMemcpyDeviceToHost)); THCTensor_(free)(state, temp); THCTensor_(free)(state, selfc); } #endif // USE_MAGMA static THCTensor* THCTensor_(newColumnMajor)(THCState *state, THCTensor *self, THCTensor *src) { THAssert(src->nDimension == 2); if (self == src && self->stride[0] == 1 && self->stride[1] == self->size[0]) { THCTensor_(retain)(state, self); return self; } if (self == src) self = THCTensor_(new)(state); else THCTensor_(retain)(state, self); long size[2] = { src->size[0], src->size[1] }; long stride[2] = { 1, src->size[0] }; THCTensor_(resizeNd)(state, self, 2, size, stride); THCTensor_(copy)(state, self, src); return self; } THC_API void THCTensor_(gesv)(THCState *state, THCTensor *rb_, THCTensor *ra_, THCTensor *b_, THCTensor *a_) { #ifdef USE_MAGMA THArgCheck(a_->nDimension == 2, 1, "A should be 2 dimensional"); THArgCheck(b_->nDimension == 2, 2, "b should be 2 dimensional"); THArgCheck(a_->size[0] == a_->size[1], 1, "A should be square"); THArgCheck(b_->size[0] == a_->size[0], 2, "A,b size incompatible"); int n = a_->size[0]; int nrhs = b_->size[1]; THCTensor *a = THCTensor_(newColumnMajor)(state, ra_, a_); THCTensor *b = THCTensor_(newColumnMajor)(state, rb_, b_); real *a_data = THCTensor_(data)(state, a); real *b_data = THCTensor_(data)(state, b); int *ipiv = th_magma_malloc_pinned<int>(n); int info; #if defined(THC_REAL_IS_FLOAT) magma_sgesv_gpu(n, nrhs, a_data, n, ipiv, b_data, n, &info); #else magma_dgesv_gpu(n, nrhs, a_data, n, ipiv, b_data, n, &info); #endif if (info < 0) THError("MAGMA gesv : Argument %d : illegal value", -info); else if (info > 0) THError("MAGMA gesv : U(%d,%d) is zero, singular U.", info, info); magma_free_pinned(ipiv); THCTensor_(freeCopyTo)(state, a, ra_); THCTensor_(freeCopyTo)(state, b, rb_); #else THError(NoMagma(gesv)); #endif } THC_API void THCTensor_(gels)(THCState *state, THCTensor *rb_, THCTensor *ra_, THCTensor *b_, THCTensor *a_) { #ifdef USE_MAGMA THArgCheck(a_->nDimension == 2, 1, "A should be 2 dimensional"); THArgCheck(b_->nDimension == 2, 1, "b should be 2 dimensional"); THArgCheck(a_->size[0] == b_->size[0], 2, "size incompatible A,b"); THArgCheck(a_->size[0] >= a_->size[1], 2, "A should have m >= n"); THCTensor *a = THCTensor_(newColumnMajor)(state, ra_, a_); THCTensor *b = THCTensor_(newColumnMajor)(state, rb_, b_); real *a_data = 
THCTensor_(data)(state, a); real *b_data = THCTensor_(data)(state, b); int m = a->size[0]; int n = a->size[1]; int nrhs = b->size[1]; real wkopt; int info; #if defined(THC_REAL_IS_FLOAT) magma_sgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, &wkopt, -1, &info); #else magma_dgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, &wkopt, -1, &info); #endif real *hwork = th_magma_malloc_pinned<real>((size_t)wkopt); #if defined(THC_REAL_IS_FLOAT) magma_sgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, hwork, (int)wkopt, &info); #else magma_dgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, hwork, (int)wkopt, &info); #endif magma_free_pinned(hwork); if (info != 0) THError("MAGMA gels : Argument %d : illegal value", -info); THCTensor_(freeCopyTo)(state, a, ra_); THCTensor_(freeCopyTo)(state, b, rb_); #else THError(NoMagma(gels)); #endif } THC_API void THCTensor_(syev)(THCState *state, THCTensor *re_, THCTensor *rv_, THCTensor *a, const char *jobzs, const char *uplos) { #ifdef USE_MAGMA int n = a->size[0]; int lda = n; magma_uplo_t uplo = uplos[0] == 'U' ? MagmaUpper : MagmaLower; magma_vec_t jobz = jobzs[0] == 'N' ? MagmaNoVec : MagmaVec; THCTensor *input = THCTensor_(newColumnMajor)(state, rv_, a); real *input_data = THCTensor_(data)(state, input); // eigen values and workspace real *w = th_magma_malloc_pinned<real>(n); real *wA = th_magma_malloc_pinned<real>(lda); // compute optimal size of work array int info; real lwork; int liwork; #if defined(THC_REAL_IS_FLOAT) magma_ssyevd_gpu(jobz, uplo, n, input_data, lda, w, wA, n, &lwork, -1, &liwork, -1, &info); #else magma_dsyevd_gpu(jobz, uplo, n, input_data, lda, w, wA, n, &lwork, -1, &liwork, -1, &info); #endif real *work = th_magma_malloc_pinned<real>((size_t)lwork); int *iwork = th_magma_malloc_pinned<int>(liwork); // compute eigenvalues and, optionally, eigenvectors #if defined(THC_REAL_IS_FLOAT) magma_ssyevd_gpu(jobz, uplo, n, input_data, lda, w, wA, n, work, (int) lwork, iwork, liwork, &info); #else magma_dsyevd_gpu(jobz, uplo, n, input_data, lda, w, wA, n, work, (int) lwork, iwork, liwork, &info); #endif // copy eigen values from w to re_ if (info == 0) THCTensor_(copyArray1d)(state, re_, w, n); magma_free_pinned(iwork); magma_free_pinned(work); magma_free_pinned(wA); magma_free_pinned(w); // check error value if (info > 0) THError("MAGMA syev : Failed to converge. %d off-diagonal elements of an didn't converge to zero", info); else if (info < 0) THError("MAGMA syev : Argument %d : illegal value", -info); THCTensor_(freeCopyTo)(state, input, rv_); #else THError(NoMagma(syev)); #endif } THC_API void THCTensor_(geev)(THCState *state, THCTensor *re_, THCTensor *rv_, THCTensor *a_, const char *jobvrs) { #ifdef USE_MAGMA THArgCheck(a_->nDimension == 2, 3, "A should be 2 dimensional"); THArgCheck(a_->size[0] == a_->size[1], 3, "A should be square"); magma_vec_t jobvr = jobvrs[0] == 'N' ? 
MagmaNoVec : MagmaVec; int n = a_->size[0]; real *a_data = th_magma_malloc_pinned<real>(n * n); THCTensor_(copyTensor2d)(state, a_data, a_); real *wr = th_magma_malloc_pinned<real>(n); real *wi = th_magma_malloc_pinned<real>(n); real *vr_data = NULL; int ldvr = 1; if (jobvr == MagmaVec) { vr_data = th_magma_malloc_pinned<real>(n * n); ldvr = n; } real wkopt; int info; #if defined(THC_REAL_IS_FLOAT) magma_sgeev(MagmaNoVec, jobvr, n, a_data, n, wr, wi, NULL, 1, vr_data, ldvr, &wkopt, -1, &info); #else magma_dgeev(MagmaNoVec, jobvr, n, a_data, n, wr, wi, NULL, 1, vr_data, ldvr, &wkopt, -1, &info); #endif int lwork = (int) wkopt; real *work_data = th_magma_malloc_pinned<real>(lwork); #if defined(THC_REAL_IS_FLOAT) magma_sgeev(MagmaNoVec, jobvr, n, a_data, n, wr, wi, NULL, 1, vr_data, ldvr, work_data, lwork, &info); #else magma_dgeev(MagmaNoVec, jobvr, n, a_data, n, wr, wi, NULL, 1, vr_data, ldvr, work_data, lwork, &info); #endif if (info > 0) THError("MAGMA geev : Failed to converge. %d off-diagonal elements of an didn't converge to zero", info); else if (info < 0) THError("MAGMA geev : Argument %d : illegal value", -info); { THCTensor_(resize2d)(state, re_, 2, n); THCTensor *re = THCTensor_(newContiguous)(state, re_); THCudaCheck(cudaMemcpy(re->storage->data + re->storageOffset, wr, n*sizeof(real), cudaMemcpyHostToDevice)); THCudaCheck(cudaMemcpy(re->storage->data + re->storageOffset + n, wi, n*sizeof(real), cudaMemcpyHostToDevice)); THCTensor_(freeCopyTo)(state, re, re_); THCTensor_(transpose)(state, re_, NULL, 0, 1); } if (jobvr == MagmaVec) THCTensor_(copyArray2d)(state, rv_, vr_data, n, n); magma_free_pinned(work_data); magma_free_pinned(vr_data); magma_free_pinned(wi); magma_free_pinned(wr); magma_free_pinned(a_data); #else THError(NoMagma(geev)); #endif } THC_API void THCTensor_(gesvd)(THCState *state, THCTensor *ru_, THCTensor *rs_, THCTensor *rv_, THCTensor *a, const char *jobu) { #ifdef USE_MAGMA THCTensor *ra_ = THCTensor_(new)(state); THCTensor_(gesvd2)(state, ru_, rs_, rv_, ra_, a, jobu); THCTensor_(free)(state, ra_); #else THError(NoMagma(gesvd)); #endif } THC_API void THCTensor_(gesvd2)(THCState *state, THCTensor *ru_, THCTensor *rs_, THCTensor *rv_, THCTensor *ra_, THCTensor *a, const char *jobus) { #ifdef USE_MAGMA THArgCheck(a->nDimension == 2, 2, "A should be 2 dimensional"); magma_vec_t jobz = jobus[0] == 'A' ? MagmaAllVec : jobus[0] == 'S' ? MagmaSomeVec : jobus[0] == 'O' ? MagmaOverwriteVec : MagmaNoVec; int iunused[1]; int m = a->size[0]; int n = a->size[1]; int k = m < n ? m : n; int j = (jobz == MagmaAllVec) ? m : k; int jv = (jobz == MagmaAllVec) ? 
n : k; real *a_data = th_magma_malloc_pinned<real>(m * n); THCTensor_(copyTensor2d)(state, a_data, a); real *rs_data = th_magma_malloc_pinned<real>(k); real *ru_data = th_magma_malloc_pinned<real>(m * j); real *rv_data = th_magma_malloc_pinned<real>(n * n); real wkopt; int info; #if defined(THC_REAL_IS_FLOAT) magma_sgesdd(jobz, m, n, a_data, m, rs_data, ru_data, m, rv_data, n, &wkopt, -1, iunused, &info); #else magma_dgesdd(jobz, m, n, a_data, m, rs_data, ru_data, m, rv_data, n, &wkopt, -1, iunused, &info); #endif int lwork = (int) wkopt; real *work_data = th_magma_malloc_pinned<real>(lwork); int *iwork = th_magma_malloc_pinned<int>(8 * k); #if defined(THC_REAL_IS_FLOAT) magma_sgesdd(jobz, m, n, a_data, m, rs_data, ru_data, m, rv_data, n, work_data, lwork, iwork, &info); #else magma_dgesdd(jobz, m, n, a_data, m, rs_data, ru_data, m, rv_data, n, work_data, lwork, iwork, &info); #endif if (info > 0) THError("MAGMA gesdd : the updating process of SBDSDC did not converge (error: %d)", info); else if (info < 0) THError("MAGMA gesdd : Argument %d : illegal value", -info); THCTensor_(copyArray2d)(state, rv_, rv_data, n, n); THCTensor_(transpose)(state, rv_, NULL, 0, 1); if (jobz != MagmaAllVec) THCTensor_(narrow)(state, rv_, rv_, 1, 0, jv); THCTensor_(copyArray2d)(state, ru_, ru_data, m, j); THCTensor_(copyArray1d)(state, rs_, rs_data, k); THCTensor_(copyArray2d)(state, ra_, a_data, m, n); magma_free_pinned(work_data); magma_free_pinned(iwork); magma_free_pinned(rv_data); magma_free_pinned(ru_data); magma_free_pinned(rs_data); magma_free_pinned(a_data); #else THError(NoMagma(gesvd2)); #endif } THC_API void THCTensor_(getri)(THCState *state, THCTensor *ra_, THCTensor *a) { THArgCheck(a->nDimension == 2, 2, "A should be 2 dimensional"); THArgCheck(a->size[0] == a->size[1], 2, "A should be square"); #ifdef USE_MAGMA int info; int n = a->size[0]; int lwork = n * magma_get_sgetri_nb(n); THCTensor *input = THCTensor_(newColumnMajor)(state, ra_, a); real *input_data = THCTensor_(data)(state, input); int *ipiv = th_magma_malloc_pinned<int>(n); THCTensor *work = THCTensor_(newWithSize1d)(state, lwork); real *work_data = THCTensor_(data)(state, work); // Run LU #if defined(THC_REAL_IS_FLOAT) magma_sgetrf_gpu(n, n, input_data, n, ipiv, &info); #else magma_dgetrf_gpu(n, n, input_data, n, ipiv, &info); #endif if (info > 0) THError("MAGMA getrf : U(%d,%d) is 0, U is singular", info, info); else if (info < 0) THError("MAGMA getrf : Argument %d : illegal value", -info); // Inverse #if defined(THC_REAL_IS_FLOAT) magma_sgetri_gpu(n, input_data, n, ipiv, work_data, lwork, &info); #else magma_dgetri_gpu(n, input_data, n, ipiv, work_data, lwork, &info); #endif if (info > 0) THError("MAGMA getri : U(%d,%d) is 0, U is singular", info, info); else if (info < 0) THError("MAGMA getri : Argument %d : illegal value", -info); THCTensor_(free)(state, work); magma_free_pinned(ipiv); THCTensor_(freeCopyTo)(state, input, ra_); #else int n = a->size[0]; // input THCTensor *input = THCTensor_(newColumnMajor)(state, a, a); THCTensor_(resizeNd)(state, ra_, 2, input->size, input->stride); real *matrices1[1] = { THCTensor_(data)(state, input) }; real *matrices2[1] = { THCTensor_(data)(state, ra_) }; // Copy pointers to device. 
real **d_matrices1, **d_matrices2; THCudaCheck(THCudaMalloc(state, (void**)&d_matrices1, sizeof(real*))); THCudaCheck(THCudaMalloc(state, (void**)&d_matrices2, sizeof(real*))); THCudaCheck(cudaMemcpyAsync(d_matrices1, matrices1, sizeof(real*), cudaMemcpyHostToDevice, THCState_getCurrentStream(state))); THCudaCheck(cudaMemcpyAsync(d_matrices2, matrices2, sizeof(real*), cudaMemcpyHostToDevice, THCState_getCurrentStream(state))); int info; int *info_gpu; THCudaCheck(THCudaMalloc(state, (void**)&info_gpu, sizeof(int))); int *ipiv_gpu; THCudaCheck(THCudaMalloc(state, (void**)&ipiv_gpu, n * sizeof(int))); // Run LU #if defined(THC_REAL_IS_FLOAT) THCudaBlas_Sgetrf(state, n, d_matrices1, n, ipiv_gpu, info_gpu, 1); #else THCudaBlas_Dgetrf(state, n, d_matrices1, n, ipiv_gpu, info_gpu, 1); #endif THCudaCheck(cudaMemcpy(&info, info_gpu, sizeof(int), cudaMemcpyDeviceToHost)); if (info > 0) THError("CUBLAS getrf : U(%d,%d) is 0, U is singular", info, info); else if (info < 0) THError("CUBLAS getrf : Argument %d : illegal value", -info); // Inverse #if defined(THC_REAL_IS_FLOAT) THCudaBlas_Sgetri(state, n, (const real**)d_matrices1, n, ipiv_gpu, d_matrices2, n, info_gpu, 1); #else THCudaBlas_Dgetri(state, n, (const real**)d_matrices1, n, ipiv_gpu, d_matrices2, n, info_gpu, 1); #endif THCudaCheck(cudaMemcpy(&info, info_gpu, sizeof(int), cudaMemcpyDeviceToHost)); if (info > 0) THError("CUBLAS getri : U(%d,%d) is 0, U is singular", info, info); else if (info < 0) THError("CUBLAS getri : Argument %d : illegal value", -info); THCudaCheck(THCudaFree(state, ipiv_gpu)); THCudaCheck(THCudaFree(state, info_gpu)); THCudaCheck(THCudaFree(state, d_matrices1)); THCudaCheck(THCudaFree(state, d_matrices2)); THCTensor_(free)(state, input); #endif } __global__ void THCTensor_(copyUpperSymmetric)(real *input, int n, int len) { for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < len; idx += 65535) { const int r = idx % n; const int c = idx / n; if (r > c) { input[idx] = input[r*n + c]; } } } __global__ void THCTensor_(copyLowerSymmetric)(real *input, int n, int len) { for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < len; idx += 65535) { const int r = idx % n; const int c = idx / n; if (r < c) { input[idx] = input[r*n + c]; } } } THC_API void THCTensor_(potri)(THCState *state, THCTensor *ra_, THCTensor *a, const char *uplo) { #ifdef USE_MAGMA THArgCheck(a->nDimension == 2, 2, "A should be 2 dimensional"); THArgCheck(a->size[0] == a->size[1], 2, "A should be square"); int n = a->size[0]; magma_uplo_t ul = uplo[0] == 'U' ? 
MagmaUpper : MagmaLower; THCTensor *input = THCTensor_(newColumnMajor)(state, ra_, a); real *input_data = THCTensor_(data)(state, input); int info; #if defined(THC_REAL_IS_FLOAT) magma_spotri_gpu(ul, n, input_data, n, &info); #else magma_dpotri_gpu(ul, n, input_data, n, &info); #endif if (info > 0) THError("MAGMA potri : A(%d,%d) is 0, A cannot be factorized", info, info); else if (info < 0) THError("MAGMA potri : Argument %d : illegal value", -info); cudaStream_t stream = THCState_getCurrentStream(state); const int len = n*n; dim3 blocks(std::min(DIVUP(len, 128), 65535)); dim3 threads(128); if (uplo[0] == 'U') { THCTensor_(copyUpperSymmetric)<<<blocks, threads, 0, stream>>>(input_data, n, len); } else { THCTensor_(copyLowerSymmetric)<<<blocks, threads, 0, stream>>>(input_data, n, len); } THCTensor_(freeCopyTo)(state, input, ra_); #else THError(NoMagma(potri)); #endif } THC_API void THCTensor_(potrf)(THCState *state, THCTensor *ra_, THCTensor *a, const char *uplo) { #ifdef USE_MAGMA THArgCheck(a->nDimension == 2, 2, "A should be 2 dimensional"); THArgCheck(a->size[0] == a->size[1], 2, "A should be square"); int n = a->size[0]; magma_uplo_t ul = uplo[0] == 'U' ? MagmaUpper : MagmaLower; THCTensor *input = THCTensor_(newColumnMajor)(state, ra_, a); real *input_data = THCTensor_(data)(state, input); int info; #if defined(THC_REAL_IS_FLOAT) magma_spotrf_gpu(ul, n, input_data, n, &info); #else magma_dpotrf_gpu(ul, n, input_data, n, &info); #endif // check error value if (info > 0) THError("MAGMA potrf : A(%d,%d) is 0, A cannot be factorized", info, info); else if (info < 0) THError("MAGMA potrf : Argument %d : illegal value", -info); if (uplo[0] == 'U') { THCTensor_(triu)(state, ra_, input, 0); } else { THCTensor_(tril)(state, ra_, input, 0); } THCTensor_(free)(state, input); #else THError(NoMagma(potrf)); #endif } THC_API void THCTensor_(potrs)(THCState *state, THCTensor *rb_, THCTensor *b, THCTensor *a, const char *uplo) { #ifdef USE_MAGMA THArgCheck(a->size[0] == a->size[1], 2, "A should be square"); int n = a->size[0]; int nrhs = b->size[1]; magma_uplo_t ul = uplo[0] == 'U' ? MagmaUpper : MagmaLower; THCTensor *b_ = THCTensor_(newColumnMajor)(state, rb_, b); real *b_data = THCTensor_(data)(state, b_); THCTensor *a_ = THCTensor_(newColumnMajor)(state, a, a); real *a_data = THCTensor_(data)(state, a_); int info; #if defined(THC_REAL_IS_FLOAT) magma_spotrs_gpu(ul, n, nrhs, a_data, n, b_data, n, &info); #else magma_dpotrs_gpu(ul, n, nrhs, a_data, n, b_data, n, &info); #endif // check error value if (info < 0) THError("MAGMA potrs : Argument %d : illegal value", -info); THCTensor_(freeCopyTo)(state, b_, rb_); THCTensor_(free)(state, a_); #else THError(NoMagma(potrs)); #endif } THC_API void THCTensor_(qr)(THCState *state, THCTensor *rq_, THCTensor *rr_, THCTensor *a_) { #ifdef USE_MAGMA THArgCheck(a_->nDimension == 2, 2, "A should be 2 dimensional"); THCTensor *a = THCTensor_(newColumnMajor)(state, rr_, a_); int m = a->size[0]; int n = a->size[1]; int k = (m < n ? 
m : n); #ifdef MAGMA_V2 #if defined(THC_REAL_IS_FLOAT) int nb = magma_get_sgeqrf_nb(m, n); #else int nb = magma_get_dgeqrf_nb(m, n); #endif #else #if defined(THC_REAL_IS_FLOAT) int nb = magma_get_sgeqrf_nb(m); #else int nb = magma_get_dgeqrf_nb(m); #endif #endif real *a_data = THCTensor_(data)(state, a); real *tau_data = th_magma_malloc_pinned<real>(k); THCTensor *work = THCTensor_(newWithSize1d)(state, (2*k + magma_roundup(n, 32))*nb); real *work_data = THCTensor_(data)(state, work); int info; #if defined(THC_REAL_IS_FLOAT) magma_sgeqrf2_gpu(m, n, a_data, m, tau_data, &info); #else magma_dgeqrf2_gpu(m, n, a_data, m, tau_data, &info); #endif if (info != 0) THError("MAGMA geqrf2 : Argument %d : illegal value.", -info); THCTensor_(narrow)(state, a, a, 0, 0, k); THCTensor_(triu)(state, rr_, a, 0); THCTensor_(free)(state, a); a = THCTensor_(newColumnMajor)(state, rq_, a_); a_data = THCTensor_(data)(state, a); #if defined(THC_REAL_IS_FLOAT) magma_sgeqrf_gpu(m, n, a_data, m, tau_data, work_data, &info); #else magma_dgeqrf_gpu(m, n, a_data, m, tau_data, work_data, &info); #endif if (info != 0) THError("MAGMA geqrf : Argument %d : illegal value.", -info); THCTensor *q = THCTensor_(newColumnMajor)(state, rq_, a); real *q_data = THCTensor_(data)(state, q); #if defined(THC_REAL_IS_FLOAT) magma_sorgqr_gpu(m, k, k, q_data, m, tau_data, work_data, nb, &info); #else magma_dorgqr_gpu(m, k, k, q_data, m, tau_data, work_data, nb, &info); #endif if (info != 0) THError("MAGMA orgqr : Argument %d : illegal value.", -info); THCTensor_(free)(state, work); magma_free_pinned(tau_data); THCTensor_(narrow)(state, q, q, 1, 0, k); THCTensor_(freeCopyTo)(state, q, rq_); #else THError(NoMagma(qr)); #endif } #endif #endif
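// Illustrative aside (not part of the THC sources above): the MAGMA wrappers rely on
// THCTensor_(newColumnMajor) / copyTensor2d to hand MAGMA the column-major (Fortran)
// layout it expects. Below is a minimal host-side sketch of that layout change, assuming
// plain float buffers; the function name rowMajorToColMajor and its arguments are
// hypothetical and only meant to show the index transformation.
#include <cstddef>

static void rowMajorToColMajor(const float* src, float* dst, std::size_t m, std::size_t n)
{
    // src holds an m x n matrix in row-major (C) order: src[i*n + j]
    // dst receives the same matrix in column-major (Fortran) order: dst[j*m + i]
    for (std::size_t i = 0; i < m; ++i)
        for (std::size_t j = 0; j < n; ++j)
            dst[j * m + i] = src[i * n + j];
}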
9e392ebb59a0ff305901a317c6ee36ba46757035.hip
// !!! This is a file automatically generated by hipify!!! /* Example showing the use of CUFFT for fast 1D-convolution using FFT. */ // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <helper_functions.h> #include <helper_cuda.h> // includes, project #include <hipfft.h> // Complex data type typedef float2 Complex; static __device__ __host__ inline Complex ComplexAdd(Complex, Complex); static __device__ __host__ inline Complex ComplexScale(Complex, float); static __device__ __host__ inline Complex ComplexMul(Complex, Complex); static __global__ void ComplexPointwiseMulAndScale(Complex*, const Complex*, int, float); // Filtering functions void Convolve(const Complex*, int, const Complex*, int, Complex*); // Padding functions int PadData(const Complex*, Complex**, int, const Complex*, Complex**, int); //////////////////////////////////////////////////////////////////////////////// // declaration, forward void runTest(int argc, char** argv); // The filter size is assumed to be a number smaller than the signal size #define SIGNAL_SIZE 50 #define FILTER_KERNEL_SIZE 11 //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char** argv) { runTest(argc, argv); } //////////////////////////////////////////////////////////////////////////////// //! Run a simple test for CUDA //////////////////////////////////////////////////////////////////////////////// void runTest(int argc, char** argv) { printf("[simpleCUFFT] is starting...\n"); // Allocate host memory for the signal Complex* h_signal = (Complex*)malloc(sizeof(Complex) * SIGNAL_SIZE); // Initalize the memory for the signal for (unsigned int i = 0; i < SIGNAL_SIZE; ++i) { h_signal[i].x = rand() / (float)RAND_MAX; h_signal[i].y = 0; } // Allocate host memory for the filter Complex* h_filter_kernel = (Complex*)malloc(sizeof(Complex) * FILTER_KERNEL_SIZE); // Initalize the memory for the filter for (unsigned int i = 0; i < FILTER_KERNEL_SIZE; ++i) { h_filter_kernel[i].x = rand() / (float)RAND_MAX; h_filter_kernel[i].y = 0; } // Pad signal and filter kernel Complex* h_padded_signal; Complex* h_padded_filter_kernel; int new_size = PadData(h_signal, &h_padded_signal, SIGNAL_SIZE, h_filter_kernel, &h_padded_filter_kernel, FILTER_KERNEL_SIZE); int mem_size = sizeof(Complex) * new_size; // Allocate device memory for signal Complex* d_signal; hipMalloc((void**)&d_signal, mem_size); // Copy host memory to device hipMemcpy(d_signal, h_padded_signal, mem_size, hipMemcpyHostToDevice); // Allocate device memory for filter kernel Complex* d_filter_kernel; hipMalloc((void**)&d_filter_kernel, mem_size); // Copy host memory to device hipMemcpy(d_filter_kernel, h_padded_filter_kernel, mem_size, hipMemcpyHostToDevice); // CUFFT plan hipfftHandle plan; hipfftPlan1d(&plan, new_size, HIPFFT_C2C, 1); // Transform signal and kernel printf("Transforming signal hipfftExecC2C\n"); hipfftExecC2C(plan, (hipfftComplex *)d_signal, (hipfftComplex *)d_signal, HIPFFT_FORWARD); hipfftExecC2C(plan, (hipfftComplex *)d_filter_kernel, (hipfftComplex *)d_filter_kernel, HIPFFT_FORWARD); // Multiply the coefficients together and normalize the result printf("Launching ComplexPointwiseMulAndScale<<< >>>\n"); ComplexPointwiseMulAndScale << <32, 256 >> >(d_signal, d_filter_kernel, new_size, 1.0f / new_size); // Transform signal back 
printf("Transforming signal back hipfftExecC2C\n"); hipfftExecC2C(plan, (hipfftComplex *)d_signal, (hipfftComplex *)d_signal, HIPFFT_BACKWARD); // Copy device memory to host Complex* h_convolved_signal = h_padded_signal; hipMemcpy(h_convolved_signal, d_signal, mem_size, hipMemcpyDeviceToHost); // Allocate host memory for the convolution result Complex* h_convolved_signal_ref = (Complex*)malloc(sizeof(Complex) * SIGNAL_SIZE); // Convolve on the host Convolve(h_signal, SIGNAL_SIZE, h_filter_kernel, FILTER_KERNEL_SIZE, h_convolved_signal_ref); //Destroy CUFFT context hipfftDestroy(plan); // cleanup memory free(h_signal); free(h_filter_kernel); free(h_padded_signal); free(h_padded_filter_kernel); free(h_convolved_signal_ref); hipFree(d_signal); hipFree(d_filter_kernel); } // Pad data int PadData(const Complex* signal, Complex** padded_signal, int signal_size, const Complex* filter_kernel, Complex** padded_filter_kernel, int filter_kernel_size) { int minRadius = filter_kernel_size / 2; int maxRadius = filter_kernel_size - minRadius; int new_size = signal_size + maxRadius; // Pad signal Complex* new_data = (Complex*)malloc(sizeof(Complex) * new_size); memcpy(new_data + 0, signal, signal_size * sizeof(Complex)); memset(new_data + signal_size, 0, (new_size - signal_size) * sizeof(Complex)); *padded_signal = new_data; // Pad filter new_data = (Complex*)malloc(sizeof(Complex) * new_size); memcpy(new_data + 0, filter_kernel + minRadius, maxRadius * sizeof(Complex)); memset(new_data + maxRadius, 0, (new_size - filter_kernel_size) * sizeof(Complex)); memcpy(new_data + new_size - minRadius, filter_kernel, minRadius * sizeof(Complex)); *padded_filter_kernel = new_data; return new_size; } //////////////////////////////////////////////////////////////////////////////// // Filtering operations //////////////////////////////////////////////////////////////////////////////// // Computes convolution on the host void Convolve(const Complex* signal, int signal_size, const Complex* filter_kernel, int filter_kernel_size, Complex* filtered_signal) { int minRadius = filter_kernel_size / 2; int maxRadius = filter_kernel_size - minRadius; // Loop over output element indices for (int i = 0; i < signal_size; ++i) { filtered_signal[i].x = filtered_signal[i].y = 0; // Loop over convolution indices for (int j = -maxRadius + 1; j <= minRadius; ++j) { int k = i + j; if (k >= 0 && k < signal_size) filtered_signal[i] = ComplexAdd(filtered_signal[i], ComplexMul(signal[k], filter_kernel[minRadius - j])); } } } //////////////////////////////////////////////////////////////////////////////// // Complex operations //////////////////////////////////////////////////////////////////////////////// // Complex addition static __device__ __host__ inline Complex ComplexAdd(Complex a, Complex b) { Complex c; c.x = a.x + b.x; c.y = a.y + b.y; return c; } // Complex scale static __device__ __host__ inline Complex ComplexScale(Complex a, float s) { Complex c; c.x = s * a.x; c.y = s * a.y; return c; } // Complex multiplication static __device__ __host__ inline Complex ComplexMul(Complex a, Complex b) { Complex c; c.x = a.x * b.x - a.y * b.y; c.y = a.x * b.y + a.y * b.x; return c; } // Complex pointwise multiplication static __global__ void ComplexPointwiseMulAndScale(Complex* a, const Complex* b, int size, float scale) { const int numThreads = blockDim.x * gridDim.x; const int threadID = blockIdx.x * blockDim.x + threadIdx.x; for (int i = threadID; i < size; i += numThreads) a[i] = ComplexScale(ComplexMul(a[i], b[i]), scale); }
9e392ebb59a0ff305901a317c6ee36ba46757035.cu
/* Example showing the use of CUFFT for fast 1D-convolution using FFT. */ // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <helper_functions.h> #include <helper_cuda.h> // includes, project #include <cufft.h> // Complex data type typedef float2 Complex; static __device__ __host__ inline Complex ComplexAdd(Complex, Complex); static __device__ __host__ inline Complex ComplexScale(Complex, float); static __device__ __host__ inline Complex ComplexMul(Complex, Complex); static __global__ void ComplexPointwiseMulAndScale(Complex*, const Complex*, int, float); // Filtering functions void Convolve(const Complex*, int, const Complex*, int, Complex*); // Padding functions int PadData(const Complex*, Complex**, int, const Complex*, Complex**, int); //////////////////////////////////////////////////////////////////////////////// // declaration, forward void runTest(int argc, char** argv); // The filter size is assumed to be a number smaller than the signal size #define SIGNAL_SIZE 50 #define FILTER_KERNEL_SIZE 11 //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char** argv) { runTest(argc, argv); } //////////////////////////////////////////////////////////////////////////////// //! Run a simple test for CUDA //////////////////////////////////////////////////////////////////////////////// void runTest(int argc, char** argv) { printf("[simpleCUFFT] is starting...\n"); // Allocate host memory for the signal Complex* h_signal = (Complex*)malloc(sizeof(Complex) * SIGNAL_SIZE); // Initalize the memory for the signal for (unsigned int i = 0; i < SIGNAL_SIZE; ++i) { h_signal[i].x = rand() / (float)RAND_MAX; h_signal[i].y = 0; } // Allocate host memory for the filter Complex* h_filter_kernel = (Complex*)malloc(sizeof(Complex) * FILTER_KERNEL_SIZE); // Initalize the memory for the filter for (unsigned int i = 0; i < FILTER_KERNEL_SIZE; ++i) { h_filter_kernel[i].x = rand() / (float)RAND_MAX; h_filter_kernel[i].y = 0; } // Pad signal and filter kernel Complex* h_padded_signal; Complex* h_padded_filter_kernel; int new_size = PadData(h_signal, &h_padded_signal, SIGNAL_SIZE, h_filter_kernel, &h_padded_filter_kernel, FILTER_KERNEL_SIZE); int mem_size = sizeof(Complex) * new_size; // Allocate device memory for signal Complex* d_signal; cudaMalloc((void**)&d_signal, mem_size); // Copy host memory to device cudaMemcpy(d_signal, h_padded_signal, mem_size, cudaMemcpyHostToDevice); // Allocate device memory for filter kernel Complex* d_filter_kernel; cudaMalloc((void**)&d_filter_kernel, mem_size); // Copy host memory to device cudaMemcpy(d_filter_kernel, h_padded_filter_kernel, mem_size, cudaMemcpyHostToDevice); // CUFFT plan cufftHandle plan; cufftPlan1d(&plan, new_size, CUFFT_C2C, 1); // Transform signal and kernel printf("Transforming signal cufftExecC2C\n"); cufftExecC2C(plan, (cufftComplex *)d_signal, (cufftComplex *)d_signal, CUFFT_FORWARD); cufftExecC2C(plan, (cufftComplex *)d_filter_kernel, (cufftComplex *)d_filter_kernel, CUFFT_FORWARD); // Multiply the coefficients together and normalize the result printf("Launching ComplexPointwiseMulAndScale<<< >>>\n"); ComplexPointwiseMulAndScale << <32, 256 >> >(d_signal, d_filter_kernel, new_size, 1.0f / new_size); // Transform signal back printf("Transforming signal back cufftExecC2C\n"); cufftExecC2C(plan, 
(cufftComplex *)d_signal, (cufftComplex *)d_signal, CUFFT_INVERSE); // Copy device memory to host Complex* h_convolved_signal = h_padded_signal; cudaMemcpy(h_convolved_signal, d_signal, mem_size, cudaMemcpyDeviceToHost); // Allocate host memory for the convolution result Complex* h_convolved_signal_ref = (Complex*)malloc(sizeof(Complex) * SIGNAL_SIZE); // Convolve on the host Convolve(h_signal, SIGNAL_SIZE, h_filter_kernel, FILTER_KERNEL_SIZE, h_convolved_signal_ref); //Destroy CUFFT context cufftDestroy(plan); // cleanup memory free(h_signal); free(h_filter_kernel); free(h_padded_signal); free(h_padded_filter_kernel); free(h_convolved_signal_ref); cudaFree(d_signal); cudaFree(d_filter_kernel); } // Pad data int PadData(const Complex* signal, Complex** padded_signal, int signal_size, const Complex* filter_kernel, Complex** padded_filter_kernel, int filter_kernel_size) { int minRadius = filter_kernel_size / 2; int maxRadius = filter_kernel_size - minRadius; int new_size = signal_size + maxRadius; // Pad signal Complex* new_data = (Complex*)malloc(sizeof(Complex) * new_size); memcpy(new_data + 0, signal, signal_size * sizeof(Complex)); memset(new_data + signal_size, 0, (new_size - signal_size) * sizeof(Complex)); *padded_signal = new_data; // Pad filter new_data = (Complex*)malloc(sizeof(Complex) * new_size); memcpy(new_data + 0, filter_kernel + minRadius, maxRadius * sizeof(Complex)); memset(new_data + maxRadius, 0, (new_size - filter_kernel_size) * sizeof(Complex)); memcpy(new_data + new_size - minRadius, filter_kernel, minRadius * sizeof(Complex)); *padded_filter_kernel = new_data; return new_size; } //////////////////////////////////////////////////////////////////////////////// // Filtering operations //////////////////////////////////////////////////////////////////////////////// // Computes convolution on the host void Convolve(const Complex* signal, int signal_size, const Complex* filter_kernel, int filter_kernel_size, Complex* filtered_signal) { int minRadius = filter_kernel_size / 2; int maxRadius = filter_kernel_size - minRadius; // Loop over output element indices for (int i = 0; i < signal_size; ++i) { filtered_signal[i].x = filtered_signal[i].y = 0; // Loop over convolution indices for (int j = -maxRadius + 1; j <= minRadius; ++j) { int k = i + j; if (k >= 0 && k < signal_size) filtered_signal[i] = ComplexAdd(filtered_signal[i], ComplexMul(signal[k], filter_kernel[minRadius - j])); } } } //////////////////////////////////////////////////////////////////////////////// // Complex operations //////////////////////////////////////////////////////////////////////////////// // Complex addition static __device__ __host__ inline Complex ComplexAdd(Complex a, Complex b) { Complex c; c.x = a.x + b.x; c.y = a.y + b.y; return c; } // Complex scale static __device__ __host__ inline Complex ComplexScale(Complex a, float s) { Complex c; c.x = s * a.x; c.y = s * a.y; return c; } // Complex multiplication static __device__ __host__ inline Complex ComplexMul(Complex a, Complex b) { Complex c; c.x = a.x * b.x - a.y * b.y; c.y = a.x * b.y + a.y * b.x; return c; } // Complex pointwise multiplication static __global__ void ComplexPointwiseMulAndScale(Complex* a, const Complex* b, int size, float scale) { const int numThreads = blockDim.x * gridDim.x; const int threadID = blockIdx.x * blockDim.x + threadIdx.x; for (int i = threadID; i < size; i += numThreads) a[i] = ComplexScale(ComplexMul(a[i], b[i]), scale); }
92a0499d2b564b1ec5de988e6fedb263f946b624.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Software License Agreement (BSD License) * * Point Cloud Library (PCL) - www.pointclouds.org * Copyright (c) 2011, Willow Garage, Inc. * * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of Willow Garage, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * */ #include <pcl/gpu/utils/device/block.hpp> #include <pcl/gpu/utils/device/funcattrib.hpp> #include "device.hpp" namespace pcl { namespace device { struct Combined { enum { CTA_SIZE_X = 32, CTA_SIZE_Y = 8, CTA_SIZE = CTA_SIZE_X * CTA_SIZE_Y }; struct plus { __forceinline__ __device__ float operator () (const float &lhs, const volatile float& rhs) const { return (lhs + rhs); } }; Mat33 Rcurr; float3 tcurr; PtrStep<float> vmap_curr; PtrStep<float> nmap_curr; Mat33 Rprev_inv; float3 tprev; Intr intr; PtrStep<float> vmap_g_prev; PtrStep<float> nmap_g_prev; float distThres; float angleThres; int cols; int rows; mutable PtrStep<float> gbuf; __device__ __forceinline__ bool search (int x, int y, float3& n, float3& d, float3& s) const { float3 ncurr; ncurr.x = nmap_curr.ptr (y)[x]; if (isnan (ncurr.x)) return (false); float3 vcurr; vcurr.x = vmap_curr.ptr (y )[x]; vcurr.y = vmap_curr.ptr (y + rows)[x]; vcurr.z = vmap_curr.ptr (y + 2 * rows)[x]; float3 vcurr_g = Rcurr * vcurr + tcurr; float3 vcurr_cp = Rprev_inv * (vcurr_g - tprev); // prev camera coo space int2 ukr; //projection ukr.x = __float2int_rn (vcurr_cp.x * intr.fx / vcurr_cp.z + intr.cx); //4 ukr.y = __float2int_rn (vcurr_cp.y * intr.fy / vcurr_cp.z + intr.cy); //4 if (ukr.x < 0 || ukr.y < 0 || ukr.x >= cols || ukr.y >= rows || vcurr_cp.z < 0) return (false); float3 nprev_g; nprev_g.x = nmap_g_prev.ptr (ukr.y)[ukr.x]; if (isnan (nprev_g.x)) return (false); float3 vprev_g; vprev_g.x = vmap_g_prev.ptr (ukr.y )[ukr.x]; vprev_g.y = vmap_g_prev.ptr (ukr.y + rows)[ukr.x]; vprev_g.z = vmap_g_prev.ptr (ukr.y + 2 * rows)[ukr.x]; float dist = norm (vprev_g - vcurr_g); if (dist > distThres) return (false); ncurr.y = nmap_curr.ptr (y + rows)[x]; ncurr.z = nmap_curr.ptr (y + 2 * rows)[x]; float3 ncurr_g = Rcurr * ncurr; nprev_g.y = nmap_g_prev.ptr (ukr.y + 
rows)[ukr.x]; nprev_g.z = nmap_g_prev.ptr (ukr.y + 2 * rows)[ukr.x]; float sine = norm (cross (ncurr_g, nprev_g)); if (sine >= angleThres) return (false); n = nprev_g; d = vprev_g; s = vcurr_g; return (true); } __device__ __forceinline__ void operator () () const { int x = threadIdx.x + blockIdx.x * CTA_SIZE_X; int y = threadIdx.y + blockIdx.y * CTA_SIZE_Y; float3 n, d, s; bool found_coresp = false; if (x < cols || y < rows) found_coresp = search (x, y, n, d, s); float row[7]; if (found_coresp) { *(float3*)&row[0] = cross (s, n); *(float3*)&row[3] = n; row[6] = dot (n, d - s); } else row[0] = row[1] = row[2] = row[3] = row[4] = row[5] = row[6] = 0.f; __shared__ float smem[CTA_SIZE]; int tid = Block::flattenedThreadId (); int shift = 0; for (int i = 0; i < 6; ++i) //rows { #pragma unroll for (int j = i; j < 7; ++j) // cols + b { __syncthreads (); smem[tid] = row[i] * row[j]; __syncthreads (); Block::reduce<CTA_SIZE>(smem, plus ()); if (tid == 0) gbuf.ptr (shift++)[blockIdx.x + gridDim.x * blockIdx.y] = smem[0]; } } } }; __global__ void combinedKernel (const Combined cs) { cs (); } struct TranformReduction { enum { CTA_SIZE = 512, STRIDE = CTA_SIZE, B = 6, COLS = 6, ROWS = 6, DIAG = 6, UPPER_DIAG_MAT = (COLS * ROWS - DIAG) / 2 + DIAG, TOTAL = UPPER_DIAG_MAT + B, GRID_X = TOTAL }; struct plus { __forceinline__ __device__ float operator () (const float &lhs, const volatile float& rhs) const { return lhs + rhs; } }; PtrStep<float> gbuf; int length; mutable float* output; __device__ __forceinline__ void operator () () const { const float *beg = gbuf.ptr (blockIdx.x); const float *end = beg + length; int tid = threadIdx.x; float sum = 0.f; for (const float *t = beg + tid; t < end; t += STRIDE) sum += *t; __shared__ float smem[CTA_SIZE]; smem[tid] = sum; __syncthreads (); Block::reduce<CTA_SIZE>(smem, plus ()); if (tid == 0) output[blockIdx.x] = smem[0]; } }; __global__ void TransformEstimatorKernel2 (const TranformReduction tr) { tr (); } } } ////////////////////////////////////////////////////////////////////////////////////////////////////////////////// void pcl::device::estimateCombined (const Mat33& Rcurr, const float3& tcurr, const MapArr& vmap_curr, const MapArr& nmap_curr, const Mat33& Rprev_inv, const float3& tprev, const Intr& intr, const MapArr& vmap_g_prev, const MapArr& nmap_g_prev, float distThres, float angleThres, DeviceArray2D<float>& gbuf, DeviceArray<float>& mbuf, float* matrixA_host, float* vectorB_host) { int cols = vmap_curr.cols (); int rows = vmap_curr.rows () / 3; Combined cs; cs.Rcurr = Rcurr; cs.tcurr = tcurr; cs.vmap_curr = vmap_curr; cs.nmap_curr = nmap_curr; cs.Rprev_inv = Rprev_inv; cs.tprev = tprev; cs.intr = intr; cs.vmap_g_prev = vmap_g_prev; cs.nmap_g_prev = nmap_g_prev; cs.distThres = distThres; cs.angleThres = angleThres; cs.cols = cols; cs.rows = rows; ////////////////////////////// dim3 block (Combined::CTA_SIZE_X, Combined::CTA_SIZE_Y); dim3 grid (1, 1, 1); grid.x = divUp (cols, block.x); grid.y = divUp (rows, block.y); mbuf.create (TranformReduction::TOTAL); if (gbuf.rows () != TranformReduction::TOTAL || gbuf.cols () < (int)(grid.x * grid.y)) gbuf.create (TranformReduction::TOTAL, grid.x * grid.y); cs.gbuf = gbuf; hipLaunchKernelGGL(( combinedKernel), dim3(grid), dim3(block), 0, 0, cs); cudaSafeCall ( hipGetLastError () ); //cudaSafeCall(hipDeviceSynchronize()); //printFuncAttrib(combinedKernel); TranformReduction tr; tr.gbuf = gbuf; tr.length = grid.x * grid.y; tr.output = mbuf; hipLaunchKernelGGL(( TransformEstimatorKernel2), 
dim3(TranformReduction::TOTAL), dim3(TranformReduction::CTA_SIZE), 0, 0, tr); cudaSafeCall (hipGetLastError ()); cudaSafeCall (hipDeviceSynchronize ()); float host_data[TranformReduction::TOTAL]; mbuf.download (host_data); int shift = 0; for (int i = 0; i < 6; ++i) //rows for (int j = i; j < 7; ++j) // cols + b { float value = host_data[shift++]; if (j == 6) // vector b vectorB_host[i] = value; else matrixA_host[j * 6 + i] = matrixA_host[i * 6 + j] = value; } }
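// Illustrative aside (not part of the PCL source above): estimateCombined() only assembles
// the symmetric 6x6 system (matrixA_host) and right-hand side (vectorB_host); solving it
// for the incremental camera motion is left to the caller. Below is a minimal host-side
// sketch using Gaussian elimination with partial pivoting; solve6x6 is a hypothetical
// helper, and A/b are modified in place.
#include <cmath>
#include <utility>

static bool solve6x6(float A[36], float b[6], float x[6])
{
    for (int k = 0; k < 6; ++k) {
        // choose the row with the largest |A(i,k)| at or below row k as pivot
        int piv = k;
        for (int i = k + 1; i < 6; ++i)
            if (std::fabs(A[i * 6 + k]) > std::fabs(A[piv * 6 + k])) piv = i;
        if (std::fabs(A[piv * 6 + k]) < 1e-12f) return false;   // singular / ill-conditioned
        if (piv != k) {
            for (int j = 0; j < 6; ++j) std::swap(A[k * 6 + j], A[piv * 6 + j]);
            std::swap(b[k], b[piv]);
        }
        // eliminate entries below the pivot in column k
        for (int i = k + 1; i < 6; ++i) {
            float f = A[i * 6 + k] / A[k * 6 + k];
            for (int j = k; j < 6; ++j) A[i * 6 + j] -= f * A[k * 6 + j];
            b[i] -= f * b[k];
        }
    }
    // back substitution
    for (int i = 5; i >= 0; --i) {
        float s = b[i];
        for (int j = i + 1; j < 6; ++j) s -= A[i * 6 + j] * x[j];
        x[i] = s / A[i * 6 + i];
    }
    return true;
}
// Since the assembled matrix is symmetric, its row-major vs. column-major storage does not
// affect this solve.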
92a0499d2b564b1ec5de988e6fedb263f946b624.cu
/* * Software License Agreement (BSD License) * * Point Cloud Library (PCL) - www.pointclouds.org * Copyright (c) 2011, Willow Garage, Inc. * * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of Willow Garage, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * */ #include <pcl/gpu/utils/device/block.hpp> #include <pcl/gpu/utils/device/funcattrib.hpp> #include "device.hpp" namespace pcl { namespace device { struct Combined { enum { CTA_SIZE_X = 32, CTA_SIZE_Y = 8, CTA_SIZE = CTA_SIZE_X * CTA_SIZE_Y }; struct plus { __forceinline__ __device__ float operator () (const float &lhs, const volatile float& rhs) const { return (lhs + rhs); } }; Mat33 Rcurr; float3 tcurr; PtrStep<float> vmap_curr; PtrStep<float> nmap_curr; Mat33 Rprev_inv; float3 tprev; Intr intr; PtrStep<float> vmap_g_prev; PtrStep<float> nmap_g_prev; float distThres; float angleThres; int cols; int rows; mutable PtrStep<float> gbuf; __device__ __forceinline__ bool search (int x, int y, float3& n, float3& d, float3& s) const { float3 ncurr; ncurr.x = nmap_curr.ptr (y)[x]; if (isnan (ncurr.x)) return (false); float3 vcurr; vcurr.x = vmap_curr.ptr (y )[x]; vcurr.y = vmap_curr.ptr (y + rows)[x]; vcurr.z = vmap_curr.ptr (y + 2 * rows)[x]; float3 vcurr_g = Rcurr * vcurr + tcurr; float3 vcurr_cp = Rprev_inv * (vcurr_g - tprev); // prev camera coo space int2 ukr; //projection ukr.x = __float2int_rn (vcurr_cp.x * intr.fx / vcurr_cp.z + intr.cx); //4 ukr.y = __float2int_rn (vcurr_cp.y * intr.fy / vcurr_cp.z + intr.cy); //4 if (ukr.x < 0 || ukr.y < 0 || ukr.x >= cols || ukr.y >= rows || vcurr_cp.z < 0) return (false); float3 nprev_g; nprev_g.x = nmap_g_prev.ptr (ukr.y)[ukr.x]; if (isnan (nprev_g.x)) return (false); float3 vprev_g; vprev_g.x = vmap_g_prev.ptr (ukr.y )[ukr.x]; vprev_g.y = vmap_g_prev.ptr (ukr.y + rows)[ukr.x]; vprev_g.z = vmap_g_prev.ptr (ukr.y + 2 * rows)[ukr.x]; float dist = norm (vprev_g - vcurr_g); if (dist > distThres) return (false); ncurr.y = nmap_curr.ptr (y + rows)[x]; ncurr.z = nmap_curr.ptr (y + 2 * rows)[x]; float3 ncurr_g = Rcurr * ncurr; nprev_g.y = nmap_g_prev.ptr (ukr.y + rows)[ukr.x]; nprev_g.z = nmap_g_prev.ptr (ukr.y + 2 * rows)[ukr.x]; float sine = norm 
(cross (ncurr_g, nprev_g)); if (sine >= angleThres) return (false); n = nprev_g; d = vprev_g; s = vcurr_g; return (true); } __device__ __forceinline__ void operator () () const { int x = threadIdx.x + blockIdx.x * CTA_SIZE_X; int y = threadIdx.y + blockIdx.y * CTA_SIZE_Y; float3 n, d, s; bool found_coresp = false; if (x < cols || y < rows) found_coresp = search (x, y, n, d, s); float row[7]; if (found_coresp) { *(float3*)&row[0] = cross (s, n); *(float3*)&row[3] = n; row[6] = dot (n, d - s); } else row[0] = row[1] = row[2] = row[3] = row[4] = row[5] = row[6] = 0.f; __shared__ float smem[CTA_SIZE]; int tid = Block::flattenedThreadId (); int shift = 0; for (int i = 0; i < 6; ++i) //rows { #pragma unroll for (int j = i; j < 7; ++j) // cols + b { __syncthreads (); smem[tid] = row[i] * row[j]; __syncthreads (); Block::reduce<CTA_SIZE>(smem, plus ()); if (tid == 0) gbuf.ptr (shift++)[blockIdx.x + gridDim.x * blockIdx.y] = smem[0]; } } } }; __global__ void combinedKernel (const Combined cs) { cs (); } struct TranformReduction { enum { CTA_SIZE = 512, STRIDE = CTA_SIZE, B = 6, COLS = 6, ROWS = 6, DIAG = 6, UPPER_DIAG_MAT = (COLS * ROWS - DIAG) / 2 + DIAG, TOTAL = UPPER_DIAG_MAT + B, GRID_X = TOTAL }; struct plus { __forceinline__ __device__ float operator () (const float &lhs, const volatile float& rhs) const { return lhs + rhs; } }; PtrStep<float> gbuf; int length; mutable float* output; __device__ __forceinline__ void operator () () const { const float *beg = gbuf.ptr (blockIdx.x); const float *end = beg + length; int tid = threadIdx.x; float sum = 0.f; for (const float *t = beg + tid; t < end; t += STRIDE) sum += *t; __shared__ float smem[CTA_SIZE]; smem[tid] = sum; __syncthreads (); Block::reduce<CTA_SIZE>(smem, plus ()); if (tid == 0) output[blockIdx.x] = smem[0]; } }; __global__ void TransformEstimatorKernel2 (const TranformReduction tr) { tr (); } } } ////////////////////////////////////////////////////////////////////////////////////////////////////////////////// void pcl::device::estimateCombined (const Mat33& Rcurr, const float3& tcurr, const MapArr& vmap_curr, const MapArr& nmap_curr, const Mat33& Rprev_inv, const float3& tprev, const Intr& intr, const MapArr& vmap_g_prev, const MapArr& nmap_g_prev, float distThres, float angleThres, DeviceArray2D<float>& gbuf, DeviceArray<float>& mbuf, float* matrixA_host, float* vectorB_host) { int cols = vmap_curr.cols (); int rows = vmap_curr.rows () / 3; Combined cs; cs.Rcurr = Rcurr; cs.tcurr = tcurr; cs.vmap_curr = vmap_curr; cs.nmap_curr = nmap_curr; cs.Rprev_inv = Rprev_inv; cs.tprev = tprev; cs.intr = intr; cs.vmap_g_prev = vmap_g_prev; cs.nmap_g_prev = nmap_g_prev; cs.distThres = distThres; cs.angleThres = angleThres; cs.cols = cols; cs.rows = rows; ////////////////////////////// dim3 block (Combined::CTA_SIZE_X, Combined::CTA_SIZE_Y); dim3 grid (1, 1, 1); grid.x = divUp (cols, block.x); grid.y = divUp (rows, block.y); mbuf.create (TranformReduction::TOTAL); if (gbuf.rows () != TranformReduction::TOTAL || gbuf.cols () < (int)(grid.x * grid.y)) gbuf.create (TranformReduction::TOTAL, grid.x * grid.y); cs.gbuf = gbuf; combinedKernel<<<grid, block>>>(cs); cudaSafeCall ( cudaGetLastError () ); //cudaSafeCall(cudaDeviceSynchronize()); //printFuncAttrib(combinedKernel); TranformReduction tr; tr.gbuf = gbuf; tr.length = grid.x * grid.y; tr.output = mbuf; TransformEstimatorKernel2<<<TranformReduction::TOTAL, TranformReduction::CTA_SIZE>>>(tr); cudaSafeCall (cudaGetLastError ()); cudaSafeCall (cudaDeviceSynchronize ()); float 
host_data[TranformReduction::TOTAL]; mbuf.download (host_data); int shift = 0; for (int i = 0; i < 6; ++i) //rows for (int j = i; j < 7; ++j) // cols + b { float value = host_data[shift++]; if (j == 6) // vector b vectorB_host[i] = value; else matrixA_host[j * 6 + i] = matrixA_host[i * 6 + j] = value; } }
7d312e733c16b443792b52ec0bdd73faa33c40ca.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //////////////////////////////////////////////////////////////////////////// // File: ProgramCU.cu // Author: Changchang Wu // Description : implementation of ProgramCU and all CUDA kernels // // Copyright (c) 2007 University of North Carolina at Chapel Hill // All Rights Reserved // // Permission to use, copy, modify and distribute this software and its // documentation for educational, research and non-profit purposes, without // fee, and without a written agreement is hereby granted, provided that the // above copyright notice and the following paragraph appear in all copies. // // The University of North Carolina at Chapel Hill make no representations // about the suitability of this software for any purpose. It is provided // 'as is' without express or implied warranty. // // Please send BUG REPORTS to [email protected] // //////////////////////////////////////////////////////////////////////////// #if defined(CUDA_SIFTGPU_ENABLED) #include "GL/glew.h" #include "stdio.h" #include "CuTexImage.h" #include "ProgramCU.h" #include "GlobalUtil.h" //---------------------------------------------------------------- //Begin SiftGPU setting section. ////////////////////////////////////////////////////////// #define IMUL(X,Y) __mul24(X,Y) //#define FDIV(X,Y) ((X)/(Y)) #define FDIV(X,Y) __fdividef(X,Y) ///////////////////////////////////////////////////////// //filter kernel width range (don't change this) #define KERNEL_MAX_WIDTH 33 #define KERNEL_MIN_WIDTH 5 ////////////////////////////////////////////////////////// //horizontal filter block size (32, 64, 128, 256, 512) #define FILTERH_TILE_WIDTH 128 //#define FILTERH_TILE_WIDTH 256 //#define FILTERH_TILE_WIDTH 160 //thread block for vertical filter. FILTERV_BLOCK_WIDTH can be (4, 8 or 16) #define FILTERV_BLOCK_WIDTH 16 #define FILTERV_BLOCK_HEIGHT 32 //The corresponding image patch for a thread block #define FILTERV_PIXEL_PER_THREAD 4 #define FILTERV_TILE_WIDTH FILTERV_BLOCK_WIDTH #define FILTERV_TILE_HEIGHT (FILTERV_PIXEL_PER_THREAD * FILTERV_BLOCK_HEIGHT) ////////////////////////////////////////////////////////// //thread block size for computing Difference of Gaussian #define DOG_BLOCK_LOG_DIMX 7 #define DOG_BLOCK_LOG_DIMY 0 #define DOG_BLOCK_DIMX (1 << DOG_BLOCK_LOG_DIMX) #define DOG_BLOCK_DIMY (1 << DOG_BLOCK_LOG_DIMY) ////////////////////////////////////////////////////////// //thread block size for keypoint detection #define KEY_BLOCK_LOG_DIMX 3 #define KEY_BLOCK_LOG_DIMY 3 #define KEY_BLOCK_DIMX (1<<KEY_BLOCK_LOG_DIMX) #define KEY_BLOCK_DIMY (1<<KEY_BLOCK_LOG_DIMY) //#define KEY_OFFSET_ONE //make KEY_BLOCK_LOG_DIMX 4 will make the write coalesced.. //but it seems uncoalesced writes don't affect the speed ////////////////////////////////////////////////////////// //thread block size for initializing list generation (64, 128, 256, 512 ...) #define HIST_INIT_WIDTH 128 //thread block size for generating feature list (32, 64, 128, 256, 512, ...) #define LISTGEN_BLOCK_DIM 128 ///////////////////////////////////////////////////////// //how many keypoint orientations to compute in a block #define ORIENTATION_COMPUTE_PER_BLOCK 64 //how many keypoint descriptor to compute in a block (2, 4, 8, 16, 32) #define DESCRIPTOR_COMPUTE_PER_BLOCK 4 #define DESCRIPTOR_COMPUTE_BLOCK_SIZE (16 * DESCRIPTOR_COMPUTE_PER_BLOCK) //how many keypoint descriptor to normalized in a block (32, ...) 
#define DESCRIPTOR_NORMALIZ_PER_BLOCK 32 /////////////////////////////////////////// //Thread block size for visualization //(This doesn't affect the speed of computation) #define BLOCK_LOG_DIM 4 #define BLOCK_DIM (1 << BLOCK_LOG_DIM) //End SiftGPU setting section. //---------------------------------------------------------------- __device__ __constant__ float d_kernel[KERNEL_MAX_WIDTH]; texture<float, 1, hipReadModeElementType> texData; texture<unsigned char, 1, hipReadModeNormalizedFloat> texDataB; texture<float2, 2, hipReadModeElementType> texDataF2; texture<float4, 1, hipReadModeElementType> texDataF4; texture<int4, 1, hipReadModeElementType> texDataI4; texture<int4, 1, hipReadModeElementType> texDataList; //template<int i> __device__ float Conv(float *data) { return Conv<i-1>(data) + data[i]*d_kernel[i];} //template<> __device__ float Conv<0>(float *data) { return data[0] * d_kernel[0]; } ////////////////////////////////////////////////////////////// template<int FW> __global__ void FilterH( float* d_result, int width) { const int HALF_WIDTH = FW >> 1; // FW/2 const int CACHE_WIDTH = FILTERH_TILE_WIDTH + FW -1; //128+FW-1 const int CACHE_COUNT = 2 + (CACHE_WIDTH - 2)/ FILTERH_TILE_WIDTH; //catch __shared__ float data[CACHE_WIDTH]; //128+ const int bcol = IMUL(blockIdx.x, FILTERH_TILE_WIDTH); //128*blockIdx.x const int col = bcol + threadIdx.x; // const int index_min = IMUL(blockIdx.y, width);// const int index_max = index_min + width - 1;// int src_index = index_min + bcol - HALF_WIDTH + threadIdx.x; //blockIdx.x int cache_index = threadIdx.x; //0~128 float value = 0; #pragma unroll for(int j = 0; j < CACHE_COUNT; ++j) //catch { if(cache_index < CACHE_WIDTH) //128+FW-128=FW FW { int fetch_index = src_index < index_min? index_min : (src_index > index_max ? index_max : src_index); data[cache_index] = tex1Dfetch(texData,fetch_index); src_index += FILTERH_TILE_WIDTH; cache_index += FILTERH_TILE_WIDTH; } } __syncthreads(); if(col >= width) return; #pragma unroll for(int i = 0; i < FW; ++i) { value += (data[threadIdx.x + i]* d_kernel[i]); } // value = Conv<FW-1>(data + threadIdx.x); d_result[index_min + col] = value; } //////////////////////////////////////////////////////////////////// template<int FW> __global__ void FilterV(float* d_result, int width, int height) { const int HALF_WIDTH = FW >> 1; // const int CACHE_WIDTH = FW + FILTERV_TILE_HEIGHT - 1; //128+FW-1 const int TEMP = CACHE_WIDTH & 0xf;//15 //add some extra space to avoid bank conflict #if FILTERV_TILE_WIDTH == 16 //make the stride 16 * n +/- 1 const int EXTRA = (TEMP == 1 || TEMP == 0) ? 1 - TEMP : 15 - TEMP; #elif FILTERV_TILE_WIDTH == 8 //make the stride 16 * n +/- 2 const int EXTRA = (TEMP == 2 || TEMP == 1 || TEMP == 0) ? 2 - TEMP : (TEMP == 15? 3 : 14 - TEMP); #elif FILTERV_TILE_WIDTH == 4 //make the stride 16 * n +/- 4 const int EXTRA = (TEMP >=0 && TEMP <=4) ? 4 - TEMP : (TEMP > 12? 
20 - TEMP : 12 - TEMP); #else #error #endif const int CACHE_TRUE_WIDTH = CACHE_WIDTH + EXTRA;// const int CACHE_COUNT = (CACHE_WIDTH + FILTERV_BLOCK_HEIGHT - 1) / FILTERV_BLOCK_HEIGHT; //catchlength/32 const int WRITE_COUNT = (FILTERV_TILE_HEIGHT + FILTERV_BLOCK_HEIGHT -1) / FILTERV_BLOCK_HEIGHT;//128/32 __shared__ float data[CACHE_TRUE_WIDTH * FILTERV_TILE_WIDTH]; //CACHE_TRUE_WIDTH*16 const int row_block_first = IMUL(blockIdx.y, FILTERV_TILE_HEIGHT); // const int col = IMUL(blockIdx.x, FILTERV_TILE_WIDTH) + threadIdx.x; // const int row_first = row_block_first - HALF_WIDTH;// const int data_index_max = IMUL(height - 1, width) + col; // const int cache_col_start = threadIdx.y; // const int cache_row_start = IMUL(threadIdx.x, CACHE_TRUE_WIDTH); int cache_index = cache_col_start + cache_row_start; // int data_index = IMUL(row_first + cache_col_start, width) + col;// if(col < width) { #pragma unroll for(int i = 0; i < CACHE_COUNT; ++i) { if(cache_col_start < CACHE_WIDTH - i * FILTERV_BLOCK_HEIGHT) { int fetch_index = data_index < col ? col : (data_index > data_index_max? data_index_max : data_index); // // data[cache_index + i * FILTERV_BLOCK_HEIGHT] = tex1Dfetch(texData,fetch_index); data_index += IMUL(FILTERV_BLOCK_HEIGHT, width);//32 } } } __syncthreads(); // if(col >= width) return; int row = row_block_first + threadIdx.y; int index_start = cache_row_start + threadIdx.y; #pragma unroll //128/32 32 32 for(int i = 0; i < WRITE_COUNT;++i,row += FILTERV_BLOCK_HEIGHT, index_start += FILTERV_BLOCK_HEIGHT) { if(row < height) { int index_dest = IMUL(row, width) + col; float value = 0; #pragma unroll for(int i = 0; i < FW; ++i) { value += (data[index_start + i] * d_kernel[i]); } d_result[index_dest] = value; } } } template<int LOG_SCALE> __global__ void UpsampleKernel(float* d_result, int width) { const int SCALE = (1 << LOG_SCALE), SCALE_MASK = (SCALE - 1); const float INV_SCALE = 1.0f / (float(SCALE)); int col = IMUL(blockIdx.x, FILTERH_TILE_WIDTH) + threadIdx.x; if(col >= width) return; int row = blockIdx.y >> LOG_SCALE; int index = row * width + col; int dst_row = blockIdx.y; int dst_idx= (width * dst_row + col) * SCALE; int helper = blockIdx.y & SCALE_MASK; if (helper) { float v11 = tex1Dfetch(texData, index); float v12 = tex1Dfetch(texData, index + 1); index += width; float v21 = tex1Dfetch(texData, index); float v22 = tex1Dfetch(texData, index + 1); float w1 = INV_SCALE * helper, w2 = 1.0 - w1; float v1 = (v21 * w1 + w2 * v11); float v2 = (v22 * w1 + w2 * v12); d_result[dst_idx] = v1; #pragma unroll for(int i = 1; i < SCALE; ++i) { const float r2 = i * INV_SCALE; const float r1 = 1.0f - r2; d_result[dst_idx +i] = v1 * r1 + v2 * r2; } }else { float v1 = tex1Dfetch(texData, index); float v2 = tex1Dfetch(texData, index + 1); d_result[dst_idx] = v1; #pragma unroll for(int i = 1; i < SCALE; ++i) { const float r2 = i * INV_SCALE; const float r1 = 1.0f - r2; d_result[dst_idx +i] = v1 * r1 + v2 * r2; } } } //////////////////////////////////////////////////////////////////////////////////////// void ProgramCU::SampleImageU(CuTexImage *dst, CuTexImage *src, int log_scale) { int width = src->GetImgWidth(), height = src->GetImgHeight(); src->BindTexture(texData); dim3 grid((width + FILTERH_TILE_WIDTH - 1)/ FILTERH_TILE_WIDTH, height << log_scale); dim3 block(FILTERH_TILE_WIDTH); switch(log_scale) { case 1 : hipLaunchKernelGGL(( UpsampleKernel<1>) , dim3(grid), dim3(block), 0, 0, (float*) dst->_cuData, width); break; case 2 : hipLaunchKernelGGL(( UpsampleKernel<2>) , dim3(grid), dim3(block), 0, 0, 
(float*) dst->_cuData, width); break; case 3 : hipLaunchKernelGGL(( UpsampleKernel<3>) , dim3(grid), dim3(block), 0, 0, (float*) dst->_cuData, width); break; default: break; } } template<int LOG_SCALE> __global__ void DownsampleKernel(float* d_result, int src_width, int dst_width) { const int dst_col = IMUL(blockIdx.x, FILTERH_TILE_WIDTH) + threadIdx.x; if(dst_col >= dst_width) return; const int src_col = min((dst_col << LOG_SCALE), (src_width - 1)); //dst_col*2 const int dst_row = blockIdx.y; // const int src_row = blockIdx.y << LOG_SCALE; // dst_row*2 const int src_idx = IMUL(src_row, src_width) + src_col; const int dst_idx = IMUL(dst_width, dst_row) + dst_col; d_result[dst_idx] = tex1Dfetch(texData, src_idx); } __global__ void DownsampleKernel(float* d_result, int src_width, int dst_width, const int log_scale) { const int dst_col = IMUL(blockIdx.x, FILTERH_TILE_WIDTH) + threadIdx.x; if(dst_col >= dst_width) return; const int src_col = min((dst_col << log_scale), (src_width - 1)); const int dst_row = blockIdx.y; const int src_row = blockIdx.y << log_scale; const int src_idx = IMUL(src_row, src_width) + src_col; const int dst_idx = IMUL(dst_width, dst_row) + dst_col; d_result[dst_idx] = tex1Dfetch(texData, src_idx); } void ProgramCU::SampleImageD(CuTexImage *dst, CuTexImage *src, int log_scale) { int src_width = src->GetImgWidth(), dst_width = dst->GetImgWidth() ; src->BindTexture(texData); dim3 grid((dst_width + FILTERH_TILE_WIDTH - 1)/ FILTERH_TILE_WIDTH, dst->GetImgHeight()); dim3 block(FILTERH_TILE_WIDTH); switch(log_scale) { case 1 : hipLaunchKernelGGL(( DownsampleKernel<1>) , dim3(grid), dim3(block), 0, 0, (float*) dst->_cuData, src_width, dst_width); break; case 2 :hipLaunchKernelGGL(( DownsampleKernel<2>) , dim3(grid), dim3(block), 0, 0, (float*) dst->_cuData, src_width, dst_width); break; case 3 : hipLaunchKernelGGL(( DownsampleKernel<3>) , dim3(grid), dim3(block), 0, 0, (float*) dst->_cuData, src_width, dst_width); break; default:hipLaunchKernelGGL(( DownsampleKernel) , dim3(grid), dim3(block), 0, 0, (float*) dst->_cuData, src_width, dst_width, log_scale); } hipDeviceSynchronize(); } __global__ void ChannelReduce_Kernel(float* d_result) { int index = IMUL(blockIdx.x, FILTERH_TILE_WIDTH) + threadIdx.x; d_result[index] = tex1Dfetch(texData, index*4); } __global__ void ChannelReduce_Convert_Kernel(float* d_result) { int index = IMUL(blockIdx.x, FILTERH_TILE_WIDTH) + threadIdx.x; float4 rgba = tex1Dfetch(texDataF4, index); d_result[index] = 0.299f * rgba.x + 0.587f* rgba.y + 0.114f * rgba.z; } void ProgramCU::ReduceToSingleChannel(CuTexImage* dst, CuTexImage* src, int convert_rgb) { int width = src->GetImgWidth(), height = dst->GetImgHeight() ; dim3 grid((width * height + FILTERH_TILE_WIDTH - 1)/ FILTERH_TILE_WIDTH); dim3 block(FILTERH_TILE_WIDTH); if(convert_rgb) { src->BindTexture(texDataF4); hipLaunchKernelGGL(( ChannelReduce_Convert_Kernel), dim3(grid), dim3(block), 0, 0, (float*)dst->_cuData); }else { src->BindTexture(texData); hipLaunchKernelGGL(( ChannelReduce_Kernel), dim3(grid), dim3(block), 0, 0, (float*)dst->_cuData); } } __global__ void ConvertByteToFloat_Kernel(float* d_result) { int index = IMUL(blockIdx.x, FILTERH_TILE_WIDTH) + threadIdx.x; d_result[index] = tex1Dfetch(texDataB, index); } void ProgramCU::ConvertByteToFloat(CuTexImage*src, CuTexImage* dst) { int width = src->GetImgWidth(), height = dst->GetImgHeight() ; dim3 grid((width * height + FILTERH_TILE_WIDTH - 1)/ FILTERH_TILE_WIDTH); dim3 block(FILTERH_TILE_WIDTH); src->BindTexture(texDataB); 
hipLaunchKernelGGL(( ConvertByteToFloat_Kernel), dim3(grid), dim3(block), 0, 0, (float*)dst->_cuData); } void ProgramCU::CreateFilterKernel(float sigma, float* kernel, int& width) { int i, sz = int( ceil( GlobalUtil::_FilterWidthFactor * sigma -0.5) ) ;// width = 2*sz + 1; if(width > KERNEL_MAX_WIDTH) { //filter size truncation sz = KERNEL_MAX_WIDTH >> 1; width =KERNEL_MAX_WIDTH; }else if(width < KERNEL_MIN_WIDTH) { sz = KERNEL_MIN_WIDTH >> 1; width =KERNEL_MIN_WIDTH; } float rv = 1.0f/(sigma*sigma), v, ksum =0; // pre-compute filter for( i = -sz ; i <= sz ; ++i) { kernel[i+sz] = v = exp(-0.5f * i * i *rv) ; ksum += v; } //normalize the kernel rv = 1.0f/ksum; for(i = 0; i< width ;i++) kernel[i]*=rv; } template<int FW> void ProgramCU::FilterImage(CuTexImage *dst, CuTexImage *src, CuTexImage* buf) { int width = src->GetImgWidth(), height = src->GetImgHeight(); //horizontal filtering src->BindTexture(texData); //src dim3 gridh((width + FILTERH_TILE_WIDTH - 1)/ FILTERH_TILE_WIDTH, height); dim3 blockh(FILTERH_TILE_WIDTH); //GlobalUtil::StartTimer(""); hipLaunchKernelGGL(( FilterH<FW>), dim3(gridh), dim3(blockh), 0, 0, (float*)buf->_cuData, width); hipDeviceSynchronize(); // GlobalUtil::StopTimer(); // float _timing0 = GlobalUtil::GetElapsedTime(); //CheckErrorCUDA("FilterH"); ///vertical filtering buf->BindTexture(texData); //16,128 dim3 gridv((width + FILTERV_TILE_WIDTH - 1)/ FILTERV_TILE_WIDTH, (height + FILTERV_TILE_HEIGHT - 1)/FILTERV_TILE_HEIGHT); //(50,5) dim3 blockv(FILTERV_TILE_WIDTH, FILTERV_BLOCK_HEIGHT); //(16*32) //GlobalUtil::StartTimer(""); hipLaunchKernelGGL(( FilterV<FW>), dim3(gridv), dim3(blockv), 0, 0, (float*)dst->_cuData, width, height); hipDeviceSynchronize(); // GlobalUtil::StopTimer(); //float _timing1 = GlobalUtil::GetElapsedTime(); //0.005,0.008 1.6 CheckErrorCUDA("FilterV"); } ////////////////////////////////////////////////////////////////////// // tested on 2048x1500 image, the time on pyramid construction is // OpenGL version : 18ms // CUDA version: 28 ms void ProgramCU::FilterImage(CuTexImage *dst, CuTexImage *src, CuTexImage* buf, float sigma) { float filter_kernel[KERNEL_MAX_WIDTH]; int width; CreateFilterKernel(sigma, filter_kernel, width); hipMemcpyToSymbol(d_kernel, filter_kernel, width * sizeof(float), 0, hipMemcpyHostToDevice); switch(width) { case 5: FilterImage< 5>(dst, src, buf); break; case 7: FilterImage< 7>(dst, src, buf); break; case 9: FilterImage< 9>(dst, src, buf); break; case 11: FilterImage<11>(dst, src, buf); break; case 13: FilterImage<13>(dst, src, buf); break; case 15: FilterImage<15>(dst, src, buf); break; case 17: FilterImage<17>(dst, src, buf); break; case 19: FilterImage<19>(dst, src, buf); break; case 21: FilterImage<21>(dst, src, buf); break; case 23: FilterImage<23>(dst, src, buf); break; case 25: FilterImage<25>(dst, src, buf); break; case 27: FilterImage<27>(dst, src, buf); break; case 29: FilterImage<29>(dst, src, buf); break; case 31: FilterImage<31>(dst, src, buf); break; case 33: FilterImage<33>(dst, src, buf); break; default: break; } } texture<float, 1, hipReadModeElementType> texC; texture<float, 1, hipReadModeElementType> texP; texture<float, 1, hipReadModeElementType> texN; void __global__ ComputeDOG_Kernel(float* d_dog, float2* d_got, int width, int height) { int row = (blockIdx.y << DOG_BLOCK_LOG_DIMY) + threadIdx.y; int col = (blockIdx.x << DOG_BLOCK_LOG_DIMX) + threadIdx.x; if(col < width && row < height) { int index = IMUL(row, width) + col; float vp = tex1Dfetch(texP, index); float v = tex1Dfetch(texC, index); 
d_dog[index] = v - vp; float vxn = tex1Dfetch(texC, index + 1); float vxp = tex1Dfetch(texC, index - 1); float vyp = tex1Dfetch(texC, index - width); float vyn = tex1Dfetch(texC, index + width); float dx = vxn - vxp, dy = vyn - vyp; float grd = 0.5f * sqrt(dx * dx + dy * dy); float rot = (grd == 0.0f? 0.0f : atan2(dy, dx)); d_got[index] = make_float2(grd, rot); } } void __global__ ComputeDOG_Kernel(float* d_dog, int width, int height) { int row = (blockIdx.y << DOG_BLOCK_LOG_DIMY) + threadIdx.y; int col = (blockIdx.x << DOG_BLOCK_LOG_DIMX) + threadIdx.x; if(col < width && row < height) { int index = IMUL(row, width) + col; float vp = tex1Dfetch(texP, index); float v = tex1Dfetch(texC, index); d_dog[index] = v - vp; } } void ProgramCU::ComputeDOG(CuTexImage* gus, CuTexImage* dog, CuTexImage* got) { int width = gus->GetImgWidth(), height = gus->GetImgHeight(); dim3 grid((width + DOG_BLOCK_DIMX - 1)/ DOG_BLOCK_DIMX, (height + DOG_BLOCK_DIMY - 1)/DOG_BLOCK_DIMY); dim3 block(DOG_BLOCK_DIMX, DOG_BLOCK_DIMY); gus->BindTexture(texC); (gus -1)->BindTexture(texP); //got if(got->_cuData) hipLaunchKernelGGL(( ComputeDOG_Kernel), dim3(grid), dim3(block), 0, 0, (float*) dog->_cuData, (float2*) got->_cuData, width, height); else hipLaunchKernelGGL(( ComputeDOG_Kernel), dim3(grid), dim3(block), 0, 0, (float*) dog->_cuData, width, height); hipDeviceSynchronize(); } #define READ_CMP_DOG_DATA(datai, tex, idx) \ datai[0] = tex1Dfetch(tex, idx - 1);\ datai[1] = tex1Dfetch(tex, idx);\ datai[2] = tex1Dfetch(tex, idx + 1);\ if(v > nmax)\ {\ nmax = max(nmax, datai[0]);\ nmax = max(nmax, datai[1]);\ nmax = max(nmax, datai[2]);\ if(v < nmax) goto key_finish;\ }else\ {\ nmin = min(nmin, datai[0]);\ nmin = min(nmin, datai[1]);\ nmin = min(nmin, datai[2]);\ if(v > nmin) goto key_finish;\ } void __global__ ComputeKEY_Kernel(float4* d_key, int width, int colmax, int rowmax, float dog_threshold0, float dog_threshold, float edge_threshold, int subpixel_localization) { float data[3][3], v; float datap[3][3], datan[3][3]; #ifdef KEY_OFFSET_ONE int row = (blockIdx.y << KEY_BLOCK_LOG_DIMY) + threadIdx.y + 1; int col = (blockIdx.x << KEY_BLOCK_LOG_DIMX) + threadIdx.x + 1; #else int row = (blockIdx.y << KEY_BLOCK_LOG_DIMY) + threadIdx.y; int col = (blockIdx.x << KEY_BLOCK_LOG_DIMX) + threadIdx.x; #endif int index = IMUL(row, width) + col; int idx[3] ={index - width, index, index + width}; int in_image =0; float nmax, nmin, result = 0.0f; float dx = 0, dy = 0, ds = 0; bool offset_test_passed = true; #ifdef KEY_OFFSET_ONE if(row < rowmax && col < colmax) #else if(row > 0 && col > 0 && row < rowmax && col < colmax) #endif { // in_image = 1; data[1][1] = v = tex1Dfetch(texC, idx[1]); // if(fabs(v) <= dog_threshold0) goto key_finish; data[1][0] = tex1Dfetch(texC, idx[1] - 1); data[1][2] = tex1Dfetch(texC, idx[1] + 1);// nmax = max(data[1][0], data[1][2]); nmin = min(data[1][0], data[1][2]);// if(v <=nmax && v >= nmin) goto key_finish; // //if((v > nmax && v < 0 )|| (v < nmin && v > 0)) goto key_finish; READ_CMP_DOG_DATA(data[0], texC, idx[0]);// READ_CMP_DOG_DATA(data[2], texC, idx[2]);// // //edge supression float vx2 = v * 2.0f; float fxx = data[1][0] + data[1][2] - vx2; float fyy = data[0][1] + data[2][1] - vx2; float fxy = 0.25f * (data[2][2] + data[0][0] - data[2][0] - data[0][2]); float temp1 = fxx * fyy - fxy * fxy; float temp2 = (fxx + fyy) * (fxx + fyy); if(temp1 <=0 || temp2 > edge_threshold * temp1) goto key_finish; // //read the previous level READ_CMP_DOG_DATA(datap[0], texP, idx[0]);// READ_CMP_DOG_DATA(datap[1], texP, 
idx[1]); READ_CMP_DOG_DATA(datap[2], texP, idx[2]); //read the next level READ_CMP_DOG_DATA(datan[0], texN, idx[0]);// READ_CMP_DOG_DATA(datan[1], texN, idx[1]); READ_CMP_DOG_DATA(datan[2], texN, idx[2]); if(subpixel_localization) { //subpixel localization float fx = 0.5f * (data[1][2] - data[1][0]); float fy = 0.5f * (data[2][1] - data[0][1]); float fs = 0.5f * (datan[1][1] - datap[1][1]); float fss = (datan[1][1] + datap[1][1] - vx2); float fxs = 0.25f* (datan[1][2] + datap[1][0] - datan[1][0] - datap[1][2]); float fys = 0.25f* (datan[2][1] + datap[0][1] - datan[0][1] - datap[2][1]); //need to solve dx, dy, ds; // |-fx| | fxx fxy fxs | |dx| // |-fy| = | fxy fyy fys | * |dy| // |-fs| | fxs fys fss | |ds| float4 A0 = fxx > 0? make_float4(fxx, fxy, fxs, -fx) : make_float4(-fxx, -fxy, -fxs, fx); float4 A1 = fxy > 0? make_float4(fxy, fyy, fys, -fy) : make_float4(-fxy, -fyy, -fys, fy); float4 A2 = fxs > 0? make_float4(fxs, fys, fss, -fs) : make_float4(-fxs, -fys, -fss, fs); // float maxa = max(max(A0.x, A1.x), A2.x); // if(maxa >= 1e-10) { if(maxa == A1.x)//A1A1A0 { float4 TEMP = A1; A1 = A0; A0 = TEMP; }else if(maxa == A2.x)//A2A1A0 { float4 TEMP = A2; A2 = A0; A0 = TEMP; } A0.y /= A0.x; A0.z /= A0.x; A0.w/= A0.x; A1.y -= A1.x * A0.y; A1.z -= A1.x * A0.z; A1.w -= A1.x * A0.w; A2.y -= A2.x * A0.y; A2.z -= A2.x * A0.z; A2.w -= A2.x * A0.w; if(abs(A2.y) > abs(A1.y)) { float4 TEMP = A2; A2 = A1; A1 = TEMP; } if(abs(A1.y) >= 1e-10) { A1.z /= A1.y; A1.w /= A1.y; A2.z -= A2.y * A1.z; A2.w -= A2.y * A1.w; if(abs(A2.z) >= 1e-10) { ds = A2.w / A2.z; dy = A1.w - ds * A1.z; dx = A0.w - ds * A0.z - dy * A0.y; offset_test_passed = fabs(data[1][1] + 0.5f * (dx * fx + dy * fy + ds * fs)) > dog_threshold // &&fabs(ds) < 1.0f && fabs(dx) < 1.0f && fabs(dy) < 1.0f; } } } } if(offset_test_passed) result = v > nmax ? 1.0 : -1.0;// } key_finish: //index if(in_image) d_key[index] = make_float4(result, dx, dy, ds); //( } void ProgramCU::ComputeKEY(CuTexImage* dog, CuTexImage* key, float Tdog, float Tedge) { int width = dog->GetImgWidth(), height = dog->GetImgHeight(); float Tdog1 = (GlobalUtil::_SubpixelLocalization? 0.8f : 1.0f) * Tdog; CuTexImage* dogp = dog - 1; CuTexImage* dogn = dog + 1; #ifdef KEY_OFFSET_ONE dim3 grid((width - 1 + KEY_BLOCK_DIMX - 1)/ KEY_BLOCK_DIMX, (height - 1 + KEY_BLOCK_DIMY - 1)/KEY_BLOCK_DIMY); #else dim3 grid((width + KEY_BLOCK_DIMX - 1)/ KEY_BLOCK_DIMX, (height + KEY_BLOCK_DIMY - 1)/KEY_BLOCK_DIMY); #endif dim3 block(KEY_BLOCK_DIMX, KEY_BLOCK_DIMY); dogp->BindTexture(texP); dog ->BindTexture(texC); dogn->BindTexture(texN); Tedge = (Tedge+1)*(Tedge+1)/Tedge; //(8,8) (800/8,600/8) hipLaunchKernelGGL(( ComputeKEY_Kernel), dim3(grid), dim3(block), 0, 0, (float4*) key->_cuData, width, width -1, height -1, Tdog1, Tdog, Tedge, GlobalUtil::_SubpixelLocalization); hipDeviceSynchronize(); } //ws 800,wd 200,height 600 void __global__ InitHist_Kernel(int4* hist, int ws, int wd, int height) { int row = IMUL(blockIdx.y, blockDim.y) + threadIdx.y;// int col = IMUL(blockIdx.x, blockDim.x) + threadIdx.x;// if(row < height && col < wd) { int hidx = IMUL(row, wd) + col; //(0,1,2,3,4...200) int scol = col << 2;//4 (0,4,8,12...800) int sidx = IMUL(row, ws) + scol;//(0,4,8,12...800) int v[4] = {0, 0, 0, 0}; if(row > 0 && row < height -1) { #pragma unroll for(int i = 0; i < 4 ; ++i, ++scol) { float4 temp = tex1Dfetch(texDataF4, sidx +i);// //temp(result, dx, dy, ds)result0 v[i] = (scol < ws -1 && scol > 0 && temp.x!=0) ? 
1 : 0;//temp.x 1 } } hist[hidx] = make_int4(v[0], v[1], v[2], v[3]);//1/4 } } void ProgramCU::InitHistogram(CuTexImage* key, CuTexImage* hist) { int ws = key->GetImgWidth(), hs = key->GetImgHeight();//800*600 int wd = hist->GetImgWidth(), hd = hist->GetImgHeight();//200*600 dim3 grid((wd + HIST_INIT_WIDTH - 1)/ HIST_INIT_WIDTH, hd);//(200/128,600) dim3 block(HIST_INIT_WIDTH, 1); //(128,1) key->BindTexture(texDataF4); //hist->cuda,800,200,600 hipLaunchKernelGGL(( InitHist_Kernel), dim3(grid), dim3(block), 0, 0, (int4*) hist->_cuData, ws, wd, hd); hipDeviceSynchronize(); } //200,50,600 void __global__ ReduceHist_Kernel(int4* d_hist, int ws, int wd, int height) { int row = IMUL(blockIdx.y, blockDim.y) + threadIdx.y;// int col = IMUL(blockIdx.x, blockDim.x) + threadIdx.x;// if(row < height && col < wd) { int hidx = IMUL(row, wd) + col;// int scol = col << 2;//col*4 int sidx = IMUL(row, ws) + scol;// int v[4] = {0, 0, 0, 0}; #pragma unroll for(int i = 0; i < 4 && scol < ws; ++i, ++scol) { int4 temp = tex1Dfetch(texDataI4, sidx + i);// 200--50 v[i] = temp.x + temp.y + temp.z + temp.w; //(x,y,z,w) } d_hist[hidx] = make_int4(v[0], v[1], v[2], v[3]);// } } void ProgramCU::ReduceHistogram(CuTexImage*hist1, CuTexImage* hist2) { int ws = hist1->GetImgWidth(), hs = hist1->GetImgHeight(); int wd = hist2->GetImgWidth(), hd = hist2->GetImgHeight(); int temp = (int)floor(logf(float(wd * 2/ 3)) / logf(2.0f)); const int wi = min(7, max(temp , 0)); hist1->BindTexture(texDataI4); const int BW = 1 << wi, BH = 1 << (7 - wi); dim3 grid((wd + BW - 1)/ BW, (hd + BH -1) / BH);//wd/32,hd/4 dim3 block(BW, BH);//32,4 hipLaunchKernelGGL(( ReduceHist_Kernel), dim3(grid), dim3(block), 0, 0, (int4*)hist2->_cuData, ws, wd, hd); hipDeviceSynchronize(); } void __global__ ListGen_Kernel(int4* d_list, int width) { int idx1 = IMUL(blockIdx.x, blockDim.x) + threadIdx.x; // int4 pos = tex1Dfetch(texDataList, idx1); // int idx2 = IMUL(pos.y, width) + pos.x; //0 y600xwidth4 int4 temp = tex1Dfetch(texDataI4, idx2);// int sum1 = temp.x + temp.y; int sum2 = sum1 + temp.z; pos.x <<= 2;//pos.x *4 if(pos.z >= sum2)// pos.x=0pos.x00pos.x=0 { pos.x += 3; pos.z -= sum2; }else if(pos.z >= sum1) { pos.x += 2; pos.z -= sum1; }else if(pos.z >= temp.x) { pos.x += 1; pos.z -= temp.x; } d_list[idx1] = pos; } //input list (x, y) (x, y) .... 
featurehist void ProgramCU::GenerateList(CuTexImage* list, CuTexImage* hist) { int len = list->GetImgWidth();//327 list->BindTexture(texDataList); hist->BindTexture(texDataI4); dim3 grid((len + LISTGEN_BLOCK_DIM -1) /LISTGEN_BLOCK_DIM);// len/128 dim3 block(LISTGEN_BLOCK_DIM);//128 //listgenerate hipLaunchKernelGGL(( ListGen_Kernel), dim3(grid), dim3(block), 0, 0, (int4*) list->_cuData, hist->GetImgWidth()); hipDeviceSynchronize(); } void __global__ ComputeOrientation_Kernel(float4* d_list, int list_len,//327 int width, int height, //800,600 float sigma, float sigma_step, //2.01,1.26 float gaussian_factor, float sample_factor,//1.5,3 int num_orientation,//2 int existing_keypoint, //0 int subpixel, //1 int keepsign)//0 { //10 const float ten_degree_per_radius = 5.7295779513082320876798154814105; //(360/10)/2pi // const float radius_per_ten_degrees = 1.0 / 5.7295779513082320876798154814105;//(10/360)*2PI int idx = IMUL(blockDim.x, blockIdx.x) + threadIdx.x;// if(idx >= list_len) return; float4 key; //key(x,y,sigma,) if(existing_keypoint) { key = tex1Dfetch(texDataF4, idx); }else { int4 ikey = tex1Dfetch(texDataList, idx);//(x,y,sigma,) key.x = ikey.x + 0.5f; // key.y = ikey.y + 0.5f; key.z = sigma; if(subpixel || keepsign) { float4 offset = tex1Dfetch(texDataF4, IMUL(width, ikey.y) + ikey.x);// if(subpixel) { //keyresult,dx,dy,ds key.x += offset.y;//x+=dx key.y += offset.z;//y+=dy key.z *= pow(sigma_step, offset.w);//z*=dz } if(keepsign) key.z *= offset.x; //??? } } if(num_orientation == 0) { key.w = 0; d_list[idx] = key; return; } float vote[37]; // float gsigma = key.z * gaussian_factor;//key.zsigma float win = fabs(key.z) * sample_factor;// float dist_threshold = win * win + 0.5; // float factor = -0.5f / (gsigma * gsigma); float xmin = max(1.5f, floor(key.x - win) + 0.5f); //-radius float ymin = max(1.5f, floor(key.y - win) + 0.5f); float xmax = min(width - 1.5f, floor(key.x + win) + 0.5f);//+radius float ymax = min(height -1.5f, floor(key.y + win) + 0.5f); #pragma unroll for(int i = 0; i < 36; ++i) vote[i] = 0.0f; //36 for(float y = ymin; y <= ymax; y += 1.0f) //-radius~+radius { for(float x = xmin; x <= xmax; x += 1.0f)//-radius~+radius { float dx = x - key.x; //i float dy = y - key.y; //j float sq_dist = dx * dx + dy * dy; float2 got = tex2D(texDataF2, x, y);// got.xgot.y float weight = got.x * exp(sq_dist * factor);// float fidx = floor(got.y * ten_degree_per_radius);//2PI536 int oidx = fidx; if(oidx < 0) oidx += 36; vote[oidx] += weight; // } } //filter the vote const float one_third = 1.0 /3.0;// #pragma unroll // for(int i = 0; i < 6; ++i) { vote[36] = vote[0]; float pre = vote[35]; #pragma unroll for(int j = 0; j < 36; ++j) { float temp = one_third * (pre + vote[j] + vote[j + 1]); pre = vote[j]; vote[j] = temp; //1pre2 } } vote[36] = vote[0]; if(num_orientation == 1 || existing_keypoint) { int index_max = 0; float max_vote = vote[0]; #pragma unroll for(int i = 1; i < 36; ++i) { index_max = vote[i] > max_vote? i : index_max; max_vote = max(max_vote, vote[i]); } float pre = vote[index_max == 0? 
35 : index_max -1]; float next = vote[index_max + 1]; float weight = max_vote; float off = 0.5f * FDIV(next - pre, weight + weight - next - pre); key.w = radius_per_ten_degrees * (index_max + 0.5f + off); d_list[idx] = key; }else { float max_vote = vote[0]; #pragma unroll for(int i = 1; i < 36; ++i) max_vote = max(max_vote, vote[i]); // float vote_threshold = max_vote * 0.8f; //80% float pre = vote[35]; float max_rot[2], max_vot[2] = {0, 0}; // int ocount = 0; #pragma unroll for(int i =0; i < 36; ++i)//36 { float next = vote[i + 1];// if(vote[i] > vote_threshold && vote[i] > pre && vote[i] > next) { float di = 0.5f * FDIV(next - pre, vote[i] + vote[i] - next - pre);// float rot = i + di + 0.5f; float weight = vote[i]; ///max_vot[0] max_vot[1]>=max_vot[0] if(weight > max_vot[1]) { if(weight > max_vot[0]) { max_vot[1] = max_vot[0]; // max_rot[1] = max_rot[0]; max_vot[0] = weight; max_rot[0] = rot; } else { max_vot[1] = weight; max_rot[1] = rot; } ocount ++; } } pre = vote[i]; } float fr1 = max_rot[0] / 36.0f; //0,1 if(fr1 < 0) fr1 += 1.0f; unsigned short us1 = ocount == 0? 65535 : ((unsigned short )floor(fr1 * 65535.0f)); unsigned short us2 = 65535; if(ocount > 1) //ocount11 { float fr2 = max_rot[1] / 36.0f; //0,1 if(fr2 < 0) fr2 += 1.0f; us2 = (unsigned short ) floor(fr2 * 65535.0f); } unsigned int uspack = (us2 << 16) | us1; //us2*2^16 us2!!! //311,3*2=611020 //16065535 key.w = __int_as_float(uspack);//intfloatkey.wkeyfloat4 d_list[idx] = key; } } void ProgramCU::ComputeOrientation(CuTexImage* list, CuTexImage* got, CuTexImage*key, float sigma, float sigma_step, int existing_keypoint) { int len = list->GetImgWidth(); if(len <= 0) return; int width = got->GetImgWidth(), height = got->GetImgHeight(); if(existing_keypoint) { list->BindTexture(texDataF4); }else { list->BindTexture(texDataList); if(GlobalUtil::_SubpixelLocalization) key->BindTexture(texDataF4);//F4(result,dx,dy,ds) } got->BindTexture2D(texDataF2); //F2, const int block_width = len < ORIENTATION_COMPUTE_PER_BLOCK ? 16 : ORIENTATION_COMPUTE_PER_BLOCK; //len<64,1664 dim3 grid((len + block_width -1) / block_width); dim3 block(block_width); hipLaunchKernelGGL(( ComputeOrientation_Kernel), dim3(grid), dim3(block), 0, 0, (float4*) list->_cuData, len, width, height, sigma, sigma_step, GlobalUtil::_OrientationGaussianFactor, GlobalUtil::_OrientationGaussianFactor * GlobalUtil::_OrientationWindowFactor, GlobalUtil::_FixedOrientation? 0 : GlobalUtil::_MaxOrientation, //0 existing_keypoint, GlobalUtil::_SubpixelLocalization, GlobalUtil::_KeepExtremumSign); hipDeviceSynchronize(); ProgramCU::CheckErrorCUDA("ComputeOrientation"); } template <bool DYNAMIC_INDEXING> void __global__ ComputeDescriptor_Kernel(float4* d_des, int num, int width, int height, float window_factor) { const float rpi = 4.0/ 3.14159265358979323846; int idx = IMUL(blockIdx.x, blockDim.x) + threadIdx.x; int fidx = idx >> 4; //idx/16 if(fidx >= num) return; float4 key = tex1Dfetch(texDataF4, fidx);//featureTex int bidx = idx& 0xf, ix = bidx & 0x3, iy = bidx >> 2; //(0,0),(0,1)(0,2)...(3,3) float spt = fabs(key.z * window_factor);// 3sigma float s, c; __sincosf(key.w, &s, &c); // float anglef = key.w > 3.14159265358979323846? 
key.w - (2.0 * 3.14159265358979323846) : key.w ; float cspt = c * spt, sspt = s * spt;// float crspt = c / spt, srspt = s / spt;// float2 offsetpt, pt; float xmin, ymin, xmax, ymax, bsz; offsetpt.x = ix - 1.5f;//~~1.5+d/2-0.5 offsetpt.y = iy - 1.5f;//(0~3)->(-1.5~1.5) // pt.x = cspt * offsetpt.x - sspt * offsetpt.y + key.x; // pt.y = cspt * offsetpt.y + sspt * offsetpt.x + key.y; bsz = fabs(cspt) + fabs(sspt); //radius 2*spt xmin = max(1.5f, floor(pt.x - bsz) + 0.5f); //-radius~radius ymin = max(1.5f, floor(pt.y - bsz) + 0.5f); //-radius~radius xmax = min(width - 1.5f, floor(pt.x + bsz) + 0.5f); ymax = min(height - 1.5f, floor(pt.y + bsz) + 0.5f); float des[9]; #pragma unroll for(int i =0; i < 9; ++i) des[i] = 0.0f; // for(float y = ymin; y <= ymax; y += 1.0f) { for(float x = xmin; x <= xmax; x += 1.0f) { float dx = x - pt.x; //pt float dy = y - pt.y; float nx = crspt * dx + srspt * dy; // float ny = crspt * dy - srspt * dx; float nxn = fabs(nx); float nyn = fabs(ny); if(nxn < 1.0f && nyn < 1.0f) { float2 cc = tex2D(texDataF2, x, y); // float dnx = nx + offsetpt.x; float dny = ny + offsetpt.y;// float ww = exp(-0.125f * (dnx * dnx + dny * dny)); float wx = 1.0 - nxn; float wy = 1.0 - nyn; float weight = ww * wx * wy * cc.x; float theta = (anglef - cc.y) * rpi; // if(theta < 0) theta += 8.0f; float fo = floor(theta); int fidx = fo;// float weight1 = fo + 1.0f - theta; //1-(theta - fo) float weight2 = theta - fo; //theta - fo if(DYNAMIC_INDEXING) { des[fidx] += (weight1 * weight); des[fidx + 1] += (weight2 * weight); //this dynamic indexing part might be slow }else { #pragma unroll for(int k = 0; k < 8; ++k) { if(k == fidx) { des[k] += (weight1 * weight); des[k+1] += (weight2 * weight); } } } } } } des[0] += des[8]; int didx = idx << 1;//0,2,4,6,8 //1616*8=128 //32idx //128(float)*num->32(float4)*num->2(float4)* 16*num //1616<<1=16*2 8 d_des[didx] = make_float4(des[0], des[1], des[2], des[3]); d_des[didx+1] = make_float4(des[4], des[5], des[6], des[7]); } template <bool DYNAMIC_INDEXING> void __global__ ComputeDescriptorRECT_Kernel(float4* d_des, int num, int width, int height, float window_factor) { const float rpi = 4.0/ 3.14159265358979323846; int idx = IMUL(blockIdx.x, blockDim.x) + threadIdx.x; int fidx = idx >> 4; if(fidx >= num) return; float4 key = tex1Dfetch(texDataF4, fidx); int bidx = idx& 0xf, ix = bidx & 0x3, iy = bidx >> 2; //float aspect_ratio = key.w / key.z; //float aspect_sq = aspect_ratio * aspect_ratio; float sptx = key.z * 0.25, spty = key.w * 0.25; float xmin, ymin, xmax, ymax; float2 pt; pt.x = sptx * (ix + 0.5f) + key.x; pt.y = spty * (iy + 0.5f) + key.y; xmin = max(1.5f, floor(pt.x - sptx) + 0.5f); ymin = max(1.5f, floor(pt.y - spty) + 0.5f); xmax = min(width - 1.5f, floor(pt.x + sptx) + 0.5f); ymax = min(height - 1.5f, floor(pt.y + spty) + 0.5f); float des[9]; #pragma unroll for(int i =0; i < 9; ++i) des[i] = 0.0f; for(float y = ymin; y <= ymax; y += 1.0f) { for(float x = xmin; x <= xmax; x += 1.0f) { float nx = (x - pt.x) / sptx; float ny = (y - pt.y) / spty; float nxn = fabs(nx); float nyn = fabs(ny); if(nxn < 1.0f && nyn < 1.0f) { float2 cc = tex2D(texDataF2, x, y); float wx = 1.0 - nxn; float wy = 1.0 - nyn; float weight = wx * wy * cc.x; float theta = (- cc.y) * rpi; if(theta < 0) theta += 8.0f; float fo = floor(theta); int fidx = fo; float weight1 = fo + 1.0f - theta; float weight2 = theta - fo; if(DYNAMIC_INDEXING) { des[fidx] += (weight1 * weight); des[fidx + 1] += (weight2 * weight); //this dynamic indexing part might be slow }else { #pragma unroll for(int 
k = 0; k < 8; ++k) { if(k == fidx) { des[k] += (weight1 * weight); des[k+1] += (weight2 * weight); } } } } } } des[0] += des[8]; int didx = idx << 1; d_des[didx] = make_float4(des[0], des[1], des[2], des[3]); d_des[didx+1] = make_float4(des[4], des[5], des[6], des[7]); } void __global__ NormalizeDescriptor_Kernel(float4* d_des, int num) { float4 temp[32]; int idx = IMUL(blockIdx.x, blockDim.x) + threadIdx.x; if(idx >= num) return; int sidx = idx << 5;//idx*32 float norm1 = 0, norm2 = 0; #pragma unroll for(int i = 0; i < 32; ++i) //32*4=128 { temp[i] = tex1Dfetch(texDataF4, sidx +i); norm1 += (temp[i].x * temp[i].x + temp[i].y * temp[i].y + temp[i].z * temp[i].z + temp[i].w * temp[i].w); } norm1 = rsqrt(norm1); // #pragma unroll for(int i = 0; i < 32; ++i) { temp[i].x = min(0.2f, temp[i].x * norm1); temp[i].y = min(0.2f, temp[i].y * norm1); temp[i].z = min(0.2f, temp[i].z * norm1); temp[i].w = min(0.2f, temp[i].w * norm1); norm2 += (temp[i].x * temp[i].x + temp[i].y * temp[i].y + temp[i].z * temp[i].z + temp[i].w * temp[i].w);// } norm2 = rsqrt(norm2); #pragma unroll for(int i = 0; i < 32; ++i) { temp[i].x *= norm2; temp[i].y *= norm2; temp[i].z *= norm2; temp[i].w *= norm2; d_des[sidx + i] = temp[i]; } } void ProgramCU::ComputeDescriptor(CuTexImage*list, CuTexImage* got, CuTexImage* dtex, int rect, int stream) { int num = list->GetImgWidth(); int width = got->GetImgWidth(); int height = got->GetImgHeight(); dtex->InitTexture(num * 128, 1, 1); got->BindTexture2D(texDataF2); list->BindTexture(texDataF4); int block_width = DESCRIPTOR_COMPUTE_BLOCK_SIZE;//64 dim3 grid((num * 16 + block_width -1) / block_width);//num*16/64 16*8=128 dim3 block(block_width);//64 if(rect) { if(GlobalUtil::_UseDynamicIndexing) hipLaunchKernelGGL(( ComputeDescriptorRECT_Kernel<true>), dim3(grid), dim3(block), 0, 0, (float4*) dtex->_cuData, num, width, height, GlobalUtil::_DescriptorWindowFactor); else hipLaunchKernelGGL(( ComputeDescriptorRECT_Kernel<false>), dim3(grid), dim3(block), 0, 0, (float4*) dtex->_cuData, num, width, height, GlobalUtil::_DescriptorWindowFactor); }else { if(GlobalUtil::_UseDynamicIndexing) hipLaunchKernelGGL(( ComputeDescriptor_Kernel<true>), dim3(grid), dim3(block), 0, 0, (float4*) dtex->_cuData, num, width, height, GlobalUtil::_DescriptorWindowFactor); else hipLaunchKernelGGL(( ComputeDescriptor_Kernel<false>), dim3(grid), dim3(block), 0, 0, (float4*) dtex->_cuData, num, width, height, GlobalUtil::_DescriptorWindowFactor); } hipDeviceSynchronize(); if(GlobalUtil::_NormalizedSIFT) { dtex->BindTexture(texDataF4); const int block_width = DESCRIPTOR_NORMALIZ_PER_BLOCK; dim3 grid((num + block_width -1) / block_width); dim3 block(block_width); hipLaunchKernelGGL(( NormalizeDescriptor_Kernel), dim3(grid), dim3(block), 0, 0, (float4*) dtex->_cuData, num); hipDeviceSynchronize(); } CheckErrorCUDA("ComputeDescriptor"); } ////////////////////////////////////////////////////// void ProgramCU::FinishCUDA() { hipDeviceSynchronize(); } int ProgramCU::CheckErrorCUDA(const char* location) { hipError_t e = hipGetLastError(); if(e) { if(location) fprintf(stderr, "%s:\t", location); fprintf(stderr, "%s\n", hipGetErrorString(e)); //assert(0); return 1; }else { return 0; } } void __global__ ConvertDOG_Kernel(float* d_result, int width, int height) { int row = (blockIdx.y << BLOCK_LOG_DIM) + threadIdx.y; int col = (blockIdx.x << BLOCK_LOG_DIM) + threadIdx.x; if(col < width && row < height) { int index = row * width + col; float v = tex1Dfetch(texData, index); d_result[index] = (col == 0 || row == 0 || col == 
width -1 || row == height -1)? 0.5 : saturate(0.5+20.0*v); } } /// void ProgramCU::DisplayConvertDOG(CuTexImage* dog, CuTexImage* out) { if(out->_cuData == NULL) return; int width = dog->GetImgWidth(), height = dog ->GetImgHeight(); dog->BindTexture(texData); dim3 grid((width + BLOCK_DIM - 1)/ BLOCK_DIM, (height + BLOCK_DIM - 1)/BLOCK_DIM); dim3 block(BLOCK_DIM, BLOCK_DIM); hipLaunchKernelGGL(( ConvertDOG_Kernel), dim3(grid), dim3(block), 0, 0, (float*) out->_cuData, width, height); ProgramCU::CheckErrorCUDA("DisplayConvertDOG"); } void __global__ ConvertGRD_Kernel(float* d_result, int width, int height) { int row = (blockIdx.y << BLOCK_LOG_DIM) + threadIdx.y; int col = (blockIdx.x << BLOCK_LOG_DIM) + threadIdx.x; if(col < width && row < height) { int index = row * width + col; float v = tex1Dfetch(texData, index << 1); d_result[index] = (col == 0 || row == 0 || col == width -1 || row == height -1)? 0 : saturate(5 * v); } } void ProgramCU::DisplayConvertGRD(CuTexImage* got, CuTexImage* out) { if(out->_cuData == NULL) return; int width = got->GetImgWidth(), height = got ->GetImgHeight(); got->BindTexture(texData); dim3 grid((width + BLOCK_DIM - 1)/ BLOCK_DIM, (height + BLOCK_DIM - 1)/BLOCK_DIM); dim3 block(BLOCK_DIM, BLOCK_DIM); hipLaunchKernelGGL(( ConvertGRD_Kernel), dim3(grid), dim3(block), 0, 0, (float*) out->_cuData, width, height); ProgramCU::CheckErrorCUDA("DisplayConvertGRD"); } void __global__ ConvertKEY_Kernel(float4* d_result, int width, int height) { int row = (blockIdx.y << BLOCK_LOG_DIM) + threadIdx.y; int col = (blockIdx.x << BLOCK_LOG_DIM) + threadIdx.x; if(col < width && row < height) { int index = row * width + col; float4 keyv = tex1Dfetch(texDataF4, index); int is_key = (keyv.x == 1.0f || keyv.x == -1.0f); int inside = col > 0 && row > 0 && row < height -1 && col < width - 1; float v = inside? saturate(0.5 + 20 * tex1Dfetch(texData, index)) : 0.5; d_result[index] = is_key && inside ? (keyv.x > 0? make_float4(1.0f, 0, 0, 1.0f) : make_float4(0.0f, 1.0f, 0.0f, 1.0f)): make_float4(v, v, v, 1.0f) ; } } void ProgramCU::DisplayConvertKEY(CuTexImage* key, CuTexImage* dog, CuTexImage* out) { if(out->_cuData == NULL) return; int width = key->GetImgWidth(), height = key ->GetImgHeight(); dog->BindTexture(texData); key->BindTexture(texDataF4); dim3 grid((width + BLOCK_DIM - 1)/ BLOCK_DIM, (height + BLOCK_DIM - 1)/BLOCK_DIM); dim3 block(BLOCK_DIM, BLOCK_DIM); hipLaunchKernelGGL(( ConvertKEY_Kernel), dim3(grid), dim3(block), 0, 0, (float4*) out->_cuData, width, height); } void __global__ DisplayKeyPoint_Kernel(float4 * d_result, int num) { int idx = IMUL(blockIdx.x, blockDim.x) + threadIdx.x; if(idx >= num) return; float4 v = tex1Dfetch(texDataF4, idx); d_result[idx] = make_float4(v.x, v.y, 0, 1.0f); } void ProgramCU::DisplayKeyPoint(CuTexImage* ftex, CuTexImage* out) { int num = ftex->GetImgWidth(); int block_width = 64; dim3 grid((num + block_width -1) /block_width); dim3 block(block_width); ftex->BindTexture(texDataF4); hipLaunchKernelGGL(( DisplayKeyPoint_Kernel), dim3(grid), dim3(block), 0, 0, (float4*) out->_cuData, num); ProgramCU::CheckErrorCUDA("DisplayKeyPoint"); } void __global__ DisplayKeyBox_Kernel(float4* d_result, int num) { int idx = IMUL(blockIdx.x, blockDim.x) + threadIdx.x; if(idx >= num) return; int kidx = idx / 10, vidx = idx - IMUL(kidx , 10); float4 v = tex1Dfetch(texDataF4, kidx); float sz = fabs(v.z * 3.0f); /////////////////////// float s, c; __sincosf(v.w, &s, &c); /////////////////////// float dx = vidx == 0? 0 : ((vidx <= 4 || vidx >= 9)? 
sz : -sz); float dy = vidx <= 1? 0 : ((vidx <= 2 || vidx >= 7)? -sz : sz); float4 pos; pos.x = v.x + c * dx - s * dy; pos.y = v.y + c * dy + s * dx; pos.z = 0; pos.w = 1.0f; d_result[idx] = pos; } void ProgramCU::DisplayKeyBox(CuTexImage* ftex, CuTexImage* out) { int len = ftex->GetImgWidth(); int block_width = 32; dim3 grid((len * 10 + block_width -1) / block_width); dim3 block(block_width); ftex->BindTexture(texDataF4); hipLaunchKernelGGL(( DisplayKeyBox_Kernel), dim3(grid), dim3(block), 0, 0, (float4*) out->_cuData, len * 10); } /////////////////////////////////////////////////////////////////// inline void CuTexImage:: BindTexture(textureReference& texRef) { hipBindTexture(NULL, &texRef, _cuData, &texRef.channelDesc, _numBytes); } inline void CuTexImage::BindTexture2D(textureReference& texRef) { #if defined(SIFTGPU_ENABLE_LINEAR_TEX2D) hipBindTexture2D(0, &texRef, _cuData, &texRef.channelDesc, _imgWidth, _imgHeight, _imgWidth* _numChannel* sizeof(float)); #else hipChannelFormatDesc desc; hipGetChannelDesc(&desc, _cuData2D); hipBindTextureToArray(&texRef, _cuData2D, &desc); #endif } int ProgramCU::CheckCudaDevice(int device) { int count = 0, device_used; if(hipGetDeviceCount(&count) != hipSuccess || count <= 0) { ProgramCU::CheckErrorCUDA("CheckCudaDevice"); return 0; }else if(count == 1) { hipDeviceProp_t deviceProp; if ( hipGetDeviceProperties(&deviceProp, 0) != hipSuccess || (deviceProp.major == 9999 && deviceProp.minor == 9999)) { fprintf(stderr, "CheckCudaDevice: no device supporting CUDA.\n"); return 0; }else { GlobalUtil::_MemCapGPU = deviceProp.totalGlobalMem / 1024; GlobalUtil::_texMaxDimGL = 132768; if(GlobalUtil::_verbose) fprintf(stdout, "NOTE: changing maximum texture dimension to %d\n", GlobalUtil::_texMaxDimGL); } } if(device >0 && device < count) { hipSetDevice(device); CheckErrorCUDA("hipSetDevice\n"); } hipGetDevice(&device_used); if(device != device_used) fprintf(stderr, "\nERROR: Cannot set device to %d\n" "\nWARNING: Use # %d device instead (out of %d)\n", device, device_used, count); return 1; } //////////////////////////////////////////////////////////////////////////////////////// // siftmatch funtions ////////////////////////////////////////////////////////////////////////////////////////// #define MULT_TBLOCK_DIMX 128 #define MULT_TBLOCK_DIMY 1 #define MULT_BLOCK_DIMX (MULT_TBLOCK_DIMX) #define MULT_BLOCK_DIMY (8 * MULT_TBLOCK_DIMY) texture<uint4, 1, hipReadModeElementType> texDes1; texture<uint4, 1, hipReadModeElementType> texDes2; //dim grid(num2/128,num1/8) //dim block(128,1) void __global__ MultiplyDescriptor_Kernel(int* d_result, int num1, int num2, int3* d_temp) { //MULT_BLOCK_DIMY : 8 MULT_BLOCK_DIMX : 128 int idx01 = (blockIdx.y * MULT_BLOCK_DIMY), //0~num1(0,8,16,24,32...) 
idx02 = (blockIdx.x * MULT_BLOCK_DIMX); //0~num2 int idx1 = idx01 + threadIdx.y, //idx1 = idx01 idx2 = idx02 + threadIdx.x;//col __shared__ int data1[17 * 2 * MULT_BLOCK_DIMY]; //272 128,1 int read_idx1 = idx01 * 8 + threadIdx.x,// read_idx2 = idx2 * 8;// int col4 = threadIdx.x & 0x3, //0,1,2,3 row4 = threadIdx.x >> 2; //threadIdx.x/4 int cache_idx1 = IMUL(row4, 17) + (col4 << 2);//row4*17+col4*4 /////////////////////////////////////////////////////////////// //Load feature descriptors /////////////////////////////////////////////////////////////// #if MULT_BLOCK_DIMY == 16 uint4 v = tex1Dfetch(texDes1, read_idx1); data1[cache_idx1] = v.x; data1[cache_idx1+1] = v.y; data1[cache_idx1+2] = v.z; data1[cache_idx1+3] = v.w; #elif MULT_BLOCK_DIMY == 8 if(threadIdx.x < 64) //threadIdx.x = 64cache_idx1272 { uint4 v = tex1Dfetch(texDes1, read_idx1); //num1 data1[cache_idx1] = v.x; data1[cache_idx1+1] = v.y; data1[cache_idx1+2] = v.z; data1[cache_idx1+3] = v.w; } #else #error #endif __syncthreads(); /// if(idx2 >= num2) return; /////////////////////////////////////////////////////////////////////////// //compare descriptors int results[MULT_BLOCK_DIMY]; //8result #pragma unroll for(int i = 0; i < MULT_BLOCK_DIMY; ++i) results[i] = 0; #pragma unroll for(int i = 0; i < 8; ++i) //8 { uint4 v = tex1Dfetch(texDes2, read_idx2 + i); unsigned char* p2 = (unsigned char*)(&v); //int16char #pragma unroll for(int k = 0; k < MULT_BLOCK_DIMY; ++k) //8 { unsigned char* p1 = (unsigned char*) (data1 + k * 34 + i * 4 + (i/4)); //i/445 results[k] += ( IMUL(p1[0], p2[0]) + IMUL(p1[1], p2[1]) + IMUL(p1[2], p2[2]) + IMUL(p1[3], p2[3]) + IMUL(p1[4], p2[4]) + IMUL(p1[5], p2[5]) + IMUL(p1[6], p2[6]) + IMUL(p1[7], p2[7]) + IMUL(p1[8], p2[8]) + IMUL(p1[9], p2[9]) + IMUL(p1[10], p2[10]) + IMUL(p1[11], p2[11]) + IMUL(p1[12], p2[12]) + IMUL(p1[13], p2[13]) + IMUL(p1[14], p2[14]) + IMUL(p1[15], p2[15])); // results[k] += ( IMUL(p1[0]-p2[0], p1[0]-p2[0]) + IMUL(p1[1]-p2[1], p1[1]-p2[1]) //+ IMUL(p1[2]-p2[2], p1[2]-p2[2]) + IMUL(p1[3]-p2[3],p1[3]-p2[3] ) //+ IMUL(p1[4]-p2[4], p1[4]-p2[4]) + IMUL(p1[5]-p2[5], p1[5]-p2[5]) //+ IMUL(p1[6]-p2[6], p1[6]-p2[6]) + IMUL(p1[7]-p2[7], p1[7]-p2[7]) //+ IMUL(p1[8]-p2[8], p1[8]-p2[8]) + IMUL(p1[9]-p2[9], p1[9]-p2[9]) //+ IMUL(p1[10]-p2[10],p1[10]-p2[10]) + IMUL(p1[11]-p2[11], p1[11]-p2[11]) //+ IMUL(p1[12]-p2[12], p1[12]-p2[12]) + IMUL(p1[13] -p2[13],p1[13] -p2[13]) //+ IMUL(p1[14]-p2[14],p1[14]-p2[14] ) + IMUL(p1[15]-p2[15], p1[15]-p2[15])); } } int dst_idx = IMUL(idx1, num2) + idx2; //(8*threadIdx.y*num2+idx2) if(d_temp) { int3 cmp_result = make_int3(0, -1, 0); //8result((num1)) #pragma unroll for(int i = 0; i < MULT_BLOCK_DIMY; ++i) //8 { if(idx1 + i < num1)//i:0~8+idx1 { cmp_result = results[i] > cmp_result.x? 
make_int3(results[i], idx1 + i, cmp_result.x) : make_int3(cmp_result.x, cmp_result.y, max(cmp_result.z, results[i])); //(i, num2) d_result[dst_idx + IMUL(i, num2)] = results[i]; } } d_temp[ IMUL(blockIdx.y, num2) + idx2] = cmp_result; } else { #pragma unroll for(int i = 0; i < MULT_BLOCK_DIMY; ++i)//8 { if(idx1 + i < num1) d_result[dst_idx + IMUL(i, num2)] = results[i]; } } } void ProgramCU::MultiplyDescriptor(CuTexImage* des1, CuTexImage* des2, CuTexImage* texDot, CuTexImage* texCRT) { int num1 = des1->GetImgWidth() / 8; //1067 num*8 4 float4 int num2 = des2->GetImgWidth() / 8; //728 dim3 grid( (num2 + MULT_BLOCK_DIMX - 1)/ MULT_BLOCK_DIMX, (num1 + MULT_BLOCK_DIMY - 1)/MULT_BLOCK_DIMY);// (num2/128,num1/8) dim3 block(MULT_TBLOCK_DIMX, MULT_TBLOCK_DIMY);//(128,1) //(num1/8)*num2 texDot->InitTexture( num2,num1);// (num2*num1)*4 float4 if(texCRT) //(num2 * num1/8 ) *4 float4 texCRT->InitTexture(num2, (num1 + MULT_BLOCK_DIMY - 1)/MULT_BLOCK_DIMY, 32); des1->BindTexture(texDes1);// des2->BindTexture(texDes2);// //CRTNULL texDes1texDes2 //texDot GlobalUtil::StartTimer(""); hipLaunchKernelGGL(( MultiplyDescriptor_Kernel), dim3(grid), dim3(block), 0, 0, (int*)texDot->_cuData, num1, num2, (texCRT? (int3*)texCRT->_cuData : NULL)); hipDeviceSynchronize(); GlobalUtil::StopTimer(); float _timing1 = GlobalUtil::GetElapsedTime(); hipDeviceSynchronize(); ProgramCU::CheckErrorCUDA("MultiplyDescriptor"); } texture<float, 1, hipReadModeElementType> texLoc1; texture<float2, 1, hipReadModeElementType> texLoc2; struct Matrix33{float mat[3][3];}; //(num2/128num1/8) //(128,8) void __global__ MultiplyDescriptorG_Kernel(int* d_result, int num1, int num2, int3* d_temp, Matrix33 H, float hdistmax, Matrix33 F, float fdistmax) { int idx01 = (blockIdx.y * MULT_BLOCK_DIMY); //8 0,8,16,24,32... int idx02 = (blockIdx.x * MULT_BLOCK_DIMX);//128 (0,128,256...) 
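// Guided matching: each block stages MULT_BLOCK_DIMY (8) descriptors and keypoint locations of image 1 in shared memory, applies the homography and fundamental-matrix checks first, and skips the descriptor dot products when none of its candidate pairs pass.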
int idx1 = idx01 + threadIdx.y;// int idx2 = idx02 + threadIdx.x;// __shared__ int data1[17 * 2 * MULT_BLOCK_DIMY];//272 __shared__ float loc1[MULT_BLOCK_DIMY * 2]; //16 int read_idx1 = idx01 * 8 + threadIdx.x ; //8 int read_idx2 = idx2 * 8; //8 int col4 = threadIdx.x & 0x3, row4 = threadIdx.x >> 2; int cache_idx1 = IMUL(row4, 17) + (col4 << 2); #if MULT_BLOCK_DIMY == 16 // uint4 v = tex1Dfetch(texDes1, read_idx1); data1[cache_idx1] = v.x; data1[cache_idx1+1] = v.y; data1[cache_idx1+2] = v.z; data1[cache_idx1+3] = v.w; #elif MULT_BLOCK_DIMY == 8 if(threadIdx.x < 64) //64272 { uint4 v = tex1Dfetch(texDes1, read_idx1); data1[cache_idx1] = v.x; data1[cache_idx1+1] = v.y; data1[cache_idx1+2] = v.z; data1[cache_idx1+3] = v.w; } #else #error #endif __syncthreads(); if(threadIdx.x < MULT_BLOCK_DIMY * 2) //16 { // 0,8,16,24,32...->0,16,32,48,64...+16 loc1[threadIdx.x] = tex1Dfetch(texLoc1, 2 * idx01 + threadIdx.x); // } __syncthreads(); if(idx2 >= num2) return; int results[MULT_BLOCK_DIMY]; ///////////////////////////////////////////////////////////////////////////////////////////// //geometric verification ///////////////////////////////////////////////////////////////////////////////////////////// int good_count = 0; float2 loc2 = tex1Dfetch(texLoc2, idx2); // //idx2= // #pragma unroll for(int i = 0; i < MULT_BLOCK_DIMY; ++i)//8 { if(idx1 + i < num1) { float* loci = loc1 + i * 2; float locx = loci[0], locy = loci[1]; //homography float x[3], diff[2]; x[0] = H.mat[0][0] * locx + H.mat[0][1] * locy + H.mat[0][2]; x[1] = H.mat[1][0] * locx + H.mat[1][1] * locy + H.mat[1][2]; x[2] = H.mat[2][0] * locx + H.mat[2][1] * locy + H.mat[2][2]; diff[0] = fabs(FDIV(x[0], x[2]) - loc2.x); diff[1] = fabs(FDIV(x[1], x[2]) - loc2.y); if(diff[0] < hdistmax && diff[1] < hdistmax) { //check fundamental matrix float fx1[3], ftx2[3], x2fx1, se; fx1[0] = F.mat[0][0] * locx + F.mat[0][1] * locy + F.mat[0][2]; fx1[1] = F.mat[1][0] * locx + F.mat[1][1] * locy + F.mat[1][2]; fx1[2] = F.mat[2][0] * locx + F.mat[2][1] * locy + F.mat[2][2]; ftx2[0] = F.mat[0][0] * loc2.x + F.mat[1][0] * loc2.y + F.mat[2][0]; ftx2[1] = F.mat[0][1] * loc2.x + F.mat[1][1] * loc2.y + F.mat[2][1]; //ftx2[2] = F.mat[0][2] * loc2.x + F.mat[1][2] * loc2.y + F.mat[2][2]; x2fx1 = loc2.x * fx1[0] + loc2.y * fx1[1] + fx1[2]; se = FDIV(x2fx1 * x2fx1, fx1[0] * fx1[0] + fx1[1] * fx1[1] + ftx2[0] * ftx2[0] + ftx2[1] * ftx2[1]); results[i] = se < fdistmax? 
0: -262144; }else { results[i] = -262144; } }else { results[i] = -262144; } good_count += (results[i] >=0); } ///////////////////////////////////////////////////////////////////////////////////////////// ///compare feature descriptors anyway ///////////////////////////////////////////////////////////////////////////////////////////// if(good_count > 0) { #pragma unroll for(int i = 0; i < 8; ++i) { uint4 v = tex1Dfetch(texDes2, read_idx2 + i); unsigned char* p2 = (unsigned char*)(&v); #pragma unroll for(int k = 0; k < MULT_BLOCK_DIMY; ++k) { unsigned char* p1 = (unsigned char*) (data1 + k * 34 + i * 4 + (i/4)); results[k] += ( IMUL(p1[0], p2[0]) + IMUL(p1[1], p2[1]) + IMUL(p1[2], p2[2]) + IMUL(p1[3], p2[3]) + IMUL(p1[4], p2[4]) + IMUL(p1[5], p2[5]) + IMUL(p1[6], p2[6]) + IMUL(p1[7], p2[7]) + IMUL(p1[8], p2[8]) + IMUL(p1[9], p2[9]) + IMUL(p1[10], p2[10]) + IMUL(p1[11], p2[11]) + IMUL(p1[12], p2[12]) + IMUL(p1[13], p2[13]) + IMUL(p1[14], p2[14]) + IMUL(p1[15], p2[15])); } } } int dst_idx = IMUL(idx1, num2) + idx2; if(d_temp) { int3 cmp_result = make_int3(0, -1, 0); #pragma unroll for(int i= 0; i < MULT_BLOCK_DIMY; ++i) { if(idx1 + i < num1) { cmp_result = results[i] > cmp_result.x? make_int3(results[i], idx1 + i, cmp_result.x) : make_int3(cmp_result.x, cmp_result.y, max(cmp_result.z, results[i])); d_result[dst_idx + IMUL(i, num2)] = max(results[i], 0); }else { break; } } d_temp[ IMUL(blockIdx.y, num2) + idx2] = cmp_result; }else { #pragma unroll for(int i = 0; i < MULT_BLOCK_DIMY; ++i) { if(idx1 + i < num1) d_result[dst_idx + IMUL(i, num2)] = max(results[i], 0); else break; } } } void ProgramCU::MultiplyDescriptorG(CuTexImage* des1, CuTexImage* des2, CuTexImage* loc1, CuTexImage* loc2, CuTexImage* texDot, CuTexImage* texCRT, float H[3][3], float hdistmax, float F[3][3], float fdistmax) { int num1 = des1->GetImgWidth() / 8; int num2 = des2->GetImgWidth() / 8; Matrix33 MatF, MatH; //copy the matrix memcpy(MatF.mat, F, 9 * sizeof(float)); memcpy(MatH.mat, H, 9 * sizeof(float)); //thread blocks dim3 grid( (num2 + MULT_BLOCK_DIMX - 1)/ MULT_BLOCK_DIMX, //(num2/128num1/8) (num1 + MULT_BLOCK_DIMY - 1)/MULT_BLOCK_DIMY); dim3 block(MULT_TBLOCK_DIMX, MULT_TBLOCK_DIMY); //(128,8) //intermediate results texDot->InitTexture( num2,num1); if(texCRT) texCRT->InitTexture( num2, (num1 + MULT_BLOCK_DIMY - 1)/MULT_BLOCK_DIMY, 3); loc1->BindTexture(texLoc1); loc2->BindTexture(texLoc2); des1->BindTexture(texDes1); //1 des2->BindTexture(texDes2);//2 hipLaunchKernelGGL(( MultiplyDescriptorG_Kernel), dim3(grid), dim3(block), 0, 0, (int*)texDot->_cuData, num1, num2, (texCRT? 
(int3*)texCRT->_cuData : NULL), MatH, hdistmax, MatF, fdistmax); } texture<int, 1, hipReadModeElementType> texDOT; #define ROWMATCH_BLOCK_WIDTH 32 #define ROWMATCH_BLOCK_HEIGHT 1 void __global__ RowMatch_Kernel(int*d_dot, int* d_result, int num2, float distmax, float ratiomax) { #if ROWMATCH_BLOCK_HEIGHT == 1 __shared__ int dotmax[ROWMATCH_BLOCK_WIDTH]; //32 __shared__ int dotnxt[ROWMATCH_BLOCK_WIDTH]; //32 __shared__ int dotidx[ROWMATCH_BLOCK_WIDTH]; //32 int row = blockIdx.y; #else // __shared__ int x_dotmax[ROWMATCH_BLOCK_HEIGHT][ROWMATCH_BLOCK_WIDTH]; __shared__ int x_dotnxt[ROWMATCH_BLOCK_HEIGHT][ROWMATCH_BLOCK_WIDTH]; __shared__ int x_dotidx[ROWMATCH_BLOCK_HEIGHT][ROWMATCH_BLOCK_WIDTH]; int* dotmax = x_dotmax[threadIdx.y]; int* dotnxt = x_dotnxt[threadIdx.y]; int* dotidx = x_dotidx[threadIdx.y]; int row = IMUL(blockIdx.y, ROWMATCH_BLOCK_HEIGHT) + threadIdx.y; #endif int base_address = IMUL(row , num2); //dot int t_dotmax = 0, t_dotnxt = 0, t_dotidx = -1;// for(int i = 0; i < num2; i += ROWMATCH_BLOCK_WIDTH)//32 threadIdxnum2/32 { if(threadIdx.x + i < num2) { int v = tex1Dfetch(texDOT, base_address + threadIdx.x + i);//d_dot[base_address + threadIdx.x + i];// bool test = v > t_dotmax; // t_dotnxt = test? t_dotmax : max(t_dotnxt, v);// t_dotidx = test? (threadIdx.x + i) : t_dotidx; //32 t_dotmax = test? v: t_dotmax; } __syncthreads();//t_dotmax //, } // dotmax[threadIdx.x] = t_dotmax; dotnxt[threadIdx.x] = t_dotnxt; dotidx[threadIdx.x] = t_dotidx; __syncthreads(); #pragma unroll for(int step = ROWMATCH_BLOCK_WIDTH/2; step >0; step /= 2) // { if(threadIdx.x < step) { int v1 = dotmax[threadIdx.x], v2 = dotmax[threadIdx.x + step]; bool test = v2 > v1; dotnxt[threadIdx.x] = test? max(v1, dotnxt[threadIdx.x + step]) :max(dotnxt[threadIdx.x], v2); //v1v2 dotidx[threadIdx.x] = test? dotidx[threadIdx.x + step] : dotidx[threadIdx.x]; dotmax[threadIdx.x] = test? v2 : v1; } __syncthreads(); } if(threadIdx.x == 0) { float dist = acos(min(dotmax[0] * 0.000003814697265625f, 1.0)); //acos float distn = acos(min(dotnxt[0] * 0.000003814697265625f, 1.0)); //acos //float ratio = dist / distn; d_result[row] = (dist < distmax) && (dist < distn * ratiomax) ? dotidx[0] : -1;// } } void ProgramCU::GetRowMatch(CuTexImage* texDot, CuTexImage* texMatch, float distmax, float ratiomax) { int num1 = texDot->GetImgHeight();//1068 int num2 = texDot->GetImgWidth();//731 dim3 grid(1, num1/ROWMATCH_BLOCK_HEIGHT);//dim3 grid(1,num1) dim3 block(ROWMATCH_BLOCK_WIDTH, ROWMATCH_BLOCK_HEIGHT);//dim3 block(32,1) texDot->BindTexture(texDOT); GlobalUtil::StartTimer(""); hipLaunchKernelGGL(( RowMatch_Kernel), dim3(grid), dim3(block), 0, 0, (int*)texDot->_cuData, (int*)texMatch->_cuData, num2, distmax, ratiomax); hipDeviceSynchronize(); GlobalUtil::StopTimer(); float _timing1 = GlobalUtil::GetElapsedTime(); } #define COLMATCH_BLOCK_WIDTH 32 //texture<int3, 1, hipReadModeElementType> texCT; void __global__ ColMatch_Kernel(int3*d_crt, int* d_result, int height, int num2, float distmax, float ratiomax) { int col = COLMATCH_BLOCK_WIDTH * blockIdx.x + threadIdx.x; // if(col >= num2) return; int3 result = d_crt[col];//tex1Dfetch(texCT, col); int read_idx = col + num2; for(int i = 1; i < height; ++i, read_idx += num2) { int3 temp = d_crt[read_idx];//tex1Dfetch(texCT, read_idx); result = result.x < temp.x? 
// make_int3(temp.x, temp.y, max(result.x, temp.z)) : make_int3(result.x, result.y, max(result.z, temp.x)); } float dist = acos(min(result.x * 0.000003814697265625f, 1.0)); float distn = acos(min(result.z * 0.000003814697265625f, 1.0)); //float ratio = dist / distn; d_result[col] = (dist < distmax) && (dist < distn * ratiomax) ? result.y : -1;// } void ProgramCU::GetColMatch(CuTexImage* texCRT, CuTexImage* texMatch, float distmax, float ratiomax) { int height = texCRT->GetImgHeight(); int num2 = texCRT->GetImgWidth(); //texCRT->BindTexture(texCT); dim3 grid((num2 + COLMATCH_BLOCK_WIDTH -1) / COLMATCH_BLOCK_WIDTH);//num2/32 dim3 block(COLMATCH_BLOCK_WIDTH); //32*1 hipLaunchKernelGGL(( ColMatch_Kernel), dim3(grid), dim3(block), 0, 0, (int3*)texCRT->_cuData, (int*) texMatch->_cuData, height, num2, distmax, ratiomax); hipDeviceSynchronize(); } #endif
7d312e733c16b443792b52ec0bdd73faa33c40ca.cu
//////////////////////////////////////////////////////////////////////////// // File: ProgramCU.cu // Author: Changchang Wu // Description : implementation of ProgramCU and all CUDA kernels // // Copyright (c) 2007 University of North Carolina at Chapel Hill // All Rights Reserved // // Permission to use, copy, modify and distribute this software and its // documentation for educational, research and non-profit purposes, without // fee, and without a written agreement is hereby granted, provided that the // above copyright notice and the following paragraph appear in all copies. // // The University of North Carolina at Chapel Hill make no representations // about the suitability of this software for any purpose. It is provided // 'as is' without express or implied warranty. // // Please send BUG REPORTS to [email protected] // //////////////////////////////////////////////////////////////////////////// #if defined(CUDA_SIFTGPU_ENABLED) #include "GL/glew.h" #include "stdio.h" #include "CuTexImage.h" #include "ProgramCU.h" #include "GlobalUtil.h" //---------------------------------------------------------------- //Begin SiftGPU setting section. ////////////////////////////////////////////////////////// #define IMUL(X,Y) __mul24(X,Y) //#define FDIV(X,Y) ((X)/(Y)) #define FDIV(X,Y) __fdividef(X,Y) ///////////////////////////////////////////////////////// //filter kernel width range (don't change this) #define KERNEL_MAX_WIDTH 33 #define KERNEL_MIN_WIDTH 5 ////////////////////////////////////////////////////////// //horizontal filter block size (32, 64, 128, 256, 512) #define FILTERH_TILE_WIDTH 128 //#define FILTERH_TILE_WIDTH 256 //#define FILTERH_TILE_WIDTH 160 //thread block for vertical filter. FILTERV_BLOCK_WIDTH can be (4, 8 or 16) #define FILTERV_BLOCK_WIDTH 16 #define FILTERV_BLOCK_HEIGHT 32 //The corresponding image patch for a thread block #define FILTERV_PIXEL_PER_THREAD 4 #define FILTERV_TILE_WIDTH FILTERV_BLOCK_WIDTH #define FILTERV_TILE_HEIGHT (FILTERV_PIXEL_PER_THREAD * FILTERV_BLOCK_HEIGHT) ////////////////////////////////////////////////////////// //thread block size for computing Difference of Gaussian #define DOG_BLOCK_LOG_DIMX 7 #define DOG_BLOCK_LOG_DIMY 0 #define DOG_BLOCK_DIMX (1 << DOG_BLOCK_LOG_DIMX) #define DOG_BLOCK_DIMY (1 << DOG_BLOCK_LOG_DIMY) ////////////////////////////////////////////////////////// //thread block size for keypoint detection #define KEY_BLOCK_LOG_DIMX 3 #define KEY_BLOCK_LOG_DIMY 3 #define KEY_BLOCK_DIMX (1<<KEY_BLOCK_LOG_DIMX) #define KEY_BLOCK_DIMY (1<<KEY_BLOCK_LOG_DIMY) //#define KEY_OFFSET_ONE //make KEY_BLOCK_LOG_DIMX 4 will make the write coalesced.. //but it seems uncoalesced writes don't affect the speed ////////////////////////////////////////////////////////// //thread block size for initializing list generation (64, 128, 256, 512 ...) 实例化列表生成的线程块大小! #define HIST_INIT_WIDTH 128 //thread block size for generating feature list (32, 64, 128, 256, 512, ...)生成特征列表的线程块大小 #define LISTGEN_BLOCK_DIM 128 ///////////////////////////////////////////////////////// //how many keypoint orientations to compute in a block #define ORIENTATION_COMPUTE_PER_BLOCK 64 //how many keypoint descriptor to compute in a block (2, 4, 8, 16, 32) #define DESCRIPTOR_COMPUTE_PER_BLOCK 4 #define DESCRIPTOR_COMPUTE_BLOCK_SIZE (16 * DESCRIPTOR_COMPUTE_PER_BLOCK) //how many keypoint descriptor to normalized in a block (32, ...) 
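// (each SIFT descriptor is 128 floats = 32 float4 values; NormalizeDescriptor_Kernel normalizes, clamps every component at 0.2f, then renormalizes)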
#define DESCRIPTOR_NORMALIZ_PER_BLOCK 32 /////////////////////////////////////////// //Thread block size for visualization //(This doesn't affect the speed of computation) #define BLOCK_LOG_DIM 4 #define BLOCK_DIM (1 << BLOCK_LOG_DIM) //End SiftGPU setting section. //---------------------------------------------------------------- __device__ __constant__ float d_kernel[KERNEL_MAX_WIDTH]; texture<float, 1, cudaReadModeElementType> texData; texture<unsigned char, 1, cudaReadModeNormalizedFloat> texDataB; texture<float2, 2, cudaReadModeElementType> texDataF2; texture<float4, 1, cudaReadModeElementType> texDataF4; texture<int4, 1, cudaReadModeElementType> texDataI4; texture<int4, 1, cudaReadModeElementType> texDataList; //template<int i> __device__ float Conv(float *data) { return Conv<i-1>(data) + data[i]*d_kernel[i];} //template<> __device__ float Conv<0>(float *data) { return data[0] * d_kernel[0]; } ////////////////////////////////////////////////////////////// template<int FW> __global__ void FilterH( float* d_result, int width) { const int HALF_WIDTH = FW >> 1; // FW/2高斯卷积缩减一半 const int CACHE_WIDTH = FILTERH_TILE_WIDTH + FW -1; //128+FW-1 共享内存大小 const int CACHE_COUNT = 2 + (CACHE_WIDTH - 2)/ FILTERH_TILE_WIDTH; //一个catch包含几个高斯卷积核 __shared__ float data[CACHE_WIDTH]; //128+一个高斯卷积核大小 const int bcol = IMUL(blockIdx.x, FILTERH_TILE_WIDTH); //128*blockIdx.x const int col = bcol + threadIdx.x; //线程索引 const int index_min = IMUL(blockIdx.y, width);//每一行第一个 const int index_max = index_min + width - 1;//每一行最后一个 int src_index = index_min + bcol - HALF_WIDTH + threadIdx.x; //每一行第blockIdx.x个线程块,减去高斯核的一半,有边缘效应! int cache_index = threadIdx.x; //0~128 float value = 0; #pragma unroll for(int j = 0; j < CACHE_COUNT; ++j) //一个catch包含几个高斯卷积核 { if(cache_index < CACHE_WIDTH) //128+FW-128=FW 也就是前FW个线程运算了两次!!! { int fetch_index = src_index < index_min? index_min : (src_index > index_max ? index_max : src_index); data[cache_index] = tex1Dfetch(texData,fetch_index); src_index += FILTERH_TILE_WIDTH; cache_index += FILTERH_TILE_WIDTH; } } __syncthreads(); if(col >= width) return; #pragma unroll for(int i = 0; i < FW; ++i) { value += (data[threadIdx.x + i]* d_kernel[i]); } // value = Conv<FW-1>(data + threadIdx.x); d_result[index_min + col] = value; } //////////////////////////////////////////////////////////////////// template<int FW> __global__ void FilterV(float* d_result, int width, int height) { const int HALF_WIDTH = FW >> 1; //滤波的一半 const int CACHE_WIDTH = FW + FILTERV_TILE_HEIGHT - 1; //128+FW-1 const int TEMP = CACHE_WIDTH & 0xf;//最大值是15 //add some extra space to avoid bank conflict #if FILTERV_TILE_WIDTH == 16 //make the stride 16 * n +/- 1 步幅 const int EXTRA = (TEMP == 1 || TEMP == 0) ? 1 - TEMP : 15 - TEMP; #elif FILTERV_TILE_WIDTH == 8 //make the stride 16 * n +/- 2 const int EXTRA = (TEMP == 2 || TEMP == 1 || TEMP == 0) ? 2 - TEMP : (TEMP == 15? 3 : 14 - TEMP); #elif FILTERV_TILE_WIDTH == 4 //make the stride 16 * n +/- 4 const int EXTRA = (TEMP >=0 && TEMP <=4) ? 4 - TEMP : (TEMP > 12? 
20 - TEMP : 12 - TEMP); #else #error #endif const int CACHE_TRUE_WIDTH = CACHE_WIDTH + EXTRA;//真正的共享内存宽度, const int CACHE_COUNT = (CACHE_WIDTH + FILTERV_BLOCK_HEIGHT - 1) / FILTERV_BLOCK_HEIGHT; //catchlength/32 const int WRITE_COUNT = (FILTERV_TILE_HEIGHT + FILTERV_BLOCK_HEIGHT -1) / FILTERV_BLOCK_HEIGHT;//128/32 __shared__ float data[CACHE_TRUE_WIDTH * FILTERV_TILE_WIDTH]; //CACHE_TRUE_WIDTH*16 const int row_block_first = IMUL(blockIdx.y, FILTERV_TILE_HEIGHT); //行索引的第一个值 const int col = IMUL(blockIdx.x, FILTERV_TILE_WIDTH) + threadIdx.x; //这个才是正常的列索引 const int row_first = row_block_first - HALF_WIDTH;//影像第一个行索引 const int data_index_max = IMUL(height - 1, width) + col; //最后一行最后一个 const int cache_col_start = threadIdx.y; //列开始的地方 const int cache_row_start = IMUL(threadIdx.x, CACHE_TRUE_WIDTH); int cache_index = cache_col_start + cache_row_start; //行列交叉 int data_index = IMUL(row_first + cache_col_start, width) + col;//行列交叉 if(col < width) { #pragma unroll for(int i = 0; i < CACHE_COUNT; ++i) { if(cache_col_start < CACHE_WIDTH - i * FILTERV_BLOCK_HEIGHT) { int fetch_index = data_index < col ? col : (data_index > data_index_max? data_index_max : data_index); //如果把共享内存比作二位的,那么对应的就是左边一部分,在整个索引中分配是间断的!!! //随着循环的次数慢慢补齐你懂的 data[cache_index + i * FILTERV_BLOCK_HEIGHT] = tex1Dfetch(texData,fetch_index); data_index += IMUL(FILTERV_BLOCK_HEIGHT, width);//每隔32行 } } } __syncthreads(); //已完成共享内存的分配 if(col >= width) return; int row = row_block_first + threadIdx.y; int index_start = cache_row_start + threadIdx.y; #pragma unroll //128/32 32 32 for(int i = 0; i < WRITE_COUNT;++i,row += FILTERV_BLOCK_HEIGHT, index_start += FILTERV_BLOCK_HEIGHT) { if(row < height) { int index_dest = IMUL(row, width) + col; float value = 0; #pragma unroll for(int i = 0; i < FW; ++i) { value += (data[index_start + i] * d_kernel[i]); } d_result[index_dest] = value; } } } template<int LOG_SCALE> __global__ void UpsampleKernel(float* d_result, int width) { const int SCALE = (1 << LOG_SCALE), SCALE_MASK = (SCALE - 1); const float INV_SCALE = 1.0f / (float(SCALE)); int col = IMUL(blockIdx.x, FILTERH_TILE_WIDTH) + threadIdx.x; if(col >= width) return; int row = blockIdx.y >> LOG_SCALE; int index = row * width + col; int dst_row = blockIdx.y; int dst_idx= (width * dst_row + col) * SCALE; int helper = blockIdx.y & SCALE_MASK; if (helper) { float v11 = tex1Dfetch(texData, index); float v12 = tex1Dfetch(texData, index + 1); index += width; float v21 = tex1Dfetch(texData, index); float v22 = tex1Dfetch(texData, index + 1); float w1 = INV_SCALE * helper, w2 = 1.0 - w1; float v1 = (v21 * w1 + w2 * v11); float v2 = (v22 * w1 + w2 * v12); d_result[dst_idx] = v1; #pragma unroll for(int i = 1; i < SCALE; ++i) { const float r2 = i * INV_SCALE; const float r1 = 1.0f - r2; d_result[dst_idx +i] = v1 * r1 + v2 * r2; } }else { float v1 = tex1Dfetch(texData, index); float v2 = tex1Dfetch(texData, index + 1); d_result[dst_idx] = v1; #pragma unroll for(int i = 1; i < SCALE; ++i) { const float r2 = i * INV_SCALE; const float r1 = 1.0f - r2; d_result[dst_idx +i] = v1 * r1 + v2 * r2; } } } //////////////////////////////////////////////////////////////////////////////////////// void ProgramCU::SampleImageU(CuTexImage *dst, CuTexImage *src, int log_scale) { int width = src->GetImgWidth(), height = src->GetImgHeight(); src->BindTexture(texData); dim3 grid((width + FILTERH_TILE_WIDTH - 1)/ FILTERH_TILE_WIDTH, height << log_scale); dim3 block(FILTERH_TILE_WIDTH); switch(log_scale) { case 1 : UpsampleKernel<1> <<< grid, block>>> ((float*) dst->_cuData, width); 
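// (UpsampleKernel performs bilinear interpolation: in-between destination rows blend the two nearest source rows, and each destination pixel interpolates between the two nearest source columns)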
break; case 2 : UpsampleKernel<2> <<< grid, block>>> ((float*) dst->_cuData, width); break; case 3 : UpsampleKernel<3> <<< grid, block>>> ((float*) dst->_cuData, width); break; default: break; } } template<int LOG_SCALE> __global__ void DownsampleKernel(float* d_result, int src_width, int dst_width) { const int dst_col = IMUL(blockIdx.x, FILTERH_TILE_WIDTH) + threadIdx.x; if(dst_col >= dst_width) return; const int src_col = min((dst_col << LOG_SCALE), (src_width - 1)); //dst_col*2 const int dst_row = blockIdx.y; //降采样影像 const int src_row = blockIdx.y << LOG_SCALE; //源影像 dst_row*2 const int src_idx = IMUL(src_row, src_width) + src_col; const int dst_idx = IMUL(dst_width, dst_row) + dst_col; d_result[dst_idx] = tex1Dfetch(texData, src_idx); } __global__ void DownsampleKernel(float* d_result, int src_width, int dst_width, const int log_scale) { const int dst_col = IMUL(blockIdx.x, FILTERH_TILE_WIDTH) + threadIdx.x; if(dst_col >= dst_width) return; const int src_col = min((dst_col << log_scale), (src_width - 1)); const int dst_row = blockIdx.y; const int src_row = blockIdx.y << log_scale; const int src_idx = IMUL(src_row, src_width) + src_col; const int dst_idx = IMUL(dst_width, dst_row) + dst_col; d_result[dst_idx] = tex1Dfetch(texData, src_idx); } void ProgramCU::SampleImageD(CuTexImage *dst, CuTexImage *src, int log_scale) { int src_width = src->GetImgWidth(), dst_width = dst->GetImgWidth() ; src->BindTexture(texData); dim3 grid((dst_width + FILTERH_TILE_WIDTH - 1)/ FILTERH_TILE_WIDTH, dst->GetImgHeight()); dim3 block(FILTERH_TILE_WIDTH); switch(log_scale) { case 1 : DownsampleKernel<1> <<< grid, block>>> ((float*) dst->_cuData, src_width, dst_width); break; case 2 : DownsampleKernel<2> <<< grid, block>>> ((float*) dst->_cuData, src_width, dst_width); break; case 3 : DownsampleKernel<3> <<< grid, block>>> ((float*) dst->_cuData, src_width, dst_width); break; default: DownsampleKernel <<< grid, block>>> ((float*) dst->_cuData, src_width, dst_width, log_scale); } cudaThreadSynchronize(); } __global__ void ChannelReduce_Kernel(float* d_result) { int index = IMUL(blockIdx.x, FILTERH_TILE_WIDTH) + threadIdx.x; d_result[index] = tex1Dfetch(texData, index*4); } __global__ void ChannelReduce_Convert_Kernel(float* d_result) { int index = IMUL(blockIdx.x, FILTERH_TILE_WIDTH) + threadIdx.x; float4 rgba = tex1Dfetch(texDataF4, index); d_result[index] = 0.299f * rgba.x + 0.587f* rgba.y + 0.114f * rgba.z; } void ProgramCU::ReduceToSingleChannel(CuTexImage* dst, CuTexImage* src, int convert_rgb) { int width = src->GetImgWidth(), height = dst->GetImgHeight() ; dim3 grid((width * height + FILTERH_TILE_WIDTH - 1)/ FILTERH_TILE_WIDTH); dim3 block(FILTERH_TILE_WIDTH); if(convert_rgb) { src->BindTexture(texDataF4); ChannelReduce_Convert_Kernel<<<grid, block>>>((float*)dst->_cuData); }else { src->BindTexture(texData); ChannelReduce_Kernel<<<grid, block>>>((float*)dst->_cuData); } } __global__ void ConvertByteToFloat_Kernel(float* d_result) { int index = IMUL(blockIdx.x, FILTERH_TILE_WIDTH) + threadIdx.x; d_result[index] = tex1Dfetch(texDataB, index); } void ProgramCU::ConvertByteToFloat(CuTexImage*src, CuTexImage* dst) { int width = src->GetImgWidth(), height = dst->GetImgHeight() ; dim3 grid((width * height + FILTERH_TILE_WIDTH - 1)/ FILTERH_TILE_WIDTH); dim3 block(FILTERH_TILE_WIDTH); src->BindTexture(texDataB); ConvertByteToFloat_Kernel<<<grid, block>>>((float*)dst->_cuData); } void ProgramCU::CreateFilterKernel(float sigma, float* kernel, int& width) { int i, sz = int( ceil( GlobalUtil::_FilterWidthFactor * 
sigma -0.5) ) ;// width = 2*sz + 1; if(width > KERNEL_MAX_WIDTH) { //filter size truncation sz = KERNEL_MAX_WIDTH >> 1; width =KERNEL_MAX_WIDTH; }else if(width < KERNEL_MIN_WIDTH) { sz = KERNEL_MIN_WIDTH >> 1; width =KERNEL_MIN_WIDTH; } float rv = 1.0f/(sigma*sigma), v, ksum =0; // pre-compute filter for( i = -sz ; i <= sz ; ++i) { kernel[i+sz] = v = exp(-0.5f * i * i *rv) ; ksum += v; } //normalize the kernel rv = 1.0f/ksum; for(i = 0; i< width ;i++) kernel[i]*=rv; } template<int FW> void ProgramCU::FilterImage(CuTexImage *dst, CuTexImage *src, CuTexImage* buf) { int width = src->GetImgWidth(), height = src->GetImgHeight(); //horizontal filtering src->BindTexture(texData); //src是源图像 dim3 gridh((width + FILTERH_TILE_WIDTH - 1)/ FILTERH_TILE_WIDTH, height); dim3 blockh(FILTERH_TILE_WIDTH); //GlobalUtil::StartTimer("水平"); FilterH<FW><<<gridh, blockh>>>((float*)buf->_cuData, width); cudaThreadSynchronize(); // GlobalUtil::StopTimer(); // float _timing0 = GlobalUtil::GetElapsedTime(); //CheckErrorCUDA("FilterH"); ///vertical filtering buf->BindTexture(texData); //16,128 dim3 gridv((width + FILTERV_TILE_WIDTH - 1)/ FILTERV_TILE_WIDTH, (height + FILTERV_TILE_HEIGHT - 1)/FILTERV_TILE_HEIGHT); //(50,5) dim3 blockv(FILTERV_TILE_WIDTH, FILTERV_BLOCK_HEIGHT); //(16*32) //GlobalUtil::StartTimer("竖直"); FilterV<FW><<<gridv, blockv>>>((float*)dst->_cuData, width, height); cudaThreadSynchronize(); // GlobalUtil::StopTimer(); //float _timing1 = GlobalUtil::GetElapsedTime(); //0.005,0.008 是原来的1.6倍!!! CheckErrorCUDA("FilterV"); } ////////////////////////////////////////////////////////////////////// // tested on 2048x1500 image, the time on pyramid construction is // OpenGL version : 18ms // CUDA version: 28 ms void ProgramCU::FilterImage(CuTexImage *dst, CuTexImage *src, CuTexImage* buf, float sigma) { float filter_kernel[KERNEL_MAX_WIDTH]; int width; CreateFilterKernel(sigma, filter_kernel, width); cudaMemcpyToSymbol(d_kernel, filter_kernel, width * sizeof(float), 0, cudaMemcpyHostToDevice); switch(width) { case 5: FilterImage< 5>(dst, src, buf); break; case 7: FilterImage< 7>(dst, src, buf); break; case 9: FilterImage< 9>(dst, src, buf); break; case 11: FilterImage<11>(dst, src, buf); break; case 13: FilterImage<13>(dst, src, buf); break; case 15: FilterImage<15>(dst, src, buf); break; case 17: FilterImage<17>(dst, src, buf); break; case 19: FilterImage<19>(dst, src, buf); break; case 21: FilterImage<21>(dst, src, buf); break; case 23: FilterImage<23>(dst, src, buf); break; case 25: FilterImage<25>(dst, src, buf); break; case 27: FilterImage<27>(dst, src, buf); break; case 29: FilterImage<29>(dst, src, buf); break; case 31: FilterImage<31>(dst, src, buf); break; case 33: FilterImage<33>(dst, src, buf); break; default: break; } } texture<float, 1, cudaReadModeElementType> texC; texture<float, 1, cudaReadModeElementType> texP; texture<float, 1, cudaReadModeElementType> texN; void __global__ ComputeDOG_Kernel(float* d_dog, float2* d_got, int width, int height) { int row = (blockIdx.y << DOG_BLOCK_LOG_DIMY) + threadIdx.y; int col = (blockIdx.x << DOG_BLOCK_LOG_DIMX) + threadIdx.x; if(col < width && row < height) { int index = IMUL(row, width) + col; float vp = tex1Dfetch(texP, index); float v = tex1Dfetch(texC, index); d_dog[index] = v - vp; float vxn = tex1Dfetch(texC, index + 1); float vxp = tex1Dfetch(texC, index - 1); float vyp = tex1Dfetch(texC, index - width); float vyn = tex1Dfetch(texC, index + width); float dx = vxn - vxp, dy = vyn - vyp; float grd = 0.5f * sqrt(dx * dx + dy * dy); float rot = (grd 
== 0.0f? 0.0f : atan2(dy, dx));
		d_got[index] = make_float2(grd, rot);
	}
}

void __global__ ComputeDOG_Kernel(float* d_dog, int width, int height)
{
	int row = (blockIdx.y << DOG_BLOCK_LOG_DIMY) + threadIdx.y;
	int col = (blockIdx.x << DOG_BLOCK_LOG_DIMX) + threadIdx.x;
	if(col < width && row < height)
	{
		int index = IMUL(row, width) + col;
		float vp = tex1Dfetch(texP, index);
		float v = tex1Dfetch(texC, index);
		d_dog[index] = v - vp;
	}
}

void ProgramCU::ComputeDOG(CuTexImage* gus, CuTexImage* dog, CuTexImage* got)
{
	int width = gus->GetImgWidth(), height = gus->GetImgHeight();
	dim3 grid((width + DOG_BLOCK_DIMX - 1)/ DOG_BLOCK_DIMX, (height + DOG_BLOCK_DIMY - 1)/DOG_BLOCK_DIMY);
	dim3 block(DOG_BLOCK_DIMX, DOG_BLOCK_DIMY);
	gus->BindTexture(texC);
	(gus -1)->BindTexture(texP);
	//got actually holds the gradient/orientation values of the corresponding Gaussian pyramid levels
	if(got->_cuData)
		ComputeDOG_Kernel<<<grid, block>>>((float*) dog->_cuData, (float2*) got->_cuData, width, height);
	else
		ComputeDOG_Kernel<<<grid, block>>>((float*) dog->_cuData, width, height);
	cudaThreadSynchronize();
}

#define READ_CMP_DOG_DATA(datai, tex, idx) \
		datai[0] = tex1Dfetch(tex, idx - 1);\
		datai[1] = tex1Dfetch(tex, idx);\
		datai[2] = tex1Dfetch(tex, idx + 1);\
		if(v > nmax)\
		{\
			nmax = max(nmax, datai[0]);\
			nmax = max(nmax, datai[1]);\
			nmax = max(nmax, datai[2]);\
			if(v < nmax) goto key_finish;\
		}else\
		{\
			nmin = min(nmin, datai[0]);\
			nmin = min(nmin, datai[1]);\
			nmin = min(nmin, datai[2]);\
			if(v > nmin) goto key_finish;\
		}

void __global__ ComputeKEY_Kernel(float4* d_key, int width, int colmax, int rowmax,
		float dog_threshold0, float dog_threshold, float edge_threshold, int subpixel_localization)
{
	float data[3][3], v;
	float datap[3][3], datan[3][3];
#ifdef KEY_OFFSET_ONE
	int row = (blockIdx.y << KEY_BLOCK_LOG_DIMY) + threadIdx.y + 1;
	int col = (blockIdx.x << KEY_BLOCK_LOG_DIMX) + threadIdx.x + 1;
#else
	int row = (blockIdx.y << KEY_BLOCK_LOG_DIMY) + threadIdx.y;
	int col = (blockIdx.x << KEY_BLOCK_LOG_DIMX) + threadIdx.x;
#endif
	int index = IMUL(row, width) + col;
	int idx[3] ={index - width, index, index + width};
	int in_image =0;
	float nmax, nmin, result = 0.0f;
	float dx = 0, dy = 0, ds = 0;
	bool offset_test_passed = true;
#ifdef KEY_OFFSET_ONE
	if(row < rowmax && col < colmax)
#else
	if(row > 0 && col > 0 && row < rowmax && col < colmax)
#endif
	{
		//1-D suppression along the current row
		in_image = 1;
		data[1][1] = v = tex1Dfetch(texC, idx[1]);	//current pixel
		if(fabs(v) <= dog_threshold0) goto key_finish;
		data[1][0] = tex1Dfetch(texC, idx[1] - 1);
		data[1][2] = tex1Dfetch(texC, idx[1] + 1);	//left and right neighbors
		nmax = max(data[1][0], data[1][2]);
		nmin = min(data[1][0], data[1][2]);	//max/min of the left and right neighbors
		if(v <=nmax && v >= nmin) goto key_finish;
		//an extremum must be larger (or smaller) than every neighbor, so values between nmin and nmax are rejected
		//if((v > nmax && v < 0 )|| (v < nmin && v > 0)) goto key_finish;
		READ_CMP_DOG_DATA(data[0], texC, idx[0]);	//max/min of the row above
		READ_CMP_DOG_DATA(data[2], texC, idx[2]);	//max/min of the row below

		//2-D suppression
		//edge suppression (reject edge responses)
		float vx2 = v * 2.0f;
		float fxx = data[1][0] + data[1][2] - vx2;
		float fyy = data[0][1] + data[2][1] - vx2;
		float fxy = 0.25f * (data[2][2] + data[0][0] - data[2][0] - data[0][2]);
		float temp1 = fxx * fyy - fxy * fxy;
		float temp2 = (fxx + fyy) * (fxx + fyy);
		if(temp1 <=0 || temp2 > edge_threshold * temp1) goto key_finish;

		//3-D suppression across scales
		//read the previous level
		READ_CMP_DOG_DATA(datap[0], texP, idx[0]);	//max/min of the previous level
		READ_CMP_DOG_DATA(datap[1], texP, idx[1]);
		READ_CMP_DOG_DATA(datap[2], texP, idx[2]);

		//read the next level
		READ_CMP_DOG_DATA(datan[0], texN, idx[0]);	//max/min of the next level
		READ_CMP_DOG_DATA(datan[1], texN, idx[1]);
		READ_CMP_DOG_DATA(datan[2], texN, idx[2]);

		if(subpixel_localization)
		{
			//subpixel localization
			float fx = 0.5f * (data[1][2] - data[1][0]);
			float fy = 0.5f * (data[2][1] - data[0][1]);
			float fs = 0.5f * (datan[1][1] - datap[1][1]);

			float fss = (datan[1][1] + datap[1][1] - vx2);
			float fxs = 0.25f* (datan[1][2] + datap[1][0] - datan[1][0] - datap[1][2]);
			float fys = 0.25f* (datan[2][1] + datap[0][1] - datan[0][1] - datap[2][1]);

			//need to solve dx, dy, ds;
			// |-fx|     | fxx fxy fxs |   |dx|
			// |-fy|  =  | fxy fyy fys | * |dy|
			// |-fs|     | fxs fys fss |   |ds|
			float4 A0 = fxx > 0? make_float4(fxx, fxy, fxs, -fx) : make_float4(-fxx, -fxy, -fxs, fx);
			float4 A1 = fxy > 0? make_float4(fxy, fyy, fys, -fy) : make_float4(-fxy, -fyy, -fys, fy);
			float4 A2 = fxs > 0? make_float4(fxs, fys, fss, -fs) : make_float4(-fxs, -fys, -fss, fs);
			//Gaussian elimination
			float maxa = max(max(A0.x, A1.x), A2.x);	//pivot selection
			if(maxa >= 1e-10)
			{
				if(maxa == A1.x)	//if A1 has the largest pivot, swap A1 and A0
				{
					float4 TEMP = A1; A1 = A0; A0 = TEMP;
				}else if(maxa == A2.x)	//if A2 has the largest pivot, swap A2 and A0
				{
					float4 TEMP = A2; A2 = A0; A0 = TEMP;
				}

				A0.y /= A0.x;	A0.z /= A0.x;	A0.w/= A0.x;
				A1.y -= A1.x * A0.y;	A1.z -= A1.x * A0.z;	A1.w -= A1.x * A0.w;
				A2.y -= A2.x * A0.y;	A2.z -= A2.x * A0.z;	A2.w -= A2.x * A0.w;

				if(abs(A2.y) > abs(A1.y))
				{
					float4 TEMP = A2; A2 = A1; A1 = TEMP;
				}
				if(abs(A1.y) >= 1e-10)
				{
					A1.z /= A1.y;	A1.w /= A1.y;
					A2.z -= A2.y * A1.z;	A2.w -= A2.y * A1.w;
					if(abs(A2.z) >= 1e-10)
					{
						ds = A2.w / A2.z;
						dy = A1.w - ds * A1.z;
						dx = A0.w - ds * A0.z - dy * A0.y;

						offset_test_passed =
							fabs(data[1][1] + 0.5f * (dx * fx + dy * fy + ds * fs)) > dog_threshold	//reject low-contrast points
							&&fabs(ds) < 1.0f && fabs(dx) < 1.0f && fabs(dy) < 1.0f;
					}
				}
			}
		}
		if(offset_test_passed) result = v > nmax ? 1.0 : -1.0;	//passed all tests: larger than nmax means a maximum, otherwise a minimum
	}
key_finish:
	//index already encodes the row and column; there is no room to store more than the offsets here
	if(in_image) d_key[index] = make_float4(result, dx, dy, ds);	//store the sub-pixel offsets for this pixel (iterating to convergence would come later)
}

void ProgramCU::ComputeKEY(CuTexImage* dog, CuTexImage* key, float Tdog, float Tedge)
{
	int width = dog->GetImgWidth(), height = dog->GetImgHeight();
	float Tdog1 = (GlobalUtil::_SubpixelLocalization?
0.8f : 1.0f) * Tdog; CuTexImage* dogp = dog - 1; CuTexImage* dogn = dog + 1; #ifdef KEY_OFFSET_ONE dim3 grid((width - 1 + KEY_BLOCK_DIMX - 1)/ KEY_BLOCK_DIMX, (height - 1 + KEY_BLOCK_DIMY - 1)/KEY_BLOCK_DIMY); #else dim3 grid((width + KEY_BLOCK_DIMX - 1)/ KEY_BLOCK_DIMX, (height + KEY_BLOCK_DIMY - 1)/KEY_BLOCK_DIMY); #endif dim3 block(KEY_BLOCK_DIMX, KEY_BLOCK_DIMY); dogp->BindTexture(texP); dog ->BindTexture(texC); dogn->BindTexture(texN); Tedge = (Tedge+1)*(Tedge+1)/Tedge; //(8,8) (800/8,600/8) ComputeKEY_Kernel<<<grid, block>>>((float4*) key->_cuData, width, width -1, height -1, Tdog1, Tdog, Tedge, GlobalUtil::_SubpixelLocalization); cudaThreadSynchronize(); } //ws 800,wd 200,height 600 void __global__ InitHist_Kernel(int4* hist, int ws, int wd, int height) { int row = IMUL(blockIdx.y, blockDim.y) + threadIdx.y;//行索引 int col = IMUL(blockIdx.x, blockDim.x) + threadIdx.x;//列索引 if(row < height && col < wd) { int hidx = IMUL(row, wd) + col; //直方图索引(0,1,2,3,4...200) int scol = col << 2;//乘以4 (0,4,8,12...800) int sidx = IMUL(row, ws) + scol;//影像索引(0,4,8,12...800) int v[4] = {0, 0, 0, 0}; if(row > 0 && row < height -1) { #pragma unroll for(int i = 0; i < 4 ; ++i, ++scol) { float4 temp = tex1Dfetch(texDataF4, sidx +i);//当前像元以及右边三个 //temp(result, dx, dy, ds),result不为0说明是特征点! v[i] = (scol < ws -1 && scol > 0 && temp.x!=0) ? 1 : 0;//满足条件,不超过列索引,且temp.x不为零 则为1 } } hist[hidx] = make_int4(v[0], v[1], v[2], v[3]);//高度不变,宽度变为之前的1/4 } } void ProgramCU::InitHistogram(CuTexImage* key, CuTexImage* hist) { int ws = key->GetImgWidth(), hs = key->GetImgHeight();//800*600 int wd = hist->GetImgWidth(), hd = hist->GetImgHeight();//200*600 dim3 grid((wd + HIST_INIT_WIDTH - 1)/ HIST_INIT_WIDTH, hd);//(200/128,600) dim3 block(HIST_INIT_WIDTH, 1); //(128,1) key->BindTexture(texDataF4); //hist->cuda,800,200,600 InitHist_Kernel<<<grid, block>>>((int4*) hist->_cuData, ws, wd, hd); cudaThreadSynchronize(); } //200,50,600 void __global__ ReduceHist_Kernel(int4* d_hist, int ws, int wd, int height) { int row = IMUL(blockIdx.y, blockDim.y) + threadIdx.y;//行索引 int col = IMUL(blockIdx.x, blockDim.x) + threadIdx.x;//列索引 if(row < height && col < wd) { int hidx = IMUL(row, wd) + col;//直方图索引 int scol = col << 2;//col*4 int sidx = IMUL(row, ws) + scol;//上一层直方图索引 int v[4] = {0, 0, 0, 0}; #pragma unroll for(int i = 0; i < 4 && scol < ws; ++i, ++scol) { int4 temp = tex1Dfetch(texDataI4, sidx + i);//上一层直方图中的像元和右边三个像元 200--50四倍关系! v[i] = temp.x + temp.y + temp.z + temp.w; //直方图的四个元素(x,y,z,w) } d_hist[hidx] = make_int4(v[0], v[1], v[2], v[3]);//本层的直方图索引 } } void ProgramCU::ReduceHistogram(CuTexImage*hist1, CuTexImage* hist2) { int ws = hist1->GetImgWidth(), hs = hist1->GetImgHeight(); int wd = hist2->GetImgWidth(), hd = hist2->GetImgHeight(); int temp = (int)floor(logf(float(wd * 2/ 3)) / logf(2.0f)); const int wi = min(7, max(temp , 0)); hist1->BindTexture(texDataI4); const int BW = 1 << wi, BH = 1 << (7 - wi); dim3 grid((wd + BW - 1)/ BW, (hd + BH -1) / BH);//(wd/32,hd/4) dim3 block(BW, BH);//(32,4) ReduceHist_Kernel<<<grid, block>>>((int4*)hist2->_cuData, ws, wd, hd); cudaThreadSynchronize(); } void __global__ ListGen_Kernel(int4* d_list, int width) { int idx1 = IMUL(blockIdx.x, blockDim.x) + threadIdx.x; //特征点索引 int4 pos = tex1Dfetch(texDataList, idx1); //拾取纹理内存 int idx2 = IMUL(pos.y, width) + pos.x; //直方图索引(不为0的) y指600行中的第几行,即第几个特征点,x为特征点具体位置,第一次width为4 int4 temp = tex1Dfetch(texDataI4, idx2);//拾取直方图纹理内存 int sum1 = temp.x + temp.y; int sum2 = sum1 + temp.z; pos.x <<= 2;//pos.x *4 if(pos.z >= sum2)//这个设计的好巧妙呀!!! 
先算pos.x=0的,再把pos.x不等于0的化为0,再按照pos.x=0的情况算 { pos.x += 3; pos.z -= sum2; }else if(pos.z >= sum1) { pos.x += 2; pos.z -= sum1; }else if(pos.z >= temp.x) { pos.x += 1; pos.z -= temp.x; } d_list[idx1] = pos; } //input list (x, y) (x, y) .... 特征feature层,hist层直方图层 void ProgramCU::GenerateList(CuTexImage* list, CuTexImage* hist) { int len = list->GetImgWidth();//327个特征点 list->BindTexture(texDataList); hist->BindTexture(texDataI4); dim3 grid((len + LISTGEN_BLOCK_DIM -1) /LISTGEN_BLOCK_DIM);// len/128 dim3 block(LISTGEN_BLOCK_DIM);//128 //listgenerate,列表生成核函数 ListGen_Kernel<<<grid, block>>>((int4*) list->_cuData, hist->GetImgWidth()); cudaThreadSynchronize(); } void __global__ ComputeOrientation_Kernel(float4* d_list, int list_len,//327 int width, int height, //800,600 float sigma, float sigma_step, //2.01,1.26 float gaussian_factor, float sample_factor,//1.5,3 int num_orientation,//2 int existing_keypoint, //0 int subpixel, //1 int keepsign)//0 { //10度每半径 const float ten_degree_per_radius = 5.7295779513082320876798154814105; //(360/10)/2pi //半径每十度 const float radius_per_ten_degrees = 1.0 / 5.7295779513082320876798154814105;//(10/360)*2PI int idx = IMUL(blockDim.x, blockIdx.x) + threadIdx.x;//线程索引 if(idx >= list_len) return; float4 key; //重构key!!!(x,y,sigma,) if(existing_keypoint) { key = tex1Dfetch(texDataF4, idx); }else { int4 ikey = tex1Dfetch(texDataList, idx);//(x,y,sigma,) key.x = ikey.x + 0.5f; //四舍五入 key.y = ikey.y + 0.5f; key.z = sigma; if(subpixel || keepsign) { float4 offset = tex1Dfetch(texDataF4, IMUL(width, ikey.y) + ikey.x);//定位到关键点 if(subpixel) { //key(result,dx,dy,ds) key.x += offset.y;//x+=dx key.y += offset.z;//y+=dy key.z *= pow(sigma_step, offset.w);//z*=dz } if(keepsign) key.z *= offset.x; //??? } } if(num_orientation == 0) { key.w = 0; d_list[idx] = key; return; } float vote[37]; //权!!! float gsigma = key.z * gaussian_factor;//key.z就是sigma float win = fabs(key.z) * sample_factor;//窗口 float dist_threshold = win * win + 0.5; //距离阈值 float factor = -0.5f / (gsigma * gsigma); float xmin = max(1.5f, floor(key.x - win) + 0.5f); //-radius float ymin = max(1.5f, floor(key.y - win) + 0.5f); float xmax = min(width - 1.5f, floor(key.x + win) + 0.5f);//+radius float ymax = min(height -1.5f, floor(key.y + win) + 0.5f); #pragma unroll for(int i = 0; i < 36; ++i) vote[i] = 0.0f; //36个方向 for(float y = ymin; y <= ymax; y += 1.0f) //-radius~+radius { for(float x = xmin; x <= xmax; x += 1.0f)//-radius~+radius { float dx = x - key.x; //i float dy = y - key.y; //j float sq_dist = dx * dx + dy * dy; float2 got = tex2D(texDataF2, x, y);// 二维纹理 got.x是梯度幅值,got.y是梯度方向 float weight = got.x * exp(sq_dist * factor);//该点相对于特征点的高斯权重!!! 这个是梯度的幅值 float fidx = floor(got.y * ten_degree_per_radius);//不超过2PI的乘以5肯定不超过36! int oidx = fidx; if(oidx < 0) oidx += 36; vote[oidx] += weight; //可能有很多个的!!!累加到这个方向 } } //filter the vote 高斯权滤波 const float one_third = 1.0 /3.0;//三分之一 #pragma unroll //循环展开 for(int i = 0; i < 6; ++i) { vote[36] = vote[0]; float pre = vote[35]; #pragma unroll for(int j = 0; j < 36; ++j) { float temp = one_third * (pre + vote[j] + vote[j + 1]); pre = vote[j]; vote[j] = temp; //1、当前值赋值给pre2、当前值为左边值和右边值中间值平均值 } } vote[36] = vote[0]; if(num_orientation == 1 || existing_keypoint) { int index_max = 0; float max_vote = vote[0]; #pragma unroll for(int i = 1; i < 36; ++i) { index_max = vote[i] > max_vote? i : index_max; max_vote = max(max_vote, vote[i]); } float pre = vote[index_max == 0? 
35 : index_max -1]; float next = vote[index_max + 1]; float weight = max_vote; float off = 0.5f * FDIV(next - pre, weight + weight - next - pre); key.w = radius_per_ten_degrees * (index_max + 0.5f + off); d_list[idx] = key; }else { float max_vote = vote[0]; #pragma unroll for(int i = 1; i < 36; ++i) max_vote = max(max_vote, vote[i]); //找到最大权值 float vote_threshold = max_vote * 0.8f; //80% 峰值!! float pre = vote[35]; float max_rot[2], max_vot[2] = {0, 0}; //主方向和辅方向 int ocount = 0; #pragma unroll for(int i =0; i < 36; ++i)//36个方向 { float next = vote[i + 1];//下一个 if(vote[i] > vote_threshold && vote[i] > pre && vote[i] > next) { float di = 0.5f * FDIV(next - pre, vote[i] + vote[i] - next - pre);//除以 float rot = i + di + 0.5f; float weight = vote[i]; ///得到的结果是max_vot[0]是最大的 max_vot[1]>=max_vot[0] if(weight > max_vot[1]) { if(weight > max_vot[0]) { max_vot[1] = max_vot[0]; // max_rot[1] = max_rot[0]; max_vot[0] = weight; max_rot[0] = rot; } else { max_vot[1] = weight; max_rot[1] = rot; } ocount ++; } } pre = vote[i]; } float fr1 = max_rot[0] / 36.0f; //归一化(0,1)主方向 得出的是一个百分比! if(fr1 < 0) fr1 += 1.0f; unsigned short us1 = ocount == 0? 65535 : ((unsigned short )floor(fr1 * 65535.0f)); unsigned short us2 = 65535; if(ocount > 1) //如果ocount为1,则说明没有辅助方向,大于1则说明有辅方向 { float fr2 = max_rot[1] / 36.0f; //归一化(0,1) if(fr2 < 0) fr2 += 1.0f; us2 = (unsigned short ) floor(fr2 * 65535.0f); } unsigned int uspack = (us2 << 16) | us1; //us2*2^16 us2移位运算!!! 保留了两个方向!!! //数学不好是坑啊,3二进制是11,3*2=6二进制是110,相当于每乘以2后面加一个0 //也就是加了16个0,也就是避开了65535!保留了主方向和辅助方向! key.w = __int_as_float(uspack);//把int作为float保存在key.w中,别忘了key的数据类型是!!!float4 d_list[idx] = key; } } void ProgramCU::ComputeOrientation(CuTexImage* list, CuTexImage* got, CuTexImage*key, float sigma, float sigma_step, int existing_keypoint) { int len = list->GetImgWidth(); if(len <= 0) return; int width = got->GetImgWidth(), height = got->GetImgHeight(); if(existing_keypoint) { list->BindTexture(texDataF4); }else { list->BindTexture(texDataList); if(GlobalUtil::_SubpixelLocalization) key->BindTexture(texDataF4);//F4(result,dx,dy,ds) } got->BindTexture2D(texDataF2); //F2,梯度和角度 const int block_width = len < ORIENTATION_COMPUTE_PER_BLOCK ? 16 : ORIENTATION_COMPUTE_PER_BLOCK; //len<64,以16位大小,大于以64 dim3 grid((len + block_width -1) / block_width); dim3 block(block_width); ComputeOrientation_Kernel<<<grid, block>>>((float4*) list->_cuData, len, width, height, sigma, sigma_step, GlobalUtil::_OrientationGaussianFactor, GlobalUtil::_OrientationGaussianFactor * GlobalUtil::_OrientationWindowFactor, GlobalUtil::_FixedOrientation? 0 : GlobalUtil::_MaxOrientation, //0是假 existing_keypoint, GlobalUtil::_SubpixelLocalization, GlobalUtil::_KeepExtremumSign); cudaThreadSynchronize(); ProgramCU::CheckErrorCUDA("ComputeOrientation"); } template <bool DYNAMIC_INDEXING> void __global__ ComputeDescriptor_Kernel(float4* d_des, int num, int width, int height, float window_factor) { const float rpi = 4.0/ 3.14159265358979323846; int idx = IMUL(blockIdx.x, blockDim.x) + threadIdx.x; int fidx = idx >> 4; //idx/16 得到真正的特征点个数 if(fidx >= num) return; float4 key = tex1Dfetch(texDataF4, fidx);//featureTex特征点定位 int bidx = idx& 0xf, ix = bidx & 0x3, iy = bidx >> 2; //(0,0),(0,1)(0,2)...(3,3) float spt = fabs(key.z * window_factor);//窗口大小 3sigma float s, c; __sincosf(key.w, &s, &c); //旋转到主方向!!! float anglef = key.w > 3.14159265358979323846? 
key.w - (2.0 * 3.14159265358979323846) : key.w ; float cspt = c * spt, sspt = s * spt;//单独看并没有什么意义 float crspt = c / spt, srspt = s / spt;//单独看并没有什么意义 float2 offsetpt, pt; float xmin, ymin, xmax, ymax, bsz; offsetpt.x = ix - 1.5f;//哦~~这个1.5好熟悉呀,原来是那个+d/2-0.5 offsetpt.y = iy - 1.5f;//将(0~3)->转变为(-1.5~1.5) //种子点的坐标 pt.x = cspt * offsetpt.x - sspt * offsetpt.y + key.x; //关键点旋转后的坐标? pt.y = cspt * offsetpt.y + sspt * offsetpt.x + key.y; bsz = fabs(cspt) + fabs(sspt); //radius √2*spt xmin = max(1.5f, floor(pt.x - bsz) + 0.5f); //-radius~radius ymin = max(1.5f, floor(pt.y - bsz) + 0.5f); //-radius~radius xmax = min(width - 1.5f, floor(pt.x + bsz) + 0.5f); ymax = min(height - 1.5f, floor(pt.y + bsz) + 0.5f); float des[9]; #pragma unroll for(int i =0; i < 9; ++i) des[i] = 0.0f; //前面不管,已经确定了邻域窗口大小了 for(float y = ymin; y <= ymax; y += 1.0f) { for(float x = xmin; x <= xmax; x += 1.0f) { float dx = x - pt.x; //pt为中心点坐标 float dy = y - pt.y; float nx = crspt * dx + srspt * dy; //得到区域坐标 float ny = crspt * dy - srspt * dx; float nxn = fabs(nx); float nyn = fabs(ny); if(nxn < 1.0f && nyn < 1.0f) { float2 cc = tex2D(texDataF2, x, y); //梯度金字塔 float dnx = nx + offsetpt.x; float dny = ny + offsetpt.y;//又旋转回来了!!! float ww = exp(-0.125f * (dnx * dnx + dny * dny)); float wx = 1.0 - nxn; float wy = 1.0 - nyn; float weight = ww * wx * wy * cc.x; float theta = (anglef - cc.y) * rpi; //旋转到主方向 if(theta < 0) theta += 8.0f; float fo = floor(theta); int fidx = fo;//方向 float weight1 = fo + 1.0f - theta; //1-(theta - fo) float weight2 = theta - fo; //theta - fo if(DYNAMIC_INDEXING) { des[fidx] += (weight1 * weight); des[fidx + 1] += (weight2 * weight); //this dynamic indexing part might be slow }else { #pragma unroll for(int k = 0; k < 8; ++k) { if(k == fidx) { des[k] += (weight1 * weight); des[k+1] += (weight2 * weight); } } } } } } des[0] += des[8]; int didx = idx << 1;//0,2,4,6,8 //每个特征点八个方向,16个特征区域,16*8=128 //32个idx为一个特征点!!!! 
//128(float)*num->32(float4)*num->2(float4)* 16*num //16个线程,16<<1=16*2 每个线程计算8个方向 d_des[didx] = make_float4(des[0], des[1], des[2], des[3]); d_des[didx+1] = make_float4(des[4], des[5], des[6], des[7]); } template <bool DYNAMIC_INDEXING> void __global__ ComputeDescriptorRECT_Kernel(float4* d_des, int num, int width, int height, float window_factor) { const float rpi = 4.0/ 3.14159265358979323846; int idx = IMUL(blockIdx.x, blockDim.x) + threadIdx.x; int fidx = idx >> 4; if(fidx >= num) return; float4 key = tex1Dfetch(texDataF4, fidx); int bidx = idx& 0xf, ix = bidx & 0x3, iy = bidx >> 2; //float aspect_ratio = key.w / key.z; //float aspect_sq = aspect_ratio * aspect_ratio; float sptx = key.z * 0.25, spty = key.w * 0.25; float xmin, ymin, xmax, ymax; float2 pt; pt.x = sptx * (ix + 0.5f) + key.x; pt.y = spty * (iy + 0.5f) + key.y; xmin = max(1.5f, floor(pt.x - sptx) + 0.5f); ymin = max(1.5f, floor(pt.y - spty) + 0.5f); xmax = min(width - 1.5f, floor(pt.x + sptx) + 0.5f); ymax = min(height - 1.5f, floor(pt.y + spty) + 0.5f); float des[9]; #pragma unroll for(int i =0; i < 9; ++i) des[i] = 0.0f; for(float y = ymin; y <= ymax; y += 1.0f) { for(float x = xmin; x <= xmax; x += 1.0f) { float nx = (x - pt.x) / sptx; float ny = (y - pt.y) / spty; float nxn = fabs(nx); float nyn = fabs(ny); if(nxn < 1.0f && nyn < 1.0f) { float2 cc = tex2D(texDataF2, x, y); float wx = 1.0 - nxn; float wy = 1.0 - nyn; float weight = wx * wy * cc.x; float theta = (- cc.y) * rpi; if(theta < 0) theta += 8.0f; float fo = floor(theta); int fidx = fo; float weight1 = fo + 1.0f - theta; float weight2 = theta - fo; if(DYNAMIC_INDEXING) { des[fidx] += (weight1 * weight); des[fidx + 1] += (weight2 * weight); //this dynamic indexing part might be slow }else { #pragma unroll for(int k = 0; k < 8; ++k) { if(k == fidx) { des[k] += (weight1 * weight); des[k+1] += (weight2 * weight); } } } } } } des[0] += des[8]; int didx = idx << 1; d_des[didx] = make_float4(des[0], des[1], des[2], des[3]); d_des[didx+1] = make_float4(des[4], des[5], des[6], des[7]); } void __global__ NormalizeDescriptor_Kernel(float4* d_des, int num) { float4 temp[32]; int idx = IMUL(blockIdx.x, blockDim.x) + threadIdx.x; if(idx >= num) return; int sidx = idx << 5;//idx*32 float norm1 = 0, norm2 = 0; #pragma unroll for(int i = 0; i < 32; ++i) //32*4=128 { temp[i] = tex1Dfetch(texDataF4, sidx +i); norm1 += (temp[i].x * temp[i].x + temp[i].y * temp[i].y + temp[i].z * temp[i].z + temp[i].w * temp[i].w); } norm1 = rsqrt(norm1); //分母 #pragma unroll for(int i = 0; i < 32; ++i) { temp[i].x = min(0.2f, temp[i].x * norm1); temp[i].y = min(0.2f, temp[i].y * norm1); temp[i].z = min(0.2f, temp[i].z * norm1); temp[i].w = min(0.2f, temp[i].w * norm1); norm2 += (temp[i].x * temp[i].x + temp[i].y * temp[i].y + temp[i].z * temp[i].z + temp[i].w * temp[i].w);// } norm2 = rsqrt(norm2); #pragma unroll for(int i = 0; i < 32; ++i) { temp[i].x *= norm2; temp[i].y *= norm2; temp[i].z *= norm2; temp[i].w *= norm2; d_des[sidx + i] = temp[i]; } } void ProgramCU::ComputeDescriptor(CuTexImage*list, CuTexImage* got, CuTexImage* dtex, int rect, int stream) { int num = list->GetImgWidth(); int width = got->GetImgWidth(); int height = got->GetImgHeight(); dtex->InitTexture(num * 128, 1, 1); got->BindTexture2D(texDataF2); list->BindTexture(texDataF4); int block_width = DESCRIPTOR_COMPUTE_BLOCK_SIZE;//64 dim3 grid((num * 16 + block_width -1) / block_width);//num*16/64 16*8=128 dim3 block(block_width);//64 if(rect) { if(GlobalUtil::_UseDynamicIndexing) ComputeDescriptorRECT_Kernel<true><<<grid, 
block>>>((float4*) dtex->_cuData, num, width, height, GlobalUtil::_DescriptorWindowFactor); else ComputeDescriptorRECT_Kernel<false><<<grid, block>>>((float4*) dtex->_cuData, num, width, height, GlobalUtil::_DescriptorWindowFactor); }else { if(GlobalUtil::_UseDynamicIndexing) ComputeDescriptor_Kernel<true><<<grid, block>>>((float4*) dtex->_cuData, num, width, height, GlobalUtil::_DescriptorWindowFactor); else ComputeDescriptor_Kernel<false><<<grid, block>>>((float4*) dtex->_cuData, num, width, height, GlobalUtil::_DescriptorWindowFactor); } cudaThreadSynchronize(); if(GlobalUtil::_NormalizedSIFT) { dtex->BindTexture(texDataF4); const int block_width = DESCRIPTOR_NORMALIZ_PER_BLOCK; dim3 grid((num + block_width -1) / block_width); dim3 block(block_width); NormalizeDescriptor_Kernel<<<grid, block>>>((float4*) dtex->_cuData, num); cudaThreadSynchronize(); } CheckErrorCUDA("ComputeDescriptor"); } ////////////////////////////////////////////////////// void ProgramCU::FinishCUDA() { cudaThreadSynchronize(); } int ProgramCU::CheckErrorCUDA(const char* location) { cudaError_t e = cudaGetLastError(); if(e) { if(location) fprintf(stderr, "%s:\t", location); fprintf(stderr, "%s\n", cudaGetErrorString(e)); //assert(0); return 1; }else { return 0; } } void __global__ ConvertDOG_Kernel(float* d_result, int width, int height) { int row = (blockIdx.y << BLOCK_LOG_DIM) + threadIdx.y; int col = (blockIdx.x << BLOCK_LOG_DIM) + threadIdx.x; if(col < width && row < height) { int index = row * width + col; float v = tex1Dfetch(texData, index); d_result[index] = (col == 0 || row == 0 || col == width -1 || row == height -1)? 0.5 : saturate(0.5+20.0*v); } } /// void ProgramCU::DisplayConvertDOG(CuTexImage* dog, CuTexImage* out) { if(out->_cuData == NULL) return; int width = dog->GetImgWidth(), height = dog ->GetImgHeight(); dog->BindTexture(texData); dim3 grid((width + BLOCK_DIM - 1)/ BLOCK_DIM, (height + BLOCK_DIM - 1)/BLOCK_DIM); dim3 block(BLOCK_DIM, BLOCK_DIM); ConvertDOG_Kernel<<<grid, block>>>((float*) out->_cuData, width, height); ProgramCU::CheckErrorCUDA("DisplayConvertDOG"); } void __global__ ConvertGRD_Kernel(float* d_result, int width, int height) { int row = (blockIdx.y << BLOCK_LOG_DIM) + threadIdx.y; int col = (blockIdx.x << BLOCK_LOG_DIM) + threadIdx.x; if(col < width && row < height) { int index = row * width + col; float v = tex1Dfetch(texData, index << 1); d_result[index] = (col == 0 || row == 0 || col == width -1 || row == height -1)? 0 : saturate(5 * v); } } void ProgramCU::DisplayConvertGRD(CuTexImage* got, CuTexImage* out) { if(out->_cuData == NULL) return; int width = got->GetImgWidth(), height = got ->GetImgHeight(); got->BindTexture(texData); dim3 grid((width + BLOCK_DIM - 1)/ BLOCK_DIM, (height + BLOCK_DIM - 1)/BLOCK_DIM); dim3 block(BLOCK_DIM, BLOCK_DIM); ConvertGRD_Kernel<<<grid, block>>>((float*) out->_cuData, width, height); ProgramCU::CheckErrorCUDA("DisplayConvertGRD"); } void __global__ ConvertKEY_Kernel(float4* d_result, int width, int height) { int row = (blockIdx.y << BLOCK_LOG_DIM) + threadIdx.y; int col = (blockIdx.x << BLOCK_LOG_DIM) + threadIdx.x; if(col < width && row < height) { int index = row * width + col; float4 keyv = tex1Dfetch(texDataF4, index); int is_key = (keyv.x == 1.0f || keyv.x == -1.0f); int inside = col > 0 && row > 0 && row < height -1 && col < width - 1; float v = inside? saturate(0.5 + 20 * tex1Dfetch(texData, index)) : 0.5; d_result[index] = is_key && inside ? (keyv.x > 0? 
make_float4(1.0f, 0, 0, 1.0f) : make_float4(0.0f, 1.0f, 0.0f, 1.0f)): make_float4(v, v, v, 1.0f) ; } } void ProgramCU::DisplayConvertKEY(CuTexImage* key, CuTexImage* dog, CuTexImage* out) { if(out->_cuData == NULL) return; int width = key->GetImgWidth(), height = key ->GetImgHeight(); dog->BindTexture(texData); key->BindTexture(texDataF4); dim3 grid((width + BLOCK_DIM - 1)/ BLOCK_DIM, (height + BLOCK_DIM - 1)/BLOCK_DIM); dim3 block(BLOCK_DIM, BLOCK_DIM); ConvertKEY_Kernel<<<grid, block>>>((float4*) out->_cuData, width, height); } void __global__ DisplayKeyPoint_Kernel(float4 * d_result, int num) { int idx = IMUL(blockIdx.x, blockDim.x) + threadIdx.x; if(idx >= num) return; float4 v = tex1Dfetch(texDataF4, idx); d_result[idx] = make_float4(v.x, v.y, 0, 1.0f); } void ProgramCU::DisplayKeyPoint(CuTexImage* ftex, CuTexImage* out) { int num = ftex->GetImgWidth(); int block_width = 64; dim3 grid((num + block_width -1) /block_width); dim3 block(block_width); ftex->BindTexture(texDataF4); DisplayKeyPoint_Kernel<<<grid, block>>>((float4*) out->_cuData, num); ProgramCU::CheckErrorCUDA("DisplayKeyPoint"); } void __global__ DisplayKeyBox_Kernel(float4* d_result, int num) { int idx = IMUL(blockIdx.x, blockDim.x) + threadIdx.x; if(idx >= num) return; int kidx = idx / 10, vidx = idx - IMUL(kidx , 10); float4 v = tex1Dfetch(texDataF4, kidx); float sz = fabs(v.z * 3.0f); /////////////////////// float s, c; __sincosf(v.w, &s, &c); /////////////////////// float dx = vidx == 0? 0 : ((vidx <= 4 || vidx >= 9)? sz : -sz); float dy = vidx <= 1? 0 : ((vidx <= 2 || vidx >= 7)? -sz : sz); float4 pos; pos.x = v.x + c * dx - s * dy; pos.y = v.y + c * dy + s * dx; pos.z = 0; pos.w = 1.0f; d_result[idx] = pos; } void ProgramCU::DisplayKeyBox(CuTexImage* ftex, CuTexImage* out) { int len = ftex->GetImgWidth(); int block_width = 32; dim3 grid((len * 10 + block_width -1) / block_width); dim3 block(block_width); ftex->BindTexture(texDataF4); DisplayKeyBox_Kernel<<<grid, block>>>((float4*) out->_cuData, len * 10); } /////////////////////////////////////////////////////////////////// inline void CuTexImage:: BindTexture(textureReference& texRef) { cudaBindTexture(NULL, &texRef, _cuData, &texRef.channelDesc, _numBytes); } inline void CuTexImage::BindTexture2D(textureReference& texRef) { #if defined(SIFTGPU_ENABLE_LINEAR_TEX2D) cudaBindTexture2D(0, &texRef, _cuData, &texRef.channelDesc, _imgWidth, _imgHeight, _imgWidth* _numChannel* sizeof(float)); #else cudaChannelFormatDesc desc; cudaGetChannelDesc(&desc, _cuData2D); cudaBindTextureToArray(&texRef, _cuData2D, &desc); #endif } int ProgramCU::CheckCudaDevice(int device) { int count = 0, device_used; if(cudaGetDeviceCount(&count) != cudaSuccess || count <= 0) { ProgramCU::CheckErrorCUDA("CheckCudaDevice"); return 0; }else if(count == 1) { cudaDeviceProp deviceProp; if ( cudaGetDeviceProperties(&deviceProp, 0) != cudaSuccess || (deviceProp.major == 9999 && deviceProp.minor == 9999)) { fprintf(stderr, "CheckCudaDevice: no device supporting CUDA.\n"); return 0; }else { GlobalUtil::_MemCapGPU = deviceProp.totalGlobalMem / 1024; GlobalUtil::_texMaxDimGL = 132768; if(GlobalUtil::_verbose) fprintf(stdout, "NOTE: changing maximum texture dimension to %d\n", GlobalUtil::_texMaxDimGL); } } if(device >0 && device < count) { cudaSetDevice(device); CheckErrorCUDA("cudaSetDevice\n"); } cudaGetDevice(&device_used); if(device != device_used) fprintf(stderr, "\nERROR: Cannot set device to %d\n" "\nWARNING: Use # %d device instead (out of %d)\n", device, device_used, count); return 1; } 
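/////////////////////////////////////////////////////////////////////////////////
// Editor's note: the helper below is an illustrative addition, not part of the
// original SiftGPU code. It mirrors the normalize -> clamp(0.2f) -> renormalize
// scheme used by NormalizeDescriptor_Kernel above on a plain 128-float descriptor,
// and can serve as a CPU reference when validating the GPU output. The function
// name is hypothetical; sqrtf/fminf are assumed available via the existing includes.
static void NormalizeDescriptorCPU_Sketch(float* des /* 128 floats */)
{
	//first pass: normalize the descriptor to unit length
	float sq = 0.0f;
	for(int i = 0; i < 128; ++i) sq += des[i] * des[i];
	float inv = sq > 0.0f ? 1.0f / sqrtf(sq) : 0.0f;
	//clamp every component at 0.2 to limit the influence of large gradient magnitudes
	for(int i = 0; i < 128; ++i) des[i] = fminf(0.2f, des[i] * inv);
	//second pass: renormalize after clamping
	sq = 0.0f;
	for(int i = 0; i < 128; ++i) sq += des[i] * des[i];
	inv = sq > 0.0f ? 1.0f / sqrtf(sq) : 0.0f;
	for(int i = 0; i < 128; ++i) des[i] *= inv;
}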
//////////////////////////////////////////////////////////////////////////////////////// // siftmatch funtions ////////////////////////////////////////////////////////////////////////////////////////// #define MULT_TBLOCK_DIMX 128 #define MULT_TBLOCK_DIMY 1 #define MULT_BLOCK_DIMX (MULT_TBLOCK_DIMX) #define MULT_BLOCK_DIMY (8 * MULT_TBLOCK_DIMY) texture<uint4, 1, cudaReadModeElementType> texDes1; texture<uint4, 1, cudaReadModeElementType> texDes2; //dim grid(num2/128,num1/8) //dim block(128,1) void __global__ MultiplyDescriptor_Kernel(int* d_result, int num1, int num2, int3* d_temp) { //MULT_BLOCK_DIMY : 8 MULT_BLOCK_DIMX : 128 int idx01 = (blockIdx.y * MULT_BLOCK_DIMY), //0~num1(0,8,16,24,32...) idx02 = (blockIdx.x * MULT_BLOCK_DIMX); //0~num2 int idx1 = idx01 + threadIdx.y, //idx1 = idx01 idx2 = idx02 + threadIdx.x;//col线程的列索引 __shared__ int data1[17 * 2 * MULT_BLOCK_DIMY]; //每个线程块共享内存272,共享内存是一维的! 线程块大小:(128,1) int read_idx1 = idx01 * 8 + threadIdx.x,//用到了共享内存 read_idx2 = idx2 * 8;//没用到共享内存 int col4 = threadIdx.x & 0x3, //得到0,1,2,3 row4 = threadIdx.x >> 2; //threadIdx.x/4 int cache_idx1 = IMUL(row4, 17) + (col4 << 2);//row4*17+col4*4 /////////////////////////////////////////////////////////////// //Load feature descriptors /////////////////////////////////////////////////////////////// #if MULT_BLOCK_DIMY == 16 uint4 v = tex1Dfetch(texDes1, read_idx1); data1[cache_idx1] = v.x; data1[cache_idx1+1] = v.y; data1[cache_idx1+2] = v.z; data1[cache_idx1+3] = v.w; #elif MULT_BLOCK_DIMY == 8 if(threadIdx.x < 64) //threadIdx.x = 64时!!!cache_idx1为272!!! { uint4 v = tex1Dfetch(texDes1, read_idx1); //num1的索引 data1[cache_idx1] = v.x; data1[cache_idx1+1] = v.y; data1[cache_idx1+2] = v.z; data1[cache_idx1+3] = v.w; } #else #error #endif __syncthreads(); /// if(idx2 >= num2) return; /////////////////////////////////////////////////////////////////////////// //compare descriptors int results[MULT_BLOCK_DIMY]; //8个result #pragma unroll for(int i = 0; i < MULT_BLOCK_DIMY; ++i) results[i] = 0; #pragma unroll for(int i = 0; i < 8; ++i) //8 { uint4 v = tex1Dfetch(texDes2, read_idx2 + i); unsigned char* p2 = (unsigned char*)(&v); //取出int包含了16个char #pragma unroll for(int k = 0; k < MULT_BLOCK_DIMY; ++k) //8 { unsigned char* p1 = (unsigned char*) (data1 + k * 34 + i * 4 + (i/4)); //i/4是余数,因为每隔4个就加5 results[k] += ( IMUL(p1[0], p2[0]) + IMUL(p1[1], p2[1]) + IMUL(p1[2], p2[2]) + IMUL(p1[3], p2[3]) + IMUL(p1[4], p2[4]) + IMUL(p1[5], p2[5]) + IMUL(p1[6], p2[6]) + IMUL(p1[7], p2[7]) + IMUL(p1[8], p2[8]) + IMUL(p1[9], p2[9]) + IMUL(p1[10], p2[10]) + IMUL(p1[11], p2[11]) + IMUL(p1[12], p2[12]) + IMUL(p1[13], p2[13]) + IMUL(p1[14], p2[14]) + IMUL(p1[15], p2[15])); // results[k] += ( IMUL(p1[0]-p2[0], p1[0]-p2[0]) + IMUL(p1[1]-p2[1], p1[1]-p2[1]) //+ IMUL(p1[2]-p2[2], p1[2]-p2[2]) + IMUL(p1[3]-p2[3],p1[3]-p2[3] ) //+ IMUL(p1[4]-p2[4], p1[4]-p2[4]) + IMUL(p1[5]-p2[5], p1[5]-p2[5]) //+ IMUL(p1[6]-p2[6], p1[6]-p2[6]) + IMUL(p1[7]-p2[7], p1[7]-p2[7]) //+ IMUL(p1[8]-p2[8], p1[8]-p2[8]) + IMUL(p1[9]-p2[9], p1[9]-p2[9]) //+ IMUL(p1[10]-p2[10],p1[10]-p2[10]) + IMUL(p1[11]-p2[11], p1[11]-p2[11]) //+ IMUL(p1[12]-p2[12], p1[12]-p2[12]) + IMUL(p1[13] -p2[13],p1[13] -p2[13]) //+ IMUL(p1[14]-p2[14],p1[14]-p2[14] ) + IMUL(p1[15]-p2[15], p1[15]-p2[15])); } } int dst_idx = IMUL(idx1, num2) + idx2; //(8*threadIdx.y*num2+idx2) if(d_temp) { int3 cmp_result = make_int3(0, -1, 0); //8个result里面(最大距离,位置(num1上的),次大值) #pragma unroll for(int i = 0; i < MULT_BLOCK_DIMY; ++i) //8 { if(idx1 + i < num1)//i:0~8+idx1 { cmp_result = results[i] > 
cmp_result.x? make_int3(results[i], idx1 + i, cmp_result.x) : make_int3(cmp_result.x, cmp_result.y, max(cmp_result.z, results[i])); //(i, num2)由于是共性内存,所以用了八个 d_result[dst_idx + IMUL(i, num2)] = results[i]; } } d_temp[ IMUL(blockIdx.y, num2) + idx2] = cmp_result; } else { #pragma unroll for(int i = 0; i < MULT_BLOCK_DIMY; ++i)//8 { if(idx1 + i < num1) d_result[dst_idx + IMUL(i, num2)] = results[i]; } } } void ProgramCU::MultiplyDescriptor(CuTexImage* des1, CuTexImage* des2, CuTexImage* texDot, CuTexImage* texCRT) { int num1 = des1->GetImgWidth() / 8; //1067 num*8 4通道 float4 int num2 = des2->GetImgWidth() / 8; //728 dim3 grid( (num2 + MULT_BLOCK_DIMX - 1)/ MULT_BLOCK_DIMX, (num1 + MULT_BLOCK_DIMY - 1)/MULT_BLOCK_DIMY);// (num2/128,num1/8) dim3 block(MULT_TBLOCK_DIMX, MULT_TBLOCK_DIMY);//(128,1) //使用的线程的大小为(num1/8)*num2 texDot->InitTexture( num2,num1);// (num2*num1)*4 float4 if(texCRT) //(num2 * num1/8 ) *4 float4 texCRT->InitTexture(num2, (num1 + MULT_BLOCK_DIMY - 1)/MULT_BLOCK_DIMY, 32); des1->BindTexture(texDes1);// des2->BindTexture(texDes2);// //输入:CRT或NULL texDes1和texDes2 //输出:texDot GlobalUtil::StartTimer("竖直"); MultiplyDescriptor_Kernel<<<grid, block>>>((int*)texDot->_cuData, num1, num2, (texCRT? (int3*)texCRT->_cuData : NULL)); cudaThreadSynchronize(); GlobalUtil::StopTimer(); float _timing1 = GlobalUtil::GetElapsedTime(); cudaThreadSynchronize(); ProgramCU::CheckErrorCUDA("MultiplyDescriptor"); } texture<float, 1, cudaReadModeElementType> texLoc1; texture<float2, 1, cudaReadModeElementType> texLoc2; struct Matrix33{float mat[3][3];}; //(num2/128,num1/8) //(128,8) void __global__ MultiplyDescriptorG_Kernel(int* d_result, int num1, int num2, int3* d_temp, Matrix33 H, float hdistmax, Matrix33 F, float fdistmax) { int idx01 = (blockIdx.y * MULT_BLOCK_DIMY); //8 (0,8,16,24,32...) int idx02 = (blockIdx.x * MULT_BLOCK_DIMX);//128 (0,128,256...) 
int idx1 = idx01 + threadIdx.y;//行索引 int idx2 = idx02 + threadIdx.x;//列索引 __shared__ int data1[17 * 2 * MULT_BLOCK_DIMY];//272 __shared__ float loc1[MULT_BLOCK_DIMY * 2]; //16 int read_idx1 = idx01 * 8 + threadIdx.x ; //8个代表一个描述子, int read_idx2 = idx2 * 8; //8个代表一个描述子, int col4 = threadIdx.x & 0x3, row4 = threadIdx.x >> 2; int cache_idx1 = IMUL(row4, 17) + (col4 << 2); #if MULT_BLOCK_DIMY == 16 //不执行 uint4 v = tex1Dfetch(texDes1, read_idx1); data1[cache_idx1] = v.x; data1[cache_idx1+1] = v.y; data1[cache_idx1+2] = v.z; data1[cache_idx1+3] = v.w; #elif MULT_BLOCK_DIMY == 8 if(threadIdx.x < 64) //小于64刚好272 { uint4 v = tex1Dfetch(texDes1, read_idx1); data1[cache_idx1] = v.x; data1[cache_idx1+1] = v.y; data1[cache_idx1+2] = v.z; data1[cache_idx1+3] = v.w; } #else #error #endif __syncthreads(); if(threadIdx.x < MULT_BLOCK_DIMY * 2) //小于16 { // (0,8,16,24,32...)->(0,16,32,48,64...)+16 loc1[threadIdx.x] = tex1Dfetch(texLoc1, 2 * idx01 + threadIdx.x); //一个在共享内存里 } __syncthreads(); if(idx2 >= num2) return; int results[MULT_BLOCK_DIMY]; ///////////////////////////////////////////////////////////////////////////////////////////// //geometric verification 几何验证 ///////////////////////////////////////////////////////////////////////////////////////////// int good_count = 0; float2 loc2 = tex1Dfetch(texLoc2, idx2); //一个在纹理内存里,负责定位 //idx2= //每个特征点对应 #pragma unroll for(int i = 0; i < MULT_BLOCK_DIMY; ++i)//8 { if(idx1 + i < num1) { float* loci = loc1 + i * 2; float locx = loci[0], locy = loci[1]; //homography float x[3], diff[2]; x[0] = H.mat[0][0] * locx + H.mat[0][1] * locy + H.mat[0][2]; x[1] = H.mat[1][0] * locx + H.mat[1][1] * locy + H.mat[1][2]; x[2] = H.mat[2][0] * locx + H.mat[2][1] * locy + H.mat[2][2]; diff[0] = fabs(FDIV(x[0], x[2]) - loc2.x); diff[1] = fabs(FDIV(x[1], x[2]) - loc2.y); if(diff[0] < hdistmax && diff[1] < hdistmax) { //check fundamental matrix 检查基础矩阵 float fx1[3], ftx2[3], x2fx1, se; fx1[0] = F.mat[0][0] * locx + F.mat[0][1] * locy + F.mat[0][2]; fx1[1] = F.mat[1][0] * locx + F.mat[1][1] * locy + F.mat[1][2]; fx1[2] = F.mat[2][0] * locx + F.mat[2][1] * locy + F.mat[2][2]; ftx2[0] = F.mat[0][0] * loc2.x + F.mat[1][0] * loc2.y + F.mat[2][0]; ftx2[1] = F.mat[0][1] * loc2.x + F.mat[1][1] * loc2.y + F.mat[2][1]; //ftx2[2] = F.mat[0][2] * loc2.x + F.mat[1][2] * loc2.y + F.mat[2][2]; x2fx1 = loc2.x * fx1[0] + loc2.y * fx1[1] + fx1[2]; se = FDIV(x2fx1 * x2fx1, fx1[0] * fx1[0] + fx1[1] * fx1[1] + ftx2[0] * ftx2[0] + ftx2[1] * ftx2[1]); results[i] = se < fdistmax? 0: -262144; }else { results[i] = -262144; } }else { results[i] = -262144; } good_count += (results[i] >=0); } ///////////////////////////////////////////////////////////////////////////////////////////// ///compare feature descriptors anyway 无论如何都要进行特征描述子生成,跟双向匹配没有任何区别!!! 
///////////////////////////////////////////////////////////////////////////////////////////// if(good_count > 0) { #pragma unroll for(int i = 0; i < 8; ++i) { uint4 v = tex1Dfetch(texDes2, read_idx2 + i); unsigned char* p2 = (unsigned char*)(&v); #pragma unroll for(int k = 0; k < MULT_BLOCK_DIMY; ++k) { unsigned char* p1 = (unsigned char*) (data1 + k * 34 + i * 4 + (i/4)); results[k] += ( IMUL(p1[0], p2[0]) + IMUL(p1[1], p2[1]) + IMUL(p1[2], p2[2]) + IMUL(p1[3], p2[3]) + IMUL(p1[4], p2[4]) + IMUL(p1[5], p2[5]) + IMUL(p1[6], p2[6]) + IMUL(p1[7], p2[7]) + IMUL(p1[8], p2[8]) + IMUL(p1[9], p2[9]) + IMUL(p1[10], p2[10]) + IMUL(p1[11], p2[11]) + IMUL(p1[12], p2[12]) + IMUL(p1[13], p2[13]) + IMUL(p1[14], p2[14]) + IMUL(p1[15], p2[15])); } } } int dst_idx = IMUL(idx1, num2) + idx2; if(d_temp) { int3 cmp_result = make_int3(0, -1, 0); #pragma unroll for(int i= 0; i < MULT_BLOCK_DIMY; ++i) { if(idx1 + i < num1) { cmp_result = results[i] > cmp_result.x? make_int3(results[i], idx1 + i, cmp_result.x) : make_int3(cmp_result.x, cmp_result.y, max(cmp_result.z, results[i])); d_result[dst_idx + IMUL(i, num2)] = max(results[i], 0); }else { break; } } d_temp[ IMUL(blockIdx.y, num2) + idx2] = cmp_result; }else { #pragma unroll for(int i = 0; i < MULT_BLOCK_DIMY; ++i) { if(idx1 + i < num1) d_result[dst_idx + IMUL(i, num2)] = max(results[i], 0); else break; } } } void ProgramCU::MultiplyDescriptorG(CuTexImage* des1, CuTexImage* des2, CuTexImage* loc1, CuTexImage* loc2, CuTexImage* texDot, CuTexImage* texCRT, float H[3][3], float hdistmax, float F[3][3], float fdistmax) { int num1 = des1->GetImgWidth() / 8; int num2 = des2->GetImgWidth() / 8; Matrix33 MatF, MatH; //copy the matrix memcpy(MatF.mat, F, 9 * sizeof(float)); memcpy(MatH.mat, H, 9 * sizeof(float)); //thread blocks dim3 grid( (num2 + MULT_BLOCK_DIMX - 1)/ MULT_BLOCK_DIMX, //(num2/128,num1/8) (num1 + MULT_BLOCK_DIMY - 1)/MULT_BLOCK_DIMY); dim3 block(MULT_TBLOCK_DIMX, MULT_TBLOCK_DIMY); //(128,8) //intermediate results 中间结果:双向匹配 texDot->InitTexture( num2,num1); if(texCRT) texCRT->InitTexture( num2, (num1 + MULT_BLOCK_DIMY - 1)/MULT_BLOCK_DIMY, 3); loc1->BindTexture(texLoc1); loc2->BindTexture(texLoc2); des1->BindTexture(texDes1); //影像1 des2->BindTexture(texDes2);//影像2 MultiplyDescriptorG_Kernel<<<grid, block>>>((int*)texDot->_cuData, num1, num2, (texCRT? (int3*)texCRT->_cuData : NULL), MatH, hdistmax, MatF, fdistmax); } texture<int, 1, cudaReadModeElementType> texDOT; #define ROWMATCH_BLOCK_WIDTH 32 #define ROWMATCH_BLOCK_HEIGHT 1 void __global__ RowMatch_Kernel(int*d_dot, int* d_result, int num2, float distmax, float ratiomax) { #if ROWMATCH_BLOCK_HEIGHT == 1 __shared__ int dotmax[ROWMATCH_BLOCK_WIDTH]; //32 __shared__ int dotnxt[ROWMATCH_BLOCK_WIDTH]; //32 __shared__ int dotidx[ROWMATCH_BLOCK_WIDTH]; //32 int row = blockIdx.y; #else //不使用 __shared__ int x_dotmax[ROWMATCH_BLOCK_HEIGHT][ROWMATCH_BLOCK_WIDTH]; __shared__ int x_dotnxt[ROWMATCH_BLOCK_HEIGHT][ROWMATCH_BLOCK_WIDTH]; __shared__ int x_dotidx[ROWMATCH_BLOCK_HEIGHT][ROWMATCH_BLOCK_WIDTH]; int* dotmax = x_dotmax[threadIdx.y]; int* dotnxt = x_dotnxt[threadIdx.y]; int* dotidx = x_dotidx[threadIdx.y]; int row = IMUL(blockIdx.y, ROWMATCH_BLOCK_HEIGHT) + threadIdx.y; #endif int base_address = IMUL(row , num2); //dot层的索引!!! 
	int t_dotmax = 0, t_dotnxt = 0, t_dotidx = -1;	//best value, second-best value, index of the best
	for(int i = 0; i < num2; i += ROWMATCH_BLOCK_WIDTH)	//stride of 32: each thread handles num2/32 columns
	{
		if(threadIdx.x + i < num2)
		{
			int v = tex1Dfetch(texDOT, base_address + threadIdx.x + i);//d_dot[base_address + threadIdx.x + i];//
			bool test = v > t_dotmax;	//is this a new per-thread maximum?
			t_dotnxt = test? t_dotmax : max(t_dotnxt, v);	//second-best value
			t_dotidx = test? (threadIdx.x + i) : t_dotidx;	//only the index of the per-thread maximum is kept
			t_dotmax = test? v: t_dotmax;
		}
		__syncthreads();	//t_dotmax has been updated for this stretch; wait for the other threads
							//before they are reused for the next stretch of columns
	}
	//per-thread best/second-best over all stretches are now known
	dotmax[threadIdx.x] = t_dotmax;
	dotnxt[threadIdx.x] = t_dotnxt;
	dotidx[threadIdx.x] = t_dotidx;
	__syncthreads();

#pragma unroll
	for(int step = ROWMATCH_BLOCK_WIDTH/2; step >0; step /= 2)	//tree reduction, halving the active threads each step
	{
		if(threadIdx.x < step)
		{
			int v1 = dotmax[threadIdx.x], v2 = dotmax[threadIdx.x + step];
			bool test = v2 > v1;
			dotnxt[threadIdx.x] = test? max(v1, dotnxt[threadIdx.x + step]) :max(dotnxt[threadIdx.x], v2);	//compare the second-best with the smaller of v1 and v2
			dotidx[threadIdx.x] = test? dotidx[threadIdx.x + step] : dotidx[threadIdx.x];
			dotmax[threadIdx.x] = test? v2 : v1;
		}
		__syncthreads();
	}
	if(threadIdx.x == 0)
	{
		float dist  = acos(min(dotmax[0] * 0.000003814697265625f, 1.0));	//best dot product; acos is decreasing, so this is the smaller angle
		float distn = acos(min(dotnxt[0] * 0.000003814697265625f, 1.0));	//second-best dot product, i.e. the larger angle
		//float ratio = dist / distn;
		d_result[row] = (dist < distmax) && (dist < distn * ratiomax) ? dotidx[0] : -1;//
	}

}

void ProgramCU::GetRowMatch(CuTexImage* texDot, CuTexImage* texMatch, float distmax, float ratiomax)
{
	int num1 = texDot->GetImgHeight();//1068
	int num2 = texDot->GetImgWidth();//731
	dim3 grid(1, num1/ROWMATCH_BLOCK_HEIGHT);//dim3 grid(1,num1)
	dim3 block(ROWMATCH_BLOCK_WIDTH, ROWMATCH_BLOCK_HEIGHT);//dim3 block(32,1)
	texDot->BindTexture(texDOT);

	GlobalUtil::StartTimer("vertical");
	RowMatch_Kernel<<<grid, block>>>((int*)texDot->_cuData,
		(int*)texMatch->_cuData, num2, distmax, ratiomax);
	cudaThreadSynchronize();
	GlobalUtil::StopTimer();
	float _timing1 = GlobalUtil::GetElapsedTime();
}

#define COLMATCH_BLOCK_WIDTH 32

//texture<int3, 1, cudaReadModeElementType> texCT;

void __global__ ColMatch_Kernel(int3*d_crt, int* d_result, int height, int num2, float distmax, float ratiomax)
{
	int col = COLMATCH_BLOCK_WIDTH * blockIdx.x + threadIdx.x;	//column index
	if(col >= num2) return;
	int3 result = d_crt[col];//tex1Dfetch(texCT, col);
	int read_idx = col + num2;
	for(int i = 1; i < height; ++i, read_idx += num2)
	{
		int3 temp = d_crt[read_idx];//tex1Dfetch(texCT, read_idx);
		result = result.x < temp.x?	//keep the larger dot product
			make_int3(temp.x, temp.y, max(result.x, temp.z)) :
			make_int3(result.x, result.y, max(result.z, temp.x));
	}

	float dist  = acos(min(result.x * 0.000003814697265625f, 1.0));
	float distn = acos(min(result.z * 0.000003814697265625f, 1.0));
	//float ratio = dist / distn;
	d_result[col] = (dist < distmax) && (dist < distn * ratiomax) ? result.y : -1;//

}

void ProgramCU::GetColMatch(CuTexImage* texCRT, CuTexImage* texMatch, float distmax, float ratiomax)
{
	int height = texCRT->GetImgHeight();
	int num2 = texCRT->GetImgWidth();
	//texCRT->BindTexture(texCT);
	dim3 grid((num2 + COLMATCH_BLOCK_WIDTH -1) / COLMATCH_BLOCK_WIDTH);//num2/32
	dim3 block(COLMATCH_BLOCK_WIDTH);	//32*1
	ColMatch_Kernel<<<grid, block>>>((int3*)texCRT->_cuData, (int*) texMatch->_cuData, height, num2, distmax, ratiomax);
	cudaThreadSynchronize();
}

#endif
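/////////////////////////////////////////////////////////////////////////////////
// Editor's note: illustrative sketch, not part of the original SiftGPU sources.
// It restates the acceptance test used by RowMatch_Kernel / ColMatch_Kernel above:
// the integer dot product of two quantized descriptors is rescaled by
// 0.000003814697265625f = 1/262144 = 1/(512*512) (consistent with the descriptor
// components having been quantized with a scale of 512), converted to an angular
// distance with acos, and a candidate is accepted only if the best angle is below
// distmax and below ratiomax times the second-best angle. The function and
// parameter names here are hypothetical.
static int AcceptBestMatch_Sketch(int best_dot, int second_dot, int best_index,
                                  float distmax, float ratiomax)
{
	const float scale = 1.0f / 262144.0f;                   // == 0.000003814697265625f
	float dist  = acosf(fminf(best_dot   * scale, 1.0f));   // smaller angle = better match
	float distn = acosf(fminf(second_dot * scale, 1.0f));   // angle of the runner-up
	return (dist < distmax && dist < distn * ratiomax) ? best_index : -1;
}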
b849eac751ef03436552bf158f41de0bc54c383f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <cusp/hyb_matrix.h> #include <cusp/io/matrix_market.h> #include <cusp/krylov/cg.h> #include <cusp/csr_matrix.h> #include <cusp/print.h> #include <cusp/multiply.h> #include "kernel.h" #include "ScopeProfile.h" extern std::vector<StepProfile> gProfile; extern std::map<std::string, long> gProfileGroup; using namespace std; using namespace cusp; void runTest_ell(int argc, char **argv) { gProfileGroup.clear(); for (int mtx = 1; mtx < argc; mtx++) { gProfile.clear(); // cusp::csr_matrix<int, DTYPE, cusp::host_memory> A(ROWS, COLS, ENTRIES); cusp::csr_matrix<int, DTYPE, host_memory> AA; cusp::ell_matrix<int, DTYPE, host_memory> A; // load a matrix stored in MatrixMarket format // cusp::io::read_matrix_market_file(A, "cusp1/cant.mtx"); // cusp::io::read_matrix_market_file(A, "cusp1/consph.mtx"); // // try{ cusp::io::read_matrix_market_file(AA, argv[mtx]); A = AA; // } catch (...) // { // continue; // } // cusp::io::read_matrix_market_file(A, "cusp1/qcd5_4.mtx"); // cusp::io::read_matrix_market_file(A, "cusp1/rail4284.mtx"); // cusp::io::read_matrix_market_file(A, "cusp1/rma10.mtx"); // cusp::io::read_matrix_market_file(A, "cusp1/webbase-1M.mtx"); thrust::host_vector<int> indexes(A.column_indices.num_rows * A.column_indices.num_cols); thrust::host_vector<DTYPE> data(A.column_indices.num_rows * A.column_indices.num_cols); printf("-------------------\n"); printf("matrix %s rows %d cols %d num entries %d avg %d.\n", argv[mtx], A.num_rows, A.num_cols, A.num_entries, A.num_entries/A.num_rows); int * col_ptr = thrust::raw_pointer_cast(indexes.data()); DTYPE *data_ptr = thrust::raw_pointer_cast(data.data()); memcpy(col_ptr, &(A.column_indices(0,0)), sizeof(int) * A.column_indices.num_cols * A.column_indices.num_rows); for (int j = 0; j != A.column_indices.num_rows; j++) for (int i = 0; i != A.column_indices.num_cols; i++) { // printf("%d ", A.column_indices(0, i)); /* if (A.column_indices(j,i) == -1) printf("-1 found in %d %d.\n", j,i);*/ *col_ptr = A.column_indices(j,i); *data_ptr = A.values(j,i); col_ptr++; data_ptr++; } thrust::device_vector<int> col_index_d = indexes; thrust::device_vector<DTYPE> data_d = data; // reset pointer col_ptr = thrust::raw_pointer_cast(col_index_d.data()); data_ptr = thrust::raw_pointer_cast(data_d.data()); // printf("here 2.\n"); // for (int i = 0; i < 100; i++) // cout << indexes[i] << " "; cusp::array1d<DTYPE, cusp::host_memory> xh(A.num_cols); thrust::host_vector<DTYPE> xx(A.num_cols); for (int i = 0; i < A.num_cols; i++) { xh[i] = 1.0; //i+1; xx[i] = 1.0; // i+1; } cusp::array1d<DTYPE, cusp::device_memory> xd = xh; cusp::array1d<DTYPE, cusp::device_memory> yd(A.num_rows); cusp::ell_matrix<int,DTYPE, device_memory> B = A; { GPUScopeProfile cuspp("ell_cusp_multiply"); cusp::multiply(B, xd, yd); } cusp::array1d<DTYPE, cusp::host_memory> yh = yd; /* for (int i = 0; i != 20; i++) printf("yd[%d] = %f.\n", i, yh[i]);*/ thrust::device_vector<DTYPE> xx_d = xx; thrust::device_vector<DTYPE> yy_d(A.num_rows); DTYPE *yy_ptr = thrust::raw_pointer_cast(yy_d.data()); DTYPE *xx_ptr = thrust::raw_pointer_cast(xx_d.data()); { GPUScopeProfile cuspp("ell_hidp_multiply_warp"); hipLaunchKernelGGL(( ell_kernel_warp), dim3(128), dim3(WARP_KERNEL_SIZE), 0, 0, col_ptr, data_ptr, xx_ptr, yy_ptr, A.column_indices.num_rows, A.column_indices.num_cols); } { thrust::host_vector<DTYPE> yy_h = yy_d; isIdentical(&yh[0], &yy_h[0], A.num_rows, 
"csr warp"); } { GPUScopeProfile cuspp("ell_hidp_multiply_subwarp"); hipLaunchKernelGGL(( ell_kernel_subwarp), dim3(128), dim3(WARP_KERNEL_SIZE), 0, 0, col_ptr, data_ptr, xx_ptr, yy_ptr, A.column_indices.num_rows, A.column_indices.num_cols); } { thrust::host_vector<DTYPE> yy_h = yy_d; isIdentical(&yh[0], &yy_h[0], A.num_rows, "csr warp"); } { GPUScopeProfile cuspp("ell_hidp_multiply_thread"); hipLaunchKernelGGL(( ell_kernel_thread), dim3(128), dim3(WARP_KERNEL_SIZE), 0, 0, col_ptr, data_ptr, xx_ptr, yy_ptr, A.column_indices.num_rows, A.column_indices.num_cols); } { thrust::host_vector<DTYPE> yy_h = yy_d; isIdentical(&yh[0], &yy_h[0], A.num_rows, "csr warp"); } showProfileResult(gProfile); /* cusp::csr_matrix<int,DTYPE ,cusp::device_memory> B = A; */ } showProfileResult(gProfileGroup); printf("*****************\n"); }
b849eac751ef03436552bf158f41de0bc54c383f.cu
#include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <cusp/hyb_matrix.h> #include <cusp/io/matrix_market.h> #include <cusp/krylov/cg.h> #include <cusp/csr_matrix.h> #include <cusp/print.h> #include <cusp/multiply.h> #include "kernel.h" #include "ScopeProfile.h" extern std::vector<StepProfile> gProfile; extern std::map<std::string, long> gProfileGroup; using namespace std; using namespace cusp; void runTest_ell(int argc, char **argv) { gProfileGroup.clear(); for (int mtx = 1; mtx < argc; mtx++) { gProfile.clear(); // cusp::csr_matrix<int, DTYPE, cusp::host_memory> A(ROWS, COLS, ENTRIES); cusp::csr_matrix<int, DTYPE, host_memory> AA; cusp::ell_matrix<int, DTYPE, host_memory> A; // load a matrix stored in MatrixMarket format // cusp::io::read_matrix_market_file(A, "cusp1/cant.mtx"); // cusp::io::read_matrix_market_file(A, "cusp1/consph.mtx"); // // try{ cusp::io::read_matrix_market_file(AA, argv[mtx]); A = AA; // } catch (...) // { // continue; // } // cusp::io::read_matrix_market_file(A, "cusp1/qcd5_4.mtx"); // cusp::io::read_matrix_market_file(A, "cusp1/rail4284.mtx"); // cusp::io::read_matrix_market_file(A, "cusp1/rma10.mtx"); // cusp::io::read_matrix_market_file(A, "cusp1/webbase-1M.mtx"); thrust::host_vector<int> indexes(A.column_indices.num_rows * A.column_indices.num_cols); thrust::host_vector<DTYPE> data(A.column_indices.num_rows * A.column_indices.num_cols); printf("-------------------\n"); printf("matrix %s rows %d cols %d num entries %d avg %d.\n", argv[mtx], A.num_rows, A.num_cols, A.num_entries, A.num_entries/A.num_rows); int * col_ptr = thrust::raw_pointer_cast(indexes.data()); DTYPE *data_ptr = thrust::raw_pointer_cast(data.data()); memcpy(col_ptr, &(A.column_indices(0,0)), sizeof(int) * A.column_indices.num_cols * A.column_indices.num_rows); for (int j = 0; j != A.column_indices.num_rows; j++) for (int i = 0; i != A.column_indices.num_cols; i++) { // printf("%d ", A.column_indices(0, i)); /* if (A.column_indices(j,i) == -1) printf("-1 found in %d %d.\n", j,i);*/ *col_ptr = A.column_indices(j,i); *data_ptr = A.values(j,i); col_ptr++; data_ptr++; } thrust::device_vector<int> col_index_d = indexes; thrust::device_vector<DTYPE> data_d = data; // reset pointer col_ptr = thrust::raw_pointer_cast(col_index_d.data()); data_ptr = thrust::raw_pointer_cast(data_d.data()); // printf("here 2.\n"); // for (int i = 0; i < 100; i++) // cout << indexes[i] << " "; cusp::array1d<DTYPE, cusp::host_memory> xh(A.num_cols); thrust::host_vector<DTYPE> xx(A.num_cols); for (int i = 0; i < A.num_cols; i++) { xh[i] = 1.0; //i+1; xx[i] = 1.0; // i+1; } cusp::array1d<DTYPE, cusp::device_memory> xd = xh; cusp::array1d<DTYPE, cusp::device_memory> yd(A.num_rows); cusp::ell_matrix<int,DTYPE, device_memory> B = A; { GPUScopeProfile cuspp("ell_cusp_multiply"); cusp::multiply(B, xd, yd); } cusp::array1d<DTYPE, cusp::host_memory> yh = yd; /* for (int i = 0; i != 20; i++) printf("yd[%d] = %f.\n", i, yh[i]);*/ thrust::device_vector<DTYPE> xx_d = xx; thrust::device_vector<DTYPE> yy_d(A.num_rows); DTYPE *yy_ptr = thrust::raw_pointer_cast(yy_d.data()); DTYPE *xx_ptr = thrust::raw_pointer_cast(xx_d.data()); { GPUScopeProfile cuspp("ell_hidp_multiply_warp"); ell_kernel_warp<<<128, WARP_KERNEL_SIZE>>>(col_ptr, data_ptr, xx_ptr, yy_ptr, A.column_indices.num_rows, A.column_indices.num_cols); } { thrust::host_vector<DTYPE> yy_h = yy_d; isIdentical(&yh[0], &yy_h[0], A.num_rows, "csr warp"); } { GPUScopeProfile cuspp("ell_hidp_multiply_subwarp"); ell_kernel_subwarp<<<128, WARP_KERNEL_SIZE>>>(col_ptr, 
data_ptr, xx_ptr, yy_ptr, A.column_indices.num_rows, A.column_indices.num_cols); } { thrust::host_vector<DTYPE> yy_h = yy_d; isIdentical(&yh[0], &yy_h[0], A.num_rows, "csr warp"); } { GPUScopeProfile cuspp("ell_hidp_multiply_thread"); ell_kernel_thread<<<128, WARP_KERNEL_SIZE>>>(col_ptr, data_ptr, xx_ptr, yy_ptr, A.column_indices.num_rows, A.column_indices.num_cols); } { thrust::host_vector<DTYPE> yy_h = yy_d; isIdentical(&yh[0], &yy_h[0], A.num_rows, "csr warp"); } showProfileResult(gProfile); /* cusp::csr_matrix<int,DTYPE ,cusp::device_memory> B = A; */ } showProfileResult(gProfileGroup); printf("*****************\n"); }
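/* Editor's note (illustrative additions, not part of the original files): this .cu file is the
   CUDA source of the hipified .hip file above; the two differ only in the mechanical rewrites
   applied by hipify:
     header        : cuda_runtime.h  -> hip/hip_runtime.h
     runtime calls : cuda*           -> hip*
     kernel launch : kernel<<<grid, block, shmem, stream>>>(args...)
                     -> hipLaunchKernelGGL(kernel, grid, block, shmem, stream, args...)
   The ELL kernels launched here (ell_kernel_warp / ell_kernel_subwarp / ell_kernel_thread) are
   declared in kernel.h, which is not shown. The kernel below is only a sketch of the simplest
   variant, one thread per row, written against the row-major layout built by the host loop
   above (slot index = row * num_cols + j) and assuming -1 marks padding entries, as the
   commented-out check in the host code suggests. It is not the project's actual implementation. */
template <typename T>
__global__ void ell_spmv_row_per_thread_sketch(const int* col_idx, const T* vals,
                                               const T* x, T* y,
                                               int num_rows, int num_cols_per_row)
{
    for (int row = blockIdx.x * blockDim.x + threadIdx.x; row < num_rows;
         row += gridDim.x * blockDim.x)                       // grid-stride loop over rows
    {
        T sum = 0;
        const int base = row * num_cols_per_row;              // start of this row's ELL slots
        for (int j = 0; j < num_cols_per_row; ++j)
        {
            int c = col_idx[base + j];
            if (c >= 0) sum += vals[base + j] * x[c];          // skip padded (-1) entries
        }
        y[row] = sum;
    }
}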
0d4bc430c42c06710e23399919598c2b4c66d603.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" //The number of character in the encrypted text #define N 1024 #define A 15 #define B 27 #define M 128 #define INV_MOD 111 void checkCUDAError(const char*); void read_encrypted_file(int*); /* Exercise 1.1 */ __device__ int modulo(int a, int b){ int r = a % b; r = (r < 0) ? r + b : r; return r; } __global__ void affine_decrypt(int *d_input, int *d_output) { /* Exercise 1.2 */ int i = blockIdx.x * blockDim.x + threadIdx.x; int a = INV_MOD * (d_input[i] - B); int b = M; int d = modulo(a, b); d_output[i] = d; // Assign decrypted value to output address } __global__ void affine_decrypt_multiblock(int *d_input, int *d_output) { /* Exercise 1.8 */ } int main(int argc, char *argv[]) { int *h_input, *h_output; int *d_input, *d_output; unsigned int size; int i; size = N * sizeof(int); /* allocate the host memory */ h_input = (int *)malloc(size); h_output = (int *)malloc(size); /* Exercise 1.3: allocate device memory */ hipMalloc((void **)&d_input, size); hipMalloc((void **)&d_output, size); checkCUDAError("Memory allocation"); /* read the encryted text */ read_encrypted_file(h_input); /* Exercise 1.4: copy host input to device input */ hipMemcpy(d_input, h_input, size, hipMemcpyHostToDevice); checkCUDAError("Input transfer to device"); /* Exercise 1.5: Configure the grid of thread blocks and run the GPU kernel */ dim3 blocksPerGrid(1, 1, 1); dim3 threadsPerBlock(N, 1, 1); hipLaunchKernelGGL(( affine_decrypt) , dim3(1), dim3(N), 0, 0, d_input, d_output); /* wait for all threads to complete */ hipDeviceSynchronize(); checkCUDAError("Kernel execution"); /* Exercise 1.6: copy the gpu output back to the host */ hipMemcpy(h_output, d_output, size, hipMemcpyDeviceToHost); checkCUDAError("Result transfer to host"); /* print out the result to screen */ for (i = 0; i < N; i++) { printf("%c", (char)h_output[i]); } printf("\n"); /* Exercise 1.7: free device memory */ hipFree(d_input); hipFree(d_output); checkCUDAError("Free memory"); /* free host buffers */ free(h_input); free(h_output); return 0; } void checkCUDAError(const char *msg) { hipError_t err = hipGetLastError(); if (hipSuccess != err) { fprintf(stderr, "CUDA ERROR: %s: %s.\n", msg, hipGetErrorString(err)); exit(EXIT_FAILURE); } } void read_encrypted_file(int* input) { FILE *f = NULL; f = fopen("encrypted01.bin", "rb"); //read and binary flags if (f == NULL){ fprintf(stderr, "Error: Could not find encrypted01.bin file \n"); exit(1); } //read encrypted data fread(input, sizeof(unsigned int), N, f); fclose(f); }
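/* Editor's note (illustrative addition, not part of the original exercise file): the kernel
   above implements the affine-cipher decryption D(y) = INV_MOD * (y - B) mod M, where
   INV_MOD = 111 is the modular inverse of A = 15 modulo M = 128, since
   15 * 111 = 1665 = 13 * 128 + 1. The host helpers below (hypothetical names) verify that
   property and mirror the per-character decryption, which is handy for checking the GPU
   output on the CPU. */
static int modulo_host(int a, int b) { int r = a % b; return r < 0 ? r + b : r; }
static int affine_decrypt_host(int y) { return modulo_host(INV_MOD * (y - B), M); }
static int inverse_is_valid(void)    { return modulo_host(A * INV_MOD, M) == 1; } /* returns 1 */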
0d4bc430c42c06710e23399919598c2b4c66d603.cu
#include <stdio.h> #include <stdlib.h> #include "cuda_runtime.h" #include "device_launch_parameters.h" //The number of character in the encrypted text #define N 1024 #define A 15 #define B 27 #define M 128 #define INV_MOD 111 void checkCUDAError(const char*); void read_encrypted_file(int*); /* Exercise 1.1 */ __device__ int modulo(int a, int b){ int r = a % b; r = (r < 0) ? r + b : r; return r; } __global__ void affine_decrypt(int *d_input, int *d_output) { /* Exercise 1.2 */ int i = blockIdx.x * blockDim.x + threadIdx.x; int a = INV_MOD * (d_input[i] - B); int b = M; int d = modulo(a, b); d_output[i] = d; // Assign decrypted value to output address } __global__ void affine_decrypt_multiblock(int *d_input, int *d_output) { /* Exercise 1.8 */ } int main(int argc, char *argv[]) { int *h_input, *h_output; int *d_input, *d_output; unsigned int size; int i; size = N * sizeof(int); /* allocate the host memory */ h_input = (int *)malloc(size); h_output = (int *)malloc(size); /* Exercise 1.3: allocate device memory */ cudaMalloc((void **)&d_input, size); cudaMalloc((void **)&d_output, size); checkCUDAError("Memory allocation"); /* read the encryted text */ read_encrypted_file(h_input); /* Exercise 1.4: copy host input to device input */ cudaMemcpy(d_input, h_input, size, cudaMemcpyHostToDevice); checkCUDAError("Input transfer to device"); /* Exercise 1.5: Configure the grid of thread blocks and run the GPU kernel */ dim3 blocksPerGrid(1, 1, 1); dim3 threadsPerBlock(N, 1, 1); affine_decrypt <<<1, N>>> (d_input, d_output); /* wait for all threads to complete */ cudaThreadSynchronize(); checkCUDAError("Kernel execution"); /* Exercise 1.6: copy the gpu output back to the host */ cudaMemcpy(h_output, d_output, size, cudaMemcpyDeviceToHost); checkCUDAError("Result transfer to host"); /* print out the result to screen */ for (i = 0; i < N; i++) { printf("%c", (char)h_output[i]); } printf("\n"); /* Exercise 1.7: free device memory */ cudaFree(d_input); cudaFree(d_output); checkCUDAError("Free memory"); /* free host buffers */ free(h_input); free(h_output); return 0; } void checkCUDAError(const char *msg) { cudaError_t err = cudaGetLastError(); if (cudaSuccess != err) { fprintf(stderr, "CUDA ERROR: %s: %s.\n", msg, cudaGetErrorString(err)); exit(EXIT_FAILURE); } } void read_encrypted_file(int* input) { FILE *f = NULL; f = fopen("encrypted01.bin", "rb"); //read and binary flags if (f == NULL){ fprintf(stderr, "Error: Could not find encrypted01.bin file \n"); exit(1); } //read encrypted data fread(input, sizeof(unsigned int), N, f); fclose(f); }
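/* Editor's note: affine_decrypt_multiblock is intentionally left empty above (Exercise 1.8).
   The kernel below is one possible completion, not the official solution: the same per-character
   affine decryption, but safe to launch across several blocks (for example <<<8, 128>>>, since
   8 * 128 = N = 1024). The bounds check keeps it correct for any launch configuration. */
__global__ void affine_decrypt_multiblock_sketch(int *d_input, int *d_output)
{
	int i = blockIdx.x * blockDim.x + threadIdx.x;	/* global character index */
	if (i < N)										/* guard against surplus threads */
		d_output[i] = modulo(INV_MOD * (d_input[i] - B), M);
}
/* example launch, replacing the single-block one in main():
   affine_decrypt_multiblock_sketch<<<dim3(8, 1, 1), dim3(128, 1, 1)>>>(d_input, d_output); */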
53cc9a9362a3140e051b916533c727cb6ff5913a.hip
// !!! This is a file automatically generated by hipify!!! /* CUDA BarnesHut v2.0: Simulation of the gravitational forces in a galactic cluster using the Barnes-Hut n-body algorithm Copyright (c) 2011, Texas State University-San Marcos. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Texas State University-San Marcos nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. Author: Martin Burtscher */ #include <stdlib.h> #include <stdio.h> #include <math.h> #include <time.h> #include <hip/hip_runtime.h> // thread count #define THREADS1 512 /* must be a power of 2 */ #define THREADS2 1024 #define THREADS3 1024 #define THREADS4 256 #define THREADS5 256 #define THREADS6 512 // block count = factor * #SMs #define FACTOR1 3 #define FACTOR2 1 #define FACTOR3 1 /* must all be resident at the same time */ #define FACTOR4 1 /* must all be resident at the same time */ #define FACTOR5 5 #define FACTOR6 3 #define WARPSIZE 32 #define MAXDEPTH 32 /******************************************************************************/ // childd is aliased with velxd, velyd, velzd, accxd, accyd, acczd, and sortd but they never use the same memory locations __device__ __constant__ int nnodesd, nbodiesd; __constant__ float dtimed, dthfd, epssqd, itolsqd; __constant__ volatile float *massd, *posxd, *posyd, *poszd, *velxd, *velyd, *velzd, *accxd, *accyd, *acczd; __constant__ volatile float *maxxd, *maxyd, *maxzd, *minxd, *minyd, *minzd; __constant__ volatile int *errd, *sortd, *childd, *countd, *startd; __device__ volatile int stepd, bottomd, maxdepthd, blkcntd; __device__ volatile float radiusd; /******************************************************************************/ /*** initialize memory ********************************************************/ /******************************************************************************/ __global__ void InitializationKernel() { *errd = 0; stepd = -1; maxdepthd = 1; blkcntd = 0; } /******************************************************************************/ /*** compute center and radius ************************************************/ /******************************************************************************/ __global__ __launch_bounds__(THREADS1, FACTOR1) void 
BoundingBoxKernel() { register int i, j, k, inc; register float val, minx, maxx, miny, maxy, minz, maxz; __shared__ volatile float sminx[THREADS1], smaxx[THREADS1], sminy[THREADS1], smaxy[THREADS1], sminz[THREADS1], smaxz[THREADS1]; // initialize with valid data (in case #bodies < #threads) minx = maxx = posxd[0]; miny = maxy = posyd[0]; minz = maxz = poszd[0]; // scan all bodies i = threadIdx.x; inc = THREADS1 * gridDim.x; for (j = i + blockIdx.x * THREADS1; j < nbodiesd; j += inc) { val = posxd[j]; minx = min(minx, val); maxx = max(maxx, val); val = posyd[j]; miny = min(miny, val); maxy = max(maxy, val); val = poszd[j]; minz = min(minz, val); maxz = max(maxz, val); } // reduction in shared memory sminx[i] = minx; smaxx[i] = maxx; sminy[i] = miny; smaxy[i] = maxy; sminz[i] = minz; smaxz[i] = maxz; for (j = THREADS1 / 2; j > 0; j /= 2) { __syncthreads(); if (i < j) { k = i + j; sminx[i] = minx = min(minx, sminx[k]); smaxx[i] = maxx = max(maxx, smaxx[k]); sminy[i] = miny = min(miny, sminy[k]); smaxy[i] = maxy = max(maxy, smaxy[k]); sminz[i] = minz = min(minz, sminz[k]); smaxz[i] = maxz = max(maxz, smaxz[k]); } } // write block result to global memory if (i == 0) { k = blockIdx.x; minxd[k] = minx; maxxd[k] = maxx; minyd[k] = miny; maxyd[k] = maxy; minzd[k] = minz; maxzd[k] = maxz; __threadfence(); inc = gridDim.x - 1; if (inc == atomicInc((unsigned int *)&blkcntd, inc)) { // I'm the last block, so combine all block results for (j = 0; j <= inc; j++) { minx = min(minx, minxd[j]); maxx = max(maxx, maxxd[j]); miny = min(miny, minyd[j]); maxy = max(maxy, maxyd[j]); minz = min(minz, minzd[j]); maxz = max(maxz, maxzd[j]); } // compute 'radius' val = max(maxx - minx, maxy - miny); radiusd = max(val, maxz - minz) * 0.5f; // create root node k = nnodesd; bottomd = k; massd[k] = -1.0f; startd[k] = 0; posxd[k] = (minx + maxx) * 0.5f; posyd[k] = (miny + maxy) * 0.5f; poszd[k] = (minz + maxz) * 0.5f; k *= 8; for (i = 0; i < 8; i++) childd[k + i] = -1; stepd++; } } } /******************************************************************************/ /*** build tree ***************************************************************/ /******************************************************************************/ __global__ __launch_bounds__(THREADS2, FACTOR2) void TreeBuildingKernel() { register int i, j, k, depth, localmaxdepth, skip, inc; register float x, y, z, r; register float px, py, pz; register int ch, n, cell, locked, patch; register float radius, rootx, rooty, rootz; // cache root data radius = radiusd; rootx = posxd[nnodesd]; rooty = posyd[nnodesd]; rootz = poszd[nnodesd]; localmaxdepth = 1; skip = 1; inc = blockDim.x * gridDim.x; i = threadIdx.x + blockIdx.x * blockDim.x; // iterate over all bodies assigned to thread while (i < nbodiesd) { if (skip != 0) { // new body, so start traversing at root skip = 0; px = posxd[i]; py = posyd[i]; pz = poszd[i]; n = nnodesd; depth = 1; r = radius; j = 0; // determine which child to follow if (rootx < px) j = 1; if (rooty < py) j += 2; if (rootz < pz) j += 4; } // follow path to leaf cell ch = childd[n*8+j]; while (ch >= nbodiesd) { n = ch; depth++; r *= 0.5f; j = 0; // determine which child to follow if (posxd[n] < px) j = 1; if (posyd[n] < py) j += 2; if (poszd[n] < pz) j += 4; ch = childd[n*8+j]; } if (ch != -2) { // skip if child pointer is locked and try again later locked = n*8+j; if (ch == atomicCAS((int *)&childd[locked], ch, -2)) { // try to lock if (ch == -1) { // if null, just insert the new body childd[locked] = i; } else { // there already is a body 
in this position patch = -1; // create new cell(s) and insert the old and new body do { depth++; cell = atomicSub((int *)&bottomd, 1) - 1; if (cell <= nbodiesd) { *errd = 1; bottomd = nnodesd; } patch = max(patch, cell); x = (j & 1) * r; y = ((j >> 1) & 1) * r; z = ((j >> 2) & 1) * r; r *= 0.5f; massd[cell] = -1.0f; startd[cell] = -1; x = posxd[cell] = posxd[n] - r + x; y = posyd[cell] = posyd[n] - r + y; z = poszd[cell] = poszd[n] - r + z; for (k = 0; k < 8; k++) childd[cell*8+k] = -1; if (patch != cell) { childd[n*8+j] = cell; } j = 0; if (x < posxd[ch]) j = 1; if (y < posyd[ch]) j += 2; if (z < poszd[ch]) j += 4; childd[cell*8+j] = ch; n = cell; j = 0; if (x < px) j = 1; if (y < py) j += 2; if (z < pz) j += 4; ch = childd[n*8+j]; // repeat until the two bodies are different children } while (ch >= 0); childd[n*8+j] = i; __threadfence(); // push out subtree childd[locked] = patch; } __threadfence(); // push out results localmaxdepth = max(depth, localmaxdepth); i += inc; // move on to next body skip = 1; } } __syncthreads(); // throttle } // record maximum tree depth atomicMax((int *)&maxdepthd, localmaxdepth); } /******************************************************************************/ /*** compute center of mass ***************************************************/ /******************************************************************************/ __global__ __launch_bounds__(THREADS3, FACTOR3) void SummarizationKernel() { register int i, j, k, ch, inc, missing, cnt, bottom; register float m, cm, px, py, pz; __shared__ volatile int child[THREADS3 * 8]; bottom = bottomd; inc = blockDim.x * gridDim.x; k = (bottom & (-WARPSIZE)) + threadIdx.x + blockIdx.x * blockDim.x; // align to warp size if (k < bottom) k += inc; missing = 0; // iterate over all cells assigned to thread while (k <= nnodesd) { if (missing == 0) { // new cell, so initialize cm = 0.0f; px = 0.0f; py = 0.0f; pz = 0.0f; cnt = 0; j = 0; for (i = 0; i < 8; i++) { ch = childd[k*8+i]; if (ch >= 0) { if (i != j) { // move children to front (needed later for speed) childd[k*8+i] = -1; childd[k*8+j] = ch; } child[missing*THREADS3+threadIdx.x] = ch; // cache missing children m = massd[ch]; missing++; if (m >= 0.0f) { // child is ready missing--; if (ch >= nbodiesd) { // count bodies (needed later) cnt += countd[ch] - 1; } // add child's contribution cm += m; px += posxd[ch] * m; py += posyd[ch] * m; pz += poszd[ch] * m; } j++; } } __threadfence(); // for performance only cnt += j; } if (missing != 0) { do { // poll missing child ch = child[(missing-1)*THREADS3+threadIdx.x]; m = massd[ch]; if (m >= 0.0f) { // child is now ready missing--; if (ch >= nbodiesd) { // count bodies (needed later) cnt += countd[ch] - 1; } // add child's contribution cm += m; px += posxd[ch] * m; py += posyd[ch] * m; pz += poszd[ch] * m; } // repeat until we are done or child is not ready } while ((m >= 0.0f) && (missing != 0)); } if (missing == 0) { // all children are ready, so store computed information countd[k] = cnt; m = 1.0f / cm; posxd[k] = px * m; posyd[k] = py * m; poszd[k] = pz * m; __threadfence(); // make sure data are visible before setting mass massd[k] = cm; __threadfence(); // push out results k += inc; // move on to next cell } } } /******************************************************************************/ /*** sort bodies **************************************************************/ /******************************************************************************/ __global__ __launch_bounds__(THREADS4, FACTOR4) void SortKernel() { 
register int i, k, ch, dec, start, bottom; bottom = bottomd; dec = blockDim.x * gridDim.x; k = nnodesd + 1 - dec + threadIdx.x + blockIdx.x * blockDim.x; // iterate over all cells assigned to thread while (k >= bottom) { start = startd[k]; if (start >= 0) { for (i = 0; i < 8; i++) { ch = childd[k*8+i]; if (ch >= nbodiesd) { // child is a cell startd[ch] = start; // set start ID of child start += countd[ch]; // add #bodies in subtree } else if (ch >= 0) { // child is a body sortd[start] = ch; // record body in 'sorted' array start++; } } k -= dec; // move on to next cell } __syncthreads(); // throttle } } /******************************************************************************/ /*** compute force ************************************************************/ /******************************************************************************/ __global__ __launch_bounds__(THREADS5, FACTOR5) void ForceCalculationKernel() { register int i, j, k, n, depth, base, sbase, diff; register float px, py, pz, ax, ay, az, dx, dy, dz, tmp; __shared__ volatile int pos[MAXDEPTH * THREADS5/WARPSIZE], node[MAXDEPTH * THREADS5/WARPSIZE]; __shared__ volatile float dq[MAXDEPTH * THREADS5/WARPSIZE]; __shared__ volatile int step, maxdepth; if (0 == threadIdx.x) { step = stepd; maxdepth = maxdepthd; tmp = radiusd; // precompute values that depend only on tree level dq[0] = tmp * tmp * itolsqd; for (i = 1; i < maxdepth; i++) { dq[i] = dq[i - 1] * 0.25f; } if (maxdepth > MAXDEPTH) { *errd = maxdepth; } } __syncthreads(); if (maxdepth <= MAXDEPTH) { // figure out first thread in each warp (lane 0) base = threadIdx.x / WARPSIZE; sbase = base * WARPSIZE; j = base * MAXDEPTH; diff = threadIdx.x - sbase; // make multiple copies to avoid index calculations later if (diff < MAXDEPTH) { dq[diff+j] = dq[diff]; } __syncthreads(); // iterate over all bodies assigned to thread for (k = threadIdx.x + blockIdx.x * blockDim.x; k < nbodiesd; k += blockDim.x * gridDim.x) { i = sortd[k]; // get permuted/sorted index // cache position info px = posxd[i]; py = posyd[i]; pz = poszd[i]; ax = 0.0f; ay = 0.0f; az = 0.0f; // initialize iteration stack, i.e., push root node onto stack depth = j; if (sbase == threadIdx.x) { node[j] = nnodesd; pos[j] = 0; } __threadfence(); // make sure it's visible while (depth >= j) { // stack is not empty while (pos[depth] < 8) { // node on top of stack has more children to process n = childd[node[depth]*8+pos[depth]]; // load child pointer if (sbase == threadIdx.x) { // I'm the first thread in the warp pos[depth]++; } __threadfence(); // make sure it's visible if (n >= 0) { dx = posxd[n] - px; dy = posyd[n] - py; dz = poszd[n] - pz; tmp = dx*dx + (dy*dy + (dz*dz + epssqd)); // compute distance squared (plus softening) if ((n < nbodiesd) || __all(tmp >= dq[depth])) { // check if all threads agree that cell is far enough away (or is a body) tmp = rsqrtf(tmp); // compute distance tmp = massd[n] * tmp * tmp * tmp; ax += dx * tmp; ay += dy * tmp; az += dz * tmp; } else { // push cell onto stack depth++; if (sbase == threadIdx.x) { node[depth] = n; pos[depth] = 0; } __threadfence(); // make sure it's visible } } else { depth = max(j, depth - 1); // early out because all remaining children are also zero } } depth--; // done with this level } if (step > 0) { // update velocity velxd[i] += (ax - accxd[i]) * dthfd; velyd[i] += (ay - accyd[i]) * dthfd; velzd[i] += (az - acczd[i]) * dthfd; } // save computed acceleration accxd[i] = ax; accyd[i] = ay; acczd[i] = az; } } } 
/******************************************************************************/ /*** advance bodies ***********************************************************/ /******************************************************************************/ extern "C" __global__ void dummy(float4* data) { } __global__ __launch_bounds__(THREADS6, FACTOR6) void IntegrationKernel() { register int i, inc; register float dvelx, dvely, dvelz; register float velhx, velhy, velhz; // iterate over all bodies assigned to thread inc = blockDim.x * gridDim.x; for (i = threadIdx.x + blockIdx.x * blockDim.x; i < nbodiesd; i += inc) { // integrate dvelx = accxd[i] * dthfd; dvely = accyd[i] * dthfd; dvelz = acczd[i] * dthfd; velhx = velxd[i] + dvelx; velhy = velyd[i] + dvely; velhz = velzd[i] + dvelz; posxd[i] += velhx * dtimed; posyd[i] += velhy * dtimed; poszd[i] += velhz * dtimed; velxd[i] = velhx + dvelx; velyd[i] = velhy + dvely; velzd[i] = velhz + dvelz; } } /******************************************************************************/ static void CudaTest(char *msg) { hipError_t e; hipDeviceSynchronize(); if (hipSuccess != (e = hipGetLastError())) { fprintf(stderr, "%s: %d\n", msg, e); fprintf(stderr, "%s\n", hipGetErrorString(e)); exit(-1); } } /******************************************************************************/ // random number generator #define MULT 1103515245 #define ADD 12345 #define MASK 0x7FFFFFFF #define TWOTO31 2147483648.0 static int A = 1; static int B = 0; static int randx = 1; static int lastrand; static void drndset(int seed) { A = 1; B = 0; randx = (A * seed + B) & MASK; A = (MULT * A) & MASK; B = (MULT * B + ADD) & MASK; } static double drnd() { lastrand = randx; randx = (A * randx + B) & MASK; return (double)lastrand / TWOTO31; } /******************************************************************************/ int main(int argc, char *argv[]) { register int i, run, blocks; register int nnodes, nbodies, step, timesteps; register int runtime, mintime; int error; register float dtime, dthf, epssq, itolsq; float time, timing[7]; clock_t starttime, endtime; hipEvent_t start, stop; float *mass, *posx, *posy, *posz, *velx, *vely, *velz; int *errl, *sortl, *childl, *countl, *startl; float *massl; float *posxl, *posyl, *poszl; float *velxl, *velyl, *velzl; float *accxl, *accyl, *acczl; float *maxxl, *maxyl, *maxzl; float *minxl, *minyl, *minzl; register double rsc, vsc, r, v, x, y, z, sq, scale; // perform some checks fprintf(stderr, "CUDA BarnesHut v2.0\n"); if (argc != 3) { fprintf(stderr, "\n"); fprintf(stderr, "arguments: number_of_bodies number_of_timesteps\n"); exit(-1); } int deviceCount; hipGetDeviceCount(&deviceCount); if (deviceCount == 0) { fprintf(stderr, "There is no device supporting CUDA\n"); exit(-1); } hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, 0); if ((deviceProp.major == 9999) && (deviceProp.minor == 9999)) { fprintf(stderr, "There is no CUDA capable device\n"); exit(-1); } /// if (deviceProp.major < 2) { // fprintf(stderr, "Need at least compute capability 2.0\n"); // exit(-1); //} // if (deviceProp.warpSize != WARPSIZE) { // fprintf(stderr, "Warp size must be %d\n", deviceProp.warpSize); // exit(-1); // } blocks = deviceProp.multiProcessorCount; fprintf(stderr, "blocks = %d\n", blocks); if ((WARPSIZE <= 0) || (WARPSIZE & (WARPSIZE-1) != 0)) { fprintf(stderr, "Warp size must be greater than zero and a power of two\n"); exit(-1); } if (MAXDEPTH > WARPSIZE) { fprintf(stderr, "MAXDEPTH must be less than or equal to WARPSIZE\n"); exit(-1); } if ((THREADS1 
<= 0) || (THREADS1 & (THREADS1-1) != 0)) { fprintf(stderr, "THREADS1 must be greater than zero and a power of two\n"); exit(-1); } // set L1/shared memory configuration hipFuncSetCacheConfig(BoundingBoxKernel, hipFuncCachePreferShared); hipFuncSetCacheConfig(TreeBuildingKernel, hipFuncCachePreferL1); hipFuncSetCacheConfig(SummarizationKernel, hipFuncCachePreferShared); hipFuncSetCacheConfig(SortKernel, hipFuncCachePreferL1); hipFuncSetCacheConfig(ForceCalculationKernel, hipFuncCachePreferL1); hipFuncSetCacheConfig(IntegrationKernel, hipFuncCachePreferL1); hipGetLastError(); // reset error value for (run = 0; run < 3; run++) { for (i = 0; i < 7; i++) timing[i] = 0.0f; nbodies = atoi(argv[1]); if (nbodies < 1) { fprintf(stderr, "nbodies is too small: %d\n", nbodies); exit(-1); } if (nbodies > (1 << 30)) { fprintf(stderr, "nbodies is too large: %d\n", nbodies); exit(-1); } nnodes = nbodies * 2; if (nnodes < 1024*blocks) nnodes = 1024*blocks; while ((nnodes & (WARPSIZE-1)) != 0) nnodes++; nnodes--; timesteps = atoi(argv[2]); dtime = 0.025; dthf = dtime * 0.5f; epssq = 0.05 * 0.05; itolsq = 1.0f / (0.5 * 0.5); // allocate memory if (run == 0) { fprintf(stderr, "nodes = %d\n", nnodes+1); fprintf(stderr, "configuration: %d bodies, %d time steps\n", nbodies, timesteps); mass = (float *)malloc(sizeof(float) * nbodies); if (mass == NULL) {fprintf(stderr, "cannot allocate mass\n"); exit(-1);} posx = (float *)malloc(sizeof(float) * nbodies); if (posx == NULL) {fprintf(stderr, "cannot allocate posx\n"); exit(-1);} posy = (float *)malloc(sizeof(float) * nbodies); if (posy == NULL) {fprintf(stderr, "cannot allocate posy\n"); exit(-1);} posz = (float *)malloc(sizeof(float) * nbodies); if (posz == NULL) {fprintf(stderr, "cannot allocate posz\n"); exit(-1);} velx = (float *)malloc(sizeof(float) * nbodies); if (velx == NULL) {fprintf(stderr, "cannot allocate velx\n"); exit(-1);} vely = (float *)malloc(sizeof(float) * nbodies); if (vely == NULL) {fprintf(stderr, "cannot allocate vely\n"); exit(-1);} velz = (float *)malloc(sizeof(float) * nbodies); if (velz == NULL) {fprintf(stderr, "cannot allocate velz\n"); exit(-1);} if (hipSuccess != hipMalloc((void **)&errl, sizeof(int))) fprintf(stderr, "could not allocate errd\n"); CudaTest("couldn't allocate errd"); if (hipSuccess != hipMalloc((void **)&childl, sizeof(int) * (nnodes+1) * 8)) fprintf(stderr, "could not allocate childd\n"); CudaTest("couldn't allocate childd"); if (hipSuccess != hipMalloc((void **)&massl, sizeof(float) * (nnodes+1))) fprintf(stderr, "could not allocate massd\n"); CudaTest("couldn't allocate massd"); if (hipSuccess != hipMalloc((void **)&posxl, sizeof(float) * (nnodes+1))) fprintf(stderr, "could not allocate posxd\n"); CudaTest("couldn't allocate posxd"); if (hipSuccess != hipMalloc((void **)&posyl, sizeof(float) * (nnodes+1))) fprintf(stderr, "could not allocate posyd\n"); CudaTest("couldn't allocate posyd"); if (hipSuccess != hipMalloc((void **)&poszl, sizeof(float) * (nnodes+1))) fprintf(stderr, "could not allocate poszd\n"); CudaTest("couldn't allocate poszd"); if (hipSuccess != hipMalloc((void **)&countl, sizeof(int) * (nnodes+1))) fprintf(stderr, "could not allocate countd\n"); CudaTest("couldn't allocate countd"); if (hipSuccess != hipMalloc((void **)&startl, sizeof(int) * (nnodes+1))) fprintf(stderr, "could not allocate startd\n"); CudaTest("couldn't allocate startd"); // alias arrays int inc = (nbodies + WARPSIZE - 1) & (-WARPSIZE); velxl = (float *)&childl[0*inc]; velyl = (float *)&childl[1*inc]; velzl = (float *)&childl[2*inc]; 
accxl = (float *)&childl[3*inc]; accyl = (float *)&childl[4*inc]; acczl = (float *)&childl[5*inc]; sortl = (int *)&childl[6*inc]; if (hipSuccess != hipMalloc((void **)&maxxl, sizeof(float) * blocks)) fprintf(stderr, "could not allocate maxxd\n"); CudaTest("couldn't allocate maxxd"); if (hipSuccess != hipMalloc((void **)&maxyl, sizeof(float) * blocks)) fprintf(stderr, "could not allocate maxyd\n"); CudaTest("couldn't allocate maxyd"); if (hipSuccess != hipMalloc((void **)&maxzl, sizeof(float) * blocks)) fprintf(stderr, "could not allocate maxzd\n"); CudaTest("couldn't allocate maxzd"); if (hipSuccess != hipMalloc((void **)&minxl, sizeof(float) * blocks)) fprintf(stderr, "could not allocate minxd\n"); CudaTest("couldn't allocate minxd"); if (hipSuccess != hipMalloc((void **)&minyl, sizeof(float) * blocks)) fprintf(stderr, "could not allocate minyd\n"); CudaTest("couldn't allocate minyd"); if (hipSuccess != hipMalloc((void **)&minzl, sizeof(float) * blocks)) fprintf(stderr, "could not allocate minzd\n"); CudaTest("couldn't allocate minzd"); if (hipSuccess != hipMemcpyToSymbol(nnodesd, &nnodes, sizeof(int))) fprintf(stderr, "copying of nnodes to device failed\n"); CudaTest("nnode copy to device failed"); if (hipSuccess != hipMemcpyToSymbol(nbodiesd, &nbodies, sizeof(int))) fprintf(stderr, "copying of nbodies to device failed\n"); CudaTest("nbody copy to device failed"); if (hipSuccess != hipMemcpyToSymbol(errd, &errl, sizeof(int))) fprintf(stderr, "copying of err to device failed\n"); CudaTest("err copy to device failed"); if (hipSuccess != hipMemcpyToSymbol(dtimed, &dtime, sizeof(float))) fprintf(stderr, "copying of dtime to device failed\n"); CudaTest("dtime copy to device failed"); if (hipSuccess != hipMemcpyToSymbol(dthfd, &dthf, sizeof(float))) fprintf(stderr, "copying of dthf to device failed\n"); CudaTest("dthf copy to device failed"); if (hipSuccess != hipMemcpyToSymbol(epssqd, &epssq, sizeof(float))) fprintf(stderr, "copying of epssq to device failed\n"); CudaTest("epssq copy to device failed"); if (hipSuccess != hipMemcpyToSymbol(itolsqd, &itolsq, sizeof(float))) fprintf(stderr, "copying of itolsq to device failed\n"); CudaTest("itolsq copy to device failed"); if (hipSuccess != hipMemcpyToSymbol(sortd, &sortl, sizeof(int))) fprintf(stderr, "copying of sortl to device failed\n"); CudaTest("sortl copy to device failed"); if (hipSuccess != hipMemcpyToSymbol(countd, &countl, sizeof(int))) fprintf(stderr, "copying of countl to device failed\n"); CudaTest("countl copy to device failed"); if (hipSuccess != hipMemcpyToSymbol(startd, &startl, sizeof(int))) fprintf(stderr, "copying of startl to device failed\n"); CudaTest("startl copy to device failed"); if (hipSuccess != hipMemcpyToSymbol(childd, &childl, sizeof(int))) fprintf(stderr, "copying of childl to device failed\n"); CudaTest("childl copy to device failed"); if (hipSuccess != hipMemcpyToSymbol(massd, &massl, sizeof(int))) fprintf(stderr, "copying of massl to device failed\n"); CudaTest("massl copy to device failed"); if (hipSuccess != hipMemcpyToSymbol(posxd, &posxl, sizeof(int))) fprintf(stderr, "copying of posxl to device failed\n"); CudaTest("posxl copy to device failed"); if (hipSuccess != hipMemcpyToSymbol(posyd, &posyl, sizeof(int))) fprintf(stderr, "copying of posyl to device failed\n"); CudaTest("posyl copy to device failed"); if (hipSuccess != hipMemcpyToSymbol(poszd, &poszl, sizeof(int))) fprintf(stderr, "copying of poszl to device failed\n"); CudaTest("poszl copy to device failed"); if (hipSuccess != 
hipMemcpyToSymbol(velxd, &velxl, sizeof(int))) fprintf(stderr, "copying of velxl to device failed\n"); CudaTest("velxl copy to device failed"); if (hipSuccess != hipMemcpyToSymbol(velyd, &velyl, sizeof(int))) fprintf(stderr, "copying of velyl to device failed\n"); CudaTest("velyl copy to device failed"); if (hipSuccess != hipMemcpyToSymbol(velzd, &velzl, sizeof(int))) fprintf(stderr, "copying of velzl to device failed\n"); CudaTest("velzl copy to device failed"); if (hipSuccess != hipMemcpyToSymbol(accxd, &accxl, sizeof(int))) fprintf(stderr, "copying of accxl to device failed\n"); CudaTest("accxl copy to device failed"); if (hipSuccess != hipMemcpyToSymbol(accyd, &accyl, sizeof(int))) fprintf(stderr, "copying of accyl to device failed\n"); CudaTest("accyl copy to device failed"); if (hipSuccess != hipMemcpyToSymbol(acczd, &acczl, sizeof(int))) fprintf(stderr, "copying of acczl to device failed\n"); CudaTest("acczl copy to device failed"); if (hipSuccess != hipMemcpyToSymbol(maxxd, &maxxl, sizeof(int))) fprintf(stderr, "copying of maxxl to device failed\n"); CudaTest("maxxl copy to device failed"); if (hipSuccess != hipMemcpyToSymbol(maxyd, &maxyl, sizeof(int))) fprintf(stderr, "copying of maxyl to device failed\n"); CudaTest("maxyl copy to device failed"); if (hipSuccess != hipMemcpyToSymbol(maxzd, &maxzl, sizeof(int))) fprintf(stderr, "copying of maxzl to device failed\n"); CudaTest("maxzl copy to device failed"); if (hipSuccess != hipMemcpyToSymbol(minxd, &minxl, sizeof(int))) fprintf(stderr, "copying of minxl to device failed\n"); CudaTest("minxl copy to device failed"); if (hipSuccess != hipMemcpyToSymbol(minyd, &minyl, sizeof(int))) fprintf(stderr, "copying of minyl to device failed\n"); CudaTest("minyl copy to device failed"); if (hipSuccess != hipMemcpyToSymbol(minzd, &minzl, sizeof(int))) fprintf(stderr, "copying of minzl to device failed\n"); CudaTest("minzl copy to device failed"); } // generate input drndset(7); rsc = (3 * 3.1415926535897932384626433832795) / 16; vsc = sqrt(1.0 / rsc); for (i = 0; i < nbodies; i++) { mass[i] = 1.0 / nbodies; r = 1.0 / sqrt(pow(drnd()*0.999, -2.0/3.0) - 1); do { x = drnd()*2.0 - 1.0; y = drnd()*2.0 - 1.0; z = drnd()*2.0 - 1.0; sq = x*x + y*y + z*z; } while (sq > 1.0); scale = rsc * r / sqrt(sq); posx[i] = x * scale; posy[i] = y * scale; posz[i] = z * scale; do { x = drnd(); y = drnd() * 0.1; } while (y > x*x * pow(1 - x*x, 3.5)); v = x * sqrt(2.0 / sqrt(1 + r*r)); do { x = drnd()*2.0 - 1.0; y = drnd()*2.0 - 1.0; z = drnd()*2.0 - 1.0; sq = x*x + y*y + z*z; } while (sq > 1.0); scale = vsc * v / sqrt(sq); velx[i] = x * scale; vely[i] = y * scale; velz[i] = z * scale; } if (hipSuccess != hipMemcpy(massl, mass, sizeof(float) * nbodies, hipMemcpyHostToDevice)) fprintf(stderr, "copying of mass to device failed\n"); CudaTest("mass copy to device failed"); if (hipSuccess != hipMemcpy(posxl, posx, sizeof(float) * nbodies, hipMemcpyHostToDevice)) fprintf(stderr, "copying of posx to device failed\n"); CudaTest("posx copy to device failed"); if (hipSuccess != hipMemcpy(posyl, posy, sizeof(float) * nbodies, hipMemcpyHostToDevice)) fprintf(stderr, "copying of posy to device failed\n"); CudaTest("posy copy to device failed"); if (hipSuccess != hipMemcpy(poszl, posz, sizeof(float) * nbodies, hipMemcpyHostToDevice)) fprintf(stderr, "copying of posz to device failed\n"); CudaTest("posz copy to device failed"); if (hipSuccess != hipMemcpy(velxl, velx, sizeof(float) * nbodies, hipMemcpyHostToDevice)) fprintf(stderr, "copying of velx to device failed\n"); 
CudaTest("velx copy to device failed"); if (hipSuccess != hipMemcpy(velyl, vely, sizeof(float) * nbodies, hipMemcpyHostToDevice)) fprintf(stderr, "copying of vely to device failed\n"); CudaTest("vely copy to device failed"); if (hipSuccess != hipMemcpy(velzl, velz, sizeof(float) * nbodies, hipMemcpyHostToDevice)) fprintf(stderr, "copying of velz to device failed\n"); CudaTest("velz copy to device failed"); // run timesteps (lauch GPU kernels) hipEventCreate(&start); hipEventCreate(&stop); starttime = clock(); hipEventRecord(start, 0); hipLaunchKernelGGL(( InitializationKernel), dim3(1), dim3(1), 0, 0, ); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop); timing[0] += time; CudaTest("kernel 0 launch failed"); for (step = 0; step < timesteps; step++) { hipEventRecord(start, 0); hipLaunchKernelGGL(( BoundingBoxKernel), dim3(blocks * FACTOR1), dim3(THREADS1), 0, 0, ); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop); timing[1] += time; CudaTest("kernel 1 launch failed"); hipEventRecord(start, 0); hipLaunchKernelGGL(( TreeBuildingKernel), dim3(blocks * FACTOR2), dim3(THREADS2), 0, 0, ); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop); timing[2] += time; CudaTest("kernel 2 launch failed"); hipEventRecord(start, 0); hipLaunchKernelGGL(( SummarizationKernel), dim3(blocks * FACTOR3), dim3(THREADS3), 0, 0, ); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop); timing[3] += time; CudaTest("kernel 3 launch failed"); hipEventRecord(start, 0); hipLaunchKernelGGL(( SortKernel), dim3(blocks * FACTOR4), dim3(THREADS4), 0, 0, ); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop); timing[4] += time; CudaTest("kernel 4 launch failed"); hipEventRecord(start, 0); hipLaunchKernelGGL(( ForceCalculationKernel), dim3(blocks * FACTOR5), dim3(THREADS5), 0, 0, ); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop); timing[5] += time; CudaTest("kernel 5 launch failed"); hipEventRecord(start, 0); hipLaunchKernelGGL(( IntegrationKernel), dim3(blocks * FACTOR6), dim3(THREADS6), 0, 0, ); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop); timing[6] += time; CudaTest("kernel 6 launch failed"); } endtime = clock(); CudaTest("kernel launch failed"); hipEventDestroy(start); hipEventDestroy(stop); // transfer result back to CPU if (hipSuccess != hipMemcpy(&error, errl, sizeof(int), hipMemcpyDeviceToHost)) fprintf(stderr, "copying of err from device failed\n"); CudaTest("err copy from device failed"); if (hipSuccess != hipMemcpy(posx, posxl, sizeof(float) * nbodies, hipMemcpyDeviceToHost)) fprintf(stderr, "copying of posx from device failed\n"); CudaTest("posx copy from device failed"); if (hipSuccess != hipMemcpy(posy, posyl, sizeof(float) * nbodies, hipMemcpyDeviceToHost)) fprintf(stderr, "copying of posy from device failed\n"); CudaTest("posy copy from device failed"); if (hipSuccess != hipMemcpy(posz, poszl, sizeof(float) * nbodies, hipMemcpyDeviceToHost)) fprintf(stderr, "copying of posz from device failed\n"); CudaTest("posz copy from device failed"); if (hipSuccess != hipMemcpy(velx, velxl, sizeof(float) * nbodies, hipMemcpyDeviceToHost)) fprintf(stderr, "copying of velx from device failed\n"); CudaTest("velx copy from device failed"); if (hipSuccess != hipMemcpy(vely, velyl, sizeof(float) * nbodies, hipMemcpyDeviceToHost)) 
fprintf(stderr, "copying of vely from device failed\n"); CudaTest("vely copy from device failed"); if (hipSuccess != hipMemcpy(velz, velzl, sizeof(float) * nbodies, hipMemcpyDeviceToHost)) fprintf(stderr, "copying of velz from device failed\n"); CudaTest("velz copy from device failed"); runtime = (int) (1000.0f * (endtime - starttime) / CLOCKS_PER_SEC); fprintf(stderr, "runtime: %d ms (", runtime); time = 0; for (i = 1; i < 7; i++) { fprintf(stderr, " %.1f ", timing[i]); time += timing[i]; } if (error == 0) { fprintf(stderr, ") = %.1f\n", time); } else { fprintf(stderr, ") = %.1f FAILED %d\n", time, error); } if ((run == 0) || (mintime > runtime)) mintime = runtime; } fprintf(stderr, "mintime: %d ms\n", mintime); // print output // for (i = 0; i < nbodies; i++) { printf("%.2e %.2e %.2e\n", posx[i], posy[i], posz[i]); // } free(mass); free(posx); free(posy); free(posz); free(velx); free(vely); free(velz); hipFree(errl); hipFree(childl); hipFree(massl); hipFree(posxl); hipFree(posyl); hipFree(poszl); hipFree(countl); hipFree(startl); hipFree(maxxl); hipFree(maxyl); hipFree(maxzl); hipFree(minxl); hipFree(minyl); hipFree(minzl); return 0; }
53cc9a9362a3140e051b916533c727cb6ff5913a.cu
/* CUDA BarnesHut v2.0: Simulation of the gravitational forces in a galactic cluster using the Barnes-Hut n-body algorithm Copyright (c) 2011, Texas State University-San Marcos. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Texas State University-San Marcos nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. Author: Martin Burtscher */ #include <stdlib.h> #include <stdio.h> #include <math.h> #include <time.h> #include <cuda.h> // thread count #define THREADS1 512 /* must be a power of 2 */ #define THREADS2 1024 #define THREADS3 1024 #define THREADS4 256 #define THREADS5 256 #define THREADS6 512 // block count = factor * #SMs #define FACTOR1 3 #define FACTOR2 1 #define FACTOR3 1 /* must all be resident at the same time */ #define FACTOR4 1 /* must all be resident at the same time */ #define FACTOR5 5 #define FACTOR6 3 #define WARPSIZE 32 #define MAXDEPTH 32 /******************************************************************************/ // childd is aliased with velxd, velyd, velzd, accxd, accyd, acczd, and sortd but they never use the same memory locations __device__ __constant__ int nnodesd, nbodiesd; __constant__ float dtimed, dthfd, epssqd, itolsqd; __constant__ volatile float *massd, *posxd, *posyd, *poszd, *velxd, *velyd, *velzd, *accxd, *accyd, *acczd; __constant__ volatile float *maxxd, *maxyd, *maxzd, *minxd, *minyd, *minzd; __constant__ volatile int *errd, *sortd, *childd, *countd, *startd; __device__ volatile int stepd, bottomd, maxdepthd, blkcntd; __device__ volatile float radiusd; /******************************************************************************/ /*** initialize memory ********************************************************/ /******************************************************************************/ __global__ void InitializationKernel() { *errd = 0; stepd = -1; maxdepthd = 1; blkcntd = 0; } /******************************************************************************/ /*** compute center and radius ************************************************/ /******************************************************************************/ __global__ __launch_bounds__(THREADS1, FACTOR1) void BoundingBoxKernel() { register int i, j, k, inc; register float val, minx, maxx, 
miny, maxy, minz, maxz; __shared__ volatile float sminx[THREADS1], smaxx[THREADS1], sminy[THREADS1], smaxy[THREADS1], sminz[THREADS1], smaxz[THREADS1]; // initialize with valid data (in case #bodies < #threads) minx = maxx = posxd[0]; miny = maxy = posyd[0]; minz = maxz = poszd[0]; // scan all bodies i = threadIdx.x; inc = THREADS1 * gridDim.x; for (j = i + blockIdx.x * THREADS1; j < nbodiesd; j += inc) { val = posxd[j]; minx = min(minx, val); maxx = max(maxx, val); val = posyd[j]; miny = min(miny, val); maxy = max(maxy, val); val = poszd[j]; minz = min(minz, val); maxz = max(maxz, val); } // reduction in shared memory sminx[i] = minx; smaxx[i] = maxx; sminy[i] = miny; smaxy[i] = maxy; sminz[i] = minz; smaxz[i] = maxz; for (j = THREADS1 / 2; j > 0; j /= 2) { __syncthreads(); if (i < j) { k = i + j; sminx[i] = minx = min(minx, sminx[k]); smaxx[i] = maxx = max(maxx, smaxx[k]); sminy[i] = miny = min(miny, sminy[k]); smaxy[i] = maxy = max(maxy, smaxy[k]); sminz[i] = minz = min(minz, sminz[k]); smaxz[i] = maxz = max(maxz, smaxz[k]); } } // write block result to global memory if (i == 0) { k = blockIdx.x; minxd[k] = minx; maxxd[k] = maxx; minyd[k] = miny; maxyd[k] = maxy; minzd[k] = minz; maxzd[k] = maxz; __threadfence(); inc = gridDim.x - 1; if (inc == atomicInc((unsigned int *)&blkcntd, inc)) { // I'm the last block, so combine all block results for (j = 0; j <= inc; j++) { minx = min(minx, minxd[j]); maxx = max(maxx, maxxd[j]); miny = min(miny, minyd[j]); maxy = max(maxy, maxyd[j]); minz = min(minz, minzd[j]); maxz = max(maxz, maxzd[j]); } // compute 'radius' val = max(maxx - minx, maxy - miny); radiusd = max(val, maxz - minz) * 0.5f; // create root node k = nnodesd; bottomd = k; massd[k] = -1.0f; startd[k] = 0; posxd[k] = (minx + maxx) * 0.5f; posyd[k] = (miny + maxy) * 0.5f; poszd[k] = (minz + maxz) * 0.5f; k *= 8; for (i = 0; i < 8; i++) childd[k + i] = -1; stepd++; } } } /******************************************************************************/ /*** build tree ***************************************************************/ /******************************************************************************/ __global__ __launch_bounds__(THREADS2, FACTOR2) void TreeBuildingKernel() { register int i, j, k, depth, localmaxdepth, skip, inc; register float x, y, z, r; register float px, py, pz; register int ch, n, cell, locked, patch; register float radius, rootx, rooty, rootz; // cache root data radius = radiusd; rootx = posxd[nnodesd]; rooty = posyd[nnodesd]; rootz = poszd[nnodesd]; localmaxdepth = 1; skip = 1; inc = blockDim.x * gridDim.x; i = threadIdx.x + blockIdx.x * blockDim.x; // iterate over all bodies assigned to thread while (i < nbodiesd) { if (skip != 0) { // new body, so start traversing at root skip = 0; px = posxd[i]; py = posyd[i]; pz = poszd[i]; n = nnodesd; depth = 1; r = radius; j = 0; // determine which child to follow if (rootx < px) j = 1; if (rooty < py) j += 2; if (rootz < pz) j += 4; } // follow path to leaf cell ch = childd[n*8+j]; while (ch >= nbodiesd) { n = ch; depth++; r *= 0.5f; j = 0; // determine which child to follow if (posxd[n] < px) j = 1; if (posyd[n] < py) j += 2; if (poszd[n] < pz) j += 4; ch = childd[n*8+j]; } if (ch != -2) { // skip if child pointer is locked and try again later locked = n*8+j; if (ch == atomicCAS((int *)&childd[locked], ch, -2)) { // try to lock if (ch == -1) { // if null, just insert the new body childd[locked] = i; } else { // there already is a body in this position patch = -1; // create new cell(s) and insert the old and new 
body do { depth++; cell = atomicSub((int *)&bottomd, 1) - 1; if (cell <= nbodiesd) { *errd = 1; bottomd = nnodesd; } patch = max(patch, cell); x = (j & 1) * r; y = ((j >> 1) & 1) * r; z = ((j >> 2) & 1) * r; r *= 0.5f; massd[cell] = -1.0f; startd[cell] = -1; x = posxd[cell] = posxd[n] - r + x; y = posyd[cell] = posyd[n] - r + y; z = poszd[cell] = poszd[n] - r + z; for (k = 0; k < 8; k++) childd[cell*8+k] = -1; if (patch != cell) { childd[n*8+j] = cell; } j = 0; if (x < posxd[ch]) j = 1; if (y < posyd[ch]) j += 2; if (z < poszd[ch]) j += 4; childd[cell*8+j] = ch; n = cell; j = 0; if (x < px) j = 1; if (y < py) j += 2; if (z < pz) j += 4; ch = childd[n*8+j]; // repeat until the two bodies are different children } while (ch >= 0); childd[n*8+j] = i; __threadfence(); // push out subtree childd[locked] = patch; } __threadfence(); // push out results localmaxdepth = max(depth, localmaxdepth); i += inc; // move on to next body skip = 1; } } __syncthreads(); // throttle } // record maximum tree depth atomicMax((int *)&maxdepthd, localmaxdepth); } /******************************************************************************/ /*** compute center of mass ***************************************************/ /******************************************************************************/ __global__ __launch_bounds__(THREADS3, FACTOR3) void SummarizationKernel() { register int i, j, k, ch, inc, missing, cnt, bottom; register float m, cm, px, py, pz; __shared__ volatile int child[THREADS3 * 8]; bottom = bottomd; inc = blockDim.x * gridDim.x; k = (bottom & (-WARPSIZE)) + threadIdx.x + blockIdx.x * blockDim.x; // align to warp size if (k < bottom) k += inc; missing = 0; // iterate over all cells assigned to thread while (k <= nnodesd) { if (missing == 0) { // new cell, so initialize cm = 0.0f; px = 0.0f; py = 0.0f; pz = 0.0f; cnt = 0; j = 0; for (i = 0; i < 8; i++) { ch = childd[k*8+i]; if (ch >= 0) { if (i != j) { // move children to front (needed later for speed) childd[k*8+i] = -1; childd[k*8+j] = ch; } child[missing*THREADS3+threadIdx.x] = ch; // cache missing children m = massd[ch]; missing++; if (m >= 0.0f) { // child is ready missing--; if (ch >= nbodiesd) { // count bodies (needed later) cnt += countd[ch] - 1; } // add child's contribution cm += m; px += posxd[ch] * m; py += posyd[ch] * m; pz += poszd[ch] * m; } j++; } } __threadfence(); // for performance only cnt += j; } if (missing != 0) { do { // poll missing child ch = child[(missing-1)*THREADS3+threadIdx.x]; m = massd[ch]; if (m >= 0.0f) { // child is now ready missing--; if (ch >= nbodiesd) { // count bodies (needed later) cnt += countd[ch] - 1; } // add child's contribution cm += m; px += posxd[ch] * m; py += posyd[ch] * m; pz += poszd[ch] * m; } // repeat until we are done or child is not ready } while ((m >= 0.0f) && (missing != 0)); } if (missing == 0) { // all children are ready, so store computed information countd[k] = cnt; m = 1.0f / cm; posxd[k] = px * m; posyd[k] = py * m; poszd[k] = pz * m; __threadfence(); // make sure data are visible before setting mass massd[k] = cm; __threadfence(); // push out results k += inc; // move on to next cell } } } /******************************************************************************/ /*** sort bodies **************************************************************/ /******************************************************************************/ __global__ __launch_bounds__(THREADS4, FACTOR4) void SortKernel() { register int i, k, ch, dec, start, bottom; bottom = bottomd; dec = blockDim.x 
* gridDim.x; k = nnodesd + 1 - dec + threadIdx.x + blockIdx.x * blockDim.x; // iterate over all cells assigned to thread while (k >= bottom) { start = startd[k]; if (start >= 0) { for (i = 0; i < 8; i++) { ch = childd[k*8+i]; if (ch >= nbodiesd) { // child is a cell startd[ch] = start; // set start ID of child start += countd[ch]; // add #bodies in subtree } else if (ch >= 0) { // child is a body sortd[start] = ch; // record body in 'sorted' array start++; } } k -= dec; // move on to next cell } __syncthreads(); // throttle } } /******************************************************************************/ /*** compute force ************************************************************/ /******************************************************************************/ __global__ __launch_bounds__(THREADS5, FACTOR5) void ForceCalculationKernel() { register int i, j, k, n, depth, base, sbase, diff; register float px, py, pz, ax, ay, az, dx, dy, dz, tmp; __shared__ volatile int pos[MAXDEPTH * THREADS5/WARPSIZE], node[MAXDEPTH * THREADS5/WARPSIZE]; __shared__ volatile float dq[MAXDEPTH * THREADS5/WARPSIZE]; __shared__ volatile int step, maxdepth; if (0 == threadIdx.x) { step = stepd; maxdepth = maxdepthd; tmp = radiusd; // precompute values that depend only on tree level dq[0] = tmp * tmp * itolsqd; for (i = 1; i < maxdepth; i++) { dq[i] = dq[i - 1] * 0.25f; } if (maxdepth > MAXDEPTH) { *errd = maxdepth; } } __syncthreads(); if (maxdepth <= MAXDEPTH) { // figure out first thread in each warp (lane 0) base = threadIdx.x / WARPSIZE; sbase = base * WARPSIZE; j = base * MAXDEPTH; diff = threadIdx.x - sbase; // make multiple copies to avoid index calculations later if (diff < MAXDEPTH) { dq[diff+j] = dq[diff]; } __syncthreads(); // iterate over all bodies assigned to thread for (k = threadIdx.x + blockIdx.x * blockDim.x; k < nbodiesd; k += blockDim.x * gridDim.x) { i = sortd[k]; // get permuted/sorted index // cache position info px = posxd[i]; py = posyd[i]; pz = poszd[i]; ax = 0.0f; ay = 0.0f; az = 0.0f; // initialize iteration stack, i.e., push root node onto stack depth = j; if (sbase == threadIdx.x) { node[j] = nnodesd; pos[j] = 0; } __threadfence(); // make sure it's visible while (depth >= j) { // stack is not empty while (pos[depth] < 8) { // node on top of stack has more children to process n = childd[node[depth]*8+pos[depth]]; // load child pointer if (sbase == threadIdx.x) { // I'm the first thread in the warp pos[depth]++; } __threadfence(); // make sure it's visible if (n >= 0) { dx = posxd[n] - px; dy = posyd[n] - py; dz = poszd[n] - pz; tmp = dx*dx + (dy*dy + (dz*dz + epssqd)); // compute distance squared (plus softening) if ((n < nbodiesd) || __all(tmp >= dq[depth])) { // check if all threads agree that cell is far enough away (or is a body) tmp = rsqrtf(tmp); // compute distance tmp = massd[n] * tmp * tmp * tmp; ax += dx * tmp; ay += dy * tmp; az += dz * tmp; } else { // push cell onto stack depth++; if (sbase == threadIdx.x) { node[depth] = n; pos[depth] = 0; } __threadfence(); // make sure it's visible } } else { depth = max(j, depth - 1); // early out because all remaining children are also zero } } depth--; // done with this level } if (step > 0) { // update velocity velxd[i] += (ax - accxd[i]) * dthfd; velyd[i] += (ay - accyd[i]) * dthfd; velzd[i] += (az - acczd[i]) * dthfd; } // save computed acceleration accxd[i] = ax; accyd[i] = ay; acczd[i] = az; } } } /******************************************************************************/ /*** advance bodies 
***********************************************************/ /******************************************************************************/ extern "C" __global__ void dummy(float4* data) { } __global__ __launch_bounds__(THREADS6, FACTOR6) void IntegrationKernel() { register int i, inc; register float dvelx, dvely, dvelz; register float velhx, velhy, velhz; // iterate over all bodies assigned to thread inc = blockDim.x * gridDim.x; for (i = threadIdx.x + blockIdx.x * blockDim.x; i < nbodiesd; i += inc) { // integrate dvelx = accxd[i] * dthfd; dvely = accyd[i] * dthfd; dvelz = acczd[i] * dthfd; velhx = velxd[i] + dvelx; velhy = velyd[i] + dvely; velhz = velzd[i] + dvelz; posxd[i] += velhx * dtimed; posyd[i] += velhy * dtimed; poszd[i] += velhz * dtimed; velxd[i] = velhx + dvelx; velyd[i] = velhy + dvely; velzd[i] = velhz + dvelz; } } /******************************************************************************/ static void CudaTest(char *msg) { cudaError_t e; cudaThreadSynchronize(); if (cudaSuccess != (e = cudaGetLastError())) { fprintf(stderr, "%s: %d\n", msg, e); fprintf(stderr, "%s\n", cudaGetErrorString(e)); exit(-1); } } /******************************************************************************/ // random number generator #define MULT 1103515245 #define ADD 12345 #define MASK 0x7FFFFFFF #define TWOTO31 2147483648.0 static int A = 1; static int B = 0; static int randx = 1; static int lastrand; static void drndset(int seed) { A = 1; B = 0; randx = (A * seed + B) & MASK; A = (MULT * A) & MASK; B = (MULT * B + ADD) & MASK; } static double drnd() { lastrand = randx; randx = (A * randx + B) & MASK; return (double)lastrand / TWOTO31; } /******************************************************************************/ int main(int argc, char *argv[]) { register int i, run, blocks; register int nnodes, nbodies, step, timesteps; register int runtime, mintime; int error; register float dtime, dthf, epssq, itolsq; float time, timing[7]; clock_t starttime, endtime; cudaEvent_t start, stop; float *mass, *posx, *posy, *posz, *velx, *vely, *velz; int *errl, *sortl, *childl, *countl, *startl; float *massl; float *posxl, *posyl, *poszl; float *velxl, *velyl, *velzl; float *accxl, *accyl, *acczl; float *maxxl, *maxyl, *maxzl; float *minxl, *minyl, *minzl; register double rsc, vsc, r, v, x, y, z, sq, scale; // perform some checks fprintf(stderr, "CUDA BarnesHut v2.0\n"); if (argc != 3) { fprintf(stderr, "\n"); fprintf(stderr, "arguments: number_of_bodies number_of_timesteps\n"); exit(-1); } int deviceCount; cudaGetDeviceCount(&deviceCount); if (deviceCount == 0) { fprintf(stderr, "There is no device supporting CUDA\n"); exit(-1); } cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, 0); if ((deviceProp.major == 9999) && (deviceProp.minor == 9999)) { fprintf(stderr, "There is no CUDA capable device\n"); exit(-1); } /// if (deviceProp.major < 2) { // fprintf(stderr, "Need at least compute capability 2.0\n"); // exit(-1); //} // if (deviceProp.warpSize != WARPSIZE) { // fprintf(stderr, "Warp size must be %d\n", deviceProp.warpSize); // exit(-1); // } blocks = deviceProp.multiProcessorCount; fprintf(stderr, "blocks = %d\n", blocks); if ((WARPSIZE <= 0) || (WARPSIZE & (WARPSIZE-1) != 0)) { fprintf(stderr, "Warp size must be greater than zero and a power of two\n"); exit(-1); } if (MAXDEPTH > WARPSIZE) { fprintf(stderr, "MAXDEPTH must be less than or equal to WARPSIZE\n"); exit(-1); } if ((THREADS1 <= 0) || (THREADS1 & (THREADS1-1) != 0)) { fprintf(stderr, "THREADS1 must be greater than zero 
and a power of two\n"); exit(-1); } // set L1/shared memory configuration cudaFuncSetCacheConfig(BoundingBoxKernel, cudaFuncCachePreferShared); cudaFuncSetCacheConfig(TreeBuildingKernel, cudaFuncCachePreferL1); cudaFuncSetCacheConfig(SummarizationKernel, cudaFuncCachePreferShared); cudaFuncSetCacheConfig(SortKernel, cudaFuncCachePreferL1); cudaFuncSetCacheConfig(ForceCalculationKernel, cudaFuncCachePreferL1); cudaFuncSetCacheConfig(IntegrationKernel, cudaFuncCachePreferL1); cudaGetLastError(); // reset error value for (run = 0; run < 3; run++) { for (i = 0; i < 7; i++) timing[i] = 0.0f; nbodies = atoi(argv[1]); if (nbodies < 1) { fprintf(stderr, "nbodies is too small: %d\n", nbodies); exit(-1); } if (nbodies > (1 << 30)) { fprintf(stderr, "nbodies is too large: %d\n", nbodies); exit(-1); } nnodes = nbodies * 2; if (nnodes < 1024*blocks) nnodes = 1024*blocks; while ((nnodes & (WARPSIZE-1)) != 0) nnodes++; nnodes--; timesteps = atoi(argv[2]); dtime = 0.025; dthf = dtime * 0.5f; epssq = 0.05 * 0.05; itolsq = 1.0f / (0.5 * 0.5); // allocate memory if (run == 0) { fprintf(stderr, "nodes = %d\n", nnodes+1); fprintf(stderr, "configuration: %d bodies, %d time steps\n", nbodies, timesteps); mass = (float *)malloc(sizeof(float) * nbodies); if (mass == NULL) {fprintf(stderr, "cannot allocate mass\n"); exit(-1);} posx = (float *)malloc(sizeof(float) * nbodies); if (posx == NULL) {fprintf(stderr, "cannot allocate posx\n"); exit(-1);} posy = (float *)malloc(sizeof(float) * nbodies); if (posy == NULL) {fprintf(stderr, "cannot allocate posy\n"); exit(-1);} posz = (float *)malloc(sizeof(float) * nbodies); if (posz == NULL) {fprintf(stderr, "cannot allocate posz\n"); exit(-1);} velx = (float *)malloc(sizeof(float) * nbodies); if (velx == NULL) {fprintf(stderr, "cannot allocate velx\n"); exit(-1);} vely = (float *)malloc(sizeof(float) * nbodies); if (vely == NULL) {fprintf(stderr, "cannot allocate vely\n"); exit(-1);} velz = (float *)malloc(sizeof(float) * nbodies); if (velz == NULL) {fprintf(stderr, "cannot allocate velz\n"); exit(-1);} if (cudaSuccess != cudaMalloc((void **)&errl, sizeof(int))) fprintf(stderr, "could not allocate errd\n"); CudaTest("couldn't allocate errd"); if (cudaSuccess != cudaMalloc((void **)&childl, sizeof(int) * (nnodes+1) * 8)) fprintf(stderr, "could not allocate childd\n"); CudaTest("couldn't allocate childd"); if (cudaSuccess != cudaMalloc((void **)&massl, sizeof(float) * (nnodes+1))) fprintf(stderr, "could not allocate massd\n"); CudaTest("couldn't allocate massd"); if (cudaSuccess != cudaMalloc((void **)&posxl, sizeof(float) * (nnodes+1))) fprintf(stderr, "could not allocate posxd\n"); CudaTest("couldn't allocate posxd"); if (cudaSuccess != cudaMalloc((void **)&posyl, sizeof(float) * (nnodes+1))) fprintf(stderr, "could not allocate posyd\n"); CudaTest("couldn't allocate posyd"); if (cudaSuccess != cudaMalloc((void **)&poszl, sizeof(float) * (nnodes+1))) fprintf(stderr, "could not allocate poszd\n"); CudaTest("couldn't allocate poszd"); if (cudaSuccess != cudaMalloc((void **)&countl, sizeof(int) * (nnodes+1))) fprintf(stderr, "could not allocate countd\n"); CudaTest("couldn't allocate countd"); if (cudaSuccess != cudaMalloc((void **)&startl, sizeof(int) * (nnodes+1))) fprintf(stderr, "could not allocate startd\n"); CudaTest("couldn't allocate startd"); // alias arrays int inc = (nbodies + WARPSIZE - 1) & (-WARPSIZE); velxl = (float *)&childl[0*inc]; velyl = (float *)&childl[1*inc]; velzl = (float *)&childl[2*inc]; accxl = (float *)&childl[3*inc]; accyl = (float *)&childl[4*inc]; 
acczl = (float *)&childl[5*inc]; sortl = (int *)&childl[6*inc]; if (cudaSuccess != cudaMalloc((void **)&maxxl, sizeof(float) * blocks)) fprintf(stderr, "could not allocate maxxd\n"); CudaTest("couldn't allocate maxxd"); if (cudaSuccess != cudaMalloc((void **)&maxyl, sizeof(float) * blocks)) fprintf(stderr, "could not allocate maxyd\n"); CudaTest("couldn't allocate maxyd"); if (cudaSuccess != cudaMalloc((void **)&maxzl, sizeof(float) * blocks)) fprintf(stderr, "could not allocate maxzd\n"); CudaTest("couldn't allocate maxzd"); if (cudaSuccess != cudaMalloc((void **)&minxl, sizeof(float) * blocks)) fprintf(stderr, "could not allocate minxd\n"); CudaTest("couldn't allocate minxd"); if (cudaSuccess != cudaMalloc((void **)&minyl, sizeof(float) * blocks)) fprintf(stderr, "could not allocate minyd\n"); CudaTest("couldn't allocate minyd"); if (cudaSuccess != cudaMalloc((void **)&minzl, sizeof(float) * blocks)) fprintf(stderr, "could not allocate minzd\n"); CudaTest("couldn't allocate minzd"); if (cudaSuccess != cudaMemcpyToSymbol(nnodesd, &nnodes, sizeof(int))) fprintf(stderr, "copying of nnodes to device failed\n"); CudaTest("nnode copy to device failed"); if (cudaSuccess != cudaMemcpyToSymbol(nbodiesd, &nbodies, sizeof(int))) fprintf(stderr, "copying of nbodies to device failed\n"); CudaTest("nbody copy to device failed"); if (cudaSuccess != cudaMemcpyToSymbol(errd, &errl, sizeof(int))) fprintf(stderr, "copying of err to device failed\n"); CudaTest("err copy to device failed"); if (cudaSuccess != cudaMemcpyToSymbol(dtimed, &dtime, sizeof(float))) fprintf(stderr, "copying of dtime to device failed\n"); CudaTest("dtime copy to device failed"); if (cudaSuccess != cudaMemcpyToSymbol(dthfd, &dthf, sizeof(float))) fprintf(stderr, "copying of dthf to device failed\n"); CudaTest("dthf copy to device failed"); if (cudaSuccess != cudaMemcpyToSymbol(epssqd, &epssq, sizeof(float))) fprintf(stderr, "copying of epssq to device failed\n"); CudaTest("epssq copy to device failed"); if (cudaSuccess != cudaMemcpyToSymbol(itolsqd, &itolsq, sizeof(float))) fprintf(stderr, "copying of itolsq to device failed\n"); CudaTest("itolsq copy to device failed"); if (cudaSuccess != cudaMemcpyToSymbol(sortd, &sortl, sizeof(int))) fprintf(stderr, "copying of sortl to device failed\n"); CudaTest("sortl copy to device failed"); if (cudaSuccess != cudaMemcpyToSymbol(countd, &countl, sizeof(int))) fprintf(stderr, "copying of countl to device failed\n"); CudaTest("countl copy to device failed"); if (cudaSuccess != cudaMemcpyToSymbol(startd, &startl, sizeof(int))) fprintf(stderr, "copying of startl to device failed\n"); CudaTest("startl copy to device failed"); if (cudaSuccess != cudaMemcpyToSymbol(childd, &childl, sizeof(int))) fprintf(stderr, "copying of childl to device failed\n"); CudaTest("childl copy to device failed"); if (cudaSuccess != cudaMemcpyToSymbol(massd, &massl, sizeof(int))) fprintf(stderr, "copying of massl to device failed\n"); CudaTest("massl copy to device failed"); if (cudaSuccess != cudaMemcpyToSymbol(posxd, &posxl, sizeof(int))) fprintf(stderr, "copying of posxl to device failed\n"); CudaTest("posxl copy to device failed"); if (cudaSuccess != cudaMemcpyToSymbol(posyd, &posyl, sizeof(int))) fprintf(stderr, "copying of posyl to device failed\n"); CudaTest("posyl copy to device failed"); if (cudaSuccess != cudaMemcpyToSymbol(poszd, &poszl, sizeof(int))) fprintf(stderr, "copying of poszl to device failed\n"); CudaTest("poszl copy to device failed"); if (cudaSuccess != cudaMemcpyToSymbol(velxd, &velxl, 
sizeof(int))) fprintf(stderr, "copying of velxl to device failed\n"); CudaTest("velxl copy to device failed"); if (cudaSuccess != cudaMemcpyToSymbol(velyd, &velyl, sizeof(int))) fprintf(stderr, "copying of velyl to device failed\n"); CudaTest("velyl copy to device failed"); if (cudaSuccess != cudaMemcpyToSymbol(velzd, &velzl, sizeof(int))) fprintf(stderr, "copying of velzl to device failed\n"); CudaTest("velzl copy to device failed"); if (cudaSuccess != cudaMemcpyToSymbol(accxd, &accxl, sizeof(int))) fprintf(stderr, "copying of accxl to device failed\n"); CudaTest("accxl copy to device failed"); if (cudaSuccess != cudaMemcpyToSymbol(accyd, &accyl, sizeof(int))) fprintf(stderr, "copying of accyl to device failed\n"); CudaTest("accyl copy to device failed"); if (cudaSuccess != cudaMemcpyToSymbol(acczd, &acczl, sizeof(int))) fprintf(stderr, "copying of acczl to device failed\n"); CudaTest("acczl copy to device failed"); if (cudaSuccess != cudaMemcpyToSymbol(maxxd, &maxxl, sizeof(int))) fprintf(stderr, "copying of maxxl to device failed\n"); CudaTest("maxxl copy to device failed"); if (cudaSuccess != cudaMemcpyToSymbol(maxyd, &maxyl, sizeof(int))) fprintf(stderr, "copying of maxyl to device failed\n"); CudaTest("maxyl copy to device failed"); if (cudaSuccess != cudaMemcpyToSymbol(maxzd, &maxzl, sizeof(int))) fprintf(stderr, "copying of maxzl to device failed\n"); CudaTest("maxzl copy to device failed"); if (cudaSuccess != cudaMemcpyToSymbol(minxd, &minxl, sizeof(int))) fprintf(stderr, "copying of minxl to device failed\n"); CudaTest("minxl copy to device failed"); if (cudaSuccess != cudaMemcpyToSymbol(minyd, &minyl, sizeof(int))) fprintf(stderr, "copying of minyl to device failed\n"); CudaTest("minyl copy to device failed"); if (cudaSuccess != cudaMemcpyToSymbol(minzd, &minzl, sizeof(int))) fprintf(stderr, "copying of minzl to device failed\n"); CudaTest("minzl copy to device failed"); } // generate input drndset(7); rsc = (3 * 3.1415926535897932384626433832795) / 16; vsc = sqrt(1.0 / rsc); for (i = 0; i < nbodies; i++) { mass[i] = 1.0 / nbodies; r = 1.0 / sqrt(pow(drnd()*0.999, -2.0/3.0) - 1); do { x = drnd()*2.0 - 1.0; y = drnd()*2.0 - 1.0; z = drnd()*2.0 - 1.0; sq = x*x + y*y + z*z; } while (sq > 1.0); scale = rsc * r / sqrt(sq); posx[i] = x * scale; posy[i] = y * scale; posz[i] = z * scale; do { x = drnd(); y = drnd() * 0.1; } while (y > x*x * pow(1 - x*x, 3.5)); v = x * sqrt(2.0 / sqrt(1 + r*r)); do { x = drnd()*2.0 - 1.0; y = drnd()*2.0 - 1.0; z = drnd()*2.0 - 1.0; sq = x*x + y*y + z*z; } while (sq > 1.0); scale = vsc * v / sqrt(sq); velx[i] = x * scale; vely[i] = y * scale; velz[i] = z * scale; } if (cudaSuccess != cudaMemcpy(massl, mass, sizeof(float) * nbodies, cudaMemcpyHostToDevice)) fprintf(stderr, "copying of mass to device failed\n"); CudaTest("mass copy to device failed"); if (cudaSuccess != cudaMemcpy(posxl, posx, sizeof(float) * nbodies, cudaMemcpyHostToDevice)) fprintf(stderr, "copying of posx to device failed\n"); CudaTest("posx copy to device failed"); if (cudaSuccess != cudaMemcpy(posyl, posy, sizeof(float) * nbodies, cudaMemcpyHostToDevice)) fprintf(stderr, "copying of posy to device failed\n"); CudaTest("posy copy to device failed"); if (cudaSuccess != cudaMemcpy(poszl, posz, sizeof(float) * nbodies, cudaMemcpyHostToDevice)) fprintf(stderr, "copying of posz to device failed\n"); CudaTest("posz copy to device failed"); if (cudaSuccess != cudaMemcpy(velxl, velx, sizeof(float) * nbodies, cudaMemcpyHostToDevice)) fprintf(stderr, "copying of velx to device failed\n"); 
CudaTest("velx copy to device failed"); if (cudaSuccess != cudaMemcpy(velyl, vely, sizeof(float) * nbodies, cudaMemcpyHostToDevice)) fprintf(stderr, "copying of vely to device failed\n"); CudaTest("vely copy to device failed"); if (cudaSuccess != cudaMemcpy(velzl, velz, sizeof(float) * nbodies, cudaMemcpyHostToDevice)) fprintf(stderr, "copying of velz to device failed\n"); CudaTest("velz copy to device failed"); // run timesteps (lauch GPU kernels) cudaEventCreate(&start); cudaEventCreate(&stop); starttime = clock(); cudaEventRecord(start, 0); InitializationKernel<<<1, 1>>>(); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); timing[0] += time; CudaTest("kernel 0 launch failed"); for (step = 0; step < timesteps; step++) { cudaEventRecord(start, 0); BoundingBoxKernel<<<blocks * FACTOR1, THREADS1>>>(); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); timing[1] += time; CudaTest("kernel 1 launch failed"); cudaEventRecord(start, 0); TreeBuildingKernel<<<blocks * FACTOR2, THREADS2>>>(); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); timing[2] += time; CudaTest("kernel 2 launch failed"); cudaEventRecord(start, 0); SummarizationKernel<<<blocks * FACTOR3, THREADS3>>>(); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); timing[3] += time; CudaTest("kernel 3 launch failed"); cudaEventRecord(start, 0); SortKernel<<<blocks * FACTOR4, THREADS4>>>(); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); timing[4] += time; CudaTest("kernel 4 launch failed"); cudaEventRecord(start, 0); ForceCalculationKernel<<<blocks * FACTOR5, THREADS5>>>(); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); timing[5] += time; CudaTest("kernel 5 launch failed"); cudaEventRecord(start, 0); IntegrationKernel<<<blocks * FACTOR6, THREADS6>>>(); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); timing[6] += time; CudaTest("kernel 6 launch failed"); } endtime = clock(); CudaTest("kernel launch failed"); cudaEventDestroy(start); cudaEventDestroy(stop); // transfer result back to CPU if (cudaSuccess != cudaMemcpy(&error, errl, sizeof(int), cudaMemcpyDeviceToHost)) fprintf(stderr, "copying of err from device failed\n"); CudaTest("err copy from device failed"); if (cudaSuccess != cudaMemcpy(posx, posxl, sizeof(float) * nbodies, cudaMemcpyDeviceToHost)) fprintf(stderr, "copying of posx from device failed\n"); CudaTest("posx copy from device failed"); if (cudaSuccess != cudaMemcpy(posy, posyl, sizeof(float) * nbodies, cudaMemcpyDeviceToHost)) fprintf(stderr, "copying of posy from device failed\n"); CudaTest("posy copy from device failed"); if (cudaSuccess != cudaMemcpy(posz, poszl, sizeof(float) * nbodies, cudaMemcpyDeviceToHost)) fprintf(stderr, "copying of posz from device failed\n"); CudaTest("posz copy from device failed"); if (cudaSuccess != cudaMemcpy(velx, velxl, sizeof(float) * nbodies, cudaMemcpyDeviceToHost)) fprintf(stderr, "copying of velx from device failed\n"); CudaTest("velx copy from device failed"); if (cudaSuccess != cudaMemcpy(vely, velyl, sizeof(float) * nbodies, cudaMemcpyDeviceToHost)) fprintf(stderr, "copying of vely from device failed\n"); CudaTest("vely copy from device failed"); if (cudaSuccess != cudaMemcpy(velz, velzl, sizeof(float) * nbodies, cudaMemcpyDeviceToHost)) fprintf(stderr, 
"copying of velz from device failed\n"); CudaTest("velz copy from device failed"); runtime = (int) (1000.0f * (endtime - starttime) / CLOCKS_PER_SEC); fprintf(stderr, "runtime: %d ms (", runtime); time = 0; for (i = 1; i < 7; i++) { fprintf(stderr, " %.1f ", timing[i]); time += timing[i]; } if (error == 0) { fprintf(stderr, ") = %.1f\n", time); } else { fprintf(stderr, ") = %.1f FAILED %d\n", time, error); } if ((run == 0) || (mintime > runtime)) mintime = runtime; } fprintf(stderr, "mintime: %d ms\n", mintime); // print output // for (i = 0; i < nbodies; i++) { printf("%.2e %.2e %.2e\n", posx[i], posy[i], posz[i]); // } free(mass); free(posx); free(posy); free(posz); free(velx); free(vely); free(velz); cudaFree(errl); cudaFree(childl); cudaFree(massl); cudaFree(posxl); cudaFree(posyl); cudaFree(poszl); cudaFree(countl); cudaFree(startl); cudaFree(maxxl); cudaFree(maxyl); cudaFree(maxzl); cudaFree(minxl); cudaFree(minyl); cudaFree(minzl); return 0; }
bbbe24a4358a27f23a0fba584fbec3c6510d8e90.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> __global__ void helloFromGPU (void) { printf("Hello from GPU\n"); } int main(void) { hipLaunchKernelGGL(( helloFromGPU), dim3(1),dim3(10), 0, 0, ); hipDeviceReset(); return 0; }
bbbe24a4358a27f23a0fba584fbec3c6510d8e90.cu
#include <stdio.h> __global__ void helloFromGPU (void) { printf("Hello from GPU\n"); } int main(void) { helloFromGPU<<<1,10>>>(); cudaDeviceReset(); return 0; }
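The hello-world pair above is the smallest illustration of what the hipify pass changes: the CUDA launch helloFromGPU<<<1,10>>>() becomes a hipLaunchKernelGGL macro call taking the kernel, dim3 grid and block sizes, a dynamic shared-memory byte count, and a stream, followed by any kernel arguments. The stand-alone sketch below (not part of the dataset) shows the two spellings side by side; hipcc also accepts the triple-chevron form directly, so the macro form mainly matters for tooling-generated code.

#include <hip/hip_runtime.h>
#include <cstdio>

__global__ void helloFromGPU() { printf("Hello from GPU\n"); }

int main() {
  // Macro form emitted by hipify: kernel, grid, block, shared-mem bytes, stream, args...
  hipLaunchKernelGGL(helloFromGPU, dim3(1), dim3(10), 0, 0);
  // Equivalent CUDA-style launch, also accepted by hipcc:
  helloFromGPU<<<1, 10>>>();
  hipDeviceSynchronize();  // block until the device printf output is flushed
  return 0;
}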
bb7e366e26219e901406e446afcd20fca345e4a6.hip
// !!! This is a file automatically generated by hipify!!! #include <cstdio> #include <cstdlib> #include <hip/hip_runtime.h> #include <quda_internal.h> #include <tune_quda.h> #include <gauge_field.h> #include <gauge_field_order.h> #include <quda_matrix.h> #include <float_vector.h> #include <complex_quda.h> namespace quda { #ifdef GPU_GAUGE_TOOLS template <typename Float, typename Gauge, typename Mom> struct UpdateGaugeArg { Gauge out; Gauge in; Mom momentum; Float dt; int nDim; UpdateGaugeArg(const Gauge &out, const Gauge &in, const Mom &momentum, Float dt, int nDim) : out(out), in(in), momentum(momentum), dt(dt), nDim(nDim) { } }; /** Direct port of the TIFR expsu3 algorithm */ template <typename Float> __device__ __host__ void expsu3(Matrix<complex<Float>,3> &q, int x) { typedef complex<Float> Complex; Complex a2 = (q(3)*q(1)+q(7)*q(5)+q(6)*q(2) - (q(0)*q(4)+(q(0)+q(4))*q(8))) / (Float)3.0 ; Complex a3 = q(0)*q(4)*q(8) + q(1)*q(5)*q(6) + q(2)*q(3)*q(7) - q(6)*q(4)*q(2) - q(3)*q(1)*q(8) - q(0)*q(7)*q(5); Complex sg2h3 = sqrt(a3*a3-(Float)4.*a2*a2*a2); Complex cp = exp( log((Float)0.5*(a3+sg2h3)) / (Float)3.0); Complex cm = a2/cp; Complex r1 = exp( Complex(0.0,1.0)*(Float)(2.0*M_PI/3.0)); Complex r2 = exp(-Complex(0.0,1.0)*(Float)(2.0*M_PI/3.0)); Complex w1[3]; w1[0]=cm+cp; w1[1]=r1*cp+r2*cm; w1[2]=r2*cp+r1*cm; Complex z1=q(1)*q(6)-q(0)*q(7); Complex z2=q(3)*q(7)-q(4)*q(6); Complex al = w1[0]; Complex wr21 = (z1+al*q(7)) / (z2+al*q(6)); Complex wr31 = (al-q(0)-wr21*q(3))/q(6); al=w1[1]; Complex wr22 = (z1+al*q(7))/(z2+al*q(6)); Complex wr32 = (al-q(0)-wr22*q(3))/q(6); al=w1[2]; Complex wr23 = (z1+al*q(7))/(z2+al*q(6)); Complex wr33 = (al-q(0)-wr23*q(3))/q(6); z1=q(3)*q(2) - q(0)*q(5); z2=q(1)*q(5) - q(4)*q(2); al=w1[0]; Complex wl21 = conj((z1+al*q(5))/(z2+al*q(2))); Complex wl31 = conj((al-q(0)-conj(wl21)*q(1))/q(2)); al=w1[1]; Complex wl22 = conj((z1+al*q(5))/(z2+al*q(2))); Complex wl32 = conj((al-q(0)-conj(wl22)*q(1))/q(2)); al=w1[2]; Complex wl23 = conj((z1+al*q(5))/(z2+al*q(2))); Complex wl33 = conj((al-q(0)-conj(wl23)*q(1))/q(2)); Complex xn1 = (Float)1. + wr21*conj(wl21) + wr31*conj(wl31); Complex xn2 = (Float)1. + wr22*conj(wl22) + wr32*conj(wl32); Complex xn3 = (Float)1. 
+ wr23*conj(wl23) + wr33*conj(wl33); Complex d1 = exp(w1[0]); Complex d2 = exp(w1[1]); Complex d3 = exp(w1[2]); Complex y11 = d1/xn1; Complex y12 = d2/xn2; Complex y13 = d3/xn3; Complex y21 = wr21*d1/xn1; Complex y22 = wr22*d2/xn2; Complex y23 = wr23*d3/xn3; Complex y31 = wr31*d1/xn1; Complex y32 = wr32*d2/xn2; Complex y33 = wr33*d3/xn3; q(0) = y11 + y12 + y13; q(1) = y21 + y22 + y23; q(2) = y31 + y32 + y33; q(3) = y11*conj(wl21) + y12*conj(wl22) + y13*conj(wl23); q(4) = y21*conj(wl21) + y22*conj(wl22) + y23*conj(wl23); q(5) = y31*conj(wl21) + y32*conj(wl22) + y33*conj(wl23); q(6) = y11*conj(wl31) + y12*conj(wl32) + y13*conj(wl33); q(7) = y21*conj(wl31) + y22*conj(wl32) + y23*conj(wl33); q(8) = y31*conj(wl31) + y32*conj(wl32) + y33*conj(wl33); } template<typename Float, typename Gauge, typename Mom, int N, bool conj_mom, bool exact> __device__ __host__ void updateGaugeFieldCompute (UpdateGaugeArg<Float,Gauge,Mom> &arg, int x, int parity) { typedef complex<Float> Complex; Matrix<Complex,3> link, result, mom; for(int dir=0; dir<arg.nDim; ++dir){ arg.in.load((Float*)(link.data), x, dir, parity); arg.momentum.load((Float*)(mom.data), x, dir, parity); Complex trace = getTrace(mom); mom(0,0) -= trace/static_cast<Float>(3.0); mom(1,1) -= trace/static_cast<Float>(3.0); mom(2,2) -= trace/static_cast<Float>(3.0); if (!exact) { result = link; // Nth order expansion of exponential if (!conj_mom) { for(int r=N; r>0; r--) result = (arg.dt/r)*mom*result + link; } else { for(int r=N; r>0; r--) result = (arg.dt/r)*conj(mom)*result + link; } } else { mom = arg.dt * mom; expsu3<Float>(mom, x+dir+parity); if (!conj_mom) { link = mom * link; } else { link = conj(mom) * link; } result = link; } arg.out.save((Float*)(result.data), x, dir, parity); } // dir } template<typename Float, typename Gauge, typename Mom, int N, bool conj_mom, bool exact> void updateGaugeField(UpdateGaugeArg<Float,Gauge,Mom> arg) { for (unsigned int parity=0; parity<2; parity++) { for (int x=0; x<arg.out.volumeCB; x++) { updateGaugeFieldCompute<Float,Gauge,Mom,N,conj_mom,exact> (arg, x, parity); } } } template<typename Float, typename Gauge, typename Mom, int N, bool conj_mom, bool exact> __global__ void updateGaugeFieldKernel(UpdateGaugeArg<Float,Gauge,Mom> arg) { int idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx >= 2*arg.out.volumeCB) return; int parity = (idx >= arg.out.volumeCB) ? 
1 : 0; idx -= parity*arg.out.volumeCB; updateGaugeFieldCompute<Float,Gauge,Mom,N,conj_mom,exact>(arg, idx, parity); } template <typename Float, typename Gauge, typename Mom, int N, bool conj_mom, bool exact> class UpdateGaugeField : public Tunable { private: UpdateGaugeArg<Float,Gauge,Mom> arg; const GaugeField &meta; // meta data const QudaFieldLocation location; // location of the lattice fields unsigned int sharedBytesPerThread() const { return 0; } unsigned int sharedBytesPerBlock(const TuneParam &) const { return 0; } unsigned int minThreads() const { return 2*arg.in.volumeCB; } bool tuneGridDim() const { return false; } public: UpdateGaugeField(const UpdateGaugeArg<Float,Gauge,Mom> &arg, const GaugeField &meta, QudaFieldLocation location) : arg(arg), meta(meta), location(location) { writeAuxString("threads=%d,prec=%lu,stride=%d", 2*arg.in.volumeCB, sizeof(Float), arg.in.stride); } virtual ~UpdateGaugeField() { } void apply(const hipStream_t &stream){ if (location == QUDA_CUDA_FIELD_LOCATION) { TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); hipLaunchKernelGGL(( updateGaugeFieldKernel<Float,Gauge,Mom,N,conj_mom,exact>) , dim3(tp.grid),dim3(tp.block),tp.shared_bytes, 0, arg); } else { // run the CPU code updateGaugeField<Float,Gauge,Mom,N,conj_mom,exact>(arg); } } // apply long long flops() const { const int Nc = 3; return arg.nDim*2*arg.in.volumeCB*N*(Nc*Nc*2 + // scalar-matrix multiply (8*Nc*Nc*Nc - 2*Nc*Nc) + // matrix-matrix multiply Nc*Nc*2); // matrix-matrix addition } long long bytes() const { return arg.nDim*2*arg.in.volumeCB* (arg.in.Bytes() + arg.out.Bytes() + arg.momentum.Bytes()); } TuneKey tuneKey() const { return TuneKey(meta.VolString(), typeid(*this).name(), aux); } }; template <typename Float, typename Gauge, typename Mom> void updateGaugeField(Gauge &out, const Gauge &in, const Mom &mom, double dt, const GaugeField &meta, bool conj_mom, bool exact, QudaFieldLocation location) { // degree of exponential expansion const int N = 8; if (conj_mom) { if (exact) { UpdateGaugeArg<Float, Gauge, Mom> arg(out, in, mom, dt, 4); UpdateGaugeField<Float,Gauge,Mom,N,true,true> updateGauge(arg, meta, location); updateGauge.apply(0); } else { UpdateGaugeArg<Float, Gauge, Mom> arg(out, in, mom, dt, 4); UpdateGaugeField<Float,Gauge,Mom,N,true,false> updateGauge(arg, meta, location); updateGauge.apply(0); } } else { if (exact) { UpdateGaugeArg<Float, Gauge, Mom> arg(out, in, mom, dt, 4); UpdateGaugeField<Float,Gauge,Mom,N,false,true> updateGauge(arg, meta, location); updateGauge.apply(0); } else { UpdateGaugeArg<Float, Gauge, Mom> arg(out, in, mom, dt, 4); UpdateGaugeField<Float,Gauge,Mom,N,false,false> updateGauge(arg, meta, location); updateGauge.apply(0); } } if (location == QUDA_CUDA_FIELD_LOCATION) checkCudaError(); } template <typename Float, typename Gauge> void updateGaugeField(Gauge out, const Gauge &in, const GaugeField &mom, double dt, bool conj_mom, bool exact, QudaFieldLocation location) { if (mom.Order() == QUDA_FLOAT2_GAUGE_ORDER) { if (mom.Reconstruct() == QUDA_RECONSTRUCT_10) { // FIX ME - 11 is a misnomer to avoid confusion in template instantiation updateGaugeField<Float>(out, in, gauge::FloatNOrder<Float,18,2,11>(mom), dt, mom, conj_mom, exact, location); } else { errorQuda("Reconstruction type not supported"); } } else if (mom.Order() == QUDA_MILC_GAUGE_ORDER) { updateGaugeField<Float>(out, in, gauge::MILCOrder<Float,10>(mom), dt, mom, conj_mom, exact, location); } else { errorQuda("Gauge Field order %d not supported", mom.Order()); } } template 
<typename Float> void updateGaugeField(GaugeField &out, const GaugeField &in, const GaugeField &mom, double dt, bool conj_mom, bool exact, QudaFieldLocation location) { const int Nc = 3; if (out.Ncolor() != Nc) errorQuda("Ncolor=%d not supported at this time", out.Ncolor()); if (out.Order() != in.Order() || out.Reconstruct() != in.Reconstruct()) { errorQuda("Input and output gauge field ordering and reconstruction must match"); } if (out.isNative()) { if (out.Reconstruct() == QUDA_RECONSTRUCT_NO) { typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_NO>::type G; updateGaugeField<Float>(G(out),G(in), mom, dt, conj_mom, exact, location); } else if (out.Reconstruct() == QUDA_RECONSTRUCT_12) { typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_12>::type G; updateGaugeField<Float>(G(out), G(in), mom, dt, conj_mom, exact, location); } else { errorQuda("Reconstruction type not supported"); } } else if (out.Order() == QUDA_MILC_GAUGE_ORDER) { updateGaugeField<Float>(gauge::MILCOrder<Float, Nc*Nc*2>(out), gauge::MILCOrder<Float, Nc*Nc*2>(in), mom, dt, conj_mom, exact, location); } else { errorQuda("Gauge Field order %d not supported", out.Order()); } } #endif void updateGaugeField(GaugeField &out, double dt, const GaugeField& in, const GaugeField& mom, bool conj_mom, bool exact) { #ifdef GPU_GAUGE_TOOLS if (out.Precision() != in.Precision() || out.Precision() != mom.Precision()) errorQuda("Gauge and momentum fields must have matching precision"); if (out.Location() != in.Location() || out.Location() != mom.Location()) errorQuda("Gauge and momentum fields must have matching location"); if (out.Precision() == QUDA_DOUBLE_PRECISION) { updateGaugeField<double>(out, in, mom, dt, conj_mom, exact, out.Location()); } else if (out.Precision() == QUDA_SINGLE_PRECISION) { updateGaugeField<float>(out, in, mom, dt, conj_mom, exact, out.Location()); } else { errorQuda("Precision %d not supported", out.Precision()); } #else errorQuda("Gauge tools are not build"); #endif } } // namespace quda
bb7e366e26219e901406e446afcd20fca345e4a6.cu
#include <cstdio> #include <cstdlib> #include <cuda.h> #include <quda_internal.h> #include <tune_quda.h> #include <gauge_field.h> #include <gauge_field_order.h> #include <quda_matrix.h> #include <float_vector.h> #include <complex_quda.h> namespace quda { #ifdef GPU_GAUGE_TOOLS template <typename Float, typename Gauge, typename Mom> struct UpdateGaugeArg { Gauge out; Gauge in; Mom momentum; Float dt; int nDim; UpdateGaugeArg(const Gauge &out, const Gauge &in, const Mom &momentum, Float dt, int nDim) : out(out), in(in), momentum(momentum), dt(dt), nDim(nDim) { } }; /** Direct port of the TIFR expsu3 algorithm */ template <typename Float> __device__ __host__ void expsu3(Matrix<complex<Float>,3> &q, int x) { typedef complex<Float> Complex; Complex a2 = (q(3)*q(1)+q(7)*q(5)+q(6)*q(2) - (q(0)*q(4)+(q(0)+q(4))*q(8))) / (Float)3.0 ; Complex a3 = q(0)*q(4)*q(8) + q(1)*q(5)*q(6) + q(2)*q(3)*q(7) - q(6)*q(4)*q(2) - q(3)*q(1)*q(8) - q(0)*q(7)*q(5); Complex sg2h3 = sqrt(a3*a3-(Float)4.*a2*a2*a2); Complex cp = exp( log((Float)0.5*(a3+sg2h3)) / (Float)3.0); Complex cm = a2/cp; Complex r1 = exp( Complex(0.0,1.0)*(Float)(2.0*M_PI/3.0)); Complex r2 = exp(-Complex(0.0,1.0)*(Float)(2.0*M_PI/3.0)); Complex w1[3]; w1[0]=cm+cp; w1[1]=r1*cp+r2*cm; w1[2]=r2*cp+r1*cm; Complex z1=q(1)*q(6)-q(0)*q(7); Complex z2=q(3)*q(7)-q(4)*q(6); Complex al = w1[0]; Complex wr21 = (z1+al*q(7)) / (z2+al*q(6)); Complex wr31 = (al-q(0)-wr21*q(3))/q(6); al=w1[1]; Complex wr22 = (z1+al*q(7))/(z2+al*q(6)); Complex wr32 = (al-q(0)-wr22*q(3))/q(6); al=w1[2]; Complex wr23 = (z1+al*q(7))/(z2+al*q(6)); Complex wr33 = (al-q(0)-wr23*q(3))/q(6); z1=q(3)*q(2) - q(0)*q(5); z2=q(1)*q(5) - q(4)*q(2); al=w1[0]; Complex wl21 = conj((z1+al*q(5))/(z2+al*q(2))); Complex wl31 = conj((al-q(0)-conj(wl21)*q(1))/q(2)); al=w1[1]; Complex wl22 = conj((z1+al*q(5))/(z2+al*q(2))); Complex wl32 = conj((al-q(0)-conj(wl22)*q(1))/q(2)); al=w1[2]; Complex wl23 = conj((z1+al*q(5))/(z2+al*q(2))); Complex wl33 = conj((al-q(0)-conj(wl23)*q(1))/q(2)); Complex xn1 = (Float)1. + wr21*conj(wl21) + wr31*conj(wl31); Complex xn2 = (Float)1. + wr22*conj(wl22) + wr32*conj(wl32); Complex xn3 = (Float)1. 
+ wr23*conj(wl23) + wr33*conj(wl33); Complex d1 = exp(w1[0]); Complex d2 = exp(w1[1]); Complex d3 = exp(w1[2]); Complex y11 = d1/xn1; Complex y12 = d2/xn2; Complex y13 = d3/xn3; Complex y21 = wr21*d1/xn1; Complex y22 = wr22*d2/xn2; Complex y23 = wr23*d3/xn3; Complex y31 = wr31*d1/xn1; Complex y32 = wr32*d2/xn2; Complex y33 = wr33*d3/xn3; q(0) = y11 + y12 + y13; q(1) = y21 + y22 + y23; q(2) = y31 + y32 + y33; q(3) = y11*conj(wl21) + y12*conj(wl22) + y13*conj(wl23); q(4) = y21*conj(wl21) + y22*conj(wl22) + y23*conj(wl23); q(5) = y31*conj(wl21) + y32*conj(wl22) + y33*conj(wl23); q(6) = y11*conj(wl31) + y12*conj(wl32) + y13*conj(wl33); q(7) = y21*conj(wl31) + y22*conj(wl32) + y23*conj(wl33); q(8) = y31*conj(wl31) + y32*conj(wl32) + y33*conj(wl33); } template<typename Float, typename Gauge, typename Mom, int N, bool conj_mom, bool exact> __device__ __host__ void updateGaugeFieldCompute (UpdateGaugeArg<Float,Gauge,Mom> &arg, int x, int parity) { typedef complex<Float> Complex; Matrix<Complex,3> link, result, mom; for(int dir=0; dir<arg.nDim; ++dir){ arg.in.load((Float*)(link.data), x, dir, parity); arg.momentum.load((Float*)(mom.data), x, dir, parity); Complex trace = getTrace(mom); mom(0,0) -= trace/static_cast<Float>(3.0); mom(1,1) -= trace/static_cast<Float>(3.0); mom(2,2) -= trace/static_cast<Float>(3.0); if (!exact) { result = link; // Nth order expansion of exponential if (!conj_mom) { for(int r=N; r>0; r--) result = (arg.dt/r)*mom*result + link; } else { for(int r=N; r>0; r--) result = (arg.dt/r)*conj(mom)*result + link; } } else { mom = arg.dt * mom; expsu3<Float>(mom, x+dir+parity); if (!conj_mom) { link = mom * link; } else { link = conj(mom) * link; } result = link; } arg.out.save((Float*)(result.data), x, dir, parity); } // dir } template<typename Float, typename Gauge, typename Mom, int N, bool conj_mom, bool exact> void updateGaugeField(UpdateGaugeArg<Float,Gauge,Mom> arg) { for (unsigned int parity=0; parity<2; parity++) { for (int x=0; x<arg.out.volumeCB; x++) { updateGaugeFieldCompute<Float,Gauge,Mom,N,conj_mom,exact> (arg, x, parity); } } } template<typename Float, typename Gauge, typename Mom, int N, bool conj_mom, bool exact> __global__ void updateGaugeFieldKernel(UpdateGaugeArg<Float,Gauge,Mom> arg) { int idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx >= 2*arg.out.volumeCB) return; int parity = (idx >= arg.out.volumeCB) ? 
1 : 0; idx -= parity*arg.out.volumeCB; updateGaugeFieldCompute<Float,Gauge,Mom,N,conj_mom,exact>(arg, idx, parity); } template <typename Float, typename Gauge, typename Mom, int N, bool conj_mom, bool exact> class UpdateGaugeField : public Tunable { private: UpdateGaugeArg<Float,Gauge,Mom> arg; const GaugeField &meta; // meta data const QudaFieldLocation location; // location of the lattice fields unsigned int sharedBytesPerThread() const { return 0; } unsigned int sharedBytesPerBlock(const TuneParam &) const { return 0; } unsigned int minThreads() const { return 2*arg.in.volumeCB; } bool tuneGridDim() const { return false; } public: UpdateGaugeField(const UpdateGaugeArg<Float,Gauge,Mom> &arg, const GaugeField &meta, QudaFieldLocation location) : arg(arg), meta(meta), location(location) { writeAuxString("threads=%d,prec=%lu,stride=%d", 2*arg.in.volumeCB, sizeof(Float), arg.in.stride); } virtual ~UpdateGaugeField() { } void apply(const cudaStream_t &stream){ if (location == QUDA_CUDA_FIELD_LOCATION) { TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); updateGaugeFieldKernel<Float,Gauge,Mom,N,conj_mom,exact> <<<tp.grid,tp.block,tp.shared_bytes>>>(arg); } else { // run the CPU code updateGaugeField<Float,Gauge,Mom,N,conj_mom,exact>(arg); } } // apply long long flops() const { const int Nc = 3; return arg.nDim*2*arg.in.volumeCB*N*(Nc*Nc*2 + // scalar-matrix multiply (8*Nc*Nc*Nc - 2*Nc*Nc) + // matrix-matrix multiply Nc*Nc*2); // matrix-matrix addition } long long bytes() const { return arg.nDim*2*arg.in.volumeCB* (arg.in.Bytes() + arg.out.Bytes() + arg.momentum.Bytes()); } TuneKey tuneKey() const { return TuneKey(meta.VolString(), typeid(*this).name(), aux); } }; template <typename Float, typename Gauge, typename Mom> void updateGaugeField(Gauge &out, const Gauge &in, const Mom &mom, double dt, const GaugeField &meta, bool conj_mom, bool exact, QudaFieldLocation location) { // degree of exponential expansion const int N = 8; if (conj_mom) { if (exact) { UpdateGaugeArg<Float, Gauge, Mom> arg(out, in, mom, dt, 4); UpdateGaugeField<Float,Gauge,Mom,N,true,true> updateGauge(arg, meta, location); updateGauge.apply(0); } else { UpdateGaugeArg<Float, Gauge, Mom> arg(out, in, mom, dt, 4); UpdateGaugeField<Float,Gauge,Mom,N,true,false> updateGauge(arg, meta, location); updateGauge.apply(0); } } else { if (exact) { UpdateGaugeArg<Float, Gauge, Mom> arg(out, in, mom, dt, 4); UpdateGaugeField<Float,Gauge,Mom,N,false,true> updateGauge(arg, meta, location); updateGauge.apply(0); } else { UpdateGaugeArg<Float, Gauge, Mom> arg(out, in, mom, dt, 4); UpdateGaugeField<Float,Gauge,Mom,N,false,false> updateGauge(arg, meta, location); updateGauge.apply(0); } } if (location == QUDA_CUDA_FIELD_LOCATION) checkCudaError(); } template <typename Float, typename Gauge> void updateGaugeField(Gauge out, const Gauge &in, const GaugeField &mom, double dt, bool conj_mom, bool exact, QudaFieldLocation location) { if (mom.Order() == QUDA_FLOAT2_GAUGE_ORDER) { if (mom.Reconstruct() == QUDA_RECONSTRUCT_10) { // FIX ME - 11 is a misnomer to avoid confusion in template instantiation updateGaugeField<Float>(out, in, gauge::FloatNOrder<Float,18,2,11>(mom), dt, mom, conj_mom, exact, location); } else { errorQuda("Reconstruction type not supported"); } } else if (mom.Order() == QUDA_MILC_GAUGE_ORDER) { updateGaugeField<Float>(out, in, gauge::MILCOrder<Float,10>(mom), dt, mom, conj_mom, exact, location); } else { errorQuda("Gauge Field order %d not supported", mom.Order()); } } template <typename Float> void 
updateGaugeField(GaugeField &out, const GaugeField &in, const GaugeField &mom, double dt, bool conj_mom, bool exact, QudaFieldLocation location) { const int Nc = 3; if (out.Ncolor() != Nc) errorQuda("Ncolor=%d not supported at this time", out.Ncolor()); if (out.Order() != in.Order() || out.Reconstruct() != in.Reconstruct()) { errorQuda("Input and output gauge field ordering and reconstruction must match"); } if (out.isNative()) { if (out.Reconstruct() == QUDA_RECONSTRUCT_NO) { typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_NO>::type G; updateGaugeField<Float>(G(out),G(in), mom, dt, conj_mom, exact, location); } else if (out.Reconstruct() == QUDA_RECONSTRUCT_12) { typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_12>::type G; updateGaugeField<Float>(G(out), G(in), mom, dt, conj_mom, exact, location); } else { errorQuda("Reconstruction type not supported"); } } else if (out.Order() == QUDA_MILC_GAUGE_ORDER) { updateGaugeField<Float>(gauge::MILCOrder<Float, Nc*Nc*2>(out), gauge::MILCOrder<Float, Nc*Nc*2>(in), mom, dt, conj_mom, exact, location); } else { errorQuda("Gauge Field order %d not supported", out.Order()); } } #endif void updateGaugeField(GaugeField &out, double dt, const GaugeField& in, const GaugeField& mom, bool conj_mom, bool exact) { #ifdef GPU_GAUGE_TOOLS if (out.Precision() != in.Precision() || out.Precision() != mom.Precision()) errorQuda("Gauge and momentum fields must have matching precision"); if (out.Location() != in.Location() || out.Location() != mom.Location()) errorQuda("Gauge and momentum fields must have matching location"); if (out.Precision() == QUDA_DOUBLE_PRECISION) { updateGaugeField<double>(out, in, mom, dt, conj_mom, exact, out.Location()); } else if (out.Precision() == QUDA_SINGLE_PRECISION) { updateGaugeField<float>(out, in, mom, dt, conj_mom, exact, out.Location()); } else { errorQuda("Precision %d not supported", out.Precision()); } #else errorQuda("Gauge tools are not build"); #endif } } // namespace quda
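When exact is false, updateGaugeFieldCompute in the pair above applies an order-N Taylor expansion of exp(dt*mom) to the link matrix with the Horner-style recurrence result = (arg.dt/r)*mom*result + link for r = N..1, starting from result = link. The scalar sketch below (an illustration, not QUDA code) shows why that loop reproduces the truncated series 1 + x + x^2/2! + ... + x^N/N!.

#include <cstdio>
#include <cmath>

// Horner form of the order-N exponential series: r_N = 1 and
// r_{k-1} = (x/k) * r_k + 1, so r_0 = sum_{k=0..N} x^k / k!.
double exp_taylor_horner(double x, int N)
{
  double r = 1.0;
  for (int k = N; k > 0; --k) r = (x / k) * r + 1.0;
  return r;
}

int main()
{
  // N = 8 matches the expansion degree used by updateGaugeField.
  printf("order-8 Horner: %.12f  std::exp: %.12f\n",
         exp_taylor_horner(0.25, 8), std::exp(0.25));
  return 0;
}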
ce94724e5061911534f0d2a1a7e05966e833b832.hip
// !!! This is a file automatically generated by hipify!!! /* Fractal code for CS 4380 / CS 5351 Copyright (c) 2016, Texas State University. All rights reserved. Redistribution in source or binary form, with or without modification, is not permitted. Use in source and binary forms, with or without modification, is only permitted for academic use in CS 4380 or CS 5351 at Texas State University. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. Author: Martin Burtscher */ #include <cstdlib> #include <sys/time.h> #include <hip/hip_runtime.h> #include "cs43805351.h" static const int ThreadsPerBlock = 512; static const double Delta = 0.005491; static const double xMid = 0.745796; static const double yMid = 0.105089; static __global__ void FractalKernel(const int gpu_frames, const int width, unsigned char pic_d[]) { const int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < gpu_frames * (width * width)) { const int col = idx % width; const int row = (idx / width) % width; const int frame = idx / (width * width); //todo: compute a single pixel here const double delta = Delta * pow(0.99, frame); const double xMin = xMid - delta; const double yMin = yMid - delta; const double dw = 2.0 * delta / width; // for (int row = 0; row < width; row++) { const double cy = -yMin - row * dw; // for (int col = 0; col < width; col++) { const double cx = -xMin - col * dw; double x = cx; double y = cy; int depth = 256; double x2, y2; do { x2 = x * x; y2 = y * y; y = 2 * x * y + cy; x = x2 - y2 + cx; depth--; } while ((depth > 0) && ((x2 + y2) < 5.0)); pic_d[idx] = (unsigned char)depth; } } unsigned char* GPU_Init(const int size) { unsigned char* pic_d; if (hipSuccess != hipMalloc((void **)&pic_d, size)) {fprintf(stderr, "could not allocate memory\n"); exit(-1);} return pic_d; } void GPU_Exec(const int gpu_frames, const int width, unsigned char pic_d[]) { // call the kernel (and do nothing else) int blockAmount = (width*width*gpu_frames + ThreadsPerBlock - 1) / ThreadsPerBlock; hipLaunchKernelGGL(( FractalKernel), dim3(blockAmount),dim3(ThreadsPerBlock), 0, 0, gpu_frames, width, pic_d); } void GPU_Fini(const int size, unsigned char pic[], unsigned char pic_d[]) { // copy the pixel data to the CPU and deallocate the GPU array if (hipSuccess != hipMemcpy(pic, pic_d, size, hipMemcpyDeviceToHost)) {fprintf(stderr, "copying from device failed\n"); exit(-1);} hipFree(pic_d); }
ce94724e5061911534f0d2a1a7e05966e833b832.cu
/* Fractal code for CS 4380 / CS 5351 Copyright (c) 2016, Texas State University. All rights reserved. Redistribution in source or binary form, with or without modification, is not permitted. Use in source and binary forms, with or without modification, is only permitted for academic use in CS 4380 or CS 5351 at Texas State University. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. Author: Martin Burtscher */ #include <cstdlib> #include <sys/time.h> #include <cuda.h> #include "cs43805351.h" static const int ThreadsPerBlock = 512; static const double Delta = 0.005491; static const double xMid = 0.745796; static const double yMid = 0.105089; static __global__ void FractalKernel(const int gpu_frames, const int width, unsigned char pic_d[]) { const int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < gpu_frames * (width * width)) { const int col = idx % width; const int row = (idx / width) % width; const int frame = idx / (width * width); //todo: compute a single pixel here const double delta = Delta * pow(0.99, frame); const double xMin = xMid - delta; const double yMin = yMid - delta; const double dw = 2.0 * delta / width; // for (int row = 0; row < width; row++) { const double cy = -yMin - row * dw; // for (int col = 0; col < width; col++) { const double cx = -xMin - col * dw; double x = cx; double y = cy; int depth = 256; double x2, y2; do { x2 = x * x; y2 = y * y; y = 2 * x * y + cy; x = x2 - y2 + cx; depth--; } while ((depth > 0) && ((x2 + y2) < 5.0)); pic_d[idx] = (unsigned char)depth; } } unsigned char* GPU_Init(const int size) { unsigned char* pic_d; if (cudaSuccess != cudaMalloc((void **)&pic_d, size)) {fprintf(stderr, "could not allocate memory\n"); exit(-1);} return pic_d; } void GPU_Exec(const int gpu_frames, const int width, unsigned char pic_d[]) { // call the kernel (and do nothing else) int blockAmount = (width*width*gpu_frames + ThreadsPerBlock - 1) / ThreadsPerBlock; FractalKernel<<<blockAmount,ThreadsPerBlock>>>(gpu_frames, width, pic_d); } void GPU_Fini(const int size, unsigned char pic[], unsigned char pic_d[]) { // copy the pixel data to the CPU and deallocate the GPU array if (cudaSuccess != cudaMemcpy(pic, pic_d, size, cudaMemcpyDeviceToHost)) {fprintf(stderr, "copying from device failed\n"); exit(-1);} cudaFree(pic_d); }
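GPU_Exec above sizes its launch with the usual ceiling division (width*width*gpu_frames + ThreadsPerBlock - 1) / ThreadsPerBlock, and FractalKernel then unflattens the 1-D global index into (frame, row, col) with div/mod. A short host-side sketch of both idioms (illustrative only, not part of the fractal code):

#include <cstdio>

// Smallest number of blocks whose combined threads cover n work items.
static int ceil_div(int n, int threads_per_block)
{
  return (n + threads_per_block - 1) / threads_per_block;
}

int main()
{
  const int width = 200, frames = 3, tpb = 512;
  const int n = frames * width * width;
  printf("%d pixels -> %d blocks of %d threads\n", n, ceil_div(n, tpb), tpb);

  // Unflatten a linear index exactly as FractalKernel does.
  const int idx = 123456 % n;
  const int col = idx % width;
  const int row = (idx / width) % width;
  const int frame = idx / (width * width);
  printf("idx %d -> frame %d, row %d, col %d\n", idx, frame, row, col);
  return 0;
}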
731189400dc093cc5e450c1b517627cdb66e03f7.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime.h> #include <stdint.h> #include <time.h> #include "cuStopwatch.cu" #define SHIFT 27 __global__ void search_1(const uint32_t* arr, uint32_t size, uint32_t* res, uint32_t val) { uint32_t tid = threadIdx.x + blockIdx.x * blockDim.x; uint32_t gridsize = blockDim.x * gridDim.x; uint32_t warpcnt = 0; while(tid < size){ uint32_t herecount = (arr[tid] == val) ? 1 : 0; herecount += __shfl_down_sync(0xffffffff, herecount, 16); herecount += __shfl_down_sync(0xffffffff, herecount, 8); herecount += __shfl_down_sync(0xffffffff, herecount, 4); herecount += __shfl_down_sync(0xffffffff, herecount, 2); herecount += __shfl_down_sync(0xffffffff, herecount, 1); warpcnt += herecount; tid += gridsize; } if((threadIdx.x & 31) == 0) atomicAdd(res, warpcnt); return; } __global__ void search_2(const uint32_t* arr, uint32_t size, uint32_t* res, uint32_t val) { uint32_t tid = threadIdx.x + blockIdx.x * blockDim.x; uint32_t gridsize = blockDim.x * gridDim.x; while(tid < size){ uint32_t ishere = (arr[tid] == val) ? 1 : 0; if(__any_sync(0xffffffff, ishere)) if ((threadIdx.x & 31) == 0) atomicAdd(res, 1); tid += gridsize; } return; } __global__ void search_3(const uint32_t* arr, uint32_t size, uint32_t* res, uint32_t val) { uint32_t tid = threadIdx.x + blockIdx.x * blockDim.x; uint32_t gridsize = blockDim.x * gridDim.x; while(tid < size){ if(arr[tid] == val) atomicAdd(res, 1); tid += gridsize; } return; } __global__ void search_4(const uint32_t* arr, uint32_t size, uint32_t* res, uint32_t val) { if(*res != 0) return; uint32_t tid = threadIdx.x + blockIdx.x * blockDim.x; uint32_t gridsize = blockDim.x * gridDim.x; while((tid < size) && (*res == 0)){ if(arr[tid] == val) (*res)++; tid += gridsize; } return; } void randgen(uint32_t* arr, size_t count, uint32_t mask){ uint32_t state = time(NULL); for(uint32_t i = 0; i < count; i++){ state ^= state << 13; state ^= state >> 17; state ^= state << 5; arr[i] = state & mask; } return; } int main() { // Allocate memory, filling in random data and transfer to device uint32_t *arr_host, *arr_dev, *res_dev; uint32_t res; const uint32_t arr_size = 1 << SHIFT; hipHostMalloc((void**)&arr_host, arr_size*sizeof(uint32_t), hipHostMallocDefault); hipMalloc((void**)&arr_dev, arr_size*sizeof(uint32_t)); hipMalloc((void**)&res_dev, sizeof(uint32_t)); printf("Finding 42 in %d elements\n", arr_size); // Search the element 42 using different kernels for(int target_shift = 12; target_shift <= 32; target_shift+=4){ randgen(arr_host, arr_size, (1<<target_shift) - 1); uint32_t exactcnt = 0; float elapsed = 0; for(int i=0; i<arr_size; i++) if(arr_host[i] == 42) exactcnt++; printf("\nShift %d, with %d elements equal to 42 to be found\n", target_shift, exactcnt); hipMemcpyAsync(arr_dev, arr_host, arr_size*sizeof(uint32_t), hipMemcpyHostToDevice); // Performing odd-even computing on 2^25 integers cuStopwatch sw1; sw1.start(); res = 0; hipMemcpyAsync(res_dev, &res, sizeof(uint32_t), hipMemcpyHostToDevice); hipLaunchKernelGGL(( search_1), dim3(256), dim3(1024), 0, 0, arr_dev, arr_size, res_dev, 42); hipMemcpyAsync(&res, res_dev, sizeof(uint32_t), hipMemcpyDeviceToHost); elapsed = sw1.stop(); if(res != 0) printf("Method 1: %7.4fms, found, returning %u.\n", elapsed, res); else printf("Method 1: %7.4fms, not found, returning %u.\n", elapsed, res); cuStopwatch sw2; sw2.start(); res = 0; hipMemcpyAsync(res_dev, &res, sizeof(uint32_t), hipMemcpyHostToDevice); hipLaunchKernelGGL(( search_2), dim3(256), dim3(1024), 
0, 0, arr_dev, arr_size, res_dev, 42); hipMemcpyAsync(&res, res_dev, sizeof(uint32_t), hipMemcpyDeviceToHost); elapsed = sw2.stop(); if(res != 0) printf("Method 2: %7.4fms, found, returning %u.\n", elapsed, res); else printf("Method 2: %7.4fms, not found, returning %u.\n", elapsed, res); cuStopwatch sw3; sw3.start(); res = 0; hipMemcpyAsync(res_dev, &res, sizeof(uint32_t), hipMemcpyHostToDevice); hipLaunchKernelGGL(( search_3), dim3(256), dim3(1024), 0, 0, arr_dev, arr_size, res_dev, 42); hipMemcpyAsync(&res, res_dev, sizeof(uint32_t), hipMemcpyDeviceToHost); elapsed = sw3.stop(); if(res != 0) printf("Method 3: %7.4fms, found, returning %u.\n", elapsed, res); else printf("Method 3: %7.4fms, not found, returning %u.\n", elapsed, res); cuStopwatch sw4; sw4.start(); res = 0; hipMemcpyAsync(res_dev, &res, sizeof(uint32_t), hipMemcpyHostToDevice); hipLaunchKernelGGL(( search_4), dim3(256), dim3(1024), 0, 0, arr_dev, arr_size, res_dev, 42); hipMemcpyAsync(&res, res_dev, sizeof(uint32_t), hipMemcpyDeviceToHost); elapsed = sw4.stop(); if(res != 0) printf("Method 4: %7.4fms, found, returning %u.\n", elapsed, res); else printf("Method 4: %7.4fms, not found, returning %u.\n", elapsed, res); } // Free memory hipHostFree(arr_host); hipFree(arr_dev); hipFree(res_dev); return 0; }
731189400dc093cc5e450c1b517627cdb66e03f7.cu
#include <stdio.h> #include <cuda_runtime.h> #include <stdint.h> #include <time.h> #include "cuStopwatch.cu" #define SHIFT 27 __global__ void search_1(const uint32_t* arr, uint32_t size, uint32_t* res, uint32_t val) { uint32_t tid = threadIdx.x + blockIdx.x * blockDim.x; uint32_t gridsize = blockDim.x * gridDim.x; uint32_t warpcnt = 0; while(tid < size){ uint32_t herecount = (arr[tid] == val) ? 1 : 0; herecount += __shfl_down_sync(0xffffffff, herecount, 16); herecount += __shfl_down_sync(0xffffffff, herecount, 8); herecount += __shfl_down_sync(0xffffffff, herecount, 4); herecount += __shfl_down_sync(0xffffffff, herecount, 2); herecount += __shfl_down_sync(0xffffffff, herecount, 1); warpcnt += herecount; tid += gridsize; } if((threadIdx.x & 31) == 0) atomicAdd(res, warpcnt); return; } __global__ void search_2(const uint32_t* arr, uint32_t size, uint32_t* res, uint32_t val) { uint32_t tid = threadIdx.x + blockIdx.x * blockDim.x; uint32_t gridsize = blockDim.x * gridDim.x; while(tid < size){ uint32_t ishere = (arr[tid] == val) ? 1 : 0; if(__any_sync(0xffffffff, ishere)) if ((threadIdx.x & 31) == 0) atomicAdd(res, 1); tid += gridsize; } return; } __global__ void search_3(const uint32_t* arr, uint32_t size, uint32_t* res, uint32_t val) { uint32_t tid = threadIdx.x + blockIdx.x * blockDim.x; uint32_t gridsize = blockDim.x * gridDim.x; while(tid < size){ if(arr[tid] == val) atomicAdd(res, 1); tid += gridsize; } return; } __global__ void search_4(const uint32_t* arr, uint32_t size, uint32_t* res, uint32_t val) { if(*res != 0) return; uint32_t tid = threadIdx.x + blockIdx.x * blockDim.x; uint32_t gridsize = blockDim.x * gridDim.x; while((tid < size) && (*res == 0)){ if(arr[tid] == val) (*res)++; tid += gridsize; } return; } void randgen(uint32_t* arr, size_t count, uint32_t mask){ uint32_t state = time(NULL); for(uint32_t i = 0; i < count; i++){ state ^= state << 13; state ^= state >> 17; state ^= state << 5; arr[i] = state & mask; } return; } int main() { // Allocate memory, filling in random data and transfer to device uint32_t *arr_host, *arr_dev, *res_dev; uint32_t res; const uint32_t arr_size = 1 << SHIFT; cudaHostAlloc((void**)&arr_host, arr_size*sizeof(uint32_t), cudaHostAllocDefault); cudaMalloc((void**)&arr_dev, arr_size*sizeof(uint32_t)); cudaMalloc((void**)&res_dev, sizeof(uint32_t)); printf("Finding 42 in %d elements\n", arr_size); // Search the element 42 using different kernels for(int target_shift = 12; target_shift <= 32; target_shift+=4){ randgen(arr_host, arr_size, (1<<target_shift) - 1); uint32_t exactcnt = 0; float elapsed = 0; for(int i=0; i<arr_size; i++) if(arr_host[i] == 42) exactcnt++; printf("\nShift %d, with %d elements equal to 42 to be found\n", target_shift, exactcnt); cudaMemcpyAsync(arr_dev, arr_host, arr_size*sizeof(uint32_t), cudaMemcpyHostToDevice); // Performing odd-even computing on 2^25 integers cuStopwatch sw1; sw1.start(); res = 0; cudaMemcpyAsync(res_dev, &res, sizeof(uint32_t), cudaMemcpyHostToDevice); search_1<<<256, 1024>>>(arr_dev, arr_size, res_dev, 42); cudaMemcpyAsync(&res, res_dev, sizeof(uint32_t), cudaMemcpyDeviceToHost); elapsed = sw1.stop(); if(res != 0) printf("Method 1: %7.4fms, found, returning %u.\n", elapsed, res); else printf("Method 1: %7.4fms, not found, returning %u.\n", elapsed, res); cuStopwatch sw2; sw2.start(); res = 0; cudaMemcpyAsync(res_dev, &res, sizeof(uint32_t), cudaMemcpyHostToDevice); search_2<<<256, 1024>>>(arr_dev, arr_size, res_dev, 42); cudaMemcpyAsync(&res, res_dev, sizeof(uint32_t), cudaMemcpyDeviceToHost); elapsed = 
sw2.stop(); if(res != 0) printf("Method 2: %7.4fms, found, returning %u.\n", elapsed, res); else printf("Method 2: %7.4fms, not found, returning %u.\n", elapsed, res); cuStopwatch sw3; sw3.start(); res = 0; cudaMemcpyAsync(res_dev, &res, sizeof(uint32_t), cudaMemcpyHostToDevice); search_3<<<256, 1024>>>(arr_dev, arr_size, res_dev, 42); cudaMemcpyAsync(&res, res_dev, sizeof(uint32_t), cudaMemcpyDeviceToHost); elapsed = sw3.stop(); if(res != 0) printf("Method 3: %7.4fms, found, returning %u.\n", elapsed, res); else printf("Method 3: %7.4fms, not found, returning %u.\n", elapsed, res); cuStopwatch sw4; sw4.start(); res = 0; cudaMemcpyAsync(res_dev, &res, sizeof(uint32_t), cudaMemcpyHostToDevice); search_4<<<256, 1024>>>(arr_dev, arr_size, res_dev, 42); cudaMemcpyAsync(&res, res_dev, sizeof(uint32_t), cudaMemcpyDeviceToHost); elapsed = sw4.stop(); if(res != 0) printf("Method 4: %7.4fms, found, returning %u.\n", elapsed, res); else printf("Method 4: %7.4fms, not found, returning %u.\n", elapsed, res); } // Free memory cudaFreeHost(arr_host); cudaFree(arr_dev); cudaFree(res_dev); return 0; }
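search_1 in the pair above reduces per-thread matches to a per-warp count with __shfl_down_sync before issuing a single atomicAdd per warp, whereas search_3 performs one atomicAdd per matching thread. The kernel below is a minimal stand-alone version of that warp-level sum (an illustration of the pattern, not code from the benchmark); it avoids early returns so the full 0xffffffff mask stays valid, and it assumes the block size is a multiple of 32.

#include <cstdint>
#include <cuda_runtime.h>

// Count occurrences of val with one atomicAdd per warp instead of per thread.
__global__ void count_warp_reduced(const uint32_t *arr, uint32_t size,
                                   uint32_t *res, uint32_t val)
{
  const uint32_t tid = threadIdx.x + blockIdx.x * blockDim.x;
  uint32_t hit = (tid < size && arr[tid] == val) ? 1u : 0u;
  // Shuffle-down reduction: offsets 16, 8, 4, 2, 1 accumulate the warp's hits into lane 0.
  for (int offset = 16; offset > 0; offset >>= 1)
    hit += __shfl_down_sync(0xffffffff, hit, offset);
  if ((threadIdx.x & 31) == 0)   // lane 0 now holds the warp total
    atomicAdd(res, hit);
}

A launch such as count_warp_reduced<<<(size + 255) / 256, 256>>>(arr_dev, size, res_dev, 42) keeps every warp fully populated, which is what makes the full-mask shuffle legal here.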
9d914d3689716794c6cc44c7f0525fc35860a325.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/elementwise/elementwise_functor.h" #include "paddle/fluid/operators/elementwise/elementwise_op_broadcast.cu.h" #include "paddle/fluid/operators/viterbi_decode_op.h" #include "paddle/phi/kernels/funcs/gather.cu.h" #ifdef __NVCC__ #include "hipcub/hipcub.hpp" #endif #ifdef __HIPCC__ #include <hipcub/hipcub.hpp> namespace cub = hipcub; #endif namespace paddle { namespace operators { #define FIXED_BLOCK_DIM_CASE_BASE(log2_block_dim, ...) \ case (1 << (log2_block_dim)): { \ constexpr auto kBlockDim = (1 << (log2_block_dim)); \ __VA_ARGS__; \ } break #define FIXED_BLOCK_DIM_CASE(...) \ FIXED_BLOCK_DIM_CASE_BASE(10, ##__VA_ARGS__); \ FIXED_BLOCK_DIM_CASE_BASE(9, ##__VA_ARGS__); \ FIXED_BLOCK_DIM_CASE_BASE(8, ##__VA_ARGS__); \ FIXED_BLOCK_DIM_CASE_BASE(7, ##__VA_ARGS__); \ FIXED_BLOCK_DIM_CASE_BASE(6, ##__VA_ARGS__); \ FIXED_BLOCK_DIM_CASE_BASE(5, ##__VA_ARGS__); \ FIXED_BLOCK_DIM_CASE_BASE(4, ##__VA_ARGS__); \ FIXED_BLOCK_DIM_CASE_BASE(3, ##__VA_ARGS__); int64_t ComputeBlockSize(int64_t col) { if (col > 512) return 1024; else if (col > 256) return 512; else if (col > 128) return 256; else if (col > 64) return 128; else if (col > 32) return 64; else if (col > 16) return 32; else if (col > 8) return 16; else return 8; } template <template <typename T> typename BinaryFunctor, typename T> struct BinaryOperation<platform::CUDADeviceContext, BinaryFunctor, T> { void operator()(const platform::CUDADeviceContext& dev_ctx, const framework::Tensor& lhs, const framework::Tensor& rhs, framework::Tensor* output) { std::vector<const framework::Tensor*> ins{&lhs, &rhs}; std::vector<framework::Tensor*> outs{output}; paddle::operators::LaunchElementwiseCudaKernel<ElementwiseType::kBinary, T, T>(dev_ctx, ins, &outs, -1, BinaryFunctor<T>()); } }; template <template <typename InT, typename OutT> typename CompareFunctor, typename T> struct GetMask<platform::CUDADeviceContext, CompareFunctor, T> { void operator()(const framework::ExecutionContext& ctx, const framework::Tensor& lhs, const framework::Tensor& rhs, framework::Tensor* mask) { std::vector<const framework::Tensor*> ins = {&lhs, &rhs}; std::vector<framework::Tensor*> outs = {mask}; auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>(); paddle::operators::LaunchSameDimsElementwiseCudaKernel<T>( dev_ctx, ins, &outs, CompareFunctor<int64_t, T>()); } }; template <typename T, typename IndType, size_t BlockDim> __global__ void ArgmaxCUDAKernel(const int64_t height, // n * h const int64_t width, // c const int64_t post_size, // h const T* in, IndType* out_idx, T* out) { typedef hipcub::BlockReduce<hipcub::KeyValuePair<int, T>, BlockDim> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; hipcub::ArgMax reducer; T init = (std::numeric_limits<T>::lowest)(); // for windows compile for (int idx = blockIdx.x; idx < height; idx += 
gridDim.x) { hipcub::KeyValuePair<int, T> kv_pair = {-1, init}; int h = idx / post_size; int w = idx % post_size; for (int k = threadIdx.x; k < width; k += blockDim.x) { kv_pair = reducer({k, in[h * width * post_size + k * post_size + w]}, kv_pair); } kv_pair = BlockReduce(temp_storage).Reduce(kv_pair, reducer); if (threadIdx.x == 0) { // return max, argmax if (out_idx != nullptr) out_idx[idx] = static_cast<IndType>(kv_pair.key); if (out != nullptr) out[idx] = kv_pair.value; } __syncthreads(); } } __global__ void ARangeKernel(int64_t* data, int num, int64_t scale) { int idx = blockIdx.x * blockDim.x + threadIdx.x; for (int start = idx; idx < num; idx += gridDim.x) { data[idx] = idx * scale; } } template <> struct ARange<platform::CUDADeviceContext> { void operator()(const platform::CUDADeviceContext& dev_ctx, int64_t* data, int num, int64_t scale) { int64_t kBlockDim = ComputeBlockSize(num); // kBlockDim > num at most of time, so we can set grid = 1 hipLaunchKernelGGL(( ARangeKernel), dim3(1), dim3(kBlockDim), 0, dev_ctx.stream(), data, num, scale); } }; template <typename T, typename IndType> struct Argmax<platform::CUDADeviceContext, T, IndType> { void operator()(const framework::ExecutionContext& ctx, const framework::Tensor& input, framework::Tensor* out_idx, framework::Tensor* out, int axis) { framework::DDim input_dims = input.dims(); int64_t numel = input.numel(); int64_t groups = numel / input_dims[axis]; int64_t pre = 1; int64_t post = 1; int64_t n = input_dims[axis]; for (int i = 0; i < axis; i++) { pre *= input_dims[i]; } for (int i = axis + 1; i < input_dims.size(); i++) { post *= input_dims[i]; } const auto& dev_ctx = ctx.cuda_device_context(); auto cu_stream = dev_ctx.stream(); int64_t max_grid_dimx = dev_ctx.GetCUDAMaxGridDimSize()[0]; int64_t height = pre * post; int64_t width = n; int64_t grid_size = height < max_grid_dimx ? height : max_grid_dimx; const T* in_data = input.data<T>(); IndType* out_idx_data = out_idx->data<IndType>(); T* out_data = out->data<T>(); switch (ComputeBlockSize(width)) { FIXED_BLOCK_DIM_CASE( hipLaunchKernelGGL(( ArgmaxCUDAKernel<T, IndType, kBlockDim>), dim3(grid_size), dim3(kBlockDim), 0, cu_stream, height, width, post, in_data, out_idx_data, out_data)); } } }; template <typename T> struct GetMaxValue<platform::CUDADeviceContext, T> { void operator()(const platform::CUDADeviceContext& dev_ctx, const framework::Tensor& input, T* max_value) { framework::Tensor out_data; out_data.Resize(phi::make_ddim({1})); out_data.mutable_data<T>(platform::CUDAPlace()); switch (ComputeBlockSize(input.numel())) { FIXED_BLOCK_DIM_CASE( hipLaunchKernelGGL(( ArgmaxCUDAKernel<T, T, kBlockDim>), dim3(1), dim3(kBlockDim), 0, dev_ctx.stream(), 1, input.numel(), 1, input.data<int64_t>(), nullptr, out_data.data<int64_t>())); } framework::Tensor max_value_tensor; framework::TensorCopy(out_data, platform::CPUPlace(), &max_value_tensor); *max_value = max_value_tensor.data<T>()[0]; } }; template <typename T, typename IndexT> struct Gather<platform::CUDADeviceContext, T, IndexT> { void operator()(const platform::CUDADeviceContext& ctx, const framework::Tensor& src, const framework::Tensor& index, framework::Tensor* output) { phi::funcs::GPUGather<T, IndexT>(ctx, src, index, output); } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; namespace platform = paddle::platform; REGISTER_OP_CUDA_KERNEL( viterbi_decode, ops::ViterbiDecodeKernel<platform::CUDADeviceContext, float>, ops::ViterbiDecodeKernel<platform::CUDADeviceContext, double>);
9d914d3689716794c6cc44c7f0525fc35860a325.cu
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/elementwise/elementwise_functor.h" #include "paddle/fluid/operators/elementwise/elementwise_op_broadcast.cu.h" #include "paddle/fluid/operators/viterbi_decode_op.h" #include "paddle/phi/kernels/funcs/gather.cu.h" #ifdef __NVCC__ #include "cub/cub.cuh" #endif #ifdef __HIPCC__ #include <hipcub/hipcub.hpp> namespace cub = hipcub; #endif namespace paddle { namespace operators { #define FIXED_BLOCK_DIM_CASE_BASE(log2_block_dim, ...) \ case (1 << (log2_block_dim)): { \ constexpr auto kBlockDim = (1 << (log2_block_dim)); \ __VA_ARGS__; \ } break #define FIXED_BLOCK_DIM_CASE(...) \ FIXED_BLOCK_DIM_CASE_BASE(10, ##__VA_ARGS__); \ FIXED_BLOCK_DIM_CASE_BASE(9, ##__VA_ARGS__); \ FIXED_BLOCK_DIM_CASE_BASE(8, ##__VA_ARGS__); \ FIXED_BLOCK_DIM_CASE_BASE(7, ##__VA_ARGS__); \ FIXED_BLOCK_DIM_CASE_BASE(6, ##__VA_ARGS__); \ FIXED_BLOCK_DIM_CASE_BASE(5, ##__VA_ARGS__); \ FIXED_BLOCK_DIM_CASE_BASE(4, ##__VA_ARGS__); \ FIXED_BLOCK_DIM_CASE_BASE(3, ##__VA_ARGS__); int64_t ComputeBlockSize(int64_t col) { if (col > 512) return 1024; else if (col > 256) return 512; else if (col > 128) return 256; else if (col > 64) return 128; else if (col > 32) return 64; else if (col > 16) return 32; else if (col > 8) return 16; else return 8; } template <template <typename T> typename BinaryFunctor, typename T> struct BinaryOperation<platform::CUDADeviceContext, BinaryFunctor, T> { void operator()(const platform::CUDADeviceContext& dev_ctx, const framework::Tensor& lhs, const framework::Tensor& rhs, framework::Tensor* output) { std::vector<const framework::Tensor*> ins{&lhs, &rhs}; std::vector<framework::Tensor*> outs{output}; paddle::operators::LaunchElementwiseCudaKernel<ElementwiseType::kBinary, T, T>(dev_ctx, ins, &outs, -1, BinaryFunctor<T>()); } }; template <template <typename InT, typename OutT> typename CompareFunctor, typename T> struct GetMask<platform::CUDADeviceContext, CompareFunctor, T> { void operator()(const framework::ExecutionContext& ctx, const framework::Tensor& lhs, const framework::Tensor& rhs, framework::Tensor* mask) { std::vector<const framework::Tensor*> ins = {&lhs, &rhs}; std::vector<framework::Tensor*> outs = {mask}; auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>(); paddle::operators::LaunchSameDimsElementwiseCudaKernel<T>( dev_ctx, ins, &outs, CompareFunctor<int64_t, T>()); } }; template <typename T, typename IndType, size_t BlockDim> __global__ void ArgmaxCUDAKernel(const int64_t height, // n * h const int64_t width, // c const int64_t post_size, // h const T* in, IndType* out_idx, T* out) { typedef cub::BlockReduce<cub::KeyValuePair<int, T>, BlockDim> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; cub::ArgMax reducer; T init = (std::numeric_limits<T>::lowest)(); // for windows compile for (int idx = blockIdx.x; idx < height; idx += gridDim.x) { cub::KeyValuePair<int, T> kv_pair = {-1, init}; int h = idx / post_size; int w = idx % post_size; 
for (int k = threadIdx.x; k < width; k += blockDim.x) { kv_pair = reducer({k, in[h * width * post_size + k * post_size + w]}, kv_pair); } kv_pair = BlockReduce(temp_storage).Reduce(kv_pair, reducer); if (threadIdx.x == 0) { // return max, argmax if (out_idx != nullptr) out_idx[idx] = static_cast<IndType>(kv_pair.key); if (out != nullptr) out[idx] = kv_pair.value; } __syncthreads(); } } __global__ void ARangeKernel(int64_t* data, int num, int64_t scale) { int idx = blockIdx.x * blockDim.x + threadIdx.x; for (int start = idx; idx < num; idx += gridDim.x) { data[idx] = idx * scale; } } template <> struct ARange<platform::CUDADeviceContext> { void operator()(const platform::CUDADeviceContext& dev_ctx, int64_t* data, int num, int64_t scale) { int64_t kBlockDim = ComputeBlockSize(num); // kBlockDim > num at most of time, so we can set grid = 1 ARangeKernel<<<1, kBlockDim, 0, dev_ctx.stream()>>>(data, num, scale); } }; template <typename T, typename IndType> struct Argmax<platform::CUDADeviceContext, T, IndType> { void operator()(const framework::ExecutionContext& ctx, const framework::Tensor& input, framework::Tensor* out_idx, framework::Tensor* out, int axis) { framework::DDim input_dims = input.dims(); int64_t numel = input.numel(); int64_t groups = numel / input_dims[axis]; int64_t pre = 1; int64_t post = 1; int64_t n = input_dims[axis]; for (int i = 0; i < axis; i++) { pre *= input_dims[i]; } for (int i = axis + 1; i < input_dims.size(); i++) { post *= input_dims[i]; } const auto& dev_ctx = ctx.cuda_device_context(); auto cu_stream = dev_ctx.stream(); int64_t max_grid_dimx = dev_ctx.GetCUDAMaxGridDimSize()[0]; int64_t height = pre * post; int64_t width = n; int64_t grid_size = height < max_grid_dimx ? height : max_grid_dimx; const T* in_data = input.data<T>(); IndType* out_idx_data = out_idx->data<IndType>(); T* out_data = out->data<T>(); switch (ComputeBlockSize(width)) { FIXED_BLOCK_DIM_CASE( ArgmaxCUDAKernel<T, IndType, kBlockDim><<<grid_size, kBlockDim, 0, cu_stream>>>( height, width, post, in_data, out_idx_data, out_data)); } } }; template <typename T> struct GetMaxValue<platform::CUDADeviceContext, T> { void operator()(const platform::CUDADeviceContext& dev_ctx, const framework::Tensor& input, T* max_value) { framework::Tensor out_data; out_data.Resize(phi::make_ddim({1})); out_data.mutable_data<T>(platform::CUDAPlace()); switch (ComputeBlockSize(input.numel())) { FIXED_BLOCK_DIM_CASE( ArgmaxCUDAKernel<T, T, kBlockDim><<<1, kBlockDim, 0, dev_ctx.stream()>>>( 1, input.numel(), 1, input.data<int64_t>(), nullptr, out_data.data<int64_t>())); } framework::Tensor max_value_tensor; framework::TensorCopy(out_data, platform::CPUPlace(), &max_value_tensor); *max_value = max_value_tensor.data<T>()[0]; } }; template <typename T, typename IndexT> struct Gather<platform::CUDADeviceContext, T, IndexT> { void operator()(const platform::CUDADeviceContext& ctx, const framework::Tensor& src, const framework::Tensor& index, framework::Tensor* output) { phi::funcs::GPUGather<T, IndexT>(ctx, src, index, output); } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; namespace platform = paddle::platform; REGISTER_OP_CUDA_KERNEL( viterbi_decode, ops::ViterbiDecodeKernel<platform::CUDADeviceContext, float>, ops::ViterbiDecodeKernel<platform::CUDADeviceContext, double>);
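The viterbi_decode pair picks a power-of-two block size at run time with ComputeBlockSize and then uses the FIXED_BLOCK_DIM_CASE switch to turn that value into the compile-time BlockDim template argument required by cub::BlockReduce. The fragment below is a stripped-down illustration of that runtime-to-compile-time dispatch (hypothetical kernel and names, not Paddle code).

#include <cub/cub.cuh>

// A reduction kernel that needs its block size as a template parameter,
// because cub::BlockReduce sizes its shared-memory storage at compile time.
template <int BlockDim>
__global__ void block_sum(const float *in, float *out, int n)
{
  typedef cub::BlockReduce<float, BlockDim> BlockReduce;
  __shared__ typename BlockReduce::TempStorage temp;
  float v = (threadIdx.x < n) ? in[threadIdx.x] : 0.0f;
  float sum = BlockReduce(temp).Sum(v);
  if (threadIdx.x == 0) *out = sum;
}

// Map the runtime block size onto template instantiations with an explicit switch,
// mirroring what the FIXED_BLOCK_DIM_CASE macro expands to.
void launch_block_sum(const float *in, float *out, int n, int block_dim, cudaStream_t s)
{
  switch (block_dim) {
    case 1024: block_sum<1024><<<1, 1024, 0, s>>>(in, out, n); break;
    case  512: block_sum< 512><<<1,  512, 0, s>>>(in, out, n); break;
    case  256: block_sum< 256><<<1,  256, 0, s>>>(in, out, n); break;
    default:   block_sum< 128><<<1,  128, 0, s>>>(in, out, n); break;
  }
}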
887c46ce09ffb50304a9e435795dee03f9c199e3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <cstdio> #include <string> #include <cmath> #include <algorithm> #include <limits> using namespace std; __device__ float operator !(const float3 p) { return sqrtf(p.x * p.x + p.y * p.y + p.z * p.z); } __device__ float operator &(const float3 a, const float3 b) { return a.x * b.x + a.y * b.y + a.z * b.z; } __device__ float3 operator ^(const float3 a, const float3 b) { return { a.y * b.z - a.z * b.y, a.z * b.x - a.x * b.z , a.x * b.y - a.y * b.x }; } __device__ float3 operator /(const float3 a, const float3 b) { return { a.x / b.x,a.y / b.y,a.z / b.z }; } __device__ float3 operator /(const float3 a, const float b) { return { a.x / b,a.y / b,a.z / b }; } __device__ float3 operator *(const float3 a, const float3 b) { return { a.x * b.x,a.y * b.y,a.z * b.z }; } __device__ float3 operator *(const float3 a, const float b) { return { a.x * b,a.y * b,a.z * b }; } __device__ float3 operator +(const float3 a, const float3 b) { return { a.x + b.x,a.y + b.y,a.z + b.z }; } __device__ float3 operator +(const float3 a, const float b) { return { a.x + b,a.y + b,a.z + b }; } __device__ float3 operator -(const float3 a, const float3 b) { return { a.x - b.x,a.y - b.y,a.z - b.z }; } __device__ float3 operator -(const float3 a, const float b) { return { a.x - b,a.y - b,a.z - b }; } __device__ float3 operator %(const float3 a, const float3 b) { return { a.x - static_cast<int>(a.x / b.x) * b.x,a.y - static_cast<int>(a.y / b.y) * b.y,a.z - static_cast<int>(a.z / b.z) * b.z }; } __device__ int index(const int x, const int y, const int width) { return y * width + x; } __device__ float3 rotate_vec(const float3 p, const float3 a, const float t, const float c, const float s) { const float d = t * (a & p); const float3 x = a ^ p; return { d * a.x + p.x * c + s * x.x, d * a.y + p.y * c + s * x.y, d * a.z + p.z * c + s * x.z }; } /*__global__ void GetColor(Color* colors) { int i = blockIdx.x * blockDim.x + threadIdx.x; colors[i] = Color.FromArgb(i, i, i); }*/ __device__ float warp_dist(float d, const int iterations, const float scale) { for (int i = 0; i < iterations; ++i) d /= scale; return d; } __device__ float cube_de(const float3 p, const float3 c, const float di) { const float3 o = p - c; return max(abs(o.x), max(abs(o.y), abs(o.z))) - di / 2; } __device__ float de(const float3 p, const float side) { return max(abs(p.x), max(abs(p.y), abs(p.z))) - side / 2; } __device__ float3 mod_space(const float3 p, const float3 mod) { return ((p + mod / 2) % mod + mod) % mod - mod / 2; } __device__ float3 fold_space(const float3 p, const float3 n) { if ((p & n) >= 0) return p; return p - (n * 2 * (p & n) / (n & n)); } __device__ float3 fold_menger(float3 z) { float a = min(z.x - z.y, 0.f); z.x -= a; z.y += a; a = min(z.x - z.z, 0.f); z.x -= a; z.z += a; a = min(z.y - z.z, 0.f); z.y -= a; z.z += a; return z; } __device__ float3 max_space(const float3 a, const float3 b) { return { max(a.x, b.x), max(a.y, b.y), max(a.z, b.z) }; } __device__ float3 max_space(const float3 a, const float b) { return { max(a.x, b), max(a.y, b), max(a.z, b) }; } __device__ float3 min_space(const float3 a, const float3 b) { return { min(a.x, b.x), min(a.y, b.y), min(a.z, b.z) }; } __device__ float3 min_space(const float3 a, const float b) { return { min(a.x, b), min(a.y, b), min(a.z, b) }; } __device__ float3 fold_box(const float3 z, const float r) { return max_space(min_space(z, r), -r) * 2 - z; } __device__ float3 
abs_space(const float3 p) { return { abs(p.x),abs(p.y),abs(p.z) }; } __device__ float3 abs_space_x(const float3 p) { return{ abs(p.x),p.y,p.z }; } __device__ float3 abs_space_y(const float3 p) { return{ p.x,abs(p.y),p.z }; } __device__ float3 abs_space_z(const float3 p) { return{ p.x,p.y,abs(p.z) }; } __device__ float3 rotate_x(const float3 z, const float t) { const float s = sin(t); const float c = cos(t); return{ z.x, c * z.y + s * z.z,c * z.z - s * z.y }; } __device__ float3 rotate_y(const float3 z, const float t) { const float s = sin(t); const float c = cos(t); return { c * z.x - s * z.z, z.y,c * z.z + s * z.x }; } __device__ float3 rotate_z(const float3 z, const float t) { const float s = sin(t); const float c = cos(t); return { c * z.x + s * z.y,c * z.y - s * z.x, z.z }; } __device__ float3 transform(float3 p, int iterations, const float3 seed, const float3 shift) { p = p * seed.x; p = abs_space(p); p = rotate_z(p, seed.y); p = fold_menger(p); p = rotate_x(p, seed.z); p = p - shift; return p; } __device__ float3 warp_space(float3 p, const int iterations, const float3 seed, const float3 shift) { for (int i = 0; i < iterations; ++i) p = transform(p, iterations, seed, shift); return p; } __device__ float scaled_de(const float3 p, const int iterations, const float side, const float3 seed, const float3 shift) { return warp_dist(de(warp_space(p, iterations, seed, shift), side), iterations, seed.x); } __device__ float3 normal(const float3 p, const int iterations, const float side, const float3 seed, const float3 shift, const float epsilon) { const float3 scaled = { scaled_de({ p.x + epsilon, p.y, p.z }, iterations, side, seed, shift) - scaled_de({ p.x - epsilon, p.y, p.z }, iterations, side, seed, shift), scaled_de({ p.x, p.y + epsilon, p.z }, iterations, side, seed, shift) - scaled_de({ p.x, p.y - epsilon, p.z }, iterations, side, seed, shift), scaled_de({ p.x, p.y, p.z + epsilon }, iterations, side, seed, shift) - scaled_de({ p.x, p.y, p.z - epsilon }, iterations, side, seed, shift) }; return scaled / !scaled; } __device__ float new_soft_shadow(const float3 p, const float3 d, const float shadow_strength, const int iterations, const float side, const float3 seed, const float3 shift, const float minDist, const float maxDist, const float minAngle) { float darkness = 1; float prev_dist = 2147483647; float angle = 1; float total_dist = minDist; while (total_dist < maxDist) { const float dist = scaled_de(p + d * total_dist, iterations, side, seed, shift); const float old_new_int_dist = dist * dist / (2 * prev_dist); const float leg_length = sqrt(dist * dist - old_new_int_dist * old_new_int_dist); angle = shadow_strength * leg_length / max(0.f, total_dist - old_new_int_dist); darkness = min(darkness, angle); prev_dist = dist; total_dist += dist; if (dist < 0 || darkness < minAngle) return 0; } return darkness; } __device__ float orbit(float3 p, const int iterations, const float side, const float3 seed, const float3 shift) { const float direction = scaled_de(p, 1, side, seed, shift); for (int i = 0; i < iterations; ++i) { p = transform(p, iterations, seed, shift); if (warp_dist(de(p, side), i, seed.x) * direction >= 0) return warp_dist(de(p, side), i - 1, seed.x) * 6; } return warp_dist(de(p, side), iterations - 1, seed.x) * 6; } __device__ float trapezoid_wave(const float loc) { return min(max(abs(loc - 3), 0.f) - 1, 1.f); } __device__ float red(const float loc) { return trapezoid_wave(loc - static_cast<int>(loc / 6) * 6); } __device__ float green(const float loc) { return trapezoid_wave(loc + 
4 - static_cast<int>((loc + 4) / 6) * 6); } __device__ float blue(const float loc) { return trapezoid_wave(loc + 2 - static_cast<int>((loc + 2) / 6) * 6); } __global__ void get_direction(float3* directions, const float focal_length, const int width, const int height) { const int i = blockIdx.x * blockDim.x + threadIdx.x; const int j = blockIdx.y * blockDim.y + threadIdx.y; const int h = index(i, height - 1 - j, width); const float3 p = { focal_length,(j - height / 2.f) / height,(i - width / 2.f) / height }; directions[h] = p / !p; } __global__ void rotate_direction(float3* directions, const float3 a, const float t, const float c, const float s, const int width, const int height) { const int i = blockIdx.x * blockDim.x + threadIdx.x; const int j = blockIdx.y * blockDim.y + threadIdx.y; const int h = index(i, height - 1 - j, width); directions[h] = rotate_vec(directions[h], a, t, c, s); directions[h] = directions[h] / !directions[h]; } __global__ void march_ray(const float3* directions, unsigned char* pixel_values, const float3 camera, const float3 light, const float2 cols, const float min_dist, const float max_dist, const int max_step, int bytes, const int width, const int iterations, const float side, const float3 seed, const float3 shift, const float shadow_strength, const float ambient_occ_strength) { const int i = blockIdx.x * blockDim.x + threadIdx.x; const int j = blockIdx.y * blockDim.y + threadIdx.y; const int h = index(i, j, width); float3 p = camera; int step_num = 0; float dist = min_dist + 1; float total_dist = 0; while (total_dist < max_dist && dist > min_dist && step_num != max_step) { dist = scaled_de(p, iterations, side, seed, shift); p = p + directions[h] * dist; total_dist += dist; ++step_num; } if (abs(dist) <= min_dist) { float brightness = 0; float3 off = light - p; const float light_vector_length = !off; off = off / light_vector_length; float diffuse_calculated = 0; const float normal_angle = (off & normal(p, iterations, side, seed, shift, min_dist)); if (normal_angle > 0) diffuse_calculated = max(cols.y * new_soft_shadow(p, off, shadow_strength, iterations, side, seed, shift, min_dist, light_vector_length, 0.01f) * normal_angle, 0.f); brightness += diffuse_calculated + cols.x / (1 + step_num * ambient_occ_strength); brightness = min(max(brightness, 0.f), 1.f); const float col = orbit(p, iterations, side, seed, shift); pixel_values[h * 3] = static_cast<unsigned char>(blue(col) * brightness * 255); pixel_values[h * 3 + 1] = static_cast<unsigned char>(green(col) * brightness * 255); pixel_values[h * 3 + 2] = static_cast<unsigned char>(red(col) * brightness * 255); } else { pixel_values[h * 3] = 0; pixel_values[h * 3 + 1] = 0; pixel_values[h * 3 + 2] = 0; } } hipError_t add_with_cuda(float3* c, const float3* a, const float3* b, unsigned int size); __global__ void add(float3* c, const float3* a, const float3* b) { const int i = threadIdx.x; c[i] = { a[i].x + b[i].x,a[i].y + b[i].y,a[i].z + b[i].z }; } string to_string(const float3 a) { return "(" + to_string(a.x) + "," + to_string(a.y) + "," + to_string(a.z) + ")"; } int main() { constexpr int array_size = 5; const float3 a[array_size] = { {1, 2, 3}, make_float3(4, 5, 6), make_float3(7, 8, 9), make_float3(10, 11, 12), make_float3(13, 14, 15) }; const float3 b[array_size] = { make_float3(10, 20, 30), make_float3(40, 50, 60), make_float3(70, 80, 90), make_float3(100, 110, 120), make_float3(130, 140, 150) }; float3 c[array_size] = { {0,0,0} }; // Add vectors in parallel. 
hipError_t cuda_status = add_with_cuda(c, a, b, array_size); if (cuda_status != hipSuccess) { fprintf(stderr, "addWithCuda failed!"); return 1; } printf("a + b = {%s, %s, %s, %s, %s}\n", to_string(c[0]).c_str(), to_string(c[1]).c_str(), to_string(c[2]).c_str(), to_string(c[3]).c_str(), to_string(c[4]).c_str()); // hipDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cuda_status = hipDeviceReset(); if (cuda_status != hipSuccess) { fprintf(stderr, "hipDeviceReset failed!"); return 1; } return 0; } // Helper function for using CUDA to add vectors in parallel. hipError_t add_with_cuda(float3* c, const float3* a, const float3* b, unsigned int size) { float3* dev_a = nullptr; float3* dev_b = nullptr; float3* dev_c = nullptr; // Choose which GPU to run on, change this on a multi-GPU system. hipError_t cuda_status = hipSetDevice(0); if (cuda_status != hipSuccess) { fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output) . cuda_status = hipMalloc(reinterpret_cast<void**>(&dev_c), size * sizeof(float3)); if (cuda_status != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cuda_status = hipMalloc(reinterpret_cast<void**>(&dev_a), size * sizeof(float3)); if (cuda_status != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cuda_status = hipMalloc(reinterpret_cast<void**>(&dev_b), size * sizeof(float3)); if (cuda_status != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. cuda_status = hipMemcpy(dev_a, a, size * sizeof(float3), hipMemcpyHostToDevice); if (cuda_status != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } cuda_status = hipMemcpy(dev_b, b, size * sizeof(float3), hipMemcpyHostToDevice); if (cuda_status != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } // Launch a kernel on the GPU with one thread for each element. hipLaunchKernelGGL(( add) , dim3(1), dim3(size) , 0, 0, dev_c, dev_a, dev_b); // Check for any errors launching the kernel cuda_status = hipGetLastError(); if (cuda_status != hipSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cuda_status)); goto Error; } // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cuda_status = hipDeviceSynchronize(); if (cuda_status != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cuda_status); goto Error; } // Copy output vector from GPU buffer to host memory. cuda_status = hipMemcpy(c, dev_c, size * sizeof(float3), hipMemcpyDeviceToHost); if (cuda_status != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); } Error: hipFree(dev_c); hipFree(dev_a); hipFree(dev_b); return cuda_status; }
887c46ce09ffb50304a9e435795dee03f9c199e3.cu
 #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <cstdio> #include <string> #include <cmath> #include <algorithm> #include <limits> using namespace std; __device__ float operator !(const float3 p) { return sqrtf(p.x * p.x + p.y * p.y + p.z * p.z); } __device__ float operator &(const float3 a, const float3 b) { return a.x * b.x + a.y * b.y + a.z * b.z; } __device__ float3 operator ^(const float3 a, const float3 b) { return { a.y * b.z - a.z * b.y, a.z * b.x - a.x * b.z , a.x * b.y - a.y * b.x }; } __device__ float3 operator /(const float3 a, const float3 b) { return { a.x / b.x,a.y / b.y,a.z / b.z }; } __device__ float3 operator /(const float3 a, const float b) { return { a.x / b,a.y / b,a.z / b }; } __device__ float3 operator *(const float3 a, const float3 b) { return { a.x * b.x,a.y * b.y,a.z * b.z }; } __device__ float3 operator *(const float3 a, const float b) { return { a.x * b,a.y * b,a.z * b }; } __device__ float3 operator +(const float3 a, const float3 b) { return { a.x + b.x,a.y + b.y,a.z + b.z }; } __device__ float3 operator +(const float3 a, const float b) { return { a.x + b,a.y + b,a.z + b }; } __device__ float3 operator -(const float3 a, const float3 b) { return { a.x - b.x,a.y - b.y,a.z - b.z }; } __device__ float3 operator -(const float3 a, const float b) { return { a.x - b,a.y - b,a.z - b }; } __device__ float3 operator %(const float3 a, const float3 b) { return { a.x - static_cast<int>(a.x / b.x) * b.x,a.y - static_cast<int>(a.y / b.y) * b.y,a.z - static_cast<int>(a.z / b.z) * b.z }; } __device__ int index(const int x, const int y, const int width) { return y * width + x; } __device__ float3 rotate_vec(const float3 p, const float3 a, const float t, const float c, const float s) { const float d = t * (a & p); const float3 x = a ^ p; return { d * a.x + p.x * c + s * x.x, d * a.y + p.y * c + s * x.y, d * a.z + p.z * c + s * x.z }; } /*__global__ void GetColor(Color* colors) { int i = blockIdx.x * blockDim.x + threadIdx.x; colors[i] = Color.FromArgb(i, i, i); }*/ __device__ float warp_dist(float d, const int iterations, const float scale) { for (int i = 0; i < iterations; ++i) d /= scale; return d; } __device__ float cube_de(const float3 p, const float3 c, const float di) { const float3 o = p - c; return max(abs(o.x), max(abs(o.y), abs(o.z))) - di / 2; } __device__ float de(const float3 p, const float side) { return max(abs(p.x), max(abs(p.y), abs(p.z))) - side / 2; } __device__ float3 mod_space(const float3 p, const float3 mod) { return ((p + mod / 2) % mod + mod) % mod - mod / 2; } __device__ float3 fold_space(const float3 p, const float3 n) { if ((p & n) >= 0) return p; return p - (n * 2 * (p & n) / (n & n)); } __device__ float3 fold_menger(float3 z) { float a = min(z.x - z.y, 0.f); z.x -= a; z.y += a; a = min(z.x - z.z, 0.f); z.x -= a; z.z += a; a = min(z.y - z.z, 0.f); z.y -= a; z.z += a; return z; } __device__ float3 max_space(const float3 a, const float3 b) { return { max(a.x, b.x), max(a.y, b.y), max(a.z, b.z) }; } __device__ float3 max_space(const float3 a, const float b) { return { max(a.x, b), max(a.y, b), max(a.z, b) }; } __device__ float3 min_space(const float3 a, const float3 b) { return { min(a.x, b.x), min(a.y, b.y), min(a.z, b.z) }; } __device__ float3 min_space(const float3 a, const float b) { return { min(a.x, b), min(a.y, b), min(a.z, b) }; } __device__ float3 fold_box(const float3 z, const float r) { return max_space(min_space(z, r), -r) * 2 - z; } __device__ float3 abs_space(const float3 p) { return { abs(p.x),abs(p.y),abs(p.z) }; 
} __device__ float3 abs_space_x(const float3 p) { return{ abs(p.x),p.y,p.z }; } __device__ float3 abs_space_y(const float3 p) { return{ p.x,abs(p.y),p.z }; } __device__ float3 abs_space_z(const float3 p) { return{ p.x,p.y,abs(p.z) }; } __device__ float3 rotate_x(const float3 z, const float t) { const float s = sin(t); const float c = cos(t); return{ z.x, c * z.y + s * z.z,c * z.z - s * z.y }; } __device__ float3 rotate_y(const float3 z, const float t) { const float s = sin(t); const float c = cos(t); return { c * z.x - s * z.z, z.y,c * z.z + s * z.x }; } __device__ float3 rotate_z(const float3 z, const float t) { const float s = sin(t); const float c = cos(t); return { c * z.x + s * z.y,c * z.y - s * z.x, z.z }; } __device__ float3 transform(float3 p, int iterations, const float3 seed, const float3 shift) { p = p * seed.x; p = abs_space(p); p = rotate_z(p, seed.y); p = fold_menger(p); p = rotate_x(p, seed.z); p = p - shift; return p; } __device__ float3 warp_space(float3 p, const int iterations, const float3 seed, const float3 shift) { for (int i = 0; i < iterations; ++i) p = transform(p, iterations, seed, shift); return p; } __device__ float scaled_de(const float3 p, const int iterations, const float side, const float3 seed, const float3 shift) { return warp_dist(de(warp_space(p, iterations, seed, shift), side), iterations, seed.x); } __device__ float3 normal(const float3 p, const int iterations, const float side, const float3 seed, const float3 shift, const float epsilon) { const float3 scaled = { scaled_de({ p.x + epsilon, p.y, p.z }, iterations, side, seed, shift) - scaled_de({ p.x - epsilon, p.y, p.z }, iterations, side, seed, shift), scaled_de({ p.x, p.y + epsilon, p.z }, iterations, side, seed, shift) - scaled_de({ p.x, p.y - epsilon, p.z }, iterations, side, seed, shift), scaled_de({ p.x, p.y, p.z + epsilon }, iterations, side, seed, shift) - scaled_de({ p.x, p.y, p.z - epsilon }, iterations, side, seed, shift) }; return scaled / !scaled; } __device__ float new_soft_shadow(const float3 p, const float3 d, const float shadow_strength, const int iterations, const float side, const float3 seed, const float3 shift, const float minDist, const float maxDist, const float minAngle) { float darkness = 1; float prev_dist = 2147483647; float angle = 1; float total_dist = minDist; while (total_dist < maxDist) { const float dist = scaled_de(p + d * total_dist, iterations, side, seed, shift); const float old_new_int_dist = dist * dist / (2 * prev_dist); const float leg_length = sqrt(dist * dist - old_new_int_dist * old_new_int_dist); angle = shadow_strength * leg_length / max(0.f, total_dist - old_new_int_dist); darkness = min(darkness, angle); prev_dist = dist; total_dist += dist; if (dist < 0 || darkness < minAngle) return 0; } return darkness; } __device__ float orbit(float3 p, const int iterations, const float side, const float3 seed, const float3 shift) { const float direction = scaled_de(p, 1, side, seed, shift); for (int i = 0; i < iterations; ++i) { p = transform(p, iterations, seed, shift); if (warp_dist(de(p, side), i, seed.x) * direction >= 0) return warp_dist(de(p, side), i - 1, seed.x) * 6; } return warp_dist(de(p, side), iterations - 1, seed.x) * 6; } __device__ float trapezoid_wave(const float loc) { return min(max(abs(loc - 3), 0.f) - 1, 1.f); } __device__ float red(const float loc) { return trapezoid_wave(loc - static_cast<int>(loc / 6) * 6); } __device__ float green(const float loc) { return trapezoid_wave(loc + 4 - static_cast<int>((loc + 4) / 6) * 6); } __device__ float 
blue(const float loc) { return trapezoid_wave(loc + 2 - static_cast<int>((loc + 2) / 6) * 6); } __global__ void get_direction(float3* directions, const float focal_length, const int width, const int height) { const int i = blockIdx.x * blockDim.x + threadIdx.x; const int j = blockIdx.y * blockDim.y + threadIdx.y; const int h = index(i, height - 1 - j, width); const float3 p = { focal_length,(j - height / 2.f) / height,(i - width / 2.f) / height }; directions[h] = p / !p; } __global__ void rotate_direction(float3* directions, const float3 a, const float t, const float c, const float s, const int width, const int height) { const int i = blockIdx.x * blockDim.x + threadIdx.x; const int j = blockIdx.y * blockDim.y + threadIdx.y; const int h = index(i, height - 1 - j, width); directions[h] = rotate_vec(directions[h], a, t, c, s); directions[h] = directions[h] / !directions[h]; } __global__ void march_ray(const float3* directions, unsigned char* pixel_values, const float3 camera, const float3 light, const float2 cols, const float min_dist, const float max_dist, const int max_step, int bytes, const int width, const int iterations, const float side, const float3 seed, const float3 shift, const float shadow_strength, const float ambient_occ_strength) { const int i = blockIdx.x * blockDim.x + threadIdx.x; const int j = blockIdx.y * blockDim.y + threadIdx.y; const int h = index(i, j, width); float3 p = camera; int step_num = 0; float dist = min_dist + 1; float total_dist = 0; while (total_dist < max_dist && dist > min_dist && step_num != max_step) { dist = scaled_de(p, iterations, side, seed, shift); p = p + directions[h] * dist; total_dist += dist; ++step_num; } if (abs(dist) <= min_dist) { float brightness = 0; float3 off = light - p; const float light_vector_length = !off; off = off / light_vector_length; float diffuse_calculated = 0; const float normal_angle = (off & normal(p, iterations, side, seed, shift, min_dist)); if (normal_angle > 0) diffuse_calculated = max(cols.y * new_soft_shadow(p, off, shadow_strength, iterations, side, seed, shift, min_dist, light_vector_length, 0.01f) * normal_angle, 0.f); brightness += diffuse_calculated + cols.x / (1 + step_num * ambient_occ_strength); brightness = min(max(brightness, 0.f), 1.f); const float col = orbit(p, iterations, side, seed, shift); pixel_values[h * 3] = static_cast<unsigned char>(blue(col) * brightness * 255); pixel_values[h * 3 + 1] = static_cast<unsigned char>(green(col) * brightness * 255); pixel_values[h * 3 + 2] = static_cast<unsigned char>(red(col) * brightness * 255); } else { pixel_values[h * 3] = 0; pixel_values[h * 3 + 1] = 0; pixel_values[h * 3 + 2] = 0; } } cudaError_t add_with_cuda(float3* c, const float3* a, const float3* b, unsigned int size); __global__ void add(float3* c, const float3* a, const float3* b) { const int i = threadIdx.x; c[i] = { a[i].x + b[i].x,a[i].y + b[i].y,a[i].z + b[i].z }; } string to_string(const float3 a) { return "(" + to_string(a.x) + "," + to_string(a.y) + "," + to_string(a.z) + ")"; } int main() { constexpr int array_size = 5; const float3 a[array_size] = { {1, 2, 3}, make_float3(4, 5, 6), make_float3(7, 8, 9), make_float3(10, 11, 12), make_float3(13, 14, 15) }; const float3 b[array_size] = { make_float3(10, 20, 30), make_float3(40, 50, 60), make_float3(70, 80, 90), make_float3(100, 110, 120), make_float3(130, 140, 150) }; float3 c[array_size] = { {0,0,0} }; // Add vectors in parallel. 
cudaError_t cuda_status = add_with_cuda(c, a, b, array_size); if (cuda_status != cudaSuccess) { fprintf(stderr, "addWithCuda failed!"); return 1; } printf("a + b = {%s, %s, %s, %s, %s}\n", to_string(c[0]).c_str(), to_string(c[1]).c_str(), to_string(c[2]).c_str(), to_string(c[3]).c_str(), to_string(c[4]).c_str()); // cudaDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cuda_status = cudaDeviceReset(); if (cuda_status != cudaSuccess) { fprintf(stderr, "cudaDeviceReset failed!"); return 1; } return 0; } // Helper function for using CUDA to add vectors in parallel. cudaError_t add_with_cuda(float3* c, const float3* a, const float3* b, unsigned int size) { float3* dev_a = nullptr; float3* dev_b = nullptr; float3* dev_c = nullptr; // Choose which GPU to run on, change this on a multi-GPU system. cudaError_t cuda_status = cudaSetDevice(0); if (cuda_status != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output) . cuda_status = cudaMalloc(reinterpret_cast<void**>(&dev_c), size * sizeof(float3)); if (cuda_status != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cuda_status = cudaMalloc(reinterpret_cast<void**>(&dev_a), size * sizeof(float3)); if (cuda_status != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cuda_status = cudaMalloc(reinterpret_cast<void**>(&dev_b), size * sizeof(float3)); if (cuda_status != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. cuda_status = cudaMemcpy(dev_a, a, size * sizeof(float3), cudaMemcpyHostToDevice); if (cuda_status != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } cuda_status = cudaMemcpy(dev_b, b, size * sizeof(float3), cudaMemcpyHostToDevice); if (cuda_status != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } // Launch a kernel on the GPU with one thread for each element. add <<<1, size >>> (dev_c, dev_a, dev_b); // Check for any errors launching the kernel cuda_status = cudaGetLastError(); if (cuda_status != cudaSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cuda_status)); goto Error; } // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cuda_status = cudaDeviceSynchronize(); if (cuda_status != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cuda_status); goto Error; } // Copy output vector from GPU buffer to host memory. cuda_status = cudaMemcpy(c, dev_c, size * sizeof(float3), cudaMemcpyDeviceToHost); if (cuda_status != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); } Error: cudaFree(dev_c); cudaFree(dev_a); cudaFree(dev_b); return cuda_status; }
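The ray-marching file above defines 2D per-pixel kernels (get_direction, rotate_direction, march_ray), but its main() only exercises the small add kernel. The sketch below shows one way the image kernels might be driven from the host, assuming it is compiled together with the kernel definitions above; the image size, block shape, camera, light and all scene parameters are illustrative assumptions, and the kernels are declared here only so the snippet is self-contained. Because the kernels do not bounds-check their pixel indices, the image dimensions are chosen as exact multiples of the block size.

// Hypothetical driver for the 2D kernels above; all numeric values are assumed.
#include <cuda_runtime.h>
#include <vector>

__global__ void get_direction(float3* directions, float focal_length, int width, int height);
__global__ void march_ray(const float3* directions, unsigned char* pixel_values,
                          float3 camera, float3 light, float2 cols,
                          float min_dist, float max_dist, int max_step, int bytes, int width,
                          int iterations, float side, float3 seed, float3 shift,
                          float shadow_strength, float ambient_occ_strength);

void render_once() {
  const int width = 1024, height = 1024;  // exact multiples of the 16x16 block
  float3* d_dirs = nullptr;
  unsigned char* d_pixels = nullptr;
  cudaMalloc(&d_dirs, size_t(width) * height * sizeof(float3));
  cudaMalloc(&d_pixels, size_t(width) * height * 3);

  dim3 block(16, 16);
  dim3 grid(width / block.x, height / block.y);

  // One ray direction per pixel, then one march per pixel.
  get_direction<<<grid, block>>>(d_dirs, /*focal_length=*/1.5f, width, height);
  march_ray<<<grid, block>>>(d_dirs, d_pixels,
                             make_float3(-3.f, 0.f, 0.f),   // camera (assumed)
                             make_float3(-4.f, 2.f, 2.f),   // light (assumed)
                             make_float2(0.3f, 0.7f),       // ambient/diffuse weights (assumed)
                             /*min_dist=*/1e-4f, /*max_dist=*/20.f, /*max_step=*/256,
                             /*bytes=*/3, width,
                             /*iterations=*/12, /*side=*/1.f,
                             make_float3(1.8f, 0.5f, 0.2f), // seed (assumed)
                             make_float3(1.f, 1.f, 1.f),    // shift (assumed)
                             /*shadow_strength=*/16.f, /*ambient_occ_strength=*/0.05f);
  cudaDeviceSynchronize();

  std::vector<unsigned char> pixels(size_t(width) * height * 3);
  cudaMemcpy(pixels.data(), d_pixels, pixels.size(), cudaMemcpyDeviceToHost);
  cudaFree(d_dirs);
  cudaFree(d_pixels);
}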
84f103582e910e4da58837161702dc3e7bb95aa0.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cassert>
#include <cstdlib>
#include <iostream>
#include <iterator>
#include <vector>
#include <chrono>
#include <random>

using namespace std;

//=========================== kernel ========================================
__global__ void vectorAdd(int *a, int *b, int *c, int N)
{
  int i = blockIdx.x*blockDim.x + threadIdx.x;
  int j = blockIdx.y*blockDim.y + threadIdx.y;
  int k = j*gridDim.x * blockDim.x + i;
  if (k < N)
    c[k] = a[k] + b[k];
}

auto get_time() { return chrono::high_resolution_clock::now(); }

//=========================== fuction main ===================================================
int main()
{
  constexpr int N = 1000 << 16;
  size_t bytes = sizeof(int) * N;

  int NUM_THREADS = 1 << 10;
  int NUM_BLOCKS = (N + NUM_THREADS - 1) / NUM_THREADS;

  // CPU
  int *h_a, *h_b, *h_c;
  hipHostMalloc(&h_a, bytes);
  hipHostMalloc(&h_b, bytes);
  hipHostMalloc(&h_c, bytes);

  for (int i = 0; i < N; i++) // initialisation les vacteurs a ,b
  {
    h_a[i]=rand() % 100;
    h_b[i]=rand() % 100;
  }

  //GPU
  int *d_a, *d_b, *d_c;
  hipMalloc(&d_a, bytes);
  hipMalloc(&d_b, bytes);
  hipMalloc(&d_c, bytes);

  // CPU ---> GPU
  hipMemcpy(d_a, h_a, bytes, hipMemcpyHostToDevice);
  hipMemcpy(d_b, h_b, bytes, hipMemcpyHostToDevice);

  auto start = get_time();
  hipLaunchKernelGGL(( vectorAdd), dim3(NUM_BLOCKS), dim3(NUM_THREADS), 0, 0, d_a, d_b, d_c, N);

  // GPU ---> CPU
  hipMemcpy(h_c, d_c, bytes, hipMemcpyDeviceToHost);
  auto finish = get_time();

  auto duration = chrono::duration_cast<std::chrono::milliseconds>(finish - start);
  cout << "temps écoulé en kernel = " << duration.count() << " ms\n";

  for (int i = 0; i < N; i++)
  {
    assert(h_c[i] == h_a[i] + h_b[i]);
  }

  hipHostFree(h_a);
  hipHostFree(h_b);
  hipHostFree(h_c);
  hipFree(d_a);
  hipFree(d_b);
  hipFree(d_c);

  cout << "terminé avec succès"<<endl;
  return 0;
}
84f103582e910e4da58837161702dc3e7bb95aa0.cu
#include <algorithm>
#include <cassert>
#include <cstdlib>
#include <iostream>
#include <iterator>
#include <vector>
#include <chrono>
#include <random>

using namespace std;

//=========================== kernel ========================================
__global__ void vectorAdd(int *a, int *b, int *c, int N)
{
  int i = blockIdx.x*blockDim.x + threadIdx.x;
  int j = blockIdx.y*blockDim.y + threadIdx.y;
  int k = j*gridDim.x * blockDim.x + i;
  if (k < N)
    c[k] = a[k] + b[k];
}

auto get_time() { return chrono::high_resolution_clock::now(); }

//=========================== fuction main ===================================================
int main()
{
  constexpr int N = 1000 << 16;
  size_t bytes = sizeof(int) * N;

  int NUM_THREADS = 1 << 10;
  int NUM_BLOCKS = (N + NUM_THREADS - 1) / NUM_THREADS;

  // CPU
  int *h_a, *h_b, *h_c;
  cudaMallocHost(&h_a, bytes);
  cudaMallocHost(&h_b, bytes);
  cudaMallocHost(&h_c, bytes);

  for (int i = 0; i < N; i++) // initialisation les vacteurs a ,b
  {
    h_a[i]=rand() % 100;
    h_b[i]=rand() % 100;
  }

  //GPU
  int *d_a, *d_b, *d_c;
  cudaMalloc(&d_a, bytes);
  cudaMalloc(&d_b, bytes);
  cudaMalloc(&d_c, bytes);

  // CPU ---> GPU
  cudaMemcpy(d_a, h_a, bytes, cudaMemcpyHostToDevice);
  cudaMemcpy(d_b, h_b, bytes, cudaMemcpyHostToDevice);

  auto start = get_time();
  vectorAdd<<<NUM_BLOCKS, NUM_THREADS>>>(d_a, d_b, d_c, N);

  // GPU ---> CPU
  cudaMemcpy(h_c, d_c, bytes, cudaMemcpyDeviceToHost);
  auto finish = get_time();

  auto duration = chrono::duration_cast<std::chrono::milliseconds>(finish - start);
  cout << "temps écoulé en kernel = " << duration.count() << " ms\n";

  for (int i = 0; i < N; i++)
  {
    assert(h_c[i] == h_a[i] + h_b[i]);
  }

  cudaFreeHost(h_a);
  cudaFreeHost(h_b);
  cudaFreeHost(h_c);
  cudaFree(d_a);
  cudaFree(d_b);
  cudaFree(d_c);

  cout << "terminé avec succès"<<endl;
  return 0;
}
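Neither version of the vector-add program above checks the kernel launch for errors, and the chrono timing also includes the blocking device-to-host copy. One possible refinement, shown here as a hedged sketch in CUDA terms, is to query cudaGetLastError() right after the launch and time the kernel alone with events; the helper name timed_vector_add is invented, and the snippet assumes it lives in the same translation unit as the vectorAdd kernel above.

// Illustrative launch check and event-based timing for the vectorAdd kernel above.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void vectorAdd(int* a, int* b, int* c, int N);  // defined in the file above

// Launches vectorAdd once, checks the launch, and returns the kernel-only time in ms.
float timed_vector_add(int* d_a, int* d_b, int* d_c, int N, int num_blocks, int num_threads) {
  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);

  cudaEventRecord(start);
  vectorAdd<<<num_blocks, num_threads>>>(d_a, d_b, d_c, N);
  cudaError_t err = cudaGetLastError();  // catches invalid launch configurations
  if (err != cudaSuccess)
    fprintf(stderr, "vectorAdd launch failed: %s\n", cudaGetErrorString(err));
  cudaEventRecord(stop);
  cudaEventSynchronize(stop);            // block until the kernel has finished

  float ms = 0.f;
  cudaEventElapsedTime(&ms, start, stop);  // kernel time only, excludes the later copy
  cudaEventDestroy(start);
  cudaEventDestroy(stop);
  return ms;
}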
6e7cb0b5c1b0625b31c92938707e1f0eaddf7584.hip
// !!! This is a file automatically generated by hipify!!! #include "tcensus.hpp" #include "../util.hpp" #include "../util_opencv.hpp" #include <opencv2/imgproc.hpp> #include <opencv2/core.hpp> #include <hip/hip_runtime.h> namespace algorithms { template<int WINX, int WINY> struct TCensusTransform { __host__ __device__ inline void window(const int y, const int x, uint64_t* __restrict__ out) { static_assert(BPP == 2, "2 bits per pixel expected"); short center = im(y, x); uint8_t i = 0; // bit counter for *out for (int wy = -WINY/2; wy <= WINY/2; wy++) { for (int wx = -WINX/2; wx <= WINX/2; wx++) { const int y_ = y + wy; const int x_ = x + wx; // If fist value, set zero. Otherwise shift left by BPP if (i % 64 == 0) { *out = 0; } else { *out = (*out << BPP); } if (center+t > im(y_,x_)) { *out |= 1; /* 01 */ } else if (center-t < im(y_,x_)) { *out |= 2; /* 10 */ } else { /* 00 */ } i += BPP; // if all bits set, continue to next element if (i % 64 == 0) { out++; } } } } __host__ __device__ void operator()(ushort2 thread, ushort2 stride, ushort2 size) { for (int y = thread.y+WINY/2; y<size.y-WINY/2-1; y+=stride.y) { for (int x = thread.x+WINX/2; x<size.x-WINX/2-1; x+=stride.x) { window(y, x, &(out(y, x*WSTEP))); } } } Array2D<uchar>::Data im; Array2D<uint64_t>::Data out; short t; // intensity threshold // Bits per pixel (for each census feature). Must be 1 or power of 2 for // window() to be correct (for tri-census)! static constexpr int BPP = 2; // number of uint64_t values for each window static constexpr int WSTEP = (BPP*WINX*WINY-1)/(sizeof(uint64_t)*8) + 1; }; } void TCensusMatchingCost::set(const Array2D<uchar> &l, const Array2D<uchar> &r) { parallel2D<algorithms::TCensusTransform<9,7>>({l.data(), ct_l_.data(), t_}, l.width, l.height); parallel2D<algorithms::TCensusTransform<9,7>>({r.data(), ct_r_.data(), t_}, r.width, r.height); } void TCensusMatchingCost::set(cv::InputArray l, cv::InputArray r) { if (l.type() != CV_8UC1 || r.type() != CV_8UC1) { throw std::exception(); } if (l.rows() != r.rows() || l.cols() != r.cols() || l.rows() != height() || l.cols() != width()) { throw std::exception(); } if (l.isGpuMat() && r.isGpuMat()) { auto ml = l.getGpuMat(); auto mr = r.getGpuMat(); set(Array2D<uchar>(ml), Array2D<uchar>(mr)); } else if (l.isMat() && r.isMat()) { auto ml = l.getMat(); auto mr = r.getMat(); set(Array2D<uchar>(ml), Array2D<uchar>(mr)); } else { throw std::exception(); } }
6e7cb0b5c1b0625b31c92938707e1f0eaddf7584.cu
#include "tcensus.hpp" #include "../util.hpp" #include "../util_opencv.hpp" #include <opencv2/imgproc.hpp> #include <opencv2/core.hpp> #include <cuda_runtime.h> namespace algorithms { template<int WINX, int WINY> struct TCensusTransform { __host__ __device__ inline void window(const int y, const int x, uint64_t* __restrict__ out) { static_assert(BPP == 2, "2 bits per pixel expected"); short center = im(y, x); uint8_t i = 0; // bit counter for *out for (int wy = -WINY/2; wy <= WINY/2; wy++) { for (int wx = -WINX/2; wx <= WINX/2; wx++) { const int y_ = y + wy; const int x_ = x + wx; // If fist value, set zero. Otherwise shift left by BPP if (i % 64 == 0) { *out = 0; } else { *out = (*out << BPP); } if (center+t > im(y_,x_)) { *out |= 1; /* 01 */ } else if (center-t < im(y_,x_)) { *out |= 2; /* 10 */ } else { /* 00 */ } i += BPP; // if all bits set, continue to next element if (i % 64 == 0) { out++; } } } } __host__ __device__ void operator()(ushort2 thread, ushort2 stride, ushort2 size) { for (int y = thread.y+WINY/2; y<size.y-WINY/2-1; y+=stride.y) { for (int x = thread.x+WINX/2; x<size.x-WINX/2-1; x+=stride.x) { window(y, x, &(out(y, x*WSTEP))); } } } Array2D<uchar>::Data im; Array2D<uint64_t>::Data out; short t; // intensity threshold // Bits per pixel (for each census feature). Must be 1 or power of 2 for // window() to be correct (for tri-census)! static constexpr int BPP = 2; // number of uint64_t values for each window static constexpr int WSTEP = (BPP*WINX*WINY-1)/(sizeof(uint64_t)*8) + 1; }; } void TCensusMatchingCost::set(const Array2D<uchar> &l, const Array2D<uchar> &r) { parallel2D<algorithms::TCensusTransform<9,7>>({l.data(), ct_l_.data(), t_}, l.width, l.height); parallel2D<algorithms::TCensusTransform<9,7>>({r.data(), ct_r_.data(), t_}, r.width, r.height); } void TCensusMatchingCost::set(cv::InputArray l, cv::InputArray r) { if (l.type() != CV_8UC1 || r.type() != CV_8UC1) { throw std::exception(); } if (l.rows() != r.rows() || l.cols() != r.cols() || l.rows() != height() || l.cols() != width()) { throw std::exception(); } if (l.isGpuMat() && r.isGpuMat()) { auto ml = l.getGpuMat(); auto mr = r.getGpuMat(); set(Array2D<uchar>(ml), Array2D<uchar>(mr)); } else if (l.isMat() && r.isMat()) { auto ml = l.getMat(); auto mr = r.getMat(); set(Array2D<uchar>(ml), Array2D<uchar>(mr)); } else { throw std::exception(); } }
d6ccf78e382259f1adbac542e8b392fee30aec72.hip
// !!! This is a file automatically generated by hipify!!! // // Original code Copyright (c) Electronic Arts Inc. All rights reserved // Modifications/Rewrite Copyright (c) 2020 Eyal Rozenberg. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // 3. Neither the name of the copyright holder nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Note: Original code was retrieved from https://github.com/electronicarts/EASTL/ , // master branch, on 2020-03-06. 
#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN #include "common_hip.cuh" #include <kat/tuple.hpp> #include <cuda/api_wrappers.hpp> #include <type_traits> #include <cstdint> #include <vector> #include <algorithm> #include <string> #include <utility> //#include "EASTLTest.h" //EA_DISABLE_VC_WARNING(4623 4625 4413 4510) namespace test_structs { struct default_constructible { enum : int { default_value = 0x1EE7C0DE }; KAT_HD default_constructible() : value(default_value) {} int value; }; namespace op_counts { __device__ int default_constructions = 0; __device__ int int_constructions = 0; __device__ int copy_constructions = 0; __device__ int move_constructions = 0; __device__ int copy_assignments = 0; __device__ int move_assignments = 0; __device__ int destructions = 0; } struct op_counting { KAT_HD op_counting() : value() { #ifndef __CUDA_ARCH__ ++default_constructions; #else ++op_counts::default_constructions; #endif } KAT_HD op_counting(int x) : value(x) { #ifndef __CUDA_ARCH__ ++int_constructions; #else ++op_counts::int_constructions; #endif } KAT_HD op_counting(const op_counting& x) : value(x.value) { #ifndef __CUDA_ARCH__ ++copy_constructions; #else ++op_counts::copy_constructions; #endif } KAT_HD op_counting(op_counting&& x) : value(x.value) { #ifndef __CUDA_ARCH__ ++move_constructions; #else ++op_counts::move_constructions; #endif x.value = 0; } KAT_HD op_counting& operator=(const op_counting& x) { value = x.value; #ifndef __CUDA_ARCH__ ++copy_assignments; #else ++op_counts::copy_assignments; #endif return *this; } KAT_HD op_counting& operator=(op_counting&& x) { value = x.value; x.value = 0; #ifndef __CUDA_ARCH__ ++move_assignments; #else ++op_counts::move_assignments; #endif return *this; } KAT_HD ~op_counting() { #ifndef __CUDA_ARCH__ ++destructions; #else ++op_counts::destructions; #endif } int value; KAT_HD static void reset_counters() { #ifndef __CUDA_ARCH__ default_constructions = 0; int_constructions = 0; copy_constructions = 0; move_constructions = 0; copy_assignments = 0; move_assignments = 0; destructions = 0; #else op_counts::default_constructions = 0; op_counts::int_constructions = 0; op_counts::copy_constructions = 0; op_counts::move_constructions = 0; op_counts::copy_assignments = 0; op_counts::move_assignments = 0; op_counts::destructions = 0; #endif } static int default_constructions; static int int_constructions; static int copy_constructions; static int move_constructions; static int copy_assignments; static int move_assignments; static int destructions; }; int op_counting::default_constructions = 0; int op_counting::int_constructions = 0; int op_counting::copy_constructions = 0; int op_counting::move_constructions = 0; int op_counting::copy_assignments = 0; int op_counting::move_assignments = 0; int op_counting::destructions = 0; // move_only_type - useful for verifying containers that may hold, e.g., unique_ptrs to make sure move ops are implemented struct move_only_type { move_only_type() = delete; KAT_HD move_only_type(int val) : value(val) {} move_only_type(const move_only_type&) = delete; KAT_HD move_only_type(move_only_type&& x) : value(x.value) { x.value = 0; } move_only_type& operator=(const move_only_type&) = delete; KAT_HD move_only_type& operator=(move_only_type&& x) { value = x.value; x.value = 0; return *this; } KAT_HD bool operator==(const move_only_type& o) const { return value == o.value; } int value; }; } // namespace test_structs using kat::tuple; using kat::tuple_size; using kat::tuple_element_t; using kat::get; using kat::make_tuple; using 
std::is_same; using namespace test_structs; TEST_SUITE("tuple") { TEST_CASE("static assertions") { using kat::tuple; using kat::tuple_size; using kat::tuple_element_t; using std::is_same; static_assert(tuple_size<tuple<int>>::value == 1, "tuple_size<tuple<T>> test failed."); static_assert(tuple_size<const tuple<int>>::value == 1, "tuple_size<const tuple<T>> test failed."); static_assert(tuple_size<const tuple<const int>>::value == 1, "tuple_size<const tuple<const T>> test failed."); static_assert(tuple_size<volatile tuple<int>>::value == 1, "tuple_size<volatile tuple<T>> test failed."); static_assert(tuple_size<const volatile tuple<int>>::value == 1, "tuple_size<const volatile tuple<T>> test failed."); static_assert(tuple_size<tuple<int, float, bool>>::value == 3, "tuple_size<tuple<T, T, T>> test failed."); static_assert(is_same<tuple_element_t<0, tuple<int>>, int>::value, "tuple_element<I, T> test failed."); static_assert(is_same<tuple_element_t<1, tuple<float, int>>, int>::value, "tuple_element<I, T> test failed."); static_assert(is_same<tuple_element_t<1, tuple<float, const int>>, const int>::value, "tuple_element<I, T> test failed."); static_assert(is_same<tuple_element_t<1, tuple<float, volatile int>>, volatile int>::value, "tuple_element<I, T> test failed."); static_assert(is_same<tuple_element_t<1, tuple<float, const volatile int>>, const volatile int>::value, "tuple_element<I, T> test failed."); static_assert(is_same<tuple_element_t<1, tuple<float, int&>>, int&>::value, "tuple_element<I, T> test failed."); } TEST_CASE("get") { tuple<int> single_element(1); CHECK( get<0>(single_element) == 1 ); get<0>(single_element) = 2; CHECK( get<0>(single_element) == 2 ); get<int>(single_element) = 3; CHECK( get<int>(single_element) == 3 ); const tuple<int> const_single_element(3); CHECK( get<int>(const_single_element) == 3 ); tuple<default_constructible> default_constructed; CHECK( get<0>(default_constructed).value == default_constructible::default_value ); } TEST_CASE("method invocation counts") { op_counting::reset_counters(); { tuple<op_counting> an_op_counter; CHECK_UNARY( (op_counting::default_constructions == 1 && get<0>(an_op_counter).value == 0) ); get<0>(an_op_counter).value = 1; tuple<op_counting> another_op_counter(an_op_counter); CHECK( true == ( op_counting::default_constructions == 1 && op_counting::copy_constructions == 1 && get<0>(another_op_counter).value == 1 ) ); get<0>(an_op_counter).value = 2; another_op_counter = an_op_counter; CHECK_UNARY( op_counting::default_constructions == 1 && op_counting::copy_constructions == 1 && op_counting::copy_assignments == 1 && get<0>(another_op_counter).value == 2 ); op_counting::reset_counters(); tuple<op_counting> yet_another_op_counter(op_counting(5)); CHECK_UNARY( ( op_counting::move_constructions == 1 && op_counting::default_constructions == 0 && op_counting::copy_constructions == 0 && get<0>(yet_another_op_counter).value == 5 ) ); } CHECK( op_counting::destructions == 4 ); } TEST_CASE("get") { // Test constructor tuple<int, float, bool> a_tuple(1, 1.0f, true); CHECK( get<0>(a_tuple) == 1 ); CHECK( get<1>(a_tuple) == 1.0f ); CHECK( get<2>(a_tuple) == true ); CHECK( get<int>(a_tuple) == 1 ); CHECK( get<float>(a_tuple) == 1.0f ); CHECK( get<bool>(a_tuple) == true ); get<1>(a_tuple) = 2.0f; CHECK( get<1>(a_tuple) == 2.0f ); // Test copy constructor tuple<int, float, bool> another_tuple(a_tuple); CHECK_UNARY( get<0>(another_tuple) == 1 && get<1>(another_tuple) == 2.0f && get<2>(another_tuple) == true ); // Test copy assignment tuple<int, 
float, bool> yet_another_tuple(2, 3.0f, true); CHECK_UNARY( get<0>(yet_another_tuple) == 2 && get<1>(yet_another_tuple) == 3.0f && get<2>(yet_another_tuple) == true); yet_another_tuple = another_tuple; CHECK_UNARY( get<0>(yet_another_tuple) == 1 && get<1>(yet_another_tuple) == 2.0f && get<2>(yet_another_tuple) == true); // Test converting 'copy' constructor (from a tuple of different type whose members are each convertible) tuple<double, double, bool> a_different_tuple(a_tuple); CHECK_UNARY( get<0>(a_different_tuple) == 1.0 && get<1>(a_different_tuple) == 2.0 && get<2>(a_different_tuple) == true); // Test converting assignment operator (from a tuple of different type whose members are each convertible) tuple<double, double, bool> another_different_tuple; CHECK_UNARY( get<0>(another_different_tuple) == 0.0 && get<1>(another_different_tuple) == 0.0 && get<2>(another_different_tuple) == false); another_different_tuple = another_tuple; CHECK_UNARY( get<0>(another_different_tuple) == 1.0 && get<1>(another_different_tuple) == 2.0 && get<2>(another_different_tuple) == true); // Test default initialization (built in types should be value initialized rather than default initialized) tuple<int, float, bool> a_default_initialized_tuple; CHECK_UNARY( get<0>(a_default_initialized_tuple) == 0 && get<1>(a_default_initialized_tuple) == 0.0f && get<2>(a_default_initialized_tuple) == false); } TEST_CASE("more typed get") { // Test some other cases with typed-getter tuple<double, double, bool> a_tuple_with_repeated_type(1.0f, 2.0f, true); CHECK( get<bool>(a_tuple_with_repeated_type) == true ); tuple<double, bool, double> another_tuple_with_repeated_type(1.0f, true, 2.0f); CHECK( get<bool>(another_tuple_with_repeated_type) == true ); tuple<bool, double, double> yet_another_tupleWithRepeatedType(true, 1.0f, 2.0f); CHECK( get<bool>(another_tuple_with_repeated_type) == true ); struct one_float { float val; }; struct second_float { float val; }; tuple<one_float, second_float> a_tuple_of_structs({ 1.0f }, { 2.0f } ); CHECK( get<one_float>(a_tuple_of_structs).val == 1.0f ); CHECK( get<second_float>(a_tuple_of_structs).val == 2.0f ); const tuple<double, double, bool> aConstTuple(a_tuple_with_repeated_type); const bool& constRef = get<bool>(aConstTuple); CHECK( constRef == true ); const bool&& constRval = get<bool>(std::move(a_tuple_with_repeated_type)); CHECK( constRval == true ); } TEST_CASE("more tuple methods") { tuple<int, float> a_tuple_with_default_init(1, {}); // tuple construction from pair std::pair<int, float> a_pair(1, 2.0f); tuple<int, float> a_tuple(a_pair); CHECK_UNARY( get<0>(a_tuple) == 1 && get<1>(a_tuple) == 2.0f ); tuple<double, double> another_tuple(a_pair); CHECK_UNARY( get<0>(another_tuple) == 1.0 && get<1>(another_tuple) == 2.0 ); another_tuple = std::make_pair(2, 3); CHECK_UNARY( get<0>(another_tuple) == 2.0 && get<1>(another_tuple) == 3.0 ); // operators: ==, !=, < another_tuple = a_tuple; CHECK( a_tuple == another_tuple ); CHECK_UNARY( !(a_tuple < another_tuple) && !(another_tuple < a_tuple) ); tuple<double, double> a_default_init_tuple; CHECK( a_tuple != a_default_init_tuple ); CHECK( a_default_init_tuple < a_tuple ); tuple<int, int, int> a_lesser_tuple(1, 2, 3); tuple<int, int, int> a_greater_tuple(1, 2, 4); CHECK_UNARY( a_lesser_tuple < a_greater_tuple && !(a_greater_tuple < a_lesser_tuple) && a_greater_tuple > a_lesser_tuple && !(a_lesser_tuple > a_greater_tuple)); // We don't have the library's TestObject here // tuple<int, float, TestObject> value_tuple(2, 2.0f, TestObject(2)); // 
tuple<int&, float&, TestObject&> refTup(value_tuple); // tuple<const int&, const float&, const TestObject&> const_ref_to_tuple(value_tuple); // // CHECK( get<0>(refTup) == get<0>(value_tuple) ); // CHECK( get<1>(refTup) == get<1>(value_tuple) ); // CHECK( refTup == value_tuple ); // CHECK( get<0>(refTup) == get<0>(const_ref_to_tuple) ); // CHECK( get<1>(refTup) == get<1>(const_ref_to_tuple) ); // CHECK( const_ref_to_tuple == value_tuple ); // CHECK( const_ref_to_tuple == refTup ); // swap swap(a_lesser_tuple, a_greater_tuple); CHECK_UNARY( get<2>(a_lesser_tuple) == 4 && get<2>(a_greater_tuple) == 3 ); swap(a_greater_tuple, a_lesser_tuple); CHECK( a_lesser_tuple < a_greater_tuple ); } TEST_CASE("move-only contained type") { static_assert(std::is_constructible<move_only_type, move_only_type>::value, "is_constructible type trait giving confusing answers."); static_assert(std::is_constructible<move_only_type, move_only_type&&>::value, "is_constructible type trait giving wrong answers."); static_assert(std::is_constructible<move_only_type&&, move_only_type&&>::value, "is_constructible type trait giving bizarre answers."); tuple<move_only_type> a_tuple_with_move_only_member(1); CHECK( get<0>(a_tuple_with_move_only_member).value == 1 ); get<0>(a_tuple_with_move_only_member) = move_only_type(2); CHECK( get<0>(a_tuple_with_move_only_member).value == 2 ); tuple<const move_only_type&> a_tuple_with_ref_to_move_only_member(a_tuple_with_move_only_member); CHECK( get<0>(a_tuple_with_ref_to_move_only_member).value == 2 ); tuple<const move_only_type&> aTupleWithConstRefToGetMoveOnly(get<0>(a_tuple_with_move_only_member)); CHECK( get<0>(aTupleWithConstRefToGetMoveOnly).value == 2 ); tuple<move_only_type&> a_tuple_with_ref_to_get_move_only(get<0>(a_tuple_with_move_only_member)); CHECK( get<0>(a_tuple_with_ref_to_get_move_only).value == 2 ); } TEST_CASE("make_tuple") { auto a_made_tuple = make_tuple(1, 2.0, true); CHECK_UNARY( get<0>(a_made_tuple) == 1 && get<1>(a_made_tuple) == 2.0 && get<2>(a_made_tuple) == true ); // TODO: reference_wrapper implementation needs to be finished to enable this code { int a = 2; float b = 3.0f; auto a_made_tuple_2 = make_tuple(kat::ref(a), b); get<0>(a_made_tuple_2) = 3; get<1>(a_made_tuple_2) = 4.0f; CHECK_UNARY( get<0>(a_made_tuple_2) == 3 && get<1>(a_made_tuple_2) == 4.0f && a == 3 && b == 3.0f ); } } TEST_CASE("forward_as_tuple") { auto forward_test = [](tuple<move_only_type&&, move_only_type&&> x) -> tuple<move_only_type, move_only_type> { return tuple<move_only_type, move_only_type>(std::move(x)); }; tuple<move_only_type, move_only_type> a_movable_tuple( forward_test(kat::forward_as_tuple(move_only_type(1), move_only_type(2)))); CHECK_UNARY( get<0>(a_movable_tuple).value == 1 && get<1>(a_movable_tuple).value == 2 ); } TEST_CASE("tie") { int a = 0; double b = 0.0f; static_assert(std::is_assignable<const kat::detail::ignore_t<int>&, int>::value, "ignore_t not assignable"); static_assert(kat::detail::tuple_assignable<tuple<const kat::detail::ignore_t<int>&>, tuple<int>>::value, "Not assignable"); kat::tie(a, kat::ignore, b) = kat::make_tuple(1, 3, 5); CHECK_UNARY( a == 1 && b == 5.0f ); } TEST_CASE("tuple_cat") { int a = 0; double b = 0.0f; auto concatenated_tuple = tuple_cat(make_tuple(1, 2.0f), make_tuple(3.0, true)); CHECK_UNARY( get<0>(concatenated_tuple) == 1 && get<1>(concatenated_tuple) == 2.0f && get<2>(concatenated_tuple) == 3.0 && get<3>(concatenated_tuple) == true); auto concatenated_tuple_2 = tuple_cat(make_tuple(1, 2.0f), make_tuple(3.0, true), make_tuple(5u, 
'6')); CHECK_UNARY( get<0>(concatenated_tuple_2) == 1 && get<1>(concatenated_tuple_2) == 2.0f && get<2>(concatenated_tuple_2) == 3.0 && get<3>(concatenated_tuple_2) == true && get<4>(concatenated_tuple_2) == 5u && get<5>(concatenated_tuple_2) == '6'); auto a_catted_ref_tuple = tuple_cat(make_tuple(1), kat::tie(a, kat::ignore, b)); get<1>(a_catted_ref_tuple) = 2; CHECK( a == 2 ); } TEST_CASE("empty tuple") { tuple<> empty_tuple; CHECK( tuple_size<decltype(empty_tuple)>::value == 0 ); empty_tuple = make_tuple(); auto another_empty_tuple = make_tuple(); swap(another_empty_tuple, empty_tuple); } TEST_CASE("std::tuple compatibility") { { tuple<> empty_tuple; auto empty_std_tuple_1 { static_cast< std::tuple<> >(empty_tuple) }; auto empty_std_tuple_2 { static_cast< std::tuple<> >(kat::make_tuple()) }; std::tuple<> empty_std_tuple_3 = empty_tuple; // empty_tuple = empty_std_tuple_1; CHECK (std::is_same<std::tuple<>,decltype(empty_std_tuple_1)>::value); CHECK (kat::detail::tuple_convertible< std::tuple<>, tuple<> >::value); } { tuple<int, float, bool> a_tuple(1, 1.0f, true); auto std_tuple_1 { static_cast< std::tuple<int, float, bool> >(a_tuple) }; auto std_tuple_2 { static_cast< std::tuple<int, float, bool> >(kat::make_tuple(1, 1.0f, true)) }; std::tuple<int, float, bool> std_tuple_3 = a_tuple; // a_tuple = std_tuple_1; CHECK (std::is_same<std::tuple<int, float, bool>,decltype(std_tuple_1)>::value); // CHECK (kat::detail::tuple_convertible< std::tuple<int, float, bool>, tuple<int, float, bool> >::value); // CHECK (kat::tuple_size<std::tuple<int, float, bool>>::value == 3); // CHECK (kat::detail::tuple_assignable< tuple<int, float, bool>, std::tuple<int, float, bool> >::value); // std::cout // << "tuple_size<typename std::remove_reference<tuple<int, float, bool>>::type>::value = " // << tuple_size<typename std::remove_reference<tuple<int, float, bool>>::type>::value << '\n' // << "tuple_size<std::tuple<int, float, bool>>::value) = " // << tuple_size<std::tuple<int, float, bool>>::value << '\n' // << "kat::detail::make_tuple_types_t<tuple<int, float, bool>> = " // << util::type_name<kat::detail::make_tuple_types_t<tuple<int, float, bool> > >() << '\n' // << "make_tuple_types_t<std::tuple<int, float, bool> > = " // << util::type_name< kat::detail::make_tuple_types_t<std::tuple<int, float, bool> > >() << '\n'; // CHECK (kat::detail::tuple_assignable< tuple<int, float, bool>, tuple<int, float, bool> >::value); } // std::tuple<> empty_std_tuple; // tuple<> empty_tuple_1 { static_cast< kat::tuple<> >(empty_tuple_1) }; // tuple<> empty_tuple_2 { static_cast< kat::tuple<> >(std::make_tuple()) }; // tuple<> empty_tuple_3 = empty_std_tuple_1; // swap(empty_tuple, empty_std_tuple); // swap(empty_std_tuple, empty_tuple); // { // tuple<move_only_type> a_tuple_with_move_only_member(1); // auto std_tuple_1 { static_cast< std::tuple<move_only_type> >(a_tuple_with_move_only_member) }; // // tuple<const move_only_type&> a_tuple_with_ref_to_move_only_member(a_tuple_with_move_only_member); // std::tuple<> std_tuple_2 { static_cast< std::tuple<const move_only_type> >(a_tuple_with_ref_to_move_only_member) }; // // tuple<const move_only_type&> aTupleWithConstRefToGetMoveOnly(get<0>(a_tuple_with_move_only_member)); // std::tuple<const move_only_type&> std_tuple_3 { static_cast< std::tuple<const move_only_type&> >(a_tuple_with_ref_to_move_only_member) }; // // tuple<move_only_type&> a_tuple_with_ref_to_get_move_only(get<0>(a_tuple_with_move_only_member)); // std::tuple<move_only_type&> std_tuple_4 { static_cast< 
std::tuple<move_only_type&> >(a_tuple_with_ref_to_move_only_member) }; // } { // operators: ==, !=, < tuple<int, float, bool> a_tuple(1, 1.0f, true); std::tuple<int, float, bool> an_std_tuple = a_tuple; CHECK( a_tuple == an_std_tuple ); CHECK_UNARY( !(a_tuple < an_std_tuple) && !(an_std_tuple < a_tuple) ); } { tuple<int, int, int> a_lesser_tuple(1, 2, 3); tuple<int, int, int> a_greater_tuple(1, 2, 4); std::tuple<int, int, int> a_lesser_std_tuple(1, 2, 3); std::tuple<int, int, int> a_greater_std_tuple(1, 2, 4); CHECK_UNARY( a_lesser_tuple < a_greater_std_tuple && !(a_greater_tuple < a_lesser_std_tuple) && a_greater_tuple > a_lesser_std_tuple && !(a_lesser_tuple > a_greater_std_tuple) ); CHECK_UNARY( a_lesser_std_tuple < a_greater_tuple && !(a_greater_std_tuple < a_lesser_tuple) && a_greater_std_tuple > a_lesser_tuple && !(a_lesser_std_tuple > a_greater_tuple) ); } } // TODO: Enable this when we've introduced compatibility code of kat::tuple // and std::tuple on the host side. Also, if we get kat::pair, replicate the // following tests for that class as well. /* TEST_CASE("piecewise_construction") { { struct local { local() = default; local(int a, int b) : mA(a), mB(b) {} int mA = 0; int mB = 0; }; auto t = kat::make_tuple(42, 43); std::pair<local, local> p(std::piecewise_construct, t, t); CHECK( p.first.mA == 42 ); CHECK( p.second.mA == 42 ); CHECK( p.first.mB == 43 ); CHECK( p.second.mB == 43 ); } { struct local { local() = default; local(int a, int b, int c, int d) : mA(a), mB(b), mC(c), mD(d) {} int mA = 0; int mB = 0; int mC = 0; int mD = 0; }; auto t = kat::make_tuple(42, 43, 44, 45); std::pair<local, local> p(std::piecewise_construct, t, t); CHECK( p.first.mA == 42 ); CHECK( p.second.mA == 42 ); CHECK( p.first.mB == 43 ); CHECK( p.second.mB == 43 ); CHECK( p.first.mC == 44 ); CHECK( p.second.mC == 44 ); CHECK( p.first.mD == 45 ); CHECK( p.second.mD == 45 ); } { struct local1 { local1() = default; local1(int a) : mA(a) {} int mA = 0; }; struct local2 { local2() = default; local2(char a) : mA(a) {} char mA = 0; }; auto t1 = kat::make_tuple(42); auto t2 = kat::make_tuple('a'); std::pair<local1, local2> p(std::piecewise_construct, t1, t2); CHECK( p.first.mA == 42 ); CHECK( p.second.mA == 'a' ); } } */ #if __cplusplus >= 201703L TEST_CASE("apply") { // test with tuples { { auto result = kat::apply([](int i) { return i; }, make_tuple(1)); CHECK( result == 1 ); } { auto result = kat::apply([](int i, int j) { return i + j; }, make_tuple(1, 2)); CHECK( result == 3 ); } { auto result = kat::apply([](int i, int j, int k, int m) { return i + j + k + m; }, make_tuple(1, 2, 3, 4)); CHECK( result == 10 ); } } // // test with pair // { // auto result = kat::apply([](int i, int j) { return i + j; }, make_pair(1, 2)); // CHECK( result == 3 ); // } // TODO: Test apply with arrays? } TEST_CASE("tuple structured bindings") { kat::tuple<int, int, int> t = {1,2,3}; auto [x,y,z] = t; CHECK( x == 1 ); CHECK( y == 2 ); CHECK( z == 3 ); } #endif // __cplusplus >= 201703L TEST_CASE("tuple_cat") { void* empty = nullptr; auto t = kat::make_tuple(empty, true); auto tc = kat::tuple_cat(kat::make_tuple("asd", 1), t); static_assert(std::is_same<decltype(tc), kat::tuple<const char*, int, void*, bool>>::value, "type mismatch"); CHECK( std::string("asd") == kat::get<0>(tc) ); CHECK( kat::get<1>(tc) == 1 ); CHECK( kat::get<2>(tc) == nullptr ); CHECK( kat::get<3>(tc) == true ); } } // TEST_SUITE("tuple") // EA_RESTORE_VC_WARNING()
d6ccf78e382259f1adbac542e8b392fee30aec72.cu
// // Original code Copyright (c) Electronic Arts Inc. All rights reserved // Modifications/Rewrite Copyright (c) 2020 Eyal Rozenberg. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // 3. Neither the name of the copyright holder nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Note: Original code was retrieved from https://github.com/electronicarts/EASTL/ , // master branch, on 2020-03-06. #define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN #include "common.cuh" #include <kat/tuple.hpp> #include <cuda/api_wrappers.hpp> #include <type_traits> #include <cstdint> #include <vector> #include <algorithm> #include <string> #include <utility> //#include "EASTLTest.h" //EA_DISABLE_VC_WARNING(4623 4625 4413 4510) namespace test_structs { struct default_constructible { enum : int { default_value = 0x1EE7C0DE }; KAT_HD default_constructible() : value(default_value) {} int value; }; namespace op_counts { __device__ int default_constructions = 0; __device__ int int_constructions = 0; __device__ int copy_constructions = 0; __device__ int move_constructions = 0; __device__ int copy_assignments = 0; __device__ int move_assignments = 0; __device__ int destructions = 0; } struct op_counting { KAT_HD op_counting() : value() { #ifndef __CUDA_ARCH__ ++default_constructions; #else ++op_counts::default_constructions; #endif } KAT_HD op_counting(int x) : value(x) { #ifndef __CUDA_ARCH__ ++int_constructions; #else ++op_counts::int_constructions; #endif } KAT_HD op_counting(const op_counting& x) : value(x.value) { #ifndef __CUDA_ARCH__ ++copy_constructions; #else ++op_counts::copy_constructions; #endif } KAT_HD op_counting(op_counting&& x) : value(x.value) { #ifndef __CUDA_ARCH__ ++move_constructions; #else ++op_counts::move_constructions; #endif x.value = 0; } KAT_HD op_counting& operator=(const op_counting& x) { value = x.value; #ifndef __CUDA_ARCH__ ++copy_assignments; #else ++op_counts::copy_assignments; #endif return *this; } KAT_HD op_counting& operator=(op_counting&& x) { value = x.value; x.value = 0; #ifndef __CUDA_ARCH__ ++move_assignments; #else ++op_counts::move_assignments; #endif return *this; } KAT_HD ~op_counting() { #ifndef __CUDA_ARCH__ ++destructions; #else 
++op_counts::destructions; #endif } int value; KAT_HD static void reset_counters() { #ifndef __CUDA_ARCH__ default_constructions = 0; int_constructions = 0; copy_constructions = 0; move_constructions = 0; copy_assignments = 0; move_assignments = 0; destructions = 0; #else op_counts::default_constructions = 0; op_counts::int_constructions = 0; op_counts::copy_constructions = 0; op_counts::move_constructions = 0; op_counts::copy_assignments = 0; op_counts::move_assignments = 0; op_counts::destructions = 0; #endif } static int default_constructions; static int int_constructions; static int copy_constructions; static int move_constructions; static int copy_assignments; static int move_assignments; static int destructions; }; int op_counting::default_constructions = 0; int op_counting::int_constructions = 0; int op_counting::copy_constructions = 0; int op_counting::move_constructions = 0; int op_counting::copy_assignments = 0; int op_counting::move_assignments = 0; int op_counting::destructions = 0; // move_only_type - useful for verifying containers that may hold, e.g., unique_ptrs to make sure move ops are implemented struct move_only_type { move_only_type() = delete; KAT_HD move_only_type(int val) : value(val) {} move_only_type(const move_only_type&) = delete; KAT_HD move_only_type(move_only_type&& x) : value(x.value) { x.value = 0; } move_only_type& operator=(const move_only_type&) = delete; KAT_HD move_only_type& operator=(move_only_type&& x) { value = x.value; x.value = 0; return *this; } KAT_HD bool operator==(const move_only_type& o) const { return value == o.value; } int value; }; } // namespace test_structs using kat::tuple; using kat::tuple_size; using kat::tuple_element_t; using kat::get; using kat::make_tuple; using std::is_same; using namespace test_structs; TEST_SUITE("tuple") { TEST_CASE("static assertions") { using kat::tuple; using kat::tuple_size; using kat::tuple_element_t; using std::is_same; static_assert(tuple_size<tuple<int>>::value == 1, "tuple_size<tuple<T>> test failed."); static_assert(tuple_size<const tuple<int>>::value == 1, "tuple_size<const tuple<T>> test failed."); static_assert(tuple_size<const tuple<const int>>::value == 1, "tuple_size<const tuple<const T>> test failed."); static_assert(tuple_size<volatile tuple<int>>::value == 1, "tuple_size<volatile tuple<T>> test failed."); static_assert(tuple_size<const volatile tuple<int>>::value == 1, "tuple_size<const volatile tuple<T>> test failed."); static_assert(tuple_size<tuple<int, float, bool>>::value == 3, "tuple_size<tuple<T, T, T>> test failed."); static_assert(is_same<tuple_element_t<0, tuple<int>>, int>::value, "tuple_element<I, T> test failed."); static_assert(is_same<tuple_element_t<1, tuple<float, int>>, int>::value, "tuple_element<I, T> test failed."); static_assert(is_same<tuple_element_t<1, tuple<float, const int>>, const int>::value, "tuple_element<I, T> test failed."); static_assert(is_same<tuple_element_t<1, tuple<float, volatile int>>, volatile int>::value, "tuple_element<I, T> test failed."); static_assert(is_same<tuple_element_t<1, tuple<float, const volatile int>>, const volatile int>::value, "tuple_element<I, T> test failed."); static_assert(is_same<tuple_element_t<1, tuple<float, int&>>, int&>::value, "tuple_element<I, T> test failed."); } TEST_CASE("get") { tuple<int> single_element(1); CHECK( get<0>(single_element) == 1 ); get<0>(single_element) = 2; CHECK( get<0>(single_element) == 2 ); get<int>(single_element) = 3; CHECK( get<int>(single_element) == 3 ); const tuple<int> 
const_single_element(3); CHECK( get<int>(const_single_element) == 3 ); tuple<default_constructible> default_constructed; CHECK( get<0>(default_constructed).value == default_constructible::default_value ); } TEST_CASE("method invocation counts") { op_counting::reset_counters(); { tuple<op_counting> an_op_counter; CHECK_UNARY( (op_counting::default_constructions == 1 && get<0>(an_op_counter).value == 0) ); get<0>(an_op_counter).value = 1; tuple<op_counting> another_op_counter(an_op_counter); CHECK( true == ( op_counting::default_constructions == 1 && op_counting::copy_constructions == 1 && get<0>(another_op_counter).value == 1 ) ); get<0>(an_op_counter).value = 2; another_op_counter = an_op_counter; CHECK_UNARY( op_counting::default_constructions == 1 && op_counting::copy_constructions == 1 && op_counting::copy_assignments == 1 && get<0>(another_op_counter).value == 2 ); op_counting::reset_counters(); tuple<op_counting> yet_another_op_counter(op_counting(5)); CHECK_UNARY( ( op_counting::move_constructions == 1 && op_counting::default_constructions == 0 && op_counting::copy_constructions == 0 && get<0>(yet_another_op_counter).value == 5 ) ); } CHECK( op_counting::destructions == 4 ); } TEST_CASE("get") { // Test constructor tuple<int, float, bool> a_tuple(1, 1.0f, true); CHECK( get<0>(a_tuple) == 1 ); CHECK( get<1>(a_tuple) == 1.0f ); CHECK( get<2>(a_tuple) == true ); CHECK( get<int>(a_tuple) == 1 ); CHECK( get<float>(a_tuple) == 1.0f ); CHECK( get<bool>(a_tuple) == true ); get<1>(a_tuple) = 2.0f; CHECK( get<1>(a_tuple) == 2.0f ); // Test copy constructor tuple<int, float, bool> another_tuple(a_tuple); CHECK_UNARY( get<0>(another_tuple) == 1 && get<1>(another_tuple) == 2.0f && get<2>(another_tuple) == true ); // Test copy assignment tuple<int, float, bool> yet_another_tuple(2, 3.0f, true); CHECK_UNARY( get<0>(yet_another_tuple) == 2 && get<1>(yet_another_tuple) == 3.0f && get<2>(yet_another_tuple) == true); yet_another_tuple = another_tuple; CHECK_UNARY( get<0>(yet_another_tuple) == 1 && get<1>(yet_another_tuple) == 2.0f && get<2>(yet_another_tuple) == true); // Test converting 'copy' constructor (from a tuple of different type whose members are each convertible) tuple<double, double, bool> a_different_tuple(a_tuple); CHECK_UNARY( get<0>(a_different_tuple) == 1.0 && get<1>(a_different_tuple) == 2.0 && get<2>(a_different_tuple) == true); // Test converting assignment operator (from a tuple of different type whose members are each convertible) tuple<double, double, bool> another_different_tuple; CHECK_UNARY( get<0>(another_different_tuple) == 0.0 && get<1>(another_different_tuple) == 0.0 && get<2>(another_different_tuple) == false); another_different_tuple = another_tuple; CHECK_UNARY( get<0>(another_different_tuple) == 1.0 && get<1>(another_different_tuple) == 2.0 && get<2>(another_different_tuple) == true); // Test default initialization (built in types should be value initialized rather than default initialized) tuple<int, float, bool> a_default_initialized_tuple; CHECK_UNARY( get<0>(a_default_initialized_tuple) == 0 && get<1>(a_default_initialized_tuple) == 0.0f && get<2>(a_default_initialized_tuple) == false); } TEST_CASE("more typed get") { // Test some other cases with typed-getter tuple<double, double, bool> a_tuple_with_repeated_type(1.0f, 2.0f, true); CHECK( get<bool>(a_tuple_with_repeated_type) == true ); tuple<double, bool, double> another_tuple_with_repeated_type(1.0f, true, 2.0f); CHECK( get<bool>(another_tuple_with_repeated_type) == true ); tuple<bool, double, double> 
yet_another_tupleWithRepeatedType(true, 1.0f, 2.0f); CHECK( get<bool>(another_tuple_with_repeated_type) == true ); struct one_float { float val; }; struct second_float { float val; }; tuple<one_float, second_float> a_tuple_of_structs({ 1.0f }, { 2.0f } ); CHECK( get<one_float>(a_tuple_of_structs).val == 1.0f ); CHECK( get<second_float>(a_tuple_of_structs).val == 2.0f ); const tuple<double, double, bool> aConstTuple(a_tuple_with_repeated_type); const bool& constRef = get<bool>(aConstTuple); CHECK( constRef == true ); const bool&& constRval = get<bool>(std::move(a_tuple_with_repeated_type)); CHECK( constRval == true ); } TEST_CASE("more tuple methods") { tuple<int, float> a_tuple_with_default_init(1, {}); // tuple construction from pair std::pair<int, float> a_pair(1, 2.0f); tuple<int, float> a_tuple(a_pair); CHECK_UNARY( get<0>(a_tuple) == 1 && get<1>(a_tuple) == 2.0f ); tuple<double, double> another_tuple(a_pair); CHECK_UNARY( get<0>(another_tuple) == 1.0 && get<1>(another_tuple) == 2.0 ); another_tuple = std::make_pair(2, 3); CHECK_UNARY( get<0>(another_tuple) == 2.0 && get<1>(another_tuple) == 3.0 ); // operators: ==, !=, < another_tuple = a_tuple; CHECK( a_tuple == another_tuple ); CHECK_UNARY( !(a_tuple < another_tuple) && !(another_tuple < a_tuple) ); tuple<double, double> a_default_init_tuple; CHECK( a_tuple != a_default_init_tuple ); CHECK( a_default_init_tuple < a_tuple ); tuple<int, int, int> a_lesser_tuple(1, 2, 3); tuple<int, int, int> a_greater_tuple(1, 2, 4); CHECK_UNARY( a_lesser_tuple < a_greater_tuple && !(a_greater_tuple < a_lesser_tuple) && a_greater_tuple > a_lesser_tuple && !(a_lesser_tuple > a_greater_tuple)); // We don't have the library's TestObject here // tuple<int, float, TestObject> value_tuple(2, 2.0f, TestObject(2)); // tuple<int&, float&, TestObject&> refTup(value_tuple); // tuple<const int&, const float&, const TestObject&> const_ref_to_tuple(value_tuple); // // CHECK( get<0>(refTup) == get<0>(value_tuple) ); // CHECK( get<1>(refTup) == get<1>(value_tuple) ); // CHECK( refTup == value_tuple ); // CHECK( get<0>(refTup) == get<0>(const_ref_to_tuple) ); // CHECK( get<1>(refTup) == get<1>(const_ref_to_tuple) ); // CHECK( const_ref_to_tuple == value_tuple ); // CHECK( const_ref_to_tuple == refTup ); // swap swap(a_lesser_tuple, a_greater_tuple); CHECK_UNARY( get<2>(a_lesser_tuple) == 4 && get<2>(a_greater_tuple) == 3 ); swap(a_greater_tuple, a_lesser_tuple); CHECK( a_lesser_tuple < a_greater_tuple ); } TEST_CASE("move-only contained type") { static_assert(std::is_constructible<move_only_type, move_only_type>::value, "is_constructible type trait giving confusing answers."); static_assert(std::is_constructible<move_only_type, move_only_type&&>::value, "is_constructible type trait giving wrong answers."); static_assert(std::is_constructible<move_only_type&&, move_only_type&&>::value, "is_constructible type trait giving bizarre answers."); tuple<move_only_type> a_tuple_with_move_only_member(1); CHECK( get<0>(a_tuple_with_move_only_member).value == 1 ); get<0>(a_tuple_with_move_only_member) = move_only_type(2); CHECK( get<0>(a_tuple_with_move_only_member).value == 2 ); tuple<const move_only_type&> a_tuple_with_ref_to_move_only_member(a_tuple_with_move_only_member); CHECK( get<0>(a_tuple_with_ref_to_move_only_member).value == 2 ); tuple<const move_only_type&> aTupleWithConstRefToGetMoveOnly(get<0>(a_tuple_with_move_only_member)); CHECK( get<0>(aTupleWithConstRefToGetMoveOnly).value == 2 ); tuple<move_only_type&> 
a_tuple_with_ref_to_get_move_only(get<0>(a_tuple_with_move_only_member)); CHECK( get<0>(a_tuple_with_ref_to_get_move_only).value == 2 ); } TEST_CASE("make_tuple") { auto a_made_tuple = make_tuple(1, 2.0, true); CHECK_UNARY( get<0>(a_made_tuple) == 1 && get<1>(a_made_tuple) == 2.0 && get<2>(a_made_tuple) == true ); // TODO: reference_wrapper implementation needs to be finished to enable this code { int a = 2; float b = 3.0f; auto a_made_tuple_2 = make_tuple(kat::ref(a), b); get<0>(a_made_tuple_2) = 3; get<1>(a_made_tuple_2) = 4.0f; CHECK_UNARY( get<0>(a_made_tuple_2) == 3 && get<1>(a_made_tuple_2) == 4.0f && a == 3 && b == 3.0f ); } } TEST_CASE("forward_as_tuple") { auto forward_test = [](tuple<move_only_type&&, move_only_type&&> x) -> tuple<move_only_type, move_only_type> { return tuple<move_only_type, move_only_type>(std::move(x)); }; tuple<move_only_type, move_only_type> a_movable_tuple( forward_test(kat::forward_as_tuple(move_only_type(1), move_only_type(2)))); CHECK_UNARY( get<0>(a_movable_tuple).value == 1 && get<1>(a_movable_tuple).value == 2 ); } TEST_CASE("tie") { int a = 0; double b = 0.0f; static_assert(std::is_assignable<const kat::detail::ignore_t<int>&, int>::value, "ignore_t not assignable"); static_assert(kat::detail::tuple_assignable<tuple<const kat::detail::ignore_t<int>&>, tuple<int>>::value, "Not assignable"); kat::tie(a, kat::ignore, b) = kat::make_tuple(1, 3, 5); CHECK_UNARY( a == 1 && b == 5.0f ); } TEST_CASE("tuple_cat") { int a = 0; double b = 0.0f; auto concatenated_tuple = tuple_cat(make_tuple(1, 2.0f), make_tuple(3.0, true)); CHECK_UNARY( get<0>(concatenated_tuple) == 1 && get<1>(concatenated_tuple) == 2.0f && get<2>(concatenated_tuple) == 3.0 && get<3>(concatenated_tuple) == true); auto concatenated_tuple_2 = tuple_cat(make_tuple(1, 2.0f), make_tuple(3.0, true), make_tuple(5u, '6')); CHECK_UNARY( get<0>(concatenated_tuple_2) == 1 && get<1>(concatenated_tuple_2) == 2.0f && get<2>(concatenated_tuple_2) == 3.0 && get<3>(concatenated_tuple_2) == true && get<4>(concatenated_tuple_2) == 5u && get<5>(concatenated_tuple_2) == '6'); auto a_catted_ref_tuple = tuple_cat(make_tuple(1), kat::tie(a, kat::ignore, b)); get<1>(a_catted_ref_tuple) = 2; CHECK( a == 2 ); } TEST_CASE("empty tuple") { tuple<> empty_tuple; CHECK( tuple_size<decltype(empty_tuple)>::value == 0 ); empty_tuple = make_tuple(); auto another_empty_tuple = make_tuple(); swap(another_empty_tuple, empty_tuple); } TEST_CASE("std::tuple compatibility") { { tuple<> empty_tuple; auto empty_std_tuple_1 { static_cast< std::tuple<> >(empty_tuple) }; auto empty_std_tuple_2 { static_cast< std::tuple<> >(kat::make_tuple()) }; std::tuple<> empty_std_tuple_3 = empty_tuple; // empty_tuple = empty_std_tuple_1; CHECK (std::is_same<std::tuple<>,decltype(empty_std_tuple_1)>::value); CHECK (kat::detail::tuple_convertible< std::tuple<>, tuple<> >::value); } { tuple<int, float, bool> a_tuple(1, 1.0f, true); auto std_tuple_1 { static_cast< std::tuple<int, float, bool> >(a_tuple) }; auto std_tuple_2 { static_cast< std::tuple<int, float, bool> >(kat::make_tuple(1, 1.0f, true)) }; std::tuple<int, float, bool> std_tuple_3 = a_tuple; // a_tuple = std_tuple_1; CHECK (std::is_same<std::tuple<int, float, bool>,decltype(std_tuple_1)>::value); // CHECK (kat::detail::tuple_convertible< std::tuple<int, float, bool>, tuple<int, float, bool> >::value); // CHECK (kat::tuple_size<std::tuple<int, float, bool>>::value == 3); // CHECK (kat::detail::tuple_assignable< tuple<int, float, bool>, std::tuple<int, float, bool> >::value); // std::cout // << 
"tuple_size<typename std::remove_reference<tuple<int, float, bool>>::type>::value = " // << tuple_size<typename std::remove_reference<tuple<int, float, bool>>::type>::value << '\n' // << "tuple_size<std::tuple<int, float, bool>>::value) = " // << tuple_size<std::tuple<int, float, bool>>::value << '\n' // << "kat::detail::make_tuple_types_t<tuple<int, float, bool>> = " // << util::type_name<kat::detail::make_tuple_types_t<tuple<int, float, bool> > >() << '\n' // << "make_tuple_types_t<std::tuple<int, float, bool> > = " // << util::type_name< kat::detail::make_tuple_types_t<std::tuple<int, float, bool> > >() << '\n'; // CHECK (kat::detail::tuple_assignable< tuple<int, float, bool>, tuple<int, float, bool> >::value); } // std::tuple<> empty_std_tuple; // tuple<> empty_tuple_1 { static_cast< kat::tuple<> >(empty_tuple_1) }; // tuple<> empty_tuple_2 { static_cast< kat::tuple<> >(std::make_tuple()) }; // tuple<> empty_tuple_3 = empty_std_tuple_1; // swap(empty_tuple, empty_std_tuple); // swap(empty_std_tuple, empty_tuple); // { // tuple<move_only_type> a_tuple_with_move_only_member(1); // auto std_tuple_1 { static_cast< std::tuple<move_only_type> >(a_tuple_with_move_only_member) }; // // tuple<const move_only_type&> a_tuple_with_ref_to_move_only_member(a_tuple_with_move_only_member); // std::tuple<> std_tuple_2 { static_cast< std::tuple<const move_only_type> >(a_tuple_with_ref_to_move_only_member) }; // // tuple<const move_only_type&> aTupleWithConstRefToGetMoveOnly(get<0>(a_tuple_with_move_only_member)); // std::tuple<const move_only_type&> std_tuple_3 { static_cast< std::tuple<const move_only_type&> >(a_tuple_with_ref_to_move_only_member) }; // // tuple<move_only_type&> a_tuple_with_ref_to_get_move_only(get<0>(a_tuple_with_move_only_member)); // std::tuple<move_only_type&> std_tuple_4 { static_cast< std::tuple<move_only_type&> >(a_tuple_with_ref_to_move_only_member) }; // } { // operators: ==, !=, < tuple<int, float, bool> a_tuple(1, 1.0f, true); std::tuple<int, float, bool> an_std_tuple = a_tuple; CHECK( a_tuple == an_std_tuple ); CHECK_UNARY( !(a_tuple < an_std_tuple) && !(an_std_tuple < a_tuple) ); } { tuple<int, int, int> a_lesser_tuple(1, 2, 3); tuple<int, int, int> a_greater_tuple(1, 2, 4); std::tuple<int, int, int> a_lesser_std_tuple(1, 2, 3); std::tuple<int, int, int> a_greater_std_tuple(1, 2, 4); CHECK_UNARY( a_lesser_tuple < a_greater_std_tuple && !(a_greater_tuple < a_lesser_std_tuple) && a_greater_tuple > a_lesser_std_tuple && !(a_lesser_tuple > a_greater_std_tuple) ); CHECK_UNARY( a_lesser_std_tuple < a_greater_tuple && !(a_greater_std_tuple < a_lesser_tuple) && a_greater_std_tuple > a_lesser_tuple && !(a_lesser_std_tuple > a_greater_tuple) ); } } // TODO: Enable this when we've introduced compatibility code of kat::tuple // and std::tuple on the host side. Also, if we get kat::pair, replicate the // following tests for that class as well. 
/* TEST_CASE("piecewise_construction") { { struct local { local() = default; local(int a, int b) : mA(a), mB(b) {} int mA = 0; int mB = 0; }; auto t = kat::make_tuple(42, 43); std::pair<local, local> p(std::piecewise_construct, t, t); CHECK( p.first.mA == 42 ); CHECK( p.second.mA == 42 ); CHECK( p.first.mB == 43 ); CHECK( p.second.mB == 43 ); } { struct local { local() = default; local(int a, int b, int c, int d) : mA(a), mB(b), mC(c), mD(d) {} int mA = 0; int mB = 0; int mC = 0; int mD = 0; }; auto t = kat::make_tuple(42, 43, 44, 45); std::pair<local, local> p(std::piecewise_construct, t, t); CHECK( p.first.mA == 42 ); CHECK( p.second.mA == 42 ); CHECK( p.first.mB == 43 ); CHECK( p.second.mB == 43 ); CHECK( p.first.mC == 44 ); CHECK( p.second.mC == 44 ); CHECK( p.first.mD == 45 ); CHECK( p.second.mD == 45 ); } { struct local1 { local1() = default; local1(int a) : mA(a) {} int mA = 0; }; struct local2 { local2() = default; local2(char a) : mA(a) {} char mA = 0; }; auto t1 = kat::make_tuple(42); auto t2 = kat::make_tuple('a'); std::pair<local1, local2> p(std::piecewise_construct, t1, t2); CHECK( p.first.mA == 42 ); CHECK( p.second.mA == 'a' ); } } */ #if __cplusplus >= 201703L TEST_CASE("apply") { // test with tuples { { auto result = kat::apply([](int i) { return i; }, make_tuple(1)); CHECK( result == 1 ); } { auto result = kat::apply([](int i, int j) { return i + j; }, make_tuple(1, 2)); CHECK( result == 3 ); } { auto result = kat::apply([](int i, int j, int k, int m) { return i + j + k + m; }, make_tuple(1, 2, 3, 4)); CHECK( result == 10 ); } } // // test with pair // { // auto result = kat::apply([](int i, int j) { return i + j; }, make_pair(1, 2)); // CHECK( result == 3 ); // } // TODO: Test apply with arrays? } TEST_CASE("tuple structured bindings") { kat::tuple<int, int, int> t = {1,2,3}; auto [x,y,z] = t; CHECK( x == 1 ); CHECK( y == 2 ); CHECK( z == 3 ); } #endif // __cplusplus >= 201703L TEST_CASE("tuple_cat") { void* empty = nullptr; auto t = kat::make_tuple(empty, true); auto tc = kat::tuple_cat(kat::make_tuple("asd", 1), t); static_assert(std::is_same<decltype(tc), kat::tuple<const char*, int, void*, bool>>::value, "type mismatch"); CHECK( std::string("asd") == kat::get<0>(tc) ); CHECK( kat::get<1>(tc) == 1 ); CHECK( kat::get<2>(tc) == nullptr ); CHECK( kat::get<3>(tc) == true ); } } // TEST_SUITE("tuple") // EA_RESTORE_VC_WARNING()
16d7650469416eaf831c5a647f2d17d593483be8.hip
// !!! This is a file automatically generated by hipify!!!
#include <THH/THHGeneral.h>
#include <THH/THHTensorMath.h>
#include <THH/THHTensorCopy.h>
#include <THH/THHTensorMathMagma.cuh>
#include <THH/THHTensor.hpp>
#include <THH/THHStorage.hpp>
#include <algorithm>
#include <ATen/native/hip/MiscUtils.h>

#ifdef USE_MAGMA
#include <magma.h>
#endif

#ifndef DIVUP
#define DIVUP(x, y) (((x) + (y) - 1) / (y))
#endif

#define NoMagma(name) "No CUDA implementation of '" #name "'. Install MAGMA and rebuild cutorch (http://icl.cs.utk.edu/magma/)"

void THCMagma_init(THCState *state)
{
#ifdef USE_MAGMA
  magma_init();
#endif
}

#include <THH/generic/THHTensorMathMagma.hip>
#include <THH/THHGenerateAllTypes.h>
16d7650469416eaf831c5a647f2d17d593483be8.cu
#include <THC/THCGeneral.h>
#include <THC/THCTensorMath.h>
#include <THC/THCTensorCopy.h>
#include <THC/THCTensorMathMagma.cuh>
#include <THC/THCTensor.hpp>
#include <THC/THCStorage.hpp>
#include <algorithm>
#include <ATen/native/cuda/MiscUtils.h>

#ifdef USE_MAGMA
#include <magma.h>
#endif

#ifndef DIVUP
#define DIVUP(x, y) (((x) + (y) - 1) / (y))
#endif

#define NoMagma(name) "No CUDA implementation of '" #name "'. Install MAGMA and rebuild cutorch (http://icl.cs.utk.edu/magma/)"

void THCMagma_init(THCState *state)
{
#ifdef USE_MAGMA
  magma_init();
#endif
}

#include <THC/generic/THCTensorMathMagma.cu>
#include <THC/THCGenerateAllTypes.h>
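Beyond the straight THC-to-THH and cuda-to-hip renames, the only logic this pair carries is the DIVUP macro for rounding up launch grids. The sketch below shows the usual way such a macro is applied; the kernel name scale_kernel, the wrapper launch_scale, and the block size are hypothetical and not part of THC.

#ifndef DIVUP
#define DIVUP(x, y) (((x) + (y) - 1) / (y))
#endif

// Hypothetical element-wise kernel, only here to illustrate DIVUP-based grid sizing.
__global__ void scale_kernel(float *data, float factor, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        data[i] *= factor;
}

void launch_scale(float *d_data, float factor, int n)
{
    const int threads = 256;
    const int blocks  = DIVUP(n, threads);   // ceiling division, so a non-multiple n is still covered
    scale_kernel<<<blocks, threads>>>(d_data, factor, n);
}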
0edf9c30142ae17ec7983aee128a8685263f4f00.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // Perform "naive" square matrix multiplication // #include <stdio.h> #define BLOCK_SIZE 16 // submatrix size #define N 1024 // matrix size is N*N __global__ void matMult ( float * a, float * b, int n, float * c ) { int bx = blockIdx.x; // block index int by = blockIdx.y; int tx = threadIdx.x; // thread index int ty = threadIdx.y; float sum = 0.0f; // computed subelement int ia = n * BLOCK_SIZE * by + n * ty; // a [i][0] int ib = BLOCK_SIZE * bx + tx; // Multiply the two matrices together; for ( int k = 0; k < n; k++ ) sum += a [ia + k] * b [ib + k*n]; // Write the block sub-matrix to global memory; // each thread writes one element int ic = n * BLOCK_SIZE * by + BLOCK_SIZE * bx; c [ic + n * ty + tx] = sum; } int main ( int argc, char * argv [] ) { int numBytes = N * N * sizeof ( float ); // allocate host memory float * a = new float [N*N]; float * b = new float [N*N]; float * c = new float [N*N]; for ( int i = 0; i < N; i++ ) for ( int j = 0; j < N; j++ ) { a [i] = 0.0f; b [i] = 1.0f; } // allocate device memory float * adev = NULL; float * bdev = NULL; float * cdev = NULL; hipMalloc ( (void**)&adev, numBytes ); hipMalloc ( (void**)&bdev, numBytes ); hipMalloc ( (void**)&cdev, numBytes ); // set kernel launch configuration dim3 threads ( BLOCK_SIZE, BLOCK_SIZE ); dim3 blocks ( N / threads.x, N / threads.y); // create cuda event handles hipEvent_t start, stop; float gpuTime = 0.0f; hipEventCreate ( &start ); hipEventCreate ( &stop ); // asynchronously issue work to the GPU (all to stream 0) hipEventRecord ( start, 0 ); hipMemcpy ( adev, a, numBytes, hipMemcpyHostToDevice ); hipMemcpy ( bdev, b, numBytes, hipMemcpyHostToDevice ); hipLaunchKernelGGL(( matMult), dim3(blocks), dim3(threads), 0, 0, adev, bdev, N, cdev ); hipMemcpy ( c, cdev, numBytes, hipMemcpyDeviceToHost ); hipEventRecord ( stop, 0 ); hipEventSynchronize ( stop ); hipEventElapsedTime ( &gpuTime, start, stop ); // print the cpu and gpu times printf("time spent executing by the GPU: %.2f millseconds\n", gpuTime ); // release resources hipEventDestroy ( start ); hipEventDestroy ( stop ); hipFree ( adev ); hipFree ( bdev ); hipFree ( cdev ); delete a; delete b; delete c; return 0; }
0edf9c30142ae17ec7983aee128a8685263f4f00.cu
// // Perform "naive" square matrix multiplication // #include <stdio.h> #define BLOCK_SIZE 16 // submatrix size #define N 1024 // matrix size is N*N __global__ void matMult ( float * a, float * b, int n, float * c ) { int bx = blockIdx.x; // block index int by = blockIdx.y; int tx = threadIdx.x; // thread index int ty = threadIdx.y; float sum = 0.0f; // computed subelement int ia = n * BLOCK_SIZE * by + n * ty; // a [i][0] int ib = BLOCK_SIZE * bx + tx; // Multiply the two matrices together; for ( int k = 0; k < n; k++ ) sum += a [ia + k] * b [ib + k*n]; // Write the block sub-matrix to global memory; // each thread writes one element int ic = n * BLOCK_SIZE * by + BLOCK_SIZE * bx; c [ic + n * ty + tx] = sum; } int main ( int argc, char * argv [] ) { int numBytes = N * N * sizeof ( float ); // allocate host memory float * a = new float [N*N]; float * b = new float [N*N]; float * c = new float [N*N]; for ( int i = 0; i < N; i++ ) for ( int j = 0; j < N; j++ ) { a [i] = 0.0f; b [i] = 1.0f; } // allocate device memory float * adev = NULL; float * bdev = NULL; float * cdev = NULL; cudaMalloc ( (void**)&adev, numBytes ); cudaMalloc ( (void**)&bdev, numBytes ); cudaMalloc ( (void**)&cdev, numBytes ); // set kernel launch configuration dim3 threads ( BLOCK_SIZE, BLOCK_SIZE ); dim3 blocks ( N / threads.x, N / threads.y); // create cuda event handles cudaEvent_t start, stop; float gpuTime = 0.0f; cudaEventCreate ( &start ); cudaEventCreate ( &stop ); // asynchronously issue work to the GPU (all to stream 0) cudaEventRecord ( start, 0 ); cudaMemcpy ( adev, a, numBytes, cudaMemcpyHostToDevice ); cudaMemcpy ( bdev, b, numBytes, cudaMemcpyHostToDevice ); matMult<<<blocks, threads>>> ( adev, bdev, N, cdev ); cudaMemcpy ( c, cdev, numBytes, cudaMemcpyDeviceToHost ); cudaEventRecord ( stop, 0 ); cudaEventSynchronize ( stop ); cudaEventElapsedTime ( &gpuTime, start, stop ); // print the cpu and gpu times printf("time spent executing by the GPU: %.2f millseconds\n", gpuTime ); // release resources cudaEventDestroy ( start ); cudaEventDestroy ( stop ); cudaFree ( adev ); cudaFree ( bdev ); cudaFree ( cdev ); delete a; delete b; delete c; return 0; }
018ecd4a98df4e1e27402824bbd3a58f0658ed66.hip
// !!! This is a file automatically generated by hipify!!! // ---------------------------------------------------------------------------- // - Open3D: www.open3d.org - // ---------------------------------------------------------------------------- // Copyright (c) 2018-2023 www.open3d.org // SPDX-License-Identifier: MIT // ---------------------------------------------------------------------------- #define EIGEN_USE_GPU #include "SparseConvBackpropFilterOpKernel.h" #include "open3d/core/CUDAUtils.h" #include "open3d/ml/impl/sparse_conv/SparseConvBackpropFilter.cuh" using namespace open3d; using namespace open3d::ml; using namespace open3d::ml::impl; using namespace tensorflow; template <class TFeat, class TOut, class TIndex, class TKernelIndex> class SparseConvBackpropFilterOpKernelCUDA : public SparseConvBackpropFilterOpKernel<TIndex> { public: explicit SparseConvBackpropFilterOpKernelCUDA( OpKernelConstruction* construction) : SparseConvBackpropFilterOpKernel<TIndex>(construction) { texture_alignment = open3d::core::GetCUDACurrentDeviceTextureAlignment(); } void Kernel(tensorflow::OpKernelContext* context, const tensorflow::Tensor& filters, const tensorflow::Tensor& inp_features, const tensorflow::Tensor& inp_importance, const tensorflow::Tensor& neighbors_index, const tensorflow::Tensor& neighbors_kernel_index, const tensorflow::Tensor& neighbors_importance, const tensorflow::Tensor& neighbors_row_splits, const tensorflow::Tensor& out_features_gradient, const std::vector<int>& filter_dims, const bool point_importances, const bool has_neighbors_importances, tensorflow::Tensor& filter_backprop) { auto device = context->eigen_gpu_device(); void* temp_ptr = nullptr; size_t temp_size = 0; size_t max_temp_size = 0; // determine temp_size SparseConvBackpropFilterCUDA<TFeat, TOut, TIndex, TKernelIndex>( device.stream(), temp_ptr, temp_size, max_temp_size, texture_alignment, filter_backprop.flat<TOut>().data(), filter_dims, neighbors_row_splits.shape().dim_size(0) - 1, inp_features.shape().dim_size(0), inp_features.flat<TFeat>().data(), point_importances ? inp_importance.flat<TFeat>().data() : nullptr, neighbors_index.shape().dim_size(0), (TIndex*)neighbors_index.flat<TIndex>().data(), (TKernelIndex*)neighbors_kernel_index.flat<TKernelIndex>() .data(), has_neighbors_importances ? neighbors_importance.flat<TFeat>().data() : nullptr, (int64_t*)neighbors_row_splits.flat<int64>().data(), out_features_gradient.flat<TFeat>().data(), this->normalize); temp_size = ::max(::min(size_t(this->max_temp_mem_MB) * 1024 * 1024, max_temp_size), temp_size); Tensor temp_tensor; TensorShape temp_shape({ssize_t(temp_size)}); OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<uint8_t>::v(), temp_shape, &temp_tensor)); temp_ptr = temp_tensor.flat<uint8_t>().data(); // actually run the operation SparseConvBackpropFilterCUDA<TFeat, TOut, TIndex, TKernelIndex>( device.stream(), temp_ptr, temp_size, max_temp_size, texture_alignment, filter_backprop.flat<TOut>().data(), filter_dims, neighbors_row_splits.shape().dim_size(0) - 1, inp_features.shape().dim_size(0), inp_features.flat<TFeat>().data(), point_importances ? inp_importance.flat<TFeat>().data() : nullptr, neighbors_index.shape().dim_size(0), (TIndex*)neighbors_index.flat<TIndex>().data(), (TKernelIndex*)neighbors_kernel_index.flat<TKernelIndex>() .data(), has_neighbors_importances ? 
neighbors_importance.flat<TFeat>().data() : nullptr, (int64_t*)neighbors_row_splits.flat<int64>().data(), out_features_gradient.flat<TFeat>().data(), this->normalize); } private: int texture_alignment; }; #define REG_KB(feattype, outtype, indextype, kernelindextype) \ REGISTER_KERNEL_BUILDER( \ Name("Open3DSparseConvBackpropFilter") \ .Device(DEVICE_GPU) \ .TypeConstraint<feattype>("TFeat") \ .TypeConstraint<outtype>("output_type") \ .TypeConstraint<indextype>("TIndex") \ .TypeConstraint<kernelindextype>("TKernelIndex"), \ SparseConvBackpropFilterOpKernelCUDA<feattype, outtype, indextype, \ kernelindextype>); REG_KB(float, float, int32, int16_t) REG_KB(float, float, int32, uint8_t) #undef REG_KB
018ecd4a98df4e1e27402824bbd3a58f0658ed66.cu
// ---------------------------------------------------------------------------- // - Open3D: www.open3d.org - // ---------------------------------------------------------------------------- // Copyright (c) 2018-2023 www.open3d.org // SPDX-License-Identifier: MIT // ---------------------------------------------------------------------------- #define EIGEN_USE_GPU #include "SparseConvBackpropFilterOpKernel.h" #include "open3d/core/CUDAUtils.h" #include "open3d/ml/impl/sparse_conv/SparseConvBackpropFilter.cuh" using namespace open3d; using namespace open3d::ml; using namespace open3d::ml::impl; using namespace tensorflow; template <class TFeat, class TOut, class TIndex, class TKernelIndex> class SparseConvBackpropFilterOpKernelCUDA : public SparseConvBackpropFilterOpKernel<TIndex> { public: explicit SparseConvBackpropFilterOpKernelCUDA( OpKernelConstruction* construction) : SparseConvBackpropFilterOpKernel<TIndex>(construction) { texture_alignment = open3d::core::GetCUDACurrentDeviceTextureAlignment(); } void Kernel(tensorflow::OpKernelContext* context, const tensorflow::Tensor& filters, const tensorflow::Tensor& inp_features, const tensorflow::Tensor& inp_importance, const tensorflow::Tensor& neighbors_index, const tensorflow::Tensor& neighbors_kernel_index, const tensorflow::Tensor& neighbors_importance, const tensorflow::Tensor& neighbors_row_splits, const tensorflow::Tensor& out_features_gradient, const std::vector<int>& filter_dims, const bool point_importances, const bool has_neighbors_importances, tensorflow::Tensor& filter_backprop) { auto device = context->eigen_gpu_device(); void* temp_ptr = nullptr; size_t temp_size = 0; size_t max_temp_size = 0; // determine temp_size SparseConvBackpropFilterCUDA<TFeat, TOut, TIndex, TKernelIndex>( device.stream(), temp_ptr, temp_size, max_temp_size, texture_alignment, filter_backprop.flat<TOut>().data(), filter_dims, neighbors_row_splits.shape().dim_size(0) - 1, inp_features.shape().dim_size(0), inp_features.flat<TFeat>().data(), point_importances ? inp_importance.flat<TFeat>().data() : nullptr, neighbors_index.shape().dim_size(0), (TIndex*)neighbors_index.flat<TIndex>().data(), (TKernelIndex*)neighbors_kernel_index.flat<TKernelIndex>() .data(), has_neighbors_importances ? neighbors_importance.flat<TFeat>().data() : nullptr, (int64_t*)neighbors_row_splits.flat<int64>().data(), out_features_gradient.flat<TFeat>().data(), this->normalize); temp_size = std::max(std::min(size_t(this->max_temp_mem_MB) * 1024 * 1024, max_temp_size), temp_size); Tensor temp_tensor; TensorShape temp_shape({ssize_t(temp_size)}); OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<uint8_t>::v(), temp_shape, &temp_tensor)); temp_ptr = temp_tensor.flat<uint8_t>().data(); // actually run the operation SparseConvBackpropFilterCUDA<TFeat, TOut, TIndex, TKernelIndex>( device.stream(), temp_ptr, temp_size, max_temp_size, texture_alignment, filter_backprop.flat<TOut>().data(), filter_dims, neighbors_row_splits.shape().dim_size(0) - 1, inp_features.shape().dim_size(0), inp_features.flat<TFeat>().data(), point_importances ? inp_importance.flat<TFeat>().data() : nullptr, neighbors_index.shape().dim_size(0), (TIndex*)neighbors_index.flat<TIndex>().data(), (TKernelIndex*)neighbors_kernel_index.flat<TKernelIndex>() .data(), has_neighbors_importances ? 
neighbors_importance.flat<TFeat>().data() : nullptr, (int64_t*)neighbors_row_splits.flat<int64>().data(), out_features_gradient.flat<TFeat>().data(), this->normalize); } private: int texture_alignment; }; #define REG_KB(feattype, outtype, indextype, kernelindextype) \ REGISTER_KERNEL_BUILDER( \ Name("Open3DSparseConvBackpropFilter") \ .Device(DEVICE_GPU) \ .TypeConstraint<feattype>("TFeat") \ .TypeConstraint<outtype>("output_type") \ .TypeConstraint<indextype>("TIndex") \ .TypeConstraint<kernelindextype>("TKernelIndex"), \ SparseConvBackpropFilterOpKernelCUDA<feattype, outtype, indextype, \ kernelindextype>); REG_KB(float, float, int32, int16_t) REG_KB(float, float, int32, uint8_t) #undef REG_KB
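Both kernels above call SparseConvBackpropFilterCUDA twice: once with temp_ptr == nullptr purely to learn temp_size and max_temp_size, and once more with a real scratch buffer. The sketch below strips that query-then-allocate-then-run pattern down to its shape; run_op, query_then_run, and the byte counts are made up for illustration and are not Open3D API.

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <vector>

// Hypothetical two-phase worker, mimicking the calling convention used above:
// when temp_ptr is null it only reports how much scratch space it wants.
void run_op(void* temp_ptr, size_t& temp_size, size_t& max_temp_size /*, ...real args... */)
{
    if (temp_ptr == nullptr) {
        temp_size     = 4 << 20;   // minimum scratch the op can work with (made up)
        max_temp_size = 64 << 20;  // scratch size for the fastest path (made up)
        return;
    }
    // ... actual computation using temp_ptr[0 .. temp_size) ...
}

void query_then_run(size_t max_temp_mem_MB)
{
    void*  temp_ptr      = nullptr;
    size_t temp_size     = 0;
    size_t max_temp_size = 0;

    run_op(temp_ptr, temp_size, max_temp_size);                    // 1) query

    // 2) clamp to the user-provided budget, but never below the minimum
    temp_size = std::max(std::min(size_t(max_temp_mem_MB) * 1024 * 1024,
                                  max_temp_size),
                         temp_size);

    std::vector<uint8_t> temp(temp_size);                          // 3) allocate
    temp_ptr = temp.data();

    run_op(temp_ptr, temp_size, max_temp_size);                    // 4) run
}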
7b46d6e6c2bbaa2621ab3165e37e5a097e2d238b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* This is a automatically generated test. Do not modify */ #include <stdio.h> #include <stdlib.h> #include <math.h> __global__ void compute(float comp, int var_1,int var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float* var_16,float var_17,float var_18,float var_19,float var_20,float var_21) { for (int i=0; i < var_1; ++i) { float tmp_1 = +1.7238E-13f; comp += tmp_1 / (var_3 + (var_4 * coshf((+0.0f * (var_5 + (var_6 - (var_7 / logf(atan2f(+1.5160E-13f * var_8 - (var_9 * -1.2032E19f), -1.0343E-36f))))))))); if (comp == logf((var_10 / var_11 + var_12 + (var_13 * -1.2225E36f)))) { comp += -0.0f * (var_14 - var_15 + +1.7355E35f); } for (int i=0; i < var_2; ++i) { comp += (-1.2430E-43f - var_17 * +1.7458E-37f - +1.2869E-36f); var_16[i] = +1.2003E-29f; comp = var_16[i] / var_18 * (var_19 - +1.9828E1f); comp += var_20 - (var_21 + +1.6830E10f - (+1.1497E-37f + -1.0421E-44f)); } } printf("%.17g\n", comp); } float* initPointer(float v) { float *ret = (float*) malloc(sizeof(float)*10); for(int i=0; i < 10; ++i) ret[i] = v; return ret; } int main(int argc, char** argv) { /* Program variables */ float tmp_1 = atof(argv[1]); int tmp_2 = atoi(argv[2]); int tmp_3 = atoi(argv[3]); float tmp_4 = atof(argv[4]); float tmp_5 = atof(argv[5]); float tmp_6 = atof(argv[6]); float tmp_7 = atof(argv[7]); float tmp_8 = atof(argv[8]); float tmp_9 = atof(argv[9]); float tmp_10 = atof(argv[10]); float tmp_11 = atof(argv[11]); float tmp_12 = atof(argv[12]); float tmp_13 = atof(argv[13]); float tmp_14 = atof(argv[14]); float tmp_15 = atof(argv[15]); float tmp_16 = atof(argv[16]); float* tmp_17 = initPointer( atof(argv[17]) ); float tmp_18 = atof(argv[18]); float tmp_19 = atof(argv[19]); float tmp_20 = atof(argv[20]); float tmp_21 = atof(argv[21]); float tmp_22 = atof(argv[22]); hipLaunchKernelGGL(( compute), dim3(1),dim3(1), 0, 0, tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22); hipDeviceSynchronize(); return 0; }
7b46d6e6c2bbaa2621ab3165e37e5a097e2d238b.cu
/* This is a automatically generated test. Do not modify */ #include <stdio.h> #include <stdlib.h> #include <math.h> __global__ void compute(float comp, int var_1,int var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float* var_16,float var_17,float var_18,float var_19,float var_20,float var_21) { for (int i=0; i < var_1; ++i) { float tmp_1 = +1.7238E-13f; comp += tmp_1 / (var_3 + (var_4 * coshf((+0.0f * (var_5 + (var_6 - (var_7 / logf(atan2f(+1.5160E-13f * var_8 - (var_9 * -1.2032E19f), -1.0343E-36f))))))))); if (comp == logf((var_10 / var_11 + var_12 + (var_13 * -1.2225E36f)))) { comp += -0.0f * (var_14 - var_15 + +1.7355E35f); } for (int i=0; i < var_2; ++i) { comp += (-1.2430E-43f - var_17 * +1.7458E-37f - +1.2869E-36f); var_16[i] = +1.2003E-29f; comp = var_16[i] / var_18 * (var_19 - +1.9828E1f); comp += var_20 - (var_21 + +1.6830E10f - (+1.1497E-37f + -1.0421E-44f)); } } printf("%.17g\n", comp); } float* initPointer(float v) { float *ret = (float*) malloc(sizeof(float)*10); for(int i=0; i < 10; ++i) ret[i] = v; return ret; } int main(int argc, char** argv) { /* Program variables */ float tmp_1 = atof(argv[1]); int tmp_2 = atoi(argv[2]); int tmp_3 = atoi(argv[3]); float tmp_4 = atof(argv[4]); float tmp_5 = atof(argv[5]); float tmp_6 = atof(argv[6]); float tmp_7 = atof(argv[7]); float tmp_8 = atof(argv[8]); float tmp_9 = atof(argv[9]); float tmp_10 = atof(argv[10]); float tmp_11 = atof(argv[11]); float tmp_12 = atof(argv[12]); float tmp_13 = atof(argv[13]); float tmp_14 = atof(argv[14]); float tmp_15 = atof(argv[15]); float tmp_16 = atof(argv[16]); float* tmp_17 = initPointer( atof(argv[17]) ); float tmp_18 = atof(argv[18]); float tmp_19 = atof(argv[19]); float tmp_20 = atof(argv[20]); float tmp_21 = atof(argv[21]); float tmp_22 = atof(argv[22]); compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22); cudaDeviceSynchronize(); return 0; }
9d253939b6556f2530df4e8c81888635507a7c7b.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime_api.h> #include <time.h> /**************************************************************************** This program gives an example of a poor way to implement a password cracker in CUDA C. It is poor because it acheives this with just one thread, which is obviously not good given the scale of parallelism available to CUDA programs. The intentions of this program are: 1) Demonstrate the use of __device__ and __global__ functions 2) Enable a simulation of password cracking in the absence of library with equivalent functionality to libcrypt. The password to be found is hardcoded into a function called is_a_match. Compile and run with: nvcc -o passwordcrackwith2initial_cuda passwordcrackwith2initial_cuda.cu ./passwordcrackwith2initial_cuda Dr Kevan Buckley, University of Wolverhampton, 2018 *****************************************************************************/ /**************************************************************************** This function returns 1 if the attempt at cracking the password is identical to the plain text password string stored in the program. Otherwise,it returns 0. *****************************************************************************/ __device__ int is_a_match(char *attempt) { char plain_password1[] = "SO19"; char plain_password2[] = "NU40"; char plain_password3[] = "CH57"; char plain_password4[] = "AN45"; char *a = attempt; char *b = attempt; char *c = attempt; char *d = attempt; char *p1 = plain_password1; char *p2 = plain_password2; char *p3 = plain_password3; char *p4 = plain_password4; while(*a == *p1) { if(*a == '\0') { printf("Password: %s\n",plain_password1); break; } a++; p1++; } while(*b == *p2) { if(*b == '\0') { printf("Password: %s\n",plain_password2); break; } b++; p2++; } while(*c == *p3) { if(*c == '\0') { printf("Password: %s\n",plain_password3); break; } c++; p3++; } while(*d == *p4) { if(*d == '\0') { printf("Password: %s\n",plain_password4); return 1; } d++; p4++; } return 0; } __global__ void kernel() { char i1,i2,i3,i4; char password[7]; password[6] = '\0'; int i = blockIdx.x+65; int j = threadIdx.x+65; char firstMatch = i; char secondMatch = j; password[0] = firstMatch; password[1] = secondMatch; for(i1='0'; i1<='9'; i1++){ for(i2='0'; i2<='9'; i2++){ for(i3='0'; i3<='9'; i3++){ for(i4='0'; i4<='9'; i4++){ password[2] = i1; password[3] = i2; password[4] = i3; password[5] = i4; if(is_a_match(password)) { } else { //printf("tried: %s\n", password); } } } } } } int time_difference(struct timespec *start, struct timespec *finish, long long int *difference) { long long int ds = finish->tv_sec - start->tv_sec; long long int dn = finish->tv_nsec - start->tv_nsec; if(dn < 0 ) { ds--; dn += 1000000000; } *difference = ds * 1000000000 + dn; return !(*difference > 0); } int main() { struct timespec start, finish; long long int time_elapsed; clock_gettime(CLOCK_MONOTONIC, &start); hipLaunchKernelGGL(( kernel) , dim3(26),dim3(26), 0, 0, ); hipDeviceSynchronize(); clock_gettime(CLOCK_MONOTONIC, &finish); time_difference(&start, &finish, &time_elapsed); printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed, (time_elapsed/1.0e9)); return 0; }
9d253939b6556f2530df4e8c81888635507a7c7b.cu
#include <stdio.h> #include <cuda_runtime_api.h> #include <time.h> /**************************************************************************** This program gives an example of a poor way to implement a password cracker in CUDA C. It is poor because it acheives this with just one thread, which is obviously not good given the scale of parallelism available to CUDA programs. The intentions of this program are: 1) Demonstrate the use of __device__ and __global__ functions 2) Enable a simulation of password cracking in the absence of library with equivalent functionality to libcrypt. The password to be found is hardcoded into a function called is_a_match. Compile and run with: nvcc -o passwordcrackwith2initial_cuda passwordcrackwith2initial_cuda.cu ./passwordcrackwith2initial_cuda Dr Kevan Buckley, University of Wolverhampton, 2018 *****************************************************************************/ /**************************************************************************** This function returns 1 if the attempt at cracking the password is identical to the plain text password string stored in the program. Otherwise,it returns 0. *****************************************************************************/ __device__ int is_a_match(char *attempt) { char plain_password1[] = "SO19"; char plain_password2[] = "NU40"; char plain_password3[] = "CH57"; char plain_password4[] = "AN45"; char *a = attempt; char *b = attempt; char *c = attempt; char *d = attempt; char *p1 = plain_password1; char *p2 = plain_password2; char *p3 = plain_password3; char *p4 = plain_password4; while(*a == *p1) { if(*a == '\0') { printf("Password: %s\n",plain_password1); break; } a++; p1++; } while(*b == *p2) { if(*b == '\0') { printf("Password: %s\n",plain_password2); break; } b++; p2++; } while(*c == *p3) { if(*c == '\0') { printf("Password: %s\n",plain_password3); break; } c++; p3++; } while(*d == *p4) { if(*d == '\0') { printf("Password: %s\n",plain_password4); return 1; } d++; p4++; } return 0; } __global__ void kernel() { char i1,i2,i3,i4; char password[7]; password[6] = '\0'; int i = blockIdx.x+65; int j = threadIdx.x+65; char firstMatch = i; char secondMatch = j; password[0] = firstMatch; password[1] = secondMatch; for(i1='0'; i1<='9'; i1++){ for(i2='0'; i2<='9'; i2++){ for(i3='0'; i3<='9'; i3++){ for(i4='0'; i4<='9'; i4++){ password[2] = i1; password[3] = i2; password[4] = i3; password[5] = i4; if(is_a_match(password)) { } else { //printf("tried: %s\n", password); } } } } } } int time_difference(struct timespec *start, struct timespec *finish, long long int *difference) { long long int ds = finish->tv_sec - start->tv_sec; long long int dn = finish->tv_nsec - start->tv_nsec; if(dn < 0 ) { ds--; dn += 1000000000; } *difference = ds * 1000000000 + dn; return !(*difference > 0); } int main() { struct timespec start, finish; long long int time_elapsed; clock_gettime(CLOCK_MONOTONIC, &start); kernel <<<26,26>>>(); cudaThreadSynchronize(); clock_gettime(CLOCK_MONOTONIC, &finish); time_difference(&start, &finish, &time_elapsed); printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed, (time_elapsed/1.0e9)); return 0; }
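is_a_match above repeats the same character-walk four times, once per hard-coded password. One possible refactoring is sketched below: the walk becomes a single __device__ helper and the matcher loops over the stored strings. The names same_string and is_a_match_factored are new, and this is a sketch of the restructuring rather than a drop-in replacement tested against the original.

// One comparison loop instead of four copies of it. The walk mirrors the loop
// used in is_a_match: advance while characters agree, report a hit when the
// terminator is reached on both strings.
__device__ int same_string(const char *attempt, const char *plain)
{
    while (*attempt == *plain) {
        if (*attempt == '\0')
            return 1;
        attempt++;
        plain++;
    }
    return 0;
}

__device__ int is_a_match_factored(const char *attempt)
{
    const char *passwords[] = { "SO19", "NU40", "CH57", "AN45" };
    for (int p = 0; p < 4; p++)
        if (same_string(attempt, passwords[p]))
            return 1;
    return 0;
}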
9d732c73b194eb68aa84e238a3f5f27d1a9d7a19.hip
// !!! This is a file automatically generated by hipify!!! /* * Triangle counter with workload balancing * * @author: Manish Jain * @author: Vashishtha Adtani */ #include <iostream> #include <string> #include <sstream> #include <algorithm> #include <stdlib.h> #include <math.h> #include <stdio.h> #include <vector> #include <thrust/scan.h> #include <thrust/device_ptr.h> #include <thrust/execution_policy.h> #include <fstream> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <driver_functions.h> #include "cudaTriangleCounter.h" #define BLOCK_SIZE 32 struct GlobalConstants { int *NodeList; int *ListLen; int numNodes; int numEdges; }; __constant__ GlobalConstants cuConstCounterParams; void CudaTriangleCounter::setup() { int deviceCount = 0; std::string name; hipError_t err = hipGetDeviceCount(&deviceCount); printf("---------------------------------------------------------\n"); printf("Initializing CUDA for CountingTriangles\n"); printf("Found %d CUDA devices\n", deviceCount); for (int i=0; i<deviceCount; i++) { hipDeviceProp_t deviceProps; hipGetDeviceProperties(&deviceProps, i); name = deviceProps.name; printf("Device %d: %s\n", i, deviceProps.name); printf(" SMs: %d\n", deviceProps.multiProcessorCount); printf(" Global mem: %.0f MB\n", static_cast<float>(deviceProps.totalGlobalMem) / (1024 * 1024)); printf(" CUDA Cap: %d.%d\n", deviceProps.major, deviceProps.minor); } printf("---------------------------------------------------------\n"); // By this time the graph should be loaded. Copying graph to // data structures into device memory so that it is accessible to // CUDA kernels // hipMalloc(&cudaDeviceListLen, sizeof(int ) * numNodes); hipMemcpy(cudaDeviceListLen, list_len, sizeof(int) * numNodes, hipMemcpyHostToDevice); hipMalloc((void **)&cudaDeviceNodeList, node_list_size * sizeof(int)); hipMemcpy(cudaDeviceNodeList, node_list, sizeof(int) * node_list_size, hipMemcpyHostToDevice); GlobalConstants params; params.ListLen = cudaDeviceListLen; params.NodeList = cudaDeviceNodeList; params.numNodes = numNodes; params.numEdges = numEdges; hipMemcpyToSymbol(cuConstCounterParams, &params, sizeof(GlobalConstants)); } CudaTriangleCounter::CudaTriangleCounter(char *fileName) { clock_t start, diff, malloc_diff; int node, edge_id, temp = 0; int total_nodes = 0; int total_edges = 0; int msec; std::string line; std::ifstream myfile; myfile.open(fileName); std::string token; if (strstr(fileName,"new_orkut") != NULL) { printf("This is the NEW_ORKUT FILE **\n"); total_nodes = 3072600; total_edges = 117185083 + 1; } else { std::getline(myfile,line); std::stringstream lineStream(line); while (lineStream >> token) { if (temp == 0) { total_nodes = std::stoi(token, NULL, 10) + 1; } else if (temp == 1) { total_edges = std::stoi(token, NULL, 10) + 1; } else { printf("!!!!!!!!!!!! 
TEMP IS %d\n ", temp); break; } temp++; } } start = clock(); numNodes = total_nodes; node_list_size = total_edges * 2; numEdges = total_edges; printf("total_nodes %d\n", total_nodes); printf("node_list_size %d\n", node_list_size); printf("numEdges %d\n", numEdges); list_len = (int *)calloc(total_nodes, sizeof(int)); start_addr = (int *)calloc(total_nodes, sizeof(int)); node_list = (int *)calloc(node_list_size, sizeof(int)); malloc_diff = clock() - start; msec = malloc_diff * 1000 / CLOCKS_PER_SEC; printf("memory allocated ......\n"); node = 1; temp = 1; int neighbors; while(std::getline(myfile, line)) { neighbors = 0; std::stringstream lineStream(line); std::string token; while(lineStream >> token) { edge_id = std::stoi(token, NULL, 10); if (edge_id > node) { node_list[temp++] = edge_id; neighbors++; } } list_len[node] = neighbors; node++; } printf("graph created......\n"); diff = clock() - start; msec = diff * 1000 / CLOCKS_PER_SEC; printf("time taken %d seconds %d milliseconds\n", msec/1000, msec%1000); myfile.close(); } CudaTriangleCounter::~CudaTriangleCounter() { free(node_list); free(list_len); } /* * Kernel to count number of triangles formed by a single edge. And store the count * in an array on which we will run reduction later to find total number of triangles * in the given graph. */ __global__ void countTriangleKernel(int *countArray, edge_tuple_t *compressed_list, int *start_addr, int num) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i >= num) { return; } if (i == 0) { countArray[i] = 0; return; } int j = 0, k = 0, count=0; int *node_list = cuConstCounterParams.NodeList; int *list_len = cuConstCounterParams.ListLen; edge_tuple_t *edgeList = compressed_list; int u = edgeList[i].u; int v = edgeList[i].v; /* Fetching neigbour vertices from the node list */ int *list1 = node_list + start_addr[u-1] + 1; int len1 = list_len[u]; int *list2 = node_list + start_addr[v-1] + 1; int len2 = list_len[v]; /* * Traversing both lists to find the common nodes. 
Each common node * will be counted as a triangle */ while ( j < len1 && k < len2) { if (list1[j] == list2[k]) { count++; j++; k++; } else if (list1[j] < list2[k]) { j++; } else { k++; } } countArray[i] = count; } /* * Creating data structure which stores all the edges */ __global__ void createEdgeList(edge_tuple_t *edge_list, int *start_addr) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i >= cuConstCounterParams.numNodes) { return; } if (i == 0) { return; } int *node_list = cuConstCounterParams.NodeList; int *list_len = cuConstCounterParams.ListLen; int start_index = start_addr[i-1] + 1; int *list = node_list + start_addr[i-1] + 1; int len = list_len[i]; for (int j=0; j<len; j++) { edge_list[start_index].u = i; edge_list[start_index].v = list[j]; start_index++; } } #define THRESHOLD 50000 __global__ void segregateList(edge_tuple_t *edge_list, int *small_edge, int *large_edge) { int i = blockIdx.x * blockDim.x + threadIdx.x; int *list_len = cuConstCounterParams.ListLen; if ( i >= cuConstCounterParams.numEdges) { return; } if (i == 0) { large_edge[i] = 0; small_edge[i] = 0; return; } int u = edge_list[i].u; int v = edge_list[i].v; if ((list_len[u] > THRESHOLD) || (list_len[v] > THRESHOLD)) { large_edge[i] = 1; small_edge[i] = 0; } else { large_edge[i] = 0; small_edge[i] = 1; } } __global__ void createSmallList(edge_tuple_t *edge_list, edge_tuple_t *small_edge_list, int *small_edge) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i >= (cuConstCounterParams.numEdges)) { return; } if (small_edge[i] != small_edge[i+1]) { int index = small_edge[i]; small_edge_list[index] = edge_list[i]; } } __global__ void createLargeList(edge_tuple_t *edge_list, edge_tuple_t *large_edge_list, int *large_edge) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i >= (cuConstCounterParams.numEdges-1)) { return; } if (large_edge[i] != large_edge[i+1]) { int index = large_edge[i]; large_edge_list[index] = edge_list[i]; } } /* * Counts the number of triangles in the given graph. We first find out the * starting address of each list where list stores the neighbours of particular * node. We then create the list of all edges from the given nodes and their * neighbours. 
*/ void CudaTriangleCounter::countTriangles() { dim3 blockdim = BLOCK_SIZE; dim3 griddim = (numEdges + BLOCK_SIZE)/BLOCK_SIZE; dim3 griddim1 = (numNodes + BLOCK_SIZE)/BLOCK_SIZE; int count; edge_tuple_t *edge_list, *small_edge_list, *large_edge_list; int *small_edge, *large_edge; int num_small_edges, num_large_edges; int *temp; /* Calculating start address of each neighbour list */ hipMalloc(&cudaDeviceStartAddr, sizeof(int ) * numNodes); thrust::device_ptr<int> dev_ptr1(cudaDeviceListLen); thrust::device_ptr<int> output_ptr(cudaDeviceStartAddr); thrust::inclusive_scan(dev_ptr1, dev_ptr1 + numNodes, output_ptr); /* Create a list of all edges present in the graph */ hipMalloc((void **)&edge_list, numEdges * sizeof(edge_tuple_t)); hipLaunchKernelGGL(( createEdgeList), dim3(griddim1), dim3(blockdim), 0, 0, edge_list, cudaDeviceStartAddr); hipDeviceSynchronize(); hipMalloc(&small_edge, sizeof(int ) * numEdges); hipMalloc(&large_edge, sizeof(int ) * numEdges); hipLaunchKernelGGL(( segregateList), dim3(griddim), dim3(blockdim), 0, 0, edge_list, small_edge, large_edge); hipDeviceSynchronize(); thrust::device_ptr<int> small_ptr(small_edge); thrust::inclusive_scan(small_ptr, small_ptr + numEdges, small_ptr); thrust::device_ptr<int> large_ptr(large_edge); thrust::inclusive_scan(large_ptr, large_ptr + numEdges, large_ptr); temp = (int *) malloc (numEdges * sizeof(int)); hipMemcpy(temp, small_edge, sizeof(int) * numEdges, hipMemcpyDeviceToHost); hipMemcpy(&num_small_edges, &small_edge[numEdges-1], sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(&num_large_edges, &large_edge[numEdges-1], sizeof(int), hipMemcpyDeviceToHost); hipMalloc((void **)&small_edge_list, ( 1 +num_small_edges) * sizeof(edge_tuple_t)); hipMalloc((void **)&large_edge_list, ( 1 + num_large_edges) * sizeof(edge_tuple_t)); hipLaunchKernelGGL(( createSmallList), dim3(griddim), dim3(blockdim), 0, 0, edge_list, small_edge_list, small_edge); hipDeviceSynchronize(); hipLaunchKernelGGL(( createLargeList), dim3(griddim), dim3(blockdim), 0, 0, edge_list, large_edge_list, large_edge); hipDeviceSynchronize(); int *countArraySmall, *countArrayLarge; hipMalloc((void **)&countArraySmall, (2 + num_small_edges) * sizeof(int)); hipMalloc((void **)&countArrayLarge, (2 + num_large_edges) * sizeof(int)); dim3 griddim2 = (num_small_edges + 1 + BLOCK_SIZE)/BLOCK_SIZE; /* Applying intersection rule on all small edges to find number of triangles */ hipLaunchKernelGGL(( countTriangleKernel), dim3(griddim2), dim3(blockdim), 0, 0, countArraySmall, small_edge_list, cudaDeviceStartAddr, num_small_edges+1); hipDeviceSynchronize(); thrust::device_ptr<int> dev_ptr2(countArraySmall); thrust::inclusive_scan(dev_ptr2, dev_ptr2 + num_small_edges+1, dev_ptr2); int count1, count2; hipMemcpy(&count1, &countArraySmall[num_small_edges], sizeof(int), hipMemcpyDeviceToHost); dim3 griddim3 = (num_large_edges + 1 + BLOCK_SIZE)/BLOCK_SIZE; /* Applying intersection rule on all large edges to find number of triangles */ hipLaunchKernelGGL(( countTriangleKernel), dim3(griddim3), dim3(blockdim), 0, 0, countArrayLarge, large_edge_list, cudaDeviceStartAddr, num_large_edges+1); hipDeviceSynchronize(); thrust::device_ptr<int> dev_ptr3(countArrayLarge); thrust::inclusive_scan(dev_ptr3, dev_ptr3 + num_large_edges + 1, dev_ptr3); hipMemcpy(&count2, &countArrayLarge[num_large_edges], sizeof(int), hipMemcpyDeviceToHost); count = count1 + count2; printf("count %d\n", count); }
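The heart of countTriangleKernel is the merge-style walk over two sorted adjacency lists: every common neighbour of u and v closes one triangle with the edge (u, v). The standalone host function below mirrors that step in isolation (the function name is new), which can be handy for checking the device logic on small inputs.

// Host-side mirror of the intersection loop in countTriangleKernel: both lists
// contain neighbour IDs in ascending order, and each shared ID counts one triangle.
static int count_common_neighbours ( const int *list1, int len1,
                                     const int *list2, int len2 )
{
    int j = 0, k = 0, count = 0;
    while ( j < len1 && k < len2 ) {
        if ( list1[j] == list2[k] ) {       // common neighbour -> one triangle
            count++; j++; k++;
        } else if ( list1[j] < list2[k] ) {
            j++;
        } else {
            k++;
        }
    }
    return count;
}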
9d732c73b194eb68aa84e238a3f5f27d1a9d7a19.cu
/* * Triangle counter with workload balancing * * @author: Manish Jain * @author: Vashishtha Adtani */ #include <iostream> #include <string> #include <sstream> #include <algorithm> #include <stdlib.h> #include <math.h> #include <stdio.h> #include <vector> #include <thrust/scan.h> #include <thrust/device_ptr.h> #include <thrust/execution_policy.h> #include <fstream> #include <cuda.h> #include <cuda_runtime.h> #include <driver_functions.h> #include "cudaTriangleCounter.h" #define BLOCK_SIZE 32 struct GlobalConstants { int *NodeList; int *ListLen; int numNodes; int numEdges; }; __constant__ GlobalConstants cuConstCounterParams; void CudaTriangleCounter::setup() { int deviceCount = 0; std::string name; cudaError_t err = cudaGetDeviceCount(&deviceCount); printf("---------------------------------------------------------\n"); printf("Initializing CUDA for CountingTriangles\n"); printf("Found %d CUDA devices\n", deviceCount); for (int i=0; i<deviceCount; i++) { cudaDeviceProp deviceProps; cudaGetDeviceProperties(&deviceProps, i); name = deviceProps.name; printf("Device %d: %s\n", i, deviceProps.name); printf(" SMs: %d\n", deviceProps.multiProcessorCount); printf(" Global mem: %.0f MB\n", static_cast<float>(deviceProps.totalGlobalMem) / (1024 * 1024)); printf(" CUDA Cap: %d.%d\n", deviceProps.major, deviceProps.minor); } printf("---------------------------------------------------------\n"); // By this time the graph should be loaded. Copying graph to // data structures into device memory so that it is accessible to // CUDA kernels // cudaMalloc(&cudaDeviceListLen, sizeof(int ) * numNodes); cudaMemcpy(cudaDeviceListLen, list_len, sizeof(int) * numNodes, cudaMemcpyHostToDevice); cudaMalloc((void **)&cudaDeviceNodeList, node_list_size * sizeof(int)); cudaMemcpy(cudaDeviceNodeList, node_list, sizeof(int) * node_list_size, cudaMemcpyHostToDevice); GlobalConstants params; params.ListLen = cudaDeviceListLen; params.NodeList = cudaDeviceNodeList; params.numNodes = numNodes; params.numEdges = numEdges; cudaMemcpyToSymbol(cuConstCounterParams, &params, sizeof(GlobalConstants)); } CudaTriangleCounter::CudaTriangleCounter(char *fileName) { clock_t start, diff, malloc_diff; int node, edge_id, temp = 0; int total_nodes = 0; int total_edges = 0; int msec; std::string line; std::ifstream myfile; myfile.open(fileName); std::string token; if (strstr(fileName,"new_orkut") != NULL) { printf("This is the NEW_ORKUT FILE **\n"); total_nodes = 3072600; total_edges = 117185083 + 1; } else { std::getline(myfile,line); std::stringstream lineStream(line); while (lineStream >> token) { if (temp == 0) { total_nodes = std::stoi(token, NULL, 10) + 1; } else if (temp == 1) { total_edges = std::stoi(token, NULL, 10) + 1; } else { printf("!!!!!!!!!!!! 
TEMP IS %d\n ", temp); break; } temp++; } } start = clock(); numNodes = total_nodes; node_list_size = total_edges * 2; numEdges = total_edges; printf("total_nodes %d\n", total_nodes); printf("node_list_size %d\n", node_list_size); printf("numEdges %d\n", numEdges); list_len = (int *)calloc(total_nodes, sizeof(int)); start_addr = (int *)calloc(total_nodes, sizeof(int)); node_list = (int *)calloc(node_list_size, sizeof(int)); malloc_diff = clock() - start; msec = malloc_diff * 1000 / CLOCKS_PER_SEC; printf("memory allocated ......\n"); node = 1; temp = 1; int neighbors; while(std::getline(myfile, line)) { neighbors = 0; std::stringstream lineStream(line); std::string token; while(lineStream >> token) { edge_id = std::stoi(token, NULL, 10); if (edge_id > node) { node_list[temp++] = edge_id; neighbors++; } } list_len[node] = neighbors; node++; } printf("graph created......\n"); diff = clock() - start; msec = diff * 1000 / CLOCKS_PER_SEC; printf("time taken %d seconds %d milliseconds\n", msec/1000, msec%1000); myfile.close(); } CudaTriangleCounter::~CudaTriangleCounter() { free(node_list); free(list_len); } /* * Kernel to count number of triangles formed by a single edge. And store the count * in an array on which we will run reduction later to find total number of triangles * in the given graph. */ __global__ void countTriangleKernel(int *countArray, edge_tuple_t *compressed_list, int *start_addr, int num) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i >= num) { return; } if (i == 0) { countArray[i] = 0; return; } int j = 0, k = 0, count=0; int *node_list = cuConstCounterParams.NodeList; int *list_len = cuConstCounterParams.ListLen; edge_tuple_t *edgeList = compressed_list; int u = edgeList[i].u; int v = edgeList[i].v; /* Fetching neigbour vertices from the node list */ int *list1 = node_list + start_addr[u-1] + 1; int len1 = list_len[u]; int *list2 = node_list + start_addr[v-1] + 1; int len2 = list_len[v]; /* * Traversing both lists to find the common nodes. 
Each common node * will be counted as a triangle */ while ( j < len1 && k < len2) { if (list1[j] == list2[k]) { count++; j++; k++; } else if (list1[j] < list2[k]) { j++; } else { k++; } } countArray[i] = count; } /* * Creating data structure which stores all the edges */ __global__ void createEdgeList(edge_tuple_t *edge_list, int *start_addr) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i >= cuConstCounterParams.numNodes) { return; } if (i == 0) { return; } int *node_list = cuConstCounterParams.NodeList; int *list_len = cuConstCounterParams.ListLen; int start_index = start_addr[i-1] + 1; int *list = node_list + start_addr[i-1] + 1; int len = list_len[i]; for (int j=0; j<len; j++) { edge_list[start_index].u = i; edge_list[start_index].v = list[j]; start_index++; } } #define THRESHOLD 50000 __global__ void segregateList(edge_tuple_t *edge_list, int *small_edge, int *large_edge) { int i = blockIdx.x * blockDim.x + threadIdx.x; int *list_len = cuConstCounterParams.ListLen; if ( i >= cuConstCounterParams.numEdges) { return; } if (i == 0) { large_edge[i] = 0; small_edge[i] = 0; return; } int u = edge_list[i].u; int v = edge_list[i].v; if ((list_len[u] > THRESHOLD) || (list_len[v] > THRESHOLD)) { large_edge[i] = 1; small_edge[i] = 0; } else { large_edge[i] = 0; small_edge[i] = 1; } } __global__ void createSmallList(edge_tuple_t *edge_list, edge_tuple_t *small_edge_list, int *small_edge) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i >= (cuConstCounterParams.numEdges)) { return; } if (small_edge[i] != small_edge[i+1]) { int index = small_edge[i]; small_edge_list[index] = edge_list[i]; } } __global__ void createLargeList(edge_tuple_t *edge_list, edge_tuple_t *large_edge_list, int *large_edge) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i >= (cuConstCounterParams.numEdges-1)) { return; } if (large_edge[i] != large_edge[i+1]) { int index = large_edge[i]; large_edge_list[index] = edge_list[i]; } } /* * Counts the number of triangles in the given graph. We first find out the * starting address of each list where list stores the neighbours of particular * node. We then create the list of all edges from the given nodes and their * neighbours. 
*/ void CudaTriangleCounter::countTriangles() { dim3 blockdim = BLOCK_SIZE; dim3 griddim = (numEdges + BLOCK_SIZE)/BLOCK_SIZE; dim3 griddim1 = (numNodes + BLOCK_SIZE)/BLOCK_SIZE; int count; edge_tuple_t *edge_list, *small_edge_list, *large_edge_list; int *small_edge, *large_edge; int num_small_edges, num_large_edges; int *temp; /* Calculating start address of each neighbour list */ cudaMalloc(&cudaDeviceStartAddr, sizeof(int ) * numNodes); thrust::device_ptr<int> dev_ptr1(cudaDeviceListLen); thrust::device_ptr<int> output_ptr(cudaDeviceStartAddr); thrust::inclusive_scan(dev_ptr1, dev_ptr1 + numNodes, output_ptr); /* Create a list of all edges present in the graph */ cudaMalloc((void **)&edge_list, numEdges * sizeof(edge_tuple_t)); createEdgeList<<<griddim1, blockdim>>>(edge_list, cudaDeviceStartAddr); cudaDeviceSynchronize(); cudaMalloc(&small_edge, sizeof(int ) * numEdges); cudaMalloc(&large_edge, sizeof(int ) * numEdges); segregateList<<<griddim, blockdim>>>(edge_list, small_edge, large_edge); cudaDeviceSynchronize(); thrust::device_ptr<int> small_ptr(small_edge); thrust::inclusive_scan(small_ptr, small_ptr + numEdges, small_ptr); thrust::device_ptr<int> large_ptr(large_edge); thrust::inclusive_scan(large_ptr, large_ptr + numEdges, large_ptr); temp = (int *) malloc (numEdges * sizeof(int)); cudaMemcpy(temp, small_edge, sizeof(int) * numEdges, cudaMemcpyDeviceToHost); cudaMemcpy(&num_small_edges, &small_edge[numEdges-1], sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(&num_large_edges, &large_edge[numEdges-1], sizeof(int), cudaMemcpyDeviceToHost); cudaMalloc((void **)&small_edge_list, ( 1 +num_small_edges) * sizeof(edge_tuple_t)); cudaMalloc((void **)&large_edge_list, ( 1 + num_large_edges) * sizeof(edge_tuple_t)); createSmallList<<<griddim, blockdim>>>(edge_list, small_edge_list, small_edge); cudaDeviceSynchronize(); createLargeList<<<griddim, blockdim>>>(edge_list, large_edge_list, large_edge); cudaDeviceSynchronize(); int *countArraySmall, *countArrayLarge; cudaMalloc((void **)&countArraySmall, (2 + num_small_edges) * sizeof(int)); cudaMalloc((void **)&countArrayLarge, (2 + num_large_edges) * sizeof(int)); dim3 griddim2 = (num_small_edges + 1 + BLOCK_SIZE)/BLOCK_SIZE; /* Applying intersection rule on all small edges to find number of triangles */ countTriangleKernel<<<griddim2, blockdim>>>(countArraySmall, small_edge_list, cudaDeviceStartAddr, num_small_edges+1); cudaDeviceSynchronize(); thrust::device_ptr<int> dev_ptr2(countArraySmall); thrust::inclusive_scan(dev_ptr2, dev_ptr2 + num_small_edges+1, dev_ptr2); int count1, count2; cudaMemcpy(&count1, &countArraySmall[num_small_edges], sizeof(int), cudaMemcpyDeviceToHost); dim3 griddim3 = (num_large_edges + 1 + BLOCK_SIZE)/BLOCK_SIZE; /* Applying intersection rule on all large edges to find number of triangles */ countTriangleKernel<<<griddim3, blockdim>>>(countArrayLarge, large_edge_list, cudaDeviceStartAddr, num_large_edges+1); cudaDeviceSynchronize(); thrust::device_ptr<int> dev_ptr3(countArrayLarge); thrust::inclusive_scan(dev_ptr3, dev_ptr3 + num_large_edges + 1, dev_ptr3); cudaMemcpy(&count2, &countArrayLarge[num_large_edges], sizeof(int), cudaMemcpyDeviceToHost); count = count1 + count2; printf("count %d\n", count); }
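/*
 * Sketch (not from the original source): countTriangles() obtains the final
 * totals by running inclusive_scan over the per-edge counts and then copying
 * the last element back to the host. thrust::reduce performs the same summation
 * directly; the scan is only needed if the per-edge prefix counts themselves are
 * wanted. Names below mirror the ones used in countTriangles().
 */
#include <thrust/reduce.h>
#include <thrust/device_ptr.h>

/* after countTriangleKernel has filled countArraySmall[0 .. num_small_edges]:
 *   thrust::device_ptr<int> p(countArraySmall);
 *   int count1 = thrust::reduce(p, p + num_small_edges + 1, 0);
 * and likewise for countArrayLarge / count2.
 */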
1c13aaf67c010b2284e49bddafb7c536ecb2b784.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <hip/hip_runtime.h>
#include "workshop.h"

#define N 10000000

unsigned char *fill_random_buffer(int size)
{
  unsigned char *ret;
  int c;

  ret = (unsigned char *) malloc(size);
  assert(ret);
  for(c = 0; c < size; c++) {
    ret[c] = rand();
  }
  return ret;
}

/* Each block builds a private 256-bin histogram in shared memory, then merges
 * it into the global histogram with one atomicAdd per bin. */
__global__ void compute_histogram(unsigned char *data, unsigned int *histogram)
{
  __shared__ unsigned int cache[256];
  int i = blockIdx.x * blockDim.x + threadIdx.x;

  cache[threadIdx.x] = 0;
  __syncthreads();

  while(i < N) {
    atomicAdd(&cache[data[i]], 1);
    i += blockDim.x * gridDim.x;   /* grid-stride loop over the input */
  }
  __syncthreads();

  atomicAdd(&histogram[threadIdx.x], cache[threadIdx.x]);
}

int main(int argc, char **argv)
{
  unsigned char *data = (unsigned char *) fill_random_buffer(N);
  unsigned int histogram[256];
  int c, sum;

  unsigned char *dev_data;
  unsigned int *dev_histogram;

  HANDLE_ERROR( hipMalloc(&dev_data, N) );
  HANDLE_ERROR( hipMalloc(&dev_histogram, sizeof(unsigned int) * 256) );

  hipMemcpy(dev_data, data, N, hipMemcpyHostToDevice);
  hipMemset(dev_histogram, 0, sizeof(unsigned int) * 256);

  hipLaunchKernelGGL(compute_histogram, dim3(30), dim3(256), 0, 0, dev_data, dev_histogram);

  hipMemcpy(histogram, dev_histogram, sizeof(unsigned int) * 256, hipMemcpyDeviceToHost);

  /* the 256 bins must account for every input byte */
  sum = 0;
  for(c = 0; c < 256; c++) {
    printf("%3d = %u\n", c, histogram[c]);
    sum += histogram[c];
  }
  if (sum != N) {
    printf("FAIL!\n");
  }

  hipFree(dev_data);
  hipFree(dev_histogram);
  free(data);
  return 0;
}
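/*
 * Sketch (not from the original workshop code): main() above only checks that
 * the 256 bins sum to N. A host-side reference histogram checks every bin.
 * verify_histogram is an illustrative helper; it uses the data / histogram
 * buffers main() already holds.
 */
static int verify_histogram(const unsigned char *data, const unsigned int *histogram)
{
  unsigned int host_hist[256] = {0};
  int c, bad = 0;
  for(c = 0; c < N; c++) host_hist[data[c]]++;     /* reference count     */
  for(c = 0; c < 256; c++)
    if (host_hist[c] != histogram[c]) bad++;       /* compare per bin     */
  return bad;                                      /* 0 means exact match */
}
/* usage in main(), after the device-to-host copy:
 *   if (verify_histogram(data, histogram) != 0) printf("FAIL!\n");
 */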
1c13aaf67c010b2284e49bddafb7c536ecb2b784.cu
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <cuda.h>
#include "workshop.h"

#define N 10000000

unsigned char *fill_random_buffer(int size)
{
  unsigned char *ret;
  int c;

  ret = (unsigned char *) malloc(size);
  assert(ret);
  for(c = 0; c < size; c++) {
    ret[c] = rand();
  }
  return ret;
}

/* Each block builds a private 256-bin histogram in shared memory, then merges
 * it into the global histogram with one atomicAdd per bin. */
__global__ void compute_histogram(unsigned char *data, unsigned int *histogram)
{
  __shared__ unsigned int cache[256];
  int i = blockIdx.x * blockDim.x + threadIdx.x;

  cache[threadIdx.x] = 0;
  __syncthreads();

  while(i < N) {
    atomicAdd(&cache[data[i]], 1);
    i += blockDim.x * gridDim.x;   /* grid-stride loop over the input */
  }
  __syncthreads();

  atomicAdd(&histogram[threadIdx.x], cache[threadIdx.x]);
}

int main(int argc, char **argv)
{
  unsigned char *data = (unsigned char *) fill_random_buffer(N);
  unsigned int histogram[256];
  int c, sum;

  unsigned char *dev_data;
  unsigned int *dev_histogram;

  HANDLE_ERROR( cudaMalloc(&dev_data, N) );
  HANDLE_ERROR( cudaMalloc(&dev_histogram, sizeof(unsigned int) * 256) );

  cudaMemcpy(dev_data, data, N, cudaMemcpyHostToDevice);
  cudaMemset(dev_histogram, 0, sizeof(unsigned int) * 256);

  compute_histogram<<<30, 256>>>(dev_data, dev_histogram);

  cudaMemcpy(histogram, dev_histogram, sizeof(unsigned int) * 256, cudaMemcpyDeviceToHost);

  /* the 256 bins must account for every input byte */
  sum = 0;
  for(c = 0; c < 256; c++) {
    printf("%3d = %u\n", c, histogram[c]);
    sum += histogram[c];
  }
  if (sum != N) {
    printf("FAIL!\n");
  }

  cudaFree(dev_data);
  cudaFree(dev_histogram);
  free(data);
  return 0;
}
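/*
 * Sketch (not from the original workshop code): the launch above uses a fixed
 * grid of 30 blocks of 256 threads. A device-aware alternative sizes the grid
 * from the occupancy API and the SM count of device 0, which main() already
 * uses implicitly. pick_grid_size is an illustrative helper name.
 */
static int pick_grid_size(int threads_per_block)
{
  cudaDeviceProp prop;
  int blocks_per_sm = 0;
  cudaGetDeviceProperties(&prop, 0);                       /* device 0          */
  cudaOccupancyMaxActiveBlocksPerMultiprocessor(&blocks_per_sm,
      compute_histogram, threads_per_block, 0);            /* 0 B dynamic smem  */
  return blocks_per_sm * prop.multiProcessorCount;         /* fill every SM     */
}
/* usage: compute_histogram<<<pick_grid_size(256), 256>>>(dev_data, dev_histogram); */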
51d23ba0361934ecb7d426a951d4a33cfeb37080.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<stdio.h> #include<cuda.h> #include<fitsio.h> #include<pil.h> #include<GetCorrelation.h> #include"svdfit.c" /* * nvcc GetCorrelation.cu $INCL $OTHERLIB -arch=sm_20 * */ int main(int argc,char*argv[]) { //declaring the local variables char parfilename[BUFSIZE],dphfilename[BUFSIZE],correlationFilename[BUFSIZE],qeFilename[BUFSIZE],badpixfilename[BUFSIZE]; int moddph_size=sizeof(float)*NUMPIXPERMODULE*NUMPIXPERMODULE; float *mod_dph,*qemap,*dphWithoutBadPix; int moduleid,llx,lly,numelements_x,numelements_y; int i=0,j=0,rv=0,ii=0,jj=0,numbadpix=0; int *mask,*badpix,*quad_dph,pixid=0; float x_center,y_center,*correlation,zs;// xs,ys*shadow, float xstart=-19.5,ystart=-19.5,xend=19.5,yend=19.5; float resol=1,mean,rms; time_t start; int tempsrc=0; float *dph_device,*correlation_device=NULL,*qemap_device=NULL; float fittedparam=0; int *mask_device=NULL,numblocks-0,threadsPerBlock=0; FILE *fp; start=clock(); char *CZTHOME; hipError_t cuerr; float maxvalue=0; int max_x=0,max_y=0; void (*funx_pointer)(float,float*)=polyfunction; float *funy,*funxy,*coeff,chisq,*shadow; float xs,ys,minval=9999999999,min_x=0,min_y=0,xt_tmp=xstart,yt_tmp=ystart,xe_tmp=xend,ye_tmp=yend; int fitcounter=0; //getting path for CZTI environment CZTHOME = getenv ("CZTWORKSPACE"); if(CZTHOME==NULL) { printf("CZTHOME Variable is not set\n"); exit(0); } //Setting par file name strcpy(parfilename,CZTHOME); strcat(parfilename,"/GetCorrelationGPU/par/GetCorrelation"); PILSetModuleName(parfilename); int r=PILInit(argc,argv); if(r<0) { printf("Error(%s:%d) : Error while loading par file\n",__FILE__,__LINE__); exit(0); } //reading inputs r=PILGetInt("moduleid",&moduleid); r=PILGetReal4("Zs",&zs); r=PILGetReal4("xstart",&xstart); r=PILGetReal4("xend",&xend); r=PILGetReal4("ystart",&ystart); r=PILGetReal4("yend",&yend); r=PILGetReal4("resolution",&resol); r=PILGetFname("dphfilename",dphfilename); r=PILGetFname("badpixfilename",badpixfilename); r=PILGetFname("qeFilename",qeFilename); r=PILGetFname("correlationfilename",correlationFilename); PILClose(r); //allocating the memory numelements_x=(int)(((xend-xstart)/resol)+1); numelements_y=(int)(((yend-ystart)/resol)+1); badpix=(int*)malloc(sizeof(int)*ROWSPERMODULE*COLSPERMODULE); mask=(int*)malloc(sizeof(int)*ROWSPERMODULE*COLSPERMODULE); correlation=(float*)malloc(sizeof(float)*numelements_y*numelements_x); quad_dph=(int*)malloc(sizeof(int)*NUMPIXPERQUAD*NUMPIXPERQUAD); mod_dph=(float*)malloc(moddph_size); qemap=(float*)malloc(sizeof(float)*NUMPIXPERMODULE*NUMPIXPERMODULE); dphWithoutBadPix=(float*)malloc(sizeof(float)*NUMPIXPERMODULE*NUMPIXPERMODULE); if(mask==NULL || badpix==NULL ||correlation==NULL ||quad_dph==NULL ||mod_dph==NULL ||qemap==NULL ||dphWithoutBadPix==NULL) { printf("Error while allocating memory\n"); exit(0); } //Reading DPH, QE files readImage(dphfilename,quad_dph,2); readFloatImage(qeFilename,2,qemap); getminmax(moduleid,&llx,&lly,&x_center,&y_center); getModule(moduleid+1,mask); //initilizing the arrays for(i=0;i<numelements_x;i++) { for(j=0;j<numelements_y;j++) correlation[j*NUMPIXPERMODULE+i]=0; } for(i=llx,ii=0;i<llx+NUMPIXPERMODULE;i++,ii++) { for(j=lly,jj=0;j<lly+NUMPIXPERMODULE;j++,jj++) mod_dph[jj*NUMPIXPERMODULE+ii]=quad_dph[j*NUMPIXPERQUAD+i]; } //reading bad pixel list fp=fopen(badpixfilename,"r"); if(fp==NULL) { printf("%s file not found\n",badpixfilename); exit(0); } while(1) { rv=fscanf(fp,"%d",&badpix[numbadpix]); if(rv==-1) break; numbadpix++; } //here assign -1 
for the bad pixels so they can be ignored from computations int isbad=0; for(i=0;i<NUMPIXPERMODULE;i++) { for(j=0,isbad=0;j<NUMPIXPERMODULE;j++) { pixid=(15-i)*16+j; for(ii=0;ii<numbadpix;ii++) { if(pixid==badpix[ii]) { isbad=1;break; } } if(isbad) { dphWithoutBadPix[pixid]=-1; } else { dphWithoutBadPix[pixid]=mod_dph[pixid]; } } } numblocks=(int)ceil(((numelements_x*numelements_y)/(float)NUMTHREADS)); printf("NUM BLOCKS:%d\t",numblocks); printf("Threads per block:%d\n",threadsPerBlock); //allocating device memory hipMalloc((void **) &dph_device,moddph_size ); hipMalloc((void **) &qemap_device,moddph_size ); hipMalloc((void **) &mask_device,sizeof(int)*ROWSPERMODULE*COLSPERMODULE ); hipMalloc((void **) &correlation_device, sizeof(float)*numelements_x*numelements_y); //checking error in memory allocation if((cuerr = hipGetLastError()) != hipSuccess) { printf("\nError: Cuda Malloc %s\n", hipGetErrorString(cuerr)); return -1; } //coping data from host to device memory hipMemcpy(dph_device,dphWithoutBadPix,moddph_size,hipMemcpyHostToDevice); hipMemcpy(qemap_device,qemap,moddph_size,hipMemcpyHostToDevice); hipMemcpy(mask_device,mask,sizeof(int)*ROWSPERMODULE*COLSPERMODULE ,hipMemcpyHostToDevice); if((cuerr = hipGetLastError()) != hipSuccess) { printf("\nError: Cuda Memcpy %s\n", hipGetErrorString(cuerr)); return -1; } //calling cross-correlation kernel hipLaunchKernelGGL(( CorrelationKernel), dim3(numblocks),dim3(threadsPerBlock), 0, 0, mask_device,dph_device,qemap_device,correlation_device,numelements_x,numelements_y,xstart,ystart,resol,zs,moduleid); if((cuerr = hipGetLastError()) != hipSuccess) { printf("\nError: CUDA KERNEL ERROR: %s\n", hipGetErrorString(cuerr)); return -1; } //coping back the results from device to host memory hipMemcpy(correlation,correlation_device,sizeof(float)*numelements_x*numelements_y,hipMemcpyDeviceToHost); if((cuerr = hipGetLastError()) != hipSuccess) { printf("\nError: Cuda DEVICE TO HOST Memcpy: %s\n", hipGetErrorString(cuerr)); return -1; } //getting maximum cross-correlation value and preparing array to write FITS image for(i=0;i<numelements_x;i++) { for(j=0;j<numelements_y;j++) { if(correlation[i*numelements_y+j]>maxvalue){ maxvalue=correlation[i*numelements_y+j]; max_x=i; max_y=j; } } } printf("Max_x:%d\tMax_y:%d\n",max_x,max_y); printf("Peak:%f\tX:%f\tY:%f\n",maxvalue,xstart+(max_x*resol),ystart+(max_y*resol)); writeFloatImage(correlationFilename,correlation,numelements_x,numelements_y,resol); //performing forward fitting to get source intensity shadow=(float*)malloc(sizeof(float)*(NUMPIXPERMODULE*NUMPIXPERMODULE)+2); funy=(float*)malloc(sizeof(float)*(NUMPIXPERMODULE*NUMPIXPERMODULE)+2); funxy=(float*)malloc(sizeof(float)*(NUMPIXPERMODULE*NUMPIXPERMODULE)+2); coeff=(float*)malloc(sizeof(float)*(NUMPIXPERMODULE*NUMPIXPERMODULE)+2); xstart=xstart+(max_x*resol); ystart=ystart+(max_y*resol); int windowsize=3; printf("Fitting source for iteration :%d\n",tempsrc); for(xs=xstart-windowsize;xs<=xstart+windowsize;xs+=0.25) { for(ys=ystart-windowsize;ys<=ystart+windowsize;ys+=0.25) { gensha(moduleid,shadow,xs,ys,zs,mask); fitcounter=0; for(i=0;i<NUMPIXPERMODULE*NUMPIXPERMODULE;i++){ if(dphWithoutBadPix[i]!=-1) { funy[fitcounter+1]=shadow[i]; funxy[fitcounter+1]=dphWithoutBadPix[i]; fitcounter++; } } do_svdfit(funy,funxy,coeff,fitcounter,1,&chisq,funx_pointer); chisq/=fitcounter-1; if(chisq<minval) { minval=chisq; fittedparam=coeff[1]; min_x=xs; min_y=ys; } } } char splot[BUFSIZ],matrix[BUFSIZE],xplot[BUFSIZE],yplot[BUFSIZE]; FILE *fp1,*fp2,*fp3,*fp4; 
strcpy(splot,correlationFilename); strcpy(matrix,correlationFilename); strcpy(xplot,correlationFilename); strcpy(yplot,correlationFilename); strcat(splot,"_S.txt"); strcat(matrix,"_Corr.txt"); strcat(xplot,"_X.txt"); strcat(yplot,"_Y.txt"); fp1=fopen(splot,"w"); fp2=fopen(matrix,"w"); fp3=fopen(xplot,"w"); fp4=fopen(yplot,"w"); for(xs=xt_tmp,i=0;xs<=xe_tmp;xs+=0.25,i++) { for(ys=yt_tmp,j=0;ys<=ye_tmp;ys+=0.25,j++) { fprintf(fp1,"%f\t%f\t%f\n",xs,ys,correlation[i*numelements_y+j]); fprintf(fp2,"%f\t",correlation[i*numelements_y+j]); fprintf(fp3,"%f\t%f\n",xs,correlation[i*numelements_y+j]); fprintf(fp4,"%f\t%f\n",ys,correlation[i*numelements_y+j]); } fprintf(fp2,"\n"); } fclose(fp1); fclose(fp2); fclose(fp3); fclose(fp4); printf("Minimum Chisq is %f found at %f,%f\n",minval,min_x,min_y); printf("Fitted Param:%f\n",fittedparam); for(i=0;i<NUMPIXPERMODULE*NUMPIXPERMODULE;i++){ if(dphWithoutBadPix[i]!=-1) { dphWithoutBadPix[i]-=shadow[i]*fittedparam; } } printf("TIME ELAPSED : %f seconds\n\n", ((double)clock() - start) / CLOCKS_PER_SEC); } void polyfunction(float x,float*funx) { funx[1]=x; } __global__ void CorrelationKernel(int *mask,float *dph,float *qemap,float *correlation,int numelem_x,int numelem_y,float x_start,float y_start,float resol,float zs,int moduleid) { int index=blockIdx.x*blockDim.x+threadIdx.x; int i=0; float xs,ys; float *shadow,sum=0; xs=(x_start)+((index/numelem_y)*resol); ys=(y_start)+((index%numelem_y)*resol); if(xs>19.5 || xs<-19.5 || ys>19.5 || ys<-19.5) return; //allocating memory shadow=(float*)malloc(sizeof(float)*ELEM); //computing shadow gensha_device(moduleid,shadow,xs,ys,zs,mask); //Computing cross-correlation value for a direction for(i=0;i<NUMPIXPERMODULE*NUMPIXPERMODULE;i++) { if(qemap[i]!=0 && dph[i]!=-1) sum+=((dph[i]*shadow[i])/qemap[i]); } correlation[index]=sum; free(shadow); } //function to generate shadow int gensha(int moduleid,float*det,float xs,float ys,float zs,int *mask) { int detx=0, dety=0; float x_center=0.0,y_center=0.0,tx=0.0,ty=0.0,xtemp=0,ytemp=0; int rowcounter=0,colcounter=0, x_index,y_index; int i=0,j=0,ii=0,jj=0; int llx=0,lly=0; getminmax(moduleid,&llx,&lly,&x_center,&y_center); dety=(llx/16)*ROWSPERMODULE+(llx/16)*100; detx=(lly/16)*COLSPERMODULE+(lly/16)*100; xtemp=19.5-xs; ytemp=19.5+ys; xs=ytemp; ys=xtemp; xs/=0.02; ys/=0.02; zs/=0.02; int tempx,tempy; for(i=0;i<256;i++) det[i]=0; for(i=detx,ii=0;i<detx+ROWSPERMODULE;i++,ii++) { tx=asin( (ys-ii) / ( sqrt( (zs*zs)+((ys-ii) * (ys-ii)) ) ) ); tempx=(int)(ii+(HEIGHT*(tan(tx)))); x_index=(ii/123); if(x_index>14) x_index=14+((ii-(14*123))/114); else x_index=x_index+((ii-(x_index*123))/114); for(j=dety,jj=0;j<dety+COLSPERMODULE;j++,jj++) { y_index=(jj/123); if(y_index>14) y_index=14+((jj-(14*123))/114); else y_index=y_index+((jj-(y_index*123))/114); ty=asin( (xs-jj) / ( sqrt( (zs*zs)+((xs-jj) * (xs-jj)) ) ) ); tempy=(int)(jj+(HEIGHT*(tan(ty)))); if(tempx>=0 && tempx<ROWSPERMODULE && tempy>=0 &&tempy<COLSPERMODULE) { if(mask[tempx*COLSPERMODULE+tempy]==1) det[x_index*16+y_index]+=1; } } } for(i=0;i<16;i++) { if(i%16==0 ||(i+1)%16==0) rowcounter=114; else rowcounter=123; for(j=0;j<16;j++) { if(j%16==0 || (j+1)%16==0) colcounter=114; else colcounter=123; det[i*16+j]=(float)det[i*16+j]/(float)(rowcounter*colcounter); } } return 0; } //Generatee shadow function for devicee __device__ int gensha_device(int moduleid,float*det,float xs,float ys,float zs,int *mask) { int detx=0, dety=0; float x_center=0.0,y_center=0.0,tx=0.0,ty=0.0,xtemp=0,ytemp=0; int rowcounter=0,colcounter=0, x_index,y_index; 
int i=0,j=0,ii=0,jj=0; int llx=0,lly=0; getminmax_device(moduleid,&llx,&lly,&x_center,&y_center); dety=(llx/16)*ROWSPERMODULE+(llx/16)*100; detx=(lly/16)*COLSPERMODULE+(lly/16)*100; xtemp=19.5-xs; ytemp=19.5+ys; xs=ytemp; ys=xtemp; xs/=0.02; ys/=0.02; zs/=0.02; int tempx,tempy; for(i=0;i<256;i++) det[i]=0; for(i=detx,ii=0;i<detx+ROWSPERMODULE;i++,ii++) { tx=asin( (ys-ii) / ( sqrt( (zs*zs)+((ys-ii) * (ys-ii)) ) ) ); tempx=(int)(ii+(HEIGHT*(tan(tx)))); x_index=(ii/123); if(x_index>14) x_index=14+((ii-(14*123))/114); else x_index=x_index+((ii-(x_index*123))/114); for(j=dety,jj=0;j<dety+COLSPERMODULE;j++,jj++) { y_index=(jj/123); if(y_index>14) y_index=14+((jj-(14*123))/114); else y_index=y_index+((jj-(y_index*123))/114); ty=asin( (xs-jj) / ( sqrt( (zs*zs)+((xs-jj) * (xs-jj)) ) ) ); tempy=(int)(jj+(HEIGHT*(tan(ty)))); if(tempx>=0 && tempx<ROWSPERMODULE && tempy>=0 &&tempy<COLSPERMODULE) { if(mask[tempx*COLSPERMODULE+tempy]==1) det[x_index*16+y_index]+=1; } } } for(i=0;i<16;i++) { if(i%16==0 ||(i+1)%16==0) rowcounter=114; else rowcounter=123; for(j=0;j<16;j++) { if(j%16==0 || (j+1)%16==0) colcounter=114; else colcounter=123; det[i*16+j]=(float)det[i*16+j]/(float)(rowcounter*colcounter); } } return 0; } //function to write fits images void writeFloatImage(char *filename,float*pixels,int rows,int cols,float resol) { int bitpix = FLOAT_IMG; /* 16-bit unsigned short pixel values */ long naxis = 2; /* 2-dimensional image */ int fpixel = 1,status=0; long naxes[2] = { cols,rows }; long nelements = naxes[0] * naxes[1]; fitsfile *fptr; remove(filename); if (fits_create_file(&fptr, filename, &status)) { printf("Error(%s:%d):Creating file\n",__FILE__,__LINE__); } if ( fits_create_img(fptr, bitpix,0, naxes, &status) ) { printf("Error(%s:%d):Creating image\n",__FILE__,__LINE__); } if ( fits_create_img(fptr, bitpix, naxis, naxes, &status) ) { printf("Error(%s:%d):Creating image\n",__FILE__,__LINE__); } write_wcsaxis(fptr,1,"","","","IMX",((double)(rows+1)/2.0),resol,0,"mm",&status); write_wcsaxis(fptr,2,"","","","IMY",((double)(cols+1)/2.0),resol,0,"mm",&status); if ( fits_write_img(fptr, TFLOAT, fpixel, nelements,pixels, &status) ) { printf("Error(%s:%d):Wrting image\n",__FILE__,__LINE__); } if ( fits_close_file(fptr, &status) ) { printf("Error(%s:%d):closing file\n",__FILE__,__LINE__); } } //function to read fits images void readFloatImage(char*filename,int hduno,float*data) { fitsfile *fptr; /* pointer to the FITS file, defined in fitsio.h */ int status, nfound, anynull,hdutype; long naxes[2], fpixel, nbuffer, npixels; float nullval; int buffsize; status = 0; if ( fits_open_file(&fptr, filename, READONLY, &status) ) printf("Error while reading fits file\n"); if ( fits_movabs_hdu(fptr, hduno, &hdutype, &status) ) printf("Error while moving HDU\n"); if ( fits_read_keys_lng(fptr, "NAXIS", 1, 2, naxes, &nfound, &status) ) printf("Error while reading keys\n"); npixels = naxes[0] * naxes[1]; /* number of pixels in the image */ buffsize=npixels; fpixel = 1; nullval = 0; /* don't check for null values in the image */ while (npixels > 0) { nbuffer = npixels; if (npixels > buffsize) nbuffer = buffsize; if ( fits_read_img(fptr, TFLOAT, fpixel, nbuffer, &nullval,data, &anynull, &status) ) printf("Error while reading fits image\n"); npixels -= nbuffer; fpixel += nbuffer; } if ( fits_close_file(fptr, &status) ) printf("Error while closing file\n"); } //Writing WCS information to images int write_wcsaxis(fitsfile *imgfile, int axis, char *suffix,char *wcsname, char *wcstype, char *ctype, double crpix, double cdelt, 
double crval,char *cunit, int *status) { char key[20]; if (status == 0) return NULL_INPUT_PTR; if (*status != 0) return (*status); if (imgfile == 0) return (*status = NULL_INPUT_PTR); if (wcsname && wcsname[0]) { sprintf(key, "WCSNAME%s", suffix); fits_update_key(imgfile, TSTRING, key, wcsname,"Coordinate system name", status); } if (wcstype && wcstype[0]){ sprintf(key, "WCSTY%d%s", axis, suffix); fits_update_key(imgfile, TSTRING, key, wcstype,"Coordinate system axis", status); } sprintf(key, "CTYPE%d%s", axis, suffix); fits_update_key(imgfile, TSTRING, key, ctype,"Name of coordinate", status); if (cunit && cunit[0]) { sprintf(key, "CUNIT%d%s", axis, suffix); fits_update_key(imgfile, TSTRING, key, cunit,"Units of coordinate axis", status); } sprintf(key, "CRPIX%d%s", axis, suffix); fits_update_key(imgfile, TDOUBLE, key, &crpix,"Reference pixel position", status); sprintf(key, "CDELT%d%s", axis, suffix); fits_update_key(imgfile, TDOUBLE, key, &cdelt,"Pixel spacing in physical units", status); sprintf(key, "CRVAL%d%s", axis, suffix); fits_update_key(imgfile, TDOUBLE, key, &crval,"Coordinate value at reference pixel position", status); return (*status); } void normalizeData(float *data,int numelements) { float peakval=getMaximum(data,numelements); int i=0; for(i=0;i<numelements;i++) { data[i]/=peakval; } } float getMaximum(float *data,int numelements) { int i=0; float maxval=0; for(i=0;i<numelements;i++) { if(data[i]>maxval) maxval=data[i]; } return maxval; } void readImage(char*filename,int *buffer,int hduNo) { fitsfile *fptr; /* pointer to the FITS file, defined in fitsio.h */ int status, nfound, anynull,hdutype; long naxes[2], fpixel, nbuffer, npixels; float nullval; status = 0; if ( fits_open_file(&fptr, filename, READONLY, &status) ) printf("Error while opening fits file\n"); if ( fits_movabs_hdu(fptr, hduNo, &hdutype, &status) ) printf("Error while moving module\n"); if ( fits_read_keys_lng(fptr, "NAXIS", 1, 2, naxes, &nfound, &status) ) printf("Error while reading keys\n"); npixels = naxes[0] * naxes[1]; /* number of pixels in the image */ fpixel = 1; nullval = 0; /* don't check for null values in the image */ while (npixels > 0) { nbuffer = npixels; if ( fits_read_img(fptr, TINT, fpixel, nbuffer, &nullval,buffer, &anynull, &status) ) { printf("Error reading fits image\n"); } npixels -= nbuffer; fpixel += nbuffer; } if ( fits_close_file(fptr, &status) ) { printf("Error while closing fits file\n"); } return; } __device__ void getminmax_device(int moduleno,int*x,int *y,float* x_center,float* y_center) { switch(moduleno) { case 12 : *x=0; *y=0; *x_center=19.5; *y_center=19.5; break; case 13 : *x=16; *y=0; *x_center=60.5; *y_center=19.5; break; case 14 : *x=32; *y=0; *x_center=101.5; *y_center=19.5; break; case 15 : *x=48; *y=0; *x_center=142.5; *y_center=19.5; break; case 8: *x=0; *y=16; *x_center=19.5; *y_center=60.5; break; case 9 : *x=16; *y=16; *x_center=60.5; *y_center=60.5; break; case 10 : *x=32; *y=16; *x_center=101.5; *y_center=60.5; break; case 11 : *x=48; *y=16; *x_center=142.5; *y_center=60.5; break; case 4 : *x=0; *y=32; *x_center=19.5; *y_center=101.5; break; case 5 : *x=16; *y=32; *x_center=60.5; *y_center=101.5; break; case 6 : *x=32; *y=32; *x_center=101.5; *y_center=101.5; break; case 7 : *x=48; *y=32; *x_center=142.5; *y_center=101.5; break; case 0 : *x=0; *y=48; *x_center=19.5; *y_center=142.5; break; case 1 : *x=16; *y=48; *x_center=60.5; *y_center=142.5; break; case 2 : *x=32; *y=48; *x_center=101.5; *y_center=142.5; break; case 3 : *x=48; *y=48; *x_center=142.5; 
*y_center=142.5; break; default: printf("Invalid module id\n"); break; } } void getminmax(int moduleno,int*x,int *y,float* x_center,float* y_center) { switch(moduleno) { case 12 : *x=0; *y=0; *x_center=19.5; *y_center=19.5; break; case 13 : *x=16; *y=0; *x_center=60.5; *y_center=19.5; break; case 14 : *x=32; *y=0; *x_center=101.5; *y_center=19.5; break; case 15 : *x=48; *y=0; *x_center=142.5; *y_center=19.5; break; case 8: *x=0; *y=16; *x_center=19.5; *y_center=60.5; break; case 9 : *x=16; *y=16; *x_center=60.5; *y_center=60.5; break; case 10 : *x=32; *y=16; *x_center=101.5; *y_center=60.5; break; case 11 : *x=48; *y=16; *x_center=142.5; *y_center=60.5; break; case 4 : *x=0; *y=32; *x_center=19.5; *y_center=101.5; break; case 5 : *x=16; *y=32; *x_center=60.5; *y_center=101.5; break; case 6 : *x=32; *y=32; *x_center=101.5; *y_center=101.5; break; case 7 : *x=48; *y=32; *x_center=142.5; *y_center=101.5; break; case 0 : *x=0; *y=48; *x_center=19.5; *y_center=142.5; break; case 1 : *x=16; *y=48; *x_center=60.5; *y_center=142.5; break; case 2 : *x=32; *y=48; *x_center=101.5; *y_center=142.5; break; case 3 : *x=48; *y=48; *x_center=142.5; *y_center=142.5; break; default: printf("Invalid module id\n"); break; } } void getModule(int moduleNo,int *pixels) { int i=0,j=0,ii=0,jj=0,temp=0; int cols=COLSPERMODULE; char moduleFileName[100]="module",blockageFileName[100]="bars_",tempBuff[100]; FILE *fp,*blockage; int *data; char *CZTHOME = getenv ("CZTWORKSPACE"); if(CZTHOME==NULL) { printf("CZTHOME Variable is not set\n"); exit(0); } data=(int*)malloc(sizeof(int)*16*16); sprintf(tempBuff,"%d",moduleNo); strcpy(moduleFileName,CZTHOME); strcpy(blockageFileName,CZTHOME); strcat(moduleFileName,"/config/module"); strcat(blockageFileName,"/config/bars_"); strcat(moduleFileName,tempBuff); strcat(blockageFileName,tempBuff); fp=fopen(moduleFileName,"r"); blockage=fopen(blockageFileName,"r"); if(fp==NULL) { printf("Sorry Error while opening the module file\n%s\n",moduleFileName); exit(0); } if(blockage==NULL) { printf("Error while opening the blockage file\n%s\n",blockageFileName); exit(0); } int colPreVal=0,colInc=0,rowPreVal=0,rowInc=0; for(i=0;i<16;i++) { if(i==0||i==15) { rowInc=114; } else { rowInc=123; } for(j=0,colPreVal=0;j<16;j++) { if(j==0||j==15) { colInc=114; } else { colInc=123; } fscanf(fp,"%d",&data[i*16+j]); for(ii=rowPreVal;ii<rowPreVal+rowInc-10;ii++) { for(jj=colPreVal;jj<colPreVal+colInc;jj++) { pixels[ii*cols+jj]=data[i*16+j]; } } temp=0; if(i!=15) { fscanf(blockage,"%d",&temp); for(ii=rowPreVal+rowInc-10;ii<rowPreVal+rowInc;ii++) { for(jj=colPreVal;jj<colPreVal+colInc;jj++) { pixels[ii*cols+jj]=temp; } } } else { for(ii=rowPreVal+rowInc-10;ii<rowPreVal+rowInc;ii++) { for(jj=colPreVal;jj<colPreVal+colInc;jj++) { pixels[ii*cols+jj]=data[i*16+j]; } } } colPreVal+=colInc; } rowPreVal+=rowInc; } } void calculateSigmaClippedMean(float* pixel_count,float *mean_out,float*rms_out) { int i=0; float mean_sum=0,rms_sum=0,mean,rms; int rows=NUMPIXPERMODULE,cols=NUMPIXPERMODULE; float temp_mean=0,temp_rms=0,temp_mean_sum=0,temp_rms_sum=0,mean_count=0,rms_count=0; mean_count=0; for(i=0;i<rows*cols;i++) { if(pixel_count[i]!=0) { mean_sum+=pixel_count[i]; mean_count++; } } mean=mean_sum/(mean_count); rms_count=0; for(i=0;i<rows*cols;i++) { if(pixel_count[i]!=0) { rms_sum+=((pixel_count[i]-mean)*(pixel_count[i]-mean)); rms_count++; } } rms=(rms_sum/rms_count); rms=sqrt(rms); while(1) { temp_mean_sum=0; temp_rms_sum=0; temp_mean=0; temp_rms=0; mean_count=0; rms_count=0; for(i=0;i<rows*cols;i++) { 
if((pixel_count[i])<((THREASHOLD*rms)+mean) || (pixel_count[i] > (mean-(THREASHOLD*rms)))) { temp_mean_sum+=pixel_count[i]; mean_count++; } } temp_mean=temp_mean_sum/mean_count; for(i=0;i<rows*cols;i++) { if((pixel_count[i])<((THREASHOLD*rms)+mean) || (pixel_count[i] > (mean-(THREASHOLD*rms)))) { temp_rms_sum+=(pixel_count[i]-temp_mean)*(pixel_count[i]-temp_mean); rms_count++; } } if(mean_count!=rms_count) printf("Mean Rms count different\n"); temp_rms=temp_rms_sum/rms_count; temp_rms=sqrt(temp_rms); float t1=0,t2=0; t1=((mean-temp_mean)/mean); t2=((rms-temp_rms)/rms); if(t1<0) t1*=-1; if(t2<0) t2*=-1; if(t1 <0.01 && t2<0.01 ) { //mean=temp_mean; //rms=temp_rms; *mean_out=temp_mean; *rms_out=temp_rms; break; } else { mean=temp_mean; rms=temp_rms; } } }
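/*
 * Sketch (not from the original source): in main() above, numblocks is computed
 * from NUMTHREADS, but threadsPerBlock is never assigned (the declaration reads
 * "numblocks-0,threadsPerBlock=0"), so CorrelationKernel is launched with a
 * zero-sized block. The fragment below shows the launch as it appears intended,
 * assuming NUMTHREADS (referenced in the numblocks computation, presumably from
 * GetCorrelation.h) is the intended block size; launch_err is an illustrative
 * local name.
 */
/*
threadsPerBlock = NUMTHREADS;                               // intended block size
numblocks = (numelements_x * numelements_y + threadsPerBlock - 1)
            / threadsPerBlock;                              // integer ceil division

hipLaunchKernelGGL(CorrelationKernel, dim3(numblocks), dim3(threadsPerBlock), 0, 0,
                   mask_device, dph_device, qemap_device, correlation_device,
                   numelements_x, numelements_y, xstart, ystart, resol, zs, moduleid);

hipError_t launch_err = hipGetLastError();                  // launch-time errors
if (launch_err == hipSuccess)
    launch_err = hipDeviceSynchronize();                    // execution-time errors
if (launch_err != hipSuccess)
    printf("CorrelationKernel failed: %s\n", hipGetErrorString(launch_err));
*/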
51d23ba0361934ecb7d426a951d4a33cfeb37080.cu
#include<stdio.h> #include<cuda.h> #include<fitsio.h> #include<pil.h> #include<GetCorrelation.h> #include"svdfit.c" /* * nvcc GetCorrelation.cu $INCL $OTHERLIB -arch=sm_20 * */ int main(int argc,char*argv[]) { //declaring the local variables char parfilename[BUFSIZE],dphfilename[BUFSIZE],correlationFilename[BUFSIZE],qeFilename[BUFSIZE],badpixfilename[BUFSIZE]; int moddph_size=sizeof(float)*NUMPIXPERMODULE*NUMPIXPERMODULE; float *mod_dph,*qemap,*dphWithoutBadPix; int moduleid,llx,lly,numelements_x,numelements_y; int i=0,j=0,rv=0,ii=0,jj=0,numbadpix=0; int *mask,*badpix,*quad_dph,pixid=0; float x_center,y_center,*correlation,zs;// xs,ys*shadow, float xstart=-19.5,ystart=-19.5,xend=19.5,yend=19.5; float resol=1,mean,rms; time_t start; int tempsrc=0; float *dph_device,*correlation_device=NULL,*qemap_device=NULL; float fittedparam=0; int *mask_device=NULL,numblocks-0,threadsPerBlock=0; FILE *fp; start=clock(); char *CZTHOME; cudaError_t cuerr; float maxvalue=0; int max_x=0,max_y=0; void (*funx_pointer)(float,float*)=polyfunction; float *funy,*funxy,*coeff,chisq,*shadow; float xs,ys,minval=9999999999,min_x=0,min_y=0,xt_tmp=xstart,yt_tmp=ystart,xe_tmp=xend,ye_tmp=yend; int fitcounter=0; //getting path for CZTI environment CZTHOME = getenv ("CZTWORKSPACE"); if(CZTHOME==NULL) { printf("CZTHOME Variable is not set\n"); exit(0); } //Setting par file name strcpy(parfilename,CZTHOME); strcat(parfilename,"/GetCorrelationGPU/par/GetCorrelation"); PILSetModuleName(parfilename); int r=PILInit(argc,argv); if(r<0) { printf("Error(%s:%d) : Error while loading par file\n",__FILE__,__LINE__); exit(0); } //reading inputs r=PILGetInt("moduleid",&moduleid); r=PILGetReal4("Zs",&zs); r=PILGetReal4("xstart",&xstart); r=PILGetReal4("xend",&xend); r=PILGetReal4("ystart",&ystart); r=PILGetReal4("yend",&yend); r=PILGetReal4("resolution",&resol); r=PILGetFname("dphfilename",dphfilename); r=PILGetFname("badpixfilename",badpixfilename); r=PILGetFname("qeFilename",qeFilename); r=PILGetFname("correlationfilename",correlationFilename); PILClose(r); //allocating the memory numelements_x=(int)(((xend-xstart)/resol)+1); numelements_y=(int)(((yend-ystart)/resol)+1); badpix=(int*)malloc(sizeof(int)*ROWSPERMODULE*COLSPERMODULE); mask=(int*)malloc(sizeof(int)*ROWSPERMODULE*COLSPERMODULE); correlation=(float*)malloc(sizeof(float)*numelements_y*numelements_x); quad_dph=(int*)malloc(sizeof(int)*NUMPIXPERQUAD*NUMPIXPERQUAD); mod_dph=(float*)malloc(moddph_size); qemap=(float*)malloc(sizeof(float)*NUMPIXPERMODULE*NUMPIXPERMODULE); dphWithoutBadPix=(float*)malloc(sizeof(float)*NUMPIXPERMODULE*NUMPIXPERMODULE); if(mask==NULL || badpix==NULL ||correlation==NULL ||quad_dph==NULL ||mod_dph==NULL ||qemap==NULL ||dphWithoutBadPix==NULL) { printf("Error while allocating memory\n"); exit(0); } //Reading DPH, QE files readImage(dphfilename,quad_dph,2); readFloatImage(qeFilename,2,qemap); getminmax(moduleid,&llx,&lly,&x_center,&y_center); getModule(moduleid+1,mask); //initilizing the arrays for(i=0;i<numelements_x;i++) { for(j=0;j<numelements_y;j++) correlation[j*NUMPIXPERMODULE+i]=0; } for(i=llx,ii=0;i<llx+NUMPIXPERMODULE;i++,ii++) { for(j=lly,jj=0;j<lly+NUMPIXPERMODULE;j++,jj++) mod_dph[jj*NUMPIXPERMODULE+ii]=quad_dph[j*NUMPIXPERQUAD+i]; } //reading bad pixel list fp=fopen(badpixfilename,"r"); if(fp==NULL) { printf("%s file not found\n",badpixfilename); exit(0); } while(1) { rv=fscanf(fp,"%d",&badpix[numbadpix]); if(rv==-1) break; numbadpix++; } //here assign -1 for the bad pixels so they can be ignored from computations int isbad=0; 
for(i=0;i<NUMPIXPERMODULE;i++) { for(j=0,isbad=0;j<NUMPIXPERMODULE;j++) { pixid=(15-i)*16+j; for(ii=0;ii<numbadpix;ii++) { if(pixid==badpix[ii]) { isbad=1;break; } } if(isbad) { dphWithoutBadPix[pixid]=-1; } else { dphWithoutBadPix[pixid]=mod_dph[pixid]; } } } numblocks=(int)ceil(((numelements_x*numelements_y)/(float)NUMTHREADS)); printf("NUM BLOCKS:%d\t",numblocks); printf("Threads per block:%d\n",threadsPerBlock); //allocating device memory cudaMalloc((void **) &dph_device,moddph_size ); cudaMalloc((void **) &qemap_device,moddph_size ); cudaMalloc((void **) &mask_device,sizeof(int)*ROWSPERMODULE*COLSPERMODULE ); cudaMalloc((void **) &correlation_device, sizeof(float)*numelements_x*numelements_y); //checking error in memory allocation if((cuerr = cudaGetLastError()) != cudaSuccess) { printf("\nError: Cuda Malloc %s\n", cudaGetErrorString(cuerr)); return -1; } //coping data from host to device memory cudaMemcpy(dph_device,dphWithoutBadPix,moddph_size,cudaMemcpyHostToDevice); cudaMemcpy(qemap_device,qemap,moddph_size,cudaMemcpyHostToDevice); cudaMemcpy(mask_device,mask,sizeof(int)*ROWSPERMODULE*COLSPERMODULE ,cudaMemcpyHostToDevice); if((cuerr = cudaGetLastError()) != cudaSuccess) { printf("\nError: Cuda Memcpy %s\n", cudaGetErrorString(cuerr)); return -1; } //calling cross-correlation kernel CorrelationKernel<<<numblocks,threadsPerBlock>>>(mask_device,dph_device,qemap_device,correlation_device,numelements_x,numelements_y,xstart,ystart,resol,zs,moduleid); if((cuerr = cudaGetLastError()) != cudaSuccess) { printf("\nError: CUDA KERNEL ERROR: %s\n", cudaGetErrorString(cuerr)); return -1; } //coping back the results from device to host memory cudaMemcpy(correlation,correlation_device,sizeof(float)*numelements_x*numelements_y,cudaMemcpyDeviceToHost); if((cuerr = cudaGetLastError()) != cudaSuccess) { printf("\nError: Cuda DEVICE TO HOST Memcpy: %s\n", cudaGetErrorString(cuerr)); return -1; } //getting maximum cross-correlation value and preparing array to write FITS image for(i=0;i<numelements_x;i++) { for(j=0;j<numelements_y;j++) { if(correlation[i*numelements_y+j]>maxvalue){ maxvalue=correlation[i*numelements_y+j]; max_x=i; max_y=j; } } } printf("Max_x:%d\tMax_y:%d\n",max_x,max_y); printf("Peak:%f\tX:%f\tY:%f\n",maxvalue,xstart+(max_x*resol),ystart+(max_y*resol)); writeFloatImage(correlationFilename,correlation,numelements_x,numelements_y,resol); //performing forward fitting to get source intensity shadow=(float*)malloc(sizeof(float)*(NUMPIXPERMODULE*NUMPIXPERMODULE)+2); funy=(float*)malloc(sizeof(float)*(NUMPIXPERMODULE*NUMPIXPERMODULE)+2); funxy=(float*)malloc(sizeof(float)*(NUMPIXPERMODULE*NUMPIXPERMODULE)+2); coeff=(float*)malloc(sizeof(float)*(NUMPIXPERMODULE*NUMPIXPERMODULE)+2); xstart=xstart+(max_x*resol); ystart=ystart+(max_y*resol); int windowsize=3; printf("Fitting source for iteration :%d\n",tempsrc); for(xs=xstart-windowsize;xs<=xstart+windowsize;xs+=0.25) { for(ys=ystart-windowsize;ys<=ystart+windowsize;ys+=0.25) { gensha(moduleid,shadow,xs,ys,zs,mask); fitcounter=0; for(i=0;i<NUMPIXPERMODULE*NUMPIXPERMODULE;i++){ if(dphWithoutBadPix[i]!=-1) { funy[fitcounter+1]=shadow[i]; funxy[fitcounter+1]=dphWithoutBadPix[i]; fitcounter++; } } do_svdfit(funy,funxy,coeff,fitcounter,1,&chisq,funx_pointer); chisq/=fitcounter-1; if(chisq<minval) { minval=chisq; fittedparam=coeff[1]; min_x=xs; min_y=ys; } } } char splot[BUFSIZ],matrix[BUFSIZE],xplot[BUFSIZE],yplot[BUFSIZE]; FILE *fp1,*fp2,*fp3,*fp4; strcpy(splot,correlationFilename); strcpy(matrix,correlationFilename); 
strcpy(xplot,correlationFilename); strcpy(yplot,correlationFilename); strcat(splot,"_S.txt"); strcat(matrix,"_Corr.txt"); strcat(xplot,"_X.txt"); strcat(yplot,"_Y.txt"); fp1=fopen(splot,"w"); fp2=fopen(matrix,"w"); fp3=fopen(xplot,"w"); fp4=fopen(yplot,"w"); for(xs=xt_tmp,i=0;xs<=xe_tmp;xs+=0.25,i++) { for(ys=yt_tmp,j=0;ys<=ye_tmp;ys+=0.25,j++) { fprintf(fp1,"%f\t%f\t%f\n",xs,ys,correlation[i*numelements_y+j]); fprintf(fp2,"%f\t",correlation[i*numelements_y+j]); fprintf(fp3,"%f\t%f\n",xs,correlation[i*numelements_y+j]); fprintf(fp4,"%f\t%f\n",ys,correlation[i*numelements_y+j]); } fprintf(fp2,"\n"); } fclose(fp1); fclose(fp2); fclose(fp3); fclose(fp4); printf("Minimum Chisq is %f found at %f,%f\n",minval,min_x,min_y); printf("Fitted Param:%f\n",fittedparam); for(i=0;i<NUMPIXPERMODULE*NUMPIXPERMODULE;i++){ if(dphWithoutBadPix[i]!=-1) { dphWithoutBadPix[i]-=shadow[i]*fittedparam; } } printf("TIME ELAPSED : %f seconds\n\n", ((double)clock() - start) / CLOCKS_PER_SEC); } void polyfunction(float x,float*funx) { funx[1]=x; } __global__ void CorrelationKernel(int *mask,float *dph,float *qemap,float *correlation,int numelem_x,int numelem_y,float x_start,float y_start,float resol,float zs,int moduleid) { int index=blockIdx.x*blockDim.x+threadIdx.x; int i=0; float xs,ys; float *shadow,sum=0; xs=(x_start)+((index/numelem_y)*resol); ys=(y_start)+((index%numelem_y)*resol); if(xs>19.5 || xs<-19.5 || ys>19.5 || ys<-19.5) return; //allocating memory shadow=(float*)malloc(sizeof(float)*ELEM); //computing shadow gensha_device(moduleid,shadow,xs,ys,zs,mask); //Computing cross-correlation value for a direction for(i=0;i<NUMPIXPERMODULE*NUMPIXPERMODULE;i++) { if(qemap[i]!=0 && dph[i]!=-1) sum+=((dph[i]*shadow[i])/qemap[i]); } correlation[index]=sum; free(shadow); } //function to generate shadow int gensha(int moduleid,float*det,float xs,float ys,float zs,int *mask) { int detx=0, dety=0; float x_center=0.0,y_center=0.0,tx=0.0,ty=0.0,xtemp=0,ytemp=0; int rowcounter=0,colcounter=0, x_index,y_index; int i=0,j=0,ii=0,jj=0; int llx=0,lly=0; getminmax(moduleid,&llx,&lly,&x_center,&y_center); dety=(llx/16)*ROWSPERMODULE+(llx/16)*100; detx=(lly/16)*COLSPERMODULE+(lly/16)*100; xtemp=19.5-xs; ytemp=19.5+ys; xs=ytemp; ys=xtemp; xs/=0.02; ys/=0.02; zs/=0.02; int tempx,tempy; for(i=0;i<256;i++) det[i]=0; for(i=detx,ii=0;i<detx+ROWSPERMODULE;i++,ii++) { tx=asin( (ys-ii) / ( sqrt( (zs*zs)+((ys-ii) * (ys-ii)) ) ) ); tempx=(int)(ii+(HEIGHT*(tan(tx)))); x_index=(ii/123); if(x_index>14) x_index=14+((ii-(14*123))/114); else x_index=x_index+((ii-(x_index*123))/114); for(j=dety,jj=0;j<dety+COLSPERMODULE;j++,jj++) { y_index=(jj/123); if(y_index>14) y_index=14+((jj-(14*123))/114); else y_index=y_index+((jj-(y_index*123))/114); ty=asin( (xs-jj) / ( sqrt( (zs*zs)+((xs-jj) * (xs-jj)) ) ) ); tempy=(int)(jj+(HEIGHT*(tan(ty)))); if(tempx>=0 && tempx<ROWSPERMODULE && tempy>=0 &&tempy<COLSPERMODULE) { if(mask[tempx*COLSPERMODULE+tempy]==1) det[x_index*16+y_index]+=1; } } } for(i=0;i<16;i++) { if(i%16==0 ||(i+1)%16==0) rowcounter=114; else rowcounter=123; for(j=0;j<16;j++) { if(j%16==0 || (j+1)%16==0) colcounter=114; else colcounter=123; det[i*16+j]=(float)det[i*16+j]/(float)(rowcounter*colcounter); } } return 0; } //Generatee shadow function for devicee __device__ int gensha_device(int moduleid,float*det,float xs,float ys,float zs,int *mask) { int detx=0, dety=0; float x_center=0.0,y_center=0.0,tx=0.0,ty=0.0,xtemp=0,ytemp=0; int rowcounter=0,colcounter=0, x_index,y_index; int i=0,j=0,ii=0,jj=0; int llx=0,lly=0; 
getminmax_device(moduleid,&llx,&lly,&x_center,&y_center); dety=(llx/16)*ROWSPERMODULE+(llx/16)*100; detx=(lly/16)*COLSPERMODULE+(lly/16)*100; xtemp=19.5-xs; ytemp=19.5+ys; xs=ytemp; ys=xtemp; xs/=0.02; ys/=0.02; zs/=0.02; int tempx,tempy; for(i=0;i<256;i++) det[i]=0; for(i=detx,ii=0;i<detx+ROWSPERMODULE;i++,ii++) { tx=asin( (ys-ii) / ( sqrt( (zs*zs)+((ys-ii) * (ys-ii)) ) ) ); tempx=(int)(ii+(HEIGHT*(tan(tx)))); x_index=(ii/123); if(x_index>14) x_index=14+((ii-(14*123))/114); else x_index=x_index+((ii-(x_index*123))/114); for(j=dety,jj=0;j<dety+COLSPERMODULE;j++,jj++) { y_index=(jj/123); if(y_index>14) y_index=14+((jj-(14*123))/114); else y_index=y_index+((jj-(y_index*123))/114); ty=asin( (xs-jj) / ( sqrt( (zs*zs)+((xs-jj) * (xs-jj)) ) ) ); tempy=(int)(jj+(HEIGHT*(tan(ty)))); if(tempx>=0 && tempx<ROWSPERMODULE && tempy>=0 &&tempy<COLSPERMODULE) { if(mask[tempx*COLSPERMODULE+tempy]==1) det[x_index*16+y_index]+=1; } } } for(i=0;i<16;i++) { if(i%16==0 ||(i+1)%16==0) rowcounter=114; else rowcounter=123; for(j=0;j<16;j++) { if(j%16==0 || (j+1)%16==0) colcounter=114; else colcounter=123; det[i*16+j]=(float)det[i*16+j]/(float)(rowcounter*colcounter); } } return 0; } //function to write fits images void writeFloatImage(char *filename,float*pixels,int rows,int cols,float resol) { int bitpix = FLOAT_IMG; /* 16-bit unsigned short pixel values */ long naxis = 2; /* 2-dimensional image */ int fpixel = 1,status=0; long naxes[2] = { cols,rows }; long nelements = naxes[0] * naxes[1]; fitsfile *fptr; remove(filename); if (fits_create_file(&fptr, filename, &status)) { printf("Error(%s:%d):Creating file\n",__FILE__,__LINE__); } if ( fits_create_img(fptr, bitpix,0, naxes, &status) ) { printf("Error(%s:%d):Creating image\n",__FILE__,__LINE__); } if ( fits_create_img(fptr, bitpix, naxis, naxes, &status) ) { printf("Error(%s:%d):Creating image\n",__FILE__,__LINE__); } write_wcsaxis(fptr,1,"","","","IMX",((double)(rows+1)/2.0),resol,0,"mm",&status); write_wcsaxis(fptr,2,"","","","IMY",((double)(cols+1)/2.0),resol,0,"mm",&status); if ( fits_write_img(fptr, TFLOAT, fpixel, nelements,pixels, &status) ) { printf("Error(%s:%d):Wrting image\n",__FILE__,__LINE__); } if ( fits_close_file(fptr, &status) ) { printf("Error(%s:%d):closing file\n",__FILE__,__LINE__); } } //function to read fits images void readFloatImage(char*filename,int hduno,float*data) { fitsfile *fptr; /* pointer to the FITS file, defined in fitsio.h */ int status, nfound, anynull,hdutype; long naxes[2], fpixel, nbuffer, npixels; float nullval; int buffsize; status = 0; if ( fits_open_file(&fptr, filename, READONLY, &status) ) printf("Error while reading fits file\n"); if ( fits_movabs_hdu(fptr, hduno, &hdutype, &status) ) printf("Error while moving HDU\n"); if ( fits_read_keys_lng(fptr, "NAXIS", 1, 2, naxes, &nfound, &status) ) printf("Error while reading keys\n"); npixels = naxes[0] * naxes[1]; /* number of pixels in the image */ buffsize=npixels; fpixel = 1; nullval = 0; /* don't check for null values in the image */ while (npixels > 0) { nbuffer = npixels; if (npixels > buffsize) nbuffer = buffsize; if ( fits_read_img(fptr, TFLOAT, fpixel, nbuffer, &nullval,data, &anynull, &status) ) printf("Error while reading fits image\n"); npixels -= nbuffer; fpixel += nbuffer; } if ( fits_close_file(fptr, &status) ) printf("Error while closing file\n"); } //Writing WCS information to images int write_wcsaxis(fitsfile *imgfile, int axis, char *suffix,char *wcsname, char *wcstype, char *ctype, double crpix, double cdelt, double crval,char *cunit, int *status) { 
char key[20]; if (status == 0) return NULL_INPUT_PTR; if (*status != 0) return (*status); if (imgfile == 0) return (*status = NULL_INPUT_PTR); if (wcsname && wcsname[0]) { sprintf(key, "WCSNAME%s", suffix); fits_update_key(imgfile, TSTRING, key, wcsname,"Coordinate system name", status); } if (wcstype && wcstype[0]){ sprintf(key, "WCSTY%d%s", axis, suffix); fits_update_key(imgfile, TSTRING, key, wcstype,"Coordinate system axis", status); } sprintf(key, "CTYPE%d%s", axis, suffix); fits_update_key(imgfile, TSTRING, key, ctype,"Name of coordinate", status); if (cunit && cunit[0]) { sprintf(key, "CUNIT%d%s", axis, suffix); fits_update_key(imgfile, TSTRING, key, cunit,"Units of coordinate axis", status); } sprintf(key, "CRPIX%d%s", axis, suffix); fits_update_key(imgfile, TDOUBLE, key, &crpix,"Reference pixel position", status); sprintf(key, "CDELT%d%s", axis, suffix); fits_update_key(imgfile, TDOUBLE, key, &cdelt,"Pixel spacing in physical units", status); sprintf(key, "CRVAL%d%s", axis, suffix); fits_update_key(imgfile, TDOUBLE, key, &crval,"Coordinate value at reference pixel position", status); return (*status); } void normalizeData(float *data,int numelements) { float peakval=getMaximum(data,numelements); int i=0; for(i=0;i<numelements;i++) { data[i]/=peakval; } } float getMaximum(float *data,int numelements) { int i=0; float maxval=0; for(i=0;i<numelements;i++) { if(data[i]>maxval) maxval=data[i]; } return maxval; } void readImage(char*filename,int *buffer,int hduNo) { fitsfile *fptr; /* pointer to the FITS file, defined in fitsio.h */ int status, nfound, anynull,hdutype; long naxes[2], fpixel, nbuffer, npixels; float nullval; status = 0; if ( fits_open_file(&fptr, filename, READONLY, &status) ) printf("Error while opening fits file\n"); if ( fits_movabs_hdu(fptr, hduNo, &hdutype, &status) ) printf("Error while moving module\n"); if ( fits_read_keys_lng(fptr, "NAXIS", 1, 2, naxes, &nfound, &status) ) printf("Error while reading keys\n"); npixels = naxes[0] * naxes[1]; /* number of pixels in the image */ fpixel = 1; nullval = 0; /* don't check for null values in the image */ while (npixels > 0) { nbuffer = npixels; if ( fits_read_img(fptr, TINT, fpixel, nbuffer, &nullval,buffer, &anynull, &status) ) { printf("Error reading fits image\n"); } npixels -= nbuffer; fpixel += nbuffer; } if ( fits_close_file(fptr, &status) ) { printf("Error while closing fits file\n"); } return; } __device__ void getminmax_device(int moduleno,int*x,int *y,float* x_center,float* y_center) { switch(moduleno) { case 12 : *x=0; *y=0; *x_center=19.5; *y_center=19.5; break; case 13 : *x=16; *y=0; *x_center=60.5; *y_center=19.5; break; case 14 : *x=32; *y=0; *x_center=101.5; *y_center=19.5; break; case 15 : *x=48; *y=0; *x_center=142.5; *y_center=19.5; break; case 8: *x=0; *y=16; *x_center=19.5; *y_center=60.5; break; case 9 : *x=16; *y=16; *x_center=60.5; *y_center=60.5; break; case 10 : *x=32; *y=16; *x_center=101.5; *y_center=60.5; break; case 11 : *x=48; *y=16; *x_center=142.5; *y_center=60.5; break; case 4 : *x=0; *y=32; *x_center=19.5; *y_center=101.5; break; case 5 : *x=16; *y=32; *x_center=60.5; *y_center=101.5; break; case 6 : *x=32; *y=32; *x_center=101.5; *y_center=101.5; break; case 7 : *x=48; *y=32; *x_center=142.5; *y_center=101.5; break; case 0 : *x=0; *y=48; *x_center=19.5; *y_center=142.5; break; case 1 : *x=16; *y=48; *x_center=60.5; *y_center=142.5; break; case 2 : *x=32; *y=48; *x_center=101.5; *y_center=142.5; break; case 3 : *x=48; *y=48; *x_center=142.5; *y_center=142.5; break; default: 
printf("Invalid module id\n"); break; } } void getminmax(int moduleno,int*x,int *y,float* x_center,float* y_center) { switch(moduleno) { case 12 : *x=0; *y=0; *x_center=19.5; *y_center=19.5; break; case 13 : *x=16; *y=0; *x_center=60.5; *y_center=19.5; break; case 14 : *x=32; *y=0; *x_center=101.5; *y_center=19.5; break; case 15 : *x=48; *y=0; *x_center=142.5; *y_center=19.5; break; case 8: *x=0; *y=16; *x_center=19.5; *y_center=60.5; break; case 9 : *x=16; *y=16; *x_center=60.5; *y_center=60.5; break; case 10 : *x=32; *y=16; *x_center=101.5; *y_center=60.5; break; case 11 : *x=48; *y=16; *x_center=142.5; *y_center=60.5; break; case 4 : *x=0; *y=32; *x_center=19.5; *y_center=101.5; break; case 5 : *x=16; *y=32; *x_center=60.5; *y_center=101.5; break; case 6 : *x=32; *y=32; *x_center=101.5; *y_center=101.5; break; case 7 : *x=48; *y=32; *x_center=142.5; *y_center=101.5; break; case 0 : *x=0; *y=48; *x_center=19.5; *y_center=142.5; break; case 1 : *x=16; *y=48; *x_center=60.5; *y_center=142.5; break; case 2 : *x=32; *y=48; *x_center=101.5; *y_center=142.5; break; case 3 : *x=48; *y=48; *x_center=142.5; *y_center=142.5; break; default: printf("Invalid module id\n"); break; } } void getModule(int moduleNo,int *pixels) { int i=0,j=0,ii=0,jj=0,temp=0; int cols=COLSPERMODULE; char moduleFileName[100]="module",blockageFileName[100]="bars_",tempBuff[100]; FILE *fp,*blockage; int *data; char *CZTHOME = getenv ("CZTWORKSPACE"); if(CZTHOME==NULL) { printf("CZTHOME Variable is not set\n"); exit(0); } data=(int*)malloc(sizeof(int)*16*16); sprintf(tempBuff,"%d",moduleNo); strcpy(moduleFileName,CZTHOME); strcpy(blockageFileName,CZTHOME); strcat(moduleFileName,"/config/module"); strcat(blockageFileName,"/config/bars_"); strcat(moduleFileName,tempBuff); strcat(blockageFileName,tempBuff); fp=fopen(moduleFileName,"r"); blockage=fopen(blockageFileName,"r"); if(fp==NULL) { printf("Sorry Error while opening the module file\n%s\n",moduleFileName); exit(0); } if(blockage==NULL) { printf("Error while opening the blockage file\n%s\n",blockageFileName); exit(0); } int colPreVal=0,colInc=0,rowPreVal=0,rowInc=0; for(i=0;i<16;i++) { if(i==0||i==15) { rowInc=114; } else { rowInc=123; } for(j=0,colPreVal=0;j<16;j++) { if(j==0||j==15) { colInc=114; } else { colInc=123; } fscanf(fp,"%d",&data[i*16+j]); for(ii=rowPreVal;ii<rowPreVal+rowInc-10;ii++) { for(jj=colPreVal;jj<colPreVal+colInc;jj++) { pixels[ii*cols+jj]=data[i*16+j]; } } temp=0; if(i!=15) { fscanf(blockage,"%d",&temp); for(ii=rowPreVal+rowInc-10;ii<rowPreVal+rowInc;ii++) { for(jj=colPreVal;jj<colPreVal+colInc;jj++) { pixels[ii*cols+jj]=temp; } } } else { for(ii=rowPreVal+rowInc-10;ii<rowPreVal+rowInc;ii++) { for(jj=colPreVal;jj<colPreVal+colInc;jj++) { pixels[ii*cols+jj]=data[i*16+j]; } } } colPreVal+=colInc; } rowPreVal+=rowInc; } } void calculateSigmaClippedMean(float* pixel_count,float *mean_out,float*rms_out) { int i=0; float mean_sum=0,rms_sum=0,mean,rms; int rows=NUMPIXPERMODULE,cols=NUMPIXPERMODULE; float temp_mean=0,temp_rms=0,temp_mean_sum=0,temp_rms_sum=0,mean_count=0,rms_count=0; mean_count=0; for(i=0;i<rows*cols;i++) { if(pixel_count[i]!=0) { mean_sum+=pixel_count[i]; mean_count++; } } mean=mean_sum/(mean_count); rms_count=0; for(i=0;i<rows*cols;i++) { if(pixel_count[i]!=0) { rms_sum+=((pixel_count[i]-mean)*(pixel_count[i]-mean)); rms_count++; } } rms=(rms_sum/rms_count); rms=sqrt(rms); while(1) { temp_mean_sum=0; temp_rms_sum=0; temp_mean=0; temp_rms=0; mean_count=0; rms_count=0; for(i=0;i<rows*cols;i++) { if((pixel_count[i])<((THREASHOLD*rms)+mean) || 
(pixel_count[i] > (mean-(THREASHOLD*rms)))) { temp_mean_sum+=pixel_count[i]; mean_count++; } } temp_mean=temp_mean_sum/mean_count; for(i=0;i<rows*cols;i++) { if((pixel_count[i])<((THREASHOLD*rms)+mean) || (pixel_count[i] > (mean-(THREASHOLD*rms)))) { temp_rms_sum+=(pixel_count[i]-temp_mean)*(pixel_count[i]-temp_mean); rms_count++; } } if(mean_count!=rms_count) printf("Mean Rms count different\n"); temp_rms=temp_rms_sum/rms_count; temp_rms=sqrt(temp_rms); float t1=0,t2=0; t1=((mean-temp_mean)/mean); t2=((rms-temp_rms)/rms); if(t1<0) t1*=-1; if(t2<0) t2*=-1; if(t1 <0.01 && t2<0.01 ) { //mean=temp_mean; //rms=temp_rms; *mean_out=temp_mean; *rms_out=temp_rms; break; } else { mean=temp_mean; rms=temp_rms; } } }
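/*
 * Sketch (not from the original source): CorrelationKernel calls malloc()/free()
 * on the device heap for each thread's shadow array. If ELEM is 256, as the
 * "for(i=0;i<256;i++) det[i]=0" loops in gensha_device() suggest, a fixed-size
 * local array avoids the per-thread heap allocation and its failure mode when
 * the device heap is exhausted. CorrelationKernelLocal is an illustrative name;
 * the explicit index guard is an addition so threads past the last sky position
 * never write correlation[].
 */
__global__ void CorrelationKernelLocal(int *mask, float *dph, float *qemap,
        float *correlation, int numelem_x, int numelem_y, float x_start,
        float y_start, float resol, float zs, int moduleid)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= numelem_x * numelem_y) return;              /* bounds guard      */

    float xs = x_start + ((index / numelem_y) * resol);
    float ys = y_start + ((index % numelem_y) * resol);
    if (xs > 19.5 || xs < -19.5 || ys > 19.5 || ys < -19.5) return;

    float shadow[256];                                       /* per-thread local  */
    gensha_device(moduleid, shadow, xs, ys, zs, mask);       /* computing shadow  */

    float sum = 0;
    for (int i = 0; i < NUMPIXPERMODULE * NUMPIXPERMODULE; i++) {
        if (qemap[i] != 0 && dph[i] != -1)
            sum += (dph[i] * shadow[i]) / qemap[i];          /* cross-correlation */
    }
    correlation[index] = sum;
}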
515414356aea30b17562b1948a866d535dda80e3.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column.hpp> #include <cudf/column/column_factories.hpp> #include <cudf/utilities/error.hpp> #include <cudf/detail/valid_if.cuh> #include <cudf/detail/nvtx/ranges.hpp> #include <strings/utilities.hpp> #include <strings/utilities.cuh> #include <rmm/thrust_rmm_allocator.h> #include <thrust/transform_reduce.h> #include <thrust/for_each.h> namespace cudf { // Create a strings-type column from vector of pointer/size pairs std::unique_ptr<column> make_strings_column( const rmm::device_vector<thrust::pair<const char*,size_type>>& strings, hipStream_t stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); size_type strings_count = strings.size(); if( strings_count==0 ) return strings::detail::make_empty_strings_column(mr,stream); auto execpol = rmm::exec_policy(stream); auto d_strings = strings.data().get(); // check total size is not too large for cudf column size_t bytes = thrust::transform_reduce( execpol->on(stream), thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator<size_t>(strings_count), [d_strings] __device__ (size_t idx) { auto item = d_strings[idx]; return (item.first!=nullptr) ? item.second : 0; }, 0, thrust::plus<size_t>()); CUDF_EXPECTS( bytes < std::numeric_limits<size_type>::max(), "total size of strings is too large for cudf column" ); // build offsets column from the strings sizes auto offsets_transformer = [d_strings] __device__ (size_type idx) { thrust::pair<const char*,size_type> item = d_strings[idx]; return ( item.first!=nullptr ? 
static_cast<int32_t>(item.second) : 0 ); }; auto offsets_transformer_itr = thrust::make_transform_iterator( thrust::make_counting_iterator<size_type>(0), offsets_transformer ); auto offsets_column = strings::detail::make_offsets_child_column(offsets_transformer_itr, offsets_transformer_itr+strings_count, mr, stream); auto offsets_view = offsets_column->view(); auto d_offsets = offsets_view.data<int32_t>(); // create null mask auto new_nulls = experimental::detail::valid_if( thrust::make_counting_iterator<size_type>(0), thrust::make_counting_iterator<size_type>(strings_count), [d_strings] __device__ (size_type idx) { return d_strings[idx].first!=nullptr; }, stream, mr); auto null_count = new_nulls.second; rmm::device_buffer null_mask; if( null_count > 0 ) null_mask = std::move(new_nulls.first); // build chars column auto chars_column = strings::detail::create_chars_child_column( strings_count, null_count, bytes, mr, stream ); auto chars_view = chars_column->mutable_view(); auto d_chars = chars_view.data<char>(); thrust::for_each_n(execpol->on(stream), thrust::make_counting_iterator<size_type>(0), strings_count, [d_strings, d_offsets, d_chars] __device__(size_type idx){ // place individual strings auto item = d_strings[idx]; if( item.first!=nullptr ) memcpy(d_chars + d_offsets[idx], item.first, item.second ); }); return make_strings_column(strings_count, std::move(offsets_column), std::move(chars_column), null_count, std::move(null_mask), stream, mr); } struct string_view_to_pair { string_view null_placeholder; string_view_to_pair(string_view n) : null_placeholder(n) {} __device__ thrust::pair<const char *, size_type> operator()(const string_view& i) { return (i.data() == null_placeholder.data()) ? thrust::pair<const char *, size_type>{nullptr, 0} : thrust::pair<const char *, size_type>{i.data(), i.size_bytes()}; } }; // Create a strings-type column from vector of string_view std::unique_ptr<column> make_strings_column( const rmm::device_vector<string_view>& string_views, const string_view null_placeholder, hipStream_t stream, rmm::mr::device_memory_resource* mr) { auto it_pair = thrust::make_transform_iterator( string_views.begin(), string_view_to_pair{null_placeholder}); const rmm::device_vector<thrust::pair<const char *, size_type>> dev_strings( it_pair, it_pair + string_views.size()); return make_strings_column(dev_strings, stream, mr); } // Create a strings-type column from device vector of chars and vector of offsets. std::unique_ptr<column> make_strings_column( const rmm::device_vector<char>& strings, const rmm::device_vector<size_type>& offsets, const rmm::device_vector<bitmask_type>& valid_mask, size_type null_count, hipStream_t stream, rmm::mr::device_memory_resource* mr ) { CUDF_FUNC_RANGE(); size_type num_strings = offsets.size()-1; if( num_strings==0 ) return strings::detail::make_empty_strings_column(mr,stream); CUDF_EXPECTS( null_count < num_strings, "null strings column not yet supported"); if( null_count > 0 ) { CUDF_EXPECTS( !valid_mask.empty(), "Cannot have null elements without a null mask." 
); } auto execpol = rmm::exec_policy(stream); size_type bytes = offsets.back(); CUDF_EXPECTS( bytes >=0, "invalid offsets vector"); // build offsets column -- this is the number of strings + 1 auto offsets_column = make_numeric_column( data_type{INT32}, num_strings+1, mask_state::UNALLOCATED, stream, mr ); auto offsets_view = offsets_column->mutable_view(); CUDA_TRY(hipMemcpyAsync( offsets_view.data<int32_t>(), offsets.data().get(), (num_strings+1)*sizeof(int32_t), hipMemcpyDeviceToDevice, stream )); // build null bitmask rmm::device_buffer null_mask{ valid_mask.data().get(), valid_mask.size() * sizeof(bitmask_type)}; // Or this works too: sizeof(typename std::remove_reference_t<decltype(valid_mask)>::value_type) // Following give the incorrect value of 8 instead of 4 because of smart references: // sizeof(valid_mask[0]), sizeof(decltype(valid_mask.front())) // build chars column auto chars_column = strings::detail::create_chars_child_column( num_strings, null_count, bytes, mr, stream ); auto chars_view = chars_column->mutable_view(); CUDA_TRY(hipMemcpyAsync( chars_view.data<char>(), strings.data().get(), bytes, hipMemcpyDeviceToDevice, stream )); return make_strings_column(num_strings, std::move(offsets_column), std::move(chars_column), null_count, std::move(null_mask), stream, mr); } // Create strings column from host vectors std::unique_ptr<column> make_strings_column( const std::vector<char>& strings, const std::vector<size_type>& offsets, const std::vector<bitmask_type>& null_mask, size_type null_count, hipStream_t stream, rmm::mr::device_memory_resource* mr) { rmm::device_vector<char> d_strings{strings}; rmm::device_vector<size_type> d_offsets{offsets}; rmm::device_vector<bitmask_type> d_null_mask{null_mask}; return make_strings_column(d_strings, d_offsets, d_null_mask, null_count, stream, mr); } // std::unique_ptr<column> make_strings_column( size_type num_strings, std::unique_ptr<column> offsets_column, std::unique_ptr<column> chars_column, size_type null_count, rmm::device_buffer&& null_mask, hipStream_t stream, rmm::mr::device_memory_resource* mr) { if( null_count > 0 ) CUDF_EXPECTS( null_mask.size() > 0, "Column with nulls must be nullable."); CUDF_EXPECTS( num_strings == offsets_column->size()-1, "Invalid offsets column size for strings column." ); CUDF_EXPECTS( offsets_column->null_count()==0, "Offsets column should not contain nulls"); CUDF_EXPECTS( chars_column->null_count()==0, "Chars column should not contain nulls"); std::vector<std::unique_ptr<column>> children; children.emplace_back(std::move(offsets_column)); children.emplace_back(std::move(chars_column)); return std::make_unique<column>( data_type{STRING}, num_strings, rmm::device_buffer{0,stream,mr}, null_mask, null_count, std::move(children)); } } // namespace cudf
515414356aea30b17562b1948a866d535dda80e3.cu
/* * Copyright (c) 2019-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column.hpp> #include <cudf/column/column_factories.hpp> #include <cudf/utilities/error.hpp> #include <cudf/detail/valid_if.cuh> #include <cudf/detail/nvtx/ranges.hpp> #include <strings/utilities.hpp> #include <strings/utilities.cuh> #include <rmm/thrust_rmm_allocator.h> #include <thrust/transform_reduce.h> #include <thrust/for_each.h> namespace cudf { // Create a strings-type column from vector of pointer/size pairs std::unique_ptr<column> make_strings_column( const rmm::device_vector<thrust::pair<const char*,size_type>>& strings, cudaStream_t stream, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); size_type strings_count = strings.size(); if( strings_count==0 ) return strings::detail::make_empty_strings_column(mr,stream); auto execpol = rmm::exec_policy(stream); auto d_strings = strings.data().get(); // check total size is not too large for cudf column size_t bytes = thrust::transform_reduce( execpol->on(stream), thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator<size_t>(strings_count), [d_strings] __device__ (size_t idx) { auto item = d_strings[idx]; return (item.first!=nullptr) ? item.second : 0; }, 0, thrust::plus<size_t>()); CUDF_EXPECTS( bytes < std::numeric_limits<size_type>::max(), "total size of strings is too large for cudf column" ); // build offsets column from the strings sizes auto offsets_transformer = [d_strings] __device__ (size_type idx) { thrust::pair<const char*,size_type> item = d_strings[idx]; return ( item.first!=nullptr ? 
static_cast<int32_t>(item.second) : 0 ); }; auto offsets_transformer_itr = thrust::make_transform_iterator( thrust::make_counting_iterator<size_type>(0), offsets_transformer ); auto offsets_column = strings::detail::make_offsets_child_column(offsets_transformer_itr, offsets_transformer_itr+strings_count, mr, stream); auto offsets_view = offsets_column->view(); auto d_offsets = offsets_view.data<int32_t>(); // create null mask auto new_nulls = experimental::detail::valid_if( thrust::make_counting_iterator<size_type>(0), thrust::make_counting_iterator<size_type>(strings_count), [d_strings] __device__ (size_type idx) { return d_strings[idx].first!=nullptr; }, stream, mr); auto null_count = new_nulls.second; rmm::device_buffer null_mask; if( null_count > 0 ) null_mask = std::move(new_nulls.first); // build chars column auto chars_column = strings::detail::create_chars_child_column( strings_count, null_count, bytes, mr, stream ); auto chars_view = chars_column->mutable_view(); auto d_chars = chars_view.data<char>(); thrust::for_each_n(execpol->on(stream), thrust::make_counting_iterator<size_type>(0), strings_count, [d_strings, d_offsets, d_chars] __device__(size_type idx){ // place individual strings auto item = d_strings[idx]; if( item.first!=nullptr ) memcpy(d_chars + d_offsets[idx], item.first, item.second ); }); return make_strings_column(strings_count, std::move(offsets_column), std::move(chars_column), null_count, std::move(null_mask), stream, mr); } struct string_view_to_pair { string_view null_placeholder; string_view_to_pair(string_view n) : null_placeholder(n) {} __device__ thrust::pair<const char *, size_type> operator()(const string_view& i) { return (i.data() == null_placeholder.data()) ? thrust::pair<const char *, size_type>{nullptr, 0} : thrust::pair<const char *, size_type>{i.data(), i.size_bytes()}; } }; // Create a strings-type column from vector of string_view std::unique_ptr<column> make_strings_column( const rmm::device_vector<string_view>& string_views, const string_view null_placeholder, cudaStream_t stream, rmm::mr::device_memory_resource* mr) { auto it_pair = thrust::make_transform_iterator( string_views.begin(), string_view_to_pair{null_placeholder}); const rmm::device_vector<thrust::pair<const char *, size_type>> dev_strings( it_pair, it_pair + string_views.size()); return make_strings_column(dev_strings, stream, mr); } // Create a strings-type column from device vector of chars and vector of offsets. std::unique_ptr<column> make_strings_column( const rmm::device_vector<char>& strings, const rmm::device_vector<size_type>& offsets, const rmm::device_vector<bitmask_type>& valid_mask, size_type null_count, cudaStream_t stream, rmm::mr::device_memory_resource* mr ) { CUDF_FUNC_RANGE(); size_type num_strings = offsets.size()-1; if( num_strings==0 ) return strings::detail::make_empty_strings_column(mr,stream); CUDF_EXPECTS( null_count < num_strings, "null strings column not yet supported"); if( null_count > 0 ) { CUDF_EXPECTS( !valid_mask.empty(), "Cannot have null elements without a null mask." 
); } auto execpol = rmm::exec_policy(stream); size_type bytes = offsets.back(); CUDF_EXPECTS( bytes >=0, "invalid offsets vector"); // build offsets column -- this is the number of strings + 1 auto offsets_column = make_numeric_column( data_type{INT32}, num_strings+1, mask_state::UNALLOCATED, stream, mr ); auto offsets_view = offsets_column->mutable_view(); CUDA_TRY(cudaMemcpyAsync( offsets_view.data<int32_t>(), offsets.data().get(), (num_strings+1)*sizeof(int32_t), cudaMemcpyDeviceToDevice, stream )); // build null bitmask rmm::device_buffer null_mask{ valid_mask.data().get(), valid_mask.size() * sizeof(bitmask_type)}; // Or this works too: sizeof(typename std::remove_reference_t<decltype(valid_mask)>::value_type) // Following give the incorrect value of 8 instead of 4 because of smart references: // sizeof(valid_mask[0]), sizeof(decltype(valid_mask.front())) // build chars column auto chars_column = strings::detail::create_chars_child_column( num_strings, null_count, bytes, mr, stream ); auto chars_view = chars_column->mutable_view(); CUDA_TRY(cudaMemcpyAsync( chars_view.data<char>(), strings.data().get(), bytes, cudaMemcpyDeviceToDevice, stream )); return make_strings_column(num_strings, std::move(offsets_column), std::move(chars_column), null_count, std::move(null_mask), stream, mr); } // Create strings column from host vectors std::unique_ptr<column> make_strings_column( const std::vector<char>& strings, const std::vector<size_type>& offsets, const std::vector<bitmask_type>& null_mask, size_type null_count, cudaStream_t stream, rmm::mr::device_memory_resource* mr) { rmm::device_vector<char> d_strings{strings}; rmm::device_vector<size_type> d_offsets{offsets}; rmm::device_vector<bitmask_type> d_null_mask{null_mask}; return make_strings_column(d_strings, d_offsets, d_null_mask, null_count, stream, mr); } // std::unique_ptr<column> make_strings_column( size_type num_strings, std::unique_ptr<column> offsets_column, std::unique_ptr<column> chars_column, size_type null_count, rmm::device_buffer&& null_mask, cudaStream_t stream, rmm::mr::device_memory_resource* mr) { if( null_count > 0 ) CUDF_EXPECTS( null_mask.size() > 0, "Column with nulls must be nullable."); CUDF_EXPECTS( num_strings == offsets_column->size()-1, "Invalid offsets column size for strings column." ); CUDF_EXPECTS( offsets_column->null_count()==0, "Offsets column should not contain nulls"); CUDF_EXPECTS( chars_column->null_count()==0, "Chars column should not contain nulls"); std::vector<std::unique_ptr<column>> children; children.emplace_back(std::move(offsets_column)); children.emplace_back(std::move(chars_column)); return std::make_unique<column>( data_type{STRING}, num_strings, rmm::device_buffer{0,stream,mr}, null_mask, null_count, std::move(children)); } } // namespace cudf
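Both versions of make_strings_column above assemble the same three pieces: an INT32 offsets child built from the per-string sizes (with a leading zero), a chars child holding the concatenated bytes, and a validity mask derived from null pointers. The host-side sketch below shows that layout in isolation, for illustration only; the struct and function names are invented here, and the library itself performs the same steps on the device with thrust (transform_reduce to size the chars buffer, a scan for the offsets, valid_if for the mask, then a per-row memcpy).

#include <cstdint>
#include <cstring>
#include <utility>
#include <vector>

struct strings_column_host {
    std::vector<int32_t> offsets;  // num_strings + 1 entries, starting at 0
    std::vector<char>    chars;    // offsets.back() bytes of concatenated string data
    std::vector<bool>    valid;    // one flag per row; false marks a null entry
};

strings_column_host build_strings_column(
    const std::vector<std::pair<const char*, int32_t>>& strings)
{
    strings_column_host col;
    col.offsets.push_back(0);
    for (const auto& s : strings) {
        int32_t len = (s.first != nullptr) ? s.second : 0;       // nulls contribute zero bytes
        col.offsets.push_back(col.offsets.back() + len);
        col.valid.push_back(s.first != nullptr);
    }
    col.chars.resize(col.offsets.back());
    for (size_t i = 0; i < strings.size(); ++i)                   // place each string at its offset
        if (strings[i].first != nullptr)
            std::memcpy(col.chars.data() + col.offsets[i], strings[i].first, strings[i].second);
    return col;
}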
8b85f291e1f951fd913359b53c20fae10a2fd36e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.1.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date August 2016 @generated from sparse-iter/blas/zgesellcmmv.cu, normal z -> d, Tue Aug 30 09:38:42 2016 */ #include "magmasparse_internal.h" #define PRECISION_d //#define TEXTURE // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning one thread to each row - 1D kernel template<bool betazero> __global__ void zgesellptmv2d_kernel_1( int num_rows, int num_cols, int blocksize, int T, double alpha, const double * __restrict__ dval, const magma_index_t * __restrict__ dcolind, const magma_index_t * __restrict__ drowptr, const double *__restrict__ dx, double beta, double * __restrict__ dy) { // threads assigned to rows //int Idx = blockDim.x * blockIdx.x + threadIdx.x; //int offset = drowptr[ blockIdx.x ]; //int border = (drowptr[ blockIdx.x+1 ]-offset)/blocksize; // T threads assigned to each row int idx = threadIdx.x; // local row int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * 256 + idx; // global row index // int lblocksize = ( row + blocksize < num_rows) ? blocksize : ( num_rows - blocksize * (row/blocksize) ); int lrow = threadIdx.x%blocksize; // local row; if( row < num_rows ) { int offset = drowptr[ row/blocksize ]; int border = (drowptr[ row/blocksize+1 ]-offset)/blocksize; double dot = MAGMA_D_MAKE(0.0, 0.0); for ( int n = 0; n < border; n++) { int col = dcolind [ offset+ blocksize * n + lrow ]; double val = dval[ offset+ blocksize * n + lrow ]; dot = dot + val * dx [ col ]; } if (betazero) { dy[ row ] = dot * alpha; } else { dy[ row ] = dot * alpha + beta * dy [ row ]; } } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel template<bool betazero> __global__ void zgesellptmv2d_kernel_4( int num_rows, int num_cols, int blocksize, int T, double alpha, double * dval, magma_index_t * dcolind, magma_index_t * drowptr, double * dx, double beta, double * dy) { // T threads assigned to each row int idx = threadIdx.y; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ double shared[]; if(row < num_rows ) { double dot = MAGMA_D_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (drowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles int kk, i1, i2; double x1, x2, v1, v2; dcolind += offset + ldx; dval += offset + ldx; for ( kk = 0; kk < max_-1; kk+=2 ) { i1 = dcolind[ block*kk]; i2 = dcolind[ block*kk + block]; x1 = dx[ i1 ]; x2 = dx[ i2 ]; v1 = dval[ block*kk ]; v2 = dval[ block*kk + block]; dot += v1 * x1; dot += v2 * x2; } if (kk<max_) { x1 = dx[ dcolind[ block*kk] ]; v1 = dval[ block*kk ]; dot += v1 * x1; } shared[ldx] = dot; __syncthreads(); if( idx < 2 ) { shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { if (betazero) { dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha; } else { dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row]; } } } } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel template<bool betazero> __global__ void zgesellptmv2d_kernel_8( int num_rows, int num_cols, int blocksize, int T, double alpha, double * dval, magma_index_t * dcolind, magma_index_t * drowptr, double * dx, double beta, double * dy) { // T threads assigned to each row int idx = threadIdx.y; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ double shared[]; if(row < num_rows ) { double dot = MAGMA_D_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (drowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles int kk, i1, i2; double x1, x2, v1, v2; dcolind += offset + ldx; dval += offset + ldx; for ( kk = 0; kk < max_-1; kk+=2 ) { i1 = dcolind[ block*kk]; i2 = dcolind[ block*kk + block]; x1 = dx[ i1 ]; x2 = dx[ i2 ]; v1 = dval[ block*kk ]; v2 = dval[ block*kk + block]; dot += v1 * x1; dot += v2 * x2; } if (kk<max_) { x1 = dx[ dcolind[ block*kk] ]; v1 = dval[ block*kk ]; dot += v1 * x1; } shared[ldx] = dot; __syncthreads(); if( idx < 4 ) { shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { if (betazero) { dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha; } else { dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row]; } } } } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel template<bool betazero> __global__ void zgesellptmv2d_kernel_16( int num_rows, int num_cols, int blocksize, int T, double alpha, double * dval, magma_index_t * dcolind, magma_index_t * drowptr, double * dx, double beta, double * dy) { // T threads assigned to each row int idx = threadIdx.y; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ double shared[]; if(row < num_rows ) { double dot = MAGMA_D_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (drowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_; k++ ) { double val = dval[ offset + ldx + block*k ]; int col = dcolind[ offset + ldx + block*k ]; dot += val * dx[ col ]; } shared[ldx] = dot; __syncthreads(); if( idx < 8 ) { shared[ldx]+=shared[ldx+blocksize*8]; __syncthreads(); if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { if (betazero) { dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha; } else { dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row]; } } } } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel template<bool betazero> __global__ void zgesellptmv2d_kernel_32( int num_rows, int num_cols, int blocksize, int T, double alpha, double * dval, magma_index_t * dcolind, magma_index_t * drowptr, double * dx, double beta, double * dy) { // T threads assigned to each row int idx = threadIdx.y; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ double shared[]; if(row < num_rows ) { double dot = MAGMA_D_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (drowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_; k++ ) { double val = dval[ offset + ldx + block*k ]; int col = dcolind[ offset + ldx + block*k ]; dot += val * dx[ col ]; } shared[ldx] = dot; __syncthreads(); if( idx < 16 ) { shared[ldx]+=shared[ldx+blocksize*16]; __syncthreads(); if( idx < 8 ) shared[ldx]+=shared[ldx+blocksize*8]; __syncthreads(); if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { if (betazero) { dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha; } else { dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row]; } } } } } /************************* same but using texture mem *************************/ #if defined(PRECISION_d) && defined(TEXTURE) __inline__ __device__ double read_from_tex( hipTextureObject_t texdx, const int& i) { int2 temp = tex1Dfetch<int2>( texdx, i ); return __hiloint2double(temp.y,temp.x); } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel template<bool betazero> __global__ void zgesellptmv2d_kernel_4_tex( int num_rows, int num_cols, int blocksize, int T, double alpha, double * dval, magma_index_t * dcolind, magma_index_t * drowptr, hipTextureObject_t texdx, double beta, double * dy) { // T threads assigned to each row int idx = threadIdx.y; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ double shared[]; if(row < num_rows ) { double dot = MAGMA_D_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (drowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles int kk, i1, i2; double x1, x2, v1, v2; dcolind += offset + ldx; dval += offset + ldx; for ( kk = 0; kk < max_-1; kk+=2 ) { i1 = dcolind[ block*kk]; i2 = dcolind[ block*kk + block]; x1 = read_from_tex( texdx, i1 ); x2 = read_from_tex( texdx, i2 ); v1 = dval[ block*kk ]; v2 = dval[ block*kk + block]; dot += v1 * x1; dot += v2 * x2; } if (kk<max_) { x1 = read_from_tex( texdx, dcolind[ block*kk] ); v1 = dval[ block*kk ]; dot += v1 * x1; } shared[ldx] = dot; __syncthreads(); if( idx < 2 ) { shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { if (betazero) { dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha; } else { dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row]; } } } } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel template<bool betazero> __global__ void zgesellptmv2d_kernel_8_tex( int num_rows, int num_cols, int blocksize, int T, double alpha, double * dval, magma_index_t * dcolind, magma_index_t * drowptr, hipTextureObject_t texdx, double beta, double * dy) { // T threads assigned to each row int idx = threadIdx.y; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ double shared[]; if(row < num_rows ) { double dot = MAGMA_D_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (drowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles int kk, i1, i2; double x1, x2, v1, v2; dcolind += offset + ldx; dval += offset + ldx; for ( kk = 0; kk < max_-1; kk+=2 ) { i1 = dcolind[ block*kk]; i2 = dcolind[ block*kk + block]; x1 = read_from_tex( texdx, i1 ); x2 = read_from_tex( texdx, i2 ); v1 = dval[ block*kk ]; v2 = dval[ block*kk + block]; dot += v1 * x1; dot += v2 * x2; } if (kk<max_) { x1 = read_from_tex( texdx, dcolind[ block*kk] ); v1 = dval[ block*kk ]; dot += v1 * x1; } shared[ldx] = dot; __syncthreads(); if( idx < 4 ) { shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { if (betazero) { dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha; } else { dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row]; } } } } } // SELLP SpMV kernel // see paper by M. 
KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel template<bool betazero> __global__ void zgesellptmv2d_kernel_16_tex( int num_rows, int num_cols, int blocksize, int T, double alpha, double * dval, magma_index_t * dcolind, magma_index_t * drowptr, hipTextureObject_t texdx, double beta, double * dy) { // T threads assigned to each row int idx = threadIdx.y; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ double shared[]; if(row < num_rows ) { double dot = MAGMA_D_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (drowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_; k++ ) { double val = dval[ offset + ldx + block*k ]; int col = dcolind[ offset + ldx + block*k ]; dot += val * read_from_tex( texdx, col ); } shared[ldx] = dot; __syncthreads(); if( idx < 8 ) { shared[ldx]+=shared[ldx+blocksize*8]; __syncthreads(); if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { if (betazero) { dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha; } else { dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row]; } } } } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel template<bool betazero> __global__ void zgesellptmv2d_kernel_32_tex( int num_rows, int num_cols, int blocksize, int T, double alpha, double * dval, magma_index_t * dcolind, magma_index_t * drowptr, hipTextureObject_t texdx, double beta, double * dy) { // T threads assigned to each row int idx = threadIdx.y; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ double shared[]; if(row < num_rows ) { double dot = MAGMA_D_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (drowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_; k++ ) { double val = dval[ offset + ldx + block*k ]; int col = dcolind[ offset + ldx + block*k ]; dot += val * read_from_tex( texdx, col ); } shared[ldx] = dot; __syncthreads(); if( idx < 16 ) { shared[ldx]+=shared[ldx+blocksize*16]; __syncthreads(); if( idx < 8 ) shared[ldx]+=shared[ldx+blocksize*8]; __syncthreads(); if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { if (betazero) { dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha; } else { dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row]; } } } } } #endif /********************* end of texture versions **************************/ /** Purpose ------- This routine computes y = alpha * A^t * x + beta * y on the GPU. Input format is SELLP. 
Arguments --------- @param[in] transA magma_trans_t transposition parameter for A @param[in] m magma_int_t number of rows in A @param[in] n magma_int_t number of columns in A @param[in] blocksize magma_int_t number of rows in one ELL-slice @param[in] slices magma_int_t number of slices in matrix @param[in] alignment magma_int_t number of threads assigned to one row @param[in] alpha double scalar multiplier @param[in] dval magmaDouble_ptr array containing values of A in SELLP @param[in] dcolind magmaIndex_ptr columnindices of A in SELLP @param[in] drowptr magmaIndex_ptr rowpointer of SELLP @param[in] dx magmaDouble_ptr input vector x @param[in] beta double scalar multiplier @param[out] dy magmaDouble_ptr input/output vector y @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_dblas ********************************************************************/ extern "C" magma_int_t magma_dgesellpmv( magma_trans_t transA, magma_int_t m, magma_int_t n, magma_int_t blocksize, magma_int_t slices, magma_int_t alignment, double alpha, magmaDouble_ptr dval, magmaIndex_ptr dcolind, magmaIndex_ptr drowptr, magmaDouble_ptr dx, double beta, magmaDouble_ptr dy, magma_queue_t queue ) { // using a 2D thread grid int num_threads = blocksize*alignment; magma_int_t arch = magma_getdevice_arch(); if ( arch < 200 && num_threads > 256 ) printf("error: too much shared memory requested.\n"); int dimgrid1 = min( int( sqrt( double( slices ))), 65535 ); int dimgrid2 = min(magma_ceildiv( slices, dimgrid1 ), 65535); int dimgrid3 = magma_ceildiv( slices, dimgrid1*dimgrid2 ); int num_tx = blocksize; int Ms = num_threads * sizeof( double ); // special case: alignment 1: if( alignment == 1 ){ Ms = 0; num_tx = 256; int num_blocks = magma_ceildiv( n, 256 ); dimgrid1 = num_blocks; //min( int( sqrt( double( num_blocks ))), 65535 ); dimgrid2 = 1; //magma_ceildiv( num_blocks, dimgrid1 ); dimgrid3 = 1; //blocksize = 256; } dim3 block( num_tx, alignment, 1); if( dimgrid3 > 65535 ){ printf("error: too many GPU thread blocks requested.\n"); } dim3 grid( dimgrid1, dimgrid2, 1); #if defined(PRECISION_d) && defined(TEXTURE) // Create channel. hipChannelFormatDesc channel_desc; channel_desc = hipCreateChannelDesc(32, 32, 0, 0, hipChannelFormatKindSigned); // Create resource descriptor. struct hipResourceDesc resDescdx; memset(&resDescdx, 0, sizeof(resDescdx)); resDescdx.resType = hipResourceTypeLinear; resDescdx.res.linear.devPtr = (void*)dx; resDescdx.res.linear.desc = channel_desc; resDescdx.res.linear.sizeInBytes = m*sizeof(double); // Specify texture object parameters. struct hipTextureDesc texDesc; memset(&texDesc, 0, sizeof(texDesc)); texDesc.addressMode[0] = hipAddressModeClamp; texDesc.filterMode = hipFilterModePoint; texDesc.readMode = hipReadModeElementType; // Create texture object. 
hipTextureObject_t texdx = 0; hipCreateTextureObject(&texdx, &resDescdx, &texDesc, NULL); hipDeviceSetSharedMemConfig(hipSharedMemBankSizeEightByte); if ( alignment == 1) { if (beta == MAGMA_D_ZERO) { hipLaunchKernelGGL(( zgesellptmv2d_kernel_1<true>), dim3(grid2), dim3(block), Ms, queue->cuda_stream() , m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, dx, beta, dy ); } else { hipLaunchKernelGGL(( zgesellptmv2d_kernel_1<false>), dim3(grid), dim3(block), Ms, queue->cuda_stream() , m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, dx, beta, dy ); } } else if ( alignment == 4){ if (beta == MAGMA_D_ZERO) { hipLaunchKernelGGL(( zgesellptmv2d_kernel_4_tex<true>), dim3(grid), dim3(block), Ms, queue->cuda_stream() , m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, texdx, beta, dy ); } else { hipLaunchKernelGGL(( zgesellptmv2d_kernel_4_tex<false>), dim3(grid), dim3(block), Ms, queue->cuda_stream() , m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, texdx, beta, dy ); } } else if ( alignment == 8){ if (beta == MAGMA_D_ZERO) { hipLaunchKernelGGL(( zgesellptmv2d_kernel_8_tex<true>), dim3(grid), dim3(block), Ms, queue->cuda_stream() , m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, texdx, beta, dy ); } else { hipLaunchKernelGGL(( zgesellptmv2d_kernel_8_tex<false>), dim3(grid), dim3(block), Ms, queue->cuda_stream() , m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, texdx, beta, dy ); } } else if ( alignment == 16){ if (beta == MAGMA_D_ZERO) { hipLaunchKernelGGL(( zgesellptmv2d_kernel_16_tex<true>), dim3(grid), dim3(block), Ms, queue->cuda_stream() , m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, texdx, beta, dy ); } else { hipLaunchKernelGGL(( zgesellptmv2d_kernel_16_tex<false>), dim3(grid), dim3(block), Ms, queue->cuda_stream() , m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, texdx, beta, dy ); } } else if ( alignment == 32){ if (beta == MAGMA_D_ZERO) { hipLaunchKernelGGL(( zgesellptmv2d_kernel_32_tex<true>), dim3(grid), dim3(block), Ms, queue->cuda_stream() , m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, texdx, beta, dy ); } else { hipLaunchKernelGGL(( zgesellptmv2d_kernel_32_tex<false>), dim3(grid), dim3(block), Ms, queue->cuda_stream() , m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, texdx, beta, dy ); } } else { printf("error: alignment %d not supported.\n", alignment); return MAGMA_ERR_NOT_SUPPORTED; } hipDestroyTextureObject(texdx); #else if ( alignment == 1) { if (beta == MAGMA_D_ZERO) { hipLaunchKernelGGL(( zgesellptmv2d_kernel_1<true>), dim3(grid), dim3(block), Ms, queue->cuda_stream() , m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, dx, beta, dy ); } else { hipLaunchKernelGGL(( zgesellptmv2d_kernel_1<false>), dim3(grid), dim3(block), Ms, queue->cuda_stream() , m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, dx, beta, dy ); } } else if ( alignment == 4){ if (beta == MAGMA_D_ZERO) { hipLaunchKernelGGL(( zgesellptmv2d_kernel_4<true>), dim3(grid), dim3(block), Ms, queue->cuda_stream() , m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, dx, beta, dy ); } else { hipLaunchKernelGGL(( zgesellptmv2d_kernel_4<false>), dim3(grid), dim3(block), Ms, queue->cuda_stream() , m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, dx, beta, dy ); } } else if ( alignment == 8){ if (beta == MAGMA_D_ZERO) { hipLaunchKernelGGL(( zgesellptmv2d_kernel_8<true>), dim3(grid), dim3(block), Ms, queue->cuda_stream() , m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, dx, 
beta, dy ); } else { hipLaunchKernelGGL(( zgesellptmv2d_kernel_8<false>), dim3(grid), dim3(block), Ms, queue->cuda_stream() , m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, dx, beta, dy ); } } else if ( alignment == 16){ if (beta == MAGMA_D_ZERO) { hipLaunchKernelGGL(( zgesellptmv2d_kernel_16<true>), dim3(grid), dim3(block), Ms, queue->cuda_stream() , m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, dx, beta, dy ); } else { hipLaunchKernelGGL(( zgesellptmv2d_kernel_16<false>), dim3(grid), dim3(block), Ms, queue->cuda_stream() , m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, dx, beta, dy ); } } else if ( alignment == 32){ if (beta == MAGMA_D_ZERO) { hipLaunchKernelGGL(( zgesellptmv2d_kernel_32<true>), dim3(grid), dim3(block), Ms, queue->cuda_stream() , m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, dx, beta, dy ); } else { hipLaunchKernelGGL(( zgesellptmv2d_kernel_32<false>), dim3(grid), dim3(block), Ms, queue->cuda_stream() , m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, dx, beta, dy ); } } else { printf("error: alignment %d not supported.\n", int(alignment) ); return MAGMA_ERR_NOT_SUPPORTED; } #endif return MAGMA_SUCCESS; }
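The SELL-P kernels above all rely on the same slice layout: blocksize consecutive rows form a slice, each slice is stored column-major and padded to the length of its widest row, and drowptr holds one offset per slice. The host-side sketch below mirrors the indexing of zgesellptmv2d_kernel_1 (one thread per row), under the assumption that padding entries carry a zero value and a valid column index so they contribute nothing to the dot product; it is illustrative only, not MAGMA code.

#include <vector>

// y = alpha * A * x + beta * y for a matrix stored in SELL-P slices.
void sellp_spmv_host(int num_rows, int blocksize,
                     const std::vector<double>& dval,
                     const std::vector<int>&    dcolind,
                     const std::vector<int>&    drowptr,  // one offset per slice, plus the end offset
                     const std::vector<double>& x,
                     double alpha, double beta,
                     std::vector<double>&       y)
{
    for (int row = 0; row < num_rows; ++row) {
        int slice  = row / blocksize;
        int lrow   = row % blocksize;                             // local row inside the slice
        int offset = drowptr[slice];
        int width  = (drowptr[slice + 1] - offset) / blocksize;   // padded row length of this slice
        double dot = 0.0;
        for (int n = 0; n < width; ++n) {
            // consecutive elements of one row are blocksize entries apart (column-major slice)
            double v = dval[offset + blocksize * n + lrow];
            int    c = dcolind[offset + blocksize * n + lrow];
            dot += v * x[c];
        }
        y[row] = alpha * dot + beta * y[row];
    }
}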
8b85f291e1f951fd913359b53c20fae10a2fd36e.cu
/* -- MAGMA (version 2.1.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date August 2016 @generated from sparse-iter/blas/zgesellcmmv.cu, normal z -> d, Tue Aug 30 09:38:42 2016 */ #include "magmasparse_internal.h" #define PRECISION_d //#define TEXTURE // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning one thread to each row - 1D kernel template<bool betazero> __global__ void zgesellptmv2d_kernel_1( int num_rows, int num_cols, int blocksize, int T, double alpha, const double * __restrict__ dval, const magma_index_t * __restrict__ dcolind, const magma_index_t * __restrict__ drowptr, const double *__restrict__ dx, double beta, double * __restrict__ dy) { // threads assigned to rows //int Idx = blockDim.x * blockIdx.x + threadIdx.x; //int offset = drowptr[ blockIdx.x ]; //int border = (drowptr[ blockIdx.x+1 ]-offset)/blocksize; // T threads assigned to each row int idx = threadIdx.x; // local row int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * 256 + idx; // global row index // int lblocksize = ( row + blocksize < num_rows) ? blocksize : ( num_rows - blocksize * (row/blocksize) ); int lrow = threadIdx.x%blocksize; // local row; if( row < num_rows ) { int offset = drowptr[ row/blocksize ]; int border = (drowptr[ row/blocksize+1 ]-offset)/blocksize; double dot = MAGMA_D_MAKE(0.0, 0.0); for ( int n = 0; n < border; n++) { int col = dcolind [ offset+ blocksize * n + lrow ]; double val = dval[ offset+ blocksize * n + lrow ]; dot = dot + val * dx [ col ]; } if (betazero) { dy[ row ] = dot * alpha; } else { dy[ row ] = dot * alpha + beta * dy [ row ]; } } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel template<bool betazero> __global__ void zgesellptmv2d_kernel_4( int num_rows, int num_cols, int blocksize, int T, double alpha, double * dval, magma_index_t * dcolind, magma_index_t * drowptr, double * dx, double beta, double * dy) { // T threads assigned to each row int idx = threadIdx.y; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ double shared[]; if(row < num_rows ) { double dot = MAGMA_D_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (drowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles int kk, i1, i2; double x1, x2, v1, v2; dcolind += offset + ldx; dval += offset + ldx; for ( kk = 0; kk < max_-1; kk+=2 ) { i1 = dcolind[ block*kk]; i2 = dcolind[ block*kk + block]; x1 = dx[ i1 ]; x2 = dx[ i2 ]; v1 = dval[ block*kk ]; v2 = dval[ block*kk + block]; dot += v1 * x1; dot += v2 * x2; } if (kk<max_) { x1 = dx[ dcolind[ block*kk] ]; v1 = dval[ block*kk ]; dot += v1 * x1; } shared[ldx] = dot; __syncthreads(); if( idx < 2 ) { shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { if (betazero) { dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha; } else { dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row]; } } } } } // SELLP SpMV kernel // see paper by M. 
KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel template<bool betazero> __global__ void zgesellptmv2d_kernel_8( int num_rows, int num_cols, int blocksize, int T, double alpha, double * dval, magma_index_t * dcolind, magma_index_t * drowptr, double * dx, double beta, double * dy) { // T threads assigned to each row int idx = threadIdx.y; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ double shared[]; if(row < num_rows ) { double dot = MAGMA_D_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (drowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles int kk, i1, i2; double x1, x2, v1, v2; dcolind += offset + ldx; dval += offset + ldx; for ( kk = 0; kk < max_-1; kk+=2 ) { i1 = dcolind[ block*kk]; i2 = dcolind[ block*kk + block]; x1 = dx[ i1 ]; x2 = dx[ i2 ]; v1 = dval[ block*kk ]; v2 = dval[ block*kk + block]; dot += v1 * x1; dot += v2 * x2; } if (kk<max_) { x1 = dx[ dcolind[ block*kk] ]; v1 = dval[ block*kk ]; dot += v1 * x1; } shared[ldx] = dot; __syncthreads(); if( idx < 4 ) { shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { if (betazero) { dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha; } else { dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row]; } } } } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel template<bool betazero> __global__ void zgesellptmv2d_kernel_16( int num_rows, int num_cols, int blocksize, int T, double alpha, double * dval, magma_index_t * dcolind, magma_index_t * drowptr, double * dx, double beta, double * dy) { // T threads assigned to each row int idx = threadIdx.y; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ double shared[]; if(row < num_rows ) { double dot = MAGMA_D_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (drowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_; k++ ) { double val = dval[ offset + ldx + block*k ]; int col = dcolind[ offset + ldx + block*k ]; dot += val * dx[ col ]; } shared[ldx] = dot; __syncthreads(); if( idx < 8 ) { shared[ldx]+=shared[ldx+blocksize*8]; __syncthreads(); if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { if (betazero) { dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha; } else { dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row]; } } } } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel template<bool betazero> __global__ void zgesellptmv2d_kernel_32( int num_rows, int num_cols, int blocksize, int T, double alpha, double * dval, magma_index_t * dcolind, magma_index_t * drowptr, double * dx, double beta, double * dy) { // T threads assigned to each row int idx = threadIdx.y; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ double shared[]; if(row < num_rows ) { double dot = MAGMA_D_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (drowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_; k++ ) { double val = dval[ offset + ldx + block*k ]; int col = dcolind[ offset + ldx + block*k ]; dot += val * dx[ col ]; } shared[ldx] = dot; __syncthreads(); if( idx < 16 ) { shared[ldx]+=shared[ldx+blocksize*16]; __syncthreads(); if( idx < 8 ) shared[ldx]+=shared[ldx+blocksize*8]; __syncthreads(); if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { if (betazero) { dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha; } else { dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row]; } } } } } /************************* same but using texture mem *************************/ #if defined(PRECISION_d) && defined(TEXTURE) __inline__ __device__ double read_from_tex( cudaTextureObject_t texdx, const int& i) { int2 temp = tex1Dfetch<int2>( texdx, i ); return __hiloint2double(temp.y,temp.x); } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel template<bool betazero> __global__ void zgesellptmv2d_kernel_4_tex( int num_rows, int num_cols, int blocksize, int T, double alpha, double * dval, magma_index_t * dcolind, magma_index_t * drowptr, cudaTextureObject_t texdx, double beta, double * dy) { // T threads assigned to each row int idx = threadIdx.y; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ double shared[]; if(row < num_rows ) { double dot = MAGMA_D_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (drowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles int kk, i1, i2; double x1, x2, v1, v2; dcolind += offset + ldx; dval += offset + ldx; for ( kk = 0; kk < max_-1; kk+=2 ) { i1 = dcolind[ block*kk]; i2 = dcolind[ block*kk + block]; x1 = read_from_tex( texdx, i1 ); x2 = read_from_tex( texdx, i2 ); v1 = dval[ block*kk ]; v2 = dval[ block*kk + block]; dot += v1 * x1; dot += v2 * x2; } if (kk<max_) { x1 = read_from_tex( texdx, dcolind[ block*kk] ); v1 = dval[ block*kk ]; dot += v1 * x1; } shared[ldx] = dot; __syncthreads(); if( idx < 2 ) { shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { if (betazero) { dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha; } else { dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row]; } } } } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel template<bool betazero> __global__ void zgesellptmv2d_kernel_8_tex( int num_rows, int num_cols, int blocksize, int T, double alpha, double * dval, magma_index_t * dcolind, magma_index_t * drowptr, cudaTextureObject_t texdx, double beta, double * dy) { // T threads assigned to each row int idx = threadIdx.y; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ double shared[]; if(row < num_rows ) { double dot = MAGMA_D_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (drowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles int kk, i1, i2; double x1, x2, v1, v2; dcolind += offset + ldx; dval += offset + ldx; for ( kk = 0; kk < max_-1; kk+=2 ) { i1 = dcolind[ block*kk]; i2 = dcolind[ block*kk + block]; x1 = read_from_tex( texdx, i1 ); x2 = read_from_tex( texdx, i2 ); v1 = dval[ block*kk ]; v2 = dval[ block*kk + block]; dot += v1 * x1; dot += v2 * x2; } if (kk<max_) { x1 = read_from_tex( texdx, dcolind[ block*kk] ); v1 = dval[ block*kk ]; dot += v1 * x1; } shared[ldx] = dot; __syncthreads(); if( idx < 4 ) { shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { if (betazero) { dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha; } else { dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row]; } } } } } // SELLP SpMV kernel // see paper by M. 
KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel template<bool betazero> __global__ void zgesellptmv2d_kernel_16_tex( int num_rows, int num_cols, int blocksize, int T, double alpha, double * dval, magma_index_t * dcolind, magma_index_t * drowptr, cudaTextureObject_t texdx, double beta, double * dy) { // T threads assigned to each row int idx = threadIdx.y; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ double shared[]; if(row < num_rows ) { double dot = MAGMA_D_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (drowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_; k++ ) { double val = dval[ offset + ldx + block*k ]; int col = dcolind[ offset + ldx + block*k ]; dot += val * read_from_tex( texdx, col ); } shared[ldx] = dot; __syncthreads(); if( idx < 8 ) { shared[ldx]+=shared[ldx+blocksize*8]; __syncthreads(); if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { if (betazero) { dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha; } else { dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row]; } } } } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel template<bool betazero> __global__ void zgesellptmv2d_kernel_32_tex( int num_rows, int num_cols, int blocksize, int T, double alpha, double * dval, magma_index_t * dcolind, magma_index_t * drowptr, cudaTextureObject_t texdx, double beta, double * dy) { // T threads assigned to each row int idx = threadIdx.y; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ double shared[]; if(row < num_rows ) { double dot = MAGMA_D_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (drowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_; k++ ) { double val = dval[ offset + ldx + block*k ]; int col = dcolind[ offset + ldx + block*k ]; dot += val * read_from_tex( texdx, col ); } shared[ldx] = dot; __syncthreads(); if( idx < 16 ) { shared[ldx]+=shared[ldx+blocksize*16]; __syncthreads(); if( idx < 8 ) shared[ldx]+=shared[ldx+blocksize*8]; __syncthreads(); if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { if (betazero) { dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha; } else { dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row]; } } } } } #endif /********************* end of texture versions **************************/ /** Purpose ------- This routine computes y = alpha * A^t * x + beta * y on the GPU. Input format is SELLP. 
Arguments --------- @param[in] transA magma_trans_t transposition parameter for A @param[in] m magma_int_t number of rows in A @param[in] n magma_int_t number of columns in A @param[in] blocksize magma_int_t number of rows in one ELL-slice @param[in] slices magma_int_t number of slices in matrix @param[in] alignment magma_int_t number of threads assigned to one row @param[in] alpha double scalar multiplier @param[in] dval magmaDouble_ptr array containing values of A in SELLP @param[in] dcolind magmaIndex_ptr columnindices of A in SELLP @param[in] drowptr magmaIndex_ptr rowpointer of SELLP @param[in] dx magmaDouble_ptr input vector x @param[in] beta double scalar multiplier @param[out] dy magmaDouble_ptr input/output vector y @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_dblas ********************************************************************/ extern "C" magma_int_t magma_dgesellpmv( magma_trans_t transA, magma_int_t m, magma_int_t n, magma_int_t blocksize, magma_int_t slices, magma_int_t alignment, double alpha, magmaDouble_ptr dval, magmaIndex_ptr dcolind, magmaIndex_ptr drowptr, magmaDouble_ptr dx, double beta, magmaDouble_ptr dy, magma_queue_t queue ) { // using a 2D thread grid int num_threads = blocksize*alignment; magma_int_t arch = magma_getdevice_arch(); if ( arch < 200 && num_threads > 256 ) printf("error: too much shared memory requested.\n"); int dimgrid1 = min( int( sqrt( double( slices ))), 65535 ); int dimgrid2 = min(magma_ceildiv( slices, dimgrid1 ), 65535); int dimgrid3 = magma_ceildiv( slices, dimgrid1*dimgrid2 ); int num_tx = blocksize; int Ms = num_threads * sizeof( double ); // special case: alignment 1: if( alignment == 1 ){ Ms = 0; num_tx = 256; int num_blocks = magma_ceildiv( n, 256 ); dimgrid1 = num_blocks; //min( int( sqrt( double( num_blocks ))), 65535 ); dimgrid2 = 1; //magma_ceildiv( num_blocks, dimgrid1 ); dimgrid3 = 1; //blocksize = 256; } dim3 block( num_tx, alignment, 1); if( dimgrid3 > 65535 ){ printf("error: too many GPU thread blocks requested.\n"); } dim3 grid( dimgrid1, dimgrid2, 1); #if defined(PRECISION_d) && defined(TEXTURE) // Create channel. cudaChannelFormatDesc channel_desc; channel_desc = cudaCreateChannelDesc(32, 32, 0, 0, cudaChannelFormatKindSigned); // Create resource descriptor. struct cudaResourceDesc resDescdx; memset(&resDescdx, 0, sizeof(resDescdx)); resDescdx.resType = cudaResourceTypeLinear; resDescdx.res.linear.devPtr = (void*)dx; resDescdx.res.linear.desc = channel_desc; resDescdx.res.linear.sizeInBytes = m*sizeof(double); // Specify texture object parameters. struct cudaTextureDesc texDesc; memset(&texDesc, 0, sizeof(texDesc)); texDesc.addressMode[0] = cudaAddressModeClamp; texDesc.filterMode = cudaFilterModePoint; texDesc.readMode = cudaReadModeElementType; // Create texture object. 
cudaTextureObject_t texdx = 0; cudaCreateTextureObject(&texdx, &resDescdx, &texDesc, NULL); cudaDeviceSetSharedMemConfig(cudaSharedMemBankSizeEightByte); if ( alignment == 1) { if (beta == MAGMA_D_ZERO) { zgesellptmv2d_kernel_1<true><<< grid2, block, Ms, queue->cuda_stream() >>> ( m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, dx, beta, dy ); } else { zgesellptmv2d_kernel_1<false><<< grid, block, Ms, queue->cuda_stream() >>> ( m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, dx, beta, dy ); } } else if ( alignment == 4){ if (beta == MAGMA_D_ZERO) { zgesellptmv2d_kernel_4_tex<true><<< grid, block, Ms, queue->cuda_stream() >>> ( m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, texdx, beta, dy ); } else { zgesellptmv2d_kernel_4_tex<false><<< grid, block, Ms, queue->cuda_stream() >>> ( m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, texdx, beta, dy ); } } else if ( alignment == 8){ if (beta == MAGMA_D_ZERO) { zgesellptmv2d_kernel_8_tex<true><<< grid, block, Ms, queue->cuda_stream() >>> ( m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, texdx, beta, dy ); } else { zgesellptmv2d_kernel_8_tex<false><<< grid, block, Ms, queue->cuda_stream() >>> ( m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, texdx, beta, dy ); } } else if ( alignment == 16){ if (beta == MAGMA_D_ZERO) { zgesellptmv2d_kernel_16_tex<true><<< grid, block, Ms, queue->cuda_stream() >>> ( m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, texdx, beta, dy ); } else { zgesellptmv2d_kernel_16_tex<false><<< grid, block, Ms, queue->cuda_stream() >>> ( m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, texdx, beta, dy ); } } else if ( alignment == 32){ if (beta == MAGMA_D_ZERO) { zgesellptmv2d_kernel_32_tex<true><<< grid, block, Ms, queue->cuda_stream() >>> ( m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, texdx, beta, dy ); } else { zgesellptmv2d_kernel_32_tex<false><<< grid, block, Ms, queue->cuda_stream() >>> ( m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, texdx, beta, dy ); } } else { printf("error: alignment %d not supported.\n", alignment); return MAGMA_ERR_NOT_SUPPORTED; } cudaDestroyTextureObject(texdx); #else if ( alignment == 1) { if (beta == MAGMA_D_ZERO) { zgesellptmv2d_kernel_1<true><<< grid, block, Ms, queue->cuda_stream() >>> ( m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, dx, beta, dy ); } else { zgesellptmv2d_kernel_1<false><<< grid, block, Ms, queue->cuda_stream() >>> ( m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, dx, beta, dy ); } } else if ( alignment == 4){ if (beta == MAGMA_D_ZERO) { zgesellptmv2d_kernel_4<true><<< grid, block, Ms, queue->cuda_stream() >>> ( m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, dx, beta, dy ); } else { zgesellptmv2d_kernel_4<false><<< grid, block, Ms, queue->cuda_stream() >>> ( m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, dx, beta, dy ); } } else if ( alignment == 8){ if (beta == MAGMA_D_ZERO) { zgesellptmv2d_kernel_8<true><<< grid, block, Ms, queue->cuda_stream() >>> ( m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, dx, beta, dy ); } else { zgesellptmv2d_kernel_8<false><<< grid, block, Ms, queue->cuda_stream() >>> ( m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, dx, beta, dy ); } } else if ( alignment == 16){ if (beta == MAGMA_D_ZERO) { zgesellptmv2d_kernel_16<true><<< grid, block, Ms, queue->cuda_stream() >>> ( m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, dx, beta, dy ); } else { 
zgesellptmv2d_kernel_16<false><<< grid, block, Ms, queue->cuda_stream() >>> ( m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, dx, beta, dy ); } } else if ( alignment == 32){ if (beta == MAGMA_D_ZERO) { zgesellptmv2d_kernel_32<true><<< grid, block, Ms, queue->cuda_stream() >>> ( m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, dx, beta, dy ); } else { zgesellptmv2d_kernel_32<false><<< grid, block, Ms, queue->cuda_stream() >>> ( m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, dx, beta, dy ); } } else { printf("error: alignment %d not supported.\n", int(alignment) ); return MAGMA_ERR_NOT_SUPPORTED; } #endif return MAGMA_SUCCESS; }
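// --------------------------------------------------------------------------
// Illustration only (not part of the MAGMA sources above): the
// zgesellptmv2d_* kernels assign T = alignment threads to every row of a
// SELL-P slice, accumulate partial dot products, and combine them with a
// shared-memory tree reduction. The self-contained sketch below shows the
// same pattern in simplified form; simple_sellp_spmv, its launch geometry,
// and the assumptions that T is a power of two and that each slice is padded
// to a multiple of blocksize*T entries are choices made for this example only.
// --------------------------------------------------------------------------
#include <cuda_runtime.h>

template <int T>   // T threads cooperate on one row (e.g. 8, 16 or 32)
__global__ void simple_sellp_spmv(
    int num_rows, int blocksize,
    const double *val, const int *colind, const int *rowptr,
    const double *x, double *y)
{
    extern __shared__ double partial[];
    int idy   = threadIdx.x;                 // local row inside the slice
    int idx   = threadIdx.y;                 // lane index within the row
    int ldx   = idx * blocksize + idy;
    int slice = blockIdx.x;
    int row   = slice * blocksize + idy;

    double dot = 0.0;
    if (row < num_rows) {
        int offset   = rowptr[slice];
        int per_pass = blocksize * T;        // slice entries consumed per sweep
        int passes   = (rowptr[slice + 1] - offset) / per_pass;
        for (int k = 0; k < passes; ++k) {
            int e = offset + ldx + per_pass * k;
            dot += val[e] * x[colind[e]];
        }
    }
    partial[ldx] = dot;
    __syncthreads();

    // tree reduction over the T partial sums belonging to this row
    for (int s = T / 2; s > 0; s >>= 1) {
        if (idx < s) partial[ldx] += partial[ldx + blocksize * s];
        __syncthreads();
    }
    if (idx == 0 && row < num_rows) y[row] = partial[ldx];
}

// launch sketch: one block per slice, blockDim = (blocksize, T),
// dynamic shared memory = blocksize * T * sizeof(double)
// simple_sellp_spmv<16><<< num_slices, dim3(blocksize, 16),
//                          blocksize * 16 * sizeof(double) >>>(
//     num_rows, blocksize, d_val, d_colind, d_rowptr, d_x, d_y);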
aae43b0b5e9321d102b4d2423078e91355a3f997.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <math.h> #include <hip/hip_runtime.h> #include <GL/glut.h> #include "../sequential/common.h" #include "render.h" #include "../sequential/compute-barneshut.h" float angle=0.0f; float lxcam=-60.0f,lzcam=-60.0f, lycam=-30.0f; float xcam=100.0f,zcam=100.0f,ycam=100.0f; float deltaAngle = 0.0f; int xOrigin = -1; int yOrigin = -1; float camera_speed = 10.0f; float camera_x = 0.0f, camera_y = 0.0f, camera_z = 0.0f; void display_tree(bnode* node){ queue* q = create_queue(1024); enqueue(q, node); glBegin(GL_LINES); while(q->size != 0){ bnode* curr = dequeue(q); glColor3f (0, 250, 250); glVertex3f(curr->min_x, curr->min_y, curr->min_z); glVertex3f(curr->max_x, curr->min_y, curr->min_z); glColor3f (0, 250, 250); glVertex3f(curr->min_x, curr->min_y, curr->max_z); glVertex3f(curr->max_x, curr->min_y, curr->max_z); glColor3f (0, 250, 250); glVertex3f(curr->min_x, curr->max_y, curr->min_z); glVertex3f(curr->max_x, curr->max_y, curr->min_z); glColor3f (0, 250, 250); glVertex3f(curr->min_x, curr->max_y, curr->max_z); glVertex3f(curr->max_x, curr->max_y, curr->max_z); glColor3f (250, 0, 0); glVertex3f(curr->min_x, curr->min_y, curr->min_z); glVertex3f(curr->min_x, curr->max_y, curr->min_z); glColor3f (250, 0, 0); glVertex3f(curr->min_x, curr->min_y, curr->max_z); glVertex3f(curr->min_x, curr->max_y, curr->max_z); glColor3f (250, 0, 0); glVertex3f(curr->max_x, curr->min_y, curr->min_z); glVertex3f(curr->max_x, curr->max_y, curr->min_z); glColor3f (250, 0, 0); glVertex3f(curr->max_x, curr->min_y, curr->max_z); glVertex3f(curr->max_x, curr->max_y, curr->max_z); glColor3f (0, 250, 0); glVertex3f(curr->min_x, curr->min_y, curr->min_z); glVertex3f(curr->min_x, curr->min_y, curr->max_z); glColor3f (0, 250, 0); glVertex3f(curr->min_x, curr->max_y, curr->min_z); glVertex3f(curr->min_x, curr->max_y, curr->max_z); glColor3f (0, 250, 0); glVertex3f(curr->max_x, curr->min_y, curr->min_z); glVertex3f(curr->max_x, curr->min_y, curr->max_z); glColor3f (0, 250, 0); glVertex3f(curr->max_x, curr->max_y, curr->min_z); glVertex3f(curr->max_x, curr->max_y, curr->max_z); if(curr->body == -2){ enqueue(q, curr->o0); enqueue(q, curr->o1); enqueue(q, curr->o2); enqueue(q, curr->o3); enqueue(q, curr->o4); enqueue(q, curr->o5); enqueue(q, curr->o6); enqueue(q, curr->o7); } } glEnd(); destruct_queue(q); } void draw_axis(){ glBegin(GL_LINES); glColor3f (255.0, 0.0, 0.0); glVertex3f(0.0, 0.0, 0.0); glVertex3f(10000000000000000.0, 0.0, 0.0); glColor3f (0.0, 255.0, 0.0); glVertex3f(0.0, 0.0, 0.0); glVertex3f(0.0, 10000000000000000.0, 0.0); glColor3f (0.0, 0.0, 255.0); glVertex3f(0.0, 0.0, 0.0); glVertex3f(0.0, 0.0, 10000000000000000.0); glColor3f (255.0, 0.0, 0.0); glVertex3f(0.0, 0.0, 0.0); glVertex3f(-10000000000000000.0, 0.0, 0.0); glColor3f (0, 255.0, 0.0); glVertex3f(0.0, 0.0, 0.0); glVertex3f(0.0, -10000000000000000.0, 0.0); glColor3f (0.0, 0.0, 255.0); glVertex3f(0.0, 0.0, 0.0); glVertex3f(0.0, 0.0, -10000000000000000.0); glEnd(); } void draw_body(int i){ glPushMatrix(); glColor3f(0, 1, 1); glTranslatef (x[i], y[i], z[i]); glutWireSphere(150, 16.0, 16.0); glPopMatrix(); } void timerfunc(int v) { glLoadIdentity(); gluLookAt(xcam,ycam, zcam, xcam+lxcam,ycam+lycam,zcam+lzcam, 0.0f,1.0f,0.0f); glutPostRedisplay(); glutTimerFunc(1, timerfunc, v); } void reshape(GLint w, GLint h) { glViewport(0, 0, w, h); glMatrixMode(GL_PROJECTION); glLoadIdentity(); gluPerspective(90.0, (GLfloat)w/(GLfloat)h, 1.0, 100000000000.0); 
glMatrixMode(GL_MODELVIEW); } void processSpecialKeys(int key, int xx, int yy) { switch (key) { case GLUT_KEY_LEFT : xcam += lxcam * camera_speed; ycam += lycam * camera_speed; break; case GLUT_KEY_RIGHT : xcam -= lxcam * camera_speed; ycam -= lycam * camera_speed; break; case GLUT_KEY_UP : xcam += lxcam * camera_speed; zcam += lzcam * camera_speed; break; case GLUT_KEY_DOWN : xcam -= lxcam * camera_speed; zcam -= lzcam * camera_speed; break; } } void mouseButton(int button, int state, int xcam, int ycam) { if (button == GLUT_LEFT_BUTTON) { if (state == GLUT_UP) { angle -= deltaAngle; xOrigin = -1; yOrigin = -1; } else { xOrigin = xcam; yOrigin = ycam; } } } void mouseMove(int xcam, int ycam) { if (xOrigin >= 0) { deltaAngle = (xcam - xOrigin) * 0.005f; lxcam = sin(angle - deltaAngle) * camera_speed; lzcam = -cos(angle - deltaAngle) * camera_speed; } if (yOrigin >= 0) { deltaAngle = (ycam - yOrigin) * 0.005f; lycam = tan(angle - deltaAngle) * camera_speed; } }
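// --------------------------------------------------------------------------
// Usage sketch (not part of the original renderer): this file only defines
// GLUT callbacks and drawing helpers, so a host program still has to register
// them. One possible wiring is shown below; render_scene, NUM_BODIES and the
// window title are placeholders invented for the example, and the real entry
// point lives elsewhere in the project.
// --------------------------------------------------------------------------
static void render_scene(void) {                   // placeholder display callback
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    draw_axis();
    // for (int i = 0; i < NUM_BODIES; i++) draw_body(i);  // body count comes from the simulation
    glutSwapBuffers();
}

static void setup_viewer(int argc, char **argv) {
    glutInit(&argc, argv);
    glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA | GLUT_DEPTH);
    glutInitWindowSize(800, 600);
    glutCreateWindow("barnes-hut viewer");         // assumed window title
    glEnable(GL_DEPTH_TEST);
    glutDisplayFunc(render_scene);
    glutReshapeFunc(reshape);                      // sets the perspective projection
    glutSpecialFunc(processSpecialKeys);           // arrow keys move the camera
    glutMouseFunc(mouseButton);                    // left button starts mouse-look
    glutMotionFunc(mouseMove);                     // dragging rotates the view
    glutTimerFunc(1, timerfunc, 0);                // timerfunc re-applies gluLookAt
    // glutMainLoop();                             // hands control to GLUT
}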
aae43b0b5e9321d102b4d2423078e91355a3f997.cu
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <cuda.h> #include <GL/glut.h> #include "../sequential/common.h" #include "render.h" #include "../sequential/compute-barneshut.h" float angle=0.0f; float lxcam=-60.0f,lzcam=-60.0f, lycam=-30.0f; float xcam=100.0f,zcam=100.0f,ycam=100.0f; float deltaAngle = 0.0f; int xOrigin = -1; int yOrigin = -1; float camera_speed = 10.0f; float camera_x = 0.0f, camera_y = 0.0f, camera_z = 0.0f; void display_tree(bnode* node){ queue* q = create_queue(1024); enqueue(q, node); glBegin(GL_LINES); while(q->size != 0){ bnode* curr = dequeue(q); glColor3f (0, 250, 250); glVertex3f(curr->min_x, curr->min_y, curr->min_z); glVertex3f(curr->max_x, curr->min_y, curr->min_z); glColor3f (0, 250, 250); glVertex3f(curr->min_x, curr->min_y, curr->max_z); glVertex3f(curr->max_x, curr->min_y, curr->max_z); glColor3f (0, 250, 250); glVertex3f(curr->min_x, curr->max_y, curr->min_z); glVertex3f(curr->max_x, curr->max_y, curr->min_z); glColor3f (0, 250, 250); glVertex3f(curr->min_x, curr->max_y, curr->max_z); glVertex3f(curr->max_x, curr->max_y, curr->max_z); glColor3f (250, 0, 0); glVertex3f(curr->min_x, curr->min_y, curr->min_z); glVertex3f(curr->min_x, curr->max_y, curr->min_z); glColor3f (250, 0, 0); glVertex3f(curr->min_x, curr->min_y, curr->max_z); glVertex3f(curr->min_x, curr->max_y, curr->max_z); glColor3f (250, 0, 0); glVertex3f(curr->max_x, curr->min_y, curr->min_z); glVertex3f(curr->max_x, curr->max_y, curr->min_z); glColor3f (250, 0, 0); glVertex3f(curr->max_x, curr->min_y, curr->max_z); glVertex3f(curr->max_x, curr->max_y, curr->max_z); glColor3f (0, 250, 0); glVertex3f(curr->min_x, curr->min_y, curr->min_z); glVertex3f(curr->min_x, curr->min_y, curr->max_z); glColor3f (0, 250, 0); glVertex3f(curr->min_x, curr->max_y, curr->min_z); glVertex3f(curr->min_x, curr->max_y, curr->max_z); glColor3f (0, 250, 0); glVertex3f(curr->max_x, curr->min_y, curr->min_z); glVertex3f(curr->max_x, curr->min_y, curr->max_z); glColor3f (0, 250, 0); glVertex3f(curr->max_x, curr->max_y, curr->min_z); glVertex3f(curr->max_x, curr->max_y, curr->max_z); if(curr->body == -2){ enqueue(q, curr->o0); enqueue(q, curr->o1); enqueue(q, curr->o2); enqueue(q, curr->o3); enqueue(q, curr->o4); enqueue(q, curr->o5); enqueue(q, curr->o6); enqueue(q, curr->o7); } } glEnd(); destruct_queue(q); } void draw_axis(){ glBegin(GL_LINES); glColor3f (255.0, 0.0, 0.0); glVertex3f(0.0, 0.0, 0.0); glVertex3f(10000000000000000.0, 0.0, 0.0); glColor3f (0.0, 255.0, 0.0); glVertex3f(0.0, 0.0, 0.0); glVertex3f(0.0, 10000000000000000.0, 0.0); glColor3f (0.0, 0.0, 255.0); glVertex3f(0.0, 0.0, 0.0); glVertex3f(0.0, 0.0, 10000000000000000.0); glColor3f (255.0, 0.0, 0.0); glVertex3f(0.0, 0.0, 0.0); glVertex3f(-10000000000000000.0, 0.0, 0.0); glColor3f (0, 255.0, 0.0); glVertex3f(0.0, 0.0, 0.0); glVertex3f(0.0, -10000000000000000.0, 0.0); glColor3f (0.0, 0.0, 255.0); glVertex3f(0.0, 0.0, 0.0); glVertex3f(0.0, 0.0, -10000000000000000.0); glEnd(); } void draw_body(int i){ glPushMatrix(); glColor3f(0, 1, 1); glTranslatef (x[i], y[i], z[i]); glutWireSphere(150, 16.0, 16.0); glPopMatrix(); } void timerfunc(int v) { glLoadIdentity(); gluLookAt(xcam,ycam, zcam, xcam+lxcam,ycam+lycam,zcam+lzcam, 0.0f,1.0f,0.0f); glutPostRedisplay(); glutTimerFunc(1, timerfunc, v); } void reshape(GLint w, GLint h) { glViewport(0, 0, w, h); glMatrixMode(GL_PROJECTION); glLoadIdentity(); gluPerspective(90.0, (GLfloat)w/(GLfloat)h, 1.0, 100000000000.0); glMatrixMode(GL_MODELVIEW); } void processSpecialKeys(int key, int xx, int yy) { switch 
(key) { case GLUT_KEY_LEFT : xcam += lxcam * camera_speed; ycam += lycam * camera_speed; break; case GLUT_KEY_RIGHT : xcam -= lxcam * camera_speed; ycam -= lycam * camera_speed; break; case GLUT_KEY_UP : xcam += lxcam * camera_speed; zcam += lzcam * camera_speed; break; case GLUT_KEY_DOWN : xcam -= lxcam * camera_speed; zcam -= lzcam * camera_speed; break; } } void mouseButton(int button, int state, int xcam, int ycam) { if (button == GLUT_LEFT_BUTTON) { if (state == GLUT_UP) { angle -= deltaAngle; xOrigin = -1; yOrigin = -1; } else { xOrigin = xcam; yOrigin = ycam; } } } void mouseMove(int xcam, int ycam) { if (xOrigin >= 0) { deltaAngle = (xcam - xOrigin) * 0.005f; lxcam = sin(angle - deltaAngle) * camera_speed; lzcam = -cos(angle - deltaAngle) * camera_speed; } if (yOrigin >= 0) { deltaAngle = (ycam - yOrigin) * 0.005f; lycam = tan(angle - deltaAngle) * camera_speed; } }
d0d8d2631d86b90dfbb5a9878668a665e618da75.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2014 @generated from zgeqr2x_gpu-v4.cu normal z -> c, Wed Sep 17 15:08:23 2014 */ #include "common_magma.h" #include "commonblas_c.h" // 512 is maximum number of threads for CUDA capability 1.x #define BLOCK_SIZE 512 /** Purpose ------- CGEQR2 computes a QR factorization of a complex m by n matrix A: A = Q * R. This expert routine requires two more arguments than the standard cgeqr2, namely, dT and ddA, explained below. The storage for A is also not as in the LAPACK's cgeqr2 routine (see below). The first is used to output the triangular n x n factor T of the block reflector used in the factorization. The second holds the diagonal nxn blocks of A, i.e., the diagonal submatrices of R. This routine implements the left looking QR. This version adds internal blocking. Arguments --------- @param[in] m INTEGER The number of rows of the matrix A. M >= 0. @param[in] n INTEGER The number of columns of the matrix A. N >= 0. @param[in,out] dA COMPLEX array, dimension (LDA,N) On entry, the m by n matrix A. On exit, the unitary matrix Q as a product of elementary reflectors (see Further Details). \n the elements on and above the diagonal of the array contain the min(m,n) by n upper trapezoidal matrix R (R is upper triangular if m >= n); the elements below the diagonal, with the array TAU, represent the unitary matrix Q as a product of elementary reflectors (see Further Details). @param[in] ldda INTEGER The leading dimension of the array A. LDA >= max(1,M). @param[out] dtau COMPLEX array, dimension (min(M,N)) The scalar factors of the elementary reflectors (see Further Details). @param[out] dT COMPLEX array, dimension N x N. Stores the triangular N x N factor T of the block reflector used in the factorization. The lower triangular part is 0. @param[out] ddA COMPLEX array, dimension N x N. Stores the elements of the upper N x N diagonal block of A. LAPACK stores this array in A. There are 0s below the diagonal. @param dwork (workspace) DOUBLE_PRECISION array, dimension (3 N) @param[out] info INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value Further Details --------------- The matrix Q is represented as a product of elementary reflectors Q = H(1) H(2) . . . H(k), where k = min(m,n). Each H(i) has the form H(i) = I - tau * v * v' where tau is a complex scalar, and v is a complex vector with v(1:i-1) = 0 and v(i) = 1; v(i+1:m) is stored on exit in A(i+1:m,i), and tau in TAU(i). 
@ingroup magma_cgeqrf_comp ********************************************************************/ extern "C" magma_int_t magma_cgeqr2x4_gpu( magma_int_t m, magma_int_t n, magmaFloatComplex *dA, magma_int_t ldda, magmaFloatComplex *dtau, magmaFloatComplex *dT, magmaFloatComplex *ddA, float *dwork, magma_int_t *info, magma_queue_t stream) { #define dA(i_,j_) (dA + (j_)*(ldda) + (i_)) #define dT(i_,j_) (dT + (j_)*(k) + (i_)) #define BS 32 magma_int_t i, k; float *dnorm = (float *)dwork; magmaFloatComplex *work = (magmaFloatComplex *)(dwork+2*n); magma_queue_t cstream; magmablasGetKernelStream(&cstream); magmablasSetKernelStream(stream); *info = 0; if (m < 0) { *info = -1; } else if (n < 0) { *info = -2; } else if (ldda < max(1,m)) { *info = -4; } if (*info != 0) { magma_xerbla( __func__, -(*info) ); return *info; } /* Compute the norms of the trailing columns */ k = min(m,n); magmablas_scnrm2_cols(m, k, dA(0,0), ldda, dnorm); for (magma_int_t b=0; b < k; b += BS) { for (i = b; i < min(k, b+BS); ++i) { /* Apply H' to A(:,i) from the left */ if (i-b > 0) { /* Compute the (i-1)th column of T */ if ( i-1 > 0 ) { hipLaunchKernelGGL(( magma_cgemv_kernel3), dim3(i-1), dim3(BLOCK_SIZE), 0, magma_stream , m-i+1, dA(i-1,0), ldda, dA(i-1, i-1), work, dtau+i-1); hipLaunchKernelGGL(( magma_ctrmv_kernel2), dim3(i-1), dim3(i-1), 0, magma_stream , dT(0,0), k, work, dT(0,i-1), dtau+i-1); } /* dwork = V' c */ hipLaunchKernelGGL(( magma_cgemv_kernel1), dim3(i-b), dim3(BLOCK_SIZE), 0, magma_stream , m-b, dA(b, b), ldda, dA(b,i), work); /* dwork = T' work */ hipLaunchKernelGGL(( magma_ctrmv_tkernel), dim3(i-b), dim3(i-b), 0, magma_stream , dT(b,b), k, work, work+i-b); /* c = c - V work */ if ( m-b > 0 ) { dim3 blocks3( (m-b + BLOCK_SIZE-1) / BLOCK_SIZE ); dim3 threads3( BLOCK_SIZE ); hipLaunchKernelGGL(( magma_cgemv_kernel2), dim3(blocks3), dim3(threads3), 0, magma_stream , m-b, i-b, dA(b,b), ldda, work+i-b, dA(b, i)); } } /* Adjust the dnorm[i] to hold the norm of A(i:m,i) */ if ( i > 0 ) { hipLaunchKernelGGL(( magma_scnrm2_adjust_kernel), dim3(1), dim3(i), 0, magma_stream , dnorm+i, dA(0, i)); } /* Generate elementary reflector H(i) to annihilate A(i+1:m,i) 1. 1 is not yet put on the diagonal of A 2. Elements above the diagonal are copied in ddA and the ones in A are set to zero 3. update T */ magma_clarfgx_gpu(m-i, dA(i, i), dA(min(i+1,m),i), dtau+i, dnorm+i, ddA + i + i*n, i); if (i == 0) { magmaFloatComplex tt = MAGMA_C_ONE; magmablas_clacpy(MagmaUpperLower, 1, 1, dtau, 1, dT(0,0), 1); magma_csetmatrix_async(1, 1, &tt, 1, dA(i, i), 1, magma_stream); } } if ( i-1 > 0 ) { hipLaunchKernelGGL(( magma_cgemv_kernel3), dim3(i-1), dim3(BLOCK_SIZE), 0, magma_stream , m-i+1, dA(i-1,0), ldda, dA(i-1, i-1), work, dtau+i-1); hipLaunchKernelGGL(( magma_ctrmv_kernel2), dim3(i-1), dim3(i-1), 0, magma_stream , dT(0,0), k, work, dT(0,i-1), dtau+i-1); } /* Apply the transformations to the trailing matrix. */ //magma_clarfb2_gpu( MagmaLeft, MagmaConjTrans, MagmaForward, MagmaColumnwise, magma_clarfb2_gpu( m-b, k-i, BS, dA(b, b), ldda, dT+b+b*k, k, dA(b, i), ldda, work, k-i); } magmablasSetKernelStream(cstream); return *info; } /* magma_cgeqr2 */
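// --------------------------------------------------------------------------
// Illustration only (not a MAGMA routine): the Further Details section above
// represents Q as a product of elementary reflectors H(i) = I - tau * v * v'.
// The host-side helper below applies one such reflector to a vector, which is
// the elementary operation the cgemv/ctrmv kernels assemble into the blocked
// update; apply_householder is a name chosen for this sketch.
// --------------------------------------------------------------------------
#include <complex>

// y := (I - tau * v * v^H) * y   for column vectors of length n
static void apply_householder(int n, std::complex<float> tau,
                              const std::complex<float> *v,
                              std::complex<float> *y)
{
    std::complex<float> dot(0.0f, 0.0f);           // dot = v^H * y
    for (int i = 0; i < n; ++i) dot += std::conj(v[i]) * y[i];
    for (int i = 0; i < n; ++i) y[i] -= tau * dot * v[i];
}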
d0d8d2631d86b90dfbb5a9878668a665e618da75.cu
/* -- MAGMA (version 1.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2014 @generated from zgeqr2x_gpu-v4.cu normal z -> c, Wed Sep 17 15:08:23 2014 */ #include "common_magma.h" #include "commonblas_c.h" // 512 is maximum number of threads for CUDA capability 1.x #define BLOCK_SIZE 512 /** Purpose ------- CGEQR2 computes a QR factorization of a complex m by n matrix A: A = Q * R. This expert routine requires two more arguments than the standard cgeqr2, namely, dT and ddA, explained below. The storage for A is also not as in the LAPACK's cgeqr2 routine (see below). The first is used to output the triangular n x n factor T of the block reflector used in the factorization. The second holds the diagonal nxn blocks of A, i.e., the diagonal submatrices of R. This routine implements the left looking QR. This version adds internal blocking. Arguments --------- @param[in] m INTEGER The number of rows of the matrix A. M >= 0. @param[in] n INTEGER The number of columns of the matrix A. N >= 0. @param[in,out] dA COMPLEX array, dimension (LDA,N) On entry, the m by n matrix A. On exit, the unitary matrix Q as a product of elementary reflectors (see Further Details). \n the elements on and above the diagonal of the array contain the min(m,n) by n upper trapezoidal matrix R (R is upper triangular if m >= n); the elements below the diagonal, with the array TAU, represent the unitary matrix Q as a product of elementary reflectors (see Further Details). @param[in] ldda INTEGER The leading dimension of the array A. LDA >= max(1,M). @param[out] dtau COMPLEX array, dimension (min(M,N)) The scalar factors of the elementary reflectors (see Further Details). @param[out] dT COMPLEX array, dimension N x N. Stores the triangular N x N factor T of the block reflector used in the factorization. The lower triangular part is 0. @param[out] ddA COMPLEX array, dimension N x N. Stores the elements of the upper N x N diagonal block of A. LAPACK stores this array in A. There are 0s below the diagonal. @param dwork (workspace) DOUBLE_PRECISION array, dimension (3 N) @param[out] info INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value Further Details --------------- The matrix Q is represented as a product of elementary reflectors Q = H(1) H(2) . . . H(k), where k = min(m,n). Each H(i) has the form H(i) = I - tau * v * v' where tau is a complex scalar, and v is a complex vector with v(1:i-1) = 0 and v(i) = 1; v(i+1:m) is stored on exit in A(i+1:m,i), and tau in TAU(i). 
@ingroup magma_cgeqrf_comp ********************************************************************/ extern "C" magma_int_t magma_cgeqr2x4_gpu( magma_int_t m, magma_int_t n, magmaFloatComplex *dA, magma_int_t ldda, magmaFloatComplex *dtau, magmaFloatComplex *dT, magmaFloatComplex *ddA, float *dwork, magma_int_t *info, magma_queue_t stream) { #define dA(i_,j_) (dA + (j_)*(ldda) + (i_)) #define dT(i_,j_) (dT + (j_)*(k) + (i_)) #define BS 32 magma_int_t i, k; float *dnorm = (float *)dwork; magmaFloatComplex *work = (magmaFloatComplex *)(dwork+2*n); magma_queue_t cstream; magmablasGetKernelStream(&cstream); magmablasSetKernelStream(stream); *info = 0; if (m < 0) { *info = -1; } else if (n < 0) { *info = -2; } else if (ldda < max(1,m)) { *info = -4; } if (*info != 0) { magma_xerbla( __func__, -(*info) ); return *info; } /* Compute the norms of the trailing columns */ k = min(m,n); magmablas_scnrm2_cols(m, k, dA(0,0), ldda, dnorm); for (magma_int_t b=0; b < k; b += BS) { for (i = b; i < min(k, b+BS); ++i) { /* Apply H' to A(:,i) from the left */ if (i-b > 0) { /* Compute the (i-1)th column of T */ if ( i-1 > 0 ) { magma_cgemv_kernel3<<< i-1, BLOCK_SIZE, 0, magma_stream >>> ( m-i+1, dA(i-1,0), ldda, dA(i-1, i-1), work, dtau+i-1); magma_ctrmv_kernel2<<< i-1, i-1, 0, magma_stream >>> ( dT(0,0), k, work, dT(0,i-1), dtau+i-1); } /* dwork = V' c */ magma_cgemv_kernel1<<< i-b, BLOCK_SIZE, 0, magma_stream >>> (m-b, dA(b, b), ldda, dA(b,i), work); /* dwork = T' work */ magma_ctrmv_tkernel<<< i-b, i-b, 0, magma_stream >>> (dT(b,b), k, work, work+i-b); /* c = c - V work */ if ( m-b > 0 ) { dim3 blocks3( (m-b + BLOCK_SIZE-1) / BLOCK_SIZE ); dim3 threads3( BLOCK_SIZE ); magma_cgemv_kernel2<<< blocks3, threads3, 0, magma_stream >>> (m-b, i-b, dA(b,b), ldda, work+i-b, dA(b, i)); } } /* Adjust the dnorm[i] to hold the norm of A(i:m,i) */ if ( i > 0 ) { magma_scnrm2_adjust_kernel<<< 1, i, 0, magma_stream >>>(dnorm+i, dA(0, i)); } /* Generate elementary reflector H(i) to annihilate A(i+1:m,i) 1. 1 is not yet put on the diagonal of A 2. Elements above the diagonal are copied in ddA and the ones in A are set to zero 3. update T */ magma_clarfgx_gpu(m-i, dA(i, i), dA(min(i+1,m),i), dtau+i, dnorm+i, ddA + i + i*n, i); if (i == 0) { magmaFloatComplex tt = MAGMA_C_ONE; magmablas_clacpy(MagmaUpperLower, 1, 1, dtau, 1, dT(0,0), 1); magma_csetmatrix_async(1, 1, &tt, 1, dA(i, i), 1, magma_stream); } } if ( i-1 > 0 ) { magma_cgemv_kernel3<<< i-1, BLOCK_SIZE, 0, magma_stream >>> ( m-i+1, dA(i-1,0), ldda, dA(i-1, i-1), work, dtau+i-1); magma_ctrmv_kernel2<<< i-1, i-1, 0, magma_stream >>> ( dT(0,0), k, work, dT(0,i-1), dtau+i-1); } /* Apply the transformations to the trailing matrix. */ //magma_clarfb2_gpu( MagmaLeft, MagmaConjTrans, MagmaForward, MagmaColumnwise, magma_clarfb2_gpu( m-b, k-i, BS, dA(b, b), ldda, dT+b+b*k, k, dA(b, i), ldda, work, k-i); } magmablasSetKernelStream(cstream); return *info; } /* magma_cgeqr2 */
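// --------------------------------------------------------------------------
// Side-by-side launch syntax (illustration): this .cu file and its .hip
// counterpart above contain the same routine; the systematic difference is
// how hipify rewrites kernel launches. launch_demo_kernel below is a
// placeholder kernel written only to show the mapping.
// --------------------------------------------------------------------------
#include <cuda_runtime.h>

__global__ void launch_demo_kernel(int *out) { out[0] = 42; }

static void launch_demo(int *d_out, cudaStream_t stream)
{
    dim3 grid(1), block(1);
    // CUDA triple-chevron form, as used throughout this .cu file:
    launch_demo_kernel<<< grid, block, 0, stream >>>(d_out);
    // hipify-generated equivalent, as seen in the .hip file above:
    // hipLaunchKernelGGL(( launch_demo_kernel), dim3(grid), dim3(block), 0, stream, d_out);
}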
004d71b963ecfc8db914792104f0be4de67353f0.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "meanMatrix.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; double *dMatrix = NULL; hipMalloc(&dMatrix, XSIZE*YSIZE); double *dMean = NULL; hipMalloc(&dMean, XSIZE*YSIZE); int dSize = XSIZE*YSIZE; int *d_mutex = NULL; hipMalloc(&d_mutex, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( meanMatrix), dim3(gridBlock),dim3(threadBlock), 0, 0, dMatrix,dMean,dSize,d_mutex); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( meanMatrix), dim3(gridBlock),dim3(threadBlock), 0, 0, dMatrix,dMean,dSize,d_mutex); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( meanMatrix), dim3(gridBlock),dim3(threadBlock), 0, 0, dMatrix,dMean,dSize,d_mutex); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
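// --------------------------------------------------------------------------
// Sketch (not part of the generated benchmark): the two while-loops above
// round XSIZE/YSIZE up to multiples of the block dimensions; the ceil-division
// helper below sizes the grid equivalently. Note also that hipMalloc takes a
// byte count, so XSIZE*YSIZE as the size argument allocates bytes rather than
// doubles; for a full matrix of doubles one would expect
// XSIZE*YSIZE*sizeof(double). This is an observation about the harness, not a
// change to it.
// --------------------------------------------------------------------------
static dim3 make_grid(int xsize, int ysize, int blockx, int blocky)
{
    // ceil-division: smallest grid covering xsize x ysize with blockx x blocky blocks
    return dim3((xsize + blockx - 1) / blockx, (ysize + blocky - 1) / blocky);
}

// size_t matrix_bytes = (size_t)XSIZE * (size_t)YSIZE * sizeof(double);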
004d71b963ecfc8db914792104f0be4de67353f0.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "meanMatrix.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; double *dMatrix = NULL; cudaMalloc(&dMatrix, XSIZE*YSIZE); double *dMean = NULL; cudaMalloc(&dMean, XSIZE*YSIZE); int dSize = XSIZE*YSIZE; int *d_mutex = NULL; cudaMalloc(&d_mutex, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); meanMatrix<<<gridBlock,threadBlock>>>(dMatrix,dMean,dSize,d_mutex); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { meanMatrix<<<gridBlock,threadBlock>>>(dMatrix,dMean,dSize,d_mutex); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { meanMatrix<<<gridBlock,threadBlock>>>(dMatrix,dMean,dSize,d_mutex); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
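// --------------------------------------------------------------------------
// Timing sketch (not part of the generated benchmark): kernel launches are
// asynchronous, so reading steady_clock::now() right after the last launch in
// the loop above mostly measures enqueue overhead rather than kernel runtime.
// One alternative is to bracket the loop with CUDA events, as sketched below;
// time_kernel_ms is an illustrative helper, not part of this harness.
// --------------------------------------------------------------------------
#include <cuda_runtime.h>

template <typename LaunchFn>
static float time_kernel_ms(LaunchFn launch, int iterations)
{
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    for (int i = 0; i < iterations; ++i) launch();   // e.g. a lambda wrapping meanMatrix<<<...>>>(...)
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);                      // wait until the last kernel has finished
    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return ms / iterations;                          // average time per launch
}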
b4ee527efdd7336a97c4f3b66b4117ae95eb782a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /****************************************************************************** *cr *cr (C) Copyright 2010-2013 The Board of Trustees of the *cr University of Illinois *cr All Rights Reserved *cr ******************************************************************************/ #include <stdio.h> #define TILE_WIDTH_A 256 #define TILE_WIDTH_B 16 #define TILE_K (TILE_WIDTH_A/TILE_WIDTH_B) __global__ void mysgemm(int m, int n, int k, const float *A, const float *B, float* C) { /******************************************************************** * * Compute C = A x B * where A is a (m x k) matrix * where B is a (k x n) matrix * where C is a (m x n) matrix * * Use register and shared memory tiling and thread coarsening * * NOTE: A and C are column major, B is row major * ********************************************************************/ // Macros for accessing flattened matrices #define A(row,col) A[(row) + (col)*m] #define B(row,col) B[(row)*n + (col)] #define C(row,col) C[(row) + (col)*m] //tiling for B and output C __shared__ float shared_B[TILE_K][TILE_WIDTH_B]; float C_RT[TILE_WIDTH_B]; for(int i = 0; i<TILE_WIDTH_B;i++) C_RT[i] = 0.0; //Get block and thread idxs to load in tiles int by = blockIdx.y, bx = blockIdx.x, tx = threadIdx.x, ty = threadIdx.y; int b_col = tx + bx * TILE_WIDTH_B; // For every block y, ty ranges from 0-TILE_K. TILE_WIDTH_B*TILE_K = TILE_WIDTH_A // So every by should add TILE_WIDTH_A int a_row = tx + ty * TILE_WIDTH_B + by * TILE_WIDTH_A; int p_col_offset = bx * TILE_WIDTH_B; for (int i = 0; i < ceil(double(k)/double(TILE_K)); i++){//loop through all k tiles //each thread load in an element of B Tile int b_row = i * TILE_K + ty; if (b_row < k && b_col < n){ shared_B[ty][tx] = B(b_row,b_col); }else{ shared_B[ty][tx] = 0; } __syncthreads();//wait for threads to load into shared mem for (int j = 0; j < TILE_K; j++){ float a = 0; int a_col = i * TILE_K + j; if (a_col < k && a_row < m){ a = A(a_row,a_col); } for (int l = 0; l < TILE_WIDTH_B; l++){//compute partial multiplication C_RT[l] += a*shared_B[j][l]; } } __syncthreads();//wait for all threads to perform computations from b } for (int i = 0; i < TILE_WIDTH_B; i++) { if (a_row < m && i+p_col_offset < n){ C(a_row,i+p_col_offset) = C_RT[i]; } } } void basicSgemm(char transa, char transb, int m, int n, int k, float alpha, const float *A, int lda, const float *B, int ldb, float beta, float *C, int ldc) { if ((transa != 'N') && (transa != 'n')) { printf("unsupported value of 'transa'\n"); return; } if ((transb != 'T') && (transb != 't')) { printf("unsupported value of 'transb'\n"); return; } if ((alpha - 1.0f > 1e-10) || (alpha - 1.0f < -1e-10)) { printf("unsupported value of alpha\n"); return; } if ((beta - 0.0f > 1e-10) || (beta - 0.0f < -1e-10)) { printf("unsupported value of beta\n"); return; } // Initialize thread block and kernel grid dimensions --------------------- dim3 dimGrid(ceil(double(n)/double(TILE_WIDTH_B)),ceil(double(m)/double(TILE_K)),1); dim3 dimBlock(TILE_WIDTH_B,TILE_K,1); // Invoke CUDA kernel ----------------------------------------------------- hipLaunchKernelGGL(( mysgemm), dim3(dimGrid), dim3(dimBlock), 0, 0, m,n,k,A,B,C); }
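// --------------------------------------------------------------------------
// Validation sketch (not part of the original lab code): the kernel above
// assumes A and C are column-major and B is row-major. A straightforward host
// reference with the same layouts, useful for checking the tiled kernel's
// output, could look like this; sgemm_reference is an assumed helper name.
// --------------------------------------------------------------------------
static void sgemm_reference(int m, int n, int k,
                            const float *A,   // m x k, column-major: A[row + col*m]
                            const float *B,   // k x n, row-major:    B[row*n + col]
                            float *C)         // m x n, column-major: C[row + col*m]
{
    for (int col = 0; col < n; ++col)
        for (int row = 0; row < m; ++row) {
            float acc = 0.0f;
            for (int p = 0; p < k; ++p)
                acc += A[row + p * m] * B[p * n + col];
            C[row + col * m] = acc;
        }
}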
b4ee527efdd7336a97c4f3b66b4117ae95eb782a.cu
/****************************************************************************** *cr *cr (C) Copyright 2010-2013 The Board of Trustees of the *cr University of Illinois *cr All Rights Reserved *cr ******************************************************************************/ #include <stdio.h> #define TILE_WIDTH_A 256 #define TILE_WIDTH_B 16 #define TILE_K (TILE_WIDTH_A/TILE_WIDTH_B) __global__ void mysgemm(int m, int n, int k, const float *A, const float *B, float* C) { /******************************************************************** * * Compute C = A x B * where A is a (m x k) matrix * where B is a (k x n) matrix * where C is a (m x n) matrix * * Use register and shared memory tiling and thread coarsening * * NOTE: A and C are column major, B is row major * ********************************************************************/ // Macros for accessing flattened matrices #define A(row,col) A[(row) + (col)*m] #define B(row,col) B[(row)*n + (col)] #define C(row,col) C[(row) + (col)*m] //tiling for B and output C __shared__ float shared_B[TILE_K][TILE_WIDTH_B]; float C_RT[TILE_WIDTH_B]; for(int i = 0; i<TILE_WIDTH_B;i++) C_RT[i] = 0.0; //Get block and thread idxs to load in tiles int by = blockIdx.y, bx = blockIdx.x, tx = threadIdx.x, ty = threadIdx.y; int b_col = tx + bx * TILE_WIDTH_B; // For every block y, ty ranges from 0-TILE_K. TILE_WIDTH_B*TILE_K = TILE_WIDTH_A // So every by should add TILE_WIDTH_A int a_row = tx + ty * TILE_WIDTH_B + by * TILE_WIDTH_A; int p_col_offset = bx * TILE_WIDTH_B; for (int i = 0; i < ceil(double(k)/double(TILE_K)); i++){//loop through all k tiles //each thread load in an element of B Tile int b_row = i * TILE_K + ty; if (b_row < k && b_col < n){ shared_B[ty][tx] = B(b_row,b_col); }else{ shared_B[ty][tx] = 0; } __syncthreads();//wait for threads to load into shared mem for (int j = 0; j < TILE_K; j++){ float a = 0; int a_col = i * TILE_K + j; if (a_col < k && a_row < m){ a = A(a_row,a_col); } for (int l = 0; l < TILE_WIDTH_B; l++){//compute partial multiplication C_RT[l] += a*shared_B[j][l]; } } __syncthreads();//wait for all threads to perform computations from b } for (int i = 0; i < TILE_WIDTH_B; i++) { if (a_row < m && i+p_col_offset < n){ C(a_row,i+p_col_offset) = C_RT[i]; } } } void basicSgemm(char transa, char transb, int m, int n, int k, float alpha, const float *A, int lda, const float *B, int ldb, float beta, float *C, int ldc) { if ((transa != 'N') && (transa != 'n')) { printf("unsupported value of 'transa'\n"); return; } if ((transb != 'T') && (transb != 't')) { printf("unsupported value of 'transb'\n"); return; } if ((alpha - 1.0f > 1e-10) || (alpha - 1.0f < -1e-10)) { printf("unsupported value of alpha\n"); return; } if ((beta - 0.0f > 1e-10) || (beta - 0.0f < -1e-10)) { printf("unsupported value of beta\n"); return; } // Initialize thread block and kernel grid dimensions --------------------- dim3 dimGrid(ceil(double(n)/double(TILE_WIDTH_B)),ceil(double(m)/double(TILE_K)),1); dim3 dimBlock(TILE_WIDTH_B,TILE_K,1); // Invoke CUDA kernel ----------------------------------------------------- mysgemm<<<dimGrid, dimBlock>>>(m,n,k,A,B,C); }
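// --------------------------------------------------------------------------
// Launch-geometry observation (hedged, illustration only): each thread block
// above covers TILE_WIDTH_A (= 256) consecutive rows of C, since
// a_row = tx + ty*TILE_WIDTH_B + by*TILE_WIDTH_A, yet the grid is sized with
// ceil(m/TILE_K) row-blocks. The bounds checks keep the result correct, but a
// grid of ceil(m/TILE_WIDTH_A) row-blocks appears sufficient and avoids fully
// masked blocks. A sketch of that alternative sizing:
//
//   dim3 dimGrid((n + TILE_WIDTH_B - 1) / TILE_WIDTH_B,
//                (m + TILE_WIDTH_A - 1) / TILE_WIDTH_A, 1);
//   dim3 dimBlock(TILE_WIDTH_B, TILE_K, 1);   // unchanged: 16 x 16 threads
// --------------------------------------------------------------------------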
444092ce94e6f67090a2555287a0abf82989b53d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2017 Stanford, NVIDIA * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "model.h" #include "cuda_helper.h" Tensor FFModel::batch_norm(std::string name, Tensor input, bool relu) { assert(input.numDim == 4); //Only support 4D BN for now IndexSpaceT<3> task_is; BatchNorm *bn = new BatchNorm(name, config, input, task_is, relu); layers.push_back(bn); return bn->output; } /* locals[0] = scale locals[1] = bias */ BatchNorm::BatchNorm(std::string _name, FFConfig _config, Tensor _input, IndexSpaceT<3> _task_is, bool _relu) : Op(_name, _input), relu(_relu), profiling(_config.profiling) { Context ctx = _config.lg_ctx; HighLevelRuntime* runtime = _config.lg_hlr; Rect<3> part_rect = runtime->get_index_space_domain(ctx, task_is); num_replica = part_rect.volume(); // Create output tensor int output_w = _input.adim[0]; int output_h = _input.adim[1]; int output_nc = _input.adim[2] * _input.adim[3]; int num_par_w = part_rect.hi[0] - part_rect.lo[0] + 1; int num_par_h = part_rect.hi[1] - part_rect.lo[1] + 1; int num_par_n = part_rect.hi[2] - part_rect.lo[2] + 1; FieldSpace fs = _config.field_space; Rect<3, coord_t> output_rect(Point<3>(0, 0, 0), Point<3>(output_w-1, output_h-1, output_nc-1)); IndexSpaceT<3> output_is = runtime->create_index_space(ctx, output_rect); LogicalRegion output_lr = runtime->create_logical_region(ctx, output_is, fs); LogicalRegion output_grad_lr = runtime->create_logical_region(ctx, output_is, fs); Transform<3, 3, coord_t> trans; int extent_w = (output_w + num_par_w - 1) / num_par_w; int extent_h = (output_h + num_par_h - 1) / num_par_h; int extent_nc = output_nc / num_par_n; assert(output_nc % num_par_n == 0); Rect<3, coord_t> ext(Point<3>(0, 0, 0), Point<3>(extent_w-1, extent_h-1, extent_nc-1)); trans[0][0] = extent_w; trans[0][1] = 0; trans[0][2] = 0; trans[1][0] = 0; trans[1][1] = extent_h; trans[1][2] = 0; trans[2][0] = 0; trans[2][1] = 0; trans[2][2] = extent_nc; IndexPartition output_ip = runtime->create_partition_by_restriction(ctx, output_is, task_is, trans, ext); assert(runtime->is_index_partition_disjoint(ctx, output_ip)); assert(runtime->is_index_partition_complete(ctx, output_ip)); LogicalPartition output_lp = runtime->get_logical_partition(ctx, output_lr, output_ip); LogicalPartition output_grad_lp = runtime->get_logical_partition(ctx, output_grad_lr, output_ip); int bias_nc = num_replica * _input.adim[2]; /*input_channels*/ Rect<1, coord_t> bias_grad_rect(0, bias_nc - 1); Rect<1, coord_t> bias_rect(0, _input.adim[2] - 1); IndexSpaceT<1> bias_is = runtime->create_index_space(ctx, bias_rect); IndexSpaceT<1> bias_grad_is = runtime->create_index_space(ctx, bias_grad_rect); LogicalRegion bias_lr = runtime->create_logical_region(ctx, bias_is, fs); LogicalRegion scale_lr = runtime->create_logical_region(ctx, bias_is, fs); LogicalRegion bias_grad_lr = runtime->create_logical_region(ctx, bias_grad_is, fs); LogicalRegion scale_grad_lr = runtime->create_logical_region(ctx, bias_grad_is, 
fs); IndexPartition bias_grad_ip = runtime->create_equal_partition(ctx, bias_grad_is, task_is); LogicalPartition bias_grad_lp = runtime->get_logical_partition(ctx, bias_grad_lr, bias_grad_ip); LogicalPartition scale_grad_lp = runtime->get_logical_partition(ctx, scale_grad_lr, bias_grad_ip); Tensor scale_tensor, bias_tensor; scale_tensor.region = scale_lr; scale_tensor.region_grad = scale_grad_lr; scale_tensor.part = LogicalPartition::NO_PART; scale_tensor.part_grad = scale_grad_lp; locals[0] = scale_tensor; bias_tensor.region = bias_lr; bias_tensor.region_grad = bias_grad_lr; bias_tensor.part = LogicalPartition::NO_PART; bias_tensor.part_grad = bias_grad_lp; locals[1] = bias_tensor; numLocals = 2; output = _input; output.region = output_lr; output.part = output_lp; output.region_grad = output_grad_lr; output.part_grad = output_grad_lp; printf("Create bn layer: output(%d %d %d %d)\n", output.adim[3], output.adim[2], output.adim[1], output.adim[0]); input_lps[0] = _input.part; } /* regions[0]: input regions[1]: output regions[2](I): scale regions[3](I): bias */ __host__ OpMeta* BatchNorm::init_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { assert(regions.size() == 4); assert(task->regions.size() == 4); const BatchNorm* bm = (BatchNorm*) task->args; FFHandler handle = *((const FFHandler*) task->local_args); const AccessorRO<float, 3> acc_input(regions[0], FID_DATA); const AccessorWO<float, 3> acc_output(regions[1], FID_DATA); const AccessorRO<float, 1> acc_scale(regions[2], FID_DATA); const AccessorRO<float, 1> acc_bias(regions[3], FID_DATA); Rect<1> rect_scale, rect_bias; Rect<3> rect_input, rect_output; rect_input = runtime->get_index_space_domain(ctx, task->regions[0].region.get_index_space()); rect_output = runtime->get_index_space_domain(ctx, task->regions[1].region.get_index_space()); rect_scale = runtime->get_index_space_domain(ctx, task->regions[2].region.get_index_space()); rect_bias = runtime->get_index_space_domain(ctx, task->regions[3].region.get_index_space()); assert(acc_input.accessor.is_dense_arbitrary(rect_input)); assert(acc_output.accessor.is_dense_arbitrary(rect_output)); assert(acc_scale.accessor.is_dense_arbitrary(rect_scale)); assert(acc_bias.accessor.is_dense_arbitrary(rect_bias)); const float *input_ptr = acc_input.ptr(rect_input.lo); float *output_ptr = acc_output.ptr(rect_output.lo); const float *scale_ptr = acc_scale.ptr(rect_scale.lo); const float *bias_ptr = acc_bias.ptr(rect_bias.lo); BatchNormMeta* m = new BatchNormMeta(handle); #ifndef DISABLE_COMPUTATION m->relu = bm->relu; m->mode = CUDNN_BATCHNORM_SPATIAL; #if CUDNN_VERSION >= 7000 m->mode = CUDNN_BATCHNORM_SPATIAL_PERSISTENT; #endif checkCUDNN(cudnnCreateTensorDescriptor(&m->inputTensor)); checkCUDNN(cudnnCreateTensorDescriptor(&m->outputTensor)); checkCUDNN(cudnnCreateTensorDescriptor(&m->biasTensor)); assert(rect_input == rect_output); int input_w = rect_input.hi[0] - rect_input.lo[0] + 1; int input_h = rect_input.hi[1] - rect_input.lo[1] + 1; int channel = bm->inputs[0].pdim[2]; checkCUDNN(cudnnSetTensor4dDescriptor(m->inputTensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, bm->inputs[0].pdim[3], channel, input_h, input_w)); checkCUDNN(cudnnSetTensor4dDescriptor(m->outputTensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, bm->inputs[0].pdim[3], channel, input_h, input_w)); checkCUDNN(cudnnSetTensor4dDescriptor(m->biasTensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, channel, 1, 1)); //float *runningMean, *runningVar, *saveMean, *saveVar; 
checkCUDA(hipMalloc(&m->runningMean, sizeof(float) * channel)); checkCUDA(hipMalloc(&m->runningVar, sizeof(float) * channel)); checkCUDA(hipMalloc(&m->saveMean, sizeof(float) * channel)); checkCUDA(hipMalloc(&m->saveVar, sizeof(float) * channel)); if (m->relu) { checkCUDNN(cudnnCreateActivationDescriptor(&m->actiDesc)); checkCUDNN(cudnnSetActivationDescriptor(m->actiDesc, CUDNN_ACTIVATION_RELU, CUDNN_PROPAGATE_NAN, 0.0)); } #endif return m; } /* regions[0](O): scale, initilized to ones regions[1](O): bias, initilized to zeros */ __host__ void BatchNorm::init_para_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { assert(regions.size() == 2); assert(task->regions.size() == 2); //const BatchNorm* bm = (BatchNorm*) task->args; const AccessorWO<float, 1> acc_scale(regions[0], FID_DATA); const AccessorWO<float, 1> acc_bias(regions[1], FID_DATA); Rect<1> rect_scale, rect_bias; rect_scale = runtime->get_index_space_domain(ctx, task->regions[0].region.get_index_space()); rect_bias = runtime->get_index_space_domain(ctx, task->regions[1].region.get_index_space()); assert(acc_scale.accessor.is_dense_arbitrary(rect_scale)); assert(acc_bias.accessor.is_dense_arbitrary(rect_bias)); float *scale_ptr = acc_scale.ptr(rect_scale.lo); float *bias_ptr = acc_bias.ptr(rect_bias.lo); // init kernel and bias #ifdef PARAMETER_ALL_ONES hipLaunchKernelGGL(( ones_kernel), dim3(GET_BLOCKS(rect_scale.volume())), dim3(CUDA_NUM_THREADS), 0, 0, scale_ptr, rect_scale.volume()); hipLaunchKernelGGL(( ones_kernel), dim3(GET_BLOCKS(rect_bias.volume())), dim3(CUDA_NUM_THREADS), 0, 0, bias_ptr, rect_bias.volume()); #else //hipStream_t stream; //checkCUDA(hipStreamCreate(&stream)); //hiprandGenerator_t genGPU; //hiprandCreateGenerator(&genGPU, HIPRAND_RNG_PSEUDO_DEFAULT); //hiprandSetStream(genGPU, stream); //hiprandSetPseudoRandomGeneratorSeed(genGPU, 1234ULL); //hiprandGenerateUniform(genGPU, scale_ptr, rect_scale.volume()); hipLaunchKernelGGL(( assign_kernel), dim3(GET_BLOCKS(rect_scale.volume())), dim3(CUDA_NUM_THREADS), 0, 0, scale_ptr, rect_scale.volume(), 1.0f); hipLaunchKernelGGL(( assign_kernel), dim3(GET_BLOCKS(rect_bias.volume())), dim3(CUDA_NUM_THREADS), 0, 0, bias_ptr, rect_bias.volume(), 0.0f); //hiprandDestroyGenerator(genGPU); #endif } __host__ void BatchNorm::init(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; // First we initialize the scale and bias parameters { TaskLauncher para_launcher(BATCHNORM_INIT_PARA_TASK_ID, TaskArgument(NULL, 0)); para_launcher.add_region_requirement( RegionRequirement(locals[0].region, WRITE_DISCARD, EXCLUSIVE, locals[0].region)); para_launcher.add_field(0, FID_DATA); para_launcher.add_region_requirement( RegionRequirement(locals[1].region, WRITE_DISCARD, EXCLUSIVE, locals[1].region)); para_launcher.add_field(1, FID_DATA); runtime->execute_task(ctx, para_launcher); } Rect<3> rect = runtime->get_index_space_domain(ctx, task_is); int idx = 0; for (PointInRectIterator<3> it(rect); it(); it++) { FFHandler handle = ff.handlers[idx++]; argmap.set_point(*it, TaskArgument(&handle, sizeof(FFHandler))); } IndexLauncher init_launcher(BATCHNORM_INIT_TASK_ID, task_is, TaskArgument(this, sizeof(BatchNorm)), argmap); init_launcher.add_region_requirement( RegionRequirement(input_lps[0], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[0].region)); init_launcher.add_field(0, FID_DATA); init_launcher.add_region_requirement( RegionRequirement(output.part, 0/*projection id*/, WRITE_DISCARD, 
EXCLUSIVE, output.region)); init_launcher.add_field(1, FID_DATA); init_launcher.add_region_requirement( RegionRequirement(locals[0].region, 0/*projection id*/, READ_ONLY, EXCLUSIVE, locals[0].region)); init_launcher.add_field(2, FID_DATA); init_launcher.add_region_requirement( RegionRequirement(locals[1].region, 0/*projection id*/, READ_ONLY, EXCLUSIVE, locals[1].region)); init_launcher.add_field(3, FID_DATA); FutureMap fm = runtime->execute_index_space(ctx, init_launcher); fm.wait_all_results(); idx = 0; for (PointInRectIterator<3> it(rect); it(); it++) { meta[idx++] = fm.get_result<OpMeta*>(*it); } } /* regions[0](I): input regions[1](O): ouptut regions[2](I): scale regions[3](I): bias */ __host__ void BatchNorm::forward_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { #ifndef DISABLE_COMPUTATION assert(regions.size() == 4); assert(task->regions.size() == 4); float alpha = 1.0f, beta = 0.0f; const BatchNorm* bm = (BatchNorm*) task->args; const BatchNormMeta* m = *((BatchNormMeta**) task->local_args); const AccessorRO<float, 3> acc_input(regions[0], FID_DATA); const AccessorWO<float, 3> acc_output(regions[1], FID_DATA); const AccessorRO<float, 1> acc_scale(regions[2], FID_DATA); const AccessorRO<float, 1> acc_bias(regions[3], FID_DATA); Rect<3> rect_input, rect_output; Rect<1> rect_scale, rect_bias; rect_input = runtime->get_index_space_domain(ctx, task->regions[0].region.get_index_space()); rect_output = runtime->get_index_space_domain(ctx, task->regions[1].region.get_index_space()); rect_scale = runtime->get_index_space_domain(ctx, task->regions[2].region.get_index_space()); rect_bias = runtime->get_index_space_domain(ctx, task->regions[3].region.get_index_space()); assert(acc_input.accessor.is_dense_arbitrary(rect_input)); assert(acc_output.accessor.is_dense_arbitrary(rect_output)); assert(acc_scale.accessor.is_dense_arbitrary(rect_scale)); assert(acc_bias.accessor.is_dense_arbitrary(rect_bias)); const float *input_ptr = acc_input.ptr(rect_input.lo); float *output_ptr = acc_output.ptr(rect_output.lo); const float *scale_ptr = acc_scale.ptr(rect_scale.lo); const float *bias_ptr = acc_bias.ptr(rect_bias.lo); hipEvent_t t_start, t_end; if (bm->profiling) { hipEventCreate(&t_start); hipEventCreate(&t_end); hipEventRecord(t_start); } hipStream_t stream; checkCUDA(hipStreamCreate(&stream)); checkCUDNN(cudnnSetStream(m->handle.dnn, stream)); coord_t numChannels = bm->inputs[0].pdim[2]; hipLaunchKernelGGL(( assign_kernel), dim3(GET_BLOCKS(numChannels)), dim3(CUDA_NUM_THREADS), 0, 0, m->runningMean, numChannels, 0.0f); hipLaunchKernelGGL(( assign_kernel), dim3(GET_BLOCKS(numChannels)), dim3(CUDA_NUM_THREADS), 0, 0, m->runningVar, numChannels, 0.0f); checkCUDNN(cudnnBatchNormalizationForwardTraining( m->handle.dnn, m->mode, &alpha, &beta, m->inputTensor, input_ptr, m->outputTensor, output_ptr, m->biasTensor, scale_ptr, bias_ptr, 1.0, m->runningMean, m->runningVar, CUDNN_BN_MIN_EPSILON, m->saveMean, m->saveVar)); if (bm->profiling) { hipEventRecord(t_end); checkCUDA(hipEventSynchronize(t_end)); float elapsed = 0; checkCUDA(hipEventElapsedTime(&elapsed, t_start, t_end)); hipEventDestroy(t_start); hipEventDestroy(t_end); printf("BatchNorm forward time (BF) = %.2fms\n", elapsed); } #endif } __host__ void BatchNorm::forward(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; Rect<3> rect = runtime->get_index_space_domain(ctx, task_is); int idx = 0; for (PointInRectIterator<3> it(rect); it(); 
it++) { OpMeta* mp = meta[idx++]; argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*))); } IndexLauncher launcher(BATCHNORM_FWD_TASK_ID, task_is, TaskArgument(this, sizeof(BatchNorm)), argmap); launcher.add_region_requirement( RegionRequirement(input_lps[0], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[0].region)); launcher.add_field(0, FID_DATA); launcher.add_region_requirement( RegionRequirement(output.part, 0/*projection id*/, WRITE_DISCARD, EXCLUSIVE, output.region)); launcher.add_field(1, FID_DATA); launcher.add_region_requirement( RegionRequirement(locals[0].region, 0/*projection id*/, READ_ONLY, EXCLUSIVE, locals[0].region)); launcher.add_field(2, FID_DATA); launcher.add_region_requirement( RegionRequirement(locals[1].region, 0/*projection id*/, READ_ONLY, EXCLUSIVE, locals[1].region)); launcher.add_field(3, FID_DATA); runtime->execute_index_space(ctx, launcher); } /* regions[0](I): input regions[1](O): input_grad regions[2](I): output regions[3](I/O): output_grad regions[4](I): scale regions[5](O): scale_grad regions[6](O): bias_grad */ __host__ void BatchNorm::backward_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { #ifndef DISABLE_COMPUTATION assert(regions.size() == 7); assert(task->regions.size() == 7); float alpha = 1.0f, beta = 0.0f; const BatchNorm* bm = (BatchNorm*) task->args; const BatchNormMeta* m = *((BatchNormMeta**) task->local_args); const AccessorRO<float, 3> acc_input(regions[0], FID_DATA); const AccessorWO<float, 3> acc_input_grad(regions[1], FID_DATA); const AccessorRO<float, 3> acc_output(regions[2], FID_DATA); const AccessorRW<float, 3> acc_output_grad(regions[3], FID_DATA); const AccessorRO<float, 1> acc_scale(regions[4], FID_DATA); const AccessorWO<float, 1> acc_scale_grad(regions[5], FID_DATA); const AccessorWO<float, 1> acc_bias_grad(regions[6], FID_DATA); Rect<3> rect_input, rect_input_grad, rect_output, rect_output_grad; Rect<1> rect_scale, rect_scale_grad, rect_bias_grad; rect_input = runtime->get_index_space_domain(ctx, task->regions[0].region.get_index_space()); rect_input_grad = runtime->get_index_space_domain(ctx, task->regions[1].region.get_index_space()); rect_output = runtime->get_index_space_domain(ctx, task->regions[2].region.get_index_space()); rect_output_grad = runtime->get_index_space_domain(ctx, task->regions[3].region.get_index_space()); rect_scale = runtime->get_index_space_domain(ctx, task->regions[4].region.get_index_space()); rect_scale_grad = runtime->get_index_space_domain(ctx, task->regions[5].region.get_index_space()); rect_bias_grad = runtime->get_index_space_domain(ctx, task->regions[6].region.get_index_space()); // make sure all regions are dense assert(acc_input.accessor.is_dense_arbitrary(rect_input)); assert(acc_input_grad.accessor.is_dense_arbitrary(rect_input_grad)); assert(acc_output.accessor.is_dense_arbitrary(rect_output)); assert(acc_output_grad.accessor.is_dense_arbitrary(rect_output_grad)); assert(acc_scale.accessor.is_dense_arbitrary(rect_scale)); assert(acc_scale_grad.accessor.is_dense_arbitrary(rect_scale_grad)); assert(acc_bias_grad.accessor.is_dense_arbitrary(rect_bias_grad)); const float *input_ptr = acc_input.ptr(rect_input.lo); float *input_grad_ptr = acc_input_grad.ptr(rect_input_grad.lo); const float *output_ptr = acc_output.ptr(rect_output.lo); float *output_grad_ptr = acc_output_grad.ptr(rect_output_grad.lo); const float *scale_ptr = acc_scale.ptr(rect_scale.lo); float *scale_grad_ptr = acc_scale_grad.ptr(rect_scale_grad.lo); float *bias_grad_ptr 
= acc_bias_grad.ptr(rect_bias_grad.lo); hipEvent_t t_start, t_end; if (bm->profiling) { hipEventCreate(&t_start); hipEventCreate(&t_end); hipEventRecord(t_start); } hipStream_t stream; checkCUDA(hipStreamCreate(&stream)); checkCUDNN(cudnnSetStream(m->handle.dnn, stream)); if (m->relu) { int n = rect_output.volume(); hipLaunchKernelGGL(( reluBackward), dim3(GET_BLOCKS(n)), dim3(CUDA_NUM_THREADS), 0, 0, output_grad_ptr, output_ptr, n); } checkCUDNN(cudnnBatchNormalizationBackward( m->handle.dnn, m->mode, &alpha, &beta, &alpha, &beta, m->inputTensor, input_ptr, m->outputTensor, output_grad_ptr, m->inputTensor, input_grad_ptr, m->biasTensor, scale_ptr, scale_grad_ptr, bias_grad_ptr, CUDNN_BN_MIN_EPSILON, m->saveMean, m->saveVar)); if (bm->profiling) { hipEventRecord(t_end); checkCUDA(hipEventSynchronize(t_end)); float elapsed = 0; checkCUDA(hipEventElapsedTime(&elapsed, t_start, t_end)); hipEventDestroy(t_start); hipEventDestroy(t_end); printf("BatchNorm backward time = %.2fms\n", elapsed); } #endif } __host__ void BatchNorm::backward(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; Rect<3> rect = runtime->get_index_space_domain(ctx, task_is); int idx = 0; for (PointInRectIterator<3> it(rect); it(); it++) { OpMeta* mp = meta[idx++]; argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*))); } IndexLauncher launcher(BATCHNORM_BWD_TASK_ID, task_is, TaskArgument(this, sizeof(BatchNorm)), argmap); // regions[0](I): input launcher.add_region_requirement( RegionRequirement(input_lps[0], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[0].region)); launcher.add_field(0, FID_DATA); // regions[1](O): input_grad (we only need grad tensors) launcher.add_region_requirement( RegionRequirement(inputs[0].part_grad, 0/*projection id*/, WRITE_DISCARD, EXCLUSIVE, inputs[0].region_grad)); launcher.add_field(1, FID_DATA); // regions[2](I): output launcher.add_region_requirement( RegionRequirement(output.part, 0/*projection id*/, READ_ONLY, EXCLUSIVE, output.region)); launcher.add_field(2, FID_DATA); // regions[3](I/O): output_grad launcher.add_region_requirement( RegionRequirement(output.part_grad, 0/*projection id*/, READ_WRITE, EXCLUSIVE, output.region_grad)); launcher.add_field(3, FID_DATA); // regions[4](I): filter launcher.add_region_requirement( RegionRequirement(locals[0].region, 0/*projection id*/, READ_ONLY, EXCLUSIVE, locals[0].region)); launcher.add_field(4, FID_DATA); // regions[5](O): filter_grad launcher.add_region_requirement( RegionRequirement(locals[0].part_grad, 0/*projection id*/, WRITE_DISCARD, EXCLUSIVE, locals[0].region_grad)); launcher.add_field(5, FID_DATA); // regions[6](O): bias_grad launcher.add_region_requirement( RegionRequirement(locals[1].part_grad, 0/*projection id*/, WRITE_DISCARD, EXCLUSIVE, locals[1].region_grad)); launcher.add_field(6, FID_DATA); FutureMap fm = runtime->execute_index_space(ctx, launcher); } __host__ void BatchNorm::update(const FFModel& ff) { //FIXME: we didn't sync batch norm parameters for now }
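// --------------------------------------------------------------------------
// cuDNN sketch (not a FlexFlow function): init_task above builds the
// 1 x C x 1 x 1 scale/bias descriptor by hand with cudnnSetTensor4dDescriptor.
// cuDNN can also derive that descriptor from the data descriptor and the
// batch-norm mode via cudnnDeriveBNTensorDescriptor; a hedged equivalent of
// the descriptor setup, stripped of the Legion task plumbing, is sketched
// below. Both descriptors are assumed to have been created already with
// cudnnCreateTensorDescriptor.
// --------------------------------------------------------------------------
#include <cudnn.h>

static void setup_bn_descriptors(cudnnTensorDescriptor_t inputTensor,
                                 cudnnTensorDescriptor_t biasTensor,
                                 int n, int c, int h, int w,
                                 cudnnBatchNormMode_t mode)
{
    // NCHW float data descriptor, as in BatchNorm::init_task
    cudnnSetTensor4dDescriptor(inputTensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
                               n, c, h, w);
    // derive the 1 x C x 1 x 1 scale/bias/mean/var descriptor from it
    cudnnDeriveBNTensorDescriptor(biasTensor, inputTensor, mode);
}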
444092ce94e6f67090a2555287a0abf82989b53d.cu
/* Copyright 2017 Stanford, NVIDIA * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "model.h" #include "cuda_helper.h" Tensor FFModel::batch_norm(std::string name, Tensor input, bool relu) { assert(input.numDim == 4); //Only support 4D BN for now IndexSpaceT<3> task_is; BatchNorm *bn = new BatchNorm(name, config, input, task_is, relu); layers.push_back(bn); return bn->output; } /* locals[0] = scale locals[1] = bias */ BatchNorm::BatchNorm(std::string _name, FFConfig _config, Tensor _input, IndexSpaceT<3> _task_is, bool _relu) : Op(_name, _input), relu(_relu), profiling(_config.profiling) { Context ctx = _config.lg_ctx; HighLevelRuntime* runtime = _config.lg_hlr; Rect<3> part_rect = runtime->get_index_space_domain(ctx, task_is); num_replica = part_rect.volume(); // Create output tensor int output_w = _input.adim[0]; int output_h = _input.adim[1]; int output_nc = _input.adim[2] * _input.adim[3]; int num_par_w = part_rect.hi[0] - part_rect.lo[0] + 1; int num_par_h = part_rect.hi[1] - part_rect.lo[1] + 1; int num_par_n = part_rect.hi[2] - part_rect.lo[2] + 1; FieldSpace fs = _config.field_space; Rect<3, coord_t> output_rect(Point<3>(0, 0, 0), Point<3>(output_w-1, output_h-1, output_nc-1)); IndexSpaceT<3> output_is = runtime->create_index_space(ctx, output_rect); LogicalRegion output_lr = runtime->create_logical_region(ctx, output_is, fs); LogicalRegion output_grad_lr = runtime->create_logical_region(ctx, output_is, fs); Transform<3, 3, coord_t> trans; int extent_w = (output_w + num_par_w - 1) / num_par_w; int extent_h = (output_h + num_par_h - 1) / num_par_h; int extent_nc = output_nc / num_par_n; assert(output_nc % num_par_n == 0); Rect<3, coord_t> ext(Point<3>(0, 0, 0), Point<3>(extent_w-1, extent_h-1, extent_nc-1)); trans[0][0] = extent_w; trans[0][1] = 0; trans[0][2] = 0; trans[1][0] = 0; trans[1][1] = extent_h; trans[1][2] = 0; trans[2][0] = 0; trans[2][1] = 0; trans[2][2] = extent_nc; IndexPartition output_ip = runtime->create_partition_by_restriction(ctx, output_is, task_is, trans, ext); assert(runtime->is_index_partition_disjoint(ctx, output_ip)); assert(runtime->is_index_partition_complete(ctx, output_ip)); LogicalPartition output_lp = runtime->get_logical_partition(ctx, output_lr, output_ip); LogicalPartition output_grad_lp = runtime->get_logical_partition(ctx, output_grad_lr, output_ip); int bias_nc = num_replica * _input.adim[2]; /*input_channels*/ Rect<1, coord_t> bias_grad_rect(0, bias_nc - 1); Rect<1, coord_t> bias_rect(0, _input.adim[2] - 1); IndexSpaceT<1> bias_is = runtime->create_index_space(ctx, bias_rect); IndexSpaceT<1> bias_grad_is = runtime->create_index_space(ctx, bias_grad_rect); LogicalRegion bias_lr = runtime->create_logical_region(ctx, bias_is, fs); LogicalRegion scale_lr = runtime->create_logical_region(ctx, bias_is, fs); LogicalRegion bias_grad_lr = runtime->create_logical_region(ctx, bias_grad_is, fs); LogicalRegion scale_grad_lr = runtime->create_logical_region(ctx, bias_grad_is, fs); IndexPartition bias_grad_ip = runtime->create_equal_partition(ctx, bias_grad_is, 
task_is); LogicalPartition bias_grad_lp = runtime->get_logical_partition(ctx, bias_grad_lr, bias_grad_ip); LogicalPartition scale_grad_lp = runtime->get_logical_partition(ctx, scale_grad_lr, bias_grad_ip); Tensor scale_tensor, bias_tensor; scale_tensor.region = scale_lr; scale_tensor.region_grad = scale_grad_lr; scale_tensor.part = LogicalPartition::NO_PART; scale_tensor.part_grad = scale_grad_lp; locals[0] = scale_tensor; bias_tensor.region = bias_lr; bias_tensor.region_grad = bias_grad_lr; bias_tensor.part = LogicalPartition::NO_PART; bias_tensor.part_grad = bias_grad_lp; locals[1] = bias_tensor; numLocals = 2; output = _input; output.region = output_lr; output.part = output_lp; output.region_grad = output_grad_lr; output.part_grad = output_grad_lp; printf("Create bn layer: output(%d %d %d %d)\n", output.adim[3], output.adim[2], output.adim[1], output.adim[0]); input_lps[0] = _input.part; } /* regions[0]: input regions[1]: output regions[2](I): scale regions[3](I): bias */ __host__ OpMeta* BatchNorm::init_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { assert(regions.size() == 4); assert(task->regions.size() == 4); const BatchNorm* bm = (BatchNorm*) task->args; FFHandler handle = *((const FFHandler*) task->local_args); const AccessorRO<float, 3> acc_input(regions[0], FID_DATA); const AccessorWO<float, 3> acc_output(regions[1], FID_DATA); const AccessorRO<float, 1> acc_scale(regions[2], FID_DATA); const AccessorRO<float, 1> acc_bias(regions[3], FID_DATA); Rect<1> rect_scale, rect_bias; Rect<3> rect_input, rect_output; rect_input = runtime->get_index_space_domain(ctx, task->regions[0].region.get_index_space()); rect_output = runtime->get_index_space_domain(ctx, task->regions[1].region.get_index_space()); rect_scale = runtime->get_index_space_domain(ctx, task->regions[2].region.get_index_space()); rect_bias = runtime->get_index_space_domain(ctx, task->regions[3].region.get_index_space()); assert(acc_input.accessor.is_dense_arbitrary(rect_input)); assert(acc_output.accessor.is_dense_arbitrary(rect_output)); assert(acc_scale.accessor.is_dense_arbitrary(rect_scale)); assert(acc_bias.accessor.is_dense_arbitrary(rect_bias)); const float *input_ptr = acc_input.ptr(rect_input.lo); float *output_ptr = acc_output.ptr(rect_output.lo); const float *scale_ptr = acc_scale.ptr(rect_scale.lo); const float *bias_ptr = acc_bias.ptr(rect_bias.lo); BatchNormMeta* m = new BatchNormMeta(handle); #ifndef DISABLE_COMPUTATION m->relu = bm->relu; m->mode = CUDNN_BATCHNORM_SPATIAL; #if CUDNN_VERSION >= 7000 m->mode = CUDNN_BATCHNORM_SPATIAL_PERSISTENT; #endif checkCUDNN(cudnnCreateTensorDescriptor(&m->inputTensor)); checkCUDNN(cudnnCreateTensorDescriptor(&m->outputTensor)); checkCUDNN(cudnnCreateTensorDescriptor(&m->biasTensor)); assert(rect_input == rect_output); int input_w = rect_input.hi[0] - rect_input.lo[0] + 1; int input_h = rect_input.hi[1] - rect_input.lo[1] + 1; int channel = bm->inputs[0].pdim[2]; checkCUDNN(cudnnSetTensor4dDescriptor(m->inputTensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, bm->inputs[0].pdim[3], channel, input_h, input_w)); checkCUDNN(cudnnSetTensor4dDescriptor(m->outputTensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, bm->inputs[0].pdim[3], channel, input_h, input_w)); checkCUDNN(cudnnSetTensor4dDescriptor(m->biasTensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, channel, 1, 1)); //float *runningMean, *runningVar, *saveMean, *saveVar; checkCUDA(cudaMalloc(&m->runningMean, sizeof(float) * channel)); checkCUDA(cudaMalloc(&m->runningVar, 
sizeof(float) * channel)); checkCUDA(cudaMalloc(&m->saveMean, sizeof(float) * channel)); checkCUDA(cudaMalloc(&m->saveVar, sizeof(float) * channel)); if (m->relu) { checkCUDNN(cudnnCreateActivationDescriptor(&m->actiDesc)); checkCUDNN(cudnnSetActivationDescriptor(m->actiDesc, CUDNN_ACTIVATION_RELU, CUDNN_PROPAGATE_NAN, 0.0)); } #endif return m; } /* regions[0](O): scale, initilized to ones regions[1](O): bias, initilized to zeros */ __host__ void BatchNorm::init_para_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { assert(regions.size() == 2); assert(task->regions.size() == 2); //const BatchNorm* bm = (BatchNorm*) task->args; const AccessorWO<float, 1> acc_scale(regions[0], FID_DATA); const AccessorWO<float, 1> acc_bias(regions[1], FID_DATA); Rect<1> rect_scale, rect_bias; rect_scale = runtime->get_index_space_domain(ctx, task->regions[0].region.get_index_space()); rect_bias = runtime->get_index_space_domain(ctx, task->regions[1].region.get_index_space()); assert(acc_scale.accessor.is_dense_arbitrary(rect_scale)); assert(acc_bias.accessor.is_dense_arbitrary(rect_bias)); float *scale_ptr = acc_scale.ptr(rect_scale.lo); float *bias_ptr = acc_bias.ptr(rect_bias.lo); // init kernel and bias #ifdef PARAMETER_ALL_ONES ones_kernel<<<GET_BLOCKS(rect_scale.volume()), CUDA_NUM_THREADS>>>( scale_ptr, rect_scale.volume()); ones_kernel<<<GET_BLOCKS(rect_bias.volume()), CUDA_NUM_THREADS>>>( bias_ptr, rect_bias.volume()); #else //cudaStream_t stream; //checkCUDA(cudaStreamCreate(&stream)); //curandGenerator_t genGPU; //curandCreateGenerator(&genGPU, CURAND_RNG_PSEUDO_DEFAULT); //curandSetStream(genGPU, stream); //curandSetPseudoRandomGeneratorSeed(genGPU, 1234ULL); //curandGenerateUniform(genGPU, scale_ptr, rect_scale.volume()); assign_kernel<<<GET_BLOCKS(rect_scale.volume()), CUDA_NUM_THREADS>>>( scale_ptr, rect_scale.volume(), 1.0f); assign_kernel<<<GET_BLOCKS(rect_bias.volume()), CUDA_NUM_THREADS>>>( bias_ptr, rect_bias.volume(), 0.0f); //curandDestroyGenerator(genGPU); #endif } __host__ void BatchNorm::init(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; // First we initialize the scale and bias parameters { TaskLauncher para_launcher(BATCHNORM_INIT_PARA_TASK_ID, TaskArgument(NULL, 0)); para_launcher.add_region_requirement( RegionRequirement(locals[0].region, WRITE_DISCARD, EXCLUSIVE, locals[0].region)); para_launcher.add_field(0, FID_DATA); para_launcher.add_region_requirement( RegionRequirement(locals[1].region, WRITE_DISCARD, EXCLUSIVE, locals[1].region)); para_launcher.add_field(1, FID_DATA); runtime->execute_task(ctx, para_launcher); } Rect<3> rect = runtime->get_index_space_domain(ctx, task_is); int idx = 0; for (PointInRectIterator<3> it(rect); it(); it++) { FFHandler handle = ff.handlers[idx++]; argmap.set_point(*it, TaskArgument(&handle, sizeof(FFHandler))); } IndexLauncher init_launcher(BATCHNORM_INIT_TASK_ID, task_is, TaskArgument(this, sizeof(BatchNorm)), argmap); init_launcher.add_region_requirement( RegionRequirement(input_lps[0], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[0].region)); init_launcher.add_field(0, FID_DATA); init_launcher.add_region_requirement( RegionRequirement(output.part, 0/*projection id*/, WRITE_DISCARD, EXCLUSIVE, output.region)); init_launcher.add_field(1, FID_DATA); init_launcher.add_region_requirement( RegionRequirement(locals[0].region, 0/*projection id*/, READ_ONLY, EXCLUSIVE, locals[0].region)); init_launcher.add_field(2, FID_DATA); 
init_launcher.add_region_requirement( RegionRequirement(locals[1].region, 0/*projection id*/, READ_ONLY, EXCLUSIVE, locals[1].region)); init_launcher.add_field(3, FID_DATA); FutureMap fm = runtime->execute_index_space(ctx, init_launcher); fm.wait_all_results(); idx = 0; for (PointInRectIterator<3> it(rect); it(); it++) { meta[idx++] = fm.get_result<OpMeta*>(*it); } } /* regions[0](I): input regions[1](O): ouptut regions[2](I): scale regions[3](I): bias */ __host__ void BatchNorm::forward_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { #ifndef DISABLE_COMPUTATION assert(regions.size() == 4); assert(task->regions.size() == 4); float alpha = 1.0f, beta = 0.0f; const BatchNorm* bm = (BatchNorm*) task->args; const BatchNormMeta* m = *((BatchNormMeta**) task->local_args); const AccessorRO<float, 3> acc_input(regions[0], FID_DATA); const AccessorWO<float, 3> acc_output(regions[1], FID_DATA); const AccessorRO<float, 1> acc_scale(regions[2], FID_DATA); const AccessorRO<float, 1> acc_bias(regions[3], FID_DATA); Rect<3> rect_input, rect_output; Rect<1> rect_scale, rect_bias; rect_input = runtime->get_index_space_domain(ctx, task->regions[0].region.get_index_space()); rect_output = runtime->get_index_space_domain(ctx, task->regions[1].region.get_index_space()); rect_scale = runtime->get_index_space_domain(ctx, task->regions[2].region.get_index_space()); rect_bias = runtime->get_index_space_domain(ctx, task->regions[3].region.get_index_space()); assert(acc_input.accessor.is_dense_arbitrary(rect_input)); assert(acc_output.accessor.is_dense_arbitrary(rect_output)); assert(acc_scale.accessor.is_dense_arbitrary(rect_scale)); assert(acc_bias.accessor.is_dense_arbitrary(rect_bias)); const float *input_ptr = acc_input.ptr(rect_input.lo); float *output_ptr = acc_output.ptr(rect_output.lo); const float *scale_ptr = acc_scale.ptr(rect_scale.lo); const float *bias_ptr = acc_bias.ptr(rect_bias.lo); cudaEvent_t t_start, t_end; if (bm->profiling) { cudaEventCreate(&t_start); cudaEventCreate(&t_end); cudaEventRecord(t_start); } cudaStream_t stream; checkCUDA(cudaStreamCreate(&stream)); checkCUDNN(cudnnSetStream(m->handle.dnn, stream)); coord_t numChannels = bm->inputs[0].pdim[2]; assign_kernel<<<GET_BLOCKS(numChannels), CUDA_NUM_THREADS>>>(m->runningMean, numChannels, 0.0f); assign_kernel<<<GET_BLOCKS(numChannels), CUDA_NUM_THREADS>>>(m->runningVar, numChannels, 0.0f); checkCUDNN(cudnnBatchNormalizationForwardTraining( m->handle.dnn, m->mode, &alpha, &beta, m->inputTensor, input_ptr, m->outputTensor, output_ptr, m->biasTensor, scale_ptr, bias_ptr, 1.0, m->runningMean, m->runningVar, CUDNN_BN_MIN_EPSILON, m->saveMean, m->saveVar)); if (bm->profiling) { cudaEventRecord(t_end); checkCUDA(cudaEventSynchronize(t_end)); float elapsed = 0; checkCUDA(cudaEventElapsedTime(&elapsed, t_start, t_end)); cudaEventDestroy(t_start); cudaEventDestroy(t_end); printf("BatchNorm forward time (BF) = %.2fms\n", elapsed); } #endif } __host__ void BatchNorm::forward(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; Rect<3> rect = runtime->get_index_space_domain(ctx, task_is); int idx = 0; for (PointInRectIterator<3> it(rect); it(); it++) { OpMeta* mp = meta[idx++]; argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*))); } IndexLauncher launcher(BATCHNORM_FWD_TASK_ID, task_is, TaskArgument(this, sizeof(BatchNorm)), argmap); launcher.add_region_requirement( RegionRequirement(input_lps[0], 0/*projection id*/, READ_ONLY, EXCLUSIVE, 
inputs[0].region)); launcher.add_field(0, FID_DATA); launcher.add_region_requirement( RegionRequirement(output.part, 0/*projection id*/, WRITE_DISCARD, EXCLUSIVE, output.region)); launcher.add_field(1, FID_DATA); launcher.add_region_requirement( RegionRequirement(locals[0].region, 0/*projection id*/, READ_ONLY, EXCLUSIVE, locals[0].region)); launcher.add_field(2, FID_DATA); launcher.add_region_requirement( RegionRequirement(locals[1].region, 0/*projection id*/, READ_ONLY, EXCLUSIVE, locals[1].region)); launcher.add_field(3, FID_DATA); runtime->execute_index_space(ctx, launcher); } /* regions[0](I): input regions[1](O): input_grad regions[2](I): output regions[3](I/O): output_grad regions[4](I): scale regions[5](O): scale_grad regions[6](O): bias_grad */ __host__ void BatchNorm::backward_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { #ifndef DISABLE_COMPUTATION assert(regions.size() == 7); assert(task->regions.size() == 7); float alpha = 1.0f, beta = 0.0f; const BatchNorm* bm = (BatchNorm*) task->args; const BatchNormMeta* m = *((BatchNormMeta**) task->local_args); const AccessorRO<float, 3> acc_input(regions[0], FID_DATA); const AccessorWO<float, 3> acc_input_grad(regions[1], FID_DATA); const AccessorRO<float, 3> acc_output(regions[2], FID_DATA); const AccessorRW<float, 3> acc_output_grad(regions[3], FID_DATA); const AccessorRO<float, 1> acc_scale(regions[4], FID_DATA); const AccessorWO<float, 1> acc_scale_grad(regions[5], FID_DATA); const AccessorWO<float, 1> acc_bias_grad(regions[6], FID_DATA); Rect<3> rect_input, rect_input_grad, rect_output, rect_output_grad; Rect<1> rect_scale, rect_scale_grad, rect_bias_grad; rect_input = runtime->get_index_space_domain(ctx, task->regions[0].region.get_index_space()); rect_input_grad = runtime->get_index_space_domain(ctx, task->regions[1].region.get_index_space()); rect_output = runtime->get_index_space_domain(ctx, task->regions[2].region.get_index_space()); rect_output_grad = runtime->get_index_space_domain(ctx, task->regions[3].region.get_index_space()); rect_scale = runtime->get_index_space_domain(ctx, task->regions[4].region.get_index_space()); rect_scale_grad = runtime->get_index_space_domain(ctx, task->regions[5].region.get_index_space()); rect_bias_grad = runtime->get_index_space_domain(ctx, task->regions[6].region.get_index_space()); // make sure all regions are dense assert(acc_input.accessor.is_dense_arbitrary(rect_input)); assert(acc_input_grad.accessor.is_dense_arbitrary(rect_input_grad)); assert(acc_output.accessor.is_dense_arbitrary(rect_output)); assert(acc_output_grad.accessor.is_dense_arbitrary(rect_output_grad)); assert(acc_scale.accessor.is_dense_arbitrary(rect_scale)); assert(acc_scale_grad.accessor.is_dense_arbitrary(rect_scale_grad)); assert(acc_bias_grad.accessor.is_dense_arbitrary(rect_bias_grad)); const float *input_ptr = acc_input.ptr(rect_input.lo); float *input_grad_ptr = acc_input_grad.ptr(rect_input_grad.lo); const float *output_ptr = acc_output.ptr(rect_output.lo); float *output_grad_ptr = acc_output_grad.ptr(rect_output_grad.lo); const float *scale_ptr = acc_scale.ptr(rect_scale.lo); float *scale_grad_ptr = acc_scale_grad.ptr(rect_scale_grad.lo); float *bias_grad_ptr = acc_bias_grad.ptr(rect_bias_grad.lo); cudaEvent_t t_start, t_end; if (bm->profiling) { cudaEventCreate(&t_start); cudaEventCreate(&t_end); cudaEventRecord(t_start); } cudaStream_t stream; checkCUDA(cudaStreamCreate(&stream)); checkCUDNN(cudnnSetStream(m->handle.dnn, stream)); if (m->relu) { int n = 
rect_output.volume(); reluBackward<<<GET_BLOCKS(n), CUDA_NUM_THREADS>>>(output_grad_ptr, output_ptr, n); } checkCUDNN(cudnnBatchNormalizationBackward( m->handle.dnn, m->mode, &alpha, &beta, &alpha, &beta, m->inputTensor, input_ptr, m->outputTensor, output_grad_ptr, m->inputTensor, input_grad_ptr, m->biasTensor, scale_ptr, scale_grad_ptr, bias_grad_ptr, CUDNN_BN_MIN_EPSILON, m->saveMean, m->saveVar)); if (bm->profiling) { cudaEventRecord(t_end); checkCUDA(cudaEventSynchronize(t_end)); float elapsed = 0; checkCUDA(cudaEventElapsedTime(&elapsed, t_start, t_end)); cudaEventDestroy(t_start); cudaEventDestroy(t_end); printf("BatchNorm backward time = %.2fms\n", elapsed); } #endif } __host__ void BatchNorm::backward(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; Rect<3> rect = runtime->get_index_space_domain(ctx, task_is); int idx = 0; for (PointInRectIterator<3> it(rect); it(); it++) { OpMeta* mp = meta[idx++]; argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*))); } IndexLauncher launcher(BATCHNORM_BWD_TASK_ID, task_is, TaskArgument(this, sizeof(BatchNorm)), argmap); // regions[0](I): input launcher.add_region_requirement( RegionRequirement(input_lps[0], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[0].region)); launcher.add_field(0, FID_DATA); // regions[1](O): input_grad (we only need grad tensors) launcher.add_region_requirement( RegionRequirement(inputs[0].part_grad, 0/*projection id*/, WRITE_DISCARD, EXCLUSIVE, inputs[0].region_grad)); launcher.add_field(1, FID_DATA); // regions[2](I): output launcher.add_region_requirement( RegionRequirement(output.part, 0/*projection id*/, READ_ONLY, EXCLUSIVE, output.region)); launcher.add_field(2, FID_DATA); // regions[3](I/O): output_grad launcher.add_region_requirement( RegionRequirement(output.part_grad, 0/*projection id*/, READ_WRITE, EXCLUSIVE, output.region_grad)); launcher.add_field(3, FID_DATA); // regions[4](I): filter launcher.add_region_requirement( RegionRequirement(locals[0].region, 0/*projection id*/, READ_ONLY, EXCLUSIVE, locals[0].region)); launcher.add_field(4, FID_DATA); // regions[5](O): filter_grad launcher.add_region_requirement( RegionRequirement(locals[0].part_grad, 0/*projection id*/, WRITE_DISCARD, EXCLUSIVE, locals[0].region_grad)); launcher.add_field(5, FID_DATA); // regions[6](O): bias_grad launcher.add_region_requirement( RegionRequirement(locals[1].part_grad, 0/*projection id*/, WRITE_DISCARD, EXCLUSIVE, locals[1].region_grad)); launcher.add_field(6, FID_DATA); FutureMap fm = runtime->execute_index_space(ctx, launcher); } __host__ void BatchNorm::update(const FFModel& ff) { //FIXME: we didn't sync batch norm parameters for now }
4f7268ad341e15ce21f50d4fdf8ba9e63a886826.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <cstdlib> #include <cstdio> #include <hiprand/hiprand_kernel.h> #include <thrust/reduce.h> #include <thrust/functional.h> #include <thrust/execution_policy.h> #include <thrust/extrema.h> #include <thrust/device_ptr.h> using namespace std; __device__ int d_count = 0; __global__ void colourMax (int *vertexArray, int *neighbourArray, int *numbers, int n, int m, int *colouring, int currentColour){ int i= blockDim.x * blockIdx.x + threadIdx.x; if (i>=n){ return; } if (colouring[i]!=0){ return; } int myMax = numbers[i]; // printf("I am node %d with value %d\n", i+1, myMax); int start = -1, stop = -1; start = vertexArray[i]; if (i==n-1){ stop = 2*m; } else{ stop = vertexArray[i+1]; } for (int j=start; j<stop; j++){ // printf("My neighbour %d with value %d from %d \n", neighbourArray[j], numbers[neighbourArray[j]-1], i+1); int neighbour = neighbourArray[j]-1; if (colouring[neighbour]==0 && numbers[neighbour] >= myMax){ if (numbers[neighbour] == myMax){ if (i < neighbour){ continue; } } return; } } colouring[i] = currentColour; atomicAdd(&d_count, 1); } __global__ void setup_kernel (hiprandState_t * state, unsigned long seed ){ int i= blockDim.x * blockIdx.x + threadIdx.x; hiprand_init (seed, i, 0, &state[i]); } __global__ void randomNumbering (hiprandState_t* globalState, int *degreeCount, int n, int limit){ int i= blockDim.x * blockIdx.x + threadIdx.x; hiprandState_t localState = globalState[i]; float RANDOM = hiprand_uniform( &localState ); globalState[i] = localState; RANDOM *= (limit - 1 + 0.999999); RANDOM += 1; degreeCount[i] = (int) RANDOM; } __global__ void degreeCalc (int *vertexArray, int *neighbourArray, int *degreeCount, int n, int m){ int i= blockDim.x * blockIdx.x + threadIdx.x; if (i>=n){ return; } int start = -1, stop = -1; int diff=0; start = vertexArray[i]; if (i==n-1){ stop = 2*m; } else{ stop = vertexArray[i+1]; } diff = stop-start; degreeCount[i]=diff; } void edgesPrint (int vertexArray[], int neighbourArray[], int n, int m){ for (int i=0; i<n-1; i++){ for (int j = vertexArray[i]; j < vertexArray[i+1]; ++j){ cout<<"e "<<i+1<<" "<<neighbourArray[j]<<endl; /* code */ } } for (int j = vertexArray[n-1]; j < m; ++j) { cout<<"e "<<n<<" "<<neighbourArray[j]<<endl; /* code */ } } int main(int argc, char const *argv[]) { string a, b; int n, m; cin>>a>>b>>n>>m; int *h_count = new int; int *h_vertexArray = new int [n]; int *h_neighbourArray = new int [2*m]; int *h_degreeCount = new int [n]; int *h_colour = new int [n]; int *d_vertexArray = NULL; hipMalloc((void **)&d_vertexArray, n*sizeof(int)); int *d_neighbourArray = NULL; hipMalloc((void **)&d_neighbourArray, 2*m*sizeof(int)); int *d_colour = NULL; hipMalloc((void **)&d_colour, (n)*sizeof(int)); hipMemset((void *)d_colour, 0, (n)*sizeof(int)); int *d_degreeCount = NULL; hipMalloc((void **)&d_degreeCount, (n)*sizeof(int)); hipMemset((void *)d_degreeCount, 0, (n)*sizeof(int)); hiprandState_t* devStates; hipMalloc ( &devStates, n*sizeof( hiprandState_t ) ); for (int i = 0; i < n; ++i) { /* code */ h_vertexArray[i]=2*m; } int NSlast = 0; int NSoffset = 0; int NSprev=0; for (int i=0; i<2*m; i++){ int start, end; cin>>start>>end; for (int j=NSlast+1; j<start; j++){ h_vertexArray[j-1]=NSoffset; } if (NSprev!=start){ NSlast=start; h_vertexArray[start-1]=NSoffset; NSprev=start; } h_neighbourArray[NSoffset]=end; NSoffset++; } // int offset = 0; // int current = 0; // int mark = 1; // for (int i = 0; i < 2*m; ++i) // { // int 
start; // int end; // cin>>start>>end; // if (start!=mark){ // if (start == mark+1 && h_vertexArray[mark-1]!=2*m){ // } // else{ // for (int j = mark; j<start; j++){ // h_vertexArray[j-1]=offset; // } // } // mark = start; // } // if (start==current){ // h_neighbourArray[offset]=end; // offset++; // } // else { // current = start; // h_vertexArray[current-1]=offset; // h_neighbourArray[offset]=end; // offset++; // } // } hipMemcpy(d_vertexArray, h_vertexArray, n*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_neighbourArray, h_neighbourArray, 2*m*sizeof(int), hipMemcpyHostToDevice); int threadsPerBlock = 512; int blocksPerGrid = (n + threadsPerBlock -1)/threadsPerBlock; //cout<<threadsPerBlock<<" "<<blocksPerGrid<<endl; hipLaunchKernelGGL(( degreeCalc), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_vertexArray, d_neighbourArray, d_degreeCount, n, m); // hipMemcpy(h_degreeCount, d_degreeCount, n*sizeof(int), hipMemcpyDeviceToHost); // for (int i=0; i<n; i++){ // cout<<h_degreeCount[i]<<endl; // } // thrust::device_ptr<int> d_ptr = thrust::device_pointer_cast(d_degreeCount); int maxDegree = *(thrust::max_element(d_ptr, d_ptr + n)); cout<<"Max = "<<maxDegree<<endl; hipLaunchKernelGGL(( setup_kernel) , dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, devStates, time(NULL) ); hipLaunchKernelGGL(( randomNumbering), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, devStates, d_degreeCount, n, n); hipMemcpy(h_degreeCount, d_degreeCount, n*sizeof(int), hipMemcpyDeviceToHost); // cout<<"Random numbers: "<<endl; // // for (int i=0; i<n; i++){ // cout<<h_degreeCount[i]<<endl; // } int colourCount = 1; while (1){ hipLaunchKernelGGL(( colourMax), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_vertexArray, d_neighbourArray, d_degreeCount, n, m, d_colour, colourCount); hipMemcpyFromSymbol(h_count, d_count, sizeof(int), 0, hipMemcpyDeviceToHost); cout<<"H Count = "<<*h_count<<"at colour: "<<colourCount<<endl; if (*h_count == n){ break; } colourCount++; } hipMemcpy(h_colour, d_colour, n*sizeof(int), hipMemcpyDeviceToHost); thrust::device_ptr<int> c_ptr = thrust::device_pointer_cast(d_colour); int maxColour = *(thrust::max_element(c_ptr, c_ptr + n)); cout<<"Max Colour = "<<maxColour<<endl; // cout<<"Colour numbers: "<<endl; // // for (int i=0; i<n; i++){ // cout<<h_colour[i]<<endl; // } delete h_count; delete[] h_vertexArray; delete[] h_neighbourArray; delete[] h_degreeCount; delete[] h_colour; hipFree(d_neighbourArray); hipFree(d_vertexArray); hipFree(d_degreeCount); hipFree(d_colour); hipDeviceReset(); return 0; }
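// Illustrative note on the adjacency layout built in main() above (hypothetical 3-vertex
// triangle graph, not taken from any original input): each undirected edge is expected twice in
// the input, grouped by source vertex, and the arrays form a CSR-style adjacency where
// h_vertexArray[i] holds the offset of vertex (i+1)'s first neighbour in h_neighbourArray and
// the last vertex's list ends at 2*m.
//
//   edges (as read):   1 2, 1 3, 2 1, 2 3, 3 1, 3 2      (n = 3, m = 3)
//   h_vertexArray:     [0, 2, 4]
//   h_neighbourArray:  [2, 3, 1, 3, 1, 2]                (1-based vertex ids, as in the code)
//
// degreeCalc then recovers degree(i) as vertexArray[i+1] - vertexArray[i] (or 2*m - vertexArray[i]
// for the last vertex), i.e. 2 for every vertex of this triangle.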
4f7268ad341e15ce21f50d4fdf8ba9e63a886826.cu
#include <iostream> #include <cstdlib> #include <cstdio> #include <curand_kernel.h> #include <thrust/reduce.h> #include <thrust/functional.h> #include <thrust/execution_policy.h> #include <thrust/extrema.h> #include <thrust/device_ptr.h> using namespace std; __device__ int d_count = 0; __global__ void colourMax (int *vertexArray, int *neighbourArray, int *numbers, int n, int m, int *colouring, int currentColour){ int i= blockDim.x * blockIdx.x + threadIdx.x; if (i>=n){ return; } if (colouring[i]!=0){ return; } int myMax = numbers[i]; // printf("I am node %d with value %d\n", i+1, myMax); int start = -1, stop = -1; start = vertexArray[i]; if (i==n-1){ stop = 2*m; } else{ stop = vertexArray[i+1]; } for (int j=start; j<stop; j++){ // printf("My neighbour %d with value %d from %d \n", neighbourArray[j], numbers[neighbourArray[j]-1], i+1); int neighbour = neighbourArray[j]-1; if (colouring[neighbour]==0 && numbers[neighbour] >= myMax){ if (numbers[neighbour] == myMax){ if (i < neighbour){ continue; } } return; } } colouring[i] = currentColour; atomicAdd(&d_count, 1); } __global__ void setup_kernel (curandState * state, unsigned long seed ){ int i= blockDim.x * blockIdx.x + threadIdx.x; curand_init (seed, i, 0, &state[i]); } __global__ void randomNumbering (curandState* globalState, int *degreeCount, int n, int limit){ int i= blockDim.x * blockIdx.x + threadIdx.x; curandState localState = globalState[i]; float RANDOM = curand_uniform( &localState ); globalState[i] = localState; RANDOM *= (limit - 1 + 0.999999); RANDOM += 1; degreeCount[i] = (int) RANDOM; } __global__ void degreeCalc (int *vertexArray, int *neighbourArray, int *degreeCount, int n, int m){ int i= blockDim.x * blockIdx.x + threadIdx.x; if (i>=n){ return; } int start = -1, stop = -1; int diff=0; start = vertexArray[i]; if (i==n-1){ stop = 2*m; } else{ stop = vertexArray[i+1]; } diff = stop-start; degreeCount[i]=diff; } void edgesPrint (int vertexArray[], int neighbourArray[], int n, int m){ for (int i=0; i<n-1; i++){ for (int j = vertexArray[i]; j < vertexArray[i+1]; ++j){ cout<<"e "<<i+1<<" "<<neighbourArray[j]<<endl; /* code */ } } for (int j = vertexArray[n-1]; j < m; ++j) { cout<<"e "<<n<<" "<<neighbourArray[j]<<endl; /* code */ } } int main(int argc, char const *argv[]) { string a, b; int n, m; cin>>a>>b>>n>>m; int *h_count = new int; int *h_vertexArray = new int [n]; int *h_neighbourArray = new int [2*m]; int *h_degreeCount = new int [n]; int *h_colour = new int [n]; int *d_vertexArray = NULL; cudaMalloc((void **)&d_vertexArray, n*sizeof(int)); int *d_neighbourArray = NULL; cudaMalloc((void **)&d_neighbourArray, 2*m*sizeof(int)); int *d_colour = NULL; cudaMalloc((void **)&d_colour, (n)*sizeof(int)); cudaMemset((void *)d_colour, 0, (n)*sizeof(int)); int *d_degreeCount = NULL; cudaMalloc((void **)&d_degreeCount, (n)*sizeof(int)); cudaMemset((void *)d_degreeCount, 0, (n)*sizeof(int)); curandState* devStates; cudaMalloc ( &devStates, n*sizeof( curandState ) ); for (int i = 0; i < n; ++i) { /* code */ h_vertexArray[i]=2*m; } int NSlast = 0; int NSoffset = 0; int NSprev=0; for (int i=0; i<2*m; i++){ int start, end; cin>>start>>end; for (int j=NSlast+1; j<start; j++){ h_vertexArray[j-1]=NSoffset; } if (NSprev!=start){ NSlast=start; h_vertexArray[start-1]=NSoffset; NSprev=start; } h_neighbourArray[NSoffset]=end; NSoffset++; } // int offset = 0; // int current = 0; // int mark = 1; // for (int i = 0; i < 2*m; ++i) // { // int start; // int end; // cin>>start>>end; // if (start!=mark){ // if (start == mark+1 && 
h_vertexArray[mark-1]!=2*m){ // } // else{ // for (int j = mark; j<start; j++){ // h_vertexArray[j-1]=offset; // } // } // mark = start; // } // if (start==current){ // h_neighbourArray[offset]=end; // offset++; // } // else { // current = start; // h_vertexArray[current-1]=offset; // h_neighbourArray[offset]=end; // offset++; // } // } cudaMemcpy(d_vertexArray, h_vertexArray, n*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_neighbourArray, h_neighbourArray, 2*m*sizeof(int), cudaMemcpyHostToDevice); int threadsPerBlock = 512; int blocksPerGrid = (n + threadsPerBlock -1)/threadsPerBlock; //cout<<threadsPerBlock<<" "<<blocksPerGrid<<endl; degreeCalc<<<blocksPerGrid, threadsPerBlock>>>(d_vertexArray, d_neighbourArray, d_degreeCount, n, m); // cudaMemcpy(h_degreeCount, d_degreeCount, n*sizeof(int), cudaMemcpyDeviceToHost); // for (int i=0; i<n; i++){ // cout<<h_degreeCount[i]<<endl; // } // thrust::device_ptr<int> d_ptr = thrust::device_pointer_cast(d_degreeCount); int maxDegree = *(thrust::max_element(d_ptr, d_ptr + n)); cout<<"Max = "<<maxDegree<<endl; setup_kernel <<<blocksPerGrid, threadsPerBlock>>> ( devStates, time(NULL) ); randomNumbering<<<blocksPerGrid, threadsPerBlock>>>(devStates, d_degreeCount, n, n); cudaMemcpy(h_degreeCount, d_degreeCount, n*sizeof(int), cudaMemcpyDeviceToHost); // cout<<"Random numbers: "<<endl; // // for (int i=0; i<n; i++){ // cout<<h_degreeCount[i]<<endl; // } int colourCount = 1; while (1){ colourMax<<<blocksPerGrid, threadsPerBlock>>>(d_vertexArray, d_neighbourArray, d_degreeCount, n, m, d_colour, colourCount); cudaMemcpyFromSymbol(h_count, d_count, sizeof(int), 0, cudaMemcpyDeviceToHost); cout<<"H Count = "<<*h_count<<"at colour: "<<colourCount<<endl; if (*h_count == n){ break; } colourCount++; } cudaMemcpy(h_colour, d_colour, n*sizeof(int), cudaMemcpyDeviceToHost); thrust::device_ptr<int> c_ptr = thrust::device_pointer_cast(d_colour); int maxColour = *(thrust::max_element(c_ptr, c_ptr + n)); cout<<"Max Colour = "<<maxColour<<endl; // cout<<"Colour numbers: "<<endl; // // for (int i=0; i<n; i++){ // cout<<h_colour[i]<<endl; // } delete h_count; delete[] h_vertexArray; delete[] h_neighbourArray; delete[] h_degreeCount; delete[] h_colour; cudaFree(d_neighbourArray); cudaFree(d_vertexArray); cudaFree(d_degreeCount); cudaFree(d_colour); cudaDeviceReset(); return 0; }
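// Note: colourMax is essentially one round of a Luby / Jones-Plassmann-style independent-set
// colouring. An uncoloured vertex takes currentColour only if its random number beats every
// uncoloured neighbour's, with equal numbers broken in favour of the smaller vertex index;
// d_count accumulates the total number of coloured vertices, and the host loop in main() keeps
// incrementing colourCount until that total reaches n.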
4aabba2efaff90a71e3577da6fb9b4734542c800.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "tensor_5d_equals.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const int n = 1; const int c = 1; const int d = 1; const int h = 1; const int w = 1; const DTYPE *x = NULL; hipMalloc(&x, XSIZE*YSIZE); const int offset_x = 1; const int n_x = 1; const int c_x = 1; const int d_x = 1; const int h_x = 1; const int w_x = 1; const DTYPE *y = NULL; hipMalloc(&y, XSIZE*YSIZE); const int offset_y = 1; const int n_y = 1; const int c_y = 1; const int d_y = 1; const int h_y = 1; const int w_y = 1; int *eq_flag = NULL; hipMalloc(&eq_flag, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( tensor_5d_equals), dim3(gridBlock),dim3(threadBlock), 0, 0, n,c,d,h,w,x,offset_x,n_x,c_x,d_x,h_x,w_x,y,offset_y,n_y,c_y,d_y,h_y,w_y,eq_flag); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( tensor_5d_equals), dim3(gridBlock),dim3(threadBlock), 0, 0, n,c,d,h,w,x,offset_x,n_x,c_x,d_x,h_x,w_x,y,offset_y,n_y,c_y,d_y,h_y,w_y,eq_flag); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( tensor_5d_equals), dim3(gridBlock),dim3(threadBlock), 0, 0, n,c,d,h,w,x,offset_x,n_x,c_x,d_x,h_x,w_x,y,offset_y,n_y,c_y,d_y,h_y,w_y,eq_flag); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
4aabba2efaff90a71e3577da6fb9b4734542c800.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "tensor_5d_equals.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const int n = 1; const int c = 1; const int d = 1; const int h = 1; const int w = 1; const DTYPE *x = NULL; cudaMalloc(&x, XSIZE*YSIZE); const int offset_x = 1; const int n_x = 1; const int c_x = 1; const int d_x = 1; const int h_x = 1; const int w_x = 1; const DTYPE *y = NULL; cudaMalloc(&y, XSIZE*YSIZE); const int offset_y = 1; const int n_y = 1; const int c_y = 1; const int d_y = 1; const int h_y = 1; const int w_y = 1; int *eq_flag = NULL; cudaMalloc(&eq_flag, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); tensor_5d_equals<<<gridBlock,threadBlock>>>(n,c,d,h,w,x,offset_x,n_x,c_x,d_x,h_x,w_x,y,offset_y,n_y,c_y,d_y,h_y,w_y,eq_flag); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { tensor_5d_equals<<<gridBlock,threadBlock>>>(n,c,d,h,w,x,offset_x,n_x,c_x,d_x,h_x,w_x,y,offset_y,n_y,c_y,d_y,h_y,w_y,eq_flag); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { tensor_5d_equals<<<gridBlock,threadBlock>>>(n,c,d,h,w,x,offset_x,n_x,c_x,d_x,h_x,w_x,y,offset_y,n_y,c_y,d_y,h_y,w_y,eq_flag); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
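// The iXSIZE/iYSIZE while-loops above simply round the problem size up to the next multiple of
// the block size; a minimal equivalent sketch (using the same XSIZE/YSIZE/BLOCKX/BLOCKY
// variables) would be:
//
//   dim3 gridBlock((XSIZE + BLOCKX - 1) / BLOCKX, (YSIZE + BLOCKY - 1) / BLOCKY);
//   dim3 threadBlock(BLOCKX, BLOCKY);
//
// e.g. XSIZE = 240, BLOCKX = 32 gives iXSIZE = 256 and gridBlock.x = 256/32 = 8 = (240 + 31)/32.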
dad421d6f01489eeac017fc9170c146193988a80.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "lightconv_cuda.cuh" #include "lightconv_cuda_forward.cu" #include "lightconv_cuda_backward.cu" #include "hip_utils.hip" template<int FS, int SB, int padding_l, typename scalar_t> __global__ void lightconv_forward_kernel(const scalar_t* input, const scalar_t* filters, int minibatch, int sequenceLength, int numFeatures, int numFiltersInBlock, scalar_t* output) { const int tid = threadIdx.x; const int batchIdx = blockIdx.x; const int featureIdx = blockIdx.y; const int filterIdx = featureIdx / numFiltersInBlock; const int IOOffset = numFeatures * sequenceLength * batchIdx + featureIdx * sequenceLength; const scalar_t* inputFeature = &input[IOOffset]; scalar_t* outputFeature = &output[IOOffset]; const scalar_t* inputFilter = &filters[filterIdx * FS]; assert(blockDim.x == SB); scalar_t filter[FS]; #pragma unroll for (int i = 0; i < FS; ++i) { filter[i] = inputFilter[i]; } __shared__ scalar_t temp[SB + FS]; zeroSharedMem<FS, SB, padding_l>(temp); const int numIterations = divUp<int, int>(sequenceLength, SB); for (int i = 0; i < numIterations; ++i) { // Read input into shared memory const int inputOffset = i * SB; load_input_to_shared<FS, SB, padding_l>(inputFeature, inputOffset, sequenceLength, i, numIterations, (numIterations == 1), temp); __syncthreads(); scalar_t out = 0; #pragma unroll for (int j = 0; j < FS; ++j) { out += filter[j] * temp[tid + j]; } // Write output const int outputOffset = inputOffset; if ((outputOffset + tid) < sequenceLength) { outputFeature[outputOffset + tid] = out; } __syncthreads(); } } template<int FS, int SB, int padding_l, typename scalar_t> __global__ void lightconv_grad_wrt_input_kernel( const scalar_t* input, const scalar_t* filters, int minibatch, int sequenceLength, int numFeatures, int numFiltersInBlock, scalar_t* output) { // input grad kernel is similar to forward kernel const int tid = threadIdx.x; const int batchIdx = blockIdx.x; const int featureIdx = blockIdx.y; const int filterIdx = featureIdx / numFiltersInBlock; const int IOOffset = numFeatures * sequenceLength * batchIdx + featureIdx * sequenceLength; const scalar_t* inputFeature = &input[IOOffset]; scalar_t* outputFeature = &output[IOOffset]; const scalar_t* inputFilter = &filters[filterIdx * FS]; assert(blockDim.x == SB); scalar_t filter[FS]; // The only change is loading the filter in reverse #pragma unroll for (int i = 0; i < FS; ++i) { filter[i] = inputFilter[FS - i - 1]; } __shared__ scalar_t temp[SB + FS]; const int padding = FS - padding_l - 1; zeroSharedMem<FS, SB, padding>(temp); __syncthreads(); const int numIterations = divUp<int, int>(sequenceLength, SB); for (int i = 0; i < numIterations; ++i) { // Read input into shared memory const int inputOffset = i * SB; load_input_to_shared<FS, SB, padding>(inputFeature, inputOffset, sequenceLength, i, numIterations, false, temp); __syncthreads(); scalar_t out = 0; #pragma unroll for (int j = 0; j < FS; ++j) { out += filter[j] * temp[tid + j]; } // Write output const int outputOffset = inputOffset; if ((outputOffset + tid) < sequenceLength) { outputFeature[outputOffset + tid] = out; } __syncthreads(); } } // This is by far the most expensive kernel in terms of time taken. 
// Can be 16x slower than the forward or grad_wrt_input when filter size is 31 template<int FS, int SB, int padding_l, typename scalar_t> __global__ void lightconv_grad_wrt_weights_firstpass_short_kernel( const scalar_t* input, const scalar_t* gradInput, int minibatch, int sequenceLength, int numFeatures, int numFiltersInBlock, int numHeads, float* output) { const int tid = threadIdx.x; const int batchIdx = blockIdx.x; const int filterIdx = blockIdx.y; const int numIterations = divUp<int, int>(sequenceLength, SB); float* tempOutputGradWeight = &output[filterIdx * FS * minibatch]; assert(blockDim.x == SB); __shared__ scalar_t tempInput[SB + FS]; __shared__ scalar_t tempGradInput[SB + FS]; // local weight accumulation float accumWeights[FS]; // Initialize memory for (int i = 0; i < FS; ++i) { accumWeights[i] = float(0.0); } // loop over each sequence within filterblock for (int idxInFilterBlock = 0; idxInFilterBlock < numFiltersInBlock; ++idxInFilterBlock) { const int featureOffset = batchIdx * numFeatures * sequenceLength + (filterIdx * numFiltersInBlock + idxInFilterBlock) * sequenceLength; const scalar_t* inputFeature = &input[featureOffset]; const scalar_t* gradInputFeature = &gradInput[featureOffset]; zeroSharedMem<FS, SB, padding_l>(tempInput); zeroSharedMem<FS, SB, (FS/2)>(tempGradInput); __syncthreads(); for (int i = 0; i < numIterations; ++i) { const int inputOffset = i * SB; load_input_to_shared<FS, SB, padding_l>(inputFeature, inputOffset, sequenceLength, i, numIterations, false, tempInput); load_input_to_shared<FS, SB, (FS/2)>(gradInputFeature, inputOffset, sequenceLength, i, numIterations, false, tempGradInput); __syncthreads(); const int gradIndex = (FS/2) + tid; scalar_t tempGrad = tempGradInput[gradIndex]; #pragma unroll for (int j = 0; j < FS; j++) { const int inputIndex = tid + j; accumWeights[j] += tempInput[inputIndex] * tempGrad; } __syncthreads(); } } // Row-major sum for (int filterWeightIdx = 0; filterWeightIdx < FS; ++filterWeightIdx) { float temp; if (tid < sequenceLength) { temp = accumWeights[filterWeightIdx]; } else { temp = float(0.0); } const int outputOffset = filterWeightIdx * minibatch + batchIdx; temp = blockReduce(temp); if (tid == 0) { tempOutputGradWeight[outputOffset] = temp; } } } template<int FS, int SB, typename scalar_t> __global__ void lightconv_grad_wrt_weights_secondpass_short_kernel( const float* input, const int minibatch, const int numFiltersInBlock, scalar_t* output) { assert(blockDim.x == SB); const int tid = threadIdx.x; const int filterIdx = blockIdx.x; const int filterWeightIdx = blockIdx.y; const int inputOffset = filterIdx * FS * minibatch + filterWeightIdx * minibatch; const float* tempInput = &input[inputOffset]; // read into shared memory for reduction int readIndex = tid; float sum = 0.0; while (readIndex < minibatch) { sum += tempInput[readIndex]; readIndex += SB; } float temp = blockReduce(sum); if (tid == 0) { output[blockIdx.x * FS + blockIdx.y] = temp; } } // This is by far the most expensive kernel in terms of time taken. 
// Can be 16x slower than the forward or grad_wrt_input when filter size is 31 template<int FS, int SB, int padding_l, typename scalar_t> __global__ void lightconv_grad_wrt_weights_firstpass_kernel( const scalar_t* input, const scalar_t* gradInput, int minibatch, int sequenceLength, int numFeatures, int numFiltersInBlock, float* output) { assert(blockDim.x == SB); const int tid = threadIdx.x; const int batchIdx = blockIdx.x; const int featureIdx = blockIdx.y; const int filterIdx = featureIdx / numFiltersInBlock; const int idxInFilterBlock = featureIdx % numFiltersInBlock; const int numIterations = divUp<int, int>(sequenceLength, SB); float temp; __shared__ scalar_t tempInput[SB + FS]; __shared__ scalar_t tempGradInput[SB + FS]; zeroSharedMem<FS, SB, padding_l>(tempInput); zeroSharedMem<FS, SB, (FS/2)>(tempGradInput); __syncthreads(); float accumWeights[FS]; for (int i = 0; i < FS; ++i) { accumWeights[i] = float(0.0); } const int IOOffset = batchIdx * numFeatures * sequenceLength + featureIdx * sequenceLength; const scalar_t* inputFeature = &input[IOOffset]; const scalar_t* gradInputFeature = &gradInput[IOOffset]; float* tempOutputGradWeight = &output[filterIdx * FS * minibatch * numFiltersInBlock]; for (int i = 0; i < numIterations; ++i) { const int inputOffset = i * SB; load_input_to_shared<FS, SB, padding_l>(inputFeature, inputOffset, sequenceLength, i, numIterations, false, tempInput); load_input_to_shared<FS, SB, (FS/2)>(gradInputFeature, inputOffset, sequenceLength, i, numIterations, false, tempGradInput); __syncthreads(); #pragma unroll for (int j = 0; j < FS; ++j) { accumWeights[j] += tempInput[tid + j] * tempGradInput[tid + (FS/2)]; } __syncthreads(); } // Row-major sum for (int filterWeightIdx = 0; filterWeightIdx < FS; ++filterWeightIdx) { // Write to shared memory before reduction if (tid < sequenceLength) { temp = accumWeights[filterWeightIdx]; } else { temp = float(0.0); } temp = blockReduce(temp); const int outputOffset = filterWeightIdx * minibatch * numFiltersInBlock + batchIdx * numFiltersInBlock + idxInFilterBlock; if (tid == 0) { tempOutputGradWeight[outputOffset] = temp; } } } template<int FS, int SB, typename scalar_t> __global__ void lightconv_grad_wrt_weights_secondpass_kernel( const float* input, const int minibatch, const int numFiltersInBlock, scalar_t* output) { assert(blockDim.x == SB); const int tid = threadIdx.x; // What is the id within a minibatch const int filterIdx = blockIdx.x; const int filterWeightIdx = blockIdx.y; const int inputOffset = filterIdx * FS * minibatch * numFiltersInBlock + filterWeightIdx * minibatch * numFiltersInBlock; const float* tempInput = &input[inputOffset]; int readIndex = tid; float sum = float(0.0); while (readIndex < (minibatch * numFiltersInBlock)) { sum += tempInput[readIndex]; readIndex += SB; } float temp = blockReduce(sum); if (tid == 0) { output[blockIdx.x * FS + blockIdx.y] = temp; } }
dad421d6f01489eeac017fc9170c146193988a80.cu
#include "lightconv_cuda.cuh" #include "lightconv_cuda_forward.cu" #include "lightconv_cuda_backward.cu" #include "cuda_utils.cu" template<int FS, int SB, int padding_l, typename scalar_t> __global__ void lightconv_forward_kernel(const scalar_t* input, const scalar_t* filters, int minibatch, int sequenceLength, int numFeatures, int numFiltersInBlock, scalar_t* output) { const int tid = threadIdx.x; const int batchIdx = blockIdx.x; const int featureIdx = blockIdx.y; const int filterIdx = featureIdx / numFiltersInBlock; const int IOOffset = numFeatures * sequenceLength * batchIdx + featureIdx * sequenceLength; const scalar_t* inputFeature = &input[IOOffset]; scalar_t* outputFeature = &output[IOOffset]; const scalar_t* inputFilter = &filters[filterIdx * FS]; assert(blockDim.x == SB); scalar_t filter[FS]; #pragma unroll for (int i = 0; i < FS; ++i) { filter[i] = inputFilter[i]; } __shared__ scalar_t temp[SB + FS]; zeroSharedMem<FS, SB, padding_l>(temp); const int numIterations = divUp<int, int>(sequenceLength, SB); for (int i = 0; i < numIterations; ++i) { // Read input into shared memory const int inputOffset = i * SB; load_input_to_shared<FS, SB, padding_l>(inputFeature, inputOffset, sequenceLength, i, numIterations, (numIterations == 1), temp); __syncthreads(); scalar_t out = 0; #pragma unroll for (int j = 0; j < FS; ++j) { out += filter[j] * temp[tid + j]; } // Write output const int outputOffset = inputOffset; if ((outputOffset + tid) < sequenceLength) { outputFeature[outputOffset + tid] = out; } __syncthreads(); } } template<int FS, int SB, int padding_l, typename scalar_t> __global__ void lightconv_grad_wrt_input_kernel( const scalar_t* input, const scalar_t* filters, int minibatch, int sequenceLength, int numFeatures, int numFiltersInBlock, scalar_t* output) { // input grad kernel is similar to forward kernel const int tid = threadIdx.x; const int batchIdx = blockIdx.x; const int featureIdx = blockIdx.y; const int filterIdx = featureIdx / numFiltersInBlock; const int IOOffset = numFeatures * sequenceLength * batchIdx + featureIdx * sequenceLength; const scalar_t* inputFeature = &input[IOOffset]; scalar_t* outputFeature = &output[IOOffset]; const scalar_t* inputFilter = &filters[filterIdx * FS]; assert(blockDim.x == SB); scalar_t filter[FS]; // The only change is loading the filter in reverse #pragma unroll for (int i = 0; i < FS; ++i) { filter[i] = inputFilter[FS - i - 1]; } __shared__ scalar_t temp[SB + FS]; const int padding = FS - padding_l - 1; zeroSharedMem<FS, SB, padding>(temp); __syncthreads(); const int numIterations = divUp<int, int>(sequenceLength, SB); for (int i = 0; i < numIterations; ++i) { // Read input into shared memory const int inputOffset = i * SB; load_input_to_shared<FS, SB, padding>(inputFeature, inputOffset, sequenceLength, i, numIterations, false, temp); __syncthreads(); scalar_t out = 0; #pragma unroll for (int j = 0; j < FS; ++j) { out += filter[j] * temp[tid + j]; } // Write output const int outputOffset = inputOffset; if ((outputOffset + tid) < sequenceLength) { outputFeature[outputOffset + tid] = out; } __syncthreads(); } } // This is by far the most expensive kernel in terms of time taken. 
// Can be 16x slower than the forward or grad_wrt_input when filter size is 31 template<int FS, int SB, int padding_l, typename scalar_t> __global__ void lightconv_grad_wrt_weights_firstpass_short_kernel( const scalar_t* input, const scalar_t* gradInput, int minibatch, int sequenceLength, int numFeatures, int numFiltersInBlock, int numHeads, float* output) { const int tid = threadIdx.x; const int batchIdx = blockIdx.x; const int filterIdx = blockIdx.y; const int numIterations = divUp<int, int>(sequenceLength, SB); float* tempOutputGradWeight = &output[filterIdx * FS * minibatch]; assert(blockDim.x == SB); __shared__ scalar_t tempInput[SB + FS]; __shared__ scalar_t tempGradInput[SB + FS]; // local weight accumulation float accumWeights[FS]; // Initialize memory for (int i = 0; i < FS; ++i) { accumWeights[i] = float(0.0); } // loop over each sequence within filterblock for (int idxInFilterBlock = 0; idxInFilterBlock < numFiltersInBlock; ++idxInFilterBlock) { const int featureOffset = batchIdx * numFeatures * sequenceLength + (filterIdx * numFiltersInBlock + idxInFilterBlock) * sequenceLength; const scalar_t* inputFeature = &input[featureOffset]; const scalar_t* gradInputFeature = &gradInput[featureOffset]; zeroSharedMem<FS, SB, padding_l>(tempInput); zeroSharedMem<FS, SB, (FS/2)>(tempGradInput); __syncthreads(); for (int i = 0; i < numIterations; ++i) { const int inputOffset = i * SB; load_input_to_shared<FS, SB, padding_l>(inputFeature, inputOffset, sequenceLength, i, numIterations, false, tempInput); load_input_to_shared<FS, SB, (FS/2)>(gradInputFeature, inputOffset, sequenceLength, i, numIterations, false, tempGradInput); __syncthreads(); const int gradIndex = (FS/2) + tid; scalar_t tempGrad = tempGradInput[gradIndex]; #pragma unroll for (int j = 0; j < FS; j++) { const int inputIndex = tid + j; accumWeights[j] += tempInput[inputIndex] * tempGrad; } __syncthreads(); } } // Row-major sum for (int filterWeightIdx = 0; filterWeightIdx < FS; ++filterWeightIdx) { float temp; if (tid < sequenceLength) { temp = accumWeights[filterWeightIdx]; } else { temp = float(0.0); } const int outputOffset = filterWeightIdx * minibatch + batchIdx; temp = blockReduce(temp); if (tid == 0) { tempOutputGradWeight[outputOffset] = temp; } } } template<int FS, int SB, typename scalar_t> __global__ void lightconv_grad_wrt_weights_secondpass_short_kernel( const float* input, const int minibatch, const int numFiltersInBlock, scalar_t* output) { assert(blockDim.x == SB); const int tid = threadIdx.x; const int filterIdx = blockIdx.x; const int filterWeightIdx = blockIdx.y; const int inputOffset = filterIdx * FS * minibatch + filterWeightIdx * minibatch; const float* tempInput = &input[inputOffset]; // read into shared memory for reduction int readIndex = tid; float sum = 0.0; while (readIndex < minibatch) { sum += tempInput[readIndex]; readIndex += SB; } float temp = blockReduce(sum); if (tid == 0) { output[blockIdx.x * FS + blockIdx.y] = temp; } } // This is by far the most expensive kernel in terms of time taken. 
// Can be 16x slower than the forward or grad_wrt_input when filter size is 31 template<int FS, int SB, int padding_l, typename scalar_t> __global__ void lightconv_grad_wrt_weights_firstpass_kernel( const scalar_t* input, const scalar_t* gradInput, int minibatch, int sequenceLength, int numFeatures, int numFiltersInBlock, float* output) { assert(blockDim.x == SB); const int tid = threadIdx.x; const int batchIdx = blockIdx.x; const int featureIdx = blockIdx.y; const int filterIdx = featureIdx / numFiltersInBlock; const int idxInFilterBlock = featureIdx % numFiltersInBlock; const int numIterations = divUp<int, int>(sequenceLength, SB); float temp; __shared__ scalar_t tempInput[SB + FS]; __shared__ scalar_t tempGradInput[SB + FS]; zeroSharedMem<FS, SB, padding_l>(tempInput); zeroSharedMem<FS, SB, (FS/2)>(tempGradInput); __syncthreads(); float accumWeights[FS]; for (int i = 0; i < FS; ++i) { accumWeights[i] = float(0.0); } const int IOOffset = batchIdx * numFeatures * sequenceLength + featureIdx * sequenceLength; const scalar_t* inputFeature = &input[IOOffset]; const scalar_t* gradInputFeature = &gradInput[IOOffset]; float* tempOutputGradWeight = &output[filterIdx * FS * minibatch * numFiltersInBlock]; for (int i = 0; i < numIterations; ++i) { const int inputOffset = i * SB; load_input_to_shared<FS, SB, padding_l>(inputFeature, inputOffset, sequenceLength, i, numIterations, false, tempInput); load_input_to_shared<FS, SB, (FS/2)>(gradInputFeature, inputOffset, sequenceLength, i, numIterations, false, tempGradInput); __syncthreads(); #pragma unroll for (int j = 0; j < FS; ++j) { accumWeights[j] += tempInput[tid + j] * tempGradInput[tid + (FS/2)]; } __syncthreads(); } // Row-major sum for (int filterWeightIdx = 0; filterWeightIdx < FS; ++filterWeightIdx) { // Write to shared memory before reduction if (tid < sequenceLength) { temp = accumWeights[filterWeightIdx]; } else { temp = float(0.0); } temp = blockReduce(temp); const int outputOffset = filterWeightIdx * minibatch * numFiltersInBlock + batchIdx * numFiltersInBlock + idxInFilterBlock; if (tid == 0) { tempOutputGradWeight[outputOffset] = temp; } } } template<int FS, int SB, typename scalar_t> __global__ void lightconv_grad_wrt_weights_secondpass_kernel( const float* input, const int minibatch, const int numFiltersInBlock, scalar_t* output) { assert(blockDim.x == SB); const int tid = threadIdx.x; // What is the id within a minibatch const int filterIdx = blockIdx.x; const int filterWeightIdx = blockIdx.y; const int inputOffset = filterIdx * FS * minibatch * numFiltersInBlock + filterWeightIdx * minibatch * numFiltersInBlock; const float* tempInput = &input[inputOffset]; int readIndex = tid; float sum = float(0.0); while (readIndex < (minibatch * numFiltersInBlock)) { sum += tempInput[readIndex]; readIndex += SB; } float temp = blockReduce(sum); if (tid == 0) { output[blockIdx.x * FS + blockIdx.y] = temp; } }
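// Dispatch sketch (illustrative only; the actual template instantiation and launch live in the
// lightconv_cuda_forward.cu / lightconv_cuda_backward.cu files included above). The launch shape
// is implied by the indexing in the kernels: one block per (batch, feature) pair and SB threads
// per block, with the filter size FS and left padding fixed at compile time.
//
//   dim3 grid(minibatch, numFeatures);
//   dim3 block(SB);  // matches assert(blockDim.x == SB)
//   lightconv_forward_kernel<FS, SB, padding_l, scalar_t>
//       <<<grid, block>>>(input, filters, minibatch, sequenceLength,
//                         numFeatures, numFiltersInBlock, output);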
190ae083c891f28e0f6cc417a8fd2aecdc4dafee.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define BLOCK_WIDTH 16 #define BLOCK_HEIGHT 16 enum NEIGH_TYPE {NEIGH_FOUR = 0, NEIGH_EIGHT = 1}; __device__ int getNeighboursLocalIndexes(int neighbours[], int nType); __device__ int getLocalIndex(int localRow, int localCol); __device__ bool inLocalBorder(); __device__ int findRoot(int equivalenceMatrix[], int elementIndex); __device__ bool threadInImage(int height, int width); __device__ int localAddrToGlobal(int label, int imHeight); __global__ void localCCL(const int* input, int* output, const int height, const int width){ __shared__ int segments[BLOCK_WIDTH * BLOCK_HEIGHT]; __shared__ int labels[BLOCK_WIDTH * BLOCK_HEIGHT]; __shared__ int changed; int row = blockIdx.y*blockDim.y + threadIdx.y; int col = blockIdx.x*blockDim.x + threadIdx.x; int localRow = threadIdx.x; int localCol = threadIdx.y; int localIndex = localCol * blockDim.y + localRow; int globalIndex = col * height + row; int newLabel; int nType = NEIGH_EIGHT; // load corresponding image tile to shared memory segments[localIndex] = input[globalIndex]; // clear borders in every tile // if(inLocalBorder()){ // segments[localIndex] = 0; // } __syncthreads(); int label = localIndex; int neighboursIndexes[8]; int numOfNeighbours; if(threadInImage(height, width)){ while(1){ labels[localIndex] = label; if(localRow == 0 && localCol == 0) changed = 0; __syncthreads(); newLabel = label; numOfNeighbours = getNeighboursLocalIndexes(neighboursIndexes, nType); for(int n = 0; n < numOfNeighbours; n++) if(segments[localIndex] == segments[neighboursIndexes[n]]) newLabel = min(newLabel, labels[neighboursIndexes[n]]); __syncthreads(); if(newLabel < label){ atomicMin(labels + label, newLabel); changed = 1; } __syncthreads(); if(changed == 0) break; label = findRoot(labels, label); __syncthreads(); } } output[globalIndex] = localAddrToGlobal(label, height); // if(input[globalIndex] == 0) // output[globalIndex] = 0; } __device__ int localAddrToGlobal(int label, int imHeight){ int row = blockIdx.y*blockDim.y + label/BLOCK_WIDTH; int col = blockIdx.x*blockDim.x + label%16; return col * imHeight + row; } //returns length of neighbours list __device__ int getNeighboursLocalIndexes(int neighbours[], int nType){ int localRow = threadIdx.x; int localCol = threadIdx.y; int length; if(nType == NEIGH_FOUR){ if(localRow == 0){ if(localCol == 0){ neighbours[0] = getLocalIndex(localRow, localCol + 1); neighbours[1] = getLocalIndex(localRow + 1, localCol); length = 2; } else if(localCol == BLOCK_HEIGHT - 1){ neighbours[0] = getLocalIndex(localRow, localCol - 1); neighbours[1] = getLocalIndex(localRow + 1, localCol); length = 2; } else{ neighbours[0] = getLocalIndex(localRow, localCol - 1); neighbours[1] = getLocalIndex(localRow, localCol + 1); neighbours[3] = getLocalIndex(localRow + 1, localCol); length = 3; } } else if(localRow == BLOCK_HEIGHT-1){ if(localCol == 0){ neighbours[0] = getLocalIndex(localRow, localCol + 1); neighbours[1] = getLocalIndex(localRow - 1, localCol); length = 2; } else if(localCol == BLOCK_HEIGHT - 1){ neighbours[0] = getLocalIndex(localRow, localCol - 1); neighbours[1] = getLocalIndex(localRow - 1, localCol); length = 2; } else{ neighbours[0] = getLocalIndex(localRow, localCol - 1); neighbours[1] = getLocalIndex(localRow, localCol + 1); neighbours[2] = getLocalIndex(localRow - 1, localCol); length = 3; } } else if(localCol == 0){ neighbours[0] = getLocalIndex(localRow - 1, localCol); neighbours[1] = getLocalIndex(localRow, localCol + 1); 
neighbours[2] = getLocalIndex(localRow + 1, localCol); length = 3; } else if(localCol == BLOCK_WIDTH-1){ neighbours[0] = getLocalIndex(localRow - 1, localCol); neighbours[1] = getLocalIndex(localRow, localCol - 1); neighbours[2] = getLocalIndex(localRow + 1, localCol); length = 3; } else{ neighbours[0] = getLocalIndex(localRow - 1, localCol); neighbours[1] = getLocalIndex(localRow, localCol + 1); neighbours[2] = getLocalIndex(localRow + 1, localCol); neighbours[3] = getLocalIndex(localRow, localCol - 1); length = 4; } } else if(nType == NEIGH_EIGHT){ if(localRow == 0){ if(localCol == 0){ neighbours[0] = getLocalIndex(localRow, localCol + 1); neighbours[1] = getLocalIndex(localRow + 1, localCol); neighbours[2] = getLocalIndex(localRow + 1, localCol + 1); length = 3; } else if(localCol == BLOCK_WIDTH-1){ neighbours[0] = getLocalIndex(localRow, localCol - 1); neighbours[1] = getLocalIndex(localRow + 1, localCol); neighbours[2] = getLocalIndex(localRow + 1, localCol - 1); length = 3; } else{ neighbours[0] = getLocalIndex(localRow + 1, localCol - 1); neighbours[1] = getLocalIndex(localRow + 1, localCol); neighbours[2] = getLocalIndex(localRow + 1, localCol + 1); neighbours[3] = getLocalIndex(localRow, localCol - 1); neighbours[4] = getLocalIndex(localRow, localCol + 1); length = 5; } } else if(localRow == BLOCK_HEIGHT-1){ if(localCol == 0){ neighbours[0] = getLocalIndex(localRow, localCol + 1); neighbours[1] = getLocalIndex(localRow - 1, localCol); neighbours[2] = getLocalIndex(localRow - 1, localCol + 1); length = 3; } else if(localCol == BLOCK_WIDTH-1){ neighbours[0] = getLocalIndex(localRow, localCol - 1); neighbours[1] = getLocalIndex(localRow - 1, localCol); neighbours[2] = getLocalIndex(localRow - 1, localCol - 1); length = 3; } else{ neighbours[0] = getLocalIndex(localRow - 1, localCol - 1); neighbours[1] = getLocalIndex(localRow - 1, localCol); neighbours[2] = getLocalIndex(localRow - 1, localCol + 1); neighbours[3] = getLocalIndex(localRow, localCol - 1); neighbours[4] = getLocalIndex(localRow, localCol + 1); length = 5; } } else if(localCol == 0){ neighbours[0] = getLocalIndex(localRow - 1, localCol); neighbours[1] = getLocalIndex(localRow - 1, localCol + 1); neighbours[2] = getLocalIndex(localRow, localCol + 1); neighbours[3] = getLocalIndex(localRow + 1, localCol); neighbours[4] = getLocalIndex(localRow + 1, localCol + 1); length = 5; } else if(localCol == BLOCK_WIDTH-1){ neighbours[0] = getLocalIndex(localRow - 1, localCol); neighbours[1] = getLocalIndex(localRow - 1, localCol - 1); neighbours[2] = getLocalIndex(localRow, localCol - 1); neighbours[3] = getLocalIndex(localRow + 1, localCol); neighbours[4] = getLocalIndex(localRow + 1, localCol - 1); length = 5; } else{ neighbours[0] = getLocalIndex(localRow - 1, localCol - 1); neighbours[1] = getLocalIndex(localRow - 1, localCol); neighbours[2] = getLocalIndex(localRow - 1, localCol + 1); neighbours[3] = getLocalIndex(localRow, localCol + 1); neighbours[4] = getLocalIndex(localRow + 1, localCol + 1); neighbours[5] = getLocalIndex(localRow + 1, localCol); neighbours[6] = getLocalIndex(localRow + 1, localCol - 1); neighbours[7] = getLocalIndex(localRow, localCol - 1); length = 8; } } return length; } __device__ int getLocalIndex(int localRow, int localCol){ return localCol * blockDim.y + localRow; } __device__ bool inLocalBorder(){ return (threadIdx.x == 0 || threadIdx.x == BLOCK_WIDTH-1 || threadIdx.y == 0 || threadIdx.y == BLOCK_HEIGHT-1); } __device__ bool threadInImage(int height, int width){ int row = blockIdx.y*blockDim.y + 
threadIdx.y; int col = blockIdx.x*blockDim.x + threadIdx.x; return (row >= 0 && row <= height-1 && col >= 0 && col <= width-1); } __device__ int findRoot(int equivalenceMatrix[], int elementIndex){ while(equivalenceMatrix[elementIndex] != elementIndex) elementIndex = equivalenceMatrix[elementIndex]; return elementIndex; }
190ae083c891f28e0f6cc417a8fd2aecdc4dafee.cu
#define BLOCK_WIDTH 16 #define BLOCK_HEIGHT 16 enum NEIGH_TYPE {NEIGH_FOUR = 0, NEIGH_EIGHT = 1}; __device__ int getNeighboursLocalIndexes(int neighbours[], int nType); __device__ int getLocalIndex(int localRow, int localCol); __device__ bool inLocalBorder(); __device__ int findRoot(int equivalenceMatrix[], int elementIndex); __device__ bool threadInImage(int height, int width); __device__ int localAddrToGlobal(int label, int imHeight); __global__ void localCCL(const int* input, int* output, const int height, const int width){ __shared__ int segments[BLOCK_WIDTH * BLOCK_HEIGHT]; __shared__ int labels[BLOCK_WIDTH * BLOCK_HEIGHT]; __shared__ int changed; int row = blockIdx.y*blockDim.y + threadIdx.y; int col = blockIdx.x*blockDim.x + threadIdx.x; int localRow = threadIdx.x; int localCol = threadIdx.y; int localIndex = localCol * blockDim.y + localRow; int globalIndex = col * height + row; int newLabel; int nType = NEIGH_EIGHT; // load corresponding image tile to shared memory segments[localIndex] = input[globalIndex]; // clear borders in every tile // if(inLocalBorder()){ // segments[localIndex] = 0; // } __syncthreads(); int label = localIndex; int neighboursIndexes[8]; int numOfNeighbours; if(threadInImage(height, width)){ while(1){ labels[localIndex] = label; if(localRow == 0 && localCol == 0) changed = 0; __syncthreads(); newLabel = label; numOfNeighbours = getNeighboursLocalIndexes(neighboursIndexes, nType); for(int n = 0; n < numOfNeighbours; n++) if(segments[localIndex] == segments[neighboursIndexes[n]]) newLabel = min(newLabel, labels[neighboursIndexes[n]]); __syncthreads(); if(newLabel < label){ atomicMin(labels + label, newLabel); changed = 1; } __syncthreads(); if(changed == 0) break; label = findRoot(labels, label); __syncthreads(); } } output[globalIndex] = localAddrToGlobal(label, height); // if(input[globalIndex] == 0) // output[globalIndex] = 0; } __device__ int localAddrToGlobal(int label, int imHeight){ int row = blockIdx.y*blockDim.y + label/BLOCK_WIDTH; int col = blockIdx.x*blockDim.x + label%16; return col * imHeight + row; } //returns length of neighbours list __device__ int getNeighboursLocalIndexes(int neighbours[], int nType){ int localRow = threadIdx.x; int localCol = threadIdx.y; int length; if(nType == NEIGH_FOUR){ if(localRow == 0){ if(localCol == 0){ neighbours[0] = getLocalIndex(localRow, localCol + 1); neighbours[1] = getLocalIndex(localRow + 1, localCol); length = 2; } else if(localCol == BLOCK_HEIGHT - 1){ neighbours[0] = getLocalIndex(localRow, localCol - 1); neighbours[1] = getLocalIndex(localRow + 1, localCol); length = 2; } else{ neighbours[0] = getLocalIndex(localRow, localCol - 1); neighbours[1] = getLocalIndex(localRow, localCol + 1); neighbours[3] = getLocalIndex(localRow + 1, localCol); length = 3; } } else if(localRow == BLOCK_HEIGHT-1){ if(localCol == 0){ neighbours[0] = getLocalIndex(localRow, localCol + 1); neighbours[1] = getLocalIndex(localRow - 1, localCol); length = 2; } else if(localCol == BLOCK_HEIGHT - 1){ neighbours[0] = getLocalIndex(localRow, localCol - 1); neighbours[1] = getLocalIndex(localRow - 1, localCol); length = 2; } else{ neighbours[0] = getLocalIndex(localRow, localCol - 1); neighbours[1] = getLocalIndex(localRow, localCol + 1); neighbours[2] = getLocalIndex(localRow - 1, localCol); length = 3; } } else if(localCol == 0){ neighbours[0] = getLocalIndex(localRow - 1, localCol); neighbours[1] = getLocalIndex(localRow, localCol + 1); neighbours[2] = getLocalIndex(localRow + 1, localCol); length = 3; } else if(localCol == 
BLOCK_WIDTH-1){ neighbours[0] = getLocalIndex(localRow - 1, localCol); neighbours[1] = getLocalIndex(localRow, localCol - 1); neighbours[2] = getLocalIndex(localRow + 1, localCol); length = 3; } else{ neighbours[0] = getLocalIndex(localRow - 1, localCol); neighbours[1] = getLocalIndex(localRow, localCol + 1); neighbours[2] = getLocalIndex(localRow + 1, localCol); neighbours[3] = getLocalIndex(localRow, localCol - 1); length = 4; } } else if(nType == NEIGH_EIGHT){ if(localRow == 0){ if(localCol == 0){ neighbours[0] = getLocalIndex(localRow, localCol + 1); neighbours[1] = getLocalIndex(localRow + 1, localCol); neighbours[2] = getLocalIndex(localRow + 1, localCol + 1); length = 3; } else if(localCol == BLOCK_WIDTH-1){ neighbours[0] = getLocalIndex(localRow, localCol - 1); neighbours[1] = getLocalIndex(localRow + 1, localCol); neighbours[2] = getLocalIndex(localRow + 1, localCol - 1); length = 3; } else{ neighbours[0] = getLocalIndex(localRow + 1, localCol - 1); neighbours[1] = getLocalIndex(localRow + 1, localCol); neighbours[2] = getLocalIndex(localRow + 1, localCol + 1); neighbours[3] = getLocalIndex(localRow, localCol - 1); neighbours[4] = getLocalIndex(localRow, localCol + 1); length = 5; } } else if(localRow == BLOCK_HEIGHT-1){ if(localCol == 0){ neighbours[0] = getLocalIndex(localRow, localCol + 1); neighbours[1] = getLocalIndex(localRow - 1, localCol); neighbours[2] = getLocalIndex(localRow - 1, localCol + 1); length = 3; } else if(localCol == BLOCK_WIDTH-1){ neighbours[0] = getLocalIndex(localRow, localCol - 1); neighbours[1] = getLocalIndex(localRow - 1, localCol); neighbours[2] = getLocalIndex(localRow - 1, localCol - 1); length = 3; } else{ neighbours[0] = getLocalIndex(localRow - 1, localCol - 1); neighbours[1] = getLocalIndex(localRow - 1, localCol); neighbours[2] = getLocalIndex(localRow - 1, localCol + 1); neighbours[3] = getLocalIndex(localRow, localCol - 1); neighbours[4] = getLocalIndex(localRow, localCol + 1); length = 5; } } else if(localCol == 0){ neighbours[0] = getLocalIndex(localRow - 1, localCol); neighbours[1] = getLocalIndex(localRow - 1, localCol + 1); neighbours[2] = getLocalIndex(localRow, localCol + 1); neighbours[3] = getLocalIndex(localRow + 1, localCol); neighbours[4] = getLocalIndex(localRow + 1, localCol + 1); length = 5; } else if(localCol == BLOCK_WIDTH-1){ neighbours[0] = getLocalIndex(localRow - 1, localCol); neighbours[1] = getLocalIndex(localRow - 1, localCol - 1); neighbours[2] = getLocalIndex(localRow, localCol - 1); neighbours[3] = getLocalIndex(localRow + 1, localCol); neighbours[4] = getLocalIndex(localRow + 1, localCol - 1); length = 5; } else{ neighbours[0] = getLocalIndex(localRow - 1, localCol - 1); neighbours[1] = getLocalIndex(localRow - 1, localCol); neighbours[2] = getLocalIndex(localRow - 1, localCol + 1); neighbours[3] = getLocalIndex(localRow, localCol + 1); neighbours[4] = getLocalIndex(localRow + 1, localCol + 1); neighbours[5] = getLocalIndex(localRow + 1, localCol); neighbours[6] = getLocalIndex(localRow + 1, localCol - 1); neighbours[7] = getLocalIndex(localRow, localCol - 1); length = 8; } } return length; } __device__ int getLocalIndex(int localRow, int localCol){ return localCol * blockDim.y + localRow; } __device__ bool inLocalBorder(){ return (threadIdx.x == 0 || threadIdx.x == BLOCK_WIDTH-1 || threadIdx.y == 0 || threadIdx.y == BLOCK_HEIGHT-1); } __device__ bool threadInImage(int height, int width){ int row = blockIdx.y*blockDim.y + threadIdx.y; int col = blockIdx.x*blockDim.x + threadIdx.x; return (row >= 0 || row <= 
height-1 || col >= 0 || col <= width-1); } __device__ int findRoot(int equivalenceMatrix[], int elementIndex){ while(equivalenceMatrix[elementIndex] != elementIndex) elementIndex = equivalenceMatrix[elementIndex]; return elementIndex; }
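A minimal host-side driver for the localCCL kernel above, included only as a sketch: the function and buffer names (runLocalCCL, h_input, d_input, and so on) are hypothetical, and it assumes the image dimensions are multiples of the 16x16 tile size and that pixels are stored column-major (index = col * height + row), which is what the kernel's globalIndex computes. Note that the kernel labels connected components within each tile independently; merging labels across tile borders would need a further pass.

// Hypothetical driver for localCCL (sketch, not part of the original files).
void runLocalCCL(const int* h_input, int* h_output, int height, int width)
{
  int *d_input, *d_output;
  size_t bytes = (size_t)width * height * sizeof(int);
  cudaMalloc((void**)&d_input, bytes);
  cudaMalloc((void**)&d_output, bytes);
  cudaMemcpy(d_input, h_input, bytes, cudaMemcpyHostToDevice);

  dim3 block(16, 16);                  // BLOCK_WIDTH x BLOCK_HEIGHT threads per tile
  dim3 grid(width / 16, height / 16);  // one block per image tile
  localCCL<<<grid, block>>>(d_input, d_output, height, width);

  cudaMemcpy(h_output, d_output, bytes, cudaMemcpyDeviceToHost);
  cudaFree(d_input);
  cudaFree(d_output);
}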
fa1358bbda57da25c8d4fd38327ecaf9997a14f9.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <assert.h>

inline hipError_t checkCuda(hipError_t result)
{
  if (result != hipSuccess) {
    fprintf(stderr, "CUDA Runtime Error: %s\n", hipGetErrorString(result));
    assert(result == hipSuccess);
  }
  return result;
}

void initWith(float num, float *a, int N)
{
  for(int i = 0; i < N; ++i)
  {
    a[i] = num;
  }
}

__global__
void addVectorsInto(float *result, float *a, float *b, int N)
{
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = gridDim.x * blockDim.x;

  for(int i = idx; i < N; i += stride)
  {
    result[i] = a[i] + b[i];
  }
}

void checkElementsAre(float target, float *array, int N)
{
  for(int i = 0; i < N; i++)
  {
    if(array[i] != target)
    {
      printf("FAIL: array[%d] - %0.0f does not equal %0.0f\n", i, array[i], target);
      exit(1);
    }
  }
  printf("SUCCESS! All values added correctly.\n");
}

int main()
{
  const int N = 2<<20;
  size_t size = N * sizeof(float);

  float *a;
  float *b;
  float *c;

  checkCuda( hipMallocManaged(&a, size) );
  checkCuda( hipMallocManaged(&b, size) );
  checkCuda( hipMallocManaged(&c, size) );

  initWith(3, a, N);
  initWith(4, b, N);
  initWith(0, c, N);

  size_t threadsPerBlock = 256;
  size_t numOfBlocks = 32;

  hipLaunchKernelGGL(( addVectorsInto), dim3(numOfBlocks),dim3(threadsPerBlock), 0, 0, c, a, b, N);

  checkCuda( hipGetLastError() );
  checkCuda( hipDeviceSynchronize() );

  checkElementsAre(7, c, N);

  hipFree(a);
  hipFree(b);
  hipFree(c);
}
fa1358bbda57da25c8d4fd38327ecaf9997a14f9.cu
#include <stdio.h>
#include <assert.h>

inline cudaError_t checkCuda(cudaError_t result)
{
  if (result != cudaSuccess) {
    fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result));
    assert(result == cudaSuccess);
  }
  return result;
}

void initWith(float num, float *a, int N)
{
  for(int i = 0; i < N; ++i)
  {
    a[i] = num;
  }
}

__global__
void addVectorsInto(float *result, float *a, float *b, int N)
{
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = gridDim.x * blockDim.x;

  for(int i = idx; i < N; i += stride)
  {
    result[i] = a[i] + b[i];
  }
}

void checkElementsAre(float target, float *array, int N)
{
  for(int i = 0; i < N; i++)
  {
    if(array[i] != target)
    {
      printf("FAIL: array[%d] - %0.0f does not equal %0.0f\n", i, array[i], target);
      exit(1);
    }
  }
  printf("SUCCESS! All values added correctly.\n");
}

int main()
{
  const int N = 2<<20;
  size_t size = N * sizeof(float);

  float *a;
  float *b;
  float *c;

  checkCuda( cudaMallocManaged(&a, size) );
  checkCuda( cudaMallocManaged(&b, size) );
  checkCuda( cudaMallocManaged(&c, size) );

  initWith(3, a, N);
  initWith(4, b, N);
  initWith(0, c, N);

  size_t threadsPerBlock = 256;
  size_t numOfBlocks = 32;

  addVectorsInto<<<numOfBlocks,threadsPerBlock>>>(c, a, b, N);

  checkCuda( cudaGetLastError() );
  checkCuda( cudaDeviceSynchronize() );

  checkElementsAre(7, c, N);

  cudaFree(a);
  cudaFree(b);
  cudaFree(c);
}
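The launch above hard-codes numOfBlocks = 32. Because addVectorsInto uses a grid-stride loop, the grid can instead be sized from the device; the snippet below is a sketch of that common refinement and slots into main() in place of the sizing lines (deviceId and numberOfSMs are new names, everything else follows the file).

// Size the grid from the number of streaming multiprocessors (sketch).
int deviceId, numberOfSMs;
cudaGetDevice(&deviceId);
cudaDeviceGetAttribute(&numberOfSMs, cudaDevAttrMultiProcessorCount, deviceId);

size_t threadsPerBlock = 256;
size_t numOfBlocks = 32 * numberOfSMs;   // heuristic: several waves of blocks per SM

addVectorsInto<<<numOfBlocks, threadsPerBlock>>>(c, a, b, N);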
dc3a128f9c5667d42f300f8293c86621e732dc5f.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "compute/negative/negative_internal.h"

namespace magmadnn {
namespace internal {

template <typename T>
__global__ void kernel_negative_full_device(T *x, T *out, unsigned int size) {
    unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
    unsigned int stride = blockDim.x * gridDim.x;

    for (unsigned int i = idx; i < size; i += stride) {
        out[i] = -x[i];
    }
}

template <typename T>
void negative_full_device(Tensor<T> *x, Tensor<T> *out) {
    unsigned int size = out->get_size();
    hipLaunchKernelGGL((kernel_negative_full_device), dim3(1), dim3(size), 0, 0, x->get_ptr(), out->get_ptr(), size);
}

template void negative_full_device(Tensor<int> *x, Tensor<int> *out);
template void negative_full_device(Tensor<float> *x, Tensor<float> *out);
template void negative_full_device(Tensor<double> *x, Tensor<double> *out);

}  // namespace op
}  // namespace magmadnn
dc3a128f9c5667d42f300f8293c86621e732dc5f.cu
#include "compute/negative/negative_internal.h" namespace magmadnn { namespace internal { template <typename T> __global__ void kernel_negative_full_device(T *x, T *out, unsigned int size) { unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x; unsigned int stride = blockDim.x * gridDim.x; for (unsigned int i = idx; i < size; i += stride) { out[i] = -x[i]; } } template <typename T> void negative_full_device(Tensor<T> *x, Tensor<T> *out) { unsigned int size = out->get_size(); kernel_negative_full_device <<< 1, size >>> (x->get_ptr(), out->get_ptr(), size); } template void negative_full_device(Tensor<int> *x, Tensor<int> *out); template void negative_full_device(Tensor<float> *x, Tensor<float> *out); template void negative_full_device(Tensor<double> *x, Tensor<double> *out); } // namespace op } // namespace magmadnn
65ce32443f150d07a45691de54461c870095d0ac.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<math.h> #include<time.h> #include<stdexcept> #include<iostream> using namespace std; __global__ void kernel_sum( int* A, int* B, int* C, int NUMBERofELEMENTS); void sum( int* A, int* B, int* C, int n_el); int main() { int NUMBER_OF_ELEMENTS; cout<<"\nEnter number of elements:"; cin>>NUMBER_OF_ELEMENTS; int SIZE = NUMBER_OF_ELEMENTS*sizeof(int); int* hostA = (int*)malloc(SIZE); int* hostB = (int*)malloc(SIZE); int* hostC = (int*)malloc(SIZE); int* ans = (int*)malloc(SIZE); int* deviceA,*deviceB,*deviceC; hipEvent_t start,end,start1,end1; hipEventCreate(&start1); hipEventCreate(&end1); srand(time(0)); int i; for(i=0;i<NUMBER_OF_ELEMENTS;i++) { hostA[i] = rand()%NUMBER_OF_ELEMENTS; hostB[i] = rand()%NUMBER_OF_ELEMENTS; } hipEventRecord(start1); for(i=0;i<NUMBER_OF_ELEMENTS;i++) { ans[i]=hostA[i]+hostB[i]; } hipEventRecord(end1); hipEventSynchronize(end1); float t1=0; hipEventElapsedTime(&t1,start1,end1); hipEventCreate(&start); hipEventCreate(&end); hipMalloc(&deviceA,SIZE); hipMalloc(&deviceB,SIZE); hipMalloc(&deviceC,SIZE); hipMemcpy(deviceA,hostA,SIZE,hipMemcpyHostToDevice); hipMemcpy(deviceB,hostB,SIZE,hipMemcpyHostToDevice); hipEventRecord(start); sum(deviceA,deviceB,deviceC,NUMBER_OF_ELEMENTS); hipEventRecord(end); hipEventSynchronize(end); float t=0; hipEventElapsedTime(&t,start,end); hipMemcpy(hostC,deviceC,SIZE,hipMemcpyDeviceToHost); hipFree(deviceA); hipFree(deviceB); hipFree(deviceC); double error = 0; for(i = 0;i<NUMBER_OF_ELEMENTS;i++) { double diff = double((hostA[i]+hostB[i])-hostC[i]); error+=diff; cout<<"\nExpected value="<<ans[i]; cout<<"\tActual value="<<hostC[i]; } error = sqrt(error); cout<<"\nError = "<<error<<endl; cout<<"\nSequential time="<<t1; cout<<"\nParallel time="<<t<<endl; delete[] hostA; delete[] hostB; delete[] hostC; return hipDeviceSynchronize(); } void sum( int* A, int* B, int* C, int n_el) { int threadsPerblock,blocksperGrid; if(n_el<512) { threadsPerblock = n_el; blocksperGrid = 1; } else { threadsPerblock = 512; blocksperGrid = ceil(double(n_el)/double(threadsPerblock)); } //now invoke kernel method hipLaunchKernelGGL(( kernel_sum), dim3(blocksperGrid),dim3(threadsPerblock), 0, 0, A,B,C,n_el); } __global__ void kernel_sum( int* A, int* B, int* C, int NUMBERofELEMENTS) { //calculate unique thread index int index = blockDim.x * blockIdx.x + threadIdx.x; if(index<NUMBERofELEMENTS) C[index] = A[index] + B[index]; }
65ce32443f150d07a45691de54461c870095d0ac.cu
#include<math.h> #include<time.h> #include<stdexcept> #include<iostream> using namespace std; __global__ void kernel_sum( int* A, int* B, int* C, int NUMBERofELEMENTS); void sum( int* A, int* B, int* C, int n_el); int main() { int NUMBER_OF_ELEMENTS; cout<<"\nEnter number of elements:"; cin>>NUMBER_OF_ELEMENTS; int SIZE = NUMBER_OF_ELEMENTS*sizeof(int); int* hostA = (int*)malloc(SIZE); int* hostB = (int*)malloc(SIZE); int* hostC = (int*)malloc(SIZE); int* ans = (int*)malloc(SIZE); int* deviceA,*deviceB,*deviceC; cudaEvent_t start,end,start1,end1; cudaEventCreate(&start1); cudaEventCreate(&end1); srand(time(0)); int i; for(i=0;i<NUMBER_OF_ELEMENTS;i++) { hostA[i] = rand()%NUMBER_OF_ELEMENTS; hostB[i] = rand()%NUMBER_OF_ELEMENTS; } cudaEventRecord(start1); for(i=0;i<NUMBER_OF_ELEMENTS;i++) { ans[i]=hostA[i]+hostB[i]; } cudaEventRecord(end1); cudaEventSynchronize(end1); float t1=0; cudaEventElapsedTime(&t1,start1,end1); cudaEventCreate(&start); cudaEventCreate(&end); cudaMalloc(&deviceA,SIZE); cudaMalloc(&deviceB,SIZE); cudaMalloc(&deviceC,SIZE); cudaMemcpy(deviceA,hostA,SIZE,cudaMemcpyHostToDevice); cudaMemcpy(deviceB,hostB,SIZE,cudaMemcpyHostToDevice); cudaEventRecord(start); sum(deviceA,deviceB,deviceC,NUMBER_OF_ELEMENTS); cudaEventRecord(end); cudaEventSynchronize(end); float t=0; cudaEventElapsedTime(&t,start,end); cudaMemcpy(hostC,deviceC,SIZE,cudaMemcpyDeviceToHost); cudaFree(deviceA); cudaFree(deviceB); cudaFree(deviceC); double error = 0; for(i = 0;i<NUMBER_OF_ELEMENTS;i++) { double diff = double((hostA[i]+hostB[i])-hostC[i]); error+=diff; cout<<"\nExpected value="<<ans[i]; cout<<"\tActual value="<<hostC[i]; } error = sqrt(error); cout<<"\nError = "<<error<<endl; cout<<"\nSequential time="<<t1; cout<<"\nParallel time="<<t<<endl; delete[] hostA; delete[] hostB; delete[] hostC; return cudaDeviceSynchronize(); } void sum( int* A, int* B, int* C, int n_el) { int threadsPerblock,blocksperGrid; if(n_el<512) { threadsPerblock = n_el; blocksperGrid = 1; } else { threadsPerblock = 512; blocksperGrid = ceil(double(n_el)/double(threadsPerblock)); } //now invoke kernel method kernel_sum<<<blocksperGrid,threadsPerblock>>>(A,B,C,n_el); } __global__ void kernel_sum( int* A, int* B, int* C, int NUMBERofELEMENTS) { //calculate unique thread index int index = blockDim.x * blockIdx.x + threadIdx.x; if(index<NUMBERofELEMENTS) C[index] = A[index] + B[index]; }
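The verification loop above sums signed differences and then takes a square root, so positive and negative mismatches can cancel; the buffers also come from malloc, so free() is the matching deallocation rather than delete[]. Below is a sketch of an L2-style check; l2Error is a hypothetical helper, the other names follow the file.

#include <math.h>

// Hypothetical helper: L2 error between the expected sums and the device result.
double l2Error(const int* hostA, const int* hostB, const int* hostC, int n)
{
    double err = 0.0;
    for (int i = 0; i < n; i++) {
        double diff = double(hostA[i] + hostB[i]) - double(hostC[i]);
        err += diff * diff;   // square before accumulating so mismatches cannot cancel
    }
    return sqrt(err);
}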
204c47c7be402cdeedac5c02eac4ef46b1d4221a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <hipcub/hipcub.hpp> #include "caffe2/core/context_gpu.h" #include "caffe2/operators/normalize_l1_op.h" #include "caffe2/operators/normalize_op.h" namespace caffe2 { __global__ void NormalizeKernel( const int m, const int n, const int sf, const float* xData, float* yData, const float kEps) { typedef hipcub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce; __shared__ BlockReduce::TempStorage temp_storage; for (int i = blockIdx.x; i < n; i += gridDim.x) { auto base = (i / sf) * sf * m + (i % sf); float sum = 0.0; __shared__ float norm; for (int j = threadIdx.x; j < m; j += blockDim.x) { const auto x_ij = xData[base + j * sf]; sum += x_ij * x_ij; } float reduce_result = BlockReduce(temp_storage).Sum(sum); if (threadIdx.x == 0) { norm = sqrtf(reduce_result); norm = fmaxf(norm, kEps); } __syncthreads(); for (int j = threadIdx.x; j < m; j += blockDim.x) { const auto index = base + j * sf; yData[index] = xData[index] / norm; } } } __global__ void NormalizeGradientKernel( const int M, const int N, const int SF, const float* in_mat, const float* grad_out_mat, float* grad_mat, const float kEps) { typedef hipcub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce; __shared__ BlockReduce::TempStorage temp_storage_sum; __shared__ BlockReduce::TempStorage temp_storage_norm; for (int i = blockIdx.x; i < M; i += gridDim.x) { float sum = 0.0; float norm = 0.0; __shared__ float row_sum; __shared__ float row_norm; __shared__ float row_norm_3; auto base = (i / SF) * SF * N + (i % SF); for (int j = threadIdx.x; j < N; j += blockDim.x) { int index = base + j * SF; sum += in_mat[index] * grad_out_mat[index]; norm += in_mat[index] * in_mat[index]; } float reduce_result = BlockReduce(temp_storage_sum).Sum(sum); float reduce_norm = BlockReduce(temp_storage_norm).Sum(norm); if (threadIdx.x == 0) { row_sum = reduce_result; row_norm = sqrtf(reduce_norm); row_norm = fmaxf(row_norm, kEps); row_norm_3 = powf(row_norm, 3); } __syncthreads(); for (int j = threadIdx.x; j < N; j += blockDim.x) { int index = base + j * SF; const float x_ij = in_mat[index]; const float dy_ij = grad_out_mat[index]; grad_mat[index] = (dy_ij / row_norm) - ((x_ij / row_norm_3) * row_sum); } } } template <> void NormalizeOp<float, CUDAContext>::DoNormalize( const float* xData, float* yData, const int m, const int n, const int sf) { hipLaunchKernelGGL(( NormalizeKernel), dim3(min(n, CAFFE_MAXIMUM_NUM_BLOCKS)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), m, n, sf, xData, yData, kEps_); } template <> bool NormalizeGradientOp<float, CUDAContext>::RunOnDevice() { const auto& X = Input(0); const auto& dY = Input(1); auto* dX = Output(0); dX->ResizeLike(X); const auto canonical_axis = X.canonical_axis_index(OperatorBase::GetSingleArgument<int>("axis", -1)); int N = X.dim32(canonical_axis); int M = X.numel() / N; const int SF = X.size_from_dim(canonical_axis + 1); hipLaunchKernelGGL(( NormalizeGradientKernel), dim3(min(M, CAFFE_MAXIMUM_NUM_BLOCKS)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), M, N, SF, X.data<float>(), dY.data<float>(), dX->template mutable_data<float>(), kEps_); return true; } namespace { __global__ void NormalizeL1Kernel( const int m, const int n, const int sf, const float* xData, float* yData) { typedef hipcub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce; __shared__ BlockReduce::TempStorage temp_storage; for (int i = blockIdx.x; i < n; i += gridDim.x) { auto base = (i / sf) * sf * m + (i % sf); 
float sum = 0.0; __shared__ float norm; for (int j = threadIdx.x; j < m; j += blockDim.x) { const auto x_ij = xData[base + j * sf]; sum += fabsf(x_ij); } float reduce_result = BlockReduce(temp_storage).Sum(sum); if (threadIdx.x == 0) { norm = reduce_result; } __syncthreads(); if (norm != 0) { for (int j = threadIdx.x; j < m; j += blockDim.x) { const auto index = base + j * sf; yData[index] = xData[index] / norm; } } } } } // namespace template <> void NormalizeL1Op<float, CUDAContext>::DoNormalize( const float* xData, float* yData, const int m, const int n, const int sf) { hipLaunchKernelGGL(( NormalizeL1Kernel), dim3(min(n, CAFFE_MAXIMUM_NUM_BLOCKS)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), m, n, sf, xData, yData); } REGISTER_CUDA_OPERATOR(Normalize, NormalizeOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR( NormalizeGradient, NormalizeGradientOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(NormalizeL1, NormalizeL1Op<float, CUDAContext>); } // namespace caffe2
204c47c7be402cdeedac5c02eac4ef46b1d4221a.cu
#include <cub/block/block_reduce.cuh> #include "caffe2/core/context_gpu.h" #include "caffe2/operators/normalize_l1_op.h" #include "caffe2/operators/normalize_op.h" namespace caffe2 { __global__ void NormalizeKernel( const int m, const int n, const int sf, const float* xData, float* yData, const float kEps) { typedef cub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce; __shared__ BlockReduce::TempStorage temp_storage; for (int i = blockIdx.x; i < n; i += gridDim.x) { auto base = (i / sf) * sf * m + (i % sf); float sum = 0.0; __shared__ float norm; for (int j = threadIdx.x; j < m; j += blockDim.x) { const auto x_ij = xData[base + j * sf]; sum += x_ij * x_ij; } float reduce_result = BlockReduce(temp_storage).Sum(sum); if (threadIdx.x == 0) { norm = sqrtf(reduce_result); norm = fmaxf(norm, kEps); } __syncthreads(); for (int j = threadIdx.x; j < m; j += blockDim.x) { const auto index = base + j * sf; yData[index] = xData[index] / norm; } } } __global__ void NormalizeGradientKernel( const int M, const int N, const int SF, const float* in_mat, const float* grad_out_mat, float* grad_mat, const float kEps) { typedef cub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce; __shared__ BlockReduce::TempStorage temp_storage_sum; __shared__ BlockReduce::TempStorage temp_storage_norm; for (int i = blockIdx.x; i < M; i += gridDim.x) { float sum = 0.0; float norm = 0.0; __shared__ float row_sum; __shared__ float row_norm; __shared__ float row_norm_3; auto base = (i / SF) * SF * N + (i % SF); for (int j = threadIdx.x; j < N; j += blockDim.x) { int index = base + j * SF; sum += in_mat[index] * grad_out_mat[index]; norm += in_mat[index] * in_mat[index]; } float reduce_result = BlockReduce(temp_storage_sum).Sum(sum); float reduce_norm = BlockReduce(temp_storage_norm).Sum(norm); if (threadIdx.x == 0) { row_sum = reduce_result; row_norm = sqrtf(reduce_norm); row_norm = fmaxf(row_norm, kEps); row_norm_3 = powf(row_norm, 3); } __syncthreads(); for (int j = threadIdx.x; j < N; j += blockDim.x) { int index = base + j * SF; const float x_ij = in_mat[index]; const float dy_ij = grad_out_mat[index]; grad_mat[index] = (dy_ij / row_norm) - ((x_ij / row_norm_3) * row_sum); } } } template <> void NormalizeOp<float, CUDAContext>::DoNormalize( const float* xData, float* yData, const int m, const int n, const int sf) { NormalizeKernel<<< min(n, CAFFE_MAXIMUM_NUM_BLOCKS), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(m, n, sf, xData, yData, kEps_); } template <> bool NormalizeGradientOp<float, CUDAContext>::RunOnDevice() { const auto& X = Input(0); const auto& dY = Input(1); auto* dX = Output(0); dX->ResizeLike(X); const auto canonical_axis = X.canonical_axis_index(OperatorBase::GetSingleArgument<int>("axis", -1)); int N = X.dim32(canonical_axis); int M = X.numel() / N; const int SF = X.size_from_dim(canonical_axis + 1); NormalizeGradientKernel<<< min(M, CAFFE_MAXIMUM_NUM_BLOCKS), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( M, N, SF, X.data<float>(), dY.data<float>(), dX->template mutable_data<float>(), kEps_); return true; } namespace { __global__ void NormalizeL1Kernel( const int m, const int n, const int sf, const float* xData, float* yData) { typedef cub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce; __shared__ BlockReduce::TempStorage temp_storage; for (int i = blockIdx.x; i < n; i += gridDim.x) { auto base = (i / sf) * sf * m + (i % sf); float sum = 0.0; __shared__ float norm; for (int j = threadIdx.x; j < m; j += blockDim.x) { const auto x_ij = xData[base + j * sf]; sum += 
fabsf(x_ij); } float reduce_result = BlockReduce(temp_storage).Sum(sum); if (threadIdx.x == 0) { norm = reduce_result; } __syncthreads(); if (norm != 0) { for (int j = threadIdx.x; j < m; j += blockDim.x) { const auto index = base + j * sf; yData[index] = xData[index] / norm; } } } } } // namespace template <> void NormalizeL1Op<float, CUDAContext>::DoNormalize( const float* xData, float* yData, const int m, const int n, const int sf) { NormalizeL1Kernel<<< min(n, CAFFE_MAXIMUM_NUM_BLOCKS), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(m, n, sf, xData, yData); } REGISTER_CUDA_OPERATOR(Normalize, NormalizeOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR( NormalizeGradient, NormalizeGradientOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(NormalizeL1, NormalizeL1Op<float, CUDAContext>); } // namespace caffe2
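Both kernels above walk each vector through strided indices: vector i starts at base = (i / sf) * sf * m + (i % sf) and its j-th element sits at base + j * sf. The host-side reference below is a sketch (not part of Caffe2) of the same L2 normalization, mainly useful for checking that indexing.

#include <cmath>
#include <algorithm>

// CPU reference for DoNormalize: n vectors of length m, inner stride sf.
void normalize_cpu(const float* x, float* y, int m, int n, int sf, float kEps)
{
  for (int i = 0; i < n; ++i) {
    int base = (i / sf) * sf * m + (i % sf);
    float sum = 0.f;
    for (int j = 0; j < m; ++j) {
      float v = x[base + j * sf];
      sum += v * v;
    }
    float norm = std::max(std::sqrt(sum), kEps);   // clamp to kEps like the kernel
    for (int j = 0; j < m; ++j) {
      y[base + j * sf] = x[base + j * sf] / norm;
    }
  }
}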
b7fbde9f2554dc172f540af286c185befe8cc0df.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. */ // This file is auto-generated. See "generate_kernels.py" #include <ATen/native/transformers/hip/mem_eff_attention/kernel_backward.h> using namespace PyTorchMemEffAttention; __global__ void __launch_bounds__( AttentionBackwardKernel<cutlass::arch::Sm80, float, true, false, false, 128, 64, 65536>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm80, float, true, false, false, 128, 64, 65536>::kMinBlocksPerSm) fmha_cutlassB_f32_aligned_128x64_k65536_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, float, true, false, false, 128, 64, 65536>::Params p) { #ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ >= 800 #if __CUDA_ARCH__ < 1000 if (!p.advance_to_block()) { return; } AttentionBackwardKernel<cutlass::arch::Sm80, float, true, false, false, 128, 64, 65536>::attention_kernel(p); return; #endif #endif printf( "FATAL: kernel `fmha_cutlassB_f32_aligned_128x64_k65536_sm80` is for sm80-sm100, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10); #endif } __global__ void __launch_bounds__( AttentionBackwardKernel<cutlass::arch::Sm50, float, true, false, false, 64, 64, 65536>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm50, float, true, false, false, 64, 64, 65536>::kMinBlocksPerSm) fmha_cutlassB_f32_aligned_64x64_k65536_sm50(typename AttentionBackwardKernel<cutlass::arch::Sm50, float, true, false, false, 64, 64, 65536>::Params p) { #ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ >= 500 #if __CUDA_ARCH__ < 700 if (!p.advance_to_block()) { return; } AttentionBackwardKernel<cutlass::arch::Sm50, float, true, false, false, 64, 64, 65536>::attention_kernel(p); return; #endif #endif printf( "FATAL: kernel `fmha_cutlassB_f32_aligned_64x64_k65536_sm50` is for sm50-sm70, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10); #endif } __global__ void __launch_bounds__( AttentionBackwardKernel<cutlass::arch::Sm70, float, true, false, false, 64, 64, 65536>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm70, float, true, false, false, 64, 64, 65536>::kMinBlocksPerSm) fmha_cutlassB_f32_aligned_64x64_k65536_sm70(typename AttentionBackwardKernel<cutlass::arch::Sm70, float, true, false, false, 64, 64, 65536>::Params p) { #ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ >= 700 #if __CUDA_ARCH__ < 750 if (!p.advance_to_block()) { return; } AttentionBackwardKernel<cutlass::arch::Sm70, float, true, false, false, 64, 64, 65536>::attention_kernel(p); return; #endif #endif printf( "FATAL: kernel `fmha_cutlassB_f32_aligned_64x64_k65536_sm70` is for sm70-sm75, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10); #endif } __global__ void __launch_bounds__( AttentionBackwardKernel<cutlass::arch::Sm75, float, true, false, false, 64, 64, 65536>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm75, float, true, false, false, 64, 64, 65536>::kMinBlocksPerSm) fmha_cutlassB_f32_aligned_64x64_k65536_sm75(typename AttentionBackwardKernel<cutlass::arch::Sm75, float, true, false, false, 64, 64, 65536>::Params p) { #ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ >= 750 #if __CUDA_ARCH__ < 800 if (!p.advance_to_block()) { return; } AttentionBackwardKernel<cutlass::arch::Sm75, float, true, false, false, 64, 64, 65536>::attention_kernel(p); return; #endif #endif printf( "FATAL: kernel `fmha_cutlassB_f32_aligned_64x64_k65536_sm75` is for 
sm75-sm80, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10); #endif } __global__ void __launch_bounds__( AttentionBackwardKernel<cutlass::arch::Sm80, float, true, false, false, 64, 64, 65536>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm80, float, true, false, false, 64, 64, 65536>::kMinBlocksPerSm) fmha_cutlassB_f32_aligned_64x64_k65536_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, float, true, false, false, 64, 64, 65536>::Params p) { #ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ >= 800 #if __CUDA_ARCH__ < 1000 if (!p.advance_to_block()) { return; } AttentionBackwardKernel<cutlass::arch::Sm80, float, true, false, false, 64, 64, 65536>::attention_kernel(p); return; #endif #endif printf( "FATAL: kernel `fmha_cutlassB_f32_aligned_64x64_k65536_sm80` is for sm80-sm100, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10); #endif }
b7fbde9f2554dc172f540af286c185befe8cc0df.cu
/* * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. */ // This file is auto-generated. See "generate_kernels.py" #include <ATen/native/transformers/cuda/mem_eff_attention/kernel_backward.h> using namespace PyTorchMemEffAttention; __global__ void __launch_bounds__( AttentionBackwardKernel<cutlass::arch::Sm80, float, true, false, false, 128, 64, 65536>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm80, float, true, false, false, 128, 64, 65536>::kMinBlocksPerSm) fmha_cutlassB_f32_aligned_128x64_k65536_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, float, true, false, false, 128, 64, 65536>::Params p) { #ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ >= 800 #if __CUDA_ARCH__ < 1000 if (!p.advance_to_block()) { return; } AttentionBackwardKernel<cutlass::arch::Sm80, float, true, false, false, 128, 64, 65536>::attention_kernel(p); return; #endif #endif printf( "FATAL: kernel `fmha_cutlassB_f32_aligned_128x64_k65536_sm80` is for sm80-sm100, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10); #endif } __global__ void __launch_bounds__( AttentionBackwardKernel<cutlass::arch::Sm50, float, true, false, false, 64, 64, 65536>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm50, float, true, false, false, 64, 64, 65536>::kMinBlocksPerSm) fmha_cutlassB_f32_aligned_64x64_k65536_sm50(typename AttentionBackwardKernel<cutlass::arch::Sm50, float, true, false, false, 64, 64, 65536>::Params p) { #ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ >= 500 #if __CUDA_ARCH__ < 700 if (!p.advance_to_block()) { return; } AttentionBackwardKernel<cutlass::arch::Sm50, float, true, false, false, 64, 64, 65536>::attention_kernel(p); return; #endif #endif printf( "FATAL: kernel `fmha_cutlassB_f32_aligned_64x64_k65536_sm50` is for sm50-sm70, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10); #endif } __global__ void __launch_bounds__( AttentionBackwardKernel<cutlass::arch::Sm70, float, true, false, false, 64, 64, 65536>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm70, float, true, false, false, 64, 64, 65536>::kMinBlocksPerSm) fmha_cutlassB_f32_aligned_64x64_k65536_sm70(typename AttentionBackwardKernel<cutlass::arch::Sm70, float, true, false, false, 64, 64, 65536>::Params p) { #ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ >= 700 #if __CUDA_ARCH__ < 750 if (!p.advance_to_block()) { return; } AttentionBackwardKernel<cutlass::arch::Sm70, float, true, false, false, 64, 64, 65536>::attention_kernel(p); return; #endif #endif printf( "FATAL: kernel `fmha_cutlassB_f32_aligned_64x64_k65536_sm70` is for sm70-sm75, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10); #endif } __global__ void __launch_bounds__( AttentionBackwardKernel<cutlass::arch::Sm75, float, true, false, false, 64, 64, 65536>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm75, float, true, false, false, 64, 64, 65536>::kMinBlocksPerSm) fmha_cutlassB_f32_aligned_64x64_k65536_sm75(typename AttentionBackwardKernel<cutlass::arch::Sm75, float, true, false, false, 64, 64, 65536>::Params p) { #ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ >= 750 #if __CUDA_ARCH__ < 800 if (!p.advance_to_block()) { return; } AttentionBackwardKernel<cutlass::arch::Sm75, float, true, false, false, 64, 64, 65536>::attention_kernel(p); return; #endif #endif printf( "FATAL: kernel `fmha_cutlassB_f32_aligned_64x64_k65536_sm75` is for sm75-sm80, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10); #endif } 
__global__ void __launch_bounds__( AttentionBackwardKernel<cutlass::arch::Sm80, float, true, false, false, 64, 64, 65536>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm80, float, true, false, false, 64, 64, 65536>::kMinBlocksPerSm) fmha_cutlassB_f32_aligned_64x64_k65536_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, float, true, false, false, 64, 64, 65536>::Params p) { #ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ >= 800 #if __CUDA_ARCH__ < 1000 if (!p.advance_to_block()) { return; } AttentionBackwardKernel<cutlass::arch::Sm80, float, true, false, false, 64, 64, 65536>::attention_kernel(p); return; #endif #endif printf( "FATAL: kernel `fmha_cutlassB_f32_aligned_64x64_k65536_sm80` is for sm80-sm100, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10); #endif }
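Every kernel in this file wraps its body in __CUDA_ARCH__ checks so that one source can be compiled for several architectures while only the matching build executes the attention kernel; the others fall through to a runtime error message. A stripped-down sketch of that guard pattern (the kernel name and arch bounds here are made up for illustration):

#include <cstdio>

__global__ void arch_gated_kernel()
{
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800 && __CUDA_ARCH__ < 1000
  // work for sm80..sm90 builds goes here
  return;
#endif
  printf("FATAL: this kernel expects sm80-sm100, but was built for sm%d\n",
         int(__CUDA_ARCH__) / 10);
#endif
}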
b7520509ca5da81acac078118473232e412ac805.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "../shared/globals.hpp" #include "../shared/timer.hpp" #include "../shared/argument_parsing.cuh" #include "../shared/graph.cuh" #include "../shared/subgraph.cuh" #include "../shared/partitioner.cuh" #include "../shared/subgraph_generator.cuh" #include "../shared/gpu_error_check.cuh" #include "../shared/gpu_kernels.cuh" #include "../shared/subway_utilities.hpp" int main(int argc, char** argv) { hipFree(0); ArgumentParser arguments(argc, argv, false, false); Timer timer; timer.Start(); Graph<OutEdge> graph(arguments.input, false); graph.ReadGraph(); float readtime = timer.Finish(); cout << "Graph Reading finished in " << readtime/1000 << " (s).\n"; for(unsigned int i=0; i<graph.num_nodes; i++) { graph.value[i] = i; graph.label1[i] = false; graph.label2[i] = true; } gpuErrorcheck(hipMemcpy(graph.d_outDegree, graph.outDegree, graph.num_nodes * sizeof(u_int64_t), hipMemcpyHostToDevice)); gpuErrorcheck(hipMemcpy(graph.d_value, graph.value, graph.num_nodes * sizeof(unsigned int), hipMemcpyHostToDevice)); gpuErrorcheck(hipMemcpy(graph.d_label1, graph.label1, graph.num_nodes * sizeof(bool), hipMemcpyHostToDevice)); gpuErrorcheck(hipMemcpy(graph.d_label2, graph.label2, graph.num_nodes * sizeof(bool), hipMemcpyHostToDevice)); Subgraph<OutEdge> subgraph(graph.num_nodes, graph.num_edges); SubgraphGenerator<OutEdge> subgen(graph); subgen.generate(graph, subgraph); Partitioner<OutEdge> partitioner; timer.Start(); uint itr = 0; while (subgraph.numActiveNodes>0) { itr++; partitioner.partition(subgraph, subgraph.numActiveNodes); // a super iteration for(int i=0; i<partitioner.numPartitions; i++) { hipDeviceSynchronize(); gpuErrorcheck(hipMemcpy(subgraph.d_activeEdgeList, subgraph.activeEdgeList + partitioner.fromEdge[i], (partitioner.partitionEdgeSize[i]) * sizeof(OutEdge), hipMemcpyHostToDevice)); hipDeviceSynchronize(); hipLaunchKernelGGL(( moveUpLabels), dim3(partitioner.partitionNodeSize[i]/512 + 1) , dim3(512) , 0, 0, subgraph.d_activeNodes, graph.d_label1, graph.d_label2, partitioner.partitionNodeSize[i], partitioner.fromNode[i]); hipLaunchKernelGGL(( cc_kernel), dim3(partitioner.partitionNodeSize[i]/512 + 1) , dim3(512) , 0, 0, partitioner.partitionNodeSize[i], partitioner.fromNode[i], partitioner.fromEdge[i], subgraph.d_activeNodes, subgraph.d_activeNodesPointer, subgraph.d_activeEdgeList, graph.d_outDegree, graph.d_value, //d_finished, graph.d_label1, graph.d_label2); hipDeviceSynchronize(); gpuErrorcheck( hipPeekAtLastError() ); } subgen.generate(graph, subgraph); } float runtime = timer.Finish(); cout << "Processing finished in " << runtime/1000 << " (s).\n"; cout << "Number of iterations = " << itr << endl; gpuErrorcheck(hipMemcpy(graph.value, graph.d_value, graph.num_nodes*sizeof(uint), hipMemcpyDeviceToHost)); utilities::PrintResults(graph.value, min(30, graph.num_nodes)); if(arguments.hasOutput) utilities::SaveResults(arguments.output, graph.value, graph.num_nodes); }
b7520509ca5da81acac078118473232e412ac805.cu
#include "../shared/globals.hpp" #include "../shared/timer.hpp" #include "../shared/argument_parsing.cuh" #include "../shared/graph.cuh" #include "../shared/subgraph.cuh" #include "../shared/partitioner.cuh" #include "../shared/subgraph_generator.cuh" #include "../shared/gpu_error_check.cuh" #include "../shared/gpu_kernels.cuh" #include "../shared/subway_utilities.hpp" int main(int argc, char** argv) { cudaFree(0); ArgumentParser arguments(argc, argv, false, false); Timer timer; timer.Start(); Graph<OutEdge> graph(arguments.input, false); graph.ReadGraph(); float readtime = timer.Finish(); cout << "Graph Reading finished in " << readtime/1000 << " (s).\n"; for(unsigned int i=0; i<graph.num_nodes; i++) { graph.value[i] = i; graph.label1[i] = false; graph.label2[i] = true; } gpuErrorcheck(cudaMemcpy(graph.d_outDegree, graph.outDegree, graph.num_nodes * sizeof(u_int64_t), cudaMemcpyHostToDevice)); gpuErrorcheck(cudaMemcpy(graph.d_value, graph.value, graph.num_nodes * sizeof(unsigned int), cudaMemcpyHostToDevice)); gpuErrorcheck(cudaMemcpy(graph.d_label1, graph.label1, graph.num_nodes * sizeof(bool), cudaMemcpyHostToDevice)); gpuErrorcheck(cudaMemcpy(graph.d_label2, graph.label2, graph.num_nodes * sizeof(bool), cudaMemcpyHostToDevice)); Subgraph<OutEdge> subgraph(graph.num_nodes, graph.num_edges); SubgraphGenerator<OutEdge> subgen(graph); subgen.generate(graph, subgraph); Partitioner<OutEdge> partitioner; timer.Start(); uint itr = 0; while (subgraph.numActiveNodes>0) { itr++; partitioner.partition(subgraph, subgraph.numActiveNodes); // a super iteration for(int i=0; i<partitioner.numPartitions; i++) { cudaDeviceSynchronize(); gpuErrorcheck(cudaMemcpy(subgraph.d_activeEdgeList, subgraph.activeEdgeList + partitioner.fromEdge[i], (partitioner.partitionEdgeSize[i]) * sizeof(OutEdge), cudaMemcpyHostToDevice)); cudaDeviceSynchronize(); moveUpLabels<<< partitioner.partitionNodeSize[i]/512 + 1 , 512 >>>(subgraph.d_activeNodes, graph.d_label1, graph.d_label2, partitioner.partitionNodeSize[i], partitioner.fromNode[i]); cc_kernel<<< partitioner.partitionNodeSize[i]/512 + 1 , 512 >>>(partitioner.partitionNodeSize[i], partitioner.fromNode[i], partitioner.fromEdge[i], subgraph.d_activeNodes, subgraph.d_activeNodesPointer, subgraph.d_activeEdgeList, graph.d_outDegree, graph.d_value, //d_finished, graph.d_label1, graph.d_label2); cudaDeviceSynchronize(); gpuErrorcheck( cudaPeekAtLastError() ); } subgen.generate(graph, subgraph); } float runtime = timer.Finish(); cout << "Processing finished in " << runtime/1000 << " (s).\n"; cout << "Number of iterations = " << itr << endl; gpuErrorcheck(cudaMemcpy(graph.value, graph.d_value, graph.num_nodes*sizeof(uint), cudaMemcpyDeviceToHost)); utilities::PrintResults(graph.value, min(30, graph.num_nodes)); if(arguments.hasOutput) utilities::SaveResults(arguments.output, graph.value, graph.num_nodes); }
Histogram.hip
// !!! This is a file automatically generated by hipify!!! #include <wb.h> #include "hip/hip_runtime.h" #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #define HISTOGRAM_LENGTH 256 #define TILE_WIDTH 16 #define wbCheck(stmt) \ do { \ hipError_t err = stmt; \ if (err != hipSuccess) { \ wbLog(ERROR, "Failed to run stmt ", #stmt); \ wbLog(ERROR, "Got CUDA error ... ", hipGetErrorString(err)); \ return -1; \ } \ } while (0) __global__ void castToUChar(float * inputImage, unsigned char * ucharImage, int imageSize) { int index = blockDim.x*blockIdx.x + threadIdx.x; if (index < imageSize) { ucharImage[index] = (unsigned char)(255 * inputImage[index]); } } __global__ void convertToGreyscale(unsigned char * ucharImage, unsigned char * greyImage, int imageWidth, int imageHeight) { int col = blockDim.x*blockIdx.x + threadIdx.x; int row = blockDim.y*blockIdx.y + threadIdx.y; if (col < imageWidth && row < imageHeight) { int index = imageWidth*row + col; unsigned char r = ucharImage[3 * index]; unsigned char g = ucharImage[3 * index + 1]; unsigned char b = ucharImage[3 * index + 2]; greyImage[index] = (unsigned char)(0.21*r + 0.71*g + 0.07*b); } } __global__ void calcHistogram(unsigned char * greyImage, unsigned int * histogram, long imageSize) { __shared__ unsigned int localHistogram[HISTOGRAM_LENGTH]; // Initalize to zero int i = threadIdx.x + blockIdx.x * blockDim.x; if (threadIdx.x < HISTOGRAM_LENGTH) { localHistogram[threadIdx.x] = 0; } __syncthreads(); int stride = blockDim.x * gridDim.x; while (i < imageSize) { atomicAdd(&(localHistogram[greyImage[i]]), 1); i += stride; } // wait for all other threads in the block to finish __syncthreads(); if (threadIdx.x < HISTOGRAM_LENGTH) { atomicAdd(&(histogram[threadIdx.x]), localHistogram[threadIdx.x]); } } __device__ float normalize(int x, float normConstant) { return normConstant*x; } //Cumulative Distribution Function of histogram ////Block size must be HISTOGRAM_LENGTH and grid size must be 1 __global__ void calcCDF(unsigned int * histogram, float * cdf, float normConstant) { __shared__ float localHistogram[HISTOGRAM_LENGTH]; localHistogram[threadIdx.x] = histogram[threadIdx.x]; __syncthreads(); int sum = 0; for (int i = 0; i <= threadIdx.x; ++i) { sum += localHistogram[i]; } cdf[threadIdx.x] = normalize(sum, normConstant); } //Block size must be HISTOGRAM_LENGTH/2 and grid size must be 1 __global__ void minimum(float * cdf, float * result) { __shared__ float partialMin[HISTOGRAM_LENGTH]; int loadIndex; for (int i = 0; i < 2; ++i) { loadIndex = 2 * blockIdx.x*blockDim.x + i*blockDim.x + threadIdx.x; if (loadIndex < HISTOGRAM_LENGTH) { partialMin[i*blockDim.x + threadIdx.x] = cdf[loadIndex]; } else { partialMin[i*blockDim.x + threadIdx.x] = cdf[0]; } } //Traverse the reduction tree int t = threadIdx.x; for (unsigned int stride = blockDim.x; stride > 0; stride /= 2) { __syncthreads(); if (t < stride) { partialMin[t] = min(partialMin[t], partialMin[t + stride]); } } __syncthreads(); if (t == 0) { *result = partialMin[0]; } } __device__ float clamp(float x, float start, float end) { return min(max(x, start), end); } __device__ unsigned char correct_colour(int val, float * cdf, float * cdfmin) { return (unsigned char)clamp(255 * (cdf[val] - cdfmin[0]) / (1 - cdfmin[0]), 0, 255); } __global__ void equalizeImage(unsigned char * ucharImage, float * cdf, float * cdfmin, int imageSize) { int index = blockDim.x*blockIdx.x + threadIdx.x; if (index < imageSize) { ucharImage[index] = correct_colour(ucharImage[index], cdf, cdfmin); } } __global__ 
void castToFloat(float * inputImage, unsigned char * ucharImage, int imageSize) { int index = blockDim.x*blockIdx.x + threadIdx.x; if (index < imageSize) { inputImage[index] = (float)(ucharImage[index] / 255.0); } } int main(int argc, char ** argv) { wbArg_t args; int imageWidth; int imageHeight; int imageChannels; wbImage_t inputImage; wbImage_t outputImage; float * hostInputImageData; float * hostOutputImageData; const char * inputImageFile; float * deviceInputImageData; unsigned char * deviceGreyImage; unsigned char * deviceUCharImage; unsigned int * deviceHistogram; float * deviceCDF; float * deviceCDFMin; args = wbArg_read(argc, argv); /* parse the input arguments */ inputImageFile = wbArg_getInputFile(args, 0); wbTime_start(Generic, "Importing data and creating memory on host"); inputImage = wbImport(inputImageFile); imageWidth = wbImage_getWidth(inputImage); imageHeight = wbImage_getHeight(inputImage); imageChannels = wbImage_getChannels(inputImage); outputImage = wbImage_new(imageWidth, imageHeight, imageChannels); hostInputImageData = wbImage_getData(inputImage); hostOutputImageData = wbImage_getData(outputImage); wbTime_stop(Generic, "Importing data and creating memory on host"); wbTime_start(GPU, "Doing GPU memory allocation"); hipMalloc((void **)&deviceInputImageData, imageWidth * imageHeight * imageChannels * sizeof(float)); hipMalloc((void **)&deviceUCharImage, imageWidth * imageHeight * imageChannels * sizeof(unsigned char)); hipMalloc((void **)&deviceGreyImage, imageWidth * imageHeight * sizeof(unsigned char)); hipMalloc((void **)&deviceHistogram, HISTOGRAM_LENGTH * sizeof(unsigned int)); hipMemset(deviceHistogram, 0, HISTOGRAM_LENGTH * sizeof(unsigned int)); hipMalloc((void **)&deviceCDF, HISTOGRAM_LENGTH * sizeof(float)); hipMalloc((void **)&deviceCDFMin, sizeof(float)); wbTime_stop(GPU, "Doing GPU memory allocation"); wbTime_start(GPU, "Copying input memory to the GPU."); wbCheck(hipMemcpy( deviceInputImageData, hostInputImageData, imageWidth * imageHeight * imageChannels * sizeof(float), hipMemcpyHostToDevice )); wbTime_stop(GPU, "Copying input memory to the GPU."); dim3 dimBlock(TILE_WIDTH*TILE_WIDTH, 1, 1); dim3 dimGrid((imageWidth*imageHeight*imageChannels - 1) / (TILE_WIDTH*TILE_WIDTH) + 1, 1, 1); wbTime_start(Compute, "Converting image input to uchar"); hipLaunchKernelGGL(( castToUChar), dim3(dimGrid), dim3(dimBlock), 0, 0, deviceInputImageData, deviceUCharImage, imageWidth*imageHeight*imageChannels); hipDeviceSynchronize(); wbCheck(hipGetLastError()); wbTime_stop(Compute, "Converting image input to uchar"); dimBlock = dim3(TILE_WIDTH, TILE_WIDTH, 1); dimGrid = dim3((imageWidth - 1) / TILE_WIDTH + 1, (imageHeight - 1) / TILE_WIDTH + 1, 1); wbTime_start(Compute, "Converting to greyscale"); hipLaunchKernelGGL(( convertToGreyscale), dim3(dimGrid), dim3(dimBlock), 0, 0, deviceUCharImage, deviceGreyImage, imageWidth, imageHeight); hipDeviceSynchronize(); wbCheck(hipGetLastError()); wbTime_stop(Compute, "Converting to greyscale"); dimBlock = dim3(HISTOGRAM_LENGTH, 1, 1); dimGrid = dim3(6, 1, 1); wbTime_start(Compute, "Calculating histogram"); calcHistogram << <dimGrid, dimBlock >> >(deviceGreyImage, deviceHistogram, imageWidth*imageHeight); hipDeviceSynchronize(); wbCheck(hipGetLastError()); wbTime_stop(Compute, "Calculating histogram"); dimBlock = dim3(HISTOGRAM_LENGTH, 1, 1); dimGrid = dim3(1, 1, 1); wbTime_start(Compute, "Calculating CDF"); hipLaunchKernelGGL(( calcCDF), dim3(dimGrid), dim3(dimBlock), 0, 0, deviceHistogram, deviceCDF, 
(float)(1.0/(imageWidth*imageHeight))); hipDeviceSynchronize(); wbCheck(hipGetLastError()); wbTime_stop(Compute, "Calculating CDF"); dimBlock = dim3(HISTOGRAM_LENGTH/2, 1, 1); dimGrid = dim3(1, 1, 1); wbTime_start(Compute, "Calculating CDF min"); hipLaunchKernelGGL(( minimum), dim3(dimGrid), dim3(dimBlock), 0, 0, deviceCDF, deviceCDFMin); hipDeviceSynchronize(); wbCheck(hipGetLastError()); wbTime_stop(Compute, "Calculating CDF min"); dimBlock = dim3(TILE_WIDTH*TILE_WIDTH, 1, 1); dimGrid = dim3((imageWidth*imageHeight*imageChannels - 1) / (TILE_WIDTH*TILE_WIDTH) + 1, 1, 1); wbTime_start(Compute, "equalize uchar image"); hipLaunchKernelGGL(( equalizeImage), dim3(dimGrid), dim3(dimBlock) , 0, 0, deviceUCharImage, deviceCDF, deviceCDFMin, imageWidth*imageHeight*imageChannels); hipDeviceSynchronize(); wbCheck(hipGetLastError()); wbTime_stop(Compute, "equalize uchar image"); dimBlock = dim3(TILE_WIDTH*TILE_WIDTH, 1, 1); dimGrid = dim3((imageWidth*imageHeight*imageChannels - 1) / (TILE_WIDTH*TILE_WIDTH) + 1, 1, 1); wbTime_start(Compute, "uchar image to float"); castToFloat<< <dimGrid, dimBlock >> >(deviceInputImageData, deviceUCharImage, imageWidth*imageHeight*imageChannels); hipDeviceSynchronize(); wbCheck(hipGetLastError()); wbTime_stop(Compute, "uchar image to float"); wbTime_start(Copy, "Copying data from the GPU"); hipMemcpy( hostOutputImageData, deviceInputImageData, imageWidth * imageHeight * imageChannels * sizeof(float), hipMemcpyDeviceToHost); wbTime_stop(Copy, "Copying data from the GPU"); wbSolution(args, outputImage); hipFree(deviceInputImageData); hipFree(deviceGreyImage); hipFree(deviceUCharImage); hipFree(deviceHistogram); hipFree(deviceCDF); hipFree(deviceCDFMin); wbImage_delete(outputImage); wbImage_delete(inputImage); return 0; }
Histogram.cu
#include <wb.h> #include "cuda.h" #include "cuda_runtime.h" #include "device_launch_parameters.h" #define HISTOGRAM_LENGTH 256 #define TILE_WIDTH 16 #define wbCheck(stmt) \ do { \ cudaError_t err = stmt; \ if (err != cudaSuccess) { \ wbLog(ERROR, "Failed to run stmt ", #stmt); \ wbLog(ERROR, "Got CUDA error ... ", cudaGetErrorString(err)); \ return -1; \ } \ } while (0) __global__ void castToUChar(float * inputImage, unsigned char * ucharImage, int imageSize) { int index = blockDim.x*blockIdx.x + threadIdx.x; if (index < imageSize) { ucharImage[index] = (unsigned char)(255 * inputImage[index]); } } __global__ void convertToGreyscale(unsigned char * ucharImage, unsigned char * greyImage, int imageWidth, int imageHeight) { int col = blockDim.x*blockIdx.x + threadIdx.x; int row = blockDim.y*blockIdx.y + threadIdx.y; if (col < imageWidth && row < imageHeight) { int index = imageWidth*row + col; unsigned char r = ucharImage[3 * index]; unsigned char g = ucharImage[3 * index + 1]; unsigned char b = ucharImage[3 * index + 2]; greyImage[index] = (unsigned char)(0.21*r + 0.71*g + 0.07*b); } } __global__ void calcHistogram(unsigned char * greyImage, unsigned int * histogram, long imageSize) { __shared__ unsigned int localHistogram[HISTOGRAM_LENGTH]; // Initalize to zero int i = threadIdx.x + blockIdx.x * blockDim.x; if (threadIdx.x < HISTOGRAM_LENGTH) { localHistogram[threadIdx.x] = 0; } __syncthreads(); int stride = blockDim.x * gridDim.x; while (i < imageSize) { atomicAdd(&(localHistogram[greyImage[i]]), 1); i += stride; } // wait for all other threads in the block to finish __syncthreads(); if (threadIdx.x < HISTOGRAM_LENGTH) { atomicAdd(&(histogram[threadIdx.x]), localHistogram[threadIdx.x]); } } __device__ float normalize(int x, float normConstant) { return normConstant*x; } //Cumulative Distribution Function of histogram ////Block size must be HISTOGRAM_LENGTH and grid size must be 1 __global__ void calcCDF(unsigned int * histogram, float * cdf, float normConstant) { __shared__ float localHistogram[HISTOGRAM_LENGTH]; localHistogram[threadIdx.x] = histogram[threadIdx.x]; __syncthreads(); int sum = 0; for (int i = 0; i <= threadIdx.x; ++i) { sum += localHistogram[i]; } cdf[threadIdx.x] = normalize(sum, normConstant); } //Block size must be HISTOGRAM_LENGTH/2 and grid size must be 1 __global__ void minimum(float * cdf, float * result) { __shared__ float partialMin[HISTOGRAM_LENGTH]; int loadIndex; for (int i = 0; i < 2; ++i) { loadIndex = 2 * blockIdx.x*blockDim.x + i*blockDim.x + threadIdx.x; if (loadIndex < HISTOGRAM_LENGTH) { partialMin[i*blockDim.x + threadIdx.x] = cdf[loadIndex]; } else { partialMin[i*blockDim.x + threadIdx.x] = cdf[0]; } } //Traverse the reduction tree int t = threadIdx.x; for (unsigned int stride = blockDim.x; stride > 0; stride /= 2) { __syncthreads(); if (t < stride) { partialMin[t] = min(partialMin[t], partialMin[t + stride]); } } __syncthreads(); if (t == 0) { *result = partialMin[0]; } } __device__ float clamp(float x, float start, float end) { return min(max(x, start), end); } __device__ unsigned char correct_colour(int val, float * cdf, float * cdfmin) { return (unsigned char)clamp(255 * (cdf[val] - cdfmin[0]) / (1 - cdfmin[0]), 0, 255); } __global__ void equalizeImage(unsigned char * ucharImage, float * cdf, float * cdfmin, int imageSize) { int index = blockDim.x*blockIdx.x + threadIdx.x; if (index < imageSize) { ucharImage[index] = correct_colour(ucharImage[index], cdf, cdfmin); } } __global__ void castToFloat(float * inputImage, unsigned char * ucharImage, int 
imageSize) { int index = blockDim.x*blockIdx.x + threadIdx.x; if (index < imageSize) { inputImage[index] = (float)(ucharImage[index] / 255.0); } } int main(int argc, char ** argv) { wbArg_t args; int imageWidth; int imageHeight; int imageChannels; wbImage_t inputImage; wbImage_t outputImage; float * hostInputImageData; float * hostOutputImageData; const char * inputImageFile; float * deviceInputImageData; unsigned char * deviceGreyImage; unsigned char * deviceUCharImage; unsigned int * deviceHistogram; float * deviceCDF; float * deviceCDFMin; args = wbArg_read(argc, argv); /* parse the input arguments */ inputImageFile = wbArg_getInputFile(args, 0); wbTime_start(Generic, "Importing data and creating memory on host"); inputImage = wbImport(inputImageFile); imageWidth = wbImage_getWidth(inputImage); imageHeight = wbImage_getHeight(inputImage); imageChannels = wbImage_getChannels(inputImage); outputImage = wbImage_new(imageWidth, imageHeight, imageChannels); hostInputImageData = wbImage_getData(inputImage); hostOutputImageData = wbImage_getData(outputImage); wbTime_stop(Generic, "Importing data and creating memory on host"); wbTime_start(GPU, "Doing GPU memory allocation"); cudaMalloc((void **)&deviceInputImageData, imageWidth * imageHeight * imageChannels * sizeof(float)); cudaMalloc((void **)&deviceUCharImage, imageWidth * imageHeight * imageChannels * sizeof(unsigned char)); cudaMalloc((void **)&deviceGreyImage, imageWidth * imageHeight * sizeof(unsigned char)); cudaMalloc((void **)&deviceHistogram, HISTOGRAM_LENGTH * sizeof(unsigned int)); cudaMemset(deviceHistogram, 0, HISTOGRAM_LENGTH * sizeof(unsigned int)); cudaMalloc((void **)&deviceCDF, HISTOGRAM_LENGTH * sizeof(float)); cudaMalloc((void **)&deviceCDFMin, sizeof(float)); wbTime_stop(GPU, "Doing GPU memory allocation"); wbTime_start(GPU, "Copying input memory to the GPU."); wbCheck(cudaMemcpy( deviceInputImageData, hostInputImageData, imageWidth * imageHeight * imageChannels * sizeof(float), cudaMemcpyHostToDevice )); wbTime_stop(GPU, "Copying input memory to the GPU."); dim3 dimBlock(TILE_WIDTH*TILE_WIDTH, 1, 1); dim3 dimGrid((imageWidth*imageHeight*imageChannels - 1) / (TILE_WIDTH*TILE_WIDTH) + 1, 1, 1); wbTime_start(Compute, "Converting image input to uchar"); castToUChar<<<dimGrid, dimBlock>>>(deviceInputImageData, deviceUCharImage, imageWidth*imageHeight*imageChannels); cudaDeviceSynchronize(); wbCheck(cudaGetLastError()); wbTime_stop(Compute, "Converting image input to uchar"); dimBlock = dim3(TILE_WIDTH, TILE_WIDTH, 1); dimGrid = dim3((imageWidth - 1) / TILE_WIDTH + 1, (imageHeight - 1) / TILE_WIDTH + 1, 1); wbTime_start(Compute, "Converting to greyscale"); convertToGreyscale<<<dimGrid, dimBlock>>>(deviceUCharImage, deviceGreyImage, imageWidth, imageHeight); cudaDeviceSynchronize(); wbCheck(cudaGetLastError()); wbTime_stop(Compute, "Converting to greyscale"); dimBlock = dim3(HISTOGRAM_LENGTH, 1, 1); dimGrid = dim3(6, 1, 1); wbTime_start(Compute, "Calculating histogram"); calcHistogram << <dimGrid, dimBlock >> >(deviceGreyImage, deviceHistogram, imageWidth*imageHeight); cudaDeviceSynchronize(); wbCheck(cudaGetLastError()); wbTime_stop(Compute, "Calculating histogram"); dimBlock = dim3(HISTOGRAM_LENGTH, 1, 1); dimGrid = dim3(1, 1, 1); wbTime_start(Compute, "Calculating CDF"); calcCDF<<<dimGrid, dimBlock>>>(deviceHistogram, deviceCDF, (float)(1.0/(imageWidth*imageHeight))); cudaDeviceSynchronize(); wbCheck(cudaGetLastError()); wbTime_stop(Compute, "Calculating CDF"); dimBlock = dim3(HISTOGRAM_LENGTH/2, 1, 1); dimGrid = dim3(1, 1, 
1); wbTime_start(Compute, "Calculating CDF min"); minimum<<<dimGrid, dimBlock>>>(deviceCDF, deviceCDFMin); cudaDeviceSynchronize(); wbCheck(cudaGetLastError()); wbTime_stop(Compute, "Calculating CDF min"); dimBlock = dim3(TILE_WIDTH*TILE_WIDTH, 1, 1); dimGrid = dim3((imageWidth*imageHeight*imageChannels - 1) / (TILE_WIDTH*TILE_WIDTH) + 1, 1, 1); wbTime_start(Compute, "equalize uchar image"); equalizeImage<<<dimGrid, dimBlock >>>(deviceUCharImage, deviceCDF, deviceCDFMin, imageWidth*imageHeight*imageChannels); cudaDeviceSynchronize(); wbCheck(cudaGetLastError()); wbTime_stop(Compute, "equalize uchar image"); dimBlock = dim3(TILE_WIDTH*TILE_WIDTH, 1, 1); dimGrid = dim3((imageWidth*imageHeight*imageChannels - 1) / (TILE_WIDTH*TILE_WIDTH) + 1, 1, 1); wbTime_start(Compute, "uchar image to float"); castToFloat<< <dimGrid, dimBlock >> >(deviceInputImageData, deviceUCharImage, imageWidth*imageHeight*imageChannels); cudaDeviceSynchronize(); wbCheck(cudaGetLastError()); wbTime_stop(Compute, "uchar image to float"); wbTime_start(Copy, "Copying data from the GPU"); cudaMemcpy( hostOutputImageData, deviceInputImageData, imageWidth * imageHeight * imageChannels * sizeof(float), cudaMemcpyDeviceToHost); wbTime_stop(Copy, "Copying data from the GPU"); wbSolution(args, outputImage); cudaFree(deviceInputImageData); cudaFree(deviceGreyImage); cudaFree(deviceUCharImage); cudaFree(deviceHistogram); cudaFree(deviceCDF); cudaFree(deviceCDFMin); wbImage_delete(outputImage); wbImage_delete(inputImage); return 0; }
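// ---------------------------------------------------------------------------
// Host-side reference for the equalization pipeline above (cast to uchar ->
// greyscale -> histogram -> CDF -> CDF minimum -> remap -> cast back to float).
// This is a minimal sketch assuming the usual mapping
//   255 * (cdf[v] - cdfMin) / (1 - cdfMin), clamped to [0, 255];
// the kernels launched above may differ in rounding details, so treat this as
// a plausibility check rather than the definitive implementation. Because the
// CDF is non-decreasing, its minimum is simply cdf[0], which is the value the
// parallel 'minimum' reduction produces.
#include <algorithm>
#include <vector>

static void equalizeReference(const unsigned char *grey,   // greyscale pixels, width*height
                              unsigned char *ucharImage,   // interleaved RGB pixels, modified in place
                              int pixels, int channels) {
  const int HIST_LEN = 256;
  std::vector<unsigned int> hist(HIST_LEN, 0);
  for (int i = 0; i < pixels; i++) hist[grey[i]]++;

  // Normalized inclusive prefix sum of the histogram -> CDF in [0, 1].
  std::vector<float> cdf(HIST_LEN, 0.0f);
  float acc = 0.0f;
  for (int v = 0; v < HIST_LEN; v++) {
    acc += hist[v] / (float)pixels;
    cdf[v] = acc;
  }
  const float cdfMin = cdf[0];

  // Remap every channel of every pixel through the equalization curve.
  for (int i = 0; i < pixels * channels; i++) {
    float corrected = 255.0f * (cdf[ucharImage[i]] - cdfMin) / (1.0f - cdfMin);
    ucharImage[i] = (unsigned char)std::min(std::max(corrected, 0.0f), 255.0f);
  }
}
// ---------------------------------------------------------------------------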
744491748949a97ea3512299cad66582de943257.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef _ZQ_CUDA_POISSON_SOLVER_3D_CLOSED_POISSON_CU_ #define _ZQ_CUDA_POISSON_SOLVER_3D_CLOSED_POISSON_CU_ #include "ZQ_CUDA_PoissonSolver3D_ClosedPoisson.cuh" #include "ZQ_CUDA_ImageProcessing3D.cuh" namespace ZQ_CUDA_PoissonSolver3D { __global__ void Adjust_MAC_u_ClosedPoisson_Kernel(float* mac_u, const float* p, const int width, const int height, const int depth) { int x = threadIdx.x + blockIdx.x * blockDim.x; //warning: x is in[0,width-2] int y = threadIdx.y + blockIdx.y * blockDim.y; x = x + 1; // then x is in [1,width-1] if(x >= width || y >= height) return ; for(int z = 0;z < depth;z++) mac_u[z*height*(width+1)+y*(width+1)+x] -= p[z*height*width+y*width+x] - p[z*height*width+y*width+x-1]; } __global__ void Adjust_MAC_v_ClosedPoisson_Kernel(float* mac_v, const float* p, const int width, const int height, const int depth) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; // warning: y is in [0, height-2] y = y + 1; // then y is in [1,height-1] if(x >= width || y >= height) return ; for(int z = 0;z < depth;z++) mac_v[z*(height+1)*width+y*width+x] -= p[z*height*width+y*width+x] - p[z*height*width+(y-1)*width+x]; } __global__ void Adjust_MAC_w_ClosedPoisson_Kernel(float* mac_w, const float* p, const int width, const int height, const int depth) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if(x >= width || y >= height) return ; for(int z = 1;z < depth;z++) mac_w[z*height*width+y*width+x] -= p[z*height*width+y*width+x] - p[(z-1)*height*width+y*width+x]; } __global__ void Adjust_MAC_u_ClosedPoisson_occupy_Kernel(float* mac_u, const float* p, const bool* occupy, const int width, const int height, const int depth) { int x = threadIdx.x + blockIdx.x * blockDim.x; //warning: x is in[0,width-2] int y = threadIdx.y + blockIdx.y * blockDim.y; x = x + 1; // then x is in [1,width-1] if(x >= width || y >= height) return ; for(int z = 0;z < depth;z++) { if(!occupy[z*height*width+y*width+x-1] && !occupy[z*height*width+y*width+x]) mac_u[z*height*(width+1)+y*(width+1)+x] -= p[z*height*width+y*width+x] - p[z*height*width+y*width+x-1]; } } __global__ void Adjust_MAC_v_ClosedPoisson_occupy_Kernel(float* mac_v, const float* p, const bool* occupy, const int width, const int height, const int depth) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; // warning: y is in [0, height-2] y = y + 1; // then y is in [1,height-1] if(x >= width || y >= height) return ; for(int z = 0;z < depth;z++) { if(!occupy[z*height*width+(y-1)*width+x] && !occupy[z*height*width+y*width+x]) mac_v[z*(height+1)*width+y*width+x] -= p[z*height*width+y*width+x] - p[z*height*width+(y-1)*width+x]; } } __global__ void Adjust_MAC_w_ClosedPoisson_occupy_Kernel(float* mac_w, const float* p, const bool* occupy, const int width, const int height, const int depth) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if(x >= width || y >= height) return ; for(int z = 1;z < depth;z++) { if(!occupy[(z-1)*height*width+y*width+x] && !occupy[z*height*width+y*width+x]) mac_w[z*height*width+y*width+x] -= p[z*height*width+y*width+x] - p[(z-1)*height*width+y*width+x]; } } __global__ void Adjust_MAC_u_ClosedPoisson_FaceRatio_Kernel(float* mac_u, const float* p, const float* unoccupyU, const int width, const int height, const int depth) { int x = threadIdx.x + blockIdx.x * blockDim.x; 
//warning: x is in[0,width-2] int y = threadIdx.y + blockIdx.y * blockDim.y; x = x + 1; // then x is in [1,width-1] if(x >= width || y >= height) return ; for(int z = 0;z < depth;z++) { if(unoccupyU[z*height*(width+1)+y*(width+1)+x] != 0) mac_u[z*height*(width+1)+y*(width+1)+x] -= p[z*height*width+y*width+x] - p[z*height*width+y*width+x-1]; } } __global__ void Adjust_MAC_v_ClosedPoisson_FaceRatio_Kernel(float* mac_v, const float* p, const float* unoccupyV, const int width, const int height, const int depth) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; // warning: y is in [0, height-2] y = y + 1; // then y is in [1,height-1] if(x >= width || y >= height) return ; for(int z = 0;z < depth;z++) { if(unoccupyV[z*(height+1)*width+y*width+x] != 0) mac_v[z*(height+1)*width+y*width+x] -= p[z*height*width+y*width+x] - p[z*height*width+(y-1)*width+x]; } } __global__ void Adjust_MAC_w_ClosedPoisson_FaceRatio_Kernel(float* mac_w, const float* p, const float* unoccupyW, const int width, const int height, const int depth) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if(x >= width || y >= height) return ; for(int z = 1;z < depth;z++) { if(unoccupyW[z*height*width+y*width+x] != 0) mac_w[z*height*width+y*width+x] -= p[z*height*width+y*width+x] - p[(z-1)*height*width+y*width+x]; } } __global__ void SolvePressure_ClosedPoisson_RedBlack_Kernel(float* p, const float* divergence, const float div_per_volume, const int width, const int height, const int depth, const bool redkernel) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if(x >= width || y >= height) return ; int rest = (x+y)%2; int start = redkernel ? rest : (1-rest); for(int z = start; z < depth; z += 2) { int offset = z*height*width+y*width+x; float coeff = 0; float sigma = 0; if(z == 0) { sigma += p[offset+height*width]; coeff += 1.0f; } else if(z == depth-1) { sigma += p[offset-height*width]; coeff += 1.0f; } else { sigma += p[offset-height*width]+p[offset+height*width]; coeff += 2.0f; } if(y == 0) { sigma += p[offset+width]; coeff += 1.0f; } else if(y == height-1) { sigma += p[offset-width]; coeff += 1.0f; } else { sigma += p[offset-width]+p[offset+width]; coeff += 2.0f; } if(x == 0) { sigma += p[offset+1]; coeff += 1.0f; } else if(x == width-1) { sigma += p[offset-1]; coeff += 1.0f; } else { sigma += p[offset+1]+p[offset-1]; coeff += 2.0f; } sigma -= divergence[offset] - div_per_volume; p[offset] = sigma/coeff; } } __global__ void SolvePressure_ClosedPoisson_occupy_RedBlack_Kernel(float* p, const float* divergence, const bool* occupy, const float div_per_volume, const int width, const int height, const int depth, const bool redkernel) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if(x >= width || y >= height) return ; int rest = (x+y)%2; int start = redkernel ? 
rest : (1-rest); for(int z = start; z < depth; z += 2) { int offset = z*height*width+y*width+x; if(occupy[offset]) { p[offset] = 0; continue ; } float coeff = 0.0f,sigma = 0.0f; if(z == 0) { if(!occupy[offset+height*width]) { sigma += p[offset+height*width]; coeff += 1.0f; } } else if(z == depth-1) { if(!occupy[offset-height*width]) { sigma += p[offset-height*width]; coeff += 1.0f; } } else { if(!occupy[offset-height*width]) { sigma += p[offset-height*width]; coeff += 1.0f; } if(!occupy[offset+height*width]) { sigma += p[offset+height*width]; coeff += 1.0f; } } if(y == 0) { if(!occupy[offset+width]) { sigma += p[offset+width]; coeff += 1.0f; } } else if(y == height-1) { if(!occupy[offset-width]) { sigma += p[offset-width]; coeff += 1.0f; } } else { if(!occupy[offset+width]) { sigma += p[offset+width]; coeff += 1.0f; } if(!occupy[offset-width]) { sigma += p[offset-width]; coeff += 1.0f; } } if(x == 0) { if(!occupy[offset+1]) { sigma += p[offset+1]; coeff += 1.0f; } } else if(x == width-1) { if(!occupy[offset-1]) { sigma += p[offset-1]; coeff += 1.0f; } } else { if(!occupy[offset+1]) { sigma += p[offset+1]; coeff += 1.0f; } if(!occupy[offset-1]) { sigma += p[offset-1]; coeff += 1.0f; } } sigma -= divergence[offset] - div_per_volume; p[offset] = sigma/coeff; } } __global__ void SolvePressure_ClosedPoisson_FaceRatio_RedBlack_Kernel(float* p, const float* divergence, const float* unoccupyVolume, const float* unoccupyU, const float* unoccupyV, const float* unoccupyW, const float div_per_volume, const int width, const int height, const int depth, const bool redkernel) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if(x >= width || y >= height) return ; int rest = (x+y)%2; int start = redkernel ? rest : (1-rest); for(int z = start; z < depth; z += 2) { int offset = z*height*width+y*width+x; float coeff = 0,sigma = 0; if(unoccupyVolume[offset] == 0) { p[offset] = 0; continue ; } if(z == 0) { float cur_ratio = unoccupyW[(z+1)*height*width+y*width+x]; sigma += cur_ratio*p[offset+height*width]; coeff += cur_ratio; } else if(z == depth-1) { float cur_ratio = unoccupyW[z*height*width+y*width+x]; sigma += cur_ratio*p[offset-height*width]; coeff += cur_ratio; } else { float cur_ratio = unoccupyW[(z+1)*height*width+y*width+x]; sigma += cur_ratio*p[offset+height*width]; coeff += cur_ratio; cur_ratio = unoccupyW[z*height*width+y*width+x]; sigma += cur_ratio*p[offset-height*width]; coeff += cur_ratio; } if(y == 0) { float cur_ratio = unoccupyV[z*(height+1)*width+(y+1)*width+x]; sigma += cur_ratio*p[offset+width]; coeff += cur_ratio; } else if(y == height-1) { float cur_ratio = unoccupyV[z*(height+1)*width+y*width+x]; sigma += cur_ratio*p[offset-width]; coeff += cur_ratio; } else { float cur_ratio = unoccupyV[z*(height+1)*width+(y+1)*width+x]; sigma += cur_ratio*p[offset+width]; coeff += cur_ratio; cur_ratio = unoccupyV[z*(height+1)*width+y*width+x]; sigma += cur_ratio*p[offset-width]; coeff += cur_ratio; } if(x == 0) { float cur_ratio = unoccupyU[z*height*(width+1)+y*(width+1)+x+1]; sigma += cur_ratio*p[offset+1]; coeff += cur_ratio; } else if(x == width-1) { float cur_ratio = unoccupyU[z*height*(width+1)+y*(width+1)+x]; sigma += cur_ratio*p[offset-1]; coeff += cur_ratio; } else { float cur_ratio = unoccupyU[z*height*(width+1)+y*(width+1)+x+1]; sigma += cur_ratio*p[offset+1]; coeff += cur_ratio; cur_ratio = unoccupyU[z*height*(width+1)+y*(width+1)+x]; sigma += cur_ratio*p[offset-1]; coeff += cur_ratio; } sigma -= divergence[offset] - 
div_per_volume*unoccupyVolume[offset]; if(coeff > 0) p[offset] = sigma/coeff; else p[offset] = 0; } } /*****************************************************************************/ void cu_SolveClosedPoissonRedBlack_MAC(float* mac_u, float* mac_v, float* mac_w, const float div_per_volume, const int width, const int height, const int depth, const int maxIter) { dim3 blockSize(BLOCK_SIZE,BLOCK_SIZE); dim3 gridSize((width+blockSize.x-1)/blockSize.x,(height+blockSize.y-1)/blockSize.y); dim3 u_gridSize((width-1+blockSize.x-1)/blockSize.x,(height+blockSize.y-1)/blockSize.y); dim3 v_gridSize((width+blockSize.x-1)/blockSize.x,(height-1+blockSize.y-1)/blockSize.y); dim3 w_gridSize((width+blockSize.x-1)/blockSize.x,(height+blockSize.y-1)/blockSize.y); float* b_d = 0; float* p_d = 0; checkCudaErrors( hipMalloc((void**)&b_d,sizeof(float)*width*height*depth)); checkCudaErrors( hipMalloc((void**)&p_d,sizeof(float)*width*height*depth)); checkCudaErrors( hipMemset(b_d,0,sizeof(float)*width*height*depth)); checkCudaErrors( hipMemset(p_d,0,sizeof(float)*width*height*depth)); hipLaunchKernelGGL(( Calculate_Divergence_of_MAC_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, b_d,mac_u,mac_v,mac_w,width,height,depth); for(int i = 0;i < maxIter;i++) { hipLaunchKernelGGL(( SolvePressure_ClosedPoisson_RedBlack_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, p_d,b_d,div_per_volume,width,height,depth,true); hipLaunchKernelGGL(( SolvePressure_ClosedPoisson_RedBlack_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, p_d,b_d,div_per_volume,width,height,depth,false); } hipLaunchKernelGGL(( Adjust_MAC_u_ClosedPoisson_Kernel), dim3(u_gridSize),dim3(blockSize), 0, 0, mac_u,p_d,width,height,depth); hipLaunchKernelGGL(( Adjust_MAC_v_ClosedPoisson_Kernel), dim3(v_gridSize),dim3(blockSize), 0, 0, mac_v,p_d,width,height,depth); hipLaunchKernelGGL(( Adjust_MAC_w_ClosedPoisson_Kernel), dim3(w_gridSize),dim3(blockSize), 0, 0, mac_w,p_d,width,height,depth); checkCudaErrors( hipFree(b_d) ); checkCudaErrors( hipFree(p_d) ); b_d = 0; p_d = 0; } void cu_SolveClosedPoissonRedBlackwithOccupy_MAC(float* mac_u, float* mac_v, float* mac_w, const bool* occupy, const float div_per_volume, const int width, const int height, const int depth, const int maxIter) { dim3 blockSize(BLOCK_SIZE,BLOCK_SIZE); dim3 gridSize((width+blockSize.x-1)/blockSize.x,(height+blockSize.y-1)/blockSize.y); dim3 u_gridSize((width-1+blockSize.x-1)/blockSize.x,(height+blockSize.y-1)/blockSize.y); dim3 v_gridSize((width+blockSize.x-1)/blockSize.x,(height-1+blockSize.y-1)/blockSize.y); dim3 w_gridSize((width+blockSize.x-1)/blockSize.x,(height+blockSize.y-1)/blockSize.y); float* b_d = 0; float* p_d = 0; checkCudaErrors( hipMalloc((void**)&b_d,sizeof(float)*width*height*depth)); checkCudaErrors( hipMalloc((void**)&p_d,sizeof(float)*width*height*depth)); checkCudaErrors( hipMemset(b_d,0,sizeof(float)*width*height*depth)); checkCudaErrors( hipMemset(p_d,0,sizeof(float)*width*height*depth)); hipLaunchKernelGGL(( Calculate_Divergence_of_MAC_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, b_d,mac_u,mac_v,mac_w,width,height,depth); for(int i = 0;i < maxIter;i++) { hipLaunchKernelGGL(( SolvePressure_ClosedPoisson_occupy_RedBlack_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, p_d,b_d,occupy,div_per_volume,width,height,depth,true); hipLaunchKernelGGL(( SolvePressure_ClosedPoisson_occupy_RedBlack_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, p_d,b_d,occupy,div_per_volume,width,height,depth,false); } hipLaunchKernelGGL(( Adjust_MAC_u_ClosedPoisson_occupy_Kernel), 
dim3(u_gridSize),dim3(blockSize), 0, 0, mac_u,p_d,occupy,width,height,depth); hipLaunchKernelGGL(( Adjust_MAC_v_ClosedPoisson_occupy_Kernel), dim3(v_gridSize),dim3(blockSize), 0, 0, mac_v,p_d,occupy,width,height,depth); hipLaunchKernelGGL(( Adjust_MAC_w_ClosedPoisson_occupy_Kernel), dim3(w_gridSize),dim3(blockSize), 0, 0, mac_w,p_d,occupy,width,height,depth); checkCudaErrors( hipFree(b_d) ); checkCudaErrors( hipFree(p_d) ); b_d = 0; p_d = 0; } void cu_SolveClosedPoissonRedBlackwithFaceRatio_MAC(float* mac_u, float* mac_v, float* mac_w, const float* unoccupyVolume, const float* unoccupyU, const float* unoccupyV, const float* unoccupyW, const float div_per_volume, const int width ,const int height, const int depth, const int maxIter) { dim3 blockSize(BLOCK_SIZE,BLOCK_SIZE); dim3 gridSize((width+blockSize.x-1)/blockSize.x,(height+blockSize.y-1)/blockSize.y); dim3 u_gridSize((width-1+blockSize.x-1)/blockSize.x,(height+blockSize.y-1)/blockSize.y); dim3 v_gridSize((width+blockSize.x-1)/blockSize.x,(height-1+blockSize.y-1)/blockSize.y); dim3 w_gridSize((width+blockSize.x-1)/blockSize.x,(height+blockSize.y-1)/blockSize.y); float* b_d = 0; float* p_d = 0; checkCudaErrors( hipMalloc((void**)&b_d,sizeof(float)*width*height*depth)); checkCudaErrors( hipMalloc((void**)&p_d,sizeof(float)*width*height*depth)); checkCudaErrors( hipMemset(b_d,0,sizeof(float)*width*height*depth)); checkCudaErrors( hipMemset(p_d,0,sizeof(float)*width*height*depth)); hipLaunchKernelGGL(( Calculate_Divergence_of_MAC_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, b_d,mac_u,mac_v,mac_w,width,height,depth); for(int i = 0;i < maxIter;i++) { hipLaunchKernelGGL(( SolvePressure_ClosedPoisson_FaceRatio_RedBlack_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, p_d,b_d,unoccupyVolume,unoccupyU,unoccupyV,unoccupyW,div_per_volume,width,height,depth,true); hipLaunchKernelGGL(( SolvePressure_ClosedPoisson_FaceRatio_RedBlack_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, p_d,b_d,unoccupyVolume,unoccupyU,unoccupyV,unoccupyW,div_per_volume,width,height,depth,false); } hipLaunchKernelGGL(( Adjust_MAC_u_ClosedPoisson_FaceRatio_Kernel), dim3(u_gridSize),dim3(blockSize), 0, 0, mac_u,p_d,unoccupyU,width,height,depth); hipLaunchKernelGGL(( Adjust_MAC_v_ClosedPoisson_FaceRatio_Kernel), dim3(v_gridSize),dim3(blockSize), 0, 0, mac_v,p_d,unoccupyV,width,height,depth); hipLaunchKernelGGL(( Adjust_MAC_w_ClosedPoisson_FaceRatio_Kernel), dim3(w_gridSize),dim3(blockSize), 0, 0, mac_w,p_d,unoccupyW,width,height,depth); checkCudaErrors( hipFree(b_d) ); checkCudaErrors( hipFree(p_d) ); b_d = 0; p_d = 0; } /*************************************************************/ extern "C" void SolveClosedPoissonRedBlack3D_MAC(float* mac_u, float* mac_v, float* mac_w, const float div_per_volume, const int width, const int height, const int depth, const int maxIter) { float* mac_u_d = 0; float* mac_v_d = 0; float* mac_w_d = 0; checkCudaErrors( hipMalloc((void**)&mac_u_d,sizeof(float)*(width+1)*height*depth) ); checkCudaErrors( hipMalloc((void**)&mac_v_d,sizeof(float)*width*(height+1)*depth) ); checkCudaErrors( hipMalloc((void**)&mac_w_d,sizeof(float)*width*height*(depth+1)) ); checkCudaErrors( hipMemcpy(mac_u_d,mac_u,sizeof(float)*(width+1)*height*depth,hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(mac_v_d,mac_v,sizeof(float)*width*(height+1)*depth,hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(mac_w_d,mac_w,sizeof(float)*width*height*(depth+1),hipMemcpyHostToDevice) );
cu_SolveClosedPoissonRedBlack_MAC(mac_u_d,mac_v_d,mac_w_d,div_per_volume,width,height,depth,maxIter); checkCudaErrors( hipMemcpy(mac_u,mac_u_d,sizeof(float)*(width+1)*height*depth,hipMemcpyDeviceToHost) ); checkCudaErrors( hipMemcpy(mac_v,mac_v_d,sizeof(float)*width*(height+1)*depth,hipMemcpyDeviceToHost) ); checkCudaErrors( hipMemcpy(mac_w,mac_w_d,sizeof(float)*width*height*(depth+1),hipMemcpyDeviceToHost) ); checkCudaErrors( hipFree(mac_u_d) ); checkCudaErrors( hipFree(mac_v_d) ); checkCudaErrors( hipFree(mac_w_d) ); mac_u_d = 0; mac_v_d = 0; mac_w_d = 0; } extern "C" void SolveClosedPoissonRedBlackwithOccupy3D_MAC(float* mac_u, float* mac_v, float* mac_w, const bool* occupy, const float div_per_volume, const int width, const int height, const int depth, const int maxIter) { float* mac_u_d = 0; float* mac_v_d = 0; float* mac_w_d = 0; bool* occupy_d = 0; checkCudaErrors( hipMalloc((void**)&mac_u_d,sizeof(float)*(width+1)*height*depth) ); checkCudaErrors( hipMalloc((void**)&mac_v_d,sizeof(float)*width*(height+1)*depth) ); checkCudaErrors( hipMalloc((void**)&mac_w_d,sizeof(float)*width*height*(depth+1)) ); checkCudaErrors( hipMalloc((void**)&occupy_d,sizeof(bool)*width*height*depth) ); checkCudaErrors( hipMemcpy(mac_u_d,mac_u,sizeof(float)*(width+1)*height*depth,hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(mac_v_d,mac_v,sizeof(float)*width*(height+1)*depth,hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(mac_w_d,mac_w,sizeof(float)*width*height*(depth+1),hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(occupy_d,occupy,sizeof(bool)*width*height*depth,hipMemcpyHostToDevice) ); cu_SolveClosedPoissonRedBlackwithOccupy_MAC(mac_u_d,mac_v_d,mac_w_d,occupy_d,div_per_volume,width,height,depth,maxIter); checkCudaErrors( hipMemcpy(mac_u,mac_u_d,sizeof(float)*(width+1)*height*depth,hipMemcpyDeviceToHost) ); checkCudaErrors( hipMemcpy(mac_v,mac_v_d,sizeof(float)*width*(height+1)*depth,hipMemcpyDeviceToHost) ); checkCudaErrors( hipMemcpy(mac_w,mac_w_d,sizeof(float)*width*height*(depth+1),hipMemcpyDeviceToHost) ); checkCudaErrors( hipFree(mac_u_d) ); checkCudaErrors( hipFree(mac_v_d) ); checkCudaErrors( hipFree(mac_w_d) ); checkCudaErrors( hipFree(occupy_d) ); mac_u_d = 0; mac_v_d = 0; mac_w_d = 0; occupy_d = 0; } extern "C" void SolveClosedPoissonRedBlackwithFaceRatio3D_MAC(float* mac_u, float* mac_v, float* mac_w, const float* unoccupyVolume, const float* unoccupyU, const float* unoccupyV, const float* unoccupyW, const float div_per_volume, const int width, const int height, const int depth, const int maxIter) { float* mac_u_d = 0; float* mac_v_d = 0; float* mac_w_d = 0; float* unoccupyVolume_d = 0; float* unoccupyU_d = 0; float* unoccupyV_d = 0; float* unoccupyW_d = 0; checkCudaErrors( hipMalloc((void**)&mac_u_d,sizeof(float)*(width+1)*height*depth) ); checkCudaErrors( hipMalloc((void**)&mac_v_d,sizeof(float)*width*(height+1)*depth) ); checkCudaErrors( hipMalloc((void**)&mac_w_d,sizeof(float)*width*height*(depth+1)) ); checkCudaErrors( hipMalloc((void**)&unoccupyVolume_d,sizeof(float)*width*height*depth) ); checkCudaErrors( hipMalloc((void**)&unoccupyU_d,sizeof(float)*(width+1)*height*depth) ); checkCudaErrors( hipMalloc((void**)&unoccupyV_d,sizeof(float)*width*(height+1)*depth) ); checkCudaErrors( hipMalloc((void**)&unoccupyW_d,sizeof(float)*width*height*(depth+1)) ); checkCudaErrors( hipMemcpy(mac_u_d,mac_u,sizeof(float)*(width+1)*height*depth,hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(mac_v_d,mac_v,sizeof(float)*width*(height+1)*depth,hipMemcpyHostToDevice) ); 
checkCudaErrors( hipMemcpy(mac_w_d,mac_w,sizeof(float)*width*height*(depth+1),hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(unoccupyVolume_d,unoccupyVolume,sizeof(float)*width*height*depth,hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(unoccupyU_d,unoccupyU,sizeof(float)*(width+1)*height*depth,hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(unoccupyV_d,unoccupyV,sizeof(float)*width*(height+1)*depth,hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(unoccupyW_d,unoccupyW,sizeof(float)*width*height*(depth+1),hipMemcpyHostToDevice) ); cu_SolveClosedPoissonRedBlackwithFaceRatio_MAC(mac_u_d,mac_v_d,mac_w_d,unoccupyVolume_d,unoccupyU_d,unoccupyV_d,unoccupyW_d,div_per_volume,width,height,depth,maxIter); checkCudaErrors( hipMemcpy(mac_u,mac_u_d,sizeof(float)*(width+1)*height*depth,hipMemcpyDeviceToHost) ); checkCudaErrors( hipMemcpy(mac_v,mac_v_d,sizeof(float)*width*(height+1)*depth,hipMemcpyDeviceToHost) ); checkCudaErrors( hipMemcpy(mac_w,mac_w_d,sizeof(float)*width*height*(depth+1),hipMemcpyDeviceToHost) ); checkCudaErrors( hipFree(mac_u_d) ); checkCudaErrors( hipFree(mac_v_d) ); checkCudaErrors( hipFree(mac_w_d) ); checkCudaErrors( hipFree(unoccupyVolume_d) ); checkCudaErrors( hipFree(unoccupyU_d) ); checkCudaErrors( hipFree(unoccupyV_d) ); checkCudaErrors( hipFree(unoccupyW_d) ); mac_u_d = 0; mac_v_d = 0; mac_w_d = 0; unoccupyVolume_d = 0; unoccupyU_d = 0; unoccupyV_d = 0; unoccupyW_d = 0; } } #endif
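// ---------------------------------------------------------------------------
// CPU reference for one red/black sweep of the closed-Poisson solve above.
// This is a sketch that mirrors SolvePressure_ClosedPoisson_RedBlack_Kernel
// (no occupancy or face ratios): each cell is relaxed against whichever of its
// six neighbours exist inside the closed box, and the prescribed mean
// divergence div_per_volume is subtracted from the right-hand side so the
// closed system stays consistent. With the kernel's colouring, the "red" pass
// updates cells with (x + y + z) even and the "black" pass the odd ones.
static void ClosedPoissonSweepCPU(float* p, const float* divergence, const float div_per_volume,
                                  const int width, const int height, const int depth, const bool red)
{
	for(int z = 0;z < depth;z++)
	{
		for(int y = 0;y < height;y++)
		{
			for(int x = 0;x < width;x++)
			{
				if(((x+y+z)%2 == 0) != red)
					continue;
				int offset = z*height*width+y*width+x;
				float coeff = 0.0f, sigma = 0.0f;
				if(z > 0)        { sigma += p[offset-height*width]; coeff += 1.0f; }
				if(z < depth-1)  { sigma += p[offset+height*width]; coeff += 1.0f; }
				if(y > 0)        { sigma += p[offset-width];        coeff += 1.0f; }
				if(y < height-1) { sigma += p[offset+width];        coeff += 1.0f; }
				if(x > 0)        { sigma += p[offset-1];            coeff += 1.0f; }
				if(x < width-1)  { sigma += p[offset+1];            coeff += 1.0f; }
				sigma -= divergence[offset] - div_per_volume;
				p[offset] = sigma/coeff;
			}
		}
	}
}
// ---------------------------------------------------------------------------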
744491748949a97ea3512299cad66582de943257.cu
#ifndef _ZQ_CUDA_POISSON_SOLVER_3D_CLOSED_POISSON_CU_ #define _ZQ_CUDA_POISSON_SOLVER_3D_CLOSED_POISSON_CU_ #include "ZQ_CUDA_PoissonSolver3D_ClosedPoisson.cuh" #include "ZQ_CUDA_ImageProcessing3D.cuh" namespace ZQ_CUDA_PoissonSolver3D { __global__ void Adjust_MAC_u_ClosedPoisson_Kernel(float* mac_u, const float* p, const int width, const int height, const int depth) { int x = threadIdx.x + blockIdx.x * blockDim.x; //warning: x is in[0,width-2] int y = threadIdx.y + blockIdx.y * blockDim.y; x = x + 1; // then x is in [1,width-1] if(x >= width || y >= height) return ; for(int z = 0;z < depth;z++) mac_u[z*height*(width+1)+y*(width+1)+x] -= p[z*height*width+y*width+x] - p[z*height*width+y*width+x-1]; } __global__ void Adjust_MAC_v_ClosedPoisson_Kernel(float* mac_v, const float* p, const int width, const int height, const int depth) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; // warning: y is in [0, height-2] y = y + 1; // then y is in [1,height-1] if(x >= width || y >= height) return ; for(int z = 0;z < depth;z++) mac_v[z*(height+1)*width+y*width+x] -= p[z*height*width+y*width+x] - p[z*height*width+(y-1)*width+x]; } __global__ void Adjust_MAC_w_ClosedPoisson_Kernel(float* mac_w, const float* p, const int width, const int height, const int depth) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if(x >= width || y >= height) return ; for(int z = 1;z < depth;z++) mac_w[z*height*width+y*width+x] -= p[z*height*width+y*width+x] - p[(z-1)*height*width+y*width+x]; } __global__ void Adjust_MAC_u_ClosedPoisson_occupy_Kernel(float* mac_u, const float* p, const bool* occupy, const int width, const int height, const int depth) { int x = threadIdx.x + blockIdx.x * blockDim.x; //warning: x is in[0,width-2] int y = threadIdx.y + blockIdx.y * blockDim.y; x = x + 1; // then x is in [1,width-1] if(x >= width || y >= height) return ; for(int z = 0;z < depth;z++) { if(!occupy[z*height*width+y*width+x-1] && !occupy[z*height*width+y*width+x]) mac_u[z*height*(width+1)+y*(width+1)+x] -= p[z*height*width+y*width+x] - p[z*height*width+y*width+x-1]; } } __global__ void Adjust_MAC_v_ClosedPoisson_occupy_Kernel(float* mac_v, const float* p, const bool* occupy, const int width, const int height, const int depth) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; // warning: y is in [0, height-2] y = y + 1; // then y is in [1,height-1] if(x >= width || y >= height) return ; for(int z = 0;z < depth;z++) { if(!occupy[z*height*width+(y-1)*width+x] && !occupy[z*height*width+y*width+x]) mac_v[z*(height+1)*width+y*width+x] -= p[z*height*width+y*width+x] - p[z*height*width+(y-1)*width+x]; } } __global__ void Adjust_MAC_w_ClosedPoisson_occupy_Kernel(float* mac_w, const float* p, const bool* occupy, const int width, const int height, const int depth) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if(x >= width || y >= height) return ; for(int z = 1;z < depth;z++) { if(!occupy[(z-1)*height*width+y*width+x] && !occupy[z*height*width+y*width+x]) mac_w[z*height*width+y*width+x] -= p[z*height*width+y*width+x] - p[(z-1)*height*width+y*width+x]; } } __global__ void Adjust_MAC_u_ClosedPoisson_FaceRatio_Kernel(float* mac_u, const float* p, const float* unoccupyU, const int width, const int height, const int depth) { int x = threadIdx.x + blockIdx.x * blockDim.x; //warning: x is in[0,width-2] int y = threadIdx.y + blockIdx.y * blockDim.y; x = x + 1; // 
then x is in [1,width-1] if(x >= width || y >= height) return ; for(int z = 0;z < depth;z++) { if(unoccupyU[z*height*(width+1)+y*(width+1)+x] != 0) mac_u[z*height*(width+1)+y*(width+1)+x] -= p[z*height*width+y*width+x] - p[z*height*width+y*width+x-1]; } } __global__ void Adjust_MAC_v_ClosedPoisson_FaceRatio_Kernel(float* mac_v, const float* p, const float* unoccupyV, const int width, const int height, const int depth) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; // warning: y is in [0, height-2] y = y + 1; // then y is in [1,height-1] if(x >= width || y >= height) return ; for(int z = 0;z < depth;z++) { if(unoccupyV[z*(height+1)*width+y*width+x] != 0) mac_v[z*(height+1)*width+y*width+x] -= p[z*height*width+y*width+x] - p[z*height*width+(y-1)*width+x]; } } __global__ void Adjust_MAC_w_ClosedPoisson_FaceRatio_Kernel(float* mac_w, const float* p, const float* unoccupyW, const int width, const int height, const int depth) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if(x >= width || y >= height) return ; for(int z = 1;z < depth;z++) { if(unoccupyW[z*height*width+y*width+x] != 0) mac_w[z*height*width+y*width+x] -= p[z*height*width+y*width+x] - p[(z-1)*height*width+y*width+x]; } } __global__ void SolvePressure_ClosedPoisson_RedBlack_Kernel(float* p, const float* divergence, const float div_per_volume, const int width, const int height, const int depth, const bool redkernel) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if(x >= width || y >= height) return ; int rest = (x+y)%2; int start = redkernel ? rest : (1-rest); for(int z = start; z < depth; z += 2) { int offset = z*height*width+y*width+x; float coeff = 0; float sigma = 0; if(z == 0) { sigma += p[offset+height*width]; coeff += 1.0f; } else if(z == depth-1) { sigma += p[offset-height*width]; coeff += 1.0f; } else { sigma += p[offset-height*width]+p[offset+height*width]; coeff += 2.0f; } if(y == 0) { sigma += p[offset+width]; coeff += 1.0f; } else if(y == height-1) { sigma += p[offset-width]; coeff += 1.0f; } else { sigma += p[offset-width]+p[offset+width]; coeff += 2.0f; } if(x == 0) { sigma += p[offset+1]; coeff += 1.0f; } else if(x == width-1) { sigma += p[offset-1]; coeff += 1.0f; } else { sigma += p[offset+1]+p[offset-1]; coeff += 2.0f; } sigma -= divergence[offset] - div_per_volume; p[offset] = sigma/coeff; } } __global__ void SolvePressure_ClosedPoisson_occupy_RedBlack_Kernel(float* p, const float* divergence, const bool* occupy, const float div_per_volume, const int width, const int height, const int depth, const bool redkernel) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if(x >= width || y >= height) return ; int rest = (x+y)%2; int start = redkernel ? 
rest : (1-rest); for(int z = start; z < depth; z += 2) { int offset = z*height*width+y*width+x; if(occupy[offset]) { p[offset] = 0; continue ; } float coeff = 0.0f,sigma = 0.0f; if(z == 0) { if(!occupy[offset+height*width]) { sigma += p[offset+height*width]; coeff += 1.0f; } } else if(z == depth-1) { if(!occupy[offset-height*width]) { sigma += p[offset-height*width]; coeff += 1.0f; } } else { if(!occupy[offset-height*width]) { sigma += p[offset-height*width]; coeff += 1.0f; } if(!occupy[offset+height*width]) { sigma += p[offset+height*width]; coeff += 1.0f; } } if(y == 0) { if(!occupy[offset+width]) { sigma += p[offset+width]; coeff += 1.0f; } } else if(y == height-1) { if(!occupy[offset-width]) { sigma += p[offset-width]; coeff += 1.0f; } } else { if(!occupy[offset+width]) { sigma += p[offset+width]; coeff += 1.0f; } if(!occupy[offset-width]) { sigma += p[offset-width]; coeff += 1.0f; } } if(x == 0) { if(!occupy[offset+1]) { sigma += p[offset+1]; coeff += 1.0f; } } else if(x == width-1) { if(!occupy[offset-1]) { sigma += p[offset-1]; coeff += 1.0f; } } else { if(!occupy[offset+1]) { sigma += p[offset+1]; coeff += 1.0f; } if(!occupy[offset-1]) { sigma += p[offset-1]; coeff += 1.0f; } } sigma -= divergence[offset] - div_per_volume; p[offset] = sigma/coeff; } } __global__ void SolvePressure_ClosedPoisson_FaceRatio_RedBlack_Kernel(float* p, const float* divergence, const float* unoccupyVolume, const float* unoccupyU, const float* unoccupyV, const float* unoccupyW, const float div_per_volume, const int width, const int height, const int depth, const bool redkernel) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if(x >= width || y >= height) return ; int rest = (x+y)%2; int start = redkernel ? rest : (1-rest); for(int z = start; z < depth; z += 2) { int offset = z*height*width+y*width+x; float coeff = 0,sigma = 0; if(unoccupyVolume[offset] == 0) { p[offset] = 0; continue ; } if(z == 0) { float cur_ratio = unoccupyW[(z+1)*height*width+y*width+x]; sigma += cur_ratio*p[offset+height*width]; coeff += cur_ratio; } else if(z == depth-1) { float cur_ratio = unoccupyW[z*height*width+y*width+x]; sigma += cur_ratio*p[offset-height*width]; coeff += cur_ratio; } else { float cur_ratio = unoccupyW[(z+1)*height*width+y*width+x]; sigma += cur_ratio*p[offset+height*width]; coeff += cur_ratio; cur_ratio = unoccupyW[z*height*width+y*width+x]; sigma += cur_ratio*p[offset-height*width]; coeff += cur_ratio; } if(y == 0) { float cur_ratio = unoccupyV[z*(height+1)*width+(y+1)*width+x]; sigma += cur_ratio*p[offset+width]; coeff += cur_ratio; } else if(y == height-1) { float cur_ratio = unoccupyV[z*(height+1)*width+y*width+x]; sigma += cur_ratio*p[offset-width]; coeff += cur_ratio; } else { float cur_ratio = unoccupyV[z*(height+1)*width+(y+1)*width+x]; sigma += cur_ratio*p[offset+width]; coeff += cur_ratio; cur_ratio = unoccupyV[z*(height+1)*width+y*width+x]; sigma += cur_ratio*p[offset-width]; coeff += cur_ratio; } if(x == 0) { float cur_ratio = unoccupyU[z*height*(width+1)+y*(width+1)+x+1]; sigma += cur_ratio*p[offset+1]; coeff += cur_ratio; } else if(x == width-1) { float cur_ratio = unoccupyU[z*height*(width+1)+y*(width+1)+x]; sigma += cur_ratio*p[offset-1]; coeff += cur_ratio; } else { float cur_ratio = unoccupyU[z*height*(width+1)+y*(width+1)+x+1]; sigma += cur_ratio*p[offset+1]; coeff += cur_ratio; cur_ratio = unoccupyU[z*height*(width+1)+y*(width+1)+x]; sigma += cur_ratio*p[offset-1]; coeff += cur_ratio; } sigma -= divergence[offset] - 
div_per_volume*unoccupyVolume[offset]; if(coeff > 0) p[offset] = sigma/coeff; else p[offset] = 0; } } /*****************************************************************************/ void cu_SolveClosedPoissonRedBlack_MAC(float* mac_u, float* mac_v, float* mac_w, const float div_per_volume, const int width, const int height, const int depth, const int maxIter) { dim3 blockSize(BLOCK_SIZE,BLOCK_SIZE); dim3 gridSize((width+blockSize.x-1)/blockSize.x,(height+blockSize.y-1)/blockSize.y); dim3 u_gridSize((width-1+blockSize.x-1)/blockSize.x,(height+blockSize.y-1)/blockSize.y); dim3 v_gridSize((width+blockSize.x-1)/blockSize.x,(height-1+blockSize.y-1)/blockSize.y); dim3 w_gridSize((width+blockSize.x-1)/blockSize.x,(height+blockSize.y-1)/blockSize.y); float* b_d = 0; float* p_d = 0; checkCudaErrors( cudaMalloc((void**)&b_d,sizeof(float)*width*height*depth)); checkCudaErrors( cudaMalloc((void**)&p_d,sizeof(float)*width*height*depth)); checkCudaErrors( cudaMemset(b_d,0,sizeof(float)*width*height*depth)); checkCudaErrors( cudaMemset(p_d,0,sizeof(float)*width*height*depth)); Calculate_Divergence_of_MAC_Kernel<<<gridSize,blockSize>>>(b_d,mac_u,mac_v,mac_w,width,height,depth); for(int i = 0;i < maxIter;i++) { SolvePressure_ClosedPoisson_RedBlack_Kernel<<<gridSize,blockSize>>>(p_d,b_d,div_per_volume,width,height,depth,true); SolvePressure_ClosedPoisson_RedBlack_Kernel<<<gridSize,blockSize>>>(p_d,b_d,div_per_volume,width,height,depth,false); } Adjust_MAC_u_ClosedPoisson_Kernel<<<u_gridSize,blockSize>>>(mac_u,p_d,width,height,depth); Adjust_MAC_v_ClosedPoisson_Kernel<<<v_gridSize,blockSize>>>(mac_v,p_d,width,height,depth); Adjust_MAC_w_ClosedPoisson_Kernel<<<w_gridSize,blockSize>>>(mac_w,p_d,width,height,depth); checkCudaErrors( cudaFree(b_d) ); checkCudaErrors( cudaFree(p_d) ); b_d = 0; p_d = 0; } void cu_SolveClosedPoissonRedBlackwithOccupy_MAC(float* mac_u, float* mac_v, float* mac_w, const bool* occupy, const float div_per_volume, const int width, const int height, const int depth, const int maxIter) { dim3 blockSize(BLOCK_SIZE,BLOCK_SIZE); dim3 gridSize((width+blockSize.x-1)/blockSize.x,(height+blockSize.y-1)/blockSize.y); dim3 u_gridSize((width-1+blockSize.x-1)/blockSize.x,(height+blockSize.y-1)/blockSize.y); dim3 v_gridSize((width+blockSize.x-1)/blockSize.x,(height-1+blockSize.y-1)/blockSize.y); dim3 w_gridSize((width+blockSize.x-1)/blockSize.x,(height+blockSize.y-1)/blockSize.y); float* b_d = 0; float* p_d = 0; checkCudaErrors( cudaMalloc((void**)&b_d,sizeof(float)*width*height*depth)); checkCudaErrors( cudaMalloc((void**)&p_d,sizeof(float)*width*height*depth)); checkCudaErrors( cudaMemset(b_d,0,sizeof(float)*width*height*depth)); checkCudaErrors( cudaMemset(p_d,0,sizeof(float)*width*height*depth)); Calculate_Divergence_of_MAC_Kernel<<<gridSize,blockSize>>>(b_d,mac_u,mac_v,mac_w,width,height,depth); for(int i = 0;i < maxIter;i++) { SolvePressure_ClosedPoisson_occupy_RedBlack_Kernel<<<gridSize,blockSize>>>(p_d,b_d,occupy,div_per_volume,width,height,depth,true); SolvePressure_ClosedPoisson_occupy_RedBlack_Kernel<<<gridSize,blockSize>>>(p_d,b_d,occupy,div_per_volume,width,height,depth,false); } Adjust_MAC_u_ClosedPoisson_occupy_Kernel<<<u_gridSize,blockSize>>>(mac_u,p_d,occupy,width,height,depth); Adjust_MAC_v_ClosedPoisson_occupy_Kernel<<<v_gridSize,blockSize>>>(mac_v,p_d,occupy,width,height,depth); Adjust_MAC_w_ClosedPoisson_occupy_Kernel<<<w_gridSize,blockSize>>>(mac_w,p_d,occupy,width,height,depth); checkCudaErrors( cudaFree(b_d) ); checkCudaErrors( cudaFree(p_d) ); b_d = 0; p_d = 0; } void 
cu_SolveClosedPoissonRedBlackwithFaceRatio_MAC(float* mac_u, float* mac_v, float* mac_w, const float* unoccupyVolume, const float* unoccupyU, const float* unoccupyV, const float* unoccupyW, const float div_per_volume, const int width ,const int height, const int depth, const int maxIter) { dim3 blockSize(BLOCK_SIZE,BLOCK_SIZE); dim3 gridSize((width+blockSize.x-1)/blockSize.x,(height+blockSize.y-1)/blockSize.y); dim3 u_gridSize((width-1+blockSize.x-1)/blockSize.x,(height+blockSize.y-1)/blockSize.y); dim3 v_gridSize((width+blockSize.x-1)/blockSize.x,(height-1+blockSize.y-1)/blockSize.y); dim3 w_gridSize((width+blockSize.x-1)/blockSize.x,(height+blockSize.y-1)/blockSize.y); float* b_d = 0; float* p_d = 0; checkCudaErrors( cudaMalloc((void**)&b_d,sizeof(float)*width*height*depth)); checkCudaErrors( cudaMalloc((void**)&p_d,sizeof(float)*width*height*depth)); checkCudaErrors( cudaMemset(b_d,0,sizeof(float)*width*height*depth)); checkCudaErrors( cudaMemset(p_d,0,sizeof(float)*width*height*depth)); Calculate_Divergence_of_MAC_Kernel<<<gridSize,blockSize>>>(b_d,mac_u,mac_v,mac_w,width,height,depth); for(int i = 0;i < maxIter;i++) { SolvePressure_ClosedPoisson_FaceRatio_RedBlack_Kernel<<<gridSize,blockSize>>>(p_d,b_d,unoccupyVolume,unoccupyU,unoccupyV,unoccupyW,div_per_volume,width,height,depth,true); SolvePressure_ClosedPoisson_FaceRatio_RedBlack_Kernel<<<gridSize,blockSize>>>(p_d,b_d,unoccupyVolume,unoccupyU,unoccupyV,unoccupyW,div_per_volume,width,height,depth,false); } Adjust_MAC_u_ClosedPoisson_FaceRatio_Kernel<<<u_gridSize,blockSize>>>(mac_u,p_d,unoccupyU,width,height,depth); Adjust_MAC_v_ClosedPoisson_FaceRatio_Kernel<<<v_gridSize,blockSize>>>(mac_v,p_d,unoccupyV,width,height,depth); Adjust_MAC_w_ClosedPoisson_FaceRatio_Kernel<<<w_gridSize,blockSize>>>(mac_w,p_d,unoccupyW,width,height,depth); checkCudaErrors( cudaFree(b_d) ); checkCudaErrors( cudaFree(p_d) ); b_d = 0; p_d = 0; } /*************************************************************/ extern "C" void SolveClosedPoissonRedBlack3D_MAC(float* mac_u, float* mac_v, float* mac_w, const float div_per_volume, const int width, const int height, const int depth, const int maxIter) { float* mac_u_d = 0; float* mac_v_d = 0; float* mac_w_d = 0; checkCudaErrors( cudaMalloc((void**)&mac_u_d,sizeof(float)*(width+1)*height*depth) ); checkCudaErrors( cudaMalloc((void**)&mac_v_d,sizeof(float)*width*(height+1)*depth) ); checkCudaErrors( cudaMalloc((void**)&mac_w_d,sizeof(float)*width*height*(depth+1)) ); checkCudaErrors( cudaMemcpy(mac_u_d,mac_u,sizeof(float)*(width+1)*height*depth,cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(mac_v_d,mac_v,sizeof(float)*width*(height+1)*depth,cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(mac_w_d,mac_w,sizeof(float)*width*height*(depth+1),cudaMemcpyHostToDevice) ); cu_SolveClosedPoissonRedBlack_MAC(mac_u_d,mac_v_d,mac_w_d,div_per_volume,width,height,depth,maxIter); checkCudaErrors( cudaMemcpy(mac_u,mac_u_d,sizeof(float)*(width+1)*height*depth,cudaMemcpyDeviceToHost) ); checkCudaErrors( cudaMemcpy(mac_v,mac_v_d,sizeof(float)*width*(height+1)*depth,cudaMemcpyDeviceToHost) ); checkCudaErrors( cudaMemcpy(mac_w,mac_w_d,sizeof(float)*width*height*(depth+1),cudaMemcpyDeviceToHost) ); checkCudaErrors( cudaFree(mac_u_d) ); checkCudaErrors( cudaFree(mac_v_d) ); checkCudaErrors( cudaFree(mac_w_d) ); mac_u_d = 0; mac_v_d = 0; mac_w_d = 0; } extern "C" void SolveClosedPoissonRedBlackwithOccupy3D_MAC(float* mac_u, float* mac_v, float* mac_w, const bool* occupy, const float div_per_volume, const int width, const int 
height, const int depth, const int maxIter) { float* mac_u_d = 0; float* mac_v_d = 0; float* mac_w_d = 0; bool* occupy_d = 0; checkCudaErrors( cudaMalloc((void**)&mac_u_d,sizeof(float)*(width+1)*height*depth) ); checkCudaErrors( cudaMalloc((void**)&mac_v_d,sizeof(float)*width*(height+1)*depth) ); checkCudaErrors( cudaMalloc((void**)&mac_w_d,sizeof(float)*width*height*(depth+1)) ); checkCudaErrors( cudaMalloc((void**)&occupy_d,sizeof(bool)*width*height*depth) ); checkCudaErrors( cudaMemcpy(mac_u_d,mac_u,sizeof(float)*(width+1)*height*depth,cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(mac_v_d,mac_v,sizeof(float)*width*(height+1)*depth,cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(mac_w_d,mac_w,sizeof(float)*width*height*(depth+1),cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(occupy_d,occupy,sizeof(bool)*width*height*depth,cudaMemcpyHostToDevice) ); cu_SolveClosedPoissonRedBlackwithOccupy_MAC(mac_u_d,mac_v_d,mac_w_d,occupy_d,div_per_volume,width,height,depth,maxIter); checkCudaErrors( cudaMemcpy(mac_u,mac_u_d,sizeof(float)*(width+1)*height*depth,cudaMemcpyDeviceToHost) ); checkCudaErrors( cudaMemcpy(mac_v,mac_v_d,sizeof(float)*width*(height+1)*depth,cudaMemcpyDeviceToHost) ); checkCudaErrors( cudaMemcpy(mac_w,mac_w_d,sizeof(float)*width*height*(depth+1),cudaMemcpyDeviceToHost) ); checkCudaErrors( cudaFree(mac_u_d) ); checkCudaErrors( cudaFree(mac_v_d) ); checkCudaErrors( cudaFree(mac_w_d) ); checkCudaErrors( cudaFree(occupy_d) ); mac_u_d = 0; mac_v_d = 0; mac_w_d = 0; occupy_d = 0; } extern "C" void SolveClosedPoissonRedBlackwithFaceRatio3D_MAC(float* mac_u, float* mac_v, float* mac_w, const float* unoccupyVolume, const float* unoccupyU, const float* unoccupyV, const float* unoccupyW, const float div_per_volume, const int width, const int height, const int depth, const int maxIter) { float* mac_u_d = 0; float* mac_v_d = 0; float* mac_w_d = 0; float* unoccupyVolume_d = 0; float* unoccupyU_d = 0; float* unoccupyV_d = 0; float* unoccupyW_d = 0; checkCudaErrors( cudaMalloc((void**)&mac_u_d,sizeof(float)*(width+1)*height*depth) ); checkCudaErrors( cudaMalloc((void**)&mac_v_d,sizeof(float)*width*(height+1)*depth) ); checkCudaErrors( cudaMalloc((void**)&mac_w_d,sizeof(float)*width*height*(depth+1)) ); checkCudaErrors( cudaMalloc((void**)&unoccupyVolume_d,sizeof(float)*width*height*depth) ); checkCudaErrors( cudaMalloc((void**)&unoccupyU_d,sizeof(float)*(width+1)*height*depth) ); checkCudaErrors( cudaMalloc((void**)&unoccupyV_d,sizeof(float)*width*(height+1)*depth) ); checkCudaErrors( cudaMalloc((void**)&unoccupyW_d,sizeof(float)*width*height*(depth+1)) ); checkCudaErrors( cudaMemcpy(mac_u_d,mac_u,sizeof(float)*(width+1)*height*depth,cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(mac_v_d,mac_v,sizeof(float)*width*(height+1)*depth,cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(mac_w_d,mac_w,sizeof(float)*width*height*(depth+1),cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(unoccupyVolume_d,unoccupyVolume,sizeof(float)*width*height*depth,cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(unoccupyU_d,unoccupyU,sizeof(float)*(width+1)*height*depth,cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(unoccupyV_d,unoccupyV,sizeof(float)*width*(height+1)*depth,cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(unoccupyW_d,unoccupyW,sizeof(float)*width*height*(depth+1),cudaMemcpyHostToDevice) ); 
cu_SolveClosedPoissonRedBlackwithFaceRatio_MAC(mac_u_d,mac_v_d,mac_w_d,unoccupyVolume_d,unoccupyU_d,unoccupyV_d,unoccupyW_d,div_per_volume,width,height,depth,maxIter); checkCudaErrors( cudaMemcpy(mac_u,mac_u_d,sizeof(float)*(width+1)*height*depth,cudaMemcpyDeviceToHost) ); checkCudaErrors( cudaMemcpy(mac_v,mac_v_d,sizeof(float)*width*(height+1)*depth,cudaMemcpyDeviceToHost) ); checkCudaErrors( cudaMemcpy(mac_w,mac_w_d,sizeof(float)*width*height*(depth+1),cudaMemcpyDeviceToHost) ); checkCudaErrors( cudaFree(mac_u_d) ); checkCudaErrors( cudaFree(mac_v_d) ); checkCudaErrors( cudaFree(mac_w_d) ); checkCudaErrors( cudaFree(unoccupyVolume_d) ); checkCudaErrors( cudaFree(unoccupyU_d) ); checkCudaErrors( cudaFree(unoccupyV_d) ); checkCudaErrors( cudaFree(unoccupyW_d) ); mac_u_d = 0; mac_v_d = 0; mac_w_d = 0; unoccupyVolume_d = 0; unoccupyU_d = 0; unoccupyV_d = 0; unoccupyW_d = 0; } } #endif
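// ---------------------------------------------------------------------------
// Host-side usage sketch for the exported entry point above. The MAC velocity
// components use the staggered sizes that the wrapper's cudaMemcpy calls
// assume: u has (width+1)*height*depth entries, v has width*(height+1)*depth
// and w has width*height*(depth+1). The div_per_volume and maxIter values
// below are illustrative placeholders, not taken from the original code.
#include <vector>

extern "C" void SolveClosedPoissonRedBlack3D_MAC(float* mac_u, float* mac_v, float* mac_w,
                                                 const float div_per_volume, const int width,
                                                 const int height, const int depth, const int maxIter);

static void ProjectMACVelocity(int width, int height, int depth)
{
	std::vector<float> u((width+1)*height*depth, 0.0f);
	std::vector<float> v(width*(height+1)*depth, 0.0f);
	std::vector<float> w(width*height*(depth+1), 0.0f);

	// ... fill u, v, w with the current MAC velocities ...

	const float div_per_volume = 0.0f; // target mean divergence per cell
	const int maxIter = 100;           // red-black Gauss-Seidel iterations
	SolveClosedPoissonRedBlack3D_MAC(u.data(), v.data(), w.data(),
	                                 div_per_volume, width, height, depth, maxIter);
	// On return, u, v, w hold the pressure-projected velocities.
}
// ---------------------------------------------------------------------------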
dfffa843eebf4cf6874b185286f21b1dc963f7fe.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright 2019 Yan Yan // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <ATen/ATen.h> #include <chrono> #include <limits> #include <spconv/reordering.cu.h> #include <spconv/reordering.h> #include <tensorview/cuda_utils.h> #include <tensorview/kernel_utils.h> #include <tensorview/mp_helper.h> #include <tensorview/tensor.h> #include <tensorview/tensorview.h> #include <tensorview/torch_utils.h> #include <type_traits> #include <utility/timer.h> namespace spconv { using float_types_t = tv::mp_list<float, double, at::Half>; using int_types_t = tv::mp_list<int32_t, int64_t>; template <typename T> struct half_vec{ using type = typename std::conditional_t<std::is_same<T, at::Half>::value, int4, int4>; }; template <typename T> struct half_vec_sadd{ using type = typename std::conditional_t<std::is_same<T, at::Half>::value, int4, int4>; }; using kernel_block_t = tv::mp_list_c<int, 64, 32, 16>; void sparse_gather_cuda(torch::Tensor buffer, torch::Tensor features, torch::Tensor indices, int size) { if (size <= 0) return; int numPlanes = features.size(1); auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); auto dtype = features.scalar_type(); auto inds_dtype = indices.scalar_type(); tv::DispatchTorch<float_types_t>()(dtype, [&](auto TValue) { using T = decltype(TValue); using vecload_type_t = typename half_vec_sadd<T>::type; tv::DispatchTorch<int_types_t>()(inds_dtype, [&](auto IndexValue) { using Index = decltype(IndexValue); bool notFound = true; constexpr int vecloadFactor = sizeof(vecload_type_t) / sizeof(T); tv::mp_for_each<kernel_block_t>([=, &buffer, &features, &indices, &notFound](auto NumTLP) { constexpr int NumILP = NumTLP / 4; // constexpr int NumILP = NumTLP / (64 / (NumTLP / vecloadFactor)); int nHotBlock = (size / NumTLP) * NumTLP; if (notFound) { if (numPlanes % NumTLP == 0) { if (nHotBlock >= NumTLP) { hipLaunchKernelGGL(( gatherVecBlockKernel<T, Index, int(NumTLP), NumILP, vecload_type_t>) , dim3(dim3(size / NumTLP, numPlanes / NumTLP)), dim3(dim3(NumTLP / NumILP, NumTLP / vecloadFactor)), 0, stream, buffer.data_ptr<T>(), features.data_ptr<T>(), indices.data_ptr<Index>(), nHotBlock, numPlanes / vecloadFactor); #ifdef TV_LOG_KERNEL_INFO hipFuncAttributes attr; checkCudaErrors(hipFuncGetAttributes( &attr, gatherVecBlockKernel<T, Index, int(NumTLP), NumILP, vecload_type_t>)); tv::ssprint("gatherVecBlockKernel<", tv::type_s<T>, tv::type_s<Index>, int(NumTLP), NumILP, ">", attr.numRegs); #endif TV_CHECK_CUDA_ERR(); } if (size - nHotBlock > 0) { hipLaunchKernelGGL(( gatherVecKernel<T, Index, int(NumTLP), NumILP, vecload_type_t>) , dim3(dim3(1, numPlanes / NumTLP)), dim3(dim3(NumTLP / NumILP, NumTLP / vecloadFactor)), 0, stream, buffer.data_ptr<T>() + nHotBlock * numPlanes, features.data_ptr<T>(), indices.data_ptr<Index>() + nHotBlock, size - nHotBlock, numPlanes / vecloadFactor); #ifdef TV_LOG_KERNEL_INFO hipFuncAttributes attr; checkCudaErrors(hipFuncGetAttributes( &attr, 
gatherVecKernel<T, Index, int(NumTLP), NumILP, vecload_type_t>)); tv::ssprint("gatherVecKernel<", tv::type_s<T>, tv::type_s<Index>, int(NumTLP), NumILP, ">", attr.numRegs); #endif TV_CHECK_CUDA_ERR(); } notFound = false; } } }); if (notFound) { constexpr int NumTLP = 64; constexpr int NumILP = NumTLP / 4; hipLaunchKernelGGL(( gatherGenericKernel<T, Index, NumTLP, NumILP>) , dim3(tv::cuda::DivUp(size, NumTLP), tv::cuda::DivUp(numPlanes, NumTLP)), dim3(dim3(NumTLP / NumILP, NumTLP)), 0, stream, buffer.data_ptr<T>(), features.data_ptr<T>(), indices.data_ptr<Index>(), size, numPlanes); #ifdef TV_LOG_KERNEL_INFO hipFuncAttributes attr; checkCudaErrors(hipFuncGetAttributes( &attr, gatherGenericKernel<T, Index, NumTLP, NumILP>)); tv::ssprint("gatherGenericKernel<", tv::type_s<T>, tv::type_s<Index>, int(NumTLP), NumILP, ">", attr.numRegs); #endif TV_CHECK_CUDA_ERR(); } }); }); } void sparse_scatter_add_cuda(torch::Tensor buffer, torch::Tensor outFeatures, torch::Tensor indices, int size) { if (size <= 0) return; int numPlanes = outFeatures.size(1); auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); auto dtype = outFeatures.scalar_type(); auto inds_dtype = indices.scalar_type(); tv::DispatchTorch<float_types_t>()(dtype, [&](auto TValue) { using T = decltype(TValue); using vecload_type_t = typename half_vec_sadd<T>::type; tv::DispatchTorch<int_types_t>()(inds_dtype, [&](auto IndexValue) { using Index = decltype(IndexValue); bool notFound = true; constexpr int vecloadFactor = sizeof(vecload_type_t) / sizeof(T); // important for half. tv::mp_for_each<kernel_block_t>([=, &outFeatures, &buffer, &indices, &notFound](auto NumTLP) { // constexpr int NumILP = NumTLP / (64 / (NumTLP / // vecloadFactor)); constexpr int NumILP = NumTLP / 4; int nHotBlock = (size / NumTLP) * NumTLP; if (notFound) { if (numPlanes % NumTLP == 0) { if (nHotBlock >= NumTLP) { hipLaunchKernelGGL(( scatterAddVecBlockKernel<T, Index, int(NumTLP), NumILP, vecload_type_t>) , dim3(dim3(size / NumTLP, numPlanes / NumTLP)), dim3(dim3(NumTLP / NumILP, NumTLP / vecloadFactor)), 0, stream, outFeatures.data_ptr<T>(), buffer.data_ptr<T>(), indices.data_ptr<Index>(), nHotBlock, numPlanes / vecloadFactor); #ifdef TV_LOG_KERNEL_INFO hipFuncAttributes attr; checkCudaErrors(hipFuncGetAttributes( &attr, scatterAddVecBlockKernel<T, Index, int(NumTLP), NumILP, vecload_type_t>)); tv::ssprint("scatterAddVecBlockKernel<", tv::type_s<T>, tv::type_s<Index>, int(NumTLP), NumILP, ">", attr.numRegs); #endif TV_CHECK_CUDA_ERR(); } if (size - nHotBlock > 0) { hipLaunchKernelGGL(( scatterAddGenericKernel<T, Index, int(NumTLP), NumILP>) , dim3(dim3(1, numPlanes / NumTLP)), dim3(dim3(NumTLP / NumILP, NumTLP)), 0, stream, outFeatures.data_ptr<T>(), buffer.data_ptr<T>() + nHotBlock * numPlanes, indices.data_ptr<Index>() + nHotBlock, size - nHotBlock, numPlanes); #ifdef TV_LOG_KERNEL_INFO hipFuncAttributes attr; checkCudaErrors(hipFuncGetAttributes( &attr, scatterAddGenericKernel<T, Index, int(NumTLP), NumILP>)); tv::ssprint("scatterAddGenericKernel<", tv::type_s<T>, tv::type_s<Index>, int(NumTLP), NumILP, ">", attr.numRegs); #endif TV_CHECK_CUDA_ERR(); } notFound = false; } } }); if (notFound) { constexpr int NumTLP = 64; constexpr int NumILP = NumTLP / 4; hipLaunchKernelGGL(( scatterAddGenericKernel<T, Index, NumTLP, NumILP>) , dim3(tv::cuda::DivUp(size, NumTLP), tv::cuda::DivUp(numPlanes, NumTLP)), dim3(dim3(NumTLP / NumILP, NumTLP)), 0, stream, outFeatures.data_ptr<T>(), buffer.data_ptr<T>(), indices.data_ptr<Index>(), size, numPlanes); #ifdef 
TV_LOG_KERNEL_INFO hipFuncAttributes attr; checkCudaErrors(hipFuncGetAttributes( &attr, scatterAddGenericKernel<T, Index, int(NumTLP), NumILP>)); tv::ssprint("notfound scatterAddGenericKernel<", tv::type_s<T>, tv::type_s<Index>, int(NumTLP), NumILP, ">", attr.numRegs); #endif TV_CHECK_CUDA_ERR(); } }); }); } void batch_sparse_gather_cuda(torch::Tensor buffer, torch::Tensor features, torch::Tensor indices, int size) { // indices: [volume, inds_stride] // buffer: [volume, num_points, num_features] // size == volume * num_points if (size <= 0) return; int numPlanes = features.size(1); auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); auto dtype = features.scalar_type(); auto inds_dtype = indices.scalar_type(); int inds_stride = indices.size(1); int feature_stride = buffer.size(1); tv::DispatchTorch<float_types_t>()(dtype, [&](auto TValue) { using T = decltype(TValue); using vecload_type_t = typename half_vec<T>::type; tv::DispatchTorch<int_types_t>()(inds_dtype, [&](auto IndexValue) { using Index = decltype(IndexValue); bool notFound = true; constexpr int vecloadFactor = sizeof(vecload_type_t) / sizeof(T); tv::mp_for_each<kernel_block_t>( [=, &buffer, &features, &indices, &notFound](auto NumTLP) { constexpr int NumILP = NumTLP / 4; // constexpr int NumILP = NumTLP / (64 / (NumTLP / vecloadFactor)); int nHotBlock = (size / NumTLP) * NumTLP; if (notFound) { if (numPlanes % NumTLP == 0) { if (nHotBlock >= NumTLP) { hipLaunchKernelGGL(( batchGatherVecBlockKernel<T, Index, int(NumTLP), NumILP, vecload_type_t>) , dim3(dim3(size / NumTLP, numPlanes / NumTLP)), dim3(dim3(NumTLP / NumILP, NumTLP / vecloadFactor)), 0, stream, buffer.data_ptr<T>(), features.data_ptr<T>(), indices.data_ptr<Index>(), nHotBlock, numPlanes / vecloadFactor, inds_stride, feature_stride); TV_CHECK_CUDA_ERR_V2("batchGatherVecBlockKernel"); } if (size - nHotBlock > 0) { hipLaunchKernelGGL(( batchGatherVecKernel<T, Index, int(NumTLP), NumILP, vecload_type_t>) , dim3(dim3(1, numPlanes / NumTLP)), dim3(dim3(NumTLP / NumILP, NumTLP / vecloadFactor)), 0, stream, buffer.data_ptr<T>() + nHotBlock * numPlanes, features.data_ptr<T>(), indices.data_ptr<Index>(), size - nHotBlock, nHotBlock, numPlanes / vecloadFactor, inds_stride, feature_stride); TV_CHECK_CUDA_ERR_V2("batchGatherVecKernel"); } notFound = false; } } }); if (notFound) { constexpr int NumTLP = 64; constexpr int NumILP = NumTLP / 4; hipLaunchKernelGGL(( batchGatherGenericKernel<T, Index, NumTLP, NumILP>) , dim3(tv::cuda::DivUp(size, NumTLP), tv::cuda::DivUp(numPlanes, NumTLP)), dim3(dim3(NumTLP / NumILP, NumTLP)), 0, stream, buffer.data_ptr<T>(), features.data_ptr<T>(), indices.data_ptr<Index>(), size, numPlanes, inds_stride, feature_stride); TV_CHECK_CUDA_ERR(); } }); }); } void batch_sparse_scatter_add_cuda(torch::Tensor buffer, torch::Tensor outFeatures, torch::Tensor indices, int size) { // indices: [volume, inds_stride] // buffer: [volume, num_points, num_features] // size == volume * num_points if (size <= 0) return; int numPlanes = outFeatures.size(1); auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); auto dtype = outFeatures.scalar_type(); auto inds_dtype = indices.scalar_type(); int inds_stride = indices.size(1); int feature_stride = buffer.size(1); tv::DispatchTorch<float_types_t>()(dtype, [&](auto TValue) { using T = decltype(TValue); using vecload_type_t = typename half_vec_sadd<T>::type; tv::DispatchTorch<int_types_t>()(inds_dtype, [&](auto IndexValue) { using Index = decltype(IndexValue); bool notFound = true; constexpr int 
vecloadFactor = 1; // important for half. tv::mp_for_each<kernel_block_t>([=, &outFeatures, &buffer, &indices, &notFound](auto NumTLP) { // constexpr int NumILP = NumTLP / (64 / (NumTLP / // vecloadFactor)); constexpr int NumILP = NumTLP / 4; int nHotBlock = (size / NumTLP) * NumTLP; if (notFound) { if (numPlanes % NumTLP == 0) { if (nHotBlock >= NumTLP) { hipLaunchKernelGGL(( batchScatterAddBlockKernel<T, Index, int(NumTLP), NumILP>) , dim3(dim3(size / NumTLP, numPlanes / NumTLP)), dim3(dim3(NumTLP / NumILP, NumTLP / vecloadFactor)), 0, stream, outFeatures.data_ptr<T>(), buffer.data_ptr<T>(), indices.data_ptr<Index>(), nHotBlock, numPlanes / vecloadFactor, inds_stride, feature_stride); TV_CHECK_CUDA_ERR(); } if (size - nHotBlock > 0) { hipLaunchKernelGGL(( batchScatterAddGenericKernel<T, Index, int(NumTLP), NumILP>) , dim3(dim3(1, numPlanes / NumTLP)), dim3(dim3(NumTLP / NumILP, NumTLP)), 0, stream, outFeatures.data_ptr<T>(), buffer.data_ptr<T>() + nHotBlock * numPlanes, indices.data_ptr<Index>(), size - nHotBlock, nHotBlock, numPlanes, inds_stride, feature_stride); TV_CHECK_CUDA_ERR(); } notFound = false; } } }); if (notFound) { constexpr int NumTLP = 64; constexpr int NumILP = NumTLP / 4; hipLaunchKernelGGL(( batchScatterAddGenericKernel<T, Index, NumTLP, NumILP>) , dim3(tv::cuda::DivUp(size, NumTLP), tv::cuda::DivUp(numPlanes, NumTLP)), dim3(dim3(NumTLP / NumILP, NumTLP)), 0, stream, outFeatures.data_ptr<T>(), buffer.data_ptr<T>(), indices.data_ptr<Index>(), size, 0, numPlanes, inds_stride, feature_stride); TV_CHECK_CUDA_ERR(); } }); }); } } // namespace spconv
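// ---------------------------------------------------------------------------
// CPU sketch of the data movement behind sparse_gather_cuda and
// sparse_scatter_add_cuda above, as implied by the kernel names and launch
// arguments: gather copies row indices[i] of `features` into row i of the
// dense GEMM buffer, and scatter-add accumulates buffer row i back into row
// indices[i] of `outFeatures`. The real kernels add vectorised (int4) loads,
// ILP and block tiling purely for throughput; the semantics are assumed to be
// the same as this reference.
template <typename T, typename Index>
void sparseGatherRef(T* buffer, const T* features, const Index* indices,
                     int size, int numPlanes) {
  for (int i = 0; i < size; ++i)
    for (int c = 0; c < numPlanes; ++c)
      buffer[i * numPlanes + c] = features[indices[i] * numPlanes + c];
}

template <typename T, typename Index>
void sparseScatterAddRef(T* outFeatures, const T* buffer, const Index* indices,
                         int size, int numPlanes) {
  for (int i = 0; i < size; ++i)
    for (int c = 0; c < numPlanes; ++c)
      outFeatures[indices[i] * numPlanes + c] += buffer[i * numPlanes + c];
}
// ---------------------------------------------------------------------------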
dfffa843eebf4cf6874b185286f21b1dc963f7fe.cu
// Copyright 2019 Yan Yan // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <ATen/ATen.h> #include <chrono> #include <limits> #include <spconv/reordering.cu.h> #include <spconv/reordering.h> #include <tensorview/cuda_utils.h> #include <tensorview/kernel_utils.h> #include <tensorview/mp_helper.h> #include <tensorview/tensor.h> #include <tensorview/tensorview.h> #include <tensorview/torch_utils.h> #include <type_traits> #include <utility/timer.h> namespace spconv { using float_types_t = tv::mp_list<float, double, at::Half>; using int_types_t = tv::mp_list<int32_t, int64_t>; template <typename T> struct half_vec{ using type = typename std::conditional_t<std::is_same<T, at::Half>::value, int4, int4>; }; template <typename T> struct half_vec_sadd{ using type = typename std::conditional_t<std::is_same<T, at::Half>::value, int4, int4>; }; using kernel_block_t = tv::mp_list_c<int, 64, 32, 16>; void sparse_gather_cuda(torch::Tensor buffer, torch::Tensor features, torch::Tensor indices, int size) { if (size <= 0) return; int numPlanes = features.size(1); auto stream = at::cuda::getCurrentCUDAStream(); auto dtype = features.scalar_type(); auto inds_dtype = indices.scalar_type(); tv::DispatchTorch<float_types_t>()(dtype, [&](auto TValue) { using T = decltype(TValue); using vecload_type_t = typename half_vec_sadd<T>::type; tv::DispatchTorch<int_types_t>()(inds_dtype, [&](auto IndexValue) { using Index = decltype(IndexValue); bool notFound = true; constexpr int vecloadFactor = sizeof(vecload_type_t) / sizeof(T); tv::mp_for_each<kernel_block_t>([=, &buffer, &features, &indices, &notFound](auto NumTLP) { constexpr int NumILP = NumTLP / 4; // constexpr int NumILP = NumTLP / (64 / (NumTLP / vecloadFactor)); int nHotBlock = (size / NumTLP) * NumTLP; if (notFound) { if (numPlanes % NumTLP == 0) { if (nHotBlock >= NumTLP) { gatherVecBlockKernel<T, Index, int(NumTLP), NumILP, vecload_type_t> <<<dim3(size / NumTLP, numPlanes / NumTLP), dim3(NumTLP / NumILP, NumTLP / vecloadFactor), 0, stream>>>(buffer.data_ptr<T>(), features.data_ptr<T>(), indices.data_ptr<Index>(), nHotBlock, numPlanes / vecloadFactor); #ifdef TV_LOG_KERNEL_INFO cudaFuncAttributes attr; checkCudaErrors(cudaFuncGetAttributes( &attr, gatherVecBlockKernel<T, Index, int(NumTLP), NumILP, vecload_type_t>)); tv::ssprint("gatherVecBlockKernel<", tv::type_s<T>, tv::type_s<Index>, int(NumTLP), NumILP, ">", attr.numRegs); #endif TV_CHECK_CUDA_ERR(); } if (size - nHotBlock > 0) { gatherVecKernel<T, Index, int(NumTLP), NumILP, vecload_type_t> <<<dim3(1, numPlanes / NumTLP), dim3(NumTLP / NumILP, NumTLP / vecloadFactor), 0, stream>>>(buffer.data_ptr<T>() + nHotBlock * numPlanes, features.data_ptr<T>(), indices.data_ptr<Index>() + nHotBlock, size - nHotBlock, numPlanes / vecloadFactor); #ifdef TV_LOG_KERNEL_INFO cudaFuncAttributes attr; checkCudaErrors(cudaFuncGetAttributes( &attr, gatherVecKernel<T, Index, int(NumTLP), NumILP, vecload_type_t>)); tv::ssprint("gatherVecKernel<", tv::type_s<T>, tv::type_s<Index>, int(NumTLP), NumILP, ">", attr.numRegs); 
#endif TV_CHECK_CUDA_ERR(); } notFound = false; } } }); if (notFound) { constexpr int NumTLP = 64; constexpr int NumILP = NumTLP / 4; gatherGenericKernel<T, Index, NumTLP, NumILP> <<<dim3(tv::cuda::DivUp(size, NumTLP), tv::cuda::DivUp(numPlanes, NumTLP)), dim3(NumTLP / NumILP, NumTLP), 0, stream>>>( buffer.data_ptr<T>(), features.data_ptr<T>(), indices.data_ptr<Index>(), size, numPlanes); #ifdef TV_LOG_KERNEL_INFO cudaFuncAttributes attr; checkCudaErrors(cudaFuncGetAttributes( &attr, gatherGenericKernel<T, Index, NumTLP, NumILP>)); tv::ssprint("gatherGenericKernel<", tv::type_s<T>, tv::type_s<Index>, int(NumTLP), NumILP, ">", attr.numRegs); #endif TV_CHECK_CUDA_ERR(); } }); }); } void sparse_scatter_add_cuda(torch::Tensor buffer, torch::Tensor outFeatures, torch::Tensor indices, int size) { if (size <= 0) return; int numPlanes = outFeatures.size(1); auto stream = at::cuda::getCurrentCUDAStream(); auto dtype = outFeatures.scalar_type(); auto inds_dtype = indices.scalar_type(); tv::DispatchTorch<float_types_t>()(dtype, [&](auto TValue) { using T = decltype(TValue); using vecload_type_t = typename half_vec_sadd<T>::type; tv::DispatchTorch<int_types_t>()(inds_dtype, [&](auto IndexValue) { using Index = decltype(IndexValue); bool notFound = true; constexpr int vecloadFactor = sizeof(vecload_type_t) / sizeof(T); // important for half. tv::mp_for_each<kernel_block_t>([=, &outFeatures, &buffer, &indices, &notFound](auto NumTLP) { // constexpr int NumILP = NumTLP / (64 / (NumTLP / // vecloadFactor)); constexpr int NumILP = NumTLP / 4; int nHotBlock = (size / NumTLP) * NumTLP; if (notFound) { if (numPlanes % NumTLP == 0) { if (nHotBlock >= NumTLP) { scatterAddVecBlockKernel<T, Index, int(NumTLP), NumILP, vecload_type_t> <<<dim3(size / NumTLP, numPlanes / NumTLP), dim3(NumTLP / NumILP, NumTLP / vecloadFactor), 0, stream>>>(outFeatures.data_ptr<T>(), buffer.data_ptr<T>(), indices.data_ptr<Index>(), nHotBlock, numPlanes / vecloadFactor); #ifdef TV_LOG_KERNEL_INFO cudaFuncAttributes attr; checkCudaErrors(cudaFuncGetAttributes( &attr, scatterAddVecBlockKernel<T, Index, int(NumTLP), NumILP, vecload_type_t>)); tv::ssprint("scatterAddVecBlockKernel<", tv::type_s<T>, tv::type_s<Index>, int(NumTLP), NumILP, ">", attr.numRegs); #endif TV_CHECK_CUDA_ERR(); } if (size - nHotBlock > 0) { scatterAddGenericKernel<T, Index, int(NumTLP), NumILP> <<<dim3(1, numPlanes / NumTLP), dim3(NumTLP / NumILP, NumTLP), 0, stream>>>(outFeatures.data_ptr<T>(), buffer.data_ptr<T>() + nHotBlock * numPlanes, indices.data_ptr<Index>() + nHotBlock, size - nHotBlock, numPlanes); #ifdef TV_LOG_KERNEL_INFO cudaFuncAttributes attr; checkCudaErrors(cudaFuncGetAttributes( &attr, scatterAddGenericKernel<T, Index, int(NumTLP), NumILP>)); tv::ssprint("scatterAddGenericKernel<", tv::type_s<T>, tv::type_s<Index>, int(NumTLP), NumILP, ">", attr.numRegs); #endif TV_CHECK_CUDA_ERR(); } notFound = false; } } }); if (notFound) { constexpr int NumTLP = 64; constexpr int NumILP = NumTLP / 4; scatterAddGenericKernel<T, Index, NumTLP, NumILP> <<<dim3(tv::cuda::DivUp(size, NumTLP), tv::cuda::DivUp(numPlanes, NumTLP)), dim3(NumTLP / NumILP, NumTLP), 0, stream>>>( outFeatures.data_ptr<T>(), buffer.data_ptr<T>(), indices.data_ptr<Index>(), size, numPlanes); #ifdef TV_LOG_KERNEL_INFO cudaFuncAttributes attr; checkCudaErrors(cudaFuncGetAttributes( &attr, scatterAddGenericKernel<T, Index, int(NumTLP), NumILP>)); tv::ssprint("notfound scatterAddGenericKernel<", tv::type_s<T>, tv::type_s<Index>, int(NumTLP), NumILP, ">", attr.numRegs); #endif TV_CHECK_CUDA_ERR(); 
} }); }); } void batch_sparse_gather_cuda(torch::Tensor buffer, torch::Tensor features, torch::Tensor indices, int size) { // indices: [volume, inds_stride] // buffer: [volume, num_points, num_features] // size == volume * num_points if (size <= 0) return; int numPlanes = features.size(1); auto stream = at::cuda::getCurrentCUDAStream(); auto dtype = features.scalar_type(); auto inds_dtype = indices.scalar_type(); int inds_stride = indices.size(1); int feature_stride = buffer.size(1); tv::DispatchTorch<float_types_t>()(dtype, [&](auto TValue) { using T = decltype(TValue); using vecload_type_t = typename half_vec<T>::type; tv::DispatchTorch<int_types_t>()(inds_dtype, [&](auto IndexValue) { using Index = decltype(IndexValue); bool notFound = true; constexpr int vecloadFactor = sizeof(vecload_type_t) / sizeof(T); tv::mp_for_each<kernel_block_t>( [=, &buffer, &features, &indices, &notFound](auto NumTLP) { constexpr int NumILP = NumTLP / 4; // constexpr int NumILP = NumTLP / (64 / (NumTLP / vecloadFactor)); int nHotBlock = (size / NumTLP) * NumTLP; if (notFound) { if (numPlanes % NumTLP == 0) { if (nHotBlock >= NumTLP) { batchGatherVecBlockKernel<T, Index, int(NumTLP), NumILP, vecload_type_t> <<<dim3(size / NumTLP, numPlanes / NumTLP), dim3(NumTLP / NumILP, NumTLP / vecloadFactor), 0, stream>>>(buffer.data_ptr<T>(), features.data_ptr<T>(), indices.data_ptr<Index>(), nHotBlock, numPlanes / vecloadFactor, inds_stride, feature_stride); TV_CHECK_CUDA_ERR_V2("batchGatherVecBlockKernel"); } if (size - nHotBlock > 0) { batchGatherVecKernel<T, Index, int(NumTLP), NumILP, vecload_type_t> <<<dim3(1, numPlanes / NumTLP), dim3(NumTLP / NumILP, NumTLP / vecloadFactor), 0, stream>>>(buffer.data_ptr<T>() + nHotBlock * numPlanes, features.data_ptr<T>(), indices.data_ptr<Index>(), size - nHotBlock, nHotBlock, numPlanes / vecloadFactor, inds_stride, feature_stride); TV_CHECK_CUDA_ERR_V2("batchGatherVecKernel"); } notFound = false; } } }); if (notFound) { constexpr int NumTLP = 64; constexpr int NumILP = NumTLP / 4; batchGatherGenericKernel<T, Index, NumTLP, NumILP> <<<dim3(tv::cuda::DivUp(size, NumTLP), tv::cuda::DivUp(numPlanes, NumTLP)), dim3(NumTLP / NumILP, NumTLP), 0, stream>>>( buffer.data_ptr<T>(), features.data_ptr<T>(), indices.data_ptr<Index>(), size, numPlanes, inds_stride, feature_stride); TV_CHECK_CUDA_ERR(); } }); }); } void batch_sparse_scatter_add_cuda(torch::Tensor buffer, torch::Tensor outFeatures, torch::Tensor indices, int size) { // indices: [volume, inds_stride] // buffer: [volume, num_points, num_features] // size == volume * num_points if (size <= 0) return; int numPlanes = outFeatures.size(1); auto stream = at::cuda::getCurrentCUDAStream(); auto dtype = outFeatures.scalar_type(); auto inds_dtype = indices.scalar_type(); int inds_stride = indices.size(1); int feature_stride = buffer.size(1); tv::DispatchTorch<float_types_t>()(dtype, [&](auto TValue) { using T = decltype(TValue); using vecload_type_t = typename half_vec_sadd<T>::type; tv::DispatchTorch<int_types_t>()(inds_dtype, [&](auto IndexValue) { using Index = decltype(IndexValue); bool notFound = true; constexpr int vecloadFactor = 1; // important for half. 
tv::mp_for_each<kernel_block_t>([=, &outFeatures, &buffer, &indices, &notFound](auto NumTLP) { // constexpr int NumILP = NumTLP / (64 / (NumTLP / // vecloadFactor)); constexpr int NumILP = NumTLP / 4; int nHotBlock = (size / NumTLP) * NumTLP; if (notFound) { if (numPlanes % NumTLP == 0) { if (nHotBlock >= NumTLP) { batchScatterAddBlockKernel<T, Index, int(NumTLP), NumILP> <<<dim3(size / NumTLP, numPlanes / NumTLP), dim3(NumTLP / NumILP, NumTLP / vecloadFactor), 0, stream>>>(outFeatures.data_ptr<T>(), buffer.data_ptr<T>(), indices.data_ptr<Index>(), nHotBlock, numPlanes / vecloadFactor, inds_stride, feature_stride); TV_CHECK_CUDA_ERR(); } if (size - nHotBlock > 0) { batchScatterAddGenericKernel<T, Index, int(NumTLP), NumILP> <<<dim3(1, numPlanes / NumTLP), dim3(NumTLP / NumILP, NumTLP), 0, stream>>>(outFeatures.data_ptr<T>(), buffer.data_ptr<T>() + nHotBlock * numPlanes, indices.data_ptr<Index>(), size - nHotBlock, nHotBlock, numPlanes, inds_stride, feature_stride); TV_CHECK_CUDA_ERR(); } notFound = false; } } }); if (notFound) { constexpr int NumTLP = 64; constexpr int NumILP = NumTLP / 4; batchScatterAddGenericKernel<T, Index, NumTLP, NumILP> <<<dim3(tv::cuda::DivUp(size, NumTLP), tv::cuda::DivUp(numPlanes, NumTLP)), dim3(NumTLP / NumILP, NumTLP), 0, stream>>>( outFeatures.data_ptr<T>(), buffer.data_ptr<T>(), indices.data_ptr<Index>(), size, 0, numPlanes, inds_stride, feature_stride); TV_CHECK_CUDA_ERR(); } }); }); } } // namespace spconv
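// A small illustration, under assumptions, of why the gather paths above compute
// vecloadFactor = sizeof(vecload_type_t) / sizeof(T) with vecload_type_t = int4 (4 for float,
// 8 for at::Half) and then hand numPlanes / vecloadFactor to the vectorized kernels: one 16-byte
// transaction replaces 4-8 scalar ones.  The helper is not part of the files above; it assumes
// 16-byte aligned rows and a plane count divisible by the vector factor, which is what the
// numPlanes % NumTLP == 0 guard ensures before the vectorized kernels are chosen.
template <typename T>
__device__ inline void copy_row_vectorized(T* dst, const T* src, int numPlanesVec) {
  const int4* src4 = reinterpret_cast<const int4*>(src);
  int4* dst4 = reinterpret_cast<int4*>(dst);
  for (int j = threadIdx.x; j < numPlanesVec; j += blockDim.x) {
    dst4[j] = src4[j];  // 16 bytes per iteration: 4 floats or 8 halves
  }
}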
ebb784e58c643d2fcf27fa511c3a5a29f159cc8c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "im2col.hpp" // converts a batch of images of shape: data_im: batch x ic x ih x iw (ic: input_channels in image) // to 2D col of shape: data_col: batch x (ic * kh * kw) x (hcol * wcol) // filter size: kh x kw // kernel multiplication patches: hcol x wcol (Based on input size, kernel size, padding, stride) // Each thread writes one kernel multiplication patch (kh x kw) in data_col // n is the number of tasks (here: ic * hcol * wcol, i.e. number of kernel patches per image) __global__ void im2col_kernel(const float * data_im, float * data_col, const int n, const int kh, const int kw, const int pad, const int stride, const int ih, const int iw, const int ic, const int hcol, const int wcol) { // essentially this loop could have run batch size number of times // but since we are launching enough threads to handle each image separately, it executes just once // here it mainly prevents any extra threads we launch from accessing out-of-bounds memory CUDA_KERNEL_LOOP(index, n) { // figure out which part of which image you will work on int imidx = blockIdx.y; int w_out = index % wcol; index /= wcol; int h_out = index % hcol; int channel_in = index / hcol; int h_in = h_out * stride - pad; int w_in = w_out * stride - pad; // this thread will write the output patch (kh x kw) at location (imidx, channel_out, h_out, w_out) // that patch is based on the image patch at (imidx, channel_in, h_in, w_in) // i.e. will do the work for patch centred at (channel_in, h_in, w_in) in image imidx data_im += ((imidx * ic + channel_in) * ih + h_in) * iw + w_in; data_col += ((imidx * ic + channel_in) * kh * kw * hcol + h_out) * wcol + w_out; #pragma unroll for (int i = 0; i < kh; ++i) { for (int j = 0; j < kw; ++j) { int h = h_in + i; int w = w_in + j; *data_col = (h >= 0 && w >= 0 && h < ih && w < iw) ? data_im[i * iw + j]: 0; data_col += hcol * wcol; } } } } // takes a batch of images on GPU: bs x ic x ih x iw (ic: input channels, bs: batch size) // and the kernels on GPU: oc x ic x kh x kw (oc: output channels) // does the convolution based on padding (pad) and stride // data_col is used for intermediate col form storage // output is returned in data_out void im2col_gemm_gpu(const float * data_im, const float * data_ker, hipblasHandle_t handle, const int kh, const int kw, const int pad, const int stride, const int ih, const int iw, const int ic, const int oc, float * data_col, float * data_out, int bs) { // Step 1: convert the image to col form // dimensions of the col form corresponding to this image int hcol = (ih + 2 * pad - kh) / stride + 1; int wcol = (iw + 2 * pad - kw) / stride + 1; // We are going to launch bs groups of ic * hcol * wcol threads for im2col, // each thread is responsible for copying a single-channel kernel multiplication patch // i.e. one thread per output pixel in the output of conv // So, all images in the batch are converted to col form in parallel int op_size = ic * hcol * wcol; dim3 blocks(GET_BLOCKS(op_size), bs, 1); dim3 threads(CUDA_NUM_THREADS, 1, 1); hipLaunchKernelGGL(( im2col_kernel), dim3(blocks), dim3(threads), 0, 0, data_im, data_col, op_size, kh, kw, pad, stride, ih, iw, ic, hcol, wcol); CUDA_POST_KERNEL_CHECK; // check if there was any error // now, the col form shall be multiplied with the kernels laid out straight i.e.
(ic * kh * kw) // so, since oc is the number of kernels, we get: // "2D kernel matrix" oc x (ic * kh * kw) // and the "2D col matrix" for one image is: (ic * kh * kw) x (hcol * wcol) // and you see that magically, their multiplication output is: // output: oc x (hcol * wcol)... i.e. oc x hcol x wcol, the exact shape needed by next convolution // output: oc x (hcol * wcol)... i.e. oc x hcol x wcol, the exact shape needed by next im2col // so, there is no need to ever work things back (col2im) or reshape either // in summary, we do matmul(kernel, im2col(im_input)) -> conv_output (in "correct" form) // Step 2: GEMM using libcublas // get params ready for GEMM call // Performs C + i*strideC = α op(A + i*strideA) op(B + i*strideB) + β(C + i* strideC) // for i ∈ [0, batchSize − 1] // Thus, this one call will do the matrix multiplication for all images in the batch in parallel // Since we are doing A * B, we need α = 1, β = 0 // Since we don't need any transpose, op = HIPBLAS_OP_N const float alpha = 1.0f; const float beta = 0.0f; int ldA, ldB, ldC; int m = ldA = ldC = hcol * wcol; int n = oc; int k = ldB = ic * kh * kw; long long int strideA = m * k; // size of each col form long long int strideB = 0; // reusing the same kernel matrix for each image long long int strideC = m * n; // size of output feature map // CUDA sees matrices as column major // So, a matrix we see as HxW, it would see as WxH in the same memory layout // So, matA (our view) -> matA' (CUDA view) // Thus, to do matA * matB in our view, we shall run CUDA for matB * matA. // Output would be matB' * matA' (CUDA view) = (matA * matB)' (CUDA view) = matA * matB (our view) // In essence, trust me when I do col * kernel to achieve kernel * col hipblasStatus_t ret = hipblasSgemmStridedBatched(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, m, n, k, &alpha, data_col, ldA, strideA, data_ker, ldB, strideB, &beta, data_out, ldC, strideC, bs); CUBLAS_CHECK(ret, "cublas Sgemm returned an error!"); } // takes a batch of images on CPU: data_im: batch x ic x ih x iw (ic: input channels) // and the kernels on CPU: data_ker: oc x ic x kh x kw (oc: output channels) // does the convolution based on padding (pad) and stride // returns the convolution output on CPU // conv_time & overhead_time are used for kernel timing float * im2colWithCuda(const float * data_im, const float * data_ker, const int batch, const int kh, const int kw, const int pad, const int stride, const int ih, const int iw, const int ic, const int oc, float& conv_time, float& overhead_time) { // Timing variables - CUDA Event API overhead_time = 0; conv_time = 0; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); // image dim ssize_t image_size = ic * ih * iw; ssize_t images_size = batch * image_size; // kernel dim ssize_t K = ic * kh * kw; ssize_t kernels_size = oc * K; // col dim ssize_t hcol = (ih + 2 * pad - kh) / stride + 1; ssize_t wcol = (iw + 2 * pad - kw) / stride + 1; ssize_t one_col = ic * kh * kw * hcol * wcol; ssize_t col_batch = batch * one_col; // output dim ssize_t output_feature = oc * hcol * wcol; ssize_t result_size = batch * output_feature; // move images to GPU float * dev_image = nullptr; CUDA_CHECK(hipMalloc((void**)&dev_image, images_size * sizeof(float))); CUDA_CHECK(hipMemcpy(dev_image, data_im, images_size * sizeof(float), hipMemcpyHostToDevice)); // move kernels to GPU float * dev_kernel = nullptr; CUDA_CHECK(hipMalloc((void**)&dev_kernel, kernels_size * sizeof(float))); CUDA_CHECK(hipMemcpy(dev_kernel, data_ker, kernels_size * sizeof(float),
hipMemcpyHostToDevice)); // allocate GPU memory for intermediate col form float * dev_col = nullptr; CUDA_CHECK(hipMalloc((void**)&dev_col, col_batch * sizeof(float))); // allocate GPU memory for convolution result float * dev_ret = nullptr; CUDA_CHECK(hipMalloc((void**)&dev_ret, result_size * sizeof(float))); // cuBLAS initialize hipblasHandle_t handle; CUBLAS_CHECK(hipblasCreate(&handle), "hipblasCreate() error!"); // Record the kernel run time hipEventRecord(start); // Kernel launch - this single call will handle all the images in the batch in parallel im2col_gemm_gpu(dev_image, dev_kernel, handle, kh, kw, pad, stride, ih, iw, ic, oc, dev_col, dev_ret, batch); hipEventRecord(stop); hipEventSynchronize(stop); hipEventElapsedTime(&conv_time, start, stop); // cuBLAS finalize CUBLAS_CHECK(hipblasDestroy(handle), "hipblasDestroy() error!"); // Check for any errors launching the kernel CUDA_POST_KERNEL_CHECK; // Copy output vector from GPU to host memory. float * data_ret = (float *)malloc(result_size * sizeof(float)); CUDA_CHECK(hipMemcpy(data_ret, dev_ret, result_size * sizeof(float), hipMemcpyDeviceToHost)); // Free CUDA memory hipFree(dev_image); hipFree(dev_col); hipFree(dev_kernel); hipFree(dev_ret); // Free timing resources hipEventDestroy(start); hipEventDestroy(stop); return data_ret; } // The exposed library function which just calls im2colWithCuda the right way float* IM2COL::forward(int out_size, int channel, int kernel_height, int kernel_width, int pad, int stride, float* kernel, int batch_size, int input_height, int input_width, float* input, float& conv_time, float& overhead_time) { return im2colWithCuda(input, kernel, batch_size, kernel_height, kernel_width, pad, stride, input_height, input_width, channel, out_size, conv_time, overhead_time); }
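// A naive host-side reference, not part of the file above, that can be used to spot-check the
// output of im2colWithCuda.  It assumes the same layouts as the GPU path: images are
// batch x ic x ih x iw, kernels are oc x ic x kh x kw, and the result is batch x oc x hcol x wcol.
static void conv2d_reference_cpu(const float* im, const float* ker, float* out,
                                 int batch, int ic, int ih, int iw,
                                 int oc, int kh, int kw, int pad, int stride) {
  int hcol = (ih + 2 * pad - kh) / stride + 1;
  int wcol = (iw + 2 * pad - kw) / stride + 1;
  for (int b = 0; b < batch; ++b)
    for (int o = 0; o < oc; ++o)
      for (int y = 0; y < hcol; ++y)
        for (int x = 0; x < wcol; ++x) {
          float acc = 0.f;
          for (int c = 0; c < ic; ++c)
            for (int i = 0; i < kh; ++i)
              for (int j = 0; j < kw; ++j) {
                int h = y * stride - pad + i;
                int w = x * stride - pad + j;
                if (h >= 0 && h < ih && w >= 0 && w < iw)
                  acc += im[((b * ic + c) * ih + h) * iw + w] * ker[((o * ic + c) * kh + i) * kw + j];
              }
          out[((b * oc + o) * hcol + y) * wcol + x] = acc;
        }
}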
ebb784e58c643d2fcf27fa511c3a5a29f159cc8c.cu
#include "im2col.hpp" // converts a batch of images of shape: data_im: batch x ic x ih x iw (ic: input_channels in image) // to 2D col of shape: data_col: batch x (ic * kh * kw) x (hcol * wcol) // filter size: kh x kw // kernel multiplication patches: hcol x wcol (Based on input size, kernel size, padding, stride) // Each thread writes one kernel multiplication patch (kh x kw) in data_col // n is the number of tasks (here: ic * hcol * wcol, ie number of kernel patches per image) __global__ void im2col_kernel(const float * data_im, float * data_col, const int n, const int kh, const int kw, const int pad, const int stride, const int ih, const int iw, const int ic, const int hcol, const int wcol) { // esentially this loop could have run batch size number of times // but since we are launching enough threads to handle each image separately, it executes just once // here it is majorly prevents any extra threads we launch from accessing memory CUDA_KERNEL_LOOP(index, n) { // figure out which part of which image you will work on int imidx = blockIdx.y; int w_out = index % wcol; index /= wcol; int h_out = index % hcol; int channel_in = index / hcol; int h_in = h_out * stride - pad; int w_in = w_out * stride - pad; // this thread will write the output patch (kh x kw) at location (imidx, channel_out, h_out, w_out) // that patch is based on the image patch at (imidx, channel_in, h_in, w_in) // i.e. will do the work for patch centred at (channel_in, h_in, w_in) in image imidx data_im += ((imidx * ic + channel_in) * ih + h_in) * iw + w_in; data_col += ((imidx * ic + channel_in) * kh * kw * hcol + h_out) * wcol + w_out; #pragma unroll for (int i = 0; i < kh; ++i) { for (int j = 0; j < kw; ++j) { int h = h_in + i; int w = w_in + j; *data_col = (h >= 0 && w >= 0 && h < ih && w < iw) ? data_im[i * iw + j]: 0; data_col += hcol * wcol; } } } } // takes a batch of images on GPU: bs x ic x ih x iw (ic: input channels, bs: batch size) // and the kernels on GPU: oc x ic x kh x kw (oc: output channels) // does the convolution based on padding (pad) and stride // data_col is used for intermediate col form storage // output is returned in data_out void im2col_gemm_gpu(const float * data_im, const float * data_ker, cublasHandle_t handle, const int kh, const int kw, const int pad, const int stride, const int ih, const int iw, const int ic, const int oc, float * data_col, float * data_out, int bs) { // Step 1: convert the image to col form // dimensions of the col corr to this image int hcol = (ih + 2 * pad - kh) / stride + 1; int wcol = (iw + 2 * pad - kw) / stride + 1; // We are going to launch bs groups of ic * hcol * wcol kernels threads for im2col, // each thread is responsible for copying a single-channel kernel multiplication patch // i.e. one thread per output pixel in the output of conv // So, all images in batch are converted to col form parallely int op_size = ic * hcol * wcol; dim3 blocks(GET_BLOCKS(op_size), bs, 1); dim3 threads(CUDA_NUM_THREADS, 1, 1); im2col_kernel<<<blocks, threads>>>(data_im, data_col, op_size, kh, kw, pad, stride, ih, iw, ic, hcol, wcol); CUDA_POST_KERNEL_CHECK; // check if there was any error // now, the col form shall be multiplied with the kernels laid out straight i.e. (ic * kh * kw) // so, since, oc is the number of kernels, we get: // "2D kernel matrix" oc x (ic * kh * kw) // and the "2D col matrix" for one image is: (ic * kh * kw) x (hcol * wcol) // and you see that magically, their multiplication output is: // output: oc x (hcol * wcol)... 
i.e. oc x hcol x wcol, the exact shape needed by next convolution // output: oc x (hcol * wcol)... i.e. oc x hcol x wcol, the exact shape needed by next im2col // so, there is no need to ever work things back (col2im) or reshape either // in summary, we do matmul(kernel, im2col(im_input)) -> conv_output (in "correct" form) // Step 2: GEMM using libcublas // get params ready for GEMM call // Performs C + i*strideC = α op(A + i*strideA) op(B + i*strideB) + β(C + i* strideC) // for i ∈ [0, batchSize − 1] // Thus, this one call will do the matrix multiplication for all images in the batch in parallel // Since we are doing A * B, we need α = 1, β = 0 // Since we don't need any transpose, op = CUBLAS_OP_N const float alpha = 1.0f; const float beta = 0.0f; int ldA, ldB, ldC; int m = ldA = ldC = hcol * wcol; int n = oc; int k = ldB = ic * kh * kw; long long int strideA = m * k; // size of each col form long long int strideB = 0; // reusing the same kernel matrix for each image long long int strideC = m * n; // size of output feature map // CUDA sees matrices as column major // So, a matrix we see as HxW, it would see as WxH in the same memory layout // So, matA (our view) -> matA' (CUDA view) // Thus, to do matA * matB in our view, we shall run CUDA for matB * matA. // Output would be matB' * matA' (CUDA view) = (matA * matB)' (CUDA view) = matA * matB (our view) // In essence, trust me when I do col * kernel to achieve kernel * col cublasStatus_t ret = cublasSgemmStridedBatched(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k, &alpha, data_col, ldA, strideA, data_ker, ldB, strideB, &beta, data_out, ldC, strideC, bs); CUBLAS_CHECK(ret, "cublas Sgemm returned an error!"); } // takes a batch of images on CPU: data_im: batch x ic x ih x iw (ic: input channels) // and the kernels on CPU: data_ker: oc x ic x kh x kw (oc: output channels) // does the convolution based on padding (pad) and stride // returns the convolution output on CPU // conv_time & overhead_time are used for kernel timing float * im2colWithCuda(const float * data_im, const float * data_ker, const int batch, const int kh, const int kw, const int pad, const int stride, const int ih, const int iw, const int ic, const int oc, float& conv_time, float& overhead_time) { // Timing variables - CUDA Event API overhead_time = 0; conv_time = 0; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); // image dim ssize_t image_size = ic * ih * iw; ssize_t images_size = batch * image_size; // kernel dim ssize_t K = ic * kh * kw; ssize_t kernels_size = oc * K; // col dim ssize_t hcol = (ih + 2 * pad - kh) / stride + 1; ssize_t wcol = (iw + 2 * pad - kw) / stride + 1; ssize_t one_col = ic * kh * kw * hcol * wcol; ssize_t col_batch = batch * one_col; // output dim ssize_t output_feature = oc * hcol * wcol; ssize_t result_size = batch * output_feature; // move images to GPU float * dev_image = nullptr; CUDA_CHECK(cudaMalloc((void**)&dev_image, images_size * sizeof(float))); CUDA_CHECK(cudaMemcpy(dev_image, data_im, images_size * sizeof(float), cudaMemcpyHostToDevice)); // move kernels to GPU float * dev_kernel = nullptr; CUDA_CHECK(cudaMalloc((void**)&dev_kernel, kernels_size * sizeof(float))); CUDA_CHECK(cudaMemcpy(dev_kernel, data_ker, kernels_size * sizeof(float), cudaMemcpyHostToDevice)); // allocate GPU memory for intermediate col form float * dev_col = nullptr; CUDA_CHECK(cudaMalloc((void**)&dev_col, col_batch * sizeof(float))); // allocate GPU memory for convolution result float * dev_ret = nullptr; CUDA_CHECK(cudaMalloc((void**)&dev_ret,
result_size * sizeof(float))); // cuBLAS initialize cublasHandle_t handle; CUBLAS_CHECK(cublasCreate(&handle), "cublasCreate() error!"); // Record the kernel run time cudaEventRecord(start); // Kernel launch - this single call will handle all the images in the batch in parallel im2col_gemm_gpu(dev_image, dev_kernel, handle, kh, kw, pad, stride, ih, iw, ic, oc, dev_col, dev_ret, batch); cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&conv_time, start, stop); // cuBLAS finalize CUBLAS_CHECK(cublasDestroy(handle), "cublasDestroy() error!"); // Check for any errors launching the kernel CUDA_POST_KERNEL_CHECK; // Copy output vector from GPU to host memory. float * data_ret = (float *)malloc(result_size * sizeof(float)); CUDA_CHECK(cudaMemcpy(data_ret, dev_ret, result_size * sizeof(float), cudaMemcpyDeviceToHost)); // Free CUDA memory cudaFree(dev_image); cudaFree(dev_col); cudaFree(dev_kernel); cudaFree(dev_ret); // Free timing resources cudaEventDestroy(start); cudaEventDestroy(stop); return data_ret; } // The exposed library function which just calls im2colWithCuda the right way float* IM2COL::forward(int out_size, int channel, int kernel_height, int kernel_width, int pad, int stride, float* kernel, int batch_size, int input_height, int input_width, float* input, float& conv_time, float& overhead_time) { return im2colWithCuda(input, kernel, batch_size, kernel_height, kernel_width, pad, stride, input_height, input_width, channel, out_size, conv_time, overhead_time); }
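// The "trust me when I do col * kernel to achieve kernel * col" comment above is the usual
// row-major / column-major swap.  Stripped of the im2col context, the pattern looks like the
// sketch below: to get the row-major product C = A * B (A is M x K, B is K x N, all device
// pointers assumed already allocated and filled), ask cuBLAS for B * A in its column-major view.
// Names dA/dB/dC and M/N/K are placeholders for illustration, not symbols from the file above.
static void gemm_row_major(cublasHandle_t handle, const float* dA, const float* dB, float* dC,
                           int M, int N, int K) {
  const float alpha = 1.0f, beta = 0.0f;
  // Column-major cuBLAS computes C' = B' * A', which is exactly A * B laid out row-major.
  cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N,
              /*m=*/N, /*n=*/M, /*k=*/K,
              &alpha, dB, /*lda=*/N, dA, /*ldb=*/K, &beta, dC, /*ldc=*/N);
}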
ef03650279d7d97e221ddc8f84a5320166aa6564.hip
// !!! This is a file automatically generated by hipify!!! /** * @file unstagger.cpp * * Calculate the co-located velocity field for U and V * Description: The grid in a staggered arrangement consists of central points ('o') where most parameter values e.g. temperature and pressure are stored. The u-velocity points ('u') are shifted east by a half grid spacing and v-velocity points ('v') shifted south by a half grid spacing. A example on a 3x3 grid is given below. o---u---o---u---o---u | | | v v v | | | o---u---o---u---o---u | | | v v v | | | o---u---o---u---o---u | | | v v v If collocation of all parameters is required, u- and v-velocity needs to be unstaggered. The unstaggering is done by solving a set of linear equations. For the example grid shown above the linear equations are. Case u: Case v: 1*u(1,1) = o(1,1) 1*v(1,1) = o(1,1) 0.5*u(1,1) + 0.5*u(1,2) = o(1,2) 1*v(1,2) = o(1,2) 0.5*u(1,2) + 0.5*u(1,3) = o(1,3) 1*v(1,3) = o(1,3) 1*u(2,1) = o(2,1) 0.5*v(1,1) + 0.5*v(2,1) = o(2,1) 0.5*u(2,1) + 0.5*u(2,2) = o(2,2) 0.5*v(1,2) + 0.5*v(2,2) = o(2,2) 0.5*u(2,2) + 0.5*u(2,3) = o(2,3) 0.5*v(1,3) + 0.5*v(2,3) = o(2,3) 1*u(3,1) = o(3,1) 0.5*v(2,1) + 0.5*v(3,1) = o(3,1) 0.5*u(3,1) + 0.5*u(3,2) = o(3,2) 0.5*v(2,2) + 0.5*v(3,2) = o(3,2) 0.5*u(3,2) + 0.5*u(3,3) = o(3,3) 0.5*v(2,3) + 0.5*v(3,3) = o(3,3) These equations can be re-written in matrix-vector form |- -| | u(1,1) | | . | | . | | u(3,3) | |- -| |- -| |- -| | w11 ... w19 | | o(1,1) | | . . | | . | | . . | | . | | w91 ... w99 | | o(3,3) | |- -| |- -| where the weighting matrices for the u- and v-case are diagonal matrices. The diagonal matrix to unstagger the u-velocity field has the form | 2 0 0 0 0 0 0 0 0 | | 1 1 0 0 0 0 0 0 0 | | 0 1 1 0 0 0 0 0 0 | | 0 0 0 2 0 0 0 0 0 | 1/2 * | 0 0 0 1 1 0 0 0 0 | = U_unstagger | 0 0 0 0 1 1 0 0 0 | | 0 0 0 0 0 0 2 0 0 | | 0 0 0 0 0 0 1 1 0 | | 0 0 0 0 0 0 0 1 1 | and the diagonal matrix to unstagger the v-velocity | 2 0 0 0 0 0 0 0 0 | | 0 2 0 0 0 0 0 0 0 | | 0 0 2 0 0 0 0 0 0 | | 1 0 0 1 0 0 0 0 0 | 1/2 * | 0 1 0 0 1 0 0 0 0 | = V_unstagger | 0 0 1 0 0 1 0 0 0 | | 0 0 0 1 0 0 1 0 0 | | 0 0 0 0 1 0 0 1 0 | | 0 0 0 0 0 1 0 0 1 | In this form the problem can be solved efficiently using a sparse matrix library matrix-vector multiplication. In this implementation CUSP is used for that purpose. 
**/ #include "cuda_plugin_helper.h" #include "unstagger.cuh" #ifdef DEBUG #undef DEBUG #include <cusp/array1d.h> #include <cusp/array2d.h> #include <cusp/dia_matrix.h> #include <cusp/multiply.h> #include <thrust/system/hip/execution_policy.h> #define DEBUG #else #include <cusp/array1d.h> #include <cusp/array2d.h> #include <cusp/dia_matrix.h> #include <cusp/multiply.h> #include <thrust/system/hip/execution_policy.h> #endif cusp::dia_matrix<int, double, cusp::device_memory> U_unstagger; cusp::dia_matrix<int, double, cusp::device_memory> V_unstagger; void himan::plugin::unstagger_cuda::Init(size_t NX, size_t NY) { // create diagonal matix with constant coefficiants size_t N = NX * NY; cusp::dia_matrix<int, double, cusp::host_memory> h_U_unstagger(N, N, 2 * N, 2); cusp::dia_matrix<int, double, cusp::host_memory> h_V_unstagger(N, N, 2 * N, 2); cusp::array2d<double, cusp::device_memory> Diags(N, 2, 0.5); h_U_unstagger.diagonal_offsets[0] = 0; h_U_unstagger.diagonal_offsets[1] = -1; h_U_unstagger.values = Diags; // alter coefficient for interpolation of first column in U for (size_t i = 0; i < NY; ++i) { h_U_unstagger.values(i * NX, 0) = 1.0; h_U_unstagger.values(i * NX, 1) = 0.0; } h_V_unstagger.diagonal_offsets[0] = 0; h_V_unstagger.diagonal_offsets[1] = -NX; h_V_unstagger.values = Diags; // alter coefficient for interpolation of first row in V for (size_t i = 0; i < NX; ++i) { h_V_unstagger.values(i, 0) = 1.0; h_V_unstagger.values(i, 1) = 0.0; } // copy matrices to device U_unstagger = h_U_unstagger; V_unstagger = h_V_unstagger; } std::pair<std::vector<double>, std::vector<double>> himan::plugin::unstagger_cuda::Process(std::vector<double>& U_in, std::vector<double>& V_in) { size_t N = U_in.size(); hipStream_t stream; CUDA_CHECK(hipStreamCreate(&stream)); std::vector<double> U_out(N); std::vector<double> V_out(N); CUDA_CHECK(hipHostRegister(U_in.data(), sizeof(double) * N, 0)); CUDA_CHECK(hipHostRegister(V_in.data(), sizeof(double) * N, 0)); CUDA_CHECK(hipHostRegister(U_out.data(), sizeof(double) * N, 0)); CUDA_CHECK(hipHostRegister(V_out.data(), sizeof(double) * N, 0)); // create 1d arrays on device double* d_U = nullptr; // pointer to device memory pointing to incoming data of U double* d_V = nullptr; // pointer to device memory pointing to incoming data of V double* d_U_out = nullptr; // pointer to device memory to unstaggered data of U double* d_V_out = nullptr; // pointer to device memory to unstaggered data of V // allocate memory CUDA_CHECK(hipMalloc((void**)&d_U, sizeof(double) * N)); CUDA_CHECK(hipMalloc((void**)&d_V, sizeof(double) * N)); CUDA_CHECK(hipMalloc((void**)&d_U_out, sizeof(double) * N)); CUDA_CHECK(hipMalloc((void**)&d_V_out, sizeof(double) * N)); // copy data to device CUDA_CHECK(hipMemcpyAsync(d_U, U_in.data(), sizeof(double) * N, hipMemcpyHostToDevice, stream)); CUDA_CHECK(hipMemcpyAsync(d_V, V_in.data(), sizeof(double) * N, hipMemcpyHostToDevice, stream)); // cast raw pointer to thrust device pointer thrust::device_ptr<double> dt_U = thrust::device_pointer_cast(d_U); thrust::device_ptr<double> dt_V = thrust::device_pointer_cast(d_V); thrust::device_ptr<double> dt_U_out = thrust::device_pointer_cast(d_U_out); thrust::device_ptr<double> dt_V_out = thrust::device_pointer_cast(d_V_out); // create cusp::array1d auto U_device = cusp::array1d_view<thrust::device_ptr<double>>(dt_U, dt_U + N); auto V_device = cusp::array1d_view<thrust::device_ptr<double>>(dt_V, dt_V + N); auto U_device_out = cusp::array1d_view<thrust::device_ptr<double>>(dt_U_out, dt_U_out + N); auto 
V_device_out = cusp::array1d_view<thrust::device_ptr<double>>(dt_V_out, dt_V_out + N); // perform the unstagger operation cusp::multiply(thrust::hip::par.on(stream), U_unstagger, U_device, U_device_out); cusp::multiply(thrust::hip::par.on(stream), V_unstagger, V_device, V_device_out); // copy result back to host CUDA_CHECK(hipMemcpyAsync(U_out.data(), d_U_out, sizeof(double) * N, hipMemcpyDeviceToHost, stream)); CUDA_CHECK(hipMemcpyAsync(V_out.data(), d_V_out, sizeof(double) * N, hipMemcpyDeviceToHost, stream)); // free memory CUDA_CHECK(hipFree(d_U)); CUDA_CHECK(hipFree(d_V)); CUDA_CHECK(hipFree(d_U_out)); CUDA_CHECK(hipFree(d_V_out)); CUDA_CHECK(hipHostUnregister(U_in.data())); CUDA_CHECK(hipHostUnregister(V_in.data())); CUDA_CHECK(hipHostUnregister(U_out.data())); CUDA_CHECK(hipHostUnregister(V_out.data())); CUDA_CHECK(hipStreamDestroy(stream)); return std::make_pair(U_out, V_out); }
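// The DIA matrices built in Init() above encode a two-point average: U is averaged with its
// western neighbour (offsets {0, -1}) and V with its northern neighbour (offsets {0, -NX}),
// with the first column / first row passed through unchanged.  The kernel below applies the same
// weights directly instead of going through cusp::multiply; it is an equivalent formulation for
// illustration only, not the plugin's actual code path.
__global__ void unstagger_uv_sketch(const double* u, const double* v, double* u_out, double* v_out,
                                    int NX, int NY) {
  int x = blockIdx.x * blockDim.x + threadIdx.x;
  int y = blockIdx.y * blockDim.y + threadIdx.y;
  if (x >= NX || y >= NY) return;
  int idx = y * NX + x;
  // Matches U_unstagger: 0.5 * (u(i) + u(i-1)), first column copied with weight 1.
  u_out[idx] = (x == 0) ? u[idx] : 0.5 * (u[idx] + u[idx - 1]);
  // Matches V_unstagger: 0.5 * (v(i) + v(i-NX)), first row copied with weight 1.
  v_out[idx] = (y == 0) ? v[idx] : 0.5 * (v[idx] + v[idx - NX]);
}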
ef03650279d7d97e221ddc8f84a5320166aa6564.cu
/** * @file unstagger.cpp * * Calculate the co-located velocity field for U and V * Description: The grid in a staggered arrangement consists of central points ('o') where most parameter values e.g. temperature and pressure are stored. The u-velocity points ('u') are shifted east by a half grid spacing and v-velocity points ('v') shifted south by a half grid spacing. A example on a 3x3 grid is given below. o---u---o---u---o---u | | | v v v | | | o---u---o---u---o---u | | | v v v | | | o---u---o---u---o---u | | | v v v If collocation of all parameters is required, u- and v-velocity needs to be unstaggered. The unstaggering is done by solving a set of linear equations. For the example grid shown above the linear equations are. Case u: Case v: 1*u(1,1) = o(1,1) 1*v(1,1) = o(1,1) 0.5*u(1,1) + 0.5*u(1,2) = o(1,2) 1*v(1,2) = o(1,2) 0.5*u(1,2) + 0.5*u(1,3) = o(1,3) 1*v(1,3) = o(1,3) 1*u(2,1) = o(2,1) 0.5*v(1,1) + 0.5*v(2,1) = o(2,1) 0.5*u(2,1) + 0.5*u(2,2) = o(2,2) 0.5*v(1,2) + 0.5*v(2,2) = o(2,2) 0.5*u(2,2) + 0.5*u(2,3) = o(2,3) 0.5*v(1,3) + 0.5*v(2,3) = o(2,3) 1*u(3,1) = o(3,1) 0.5*v(2,1) + 0.5*v(3,1) = o(3,1) 0.5*u(3,1) + 0.5*u(3,2) = o(3,2) 0.5*v(2,2) + 0.5*v(3,2) = o(3,2) 0.5*u(3,2) + 0.5*u(3,3) = o(3,3) 0.5*v(2,3) + 0.5*v(3,3) = o(3,3) These equations can be re-written in matrix-vector form |- -| | u(1,1) | | . | | . | | u(3,3) | |- -| |- -| |- -| | w11 ... w19 | | o(1,1) | | . . | | . | | . . | | . | | w91 ... w99 | | o(3,3) | |- -| |- -| where the weighting matrices for the u- and v-case are diagonal matrices. The diagonal matrix to unstagger the u-velocity field has the form | 2 0 0 0 0 0 0 0 0 | | 1 1 0 0 0 0 0 0 0 | | 0 1 1 0 0 0 0 0 0 | | 0 0 0 2 0 0 0 0 0 | 1/2 * | 0 0 0 1 1 0 0 0 0 | = U_unstagger | 0 0 0 0 1 1 0 0 0 | | 0 0 0 0 0 0 2 0 0 | | 0 0 0 0 0 0 1 1 0 | | 0 0 0 0 0 0 0 1 1 | and the diagonal matrix to unstagger the v-velocity | 2 0 0 0 0 0 0 0 0 | | 0 2 0 0 0 0 0 0 0 | | 0 0 2 0 0 0 0 0 0 | | 1 0 0 1 0 0 0 0 0 | 1/2 * | 0 1 0 0 1 0 0 0 0 | = V_unstagger | 0 0 1 0 0 1 0 0 0 | | 0 0 0 1 0 0 1 0 0 | | 0 0 0 0 1 0 0 1 0 | | 0 0 0 0 0 1 0 0 1 | In this form the problem can be solved efficiently using a sparse matrix library matrix-vector multiplication. In this implementation CUSP is used for that purpose. 
**/ #include "cuda_plugin_helper.h" #include "unstagger.cuh" #ifdef DEBUG #undef DEBUG #include <cusp/array1d.h> #include <cusp/array2d.h> #include <cusp/dia_matrix.h> #include <cusp/multiply.h> #include <thrust/system/cuda/execution_policy.h> #define DEBUG #else #include <cusp/array1d.h> #include <cusp/array2d.h> #include <cusp/dia_matrix.h> #include <cusp/multiply.h> #include <thrust/system/cuda/execution_policy.h> #endif cusp::dia_matrix<int, double, cusp::device_memory> U_unstagger; cusp::dia_matrix<int, double, cusp::device_memory> V_unstagger; void himan::plugin::unstagger_cuda::Init(size_t NX, size_t NY) { // create diagonal matix with constant coefficiants size_t N = NX * NY; cusp::dia_matrix<int, double, cusp::host_memory> h_U_unstagger(N, N, 2 * N, 2); cusp::dia_matrix<int, double, cusp::host_memory> h_V_unstagger(N, N, 2 * N, 2); cusp::array2d<double, cusp::device_memory> Diags(N, 2, 0.5); h_U_unstagger.diagonal_offsets[0] = 0; h_U_unstagger.diagonal_offsets[1] = -1; h_U_unstagger.values = Diags; // alter coefficient for interpolation of first column in U for (size_t i = 0; i < NY; ++i) { h_U_unstagger.values(i * NX, 0) = 1.0; h_U_unstagger.values(i * NX, 1) = 0.0; } h_V_unstagger.diagonal_offsets[0] = 0; h_V_unstagger.diagonal_offsets[1] = -NX; h_V_unstagger.values = Diags; // alter coefficient for interpolation of first row in V for (size_t i = 0; i < NX; ++i) { h_V_unstagger.values(i, 0) = 1.0; h_V_unstagger.values(i, 1) = 0.0; } // copy matrices to device U_unstagger = h_U_unstagger; V_unstagger = h_V_unstagger; } std::pair<std::vector<double>, std::vector<double>> himan::plugin::unstagger_cuda::Process(std::vector<double>& U_in, std::vector<double>& V_in) { size_t N = U_in.size(); cudaStream_t stream; CUDA_CHECK(cudaStreamCreate(&stream)); std::vector<double> U_out(N); std::vector<double> V_out(N); CUDA_CHECK(cudaHostRegister(U_in.data(), sizeof(double) * N, 0)); CUDA_CHECK(cudaHostRegister(V_in.data(), sizeof(double) * N, 0)); CUDA_CHECK(cudaHostRegister(U_out.data(), sizeof(double) * N, 0)); CUDA_CHECK(cudaHostRegister(V_out.data(), sizeof(double) * N, 0)); // create 1d arrays on device double* d_U = nullptr; // pointer to device memory pointing to incoming data of U double* d_V = nullptr; // pointer to device memory pointing to incoming data of V double* d_U_out = nullptr; // pointer to device memory to unstaggered data of U double* d_V_out = nullptr; // pointer to device memory to unstaggered data of V // allocate memory CUDA_CHECK(cudaMalloc((void**)&d_U, sizeof(double) * N)); CUDA_CHECK(cudaMalloc((void**)&d_V, sizeof(double) * N)); CUDA_CHECK(cudaMalloc((void**)&d_U_out, sizeof(double) * N)); CUDA_CHECK(cudaMalloc((void**)&d_V_out, sizeof(double) * N)); // copy data to device CUDA_CHECK(cudaMemcpyAsync(d_U, U_in.data(), sizeof(double) * N, cudaMemcpyHostToDevice, stream)); CUDA_CHECK(cudaMemcpyAsync(d_V, V_in.data(), sizeof(double) * N, cudaMemcpyHostToDevice, stream)); // cast raw pointer to thrust device pointer thrust::device_ptr<double> dt_U = thrust::device_pointer_cast(d_U); thrust::device_ptr<double> dt_V = thrust::device_pointer_cast(d_V); thrust::device_ptr<double> dt_U_out = thrust::device_pointer_cast(d_U_out); thrust::device_ptr<double> dt_V_out = thrust::device_pointer_cast(d_V_out); // create cusp::array1d auto U_device = cusp::array1d_view<thrust::device_ptr<double>>(dt_U, dt_U + N); auto V_device = cusp::array1d_view<thrust::device_ptr<double>>(dt_V, dt_V + N); auto U_device_out = cusp::array1d_view<thrust::device_ptr<double>>(dt_U_out, dt_U_out + N); 
auto V_device_out = cusp::array1d_view<thrust::device_ptr<double>>(dt_V_out, dt_V_out + N); // perform the unstagger operation cusp::multiply(thrust::cuda::par.on(stream), U_unstagger, U_device, U_device_out); cusp::multiply(thrust::cuda::par.on(stream), V_unstagger, V_device, V_device_out); // copy result back to host CUDA_CHECK(cudaMemcpyAsync(U_out.data(), d_U_out, sizeof(double) * N, cudaMemcpyDeviceToHost, stream)); CUDA_CHECK(cudaMemcpyAsync(V_out.data(), d_V_out, sizeof(double) * N, cudaMemcpyDeviceToHost, stream)); // free memory CUDA_CHECK(cudaFree(d_U)); CUDA_CHECK(cudaFree(d_V)); CUDA_CHECK(cudaFree(d_U_out)); CUDA_CHECK(cudaFree(d_V_out)); CUDA_CHECK(cudaHostUnregister(U_in.data())); CUDA_CHECK(cudaHostUnregister(V_in.data())); CUDA_CHECK(cudaHostUnregister(U_out.data())); CUDA_CHECK(cudaHostUnregister(V_out.data())); CUDA_CHECK(cudaStreamDestroy(stream)); return std::make_pair(U_out, V_out); }
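// Process() above pins the caller's std::vector storage with cudaHostRegister so that the
// cudaMemcpyAsync calls can run truly asynchronously on the stream, and it appears to rely on the
// implicit synchronization performed by the later cudaFree calls before the host reads
// U_out / V_out.  The sketch below shows the same round-trip pattern with an explicit
// cudaStreamSynchronize instead; buffer names and sizes are placeholders and error checks are
// omitted for brevity, so treat it as an illustration rather than a drop-in replacement.
static void pinned_async_roundtrip(double* host_in, double* host_out, size_t n) {
  cudaStream_t stream;
  cudaStreamCreate(&stream);
  cudaHostRegister(host_in, n * sizeof(double), cudaHostRegisterDefault);
  cudaHostRegister(host_out, n * sizeof(double), cudaHostRegisterDefault);

  double* dev = nullptr;
  cudaMalloc((void**)&dev, n * sizeof(double));
  cudaMemcpyAsync(dev, host_in, n * sizeof(double), cudaMemcpyHostToDevice, stream);
  // ... enqueue kernels or cusp::multiply work on `stream` here ...
  cudaMemcpyAsync(host_out, dev, n * sizeof(double), cudaMemcpyDeviceToHost, stream);

  // Make completion of the device-to-host copy explicit before the host touches host_out.
  cudaStreamSynchronize(stream);

  cudaFree(dev);
  cudaHostUnregister(host_in);
  cudaHostUnregister(host_out);
  cudaStreamDestroy(stream);
}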