#include <cuda_runtime.h> #include <cuda_gl_interop.h> #include <cublas_v2.h> #include <cusparse.h> #ifdef SOLVER #include <cusolverDn.h> #endif #include "helper_cuda.h" #include "kernel_impl.cuh" #include "thrust/device_ptr.h" #include "thrust/device_vector.h" #include "thrust/for_each.h" #include "thrust/iterator/zip_iterator.h" //#include "thrust/sort.h" //#include "thrust/reduce.h" #include "thrust/transform.h" //#include <stdlib.h> //#include <string.h> #include <stdio.h> #ifdef PRINT #include "cusp/csr_matrix.h" #include "cusp/print.h" //#include "cusp/multiply.h" //#include "cusp/transpose.h" #include "cusp/array1d.h" #endif typedef typename cusp::array1d_view< thrust::device_vector<uint>::iterator > IndexArrayView; typedef typename cusp::array1d_view< thrust::device_vector<float>::iterator > ValueArrayView; typedef cusp::csr_matrix_view<IndexArrayView, IndexArrayView, ValueArrayView> CSRView; cublasHandle_t cublasHandle; cusparseHandle_t cusparseHandle; cusparseMatDescr_t matDescr; #ifdef SOLVER cusolverDnHandle_t cusolverHandle; #endif thrust::device_vector<uint> Jr; // row offsets for J thrust::device_vector<uint> Jc; // col indices for J thrust::device_vector<float> Jv; // values for J // D contains Diag of A thrust::device_vector<uint> Dr; // row offsets for D thrust::device_vector<uint> Dc; // col indices for D thrust::device_vector<float> Dv; // values for D thrust::device_vector<uint> occurences; // number of constraints affecting a particle thrust::device_vector<float> W; // vector of inverse masses thrust::device_vector<float> JT; // dense matrix of J transpose thrust::device_vector<float> A; // Solution to J * W * JT // doubles as T in jacobi iteration thrust::device_vector<float> B; thrust::device_vector<float> B2; thrust::device_vector<float> C; //thrust::device_vector<float> deltas; float *d_oldPos; int workSize; float *d_work; int *d_devIpiv; int *d_devInfo; ////////////////////////// RIGID BODIES /////////////////////// thrust::device_vector<float> d_rbAngle; thrust::device_vector<float> d_rbCenterMass; // dim2 thrust::device_vector<uint> d_rbPartStart; thrust::device_vector<uint> d_rbRStart; thrust::device_vector<uint> d_rbSize; thrust::device_vector<float> d_Rs; // dim 2 extern "C" { void allocateArray(void **devPtr, size_t size) { checkCudaErrors(cudaMalloc(devPtr, size)); } void freeArray(void *devPtr) { checkCudaErrors(cudaFree(devPtr)); } void cudaInit() { int devID; // use command-line specified CUDA device, otherwise use device with highest Gflops/s devID = findCudaDevice(); if (devID < 0) { printf("No CUDA Capable devices found, exiting...\n"); exit(EXIT_SUCCESS); } checkCudaErrors(cublasCreate(&cublasHandle)); checkCudaErrors(cusparseCreate(&cusparseHandle)); checkCudaErrors(cusparseCreateMatDescr(&matDescr)); cusparseSetMatType(matDescr, CUSPARSE_MATRIX_TYPE_GENERAL); cusparseSetMatIndexBase(matDescr,CUSPARSE_INDEX_BASE_ZERO); #ifdef SOLVER checkCudaErrors(cusolverDnCreate(&cusolverHandle)); #endif } void cudaClean() { // Destroy the handles checkCudaErrors(cublasDestroy(cublasHandle)); checkCudaErrors(cusparseDestroy(cusparseHandle)); #ifdef SOLVER checkCudaErrors(cusolverDnDestroy(cusolverHandle)); #endif } void initDeviceVectors(uint *indices, uint numParticles, uint numConstraints) { thrust::device_ptr<uint> d_i(indices); occurences.resize(numParticles); thrust::device_vector<uint> dv_is(numConstraints * 2); thrust::copy(d_i, d_i + numConstraints * 2, dv_is.begin()); thrust::device_vector<uint> d_occurs(dv_is.size(), 1.f); thrust::sort(dv_is.begin(), 
dv_is.end()); thrust::reduce_by_key(dv_is.begin(), dv_is.end(), d_occurs.begin(), dv_is.begin(), occurences.begin()); uint p1i, p2i; // row offsets and column indices for matrix J for (uint i = 0; i < numConstraints; i++) { Dr.push_back(i); Dc.push_back(i); Jr.push_back(i*4); p1i = d_i[i]; p2i = d_i[numConstraints + i]; if (p1i < p2i) { Jc.push_back(p1i * 2); Jc.push_back(p1i*2+1); Jc.push_back(p2i * 2); Jc.push_back(p2i*2+1); } else { Jc.push_back(p2i * 2); Jc.push_back(p2i*2+1); Jc.push_back(p1i * 2); Jc.push_back(p1i*2+1); } } Dr.push_back(numConstraints); Jr.push_back(numConstraints*4); W.resize(numParticles); thrust::fill(W.begin(), W.end(), 1.f); #ifdef PRINT thrust::device_ptr<uint> O(occurences.data()); printf("Occurs:\n"); for (uint i = 0; i < occurences.size(); i++) { printf("%u ", (uint)*(O + i)); } printf("\n"); thrust::device_ptr<float> d_W(W.data()); printf("W:\n"); for (uint i = 0; i < W.size(); i++) { printf("%.2f ", (float)*(d_W + i)); } printf("\n"); #endif uint numParticles2 = numParticles * 2; Dv.resize(numConstraints); Jv.resize(numConstraints * 4); JT.resize(numConstraints * numParticles2); thrust::fill(JT.begin(), JT.end(), 0.f); A.resize(numConstraints * numConstraints); B.resize(numConstraints); B2.resize(numConstraints); C.resize(numConstraints); // deltas.resize(numParticles2); allocateArray((void **)&d_oldPos, numParticles * 4 * sizeof(float)); #ifdef SOLVER float *pA = thrust::raw_pointer_cast(A.data()); cusolverDnSpotrf_bufferSize(cusolverHandle, CUBLAS_FILL_MODE_LOWER, numConstraints, pA, numConstraints, &workSize); allocateArray((void**)&d_work, workSize * sizeof(float)); allocateArray((void **)&d_devIpiv, numConstraints * sizeof(int)); allocateArray((void**)&d_devInfo, sizeof(int)); #endif } void freeDeviceVectors() { // set size to zero Jr.clear(); Jc.clear(); Jv.clear(); Dr.clear(); Dc.clear(); Dv.clear(); occurences.clear(); W.clear(); JT.clear(); A.clear(); B.clear(); B2.clear(); C.clear(); // deltas.clear(); // free memory Jr.shrink_to_fit(); Jc.shrink_to_fit(); Jv.shrink_to_fit(); Dr.shrink_to_fit(); Dc.shrink_to_fit(); Dv.shrink_to_fit(); occurences.shrink_to_fit(); W.shrink_to_fit(); JT.shrink_to_fit(); A.shrink_to_fit(); B.shrink_to_fit(); B2.shrink_to_fit(); C.shrink_to_fit(); // deltas.shrink_to_fit(); freeArray(d_oldPos); #ifdef SOLVER freeArray(d_work); freeArray(d_devIpiv); freeArray(d_devInfo); #endif d_rbAngle.clear(); d_rbCenterMass.clear(); d_rbPartStart.clear(); d_rbRStart.clear(); d_rbSize.clear(); d_Rs.clear(); d_rbAngle.shrink_to_fit(); d_rbCenterMass.shrink_to_fit(); d_rbPartStart.shrink_to_fit(); d_rbRStart.shrink_to_fit(); d_rbSize.shrink_to_fit(); d_Rs.shrink_to_fit(); } void copyArrayToDevice(void *device, const void *host, int offset, int size) { checkCudaErrors(cudaMemcpy((char *) device + offset, host, size, cudaMemcpyHostToDevice)); } void registerGLBufferObject(unsigned int vbo, struct cudaGraphicsResource **cuda_vbo_resource) { checkCudaErrors(cudaGraphicsGLRegisterBuffer(cuda_vbo_resource, vbo, cudaGraphicsMapFlagsNone)); } void unregisterGLBufferObject(struct cudaGraphicsResource *cuda_vbo_resource) { checkCudaErrors(cudaGraphicsUnregisterResource(cuda_vbo_resource)); } void *mapGLBufferObject(struct cudaGraphicsResource **cuda_vbo_resource) { void *ptr; checkCudaErrors(cudaGraphicsMapResources(1, cuda_vbo_resource, 0)); size_t num_bytes; checkCudaErrors(cudaGraphicsResourceGetMappedPointer((void **)&ptr, &num_bytes, *cuda_vbo_resource)); return ptr; } void unmapGLBufferObject(struct cudaGraphicsResource 
*cuda_vbo_resource) { checkCudaErrors(cudaGraphicsUnmapResources(1, &cuda_vbo_resource, 0)); } void copyArrayFromDevice(void *host, const void *device, int size) { checkCudaErrors(cudaMemcpy(host, device, size, cudaMemcpyDeviceToHost)); } void setParameters(SimParams *hostParams) { // copy parameters to constant memory checkCudaErrors(cudaMemcpyToSymbol(params, hostParams, sizeof(SimParams))); } //Round a / b to nearest higher integer value uint iDivUp(uint a, uint b) { return (a % b != 0) ? (a / b + 1) : (a / b); } // compute grid and thread block size for a given number of elements void computeGridSize(uint n, uint blockSize, uint &numBlocks, uint &numThreads) { numThreads = min(blockSize, n); numBlocks = iDivUp(n, numThreads); } void integrateSystem(float *pos, float *vel, float deltaTime, uint numParticles) { thrust::device_ptr<float4> d_pos4((float4 *)pos); thrust::device_ptr<float4> d_vel4((float4 *)vel); thrust::device_ptr<float4> d_old4((float4 *)d_oldPos); thrust::copy(d_pos4, d_pos4 + numParticles, d_old4); thrust::for_each( thrust::make_zip_iterator(thrust::make_tuple(d_pos4, d_vel4)), thrust::make_zip_iterator(thrust::make_tuple(d_pos4+numParticles, d_vel4+numParticles)), integrate_functor(deltaTime)); #ifdef PRINT for (uint i = 0; i < numParticles; i++) { printf("Integrated Pos : (%.2f, %.2f, %.2f, %.2f)\n", ((float4)*(d_pos4+i)).x,((float4)*(d_pos4+i)).y, ((float4)*(d_pos4+i)).z,((float4)*(d_pos4+i)).w); } #endif } void calcHash(uint *gridParticleHash, uint *gridParticleIndex, float *pos, int numParticles) { uint numThreads, numBlocks; computeGridSize(numParticles, 256, numBlocks, numThreads); // execute the kernel calcHashD<<< numBlocks, numThreads >>>(gridParticleHash, gridParticleIndex, (float4 *) pos, numParticles); // check if kernel invocation generated an error getLastCudaError("Kernel execution failed"); } void reorderDataAndFindCellStart(uint *cellStart, uint *cellEnd, float *sortedPos, float *sortedVel, uint *gridParticleHash, uint *gridParticleIndex, float *oldPos, float *oldVel, uint numParticles, uint numCells) { uint numThreads, numBlocks; computeGridSize(numParticles, 256, numBlocks, numThreads); // set all cells to empty checkCudaErrors(cudaMemset(cellStart, 0xffffffff, numCells*sizeof(uint))); checkCudaErrors(cudaBindTexture(0, oldPosTex, oldPos, numParticles*sizeof(float4))); checkCudaErrors(cudaBindTexture(0, oldVelTex, oldVel, numParticles*sizeof(float4))); uint smemSize = sizeof(uint)*(numThreads+1); reorderDataAndFindCellStartD<<< numBlocks, numThreads, smemSize>>>( cellStart, cellEnd, (float4 *) sortedPos, (float4 *) sortedVel, gridParticleHash, gridParticleIndex, (float4 *) oldPos, (float4 *) oldVel, numParticles); getLastCudaError("Kernel execution failed: reorderDataAndFindCellStartD"); checkCudaErrors(cudaUnbindTexture(oldPosTex)); checkCudaErrors(cudaUnbindTexture(oldVelTex)); } void collide(float *newVel, float *sortedPos, float *sortedVel, uint *gridParticleIndex, uint *cellStart, uint *cellEnd, uint numParticles, uint numCells) { checkCudaErrors(cudaBindTexture(0, oldPosTex, sortedPos, numParticles*sizeof(float4))); checkCudaErrors(cudaBindTexture(0, oldVelTex, sortedVel, numParticles*sizeof(float4))); checkCudaErrors(cudaBindTexture(0, cellStartTex, cellStart, numCells*sizeof(uint))); checkCudaErrors(cudaBindTexture(0, cellEndTex, cellEnd, numCells*sizeof(uint))); // thread per particle uint numThreads, numBlocks; computeGridSize(numParticles, 64, numBlocks, numThreads); // execute the kernel collideD<<< numBlocks, numThreads >>>((float4 
*)newVel, (float4 *)sortedPos, (float4 *)sortedVel, gridParticleIndex, cellStart, cellEnd, numParticles); // check if kernel invocation generated an error getLastCudaError("Kernel execution failed"); checkCudaErrors(cudaUnbindTexture(oldPosTex)); checkCudaErrors(cudaUnbindTexture(oldVelTex)); checkCudaErrors(cudaUnbindTexture(cellStartTex)); checkCudaErrors(cudaUnbindTexture(cellEndTex)); } void sortParticles(uint *dGridParticleHash, uint *dGridParticleIndex, uint numParticles) { thrust::sort_by_key(thrust::device_ptr<uint>(dGridParticleHash), thrust::device_ptr<uint>(dGridParticleHash + numParticles), thrust::device_ptr<uint>(dGridParticleIndex)); } // void solvePointConstraints(uint *indices, float *points, float *particles, uint numConstraints) // { // thrust::device_ptr<uint> d_indices(indices); // thrust::device_ptr<float4> d_points((float4 *) points); // thrust::for_each( // thrust::make_zip_iterator(thrust::make_tuple(d_indices, d_points)), // thrust::make_zip_iterator(thrust::make_tuple(d_indices+numConstraints, d_points+numConstraints)), // point_constraint_functor((float4 *)particles)); // } // void iterativeSolveDistanceConstraints(uint *indices, float *distance, float *particles, uint numConstraints) // { // // copy indices to device vector (this gets manipulated) // thrust::device_vector<uint> d_indices(indices, indices + numConstraints*2); // // pointers to the locations of the first and second indices // thrust::device_ptr<uint> d_index1 = d_indices.data(); // thrust::device_ptr<uint> d_index2 = d_indices.data() + numConstraints; // // pointer to the distance values // thrust::device_ptr<float> d_distances(distance); // // device vector of delta values (to accumulate then sum) // thrust::device_vector<float4> d_deltas_vec(numConstraints*2); // thrust::device_ptr<float4> d_deltas_ptr = d_deltas_vec.data(); // // calculate the delta values for each point // thrust::for_each( // thrust::make_zip_iterator(thrust::make_tuple(d_index1, d_index2, d_distances, d_deltas_ptr, d_deltas_ptr+ numConstraints)), // thrust::make_zip_iterator(thrust::make_tuple(d_index1+numConstraints, d_index2+numConstraints, d_distances+numConstraints, d_deltas_ptr+numConstraints, d_deltas_ptr+numConstraints*2)), // delta_computing_functor((float4 *) particles)); // thrust::sort_by_key(d_index1, d_index1+numConstraints*2, d_deltas_ptr); // thrust::pair<thrust::device_ptr<uint>,thrust::device_ptr<float4>> new_end; // new_end = thrust::reduce_by_key(d_index1, d_index1+numConstraints*2, d_deltas_ptr, d_index1, d_deltas_ptr); // thrust::for_each( // thrust::make_zip_iterator(thrust::make_tuple(d_index1, d_deltas_ptr)), // thrust::make_zip_iterator(thrust::make_tuple(new_end.first, new_end.second)), // distance_constraint_functor((float4 *)particles)); // } void calcVelocity(float *hpos, float *dpos, float *vel, float deltaTime, uint numParticles) { // thrust::host_vector<float> h_origPos(hpos, hpos + numParticles*4); // thrust::device_vector<float> d_origPos = h_origPos; thrust::device_ptr<float4> d_origPos((float4*)d_oldPos); thrust::device_ptr<float4> d_solvedPos((float4*)dpos); thrust::device_ptr<float4> d_vel((float4*)vel); thrust::transform(d_origPos, d_origPos + numParticles * 4, d_solvedPos, d_vel, subtract_functor(deltaTime)); } // void buildJAndB(uint *indices, float *distance, float *particles, float *jay_, float *b, uint numParticles, uint numConstraints) // { //// uint2 sizeJ = make_uint2(numParticles * 2, numConstraints); // rows, cols //// // raw pointer to device memory ////// float *_J, *_b; 
////// allocateArray((void **)&_J, sizeJ.x * sizeJ.y * sizeof(float)); ////// allocateArray((void **)&_b, numConstraints * sizeof(float)); //// // wrap raw pointer with a device_ptr //// thrust::device_ptr<float> d_J(J); // thrust::device_ptr<float> d_b(b); //// // use device_ptr in thrust algorithms //// thrust::fill(d_J, d_J + sizeJ.x * sizeJ.y, 0.f); //// thrust::fill(d_b, d_b + numConstraints, 0.f); // uint numPartsExp = numParticles * 2; // * 3 for 3D //// cusp::print(W); //// J.column_indices(1, 1) = 0; //#ifdef PRINT // thrust::device_ptr<float4> d_particles((float4*)particles); // for (uint i = 0; i < numParticles; i++) // { // printf("particles before : (%.2f, %.2f, %.2f, %.2f)\n", // ((float4)*(d_particles+i)).x,((float4)*(d_particles+i)).y, // ((float4)*(d_particles+i)).z,((float4)*(d_particles+i)).w); // } // printf("MATRIX J (before):\n"); // for (uint r = 0; r < sizeJ.x; r++) // { // for (uint c = 0; c < sizeJ.y; c++) // { // uint index = c * sizeJ.x + r; // printf("%.2f ", (float)*(d_J+index)); // } // printf("\n"); // } // printf("B (before):\n"); // for (uint i = 0; i < numConstraints * 4; i++) // { // printf("%.2f, ", (float)*(d_b+i)); // } // printf("\b \n"); //#endif //// // copy indices to device vector (this gets manipulated) //// thrust::device_vector<uint> d_indices(indices, indices + numConstraints*2); //// // pointers to the locations of the first and second indices //// thrust::device_ptr<uint> d_index1 = d_indices.data(); //// thrust::device_ptr<uint> d_index2 = d_indices.data() + numConstraints; // // pointer to the indices // thrust::device_ptr<uint> d_index1(indices); // thrust::device_ptr<uint> d_index2(indices + numConstraints); // // pointer to the distance values // thrust::device_ptr<float> d_distances(distance); // // keeps track of the index // thrust::counting_iterator<uint> first(0); // thrust::counting_iterator<uint> last = first + numConstraints; //// J.row_offsets[0] = 0; //// J.row_offsets[1] = 4; //// J.row_offsets[2] = 7; //// J.column_indices[0] = 1; J.values[0] = 1.f; //// J.column_indices[1] = 3; J.values[1] = 2.f; //// J.column_indices[2] = 4; J.values[2] = 3.f; //// J.column_indices[3] = 5; J.values[3] = 4.f; //// J.column_indices[4] = 0; J.values[4] = 5.f; //// J.column_indices[5] = 1; J.values[5] = 6.f; //// J.column_indices[6] = 2; J.values[6] = 7.f; //// J.column_indices[7] = 4; J.values[7] = 8.f; // cusp::csr_matrix<uint, float, cusp::device_memory> J(numConstraints, numPartsExp, numConstraints * 4); // uint *row_off = thrust::raw_pointer_cast(&J.row_offsets[0]); // uint *col_ind = thrust::raw_pointer_cast(&J.column_indices[0]); // float *val = thrust::raw_pointer_cast(&J.values[0]); //// CSRView j(2, 6, 8, ); // // fill J and b //// thrust::for_each( //// thrust::make_zip_iterator(thrust::make_tuple(first, d_index1, d_index2, d_distances, d_b)), //// thrust::make_zip_iterator(thrust::make_tuple(last, d_index1+numConstraints, d_index2+numConstraints, d_distances+numConstraints, d_b+numConstraints)), //// gradient_functor((float4 *) particles, row_off, col_ind, val, numConstraints)); // J.row_offsets[numConstraints] = numConstraints * 4 - 1; //#ifdef PRINT // printf("MATRIX J (after):\n"); // for (uint r = 0; r < sizeJ.x; r++) // { // for (uint c = 0; c < sizeJ.y; c++) // { // uint index = c * sizeJ.x + r; // printf("%.2f ", (float)*(d_J+index)); // } // printf("\n"); // } // printf("B (after):\n"); // for (uint i = 0; i < numConstraints; i++) // { // printf("%.2f, ", (float)*(d_b+i)); // } // printf("\b \n"); //#endif // 
cusp::csr_matrix<uint, float, cusp::device_memory> JW(numConstraints, numPartsExp, numConstraints * 4); // cusp::csr_matrix<uint, float, cusp::device_memory> JT(numPartsExp, numConstraints, numConstraints * 4); // cusp::csr_matrix<uint, float, cusp::device_memory> A(numConstraints, numConstraints, numConstraints * numConstraints); //// cusp::multiply(J, W, JW); //// cusp::transpose(J, JT); //// cusp::multiply(JW,JT,A); //// printf("J:\n"); //// cusp::print(J); //// printf("JW:\n"); //// cusp::print(JW); //// printf("JT:\n"); //// cusp::print(J); //// printf("A:\n"); //// cusp::print(A); //// freeArray(_J); //// freeArray(_b); // } // void makeA(float *J, float *A, uint numParticles, uint numConstraints) // { // float alpha = 1.f, beta = 0.f; // checkCudaErrors(cublasSgemm(cublasHandle, CUBLAS_OP_T, CUBLAS_OP_N, numConstraints, numConstraints, numParticles * 2, &alpha, J, numParticles * 2, J, numParticles * 2, &beta, A, numConstraints)); //#ifdef PRINT // thrust::device_ptr<float> d_A(A); // printf("MATRIX A:\n"); // for (uint r = 0; r < numConstraints; r++) // { // for (uint c = 0; c < numConstraints; c++) // { // uint index = c * numConstraints + r; // printf("%.2f ", (float)*(d_A+index)); // } // printf("\n"); // } //#endif // } void solveAxb(uint *indices, float *distances, float *particles, uint numParticles, uint numConstraints, int gaussIters, float omega) { uint numParticles2 = numParticles * 2; float *val = thrust::raw_pointer_cast(Jv.data()); // pointer to the indices thrust::device_ptr<uint> d_index1(indices); thrust::device_ptr<uint> d_index2(indices + numConstraints); // pointer to the distance values thrust::device_ptr<float> d_distances(distances); #ifdef PRINT printf("dis: "); for (int i = 0; i < numConstraints*2; i++) printf("%u ", (uint)*(d_index1 + i)); printf("\n"); printf("ds: "); for (int i = 0; i < numConstraints; i++) printf("%u ", (uint)*(d_distances + i)); printf("\n"); #endif // keeps track of the index thrust::counting_iterator<uint> first(0); thrust::counting_iterator<uint> last = first + numConstraints; thrust::device_ptr<float> d_B(B.data()); float *pJT = thrust::raw_pointer_cast(JT.data()); // fill J and b thrust::for_each( thrust::make_zip_iterator(thrust::make_tuple(first, d_index1, d_index2, d_distances, d_B)), thrust::make_zip_iterator(thrust::make_tuple(last, d_index1+numConstraints, d_index2+numConstraints, d_distances+numConstraints, d_B+numConstraints)), gradient_functor((float4 *) particles, val, pJT, numParticles, numConstraints)); float alpha = 1.f; float beta = 0.f; // raw pointers to data from vectors float *pJv = thrust::raw_pointer_cast(Jv.data()); int *pJr = (int*)thrust::raw_pointer_cast(Jr.data()); int *pJc = (int*)thrust::raw_pointer_cast(Jc.data()); float *pDv = thrust::raw_pointer_cast(Dv.data()); int *pDr = (int*)thrust::raw_pointer_cast(Dr.data()); int *pDc = (int*)thrust::raw_pointer_cast(Dc.data()); float *pA = thrust::raw_pointer_cast(A.data()); float *pB = thrust::raw_pointer_cast(B.data()); float *pB2 = thrust::raw_pointer_cast(B2.data()); float *pC = thrust::raw_pointer_cast(C.data()); // A = J * JT cusparseScsrmm( cusparseHandle, CUSPARSE_OPERATION_NON_TRANSPOSE, numConstraints, // m: rows of J numConstraints, // n: cols of JT and A numParticles2, // k: cols of J numConstraints * 4, // nnz: num non-zero elements of J &alpha, // needs to be 1.f matDescr, // matrix descriptor for J pJv, // non zeros values of J pJr, // row offsets of J pJc, // col indices of J pJT, // dense matrix JT numParticles2, // leading dimension of JT &beta, 
// needs to be 0.f pA, // matrix A: answer is stored here numConstraints); // leading dimension of A #ifdef GAUSS float minimumError = 0.0001f; uint maximumIterations = gaussIters; uint numThreads, numBlocks; computeGridSize(numConstraints, 256, numBlocks, numThreads); thrust::fill(C.begin(), C.end(), 0.f); // thrust::fill(deltas.begin(), deltas.end(), 0.f); // float *pDeltas = thrust::raw_pointer_cast(deltas.data()); uint *pOccurences = thrust::raw_pointer_cast(occurences.data()); // execute the kernel constraintCentricSolveD<<< numBlocks, numThreads >>>(pA, pB, pJT, pC, particles, indices, pOccurences, numParticles, numConstraints, minimumError, maximumIterations, omega); #endif #ifdef JACOBI // A[0] = 2; // A[1] = 5; // A[2] = 1; // A[3] = 7; // B[0] = 11; // B[1] = 13; // set D as diagonal from A and set A diag to zeros thrust::for_each( thrust::make_zip_iterator(thrust::make_tuple(first, Dv.begin())), thrust::make_zip_iterator(thrust::make_tuple(last, Dv.end())), diag_extraction_functor(pA, numConstraints)); // T = -D * A (A doubles as T after this computation) alpha = -1.f; cusparseScsrmm( cusparseHandle, CUSPARSE_OPERATION_NON_TRANSPOSE, numConstraints, // m: rows of D numConstraints, // n: cols of D and T (stored in A) numConstraints, // k: cols of D numConstraints, // nnz: num non-zero elements of D &alpha, // -1.f matDescr, // matrix descriptor for D pDv, // non zeros values of D pDr, // row offsets of D pDc, // col indices of D pA, // dense matrix A numConstraints, // leading dimension of A &beta, // needs to be 0.f pA, // matrix A: answer is stored here numConstraints); // leading dimension of A // c = D * b alpha = 1.f; cusparseScsrmv(cusparseHandle, CUSPARSE_OPERATION_NON_TRANSPOSE, numConstraints, numConstraints, numConstraints, &alpha, matDescr, pDv, pDr, pDc, pB, &beta, pC); thrust::fill(B.begin(), B.end(), 1.f); beta = 1.f; for (int i = 0; i < 100; i++) { thrust::copy(C.begin(), C.end(), B2.begin()); cublasSgemv(cublasHandle, CUBLAS_OP_N, numConstraints, numConstraints, &alpha, pA, numConstraints, pB, 1, &beta, pB2, 1); thrust::copy(C.begin(), C.end(), B.begin()); cublasSgemv(cublasHandle, CUBLAS_OP_N, numConstraints, numConstraints, &alpha, pA, numConstraints, pB2, 1, &beta, pB, 1); } #endif #ifdef SOLVER cusolverDnSpotrf( cusolverHandle, CUBLAS_FILL_MODE_LOWER, numConstraints, pA, numConstraints, d_work, workSize, d_devInfo); // overwrite b cusolverDnSpotrs( cusolverHandle, CUBLAS_FILL_MODE_LOWER, numConstraints, 1, pA, numConstraints, pB, numConstraints, d_devInfo); #endif #ifndef GAUSS // update particles thrust::for_each( thrust::make_zip_iterator(thrust::make_tuple(first, W.begin(), occurences.begin())), thrust::make_zip_iterator(thrust::make_tuple(last, W.end(), occurences.end())), col_reduction((float4 *) particles, pB, pJT, numParticles2, numConstraints)); #endif #ifdef PRINT thrust::device_ptr<float> d_deltas(deltas.data()); printf("Deltas:\n"); for (uint c = 0; c < numParticles2; c++) { printf("%.2f ", (float)*(d_deltas+c)); } printf("\n"); CSRView J(numConstraints, numParticles2, numConstraints * 4, cusp::make_array1d_view(Jr.begin(), Jr.end()), cusp::make_array1d_view(Jc.begin(), Jc.end()), cusp::make_array1d_view(Jv.begin(), Jv.end())); printf("J:\n"); cusp::print(J); thrust::device_ptr<float> d_JT(JT.data()); printf("MATRIX JT:\n"); for (uint r = 0; r < numParticles2; r++) { for (uint c = 0; c < numConstraints; c++) { uint index = c * numParticles2 + r; printf("%.2f ", (float)*(d_JT+index)); } printf("\n"); } printf("\n"); thrust::device_ptr<float> 
d_A(A.data()); printf("MATRIX A:\n"); for (uint r = 0; r < numConstraints; r++) { for (uint c = 0; c < numConstraints; c++) { uint index = c * numConstraints + r; printf("%.2f ", (float)*(d_A+index)); } printf("\n"); } printf("\n"); CSRView D(numConstraints, numConstraints, numConstraints, cusp::make_array1d_view(Dr.begin(), Dr.end()), cusp::make_array1d_view(Dc.begin(), Dc.end()), cusp::make_array1d_view(Dv.begin(), Dv.end())); printf("MATRIX D:\n"); cusp::print(D); printf("Vector B:\n"); for (uint c = 0; c < numConstraints; c++) { printf("%.2f ", (float)*(d_B+c)); } printf("\n"); thrust::device_ptr<float> d_B2(B2.data()); printf("Vector B2:\n"); for (uint c = 0; c < numConstraints; c++) { printf("%.2f ", (float)*(d_B2+c)); } printf("\n"); thrust::device_ptr<float> d_C(C.data()); printf("Vector C:\n"); for (uint c = 0; c < numConstraints; c++) { printf("%.2f ", (float)*(d_C+c)); } printf("\n"); thrust::device_ptr<float4> d_pos4((float4 *)particles); for (uint i = 0; i < numParticles; i++) { printf("Pos : (%.2f, %.2f, %.2f, %.2f)\n", ((float4)*(d_pos4+i)).x,((float4)*(d_pos4+i)).y, ((float4)*(d_pos4+i)).z,((float4)*(d_pos4+i)).w); } #endif } void addRigidBody(float *hpos, int width, int height, float2 center, float particleRadius) { float diameter = particleRadius * 2; float startX = center.x - (width * .5f) * diameter + particleRadius; float startY = center.y - (height * .5f) * diameter + particleRadius; float2 posSum = make_float2(0.f); int numParticles = 0; d_rbAngle.push_back(0); d_rbRStart.push_back(d_Rs.size()); d_rbPartStart.push_back(0); float2 pos; int index = 0; for (int i = 0; i < width; i++) { for (int j = 0; j < height; j++) { pos = make_float2(startX + i * diameter, startY + j * diameter); hpos[index] = pos.x; hpos[index+1] = pos.y; hpos[index+2] = 0.f; hpos[index+3] = 1.f; d_Rs.push_back(pos.x - center.x); d_Rs.push_back(pos.y - center.y); posSum += pos; numParticles++; index += 4; } } d_rbCenterMass.push_back(posSum.x / numParticles); d_rbCenterMass.push_back(posSum.y / numParticles); d_rbSize.push_back(numParticles); } void solveRigidBodies() { float *dRs = thrust::raw_pointer_cast(d_Rs.data()); thrust::for_each( thrust::make_zip_iterator(thrust::make_tuple(d_rbAngle.begin(), d_rbCenterMass.begin(), d_rbPartStart.begin(), d_rbRStart.begin(), d_rbSize.begin())), thrust::make_zip_iterator(thrust::make_tuple(d_rbAngle.end(), d_rbCenterMass.end(), d_rbPartStart.end(), d_rbRStart.end(), d_rbSize.end())), rigid_body_functor((float2*) dRs)); } // thrust::device_vector<float> d_rbAngle; // thrust::device_vector<float> d_rbCenterMass; // dim2 // thrust::device_vector<uint> d_rbPartStart; // thrust::device_vector<uint> d_rbRStart; // thrust::device_vector<uint> d_rbSize; // thrust::device_vector<float> d_Rs; // dim 2 }
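// The JACOBI branch of solveAxb above implements the classic fixed-point scheme
//   x_{k+1} = D^{-1} * (b - (A - D) * x_k),   D = diag(A):
// the diagonal of A is pulled into the sparse matrix D, T and c are formed with
// cusparseScsrmm / cusparseScsrmv, and two cublasSgemv calls per loop ping-pong the
// iterate between B and B2. Below is a minimal host-side sketch of the same scheme in
// plain C++ for a tiny dense system; jacobiSolve, the column-major layout, the sample
// matrix, and the iteration count are illustrative assumptions, not part of the solver
// above.
#include <cstdio>
#include <vector>

// Plain Jacobi on a dense column-major n x n matrix (matching the cuBLAS convention).
static void jacobiSolve(const std::vector<float> &A, const std::vector<float> &b,
                        std::vector<float> &x, int n, int iterations)
{
    std::vector<float> xNew(n, 0.0f);
    for (int it = 0; it < iterations; ++it) {
        for (int r = 0; r < n; ++r) {
            float sum = 0.0f;
            for (int c = 0; c < n; ++c)
                if (c != r)
                    sum += A[c * n + r] * x[c];    // off-diagonal part of row r
            xNew[r] = (b[r] - sum) / A[r * n + r]; // divide by the diagonal entry
        }
        x.swap(xNew);
    }
}

int main()
{
    // Small diagonally dominant system, so plain Jacobi converges.
    const int n = 2;
    std::vector<float> A = {4.0f, 1.0f,   // column 0
                            1.0f, 3.0f};  // column 1
    std::vector<float> b = {1.0f, 2.0f};
    std::vector<float> x(n, 0.0f);
    jacobiSolve(A, b, x, n, 50);
    printf("x = (%.4f, %.4f)\n", x[0], x[1]); // exact solution is (1/11, 7/11)
    return 0;
}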
#include "SharedMemory.cuh" // INTEGER BASED #include "i_Sum_i.cuh" #include "i_MinIdx_2i.cuh" #include "i_MaxIdx_2i.cuh" #include "i_MinIdxMaxIdx_4i.cuh" // SINGLE BASED #include "f_Sum_f.cuh" #include "f_MinMax_2f.cuh" #include "f_MinIdx_fi.cuh" #include "f_MinIdx_ff.cuh" #include "f_MaxIdx_fi.cuh" #include "f_MaxIdx_ff.cuh" #include "f_MinMax_2f.cuh" #include "f_MinIdxMaxIdx_fifi.cuh" #include "f_Average_f.cuh" #include "c_Average_c.cuh" #include "c_Sum_c.cuh" // DOT PRODUCT BASED #include "i_Dot_i.cuh" #include "f_Dot_f.cuh" #include "f_Cosine_f.cuh" #include "c_ComplexDot_c.cuh" using namespace std; #define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ )) static void HandleError(cudaError_t err, const char *file, int line) { if (err != cudaSuccess) { printf("%s in %s at line %d \n", cudaGetErrorString(err), file, line); exit(EXIT_FAILURE); } } int randl() { return (rand() << 16) + rand(); } template<typename R, unsigned int tCnt, bool finalize> __forceinline__ __device__ void LogStepShared(R* out, volatile R* partials) { const unsigned int tid = threadIdx.x; if (tCnt >= 1024) { if (tid < 512) { partials[tid].op(partials[tid + 512]); } __syncthreads(); } if (tCnt >= 512) { if (tid < 256) { partials[tid].op(partials[tid + 256]); } __syncthreads(); } if (tCnt >= 256) { if (tid < 128) { partials[tid].op(partials[tid + 128]); } __syncthreads(); } if (tCnt >= 128) { if (tid < 64) { partials[tid].op(partials[tid + 64]); } __syncthreads(); } if (tid < 32) { if (tCnt >= 64 && tid < 32) { partials[tid].op(partials[tid + 32]); } if (tCnt >= 32 && tid < 16) { partials[tid].op(partials[tid + 16]); } if (tCnt >= 16 && tid < 8) { partials[tid].op(partials[tid + 8]); } if (tCnt >= 8 && tid < 4) { partials[tid].op(partials[tid + 4]); } if (tCnt >= 4 && tid < 2) { partials[tid].op(partials[tid + 2]); } if (tCnt >= 2 && tid < 1) { partials[tid].op(partials[tid + 1]); } } if (tid == 0) { if (finalize) partials[0].finalize(out); else *out = partials[0]; } } __device__ int buffer[8192]; __device__ unsigned int barrier = 0; template<typename R, typename T, unsigned int tCnt> __forceinline__ __device__ void DReduction(void* rawOut, volatile const void* rawIn, void* tempBuffer, unsigned int size, unsigned int outOff, unsigned int inOff, unsigned int stride, bool segmented) { __syncthreads(); if (!tempBuffer) tempBuffer = (void*)buffer; unsigned int gridDim_x = gridDim.x; unsigned int blockIdx_x = blockIdx.x; if (segmented) { gridDim_x = 1; blockIdx_x = 0; outOff = blockIdx.x; inOff = blockIdx.x * size; } R* out = reinterpret_cast<R*>(tempBuffer); volatile const T* in = reinterpret_cast<volatile const T*>(rawIn) + inOff; SharedMemory<R> sPartials; const unsigned int tid = threadIdx.x; R sum; for (unsigned int i = stride * (blockIdx_x * tCnt + tid); i < size; i += stride * tCnt * gridDim_x) { sum.op(in[i], i + inOff); } sPartials[tid] = sum; __syncthreads(); if (gridDim_x == 1) { out = reinterpret_cast<R*>(reinterpret_cast<char*>(rawOut) + R::outSize * outOff); LogStepShared<R, tCnt, false>(out, sPartials); return; } LogStepShared<R, tCnt, false>(&out[blockIdx_x], sPartials); __shared__ bool lastBlock; __threadfence(); if (tid == 0) { unsigned int ticket = atomicAdd(&barrier, 1); lastBlock = (ticket == gridDim_x - 1); } __syncthreads(); if (lastBlock) { R sum; for (unsigned int i = tid; i < gridDim_x; i += tCnt) { sum.op(out[i]); } sPartials[threadIdx.x] = sum; __syncthreads(); out = reinterpret_cast<R*>(reinterpret_cast<char*>(rawOut) + R::outSize * outOff); LogStepShared<R, tCnt, false>(out, 
sPartials); barrier = 0; } } template<typename R, typename T, unsigned int tCnt> __global__ void Reduction(void* rawOut, volatile const void* rawIn, void* tempBuffer, unsigned int size, unsigned int outOff, unsigned int inOff, unsigned int stride, bool segmented) { DReduction<R, T, tCnt>(rawOut, rawIn, tempBuffer, size, outOff, inOff, stride, segmented); } template<typename R, typename T, unsigned int tCnt> __forceinline__ __device__ void DDotProduct(void* rawOut, unsigned int outOff, volatile const void* rawIn1, volatile const void* rawIn2, void* tempBuffer, unsigned int size, bool segmented, bool distributed) { __syncthreads(); if (tempBuffer == nullptr) tempBuffer = (void*)buffer; unsigned int gridDim_x = gridDim.x; unsigned int blockIdx_x = blockIdx.x; R* out = reinterpret_cast<R*>(tempBuffer); volatile const T* in1 = reinterpret_cast<volatile const T*>(rawIn1); volatile const T* in2 = reinterpret_cast<volatile const T*>(rawIn2); if (segmented) { gridDim_x = 1; blockIdx_x = 0; in1 = reinterpret_cast<volatile const T*>(rawIn1) + !distributed * (blockIdx.x * size); in2 = reinterpret_cast<volatile const T*>(rawIn2) + blockIdx.x * size; outOff = blockIdx.x; } SharedMemory<R> sPartials; const unsigned int tid = threadIdx.x; R sum; for (unsigned int i = blockIdx_x * tCnt + tid; i < size; i += tCnt * gridDim_x) { sum.op(in1[i], in2[i], i); } sPartials[tid] = sum; __syncthreads(); if (gridDim_x == 1) { out = reinterpret_cast<R*>(reinterpret_cast<char*>(rawOut) + R::outSize * outOff); LogStepShared<R, tCnt, true>(out, sPartials); return; } LogStepShared<R, tCnt, false>(&out[blockIdx_x], sPartials); __shared__ bool lastBlock; __threadfence(); if (tid == 0) { unsigned int ticket = atomicAdd(&barrier, 1); lastBlock = (ticket == gridDim_x - 1); } __syncthreads(); if (lastBlock) { R sum; for (unsigned int i = tid; i < gridDim_x; i += tCnt) { sum.op(out[i]); } sPartials[threadIdx.x] = sum; __syncthreads(); out = reinterpret_cast<R*>(reinterpret_cast<char*>(rawOut) + R::outSize * outOff); LogStepShared<R, tCnt, true>(out, sPartials); barrier = 0; } } template<typename R, typename T, unsigned int tCnt> __global__ void DotProduct(void* rawOut, unsigned int outOff, volatile const void* rawIn1, volatile const void* rawIn2, void* tempBuffer, unsigned int size, bool segmented, bool distributed) { DDotProduct<R, T, tCnt>(rawOut, outOff, rawIn1, rawIn2, tempBuffer, size, segmented, distributed); } template<typename R, typename T> void ReductionTemplate() { Reduction<R, T, 32> << <0, 0 >> >(0, 0, 0, 0, 0, 0, 0, 0); Reduction<R, T, 64> << <0, 0 >> >(0, 0, 0, 0, 0, 0, 0, 0); Reduction<R, T, 128> << <0, 0 >> >(0, 0, 0, 0, 0, 0, 0, 0); Reduction<R, T, 256> << <0, 0 >> >(0, 0, 0, 0, 0, 0, 0, 0); Reduction<R, T, 512> << <0, 0 >> >(0, 0, 0, 0, 0, 0, 0, 0); } template<typename R, typename T> void DotProductTemplate() { DotProduct<R, T, 32> << <0, 0 >> >(0, 0, 0, 0, 0, 0, 0, 0); DotProduct<R, T, 64> << <0, 0 >> >(0, 0, 0, 0, 0, 0, 0, 0); DotProduct<R, T, 128> << <0, 0 >> >(0, 0, 0, 0, 0, 0, 0, 0); DotProduct<R, T, 256> << <0, 0 >> >(0, 0, 0, 0, 0, 0, 0, 0); DotProduct<R, T, 512> << <0, 0 >> >(0, 0, 0, 0, 0, 0, 0, 0); } extern "C" void InstantiationDummy() { // INTEGER BASED ReductionTemplate < i_Sum_i, int >(); ReductionTemplate < i_MinIdx_2i, int >(); ReductionTemplate < i_MaxIdx_2i, int >(); ReductionTemplate < i_MinIdxMaxIdx_4i, int >(); // SINGLE BASED ReductionTemplate < f_Sum_f, float >(); ReductionTemplate < f_MinMax_2f, float >(); ReductionTemplate < f_MinIdx_fi, float >(); ReductionTemplate < f_MinIdx_ff, 
float >(); ReductionTemplate < f_MaxIdx_fi, float >(); ReductionTemplate < f_MaxIdx_ff, float >(); ReductionTemplate < f_MinIdxMaxIdx_fifi, float >(); ReductionTemplate < f_Average_f, float >(); ReductionTemplate < c_Average_c, Complex >(); ReductionTemplate < c_Sum_c, Complex >(); // DOT PRODUCT DotProductTemplate <i_Dot_i, int >(); DotProductTemplate <f_Dot_f, float >(); DotProductTemplate <f_Cosine_f, float >(); DotProductTemplate <c_ComplexDot_c, Complex>(); } typedef void(*reduction_type)(void*, volatile const void*, void*, unsigned int, unsigned int, unsigned int, unsigned int, bool); template<typename R, typename T, const int bCnt> void TestReduction(reduction_type kernel, const char* name, int repetitions, int sizeMax, int min, int max, float div, bool segmented) { const int w = 20; for (int r = 0; r < repetitions; ++r) { cudaEvent_t startGPU, stopGPU; HANDLE_ERROR(cudaEventCreate(&startGPU)); HANDLE_ERROR(cudaEventCreate(&stopGPU)); float timeGPU; float timeCPU; int inSize = randl() % sizeMax + 1; int inOff = randl() % inSize; int size = randl() % (inSize - inOff) + 1; T* d_in, *h_in = new T[inSize]; HANDLE_ERROR(cudaMalloc(&d_in, sizeof(T) * inSize)); for (int i = 0; i < inSize; ++i) { h_in[i] = static_cast<T>(randl() % (max - min) + min) / div; } HANDLE_ERROR(cudaMemcpy(d_in, h_in, sizeof(T) * inSize, cudaMemcpyHostToDevice)); int stride = 1; if (randl() % 2 == 0) stride = randl() % (32) + 1; int outOff = segmented ? bCnt : randl() % 1000; R* d_out; R* h_out = reinterpret_cast<R*>(new char[R::outSize*(outOff + 1)]); R* c_out = reinterpret_cast<R*>(new char[R::outSize*(outOff + 1)]); HANDLE_ERROR(cudaMalloc(&d_out, R::outSize * (outOff + 1))); HANDLE_ERROR(cudaMemcpy(d_out, h_out, R::outSize * (outOff + 1), cudaMemcpyHostToDevice)); HANDLE_ERROR(cudaEventRecord(startGPU, 0)); if (segmented) kernel << <bCnt, 1024, sizeof(R) * 1024 >> >(d_out, d_in, nullptr, size / bCnt, 0, 0, stride, segmented); else kernel << <bCnt, 1024, sizeof(R) * 1024 >> >(d_out, d_in, nullptr, size, outOff, inOff, stride, segmented); HANDLE_ERROR(cudaEventRecord(stopGPU, 0)); HANDLE_ERROR(cudaEventSynchronize(stopGPU)); HANDLE_ERROR(cudaEventElapsedTime(&timeGPU, startGPU, stopGPU)); HANDLE_ERROR(cudaMemcpy(h_out, d_out, R::outSize * (outOff + 1), cudaMemcpyDeviceToHost)); time_t startCPU, stopCPU; int cycles = 100000000 / (inSize - inOff) >= 1 ? 
100000000 / (inSize - inOff) : 1; startCPU = clock(); if (segmented) for (size_t b = 0; b < bCnt; ++b) { unsigned int chunkSize = size / bCnt; unsigned int chunkOffset = b * chunkSize; for (size_t c = 0; c < cycles; ++c) R::simulate(c_out, h_in, chunkSize, b, chunkOffset, stride); } else for (size_t c = 0; c < cycles; ++c) R::simulate(c_out, h_in, size, outOff, inOff, stride); stopCPU = clock(); timeCPU = difftime(stopCPU, startCPU) / cycles; cout << "=== Test: " << name << " ===" << endl; cout << left << setw(w) << "Speedup" << setw(w) << "GPU time" << setw(w) << "CPU time" << setw(w) << "input size" << setw(w) << "size" << setw(w) << "output offset" << setw(w) << "input offset" << setw(w) << "stride" << endl; cout << left << setw(w) << (timeCPU / timeGPU) << setw(w) << timeGPU << setw(w) << timeCPU << setw(w) << inSize << setw(w) << size << setw(w) << outOff << setw(w) << inOff << setw(w) << stride << endl; printf("Check (GPU == CPU): \n"); bool passed = true; if (segmented) for (size_t b = 0; b < bCnt; b++) passed &= R::check(h_out, c_out, b, h_in); else passed = R::check(h_out, c_out, outOff, h_in); printf("------------\n"); if (passed) printf("| PASSED |\n"); else printf("| FAILED |\n"); printf("------------\n\n"); HANDLE_ERROR(cudaFree(d_in)); HANDLE_ERROR(cudaFree(d_out)); delete[] h_in; delete[] h_out; delete[] c_out; } } typedef void(*dotproduct_type)(void*, unsigned int, volatile const void*, volatile const void*, void*, unsigned int, bool, bool); template<typename T> void Randomize(T& t, int min, int max, float div) { t = static_cast<T>(float(randl() % (max - min) + min) / div); } template<> void Randomize(Complex& t, int min, int max, float div) { t.R = static_cast<float>(float(randl() % (max - min) + min) / div); t.I = static_cast<float>(float(randl() % (max - min) + min) / div); } template<typename R, typename T, const int bCnt> void TestDotProduct(dotproduct_type kernel, const char* name, int repetitions, int sizeMax, int min, int max, float div, bool segmented, bool distributed) { const int w = 20; for (int r = 0; r < repetitions; ++r) { cudaEvent_t startGPU, stopGPU; HANDLE_ERROR(cudaEventCreate(&startGPU)); HANDLE_ERROR(cudaEventCreate(&stopGPU)); float timeGPU; float timeCPU; int size = randl() % sizeMax + 1; T* d_in1, *d_in2, *h_in1 = new T[size], *h_in2 = new T[size]; HANDLE_ERROR(cudaMalloc(&d_in1, sizeof(T) * size)); HANDLE_ERROR(cudaMalloc(&d_in2, sizeof(T) * size)); for (int i = 0; i < size; ++i) { Randomize<T>(h_in1[i], min, max, div); Randomize<T>(h_in2[i], min, max, div); } HANDLE_ERROR(cudaMemcpy(d_in1, h_in1, sizeof(T) * size, cudaMemcpyHostToDevice)); HANDLE_ERROR(cudaMemcpy(d_in2, h_in2, sizeof(T) * size, cudaMemcpyHostToDevice)); int outOff = segmented ? 
bCnt : randl() % 1000; R* d_out; R* h_out = reinterpret_cast<R*>(new char[R::outSize*(outOff + 1)]); R* c_out = reinterpret_cast<R*>(new char[R::outSize*(outOff + 1)]); HANDLE_ERROR(cudaMalloc(&d_out, R::outSize * (outOff + 1))); HANDLE_ERROR(cudaMemcpy(d_out, h_out, R::outSize * (outOff + 1), cudaMemcpyHostToDevice)); HANDLE_ERROR(cudaEventRecord(startGPU, 0)); if (segmented) kernel << <bCnt, 1024, sizeof(R) * 1024 >> >(d_out, 0, d_in1, d_in2, nullptr, size / bCnt, segmented, distributed); else kernel << <bCnt, 1024, sizeof(R) * 1024 >> >(d_out, outOff, d_in1, d_in2, nullptr, size, segmented, distributed); HANDLE_ERROR(cudaEventRecord(stopGPU, 0)); HANDLE_ERROR(cudaEventSynchronize(stopGPU)); HANDLE_ERROR(cudaEventElapsedTime(&timeGPU, startGPU, stopGPU)); HANDLE_ERROR(cudaMemcpy(h_out, d_out, R::outSize * (outOff + 1), cudaMemcpyDeviceToHost)); time_t startCPU, stopCPU; int cycles = 100000000 / size >= 1 ? 100000000 / size : 1; startCPU = clock(); if (segmented) for (size_t b = 0; b < bCnt; ++b) { unsigned int chunkSize = size / bCnt; unsigned int chunkOffset = b * chunkSize; for (int c = 0; c < cycles; ++c) R::simulate(c_out, b, h_in1 + chunkOffset, h_in2 + chunkOffset, chunkSize); } else for (size_t c = 0; c < cycles; ++c) R::simulate(c_out, outOff, h_in1, h_in2, size); stopCPU = clock(); timeCPU = difftime(stopCPU, startCPU) / cycles; cout << "=== Test: " << name << " ===" << endl; cout << left << setw(w) << "Speedup" << setw(w) << "GPU time" << setw(w) << "CPU time" << setw(w) << "size" << setw(w) << "output offset" << endl; cout << left << setw(w) << (timeCPU / timeGPU) << setw(w) << timeGPU << setw(w) << timeCPU << setw(w) << size << setw(w) << outOff << endl; printf("Check (GPU == CPU): \n"); bool passed = true; if (segmented) for (size_t b = 0; b < bCnt; b++) passed &= R::check(h_out, c_out, b, h_in1, h_in2); else passed = R::check(h_out, c_out, outOff, h_in1, h_in2); printf("------------\n"); if (passed) printf("| PASSED |\n"); else printf("| FAILED |\n"); printf("------------\n\n"); HANDLE_ERROR(cudaFree(d_in1)); HANDLE_ERROR(cudaFree(d_in2)); HANDLE_ERROR(cudaFree(d_out)); delete[] h_in1; delete[] h_in2; delete[] h_out; delete[] c_out; } } int main(int argc, char* argv[]) { srand(time(NULL)); cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, 0); int repetitions = 10; int sizeMax = 10000000; // INTEGER BASED TestReduction<i_Sum_i, int, 10>(Reduction<i_Sum_i, int, 512>, "Reduction i_Sum_i", repetitions, sizeMax, -10, 10, 1, false); TestReduction<i_MinIdx_2i, int, 10>(Reduction<i_MinIdx_2i, int, 512>, "Reduction i_MinIdx_2i", repetitions, sizeMax, -10000, 10000, 1, false); TestReduction<i_MaxIdx_2i, int, 10>(Reduction<i_MaxIdx_2i, int, 512>, "Reduction i_MaxIdx_2i", repetitions, sizeMax, -10000, 10000, 1, false); TestReduction<i_MinIdxMaxIdx_4i, int, 10>(Reduction<i_MinIdxMaxIdx_4i, int, 512>, "Reduction i_MinIdxMaxIdx_4i", repetitions, sizeMax, 0, 10000, 1, false); // INTEGER BASED SEGMENTED TestReduction<i_Sum_i, int, 10>(Reduction<i_Sum_i, int, 512>, "Reduction i_Sum_i", repetitions, sizeMax, -10, 10, 1, true); TestReduction<i_MinIdx_2i, int, 10>(Reduction<i_MinIdx_2i, int, 512>, "Reduction i_MinIdx_2i", repetitions, sizeMax, -10000, 10000, 1, true); TestReduction<i_MaxIdx_2i, int, 10>(Reduction<i_MaxIdx_2i, int, 512>, "Reduction i_MaxIdx_2i", repetitions, sizeMax, -10000, 10000, 1, true); TestReduction<i_MinIdxMaxIdx_4i, int, 10>(Reduction<i_MinIdxMaxIdx_4i, int, 512>, "Reduction i_MinIdxMaxIdx_4i", repetitions, sizeMax, 0, 10000, 1, true); // SINGLE BASED 
TestReduction<f_Sum_f, float, 10>(Reduction<f_Sum_f, float, 512>, "Reduction f_Sum_f", repetitions, sizeMax, -100, 100, 100, false); TestReduction<f_MinMax_2f, float, 10>(Reduction<f_MinMax_2f, float, 512>, "Reduction f_MinMax_2f", repetitions, sizeMax, -100000, 100000, 1000, false); TestReduction<f_MinIdx_fi, float, 10>(Reduction<f_MinIdx_fi, float, 512>, "Reduction f_MinIdx_fi", repetitions, sizeMax, -100000, 100000, 1000, false); TestReduction<f_MaxIdx_fi, float, 10>(Reduction<f_MaxIdx_fi, float, 512>, "Reduction f_MaxIdx_fi", repetitions, sizeMax, -100000, 100000, 1000, false); TestReduction<f_MinIdxMaxIdx_fifi, float, 10>(Reduction<f_MinIdxMaxIdx_fifi, float, 512>, "Reduction f_MinIdxMaxIdx_fifi", repetitions, sizeMax, 0, 100000, 1000, false); // SINGLE BASED SEGMENTED TestReduction<f_Sum_f, float, 10>(Reduction<f_Sum_f, float, 512>, "Reduction f_Sum_f", repetitions, sizeMax, -100, 100, 100, true); TestReduction<f_MinMax_2f, float, 10>(Reduction<f_MinMax_2f, float, 512>, "Reduction f_MinMax_2f", repetitions, sizeMax, -100000, 100000, 1000, true); TestReduction<f_MinIdx_fi, float, 10>(Reduction<f_MinIdx_fi, float, 512>, "Reduction f_MinIdx_fi", repetitions, sizeMax, -100000, 100000, 1000, true); TestReduction<f_MaxIdx_fi, float, 10>(Reduction<f_MaxIdx_fi, float, 512>, "Reduction f_MaxIdx_fi", repetitions, sizeMax, -100000, 100000, 1000, true); TestReduction<f_MinIdxMaxIdx_fifi, float, 10>(Reduction<f_MinIdxMaxIdx_fifi, float, 512>, "Reduction f_MinIdxMaxIdx_fifi", repetitions, sizeMax, 0, 100000, 1000, true); // DOT PRODUCT TestDotProduct<i_Dot_i, int, 10>(DotProduct<i_Dot_i, int, 512>, "DotProduct i_Dot_i", repetitions, sizeMax, -10, 10, 1, false, false); TestDotProduct<f_Dot_f, float, 10>(DotProduct<f_Dot_f, float, 512>, "DotProduct f_Dot_f", repetitions, sizeMax, -100, 100, 100, false, false); TestDotProduct<f_Cosine_f, float, 10>(DotProduct<f_Cosine_f, float, 512>, "DotProduct f_Cosine_f", repetitions, sizeMax, -100, 100, 100, false, false); TestDotProduct<c_ComplexDot_c, Complex, 10>(DotProduct<c_ComplexDot_c, Complex, 512>, "ComplexDotProduct c_ComplexDot_c", repetitions, sizeMax, -100, 100, 100, false, false); // DOT PRODUCT SEGMENTED TestDotProduct<i_Dot_i, int, 10>(DotProduct<i_Dot_i, int, 512>, "DotProduct i_Dot_i", repetitions, sizeMax, -10, 10, 1, true, false); TestDotProduct<f_Dot_f, float, 10>(DotProduct<f_Dot_f, float, 512>, "DotProduct f_Dot_f", repetitions, sizeMax, -100, 100, 100, true, false); TestDotProduct<f_Cosine_f, float, 10>(DotProduct<f_Cosine_f, float, 512>, "DotProduct f_Cosine_f", repetitions, sizeMax, -100, 100, 100, true, false); TestDotProduct<c_ComplexDot_c, Complex, 10>(DotProduct<c_ComplexDot_c, Complex, 512>, "ComplexDotProduct c_ComplexDot_c", repetitions, sizeMax, -100, 100, 100, true, false); return 0; }
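// DReduction and DDotProduct above combine three steps: a grid-stride accumulation into
// per-thread partials, the shared-memory tree reduction in LogStepShared, and an atomicAdd
// ticket on `barrier` so that the last block folds all per-block partials into the final
// result. The kernel below is a minimal, self-contained sketch of the first two steps for
// a float sum; the per-block partials are combined on the host instead of with the
// last-block barrier, and the kernel name, launch configuration and test data are
// illustrative assumptions rather than part of the harness above.
#include <cstdio>
#include <vector>
#include <cuda_runtime.h>

template<unsigned int BlockSize>
__global__ void blockSum(const float *in, float *partials, unsigned int size)
{
    __shared__ float sdata[BlockSize];
    unsigned int tid = threadIdx.x;

    // Grid-stride accumulation into a per-thread partial sum.
    float sum = 0.0f;
    for (unsigned int i = blockIdx.x * BlockSize + tid; i < size; i += BlockSize * gridDim.x)
        sum += in[i];
    sdata[tid] = sum;
    __syncthreads();

    // Shared-memory tree reduction (the role played by LogStepShared above).
    for (unsigned int s = BlockSize / 2; s > 0; s >>= 1) {
        if (tid < s) sdata[tid] += sdata[tid + s];
        __syncthreads();
    }
    if (tid == 0) partials[blockIdx.x] = sdata[0];
}

int main()
{
    const unsigned int size = 1 << 20, blocks = 64, threads = 256;
    std::vector<float> h_in(size, 1.0f); // the sum should equal 'size'

    float *d_in = nullptr, *d_partials = nullptr;
    cudaMalloc(&d_in, size * sizeof(float));
    cudaMalloc(&d_partials, blocks * sizeof(float));
    cudaMemcpy(d_in, h_in.data(), size * sizeof(float), cudaMemcpyHostToDevice);

    blockSum<threads><<<blocks, threads>>>(d_in, d_partials, size);

    std::vector<float> h_partials(blocks);
    cudaMemcpy(h_partials.data(), d_partials, blocks * sizeof(float), cudaMemcpyDeviceToHost);
    float total = 0.0f;
    for (float p : h_partials) total += p;
    printf("GPU sum = %.0f (expected %u)\n", total, size);

    cudaFree(d_in);
    cudaFree(d_partials);
    return 0;
}
// Combining the partials on the host keeps the sketch short; the single-pass scheme above
// avoids that extra device-to-host copy at the cost of the __threadfence()/atomicAdd
// handshake and a persistent temporary buffer.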
#include <fstream> #include <ios> #include <iostream> #include <map> #include <iterator> #include <algorithm> #include <amgx_types/util.h> #include <amgx_types/io.h> namespace amgx { template <typename T> void LoadValueFromStream(std::ifstream &fin, T &val); template <> void LoadValueFromStream(std::ifstream &fin, float &val) { fin >> val; } template <> void LoadValueFromStream(std::ifstream &fin, double &val) { fin >> val; } template <> void LoadValueFromStream(std::ifstream &fin, cuComplex &val) { float x, y; fin >> x >> y; val = make_cuComplex(x, y); } template <> void LoadValueFromStream(std::ifstream &fin, cuDoubleComplex &val) { double x, y; fin >> x >> y; val = make_cuDoubleComplex(x, y); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> bool LoadVector(std::ifstream &fin, bool read_all, int rows_total, int block_size, Vector<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> > &vec, const std::map<const int, int> &GlobalToLocalRowMap = std::map<const int, int>()) { std::map<const int, int>::const_iterator gtl_it; //std::vector<double> block_vals(block_size); typedef typename VecPrecisionMap<t_vecPrec>::Type value_type; std::vector<value_type> block_vals(block_size); //for each entry int idx = 0; if (fin) { for (int i = 0; i < rows_total; i++) { //read entry for (int k = 0; k < block_size; k++) { LoadValueFromStream(fin, block_vals[k]); } //fin >> block_vals[k]; if (read_all) for (int k = 0; k < block_size; k++) { vec[i * block_size + k] = block_vals[k]; idx++; } else { gtl_it = GlobalToLocalRowMap.find(i); if (gtl_it != GlobalToLocalRowMap.end()) { for (int k = 0; k < block_size; k++) { vec[gtl_it->second * block_size + k] = block_vals[k]; idx++; } } } } if (idx != vec.size()) { FatalError("Matrix Market reader rows mismatch", AMGX_ERR_IO); } } else { return false; } return true; } // Distrubuted version void skip_vals(ifstream &fin, int num_values) { double val; for (long int i = 0; i < num_values; i++) { fin >> val; } } template <typename T> T getBoostValue(); template <> float getBoostValue() { return 1.e-6f; } template <> double getBoostValue() { return 1.e-6; } template <> cuComplex getBoostValue() { return make_cuComplex(1e-6f, 0.f); } template <> cuDoubleComplex getBoostValue() { return make_cuDoubleComplex(1e-6, 0.); } template<AMGX_VecPrecision prec> struct vecRealToComplexPrec { static const AMGX_VecPrecision CPrec = prec; }; template <> struct vecRealToComplexPrec<AMGX_vecDouble> { static const AMGX_VecPrecision CPrec = AMGX_vecDoubleComplex; }; template <> struct vecRealToComplexPrec<AMGX_vecFloat> { static const AMGX_VecPrecision CPrec = AMGX_vecComplex; }; template<AMGX_MatPrecision prec> struct matRealToComplexPrec { static const AMGX_MatPrecision CPrec = prec; }; template <> struct matRealToComplexPrec<AMGX_matDouble> { static const AMGX_MatPrecision CPrec = AMGX_matDoubleComplex; }; template <> struct matRealToComplexPrec<AMGX_matFloat> { static const AMGX_MatPrecision CPrec = AMGX_matComplex; }; template <class TReal, class TComplex, class PartVec, bool init_flag> struct ReadAndConvert; template <class TReal, class TComplex, class PartVec> struct ReadAndConvert<TReal, TComplex, PartVec, true> { static void readAndConvert(std::ifstream &fin, const char *fname, int conversion_type , Matrix<TReal> &A , Vector<TReal> &b , Vector<TReal> &x , unsigned int props , const PartVec &rank_rows) { FatalError("Converversion from complex matrix to ERF, but one of the complex modes is specified", AMGX_ERR_IO); } }; template 
<class TReal, class TComplex, class PartVec> struct ReadAndConvert<TReal, TComplex, PartVec, false> { static void readAndConvert(std::ifstream &fin, const char *fname, int conversion_type , Matrix<TReal> &A , Vector<TReal> &b , Vector<TReal> &x , unsigned int props , const PartVec &rank_rows) { AMG_Config tcfg; Matrix<TComplex> Ac; Vector<TComplex> xc, bc; typedef typename TReal::MatPrec RValueTypeA; typedef typename TReal::VecPrec RValueTypeB; typedef typename TComplex::MatPrec CValueTypeA; typedef typename TComplex::VecPrec CValueTypeB; printf("ERF conversion: reading complex valued system\n"); fflush(stdout); ReadMatrixMarket<TComplex>::readMatrixMarket(fin, fname, Ac, bc, xc, tcfg); // modes = 1..4 - convert to the scalar system of 2x size using K1..K4 formulation if (conversion_type > 0 && conversion_type < 5) { // fill CSR values, common for all modes int cnrows = Ac.get_num_rows(); int cnnz = Ac.get_num_nz(); int nrows = cnrows * 2; int nnz = Ac.get_num_nz() * 4; A.addProps(CSR); A.resize(nrows, nrows, nnz); // set row offsets for (int i = 0; i < cnrows; i++) { A.row_offsets[i] = Ac.row_offsets[i] * 2; A.row_offsets[i + cnrows] = Ac.row_offsets[i] * 2 + cnnz * 2; } A.row_offsets[nrows] = nnz; // set col indices for (int r = 0; r < nrows ; r++) { int *Ac_col_ptr = Ac.col_indices.raw() + Ac.row_offsets[r % cnrows]; int row_nnz = A.row_offsets[r + 1] - A.row_offsets[r]; for (int c = 0; c < (row_nnz / 2); c++) { A.col_indices[A.row_offsets[r] + c] = Ac_col_ptr[c]; A.col_indices[A.row_offsets[r] + c + row_nnz / 2] = Ac_col_ptr[c] + nrows / 2; } } // set values for (int r = 0; r < cnrows; r++) { CValueTypeA *Ac_values = Ac.values.raw() + Ac.row_offsets[r]; int row_nnz = Ac.row_offsets[r + 1] - Ac.row_offsets[r]; for (int c = 0; c < row_nnz; c++) { switch (conversion_type) { case 1: A.values[A.row_offsets[r] + c] = types::get_re(Ac_values[c]); A.values[A.row_offsets[r] + c + row_nnz] = -types::get_im(Ac_values[c]); A.values[A.row_offsets[r] + c + 2 * cnnz] = types::get_im(Ac_values[c]); A.values[A.row_offsets[r] + c + row_nnz + 2 * cnnz] = types::get_re(Ac_values[c]); break; case 2: A.values[A.row_offsets[r] + c] = types::get_re(Ac_values[c]); A.values[A.row_offsets[r] + c + row_nnz] = types::get_im(Ac_values[c]); A.values[A.row_offsets[r] + c + 2 * cnnz] = types::get_im(Ac_values[c]); A.values[A.row_offsets[r] + c + row_nnz + 2 * cnnz] = -types::get_re(Ac_values[c]); break; case 3: A.values[A.row_offsets[r] + c] = types::get_im(Ac_values[c]); A.values[A.row_offsets[r] + c + row_nnz] = types::get_re(Ac_values[c]); A.values[A.row_offsets[r] + c + 2 * cnnz] = types::get_re(Ac_values[c]); A.values[A.row_offsets[r] + c + row_nnz + 2 * cnnz] = -types::get_im(Ac_values[c]); break; case 4: A.values[A.row_offsets[r] + c] = types::get_im(Ac_values[c]); A.values[A.row_offsets[r] + c + row_nnz] = -types::get_re(Ac_values[c]); A.values[A.row_offsets[r] + c + 2 * cnnz] = types::get_re(Ac_values[c]); A.values[A.row_offsets[r] + c + row_nnz + 2 * cnnz] = types::get_im(Ac_values[c]); break; } } } // set b b.set_block_dimx(1); b.set_block_dimy(1); b.resize(nrows); for (int r = 0; r < cnrows; r++) { switch (conversion_type) { case 1: case 2: b[r] = types::get_re(bc[r]); b[r + cnrows] = types::get_im(bc[r]); break; case 3: case 4: b[r] = types::get_im(bc[r]); b[r + cnrows] = types::get_re(bc[r]); break; } } //set x if needed x.set_block_dimx(1); x.set_block_dimy(1); if (xc.size() > 0) { // set b x.resize(nrows); for (int r = 0; r < cnrows; r++) { switch (conversion_type) { case 1: case 3: x[r] = 
types::get_re(xc[r]); x[r + cnrows] = types::get_im(xc[r]); break; case 2: case 4: x[r] = types::get_re(xc[r]); x[r + cnrows] = -types::get_im(xc[r]); break; } } } A.computeDiagonal(); std::stringstream info; info << "Converted complex matrix " << cnrows << "x" << cnrows << " with " << cnnz << " nonzeros to the ERF - using K" << conversion_type << " formulation." << std::endl; std::cout << info.str(); } // modes 221..224 - convert to the system of the same size but with 2x2 blocks, // where each block converted from original Aij value using K1..K4 formulation // this switch is for original blocksize of 1 else if (conversion_type > 220 && conversion_type < 225 && Ac.get_block_dimy()*Ac.get_block_dimx() == 1) { // fill CSR values, common for all modes int nrows = Ac.get_num_rows(); int nnz = Ac.get_num_nz(); A.addProps(Ac.hasProps(DIAG) ? CSR | DIAG : CSR); A.resize(nrows, nrows, nnz, 2 * Ac.get_block_dimx(), 2 * Ac.get_block_dimy(), 1); thrust::copy(Ac.row_offsets.begin(), Ac.row_offsets.end(), A.row_offsets.begin()); thrust::copy(Ac.col_indices.begin(), Ac.col_indices.end(), A.col_indices.begin()); for (int i = 0; i < nnz; i++) { switch (conversion_type) { case 221: A.values[4 * i ] = types::get_re(Ac.values[i]); A.values[4 * i + 1] = -types::get_im(Ac.values[i]); A.values[4 * i + 2] = types::get_im(Ac.values[i]); A.values[4 * i + 3] = types::get_re(Ac.values[i]); break; case 222: A.values[4 * i ] = types::get_re(Ac.values[i]); A.values[4 * i + 1] = types::get_im(Ac.values[i]); A.values[4 * i + 2] = types::get_im(Ac.values[i]); A.values[4 * i + 3] = -types::get_re(Ac.values[i]); break; case 223: A.values[4 * i ] = types::get_im(Ac.values[i]); A.values[4 * i + 1] = types::get_re(Ac.values[i]); A.values[4 * i + 2] = types::get_re(Ac.values[i]); A.values[4 * i + 3] = -types::get_im(Ac.values[i]); break; case 224: A.values[4 * i ] = types::get_im(Ac.values[i]); A.values[4 * i + 1] = -types::get_re(Ac.values[i]); A.values[4 * i + 2] = types::get_re(Ac.values[i]); A.values[4 * i + 3] = types::get_im(Ac.values[i]); break; } } A.computeDiagonal(); b.resize(nrows * 2); b.set_block_dimx(1); b.set_block_dimy(2); for (int r = 0; r < nrows; r++) { switch (conversion_type) { case 221: case 222: b[2 * r ] = types::get_re(bc[r]); b[2 * r + 1] = types::get_im(bc[r]); break; case 223: case 224: b[2 * r ] = types::get_im(bc[r]); b[2 * r + 1] = types::get_re(bc[r]); break; } } //set x if needed if (xc.size() > 0) { // set b x.resize(nrows * 2); x.set_block_dimx(1); x.set_block_dimy(2); for (int r = 0; r < nrows; r++) { switch (conversion_type) { case 221: case 223: x[2 * r ] = types::get_re(xc[r]); x[2 * r + 1] = types::get_im(xc[r]); break; case 222: case 224: x[2 * r ] = types::get_re(xc[r]); x[2 * r + 1] = -types::get_im(xc[r]); break; } } } std::stringstream info; info << "Converted complex matrix " << nrows << "x" << nrows << " with " << nnz << " nonzeros to the (2x2) block-ERF - using K" << conversion_type - 220 << " formulation." << std::endl; std::cout << info.str(); } // modes 221..224 - convert to the system of the same size but with 2x2 blocks, // where each block converted from original Aij value using K1..K4 formulation // this switch is for original blocksize of 1 else if (conversion_type > 220 && conversion_type < 225) { // fill CSR values, common for all modes int nrows = Ac.get_num_rows(); int nnz = Ac.get_num_nz(); A.addProps(Ac.hasProps(DIAG) ? 
(CSR | DIAG) : CSR); int bdimx = 2 * Ac.get_block_dimx(); int bdimy = 2 * Ac.get_block_dimy(); A.resize(nrows, nrows, nnz, bdimx, bdimy, 1); thrust::copy(Ac.row_offsets.begin(), Ac.row_offsets.end(), A.row_offsets.begin()); thrust::copy(Ac.col_indices.begin(), Ac.col_indices.end(), A.col_indices.begin()); thrust::fill(A.values.begin(), A.values.end(), amgx::types::util<RValueTypeA>::get_zero()); std::cout << "Input block system " << Ac.get_block_dimx() << "x" << Ac.get_block_dimy() << " will be converted to system with blocks " << bdimx << "x" << bdimy << std::endl; std::cout << "Converting values...\n"; // iterate through blocks for (int i = 0; i < nnz; i++) { int block_offsetc = Ac.get_block_dimx() * Ac.get_block_dimy() * i; int block_offset = bdimx * bdimy * i; // iterate through values in the blocks for (int j = 0; j < Ac.get_block_dimx()*Ac.get_block_dimy(); j++) { int cx = j / Ac.get_block_dimy(); int cy = j % Ac.get_block_dimy(); // interleaved blocks int val_offset = block_offset + cx * bdimx + cy; // in-place blocks //int val_offset = block_offset + 2 * cx * bdimx + 2 * cy; switch (conversion_type) { case 221: // interleaved blocks A.values[val_offset ] = types::get_re(Ac.values[block_offsetc + j]); A.values[val_offset + Ac.get_block_dimx() ] = -types::get_im(Ac.values[block_offsetc + j]); A.values[val_offset + 2 * Ac.get_block_size() ] = types::get_im(Ac.values[block_offsetc + j]); A.values[val_offset + 2 * Ac.get_block_size() + Ac.get_block_dimx() ] = types::get_re(Ac.values[block_offsetc + j]); // in-place blocks //A.values[val_offset ] = types::get_re(Ac.values[block_offsetc + j]); //A.values[val_offset + 1 ] = -types::get_im(Ac.values[block_offsetc + j]); //A.values[val_offset + bdimx ] = types::get_im(Ac.values[block_offsetc + j]); //A.values[val_offset + 1 + bdimx ] = types::get_re(Ac.values[block_offsetc + j]); break; case 222: A.values[val_offset ] = types::get_re(Ac.values[block_offsetc + j]); A.values[val_offset + 1 ] = types::get_im(Ac.values[block_offsetc + j]); A.values[val_offset + bdimx] = types::get_im(Ac.values[block_offsetc + j]); A.values[val_offset + 1 + bdimx] = -types::get_re(Ac.values[block_offsetc + j]); break; case 223: A.values[val_offset ] = types::get_im(Ac.values[block_offsetc + j]); A.values[val_offset + 1 ] = types::get_re(Ac.values[block_offsetc + j]); A.values[val_offset + bdimx] = types::get_re(Ac.values[block_offsetc + j]); A.values[val_offset + 1 + bdimx] = -types::get_im(Ac.values[block_offsetc + j]); break; case 224: A.values[val_offset ] = types::get_im(Ac.values[block_offsetc + j]); A.values[val_offset + 1 ] = -types::get_re(Ac.values[block_offsetc + j]); A.values[val_offset + bdimx] = types::get_re(Ac.values[block_offsetc + j]); A.values[val_offset + 1 + bdimx] = types::get_im(Ac.values[block_offsetc + j]); break; } } } std::cout << "Compute diagonal\n"; A.computeDiagonal(); // if external diagonal - convert those values too if (A.hasProps(DIAG)) { std::cout << "Convert diagonal (warning!)\n"; for (int i = 0; i < Ac.get_num_rows(); i++) { int block_offsetc = Ac.diag[i] * Ac.get_block_dimx() * Ac.get_block_dimy(); int block_offset = A.diag[i] * bdimx * bdimy; for (int j = 0; j < Ac.get_block_dimx()*Ac.get_block_dimy(); j++) { int val_offset = block_offset + (j / bdimx) * 2 * bdimx + (j % bdimx) * 2; switch (conversion_type) { case 221: A.values[val_offset ] = types::get_re(Ac.values[block_offsetc + j]); A.values[val_offset + 1 ] = -types::get_im(Ac.values[block_offsetc + j]); A.values[val_offset + bdimx] = 
types::get_im(Ac.values[block_offsetc + j]); A.values[val_offset + 1 + bdimx] = types::get_re(Ac.values[block_offsetc + j]); break; case 222: A.values[val_offset ] = types::get_re(Ac.values[block_offsetc + j]); A.values[val_offset + 1 ] = -types::get_im(Ac.values[block_offsetc + j]); A.values[val_offset + bdimx] = types::get_im(Ac.values[block_offsetc + j]); A.values[val_offset + 1 + bdimx] = types::get_re(Ac.values[block_offsetc + j]); break; case 223: A.values[val_offset ] = types::get_re(Ac.values[block_offsetc + j]); A.values[val_offset + 1 ] = -types::get_im(Ac.values[block_offsetc + j]); A.values[val_offset + bdimx] = types::get_im(Ac.values[block_offsetc + j]); A.values[val_offset + 1 + bdimx] = types::get_re(Ac.values[block_offsetc + j]); break; case 224: A.values[val_offset ] = types::get_re(Ac.values[block_offsetc + j]); A.values[val_offset + 1 ] = -types::get_im(Ac.values[block_offsetc + j]); A.values[val_offset + bdimx] = types::get_im(Ac.values[block_offsetc + j]); A.values[val_offset + 1 + bdimx] = types::get_re(Ac.values[block_offsetc + j]); break; } } } } std::cout << "Convert rhs\n"; b.resize(nrows * bdimy); b.set_block_dimx(1); b.set_block_dimy(bdimy); // interleaved blocks for (int r = 0; r < nrows; r++) { for (int j = 0; j < Ac.get_block_dimy(); j++) { switch (conversion_type) { case 221: case 222: b[r * bdimy + j ] = types::get_re(bc[r * Ac.get_block_dimy() + j]); b[r * bdimy + j + Ac.get_block_dimy()] = types::get_im(bc[r * Ac.get_block_dimy() + j]); break; case 223: case 224: b[r * bdimy + j ] = types::get_im(bc[r * Ac.get_block_dimy() + j]); b[r * bdimy + j + Ac.get_block_dimy()] = types::get_re(bc[r * Ac.get_block_dimy() + j]); break; } } } std::cout << "Convert soln\n"; //set x if needed if (xc.size() > 0) { x.resize(nrows * bdimx); x.set_block_dimx(1); x.set_block_dimy(bdimy); // interleaved blocks for (int r = 0; r < nrows; r++) { for (int j = 0; j < Ac.get_block_dimx(); j++) { switch (conversion_type) { case 221: case 223: x[r * bdimx + j ] = types::get_re(xc[r * Ac.get_block_dimx() + j]); x[r * bdimx + j + Ac.get_block_dimx()] = types::get_im(xc[r * Ac.get_block_dimx() + j]); break; case 222: case 224: x[r * bdimx + j ] = types::get_re(xc[r * Ac.get_block_dimx() + j]); x[r * bdimx + j + Ac.get_block_dimx()] =-types::get_im(xc[r * Ac.get_block_dimx() + j]); break; } } } } std::stringstream info; info << "Converted complex matrix " << nrows << "x" << nrows << " with " << nnz << " nonzeros to the (2x2) block-ERF - using K" << conversion_type - 220 << " formulation." << std::endl; std::cout << info.str(); } } }; template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> bool ReadMatrixMarket<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::readMatrixMarket(std::ifstream &fin, const char *fname, Matrix_h &A , Vector_h &b , Vector_h &x , const AMG_Config &cfg , unsigned int props , const IVector_h &rank_rows // row indices for given rank ) { fin.seekg(std::ios::beg); typedef typename Matrix_h::index_type IndexType; typedef typename Matrix_h::value_type ValueTypeA;// change later back to load in high precision! 
typedef typename TConfig_h::VecPrec ValueTypeB; std::string warning; int complex_conversion = cfg.AMG_Config::getParameter<IndexType>("complex_conversion", "default"); // if we are in the real-valued mode and complex conversion is specified and we are reading actual matrix if (complex_conversion != 0 && !types::util<ValueTypeA>::is_complex && !types::util<ValueTypeB>::is_complex && !io_config::hasProps(io_config::SIZE, props)) { // read system as complex valued system of same precision and convert it to our matrices typedef typename TConfig_h::template setVecPrec<vecRealToComplexPrec<TConfig_h::vecPrec>::CPrec>::Type TConfig_h_cvec; typedef typename TConfig_h_cvec::template setMatPrec<matRealToComplexPrec<TConfig_h::matPrec>::CPrec>::Type TComplex_h; ReadAndConvert<TConfig_h, TComplex_h, IVector_h, types::util<ValueTypeA>::is_complex>::readAndConvert(fin, fname, complex_conversion, A, b, x, props, rank_rows); return true; } //skip comments and read amgx relevant parameters std::list<string> nvConfig; std::list<string> mmConfig; // Workaround section to convert external diagonal into internal // in CLASSICAL bool isClassical = false; std::string solver_scope, solver_value; std::string precond_scope, precond_value; AlgorithmType algorithm_s, algorithm_p; Resources *resources = A.getResources(); if (resources != NULL) { resources->getResourcesConfig()->getParameter<std::string>("solver", solver_value, "default", solver_scope); algorithm_s = resources->getResourcesConfig()->getParameter<AlgorithmType>("algorithm", solver_scope); resources->getResourcesConfig()->getParameter<std::string>("preconditioner", precond_value, solver_scope, precond_scope); algorithm_p = resources->getResourcesConfig()->getParameter<AlgorithmType>("algorithm", precond_scope); if (algorithm_s == CLASSICAL && algorithm_p == CLASSICAL) { isClassical = true; } } // End of CLASSICAL workaround bool has_zero_diagonal_element = false; bool check_zero_diagonal = false; const bool boost_zero_diagonal = false; ValueTypeA boostValue = getBoostValue<ValueTypeA>(); if (boost_zero_diagonal) { check_zero_diagonal = true; } while (fin.peek() == '%') { std::string nvString; int fpos = fin.tellg(); // store current position getline(fin, nvString); std::transform(nvString.begin(), nvString.end(), nvString.begin(), ::tolower); std::istringstream nvString_s(nvString); std::string nvFormat; nvString_s >> nvFormat; if (nvFormat.size() > 2) { if ((nvFormat.substr(2, nvFormat.size()) == "nvamg") || (nvFormat.substr(2, nvFormat.size()) == "amgx")) { std::copy(istream_iterator<string>(nvString_s), istream_iterator<string>(), back_inserter<list<string> >(nvConfig)); } if (nvFormat.substr(2, nvFormat.size()) == "matrixmarket") { std::copy(istream_iterator<string>(nvString_s), istream_iterator<string>(), back_inserter<list<string> >(mmConfig)); } } fin.seekg(fpos, std::ios_base::beg); fin.ignore(INT_MAX, '\n'); } // process MatrixMarket config string bool symmetric = false; bool skew_symmetric = false; bool hermitian = false; if (mmConfig.size() > 0) { for (list<string>::const_iterator it = mmConfig.begin(); it != mmConfig.end(); ++it) { if (*it == "symmetric") {symmetric = true; continue;} if (*it == "complex") { if (!types::util<ValueTypeA>::is_complex && complex_conversion == 0) { FatalError("Trying to load file with complex matrix to real valued matrix structure", AMGX_ERR_IO); } continue; } if (*it == "real") { if (!types::util<ValueTypeA>::is_real) { FatalError("Trying to load file with real matrix to complex valued matrix structure", 
AMGX_ERR_IO); } } if (*it == "pattern") {FatalError("'pattern' is not supported in %%MatrixMarket format string", AMGX_ERR_IO);} if (*it == "skew-symmetric") {symmetric = true; skew_symmetric = true; continue;} //if (*it == "skew-symmetric") {FatalError("'skew-symmetric' is not supported in %%MatrixMarket format string", AMGX_ERR_IO);} if (*it == "hermitian") {hermitian = true; continue;} } } // process amgx config string int block_dimx = 1, block_dimy = 1, index_base = 1; bool diag_prop = false, rhs = false, soln = false, mtx = false, sorted = false; list<int> block_sizes; if (nvConfig.size() > 0) { for (list<string>::const_iterator it = nvConfig.begin(); it != nvConfig.end(); ++it) { if (*it == "diagonal") {diag_prop = true; continue;} if (*it == "rhs") {rhs = true; continue;} if (*it == "solution") {soln = true; continue;} if (*it == "sorted") {sorted = true; continue;} if (*it == "base0") {index_base = 0; continue;} if (isdigit((*it)[0])) { int bsize; istringstream(*it) >> bsize; block_sizes.push_back(bsize); continue;}; } } // CLASSICAL fix if (sorted && isClassical && diag_prop) { sorted = false; } // Currently not implemented sorted symmetric matrices if (sorted && symmetric || sorted && hermitian) { sorted = false; } if (std::find(mmConfig.begin(), mmConfig.end(), "matrix") != mmConfig.end()) { mtx = true; } if (block_sizes.size() == 2) { block_dimy = block_sizes.back(); block_dimx = block_sizes.front(); } else if (block_sizes.size() == 1) { block_dimy = block_dimx = block_sizes.back(); } int fpos = fin.tellg(); // store current position int rows, cols, entries; //read rows cols entries fin >> rows >> cols >> entries; if (rows % block_dimx != 0 || cols % block_dimy != 0 || entries % (block_dimx * block_dimy) != 0) { FatalError("Matrix dimensions do not match with block sizes", AMGX_ERR_IO); } rows /= block_dimx; cols /= block_dimy; entries /= (block_dimx * block_dimy); if (io_config::hasProps(io_config::SIZE, props)) { if (complex_conversion != 0 && block_dimy * block_dimx != 1) { FatalError("Complex conversion is supported only for non-coupled matrices with blocks of 1x1", AMGX_ERR_IO); } if (complex_conversion == 0) { A.set_num_rows(rows); A.set_num_cols(cols); A.set_block_dimy(block_dimy); A.set_block_dimx(block_dimx); } else if (complex_conversion > 0 && complex_conversion < 5) { // general ERF A.set_num_rows(rows * 2); A.set_num_cols(cols * 2); A.set_block_dimy(block_dimy); A.set_block_dimx(block_dimx); } else if (complex_conversion > 220 && complex_conversion < 225) { // 2x2 block ERF A.set_num_rows(rows); A.set_num_cols(cols); A.set_block_dimy(block_dimy * 2); // complex 1x1 only supported, which converts to 2x2 real blocks A.set_block_dimx(block_dimx * 2); } else { FatalError("Unsupported complex_conversion mode", AMGX_ERR_IO); } int num_entries = 0; if (symmetric || hermitian) { int i, j; int idiag = 0; ValueTypeA v; for (int e = 0; e < entries * (block_dimx * block_dimy); e++) { fin >> i >> j; LoadValueFromStream(fin, v); // skip explicit zeroes, only block_size=1 is supported if (block_dimx == 1 && block_dimy == 1 && types::util<ValueTypeA>::is_zero(v)) { continue; } if (i == j) { idiag++; } } num_entries = 2 * entries - idiag / (block_dimx * block_dimy); } else { if (isClassical && diag_prop) { num_entries = entries + rows; } else { num_entries = entries; } } if (complex_conversion == 0) { A.set_num_nz(num_entries); } else if (complex_conversion > 0 && complex_conversion < 5) { // general ERF A.set_num_nz(num_entries * 4); } else if (complex_conversion > 220 && 
complex_conversion < 225) { // 2x2 block ERF A.set_num_nz(num_entries); } else { FatalError("Unsupported complex_conversion mode", AMGX_ERR_IO); } return true; } warning = "Reading data...\n"; if (isClassical && diag_prop) { warning += "Warning: external diagonal will be converted into internal for CLASSICAL path\n"; } amgx_output(warning.c_str(), warning.length()); // check for consistent input if (io_config::hasProps(io_config::MTX, props)) { if (!mtx) { FatalError("Expecting 'matrix' keyword in %%MatrixMarket format string", AMGX_ERR_IO); } } else { if (mtx) { skip_vals(fin, 3 * entries * (block_dimy * block_dimx)); if (diag_prop) { skip_vals(fin, rows * block_dimy * block_dimx); } } } bool read_all = (rank_rows.size() == 0) ? true : false; const IVector_h &partRowVec = rank_rows; int n_rows_part = (read_all) ? rows : partRowVec.size(); std::map<const int, int> GlobalToLocalRowMap; // should try unordered_map std::map<const int, int>::const_iterator gtl_i; std::map<const int, int>::const_iterator gtl_j; // Generate inverse map for faster searching during the read if (!read_all) for (int i = 0; i < n_rows_part; i++) { GlobalToLocalRowMap.insert(std::pair<const int, int>(partRowVec[i], i)); } if (io_config::hasProps(io_config::MTX, props)) { int ival = 0, idiag = 0; int block_size = block_dimy * block_dimx; typedef std::map<const int, std::vector<ValueTypeA> > ColValuesMap_t; typedef std::pair<const int, std::vector<ValueTypeA> > ColValuesPair_t; typedef std::vector<ValueTypeA> ValuesVector_t; typedef std::vector<int> ColVector_t; std::vector<ColValuesMap_t> input; std::vector<int> nnz_per_row; //typename Matrix_h::MVector input_sorted_v; //IVector_h input_sorted_c; ValuesVector_t input_sorted_v; ColVector_t input_sorted_c; std::vector<int> trackDiag; if (check_zero_diagonal) { trackDiag.resize(n_rows_part, 0); } if (sorted) { nnz_per_row.resize(n_rows_part, 0); if (read_all) { input_sorted_v.resize(entries * block_size); input_sorted_c.resize(entries); } } else { input.resize(n_rows_part); } typename Matrix_h::MVector diag(n_rows_part * block_size, types::util<ValueTypeA>::get_zero()); std::vector<ValueTypeA> block_vals(block_size); //for each entry int i, j, ii, jj, i_old = -1; bool skip = false; bool has_ii = true, has_jj = false; if (symmetric || hermitian) { has_jj = true; } int explicit_zeroes = 0; for (int e = 0; e < entries; e++) { for (int kx = 0; kx < block_dimx; kx++) for (int ky = 0; ky < block_dimy; ky++) { //read entry fin >> i >> j; LoadValueFromStream(fin, block_vals[kx * block_dimy + ky]); // check we haven't been given a 0-indexed matrix if ((i == 0 || j == 0) && index_base == 1) { FatalError("Matrix Market format requires 1-based indexing. 
Use 'base0' AMGX format option to override.", AMGX_ERR_IO); } } // skip explicit zeroes, only block_size=1 is supported if (block_dimx == 1 && block_dimy == 1 && types::util<ValueTypeA>::is_zero(block_vals[0])) { explicit_zeroes++; if (i == j) { idiag++; has_zero_diagonal_element = true; if (check_zero_diagonal) { trackDiag[i - index_base] = 0; } } continue; } else { if (i == j) { if (check_zero_diagonal) { trackDiag[i - index_base] = 1; } } } i = (i - index_base) / block_dimx; j = (j - index_base) / block_dimy; if (!read_all) if (!symmetric && !hermitian) { if (i != i_old) // reduce overhead of searching in GlobalToLocalRowMap { has_ii = false; i_old = i; gtl_i = GlobalToLocalRowMap.find(i); if (gtl_i == GlobalToLocalRowMap.end()) { skip = true; continue; } else { has_ii = true; skip = false; ii = gtl_i->second; } } else if (skip) { continue; } } else { ii = i; jj = j; if (!read_all) { gtl_i = GlobalToLocalRowMap.find(i); gtl_j = GlobalToLocalRowMap.find(j); has_ii = has_jj = false; if (gtl_i != GlobalToLocalRowMap.end()) { has_ii = true; } if (gtl_j != GlobalToLocalRowMap.end()) { has_jj = true; } if (!has_ii && !has_jj) { continue; } else { if (has_ii) { ii = gtl_i->second; } if (has_jj) { jj = gtl_j->second; } } } } else { ii = i; if (symmetric || hermitian) { jj = j; } } if (sorted) { nnz_per_row[ii]++; if (!read_all) { input_sorted_v.insert(input_sorted_v.end(), block_vals.begin(), block_vals.end()); input_sorted_c.push_back(j); } else { std::copy(block_vals.begin(), block_vals.end(), &input_sorted_v[ival * block_size]); input_sorted_c[ival] = j; } ival++; } else { if (has_ii) { ival++; input[ii].insert(ColValuesPair_t(j, block_vals)); } if (has_jj) { ival++; if ((skew_symmetric || hermitian) && i != j) for (int k = 0; k < block_dimx * block_dimy; k++) { if (skew_symmetric) { block_vals[k] = types::util<ValueTypeA>::invert(block_vals[k]); } else if (hermitian) { block_vals[k] = types::util<ValueTypeA>::conjugate(block_vals[k]); } } input[jj].insert(ColValuesPair_t(i, block_vals)); } } if (i == j) { idiag++; std::copy(block_vals.begin(), block_vals.end(), &diag[ii * block_size]); } } // end of entries loop int diagIdx = 0; if (check_zero_diagonal) { for (int i = 0; i < rows; i++) { if (trackDiag[i] == 0) { trackDiag[diagIdx] = i; diagIdx++; } } } else { diagIdx = idiag; } if (has_zero_diagonal_element && block_dimx == 1 && block_dimy == 1) { if (check_zero_diagonal) { printf("Warning! Input matrix has zeroes on diagonal: %d %d\nZero diagonal elements are:\n", rows, diagIdx); for (int i = 0; i < diagIdx; i++) { printf("%d ", trackDiag[i]); } printf("\n"); } } if (boost_zero_diagonal && has_zero_diagonal_element && block_dimx == 1 && block_dimy == 1) { for (int i = 0; i < diagIdx; i++) { block_vals[0] = boostValue; input[ii].insert(ColValuesPair_t(trackDiag[i], block_vals)); } } if (!(symmetric || hermitian) && (ival + explicit_zeroes) != entries && read_all) { FatalError("Matrix Market mismatch in number of entries", AMGX_ERR_IO); } IndexType n_nonzeros_part; if (symmetric || hermitian) { n_nonzeros_part = ival - idiag; } else { n_nonzeros_part = ival; } //if (symmetric) // printf("Matrix is symmetric. 
Counted %d entries and %d diag elements, corresponding to %d nonzeroes\n ", ival, idiag, n_nonzeros_part); if (sorted && input_sorted_c.size() != n_nonzeros_part) { //printf("input_sorted_c.size() = %d n_nonzeros_part = %d\n", input_sorted_c.size(), n_nonzeros_part); FatalError("Matrix Market mismatch in number of entries", AMGX_ERR_IO); } if (sorted && input_sorted_v.size() != n_nonzeros_part * block_size) { //printf("input_sorted_v.size() = %d n_nonzeros_part*block_size = %d\n", input_sorted_v.size(), n_nonzeros_part*block_size); FatalError("Matrix Market mismatch in number of entries", AMGX_ERR_IO); } A.resize(0, 0, 0); //A.delProps(COO); A.addProps(CSR); if (diag_prop && !isClassical) { A.addProps(DIAG); } else { A.delProps(DIAG); } if (diag_prop) { LoadVector(fin, read_all, rows, block_size, diag, GlobalToLocalRowMap); } if (isClassical && diag_prop) { n_nonzeros_part = n_nonzeros_part + n_rows_part; for (int i = 0; i < n_rows_part; i++) { std::copy(&diag[i * block_size], &diag[i * block_size] + block_size, block_vals.begin()); input[i].insert(ColValuesPair_t(read_all ? i : rank_rows[i], block_vals)); } } A.resize(n_rows_part, cols, n_nonzeros_part, block_dimx, block_dimy); ValueTypeA *dia_values_ptr = thrust::raw_pointer_cast(&(A.values[block_dimx * block_dimy * n_nonzeros_part])); if (A.hasProps(CSR)) { A.row_offsets[0] = 0; ival = 0; if (!sorted) { for (int i = 0; i < n_rows_part; i++) { for (auto it = input[i].begin(); it != input[i].end(); it++) { A.col_indices[ival] = it->first; for (int k = 0; k < block_size; k++) { A.values[ival * block_size + k] = it->second[k]; } ival++; } A.row_offsets[i + 1] = ival; } } else { A.row_offsets[0] = 0; for (int i = 0; i < n_rows_part; i++) { A.row_offsets[i + 1] = A.row_offsets[i] + nnz_per_row[i]; } if (A.row_offsets[n_rows_part] != n_nonzeros_part) { FatalError("Matrix Market mismatch in number of entries", AMGX_ERR_IO); } std::copy(input_sorted_c.begin(), input_sorted_c.end(), A.col_indices.begin()); std::copy(input_sorted_v.begin(), input_sorted_v.end(), A.values.begin()); } } else { FatalError("Matrix Market reader COO output is not supported", AMGX_ERR_IO); } if (diag_prop && !isClassical) { A.computeDiagonal(); } if (A.hasProps(DIAG) && !isClassical) for (int i = 0; i < diag.size(); i++) { dia_values_ptr[i] = diag[i]; } }// End of load matrix if (!io_config::hasProps(io_config::RHS, props)) if (rhs) { skip_vals(fin, rows * block_dimy); } if (io_config::hasProps(io_config::RHS, props)) { b.resize(n_rows_part * block_dimy); b.set_block_dimy(block_dimy); b.set_block_dimx(1); if (rhs) { LoadVector(fin, read_all, rows, block_dimy, b, GlobalToLocalRowMap); } else { //initialize RHS if (io_config::hasProps(io_config::GEN_RHS, props)) { Vector_h b0(n_rows_part * block_dimy, types::util<ValueTypeB>::get_one()); b0.set_block_dimy(block_dimy); b0.set_block_dimx(1); warning = "RHS vector was not found. Using RHS b=A*e where e=[1,…,1]^T\n"; A.set_initialized(true); multiply(A, b0, b); A.set_initialized(false); } else { warning = "RHS vector was not found. Using RHS b=[1,…,1]^T\n"; for (int i = 0; i < n_rows_part * block_dimy; i++) { b[i] = types::util<ValueTypeB>::get_one(); } } amgx_output(warning.c_str(), warning.length()); } } // try to read initial guess if (io_config::hasProps(io_config::SOLN, props)) { x.resize(n_rows_part * block_dimx); x.set_block_dimy(block_dimy); x.set_block_dimx(1); if (soln) { LoadVector(fin, read_all, rows, block_dimx, x, GlobalToLocalRowMap); } else { warning = "Solution vector was not found. 
Setting initial solution to x=[0,…,0]^T\n"; for (int i = 0; i < n_rows_part * block_dimx; i++) { x[i] = types::util<ValueTypeB>::get_zero(); } } amgx_output(warning.c_str(), warning.length()); } if (rank_rows.size() > 0) { A.set_is_matrix_read_partitioned(true); b.set_is_vector_read_partitioned(true); if (x.size() > 0) { x.set_is_vector_read_partitioned(true); } } warning = ""; if (has_zero_diagonal_element || skew_symmetric) { warning += "Warning: Matrix has at least one zero on its diagonal\n"; } warning = +"Finished reading\n"; amgx_output(warning.c_str(), warning.length()); return true; } // Distrubuted version template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> bool ReadMatrixMarket<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::readMatrixMarketV2(std::ifstream &fin, const char *fname, Matrix_h &A , Vector_h &b , Vector_h &x , const AMG_Config &cfg , unsigned int props , const IVector_h &rank_rows // row indices for given rank ) { fin.seekg(std::ios::beg); typedef typename Matrix_h::index_type IndexType; typedef typename Matrix_h::value_type ValueTypeA;// change later back to load in high precision! typedef typename TConfig_h::VecPrec ValueTypeB; //skip comments while (fin.peek() == '%') { fin.ignore(INT_MAX, '\n'); } int rows, cols, entries, block_dimx, block_dimy, diag_prop; //read rows cols entries fin >> rows >> cols >> entries >> block_dimx >> block_dimy >> diag_prop; if (io_config::hasProps(io_config::SIZE, props)) { A.set_num_rows(rows); A.set_num_cols(cols); A.set_num_nz(entries); A.set_block_dimy(block_dimy); A.set_block_dimx(block_dimx); return true; } fflush(stdout); bool read_all = (rank_rows.size() == 0) ? true : false; const IVector_h &partRowVec = rank_rows; int n_rows_part = (read_all) ? 
rows : partRowVec.size(); std::map<const int, int> GlobalToLocalRowMap; // should try unordered_map std::map<const int, int>::const_iterator gtl_it; // Generate inverse map for faster searching during the read if (!read_all) for (int i = 0; i < n_rows_part; i++) { GlobalToLocalRowMap.insert(std::pair<const int, int>(partRowVec[i], i)); } typedef std::map<const int, std::vector<ValueTypeA> > ColValuesMap_t; typedef std::pair<const int, std::vector<ValueTypeA> > ColValuesPair_t; std::vector<ColValuesMap_t> input(n_rows_part); int ival = 0; int block_size = block_dimy * block_dimx; typename Matrix_h::MVector diag(n_rows_part * block_size, types::util<ValueTypeA>::get_zero()); std::vector<ValueTypeA> block_vals(block_size); //for each entry for (int e = 0; e < entries; e++) { int i, j; //read entry fin >> i >> j; // check we haven't been given a 0-indexed matrix if (i == 0 || j == 0) { FatalError("Matrix Market format requires 1-based indexing", AMGX_ERR_IO); } for (int k = 0; k < block_size; k++) { LoadValueFromStream(fin, block_vals[k]); } if (!read_all) { gtl_it = GlobalToLocalRowMap.find(i - 1); if (gtl_it != GlobalToLocalRowMap.end()) { input[gtl_it->second].insert(ColValuesPair_t(j - 1, block_vals)); if (i == j) for (int k = 0; k < block_size; k++) { diag[block_size * gtl_it->second + k] = block_vals[k]; } ival++; } } else { input[i - 1].insert(ColValuesPair_t(j - 1, block_vals)); if (i == j) for (int k = 0; k < block_size; k++) { diag[block_size * (i - 1) + k] = block_vals[k]; } ival++; } } if (ival != entries && read_all) { FatalError("Matrix Market mismatch in number of entries", AMGX_ERR_IO); } IndexType n_nonzeros_part = ival; A.resize(0, 0, 0); //A.delProps(COO); A.addProps(CSR); if (diag_prop) { A.addProps(DIAG); } else { A.delProps(DIAG); } A.resize(n_rows_part, cols, n_nonzeros_part, block_dimx, block_dimy); ValueTypeA *dia_values_ptr = thrust::raw_pointer_cast(&(A.values[block_dimx * block_dimy * n_nonzeros_part])); if (A.hasProps(CSR)) { A.row_offsets[0] = 0; ival = 0; for (int i = 0; i < n_rows_part; i++) { for (auto it = input[i].begin(); it != input[i].end(); it++) { A.col_indices[ival] = it->first; for (int k = 0; k < block_size; k++) { A.values[ival * block_size + k] = it->second[k]; } ival++; } A.row_offsets[i + 1] = ival; } } else { FatalError("Matrix Market reader COO output is not supported", AMGX_ERR_IO); } if (diag_prop) { A.computeDiagonal(); LoadVector(fin, read_all, rows, block_size, diag, GlobalToLocalRowMap); } if (A.hasProps(DIAG)) for (int i = 0; i < diag.size(); i++) { dia_values_ptr[i] = diag[i]; } if (io_config::hasProps(io_config::RHS, props)) { b.resize(n_rows_part * block_dimy); b.set_block_dimy(block_dimy); //initialize RHS for (int i = 0; i < n_rows_part * block_dimy; i++) { b[i] = types::util<ValueTypeB>::get_one(); } //read num rows fin >> rows; LoadVector(fin, read_all, rows / block_dimy, block_dimy, b, GlobalToLocalRowMap); } // try to read initial guess if (io_config::hasProps(io_config::SOLN, props)) { fin >> rows; if (rows) { x.resize(n_rows_part * block_dimx); x.set_block_dimy(block_dimx); LoadVector(fin, read_all, rows / block_dimx, block_dimx, x, GlobalToLocalRowMap); } else { x.resize(0); } } if (rank_rows.size() > 0) { A.set_is_matrix_read_partitioned(true); b.set_is_vector_read_partitioned(true); if (x.size() > 0) { x.set_is_vector_read_partitioned(true); } } return true; } template <typename TSRC, typename TDST> void val_copy(const TSRC *src, TDST *dst, int size) { for (int i = 0; i < size; i++) { dst[i] = static_cast<TDST>(src[i]); } } 
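// Note: the generic val_copy above narrows via static_cast, which works for real
// scalar types (e.g. double -> float) but not for the CUDA complex structs, since
// cuDoubleComplex and cuComplex have no conversion operator between them. The
// specialization below therefore routes the narrowing through
// types::util<cuDoubleComplex>::to_downtype instead.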
template <> void val_copy<cuDoubleComplex, cuComplex>(const cuDoubleComplex *src, cuComplex *dst, int size) { for (int i = 0; i < size; i++) { dst[i] = types::util<cuDoubleComplex>::to_downtype(src[i]); } } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> bool ReadNVAMGBinary<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::read(std::ifstream &finstr, const char *fnamec , Matrix_h &A , Vector_h &b, Vector_h &x , const AMG_Config &cfg , unsigned int props , const IVector_h &rank_rows ) { typedef typename Matrix_h::index_type IndexType; typedef typename Matrix_h::value_type ValueTypeA; typedef typename Vector_h::value_type ValueTypeB; // change back to matrix type later typedef typename types::util<ValueTypeA>::uptype UpValueTypeA; size_t is_read; std::string err; finstr.close(); FILE *fin = fopen(fnamec, "rb"); if (fin == NULL) { err = "Error: couldn't open file " + std::string(fnamec); } char text_header[255]; uint32_t system_flags [9]; is_read = fread(text_header, sizeof(char), strlen("%%NVAMGBinary\n"), fin); is_read = fread(system_flags, sizeof(uint32_t), 9, fin); //bool is_mtx = system_flags[0]; bool is_rhs = system_flags[1]; bool is_soln = system_flags[2]; uint32_t matrix_format = system_flags[3]; bool diag = system_flags[4]; uint32_t block_dimx = system_flags[5]; uint32_t block_dimy = system_flags[6]; uint32_t num_rows = system_flags[7]; uint32_t num_nz = system_flags[8]; if (io_config::hasProps(io_config::SIZE, props)) { A.set_num_rows(num_rows); A.set_num_cols(num_rows); A.set_num_nz(num_nz); A.set_block_dimy(block_dimy); A.set_block_dimx(block_dimx); fclose(fin); return true; } long int data_pos = ftell(fin); IVector_h *partRowVec_p = NULL; if (rank_rows.size() == 0) { partRowVec_p = new IVector_h(num_rows); thrust::sequence(partRowVec_p->begin(), partRowVec_p->end()); cudaCheckError(); } else { partRowVec_p = (IVector_h *) &rank_rows; } IVector_h &partRowVec = *partRowVec_p; int n_rows_part = partRowVec.size(); IVector_h row_offsets_part(n_rows_part + 1); IVector_h row_start_glb(n_rows_part); // Store global row start positions here int beginEnd[2]; int n_nonzeros_part = 0; for (int i = 0; i < partRowVec.size(); i++) { if (fseek(fin, data_pos + partRowVec[i]*sizeof(int), SEEK_SET) != 0) { FatalError("fseek error", AMGX_ERR_IO); } is_read = fread(beginEnd, sizeof(int), 2, fin); if (is_read != 2) { err = "fread failed reading row_offsets, exiting"; FatalError(err, AMGX_ERR_IO); } row_start_glb[i] = beginEnd[0]; row_offsets_part[i] = n_nonzeros_part; n_nonzeros_part += beginEnd[1] - beginEnd[0]; } row_offsets_part[n_rows_part] = n_nonzeros_part; A.delProps(DIAG | COLORING); if ((matrix_format & COMPLEX) && types::util<ValueTypeA>::is_real) { FatalError("Matrix is in complex format, but reading as real AMGX mode", AMGX_ERR_IO); } if (!(matrix_format & COMPLEX) && types::util<ValueTypeA>::is_complex) { FatalError("Matrix is in real format, but reading as complex AMGX mode", AMGX_ERR_IO); } if (diag) { A.addProps(DIAG); } if (!(matrix_format & 1)) { A.addProps(CSR); } else { FatalError("COO matrix binary format is not supported for reading.", AMGX_ERR_IO); } A.resize(n_rows_part, num_rows, n_nonzeros_part, block_dimx, block_dimy); IndexType *row_offsets_ptr = A.row_offsets.raw(); IndexType *column_indices_ptr = A.col_indices.raw(); ValueTypeA *nonzero_values_ptr = A.values.raw(); ValueTypeA *dia_values_ptr = thrust::raw_pointer_cast(&(A.values[block_dimy * block_dimx * n_nonzeros_part])); //Transfer row_offsets to matrix 
thrust::copy(row_offsets_part.begin(), row_offsets_part.end(), A.row_offsets.begin()); cudaCheckError(); data_pos += (num_rows + 1) * sizeof(int); n_nonzeros_part = 0; int row_nnz; for (int i = 0; i < partRowVec.size(); i++) { if (fseek(fin, data_pos + sizeof(int)*row_start_glb[i], SEEK_SET) != 0) { FatalError("fseek error", AMGX_ERR_IO); } row_nnz = row_offsets_part[i + 1] - row_offsets_part[i]; is_read = fread(column_indices_ptr + n_nonzeros_part, sizeof(int), row_nnz, fin); n_nonzeros_part += row_nnz; if (is_read != row_nnz) { err = "fread failed reading column_indices, exiting"; FatalError(err, AMGX_ERR_IO); } } data_pos += num_nz * sizeof(int); //temperary array for storing ValueTypeA data // double storage for complex vector< UpValueTypeA > temp(n_nonzeros_part * block_dimy * block_dimx); n_nonzeros_part = 0; for (int i = 0; i < partRowVec.size(); i++) { if (fseek(fin, data_pos + sizeof(UpValueTypeA)*row_start_glb[i] * block_dimy * block_dimx, SEEK_SET) != 0) { FatalError("fseek error", AMGX_ERR_IO); } row_nnz = row_offsets_part[i + 1] - row_offsets_part[i]; //read in data as a ValueTypeA is_read = fread(&temp[n_nonzeros_part * block_dimy * block_dimx], sizeof(UpValueTypeA), row_nnz * block_dimy * block_dimx, fin); n_nonzeros_part += row_nnz; if (is_read != row_nnz * block_dimy * block_dimx) { err = "fread failed reading off-diagonal values, exiting"; FatalError(err, AMGX_ERR_IO); } } //copy with cast data to ValueTypeA val_copy(temp.data(), nonzero_values_ptr, n_nonzeros_part * block_dimy * block_dimx); data_pos += sizeof(UpValueTypeA) * num_nz * block_dimx * block_dimy; if (diag) { temp.resize(n_rows_part * block_dimx * block_dimy); //read in diagonal data as a ValueTypeA for (int i = 0; i < partRowVec.size(); i++) { if (fseek(fin, data_pos + sizeof(UpValueTypeA) * partRowVec[i] * block_dimx * block_dimy, SEEK_SET) != 0) { FatalError("fseek error", AMGX_ERR_IO); } is_read = fread(&temp[i * block_dimx * block_dimy], sizeof(UpValueTypeA), block_dimx * block_dimy, fin); if (is_read != block_dimx * block_dimy) { err = "fread failed reading diagonal values, exiting"; FatalError(err, AMGX_ERR_IO); } } //copy with cast data to ValueTypeA val_copy(temp.data(), dia_values_ptr, n_rows_part * block_dimx * block_dimy); data_pos += sizeof(double) * num_rows * block_dimx * block_dimy; } else // fill last values item with zeros { thrust::fill(A.values.begin() + A.get_num_nz() * block_dimy * block_dimx, A.values.end(), types::util<ValueTypeA>::get_zero()); cudaCheckError(); } //printf("Reading values\n"); fflush(stdout); b.resize(n_rows_part * block_dimy); b.set_block_dimy(block_dimy); b.set_block_dimx(1); temp.resize(n_rows_part * block_dimy); if (is_rhs) { for (int i = 0; i < partRowVec.size(); i++) { if (fseek(fin, data_pos + sizeof(UpValueTypeA) * partRowVec[i] * block_dimy, SEEK_SET) != 0) { FatalError("fseek error", AMGX_ERR_IO); } //read in data as a double (doublecomplex) is_read = fread(&temp[i * block_dimy], sizeof(UpValueTypeA), block_dimy, fin); // if the rhs exists, we must have read the whole thing if (is_read != block_dimy) { err = "fread failed reading rhs, exiting"; FatalError(err, AMGX_ERR_IO); } } //cast data to ValueTypeB val_copy(temp.data(), b.raw(), n_rows_part * block_dimy); data_pos += sizeof(UpValueTypeA) * num_rows * block_dimy; } else { thrust::fill(b.begin(), b.end(), types::util<ValueTypeB>::get_one()); cudaCheckError(); } x.resize(0); if (is_soln) { x.resize(n_rows_part * block_dimx); x.set_block_dimx(1); x.set_block_dimy(block_dimy); temp.resize(n_rows_part * 
block_dimx); for (int i = 0; i < partRowVec.size(); i++) { if (fseek(fin, data_pos + sizeof(UpValueTypeA) * partRowVec[i] * block_dimx, SEEK_SET) != 0) { FatalError("fseek error", AMGX_ERR_IO); } //read in data as a double is_read = fread(&temp[i * block_dimx], sizeof(UpValueTypeA), block_dimx, fin); if (is_read != block_dimx) { err = "fread failed reading rhs, exiting"; FatalError(err, AMGX_ERR_IO); } } val_copy(temp.data(), x.raw(), n_rows_part * block_dimx); } fclose(fin); if (rank_rows.size() > 0) { A.set_is_matrix_read_partitioned(true); b.set_is_vector_read_partitioned(true); if (x.size() > 0) { x.set_is_vector_read_partitioned(true); } } else { delete partRowVec_p; } return true; } /**************************************** * Explict instantiations ***************************************/ #define AMGX_CASE_LINE(CASE) template class ReadMatrixMarket<TemplateMode<CASE>::Type>; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE #define AMGX_CASE_LINE(CASE) template class ReadNVAMGBinary<TemplateMode<CASE>::Type>; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE }
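// The readAndConvert branches above implement the K1..K4 equivalent-real
// formulations (ERF) for complex systems. As a sanity check on the 2x2 block used
// by mode 221 (K1), here is a minimal standalone sketch; it is not part of the
// AMGX reader, and erf_k1_block is a made-up name used only for illustration.
#include <complex>
#include <cstdio>

// K1: a complex scalar z = a + b*i becomes the 2x2 real block
//   [ a  -b ]
//   [ b   a ]
// so that block * [re(x); im(x)] equals [re(z*x); im(z*x)].
static void erf_k1_block(std::complex<double> z, double block[4])
{
    block[0] =  z.real();  block[1] = -z.imag();
    block[2] =  z.imag();  block[3] =  z.real();
}

int main()
{
    std::complex<double> z(2.0, 3.0), x(1.0, -1.0);
    double B[4];
    erf_k1_block(z, B);

    double xr[2] = { x.real(), x.imag() };            // interleaved rhs, as in case 221
    double yr[2] = { B[0] * xr[0] + B[1] * xr[1],
                     B[2] * xr[0] + B[3] * xr[1] };

    std::complex<double> y = z * x;                   // reference complex product
    std::printf("block: (%g, %g)  complex: (%g, %g)\n", yr[0], yr[1], y.real(), y.imag());
    return 0;
}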
#define DI __device__ //! Thread-local Matrix-Vector multiplication. template <int n> DI void Mv_l(const double* A, const double* v, double* out) { for (int i = 0; i < n; i++) { double sum = 0.0; for (int j = 0; j < n; j++) { sum += A[i + j * n] * v[j]; } out[i] = sum; } } template <int n> DI void Mv_l(double alpha, const double* A, const double* v, double* out) { for (int i = 0; i < n; i++) { double sum = 0.0; for (int j = 0; j < n; j++) { sum += A[i + j * n] * v[j]; } out[i] = alpha * sum; } } //! Thread-local Matrix-Matrix multiplication. template <int n, bool aT = false, bool bT = false> DI void MM_l(const double* A, const double* B, double* out) { for (int i = 0; i < n; i++) { for (int j = 0; j < n; j++) { double sum = 0.0; for (int k = 0; k < n; k++) { double Aik = aT ? A[k + i * n] : A[i + k * n]; double Bkj = bT ? B[j + k * n] : B[k + j * n]; sum += Aik * Bkj; } out[i + j * n] = sum; } } } /** * Kalman loop kernel. Each thread computes kalman filter for a single series * and stores relevant matrices in registers. * * @tparam r Dimension of the state vector * @param[in] ys Batched time series * @param[in] nobs Number of observation per series * @param[in] T Batched transition matrix. (r x r) * @param[in] Z Batched "design" vector (1 x r) * @param[in] RQR Batched R*Q*R' (r x r) * @param[in] P Batched P (r x r) * @param[in] alpha Batched state vector (r x 1) * @param[in] intercept Do we fit an intercept? * @param[in] d_mu Batched intercept (1) * @param[in] batch_size Batch size * @param[out] vs Batched residuals (nobs) * @param[out] Fs Batched variance of prediction errors (nobs) * @param[out] sum_logFs Batched sum of the logs of Fs (1) * @param[in] n_diff d + s*D * @param[in] fc_steps Number of steps to forecast * @param[out] d_fc Array to store the forecast * @param[in] conf_int Whether to compute confidence intervals * @param[out] d_F_fc Batched variance of forecast errors (fc_steps) */ template <int rd> __global__ void kalman( const double*__restrict__ ys, int nobs, const double*__restrict__ T, const double*__restrict__ Z, const double*__restrict__ RQR, const double*__restrict__ P, const double*__restrict__ alpha, bool intercept, const double*__restrict__ d_mu, int batch_size, double*__restrict__ vs, double*__restrict__ Fs, double*__restrict__ sum_logFs, int n_diff, int fc_steps = 0, double*__restrict__ d_fc = nullptr, bool conf_int = false, double* d_F_fc = nullptr) { constexpr int rd2 = rd * rd; double l_RQR[rd2]; double l_T[rd2]; double l_Z[rd]; double l_P[rd2]; double l_alpha[rd]; double l_K[rd]; double l_tmp[rd2]; double l_TP[rd2]; int bid = blockDim.x * blockIdx.x + threadIdx.x; if (bid < batch_size) { // Load global mem into registers int b_rd_offset = bid * rd; int b_rd2_offset = bid * rd2; for (int i = 0; i < rd2; i++) { l_RQR[i] = RQR[b_rd2_offset + i]; l_T[i] = T[b_rd2_offset + i]; l_P[i] = P[b_rd2_offset + i]; } for (int i = 0; i < rd; i++) { if (n_diff > 0) l_Z[i] = Z[b_rd_offset + i]; l_alpha[i] = alpha[b_rd_offset + i]; } double b_sum_logFs = 0.0; const double* b_ys = ys + bid * nobs; double* b_vs = vs + bid * nobs; double* b_Fs = Fs + bid * nobs; double mu = intercept ? d_mu[bid] : 0.0; for (int it = 0; it < nobs; it++) { // 1. v = y - Z*alpha double vs_it = b_ys[it]; if (n_diff == 0) vs_it -= l_alpha[0]; else { for (int i = 0; i < rd; i++) { vs_it -= l_alpha[i] * l_Z[i]; } } b_vs[it] = vs_it; // 2. 
F = Z*P*Z' double _Fs; if (n_diff == 0) _Fs = l_P[0]; else { _Fs = 0.0; for (int i = 0; i < rd; i++) { for (int j = 0; j < rd; j++) { _Fs += l_P[j * rd + i] * l_Z[i] * l_Z[j]; } } } b_Fs[it] = _Fs; if (it >= n_diff) b_sum_logFs += log(_Fs); // 3. K = 1/Fs[it] * T*P*Z' // TP = T*P MM_l<rd>(l_T, l_P, l_TP); // K = 1/Fs[it] * TP*Z' double _1_Fs = 1.0 / _Fs; if (n_diff == 0) { for (int i = 0; i < rd; i++) { l_K[i] = _1_Fs * l_TP[i]; } } else Mv_l<rd>(_1_Fs, l_TP, l_Z, l_K); // 4. alpha = T*alpha + K*vs[it] + c // tmp = T*alpha Mv_l<rd>(l_T, l_alpha, l_tmp); // alpha = tmp + K*vs[it] for (int i = 0; i < rd; i++) { l_alpha[i] = l_tmp[i] + l_K[i] * vs_it; } // alpha = alpha + c l_alpha[n_diff] += mu; // 5. L = T - K * Z // L = T (L is tmp) for (int i = 0; i < rd2; i++) { l_tmp[i] = l_T[i]; } // L = L - K * Z if (n_diff == 0) { for (int i = 0; i < rd; i++) { l_tmp[i] -= l_K[i]; } } else { for (int i = 0; i < rd; i++) { for (int j = 0; j < rd; j++) { l_tmp[j * rd + i] -= l_K[i] * l_Z[j]; } } } // 6. P = T*P*L' + R*Q*R' // P = TP*L' MM_l<rd, false, true>(l_TP, l_tmp, l_P); // P = P + RQR for (int i = 0; i < rd2; i++) { l_P[i] += l_RQR[i]; } } sum_logFs[bid] = b_sum_logFs; // Forecast double* b_fc = fc_steps ? d_fc + bid * fc_steps : nullptr; double* b_F_fc = conf_int ? d_F_fc + bid * fc_steps : nullptr; for (int it = 0; it < fc_steps; it++) { if (n_diff == 0) b_fc[it] = l_alpha[0]; else { double pred = 0.0; for (int i = 0; i < rd; i++) { pred += l_alpha[i] * l_Z[i]; } b_fc[it] = pred; } // alpha = T*alpha + c Mv_l<rd>(l_T, l_alpha, l_tmp); for (int i = 0; i < rd; i++) { l_alpha[i] = l_tmp[i]; } l_alpha[n_diff] += mu; if (conf_int) { if (n_diff == 0) b_F_fc[it] = l_P[0]; else { double _Fs = 0.0; for (int i = 0; i < rd; i++) { for (int j = 0; j < rd; j++) { _Fs += l_P[j * rd + i] * l_Z[i] * l_Z[j]; } } b_F_fc[it] = _Fs; } // P = T*P*T' + RR' // TP = T*P MM_l<rd>(l_T, l_P, l_TP); // P = TP*T' MM_l<rd, false, true>(l_TP, l_T, l_P); // P = P + RR' for (int i = 0; i < rd2; i++) { l_P[i] += l_RQR[i]; } } } } } int main(int argc, char* argv[]) { if (argc != 4) { printf("Usage: %s <#series> <#observations> <forcast steps>\n", argv[0]); return 1; } const int nseries = atoi(argv[1]); const int nobs = atoi(argv[2]); const int fc_steps = atoi(argv[3]); const int rd = 8; const int rd2 = rd * rd; const int batch_size = nseries; const int rd2_size = nseries * rd2 * sizeof(double); const int rd_size = nseries * rd * sizeof(double); const int nobs_size = nseries * nobs * sizeof(double); const int ns_size = nseries * sizeof(double); const int fc_size = fc_steps * nseries * sizeof(double); int i; srand(123); double *RQR = (double*) malloc (rd2_size); for (i = 0; i < rd2 * nseries; i++) RQR[i] = (double)rand() / (double)RAND_MAX; double *d_RQR; hipMalloc((void**)&d_RQR, rd2_size); hipMemcpy(d_RQR, RQR, rd2_size, hipMemcpyHostToDevice); double *T = (double*) malloc (rd2_size); for (i = 0; i < rd2 * nseries; i++) T[i] = (double)rand() / (double)RAND_MAX; double *d_T; hipMalloc((void**)&d_T, rd2_size); hipMemcpy(d_T, T, rd2_size, hipMemcpyHostToDevice); double *P = (double*) malloc (rd2_size); for (i = 0; i < rd2 * nseries; i++) P[i] = (double)rand() / (double)RAND_MAX; double *d_P; hipMalloc((void**)&d_P, rd2_size); hipMemcpy(d_P, P, rd2_size, hipMemcpyHostToDevice); double *Z = (double*) malloc (rd_size); for (i = 0; i < rd * nseries; i++) Z[i] = (double)rand() / (double)RAND_MAX; double *d_Z; hipMalloc((void**)&d_Z, rd_size); hipMemcpy(d_Z, Z, rd_size, hipMemcpyHostToDevice); double *alpha = (double*) malloc (rd_size); 
for (i = 0; i < rd * nseries; i++) alpha[i] = (double)rand() / (double)RAND_MAX; double *d_alpha; hipMalloc((void**)&d_alpha, rd_size); hipMemcpy(d_alpha, alpha, rd_size, hipMemcpyHostToDevice); double *ys = (double*) malloc (nobs_size); for (i = 0; i < nobs * nseries; i++) ys[i] = (double)rand() / (double)RAND_MAX; double *d_ys; hipMalloc((void**)&d_ys, nobs_size); hipMemcpy(d_ys, ys, nobs_size, hipMemcpyHostToDevice); double *mu = (double*) malloc (ns_size); for (i = 0; i < nseries; i++) mu[i] = (double)rand() / (double)RAND_MAX; double *d_mu; hipMalloc((void**)&d_mu, ns_size); hipMemcpy(d_mu, mu, ns_size, hipMemcpyHostToDevice); double *vs = (double*) malloc (nobs_size); double *d_vs; hipMalloc((void**)&d_vs, nobs_size); double *Fs = (double*) malloc (nobs_size); double *d_Fs; hipMalloc((void**)&d_Fs, nobs_size); double *sum_logFs = (double*) malloc (ns_size); double *d_sum_logFs; hipMalloc((void**)&d_sum_logFs, ns_size); double *fc = (double*) malloc (fc_size); double *d_fc; hipMalloc((void**)&d_fc, fc_size); double *F_fc = (double*) malloc (fc_size); double *d_F_fc; hipMalloc((void**)&d_F_fc, fc_size); dim3 grids ((nseries + 255)/256); dim3 blocks (256); for (int n_diff = 0; n_diff < rd; n_diff++) for (i = 0; i < 100; i++) hipLaunchKernelGGL(HIP_KERNEL_NAME(kalman<rd>), grids, blocks, 0, 0, d_ys, nobs, d_T, d_Z, d_RQR, d_P, d_alpha, true, // intercept, d_mu, batch_size, d_vs, d_Fs, d_sum_logFs, n_diff, fc_steps, d_fc, true, // forcast d_F_fc ); hipMemcpy(F_fc, d_F_fc, fc_size, hipMemcpyDeviceToHost); double sum = 0.0; for (i = 0; i < fc_steps * nseries; i++) sum += F_fc[i]; printf("Checksum: %lf\n", sum); free(fc); free(F_fc); free(sum_logFs); free(mu); free(Fs); free(vs); free(ys); free(alpha); free(Z); free(P); free(T); free(RQR); hipFree(d_RQR); hipFree(d_T); hipFree(d_P); hipFree(d_Z); hipFree(d_alpha); hipFree(d_ys); hipFree(d_vs); hipFree(d_Fs); hipFree(d_mu); hipFree(d_sum_logFs); hipFree(d_F_fc); hipFree(d_fc); return 0; }
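// The kernel above runs the same six-step Kalman recursion for every series in the
// batch. For reference, this is a minimal host-side sketch of one per-observation
// update for a single series with state dimension 1 and n_diff == 0 (so Z == 1);
// it is illustrative only, and kalman_step_1d is a made-up name, not part of the
// kernel.
static void kalman_step_1d(double y, double T, double RQR, double mu,
                           double *alpha, double *P,
                           double *v_out, double *F_out)
{
  double v = y - *alpha;                 // 1. v = y - Z*alpha
  double F = *P;                         // 2. F = Z*P*Z'
  double K = T * (*P) / F;               // 3. K = (1/F) * T*P*Z'
  *alpha = T * (*alpha) + K * v + mu;    // 4. alpha = T*alpha + K*v + c
  double L = T - K;                      // 5. L = T - K*Z
  *P = T * (*P) * L + RQR;               // 6. P = T*P*L' + R*Q*R'
  *v_out = v;
  *F_out = F;
}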
#include <stdio.h> #include <stdlib.h> #include <assert.h> // CHECK: #include <hip/hip_runtime.h> #include <cuda_runtime.h> // CHECK: #include <hipblas.h> #include <cublas_v2.h> // CHECK: #include "hipsparse.h" #include "cusparse.h" void printMatrix(int m, int n, const double*A, int lda, const char* name) { for(int row = 0 ; row < m ; row++){ for(int col = 0 ; col < n ; col++){ double Areg = A[row + col*lda]; printf("%s(%d,%d) = %f\n", name, row+1, col+1, Areg); } } } int main(int argc, char*argv[]) { // CHECK: hipblasHandle_t cublasH = NULL; cublasHandle_t cublasH = NULL; // CHECK: hipsparseHandle_t cusparseH = NULL; cusparseHandle_t cusparseH = NULL; // CHECK: hipStream_t stream = NULL; cudaStream_t stream = NULL; // CHECK: hipsparseMatDescr_t descrA = NULL; cusparseMatDescr_t descrA = NULL; // CHECK: hipblasStatus_t cublasStat = HIPBLAS_STATUS_SUCCESS; cublasStatus_t cublasStat = CUBLAS_STATUS_SUCCESS; // CHECK: hipsparseStatus_t cusparseStat = HIPSPARSE_STATUS_SUCCESS; cusparseStatus_t cusparseStat = CUSPARSE_STATUS_SUCCESS; // CHECK: hipError_t cudaStat1 = hipSuccess; // CHECK: hipError_t cudaStat2 = hipSuccess; // CHECK: hipError_t cudaStat3 = hipSuccess; // CHECK: hipError_t cudaStat4 = hipSuccess; // CHECK: hipError_t cudaStat5 = hipSuccess; cudaError_t cudaStat1 = cudaSuccess; cudaError_t cudaStat2 = cudaSuccess; cudaError_t cudaStat3 = cudaSuccess; cudaError_t cudaStat4 = cudaSuccess; cudaError_t cudaStat5 = cudaSuccess; const int n = 4; const int nnzA = 9; /* * | 1 0 2 3 | * | 0 4 0 0 | * A = | 5 0 6 7 | * | 0 8 0 9 | * * eigevales are { -0.5311, 7.5311, 9.0000, 4.0000 } * * The largest eigenvaluse is 9 and corresponding eigenvector is * * | 0.3029 | * v = | 0 | * | 0.9350 | * | 0.1844 | */ const int csrRowPtrA[n+1] = { 0, 3, 4, 7, 9 }; const int csrColIndA[nnzA] = {0, 2, 3, 1, 0, 2, 3, 1, 3 }; const double csrValA[nnzA] = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0 }; const double lambda_exact[n] = { 9.0000, 7.5311, 4.0000, -0.5311 }; const double x0[n] = {1.0, 2.0, 3.0, 4.0 }; /* initial guess */ double x[n]; /* numerical eigenvector */ int *d_csrRowPtrA = NULL; int *d_csrColIndA = NULL; double *d_csrValA = NULL; double *d_x = NULL; /* eigenvector */ double *d_y = NULL; /* workspace */ const double tol = 1.e-6; const int max_ites = 30; const double h_one = 1.0; const double h_zero = 0.0; printf("example of csrmv_mp \n"); printf("tol = %E \n", tol); printf("max. 
iterations = %d \n", max_ites); printf("1st eigenvaluse is %f\n", lambda_exact[0] ); printf("2nd eigenvaluse is %f\n", lambda_exact[1] ); double alpha = lambda_exact[1]/lambda_exact[0] ; printf("convergence rate is %f\n", alpha ); double est_iterations = log(tol)/log(alpha); printf("# of iterations required is %d\n", (int)ceil(est_iterations)); // step 1: create cublas/cusparse handle, bind a stream // CHECK: cudaStat1 = hipStreamCreateWithFlags(&stream, hipStreamNonBlocking); cudaStat1 = cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking); // CHECK: assert(hipSuccess == cudaStat1); assert(cudaSuccess == cudaStat1); // CHECK: cublasStat = hipblasCreate(&cublasH); cublasStat = cublasCreate(&cublasH); // CHECK: assert(HIPBLAS_STATUS_SUCCESS == cublasStat); assert(CUBLAS_STATUS_SUCCESS == cublasStat); // CHECK: cublasStat = hipblasSetStream(cublasH, stream); cublasStat = cublasSetStream(cublasH, stream); // CHECK: assert(HIPBLAS_STATUS_SUCCESS == cublasStat); assert(CUBLAS_STATUS_SUCCESS == cublasStat); // CHECK: cusparseStat = hipsparseCreate(&cusparseH); cusparseStat = cusparseCreate(&cusparseH); // CHECK: assert(HIPSPARSE_STATUS_SUCCESS == cusparseStat); assert(CUSPARSE_STATUS_SUCCESS == cusparseStat); // CHECK: cusparseStat = hipsparseSetStream(cusparseH, stream); cusparseStat = cusparseSetStream(cusparseH, stream); // CHECK: assert(HIPSPARSE_STATUS_SUCCESS == cusparseStat); assert(CUSPARSE_STATUS_SUCCESS == cusparseStat); // step 2: configuration of matrix A // CHECK: cusparseStat = hipsparseCreateMatDescr(&descrA); cusparseStat = cusparseCreateMatDescr(&descrA); // assert(HIPSPARSE_STATUS_SUCCESS == cusparseStat); assert(CUSPARSE_STATUS_SUCCESS == cusparseStat); // CHECK: hipsparseSetMatIndexBase(descrA,HIPSPARSE_INDEX_BASE_ZERO); cusparseSetMatIndexBase(descrA,CUSPARSE_INDEX_BASE_ZERO); // CHECK: hipsparseSetMatType(descrA, HIPSPARSE_MATRIX_TYPE_GENERAL ); cusparseSetMatType(descrA, CUSPARSE_MATRIX_TYPE_GENERAL ); // step 3: copy A and x0 to device // CHECK: cudaStat1 = hipMalloc ((void**)&d_csrRowPtrA, sizeof(int) * (n+1) ); cudaStat1 = cudaMalloc ((void**)&d_csrRowPtrA, sizeof(int) * (n+1) ); // CHECK: cudaStat2 = hipMalloc ((void**)&d_csrColIndA, sizeof(int) * nnzA ); cudaStat2 = cudaMalloc ((void**)&d_csrColIndA, sizeof(int) * nnzA ); // CHECK: cudaStat3 = hipMalloc ((void**)&d_csrValA , sizeof(double) * nnzA ); cudaStat3 = cudaMalloc ((void**)&d_csrValA , sizeof(double) * nnzA ); // CHECK: cudaStat4 = hipMalloc ((void**)&d_x , sizeof(double) * n ); cudaStat4 = cudaMalloc ((void**)&d_x , sizeof(double) * n ); // CHECK: cudaStat5 = hipMalloc ((void**)&d_y , sizeof(double) * n ); cudaStat5 = cudaMalloc ((void**)&d_y , sizeof(double) * n ); // CHECK: assert(hipSuccess == cudaStat1); // CHECK: assert(hipSuccess == cudaStat2); // CHECK: assert(hipSuccess == cudaStat3); // CHECK: assert(hipSuccess == cudaStat4); // CHECK: assert(hipSuccess == cudaStat5); assert(cudaSuccess == cudaStat1); assert(cudaSuccess == cudaStat2); assert(cudaSuccess == cudaStat3); assert(cudaSuccess == cudaStat4); assert(cudaSuccess == cudaStat5); // CHECK: cudaStat1 = hipMemcpy(d_csrRowPtrA, csrRowPtrA, sizeof(int) * (n+1) , hipMemcpyHostToDevice); cudaStat1 = cudaMemcpy(d_csrRowPtrA, csrRowPtrA, sizeof(int) * (n+1) , cudaMemcpyHostToDevice); // CHECK: cudaStat2 = hipMemcpy(d_csrColIndA, csrColIndA, sizeof(int) * nnzA , hipMemcpyHostToDevice); cudaStat2 = cudaMemcpy(d_csrColIndA, csrColIndA, sizeof(int) * nnzA , cudaMemcpyHostToDevice); // CHECK: cudaStat3 = hipMemcpy(d_csrValA , csrValA , sizeof(double) * 
nnzA , hipMemcpyHostToDevice); cudaStat3 = cudaMemcpy(d_csrValA , csrValA , sizeof(double) * nnzA , cudaMemcpyHostToDevice); // CHECK: assert(hipSuccess == cudaStat1); assert(cudaSuccess == cudaStat1); // CHECK: assert(hipSuccess == cudaStat2); assert(cudaSuccess == cudaStat2); // CHECK: assert(hipSuccess == cudaStat3); assert(cudaSuccess == cudaStat3); // step 4: power method double lambda = 0.0; double lambda_next = 0.0; // 4.1: initial guess x0 cudaStat1 = cudaMemcpy(d_x, x0, sizeof(double) * n, cudaMemcpyHostToDevice); // CHECK: assert(hipSuccess == cudaStat1); assert(cudaSuccess == cudaStat1); for(int ite = 0 ; ite < max_ites ; ite++ ){ // 4.2: normalize vector x // x = x / |x| double nrm2_x; // TODO: cublasStat = hipblasDnrm2_v2(cublasH, cublasStat = cublasDnrm2_v2(cublasH, n, d_x, 1, // incx, &nrm2_x /* host pointer */ ); // CHECK: assert(HIPBLAS_STATUS_SUCCESS == cublasStat); assert(CUBLAS_STATUS_SUCCESS == cublasStat); double one_over_nrm2_x = 1.0 / nrm2_x; // TODO: cublasStat = hipblasDscal_v2( cublasH, cublasStat = cublasDscal_v2( cublasH, n, &one_over_nrm2_x, /* host pointer */ d_x, 1 // incx ); // CHECK: assert(HIPBLAS_STATUS_SUCCESS == cublasStat); assert(CUBLAS_STATUS_SUCCESS == cublasStat); // 4.3: y = A*x // TODO: hipsparseStat = cusparseDcsrmv_mp(cusparseH, // CHECK: HIPSPARSE_OPERATION_NON_TRANSPOSE cusparseStat = cusparseDcsrmv_mp(cusparseH, CUSPARSE_OPERATION_NON_TRANSPOSE, n, n, nnzA, &h_one, descrA, d_csrValA, d_csrRowPtrA, d_csrColIndA, d_x, &h_zero, d_y); // CHECK: assert(HIPSPARSE_STATUS_SUCCESS == cusparseStat); assert(CUSPARSE_STATUS_SUCCESS == cusparseStat); // 4.4: lambda = y**T*x // TODO: cublasStat = hipblasDdot_v2 ( cublasH, cublasStat = cublasDdot_v2 ( cublasH, n, d_x, 1, // incx, d_y, 1, // incy, &lambda_next /* host pointer */ ); // CHECK: assert(HIPBLAS_STATUS_SUCCESS == cublasStat); assert(CUBLAS_STATUS_SUCCESS == cublasStat); double lambda_err = fabs( lambda_next - lambda_exact[0] ); printf("ite %d: lambda = %f, error = %E\n", ite, lambda_next, lambda_err ); // 4.5: check if converges if ( (ite > 0) && fabs( lambda - lambda_next ) < tol ){ break; // converges } /* * 4.6: x := y * lambda = lambda_next * * so new approximation is (lambda, x), x is not normalized. 
*/ // CHECK: cudaStat1 = hipMemcpy(d_x, d_y, sizeof(double) * n , hipMemcpyDeviceToDevice); cudaStat1 = cudaMemcpy(d_x, d_y, sizeof(double) * n , cudaMemcpyDeviceToDevice); // CHECK: assert(hipSuccess == cudaStat1); assert(cudaSuccess == cudaStat1); lambda = lambda_next; } // step 5: report eigen-pair // CHECK: cudaStat1 = hipMemcpy(x, d_x, sizeof(double) * n, hipMemcpyDeviceToHost); cudaStat1 = cudaMemcpy(x, d_x, sizeof(double) * n, cudaMemcpyDeviceToHost); // CHECK: assert(hipSuccess == cudaStat1); assert(cudaSuccess == cudaStat1); printf("largest eigenvalue is %E\n", lambda ); printf("eigenvector = (matlab base-1)\n"); printMatrix(n, 1, x, n, "V0"); printf("=====\n"); // free resources // CHECK: if (d_csrRowPtrA ) hipFree(d_csrRowPtrA); if (d_csrRowPtrA ) cudaFree(d_csrRowPtrA); // CHECK: if (d_csrColIndA ) hipFree(d_csrColIndA); if (d_csrColIndA ) cudaFree(d_csrColIndA); // CHECK: if (d_csrValA ) hipFree(d_csrValA); if (d_csrValA ) cudaFree(d_csrValA); // CHECK: if (d_x ) hipFree(d_x); if (d_x ) cudaFree(d_x); // CHeCK: if (d_y ) hipFree(d_y); if (d_y ) cudaFree(d_y); // CHECK: if (cublasH ) hipblasDestroy(cublasH); if (cublasH ) cublasDestroy(cublasH); // CHECK: if (cusparseH ) hipsparseDestroy(cusparseH); if (cusparseH ) cusparseDestroy(cusparseH); // CHECK: if (stream ) hipStreamDestroy(stream); if (stream ) cudaStreamDestroy(stream); // CHECK: if (descrA ) hipsparseDestroyMatDescr(descrA); if (descrA ) cusparseDestroyMatDescr(descrA); // CHECK: hipDeviceReset(); cudaDeviceReset(); return 0; }
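// The sample above drives the power method with cuSPARSE SpMV plus cuBLAS
// nrm2/scal/dot. Below is a host-only sketch of the same iteration on a dense,
// row-major matrix, handy for checking the device loop on small inputs;
// power_iteration_dense is a made-up name and assumes <math.h> and <stdlib.h>
// are available. It returns the dominant-eigenvalue estimate and leaves the
// (unnormalized) eigenvector approximation in x.
static double power_iteration_dense(const double *A, int n, double *x,
                                    int max_iters, double tol)
{
    double lambda = 0.0;
    double *y = (double*) malloc(n * sizeof(double));
    for (int it = 0; it < max_iters; it++) {
        double nrm = 0.0;                              // x = x / |x|
        for (int i = 0; i < n; i++) nrm += x[i] * x[i];
        nrm = sqrt(nrm);
        for (int i = 0; i < n; i++) x[i] /= nrm;

        double lambda_next = 0.0;                      // y = A*x, lambda = x'*y
        for (int i = 0; i < n; i++) {
            y[i] = 0.0;
            for (int j = 0; j < n; j++) y[i] += A[i * n + j] * x[j];
            lambda_next += x[i] * y[i];
        }
        for (int i = 0; i < n; i++) x[i] = y[i];       // x := y

        int converged = (it > 0) && (fabs(lambda - lambda_next) < tol);
        lambda = lambda_next;
        if (converged) break;
    }
    free(y);
    return lambda;
}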
#define debug_aml(a...) //#define debug_aml(a...) {printf("%s:%d ", __FILE__, __LINE__); printf(a); \ printf("\n");} #pragma once #ifdef BOOST_FOUND // Boost includes for CPU Push Relabel Max Flow reference algorithms #include <boost/config.hpp> #include <iostream> #include <string> #include <boost/graph/edmonds_karp_max_flow.hpp> #include <boost/graph/adjacency_list.hpp> #include <boost/graph/read_dimacs.hpp> #endif #include <gunrock/app/mf/mf_test.cuh> namespace gunrock { namespace app { namespace gtf { /***************************************************************************** * Housekeeping Routines ****************************************************************************/ cudaError_t UseParameters_test(util::Parameters &parameters) { cudaError_t retval = cudaSuccess; GUARD_CU(parameters.Use<double>( "lambda2", util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER, 3, "Parameter controlling how heavily non-connected solutions are " "penalized.", __FILE__, __LINE__)); GUARD_CU(parameters.Use<double>( "gamma", util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER, 3, "Parameter controling how heavily non-sparsity is penalized.", __FILE__, __LINE__)); GUARD_CU(parameters.Use<double>( "error_threshold", util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER, 1e-12, "Error threshold to compare floating point values", __FILE__, __LINE__)); return retval; } /** * @brief Displays the GTF result * * @tparam ValueT Type of capacity/flow/excess * @tparam VertxeT Type of vertex * * @param[in] h_flow Flow calculated on edges * @param[in] source Index of source vertex * @param[in] nodes Number of nodes */ template <typename GraphT, typename ValueT, typename VertexT> void DisplaySolution(GraphT graph, ValueT *h_flow, VertexT *reverse, VertexT sink, VertexT nodes) {} /**************************************************************************** * GTF Testing Routines ***************************************************************************/ /** * @brief Min Cut algorithm * * @tparam ValueT Type of capacity/flow/excess * @tparam VertxeT Type of vertex * @tparam GraphT Type of graph * @param[in] graph Graph * @param[in] source Source vertex * @param[in] sink Sink vertex * @param[in] flow Function of flow on edges * @param[out] min_cut Function on nodes, 1 = connected to source, 0 = sink * */ template <typename VertexT, typename ValueT, typename GraphT> void minCut_sub(GraphT &graph, VertexT src, ValueT *flow, bool *vertex_reachabilities, ValueT *residuals) { typedef typename GraphT::CsrT CsrT; // std::vector<bool> flag; flag.resize(graph.nodes, true); std::queue<VertexT> que; que.push(src); for (auto e = 0; e < graph.edges; e++) { residuals[e] = graph.CsrT::edge_values[e] - flow[e]; } while (!que.empty()) { auto v = que.front(); que.pop(); auto e_start = graph.CsrT::GetNeighborListOffset(v); auto num_neighbors = graph.CsrT::GetNeighborListLength(v); auto e_end = e_start + num_neighbors; for (auto e = e_start; e < e_end; ++e) { auto u = graph.CsrT::GetEdgeDest(e); if (vertex_reachabilities[u] == false and abs(graph.CsrT::edge_values[e] - flow[e]) > 1e-6) { vertex_reachabilities[u] = true; que.push(u); } } } } template <typename GraphT, typename VertexT, typename SizeT, typename ValueT> cudaError_t MinCut(util::Parameters &parameters, GraphT &graph, std::map<std::pair<VertexT, VertexT>, SizeT> &edge_id, SizeT *reverse_edges, VertexT source, VertexT dest, ValueT *edge_flows, ValueT *edge_residuals, bool *vertex_reachabilities) { cudaError_t retval 
= cudaSuccess; double error_threshold = parameters.Get<double>("error_threshold"); ValueT max_flow = 0; // for (auto e = 0; e < graph.edges; e++){ // printf("CPU: e_idx %d, e_val %f\n", e, graph.edge_values[e]); // } mf::CPU_Reference(parameters, graph, edge_id, source, dest, max_flow, reverse_edges, edge_flows); memset(vertex_reachabilities, false, graph.nodes * sizeof(vertex_reachabilities[0])); minCut_sub(graph, source, edge_flows, vertex_reachabilities, edge_residuals); auto &edge_capacities = graph.edge_values; // printf("after maxflow \n"); for (auto e = 0; e < graph.edges; e++) { edge_residuals[e] = edge_capacities[e] - edge_flows[e]; // if(e<10) printf("CPU: er_idx %d, e_res %f \n", e, edge_residuals[e]); } /* std::queue <typename GraphT::VertexT> q; q.push(source); memset(vertex_reachabilities, false, graph.nodes*sizeof(vertex_reachabilities[0])); vertex_reachabilities[source] = true; // Standard BFS Loop while (!q.empty()) { VertexT v = q.front(); q.pop(); auto e_start = graph.GetNeighborListOffset(v); auto num_neighbors = graph.GetNeighborListLength(v); auto e_end = e_start + num_neighbors; for (auto e = e_start; e < e_end; e++) { VertexT u = graph.GetEdgeDest(e); if (vertex_reachabilities[u] == false && abs(edge_residuals[e]) > 1e-6) { q.push(u); vertex_reachabilities[u] = true; } } } */ // printf("In PR min-cut \n"); ///////////////////////// /* VertexT head = 0; VertexT tail = 0; VertexT *queue = new VertexT[graph.nodes]; queue[head] = source; while (tail <= head) { VertexT v = queue[tail]; auto e_start = graph.GetNeighborListOffset(v); auto num_neighbors = graph.GetNeighborListLength(v); auto e_end = e_start + num_neighbors; for (auto e = e_start; e < e_end; e++) { VertexT u = graph.GetEdgeDest(e); if (vertex_reachabilities[u] == false && abs(edge_residuals[e]) > 1e-6){ head ++; queue[head] = u; vertex_reachabilities[u] = true; } } tail ++; } //for(auto i = 0; i < graph.nodes; i++){ // printf("%d, ", vertex_reachabilities[i]); //} printf("\n"); */ return retval; } /*-----------------------------------------------*/ template <typename ValueT, typename VertexT> int bfs(ValueT **rGraph, VertexT s, VertexT t, VertexT parent[], const int V) { // Create a visited array and mark all vertices as not visited bool *visited = new bool[V]; memset(visited, 0, V * sizeof(visited[0])); // Create a queue, enqueue source vertex and mark source vertex // as visited std::queue<VertexT> q; q.push(s); visited[s] = true; parent[s] = -1; // Standard BFS Loop while (!q.empty()) { int u = q.front(); q.pop(); for (int v = 0; v < V; v++) { if (visited[v] == false && rGraph[u][v] > 0) { q.push(v); parent[v] = u; visited[v] = true; } } } // If we reached sink in BFS starting from source, then return // true, else false return (visited[t] == true); } // A DFS based function to find all reachable vertices from s. The function // marks visited[i] as true if i is reachable from s. The initial values in // visited[] must be false. 
We can also use BFS to find reachable vertices template <typename ValueT, typename VertexT> void dfs(ValueT **rGraph, VertexT s, bool visited[], const int V) { visited[s] = true; for (int i = 0; i < V; i++) if (abs(rGraph[s][i]) > 1e-6 && !visited[i]) dfs(rGraph, i, visited, V); } // Prints the minimum s-t cut template <typename ValueT, typename VertexT, typename GraphT> void minCut(GraphT graph, VertexT s, VertexT t, bool *visited, ValueT *edge_residuals, const int V) { ValueT max_flow = 0; // Create a residual graph and fill the residual graph with // given capacities in the original graph as residual capacities // in residual graph ValueT **rGraph = new ValueT *[V]; // rGraph[i][j] indicates residual capacity of edge i-j for (int u = 0; u < V; u++) { rGraph[u] = new ValueT[V]; for (int v = 0; v < V; v++) { rGraph[u][v] = 0; } } for (auto u = 0; u < graph.nodes; ++u) { auto e_start = graph.GraphT::CsrT::GetNeighborListOffset(u); auto num_neighbors = graph.GraphT::CsrT::GetNeighborListLength(u); auto e_end = e_start + num_neighbors; for (auto e = e_start; e < e_end; ++e) { auto v = graph.GraphT::CsrT::GetEdgeDest(e); rGraph[int(u)][int(v)] = graph.edge_values[e]; } } /* printf("\n we are before maxflow \n"); for (int u = 0; u < V; u++) { for (int v = 0; v < V; v++) { printf("%5.2f ", rGraph[u][v]); } printf("\n"); } printf("\n"); */ VertexT *parent = new VertexT[V]; // This array is filled by BFS and to store path // Augment the flow while there is a path from source to sink int counter = 0; while (bfs(rGraph, s, t, parent, V)) { // Find minimum residual capacity of the edges along the // path filled by BFS. Or we can say find the maximum flow // through the path found. ValueT path_flow = INT_MAX; for (int v = t; v != s; v = parent[v]) { int u = parent[v]; path_flow = min(path_flow, rGraph[u][v]); } // update residual capacities of the edges and reverse edges // along the path for (int v = t; v != s; v = parent[v]) { int u = parent[v]; // printf("%d -> %d\n", u, v); rGraph[u][v] -= path_flow; rGraph[v][u] += path_flow; } counter++; max_flow += path_flow; } // Flow is maximum now, find vertices reachable from s memset(visited, false, V * sizeof(visited[0])); dfs(rGraph, s, visited, V); int tem_i = 0; for (auto u = 0; u < graph.nodes; ++u) { auto e_start = graph.GraphT::CsrT::GetNeighborListOffset(u); auto num_neighbors = graph.GraphT::CsrT::GetNeighborListLength(u); auto e_end = e_start + num_neighbors; for (auto e = e_start; e < e_end; ++e) { auto v = graph.GraphT::CsrT::GetEdgeDest(e); edge_residuals[tem_i] = rGraph[int(u)][int(v)]; // printf("inside graph loaded as %d (%d -> %d) = %f\n", tem_i, u, v, // edge_residuals[tem_i]); tem_i++; } } /* for (int u = 0; u < V; u++) { for (int v = 0; v < V; v++) { printf("%5.2f ", rGraph[u][v]); } printf("\n"); } */ } /*----------------------------------------------*/ template <typename ValueT> void soft_thresh(ValueT *Y, const ValueT thresh, const int n) { for (int i = 0; i < n; i++) { ValueT tmp = max(Y[i] - thresh, 0.0); Y[i] = tmp + min(Y[i] + thresh, 0.0); } } /** * @brief Simple CPU-based reference GTF implementations * * @tparam GraphT Type of the graph * @tparam VertexT Type of the vertex * @tparam ValueT Type of the capacity/flow/excess * @param[in] parameters Running parameters * @param[in] graph Input graph * @param[in] src The source vertex * @param[in] sin The sink vertex * @param[out] maxflow Value of computed maxflow reached sink * @param[out] reverse Computed reverse * @param[out] edges_flow Computed flows on edges * * \return double 
Time taken for the GTF */ template <typename GraphT, typename ArrayT, typename VertexT, typename SizeT> cudaError_t CPU_Reference(util::Parameters &parameters, GraphT &graph, std::map<std::pair<VertexT, VertexT>, SizeT> &edge_id, ArrayT &reverse_edges, double &elapsed) { typedef typename GraphT::ValueT ValueT; cudaError_t retval = cudaSuccess; auto num_nodes = graph.nodes; // n + 2 = V auto num_org_nodes = num_nodes - 2; // n auto num_edges = graph.edges; // m + n*4 VertexT source = num_org_nodes; // originally 0 VertexT dest = num_org_nodes + 1; // originally 1 double lambda2 = parameters.Get<double>("lambda2"); double error_threshold = parameters.Get<double>("error_threshold"); VertexT num_comms = 1; // nlab VertexT *next_communities = new VertexT[num_nodes]; // nextlabel VertexT *curr_communities = new VertexT[num_nodes]; // label VertexT *community_sizes = new VertexT[num_nodes]; // nums ValueT *community_weights = new ValueT[num_nodes]; // averages bool *community_active = new bool[num_nodes]; // !inactivelable ValueT *community_accus = new ValueT[num_nodes]; // values bool *vertex_active = new bool[num_nodes]; // alive bool *vertex_reachabilities = new bool[num_nodes]; // visited: 1 reachable from source, 2 reachable from dest, 0 otherwise ValueT *edge_residuals = new ValueT[num_edges]; // graph ValueT *edge_flows = new ValueT[num_edges]; // edge flows double sum_weights_source_sink = 0; // moy // use to preserve graph edge weights ValueT *original_edge_capacities = new ValueT[num_edges]; // graph auto &edge_capacities = graph.edge_values; for (auto e = 0; e < graph.edges; e++) { original_edge_capacities[e] = edge_capacities[e]; } util::CpuTimer cpu_timer; // Normalization and resets SizeT offset = num_edges - num_org_nodes * 2; printf("offset is %d num edges %d \n", offset, num_edges); for (VertexT v = 0; v < num_org_nodes; v++) { sum_weights_source_sink += graph.edge_values[offset + v]; SizeT e = graph.GetNeighborListOffset(v) + graph.GetNeighborListLength(v) - 1; sum_weights_source_sink -= graph.edge_values[e]; vertex_active[v] = true; community_active[v] = true; curr_communities[v] = 0; next_communities[v] = 0; // extra } auto avg_weights_source_sink = sum_weights_source_sink / num_org_nodes; community_accus[0] = avg_weights_source_sink; printf("!!!!!!!!!! 
avg is %f \n", avg_weights_source_sink); for (VertexT v = 0; v < num_org_nodes; v++) { SizeT e = graph.GetNeighborListOffset(v) + graph.GetNeighborListLength(v) - 1; ValueT val = graph.edge_values[offset + v] - graph.edge_values[e] - avg_weights_source_sink; if (val > 0) { graph.edge_values[offset + v] = val; graph.edge_values[e] = 0; } else { graph.edge_values[offset + v] = 0; graph.edge_values[e] = -1 * val; } } cpu_timer.Start(); VertexT iteration = 0; // iter bool to_continue = true; // flagstop unsigned int comm; while (to_continue) { printf("Iteration %d\n", iteration); iteration++; // for(int e = 0; e < 10; e++) // printf("CPU: e_idx %d, e_val %f\n", e, graph.edge_values[e]); GUARD_CU(MinCut(parameters, graph, edge_id, reverse_edges + 0, source, dest, edge_flows, edge_residuals, vertex_reachabilities)); // minCut(graph, source, dest, vertex_reachabilities, edge_residuals, // num_nodes); auto &edge_capacities = graph.edge_values; for (comm = 0; comm < num_comms; comm++) { community_weights[comm] = 0; community_sizes[comm] = 0; next_communities[comm] = 0; } auto pervious_num_comms = num_comms; for (VertexT v = 0; v < num_org_nodes; v++) { if (!vertex_active[v]) continue; if (vertex_reachabilities[v] == 1) { // reachable by source comm = next_communities[curr_communities[v]]; if (comm == 0) { // not assigned yet comm = num_comms; next_communities[curr_communities[v]] = num_comms; community_active[comm] = true; num_comms++; community_weights[comm] = 0; community_sizes[comm] = 0; next_communities[comm] = 0; community_accus[comm] = community_accus[curr_communities[v]]; } curr_communities[v] = comm; community_weights[comm] += edge_residuals[num_edges - num_org_nodes * 2 + v]; community_sizes[comm]++; // printf("++ %d %f %f\n", comm, community_weights[comm], // community_accus[comm]); } else { // otherwise comm = curr_communities[v]; SizeT e_start = graph.GetNeighborListOffset(v); SizeT num_neighbors = graph.GetNeighborListLength(v); community_weights[comm] -= edge_residuals[e_start + num_neighbors - 1]; community_sizes[comm]++; auto e_end = e_start + num_neighbors - 2; for (auto e = e_start; e < e_end; e++) { VertexT u = graph.GetEdgeDest(e); if (vertex_reachabilities[u] == 1) { edge_residuals[e] = 0; } } // printf("-- %d %f %f\n", comm, community_weights[comm], // community_accus[comm]); } } // end of for v // printf("%d %f %f\n", comm, community_weights[comm], // community_accus[comm]); for (comm = 0; comm < pervious_num_comms; comm++) { if (community_active[comm]) { if (next_communities[comm] == 0) { community_weights[comm] = 0; community_active[comm] = false; } else if (community_sizes[comm] == 0) { community_active[comm] = false; community_active[next_communities[comm]] = false; community_weights[next_communities[comm]] = 0; } else { // printf("values: comm: %d, sizes: %d, weights: %f, accus: %f.\n", // comm, community_sizes[comm], community_weights[comm], // community_accus[comm]); community_weights[comm] /= community_sizes[comm]; community_accus[comm] += community_weights[comm]; } } else { community_weights[comm] = 0; } } for (; comm < num_comms; comm++) { community_weights[comm] /= community_sizes[comm]; community_accus[comm] += community_weights[comm]; // printf("comm %d, accus %f, sizes %d \n", comm, community_accus [comm], // community_sizes [comm]); printf("values: comm: %d, sizes: %d, weights: // %f, accus: %f.\n", comm, community_sizes[comm], community_weights[comm], // community_accus[comm]); } to_continue = false; for (VertexT v = 0; v < num_org_nodes; v++) { if 
(!vertex_active[v]) continue; auto comm = curr_communities[v]; if (!community_active[comm] || abs(community_weights[comm]) <= 1e-6) { if (vertex_reachabilities[v] == 1) edge_residuals[num_edges - num_org_nodes * 2 + v] = 0; if (vertex_reachabilities[v] != 1) { SizeT e = graph.GetNeighborListOffset(v) + graph.GetNeighborListLength(v) - 1; edge_residuals[e] = 0; } vertex_active[v] = false; community_active[comm] = false; } else { to_continue = true; SizeT e_from_src = num_edges - num_org_nodes * 2 + v; SizeT e_to_dest = graph.GetNeighborListOffset(v) + graph.GetNeighborListLength(v) - 1; if (vertex_reachabilities[v] == 1) { edge_residuals[e_from_src] -= community_weights[comm]; if (edge_residuals[e_from_src] < 0) { double temp = -1 * edge_residuals[e_from_src]; edge_residuals[e_from_src] = edge_residuals[e_to_dest]; edge_residuals[e_to_dest] = temp; } } else { edge_residuals[e_to_dest] += community_weights[comm]; if (edge_residuals[e_to_dest] < 0) { double temp = -1 * edge_residuals[e_to_dest]; edge_residuals[e_to_dest] = edge_residuals[e_from_src]; edge_residuals[e_from_src] = temp; } } } } // end of for v for (SizeT e = 0; e < graph.edges; e++) { edge_capacities[e] = edge_residuals[e]; // printf("CPU: eidx %d, edge_v %f \n", e, edge_capacities[e]); } } // end of while cpu_timer.Stop(); elapsed = cpu_timer.ElapsedMillis(); soft_thresh(community_accus, lambda2, num_org_nodes); std::ofstream out_pr("./output_pr.txt"); for (int i = 0; i < num_org_nodes; i++) out_pr << (double)community_accus[curr_communities[i]] << std::endl; out_pr.close(); for (auto e = 0; e < graph.edges; e++) { edge_capacities[e] = original_edge_capacities[e]; } delete[] next_communities; next_communities = NULL; delete[] curr_communities; curr_communities = NULL; delete[] community_sizes; community_sizes = NULL; delete[] community_weights; community_weights = NULL; delete[] community_active; community_active = NULL; delete[] community_accus; community_accus = NULL; delete[] vertex_active; vertex_active = NULL; delete[] vertex_reachabilities; vertex_reachabilities = NULL; delete[] edge_residuals; edge_residuals = NULL; delete[] original_edge_capacities; original_edge_capacities = NULL; return retval; } /** * @brief Validation of GTF results * * @tparam GraphT Type of the graph * @tparam ValueT Type of the distances * * @param[in] parameters Excution parameters * @param[in] graph Input graph * @param[in] source The source vertex * @param[in] sink The sink vertex * @param[in] h_flow Computed flow on edges * @param[in] ref_flow Reference flow on edges * @param[in] verbose Whether to output detail comparsions * * \return int Number of errors */ template <typename GraphT, typename ValueT, typename VertexT> int Validate_Results(util::Parameters &parameters, GraphT &graph, VertexT source, VertexT sink, ValueT *h_flow, VertexT *reverse, ValueT *ref_flow = NULL, bool verbose = true) { typedef typename GraphT::SizeT SizeT; SizeT num_errors = 0; return num_errors; } } // namespace gtf } // namespace app } // namespace gunrock // Leave this at the end of the file // Local Variables: // mode:c++ // c-file-style: "NVIDIA" // End:
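// Illustrative sketch, not part of the Gunrock source: soft_thresh() above is
// the soft-thresholding (shrinkage) operator, i.e. the proximal operator of
// the L1 penalty applied with threshold lambda2. Entries with |y| <= thresh
// collapse to zero; larger magnitudes move toward zero by thresh. A minimal
// host-only demo on double inputs (the names below are hypothetical):
#include <algorithm>
#include <cstdio>

static void soft_thresh_demo(double *y, const double thresh, const int n) {
  for (int i = 0; i < n; i++) {
    double tmp = std::max(y[i] - thresh, 0.0);
    y[i] = tmp + std::min(y[i] + thresh, 0.0);
  }
}

int main() {
  double y[] = {-2.0, -0.5, 0.0, 0.5, 2.0};
  soft_thresh_demo(y, 1.0, 5);
  for (int i = 0; i < 5; i++) printf("%.2f ", y[i]);  // -1.00 0.00 0.00 0.00 1.00
  printf("\n");
  return 0;
}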
//typedef unsigned char BitSequence; #include "cuda_helper.h" #include "cuda_vector.h" static __constant__ uint32_t d_T512[4096/4] = { 0xef0b0270, 0x3afd0000, 0x5dae0000, 0x69490000, 0x9b0f3c06, 0x4405b5f9, 0x66140a51, 0x924f5d0a, 0xc96b0030, 0xe7250000, 0x2f840000, 0x264f0000, 0x08695bf9, 0x6dfcf137, 0x509f6984, 0x9e69af68, 0xc96b0030, 0xe7250000, 0x2f840000, 0x264f0000, 0x08695bf9, 0x6dfcf137, 0x509f6984, 0x9e69af68, 0x26600240, 0xddd80000, 0x722a0000, 0x4f060000, 0x936667ff, 0x29f944ce, 0x368b63d5, 0x0c26f262, 0x145a3c00, 0xb9e90000, 0x61270000, 0xf1610000, 0xce613d6c, 0xb0493d78, 0x47a96720, 0xe18e24c5, 0x23671400, 0xc8b90000, 0xf4c70000, 0xfb750000, 0x73cd2465, 0xf8a6a549, 0x02c40a3f, 0xdc24e61f, 0x23671400, 0xc8b90000, 0xf4c70000, 0xfb750000, 0x73cd2465, 0xf8a6a549, 0x02c40a3f, 0xdc24e61f, 0x373d2800, 0x71500000, 0x95e00000, 0x0a140000, 0xbdac1909, 0x48ef9831, 0x456d6d1f, 0x3daac2da, 0x54285c00, 0xeaed0000, 0xc5d60000, 0xa1c50000, 0xb3a26770, 0x94a5c4e1, 0x6bb0419d, 0x551b3782, 0x9cbb1800, 0xb0d30000, 0x92510000, 0xed930000, 0x593a4345, 0xe114d5f4, 0x430633da, 0x78cace29, 0x9cbb1800, 0xb0d30000, 0x92510000, 0xed930000, 0x593a4345, 0xe114d5f4, 0x430633da, 0x78cace29, 0xc8934400, 0x5a3e0000, 0x57870000, 0x4c560000, 0xea982435, 0x75b11115, 0x28b67247, 0x2dd1f9ab, 0x29449c00, 0x64e70000, 0xf24b0000, 0xc2f30000, 0x0ede4e8f, 0x56c23745, 0xf3e04259, 0x8d0d9ec4, 0x466d0c00, 0x08620000, 0xdd5d0000, 0xbadd0000, 0x6a927942, 0x441f2b93, 0x218ace6f, 0xbf2c0be2, 0x466d0c00, 0x08620000, 0xdd5d0000, 0xbadd0000, 0x6a927942, 0x441f2b93, 0x218ace6f, 0xbf2c0be2, 0x6f299000, 0x6c850000, 0x2f160000, 0x782e0000, 0x644c37cd, 0x12dd1cd6, 0xd26a8c36, 0x32219526, 0xf6800005, 0x3443c000, 0x24070000, 0x8f3d0000, 0x21373bfb, 0x0ab8d5ae, 0xcdc58b19, 0xd795ba31, 0xa67f0001, 0x71378000, 0x19fc0000, 0x96db0000, 0x3a8b6dfd, 0xebcaaef3, 0x2c6d478f, 0xac8e6c88, 0xa67f0001, 0x71378000, 0x19fc0000, 0x96db0000, 0x3a8b6dfd, 0xebcaaef3, 0x2c6d478f, 0xac8e6c88, 0x50ff0004, 0x45744000, 0x3dfb0000, 0x19e60000, 0x1bbc5606, 0xe1727b5d, 0xe1a8cc96, 0x7b1bd6b9, 0xf7750009, 0xcf3cc000, 0xc3d60000, 0x04920000, 0x029519a9, 0xf8e836ba, 0x7a87f14e, 0x9e16981a, 0xd46a0000, 0x8dc8c000, 0xa5af0000, 0x4a290000, 0xfc4e427a, 0xc9b4866c, 0x98369604, 0xf746c320, 0xd46a0000, 0x8dc8c000, 0xa5af0000, 0x4a290000, 0xfc4e427a, 0xc9b4866c, 0x98369604, 0xf746c320, 0x231f0009, 0x42f40000, 0x66790000, 0x4ebb0000, 0xfedb5bd3, 0x315cb0d6, 0xe2b1674a, 0x69505b3a, 0x774400f0, 0xf15a0000, 0xf5b20000, 0x34140000, 0x89377e8c, 0x5a8bec25, 0x0bc3cd1e, 0xcf3775cb, 0xf46c0050, 0x96180000, 0x14a50000, 0x031f0000, 0x42947eb8, 0x66bf7e19, 0x9ca470d2, 0x8a341574, 0xf46c0050, 0x96180000, 0x14a50000, 0x031f0000, 0x42947eb8, 0x66bf7e19, 0x9ca470d2, 0x8a341574, 0x832800a0, 0x67420000, 0xe1170000, 0x370b0000, 0xcba30034, 0x3c34923c, 0x9767bdcc, 0x450360bf, 0xe8870170, 0x9d720000, 0x12db0000, 0xd4220000, 0xf2886b27, 0xa921e543, 0x4ef8b518, 0x618813b1, 0xb4370060, 0x0c4c0000, 0x56c20000, 0x5cae0000, 0x94541f3f, 0x3b3ef825, 0x1b365f3d, 0xf3d45758, 0xb4370060, 0x0c4c0000, 0x56c20000, 0x5cae0000, 0x94541f3f, 0x3b3ef825, 0x1b365f3d, 0xf3d45758, 0x5cb00110, 0x913e0000, 0x44190000, 0x888c0000, 0x66dc7418, 0x921f1d66, 0x55ceea25, 0x925c44e9, 0x0c720000, 0x49e50f00, 0x42790000, 0x5cea0000, 0x33aa301a, 0x15822514, 0x95a34b7b, 0xb44b0090, 0xfe220000, 0xa7580500, 0x25d10000, 0xf7600000, 0x893178da, 0x1fd4f860, 0x4ed0a315, 0xa123ff9f, 0xfe220000, 0xa7580500, 0x25d10000, 0xf7600000, 0x893178da, 0x1fd4f860, 0x4ed0a315, 0xa123ff9f, 0xf2500000, 0xeebd0a00, 0x67a80000, 0xab8a0000, 0xba9b48c0, 
0x0a56dd74, 0xdb73e86e, 0x1568ff0f, 0x45180000, 0xa5b51700, 0xf96a0000, 0x3b480000, 0x1ecc142c, 0x231395d6, 0x16bca6b0, 0xdf33f4df, 0xb83d0000, 0x16710600, 0x379a0000, 0xf5b10000, 0x228161ac, 0xae48f145, 0x66241616, 0xc5c1eb3e, 0xb83d0000, 0x16710600, 0x379a0000, 0xf5b10000, 0x228161ac, 0xae48f145, 0x66241616, 0xc5c1eb3e, 0xfd250000, 0xb3c41100, 0xcef00000, 0xcef90000, 0x3c4d7580, 0x8d5b6493, 0x7098b0a6, 0x1af21fe1, 0x75a40000, 0xc28b2700, 0x94a40000, 0x90f50000, 0xfb7857e0, 0x49ce0bae, 0x1767c483, 0xaedf667e, 0xd1660000, 0x1bbc0300, 0x9eec0000, 0xf6940000, 0x03024527, 0xcf70fcf2, 0xb4431b17, 0x857f3c2b, 0xd1660000, 0x1bbc0300, 0x9eec0000, 0xf6940000, 0x03024527, 0xcf70fcf2, 0xb4431b17, 0x857f3c2b, 0xa4c20000, 0xd9372400, 0x0a480000, 0x66610000, 0xf87a12c7, 0x86bef75c, 0xa324df94, 0x2ba05a55, 0x75c90003, 0x0e10c000, 0xd1200000, 0xbaea0000, 0x8bc42f3e, 0x8758b757, 0xbb28761d, 0x00b72e2b, 0xeecf0001, 0x6f564000, 0xf33e0000, 0xa79e0000, 0xbdb57219, 0xb711ebc5, 0x4a3b40ba, 0xfeabf254, 0xeecf0001, 0x6f564000, 0xf33e0000, 0xa79e0000, 0xbdb57219, 0xb711ebc5, 0x4a3b40ba, 0xfeabf254, 0x9b060002, 0x61468000, 0x221e0000, 0x1d740000, 0x36715d27, 0x30495c92, 0xf11336a7, 0xfe1cdc7f, 0x86790000, 0x3f390002, 0xe19ae000, 0x98560000, 0x9565670e, 0x4e88c8ea, 0xd3dd4944, 0x161ddab9, 0x30b70000, 0xe5d00000, 0xf4f46000, 0x42c40000, 0x63b83d6a, 0x78ba9460, 0x21afa1ea, 0xb0a51834, 0x30b70000, 0xe5d00000, 0xf4f46000, 0x42c40000, 0x63b83d6a, 0x78ba9460, 0x21afa1ea, 0xb0a51834, 0xb6ce0000, 0xdae90002, 0x156e8000, 0xda920000, 0xf6dd5a64, 0x36325c8a, 0xf272e8ae, 0xa6b8c28d, 0x14190000, 0x23ca003c, 0x50df0000, 0x44b60000, 0x1b6c67b0, 0x3cf3ac75, 0x61e610b0, 0xdbcadb80, 0xe3430000, 0x3a4e0014, 0xf2c60000, 0xaa4e0000, 0xdb1e42a6, 0x256bbe15, 0x123db156, 0x3a4e99d7, 0xe3430000, 0x3a4e0014, 0xf2c60000, 0xaa4e0000, 0xdb1e42a6, 0x256bbe15, 0x123db156, 0x3a4e99d7, 0xf75a0000, 0x19840028, 0xa2190000, 0xeef80000, 0xc0722516, 0x19981260, 0x73dba1e6, 0xe1844257, 0x54500000, 0x0671005c, 0x25ae0000, 0x6a1e0000, 0x2ea54edf, 0x664e8512, 0xbfba18c3, 0x7e715d17, 0xbc8d0000, 0xfc3b0018, 0x19830000, 0xd10b0000, 0xae1878c4, 0x42a69856, 0x0012da37, 0x2c3b504e, 0xbc8d0000, 0xfc3b0018, 0x19830000, 0xd10b0000, 0xae1878c4, 0x42a69856, 0x0012da37, 0x2c3b504e, 0xe8dd0000, 0xfa4a0044, 0x3c2d0000, 0xbb150000, 0x80bd361b, 0x24e81d44, 0xbfa8c2f4, 0x524a0d59, 0x69510000, 0xd4e1009c, 0xc3230000, 0xac2f0000, 0xe4950bae, 0xcea415dc, 0x87ec287c, 0xbce1a3ce, 0xc6730000, 0xaf8d000c, 0xa4c10000, 0x218d0000, 0x23111587, 0x7913512f, 0x1d28ac88, 0x378dd173, 0xc6730000, 0xaf8d000c, 0xa4c10000, 0x218d0000, 0x23111587, 0x7913512f, 0x1d28ac88, 0x378dd173, 0xaf220000, 0x7b6c0090, 0x67e20000, 0x8da20000, 0xc7841e29, 0xb7b744f3, 0x9ac484f4, 0x8b6c72bd, 0xcc140000, 0xa5630000, 0x5ab90780, 0x3b500000, 0x4bd013ff, 0x879b3418, 0x694348c1, 0xca5a87fe, 0x819e0000, 0xec570000, 0x66320280, 0x95f30000, 0x5da92802, 0x48f43cbc, 0xe65aa22d, 0x8e67b7fa, 0x819e0000, 0xec570000, 0x66320280, 0x95f30000, 0x5da92802, 0x48f43cbc, 0xe65aa22d, 0x8e67b7fa, 0x4d8a0000, 0x49340000, 0x3c8b0500, 0xaea30000, 0x16793bfd, 0xcf6f08a4, 0x8f19eaec, 0x443d3004, 0x78230000, 0x12fc0000, 0xa93a0b80, 0x90a50000, 0x713e2879, 0x7ee98924, 0xf08ca062, 0x636f8bab, 0x02af0000, 0xb7280000, 0xba1c0300, 0x56980000, 0xba8d45d3, 0x8048c667, 0xa95c149a, 0xf4f6ea7b, 0x02af0000, 0xb7280000, 0xba1c0300, 0x56980000, 0xba8d45d3, 0x8048c667, 0xa95c149a, 0xf4f6ea7b, 0x7a8c0000, 0xa5d40000, 0x13260880, 0xc63d0000, 0xcbb36daa, 0xfea14f43, 0x59d0b4f8, 0x979961d0, 0xac480000, 0x1ba60000, 0x45fb1380, 0x03430000, 0x5a85316a, 
0x1fb250b6, 0xfe72c7fe, 0x91e478f6, 0x1e4e0000, 0xdecf0000, 0x6df80180, 0x77240000, 0xec47079e, 0xf4a0694e, 0xcda31812, 0x98aa496e, 0x1e4e0000, 0xdecf0000, 0x6df80180, 0x77240000, 0xec47079e, 0xf4a0694e, 0xcda31812, 0x98aa496e, 0xb2060000, 0xc5690000, 0x28031200, 0x74670000, 0xb6c236f4, 0xeb1239f8, 0x33d1dfec, 0x094e3198, 0xaec30000, 0x9c4f0001, 0x79d1e000, 0x2c150000, 0x45cc75b3, 0x6650b736, 0xab92f78f, 0xa312567b, 0xdb250000, 0x09290000, 0x49aac000, 0x81e10000, 0xcafe6b59, 0x42793431, 0x43566b76, 0xe86cba2e, 0xdb250000, 0x09290000, 0x49aac000, 0x81e10000, 0xcafe6b59, 0x42793431, 0x43566b76, 0xe86cba2e, 0x75e60000, 0x95660001, 0x307b2000, 0xadf40000, 0x8f321eea, 0x24298307, 0xe8c49cf9, 0x4b7eec55, 0x58430000, 0x807e0000, 0x78330001, 0xc66b3800, 0xe7375cdc, 0x79ad3fdd, 0xac73fe6f, 0x3a4479b1, 0x1d5a0000, 0x2b720000, 0x488d0000, 0xaf611800, 0x25cb2ec5, 0xc879bfd0, 0x81a20429, 0x1e7536a6, 0x1d5a0000, 0x2b720000, 0x488d0000, 0xaf611800, 0x25cb2ec5, 0xc879bfd0, 0x81a20429, 0x1e7536a6, 0x45190000, 0xab0c0000, 0x30be0001, 0x690a2000, 0xc2fc7219, 0xb1d4800d, 0x2dd1fa46, 0x24314f17, 0xa53b0000, 0x14260000, 0x4e30001e, 0x7cae0000, 0x8f9e0dd5, 0x78dfaa3d, 0xf73168d8, 0x0b1b4946, 0x07ed0000, 0xb2500000, 0x8774000a, 0x970d0000, 0x437223ae, 0x48c76ea4, 0xf4786222, 0x9075b1ce, 0x07ed0000, 0xb2500000, 0x8774000a, 0x970d0000, 0x437223ae, 0x48c76ea4, 0xf4786222, 0x9075b1ce, 0xa2d60000, 0xa6760000, 0xc9440014, 0xeba30000, 0xccec2e7b, 0x3018c499, 0x03490afa, 0x9b6ef888, 0x88980000, 0x1f940000, 0x7fcf002e, 0xfb4e0000, 0xf158079a, 0x61ae9167, 0xa895706c, 0xe6107494, 0x0bc20000, 0xdb630000, 0x7e88000c, 0x15860000, 0x91fd48f3, 0x7581bb43, 0xf460449e, 0xd8b61463, 0x0bc20000, 0xdb630000, 0x7e88000c, 0x15860000, 0x91fd48f3, 0x7581bb43, 0xf460449e, 0xd8b61463, 0x835a0000, 0xc4f70000, 0x01470022, 0xeec80000, 0x60a54f69, 0x142f2a24, 0x5cf534f2, 0x3ea660f7, 0x52500000, 0x29540000, 0x6a61004e, 0xf0ff0000, 0x9a317eec, 0x452341ce, 0xcf568fe5, 0x5303130f, 0x538d0000, 0xa9fc0000, 0x9ef70006, 0x56ff0000, 0x0ae4004e, 0x92c5cdf9, 0xa9444018, 0x7f975691, 0x538d0000, 0xa9fc0000, 0x9ef70006, 0x56ff0000, 0x0ae4004e, 0x92c5cdf9, 0xa9444018, 0x7f975691, 0x01dd0000, 0x80a80000, 0xf4960048, 0xa6000000, 0x90d57ea2, 0xd7e68c37, 0x6612cffd, 0x2c94459e, 0xe6280000, 0x4c4b0000, 0xa8550000, 0xd3d002e0, 0xd86130b8, 0x98a7b0da, 0x289506b4, 0xd75a4897, 0xf0c50000, 0x59230000, 0x45820000, 0xe18d00c0, 0x3b6d0631, 0xc2ed5699, 0xcbe0fe1c, 0x56a7b19f, 0xf0c50000, 0x59230000, 0x45820000, 0xe18d00c0, 0x3b6d0631, 0xc2ed5699, 0xcbe0fe1c, 0x56a7b19f, 0x16ed0000, 0x15680000, 0xedd70000, 0x325d0220, 0xe30c3689, 0x5a4ae643, 0xe375f8a8, 0x81fdf908, 0xb4310000, 0x77330000, 0xb15d0000, 0x7fd004e0, 0x78a26138, 0xd116c35d, 0xd256d489, 0x4e6f74de, 0xe3060000, 0xbdc10000, 0x87130000, 0xbff20060, 0x2eba0a1a, 0x8db53751, 0x73c5ab06, 0x5bd61539, 0xe3060000, 0xbdc10000, 0x87130000, 0xbff20060, 0x2eba0a1a, 0x8db53751, 0x73c5ab06, 0x5bd61539, 0x57370000, 0xcaf20000, 0x364e0000, 0xc0220480, 0x56186b22, 0x5ca3f40c, 0xa1937f8f, 0x15b961e7, 0x02f20000, 0xa2810000, 0x873f0000, 0xe36c7800, 0x1e1d74ef, 0x073d2bd6, 0xc4c23237, 0x7f32259e, 0xbadd0000, 0x13ad0000, 0xb7e70000, 0xf7282800, 0xdf45144d, 0x361ac33a, 0xea5a8d14, 0x2a2c18f0, 0xbadd0000, 0x13ad0000, 0xb7e70000, 0xf7282800, 0xdf45144d, 0x361ac33a, 0xea5a8d14, 0x2a2c18f0, 0xb82f0000, 0xb12c0000, 0x30d80000, 0x14445000, 0xc15860a2, 0x3127e8ec, 0x2e98bf23, 0x551e3d6e, 0x1e6c0000, 0xc4420000, 0x8a2e0000, 0xbcb6b800, 0x2c4413b6, 0x8bfdd3da, 0x6a0c1bc8, 0xb99dc2eb, 0x92560000, 0x1eda0000, 0xea510000, 0xe8b13000, 0xa93556a5, 
0xebfb6199, 0xb15c2254, 0x33c5244f, 0x92560000, 0x1eda0000, 0xea510000, 0xe8b13000, 0xa93556a5, 0xebfb6199, 0xb15c2254, 0x33c5244f, 0x8c3a0000, 0xda980000, 0x607f0000, 0x54078800, 0x85714513, 0x6006b243, 0xdb50399c, 0x8a58e6a4, 0x033d0000, 0x08b30000, 0xf33a0000, 0x3ac20007, 0x51298a50, 0x6b6e661f, 0x0ea5cfe3, 0xe6da7ffe, 0xa8da0000, 0x96be0000, 0x5c1d0000, 0x07da0002, 0x7d669583, 0x1f98708a, 0xbb668808, 0xda878000, 0xa8da0000, 0x96be0000, 0x5c1d0000, 0x07da0002, 0x7d669583, 0x1f98708a, 0xbb668808, 0xda878000, 0xabe70000, 0x9e0d0000, 0xaf270000, 0x3d180005, 0x2c4f1fd3, 0x74f61695, 0xb5c347eb, 0x3c5dfffe, 0x01930000, 0xe7820000, 0xedfb0000, 0xcf0c000b, 0x8dd08d58, 0xbca3b42e, 0x063661e1, 0x536f9e7b, 0x92280000, 0xdc850000, 0x57fa0000, 0x56dc0003, 0xbae92316, 0x5aefa30c, 0x90cef752, 0x7b1675d7, 0x92280000, 0xdc850000, 0x57fa0000, 0x56dc0003, 0xbae92316, 0x5aefa30c, 0x90cef752, 0x7b1675d7, 0x93bb0000, 0x3b070000, 0xba010000, 0x99d00008, 0x3739ae4e, 0xe64c1722, 0x96f896b3, 0x2879ebac, 0x5fa80000, 0x56030000, 0x43ae0000, 0x64f30013, 0x257e86bf, 0x1311944e, 0x541e95bf, 0x8ea4db69, 0x00440000, 0x7f480000, 0xda7c0000, 0x2a230001, 0x3badc9cc, 0xa9b69c87, 0x030a9e60, 0xbe0a679e, 0x00440000, 0x7f480000, 0xda7c0000, 0x2a230001, 0x3badc9cc, 0xa9b69c87, 0x030a9e60, 0xbe0a679e, 0x5fec0000, 0x294b0000, 0x99d20000, 0x4ed00012, 0x1ed34f73, 0xbaa708c9, 0x57140bdf, 0x30aebcf7, 0xee930000, 0xd6070000, 0x92c10000, 0x2b9801e0, 0x9451287c, 0x3b6cfb57, 0x45312374, 0x201f6a64, 0x7b280000, 0x57420000, 0xa9e50000, 0x634300a0, 0x9edb442f, 0x6d9995bb, 0x27f83b03, 0xc7ff60f0, 0x7b280000, 0x57420000, 0xa9e50000, 0x634300a0, 0x9edb442f, 0x6d9995bb, 0x27f83b03, 0xc7ff60f0, 0x95bb0000, 0x81450000, 0x3b240000, 0x48db0140, 0x0a8a6c53, 0x56f56eec, 0x62c91877, 0xe7e00a94 }; #define SBOX(a, b, c, d) { \ uint32_t t; \ t = (a); \ (a) &= (c); \ (a) ^= (d); \ (c) ^= (b); \ (c) ^= (a); \ (d) |= t; \ (d) ^= (b); \ t ^= (c); \ (b) = (d); \ (d) |= t; \ (d) ^= (a); \ (a) &= (b); \ t ^= (a); \ (b) ^= (d); \ (b) ^= t; \ (a) = (c); \ (c) = (b); \ (b) = (d); \ (d) = ~t; \ } #define HAMSI_L(a, b, c, d) { \ (a) = ROTL32(a, 13); \ (c) = ROTL32(c, 3); \ (b) ^= (a) ^ (c); \ (d) ^= (c) ^ ((a) << 3); \ (b) = ROTL32(b, 1); \ (d) = ROTL32(d, 7); \ (a) ^= (b) ^ (d); \ (c) ^= (d) ^ ((b) << 7); \ (a) = ROTL32(a, 5); \ (c) = ROTL32(c, 22); \ } #define ROUND_BIG(rc, alpha) { \ m0 ^= alpha[0x00]; \ c4 ^= alpha[0x08]; \ m8 ^= alpha[0x10]; \ cC ^= alpha[0x18]; \ m1 ^= alpha[0x01] ^ rc; \ c5 ^= alpha[0x09]; \ m9 ^= alpha[0x11]; \ cD ^= alpha[0x19]; \ c0 ^= alpha[0x02]; \ m4 ^= alpha[0x0A]; \ c8 ^= alpha[0x12]; \ mC ^= alpha[0x1A]; \ c1 ^= alpha[0x03]; \ m5 ^= alpha[0x0B]; \ c9 ^= alpha[0x13]; \ mD ^= alpha[0x1B]; \ m2 ^= alpha[0x04]; \ c6 ^= alpha[0x0C]; \ mA ^= alpha[0x14]; \ cE ^= alpha[0x1C]; \ m3 ^= alpha[0x05]; \ c7 ^= alpha[0x0D]; \ mB ^= alpha[0x15]; \ cF ^= alpha[0x1D]; \ c2 ^= alpha[0x06]; \ m6 ^= alpha[0x0E]; \ cA ^= alpha[0x16]; \ mE ^= alpha[0x1E]; \ c3 ^= alpha[0x07]; \ m7 ^= alpha[0x0F]; \ cB ^= alpha[0x17]; \ mF ^= alpha[0x1F]; \ SBOX(m0, c4, m8, cC); \ SBOX(m1, c5, m9, cD); \ SBOX(c0, m4, c8, mC); \ SBOX(c1, m5, c9, mD); \ SBOX(m2, c6, mA, cE); \ SBOX(m3, c7, mB, cF); \ SBOX(c2, m6, cA, mE); \ SBOX(c3, m7, cB, mF); \ HAMSI_L(m0, c5, c8, mD); \ HAMSI_L(m1, m4, c9, cE); \ HAMSI_L(c0, m5, mA, cF); \ HAMSI_L(c1, c6, mB, mE); \ HAMSI_L(m2, c7, cA, mF); \ HAMSI_L(m3, m6, cB, cC); \ HAMSI_L(c2, m7, m8, cD); \ HAMSI_L(c3, c4, m9, mC); \ HAMSI_L(m0, c0, m3, c3); \ HAMSI_L(m8, c9, mB, cA); \ HAMSI_L(c5, m5, c6, m6); \ HAMSI_L(cD, mC, cE, mF); \ } 
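// Host-side sketch (not from the original file): HAMSI_L above is Hamsi's
// linear diffusion layer, two rotate/XOR mixing passes over four 32-bit
// words, applied in place. The same steps written as a plain function, with
// a local rotl_host() helper standing in for the device ROTL32 macro:
#include <cstdint>

static inline uint32_t rotl_host(uint32_t x, int n) {
  return (x << n) | (x >> (32 - n));
}

static void hamsi_L_host(uint32_t &a, uint32_t &b, uint32_t &c, uint32_t &d) {
  a = rotl_host(a, 13);
  c = rotl_host(c, 3);
  b ^= a ^ c;
  d ^= c ^ (a << 3);
  b = rotl_host(b, 1);
  d = rotl_host(d, 7);
  a ^= b ^ d;
  c ^= d ^ (b << 7);
  a = rotl_host(a, 5);
  c = rotl_host(c, 22);
}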
#define P_BIG { \ for( int r = 0; r < 6; r++ ) \ ROUND_BIG(r, d_alpha_n); \ } #define PF_BIG { \ for( int r = 0; r < 12; r++ ) \ ROUND_BIG(r, d_alpha_f); \ } #define T_BIG { \ /* order is important */ \ cF = (h[0xF] ^= cB); \ cE = (h[0xE] ^= cA); \ cD = (h[0xD] ^= mB); \ cC = (h[0xC] ^= mA); \ cB = (h[0xB] ^= c9); \ cA = (h[0xA] ^= c8); \ c9 = (h[0x9] ^= m9); \ c8 = (h[0x8] ^= m8); \ c7 = (h[0x7] ^= c3); \ c6 = (h[0x6] ^= c2); \ c5 = (h[0x5] ^= m3); \ c4 = (h[0x4] ^= m2); \ c3 = (h[0x3] ^= c1); \ c2 = (h[0x2] ^= c0); \ c1 = (h[0x1] ^= m1); \ c0 = (h[0x0] ^= m0); \ } __global__ void x13_hamsi512_gpu_hash_64(uint32_t threads, uint32_t startNounce, uint32_t *g_hash ) { uint32_t thread = (blockDim.x * blockIdx.x + threadIdx.x); if (thread < threads) { const uint32_t d_alpha_n[32] = { 0xff00f0f0, 0xccccaaaa, 0xf0f0cccc, 0xff00aaaa, 0xccccaaaa, 0xf0f0ff00, 0xaaaacccc, 0xf0f0ff00, 0xf0f0cccc, 0xaaaaff00, 0xccccff00, 0xaaaaf0f0, 0xaaaaf0f0, 0xff00cccc, 0xccccf0f0, 0xff00aaaa, 0xccccaaaa, 0xff00f0f0, 0xff00aaaa, 0xf0f0cccc, 0xf0f0ff00, 0xccccaaaa, 0xf0f0ff00, 0xaaaacccc, 0xaaaaff00, 0xf0f0cccc, 0xaaaaf0f0, 0xccccff00, 0xff00cccc, 0xaaaaf0f0, 0xff00aaaa, 0xccccf0f0 }; const uint32_t d_alpha_f[32] = { 0xcaf9639c, 0x0ff0f9c0, 0x639c0ff0, 0xcaf9f9c0, 0x0ff0f9c0, 0x639ccaf9, 0xf9c00ff0, 0x639ccaf9, 0x639c0ff0, 0xf9c0caf9, 0x0ff0caf9, 0xf9c0639c, 0xf9c0639c, 0xcaf90ff0, 0x0ff0639c, 0xcaf9f9c0, 0x0ff0f9c0, 0xcaf9639c, 0xcaf9f9c0, 0x639c0ff0, 0x639ccaf9, 0x0ff0f9c0, 0x639ccaf9, 0xf9c00ff0, 0xf9c0caf9, 0x639c0ff0, 0xf9c0639c, 0x0ff0caf9, 0xcaf90ff0, 0xf9c0639c, 0xcaf9f9c0, 0x0ff0639c }; uint32_t nounce = (startNounce + thread); uint32_t hashPosition = nounce - startNounce; uint32_t *Hash = &g_hash[hashPosition*16]; uint8_t h1[16 * 4]; uint28 *phash = (uint28*)Hash; uint28 *outpt = (uint28*)h1; outpt[0] = phash[0]; outpt[1] = phash[1]; uint32_t c0 = 0x73746565, c1 = 0x6c706172, c2 = 0x6b204172, c3 = 0x656e6265; uint32_t c4 = 0x72672031, c5 = 0x302c2062, c6 = 0x75732032, c7 = 0x3434362c; uint32_t c8 = 0x20422d33, c9 = 0x30303120, cA = 0x4c657576, cB = 0x656e2d48; uint32_t cC = 0x65766572, cD = 0x6c65652c, cE = 0x2042656c, cF = 0x6769756d; uint32_t m0, m1, m2, m3, m4, m5, m6, m7, m8, m9, mA, mB, mC, mD, mE, mF; uint32_t h[16] = { c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, cA, cB, cC, cD, cE, cF }; uint32_t *tp, db, dm; #if __CUDA_ARCH__ > 500 #pragma unroll 2 #endif for(int i = 0; i < 64; i += 8) { tp = &d_T512[0]; m0 = 0; m1 = 0; m2 = 0; m3 = 0; m4 = 0; m5 = 0; m6 = 0; m7 = 0; m8 = 0; m9 = 0; mA = 0; mB = 0; mC = 0; mD = 0; mE = 0; mF = 0; // #pragma unroll 2 for (int u = 0; u < 8; u++) { db = h1[i+u]; // #pragma unroll 2 for (int v = 0; v < 8; v++, db >>= 1, tp += 16) { dm = -(db & 1); m0 ^= dm & tp[0]; m1 ^= dm & tp[1]; m2 ^= dm & tp[2]; m3 ^= dm & tp[3]; m4 ^= dm & tp[4]; m5 ^= dm & tp[5]; m6 ^= dm & tp[6]; m7 ^= dm & tp[7]; m8 ^= dm & tp[8]; m9 ^= dm & tp[9]; mA ^= dm & tp[10]; mB ^= dm & tp[11]; mC ^= dm & tp[12]; mD ^= dm & tp[13]; mE ^= dm & tp[14]; mF ^= dm & tp[15]; } } for (int r = 0; r < 6; r += 2) { ROUND_BIG(r, d_alpha_n); ROUND_BIG(r+1, d_alpha_n); } T_BIG; } tp = &d_T512[0] + 112; m0 = tp[ 0]; m1 = tp[ 1]; m2 = tp[ 2]; m3 = tp[ 3]; m4 = tp[ 4]; m5 = tp[ 5]; m6 = tp[ 6]; m7 = tp[ 7]; m8 = tp[ 8]; m9 = tp[ 9]; mA = tp[10]; mB = tp[11]; mC = tp[12]; mD = tp[13]; mE = tp[14]; mF = tp[15]; for (int r = 0; r < 6; r += 2) { // ROUND_BIG(r, d_alpha_n); m0 ^= d_alpha_n[0x00]; \ c4 ^= d_alpha_n[0x08]; \ m8 ^= d_alpha_n[0x10]; \ cC ^= d_alpha_n[0x18]; \ m1 ^= d_alpha_n[0x01] ^ r; \ c5 ^= d_alpha_n[0x09]; \ 
m9 ^= d_alpha_n[0x11]; \ cD ^= d_alpha_n[0x19]; \ c0 ^= d_alpha_n[0x02]; \ m4 ^= d_alpha_n[0x0A]; \ c8 ^= d_alpha_n[0x12]; \ mC ^= d_alpha_n[0x1A]; \ c1 ^= d_alpha_n[0x03]; \ m5 ^= d_alpha_n[0x0B]; \ c9 ^= d_alpha_n[0x13]; \ mD ^= d_alpha_n[0x1B]; \ m2 ^= d_alpha_n[0x04]; \ c6 ^= d_alpha_n[0x0C]; \ mA ^= d_alpha_n[0x14]; \ cE ^= d_alpha_n[0x1C]; \ m3 ^= d_alpha_n[0x05]; \ c7 ^= d_alpha_n[0x0D]; \ mB ^= d_alpha_n[0x15]; \ cF ^= d_alpha_n[0x1D]; \ c2 ^= d_alpha_n[0x06]; \ m6 ^= d_alpha_n[0x0E]; \ cA ^= d_alpha_n[0x16]; \ mE ^= d_alpha_n[0x1E]; \ c3 ^= d_alpha_n[0x07]; \ m7 ^= d_alpha_n[0x0F]; \ cB ^= d_alpha_n[0x17]; \ mF ^= d_alpha_n[0x1F]; \ SBOX(m0, c4, m8, cC); \ SBOX(m1, c5, m9, cD); \ SBOX(c0, m4, c8, mC); \ SBOX(c1, m5, c9, mD); \ SBOX(m2, c6, mA, cE); \ SBOX(m3, c7, mB, cF); \ SBOX(c2, m6, cA, mE); \ SBOX(c3, m7, cB, mF); \ HAMSI_L(m0, c5, c8, mD); \ HAMSI_L(m1, m4, c9, cE); \ HAMSI_L(c0, m5, mA, cF); \ HAMSI_L(c1, c6, mB, mE); \ HAMSI_L(m2, c7, cA, mF); \ HAMSI_L(m3, m6, cB, cC); \ HAMSI_L(c2, m7, m8, cD); \ HAMSI_L(c3, c4, m9, mC); \ HAMSI_L(m0, c0, m3, c3); \ HAMSI_L(m8, c9, mB, cA); \ HAMSI_L(c5, m5, c6, m6); \ HAMSI_L(cD, mC, cE, mF); \ // ROUND_BIG(r+1, d_alpha_n); m0 ^= d_alpha_n[0x00]; \ c4 ^= d_alpha_n[0x08]; \ m8 ^= d_alpha_n[0x10]; \ cC ^= d_alpha_n[0x18]; \ m1 ^= d_alpha_n[0x01] ^ (r+1); \ c5 ^= d_alpha_n[0x09]; \ m9 ^= d_alpha_n[0x11]; \ cD ^= d_alpha_n[0x19]; \ c0 ^= d_alpha_n[0x02]; \ m4 ^= d_alpha_n[0x0A]; \ c8 ^= d_alpha_n[0x12]; \ mC ^= d_alpha_n[0x1A]; \ c1 ^= d_alpha_n[0x03]; \ m5 ^= d_alpha_n[0x0B]; \ c9 ^= d_alpha_n[0x13]; \ mD ^= d_alpha_n[0x1B]; \ m2 ^= d_alpha_n[0x04]; \ c6 ^= d_alpha_n[0x0C]; \ mA ^= d_alpha_n[0x14]; \ cE ^= d_alpha_n[0x1C]; \ m3 ^= d_alpha_n[0x05]; \ c7 ^= d_alpha_n[0x0D]; \ mB ^= d_alpha_n[0x15]; \ cF ^= d_alpha_n[0x1D]; \ c2 ^= d_alpha_n[0x06]; \ m6 ^= d_alpha_n[0x0E]; \ cA ^= d_alpha_n[0x16]; \ mE ^= d_alpha_n[0x1E]; \ c3 ^= d_alpha_n[0x07]; \ m7 ^= d_alpha_n[0x0F]; \ cB ^= d_alpha_n[0x17]; \ mF ^= d_alpha_n[0x1F]; \ SBOX(m0, c4, m8, cC); \ SBOX(m1, c5, m9, cD); \ SBOX(c0, m4, c8, mC); \ SBOX(c1, m5, c9, mD); \ SBOX(m2, c6, mA, cE); \ SBOX(m3, c7, mB, cF); \ SBOX(c2, m6, cA, mE); \ SBOX(c3, m7, cB, mF); \ HAMSI_L(m0, c5, c8, mD); \ HAMSI_L(m1, m4, c9, cE); \ HAMSI_L(c0, m5, mA, cF); \ HAMSI_L(c1, c6, mB, mE); \ HAMSI_L(m2, c7, cA, mF); \ HAMSI_L(m3, m6, cB, cC); \ HAMSI_L(c2, m7, m8, cD); \ HAMSI_L(c3, c4, m9, mC); \ HAMSI_L(m0, c0, m3, c3); \ HAMSI_L(m8, c9, mB, cA); \ HAMSI_L(c5, m5, c6, m6); \ HAMSI_L(cD, mC, cE, mF); \ } T_BIG; tp = &d_T512[0] + 784; m0 = tp[ 0]; m1 = tp[ 1]; m2 = tp[ 2]; m3 = tp[ 3]; m4 = tp[ 4]; m5 = tp[ 5]; m6 = tp[ 6]; m7 = tp[ 7]; m8 = tp[ 8]; m9 = tp[ 9]; mA = tp[10]; mB = tp[11]; mC = tp[12]; mD = tp[13]; mE = tp[14]; mF = tp[15]; //#pragma unroll 2 for( int r = 0; r < 12; r += 2 ) { // ROUND_BIG(r, d_alpha_f); m0 ^= d_alpha_f[0x00]; \ c4 ^= d_alpha_f[0x08]; \ m8 ^= d_alpha_f[0x10]; \ cC ^= d_alpha_f[0x18]; \ m1 ^= d_alpha_f[0x01] ^ r; \ c5 ^= d_alpha_f[0x09]; \ m9 ^= d_alpha_f[0x11]; \ cD ^= d_alpha_f[0x19]; \ c0 ^= d_alpha_f[0x02]; \ m4 ^= d_alpha_f[0x0A]; \ c8 ^= d_alpha_f[0x12]; \ mC ^= d_alpha_f[0x1A]; \ c1 ^= d_alpha_f[0x03]; \ m5 ^= d_alpha_f[0x0B]; \ c9 ^= d_alpha_f[0x13]; \ mD ^= d_alpha_f[0x1B]; \ m2 ^= d_alpha_f[0x04]; \ c6 ^= d_alpha_f[0x0C]; \ mA ^= d_alpha_f[0x14]; \ cE ^= d_alpha_f[0x1C]; \ m3 ^= d_alpha_f[0x05]; \ c7 ^= d_alpha_f[0x0D]; \ mB ^= d_alpha_f[0x15]; \ cF ^= d_alpha_f[0x1D]; \ c2 ^= d_alpha_f[0x06]; \ m6 ^= d_alpha_f[0x0E]; \ cA ^= d_alpha_f[0x16]; \ mE ^= d_alpha_f[0x1E]; \ c3 ^= 
d_alpha_f[0x07]; \ m7 ^= d_alpha_f[0x0F]; \ cB ^= d_alpha_f[0x17]; \ mF ^= d_alpha_f[0x1F]; \ SBOX(m0, c4, m8, cC); \ SBOX(m1, c5, m9, cD); \ SBOX(c0, m4, c8, mC); \ SBOX(c1, m5, c9, mD); \ SBOX(m2, c6, mA, cE); \ SBOX(m3, c7, mB, cF); \ SBOX(c2, m6, cA, mE); \ SBOX(c3, m7, cB, mF); \ HAMSI_L(m0, c5, c8, mD); \ HAMSI_L(m1, m4, c9, cE); \ HAMSI_L(c0, m5, mA, cF); \ HAMSI_L(c1, c6, mB, mE); \ HAMSI_L(m2, c7, cA, mF); \ HAMSI_L(m3, m6, cB, cC); \ HAMSI_L(c2, m7, m8, cD); \ HAMSI_L(c3, c4, m9, mC); \ HAMSI_L(m0, c0, m3, c3); \ HAMSI_L(m8, c9, mB, cA); \ HAMSI_L(c5, m5, c6, m6); \ HAMSI_L(cD, mC, cE, mF); \ // ROUND_BIG(r+1, d_alpha_n); m0 ^= d_alpha_f[0x00]; \ c4 ^= d_alpha_f[0x08]; \ m8 ^= d_alpha_f[0x10]; \ cC ^= d_alpha_f[0x18]; \ m1 ^= d_alpha_f[0x01] ^ (r + 1); \ c5 ^= d_alpha_f[0x09]; \ m9 ^= d_alpha_f[0x11]; \ cD ^= d_alpha_f[0x19]; \ c0 ^= d_alpha_f[0x02]; \ m4 ^= d_alpha_f[0x0A]; \ c8 ^= d_alpha_f[0x12]; \ mC ^= d_alpha_f[0x1A]; \ c1 ^= d_alpha_f[0x03]; \ m5 ^= d_alpha_f[0x0B]; \ c9 ^= d_alpha_f[0x13]; \ mD ^= d_alpha_f[0x1B]; \ m2 ^= d_alpha_f[0x04]; \ c6 ^= d_alpha_f[0x0C]; \ mA ^= d_alpha_f[0x14]; \ cE ^= d_alpha_f[0x1C]; \ m3 ^= d_alpha_f[0x05]; \ c7 ^= d_alpha_f[0x0D]; \ mB ^= d_alpha_f[0x15]; \ cF ^= d_alpha_f[0x1D]; \ c2 ^= d_alpha_f[0x06]; \ m6 ^= d_alpha_f[0x0E]; \ cA ^= d_alpha_f[0x16]; \ mE ^= d_alpha_f[0x1E]; \ c3 ^= d_alpha_f[0x07]; \ m7 ^= d_alpha_f[0x0F]; \ cB ^= d_alpha_f[0x17]; \ mF ^= d_alpha_f[0x1F]; \ SBOX(m0, c4, m8, cC); \ SBOX(m1, c5, m9, cD); \ SBOX(c0, m4, c8, mC); \ SBOX(c1, m5, c9, mD); \ SBOX(m2, c6, mA, cE); \ SBOX(m3, c7, mB, cF); \ SBOX(c2, m6, cA, mE); \ SBOX(c3, m7, cB, mF); \ HAMSI_L(m0, c5, c8, mD); \ HAMSI_L(m1, m4, c9, cE); \ HAMSI_L(c0, m5, mA, cF); \ HAMSI_L(c1, c6, mB, mE); \ HAMSI_L(m2, c7, cA, mF); \ HAMSI_L(m3, m6, cB, cC); \ HAMSI_L(c2, m7, m8, cD); \ HAMSI_L(c3, c4, m9, mC); \ HAMSI_L(m0, c0, m3, c3); \ HAMSI_L(m8, c9, mB, cA); \ HAMSI_L(c5, m5, c6, m6); \ HAMSI_L(cD, mC, cE, mF); \ } h[0x0] = cuda_swab32(h[0x0] ^ m0); h[0x1] = cuda_swab32(h[0x1] ^ m1); h[0x2] = cuda_swab32(h[0x2] ^ c0); h[0x3] = cuda_swab32(h[0x3] ^ c1); h[0x4] = cuda_swab32(h[0x4] ^ m2); h[0x5] = cuda_swab32(h[0x5] ^ m3); h[0x6] = cuda_swab32(h[0x6] ^ c2); h[0x7] = cuda_swab32(h[0x7] ^ c3); h[0x8] = cuda_swab32(h[0x8] ^ m8); h[0x9] = cuda_swab32(h[0x9] ^ m9); h[0xA] = cuda_swab32(h[0xa] ^ c8); h[0xB] = cuda_swab32(h[0xb] ^ c9); h[0xC] = cuda_swab32(h[0xc] ^ mA); h[0xD] = cuda_swab32(h[0xd] ^ mB); h[0xE] = cuda_swab32(h[0xe] ^ cA); h[0xF] = cuda_swab32(h[0xf] ^ cB); phash = (uint28*)h; outpt = (uint28*)Hash; outpt[0] = phash[0]; outpt[1] = phash[1]; } } __host__ void x13_hamsi512_cpu_init(int thr_id, uint32_t threads) { } __host__ void x13_hamsi512_cpu_hash_64( uint32_t threads, uint32_t startNounce, uint32_t *d_hash) { const uint32_t threadsperblock = 128; dim3 grid((threads + threadsperblock-1)/threadsperblock); dim3 block(threadsperblock); x13_hamsi512_gpu_hash_64<<<grid, block>>>(threads, startNounce, d_hash); }
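// Host-side sketch (not from the original file) of the branchless table
// lookup used in the message-expansion loop above: dm = -(db & 1) yields
// 0xffffffff when the current message bit is set and 0 otherwise, so
// "m ^= dm & tp[k]" mixes in a table word only for set bits, without
// branching. The 2x2 table below is invented for illustration; the real
// kernel consumes 16 words of d_T512 per message bit.
#include <cstdint>
#include <cstdio>

int main() {
  const uint32_t table[2][2] = {{0x11111111u, 0x22222222u},
                                {0x44444444u, 0x88888888u}};
  uint32_t m0 = 0, m1 = 0;
  uint32_t db = 0x2;  // bit 0 clear, bit 1 set
  for (int v = 0; v < 2; v++, db >>= 1) {
    uint32_t dm = 0u - (db & 1u);  // mirrors the kernel's dm = -(db & 1)
    m0 ^= dm & table[v][0];
    m1 ^= dm & table[v][1];
  }
  printf("m0=%08x m1=%08x\n", m0, m1);  // only row 1 contributes
  return 0;
}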
#include <thrust/device_vector.h> #include <thrust/for_each.h> #include <thrust/transform.h> #include <catch2/catch.hpp> #include <cuco/static_map.cuh> namespace { namespace cg = cooperative_groups; // User-defined logical algorithms to reduce compilation time template <typename Iterator, typename Predicate> bool all_of(Iterator begin, Iterator end, Predicate p, cudaStream_t stream = 0) { auto size = thrust::distance(begin, end); auto out = thrust::count_if(thrust::cuda::par.on(stream), begin, end, p); cudaStreamSynchronize(stream); return size == out; } template <typename Iterator, typename Predicate> bool any_of(Iterator begin, Iterator end, Predicate p) { return thrust::count_if(begin, end, p) > 0; } template <typename Iterator, typename Predicate> bool none_of(Iterator begin, Iterator end, Predicate p) { return not all_of(begin, end, p); } } // namespace enum class dist_type { UNIQUE, UNIFORM, GAUSSIAN }; template <dist_type Dist, typename Key, typename OutputIt> static void generate_keys(OutputIt output_begin, OutputIt output_end) { auto num_keys = std::distance(output_begin, output_end); std::random_device rd; std::mt19937 gen{rd()}; switch (Dist) { case dist_type::UNIQUE: for (auto i = 0; i < num_keys; ++i) { output_begin[i] = i; } break; case dist_type::UNIFORM: for (auto i = 0; i < num_keys; ++i) { output_begin[i] = std::abs(static_cast<Key>(gen())); } break; case dist_type::GAUSSIAN: std::normal_distribution<> dg{1e9, 1e7}; for (auto i = 0; i < num_keys; ++i) { output_begin[i] = std::abs(static_cast<Key>(dg(gen))); } break; } } // User-defined key type template <typename T> struct key_pair_type { T a; T b; __host__ __device__ key_pair_type() {} __host__ __device__ key_pair_type(T x) : a{x}, b{x} {} // Device equality operator is mandatory due to libcudacxx bug: // https://github.com/NVIDIA/libcudacxx/issues/223 __device__ bool operator==(key_pair_type const& other) const { return a == other.a and b == other.b; } }; // User-defined key type template <typename T> struct large_key_type { T a; T b; T c; __host__ __device__ large_key_type() {} __host__ __device__ large_key_type(T x) : a{x}, b{x}, c{x} {} // Device equality operator is mandatory due to libcudacxx bug: // https://github.com/NVIDIA/libcudacxx/issues/223 __device__ bool operator==(large_key_type const& other) const { return a == other.a and b == other.b and c == other.c; } }; // User-defined value type template <typename T> struct value_pair_type { T f; T s; __host__ __device__ value_pair_type() {} __host__ __device__ value_pair_type(T x) : f{x}, s{x} {} __device__ bool operator==(value_pair_type const& other) const { return f == other.f and s == other.s; } }; // User-defined device hasher struct hash_custom_key { template <typename custom_type> __device__ uint32_t operator()(custom_type k) { return k.a; }; }; // User-defined device key equality struct custom_key_equals { template <typename custom_type> __device__ bool operator()(custom_type lhs, custom_type rhs) { return std::tie(lhs.a, lhs.b) == std::tie(rhs.a, rhs.b); } }; #define SIZE 10 __device__ int A[SIZE]; template <typename T> struct custom_equals { __device__ bool operator()(T lhs, T rhs) { return A[lhs] == A[rhs]; } }; TEMPLATE_TEST_CASE_SIG("User defined key and value type", "", ((typename Key, typename Value), Key, Value), #ifndef CUCO_NO_INDEPENDENT_THREADS // Key type larger than 8B only supported for sm_70 and up (key_pair_type<int64_t>, value_pair_type<int32_t>), (key_pair_type<int64_t>, value_pair_type<int64_t>), (large_key_type<int32_t>, 
value_pair_type<int32_t>), #endif (key_pair_type<int32_t>, value_pair_type<int32_t>)) { auto const sentinel_key = Key{-1}; auto const sentinel_value = Value{-1}; constexpr std::size_t num = 100; constexpr std::size_t capacity = num * 2; cuco::static_map<Key, Value> map{capacity, sentinel_key, sentinel_value}; thrust::device_vector<Key> insert_keys(num); thrust::device_vector<Value> insert_values(num); thrust::transform(thrust::device, thrust::counting_iterator<int>(0), thrust::counting_iterator<int>(num), insert_keys.begin(), [] __device__(auto i) { return Key{i}; }); thrust::transform(thrust::device, thrust::counting_iterator<int>(0), thrust::counting_iterator<int>(num), insert_values.begin(), [] __device__(auto i) { return Value{i}; }); auto insert_pairs = thrust::make_zip_iterator(thrust::make_tuple(insert_keys.begin(), insert_values.begin())); SECTION("All inserted keys-value pairs should be correctly recovered during find") { thrust::device_vector<Value> found_values(num); map.insert(insert_pairs, insert_pairs + num, hash_custom_key{}, custom_key_equals{}); REQUIRE(num == map.get_size()); map.find(insert_keys.begin(), insert_keys.end(), found_values.begin(), hash_custom_key{}, custom_key_equals{}); REQUIRE(thrust::equal(thrust::device, insert_values.begin(), insert_values.end(), found_values.begin(), [] __device__(Value lhs, Value rhs) { return std::tie(lhs.f, lhs.s) == std::tie(rhs.f, rhs.s); })); } SECTION("All inserted keys-value pairs should be contained") { thrust::device_vector<bool> contained(num); map.insert(insert_pairs, insert_pairs + num, hash_custom_key{}, custom_key_equals{}); map.contains(insert_keys.begin(), insert_keys.end(), contained.begin(), hash_custom_key{}, custom_key_equals{}); REQUIRE(all_of(contained.begin(), contained.end(), [] __device__(bool const& b) { return b; })); } SECTION("All conditionally inserted keys-value pairs should be contained") { thrust::device_vector<bool> contained(num); map.insert_if( insert_pairs, insert_pairs + num, thrust::counting_iterator<int>(0), [] __device__(auto const& key) { return (key % 2) == 0; }, hash_custom_key{}, custom_key_equals{}); map.contains(insert_keys.begin(), insert_keys.end(), contained.begin(), hash_custom_key{}, custom_key_equals{}); REQUIRE(thrust::equal(thrust::device, contained.begin(), contained.end(), thrust::counting_iterator<int>(0), [] __device__(auto const& idx_contained, auto const& idx) { return ((idx % 2) == 0) == idx_contained; })); } SECTION("Non-inserted keys-value pairs should not be contained") { thrust::device_vector<bool> contained(num); map.contains(insert_keys.begin(), insert_keys.end(), contained.begin(), hash_custom_key{}, custom_key_equals{}); REQUIRE( none_of(contained.begin(), contained.end(), [] __device__(bool const& b) { return b; })); } SECTION("All inserted keys-value pairs should be contained") { thrust::device_vector<bool> contained(num); map.insert(insert_pairs, insert_pairs + num, hash_custom_key{}, custom_key_equals{}); auto view = map.get_device_view(); REQUIRE(all_of( insert_pairs, insert_pairs + num, [view] __device__(cuco::pair_type<Key, Value> const& pair) { return view.contains(pair.first, hash_custom_key{}, custom_key_equals{}); })); } SECTION("Inserting unique keys should return insert success.") { auto m_view = map.get_device_mutable_view(); REQUIRE(all_of(insert_pairs, insert_pairs + num, [m_view] __device__(cuco::pair_type<Key, Value> const& pair) mutable { return m_view.insert(pair, hash_custom_key{}, custom_key_equals{}); })); } SECTION("Cannot find any key in an 
empty hash map") { SECTION("non-const view") { auto view = map.get_device_view(); REQUIRE(all_of(insert_pairs, insert_pairs + num, [view] __device__(cuco::pair_type<Key, Value> const& pair) mutable { return view.find(pair.first, hash_custom_key{}, custom_key_equals{}) == view.end(); })); } SECTION("const view") { auto const view = map.get_device_view(); REQUIRE(all_of(insert_pairs, insert_pairs + num, [view] __device__(cuco::pair_type<Key, Value> const& pair) { return view.find(pair.first, hash_custom_key{}, custom_key_equals{}) == view.end(); })); } } } TEMPLATE_TEST_CASE_SIG("Key comparison against sentinel", "", ((typename T, dist_type Dist), T, Dist), (int32_t, dist_type::UNIQUE), (int64_t, dist_type::UNIQUE)) { using Key = T; using Value = T; constexpr std::size_t num_keys{SIZE}; cuco::static_map<Key, Value> map{SIZE * 2, -1, -1}; auto m_view = map.get_device_mutable_view(); auto view = map.get_device_view(); std::vector<Key> h_keys(num_keys); std::vector<cuco::pair_type<Key, Value>> h_pairs(num_keys); generate_keys<Dist, Key>(h_keys.begin(), h_keys.end()); for (auto i = 0; i < num_keys; ++i) { Key key = h_keys[i]; Value val = h_keys[i]; h_pairs[i].first = key; h_pairs[i].second = val; } int h_A[SIZE]; for (int i = 0; i < SIZE; i++) { h_A[i] = i; } cudaMemcpyToSymbol(A, h_A, SIZE * sizeof(int)); thrust::device_vector<cuco::pair_type<Key, Value>> d_pairs(h_pairs); SECTION( "Tests of non-CG insert: The custom `key_equal` can never be used to compare against sentinel") { REQUIRE(all_of(d_pairs.begin(), d_pairs.end(), [m_view] __device__(cuco::pair_type<Key, Value> const& pair) mutable { return m_view.insert( pair, cuco::detail::MurmurHash3_32<Key>{}, custom_equals<Key>{}); })); } SECTION( "Tests of CG insert: The custom `key_equal` can never be used to compare against sentinel") { map.insert( d_pairs.begin(), d_pairs.end(), cuco::detail::MurmurHash3_32<Key>{}, custom_equals<Key>{}); // All keys inserted via custom `key_equal` should be found REQUIRE(all_of( d_pairs.begin(), d_pairs.end(), [view] __device__(cuco::pair_type<Key, Value> const& pair) { auto const found = view.find(pair.first); return (found != view.end()) and (found->first.load() == pair.first and found->second.load() == pair.second); })); } } TEMPLATE_TEST_CASE_SIG("Unique sequence of keys", "", ((typename T, dist_type Dist), T, Dist), (int32_t, dist_type::UNIQUE), (int64_t, dist_type::UNIQUE), (int32_t, dist_type::UNIFORM), (int64_t, dist_type::UNIFORM), (int32_t, dist_type::GAUSSIAN), (int64_t, dist_type::GAUSSIAN)) { using Key = T; using Value = T; constexpr std::size_t num_keys{500'000}; cuco::static_map<Key, Value> map{1'000'000, -1, -1}; auto m_view = map.get_device_mutable_view(); auto view = map.get_device_view(); std::vector<Key> h_keys(num_keys); std::vector<Value> h_values(num_keys); std::vector<cuco::pair_type<Key, Value>> h_pairs(num_keys); generate_keys<Dist, Key>(h_keys.begin(), h_keys.end()); for (auto i = 0; i < num_keys; ++i) { Key key = h_keys[i]; Value val = h_keys[i]; h_pairs[i].first = key; h_pairs[i].second = val; h_values[i] = val; } thrust::device_vector<Key> d_keys(h_keys); thrust::device_vector<Value> d_values(h_values); thrust::device_vector<cuco::pair_type<Key, Value>> d_pairs(h_pairs); thrust::device_vector<Value> d_results(num_keys); thrust::device_vector<bool> d_contained(num_keys); // bulk function test cases SECTION("All inserted keys-value pairs should be correctly recovered during find") { map.insert(d_pairs.begin(), d_pairs.end()); map.find(d_keys.begin(), d_keys.end(), 
d_results.begin()); auto zip = thrust::make_zip_iterator(thrust::make_tuple(d_results.begin(), d_values.begin())); REQUIRE(all_of(zip, zip + num_keys, [] __device__(auto const& p) { return thrust::get<0>(p) == thrust::get<1>(p); })); } SECTION("All inserted keys-value pairs should be contained") { map.insert(d_pairs.begin(), d_pairs.end()); map.contains(d_keys.begin(), d_keys.end(), d_contained.begin()); REQUIRE( all_of(d_contained.begin(), d_contained.end(), [] __device__(bool const& b) { return b; })); } SECTION("Non-inserted keys-value pairs should not be contained") { map.contains(d_keys.begin(), d_keys.end(), d_contained.begin()); REQUIRE( none_of(d_contained.begin(), d_contained.end(), [] __device__(bool const& b) { return b; })); } SECTION("Inserting unique keys should return insert success.") { if (Dist == dist_type::UNIQUE) { REQUIRE(all_of(d_pairs.begin(), d_pairs.end(), [m_view] __device__(cuco::pair_type<Key, Value> const& pair) mutable { return m_view.insert(pair); })); } } SECTION("Cannot find any key in an empty hash map with non-const view") { SECTION("non-const view") { REQUIRE(all_of(d_pairs.begin(), d_pairs.end(), [view] __device__(cuco::pair_type<Key, Value> const& pair) mutable { return view.find(pair.first) == view.end(); })); } SECTION("const view") { REQUIRE(all_of( d_pairs.begin(), d_pairs.end(), [view] __device__(cuco::pair_type<Key, Value> const& pair) { return view.find(pair.first) == view.end(); })); } } SECTION("Keys are all found after inserting many keys.") { // Bulk insert keys thrust::for_each(thrust::device, d_pairs.begin(), d_pairs.end(), [m_view] __device__(cuco::pair_type<Key, Value> const& pair) mutable { m_view.insert(pair); }); SECTION("non-const view") { // All keys should be found REQUIRE(all_of(d_pairs.begin(), d_pairs.end(), [view] __device__(cuco::pair_type<Key, Value> const& pair) mutable { auto const found = view.find(pair.first); return (found != view.end()) and (found->first.load() == pair.first and found->second.load() == pair.second); })); } SECTION("const view") { // All keys should be found REQUIRE(all_of( d_pairs.begin(), d_pairs.end(), [view] __device__(cuco::pair_type<Key, Value> const& pair) { auto const found = view.find(pair.first); return (found != view.end()) and (found->first.load() == pair.first and found->second.load() == pair.second); })); } } } TEMPLATE_TEST_CASE_SIG("Unique sequence of keys on given stream", "", ((typename T, dist_type Dist), T, Dist), (int32_t, dist_type::UNIQUE), (int64_t, dist_type::UNIFORM), (int32_t, dist_type::GAUSSIAN)) { using Key = T; using Value = T; cudaStream_t stream; cudaStreamCreate(&stream); constexpr std::size_t num_keys{500'000}; cuco::static_map<Key, Value> map{1'000'000, -1, -1, cuco::cuda_allocator<char>{}, stream}; auto m_view = map.get_device_mutable_view(); auto view = map.get_device_view(); std::vector<Key> h_keys(num_keys); std::vector<Value> h_values(num_keys); std::vector<cuco::pair_type<Key, Value>> h_pairs(num_keys); generate_keys<Dist, Key>(h_keys.begin(), h_keys.end()); for (auto i = 0; i < num_keys; ++i) { Key key = h_keys[i]; Value val = h_keys[i]; h_pairs[i].first = key; h_pairs[i].second = val; h_values[i] = val; } thrust::device_vector<Key> d_keys(h_keys); thrust::device_vector<Value> d_values(h_values); thrust::device_vector<cuco::pair_type<Key, Value>> d_pairs(h_pairs); thrust::device_vector<Value> d_results(num_keys); thrust::device_vector<bool> d_contained(num_keys); auto hash_fn = cuco::detail::MurmurHash3_32<Key>{}; auto equal_fn = thrust::equal_to<Value>{}; // 
bulk function test cases SECTION("All inserted keys-value pairs should be correctly recovered during find") { map.insert(d_pairs.begin(), d_pairs.end(), hash_fn, equal_fn, stream); map.find(d_keys.begin(), d_keys.end(), d_results.begin(), hash_fn, equal_fn, stream); // cudaStreamSynchronize(stream); auto zip = thrust::make_zip_iterator(thrust::make_tuple(d_results.begin(), d_values.begin())); REQUIRE(all_of( zip, zip + num_keys, [] __device__(auto const& p) { return thrust::get<0>(p) == thrust::get<1>(p); }, stream)); } SECTION("All inserted keys-value pairs should be contained") { map.insert(d_pairs.begin(), d_pairs.end(), hash_fn, equal_fn, stream); map.contains(d_keys.begin(), d_keys.end(), d_contained.begin(), hash_fn, equal_fn, stream); REQUIRE(all_of( d_contained.begin(), d_contained.end(), [] __device__(bool const& b) { return b; }, stream)); } cudaStreamDestroy(stream); } template <typename MapType, int CAPACITY> __global__ void shared_memory_test_kernel( typename MapType::device_view const* const device_views, typename MapType::device_view::key_type const* const insterted_keys, typename MapType::device_view::mapped_type const* const inserted_values, const size_t number_of_elements, bool* const keys_exist, bool* const keys_and_values_correct) { // Each block processes one map const size_t map_id = blockIdx.x; const size_t offset = map_id * number_of_elements; __shared__ typename MapType::pair_atomic_type sm_buffer[CAPACITY]; auto g = cg::this_thread_block(); typename MapType::device_view sm_device_view = MapType::device_view::make_copy(g, sm_buffer, device_views[map_id]); for (int i = g.thread_rank(); i < number_of_elements; i += g.size()) { auto found_pair_it = sm_device_view.find(insterted_keys[offset + i]); if (found_pair_it != sm_device_view.end()) { keys_exist[offset + i] = true; if (found_pair_it->first == insterted_keys[offset + i] and found_pair_it->second == inserted_values[offset + i]) { keys_and_values_correct[offset + i] = true; } else { keys_and_values_correct[offset + i] = false; } } else { keys_exist[offset + i] = false; keys_and_values_correct[offset + i] = true; } } } TEMPLATE_TEST_CASE_SIG("Shared memory static map", "", ((typename T, dist_type Dist), T, Dist), (int32_t, dist_type::UNIQUE), (int64_t, dist_type::UNIQUE), (int32_t, dist_type::UNIFORM), (int64_t, dist_type::UNIFORM), (int32_t, dist_type::GAUSSIAN), (int64_t, dist_type::GAUSSIAN)) { using KeyType = T; using ValueType = T; using MapType = cuco::static_map<KeyType, ValueType>; using DeviceViewType = typename MapType::device_view; using DeviceViewIteratorType = typename DeviceViewType::iterator; constexpr std::size_t number_of_maps = 1000; constexpr std::size_t elements_in_map = 500; constexpr std::size_t map_capacity = 2 * elements_in_map; // one array for all maps, first elements_in_map element belong to map 0, second to map 1 and so // on std::vector<KeyType> h_keys(number_of_maps * elements_in_map); std::vector<ValueType> h_values(number_of_maps * elements_in_map); std::vector<cuco::pair_type<KeyType, ValueType>> h_pairs(number_of_maps * elements_in_map); // using std::unique_ptr because static_map does not have copy/move constructor/assignment // operator yet std::vector<std::unique_ptr<MapType>> maps; for (std::size_t map_id = 0; map_id < number_of_maps; ++map_id) { const std::size_t offset = map_id * elements_in_map; generate_keys<Dist, KeyType>(h_keys.begin() + offset, h_keys.begin() + offset + elements_in_map); for (std::size_t i = 0; i < elements_in_map; ++i) { KeyType key = h_keys[offset + i]; 
ValueType val = key < std::numeric_limits<KeyType>::max() ? key + 1 : 0; h_values[offset + i] = val; h_pairs[offset + i].first = key; h_pairs[offset + i].second = val; } maps.push_back(std::make_unique<MapType>(map_capacity, -1, -1)); } thrust::device_vector<KeyType> d_keys(h_keys); thrust::device_vector<ValueType> d_values(h_values); thrust::device_vector<cuco::pair_type<KeyType, ValueType>> d_pairs(h_pairs); SECTION("Keys are all found after insertion.") { std::vector<DeviceViewType> h_device_views; for (std::size_t map_id = 0; map_id < number_of_maps; ++map_id) { const std::size_t offset = map_id * elements_in_map; MapType* map = maps[map_id].get(); map->insert(d_pairs.begin() + offset, d_pairs.begin() + offset + elements_in_map); h_device_views.push_back(map->get_device_view()); } thrust::device_vector<DeviceViewType> d_device_views(h_device_views); thrust::device_vector<bool> d_keys_exist(number_of_maps * elements_in_map); thrust::device_vector<bool> d_keys_and_values_correct(number_of_maps * elements_in_map); shared_memory_test_kernel<MapType, map_capacity> <<<number_of_maps, 64>>>(d_device_views.data().get(), d_keys.data().get(), d_values.data().get(), elements_in_map, d_keys_exist.data().get(), d_keys_and_values_correct.data().get()); REQUIRE(d_keys_exist.size() == d_keys_and_values_correct.size()); auto zip = thrust::make_zip_iterator( thrust::make_tuple(d_keys_exist.begin(), d_keys_and_values_correct.begin())); REQUIRE(all_of(zip, zip + d_keys_exist.size(), [] __device__(auto const& z) { return thrust::get<0>(z) and thrust::get<1>(z); })); } SECTION("No key is found before insertion.") { std::vector<DeviceViewType> h_device_views; for (std::size_t map_id = 0; map_id < number_of_maps; ++map_id) { h_device_views.push_back(maps[map_id].get()->get_device_view()); } thrust::device_vector<DeviceViewType> d_device_views(h_device_views); thrust::device_vector<bool> d_keys_exist(number_of_maps * elements_in_map); thrust::device_vector<bool> d_keys_and_values_correct(number_of_maps * elements_in_map); shared_memory_test_kernel<MapType, map_capacity> <<<number_of_maps, 64>>>(d_device_views.data().get(), d_keys.data().get(), d_values.data().get(), elements_in_map, d_keys_exist.data().get(), d_keys_and_values_correct.data().get()); REQUIRE(none_of(d_keys_exist.begin(), d_keys_exist.end(), [] __device__(const bool key_found) { return key_found; })); } } template <typename K, typename V, std::size_t N> __global__ void shared_memory_hash_table_kernel(bool* key_found) { namespace cg = cooperative_groups; using map_type = typename cuco::static_map<K, V, cuda::thread_scope_block>::device_mutable_view; using find_map_type = typename cuco::static_map<K, V, cuda::thread_scope_block>::device_view; __shared__ typename map_type::slot_type slots[N]; auto map = map_type::make_from_uninitialized_slots(cg::this_thread_block(), &slots[0], N, -1, -1); auto g = cg::this_thread_block(); std::size_t index = threadIdx.x + blockIdx.x * blockDim.x; int rank = g.thread_rank(); // insert {thread_rank, thread_rank} for each thread in thread-block map.insert(cuco::pair<int, int>(rank, rank)); g.sync(); auto find_map = find_map_type(map); auto retrieved_pair = find_map.find(rank); if (retrieved_pair != find_map.end() && retrieved_pair->second == rank) { key_found[index] = true; } } TEMPLATE_TEST_CASE("Shared memory slots.", "", int32_t) { constexpr std::size_t N = 256; thrust::device_vector<bool> key_found(N, false); shared_memory_hash_table_kernel<TestType, TestType, N><<<8, 32>>>(key_found.data().get()); 
REQUIRE(all_of(key_found.begin(), key_found.end(), thrust::identity<bool>{})); }
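// ---------------------------------------------------------------------------
// Note (added sketch): the all_of / none_of helpers used by the tests above
// come from the surrounding test utilities and are not shown here. A minimal,
// hypothetical version of such a helper could simply forward to
// thrust::all_of on the caller's stream; this is an illustration of the idea,
// not the project's actual implementation.
// ---------------------------------------------------------------------------
#include <thrust/logical.h>
#include <thrust/system/cuda/execution_policy.h>

template <typename Iterator, typename Predicate>
bool all_of_sketch(Iterator begin, Iterator end, Predicate p, cudaStream_t stream = 0)
{
  // thrust::all_of is a reduction, so it blocks until the result is available;
  // the explicit synchronize only makes the stream ordering obvious.
  bool result = thrust::all_of(thrust::cuda::par.on(stream), begin, end, p);
  cudaStreamSynchronize(stream);
  return result;
}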
namespace fastertransformer{ template <typename T> __inline__ __device__ T warpReduceSum(T val) { for(int mask = 16; mask > 0; mask >>= 1) val += __shfl_xor_sync(FINAL_MASK, val, mask, 32); return val; } template <typename T> __inline__ __device__ T blockReduceSum(T val) { static __shared__ T shared[32]; int lane = threadIdx.x & 0x1f; int wid = threadIdx.x >> 5; val = warpReduceSum<T>(val); if(lane == 0) shared[wid] = val; __syncthreads(); val = (threadIdx.x < (blockDim.x >> 5 )) ? shared[lane] : (T)0.0f; val = warpReduceSum(val); return val; } template <typename T> __inline__ __device__ T warpReduceMax(T val) { for(int mask = 16; mask > 0; mask >>= 1) val = max(val, __shfl_xor_sync(FINAL_MASK, val, mask, 32)); return val; } /* Calculate the maximum of all elements in a block */ template <typename T> __inline__ __device__ T blockReduceMax(T val) { static __shared__ T shared[32]; int lane = threadIdx.x & 0x1f; // in-warp idx int wid = threadIdx.x >> 5; // warp idx val = warpReduceMax(val); // get maxx in each warp if(lane == 0) // record in-warp maxx by warp Idx shared[wid] = val; __syncthreads(); val = (threadIdx.x < (blockDim.x >> 5 )) ? shared[lane] : (T)-1e20f; val = warpReduceMax<T>(val); return val; } template <typename T> __global__ void update_logits_kernel(float* logits, const T* tmp_logits, const T* bias, const int end_id, const bool* finished, const int n) { int bid = blockIdx.x; bool finish = finished[bid]; int offset = bid * n; float max_val = -1 * FLT_MAX; __shared__ float s_max_val; __shared__ float s_sum_val; if(finish) { for(int tid = threadIdx.x; tid < n; tid += blockDim.x) { logits[offset + tid] = (tid == end_id) ? 0 : -FLT_MAX; } } else { for(int tid = threadIdx.x; tid < n; tid += blockDim.x) { if(finish) logits[offset + tid] = (tid == end_id) ? FLT_MAX : -1 * FLT_MAX; else logits[offset + tid] = (float)(tmp_logits[offset + tid] + bias[tid]); max_val = max(max_val, logits[offset + tid]); } max_val = blockReduceMax<float>((float)max_val); if(threadIdx.x == 0) s_max_val = max_val; __syncthreads(); float sum_val = 0.0f; for(int tid = threadIdx.x; tid < n; tid += blockDim.x) { logits[offset + tid] = __expf((float)logits[offset + tid] - s_max_val); sum_val += (float)logits[offset + tid]; } sum_val = blockReduceSum<float>(sum_val); if(threadIdx.x == 0) s_sum_val = sum_val; __syncthreads(); for(int tid = threadIdx.x; tid < n; tid += blockDim.x) { logits[offset + tid] = logf((float)logits[offset + tid] / s_sum_val); } } } template <typename T> __global__ void update_logits_kernel_without_softmax(T* logits, const T* bias, const int end_id, const bool* finished, const int n) { int bid = blockIdx.x; bool finish = finished != nullptr ? finished[bid] : false; int offset = bid * n; const bool IS_FP16 = std::is_same<T, half>::value; const T MAX_T_VAL = (IS_FP16)? HALF_FLT_MAX : FLT_MAX; for(int tid = threadIdx.x; tid < n; tid += blockDim.x) { if(finish) { logits[offset + tid] = (tid == end_id) ? MAX_T_VAL : -MAX_T_VAL; } else { logits[offset + tid] += bias[tid]; } } } template <typename T> __global__ void softmax_kernel(T* logits, const T* bias, const int end_id, const bool* finished, const int n_padded, const int n) { int bid = blockIdx.x; bool finish = (finished != nullptr) ? finished[bid] : false; int offset = bid * n_padded; float max_val = -1 * FLT_MAX; const bool IS_FP16 = std::is_same<T, half>::value; const T MAX_T_VAL = (IS_FP16)? 
HALF_FLT_MAX : FLT_MAX; __shared__ float s_max_val; __shared__ float s_sum_val; for(int tid = threadIdx.x; tid < n_padded; tid += blockDim.x) { if(tid < n) { if(finish) logits[offset + tid] = (tid == end_id) ? MAX_T_VAL : -MAX_T_VAL; else { T bias_val = (bias != nullptr) ? bias[tid] : (T)0.0f; logits[offset + tid] += bias_val; } } else { logits[offset + tid] = -MAX_T_VAL; } max_val = max(max_val, (float)logits[offset + tid]); } max_val = blockReduceMax<float>((float)max_val); if(threadIdx.x == 0) s_max_val = max_val; __syncthreads(); float sum_val = 0.0f; for(int tid = threadIdx.x; tid < n_padded; tid += blockDim.x) { logits[offset + tid] = __expf((float)logits[offset + tid] - s_max_val); sum_val += (float)logits[offset + tid]; } sum_val = blockReduceSum<float>(sum_val); if(threadIdx.x == 0) s_sum_val = sum_val; __syncthreads(); for(int tid = threadIdx.x; tid < n_padded; tid += blockDim.x) { logits[offset + tid] = ((float)logits[offset + tid] / s_sum_val); } } template<typename T> __global__ void remove_sequence_length_padding(const T* src, T* tgt, const int* tmp_mask_offset, int* mask_offset, const int n) { const int tid = threadIdx.x; const int bid = blockIdx.x; mask_offset[bid] = tmp_mask_offset[bid]; const int src_seq_id = bid + mask_offset[bid]; const int tgt_seq_id = bid; for(int i = tid; i < n; i += blockDim.x) { tgt[tgt_seq_id * n + i] = src[src_seq_id * n + i]; } } template<typename T> void remove_sequence_length_padding_kernelLauncher(const T* src, T* tgt, const int* tmp_mask_offset, int* mask_offset, const int m, const int n, cudaStream_t stream) { // src: [batch_size*max_seq_len, hidden_dim] // tgt: [valid_word_num, hidden_dim] remove_sequence_length_padding<<<m, 256, 0, stream>>>(src, tgt, tmp_mask_offset, mask_offset, n); } template<typename T> __global__ void rebuild_sequence_length_padding(const T* src, T* tgt, const int* mask_offset, const int n) { const int tid = threadIdx.x; const int bid = blockIdx.x; const int tgt_seq_id = bid + mask_offset[bid]; const int src_seq_id = bid; for(int i = tid; i < n; i += blockDim.x) { tgt[tgt_seq_id * n + i] = src[src_seq_id * n + i]; } } template<typename T> void rebuild_sequence_length_padding_kernelLauncher(const T* src, T* tgt, const int* mask_offset, const int m, const int n, cudaStream_t stream) { // src: [valid_word_num, hidden_dim] // tgt: [batch_size*max_seq_len, hidden_dim] rebuild_sequence_length_padding<<<m, 256, 0, stream>>>(src, tgt, mask_offset, n); } __global__ void build_sequence_length_padding_offset(const int* sequence_length, const int batch_size, const int max_seq_len, int* valid_word_num, int* tmp_mask_offset) { // do cumulated sum int total_seq_len = 0; int cum_offset = 0; int index = 0; for(int i = 0; i < batch_size; i++) { const int seq_len = sequence_length[i]; for(int j = 0; j < seq_len; j++) { tmp_mask_offset[index] = cum_offset; index++; } cum_offset += max_seq_len - seq_len; total_seq_len += seq_len; } valid_word_num[0] = total_seq_len; } void build_sequence_length_padding_offset_kernelLauncher(const int* sequence_length, const int batch_size, const int max_seq_len, int* valid_word_num, int* tmp_mask_offset, cudaStream_t stream) { build_sequence_length_padding_offset<<<1, 1, 0, stream>>>(sequence_length, batch_size, max_seq_len, valid_word_num, tmp_mask_offset); } template void rebuild_sequence_length_padding_kernelLauncher(const float* src, float* tgt, const int* mask_offset, const int m, const int n, cudaStream_t stream); template void rebuild_sequence_length_padding_kernelLauncher(const half* src, half* 
tgt, const int* mask_offset, const int m, const int n, cudaStream_t stream); template void remove_sequence_length_padding_kernelLauncher(const float* src, float* tgt, const int* tmp_mask_offset, int* mask_offset, const int m, const int n, cudaStream_t stream); template void remove_sequence_length_padding_kernelLauncher(const half* src, half* tgt, const int* tmp_mask_offset, int* mask_offset, const int m, const int n, cudaStream_t stream); template <typename T> __global__ void cuda_random_uniform_kernel(T* buffer, const int size) { const int idx = blockIdx.x * blockDim.x + threadIdx.x; curandState_t local_state; curand_init((T)1337.f, idx, 0, &local_state); for(int index = idx; index < size; index += blockDim.x * gridDim.x) { buffer[index] = (T)(curand_uniform(&local_state) * 0.2f - 0.1f); } } template <typename T> void cuda_random_uniform_kernelLauncher(T *buffer, const int size) { cuda_random_uniform_kernel<<<256, 256>>>(buffer, size); } template void cuda_random_uniform_kernelLauncher(float *buffer, const int size); template void cuda_random_uniform_kernelLauncher(half *buffer, const int size); template <typename T> void update_logits(float* logits, const T* tmp_logits, const T* bias, const int end_id, const bool* finished, const int m, const int n, cudaStream_t stream) { dim3 grid(m); dim3 block(min(n, 1024)); /*n is the vocab_size, e.g., 30000, 7000.... vocab_size is usually very big. */ update_logits_kernel<<<grid, block, 0, stream>>>(logits, tmp_logits, bias, end_id, finished, n); } template void update_logits(float* logits, const float* tmp_logits, const float* bias, const int end_id, const bool* finished, const int m, const int n, cudaStream_t stream); template void update_logits(float* logits, const half* tmp_logits, const half* bias, const int end_id, const bool* finished, const int m, const int n, cudaStream_t stream); template<typename T> void update_logits_without_softmax(T* logits, const T* bias, const int end_id, const bool* finished, const int m, const int n, cudaStream_t stream) { dim3 grid(m); dim3 block(min(n, 1024)); /*n is the vocab_size, e.g., 30000, 7000.... vocab_size is usually very big. */ update_logits_kernel_without_softmax<<<grid, block, 0, stream>>>(logits, bias, end_id, finished, n); } template void update_logits_without_softmax(float* logits, const float* bias, const int end_id, const bool* finished, const int m, const int n, cudaStream_t stream); template void update_logits_without_softmax(half* logits, const half* bias, const int end_id, const bool* finished, const int m, const int n, cudaStream_t stream); template<typename T> void softmax_kernelLauncher(T* logits, const T* bias, const int end_id, const bool* finished, const int m, const int n_padded, const int n, cudaStream_t stream) { dim3 grid(m); dim3 block(min(n, 1024)); /*n is the vocab_size, e.g., 30000, 7000.... vocab_size is usually very big. 
*/ softmax_kernel<<<grid, block, 0, stream>>>(logits, bias, end_id, finished, n_padded, n); } template void softmax_kernelLauncher(float* logits, const float* bias, const int end_id, const bool* finished, const int m, const int n_padded, const int n, cudaStream_t stream); template void softmax_kernelLauncher(half* logits, const half* bias, const int end_id, const bool* finished, const int m, const int n_padded, const int n, cudaStream_t stream); /* *********************************** Debug tools *********************************** */ template <typename T> __global__ void print_abs_mean_kernel(const T* buf, uint size) { float sum = 0.0f; for(int i = 0; i < size; i++) { sum += abs((float)buf[i]); // printf("[INFO] buf[%d] %f \n", i, buf[i]); } printf("mean: %f \n", (float) sum / (float) size); printf("sum: %f \n", sum); } template <typename T> __global__ void print_kernel(const T* buf, uint size) { for(int i = 0; i < size; i++) { printf("%f ", (float(buf[i]))); } printf("\n"); } template <> __global__ void print_kernel(const int* buf, uint size) { for(int i = 0; i < size; i++) { printf("%d ", buf[i]); } printf("\n"); } template <typename T> void print_first_k(const T* buf, uint size, cudaStream_t stream) { cudaDeviceSynchronize(); check_cuda_error(cudaGetLastError()); print_kernel<<<1, 1, 0, stream>>>(buf, size); cudaDeviceSynchronize(); check_cuda_error(cudaGetLastError()); } template <typename T> void print_abs_mean(const T* buf, uint size, cudaStream_t stream) { cudaDeviceSynchronize(); check_cuda_error(cudaGetLastError()); print_abs_mean_kernel<<<1, 1, 0, stream>>>(buf, size); cudaDeviceSynchronize(); check_cuda_error(cudaGetLastError()); } template void print_first_k(const float*, uint size, cudaStream_t); template void print_first_k(const half*, uint size, cudaStream_t); template void print_first_k(const int*, uint size, cudaStream_t); template void print_first_k(const bool*, uint size, cudaStream_t); template void print_abs_mean(const float* buf, uint size, cudaStream_t stream); template void print_abs_mean(const half* buf, uint size, cudaStream_t stream); template void print_abs_mean(const int* buf, uint size, cudaStream_t stream); /* **************************** end of Debug tools *********************************** */ // TODO remove in v4.1 /* *************************** deprecated kernels *********************************** */ template <typename T> __global__ void topK_kernel(const T* log_probs, int* ids, const int batch_size, const int N, const int K) { int tid = threadIdx.x + blockIdx.x * blockDim.x; float val, max_val; __shared__ float s_max_val; for(int ite = 0; ite < batch_size; ++ite) { bool choosed = false; val = (tid < N ) ? (float)log_probs[ite * N + tid] : -1e20f; for(int kids = 0; kids < K; ++kids) { max_val = blockReduceMax<float>(val); if(threadIdx.x == 0) s_max_val = max_val; __syncthreads(); if(s_max_val == val && !choosed && tid < N) { ids[ite * gridDim.x * K + blockIdx.x * K + kids] = tid + ite * N; val = -1e20f; choosed = true; } } } } template <typename T> __global__ void topK_kernel_2nd(const T* log_probs, int* ids, const int batch_size, const int N, const int K, const int id_offset) { int tid = threadIdx.x; float val, max_val; __shared__ float s_max_val; __shared__ int beam_index; __shared__ int ids_before_sort[16]; for(int ite = 0; ite < batch_size; ++ite) { bool choosed = false; const int id = (tid < N) ? ids[ite * N + tid] : -1; val = (tid < N) ?
(float)log_probs[id] : -1e20f; __syncthreads(); if(tid == 0) beam_index = 0; if(tid < 16) ids_before_sort[tid] = -1; __syncthreads(); while(beam_index < K){ int begin_beam_index = beam_index; max_val = blockReduceMax<float>(val); if(threadIdx.x == 0){ s_max_val = max_val; } __syncthreads(); if(s_max_val == val && !choosed && id != -1) { int id_offset_ = atomicAdd(&beam_index, 1); ids_before_sort[id_offset_] = id; val = -1e20f; choosed = true; } __syncthreads(); // simply sort the ids if(threadIdx.x == 0 && beam_index - begin_beam_index > 1){ for(int i = begin_beam_index; i < beam_index; i++){ for(int j = i; j < beam_index; j++){ if(ids_before_sort[j] < ids_before_sort[i]){ int tmpid = ids_before_sort[j]; ids_before_sort[j] = ids_before_sort[i]; ids_before_sort[i] = tmpid; } } } } } __syncthreads(); if(tid < K) ids[ite * K + tid] = ids_before_sort[tid]; __syncthreads(); } } void topK(const float* log_probs, int* ids, const int batch_size, const int beam_width, const int vocab_size, cudaStream_t stream) { int N = beam_width * vocab_size; dim3 block(1024); dim3 grid((N - 1) / block.x + 1); /* First round topK, for each batch, get grid.x * K values */ topK_kernel<float><<<grid, block, 0, stream>>>(log_probs, ids, batch_size, N, beam_width); /*Second round, for each batch, get the final TopK values out from grid.x * K values. */ topK_kernel_2nd<float><<<1, block, 0, stream>>>(log_probs, ids, batch_size, beam_width * grid.x, beam_width, N); } template <typename T> __global__ void embedding_lookup_kernel(const T* embedding_table, const int* word_ids, const int hidden_units, T* from_tensor) { int write_pos = threadIdx.x + blockIdx.x * hidden_units; from_tensor[write_pos] = embedding_table[word_ids[blockIdx.x] * hidden_units + threadIdx.x]; } template <typename T> void embedding_lookup(const T* embedding_table, const int* word_ids, T* from_tensor, const int batch_size, const int beam_width, const int hidden_units, cudaStream_t stream) { dim3 grid(batch_size * beam_width); dim3 block(hidden_units); assert(hidden_units <= 1024); embedding_lookup_kernel<<<grid, block, 0, stream>>>(embedding_table, word_ids, hidden_units, from_tensor); } template<typename T> __global__ void sine_position_encoder_kernel(T* output, int step, int n){ int tid = threadIdx.x; int bid = blockIdx.x; float half_n = (float)n / 2.; // input = input * hidden_dim**0.5 output[bid * n + tid] = output[bid * n + tid] * (T)sqrtf(float(n)); float log_timescale_increment = __logf(10000) / (half_n - 1.f); float inv_timescales = __expf( (tid % (int)half_n) * -1 * log_timescale_increment ); float scaled_time = inv_timescales * step; T encoding_val = (tid < half_n) ? 
(T) __sinf(scaled_time) : (T) __cosf(scaled_time); output[bid * n + tid] = output[bid * n + tid] + encoding_val; } template<typename T> void sine_position_encoder( T* output, int step, int m, int n, cudaStream_t stream) { dim3 grid(m); dim3 block(n); assert(n <= 1024); sine_position_encoder_kernel<T><<<grid, block, 0, stream>>>(output, step, n); } template void embedding_lookup(const float* embedding_table, const int* word_ids, float* from_tensor, const int batch_size, const int beam_width, const int hidden_units, cudaStream_t stream); template void embedding_lookup(const half* embedding_table, const int* word_ids, half* from_tensor, const int batch_size, const int beam_width, const int hidden_units, cudaStream_t stream); template void sine_position_encoder( float* output, int step, int m, int n, cudaStream_t stream); template void sine_position_encoder( half* output, int step, int m, int n, cudaStream_t stream); /* *************************** end of deprecated kernels *********************************** */ }//namespace
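// ---------------------------------------------------------------------------
// Added illustration (not part of the original file): how the two-level
// warp/block reduction above is typically consumed. blockReduceSum first
// reduces within each warp via __shfl_xor_sync, then combines one partial
// value per warp through shared memory. The sketch assumes blockDim.x is a
// multiple of 32 and at most 1024, which the 32-slot shared buffer requires;
// the kernel name below is hypothetical.
// ---------------------------------------------------------------------------
template <typename T>
__global__ void block_sum_example_kernel(const T* in, T* block_sums, int n)
{
  T local = (T)0.0f;
  // Grid-stride accumulation of this thread's share of the input.
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x)
    local += in[i];
  // Block-wide sum of the per-thread partials.
  T total = fastertransformer::blockReduceSum<T>(local);
  if (threadIdx.x == 0)
    block_sums[blockIdx.x] = total;
}
// A launch such as block_sum_example_kernel<float><<<grid, 256>>>(d_in, d_partial, n)
// leaves one partial sum per block, which can then be reduced again on the
// host or with a second launch.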
// RobustEdgeDetection.cu // 实现图像的边缘检测算法。 #include "RobustEdgeDetection.h" #include<stdio.h> // 宏:SA_DEF_BLOCK_X 和 SA_DEF_BLOCK_Y 和 SA_DEF_BLOCK_Z // 定义了默认单纯平均法线程快的尺寸。 #define SA_DEF_BLOCK_X 32 #define SA_DEF_BLOCK_Y 2 #define SA_DEF_BLOCK_Z 4 // 宏:FV_DEF_BLOCK_X 和 FV_DEF_BLOCK_Y 和 FV_DEF_BLOCK_Z // 定义了默认三维特征向量法线程快的尺寸。 #define FV_DEF_BLOCK_X 32 #define FV_DEF_BLOCK_Y 2 #define FV_DEF_BLOCK_Z 4 // 宏:RED_CODE_CHOICE // 定义一个选择宏,来决定注释掉原来算法的代码。 // #define RED_CODE_CHOICE // 宏:RED_DIV_PERCENTAGE // 定义特征向量法中求中值平均时去掉前后一定百分比像素点的辅助宏。 #define RED_DIV_PERCENTAGE 20 // 宏:RED_ARRAY_MAXLEN // 特征向量法计算临时数组的最大尺寸的辅助宏即对向邻域的最大尺寸, // 此处默认为 11。 #define RED_ARRAY_MAXLEN 11 // Device 全局常量:_argSupreDev[4][4](最大增强法操作的参数) // 最大增强法的参数。 const int static __device__ _argSupreDev[4][4] = { // [0][ ], [1][ ] { -1, 0, 1, 0 }, { 0, -1, 0, 1 }, // [2][ ], [3][ ] { -1, -1, 1, 1 }, { 1, -1, -1, 1 } }; // Host 函数:_initNeighbor(初始化对向邻域坐标) // 设置 8 个对向邻域的坐标,为了方便传入 Device 端,此处把 8 个邻域的数据赋值 // 在一个数组内,通过偏移来寻找每一个线程应该用到的坐标。 static __host__ int // 函数若正确执行返回 NO_ERROR。 _initNeighbor( int neighbor[], // 欲初始化的坐标数组。 int diffsize, // 对向邻域的大小。 int *sizedp01, // dp 号为 0 和 1 的对向邻域的尺寸。 int *sizedp23 // dp 号为 2 和 3 的对向邻域的尺寸。 ); // Device 函数:_maxEnhancementDev(最大增强法) // 进行最大增强操作。差分正规化计算得到的临时图片中每一个位置(ndif)都对应一个 // THREAD,各个 THREAD 在其对应的 dP 号所指示的方向上检查像素点的像素值,考察在 // ndif[][] 上蓝箭头所经过的 pixel 范围内 60% (0.6) 以上的位置上的值小于 rate * // ndif(x,y)。如果是,则 pixel(x,y) 及其 8 邻域对应位置上置 200。每个对向临域对 // 应两种检测方向,分别为上下,左右,左上右下和右上左下,根据这四种方向以及检测 // 范围参数 searchscope 进行遍历 (3 x 3 的邻域,箭头下共有 4 个 pixel;5 x 5的邻 // 域,箭头下共有 6 个 pixel;7 x 7 的邻域,箭头下共有 6 个 pixel;9 x 9 和 11 x // 11 邻域,箭头下则都是共有8个pixel),最后将最终得到的像素值赋给输出图像。 static __device__ void // 无返回值。 _maxEnhancementDev( ImageCuda inimg, // 输入图像。 ImageCuda outimg, // 输出图像。 ImageCuda tempimg, // 需要用到的临时图像。 int searchscope, // 控制最大增强方法的搜索范围参数。 int ndif, // 进行最大增强法的正规化后的点的像素值。 int dp, // 对应搜索方向的 dp 号。 int c, // 进行最大增强的点的横坐标。 int r // 进行最大增强的点的纵坐标。 ); // Device 函数:_computeMavSgmDev(计算中值平均即 MAV,方差值 SGM) // 对某一邻域内部的像素值排序,分别计算两个邻域块内点的像素平均值和方差值,辅助 // 求解正规化差分值。在本算法中,传入一个已经统计好的数组。 static __device__ int // 函数若正确执行,返回 NO_ERROR。 _computeMavSgmDev( unsigned char pixeltmpDev[], // 传入的一个临时数组,记载了邻域内部不同 // 的像素值。 int pixelareacnt, // 对向邻域内不同像素点的个数。 float *mav, // 计算中值平均结果。 float *sgm // 计算某一个邻域的方差值 SGM ); // Device 函数:_computeMavMaxDev(计算各个对向邻域的3个统计量) // 计算特征向量值。分别计算已排好序的对向邻域的像素值高端处 10% 个图像值的平均值 // 作为高灰度值 hg ;计算排序结果的低端处 10% 个图像值的平均值作为低灰度值 lg;计 // 算排序结果中部 20% 个图像值的平均值作为中央均值 ag;各对向域内的 pixel的灰度的 // 整体平均值,并以此求对向域内的灰度标准偏差 sd。并同时计算出对向邻域内的像素值 // 最大值 max。 static __device__ int // 函数若正确执行,返回 NO_ERROR。 _computeMavMaxDev( unsigned char pixeltmpDev[], // 传入的一个临时数组,记载了邻域内部不同 // 的像素值。 int pixelareacnt, // 对向邻域内不同像素点的个数。 float *hg, // 高端处 10% 个图像值的平均值,为高灰度值 float *lg, // 低端处 10% 个图像值的平均值,为低灰度值 float *ag, // 中部 20% 个图像值的平均值,为中央均值 int *max // 对向邻域内的像素最大值 max。 ); // Kernel 函数:_detectEdgeSAKer(单纯平均法) // 直接进行对向差分运算。利用四种对向临域的模版,进行差分正规化,得到差分计算的 // 最大值,然后进行最大增强操作。即由最新的 Maximum Enhancement 方法代替原来的 // Non-maximum Suppression 方法,对正规化后的差分值结果进行处理。最后再进行 // Thinning 和 FrekleFilter 处理,得到单像素宽度边缘图像。但是由于非最大抑制操作 // 的消除自身的特点,检测出的边缘有很大的可能是非连续的。 static __global__ void // Kernel 函数无返回值 _detectEdgeSAKer( int searchscope, // 控制非极大值抑制方法的搜索范围参数 int diffsize, // 对向临域像素点个数的一半 int neighbor[], // 传入的模板,可以通过计算偏移量得到 8 个对向邻域的 // 模板坐标值。 ImageCuda inimg, // 输入图像 ImageCuda tempimg, // 在 host 函数里申请的临时图片,然后传入 // Kernel 函数中 // 用来存储差分计算后得到的各个像素点像素值 ImageCuda outimg, // 输出图像 int sizedp01, // dp 号为 0 和 1 的对向邻域的尺寸。 int sizedp23 // dp 号为 2 和 3 的对向邻域的尺寸。 ); // Kernel 函数:_detectEdgeFVKer(特征向量法) // 
通过公式计算,进行边缘检测。首先,运用并行计算,一个线程计算一个像素点,通过 // 四种对向临域方向计算(分别为上下,左右,左上右下,右上左下四种方向),计算出 // 每一个邻域的 MAV,MMD,SGM,以及整幅图片的 EMAV,EMMD,ESGM,再利用河边老师提 // 供的公式进行计算,可以得到四个 disp 值,从中选择出最大的值,并记下其 dp 号, // 在 dp 号对应方向上进行非最大抑制。最后,将最终结果赋值到输出图像 outimg 上, // 再进行 Thinning 和 FreckleFilter 处理,得到最终结果。 static __global__ void // Kernel 函数无返回值 _detectEdgeFVKer( ImageCuda inimg, // 输入图像 ImageCuda tempimg, // 用于中间存储的临时图像。 ImageCuda outimg, // 输出图像 int diffsize, // 对向临域像素点个数 int searchscope, // 在 dp 方向上进行搜索的范围。 int neighbor[], // 传入的模板,可以通过计算偏移量得到 8 个对向邻域的 // 模板坐标值。 int sizedp01, // dp 号为 0 和 1 的对向邻域的尺寸。 int sizedp23 // dp 号为 2 和 3 的对向邻域的尺寸。 ); // Host 函数:_initNeighbor(初始化对向邻域坐标) static __host__ int _initNeighbor( int neighbor[], int diffsize, int *sizedp01, int *sizedp23) { // 判断指针参数是否合法 if (neighbor == NULL || sizedp01 == NULL || sizedp23 == NULL) return NULL_POINTER; // dp 为 0 和 1 时的邻域大小为 diffsize * diffsize。 // 计算 dp 为 2 和 3 时重叠区域大小。 int overlap; if ((diffsize + 1) & 2 != 0) { overlap = (diffsize + 1) / 2; } else { overlap = (diffsize - 1) / 2; } // 临时变量,用于计算点的坐标。 int pntlocationtmp = diffsize >> 1; int pntlocationofftmp = overlap >> 1; // 分别处理每一个点的索引。 int idx = 0; // 为了减少 for 循环的次数,在计算 dp 为 0 时可以根据数学分析一并将 dp 为 2 // 的点计算,dp 为 3 和 4 时同理。 // dp 为 0 和 dp 为 1 时的尺寸。 int offdp01 = diffsize * diffsize * 2; // dp 为 0 和 dp 为 1 时存放邻域内点所需要的内存大小。 *sizedp01 = offdp01; // dp 为 2 和 dp 为 3 时存放邻域内点所需要的内存大小。 int offdp23 = offdp01 - overlap * (overlap + 1); *sizedp23 = offdp23; // 存放 dp 为 2 及其之后的对向邻域的点坐标,相对于数组起始位置的偏移量。 int offdp12 = offdp01 << 2; // 为模板赋值。 for (int i = -diffsize; i < 0; i++) { for (int j = -pntlocationtmp; j <= pntlocationtmp; j++, idx++) { // dp 为 0 时记录点的横纵坐标。 neighbor[idx << 1] = i; neighbor[(idx << 1) + 1] = j; // 据分析 dp 为 0 时左块和右块点的坐标存在着某种关系: // 关于 y 轴对称。 neighbor[offdp01 + (idx << 1)] = -i; neighbor[offdp01 + (idx << 1) + 1] = j; // 据分析dp 为 0 和 dp 为 1 的点的坐标存在着某种关系: // x 和 y 值交换。 // 故可以推出 dp 为 1 时的坐标,如下: neighbor[(offdp01 << 1) + (idx << 1)] = j; neighbor[(offdp01 << 1) + (idx << 1) + 1] = i; // 据分析 dp 为 1 上块和下块的点的坐标存在着某种关系: // 关于 x 轴对称。 neighbor[offdp01 * 3 + (idx << 1)] = j; neighbor[offdp01 * 3 + (idx << 1) + 1] = -i; } } // 为数组赋值 dp 为 2 和 dp 为 3 时的情形。 // 计算左边第一个点的横纵坐标。 int firstpntx = -(diffsize - (overlap + 1) / 2); int firstpnty = -(diffsize - (overlap + 1) / 2); // 计算模板值。 idx = 0; for (int i = firstpntx; i <= pntlocationofftmp; i++) { for (int j = firstpnty; j <= pntlocationofftmp; j++) { // 保证点的有效性,舍去不符合要求的点。 // 而此时也已经找到了邻域中所有的点,所以可以返回 NO_ERROR if (i + j >= 0) continue; // dp 为 2 时记录点的横纵坐标。 neighbor[offdp12 + (idx << 1)] = i; neighbor[offdp12 + (idx << 1) + 1] = j; // 根据分析发现,dp 为 2 时左上块和右下块点的坐标存在着某种 // 关系:关于 y = -x 对称。 neighbor[offdp12 + offdp23 + (idx << 1)] = -j; neighbor[offdp12 + offdp23 + (idx << 1) + 1] = -i; // 根据分析发现,dp 为 2 左上块和 dp 为 3 右上块的点的坐标存 // 在着某种关系:x 值互为相反数。 // 故可以推出 dp 为 3 时的模板,如下: neighbor[offdp12 + (offdp23 << 1) + (idx << 1)] = -i; neighbor[offdp12 + (offdp23 << 1) + (idx << 1) + 1] = j; // 根据分析发现,dp 为 3 左上块和右下块的点的坐标存在着某种 // 关系:关于 y = x 对称。 neighbor[offdp12 + offdp23 * 3 + (idx << 1)] = j; neighbor[offdp12 + offdp23 * 3 + (idx << 1) + 1] = -i; idx++; } } // 调试代码,输出所有生成的坐标点集 //for (int i = 0; i < offdp12 + offdp23 * 4; i += 2) { // cout << "(" << neighbor[i] << ", " << neighbor[i + 1] << "), "; // if (i % 30 == 0) cout << endl; //} //cout << endl; // 处理完毕,退出。 return NO_ERROR; } // Device 函数:_maxEnhancementDev(最大增强法) static __device__ void _maxEnhancementDev( ImageCuda inimg, ImageCuda outimg, ImageCuda tempimg, int searchscope, int ndif, int dp, int c, int r) { // dp 号为 0,左右方向检测 // dp 号为 
1,上下方向检测 // dp 号为 2,左上右下方向检测 // dp 号为 3,右上左下方向检测 int curc = c, curr = r; // 临时变量表示点的坐标。 int arrsub = 0; // 定义一个临时变量,用来定位 Device 数组的下标。 int icounter = 0; // 辅助计算的计数临时变量。 float rate = 0.4f; // 外部指定参数,要求 0 < rate <= 1,这里设为0.5。 for (int i = 1; i <= searchscope; i++) { // 在左(上、左上、右上)方向上搜索,如果有符合要求的值,则计数变量自加 arrsub = 0; curc = c + _argSupreDev[dp][arrsub++] * i; curr = r + _argSupreDev[dp][arrsub++] * i; if (tempimg.imgMeta.imgData[curr * inimg.pitchBytes + curc] < (unsigned char)(rate * ndif) && curc >= 0 && curc < inimg.imgMeta.width && curr >= 0 && curr < inimg.imgMeta.height) { icounter++; } // 在右(下、右下、左下)方向上搜索 ,如果有符合要求的值,则计数变量自加 curc = c + _argSupreDev[dp][arrsub++] * i; curr = r + _argSupreDev[dp][arrsub++] * i; if (tempimg.imgMeta.imgData[curr * inimg.pitchBytes + curc] < (unsigned char)(rate * ndif) && curc >= 0 && curc < inimg.imgMeta.width && curr >= 0 && curr < inimg.imgMeta.height) { icounter++; } } // 在所判断方向上经过的 pixel 范围内 60% 以上的位置,如果都符合要求,则 // pixel(x,y) 及其 8邻域对应位置上置 200 if((icounter * 1.0f / searchscope) > 0.3) { outimg.imgMeta.imgData[r * inimg.pitchBytes + c] = 200; outimg.imgMeta.imgData[r * inimg.pitchBytes + c + 1] = 200; outimg.imgMeta.imgData[r * inimg.pitchBytes + c - 1] = 200; outimg.imgMeta.imgData[(r + 1) * inimg.pitchBytes + c] = 200; outimg.imgMeta.imgData[(r - 1) * inimg.pitchBytes + c] = 200; outimg.imgMeta.imgData[(r + 1) * inimg.pitchBytes + c + 1] = 200; outimg.imgMeta.imgData[(r + 1) * inimg.pitchBytes + c + 1] = 200; outimg.imgMeta.imgData[(r - 1) * inimg.pitchBytes + c - 1] = 200; outimg.imgMeta.imgData[(r - 1) * inimg.pitchBytes + c - 1] = 200; } // 否则置 0。 else outimg.imgMeta.imgData[r * inimg.pitchBytes + c] = 0; } // Kernel 函数:_detectEdgeSAKer(单纯平均法) static __global__ void _detectEdgeSAKer( int searchscope, int diffsize, int neighbor[], ImageCuda inimg, ImageCuda tempimg, ImageCuda outimg, int sizedp01, int sizedp23) { // 计算线程对应的输出点的位置,线程处理的像素点的坐标的 c 和 r 分量,z 表示 // 对应的邻域方向,其中 0 到 3 分别表示左右、上下、左上右下、右上左下。 // 采用的是二维的 grid,三维的 block。 int c = blockIdx.x * blockDim.x + threadIdx.x; int r = blockIdx.y * blockDim.y + threadIdx.y; int z = threadIdx.z; // 计算当前线程在线程块内的索引。 int tidinblk = z * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x; // 申请动态共享内存。 extern __shared__ int shdedgesa[]; // 为了只使用一个线程来做最大增强法,此处默认选择 z 为 0 的线程来做最大增强 // 法,但是这个线程可能在边界处理时被 return 掉,因此需要一个标记值,当 z // 为 0 的线程 return 之后由其他线程来做最大增强。 int *shdflag = &shdedgesa[0]; // 每一个点的 0 号线程在线程块内的索引。 int index = threadIdx.y * blockDim.x + threadIdx.x; if (z == 0) shdflag[index] = 0; // 在共享内存中申请出一段空间用来存放 4 个对向邻域差分正规化值的结果。 int *shddiffvalue = &shdflag[blockDim.x * blockDim.y]; // 检查第一个像素点是否越界,如果越界,则不进行处理,一方面节省计算资源,一 // 方面防止由于段错误导致的程序崩溃。 if (c >= inimg.imgMeta.width || r >= inimg.imgMeta.height) return; // 计算当前线程要用到的模板地址。 int templateidx = (z < 2 ? 
z * sizedp01 : (sizedp01 << 1) + (z - 2) * sizedp23); int *curtemplate = &neighbor[templateidx]; // 计算重叠区域大小。 int overlap; if ((diffsize + 1) & 2 != 0) { overlap = (diffsize + 1) >> 1; } else { overlap = (diffsize - 1) >> 1; } // 一个临时变量用来判断 dp 为 3 和 4 时的边界。 int offcoord = diffsize - (overlap + 1) / 2; // 判断边缘点,将其像素值置零。 // 并不再进行下面的处理。 // 分别对应 dp 为 0,1,2 和 3 时的边界情况。 // 分别对应 dp 为 0,1,2 和 3 时的边界点放在数组中。 unsigned char edgec[4] = {diffsize, diffsize >> 1, offcoord, offcoord}; unsigned char edger[4] = {diffsize >> 1, diffsize, offcoord, offcoord}; // 判断是否是边界点,如果是则置零并退出。 if (c < edgec[z] || c >= inimg.imgMeta.width - edgec[z] || r < edger[z] || r >= inimg.imgMeta.height - edger[z]) { tempimg.imgMeta.imgData[r * inimg.pitchBytes + c] = 0; // 为了防止某些点的 4 个线程都出界,故先将输出图像的对应点也置为 0; outimg.imgMeta.imgData[r * inimg.pitchBytes + c] = 0; // 将值写入到共享内存。 shddiffvalue[tidinblk] = 0; // 如果 z 为 0 的线程由于边界判断被 return,则将重新设置标记值。 // 此处的 255 仅仅表示非 0 的概念,没有实际意义。 if (z == 0) shdflag[index] = 255; return; } // 当标记值非 0 时,即 z 为 0 的线程已经不复存在了,此时需要更换新的标记值。 // 这时可能用的有 z 为 1、2、3 线程,为了减少 bank conflict,同时又因为 z // 为 2 和 3 必然同时存在或者同时被 return,故判断 z 是否为奇数,可以将 3 // 路冲突改为 2 路冲突。 if (shdflag[index] != 0 && z & 1 != 0) shdflag[index] = z; // 块内同步,保证新的标记值已经写入到共享内存中。 __syncthreads(); // 在计算中用来记录点的像素值的临时变量。 int curgray = 0; // 申请两个中间数组,分别用来存放对向邻域两个块内点的值。 unsigned char pixeltmpDev1[RED_ARRAY_MAXLEN * RED_ARRAY_MAXLEN] = { 0 }; unsigned char pixeltmpDev2[RED_ARRAY_MAXLEN * RED_ARRAY_MAXLEN] = { 0 }; // 用 for 循环,分别算出每个对向临域的各个点的索引。 int curc = c, curr = r; // 点的坐标个数。 int pntcnt = (z < 2) ? sizedp01: sizedp23; // 邻域内部点的数目. int pixelareacnt = 0; for (int i = 0; i < pntcnt; i = i + 2, pixelareacnt++) { // 统计对向邻域的第一模板内的点的坐标。 curc = c + curtemplate[i]; curr = r + curtemplate[i + 1]; // 取出第一个邻域内的点的像素值并统计到对应的数组中。 curgray = inimg.imgMeta.imgData[curr * inimg.pitchBytes + curc]; // 利用像素个数进行判断,将两个对向邻域块内的值分别记录到两个数组中。 if (pixelareacnt < (pntcnt >> 2)) pixeltmpDev1[pixelareacnt] = curgray; else pixeltmpDev2[pixelareacnt - (pntcnt >> 2)] = curgray; } // 块内同步,保证块内的差分值都写入到了共享内存中。 __syncthreads(); // 设置临时变量 sgm1,sgm2,来分别存储两个对向邻域块的方差值,sgm记录 sgm1 // 和 sgm2 中较大的值,来进行最后的正规化计算。 float sgm1, sgm2, sgm; // 设置临时变量 sgm1,sgm2,来分别存储两个对向邻域块的平均值。 float mav1, mav2; // 调用 device 端的函数求解对向邻域的像素平均值和方差。 _computeMavSgmDev(pixeltmpDev1, pixelareacnt / 2, &mav1, &sgm1); // 调用 device 端的函数求解对向邻域的像素平均值和方差。 _computeMavSgmDev(pixeltmpDev2, pixelareacnt / 2, &mav2, &sgm2); // 比较出 sgm1 和 sgm2 两者中较大的赋值给 sgm。 sgm = (sgm1 > sgm2) ? 
sgm1 : sgm2; // 设 ndif 为两个对向域之间的正规化差分值,数组 t 和 k 为计算正规化差分值 // ndif 的参数数组,大小都为 10。 int ndif = 0, dp = 0; double t[10] = { 9, 25, 49, 81, 121, 169, 225, 289, 361, 441 } ; double k[10] = { 0.001, 0.005, 0.025, 0.125, 0.625, 3.125, 15.624, 78.125, 390.625, 1953.125 } ; shddiffvalue[tidinblk] = (mav1 - mav2) * (mav1 - mav2) / (t[4] + k[5] * sgm); // 块内同步,保证块内的正规化差分值都写入到了共享内存中。 __syncthreads(); // 只需要标记的线程来做以下处理。 if (z != shdflag[index]) return; // 用设定的变量 ndif 来存储四种方向正规化差分计算的最大值,dp 记录四种方向 // 中差分值最大的方向。 // 局部变量,方便一下计算。 int offinblk = blockDim.x * blockDim.y; ndif = shddiffvalue[index]; for (int i = index + offinblk, j = 1; i < index + 4 * offinblk; i += offinblk, j++) { if (ndif < shddiffvalue[i]) { ndif = shddiffvalue[i]; dp = j; } } // 块内同步,保证块内的正规化差分值都写入到了共享内存中。 __syncthreads(); // 根据是否有宏 RED_CODE_CHOICE,来决定调用那部分检测边缘的代码。 #ifndef RED_CODE_CHOICE // 判断是否是边界点。如果 ndfi 大于等于 1,则证明是边界点,在输出图像的对应 // 位置像素点置 200,否则置 0。 if(ndif >= 1) outimg.imgMeta.imgData[r * inimg.pitchBytes + c] = 200; else outimg.imgMeta.imgData[r * inimg.pitchBytes + c] = 0; _maxEnhancementDev(inimg, tempimg, inimg, searchscope, ndif, dp, c, r); #else // 将正规化后的差分值赋给当前像素点,存储到新的临时图片上。 tempimg.imgMeta.imgData[r * inimg.pitchBytes + c] = ndif; __syncthreads(); // 设置数组 assist 辅助计算最大增强法的搜索范围参数 searchscope,其中第一个 // 数据 1 为辅助数据,没有实际意义。 int assist[6] = {1, 2, 3, 3, 4, 4}; // 计算 searchscope 的值。 searchscope = assist[(diffsize - 1) / 2]; // 进行最大增强操作。 _maxEnhancementDev(inimg, outimg, tempimg, searchscope, ndif, dp, c, r); #endif } // 宏:FAIL_RED_SA_FREE // 该宏用于清理在申请的设备端或者主机端内存空间。 #define FAIL_RED_SA_FREE do { \ if (tempimg != NULL) \ ImageBasicOp::deleteImage(tempimg); \ if (tempimg1 != NULL) \ ImageBasicOp::deleteImage(tempimg1); \ if (neighborDev != NULL) \ cudaFree(neighborDev); \ if (neighbor != NULL) \ delete [] (neighbor); \ } while (0) // Host 成员方法:detectEdgeSA(单纯平均法) __host__ int RobustEdgeDetection::detectEdgeSA( Image *inimg, Image *outimg, CoordiSet *guidingset) { // 检查输入图像和输出图像是否为 NULL,如果为 NULL 直接报错返回。 if (inimg == NULL || outimg == NULL) return NULL_POINTER; // 检查图像是否为空 if (inimg->imgData == NULL) return UNMATCH_IMG; // 这一段代码进行图像的预处理工作。图像的预处理主要完成在 Device 内存上为 // 输入和输出图像准备内存空间,以便盛放数据。 int errcode; // 局部变量,错误码 // guidingset 为边缘检测的指导区域,如果 guidingset 不为空,暂未实现。 if (guidingset != NULL) { return UNIMPLEMENT; } // 将输入图像拷贝到 Device 内存中。 errcode = ImageBasicOp::copyToCurrentDevice(inimg); if (errcode != NO_ERROR) return errcode; // 将输出图像拷贝入 Device 内存。 errcode = ImageBasicOp::copyToCurrentDevice(outimg); if (errcode != NO_ERROR) { // 如果输出图像无数据(故上面的拷贝函数会失败),则会创建一个和输入图像 // 的 ROI 子图像尺寸相同的图像。 errcode = ImageBasicOp::makeAtCurrentDevice( outimg, inimg->roiX2 - inimg->roiX1, inimg->roiY2 - inimg->roiY1); // 如果创建图像也操作失败,则说明操作彻底失败,报错退出。 if (errcode != NO_ERROR) return errcode; } // 临时图像和存放邻域坐标的数组已经在 Device 端的邻域坐标数组。 Image *tempimg = NULL, * tempimg1 = NULL; int *neighbor = NULL, *neighborDev = NULL; // 创建临时图像。 errcode = ImageBasicOp::newImage(&tempimg); if (errcode != NO_ERROR) { FAIL_RED_SA_FREE; return errcode; } // 将 temp 图像在 Device 内存中建立数据。 errcode = ImageBasicOp::makeAtCurrentDevice( tempimg, inimg->roiX2 - inimg->roiX1, inimg->roiY2 - inimg->roiY1); // 如果创建图像操作失败,则释放内存报错退出。 if (errcode != NO_ERROR) { FAIL_RED_SA_FREE; return errcode; } // 创建第二幅临时图像,供调用 Thinning 函数和 FreckleFilter 函数使用 errcode = ImageBasicOp::newImage(&tempimg1); if (errcode != NO_ERROR) { FAIL_RED_SA_FREE; return errcode; } // 提取输入图像的 ROI 子图像。 ImageCuda insubimgCud; errcode = ImageBasicOp::roiSubImage(inimg, &insubimgCud); if (errcode != NO_ERROR) { FAIL_RED_SA_FREE; return errcode; 
} // 提取输出图像的 ROI 子图像。 ImageCuda outsubimgCud; errcode = ImageBasicOp::roiSubImage(outimg, &outsubimgCud); if (errcode != NO_ERROR) { FAIL_RED_SA_FREE; return errcode; } // 提取临时图像 tempimg 的 ROI 子图像。 ImageCuda subimgCud; errcode = ImageBasicOp::roiSubImage(tempimg, &subimgCud); if (errcode != NO_ERROR) { FAIL_RED_SA_FREE; return errcode; } // 记录不同 dp 情况下邻域中点的横纵坐标个数总和。 int sizedp01 = 0, sizedp23 = 0; // 申请 Host 端记录邻域坐标的空间。 neighbor = new int[16 * diffsize * diffsize]; // 判断空间是否申请成功。 if (neighbor == NULL) { // 释放空间。 FAIL_RED_SA_FREE; return OUT_OF_MEM; } // 调用 _initNeighbor errcode = _initNeighbor(neighbor, diffsize, &sizedp01, &sizedp23); // 如果调用失败,则删除临时图像和临时申请的空间。 if (errcode != NO_ERROR) { // 释放空间。 FAIL_RED_SA_FREE; return errcode; } // 将邻域坐标 neighbor 传入 Device 端。 // 为 neighborDev 在 Device 端申请空间。 int cudaerrcode = cudaMalloc((void **)&neighborDev, sizeof (int) * 16 * diffsize * diffsize); // 若申请不成功则释放空间。 if (cudaerrcode != cudaSuccess) { // 释放空间。 FAIL_RED_SA_FREE; return CUDA_ERROR; } // 将 neighbor 拷贝到 Device 端的 neighborDev 中。 cudaerrcode = cudaMemcpy(neighborDev, neighbor, sizeof (int) * 16 * diffsize * diffsize, cudaMemcpyHostToDevice); // 如果拷贝不成功,则释放空间。 if (cudaerrcode != cudaSuccess) { // 释放空间。 FAIL_RED_SA_FREE; return CUDA_ERROR; } // 计算调用 Kernel 函数的线程块的尺寸和线程块的数量。 dim3 blocksize, gridsize; blocksize.x = SA_DEF_BLOCK_X; blocksize.y = SA_DEF_BLOCK_Y; blocksize.z = SA_DEF_BLOCK_Z; gridsize.x = (insubimgCud.imgMeta.width + blocksize.x - 1) / blocksize.x; gridsize.y = (insubimgCud.imgMeta.height + blocksize.y - 1) / blocksize.y; gridsize.z = 1; // 计算用到的共享内存空间大小。 int memsize = SA_DEF_BLOCK_X * SA_DEF_BLOCK_Y * (SA_DEF_BLOCK_Z + 1) * sizeof (int); // 调用核函数 _detectEdgeSAKer<<<gridsize, blocksize, memsize>>>( searchScope, diffsize, neighborDev, insubimgCud, subimgCud, outsubimgCud, 2 * sizedp01, 2 * sizedp23); // 判断 CUDA 调用是否出错。 if (cudaGetLastError() != cudaSuccess) { FAIL_RED_SA_FREE; return CUDA_ERROR; } // 调用 Thinning errcode = this->thinning.thinMatlabLike(outimg, tempimg1); if (errcode != NO_ERROR) { FAIL_RED_SA_FREE; return errcode; } // 调用 FreckleFilter errcode = this->frecklefilter.freckleFilter(tempimg1, outimg); if (errcode != NO_ERROR) { FAIL_RED_SA_FREE; return errcode; } // 退出前删除临时图像。 ImageBasicOp::deleteImage(tempimg); ImageBasicOp::deleteImage(tempimg1); delete [] neighbor; cudaFree(neighborDev); // 处理完毕,退出。 return NO_ERROR; } // Device 函数:_computeMavSgmDev(计算中值平均即 MAV,方差值 SGM) static __device__ int _computeMavSgmDev( unsigned char pixeltmpDev[], int pixelareacnt, float *mav, float *sgm) { // 判断输入参数的有效性。 if (pixeltmpDev == NULL || mav == NULL || sgm == NULL) return INVALID_DATA; // 若邻域内部的点的值都一样,则不用再进行下面的计算,直接返回。 if (pixeltmpDev[0] == pixeltmpDev[pixelareacnt - 1]) { *mav = pixeltmpDev[0]; *sgm = 0.0f; return NO_ERROR; } // 定义一个临时变量 sumpixel 来计算数组里下标从 up 到 down 的像素值和。 double sumpixel = 0.0f; // 累加像素值和。 for (int i = 0; i < pixelareacnt; i++) sumpixel += pixeltmpDev[i]; // 计算 MAV。 *mav = sumpixel / pixelareacnt; // 计算 SGM。 double sum = 0.0; // 先累加每一个像素值和平均值的差的平方和。 for (int i = 0; i < pixelareacnt; i++) sum += (pixeltmpDev[i] - *mav) * (pixeltmpDev[i] - *mav); // 计算方差值。 *sgm = sum / pixelareacnt; // 正常执行,返回 NO_ERROR。 return NO_ERROR; } // Device 函数:_computeMavMaxDev(计算各个对向邻域的3个统计量) static __device__ int _computeMavMaxDev( unsigned char pixeltmpDev[], int pixelareacnt, float *hg, float *lg, float *ag, int *max) { // 判断输入参数的有效性。 if (pixeltmpDev == NULL || max == NULL || hg == NULL || lg == NULL || ag == NULL) return INVALID_DATA; // 若邻域内部的点的值都一样,则不用再进行下面的计算,直接返回。 if (pixeltmpDev[0] == 
pixeltmpDev[pixelareacnt - 1]) { *max = pixeltmpDev[0]; *hg = pixeltmpDev[0]; *lg = pixeltmpDev[0]; *ag = pixeltmpDev[0]; return NO_ERROR; } // 排序后数组成降序,可直接获取最大值。 *max = pixeltmpDev[pixelareacnt - 1]; // 计数器 icounter,jcounter,初始化均为 0。 int icounter = 0, jcounter = 0; // 统计变量 sum,初始化为 0。 float sum = 0.0f; // 计算低灰度值 lg jcounter = pixelareacnt / 10; for(icounter = 0; icounter < jcounter; icounter++) sum += pixeltmpDev[icounter]; *lg = sum / jcounter; // 计算高灰度值 hg sum = 0.0f; jcounter = pixelareacnt - (pixelareacnt / 10); for(icounter = jcounter; icounter < pixelareacnt; icounter++) sum += pixeltmpDev[icounter]; jcounter = pixelareacnt / 10; *hg = sum / jcounter; // 计算中央灰度均值 ag sum = 0.0f; jcounter = pixelareacnt / 10; for(icounter = (4 * jcounter); icounter < (6 * jcounter); icounter++) sum += pixeltmpDev[icounter]; *ag = sum / jcounter; // 正常执行,返回 NO_ERROR。 return NO_ERROR; } // Kernel 函数:_detectEdgeFVKer(特征向量法) // 通过公式计算,进行边缘检测。 static __global__ void _detectEdgeFVKer( ImageCuda inimg, ImageCuda tempimg, ImageCuda outimg, int diffsize, int searchscope, int neighbor[], int sizedp01, int sizedp23) { // 计算线程对应的输出点的位置,线程处理的像素点的坐标的 c 和 r 分量,z 表示 // 对应的邻域方向,其中 0 到 3 分别表示左右、上下、左上右下、右上左下。 // 采用的是二维的 grid,三维的 block。 int c = blockIdx.x * blockDim.x + threadIdx.x; int r = blockIdx.y * blockDim.y + threadIdx.y; int z = threadIdx.z; // 计算当前线程在线程块内的索引。 int tidinblk = z * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x; // 申请动态共享内存。 extern __shared__ int shdedgesa[]; // 为了只使用一个线程来做非极大值抑制,此处默认选择 z 为 0 的线程来做非极大 // 值抑制,但是这个线程可能在边界处理时被 return 掉,因此需要一个标记值, // 当 z 为 0 的线程 return 之后由其他线程来做非极大值抑制。 int *shdflag = &shdedgesa[0]; // 块内同步,保证新的标记值已经写入到共享内存中。 __syncthreads(); // 每一个点的 0 号线程在线程块内的索引。 int index = threadIdx.y * blockDim.x + threadIdx.x; if (z == 0) shdflag[index] = 0; // 块内同步,保证新的标记值已经写入到共享内存中。 __syncthreads(); // 在共享内存中申请出一段空间用来存放 4 个对向邻域差分正规化值的结果。 int *shddiffvalue = &shdflag[blockDim.x * blockDim.y]; // 块内同步,保证新的标记值已经写入到共享内存中。 __syncthreads(); // 检查第一个像素点是否越界,如果越界,则不进行处理,一方面节省计算资源,一 // 方面防止由于段错误导致的程序崩溃。 if (c >= inimg.imgMeta.width || r >= inimg.imgMeta.height) return; // 计算当前线程要用到的模板地址。 int templateidx = (z < 2 ? 
z * sizedp01 : (sizedp01 << 1) + (z - 2) * sizedp23); int *curtemplate = &neighbor[templateidx]; // 计算重叠区域大小。 int overlap; if ((diffsize + 1) & 2 != 0) { overlap = (diffsize + 1) >> 1; } else { overlap = (diffsize - 1) >> 1; } // 一个临时变量用来判断 dp 为 3 和 4 时的边界。 int offcoord = diffsize - (overlap + 1) / 2; // 判断边缘点,将其像素值置零。 // 并不再进行下面的处理。 // 分别对应 dp 为 0,1,2 和 3 时的边界情况。 // 分别对应 dp 为 0,1,2 和 3 时的边界点放在数组中。 unsigned char edgec[4] = {diffsize, diffsize >> 1, offcoord, offcoord}; unsigned char edger[4] = {diffsize >> 1, diffsize, offcoord, offcoord}; // 判断是否是边界点,如果是则置零并退出。 if (c < edgec[z] || c >= inimg.imgMeta.width - edgec[z] || r < edger[z] || r >= inimg.imgMeta.height - edger[z]) { tempimg.imgMeta.imgData[r * inimg.pitchBytes + c] = 0; // 为了防止某些点的 4 个线程都出界,故先将输出图像的对应点也置为 0; outimg.imgMeta.imgData[r * inimg.pitchBytes + c] = 0; // 将值写入到共享内存。 shddiffvalue[tidinblk] = 0; // 如果 z 为 0 的线程由于边界判断被 return,则将重新设置标记值。 // 此处的 255 仅仅表示非 0 的概念,没有实际意义。 if (z == 0) shdflag[index] = 255; return; } // 块内同步,保证新的标记值已经写入到共享内存中。 __syncthreads(); // 当标记值非 0 时,即 z 为 0 的线程已经不复存在了,此时需要更换新的标记值。 // 这时可能用的有 z 为 1、2、3 线程,为了减少 bank conflict,同时又因为 z // 为 2 和 3 必然同时存在或者同时被 return,故判断 z 是否为奇数,可以将 3 // 路冲突改为 2 路冲突。 if (shdflag[index] != 0 && z & 1 != 0) shdflag[index] = z; // 块内同步,保证新的标记值已经写入到共享内存中。 __syncthreads(); // 在计算中用来记录点的像素值的临时变量。 int curgray = 0; // 申请两个中间数组,分别用来存放对向邻域两个块内点的值。 unsigned char pixeltmpDev1[RED_ARRAY_MAXLEN * RED_ARRAY_MAXLEN] = { 0 }; unsigned char pixeltmpDev2[RED_ARRAY_MAXLEN * RED_ARRAY_MAXLEN] = { 0 }; // 用 for 循环,分别算出每个对向临域的各个点的索引 int curc = c, curr = r; // 点的坐标个数。 int pntcnt = (z < 2) ? sizedp01: sizedp23; int middle = pntcnt >> 2; // 邻域内部点的数目. int pixelareacnt = 0; for (int i = 0; i < pntcnt; i = i + 2, pixelareacnt++) { // 统计对向邻域的第一模板内的点的坐标。 curc = c + curtemplate[i]; curr = r + curtemplate[i + 1]; // 取出第一个邻域内的点的像素值并统计到对应的数组中。 curgray = inimg.imgMeta.imgData[curr * inimg.pitchBytes + curc]; // 利用像素个数进行判断,将两个对向邻域块内的值分别记录到两个数组中。 if (pixelareacnt < middle) pixeltmpDev1[pixelareacnt] = curgray; else{ pixeltmpDev2[pixelareacnt - middle] = curgray;} } // 块内同步,保证块内的差分值都写入到了共享内存中。 __syncthreads(); // 为两个 pixeltmpDev 排序,经过上面的处理,现在数组内部只有 pixelareacnt // 个值,此时可以使用插入排序。 for (int i = 1; i < (pixelareacnt / 2); i++) { int sorttmp = pixeltmpDev1[i]; int j = i - 1; while (j >= 0 && sorttmp < pixeltmpDev1[j]) { pixeltmpDev1[j + 1] = pixeltmpDev1[j]; j = j - 1; } pixeltmpDev1[j + 1] = sorttmp; } for (int i = 1; i < (pixelareacnt / 2); i++) { int sorttmp = pixeltmpDev2[i]; int j = i - 1; while (j >= 0 && sorttmp < pixeltmpDev2[j]) { pixeltmpDev2[j + 1] = pixeltmpDev2[j]; j = j - 1; } pixeltmpDev2[j + 1] = sorttmp; } // 计算邻域平均高灰度值 hg,平均低灰度值 lg 和中央均值ag 以 // 及对向邻域内像素值的最大值 max。 // 计算邻域整体的平均值 MAV,方差 SGM。 // 调用 device 函数。 float mav1 = 0.0f, mav2 = 0.0f; float sgm1 = 0.0f, sgm2 = 0.0f; float hg1 = 0.0f, hg2 = 0.0f; float lg1 = 0.0f, lg2 = 0.0f; float ag1 = 0.0f, ag2 = 0.0f; int max1 = 0, max2 = 0; // 调用 device 端的函数求解一个对向邻域块的 MAV 和 SGM。 _computeMavSgmDev(pixeltmpDev1, pixelareacnt / 2, &mav1, &sgm1); // 调用 device 端的函数求解一个对向邻域块儿的平均高灰度值 hg,平均低灰度值 // lg 和中央均值ag 以及对向邻域内像素值的最大值 max。 _computeMavMaxDev(pixeltmpDev1, pixelareacnt / 2, &hg1, &lg1, &ag1, &max1); // 调用 device 端的函数求解另一个对向邻域块的 MAV 和 SGM。 _computeMavSgmDev(pixeltmpDev2, pixelareacnt / 2, &mav2, &sgm2); // 调用 device 端的函数求解另一个对向邻域块儿的平均高灰度值 hg,平均低灰度值 // lg 和中央均值ag 以及对向邻域内像素值的最大值 max。 _computeMavMaxDev(pixeltmpDev2, pixelareacnt / 2, &hg2, &lg2, &ag2, &max2); // 求解对向邻域的三个特征向量。 // 设置临时变量。 int aa = 1, bb = 1, cc = 1; // 外部参数 float abc = aa + bb + cc; float A = aa / abc; float B 
= bb / abc; float C = cc / abc; float dg1 = hg1 - lg1, dg2 = hg2 - lg2; float s1 = 255 / (1 + max1 - mav1), s2 = 255 / (1 + max2 - mav2); float sd1 = s1 * sqrt(sgm1), sd2 = s2 * sqrt(sgm2); // 特征向量对向域间的相关系数 indexc。 float indexc; // 设置几个辅助变量,辅助计算特征向量对向域间的相关系数 indexc, 无实际含义。 float m0 = A * dg1 * dg2 + B * ag1 * ag2 + C * sd1 * sd2; float m1 = A * dg1 * dg1 + B * ag1 * ag1 + C * sd1 * sd1; float m2 = A * dg2 * dg2 + B * ag2 * ag2 + C * sd2 * sd2; // 用块内的偶数号线程来根据公式计算,可以得到 4 个值, // 并存放在共享内存中。 // 计算特征向量对向域间的相关系数 indexc。 indexc = (m1 * m2 == 0) ? 1.0f : m0 / (3 * sqrt(m1 * m2)); // 将结果存入共享内存。 shddiffvalue[tidinblk] = indexc; // 块内同步。 __syncthreads(); // 以下处理主要是找出 indexc 的最小值,并在其对应的 dp 方向上做最大增强,如 // 果只用 1 个线程来处理该步骤可能会出现该线程被提前 return 掉的情形,所以此 // 步骤采用偶数线程来处理,并将选出的 minc 的最小值处理,赋值赋值给临时图片 // tempimg。 // 只需要标记的线程来做以下处理。 if (z != shdflag[index]) return; // 记录最小 minc 值的方向。 // 设定变量 minc 来存储四种方向差分计算的最小值,dp 记录四种方向中差分值最 // 大的方向。 int offinblk = blockDim.x * blockDim.y; // 局部变量,方便一下计算。 float minc = shddiffvalue[index]; int dp = 0; for (int i = index + offinblk, j = 1; i < index + 4 * offinblk; i += offinblk, j++) { if (minc > shddiffvalue[i]) { minc = shddiffvalue[i]; dp = j; } } // 块内同步,保证新的标记值已经写入到共享内存中。 __syncthreads(); // 外部指定参数 float distRate = 0.5f; float maxMahaDist = 3.0f; float correTh = 0.5f; float centerFV[3] = { 1.0f, 1.0f, 1.0f }; // 此乃给定的对称半正定矩阵 float variMatrix[9] = { 1.0f, 2.0f, 1.0f, 2.0f, 3.0f, 2.0f, 1.0f, 2.0f, 1.0f } ; // 将 minc 与外部传入参数做比较,进行判断赋值 int ipixel = 0; if(minc < correTh) { // 将最大值赋给当前像素点,存储到临时图片上。 ipixel = 255 * (1 - minc); tempimg.imgMeta.imgData[r * inimg.pitchBytes + c] = ipixel; // edge- likelihood- score的印加方法 if(centerFV != NULL && variMatrix != NULL){ // edge- likelihood- score的印加 // 计算两个对向域特征 (dgi, agi, sdi) (i =1,2) 和 centerFV 之间的马 // 氏距离。 float v1[3], v2[3]; float md1, md2, minmd; v1[0] = abs(dg1 - centerFV[0]) * A; v1[1] = abs(ag1 - centerFV[1]) * B; v1[2] = abs(sd1 - centerFV[2]) * C; v2[0] = abs(dg2 - centerFV[0]) * A; v2[1] = abs(ag2 - centerFV[1]) * B; v2[2] = abs(sd2 - centerFV[2]) * C; md1 = sqrt((v1[0] * (v1[0] * variMatrix[0] + v1[1] * variMatrix[1] + v1[2] * variMatrix[2])) + (v1[1] * (v1[0] * variMatrix[0] + v1[1] * variMatrix[1] + v1[2] * variMatrix[2])) + (v1[2] * (v1[0] * variMatrix[0] + v1[1] * variMatrix[1] + v1[2] * variMatrix[2]))); md2 = sqrt((v2[0] * (v2[0] * variMatrix[0] + v2[1] * variMatrix[1] + v2[2] * variMatrix[2])) + (v2[1] * (v2[0] * variMatrix[0] + v2[1] * variMatrix[1] + v2[2] * variMatrix[2])) + (v2[2] * (v2[0] * variMatrix[0] + v2[1] * variMatrix[1] + v2[2] * variMatrix[2]))); minmd = (md1 < md2) ? 
md1 : md2; if(minmd > (distRate * maxMahaDist)) { ipixel = 255 * (1 - minmd / maxMahaDist); tempimg.imgMeta.imgData[r * inimg.pitchBytes + c] = ipixel; } } } else // 最终为图片赋值。 tempimg.imgMeta.imgData[r * inimg.pitchBytes + c] = 0; // 设置数组 assist 辅助计算最大增强法的搜索范围参数 searchscope,其中第一个 // 数据 1 为辅助数据,没有实际意义。 int assist[6] = {1, 2, 3, 3, 4, 4}; // 计算 searchscope 的值。 searchscope = assist[(diffsize - 1) / 2]; // 进行最大增强操作。 _maxEnhancementDev(inimg, outimg, tempimg, searchscope, ipixel, dp, c, r); } // 宏:FAIL_RED_FV_FREE // 该宏用于清理在申请的设备端或者主机端内存空间。 #define FAIL_RED_FV_FREE do { \ if (tempimg != NULL) \ ImageBasicOp::deleteImage(tempimg); \ if (tempimg1 != NULL) \ ImageBasicOp::deleteImage(tempimg1); \ if (neighborDev != NULL) \ cudaFree(neighborDev); \ if (neighbor != NULL) \ delete [] (neighbor); \ } while (0) // Host 成员方法:detectEdgeFV(特征向量法) __host__ int RobustEdgeDetection::detectEdgeFV( Image *inimg, Image *outimg, CoordiSet *guidingset) { // 检查输入图像和输出图像是否为 NULL,如果为 NULL 直接报错返回。 if (inimg == NULL || outimg == NULL) return NULL_POINTER; // 检查图像是否为空 if (inimg->imgData == NULL) return UNMATCH_IMG; // 这一段代码进行图像的预处理工作。图像的预处理主要完成在 Device 内存上为 // 输入和输出图像准备内存空间,以便盛放数据。 // 局部变量,错误码 int errcode; cudaError_t cudaerrcode; // guidingset 为边缘检测的指导区域,如果 guidingset 不为空暂时未实现。 if (guidingset != NULL) return UNIMPLEMENT; // 将输入图像拷贝到 Device 内存中。 errcode = ImageBasicOp::copyToCurrentDevice(inimg); if (errcode != NO_ERROR) return errcode; // 将输出图像拷贝入 Device 内存。 errcode = ImageBasicOp::copyToCurrentDevice(outimg); if (errcode != NO_ERROR) { // 如果输出图像无数据(故上面的拷贝函数会失败),则会创建一个和输入图 // 像的 ROI 子图像尺寸相同的图像。 errcode = ImageBasicOp::makeAtCurrentDevice( outimg, inimg->roiX2 - inimg->roiX1, inimg->roiY2 - inimg->roiY1); // 如果创建图像也操作失败,则说明操作彻底失败,报错退出。 if (errcode != NO_ERROR) return errcode; } // 创建临时图像 Image *tempimg = NULL, *tempimg1 = NULL; // 创建记录邻域坐标的指针和对应在 Device 端的指针。 int *neighbor = NULL, *neighborDev = NULL; errcode = ImageBasicOp::newImage(&tempimg); if (errcode != NO_ERROR) { // 释放空间。 FAIL_RED_FV_FREE; return errcode; } // 将 tempimg 图像在 Device 内存中建立数据。 errcode = ImageBasicOp::makeAtCurrentDevice( tempimg, inimg->roiX2 - inimg->roiX1, inimg->roiY2 - inimg->roiY1); // 如果创建图像操作失败,则释放内存报错退出。 if (errcode != NO_ERROR) { // 释放空间。 FAIL_RED_FV_FREE; return errcode; } // 创建第二幅临时图像,供调用 Thinning 函数和 FreckleFilter 函数使用 errcode = ImageBasicOp::newImage(&tempimg1); if (errcode != NO_ERROR) { // 释放空间。 FAIL_RED_FV_FREE; return errcode; } // 提取输入图像的 ROI 子图像。 ImageCuda insubimgCud; errcode = ImageBasicOp::roiSubImage(inimg, &insubimgCud); if (errcode != NO_ERROR) { // 释放空间。 FAIL_RED_FV_FREE; return errcode; } // 提取输出图像的 ROI 子图像。 ImageCuda outsubimgCud; errcode = ImageBasicOp::roiSubImage(outimg, &outsubimgCud); if (errcode != NO_ERROR) { // 释放空间。 FAIL_RED_FV_FREE; return errcode; } // 提取临时图像的 ROI 子图像。 ImageCuda subimgCud; errcode = ImageBasicOp::roiSubImage(tempimg, &subimgCud); if (errcode != NO_ERROR) { // 释放空间。 FAIL_RED_FV_FREE; return errcode; } // 如果调用失败,则删除临时图像。 if (errcode != NO_ERROR) { // 释放空间。 FAIL_RED_FV_FREE; return errcode; } // 调用 _initNeighbor // 记录不同 dp 情况下邻域中点的横纵坐标个数总和。 int sizedp01 = 0, sizedp23 = 0; neighbor = new int[16 * diffsize * diffsize]; // 判断空间是否申请成功。 if (neighbor == NULL) { // 释放空间。 FAIL_RED_FV_FREE; return NULL_POINTER; } errcode = _initNeighbor(neighbor, diffsize, &sizedp01, &sizedp23); // 如果调用失败,则删除临时图像和临时申请的空间。 if (errcode != NO_ERROR) { // 释放空间。 FAIL_RED_FV_FREE; return errcode; } // 将邻域坐标 neighbor 传入 Device 端。 // 为 neighborDev 在 Device 端申请空间。 cudaerrcode = cudaMalloc((void **)&neighborDev, sizeof (int) * 16 * 
diffsize * diffsize); // 若申请不成功则释放空间。 if (cudaerrcode != cudaSuccess) { // 释放空间。 FAIL_RED_FV_FREE; return CUDA_ERROR; } // 将 neighbor 拷贝到 Device 端的 neighborDev 中。 cudaerrcode = cudaMemcpy(neighborDev, neighbor, sizeof (int) * 16 * diffsize * diffsize, cudaMemcpyHostToDevice); // 如果拷贝不成功,则释放空间。 if (cudaerrcode != cudaSuccess) { // 释放空间。 FAIL_RED_FV_FREE; return CUDA_ERROR; } // 计算调用 Kernel 函数的线程块的尺寸和线程块的数量。 dim3 blocksize, gridsize; blocksize.x = FV_DEF_BLOCK_X; blocksize.y = FV_DEF_BLOCK_Y; blocksize.z = FV_DEF_BLOCK_Z; gridsize.x = (outsubimgCud.imgMeta.width + blocksize.x - 1) / blocksize.x; gridsize.y = (outsubimgCud.imgMeta.height + blocksize.y - 1) / blocksize.y; gridsize.z = 1; // 计算共享内存的空间。 int memsize = FV_DEF_BLOCK_Y * FV_DEF_BLOCK_X * FV_DEF_BLOCK_Z * (3 * sizeof (float) + sizeof (int)); // 调用核函数 _detectEdgeFVKer<<<gridsize, blocksize, memsize>>>( insubimgCud, subimgCud, outsubimgCud, diffsize, searchScope, neighborDev, sizedp01 * 2, sizedp23 * 2); // 检查 kernel 是否调用出错,若出错则释放空间。 if (cudaGetLastError() != cudaSuccess) { // 释放空间。 FAIL_RED_FV_FREE; return CUDA_ERROR; } // 调用 Thinning,如果调用出错则释放空间。 errcode = this->thinning.thinMatlabLike(outimg, tempimg1); if (errcode != NO_ERROR) { // 释放空间。 FAIL_RED_FV_FREE; return errcode; } // 调用 FreckleFilter,如果调用出错则释放空间。 errcode = this->frecklefilter.freckleFilter(tempimg1, outimg); if (errcode != NO_ERROR) { // 释放空间。 FAIL_RED_FV_FREE; return errcode; } // 退出前删除临时图像。 ImageBasicOp::deleteImage(tempimg); ImageBasicOp::deleteImage(tempimg1); delete [] neighbor; cudaFree(neighborDev); // 处理完毕,退出。 return NO_ERROR; } // Host 成员方法:CPU 端的 sobel 算子边缘检测。 __host__ int RobustEdgeDetection::sobelHost(Image *src, Image *out) { int x, y, s, s1, s2; for (x = 1; x < src->width - 1; x++) { for (y = 1; y < src->height - 1; y++) { //横向梯度[-1 0 1; -2 0 2; -1 0 1] s1 = src->imgData[x + 1 + (y - 1) * src->width] + 2 * src->imgData[x + 1 + y * src->width] + src->imgData[x + 1 + (y + 1) * src->width]; s1 = s1 - src->imgData[x - 1 + (y - 1) * src->width] - 2 * src->imgData[x - 1 + y * src->width] - src->imgData[x - 1 + (y + 1) * src->width]; //纵向梯度[-1 -2 -1; 0 0 0; 1 2 1] s2 = src->imgData[x + 1 + (y + 1) * src->width] + 2 * src->imgData[x + (y + 1) * src->width] + src->imgData[x - 1 + (y + 1) * src->width]; s2 = s2 - src->imgData[x + 1 + (y - 1) * src->width] - 2 * src->imgData[x + (y - 1) * src->width] - src->imgData[x - 1 + (y - 1) * src->width]; //给图像赋值 s = s1 * s1 + s2 * s2; s = sqrt(s); out->imgData[y * src->width + x] = s; } } return NO_ERROR; } // 核函数:GPU 端的 sobel 算子边缘检测。 static __global__ void // Kernel 函数无返回值 _sobelKer(ImageCuda inimg, ImageCuda outimg) { // 并行策略:一个线程处理一个像素点 int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; // 检查第一个像素点是否越界,如果越界,则不进行处理,一方面节省计算资源,一 // 方面防止由于段错误导致的程序崩溃。 if (x >= inimg.imgMeta.width || y >= inimg.imgMeta.height) return; int s, s1, s2; //横向梯度[-1 0 1; -2 0 2; -1 0 1] s1 = inimg.imgMeta.imgData[x + 1 + (y - 1) * inimg.pitchBytes] + 2 * inimg.imgMeta.imgData[x + 1 + y * inimg.pitchBytes] + inimg.imgMeta.imgData[x + 1 + (y + 1) * inimg.pitchBytes]; s1 = s1 - inimg.imgMeta.imgData[x - 1 + (y - 1) * inimg.pitchBytes] - 2 * inimg.imgMeta.imgData[x - 1 + y * inimg.pitchBytes] - inimg.imgMeta.imgData[x - 1 + (y + 1) * inimg.pitchBytes]; //纵向梯度[-1 -2 -1; 0 0 0; 1 2 1] s2 = inimg.imgMeta.imgData[x + 1 + (y + 1) * inimg.pitchBytes] + 2 * inimg.imgMeta.imgData[x + (y + 1) * inimg.pitchBytes] + inimg.imgMeta.imgData[x - 1 + (y + 1) * inimg.pitchBytes]; s2 = s2 - inimg.imgMeta.imgData[x + 1 + (y - 1) * 
inimg.pitchBytes] - 2 * inimg.imgMeta.imgData[x + (y - 1) * inimg.pitchBytes] - inimg.imgMeta.imgData[x - 1 + (y - 1) * inimg.pitchBytes]; // Write the gradient magnitude back to the image. s = s1 * s1 + s2 * s2; s = sqrtf(s); outimg.imgMeta.imgData[y * inimg.pitchBytes + x] = s; } // Host member method: Sobel edge detection on the GPU. __host__ int RobustEdgeDetection::sobel(Image *inimg, Image *outimg) { // Check whether the input and output images are NULL; if either is NULL, report the error and return. if (inimg == NULL || outimg == NULL) return NULL_POINTER; // Check whether the input image has data. if (inimg->imgData == NULL) return UNMATCH_IMG; // This section does the preprocessing work: it prepares memory on the Device for the input and output images so they can hold the data. // Local variable, error code. int errcode; // Copy the input image into Device memory. errcode = ImageBasicOp::copyToCurrentDevice(inimg); if (errcode != NO_ERROR) return errcode; // Copy the output image into Device memory. errcode = ImageBasicOp::copyToCurrentDevice(outimg); if (errcode != NO_ERROR) { // If the output image has no data (so the copy above fails), create an image with the same size as the ROI sub-image of the input image. errcode = ImageBasicOp::makeAtCurrentDevice( outimg, inimg->roiX2 - inimg->roiX1, inimg->roiY2 - inimg->roiY1); // If creating the image also fails, the whole operation has failed; report the error and exit. if (errcode != NO_ERROR) return errcode; } // Extract the ROI sub-image of the input image. ImageCuda insubimgCud; errcode = ImageBasicOp::roiSubImage(inimg, &insubimgCud); // Extract the ROI sub-image of the output image. ImageCuda outsubimgCud; errcode = ImageBasicOp::roiSubImage(outimg, &outsubimgCud); // Compute the block and grid dimensions for the kernel launch. The Sobel kernel indexes only x and y, so the block's z dimension is kept at 1 to avoid launching redundant threads. dim3 blocksize, gridsize; blocksize.x = SA_DEF_BLOCK_X; blocksize.y = SA_DEF_BLOCK_Y; blocksize.z = 1; gridsize.x = (insubimgCud.imgMeta.width + blocksize.x - 1) / blocksize.x; gridsize.y = (insubimgCud.imgMeta.height + blocksize.y - 1) / blocksize.y; gridsize.z = 1; // Launch the kernel. _sobelKer<<<gridsize, blocksize>>>(insubimgCud, outsubimgCud); return NO_ERROR; }
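// ---------------------------------------------------------------------------
// Added sketch (not part of the original class): the _sobelKer kernel above
// reads the 3 x 3 stencil without guarding the one-pixel border and writes the
// raw magnitude into an 8-bit image. The variant below, with the hypothetical
// name _sobelClampedKer, skips the border explicitly and clamps the result to
// [0, 255]; it follows the same ImageCuda layout conventions used in this file.
// ---------------------------------------------------------------------------
static __global__ void _sobelClampedKer(ImageCuda inimg, ImageCuda outimg)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;

    // Skip the border so the stencil never reads outside the image.
    if (x < 1 || x >= inimg.imgMeta.width - 1 ||
        y < 1 || y >= inimg.imgMeta.height - 1)
        return;

    const unsigned char *p = inimg.imgMeta.imgData;
    int pitch = inimg.pitchBytes;

    // Horizontal gradient [-1 0 1; -2 0 2; -1 0 1].
    int s1 = p[x + 1 + (y - 1) * pitch] + 2 * p[x + 1 + y * pitch] +
             p[x + 1 + (y + 1) * pitch] -
             p[x - 1 + (y - 1) * pitch] - 2 * p[x - 1 + y * pitch] -
             p[x - 1 + (y + 1) * pitch];
    // Vertical gradient [-1 -2 -1; 0 0 0; 1 2 1].
    int s2 = p[x - 1 + (y + 1) * pitch] + 2 * p[x + (y + 1) * pitch] +
             p[x + 1 + (y + 1) * pitch] -
             p[x - 1 + (y - 1) * pitch] - 2 * p[x + (y - 1) * pitch] -
             p[x + 1 + (y - 1) * pitch];

    // Clamp the magnitude into [0, 255] before storing into the 8-bit image.
    int s = (int)sqrtf((float)(s1 * s1 + s2 * s2));
    outimg.imgMeta.imgData[y * outimg.pitchBytes + x] =
        (unsigned char)(s > 255 ? 255 : s);
}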
#include <thrust/count.h> //count #include <thrust/sort.h> //sort #include <thrust/unique.h> //unique #include <cusp/detail/format_utils.h> //offsets_to_indices namespace amgx { namespace aggregation { namespace size4_selector { // include common routines for all selectors #include <aggregation/selectors/common_selector.h> // ------------------------ // Kernels // ------------------------ #ifndef DELETE // Kernel to compute the weight of the edges with block_dia_csr format template <typename IndexType, typename ValueType> __global__ void computeEdgeWeightsBlockDiaCsr(const IndexType *row_offsets, const IndexType *column_indices, const IndexType *dia_idx, const ValueType *nonzero_values, const IndexType num_block_rows, float *edge_weights, int bsize) { int tid = threadIdx.x + blockDim.x * blockIdx.x; int jcol; int bsize_sq = bsize * bsize; while (tid < num_block_rows) { for (int j = row_offsets[tid]; j < row_offsets[tid + 1]; j++) { jcol = column_indices[j]; if (jcol >= num_block_rows) { continue; } // Compute edge weight for (int k = row_offsets[jcol]; k < row_offsets[jcol + 1]; k++) { if (column_indices[k] == tid) { edge_weights[j] = 0.5 * (fabs(nonzero_values[j * bsize_sq]) + fabs(nonzero_values[k * bsize_sq])) / max( fabs(nonzero_values[bsize_sq * dia_idx[tid]]), fabs(nonzero_values[bsize_sq * dia_idx[jcol]])); break; } } } tid += gridDim.x * blockDim.x; } } #endif // ----------------- // Methods // ---------------- // Constructor template<class T_Config> Size4SelectorBase<T_Config>::Size4SelectorBase(AMG_Config &cfg, const std::string &cfg_scope) { deterministic = cfg.AMG_Config::getParameter<IndexType>("determinism_flag", "default"); max_iterations = cfg.AMG_Config::getParameter<IndexType>("max_matching_iterations", cfg_scope); numUnassigned_tol = cfg.AMG_Config::getParameter<double>("max_unassigned_percentage", cfg_scope); m_aggregation_edge_weight_component = cfg.AMG_Config::getParameter<int>("aggregation_edge_weight_component", cfg_scope); weight_formula = cfg.AMG_Config::getParameter<int>("weight_formula", cfg_scope); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void Size4Selector<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::setAggregates_common_sqblock(const Matrix_h &A, IVector &aggregates, IVector &aggregates_global, int &num_aggregates) { FatalError("OnePhaseHandshaking selector: setAggregates not implemented on CPU, exiting", AMGX_ERR_NOT_SUPPORTED_TARGET); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void Size4Selector<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::setAggregates_common_sqblock(const Matrix_d &A, IVector &aggregates, IVector &aggregates_global, int &num_aggregates) { // both ways are supported const IndexType num_block_rows = A.get_num_rows(); const IndexType num_nonzero_blocks = A.get_num_nz(); if (!A.is_matrix_singleGPU()) { aggregates.resize(A.manager->halo_offset(A.manager->num_neighbors())); } else { aggregates.resize(num_block_rows); } // Initially, put each vertex in its own aggregate thrust::sequence(aggregates.begin(), aggregates.begin() + num_block_rows); cudaCheckError(); IndexType *aggregates_ptr = aggregates.raw(); // Create row_indices array IndexType total_nz = A.is_matrix_singleGPU() ? 
num_nonzero_blocks : A.manager->num_nz_all(); typename Matrix_d::IVector row_indices(total_nz); if (total_nz > 0) { cusp::detail::offsets_to_indices(A.row_offsets, row_indices); } const IndexType *A_row_offsets_ptr = A.row_offsets.raw(); const IndexType *A_row_indices_ptr = row_indices.raw(); const IndexType *A_column_indices_ptr = A.col_indices.raw(); const IndexType *A_dia_idx_ptr = A.diag.raw(); const ValueType *A_nonzero_values_ptr = A.values.raw(); typename Matrix_d::IVector strongest_neighbour(num_block_rows, -1); typename Matrix_d::IVector partner_index(num_block_rows * 3, -1); IndexType *strongest_neighbour_ptr = strongest_neighbour.raw(); IndexType *partner_index_ptr = partner_index.raw(); const int threads_per_block = 256; const int num_blocks = min( AMGX_GRID_MAX_SIZE, (num_block_rows - 1) / threads_per_block + 1); int numUnassigned = num_block_rows; int numUnassigned_previous = numUnassigned; Vector<TemplateConfig<AMGX_device, AMGX_vecFloat, t_matPrec, t_indPrec> > edge_weights(num_nonzero_blocks, -1); float *edge_weights_ptr = edge_weights.raw(); float *rand_edge_weights_ptr = NULL; // Compute the edge weights const int num_blocks_V2 = min( AMGX_GRID_MAX_SIZE, (num_nonzero_blocks - 1) / threads_per_block + 1); if (num_nonzero_blocks > 0) { cudaFuncSetCacheConfig(computeEdgeWeightsBlockDiaCsr_V2<IndexType, ValueType, float>, cudaFuncCachePreferL1); computeEdgeWeightsBlockDiaCsr_V2 <<< num_blocks_V2, threads_per_block>>>(A_row_offsets_ptr, A_row_indices_ptr, A_column_indices_ptr, A_dia_idx_ptr, A_nonzero_values_ptr, num_nonzero_blocks, edge_weights_ptr, rand_edge_weights_ptr, num_block_rows, A.get_block_dimy(), this->m_aggregation_edge_weight_component, this->weight_formula); } // ------------------------------------------------- // First create aggregates of size 2 // ------------------------------------------------- int icount = 0; do { // For each block_row, find the strongest neighbour who hasn't been assigned findStrongestNeighbourBlockDiaCsr_NoMerge <<< num_blocks, threads_per_block>>>(A_row_offsets_ptr, A_column_indices_ptr, edge_weights_ptr, num_block_rows, partner_index_ptr, strongest_neighbour_ptr, this->deterministic); cudaCheckError(); // Look for perfect matches matchEdges <<< num_blocks, threads_per_block>>>(num_block_rows, partner_index_ptr, aggregates_ptr, strongest_neighbour_ptr); cudaCheckError(); numUnassigned_previous = numUnassigned; numUnassigned = (int)thrust::count(partner_index.begin(), partner_index.end(), -1); cudaCheckError(); icount++; } while (!(numUnassigned == 0 || icount > this->max_iterations || 1.0 * numUnassigned / num_block_rows < this->numUnassigned_tol || numUnassigned_previous == numUnassigned)); assignUnassignedVertices <<< num_blocks, threads_per_block>>>(partner_index_ptr, num_block_rows); cudaCheckError(); // ------------------------------------------------- // Merge aggregates to create aggregates of size 4 // ------------------------------------------------- Vector<TemplateConfig<AMGX_device, AMGX_vecFloat, t_matPrec, t_indPrec> > weight_strongest_neighbour(num_block_rows, -1); float *weight_strongest_neighbour_ptr = weight_strongest_neighbour.raw(); // now used as flag to check if aggregated or not typename Matrix_d::IVector aggregated(num_block_rows, -1); IndexType *aggregated_ptr = aggregated.raw(); icount = 0; numUnassigned = num_block_rows; numUnassigned_previous = numUnassigned; do { // Each vertex stores in strongest_neighbour the aggregates number of strongest neighbour and the weight of connection 
findStrongestNeighbourBlockDiaCsr_StoreWeight <<< num_blocks, threads_per_block>>>(A_row_offsets_ptr, A_column_indices_ptr, edge_weights_ptr, num_block_rows, aggregated_ptr, aggregates_ptr, strongest_neighbour_ptr, partner_index_ptr, weight_strongest_neighbour_ptr, this->deterministic); cudaCheckError(); // Each vertex in same aggregates will agree on aggregates to propose too, and both store the aggregate number they want to match with agreeOnProposal <<< num_blocks, threads_per_block>>>(A_row_offsets_ptr, A_column_indices_ptr, num_block_rows, aggregated_ptr, strongest_neighbour_ptr, weight_strongest_neighbour_ptr, partner_index_ptr, aggregates_ptr); cudaCheckError(); matchAggregates <IndexType> <<< num_blocks, threads_per_block>>>(aggregates_ptr, aggregated_ptr, strongest_neighbour_ptr, num_block_rows); cudaCheckError(); numUnassigned_previous = numUnassigned; numUnassigned = thrust::count(aggregated.begin(), aggregated.end(), -1); cudaCheckError(); icount++; } while (!(numUnassigned == 0 || icount > this->max_iterations || 1.0 * numUnassigned / num_block_rows < this->numUnassigned_tol || numUnassigned_previous == numUnassigned)); // Merge remaining vertices with current aggregates if (!this->deterministic) { while (numUnassigned != 0) { mergeWithExistingAggregatesBlockDiaCsr <<< num_blocks, threads_per_block>>>(A_row_offsets_ptr, A_column_indices_ptr, edge_weights_ptr, num_block_rows, aggregates_ptr, aggregated_ptr, this->deterministic, (IndexType *) NULL); cudaCheckError(); numUnassigned = (int)thrust::count(aggregated.begin(), aggregated.end(), -1); cudaCheckError(); }; } else { typename Matrix_d::IVector aggregates_candidate(num_block_rows, -1); while (numUnassigned != 0) { mergeWithExistingAggregatesBlockDiaCsr <<< num_blocks, threads_per_block>>>(A_row_offsets_ptr, A_column_indices_ptr, edge_weights_ptr, num_block_rows, aggregates_ptr, aggregated_ptr, this->deterministic, aggregates_candidate.raw()); cudaCheckError(); joinExistingAggregates <<< num_blocks, threads_per_block>>>(num_block_rows, aggregates_ptr, aggregated_ptr, aggregates_candidate.raw()); cudaCheckError(); numUnassigned = (int)thrust::count(aggregated.begin(), aggregated.end(), -1); cudaCheckError(); }; } this->renumberAndCountAggregates(aggregates, aggregates_global, num_block_rows, num_aggregates); } template <class T_Config> void Size4SelectorBase<T_Config>::setAggregates(Matrix<T_Config> &A, IVector &aggregates, IVector &aggregates_global, int &num_aggregates) { if (A.get_block_dimx() == A.get_block_dimy()) { setAggregates_common_sqblock( A, aggregates, aggregates_global, num_aggregates ); } else { FatalError("Unsupported block size for Size4Selector", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE); } } // ------------------------- // Explict instantiations // ------------------------- #define AMGX_CASE_LINE(CASE) template class Size4SelectorBase<TemplateMode<CASE>::Type>; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE #define AMGX_CASE_LINE(CASE) template class Size4Selector<TemplateMode<CASE>::Type>; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE } } }
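// Hedged host-side sketch of the symmetrized edge weight used above for
// aggregation: w(i, j) = 0.5 * (|a_ij| + |a_ji|) / max(|a_ii|, |a_jj|).
// It operates on a scalar CSR matrix; the names (row_offsets, col_indices,
// diag_idx, values) mirror the kernel arguments but are assumptions here.
#include <vector>
#include <cmath>
#include <algorithm>

static void computeEdgeWeightsHost(const std::vector<int> &row_offsets,
                                   const std::vector<int> &col_indices,
                                   const std::vector<int> &diag_idx,   // index of a_ii in values
                                   const std::vector<double> &values,
                                   std::vector<float> &edge_weights)
{
    const int num_rows = (int)row_offsets.size() - 1;
    edge_weights.assign(col_indices.size(), -1.f);
    for (int i = 0; i < num_rows; ++i)
    {
        for (int j = row_offsets[i]; j < row_offsets[i + 1]; ++j)
        {
            int col = col_indices[j];
            if (col >= num_rows || col == i) continue;
            // Find the transposed entry a_ji by scanning row 'col'.
            for (int k = row_offsets[col]; k < row_offsets[col + 1]; ++k)
            {
                if (col_indices[k] == i)
                {
                    double dmax = std::max(std::fabs(values[diag_idx[i]]),
                                           std::fabs(values[diag_idx[col]]));
                    edge_weights[j] = (float)(0.5 * (std::fabs(values[j]) +
                                                     std::fabs(values[k])) / dmax);
                    break;
                }
            }
        }
    }
}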
#ifndef INCLUDE_GGNN_CUDA_KNN_GGNN_GPU_INSTANCE_CUH_ #define INCLUDE_GGNN_CUDA_KNN_GGNN_GPU_INSTANCE_CUH_ #include <array> #include <limits> #include <string> #include <thread> #include <cuda.h> #include <cuda_runtime.h> #include <curand.h> #include <curand_kernel.h> #include <stdio.h> #include <cub/cub.cuh> #include "ggnn/graph/cuda_knn_ggnn_graph_device.cuh" #include "ggnn/graph/cuda_knn_ggnn_graph_host.cuh" #include "ggnn/graph/cuda_knn_ggnn_graph_buffer.cuh" #include "ggnn/merge/cuda_knn_merge_layer.cuh" #include "ggnn/merge/cuda_knn_top_merge_layer.cuh" #include "ggnn/query/cuda_knn_query_layer.cuh" #include "ggnn/query/cuda_knn_ggnn_query.cuh" #include "ggnn/query/cuda_knn_bf_query_layer.cuh" #include "ggnn/query/cuda_knn_stats_query_layer.cuh" #include "ggnn/select/cuda_knn_wrs_select_layer.cuh" #include "ggnn/sym/cuda_knn_sym_buffer_merge_layer.cuh" #include "ggnn/sym/cuda_knn_sym_query_layer.cuh" #include "ggnn/utils/cuda_knn_utils.cuh" #include "ggnn/utils/cuda_knn_constants.cuh" #include "ggnn/utils/cuda_knn_dataset.cuh" template <typename ValueT> __global__ void divide(ValueT* res, ValueT* input, ValueT N) { res[threadIdx.x] = input[threadIdx.x]/N; } /** * GGNN core operations (shared between single-GPU and multi-GPU version) * * @param measure distance measure: Euclidean or Cosine * @param KeyT datatype of dataset indices (needs to be able to represent * N_base, signed integer required) * @param ValueT distance value type * @param GAddrT address type used to access neighborhood vectors (needs to be * able to represent N_all*K) * @param BaseT datatype of dataset vector elements * @param BAddrT address type used to access dataset vectors (needs to be able * to represent N_base*D) * @param D dimension of dataset * @param KBuild neighbors per node in the GGNN graph * @param KF maximum number of inverse links per node in the GGNN graph * @param KQuery number of nearest neighbors to retrieve during query * @param S segment size */ template <DistanceMeasure measure, typename KeyT, typename ValueT, typename GAddrT, typename BaseT, typename BAddrT, int D, int KBuild, int KF, int KQuery, int S> struct GGNNGPUInstance { /// number of base points per shard int N_shard; /// number of layers int L; /// growth factor (number of sub-graphs merged together per layer) int G; /// segment size in base layer int S0; /// number of segments in base layer with one additional element int S0_off; /// slack factor for symmetric linking float tau_build; /// total number of neighborhoods in the graph int N_all; /// total number of selection/translation entries int ST_all; /// neighborhoods per layer std::array<int, MAX_LAYER> Ns; // [L] /// start of neighborhoods per layer std::array<int, MAX_LAYER> Ns_offsets; // [L] /// start of selection/translation per layer std::array<int, MAX_LAYER> STs_offsets; // [L] typedef GGNNGraphDevice<KeyT, BaseT, ValueT> GGNNGraphDevice; typedef GGNNGraphHost<KeyT, BaseT, ValueT> GGNNGraphHost; const Dataset<KeyT, BaseT, BAddrT>* dataset; GGNNGraphBuffer<KeyT, ValueT>* ggnn_buffer {nullptr}; GGNNQuery<KeyT, ValueT, BaseT> ggnn_query; // Graph Shards resident on the GPU std::vector<GGNNGraphDevice> ggnn_shards; // Graph Shards resident on the CPU (for swapping, loading, and storing) std::vector<GGNNGraphHost> ggnn_cpu_buffers; curandGenerator_t gen; //TODO (lukas): merge the buffer-code in here? 
// CUDA GPU id associated with this instance const int gpu_id; // number of shards that need to be processed by this instance const int num_parts; GGNNGPUInstance(const int gpu_id, const Dataset<KeyT, BaseT, BAddrT>* dataset, const int N_shard, const int L, const bool enable_construction, const float tau_build, const int num_parts=1, const int num_cpu_buffers=1) : N_shard{N_shard}, L{L}, tau_build{tau_build}, dataset{dataset}, gpu_id{gpu_id}, ggnn_query{dataset->N_query, D, KQuery, num_parts}, num_parts{num_parts} { CHECK_LE(L, MAX_LAYER); LOG(INFO) << "GGNNGPUInstance(): CUDA device id: " << gpu_id; { int current_gpu_id; cudaGetDevice(&current_gpu_id); CHECK_EQ(current_gpu_id, gpu_id) << "cudaSetDevice() needs to be called in advance!"; } ggnn_query.loadQueriesAsync(dataset->h_query, 0); computeGraphParameters(); CHECK_LE(static_cast<size_t>(N_all) * static_cast<size_t>(KBuild), static_cast<size_t>(std::numeric_limits<GAddrT>::max())) << "address type is insufficient to address the requested graph."; copyConstantsToGPU(); // allocate CPU memory first (fail early if out of memory) ggnn_cpu_buffers.reserve(num_cpu_buffers); for (int i=0; i < num_cpu_buffers; i++) ggnn_cpu_buffers.emplace_back(N_shard, KBuild, N_all, ST_all); //TODO (lukas): merge the buffer-code in here? if (enable_construction) ggnn_buffer = new GGNNGraphBuffer<KeyT, ValueT>{N_shard, KBuild, KF}; curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT); curandSetPseudoRandomGeneratorSeed(gen, 1234ULL); int max_shards; { size_t free, total; CHECK_CUDA(cudaMemGetInfo(&free, &total)); size_t size_per_shard = getSizePerShard(); max_shards = free/size_per_shard; LOG(INFO) << "remaining device memory (" << free/(1024.0f*1024.0f*1024.0f) << " GB) suffices for " << max_shards << " shards (" << size_per_shard/(1024.0f*1024.0f*1024.0f) << " GB each)."; CHECK_GT(max_shards, 0) << "use smaller shards."; } const int num_shards = min(max_shards, num_parts); ggnn_shards.reserve(num_shards); for (int i=0; i < num_shards; i++) { ggnn_shards.emplace_back(N_shard, D, KBuild, N_all, ST_all); } CHECK_CUDA(cudaPeekAtLastError()); CHECK_CUDA(cudaDeviceSynchronize()); CHECK_CUDA(cudaPeekAtLastError()); } GGNNGPUInstance(const GGNNGPUInstance& other) : dataset{nullptr}, ggnn_query{0, D, KQuery}, gpu_id{0}, N_shard{0}, num_parts{0} { // this exists to allow using vector::emplace_back // when it triggers a reallocation, this code will be called. // always make sure that enough memory is reserved ahead of time. LOG(FATAL) << "copying is not supported. reserve()!"; } ~GGNNGPUInstance() { CHECK_CUDA(cudaSetDevice(gpu_id)); ggnn_shards.clear(); delete ggnn_buffer; CHECK_CUDA(cudaPeekAtLastError()); CHECK_CUDA(cudaDeviceSynchronize()); CHECK_CUDA(cudaPeekAtLastError()); } void computeGraphParameters() { /// theoretical growth factor (number of sub-graphs merged together per /// layer) const float growth = powf(N_shard / static_cast<float>(S), 1.f / (L - 1)); const int Gf = growth; const int Gc = growth + 1; const float S0f = N_shard / (pow(Gf, (L - 1))); const float S0c = N_shard / (pow(Gc, (L - 1))); const bool is_floor = (growth > 0) && ((S0c < KBuild) || (fabs(S0f - S) < fabs(S0c - S))); G = (is_floor) ? Gf : Gc; S0 = (is_floor) ? 
S0f : S0c; S0_off = N_shard - pow(G, L - 1) * S0; VLOG(1) << "GGNNGPUInstance(): N: " << N_shard << ", L: " << L << ", G: " << G << ", S: " << S << ", S0: " << S0 << ", S0_off: " << S0_off << ", K: " << KBuild << ", KF: " << KF; N_all = 0; ST_all = 0; int N_current = N_shard; for (int l = 0; l < L; l++) { Ns[l] = N_current; Ns_offsets[l] = N_all; STs_offsets[l] = ST_all; N_all += N_current; if (l) { ST_all += N_current; N_current /= G; } else { N_current = S; for (int i=2;i<L; ++i) N_current *= G; } } } size_t getSizePerShard() const { const size_t graph_size = static_cast<GAddrT>(N_all) * KBuild * sizeof(KeyT); const size_t selection_translation_size = ST_all * sizeof(KeyT); // const size_t nn1_dist_buffer_size = N * sizeof(ValueT); const size_t nn1_stats_size = 2 * sizeof(ValueT); const size_t total_graph_size = graph_size + 2 * selection_translation_size + nn1_stats_size; const size_t base_size = static_cast<BAddrT>(N_shard) * D * sizeof(BaseT); return total_graph_size + base_size; } void copyConstantsToGPU() const { CHECK_CUDA(cudaSetDevice(gpu_id)); VLOG(2) << "GGNNGPUInstance::copyConstantsToGPU().\n"; cudaMemcpyToSymbol(c_Ns, Ns.data(), L * sizeof(int)); cudaMemcpyToSymbol(c_Ns_offsets, Ns_offsets.data(), L * sizeof(int)); cudaMemcpyToSymbol(c_G, &G, sizeof(int)); cudaMemcpyToSymbol(c_L, &L, sizeof(int)); cudaMemcpyToSymbol(c_S0, &S0, sizeof(int)); cudaMemcpyToSymbol(c_S0_offset, &S0_off, sizeof(int)); cudaMemcpyToSymbol(c_tau_build, &tau_build, sizeof(float)); cudaMemcpyToSymbol(c_STs_offsets, STs_offsets.data(), L * sizeof(int)); } // graph utilities int getNs(const int layer) const { return Ns[layer]; } int getS(const int layer) const { return layer ? S : S0; } int getS_offset(const int layer) const { return layer ? 0 : S0_off; } KeyT* getGraph(const int shard, const int layer) { return &ggnn_shards.at(shard%ggnn_shards.size()).d_graph[static_cast<GAddrT>(Ns_offsets[layer]) * KBuild]; } KeyT* getSelection(const int shard, const int layer) { if (!layer) { // there is no selection for layer 0 return nullptr; } return &ggnn_shards.at(shard%ggnn_shards.size()).d_selection[STs_offsets[layer]]; } KeyT* getTranslation(const int shard, const int layer) { if (!layer) { // there is no translation for layer 0 return nullptr; } return &ggnn_shards.at(shard%ggnn_shards.size()).d_translation[STs_offsets[layer]]; } // io void waitForDiskIO(const int shard_id) { auto& cpu_buffer = ggnn_cpu_buffers[shard_id%ggnn_cpu_buffers.size()]; if (cpu_buffer.disk_io_thread.joinable()) cpu_buffer.disk_io_thread.join(); } void loadPartAsync(const std::string graph_dir, const int part_id, const int shard_id) { waitForDiskIO(shard_id); auto& cpu_buffer = ggnn_cpu_buffers[shard_id%ggnn_cpu_buffers.size()]; auto load_part = [this, graph_dir, part_id, shard_id]() -> void { CHECK_CUDA(cudaSetDevice(gpu_id)); auto& shard = ggnn_shards.at(shard_id%ggnn_shards.size()); auto& cpu_buffer = ggnn_cpu_buffers[shard_id%ggnn_cpu_buffers.size()]; cudaStreamSynchronize(shard.stream); if (shard.current_part_id == part_id) { VLOG(4) << "[GPU: " << gpu_id << "] part " << part_id << " is already loaded on shard " << shard_id; return; } shard.current_part_id = part_id; loadShardBaseDataAsync(part_id, shard_id); if (cpu_buffer.current_part_id == part_id) { VLOG(4) << "[GPU: " << gpu_id << "] part " << part_id << " is already loaded on cpu buffer " << shard_id%ggnn_cpu_buffers.size(); } else { const std::string part_filename = graph_dir + "part_" + std::to_string(part_id) + ".ggnn"; cpu_buffer.load(part_filename); VLOG(2) << "[GPU: " 
<< gpu_id << "] loaded part " << part_id << " from " << part_filename.c_str(); cpu_buffer.current_part_id = part_id; } cpu_buffer.uploadAsync(shard); cudaStreamSynchronize(shard.stream); VLOG(4) << "[GPU: " << gpu_id << "] uploaded part " << part_id; }; cpu_buffer.disk_io_thread = std::thread(load_part); } void uploadPartAsync(const int part_id, const int shard_id) { waitForDiskIO(shard_id); auto& cpu_buffer = ggnn_cpu_buffers[shard_id%ggnn_cpu_buffers.size()]; auto upload_part = [this, part_id, shard_id]() -> void { CHECK_CUDA(cudaSetDevice(gpu_id)); auto& shard = ggnn_shards.at(shard_id%ggnn_shards.size()); auto& cpu_buffer = ggnn_cpu_buffers[shard_id%ggnn_cpu_buffers.size()]; cudaStreamSynchronize(shard.stream); if (shard.current_part_id == part_id) { VLOG(4) << "[GPU: " << gpu_id << "] part " << part_id << " is already loaded on shard " << shard_id; return; } shard.current_part_id = part_id; CHECK_EQ(cpu_buffer.current_part_id, part_id); loadShardBaseDataAsync(part_id, shard_id); cpu_buffer.uploadAsync(shard); cudaStreamSynchronize(shard.stream); VLOG(4) << "[GPU: " << gpu_id << "] uploaded part " << part_id; }; cpu_buffer.disk_io_thread = std::thread(upload_part); } void storePartAsync(const std::string graph_dir, const int part_id, const int shard_id) { waitForDiskIO(shard_id); auto& cpu_buffer = ggnn_cpu_buffers[shard_id%ggnn_cpu_buffers.size()]; auto store_part = [this, graph_dir, part_id, shard_id]() -> void { CHECK_CUDA(cudaSetDevice(gpu_id)); auto& shard = ggnn_shards.at(shard_id%ggnn_shards.size()); auto& cpu_buffer = ggnn_cpu_buffers[shard_id%ggnn_cpu_buffers.size()]; if (cpu_buffer.current_part_id == part_id) { VLOG(4) << "[GPU: " << gpu_id << "] part " << part_id << " is already downloaded"; } else { cpu_buffer.downloadAsync(shard); cudaStreamSynchronize(shard.stream); VLOG(4) << "[GPU: " << gpu_id << "] downloaded part " << part_id; } const std::string part_filename = graph_dir + "part_" + std::to_string(part_id) + ".ggnn"; cpu_buffer.store(part_filename); VLOG(2) << "[GPU: " << gpu_id << "] stored part " << part_id << " to " << part_filename.c_str(); }; cpu_buffer.disk_io_thread = std::thread(store_part); } void downloadPartAsync(const int part_id, const int shard_id) { waitForDiskIO(shard_id); auto& cpu_buffer = ggnn_cpu_buffers[shard_id%ggnn_cpu_buffers.size()]; auto download_part = [this, part_id, shard_id]() -> void { CHECK_CUDA(cudaSetDevice(gpu_id)); auto& shard = ggnn_shards.at(shard_id%ggnn_shards.size()); auto& cpu_buffer = ggnn_cpu_buffers[shard_id%ggnn_cpu_buffers.size()]; cpu_buffer.downloadAsync(shard); cudaStreamSynchronize(shard.stream); cpu_buffer.current_part_id = part_id; VLOG(4) << "[GPU: " << gpu_id << "] downloaded part " << part_id; }; cpu_buffer.disk_io_thread = std::thread(download_part); } void loadShardBaseDataAsync(const int part_id, const int shard_id) { const size_t memsize = static_cast<BAddrT>(N_shard) * D * sizeof(BaseT); const size_t N_offset = static_cast<BAddrT>(N_shard) * part_id; auto& shard = ggnn_shards.at(shard_id%ggnn_shards.size()); CHECK_CUDA(cudaMemcpyAsync(shard.d_base, dataset->h_base + N_offset * D, memsize, cudaMemcpyHostToDevice, shard.stream)); } void generateGTUsingBF(const int shard_id = 0) { CHECK_CUDA(cudaSetDevice(gpu_id)); const auto& shard = ggnn_shards.at(shard_id%ggnn_shards.size()); const int KGT = 100; KeyT* m_gt = nullptr; CHECK_CUDA(cudaMallocManaged(&m_gt, sizeof(KeyT)*KGT*dataset->N_query)); CHECK_LE(dataset->K_gt, KGT) << "The brute force query is set to " << KGT << " neighbors, but the dataset is configured 
for " << dataset->K_gt << "."; typedef BruteForceQueryKernel<measure, ValueT, KeyT, D, KGT, 32, BaseT, BAddrT, GAddrT, false> QueryKernel; LOG(INFO) << "Running brute force query to determine ground truth"; QueryKernel query_kernel; query_kernel.d_base = shard.d_base; query_kernel.d_query = ggnn_query.d_query; query_kernel.d_query_results = m_gt; query_kernel.N_base = N_shard; // this applies to potential subsets query_kernel.N = dataset->N_query; query_kernel.N_offset = 0; time_launcher(0, &query_kernel, query_kernel.N, shard.stream); cudaStreamSynchronize(shard.stream); if (dataset->K_gt == KGT) { std::copy_n(m_gt, KGT*dataset->N_query, dataset->gt); } else { const size_t stride_results = static_cast<size_t>(dataset->N_query)*KGT; const size_t stride_dest = static_cast<size_t>(dataset->N_query)*dataset->K_gt; for (int n=0; n<dataset->N_query; ++n) { std::copy_n(m_gt+n*stride_results, dataset->K_gt, dataset->gt+n*stride_dest); } } CHECK_CUDA(cudaFree(m_gt)); CHECK_CUDA(cudaPeekAtLastError()); CHECK_CUDA(cudaDeviceSynchronize()); CHECK_CUDA(cudaPeekAtLastError()); } // graph operations template <int BLOCK_DIM_X = 32, int MAX_ITERATIONS = 400, int CACHE_SIZE = 512, int SORTED_SIZE = 256, bool DIST_STATS = false> void queryLayer(const int shard_id = 0) const { CHECK_CUDA(cudaSetDevice(gpu_id)); const auto& shard = ggnn_shards.at(shard_id%ggnn_shards.size()); typedef QueryKernel<measure, ValueT, KeyT, D, KBuild, KF, KQuery, S, BLOCK_DIM_X, BaseT, BAddrT, GAddrT, DIST_STATS, false, MAX_ITERATIONS, CACHE_SIZE, SORTED_SIZE, true> QueryKernel; int* m_dist_statistics = nullptr; if (DIST_STATS) cudaMallocManaged(&m_dist_statistics, dataset->N_query * sizeof(int)); QueryKernel query_kernel; query_kernel.d_base = shard.d_base; query_kernel.d_query = ggnn_query.d_query; query_kernel.d_graph = shard.d_graph; query_kernel.d_query_results = ggnn_query.d_query_result_ids; query_kernel.d_query_results_dists = ggnn_query.d_query_result_dists; query_kernel.d_translation = shard.d_translation; query_kernel.d_nn1_stats = shard.d_nn1_stats; query_kernel.N = dataset->N_query; query_kernel.N_offset = 0; query_kernel.d_dist_stats = m_dist_statistics; query_kernel.part = shard_id; query_kernel.num_parts = num_parts; query_kernel.N_base = N_shard; query_kernel.launch(shard.stream); if (DIST_STATS) cudaFree(m_dist_statistics); } void select(const int layer, const int shard_id = 0) { CHECK_CUDA(cudaSetDevice(gpu_id)); const auto& shard = ggnn_shards.at(shard_id%ggnn_shards.size()); typedef WRSSelectionKernel<ValueT, KeyT, 128, S> SelectionKernel; SelectionKernel select_kernel; select_kernel.d_selection = getSelection(shard_id, layer + 1); select_kernel.d_translation = getTranslation(shard_id, layer + 1); select_kernel.d_translation_layer = getTranslation(shard_id, layer); select_kernel.layer = layer; select_kernel.S = getS(layer); select_kernel.S_offset = getS_offset(layer); const int SG = S / G; const int SG_offset = S - SG * G; select_kernel.SG = SG; select_kernel.SG_offset = SG_offset; select_kernel.B = pow(G, L - 1 - layer); select_kernel.B_offset = 0; select_kernel.d_rng = ggnn_buffer->d_rng; select_kernel.d_nn1_dist_buffer = ggnn_buffer->d_nn1_dist_buffer; /* Generate n floats on device */ curandGenerateUniform(gen, ggnn_buffer->d_rng, getNs(layer)); time_launcher(2, &select_kernel, getNs(layer), shard.stream); } void top(const int layer, const int shard_id = 0) { CHECK_CUDA(cudaSetDevice(gpu_id)); const auto& shard = ggnn_shards.at(shard_id%ggnn_shards.size()); typedef TopMergeKernel<measure, ValueT, KeyT, D, 
KBuild, 128, BaseT, BAddrT, GAddrT> TopMergeKernel; TopMergeKernel top_kernel; top_kernel.d_base = shard.d_base; top_kernel.d_translation = getTranslation(shard_id, layer); top_kernel.d_graph = getGraph(shard_id, layer); top_kernel.d_nn1_dist_buffer = ggnn_buffer->d_nn1_dist_buffer; top_kernel.layer = layer; top_kernel.N = getNs(layer); top_kernel.N_offset = 0; top_kernel.S = getS(layer); top_kernel.S_offset = getS_offset(layer); time_launcher(2, &top_kernel, getNs(layer), shard.stream); } void mergeLayer(const int layer_top, const int layer_btm, const int shard_id = 0) { CHECK_CUDA(cudaSetDevice(gpu_id)); const auto& shard = ggnn_shards.at(shard_id%ggnn_shards.size()); typedef MergeKernel<measure, ValueT, KeyT, D, KBuild, KF, S, 32, BaseT, BAddrT, GAddrT> MergeKernel; const size_t graph_buffer_size = static_cast<GAddrT>(getNs(layer_btm)) * KBuild * sizeof(KeyT); MergeKernel merge_kernel; merge_kernel.d_base = shard.d_base; merge_kernel.d_graph = shard.d_graph; merge_kernel.d_graph_buffer = ggnn_buffer->d_graph_buffer; merge_kernel.d_translation = shard.d_translation; merge_kernel.d_selection = shard.d_selection; merge_kernel.d_nn1_stats = shard.d_nn1_stats; merge_kernel.d_nn1_dist_buffer = ggnn_buffer->d_nn1_dist_buffer; merge_kernel.N = getNs(layer_btm); merge_kernel.N_offset = 0; merge_kernel.layer_top = layer_top; merge_kernel.layer_btm = layer_btm; time_launcher(2, &merge_kernel, getNs(layer_btm), shard.stream); cudaMemcpyAsync((void*)getGraph(shard_id, layer_btm), (void*)ggnn_buffer->d_graph_buffer, graph_buffer_size, cudaMemcpyDeviceToDevice, shard.stream); }; void merge(const int layer_top, const int layer_btm, const int shard_id = 0) { CHECK_CUDA(cudaSetDevice(gpu_id)); const auto& shard = ggnn_shards.at(shard_id%ggnn_shards.size()); VLOG(2) << "merge: " << layer_top << layer_btm << std::endl; if (layer_top == layer_btm) top(layer_btm, shard_id); else mergeLayer(layer_top, layer_btm, shard_id); if (!layer_btm) computeNN1Stats(shard_id); }; void computeNN1Stats(const int shard_id = 0) { CHECK_CUDA(cudaSetDevice(gpu_id)); const auto& shard = ggnn_shards.at(shard_id%ggnn_shards.size()); CHECK_CUDA(cub::DeviceReduce::Sum(ggnn_buffer->d_temp_storage_sum, ggnn_buffer->temp_storage_bytes_sum, ggnn_buffer->d_nn1_dist_buffer, &shard.d_nn1_stats[0], N_shard, shard.stream)); divide<ValueT><<<1, 1, 0, shard.stream>>>(shard.d_nn1_stats, shard.d_nn1_stats, ValueT(N_shard)); CHECK_CUDA(cub::DeviceReduce::Max(ggnn_buffer->d_temp_storage_max, ggnn_buffer->temp_storage_bytes_max, ggnn_buffer->d_nn1_dist_buffer, &shard.d_nn1_stats[1], N_shard, shard.stream)); if(VLOG_IS_ON(2)) { ValueT h_nn1_stats[2]; cudaMemcpyAsync(h_nn1_stats, shard.d_nn1_stats, 2*sizeof(ValueT), cudaMemcpyDeviceToHost, shard.stream); cudaStreamSynchronize(shard.stream); VLOG(2) << "mean: " << h_nn1_stats[0] << " | max: " << h_nn1_stats[1] << std::endl; } } void sym(const int layer, const int shard_id = 0) { CHECK_CUDA(cudaSetDevice(gpu_id)); const auto& shard = ggnn_shards.at(shard_id%ggnn_shards.size()); typedef SymQueryKernel<measure, ValueT, KeyT, D, KBuild, KF, 64, BaseT, BAddrT, GAddrT> SymQueryKernel; cudaMemsetAsync( ggnn_buffer->d_sym_buffer, -1, static_cast<GAddrT>(static_cast<GAddrT>(getNs(layer))) * KF * sizeof(KeyT), shard.stream); cudaMemsetAsync(ggnn_buffer->d_sym_atomic, 0, getNs(layer) * sizeof(int), shard.stream); SymQueryKernel sym_kernel; sym_kernel.d_base = shard.d_base; sym_kernel.d_graph = getGraph(shard_id, layer); sym_kernel.d_translation = getTranslation(shard_id, layer); sym_kernel.d_sym_atomic = 
ggnn_buffer->d_sym_atomic; sym_kernel.d_sym_buffer = ggnn_buffer->d_sym_buffer; sym_kernel.d_nn1_stats = shard.d_nn1_stats; sym_kernel.d_stats = ggnn_buffer->d_statistics; sym_kernel.layer = layer; sym_kernel.N = getNs(layer); sym_kernel.N_offset = 0; // CHECK_CUDA(cudaPeekAtLastError()); // CHECK_CUDA(cudaDeviceSynchronize()); // CHECK_CUDA(cudaPeekAtLastError()); time_launcher(2, &sym_kernel, getNs(layer), shard.stream); // CHECK_CUDA(cudaPeekAtLastError()); // CHECK_CUDA(cudaDeviceSynchronize()); // CHECK_CUDA(cudaPeekAtLastError()); typedef SymBufferMergeKernel<ValueT, KeyT, KBuild, KF, 128, GAddrT> SymBufferMergeKernel; SymBufferMergeKernel sym_buffer_merge_kernel; sym_buffer_merge_kernel.d_sym_buffer = ggnn_buffer->d_sym_buffer; sym_buffer_merge_kernel.d_sym_atomic = ggnn_buffer->d_sym_atomic; sym_buffer_merge_kernel.d_graph = getGraph(shard_id, layer); sym_buffer_merge_kernel.N = getNs(layer); sym_buffer_merge_kernel.N_offset = 0; time_launcher(3, &sym_buffer_merge_kernel, getNs(layer), shard.stream); // CHECK_CUDA(cudaPeekAtLastError()); // CHECK_CUDA(cudaDeviceSynchronize()); // CHECK_CUDA(cudaPeekAtLastError()); if(VLOG_IS_ON(2)){ int* h_sym_atomic; //int* h_statistics; CHECK_CUDA(cudaMallocHost(&h_sym_atomic, static_cast<size_t>(getNs(layer)) * sizeof(int))); //CHECK_CUDA(cudaMallocHost(&h_statistics, static_cast<size_t>(getNs(layer)) * sizeof(int))); cudaMemcpyAsync(h_sym_atomic, ggnn_buffer->d_sym_atomic, static_cast<size_t>(getNs(layer)) * sizeof(int), cudaMemcpyDeviceToHost, shard.stream); //cudaMemcpyAsync(h_statistics, ggnn_buffer->d_statistics, static_cast<size_t>(getNs(layer)) * sizeof(int), cudaMemcpyDeviceToHost, shard.stream); cudaStreamSynchronize(shard.stream); int c = 0; int m = 0; // int unconnected = 0; for (int i = 0; i < getNs(layer); i++) { if (h_sym_atomic[i] > KF) c++; m += (h_sym_atomic[i] > KF) ? KF : h_sym_atomic[i]; // unconnected += h_statistics[i]; } VLOG(2) << "Layer " << layer << " [N: " << getNs(layer) << "] | overflow: " << c << " (" << c / float(getNs(layer)) << ") | added_links: " << m << " (" << m / float(getNs(layer)) << ") || unconnected: OVERFLOW_STATS currently not computed. )\n"; cudaFreeHost(h_sym_atomic); } // cudaFree(d_sym_buffer); // cudaFree(m_sym_atomic); // cudaFree(m_statistics); // CHECK_CUDA(cudaPeekAtLastError()); // CHECK_CUDA(cudaDeviceSynchronize()); // CHECK_CUDA(cudaPeekAtLastError()); }; void build(const int part_id, const int shard_id = 0) { CHECK(ggnn_buffer) << "the construction buffer is not allocated."; VLOG(1) << "build(): part_id: " << part_id << " shard_id: " << shard_id; for (int layer_top = 0; layer_top < L; layer_top++) { for (int layer_btm = layer_top; layer_btm >= 0; layer_btm--) { VLOG(2) << "layer_top: " << layer_top << " -> layer_btm: " << layer_btm << std::endl; merge(layer_top, layer_btm, shard_id); if (layer_top < (L - 1) && layer_top == layer_btm) select(layer_top, shard_id); sym(layer_btm, shard_id); } } } void refine(const int shard_id = 0) { for (int layer = L - 2; layer >= 0; layer--) { merge(L - 1, layer, shard_id); sym(layer, shard_id); } } }; #endif // INCLUDE_GGNN_CUDA_KNN_GGNN_GPU_INSTANCE_CUH_
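// Hedged sketch of how the per-layer node counts and the graph total N_all
// follow from N_shard, the number of layers L, the growth factor G and the
// segment size S, mirroring computeGraphParameters() above but without the
// S0 / S0_off remainder handling. All names and the example numbers below are
// illustrative.
#include <vector>

static int layerSizes(int N_shard, int L, int G, int S, std::vector<int> &Ns)
{
    Ns.assign(L, 0);
    int N_all = 0;
    int N_current = N_shard;               // layer 0 holds every base point
    for (int l = 0; l < L; ++l)
    {
        Ns[l] = N_current;
        N_all += N_current;
        if (l == 0)
        {
            // layer 1 holds S points per top-level segment: S * G^(L-2)
            N_current = S;
            for (int i = 2; i < L; ++i) N_current *= G;
        }
        else
        {
            N_current /= G;                // each upper layer shrinks by G
        }
    }
    return N_all;
}

// e.g. N_shard = 1000000, L = 4, G = 32, S = 32 gives layers of
// 1000000 / 32768 / 1024 / 32 nodes and N_all = 1033824 (illustrative values).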
#include <nbla/cuda/array/cuda_array.hpp> #include <nbla/cuda/common.hpp> #include <nbla/cuda/communicator/data_parallel_communicator.hpp> #include <algorithm> #include <cstdlib> #include <memory> namespace nbla { using std::make_shared; template <typename T> __global__ void kernel_divide_inplace(const int size, const int n_devices, T *dw) { NBLA_CUDA_KERNEL_LOOP(i, size) { dw[i] /= n_devices; } } template <typename T> DataParallelCommunicatorNccl<T>::DataParallelCommunicatorNccl( const Context &ctx) : DataParallelCommunicator<T>(ctx) {} template <typename T> DataParallelCommunicatorNccl<T>::~DataParallelCommunicatorNccl() { if (this->initialized_) { for (int i = 0; i < device_ids_.size(); ++i) { ncclCommDestroy(comms_[i]); NBLA_CUDA_CHECK(cudaStreamDestroy(streams_[i])); } } } template <typename T> void DataParallelCommunicatorNccl<T>::init() { Communicator::init(); try { // Set gpu information for (auto ctx : this->contexts_) { this->device_ids_.push_back(std::stoi(ctx.device_id)); } this->n_devices_ = this->device_ids_.size(); // Initialize stream and communicator for (int i = 0; i < n_devices_; ++i) { cuda_set_device(device_ids_[i]); // Stream cudaStream_t stream; NBLA_CUDA_CHECK(cudaStreamCreate(&stream)); streams_.push_back(stream); // NCCL Comm ncclComm_t comm; comms_.push_back(comm); } ncclResult_t res = ncclCommInitAll(comms_.data(), this->n_devices_, this->device_ids_.data()); if (res != 0) { NBLA_ERROR(error_code::target_specific, "ncclCommInitAll fails with %d"); } } catch (...) { this->initialized_ = false; } this->initialized_ = true; } template <typename T> void DataParallelCommunicatorNccl<T>::reduce( const vector<NdArrayPtr> &ndarray_list, int dst, bool division, bool inplace, const string &group) { NBLA_ERROR(error_code::not_implemented, "CUDA GPU reduce is not implemented.") } template <typename T> void DataParallelCommunicatorNccl<T>::reduce(NdArrayPtr ndarray, int dst, bool division, bool inplace, const string &group) { NBLA_ERROR(error_code::not_implemented, "CUDA GPU reduce is not implemented.") } template <typename T> void DataParallelCommunicatorNccl<T>::allreduce(bool division, bool inplace) { // TODO: currently nnabla uses default stream for computation. // The following logic relies on that, so if nnabla uses another stream for // computation, // we have to issue null kernel to the default stream at the beginning of this // method // and at the end of this method for using the implicit synchronization // technique for // main thread not to wait for a result of a kernel call. if (inplace == true) { NBLA_ERROR(error_code::not_implemented, "CUDA GPU allreduce with out-of-place is only implemented.") } // Once sync to prevent the hang where the memcpy occurs during the allreduce. this->sync_all_params(); // 1. copy inside device for (int i = 0; i < device_ids_.size(); ++i) { // device-loop Context ctx = this->contexts_[i]; auto device_id = device_ids_[i]; cuda_set_device(device_id); auto func_named_param = this->device_func_named_param_[i]; auto comm = comms_[i]; auto stream = streams_[i]; // TODO: address 16 bits also here? NdArray arr_buff(Shape_t{this->total_params_}); Tc *buff = arr_buff.cast(get_dtype<Tc>(), ctx, true)->pointer<Tc>(); Size_t type_size = sizeof(Tc); for (auto elm : func_named_param) { VariablePtr vp = elm.second; const Tc *dw = vp->get_grad_pointer<Tc>(ctx); auto n_param = vp->size(); cudaMemcpyAsync(buff, dw, type_size * n_param, cudaMemcpyDeviceToDevice, stream); buff += n_param; } } // 2. 
allreduce #ifdef NCCL_MAJOR ncclGroupStart(); #endif for (int i = 0; i < device_ids_.size(); ++i) { // device-loop Context ctx = this->contexts_[i]; auto device_id = device_ids_[i]; // cuda_set_device(device_id); auto comm = comms_[i]; auto stream = streams_[i]; // TODO: address 16 bits also here? NdArray arr_buff(Shape_t{this->total_params_}); Tc *buff = arr_buff.cast(get_dtype<Tc>(), ctx, true)->pointer<Tc>(); ncclResult_t ret = ncclAllReduce(buff, buff, this->total_params_, get_nccl_dtype<Tc>(), ncclSum, comm, 0); // use default stream if (ret != ncclSuccess) { NBLA_ERROR(error_code::target_specific, "ncclAllReduce fails with %d.", ret); } } #ifdef NCCL_MAJOR ncclGroupEnd(); // wait_by_streams_synchronization(); #endif // 3. divide if (division) { for (int i = 0; i < device_ids_.size(); ++i) { // device-loop Context ctx = this->contexts_[i]; auto device_id = device_ids_[i]; cuda_set_device(device_id); auto comm = comms_[i]; auto stream = streams_[i]; // TODO: address 16 bits also here? NdArray arr_buff(Shape_t{this->total_params_}); Tc *buff = arr_buff.cast(get_dtype<Tc>(), ctx, true)->pointer<Tc>(); NBLA_CUDA_LAUNCH_KERNEL_IN_STREAM(kernel_divide_inplace, stream, this->total_params_, n_devices_, buff); } } // 4. copy back inside device for (int i = 0; i < device_ids_.size(); ++i) { // device-loop Context ctx = this->contexts_[i]; auto device_id = device_ids_[i]; cuda_set_device(device_id); auto func_named_param = this->device_func_named_param_[i]; auto comm = comms_[i]; auto stream = streams_[i]; // TODO: address 16 bits also here? NdArray arr_buff(Shape_t{this->total_params_}); Tc *buff = arr_buff.cast(get_dtype<Tc>(), ctx, true)->pointer<Tc>(); Size_t type_size = sizeof(Tc); for (auto elm : func_named_param) { VariablePtr vp = elm.second; Tc *dw = vp->cast_grad_and_get_pointer<Tc>(ctx); auto n_param = vp->size(); cudaMemcpyAsync(dw, buff, type_size * n_param, cudaMemcpyDeviceToDevice, stream); buff += n_param; } } } template <typename T> void DataParallelCommunicatorNccl<T>::all_reduce( const vector<NdArrayPtr> &ndarray_list, bool division, bool inplace, const string &group) { NBLA_ERROR(error_code::not_implemented, "CUDA GPU all_reduce is not implemented.") } template <typename T> void DataParallelCommunicatorNccl<T>::all_reduce(NdArrayPtr ndarray, bool division, bool inplace, const string &group) { NBLA_ERROR(error_code::not_implemented, "CUDA GPU all_reduce is not implemented.") } template <typename T> void DataParallelCommunicatorNccl<T>::reduce_scatter( const vector<NdArrayPtr> &ndarray_list, NdArrayPtr ndarray, bool division, const string &group) { NBLA_ERROR(error_code::not_implemented, "CUDA GPU reduce_scatter is not implemented.") } template <typename T> void DataParallelCommunicatorNccl<T>::bcast( const vector<NdArrayPtr> &ndarray_list, int src, bool inplace, const string &group) { NBLA_ERROR(error_code::not_implemented, "CUDA GPU bcast is not implemented.") } template <typename T> void DataParallelCommunicatorNccl<T>::bcast(NdArrayPtr ndarray, int src, bool inplace, const string &group) { NBLA_ERROR(error_code::not_implemented, "CUDA GPU bcast is not implemented.") } template <typename T> void DataParallelCommunicatorNccl<T>::all_gather( NdArrayPtr ndarray, const vector<NdArrayPtr> &ndarray_list, const string &group) { NBLA_ERROR(error_code::not_implemented, "CUDA GPU all_gather is not implemented.") } template <typename T> void DataParallelCommunicatorNccl<T>::reduce_async(bool division) { NBLA_ERROR(error_code::not_implemented, "CUDA GPU reduce_async is not 
implemented.") } template <typename T> void DataParallelCommunicatorNccl<T>::allreduce_async(bool division, bool inplace) { NBLA_ERROR(error_code::not_implemented, "CUDA GPU allreduce_async is not implemented.") } template <typename T> void DataParallelCommunicatorNccl<T>::reducescatter_async(bool division) { NBLA_ERROR(error_code::not_implemented, "CUDA GPU reducescatter_async is not implemented.") } template <typename T> void DataParallelCommunicatorNccl<T>::bcast_async() { NBLA_ERROR(error_code::not_implemented, "CUDA GPU bcast_async is not implemented.") } template <typename T> void DataParallelCommunicatorNccl<T>::allgather_async() { NBLA_ERROR(error_code::not_implemented, "CUDA GPU allgather_async is not implemented.") } template <typename T> vector<string> DataParallelCommunicatorNccl<T>::allowed_array_classes() { NBLA_ERROR(error_code::not_implemented, "Derived class of " "DataParallelCommunicatorNccl must " "implement allowed_array_classes().") } template <typename T> void DataParallelCommunicatorNccl<T>::wait_by_devices_synchronization() { for (int i = 0; i < device_ids_.size(); ++i) { cuda_device_synchronize(std::to_string(device_ids_[i])); } } template <typename T> void DataParallelCommunicatorNccl<T>::wait_by_streams_synchronization() { for (int i = 0; i < device_ids_.size(); ++i) { cuda_set_device(device_ids_[i]); NBLA_CUDA_CHECK(cudaStreamSynchronize(streams_[i])); } } template <typename T> void DataParallelCommunicatorNccl<T>::divide_by_num_devices(bool division) { if (division) { for (int i = 0; i < device_ids_.size(); ++i) { auto device_id = device_ids_[i]; cuda_set_device(device_id); Context ctx = this->contexts_[i]; auto func_named_param = this->device_func_named_param_[i]; auto stream = streams_[i]; for (auto elm : func_named_param) { VariablePtr vp = elm.second; Tc *dw = vp->cast_grad_and_get_pointer<Tc>(ctx); auto n_param = vp->size(); NBLA_CUDA_LAUNCH_KERNEL_IN_STREAM(kernel_divide_inplace, stream, n_param, n_devices_, dw); } } } } template <typename T> void DataParallelCommunicatorNccl<T>::sync_all_params() { for (int i = 0; i < device_ids_.size(); ++i) { // device-loop Context ctx = this->contexts_[i]; auto device_id = device_ids_[i]; auto func_named_param = this->device_func_named_param_[i]; auto size = func_named_param.size(); for (auto elm : func_named_param) { // function-loop VariablePtr vp = elm.second; // If the arrays are different, output the warning. this->check_array_class(ctx, vp); // Sync vp->get_grad_pointer<Tc>(ctx); } } } template class DataParallelCommunicatorNccl<float>; template class DataParallelCommunicatorNccl<Half>; }
#include <iostream>
#include "LinearFilter.h"

// Macros: DEF_BLOCK_X and DEF_BLOCK_Y
// Default thread block dimensions.
#define DEF_BLOCK_X 32
#define DEF_BLOCK_Y 8

// Static variable: _defTpl
// Used when the caller does not supply a valid template; defaults to a 3x3
// template whose weights are all 1.
static TemplateCuda *_defTpl = NULL;

// Host function: _initDefTemplate (initialize the default template pointer)
// Initializes the default template pointer _defTpl. If it is already set, it
// is returned directly; otherwise it is initialized as the default 3x3 template.
static __host__ TemplateCuda *  // Return value: the default template pointer _defTpl
_initDefTemplate();

// Kernel function: _linearFilterKer (perform the linear filtering)
static __global__ void      // Kernel functions have no return value
_linearFilterKer(
        ImageCuda inimg,    // input image
        ImageCuda outimg,   // output image
        TemplateCuda tpl,   // template
        int imptype         // implementation variant of the filter
);

// Host function: _preOp (pre-processing before the operation)
// Before filtering, perform pre-processing: (1) prepare the data of the input
// and output images, including allocating memory on the current Device;
// (2) process the template, including allocating memory on the current Device.
static __host__ int         // Return value: NO_ERROR if the function succeeds
_preOp(
        Image *inimg,       // input image
        Image *outimg,      // output image
        Template *tp        // template
);

// Host function: _adjustRoiSize (adjust the size of the ROI sub-images)
// Adjusts the ROI sub-images so that the input and output sub-images have the
// same size.
static __host__ void        // No return value
_adjustRoiSize(
        ImageCuda *inimg,   // input image
        ImageCuda *outimg   // output image
);

// Host function: _getBlockSize (compute Block and Grid dimensions)
// Derives the Grid size from the default Block size using the plain
// thread-partitioning scheme.
static __host__ int         // Return value: NO_ERROR if the function succeeds
_getBlockSize(
        int width,          // width to be processed
        int height,         // height to be processed
        dim3 *gridsize,     // computed Grid size
        dim3 *blocksize     // computed Block size
);

// Host function: _initDefTemplate (initialize the default template pointer)
static __host__ TemplateCuda *_initDefTemplate()
{
    // If _defTpl is not NULL it has already been initialized; return it directly.
    if (_defTpl != NULL)
        return _defTpl;

    // Otherwise initialize it as a 3x3 template whose weights are all 1.
    Template *tmpdef;
    TemplateBasicOp::newTemplate(&tmpdef);
    TemplateBasicOp::makeAtHost(tmpdef, 9);
    _defTpl = TEMPLATE_CUDA(tmpdef);
    // Handle each of the 9 points.
    for (int i = 0; i < 9; i++) {
        // Compute the x and y coordinates of each point.
        _defTpl->tplMeta.tplData[2 * i] = i % 3 - 1;
        _defTpl->tplMeta.tplData[2 * i + 1] = i / 3 - 1;
        // Set the weight of each point to 1.
        _defTpl->attachedData[i] = 1;
    }
    return _defTpl;
}

// Kernel function: _linearFilterKer (perform the filtering)
static __global__ void _linearFilterKer(ImageCuda inimg, ImageCuda outimg,
                                        TemplateCuda tpl, int imptype)
{
    // dstc and dstr are the x and y coordinates of the pixel handled by this
    // thread (c stands for column, r for row). Because a parallelism-reduction
    // strategy is used, one thread processes 4 output pixels that lie in the
    // same column on 4 adjacent rows, so dstr must be multiplied by 4.
    int dstc = blockIdx.x * blockDim.x + threadIdx.x;
    int dstr = (blockIdx.y * blockDim.y + threadIdx.y) * 4;

    // Check whether the first pixel is out of range. If it is, skip it, both to
    // save computation and to avoid crashes caused by out-of-bounds accesses.
    if (dstc >= inimg.imgMeta.width || dstr >= inimg.imgMeta.height)
        return;

    // Temporary x and y coordinates of the pixel being visited.
    int dx, dy;
    // Pointer to the current position in the template.
    int *curtplptr = tpl.tplMeta.tplData;
    // Pointer to the current position in the input image.
    unsigned char *curinptr;
    // Accumulated (weighted) sums of the neighborhood pixels.
    unsigned int tplsum[4] = { 0, 0, 0, 0 };
    // Divisors of the current filtering operation.
    float tmpcount[4] = { 0, 0, 0, 0 };

    // Scan every input pixel covered by the template.
    for (int i = 0; i < tpl.tplMeta.count; i++) {
        // Compute the x and y coordinates of the current template position.
        // The template stores each point as two consecutive entries, so the
        // template pointer is incremented twice.
        dx = dstc + *(curtplptr++);
        dy = dstr + *(curtplptr++);

        // First check whether the x coordinate is out of range; if so, skip to
        // the next template point. Otherwise process the 4 adjacent pixels of
        // the current column.
        if (dx >= 0 && dx < inimg.imgMeta.width) {
            // Locate the first pixel from dx and dy.
            curinptr = inimg.imgMeta.imgData + dx + dy * inimg.pitchBytes;
            // Check whether the y coordinate is out of range.
            if (dy >= 0 && dy < inimg.imgMeta.height) {
                // Accumulate the weighted value of the first pixel.
                tplsum[0] += (*curinptr) * (tpl.attachedData[i]);
                // Choose the path corresponding to the implementation variant.
                switch (imptype) {
                // Linear filtering as neighborhood sum divided by pixel count.
                case LNFT_COUNT_DIV:
                    // Count the pixels accumulated for this neighborhood.
                    tmpcount[0] += 1;
                    break;
                // Linear filtering as neighborhood sum divided by the weight sum.
                case LNFT_WEIGHT_DIV:
                    // Accumulate the weights of this neighborhood.
                    tmpcount[0] += tpl.attachedData[i];
                    break;
                // Linear filtering as plain weighted sum.
                case LNFT_NO_DIV:
                    // Set the divisor to 1.
                    tmpcount[0] = 1;
                    break;
                }
            }
            // Process the remaining 3 pixels of the current column.
            for (int j = 1; j < 4; j++) {
                // Locate the current pixel.
                curinptr += inimg.pitchBytes;
                // Increment dy to obtain the y coordinate of the pixel to process.
                dy++;
                // Check whether dy is out of range; if so, skip to the next
                // template point, otherwise process the current pixel.
                if (dy >= 0 && dy < inimg.imgMeta.height) {
                    // Accumulate the weighted value of this pixel.
                    tplsum[j] += (*curinptr) * (tpl.attachedData[i]);
                    // Choose the path corresponding to the implementation variant.
                    switch (imptype) {
                    // Neighborhood sum divided by pixel count.
                    case LNFT_COUNT_DIV:
                        tmpcount[j] += 1;
                        break;
                    // Neighborhood sum divided by the weight sum.
                    case LNFT_WEIGHT_DIV:
                        tmpcount[j] += tpl.attachedData[i];
                        break;
                    // Plain weighted sum.
                    case LNFT_NO_DIV:
                        tmpcount[j] = 1;
                        break;
                    }
                }
            }
        }
    }

    // Write the 4 averaged values to the corresponding output pixels.
    // Pointer into the output image.
    unsigned char *outptr;
    // Locate the first output pixel.
    outptr = outimg.imgMeta.imgData + dstr * outimg.pitchBytes + dstc;
    // Compute the neighborhood average and write it to the output image.
    *outptr = (tplsum[0] / tmpcount[0]);

    // Handle the remaining 3 pixels.
    for (int i = 1; i < 4; i++) {
        // Check whether the y coordinate is out of range; if it is, the
        // following pixels are out of range as well, so return directly.
        if (++dstr >= outimg.imgMeta.height)
            return;
        // Move the pointer to the next row of the current column.
        outptr = outptr + outimg.pitchBytes;
        // Compute the neighborhood average and write it to the output image.
        *outptr = (tplsum[i] / tmpcount[i]);
    }
}

// Host function: _preOp (pre-processing before the operation)
static __host__ int _preOp(Image *inimg, Image *outimg, Template *tp)
{
    int errcode;  // local variable: error code

    // Copy the input image into Device memory.
    errcode = ImageBasicOp::copyToCurrentDevice(inimg);
    if (errcode != NO_ERROR)
        return errcode;

    // Copy the output image into Device memory.
    errcode = ImageBasicOp::copyToCurrentDevice(outimg);
    if (errcode != NO_ERROR) {
        // Compute the width and height of the ROI sub-image.
        int roiwidth = inimg->roiX2 - inimg->roiX1;
        int roiheight = inimg->roiY2 - inimg->roiY1;
        // If the output image has no data, create an image with the same size
        // as the input image's ROI sub-image.
        errcode = ImageBasicOp::makeAtCurrentDevice(outimg, roiwidth,
                                                    roiheight);
        // If creating the image also fails, return the error.
        if (errcode != NO_ERROR)
            return errcode;
    }

    // Copy the template into Device memory.
    errcode = TemplateBasicOp::copyToCurrentDevice(tp);
    if (errcode != NO_ERROR)
        return errcode;

    return NO_ERROR;
}

// Host function: _adjustRoiSize (adjust the ROI sizes of the input and output images)
static __host__ void _adjustRoiSize(ImageCuda *inimg, ImageCuda *outimg)
{
    // If the input image is wider than the output image, assign the output
    // width to the input; otherwise assign the input width to the output.
    if (inimg->imgMeta.width > outimg->imgMeta.width)
        inimg->imgMeta.width = outimg->imgMeta.width;
    else
        outimg->imgMeta.width = inimg->imgMeta.width;

    // If the input image is taller than the output image, assign the output
    // height to the input; otherwise assign the input height to the output.
    if (inimg->imgMeta.height > outimg->imgMeta.height)
        inimg->imgMeta.height = outimg->imgMeta.height;
    else
        outimg->imgMeta.height = inimg->imgMeta.height;
}

// Host function: _getBlockSize (compute Block and Grid dimensions)
static __host__ int _getBlockSize(int width, int height, dim3 *gridsize,
                                  dim3 *blocksize)
{
    // Check whether gridsize or blocksize is a null pointer.
    if (gridsize == NULL || blocksize == NULL)
        return NULL_POINTER;

    // blocksize uses the default dimensions.
    blocksize->x = DEF_BLOCK_X;
    blocksize->y = DEF_BLOCK_Y;

    // Partition the Grid in the plain way (each thread covers 4 rows).
    gridsize->x = (width + blocksize->x - 1) / blocksize->x;
    gridsize->y = (height + blocksize->y * 4 - 1) / (blocksize->y * 4);
    return NO_ERROR;
}

// Constructor: LinearFilter
__host__ LinearFilter::LinearFilter(int imptype, Template *tp)
{
    // Set the implementation variant of the filter.
    setImpType(imptype);
    // Set the template used by the filter.
    setTemplate(tp);
}

// Member method: getImpType
__host__ int LinearFilter::getImpType() const
{
    // Return the value of the impType member variable.
    return this->impType;
}

// Member method: setImpType
__host__ int LinearFilter::setImpType(int imptype)
{
    // Check whether the argument is valid.
    if (imptype != LNFT_COUNT_DIV && imptype != LNFT_WEIGHT_DIV &&
        imptype != LNFT_NO_DIV)
        return INVALID_DATA;

    // Assign the new value to the impType member variable.
    this->impType = imptype;
    return NO_ERROR;
}

// Member method: getTemplate
__host__ Template *LinearFilter::getTemplate() const
{
    // If the template pointer equals the default template pointer, return NULL.
    if (this->tpl == &(_defTpl->tplMeta))
        return NULL;
    // Otherwise return the template that was set.
    return this->tpl;
}

// Member method: setTemplate
__host__ int LinearFilter::setTemplate(Template *tp)
{
    // If tp is NULL, use the default template pointer; otherwise assign tp to tpl.
    if (tp == NULL) {
        this->tpl = &(_initDefTemplate()->tplMeta);
    } else {
        this->tpl = tp;
    }
    return NO_ERROR;
}

// Member method: linearFilter
__host__ int LinearFilter::linearFilter(Image *inimg, Image *outimg)
{
    int errcode;  // local variable: error code
    dim3 gridsize;
    dim3 blocksize;

    // Check whether the input image, the output image or the template is NULL.
    if (inimg == NULL || outimg == NULL || tpl == NULL)
        return NULL_POINTER;

    // Pre-process the input image, the output image and the template.
    errcode = _preOp(inimg, outimg, tpl);
    if (errcode != NO_ERROR)
        return errcode;

    // Extract the ROI sub-image of the input image.
    ImageCuda insubimgCud;
    errcode = ImageBasicOp::roiSubImage(inimg, &insubimgCud);
    if (errcode != NO_ERROR)
        return errcode;

    // Extract the ROI sub-image of the output image.
    ImageCuda outsubimgCud;
    errcode = ImageBasicOp::roiSubImage(outimg, &outsubimgCud);
    if (errcode != NO_ERROR)
        return errcode;

    // Adjust the ROI sub-images of the input and output images to the same size.
    _adjustRoiSize(&insubimgCud, &outsubimgCud);

    // Compute the block size and grid size for the kernel launch.
    errcode = _getBlockSize(outsubimgCud.imgMeta.width,
                            outsubimgCud.imgMeta.height,
                            &gridsize, &blocksize);
    if (errcode != NO_ERROR)
        return errcode;

    // Check whether the implementation variant is a valid value.
    if (impType != LNFT_COUNT_DIV && impType != LNFT_WEIGHT_DIV &&
        impType != LNFT_NO_DIV)
        return INVALID_DATA;

    // Launch the kernel that performs the filtering.
    _linearFilterKer<<<gridsize, blocksize>>>(insubimgCud, outsubimgCud,
                                              *TEMPLATE_CUDA(tpl), impType);

    // Call cudaGetLastError to check whether the launch failed.
    cudaError_t err;
    err = cudaGetLastError();
    if (err != cudaSuccess)
        return CUDA_ERROR;

    // Done.
    return errcode;
}
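// Hedged one-pixel-per-thread sketch of the LNFT_COUNT_DIV path above: sum
// the pixels of a (2r+1)x(2r+1) box around each output pixel and divide by
// the number of in-bounds samples. It drops the template indirection and the
// 4-rows-per-thread optimization; the names (boxMeanFilterKer, d_in, d_out,
// radius, ...) are illustrative and not part of the LinearFilter class.
__global__ void boxMeanFilterKer(const unsigned char *d_in, unsigned char *d_out,
                                 int width, int height, size_t pitch, int radius)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= width || y >= height)
        return;

    unsigned int sum = 0;   // weighted sum of the neighborhood (weight 1 here)
    unsigned int cnt = 0;   // number of samples that fell inside the image

    for (int dy = -radius; dy <= radius; ++dy)
    {
        int yy = y + dy;
        if (yy < 0 || yy >= height) continue;
        for (int dx = -radius; dx <= radius; ++dx)
        {
            int xx = x + dx;
            if (xx < 0 || xx >= width) continue;
            sum += d_in[yy * pitch + xx];
            cnt += 1;       // LNFT_COUNT_DIV: divide by the sample count
        }
    }
    // The center pixel is always in bounds, so cnt >= 1.
    d_out[y * pitch + x] = (unsigned char)(sum / cnt);
}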
//#define SOLVE_ZERO_INI_GUESS #define DEBUG namespace amgx { // parameter is used as test name DECLARE_UNITTEST_BEGIN(ProfileTest); struct TestCase { std::string file_name; std::string config_string; bool extract_diagonal; bool insert_diagonal; bool use_pre_setup; bool use_replace; TestCase(): use_pre_setup(true), insert_diagonal(false), extract_diagonal(false), use_replace(true) {} }; std::vector<double> test_main(TestCase &test_case) { bool insert_diagonal = test_case.insert_diagonal; bool extract_diagonal = test_case.extract_diagonal; // Create matrix arrays from file Matrix_h Atemp; Vector_h btemp, xtemp, x_final; // Read the matrix std::string fail_msg = "Cannot open " + test_case.file_name; this->PrintOnFail(fail_msg.c_str()); this->read_system(test_case.file_name.c_str(), Atemp, btemp, xtemp); bool hasDiag = Atemp.hasProps(DIAG); // Create row_offsets, col_indices, off_dia_values and dia_values arrays from the matrix just read int num_rows = Atemp.get_num_rows(); int num_nz = Atemp.get_num_nz(); int bsize_x = Atemp.get_block_dimx(); int bsize_y = Atemp.get_block_dimy(); int bsize = bsize_x * bsize_y; xtemp.resize(num_rows * bsize_y, 1.); std::vector<int> row_offsets(num_rows + 1); std::vector<int> col_indices(num_nz); std::vector<double> off_dia_values(num_nz * bsize); std::vector<double> dia_values; if (hasDiag) { dia_values.resize(num_rows * bsize); } std::vector<double> x_vec(num_rows * bsize_y); std::vector<double> b_vec(num_rows * bsize_x); // Fill vectors int *raw_row_ptr = Atemp.row_offsets.raw(); int *raw_col_ptr = Atemp.col_indices.raw(); double *raw_val_ptr = Atemp.values.raw(); // Row offsets for (int i = 0; i < num_rows + 1; i++) { row_offsets[i] = raw_row_ptr[i]; } // Column indices for (int i = 0; i < num_nz; i++) { col_indices[i] = raw_col_ptr[i]; } // Off-diagonal values for (int i = 0; i < num_nz; i++) for (int j = 0; j < bsize; j++) { off_dia_values[i * bsize + j] = raw_val_ptr[i * bsize + j]; } // Diagonal values if (hasDiag) { for (int i = 0; i < num_rows; i++) { for (int j = 0; j < bsize; j++) { dia_values[i * bsize + j] = raw_val_ptr[num_nz * bsize + i * bsize + j]; } } } srand(1); // Random RHS double *b_raw_ptr = btemp.raw(); for (int i = 0; i < num_rows; i++) for (int j = 0; j < bsize_x; j++) { b_vec[i * bsize_x + j] = b_raw_ptr[i * bsize_x + j] + (1.0 * rand() / RAND_MAX); } // Random xvector double *x_raw_ptr = xtemp.raw(); for (int i = 0; i < num_rows; i++) for (int j = 0; j < bsize_y; j++) { x_vec[i * bsize_y + j] = x_raw_ptr[i * bsize_y + j] + (1.0 * rand() / RAND_MAX); } std::vector<double> x_vec_col = x_vec; std::string option_string = test_case.config_string; // Insert diagonal if (insert_diagonal) { std::vector<int> new_col_indices( (num_nz + num_rows) ); std::vector<double> new_off_dia_values( (num_nz + num_rows)*bsize ); int icount = 0; for (int i = 0; i < num_rows; i++) { for (int j = row_offsets[i]; j < row_offsets[i + 1]; j++) { int col = col_indices[j]; new_col_indices[icount] = col; for (int k = 0; k < bsize; k++) { new_off_dia_values[icount * bsize + k] = off_dia_values[j * bsize + k]; } icount++; } // Insert diagonal new_col_indices[icount] = i; for (int k = 0; k < bsize; k++) { new_off_dia_values[icount * bsize + k] = dia_values[i * bsize + k]; } icount++; } // increment row_offsets for (int i = 0; i < num_rows + 1; i++) { row_offsets[i] += i; } off_dia_values = new_off_dia_values; col_indices = new_col_indices; dia_values.resize(0); num_nz += num_rows; } //Extract diagonal if (extract_diagonal) { std::vector<int> old_col_indices = 
col_indices; std::vector<double> old_off_dia_values = off_dia_values; off_dia_values.resize((num_nz - num_rows)*bsize); col_indices.resize(num_nz - num_rows); dia_values.resize(num_rows * bsize); int icount = 0; for (int i = 0; i < num_rows; i++) { for (int j = row_offsets[i]; j < row_offsets[i + 1]; j++) { int col = old_col_indices[j]; if (col != i) { col_indices[icount] = col; for (int k = 0; k < bsize; k++) { off_dia_values[icount * bsize + k] = old_off_dia_values[j * bsize + k]; } icount++; } else { for (int k = 0; k < bsize; k++) { dia_values[i * bsize + k] = old_off_dia_values[j * bsize + k]; } } } } // decrement row_offsets for (int i = 0; i < num_rows + 1; i++) { row_offsets[i] -= i; } num_nz -= num_rows; } AMGX_config_handle cfg; AMGX_config_create( &cfg, option_string.c_str()); AMGX_config_handle rsrc_cfg = NULL; UNITTEST_ASSERT_EQUAL(AMGX_config_create(&rsrc_cfg, ""), AMGX_OK); // Choosing device 0 int device = 0; AMGX_resources_handle rsrc = NULL; UNITTEST_ASSERT_EQUAL(AMGX_resources_create(&rsrc, rsrc_cfg, NULL, 1, &device), AMGX_OK); AMGX_matrix_handle matrix; AMGX_matrix_create( &matrix, rsrc, AMGX_mode_dDDI ); AMGX_solver_handle solver; AMGX_solver_create( &solver, rsrc, AMGX_mode_dDDI, cfg); AMGX_vector_handle b, x; AMGX_vector_create( &b, rsrc, AMGX_mode_dDDI ); AMGX_vector_create( &x, rsrc, AMGX_mode_dDDI ); int num_setup_iters = 3; for (int i_setup = 0; i_setup < num_setup_iters; i_setup++) { // Upload the new matrix and call setup if (i_setup == 0) { if (dia_values.size() != 0) { AMGX_matrix_upload_all(matrix, num_rows, num_nz, bsize_x, bsize_y, &row_offsets[0], &col_indices[0], &off_dia_values[0], &dia_values[0]); } else { AMGX_matrix_upload_all(matrix, num_rows, num_nz, bsize_x, bsize_y, &row_offsets[0], &col_indices[0], &off_dia_values[0], NULL); } AMGX_solver_setup( solver, matrix ); } else { // Perturb the matrix //for (int i=0;i<num_nz;i++) // for (int j=0;j<bsize;j++) // off_dia_values[i*bsize+j] -= .0001*abs(rand())/RAND_MAX; //// perturb the diagonal //if (hasDiag) { // for (int i=0;i<num_rows;i++) { // for (int j=0;j<bsize;j++) { // dia_values[i*bsize+j] += .001*(rand())/RAND_MAX; // } // } //} if (test_case.use_replace) { if (dia_values.size() != 0) { AMGX_matrix_replace_coefficients(matrix, num_rows, num_nz, &off_dia_values[0], &dia_values[0]); } else { AMGX_matrix_replace_coefficients(matrix, num_rows, num_nz, &off_dia_values[0], NULL); } } else { if (dia_values.size() != 0) { AMGX_matrix_upload_all(matrix, num_rows, num_nz, bsize_x, bsize_y, &row_offsets[0], &col_indices[0], &off_dia_values[0], &dia_values[0]); } else { AMGX_matrix_upload_all(matrix, num_rows, num_nz, bsize_x, bsize_y, &row_offsets[0], &col_indices[0], &off_dia_values[0], NULL); } } if (test_case.use_pre_setup) { AMGX_solver_resetup( solver, matrix ); } else { AMGX_solver_setup( solver, matrix ); } } // Run several solves. 
const int num_solves = 5; for ( int i_solve = 0 ; i_solve < num_solves ; ++i_solve ) { // perturb the rhs //for (int i=0;i<num_rows;i++) // for (int j=0;j<bsize_x;j++) // b_vec[i*bsize_x+j] += (1.0*rand()/RAND_MAX); AMGX_vector_upload( b, num_rows, bsize_y, &b_vec[0] ); // upload the updated x_vector AMGX_vector_upload( x, num_rows, bsize_x, &x_vec[0] ); AMGX_solver_solve_with_0_initial_guess( solver, b, x ); AMGX_vector_download( x, &x_vec[0] ); } } AMGX_solver_destroy( solver ); AMGX_matrix_destroy( matrix ); AMGX_vector_destroy( b ); AMGX_vector_destroy( x ); AMGX_config_destroy( cfg ); UNITTEST_ASSERT_EQUAL(AMGX_config_destroy( rsrc_cfg ), AMGX_OK); UNITTEST_ASSERT_EQUAL(AMGX_resources_destroy( rsrc ), AMGX_OK); return x_vec; } void run() { TestCase temp_case; temp_case.file_name = "Public/florida/atmosmodd.mtx"; temp_case.config_string = "config_version=2, solver(main_solver)=MULTICOLOR_ILU, main_solver:ilu_sparsity_level=1, main_solver:max_iters=5, block_format=ROW_MAJOR, main_solver:coloring_level=2"; temp_case.config_string += ","; temp_case.config_string += "main_solver:monitor_residual=1,"; temp_case.config_string += "max_uncolored_percentage=0.,"; temp_case.config_string += "main_solver:store_res_history=1,"; temp_case.config_string += "main_solver:print_solve_stats=1,"; temp_case.config_string += "main_solver:obtain_timings=1"; temp_case.extract_diagonal = false; temp_case.insert_diagonal = false; test_main(temp_case); } DECLARE_UNITTEST_END(ProfileTest); // if you want to be able run this test for all available configs you can write this: //#define AMGX_CASE_LINE(CASE) TemplateTest <TemplateMode<CASE>::Type> TemplateTest_##CASE; // AMGX_FORALL_BUILDS(AMGX_CASE_LINE) //#undef AMGX_CASE_LINE // or run for all device configs //#define AMGX_CASE_LINE(CASE) TemplateTest <TemplateMode<CASE>::Type> TemplateTest_##CASE; // AMGX_FORALL_BUILDS_DEVICE(AMGX_CASE_LINE) //#undef AMGX_CASE_LINE ProfileTest <TemplateMode<AMGX_mode_dDDI>::Type> ProfileTest_instance_mode_dDDI; // or you can specify several desired configs //TemplateTest <TemplateMode<AMGX_mode_hDFI>::Type> TemplateTest_hDFI; //TemplateTest <TemplateMode<AMGX_mode_dDFI>::Type> TemplateTest_dDFI; }
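// Hedged standalone sketch of the "insert_diagonal" conversion performed in
// test_main() above, for the scalar case (bsize == 1): an externally stored
// diagonal is merged back into the CSR structure, adding one entry at the end
// of each row and shifting row_offsets[i] by i. The names follow the test,
// but the function itself is illustrative.
#include <vector>

static void insertDiagonalCSR(std::vector<int> &row_offsets,
                              std::vector<int> &col_indices,
                              std::vector<double> &values,
                              const std::vector<double> &dia_values)
{
    const int num_rows = (int)row_offsets.size() - 1;
    std::vector<int> new_cols;
    std::vector<double> new_vals;
    new_cols.reserve(col_indices.size() + num_rows);
    new_vals.reserve(values.size() + num_rows);

    for (int i = 0; i < num_rows; ++i)
    {
        // Copy the off-diagonal entries of row i.
        for (int j = row_offsets[i]; j < row_offsets[i + 1]; ++j)
        {
            new_cols.push_back(col_indices[j]);
            new_vals.push_back(values[j]);
        }
        // Append the diagonal entry at the end of the row.
        new_cols.push_back(i);
        new_vals.push_back(dia_values[i]);
    }
    // Each row gained exactly one entry, so offset i grows by i.
    for (int i = 0; i <= num_rows; ++i)
        row_offsets[i] += i;

    col_indices.swap(new_cols);
    values.swap(new_vals);
}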
step of the computation. */ #ifndef _BISECT_KERNEL_LARGE_H_ #define _BISECT_KERNEL_LARGE_H_ // includes, project #include "config.h" #include "util.h" // additional kernel #include "bisect_util.cu" // declaration, forward //////////////////////////////////////////////////////////////////////////////// //! Write data to global memory //////////////////////////////////////////////////////////////////////////////// __device__ void writeToGmem( const unsigned int tid, const unsigned int tid_2, const unsigned int num_threads_active, const unsigned int num_blocks_mult, float* g_left_one, float* g_right_one, unsigned int* g_pos_one, float* g_left_mult, float* g_right_mult, unsigned int* g_left_count_mult, unsigned int* g_right_count_mult, float* s_left, float* s_right, unsigned short* s_left_count, unsigned short* s_right_count, unsigned int* g_blocks_mult, unsigned int* g_blocks_mult_sum, unsigned short* s_compaction_list, unsigned short* s_cl_helper, unsigned int offset_mult_lambda ); //////////////////////////////////////////////////////////////////////////////// //! Perform final stream compaction before writing out data //////////////////////////////////////////////////////////////////////////////// __device__ void compactStreamsFinal( const unsigned int tid, const unsigned int tid_2, const unsigned int num_threads_active, unsigned int& offset_mult_lambda, float* s_left, float* s_right, unsigned short* s_left_count, unsigned short* s_right_count, unsigned short* s_cl_one, unsigned short* s_cl_mult, unsigned short* s_cl_blocking, unsigned short* s_cl_helper, unsigned int is_one_lambda, unsigned int is_one_lambda_2, float& left, float& right, float& left_2, float& right_2, unsigned int& left_count, unsigned int& right_count, unsigned int& left_count_2, unsigned int& right_count_2, unsigned int c_block_iend, unsigned int c_sum_block, unsigned int c_block_iend_2, unsigned int c_sum_block_2 ); //////////////////////////////////////////////////////////////////////////////// //! Perform scan to compact list of block start addresses //////////////////////////////////////////////////////////////////////////////// __device__ void scanCompactBlocksStartAddress( const unsigned int tid, const unsigned int tid_2, const unsigned int num_threads_compaction, unsigned short* s_cl_blocking, unsigned short* s_cl_helper ); //////////////////////////////////////////////////////////////////////////////// //! Perform scan to obtain number of eigenvalues before a specific block //////////////////////////////////////////////////////////////////////////////// __device__ void scanSumBlocks( const unsigned int tid, const unsigned int tid_2, const unsigned int num_threads_active, const unsigned int num_threads_compaction, unsigned short* s_cl_blocking, unsigned short* s_cl_helper ); //////////////////////////////////////////////////////////////////////////////// //! Perform initial scan for compaction of intervals containing one and //! multiple eigenvalues; also do initial scan to build blocks //////////////////////////////////////////////////////////////////////////////// __device__ void scanInitial( const unsigned int tid, const unsigned int tid_2, const unsigned int num_threads_active, const unsigned int num_threads_compaction, unsigned short* s_cl_one, unsigned short* s_cl_mult, unsigned short* s_cl_blocking, unsigned short* s_cl_helper ); //////////////////////////////////////////////////////////////////////////////// //! Store all non-empty intervals resulting from the subdivision of the interval //! 
currently processed by the thread //! @param addr address where to store //////////////////////////////////////////////////////////////////////////////// __device__ void storeNonEmptyIntervalsLarge( unsigned int addr, const unsigned int num_threads_active, float* s_left, float* s_right, unsigned short* s_left_count, unsigned short* s_right_count, float left, float mid, float right, const unsigned short left_count, const unsigned short mid_count, const unsigned short right_count, float epsilon, unsigned int& compact_second_chunk, unsigned short* s_compaction_list, unsigned int& is_active_second ); //////////////////////////////////////////////////////////////////////////////// //! Bisection to find eigenvalues of a real, symmetric, and tridiagonal matrix //! @param g_d diagonal elements in global memory //! @param g_s superdiagonal elements in global elements (stored so that the //! element *(g_s - 1) can be accessed an equals 0 //! @param n size of matrix //! @param lg lower bound of input interval (e.g. Gerschgorin interval) //! @param ug upper bound of input interval (e.g. Gerschgorin interval) //! @param lg_eig_count number of eigenvalues that are smaller than \a lg //! @param lu_eig_count number of eigenvalues that are smaller than \a lu //! @param epsilon desired accuracy of eigenvalues to compute //////////////////////////////////////////////////////////////////////////////// __global__ void bisectKernelLarge( float* g_d, float* g_s, const unsigned int n, const float lg, const float ug, const unsigned int lg_eig_count, const unsigned int ug_eig_count, float epsilon, unsigned int* g_num_one, unsigned int* g_num_blocks_mult, float* g_left_one, float* g_right_one, unsigned int* g_pos_one, float* g_left_mult, float* g_right_mult, unsigned int* g_left_count_mult, unsigned int* g_right_count_mult, unsigned int* g_blocks_mult, unsigned int* g_blocks_mult_sum ) { const unsigned int tid = threadIdx.x; // intervals (store left and right because the subdivision tree is in general // not dense __shared__ float s_left[2 * MAX_THREADS_BLOCK + 1]; __shared__ float s_right[2 * MAX_THREADS_BLOCK + 1]; // number of eigenvalues that are smaller than s_left / s_right // (correspondence is realized via indices) __shared__ unsigned short s_left_count[2 * MAX_THREADS_BLOCK + 1]; __shared__ unsigned short s_right_count[2 * MAX_THREADS_BLOCK + 1]; // helper for stream compaction __shared__ unsigned short s_compaction_list[2 * MAX_THREADS_BLOCK + 1]; // state variables for whole block // if 0 then compaction of second chunk of child intervals is not necessary // (because all intervals had exactly one non-dead child) __shared__ unsigned int compact_second_chunk; // if 1 then all threads are converged __shared__ unsigned int all_threads_converged; // number of currently active threads __shared__ unsigned int num_threads_active; // number of threads to use for stream compaction __shared__ unsigned int num_threads_compaction; // helper for exclusive scan unsigned short* s_compaction_list_exc = s_compaction_list + 1; // variables for currently processed interval // left and right limit of active interval float left = 0.0f; float right = 0.0f; unsigned int left_count = 0; unsigned int right_count = 0; // midpoint of active interval float mid = 0.0f; // number of eigenvalues smaller then mid unsigned int mid_count = 0; // helper for stream compaction (tracking of threads generating second child) unsigned int is_active_second = 0; // initialize lists s_compaction_list[tid] = 0; s_left[tid] = 0; s_right[tid] = 0; 
s_left_count[tid] = 0; s_right_count[tid] = 0; __syncthreads(); // set up initial configuration if( 0 == tid) { s_left[0] = lg; s_right[0] = ug; s_left_count[0] = lg_eig_count; s_right_count[0] = ug_eig_count; compact_second_chunk = 0; num_threads_active = 1; num_threads_compaction = 1; all_threads_converged = 1; } __syncthreads(); // for all active threads read intervals from the last level // the number of (worst case) active threads per level l is 2^l while( true) { subdivideActiveInterval( tid, s_left, s_right, s_left_count, s_right_count, num_threads_active, left, right, left_count, right_count, mid, all_threads_converged); __syncthreads(); // check if done if( 1 == all_threads_converged) { break; } // compute number of eigenvalues smaller than mid // use all threads for reading the necessary matrix data from global // memory // use s_left and s_right as scratch space for diagonal and // superdiagonal of matrix mid_count = computeNumSmallerEigenvalsLarge( g_d, g_s, n, mid, threadIdx.x, num_threads_active, s_left, s_right, (left == right) ); __syncthreads(); // store intervals // for all threads store the first child interval in a continuous chunk of // memory, and the second child interval -- if it exists -- in a second // chunk; it is likely that all threads reach convergence up to // \a epsilon at the same level; furthermore, for higher level most / all // threads will have only one child, storing the first child compactly will // (first) avoid to perform a compaction step on the first chunk, (second) // make it for higher levels (when all threads / intervals have // exactly one child) unnecessary to perform a compaction of the second // chunk if( tid < num_threads_active) { if( left != right) { // store intervals storeNonEmptyIntervalsLarge( tid, num_threads_active, s_left, s_right, s_left_count, s_right_count, left, mid, right, left_count, mid_count, right_count, epsilon, compact_second_chunk, s_compaction_list_exc, is_active_second ); } else { // re-write converged interval (has to be stored again because s_left // and s_right are used as scratch space for // computeNumSmallerEigenvalsLarge() s_left[tid] = left; s_right[tid] = left; s_left_count[tid] = left_count; s_right_count[tid] = right_count; is_active_second = 0; } } // necessary so that compact_second_chunk is up-to-date __syncthreads(); // perform compaction of chunk where second children are stored // scan of (num_threads_active / 2) elements, thus at most // (num_threads_active / 4) threads are needed if( compact_second_chunk > 0) { // create indices for compaction createIndicesCompaction( s_compaction_list_exc, num_threads_compaction); compactIntervals( s_left, s_right, s_left_count, s_right_count, mid, right, mid_count, right_count, s_compaction_list, num_threads_active, is_active_second ); } __syncthreads(); // update state variables if( 0 == tid) { // update number of active threads with result of reduction num_threads_active += s_compaction_list[num_threads_active]; num_threads_compaction = ceilPow2( num_threads_active); compact_second_chunk = 0; all_threads_converged = 1; } __syncthreads(); if( num_threads_compaction > blockDim.x) { break; } } __syncthreads(); // generate two lists of intervals; one with intervals that contain one // eigenvalue (or are converged), and one with intervals that need further // subdivision // perform two scans in parallel unsigned int left_count_2; unsigned int right_count_2; unsigned int tid_2 = tid + blockDim.x; // cache in per thread registers so that s_left_count and s_right_count 
// can be used for scans left_count = s_left_count[tid]; right_count = s_right_count[tid]; // some threads have to cache data for two intervals if( tid_2 < num_threads_active) { left_count_2 = s_left_count[tid_2]; right_count_2 = s_right_count[tid_2]; } // compaction list for intervals containing one and multiple eigenvalues // do not affect first element for exclusive scan unsigned short* s_cl_one = s_left_count + 1; unsigned short* s_cl_mult = s_right_count + 1; // compaction list for generating blocks of intervals containing multiple // eigenvalues unsigned short* s_cl_blocking = s_compaction_list_exc; // helper compaction list for generating blocks of intervals __shared__ unsigned short s_cl_helper[2 * MAX_THREADS_BLOCK + 1]; if( 0 == tid) { // set to 0 for exclusive scan s_left_count[0] = 0; s_right_count[0] = 0; } __syncthreads(); // flag if interval contains one or multiple eigenvalues unsigned int is_one_lambda = 0; unsigned int is_one_lambda_2 = 0; // number of eigenvalues in the interval unsigned int multiplicity = right_count - left_count; is_one_lambda = (1 == multiplicity); s_cl_one[tid] = is_one_lambda; s_cl_mult[tid] = (! is_one_lambda); // (note: s_cl_blocking is non-zero only where s_cl_mult[] is non-zero) s_cl_blocking[tid] = (1 == is_one_lambda) ? 0 : multiplicity; s_cl_helper[tid] = 0; if( tid_2 < num_threads_active) { unsigned int multiplicity = right_count_2 - left_count_2; is_one_lambda_2 = (1 == multiplicity); s_cl_one[tid_2] = is_one_lambda_2; s_cl_mult[tid_2] = (! is_one_lambda_2); // (note: s_cl_blocking is non-zero only where s_cl_mult[] is non-zero) s_cl_blocking[tid_2] = (1 == is_one_lambda_2) ? 0 : multiplicity; s_cl_helper[tid_2] = 0; } else if( tid_2 < (2 * MAX_THREADS_BLOCK + 1)) { // clear s_cl_blocking[tid_2] = 0; s_cl_helper[tid_2] = 0; } scanInitial( tid, tid_2, num_threads_active, num_threads_compaction, s_cl_one, s_cl_mult, s_cl_blocking, s_cl_helper ); scanSumBlocks( tid, tid_2, num_threads_active, num_threads_compaction, s_cl_blocking, s_cl_helper ); // end down sweep of scan __syncthreads(); unsigned int c_block_iend = 0; unsigned int c_block_iend_2 = 0; unsigned int c_sum_block = 0; unsigned int c_sum_block_2 = 0; // for each thread / interval that corresponds to root node of interval block // store start address of block and total number of eigenvalues in all blocks // before this block (particular thread is irrelevant, constraint is to // have a subset of threads so that one and only one of them is in each // interval) if( 1 == s_cl_helper[tid]) { c_block_iend = s_cl_mult[tid] + 1; c_sum_block = s_cl_blocking[tid]; } if( 1 == s_cl_helper[tid_2]) { c_block_iend_2 = s_cl_mult[tid_2] + 1; c_sum_block_2 = s_cl_blocking[tid_2]; } scanCompactBlocksStartAddress( tid, tid_2, num_threads_compaction, s_cl_blocking, s_cl_helper); // finished second scan for s_cl_blocking __syncthreads(); // determine the global results __shared__ unsigned int num_blocks_mult; __shared__ unsigned int num_mult; __shared__ unsigned int offset_mult_lambda; if( 0 == tid) { num_blocks_mult = s_cl_blocking[num_threads_active - 1]; offset_mult_lambda = s_cl_one[num_threads_active - 1]; num_mult = s_cl_mult[num_threads_active - 1]; *g_num_one = offset_mult_lambda; *g_num_blocks_mult = num_blocks_mult; } __syncthreads(); float left_2, right_2; --s_cl_one; --s_cl_mult; --s_cl_blocking; compactStreamsFinal( tid, tid_2, num_threads_active, offset_mult_lambda, s_left, s_right, s_left_count, s_right_count, s_cl_one, s_cl_mult, s_cl_blocking, s_cl_helper, is_one_lambda, is_one_lambda_2, 
left, right, left_2, right_2, left_count, right_count, left_count_2, right_count_2, c_block_iend, c_sum_block, c_block_iend_2, c_sum_block_2 ); __syncthreads(); // final adjustment before writing out data to global memory if( 0 == tid) { s_cl_blocking[num_blocks_mult] = num_mult; s_cl_helper[0] = 0; } __syncthreads(); // write to global memory writeToGmem( tid, tid_2, num_threads_active, num_blocks_mult, g_left_one, g_right_one, g_pos_one, g_left_mult, g_right_mult, g_left_count_mult, g_right_count_mult, s_left, s_right, s_left_count, s_right_count, g_blocks_mult, g_blocks_mult_sum, s_compaction_list, s_cl_helper, offset_mult_lambda ); } //////////////////////////////////////////////////////////////////////////////// //! Write data to global memory //////////////////////////////////////////////////////////////////////////////// __device__ void writeToGmem( const unsigned int tid, const unsigned int tid_2, const unsigned int num_threads_active, const unsigned int num_blocks_mult, float* g_left_one, float* g_right_one, unsigned int* g_pos_one, float* g_left_mult, float* g_right_mult, unsigned int* g_left_count_mult, unsigned int* g_right_count_mult, float* s_left, float* s_right, unsigned short* s_left_count, unsigned short* s_right_count, unsigned int* g_blocks_mult, unsigned int* g_blocks_mult_sum, unsigned short* s_compaction_list, unsigned short* s_cl_helper, unsigned int offset_mult_lambda ) { if( tid < offset_mult_lambda) { g_left_one[tid] = s_left[tid]; g_right_one[tid] = s_right[tid]; // right count can be used to order eigenvalues without sorting g_pos_one[tid] = s_right_count[tid]; } else { g_left_mult[tid - offset_mult_lambda] = s_left[tid]; g_right_mult[tid - offset_mult_lambda] = s_right[tid]; g_left_count_mult[tid - offset_mult_lambda] = s_left_count[tid]; g_right_count_mult[tid - offset_mult_lambda] = s_right_count[tid]; } if( tid_2 < num_threads_active) { if( tid_2 < offset_mult_lambda) { g_left_one[tid_2] = s_left[tid_2]; g_right_one[tid_2] = s_right[tid_2]; // right count can be used to order eigenvalues without sorting g_pos_one[tid_2] = s_right_count[tid_2]; } else { g_left_mult[tid_2 - offset_mult_lambda] = s_left[tid_2]; g_right_mult[tid_2 - offset_mult_lambda] = s_right[tid_2]; g_left_count_mult[tid_2 - offset_mult_lambda] = s_left_count[tid_2]; g_right_count_mult[tid_2 - offset_mult_lambda] = s_right_count[tid_2]; } } // end writing out data // note that s_cl_blocking = s_compaction_list + 1;, that is by writing out // s_compaction_list we write the exclusive scan result if( tid <= num_blocks_mult) { g_blocks_mult[tid] = s_compaction_list[tid]; g_blocks_mult_sum[tid] = s_cl_helper[tid]; } if( tid_2 <= num_blocks_mult) { g_blocks_mult[tid_2] = s_compaction_list[tid_2]; g_blocks_mult_sum[tid_2] = s_cl_helper[tid_2]; } } //////////////////////////////////////////////////////////////////////////////// //! 
Perform final stream compaction before writing data to global memory //////////////////////////////////////////////////////////////////////////////// __device__ void compactStreamsFinal( const unsigned int tid, const unsigned int tid_2, const unsigned int num_threads_active, unsigned int& offset_mult_lambda, float* s_left, float* s_right, unsigned short* s_left_count, unsigned short* s_right_count, unsigned short* s_cl_one, unsigned short* s_cl_mult, unsigned short* s_cl_blocking, unsigned short* s_cl_helper, unsigned int is_one_lambda, unsigned int is_one_lambda_2, float& left, float& right, float& left_2, float& right_2, unsigned int& left_count, unsigned int& right_count, unsigned int& left_count_2, unsigned int& right_count_2, unsigned int c_block_iend, unsigned int c_sum_block, unsigned int c_block_iend_2, unsigned int c_sum_block_2 ) { // cache data before performing compaction left = s_left[tid]; right = s_right[tid]; if( tid_2 < num_threads_active) { left_2 = s_left[tid_2]; right_2 = s_right[tid_2]; } __syncthreads(); // determine addresses for intervals containing multiple eigenvalues and // addresses for blocks of intervals unsigned int ptr_w = 0; unsigned int ptr_w_2 = 0; unsigned int ptr_blocking_w = 0; unsigned int ptr_blocking_w_2 = 0; ptr_w = (1 == is_one_lambda) ? s_cl_one[tid] : s_cl_mult[tid] + offset_mult_lambda; if( 0 != c_block_iend) { ptr_blocking_w = s_cl_blocking[tid]; } if( tid_2 < num_threads_active) { ptr_w_2 = (1 == is_one_lambda_2) ? s_cl_one[tid_2] : s_cl_mult[tid_2] + offset_mult_lambda; if( 0 != c_block_iend_2) { ptr_blocking_w_2 = s_cl_blocking[tid_2]; } } __syncthreads(); // store compactly in shared mem s_left[ptr_w] = left; s_right[ptr_w] = right; s_left_count[ptr_w] = left_count; s_right_count[ptr_w] = right_count; if( 0 != c_block_iend) { s_cl_blocking[ptr_blocking_w + 1] = c_block_iend - 1; s_cl_helper[ptr_blocking_w + 1] = c_sum_block; } if( tid_2 < num_threads_active) { // store compactly in shared mem s_left[ptr_w_2] = left_2; s_right[ptr_w_2] = right_2; s_left_count[ptr_w_2] = left_count_2; s_right_count[ptr_w_2] = right_count_2; if( 0 != c_block_iend_2) { s_cl_blocking[ptr_blocking_w_2 + 1] = c_block_iend_2 - 1; s_cl_helper[ptr_blocking_w_2 + 1] = c_sum_block_2; } } } //////////////////////////////////////////////////////////////////////////////// //! 
Compute addresses to obtain compact list of block start addresses //////////////////////////////////////////////////////////////////////////////// __device__ void scanCompactBlocksStartAddress( const unsigned int tid, const unsigned int tid_2, const unsigned int num_threads_compaction, unsigned short* s_cl_blocking, unsigned short* s_cl_helper ) { // prepare for second step of block generation: compaction of the block // list itself to efficiently write out these s_cl_blocking[tid] = s_cl_helper[tid]; if( tid_2 < num_threads_compaction) { s_cl_blocking[tid_2] = s_cl_helper[tid_2]; } __syncthreads(); // additional scan to compact s_cl_blocking that permits to generate a // compact list of eigenvalue blocks each one containing about // MAX_THREADS_BLOCK eigenvalues (so that each of these blocks may be // processed by one thread block in a subsequent processing step unsigned int offset = 1; // build scan tree for(int d = (num_threads_compaction >> 1); d > 0; d >>= 1) { __syncthreads(); if (tid < d) { unsigned int ai = offset*(2*tid+1)-1; unsigned int bi = offset*(2*tid+2)-1; s_cl_blocking[bi] = s_cl_blocking[bi] + s_cl_blocking[ai]; } offset <<= 1; } // traverse down tree: first down to level 2 across for( int d = 2; d < num_threads_compaction; d <<= 1) { offset >>= 1; __syncthreads(); // if (tid < (d-1)) { unsigned int ai = offset*(tid+1) - 1; unsigned int bi = ai + (offset >> 1); s_cl_blocking[bi] = s_cl_blocking[bi] + s_cl_blocking[ai]; } } } //////////////////////////////////////////////////////////////////////////////// //! Perform scan to obtain number of eigenvalues before a specific block //////////////////////////////////////////////////////////////////////////////// __device__ void scanSumBlocks( const unsigned int tid, const unsigned int tid_2, const unsigned int num_threads_active, const unsigned int num_threads_compaction, unsigned short* s_cl_blocking, unsigned short* s_cl_helper ) { unsigned int offset = 1; // first step of scan to build the sum of elements within each block // build up tree for (int d = num_threads_compaction >> 1; d > 0; d >>= 1) { __syncthreads(); if (tid < d) { unsigned int ai = offset*(2*tid+1)-1; unsigned int bi = offset*(2*tid+2)-1; s_cl_blocking[bi] += s_cl_blocking[ai]; } offset *= 2; } // first step of scan to build the sum of elements within each block // traverse down tree for (int d = 2; d < (num_threads_compaction - 1); d <<= 1) { offset >>= 1; __syncthreads(); if (tid < (d-1)) { unsigned int ai = offset*(tid+1) - 1; unsigned int bi = ai + (offset >> 1); s_cl_blocking[bi] += s_cl_blocking[ai]; } } __syncthreads(); if( 0 == tid) { // move last element of scan to last element that is valid // necessary because the number of threads employed for scan is a power // of two and not necessarily the number of active threasd s_cl_helper[num_threads_active - 1] = s_cl_helper[num_threads_compaction - 1]; s_cl_blocking[num_threads_active - 1] = s_cl_blocking[num_threads_compaction - 1]; } } //////////////////////////////////////////////////////////////////////////////// //! Perform initial scan for compaction of intervals containing one and //! 
multiple eigenvalues; also do initial scan to build blocks //////////////////////////////////////////////////////////////////////////////// __device__ void scanInitial( const unsigned int tid, const unsigned int tid_2, const unsigned int num_threads_active, const unsigned int num_threads_compaction, unsigned short* s_cl_one, unsigned short* s_cl_mult, unsigned short* s_cl_blocking, unsigned short* s_cl_helper ) { // perform scan to compactly write out the intervals containing one and // multiple eigenvalues // also generate tree for blocking of intervals containing multiple // eigenvalues unsigned int offset = 1; // build scan tree for(int d = (num_threads_compaction >> 1); d > 0; d >>= 1) { __syncthreads(); if (tid < d) { unsigned int ai = offset*(2*tid+1); unsigned int bi = offset*(2*tid+2)-1; s_cl_one[bi] = s_cl_one[bi] + s_cl_one[ai - 1]; s_cl_mult[bi] = s_cl_mult[bi] + s_cl_mult[ai - 1]; // s_cl_helper is binary and zero for an internal node and 1 for a // root node of a tree corresponding to a block // s_cl_blocking contains the number of nodes in each sub-tree at each // iteration, the data has to be kept to compute the total number of // eigenvalues per block that, in turn, is needed to efficiently // write out data in the second step if( (s_cl_helper[ai - 1] != 1) || (s_cl_helper[bi] != 1)) { // check how many childs are non terminated if( s_cl_helper[ai - 1] == 1) { // mark as terminated s_cl_helper[bi] = 1; } else if( s_cl_helper[bi] == 1) { // mark as terminated s_cl_helper[ai - 1] = 1; } else { // both childs are non-terminated unsigned int temp = s_cl_blocking[bi] + s_cl_blocking[ai - 1]; if( temp > MAX_THREADS_BLOCK) { // the two child trees have to form separate blocks, terminate trees s_cl_helper[ai - 1] = 1; s_cl_helper[bi] = 1; } else { // build up tree by joining subtrees s_cl_blocking[bi] = temp; s_cl_blocking[ai - 1] = 0; } } } // end s_cl_helper update } offset <<= 1; } // traverse down tree, this only for stream compaction, not for block // construction for( int d = 2; d < num_threads_compaction; d <<= 1) { offset >>= 1; __syncthreads(); // if (tid < (d-1)) { unsigned int ai = offset*(tid+1) - 1; unsigned int bi = ai + (offset >> 1); s_cl_one[bi] = s_cl_one[bi] + s_cl_one[ai]; s_cl_mult[bi] = s_cl_mult[bi] + s_cl_mult[ai]; } } } //////////////////////////////////////////////////////////////////////////////// //! Store all non-empty intervals resulting from the subdivision of the interval //! 
currently processed by the thread //////////////////////////////////////////////////////////////////////////////// __device__ void storeNonEmptyIntervalsLarge( unsigned int addr, const unsigned int num_threads_active, float* s_left, float* s_right, unsigned short* s_left_count, unsigned short* s_right_count, float left, float mid, float right, const unsigned short left_count, const unsigned short mid_count, const unsigned short right_count, float epsilon, unsigned int& compact_second_chunk, unsigned short* s_compaction_list, unsigned int& is_active_second ) { // check if both child intervals are valid if(( left_count != mid_count) && (mid_count != right_count)) { storeInterval( addr, s_left, s_right, s_left_count, s_right_count, left, mid, left_count, mid_count, epsilon); is_active_second = 1; s_compaction_list[threadIdx.x] = 1; compact_second_chunk = 1; } else { // only one non-empty child interval // mark that no second child is_active_second = 0; s_compaction_list[threadIdx.x] = 0; // store the one valid child interval if( left_count != mid_count) { storeInterval( addr, s_left, s_right, s_left_count, s_right_count, left, mid, left_count, mid_count, epsilon); } else { storeInterval( addr, s_left, s_right, s_left_count, s_right_count, mid, right, mid_count, right_count, epsilon); } } } #endif // #ifndef _BISECT_KERNEL_LARGE_H_
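/*
 * Hedged sketch of a host-side launch of bisectKernelLarge for one
 * tridiagonal matrix.  The single-block geometry, the Gerschgorin bounds
 * (lg, ug) with eigenvalue counts 0 and n at those bounds, and all buffer
 * names are illustrative assumptions; the real driver lives outside this
 * header.
 */
void launch_bisect_kernel_large(float *d_diag, float *d_superdiag, unsigned int n,
                                float lg, float ug, float epsilon,
                                unsigned int *d_num_one, unsigned int *d_num_blocks_mult,
                                float *d_left_one, float *d_right_one, unsigned int *d_pos_one,
                                float *d_left_mult, float *d_right_mult,
                                unsigned int *d_left_count_mult, unsigned int *d_right_count_mult,
                                unsigned int *d_blocks_mult, unsigned int *d_blocks_mult_sum)
{
    dim3 grid(1, 1, 1);                  // one block subdivides the whole Gerschgorin interval
    dim3 block(MAX_THREADS_BLOCK, 1, 1); // matches the shared-memory arrays sized above

    // no eigenvalue lies below lg and all n lie below ug, hence the counts 0 and n
    bisectKernelLarge<<<grid, block>>>(d_diag, d_superdiag, n, lg, ug,
                                       0, n, epsilon,
                                       d_num_one, d_num_blocks_mult,
                                       d_left_one, d_right_one, d_pos_one,
                                       d_left_mult, d_right_mult,
                                       d_left_count_mult, d_right_count_mult,
                                       d_blocks_mult, d_blocks_mult_sum);
}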
template <class T> __device__ void plus_prescan( T *a, T *b) { T av = *a; T bv = *b; *a = bv; *b = bv + av; } /// bitonic_sort: sort 2*LOCAL_THREADCOUNT elements template <class T> __device__ void bitonic_sort( T* sh_data, const uint localid) { for (uint ulevel = 1; ulevel < LQSORT_LOCAL_WORKGROUP_SIZE; ulevel <<= 1) { for (uint j = ulevel; j > 0; j >>= 1) { uint pos = 2*localid - (localid & (j - 1)); uint direction = localid & ulevel; uint av = sh_data[pos], bv = sh_data[pos + j]; const bool sortThem = av > bv; const uint greater = select(bv, av, sortThem); const uint lesser = select(av, bv, sortThem); sh_data[pos] = select(lesser, greater, direction); sh_data[pos + j] = select(greater, lesser, direction); __syncthreads(); } } for (uint j = LQSORT_LOCAL_WORKGROUP_SIZE; j > 0; j >>= 1) { uint pos = 2*localid - (localid & (j - 1)); uint av = sh_data[pos], bv = sh_data[pos + j]; const bool sortThem = av > bv; sh_data[pos] = select(av, bv, sortThem); sh_data[pos + j] = select(bv, av, sortThem); __syncthreads(); } } template <typename T> __device__ void sort_threshold( T* data_in, T* data_out, uint start, uint end, T* temp, uint localid) { uint tsum = end - start; if (tsum == SORT_THRESHOLD) { bitonic_sort(data_in+start, localid); for (uint i = localid; i < SORT_THRESHOLD; i += LQSORT_LOCAL_WORKGROUP_SIZE) { data_out[start + i] = data_in[start + i]; } } else if (tsum > 1) { for (uint i = localid; i < SORT_THRESHOLD; i += LQSORT_LOCAL_WORKGROUP_SIZE) { if (i < tsum) { temp[i] = data_in[start + i]; } else { temp[i] = UINT_MAX; } } __syncthreads(); bitonic_sort(temp, localid); for (uint i = localid; i < tsum; i += LQSORT_LOCAL_WORKGROUP_SIZE) { data_out[start + i] = temp[i]; } } else if (tsum == 1 && localid == 0) { data_out[start] = data_in[start]; } } //---------------------------------------------------------------------------- // Kernel implements gqsort_kernel //---------------------------------------------------------------------------- template <class T> __global__ void gqsort_kernel( T* d, T* dn, block_record<T>* blocks, parent_record* parents, work_record<T>* result) { const uint blockid = blockIdx.x; const uint localid = threadIdx.x; __shared__ uint lt[GQSORT_LOCAL_WORKGROUP_SIZE+1], gt[GQSORT_LOCAL_WORKGROUP_SIZE+1], ltsum, gtsum, lbeg, gbeg; uint i, lfrom, gfrom, lpivot, gpivot, tmp, ltp = 0, gtp = 0; // Get the sequence block assigned to this work group block_record<T> block = blocks[blockid]; uint start = block.start, end = block.end, direction = block.direction; T pivot = block.pivot; parent_record* pparent = parents + block.parent; uint* psstart, *psend, *poldstart, *poldend, *pblockcount; T *s, *sn; // GPU-Quicksort cannot sort in place, as the regular quicksort algorithm can. // It therefore needs two arrays to sort things out. We start sorting in the // direction of d -> dn and then change direction after each run of gqsort_kernel. // Which direction we are sorting: d -> dn or dn -> d? if (direction == 1) { s = d; sn = dn; } else { s = dn; sn = d; } // Set thread __shared__ counters to zero lt[localid] = gt[localid] = 0; __syncthreads(); // Align thread accesses for coalesced reads. // Go through data... for(i = start + localid; i < end; i += GQSORT_LOCAL_WORKGROUP_SIZE) { tmp = s[i]; // counting elements that are smaller ... if (tmp < pivot) ltp++; // or larger compared to the pivot. 
if (tmp > pivot) gtp++; } lt[localid] = ltp; gt[localid] = gtp; __syncthreads(); // calculate cumulative sums uint n; for(i = 1; i < GQSORT_LOCAL_WORKGROUP_SIZE; i <<= 1) { n = 2*i - 1; if ((localid & n) == n) { lt[localid] += lt[localid-i]; gt[localid] += gt[localid-i]; } __syncthreads(); } if ((localid & n) == n) { lt[GQSORT_LOCAL_WORKGROUP_SIZE] = ltsum = lt[localid]; gt[GQSORT_LOCAL_WORKGROUP_SIZE] = gtsum = gt[localid]; lt[localid] = 0; gt[localid] = 0; } for(i = GQSORT_LOCAL_WORKGROUP_SIZE/2; i >= 1; i >>= 1) { n = 2*i - 1; if ((localid & n) == n) { plus_prescan(&lt[localid - i], &lt[localid]); plus_prescan(&gt[localid - i], &gt[localid]); } __syncthreads(); } // Allocate memory in the sequence this block is a part of if (localid == 0) { // get shared variables psstart = &pparent->sstart; psend = &pparent->send; poldstart = &pparent->oldstart; poldend = &pparent->oldend; pblockcount = &pparent->blockcount; // Atomic increment allocates memory to write to. lbeg = atomicAdd(psstart, ltsum); // Atomic is necessary since multiple blocks access this gbeg = atomicSub(psend, gtsum) - gtsum; } __syncthreads(); // Allocate locations for work items lfrom = lbeg + lt[localid]; gfrom = gbeg + gt[localid]; // go thru data again writing elements to their correct position for(i = start + localid; i < end; i += GQSORT_LOCAL_WORKGROUP_SIZE) { tmp = s[i]; // increment counts if (tmp < pivot) sn[lfrom++] = tmp; if (tmp > pivot) sn[gfrom++] = tmp; } __syncthreads(); if (localid == 0) { //if (atomic_dec(pblockcount) == 0) { if (atomicSub(pblockcount, 1) == 0) { uint sstart = *psstart; uint send = *psend; uint oldstart = *poldstart; uint oldend = *poldend; // Store the pivot value between the new sequences for(i = sstart; i < send; i ++) { d[i] = pivot; } lpivot = sn[oldstart]; gpivot = sn[oldend-1]; if (oldstart < sstart) { lpivot = median(lpivot,sn[(oldstart+sstart) >> 1], sn[sstart-1]); } if (send < oldend) { gpivot = median(sn[send],sn[(oldend+send) >> 1], gpivot); } work_record<T>* result1 = result + 2*blockid; work_record<T>* result2 = result1 + 1; // change the direction of the sort. direction ^= 1; work_record<T> r1 = {oldstart, sstart, lpivot, direction}; *result1 = r1; work_record<T> r2 = {send, oldend, gpivot, direction}; *result2 = r2; } } } // record to push start of the sequence, end of the sequence and direction of sorting on internal stack typedef struct workstack_record { uint start; uint end; uint direction; } workstack_record; #define PUSH(START, END) if (localid == 0) { \ ++workstack_pointer; \ workstack_record wr = { (START), (END), direction ^ 1 }; \ workstack[workstack_pointer] = wr; \ } \ __syncthreads(); //--------------------------------------------------------------------------------------- // Kernel implements the last stage of GPU-Quicksort, when all the subsequences are small // enough to be processed in __shared__ memory. It uses similar algorithm to gqsort_kernel to // move items around the pivot and then switches to bitonic sort for sequences in // the range [1, SORT_THRESHOLD] // // d - input array // dn - scratch array of the same size as the input array // seqs - array of records to be sorted in a __shared__ memory, one sequence per work group. 
//--------------------------------------------------------------------------------------- template <class T> __global__ void lqsort_kernel(T* d, T* dn, work_record<T>* seqs) { const uint blockid = blockIdx.x; const uint localid = threadIdx.x; // workstack: stores the start and end of the sequences, direction of sort // If the sequence is less that SORT_THRESHOLD, it gets sorted. // It will only be pushed on the stack if it greater than the SORT_THRESHOLD. // Note, that the sum of ltsum + gtsum is less than QUICKSORT_BLOCK_SIZE. // The total sum of the length of records on the stack cannot exceed QUICKSORT_BLOCK_SIZE, // but each individual record should be greater than SORT_THRESHOLD, so the maximum length // of the stack is QUICKSORT_BLOCK_SIZE/SORT_THRESHOLD - in the case of BDW GT2 the length // of the stack is 2 :) __shared__ workstack_record workstack[QUICKSORT_BLOCK_SIZE/SORT_THRESHOLD]; __shared__ int workstack_pointer; __shared__ T mys[QUICKSORT_BLOCK_SIZE], mysn[QUICKSORT_BLOCK_SIZE], temp[SORT_THRESHOLD]; __shared__ T *s, *sn; __shared__ uint ltsum, gtsum; __shared__ uint lt[LQSORT_LOCAL_WORKGROUP_SIZE+1], gt[LQSORT_LOCAL_WORKGROUP_SIZE+1]; uint i, tmp, ltp, gtp; work_record<T> block = seqs[blockid]; const uint d_offset = block.start; uint start = 0; uint end = block.end - d_offset; uint direction = 1; // which direction to sort // initialize workstack and workstack_pointer: push the initial sequence on the stack if (localid == 0) { workstack_pointer = 0; // beginning of the stack workstack_record wr = { start, end, direction }; workstack[0] = wr; } // copy block of data to be sorted by one workgroup into __shared__ memory // note that indeces of __shared__ data go from 0 to end-start-1 if (block.direction == 1) { for (i = localid; i < end; i += LQSORT_LOCAL_WORKGROUP_SIZE) { mys[i] = d[i+d_offset]; } } else { for (i = localid; i < end; i += LQSORT_LOCAL_WORKGROUP_SIZE) { mys[i] = dn[i+d_offset]; } } __syncthreads(); while (workstack_pointer >= 0) { // pop up the stack workstack_record wr = workstack[workstack_pointer]; start = wr.start; end = wr.end; direction = wr.direction; __syncthreads(); if (localid == 0) { --workstack_pointer; ltsum = gtsum = 0; } if (direction == 1) { s = mys; sn = mysn; } else { s = mysn; sn = mys; } // Set thread __shared__ counters to zero lt[localid] = gt[localid] = 0; ltp = gtp = 0; __syncthreads(); // Pick a pivot uint pivot = s[start]; if (start < end) { pivot = median(pivot, s[(start+end) >> 1], s[end-1]); } // Align work item accesses for coalesced reads. // Go through data... for(i = start + localid; i < end; i += LQSORT_LOCAL_WORKGROUP_SIZE) { tmp = s[i]; // counting elements that are smaller ... if (tmp < pivot) ltp++; // or larger compared to the pivot. 
if (tmp > pivot) gtp++; } lt[localid] = ltp; gt[localid] = gtp; __syncthreads(); // calculate cumulative sums uint n; for(i = 1; i < LQSORT_LOCAL_WORKGROUP_SIZE; i <<= 1) { n = 2*i - 1; if ((localid & n) == n) { lt[localid] += lt[localid-i]; gt[localid] += gt[localid-i]; } __syncthreads(); } if ((localid & n) == n) { lt[LQSORT_LOCAL_WORKGROUP_SIZE] = ltsum = lt[localid]; gt[LQSORT_LOCAL_WORKGROUP_SIZE] = gtsum = gt[localid]; lt[localid] = 0; gt[localid] = 0; } for(i = LQSORT_LOCAL_WORKGROUP_SIZE/2; i >= 1; i >>= 1) { n = 2*i - 1; if ((localid & n) == n) { plus_prescan(&lt[localid - i], &lt[localid]); plus_prescan(&gt[localid - i], &gt[localid]); } __syncthreads(); } // Allocate locations for work items uint lfrom = start + lt[localid]; uint gfrom = end - gt[localid+1]; // go thru data again writing elements to their correct position for (i = start + localid; i < end; i += LQSORT_LOCAL_WORKGROUP_SIZE) { tmp = s[i]; // increment counts if (tmp < pivot) sn[lfrom++] = tmp; if (tmp > pivot) sn[gfrom++] = tmp; } __syncthreads(); // Store the pivot value between the new sequences for (i = start + ltsum + localid;i < end - gtsum; i += LQSORT_LOCAL_WORKGROUP_SIZE) { d[i+d_offset] = pivot; } __syncthreads(); // if the sequence is shorter than SORT_THRESHOLD // sort it using an alternative sort and place result in d if (ltsum <= SORT_THRESHOLD) { sort_threshold(sn, d+d_offset, start, start + ltsum, temp, localid); } else { PUSH(start, start + ltsum); } if (gtsum <= SORT_THRESHOLD) { sort_threshold(sn, d+d_offset, end - gtsum, end, temp, localid); } else { PUSH(end - gtsum, end); } } }
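/*
 * The lt[]/gt[] bookkeeping in gqsort_kernel and lqsort_kernel is a
 * Blelloch-style scan: an up-sweep builds partial sums in place, the top
 * element is copied one slot past the end (ltsum / gtsum) and cleared,
 * and a down-sweep built on plus_prescan turns the tree into an exclusive
 * prefix sum.  The sequential sketch below shows the same access pattern
 * for a power-of-two length such as LQSORT_LOCAL_WORKGROUP_SIZE; it is an
 * illustration of the scan, not code used by the kernels.
 */
template <class T>
void blelloch_exclusive_scan(T *data, unsigned int n_pow2, T &total)
{
    // up-sweep: data[k] accumulates the sum of its subtree
    for (unsigned int stride = 1; stride < n_pow2; stride <<= 1)
        for (unsigned int k = 2 * stride - 1; k < n_pow2; k += 2 * stride)
            data[k] += data[k - stride];

    total = data[n_pow2 - 1]; // grand total, parked in lt[SIZE] / gt[SIZE] by the kernels
    data[n_pow2 - 1] = T(0);

    // down-sweep: the swap-and-add of plus_prescan yields exclusive prefix sums
    for (unsigned int stride = n_pow2 / 2; stride >= 1; stride >>= 1)
        for (unsigned int k = 2 * stride - 1; k < n_pow2; k += 2 * stride)
        {
            T left = data[k - stride];
            data[k - stride] = data[k];
            data[k] += left;
        }
}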
//#define DEBUG //#define DEBUGX namespace amgx { // parameter is used as test name DECLARE_UNITTEST_BEGIN(AmgLevelsReuse); struct TestCase { std::string config_string; bool insert_diagonal; bool use_pre_setup; TestCase(): use_pre_setup(true), insert_diagonal(false){} }; std::vector<double> test_main(TestCase &test_case) { bool insert_diagonal = test_case.insert_diagonal; // Create matrix arrays from file Matrix_h Atemp; Vector_h btemp, xtemp, x_final; // Make the matrix generatePoissonForTest(Atemp, 1, 0, 27, 40, 40, 40); btemp.resize(Atemp.get_num_rows()); for (auto& val: btemp) val = 1.0; xtemp.resize(Atemp.get_num_rows()); bool hasDiag = Atemp.hasProps(DIAG); // Create row_offsets, col_indices, off_dia_values and dia_values arrays from the matrix just read int num_rows = Atemp.get_num_rows(); int num_nz = Atemp.get_num_nz(); int bsize_x = Atemp.get_block_dimx(); int bsize_y = Atemp.get_block_dimy(); int bsize = bsize_x * bsize_y; xtemp.resize(num_rows * bsize_y, 1.); std::vector<int> row_offsets(num_rows + 1); std::vector<int> col_indices(num_nz); std::vector<double> off_dia_values(num_nz * bsize); std::vector<double> dia_values; if (hasDiag) { dia_values.resize(num_rows * bsize); } std::vector<double> x_vec(num_rows * bsize_y); std::vector<double> b_vec(num_rows * bsize_x); // Fill vectors int *raw_row_ptr = Atemp.row_offsets.raw(); int *raw_col_ptr = Atemp.col_indices.raw(); double *raw_val_ptr = Atemp.values.raw(); // Row offsets for (int i = 0; i < num_rows + 1; i++) { row_offsets[i] = raw_row_ptr[i]; } // Column indices for (int i = 0; i < num_nz; i++) { col_indices[i] = raw_col_ptr[i]; } // Off-diagonal values for (int i = 0; i < num_nz; i++) for (int j = 0; j < bsize; j++) { off_dia_values[i * bsize + j] = raw_val_ptr[i * bsize + j]; } // Diagonal values if (hasDiag) { for (int i = 0; i < num_rows; i++) { for (int j = 0; j < bsize; j++) { dia_values[i * bsize + j] = raw_val_ptr[num_nz * bsize + i * bsize + j]; } } } srand(1); // Random RHS double *b_raw_ptr = btemp.raw(); for (int i = 0; i < num_rows; i++) for (int j = 0; j < bsize_x; j++) { b_vec[i * bsize_x + j] = b_raw_ptr[i * bsize_x + j] + (1.0 * rand() / RAND_MAX); } //b_vec[i*bsize_x+j] = b_raw_ptr[i*bsize_x+j]; // Random xvector srand(2); double *x_raw_ptr = xtemp.raw(); for (int i = 0; i < num_rows; i++) for (int j = 0; j < bsize_y; j++) { x_vec[i * bsize_y + j] = x_raw_ptr[i * bsize_y + j] + (1.0 * rand() / RAND_MAX); } //x_vec[i*bsize_y+j] = x_raw_ptr[i*bsize_y+j]; std::vector<double> x_vec_col = x_vec; std::string option_string = test_case.config_string; // Insert diagonal if (insert_diagonal && hasDiag) { std::vector<int> new_col_indices( (num_nz + num_rows) ); std::vector<double> new_off_dia_values( (num_nz + num_rows)*bsize ); int icount = 0; for (int i = 0; i < num_rows; i++) { for (int j = row_offsets[i]; j < row_offsets[i + 1]; j++) { int col = col_indices[j]; new_col_indices[icount] = col; for (int k = 0; k < bsize; k++) { new_off_dia_values[icount * bsize + k] = off_dia_values[j * bsize + k]; } icount++; } // Insert diagonal new_col_indices[icount] = i; for (int k = 0; k < bsize; k++) { new_off_dia_values[icount * bsize + k] = dia_values[i * bsize + k]; } icount++; } // increment row_offsets for (int i = 0; i < num_rows + 1; i++) { row_offsets[i] += i; } off_dia_values = new_off_dia_values; col_indices = new_col_indices; dia_values.resize(0); num_nz += num_rows; } AMGX_config_handle rsrc_cfg = NULL; UNITTEST_ASSERT_EQUAL(AMGX_config_create(&rsrc_cfg, ""), AMGX_OK); // Choosing device 0 int device = 0; 
AMGX_resources_handle rsrc = NULL; UNITTEST_ASSERT_EQUAL(AMGX_resources_create(&rsrc, rsrc_cfg, NULL, 1, &device), AMGX_OK); AMGX_config_handle cfg; UNITTEST_ASSERT_EQUAL(AMGX_config_create( &cfg, option_string.c_str()), AMGX_OK); AMGX_matrix_handle matrix; UNITTEST_ASSERT_EQUAL(AMGX_matrix_create( &matrix, rsrc, AMGX_mode_dDDI ), AMGX_OK); AMGX_solver_handle solver; UNITTEST_ASSERT_EQUAL(AMGX_solver_create( &solver, rsrc, AMGX_mode_dDDI, cfg), AMGX_OK); AMGX_vector_handle b, x; UNITTEST_ASSERT_EQUAL(AMGX_vector_create( &b, rsrc, AMGX_mode_dDDI ), AMGX_OK); UNITTEST_ASSERT_EQUAL(AMGX_vector_create( &x, rsrc, AMGX_mode_dDDI ), AMGX_OK); int num_setup_iters = 2; for (int i_setup = 0; i_setup < num_setup_iters; i_setup++) { #ifdef DEBUG std::cout << "outer iteration #" << i_setup << std::endl; #endif // Upload the new matrix and call setup if (i_setup == 0) { if (dia_values.size() != 0) { UNITTEST_ASSERT_EQUAL(AMGX_matrix_upload_all(matrix, num_rows, num_nz, bsize_x, bsize_y, &row_offsets[0], &col_indices[0], &off_dia_values[0], &dia_values[0]), AMGX_OK); } else { UNITTEST_ASSERT_EQUAL(AMGX_matrix_upload_all(matrix, num_rows, num_nz, bsize_x, bsize_y, &row_offsets[0], &col_indices[0], &off_dia_values[0], NULL), AMGX_OK); } UNITTEST_ASSERT_EQUAL(AMGX_solver_setup( solver, matrix ), AMGX_OK); } else { // Perturb the matrix /* for (int i=0;i<num_nz;i++) for (int j=0;j<bsize;j++) off_dia_values[i*bsize+j] += .001*i_setup;*/ //// perturb the diagonal if (hasDiag) { for (int i = 0; i < num_rows; i++) { for (int j = 0; j < bsize; j++) { dia_values[i * bsize + j] += .001 * i_setup; } } } if (dia_values.size() != 0) { UNITTEST_ASSERT_EQUAL(AMGX_matrix_replace_coefficients(matrix, num_rows, num_nz, &off_dia_values[0], &dia_values[0]), AMGX_OK); } else { UNITTEST_ASSERT_EQUAL(AMGX_matrix_replace_coefficients(matrix, num_rows, num_nz, &off_dia_values[0], NULL), AMGX_OK); } if (test_case.use_pre_setup) { UNITTEST_ASSERT_EQUAL(AMGX_solver_resetup( solver, matrix ), AMGX_OK); } else { UNITTEST_ASSERT_EQUAL(AMGX_solver_setup( solver, matrix ), AMGX_OK); } } UNITTEST_ASSERT_EQUAL(AMGX_vector_upload( b, num_rows, bsize_y, &b_vec[0] ), AMGX_OK); UNITTEST_ASSERT_EQUAL(AMGX_vector_upload( x, num_rows, bsize_x, &x_vec[0] ), AMGX_OK); UNITTEST_ASSERT_EQUAL(AMGX_solver_solve( solver, b, x ), AMGX_OK); UNITTEST_ASSERT_EQUAL(AMGX_vector_download( x, &x_vec[0] ), AMGX_OK); } #ifdef DEBUGX std::cout << "final x_vec" << std::endl; for (int i = 0; i < x_vec.size(); i++) { std::cout << i << " " << x_vec[i] << std::endl; } #endif UNITTEST_ASSERT_EQUAL(AMGX_solver_destroy( solver ), AMGX_OK); UNITTEST_ASSERT_EQUAL(AMGX_matrix_destroy( matrix ), AMGX_OK); UNITTEST_ASSERT_EQUAL(AMGX_vector_destroy( b ), AMGX_OK); UNITTEST_ASSERT_EQUAL(AMGX_vector_destroy( x ), AMGX_OK); UNITTEST_ASSERT_EQUAL(AMGX_config_destroy( cfg ), AMGX_OK); UNITTEST_ASSERT_EQUAL(AMGX_config_destroy( rsrc_cfg ), AMGX_OK); UNITTEST_ASSERT_EQUAL(AMGX_resources_destroy( rsrc ), AMGX_OK); return x_vec; } void run() { SignalHandler::hook(); AMGX_finalize_plugins(); AMGX_finalize(); UnitTest::amgx_intialized = false; std::vector<TestCase> test_cases; TestCase temp_case; std::string base_string; base_string = "config_version=2, "; base_string += "solver(main_solver)=AMG, "; base_string += "main_solver:algorithm=AGGREGATION, "; base_string += "main_solver:coarseAgenerator=LOW_DEG,"; base_string += "main_solver:coloring_level=1,"; base_string += "main_solver:convergence=RELATIVE_MAX,"; base_string += "main_solver:cycle=V,"; base_string += 
"main_solver:matrix_coloring_scheme=MIN_MAX,"; base_string += "main_solver:max_levels=21,"; base_string += "main_solver:norm=L1,"; base_string += "main_solver:postsweeps=3,"; base_string += "main_solver:presweeps=0,"; base_string += "main_solver:selector=SIZE_2,"; base_string += "main_solver:smoother=BLOCK_JACOBI,"; base_string += "main_solver:tolerance=0.1,"; temp_case.insert_diagonal = true; int max_reuse_levels = 10; for (int i = 0; i < max_reuse_levels; i++) { std::ostringstream config_string; config_string << base_string << ", main_solver:structure_reuse_levels=" << i << ", "; temp_case.config_string = config_string.str(); temp_case.use_pre_setup = false; test_cases.push_back(temp_case); } for (int i = 0; i < test_cases.size(); i++) { test_cases[i].config_string += ","; test_cases[i].config_string += "main_solver:max_iters=2,"; test_cases[i].config_string += "main_solver:monitor_residual=1,"; test_cases[i].config_string += "determinism_flag=1,"; test_cases[i].config_string += "max_uncolored_percentage=0.,"; test_cases[i].config_string += "main_solver:store_res_history=1,"; //test_cases[i].config_string += "main_solver:print_solve_stats=1,"; //test_cases[i].config_string += "main_solver:print_grid_stats=1,"; test_cases[i].config_string += "main_solver:obtain_timings=1"; } std::vector<double> x_ref; std::vector<double> x; for (int i = 0; i < test_cases.size(); i++) { #ifdef DEBUG std::stringstream fail_msg; fail_msg << std::endl << "structure_reuse_levels = " << i << std::endl; std::cout << fail_msg.str() << std::endl; #endif AMGX_initialize(); AMGX_initialize_plugins(); if (i == 0) { x_ref = test_main(test_cases[i]); } else { x = test_main(test_cases[i]); std::stringstream fail_msg; fail_msg << "Different result for test_case, " << std::endl; fail_msg << " config string = " << test_cases[i].config_string << std::endl;; fail_msg << " use pre_setup = " << test_cases[i].use_pre_setup << std::endl; this->PrintOnFail(fail_msg.str().c_str()); double zero = 1e-12; for (int i = 0; i < x.size(); i++) { if (abs(x[i]) > zero) { UNITTEST_ASSERT_EQUAL_TOL(x[i], x_ref[i], 1e-8); } else if (abs(x[i]) < zero && abs(x_ref[i]) < zero) { UNITTEST_ASSERT_TRUE(true); } } } AMGX_finalize_plugins(); AMGX_finalize(); } AMGX_initialize(); AMGX_initialize_plugins(); UnitTest::amgx_intialized = true; } DECLARE_UNITTEST_END(AmgLevelsReuse); // if you want to be able run this test for all available configs you can write this: //#define AMGX_CASE_LINE(CASE) TemplateTest <TemplateMode<CASE>::Type> TemplateTest_##CASE; // AMGX_FORALL_BUILDS(AMGX_CASE_LINE) //#undef AMGX_CASE_LINE // or run for all device configs //#define AMGX_CASE_LINE(CASE) TemplateTest <TemplateMode<CASE>::Type> TemplateTest_##CASE; // AMGX_FORALL_BUILDS_DEVICE(AMGX_CASE_LINE) //#undef AMGX_CASE_LINE AmgLevelsReuse <TemplateMode<AMGX_mode_dDDI>::Type> AmgLevelsReuse_mode_dDDI; // or you can specify several desired configs //TemplateTest <TemplateMode<AMGX_mode_hDFI>::Type> TemplateTest_hDFI; //TemplateTest <TemplateMode<AMGX_mode_dDFI>::Type> TemplateTest_dDFI; }
//variable definition #define F 100 #define TILE_SIZE F/10 #define SCAN_BATCH 30 #define THETA_BATCH 3 #define X_BATCH 240 #define ITERS 10 #define M 50082603 #define N 39780 #define NNZ 3101144313 #define NNZ_TEST 344573330 //0.05 when use both "full" kernels #define LAMBDA 0.048 //hardware specific #define GPU_COUNT 4 #define DEVICEID 0 // the anchor device //debug option to save model //#define CUMF_SAVE_MODEL //#define CUMF_TT_FP16 using namespace std; void saveDeviceFloatArrayToFile(string fileName, int size, float* d_array){ float* h_array; cudacall(cudaMallocHost( (void** ) &h_array, size * sizeof(h_array[0])) ); cudacall(cudaMemcpy(h_array, d_array, size * sizeof(h_array[0]),cudaMemcpyDeviceToHost)); FILE * outfile = fopen(fileName.c_str(), "wb"); fwrite(h_array, sizeof(float), size, outfile); fclose(outfile); cudaFreeHost(h_array); } __global__ void __launch_bounds__(64, 6) get_hermitian100_tt_fp16(const int batch_offset, half2* tt, const int* csrRowIndex, const int* csrColIndex, const float lambda, const int m, const float* __restrict__ thetaT) { extern __shared__ float2 thetaTemp[]; int row = blockIdx.x + batch_offset; if (row < m) { //this block needs to handle end - start thetaT columns int start = csrRowIndex[row]; int end = csrRowIndex[row + 1]; //slide through [start, end] by window size SCAN_BATCH int iterations = (end - start - 1)/SCAN_BATCH + 1; float temp0= 0, temp1= 0, temp2= 0, temp3= 0, temp4= 0, temp5= 0, temp6= 0, temp7= 0, temp8= 0, temp9 = 0; float temp10= 0, temp11= 0, temp12= 0, temp13= 0, temp14= 0, temp15= 0, temp16= 0, temp17= 0, temp18= 0, temp19 = 0; float temp20= 0, temp21= 0, temp22= 0, temp23= 0, temp24= 0, temp25= 0, temp26= 0, temp27= 0, temp28= 0, temp29 = 0; float temp30= 0, temp31= 0, temp32= 0, temp33= 0, temp34= 0, temp35= 0, temp36= 0, temp37= 0, temp38= 0, temp39 = 0; float temp40= 0, temp41= 0, temp42= 0, temp43= 0, temp44= 0, temp45= 0, temp46= 0, temp47= 0, temp48= 0, temp49 = 0; float temp50= 0, temp51= 0, temp52= 0, temp53= 0, temp54= 0, temp55= 0, temp56= 0, temp57= 0, temp58= 0, temp59 = 0; float temp60= 0, temp61= 0, temp62= 0, temp63= 0, temp64= 0, temp65= 0, temp66= 0, temp67= 0, temp68= 0, temp69 = 0; float temp70= 0, temp71= 0, temp72= 0, temp73= 0, temp74= 0, temp75= 0, temp76= 0, temp77= 0, temp78= 0, temp79 = 0; float temp80= 0, temp81= 0, temp82= 0, temp83= 0, temp84= 0, temp85= 0, temp86= 0, temp87= 0, temp88= 0, temp89 = 0; float temp90= 0, temp91= 0, temp92= 0, temp93= 0, temp94= 0, temp95= 0, temp96= 0, temp97= 0, temp98= 0, temp99 = 0; int tile_x = 0; int tile_y = 0; int tile = F/10; for ( int i = 0; i < 10; i++){ int end = ((20-i)*(i+1))/2; if(threadIdx.x < end){ tile_x = i * tile; tile_y = (10 + threadIdx.x - end) * tile; break; } } //iteration: copy gmem-->smem; aggregate smem-->register for (int iter = 0; iter < iterations; iter ++){ float2 theta; //copy texture --> smem, and sync /* if(threadIdx.x < SCAN_BATCH){ if(iter*SCAN_BATCH + threadIdx.x < end - start){ for (int k = 0; k < F; k += 2){ theta.x = tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + threadIdx.x] + k); theta.y = tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + threadIdx.x] + k+1); thetaTemp[threadIdx.x * F/2 + k/2] = theta; } } //must be the last iteration; no need to check //not enough theta to copy, set zero else memset(&thetaTemp[threadIdx.x*F/2], 0, F*sizeof(float)); } */ //two layers: warp divergence unless we split at 32 //require 32 >= SCAN_BATCH if(threadIdx.x < 2*32 ){ //int index = threadIdx.x; int index = 
threadIdx.x - (threadIdx.x/32)*32; //0 to 31; if(index < SCAN_BATCH){ if(iter*SCAN_BATCH + index < end - start){ //for (int k = 50*(threadIdx.x/32); k < 50*(threadIdx.x/32) + 50; k += 2){ //IMPORTANT: for loop has constant and identical start and end if(threadIdx.x < 32){ for (int k = 0; k < 50; k += 2){ theta.x = __ldg(&thetaT[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k]); theta.y = __ldg(&thetaT[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k+1]); thetaTemp[index * F/2 + k/2] = theta; } } else { for (int k = 0; k < 50; k += 2){ theta.x = __ldg(&thetaT[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k + 50]); theta.y = __ldg(&thetaT[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k + 51]); thetaTemp[index * F/2 + k/2 + 25] = theta; } } } //must be the last iteration; no need to check //not enough theta to copy, set zero else memset(&thetaTemp[index*F/2], 0, F*sizeof(float)); } } /* //issue: not coalesced access to csrColIndex if(threadIdx.x < F && threadIdx.x%2 == 0){ for(int k = 0; k< SCAN_BATCH; k++){ if(iter*SCAN_BATCH + k < end - start){ theta.x = tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + k] + threadIdx.x); theta.y = tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + k] + threadIdx.x +1); thetaTemp[k * F/2 + threadIdx.x/2] = theta; } //must be the last iteration; no need to check //not enough theta to copy, set zero else memset(&thetaTemp[k*F/2 + threadIdx.x/2], 0, 2*sizeof(float)); } } */ /* int layers = blockDim.x/SCAN_BATCH; //100/30 = 3 //int height = blockDim.x/layers; //30 int y = threadIdx.x/SCAN_BATCH;//0,1,2,3; y==3 is not viable //min(y, (layers-1)) * height int y_start = y * 30;//0-29:0;30-59:30;60-89:60 int y_end = y_start + 30; //0-29:30;30-59:60;60-89:90 if(y >= layers - 1) y_end = F; //60-89:100 if(threadIdx.x - y_start < SCAN_BATCH){ if(iter*SCAN_BATCH + (threadIdx.x - y_start) < end - start){ for (int k = y_start; k < y_end; k += 2){ theta.x = tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + (threadIdx.x - y_start)] + k); theta.y = tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + (threadIdx.x - y_start)] + k+1); thetaTemp[(threadIdx.x - y_start)* F/2 + k/2] = theta; } } //must be the last iteration; no need to check //not enough theta to copy, set zero else memset(&thetaTemp[(threadIdx.x - y_start)*F/2 + y_start/2], 0, (y_end - y_start)*sizeof(float)); } */ __syncthreads(); //tile: 10*10 if(threadIdx.x < 55 ){ for(int k = 0; k < SCAN_BATCH; k++){ accumulate_in_registers(); } } } //end of iteration in copying from smem and aggregating in register __syncthreads(); #ifdef DEBUG //if(threadIdx.x==0) // printf("***temp 0~9: %f %f %f %f %f %f %f %f %f %f\n", temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7, temp8, temp9); #endif if(threadIdx.x < 55 ){ //weighted-lambda regularization if(tile_x == tile_y){ float temp = (end - start) * lambda; temp0 += temp; temp11 += temp; temp22 += temp; temp33 += temp; temp44 += temp; temp55 += temp; temp66 += temp; temp77 += temp; temp88 += temp; temp99 += temp; } //copy output to gmem int index = blockIdx.x*F*F/2; //fill_lower_half_from_registers(); fill_lower_half_from_registers_fp16(); //symmetric if(tile_x!=tile_y){ //fill_upper_half_from_registers(); fill_upper_half_from_registers_fp16(); } } } } __global__ void __launch_bounds__(64, 6) get_hermitian100(const int batch_offset, float* tt, const int* csrRowIndex, const int* csrColIndex, const float lambda, const int m, const float* __restrict__ thetaT) { extern __shared__ float2 
thetaTemp[]; int row = blockIdx.x + batch_offset; if (row < m) { //this block needs to handle end - start thetaT columns int start = csrRowIndex[row]; int end = csrRowIndex[row + 1]; //slide through [start, end] by window size SCAN_BATCH int iterations = (end - start - 1)/SCAN_BATCH + 1; float temp0= 0, temp1= 0, temp2= 0, temp3= 0, temp4= 0, temp5= 0, temp6= 0, temp7= 0, temp8= 0, temp9 = 0; float temp10= 0, temp11= 0, temp12= 0, temp13= 0, temp14= 0, temp15= 0, temp16= 0, temp17= 0, temp18= 0, temp19 = 0; float temp20= 0, temp21= 0, temp22= 0, temp23= 0, temp24= 0, temp25= 0, temp26= 0, temp27= 0, temp28= 0, temp29 = 0; float temp30= 0, temp31= 0, temp32= 0, temp33= 0, temp34= 0, temp35= 0, temp36= 0, temp37= 0, temp38= 0, temp39 = 0; float temp40= 0, temp41= 0, temp42= 0, temp43= 0, temp44= 0, temp45= 0, temp46= 0, temp47= 0, temp48= 0, temp49 = 0; float temp50= 0, temp51= 0, temp52= 0, temp53= 0, temp54= 0, temp55= 0, temp56= 0, temp57= 0, temp58= 0, temp59 = 0; float temp60= 0, temp61= 0, temp62= 0, temp63= 0, temp64= 0, temp65= 0, temp66= 0, temp67= 0, temp68= 0, temp69 = 0; float temp70= 0, temp71= 0, temp72= 0, temp73= 0, temp74= 0, temp75= 0, temp76= 0, temp77= 0, temp78= 0, temp79 = 0; float temp80= 0, temp81= 0, temp82= 0, temp83= 0, temp84= 0, temp85= 0, temp86= 0, temp87= 0, temp88= 0, temp89 = 0; float temp90= 0, temp91= 0, temp92= 0, temp93= 0, temp94= 0, temp95= 0, temp96= 0, temp97= 0, temp98= 0, temp99 = 0; int tile_x = 0; int tile_y = 0; int tile = F/10; for ( int i = 0; i < 10; i++){ int end = ((20-i)*(i+1))/2; if(threadIdx.x < end){ tile_x = i * tile; tile_y = (10 + threadIdx.x - end) * tile; break; } } //iteration: copy gmem-->smem; aggregate smem-->register for (int iter = 0; iter < iterations; iter ++){ float2 theta; //copy texture --> smem, and sync /* if(threadIdx.x < SCAN_BATCH){ if(iter*SCAN_BATCH + threadIdx.x < end - start){ for (int k = 0; k < F; k += 2){ theta.x = tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + threadIdx.x] + k); theta.y = tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + threadIdx.x] + k+1); thetaTemp[threadIdx.x * F/2 + k/2] = theta; } } //must be the last iteration; no need to check //not enough theta to copy, set zero else memset(&thetaTemp[threadIdx.x*F/2], 0, F*sizeof(float)); } */ //two layers: warp divergence unless we split at 32 //require 32 >= SCAN_BATCH if(threadIdx.x < 2*32 ){ //int index = threadIdx.x; int index = threadIdx.x - (threadIdx.x/32)*32; //0 to 31; if(index < SCAN_BATCH){ if(iter*SCAN_BATCH + index < end - start){ //for (int k = 50*(threadIdx.x/32); k < 50*(threadIdx.x/32) + 50; k += 2){ //IMPORTANT: for loop has constant and identical start and end if(threadIdx.x < 32){ for (int k = 0; k < 50; k += 2){ theta.x = __ldg(&thetaT[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k]); theta.y = __ldg(&thetaT[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k+1]); thetaTemp[index * F/2 + k/2] = theta; } } else { for (int k = 0; k < 50; k += 2){ theta.x = __ldg(&thetaT[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k + 50]); theta.y = __ldg(&thetaT[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k + 51]); thetaTemp[index * F/2 + k/2 + 25] = theta; } } } //must be the last iteration; no need to check //not enough theta to copy, set zero else memset(&thetaTemp[index*F/2], 0, F*sizeof(float)); } } /* //issue: not coalesced access to csrColIndex if(threadIdx.x < F && threadIdx.x%2 == 0){ for(int k = 0; k< SCAN_BATCH; k++){ if(iter*SCAN_BATCH + k < end - start){ theta.x = 
tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + k] + threadIdx.x);
					theta.y = tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + k] + threadIdx.x +1);
					thetaTemp[k * F/2 + threadIdx.x/2] = theta;
				}
				//must be the last iteration; no need to check
				//not enough theta to copy, set zero
				else
					memset(&thetaTemp[k*F/2 + threadIdx.x/2], 0, 2*sizeof(float));
			}
		}
		*/
		/*
		int layers = blockDim.x/SCAN_BATCH;	//100/30 = 3
		//int height = blockDim.x/layers;	//30
		int y = threadIdx.x/SCAN_BATCH;	//0,1,2,3; y==3 is not viable
		//min(y, (layers-1)) * height
		int y_start = y * 30;	//0-29:0; 30-59:30; 60-89:60
		int y_end = y_start + 30;	//0-29:30; 30-59:60; 60-89:90
		if(y >= layers - 1) y_end = F;	//60-89:100
		if(threadIdx.x - y_start < SCAN_BATCH){
			if(iter*SCAN_BATCH + (threadIdx.x - y_start) < end - start){
				for (int k = y_start; k < y_end; k += 2){
					theta.x = tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + (threadIdx.x - y_start)] + k);
					theta.y = tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + (threadIdx.x - y_start)] + k+1);
					thetaTemp[(threadIdx.x - y_start)* F/2 + k/2] = theta;
				}
			}
			//must be the last iteration; no need to check
			//not enough theta to copy, set zero
			else
				memset(&thetaTemp[(threadIdx.x - y_start)*F/2 + y_start/2], 0, (y_end - y_start)*sizeof(float));
		}
		*/
		__syncthreads();

		//tile: 10*10
		if(threadIdx.x < 55){
			for(int k = 0; k < SCAN_BATCH; k++){
				accumulate_in_registers();
			}
		}
	}
	//end of iteration in copying from smem and aggregating in register
	__syncthreads();
#ifdef DEBUG
	//if(threadIdx.x==0)
	//	printf("***temp 0~9: %f %f %f %f %f %f %f %f %f %f\n", temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7, temp8, temp9);
#endif
	if(threadIdx.x < 55){
		//copy output to gmem
		int index = blockIdx.x*F*F;
		fill_lower_half_from_registers();
		//symmetric
		if(tile_x != tile_y){
			fill_upper_half_from_registers();
		}
		//regularization
		if(tile_x == tile_y){
			for(int k = 0; k < tile; k++)
				tt[index + (tile_x + k)*(1 + F)] += (end - start) * lambda;
		}
	}
	}
}
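//---------------------------------------------------------------------------
// [Illustrative sketch added by the editor; not part of the original source.]
// get_hermitian100 declares its shared buffer as "extern __shared__", so the
// host has to supply the buffer size -- SCAN_BATCH * F/2 float2 elements --
// as the third launch parameter, and launch one 64-thread block per row of
// the current batch (matching __launch_bounds__(64, 6)). A minimal launch
// sketch; "rows_in_batch" and the pointer arguments are placeholders assumed
// to be prepared by the caller, not names taken from this file.
static void launch_get_hermitian100_sketch(int batch_offset, int rows_in_batch,
		float* tt, const int* csrRowIndex, const int* csrColIndex,
		float lambda, int m, const float* thetaT)
{
	const int threads = 64;                                   //block size fixed by __launch_bounds__(64, 6)
	const size_t smem = SCAN_BATCH * F / 2 * sizeof(float2);  //dynamic shared memory backing thetaTemp[]
	get_hermitian100<<<rows_in_batch, threads, smem>>>(batch_offset, tt,
			csrRowIndex, csrColIndex, lambda, m, thetaT);
}
//---------------------------------------------------------------------------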
//split a big csr into many sub-matrices by rows; the row ids of each
//sub-matrix need to be rebased to start at zero: inVal[i] = inVal[i] - inVal[0]
__global__ void zeroIndex(int * inVal, const unsigned int inVal_0, const int size) {
	int i = blockDim.x*blockIdx.x + threadIdx.x;
	if(i < size){
		inVal[i] = (unsigned)inVal[i] - inVal_0;
	}
}

texture<float> xTTexRef;
texture<float> thetaTTexRef;

__global__ void __launch_bounds__(100, 4)
updateThetaByBlock2pRegDsmemTile(float * xx, const int* cscRowIndex, const int* cscColIndex,
		const float lambda, const float * XT)
{
	__shared__ float2 xTemp[SCAN_BATCH * F/2];
	int col = blockIdx.x;
	if (col < N) {
		//this block needs to handle end - start XT columns
		int start = cscColIndex[col];
		int end = cscColIndex[col + 1];
		int iterations = (end - start -1)/SCAN_BATCH + 1;
		float temp0= 0, temp1= 0, temp2= 0, temp3= 0, temp4= 0, temp5= 0, temp6= 0, temp7= 0, temp8= 0, temp9 = 0;
		float temp10= 0, temp11= 0, temp12= 0, temp13= 0, temp14= 0, temp15= 0, temp16= 0, temp17= 0, temp18= 0, temp19 = 0;
		float temp20= 0, temp21= 0, temp22= 0, temp23= 0, temp24= 0, temp25= 0, temp26= 0, temp27= 0, temp28= 0, temp29 = 0;
		float temp30= 0, temp31= 0, temp32= 0, temp33= 0, temp34= 0, temp35= 0, temp36= 0, temp37= 0, temp38= 0, temp39 = 0;
		float temp40= 0, temp41= 0, temp42= 0, temp43= 0, temp44= 0, temp45= 0, temp46= 0, temp47= 0, temp48= 0, temp49 = 0;
		float temp50= 0, temp51= 0, temp52= 0, temp53= 0, temp54= 0, temp55= 0, temp56= 0, temp57= 0, temp58= 0, temp59 = 0;
		float temp60= 0, temp61= 0, temp62= 0, temp63= 0, temp64= 0, temp65= 0, temp66= 0, temp67= 0, temp68= 0, temp69 = 0;
		float temp70= 0, temp71= 0, temp72= 0, temp73= 0, temp74= 0, temp75= 0, temp76= 0, temp77= 0, temp78= 0, temp79 = 0;
		float temp80= 0, temp81= 0, temp82= 0, temp83= 0, temp84= 0, temp85= 0, temp86= 0, temp87= 0, temp88= 0, temp89 = 0;
		float temp90= 0, temp91= 0, temp92= 0, temp93= 0, temp94= 0, temp95= 0, temp96= 0, temp97= 0, temp98= 0, temp99 = 0;

		float2 x;
		int tile = F/10;
		int tile_x = (threadIdx.x/tile) * tile;	//start x of this tile
		int tile_y = (threadIdx.x%tile) * tile;	//start y of this tile
		for (int iter = 0; iter < iterations; iter ++){
			//copy texture --> smem, and sync
			if(threadIdx.x < SCAN_BATCH){
				if(iter*SCAN_BATCH + threadIdx.x < end - start){
					for (int k = 0; k < F; k += 2){
						x.x = XT[ F * cscRowIndex[start + iter*SCAN_BATCH + threadIdx.x] + k ];
						x.y = XT[ F * cscRowIndex[start + iter*SCAN_BATCH + threadIdx.x] + k+1 ];
						xTemp[threadIdx.x * F/2 + k/2] = x;
					}
				}
				//must be the last iteration; no need to check
				//not enough X to copy, set zero
				else
					memset(&xTemp[threadIdx.x*F/2], 0, F*sizeof(float));
			}
			__syncthreads();
			///////////////////////////////////////////////////////////////////////////////////////////////////////////
			//tile: 10*10
			for(int k = 0; k < SCAN_BATCH; k++){
				temp0 += xTemp[tile_x/2 + k*F/2].x * xTemp[tile_y/2 + k*F/2].x;
				temp1 += xTemp[tile_x/2 + k*F/2].x * xTemp[tile_y/2 + k*F/2].y;
				temp2 += xTemp[tile_x/2 + k*F/2].x * xTemp[tile_y/2 +1 + k*F/2].x;
				temp3 += xTemp[tile_x/2 + k*F/2].x * xTemp[tile_y/2 +1 + k*F/2].y;
				temp4 += xTemp[tile_x/2 + k*F/2].x * xTemp[tile_y/2 +2 + k*F/2].x;
				temp5 += xTemp[tile_x/2 + k*F/2].x * xTemp[tile_y/2 +2 + k*F/2].y;
				temp6 += xTemp[tile_x/2 + k*F/2].x * xTemp[tile_y/2 +3 + k*F/2].x;
				temp7 += xTemp[tile_x/2 + k*F/2].x * xTemp[tile_y/2 +3 + k*F/2].y;
				temp8 += xTemp[tile_x/2 + k*F/2].x * xTemp[tile_y/2 +4 + k*F/2].x;
				temp9 += xTemp[tile_x/2 + k*F/2].x * xTemp[tile_y/2 +4 + k*F/2].y;
				temp10 += xTemp[tile_x/2 + k*F/2].y * xTemp[tile_y/2 + k*F/2].x;
				temp11 += xTemp[tile_x/2 + k*F/2].y * xTemp[tile_y/2 + k*F/2].y;
				temp12 +=
xTemp[tile_x/2 + k*F/2].y * xTemp[tile_y/2 +1 + k*F/2].x; temp13 += xTemp[tile_x/2 + k*F/2].y * xTemp[tile_y/2 +1 + k*F/2].y; temp14 += xTemp[tile_x/2 + k*F/2].y * xTemp[tile_y/2 +2 + k*F/2].x; temp15 += xTemp[tile_x/2 + k*F/2].y * xTemp[tile_y/2 +2 + k*F/2].y; temp16 += xTemp[tile_x/2 + k*F/2].y * xTemp[tile_y/2 +3 + k*F/2].x; temp17 += xTemp[tile_x/2 + k*F/2].y * xTemp[tile_y/2 +3 + k*F/2].y; temp18 += xTemp[tile_x/2 + k*F/2].y * xTemp[tile_y/2 +4 + k*F/2].x; temp19 += xTemp[tile_x/2 + k*F/2].y * xTemp[tile_y/2 +4 + k*F/2].y; temp20 += xTemp[tile_x/2 +1 + k*F/2].x * xTemp[tile_y/2 + k*F/2].x; temp21 += xTemp[tile_x/2 +1 + k*F/2].x * xTemp[tile_y/2 + k*F/2].y; temp22 += xTemp[tile_x/2 +1 + k*F/2].x * xTemp[tile_y/2 +1 + k*F/2].x; temp23 += xTemp[tile_x/2 +1 + k*F/2].x * xTemp[tile_y/2 +1 + k*F/2].y; temp24 += xTemp[tile_x/2 +1 + k*F/2].x * xTemp[tile_y/2 +2 + k*F/2].x; temp25 += xTemp[tile_x/2 +1 + k*F/2].x * xTemp[tile_y/2 +2 + k*F/2].y; temp26 += xTemp[tile_x/2 +1 + k*F/2].x * xTemp[tile_y/2 +3 + k*F/2].x; temp27 += xTemp[tile_x/2 +1 + k*F/2].x * xTemp[tile_y/2 +3 + k*F/2].y; temp28 += xTemp[tile_x/2 +1 + k*F/2].x * xTemp[tile_y/2 +4 + k*F/2].x; temp29 += xTemp[tile_x/2 +1 + k*F/2].x * xTemp[tile_y/2 +4 + k*F/2].y; temp30 += xTemp[tile_x/2 +1 + k*F/2].y * xTemp[tile_y/2 + k*F/2].x; temp31 += xTemp[tile_x/2 +1 + k*F/2].y * xTemp[tile_y/2 + k*F/2].y; temp32 += xTemp[tile_x/2 +1 + k*F/2].y * xTemp[tile_y/2 +1 + k*F/2].x; temp33 += xTemp[tile_x/2 +1 + k*F/2].y * xTemp[tile_y/2 +1 + k*F/2].y; temp34 += xTemp[tile_x/2 +1 + k*F/2].y * xTemp[tile_y/2 +2 + k*F/2].x; temp35 += xTemp[tile_x/2 +1 + k*F/2].y * xTemp[tile_y/2 +2 + k*F/2].y; temp36 += xTemp[tile_x/2 +1 + k*F/2].y * xTemp[tile_y/2 +3 + k*F/2].x; temp37 += xTemp[tile_x/2 +1 + k*F/2].y * xTemp[tile_y/2 +3 + k*F/2].y; temp38 += xTemp[tile_x/2 +1 + k*F/2].y * xTemp[tile_y/2 +4 + k*F/2].x; temp39 += xTemp[tile_x/2 +1 + k*F/2].y * xTemp[tile_y/2 +4 + k*F/2].y; temp40 += xTemp[tile_x/2 +2 + k*F/2].x * xTemp[tile_y/2 + k*F/2].x; temp41 += xTemp[tile_x/2 +2 + k*F/2].x * xTemp[tile_y/2 + k*F/2].y; temp42 += xTemp[tile_x/2 +2 + k*F/2].x * xTemp[tile_y/2 +1 + k*F/2].x; temp43 += xTemp[tile_x/2 +2 + k*F/2].x * xTemp[tile_y/2 +1 + k*F/2].y; temp44 += xTemp[tile_x/2 +2 + k*F/2].x * xTemp[tile_y/2 +2 + k*F/2].x; temp45 += xTemp[tile_x/2 +2 + k*F/2].x * xTemp[tile_y/2 +2 + k*F/2].y; temp46 += xTemp[tile_x/2 +2 + k*F/2].x * xTemp[tile_y/2 +3 + k*F/2].x; temp47 += xTemp[tile_x/2 +2 + k*F/2].x * xTemp[tile_y/2 +3 + k*F/2].y; temp48 += xTemp[tile_x/2 +2 + k*F/2].x * xTemp[tile_y/2 +4 + k*F/2].x; temp49 += xTemp[tile_x/2 +2 + k*F/2].x * xTemp[tile_y/2 +4 + k*F/2].y; temp50 += xTemp[tile_x/2 +2 + k*F/2].y * xTemp[tile_y/2 + k*F/2].x; temp51 += xTemp[tile_x/2 +2 + k*F/2].y * xTemp[tile_y/2 + k*F/2].y; temp52 += xTemp[tile_x/2 +2 + k*F/2].y * xTemp[tile_y/2 +1 + k*F/2].x; temp53 += xTemp[tile_x/2 +2 + k*F/2].y * xTemp[tile_y/2 +1 + k*F/2].y; temp54 += xTemp[tile_x/2 +2 + k*F/2].y * xTemp[tile_y/2 +2 + k*F/2].x; temp55 += xTemp[tile_x/2 +2 + k*F/2].y * xTemp[tile_y/2 +2 + k*F/2].y; temp56 += xTemp[tile_x/2 +2 + k*F/2].y * xTemp[tile_y/2 +3 + k*F/2].x; temp57 += xTemp[tile_x/2 +2 + k*F/2].y * xTemp[tile_y/2 +3 + k*F/2].y; temp58 += xTemp[tile_x/2 +2 + k*F/2].y * xTemp[tile_y/2 +4 + k*F/2].x; temp59 += xTemp[tile_x/2 +2 + k*F/2].y * xTemp[tile_y/2 +4 + k*F/2].y; temp60 += xTemp[tile_x/2 +3 + k*F/2].x * xTemp[tile_y/2 + k*F/2].x; temp61 += xTemp[tile_x/2 +3 + k*F/2].x * xTemp[tile_y/2 + k*F/2].y; temp62 += xTemp[tile_x/2 +3 + k*F/2].x * xTemp[tile_y/2 +1 + 
k*F/2].x; temp63 += xTemp[tile_x/2 +3 + k*F/2].x * xTemp[tile_y/2 +1 + k*F/2].y; temp64 += xTemp[tile_x/2 +3 + k*F/2].x * xTemp[tile_y/2 +2 + k*F/2].x; temp65 += xTemp[tile_x/2 +3 + k*F/2].x * xTemp[tile_y/2 +2 + k*F/2].y; temp66 += xTemp[tile_x/2 +3 + k*F/2].x * xTemp[tile_y/2 +3 + k*F/2].x; temp67 += xTemp[tile_x/2 +3 + k*F/2].x * xTemp[tile_y/2 +3 + k*F/2].y; temp68 += xTemp[tile_x/2 +3 + k*F/2].x * xTemp[tile_y/2 +4 + k*F/2].x; temp69 += xTemp[tile_x/2 +3 + k*F/2].x * xTemp[tile_y/2 +4 + k*F/2].y; temp70 += xTemp[tile_x/2 +3 + k*F/2].y * xTemp[tile_y/2 + k*F/2].x; temp71 += xTemp[tile_x/2 +3 + k*F/2].y * xTemp[tile_y/2 + k*F/2].y; temp72 += xTemp[tile_x/2 +3 + k*F/2].y * xTemp[tile_y/2 +1 + k*F/2].x; temp73 += xTemp[tile_x/2 +3 + k*F/2].y * xTemp[tile_y/2 +1 + k*F/2].y; temp74 += xTemp[tile_x/2 +3 + k*F/2].y * xTemp[tile_y/2 +2 + k*F/2].x; temp75 += xTemp[tile_x/2 +3 + k*F/2].y * xTemp[tile_y/2 +2 + k*F/2].y; temp76 += xTemp[tile_x/2 +3 + k*F/2].y * xTemp[tile_y/2 +3 + k*F/2].x; temp77 += xTemp[tile_x/2 +3 + k*F/2].y * xTemp[tile_y/2 +3 + k*F/2].y; temp78 += xTemp[tile_x/2 +3 + k*F/2].y * xTemp[tile_y/2 +4 + k*F/2].x; temp79 += xTemp[tile_x/2 +3 + k*F/2].y * xTemp[tile_y/2 +4 + k*F/2].y; temp80 += xTemp[tile_x/2 +4 + k*F/2].x * xTemp[tile_y/2 + k*F/2].x; temp81 += xTemp[tile_x/2 +4 + k*F/2].x * xTemp[tile_y/2 + k*F/2].y; temp82 += xTemp[tile_x/2 +4 + k*F/2].x * xTemp[tile_y/2 +1 + k*F/2].x; temp83 += xTemp[tile_x/2 +4 + k*F/2].x * xTemp[tile_y/2 +1 + k*F/2].y; temp84 += xTemp[tile_x/2 +4 + k*F/2].x * xTemp[tile_y/2 +2 + k*F/2].x; temp85 += xTemp[tile_x/2 +4 + k*F/2].x * xTemp[tile_y/2 +2 + k*F/2].y; temp86 += xTemp[tile_x/2 +4 + k*F/2].x * xTemp[tile_y/2 +3 + k*F/2].x; temp87 += xTemp[tile_x/2 +4 + k*F/2].x * xTemp[tile_y/2 +3 + k*F/2].y; temp88 += xTemp[tile_x/2 +4 + k*F/2].x * xTemp[tile_y/2 +4 + k*F/2].x; temp89 += xTemp[tile_x/2 +4 + k*F/2].x * xTemp[tile_y/2 +4 + k*F/2].y; temp90 += xTemp[tile_x/2 +4 + k*F/2].y * xTemp[tile_y/2 + k*F/2].x; temp91 += xTemp[tile_x/2 +4 + k*F/2].y * xTemp[tile_y/2 + k*F/2].y; temp92 += xTemp[tile_x/2 +4 + k*F/2].y * xTemp[tile_y/2 +1 + k*F/2].x; temp93 += xTemp[tile_x/2 +4 + k*F/2].y * xTemp[tile_y/2 +1 + k*F/2].y; temp94 += xTemp[tile_x/2 +4 + k*F/2].y * xTemp[tile_y/2 +2 + k*F/2].x; temp95 += xTemp[tile_x/2 +4 + k*F/2].y * xTemp[tile_y/2 +2 + k*F/2].y; temp96 += xTemp[tile_x/2 +4 + k*F/2].y * xTemp[tile_y/2 +3 + k*F/2].x; temp97 += xTemp[tile_x/2 +4 + k*F/2].y * xTemp[tile_y/2 +3 + k*F/2].y; temp98 += xTemp[tile_x/2 +4 + k*F/2].y * xTemp[tile_y/2 +4 + k*F/2].x; temp99 += xTemp[tile_x/2 +4 + k*F/2].y * xTemp[tile_y/2 +4 + k*F/2].y; } /////////////////////////////////////////////////////////////////////////////////////////////////////////// __syncthreads(); } int index = blockIdx.x*F*F; ///* //copy output to gmem xx[index + tile_x + tile_y*F] = temp0; xx[index + tile_x + (tile_y + 1)*F] = temp1; xx[index + tile_x + (tile_y + 2)*F] = temp2; xx[index + tile_x + (tile_y + 3)*F] = temp3; xx[index + tile_x + (tile_y + 4)*F] = temp4; xx[index + tile_x + (tile_y + 5)*F] = temp5; xx[index + tile_x + (tile_y + 6)*F] = temp6; xx[index + tile_x + (tile_y + 7)*F] = temp7; xx[index + tile_x + (tile_y + 8)*F] = temp8; xx[index + tile_x + (tile_y + 9)*F] = temp9; xx[index + tile_x + 1 + tile_y*F] = temp10; xx[index + tile_x + 1 + (tile_y + 1)*F] = temp11; xx[index + tile_x + 1 + (tile_y + 2)*F] = temp12; xx[index + tile_x + 1 + (tile_y + 3)*F] = temp13; xx[index + tile_x + 1 + (tile_y + 4)*F] = temp14; xx[index + tile_x + 1 + (tile_y + 5)*F] = temp15; xx[index + 
tile_x + 1 + (tile_y + 6)*F] = temp16; xx[index + tile_x + 1 + (tile_y + 7)*F] = temp17; xx[index + tile_x + 1 + (tile_y + 8)*F] = temp18; xx[index + tile_x + 1 + (tile_y + 9)*F] = temp19; xx[index + tile_x + 2 + tile_y*F] = temp20; xx[index + tile_x + 2 + (tile_y + 1)*F] = temp21; xx[index + tile_x + 2 + (tile_y + 2)*F] = temp22; xx[index + tile_x + 2 + (tile_y + 3)*F] = temp23; xx[index + tile_x + 2 + (tile_y + 4)*F] = temp24; xx[index + tile_x + 2 + (tile_y + 5)*F] = temp25; xx[index + tile_x + 2 + (tile_y + 6)*F] = temp26; xx[index + tile_x + 2 + (tile_y + 7)*F] = temp27; xx[index + tile_x + 2 + (tile_y + 8)*F] = temp28; xx[index + tile_x + 2 + (tile_y + 9)*F] = temp29; xx[index + tile_x + 3 + tile_y*F] = temp30; xx[index + tile_x + 3 + (tile_y + 1)*F] = temp31; xx[index + tile_x + 3 + (tile_y + 2)*F] = temp32; xx[index + tile_x + 3 + (tile_y + 3)*F] = temp33; xx[index + tile_x + 3 + (tile_y + 4)*F] = temp34; xx[index + tile_x + 3 + (tile_y + 5)*F] = temp35; xx[index + tile_x + 3 + (tile_y + 6)*F] = temp36; xx[index + tile_x + 3 + (tile_y + 7)*F] = temp37; xx[index + tile_x + 3 + (tile_y + 8)*F] = temp38; xx[index + tile_x + 3 + (tile_y + 9)*F] = temp39; xx[index + tile_x + 4 + tile_y*F] = temp40; xx[index + tile_x + 4 + (tile_y + 1)*F] = temp41; xx[index + tile_x + 4 + (tile_y + 2)*F] = temp42; xx[index + tile_x + 4 + (tile_y + 3)*F] = temp43; xx[index + tile_x + 4 + (tile_y + 4)*F] = temp44; xx[index + tile_x + 4 + (tile_y + 5)*F] = temp45; xx[index + tile_x + 4 + (tile_y + 6)*F] = temp46; xx[index + tile_x + 4 + (tile_y + 7)*F] = temp47; xx[index + tile_x + 4 + (tile_y + 8)*F] = temp48; xx[index + tile_x + 4 + (tile_y + 9)*F] = temp49; xx[index + tile_x + 5 + tile_y*F] = temp50; xx[index + tile_x + 5 + (tile_y + 1)*F] = temp51; xx[index + tile_x + 5 + (tile_y + 2)*F] = temp52; xx[index + tile_x + 5 + (tile_y + 3)*F] = temp53; xx[index + tile_x + 5 + (tile_y + 4)*F] = temp54; xx[index + tile_x + 5 + (tile_y + 5)*F] = temp55; xx[index + tile_x + 5 + (tile_y + 6)*F] = temp56; xx[index + tile_x + 5 + (tile_y + 7)*F] = temp57; xx[index + tile_x + 5 + (tile_y + 8)*F] = temp58; xx[index + tile_x + 5 + (tile_y + 9)*F] = temp59; xx[index + tile_x + 6 + tile_y*F] = temp60; xx[index + tile_x + 6 + (tile_y + 1)*F] = temp61; xx[index + tile_x + 6 + (tile_y + 2)*F] = temp62; xx[index + tile_x + 6 + (tile_y + 3)*F] = temp63; xx[index + tile_x + 6 + (tile_y + 4)*F] = temp64; xx[index + tile_x + 6 + (tile_y + 5)*F] = temp65; xx[index + tile_x + 6 + (tile_y + 6)*F] = temp66; xx[index + tile_x + 6 + (tile_y + 7)*F] = temp67; xx[index + tile_x + 6 + (tile_y + 8)*F] = temp68; xx[index + tile_x + 6 + (tile_y + 9)*F] = temp69; xx[index + tile_x + 7 + tile_y*F] = temp70; xx[index + tile_x + 7 + (tile_y + 1)*F] = temp71; xx[index + tile_x + 7 + (tile_y + 2)*F] = temp72; xx[index + tile_x + 7 + (tile_y + 3)*F] = temp73; xx[index + tile_x + 7 + (tile_y + 4)*F] = temp74; xx[index + tile_x + 7 + (tile_y + 5)*F] = temp75; xx[index + tile_x + 7 + (tile_y + 6)*F] = temp76; xx[index + tile_x + 7 + (tile_y + 7)*F] = temp77; xx[index + tile_x + 7 + (tile_y + 8)*F] = temp78; xx[index + tile_x + 7 + (tile_y + 9)*F] = temp79; xx[index + tile_x + 8 + tile_y*F] = temp80; xx[index + tile_x + 8 + (tile_y + 1)*F] = temp81; xx[index + tile_x + 8 + (tile_y + 2)*F] = temp82; xx[index + tile_x + 8 + (tile_y + 3)*F] = temp83; xx[index + tile_x + 8 + (tile_y + 4)*F] = temp84; xx[index + tile_x + 8 + (tile_y + 5)*F] = temp85; xx[index + tile_x + 8 + (tile_y + 6)*F] = temp86; xx[index + tile_x + 8 + (tile_y + 7)*F] = temp87; 
xx[index + tile_x + 8 + (tile_y + 8)*F] = temp88; xx[index + tile_x + 8 + (tile_y + 9)*F] = temp89; xx[index + tile_x + 9 + tile_y*F] = temp90; xx[index + tile_x + 9 + (tile_y + 1)*F] = temp91; xx[index + tile_x + 9 + (tile_y + 2)*F] = temp92; xx[index + tile_x + 9 + (tile_y + 3)*F] = temp93; xx[index + tile_x + 9 + (tile_y + 4)*F] = temp94; xx[index + tile_x + 9 + (tile_y + 5)*F] = temp95; xx[index + tile_x + 9 + (tile_y + 6)*F] = temp96; xx[index + tile_x + 9 + (tile_y + 7)*F] = temp97; xx[index + tile_x + 9 + (tile_y + 8)*F] = temp98; xx[index + tile_x + 9 + (tile_y + 9)*F] = temp99; //*/ //regularization if(tile_x == tile_y){ for(int k = 0; k < tile; k++) xx[index + (tile_x+k)*(1+F)] += (end - start) * lambda; } } } __global__ void __launch_bounds__(64, 6) get_hermitian_x(float* tt, const int* csrRowIndex, const int* csrColIndex, const float lambda) { __shared__ float2 thetaTemp[SCAN_BATCH * F/2]; int row = blockIdx.x; if (row < M) { //this block needs to handle end - start thetaT columns int start = csrRowIndex[row]; int end = csrRowIndex[row + 1]; //slide through [start, end] by window size SCAN_BATCH int iterations = (end - start - 1)/SCAN_BATCH + 1; float temp0= 0, temp1= 0, temp2= 0, temp3= 0, temp4= 0, temp5= 0, temp6= 0, temp7= 0, temp8= 0, temp9 = 0; float temp10= 0, temp11= 0, temp12= 0, temp13= 0, temp14= 0, temp15= 0, temp16= 0, temp17= 0, temp18= 0, temp19 = 0; float temp20= 0, temp21= 0, temp22= 0, temp23= 0, temp24= 0, temp25= 0, temp26= 0, temp27= 0, temp28= 0, temp29 = 0; float temp30= 0, temp31= 0, temp32= 0, temp33= 0, temp34= 0, temp35= 0, temp36= 0, temp37= 0, temp38= 0, temp39 = 0; float temp40= 0, temp41= 0, temp42= 0, temp43= 0, temp44= 0, temp45= 0, temp46= 0, temp47= 0, temp48= 0, temp49 = 0; float temp50= 0, temp51= 0, temp52= 0, temp53= 0, temp54= 0, temp55= 0, temp56= 0, temp57= 0, temp58= 0, temp59 = 0; float temp60= 0, temp61= 0, temp62= 0, temp63= 0, temp64= 0, temp65= 0, temp66= 0, temp67= 0, temp68= 0, temp69 = 0; float temp70= 0, temp71= 0, temp72= 0, temp73= 0, temp74= 0, temp75= 0, temp76= 0, temp77= 0, temp78= 0, temp79 = 0; float temp80= 0, temp81= 0, temp82= 0, temp83= 0, temp84= 0, temp85= 0, temp86= 0, temp87= 0, temp88= 0, temp89 = 0; float temp90= 0, temp91= 0, temp92= 0, temp93= 0, temp94= 0, temp95= 0, temp96= 0, temp97= 0, temp98= 0, temp99 = 0; //int tile_x = (threadIdx.x/tile) * tile;//start x of this tile //int tile_y = (threadIdx.x%tile) * tile;//start y of this tile int tile_x = 0; int tile_y = 0; int tile = F/10; for ( int i = 0; i < 10; i++){ int end = ((20-i)*(i+1))/2; if(threadIdx.x < end){ tile_x = i * tile; tile_y = (10 + threadIdx.x - end) * tile; break; } } //iteration: copy gmem-->smem; aggregate smem-->register for (int iter = 0; iter < iterations; iter ++){ float2 theta; //copy texture --> smem, and sync /* if(threadIdx.x < SCAN_BATCH){ if(iter*SCAN_BATCH + threadIdx.x < end - start){ for (int k = 0; k < F; k += 2){ theta.x = tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + threadIdx.x] + k); theta.y = tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + threadIdx.x] + k+1); thetaTemp[threadIdx.x * F/2 + k/2] = theta; } } //must be the last iteration; no need to check //not enough theta to copy, set zero else memset(&thetaTemp[threadIdx.x*F/2], 0, F*sizeof(float)); } */ //two layers: warp divergence unless we split at 32 //32 > SCAN_BATCH if(threadIdx.x < 2*32 ){ //int index = threadIdx.x; int index = threadIdx.x - (threadIdx.x/32)*32; //0 to 31; if(index < SCAN_BATCH){ if(iter*SCAN_BATCH + 
index < end - start){ //for (int k = 50*(threadIdx.x/32); k < 50*(threadIdx.x/32) + 50; k += 2){ //IMPORTANT: for loop has constant and identical start and end if(threadIdx.x < 32){ for (int k = 0; k < 50; k += 2){ theta.x = tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + index] + k); theta.y = tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + index] + k+1); thetaTemp[index * F/2 + k/2] = theta; } } else { for (int k = 0; k < 50; k += 2){ theta.x = tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + index] + k + 50); theta.y = tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + index] + k + 51); thetaTemp[index * F/2 + k/2 + 25] = theta; } } } //must be the last iteration; no need to check //not enough theta to copy, set zero else memset(&thetaTemp[index*F/2], 0, F*sizeof(float)); } } /* //issue: not coalesced access to csrColIndex if(threadIdx.x < F && threadIdx.x%2 == 0){ for(int k = 0; k< SCAN_BATCH; k++){ if(iter*SCAN_BATCH + k < end - start){ theta.x = tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + k] + threadIdx.x); theta.y = tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + k] + threadIdx.x +1); thetaTemp[k * F/2 + threadIdx.x/2] = theta; } //must be the last iteration; no need to check //not enough theta to copy, set zero else memset(&thetaTemp[k*F/2 + threadIdx.x/2], 0, 2*sizeof(float)); } } */ /* int layers = blockDim.x/SCAN_BATCH; //100/30 = 3 //int height = blockDim.x/layers; //30 int y = threadIdx.x/SCAN_BATCH;//0,1,2,3; y==3 is not viable //min(y, (layers-1)) * height int y_start = y * 30;//0-29:0;30-59:30;60-89:60 int y_end = y_start + 30; //0-29:30;30-59:60;60-89:90 if(y >= layers - 1) y_end = F; //60-89:100 if(threadIdx.x - y_start < SCAN_BATCH){ if(iter*SCAN_BATCH + (threadIdx.x - y_start) < end - start){ for (int k = y_start; k < y_end; k += 2){ theta.x = tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + (threadIdx.x - y_start)] + k); theta.y = tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + (threadIdx.x - y_start)] + k+1); thetaTemp[(threadIdx.x - y_start)* F/2 + k/2] = theta; } } //must be the last iteration; no need to check //not enough theta to copy, set zero else memset(&thetaTemp[(threadIdx.x - y_start)*F/2 + y_start/2], 0, (y_end - y_start)*sizeof(float)); } */ __syncthreads(); /////////////////////////////////////////////////////////////////////////////////////////////////////////// //tile: 10*10 if(threadIdx.x < 55 ){ for(int k = 0; k < SCAN_BATCH; k++){ temp0 += thetaTemp[tile_x/2 + k*F/2].x * thetaTemp[tile_y/2 + k*F/2].x; temp1 += thetaTemp[tile_x/2 + k*F/2].x * thetaTemp[tile_y/2 + k*F/2].y; temp2 += thetaTemp[tile_x/2 + k*F/2].x * thetaTemp[tile_y/2 +1 + k*F/2].x; temp3 += thetaTemp[tile_x/2 + k*F/2].x * thetaTemp[tile_y/2 +1 + k*F/2].y; temp4 += thetaTemp[tile_x/2 + k*F/2].x * thetaTemp[tile_y/2 +2 + k*F/2].x; temp5 += thetaTemp[tile_x/2 + k*F/2].x * thetaTemp[tile_y/2 +2 + k*F/2].y; temp6 += thetaTemp[tile_x/2 + k*F/2].x * thetaTemp[tile_y/2 +3 + k*F/2].x; temp7 += thetaTemp[tile_x/2 + k*F/2].x * thetaTemp[tile_y/2 +3 + k*F/2].y; temp8 += thetaTemp[tile_x/2 + k*F/2].x * thetaTemp[tile_y/2 +4 + k*F/2].x; temp9 += thetaTemp[tile_x/2 + k*F/2].x * thetaTemp[tile_y/2 +4 + k*F/2].y; temp10 += thetaTemp[tile_x/2 + k*F/2].y * thetaTemp[tile_y/2 + k*F/2].x; temp11 += thetaTemp[tile_x/2 + k*F/2].y * thetaTemp[tile_y/2 + k*F/2].y; temp12 += thetaTemp[tile_x/2 + k*F/2].y * thetaTemp[tile_y/2 +1 + k*F/2].x; temp13 += 
thetaTemp[tile_x/2 + k*F/2].y * thetaTemp[tile_y/2 +1 + k*F/2].y; temp14 += thetaTemp[tile_x/2 + k*F/2].y * thetaTemp[tile_y/2 +2 + k*F/2].x; temp15 += thetaTemp[tile_x/2 + k*F/2].y * thetaTemp[tile_y/2 +2 + k*F/2].y; temp16 += thetaTemp[tile_x/2 + k*F/2].y * thetaTemp[tile_y/2 +3 + k*F/2].x; temp17 += thetaTemp[tile_x/2 + k*F/2].y * thetaTemp[tile_y/2 +3 + k*F/2].y; temp18 += thetaTemp[tile_x/2 + k*F/2].y * thetaTemp[tile_y/2 +4 + k*F/2].x; temp19 += thetaTemp[tile_x/2 + k*F/2].y * thetaTemp[tile_y/2 +4 + k*F/2].y; temp20 += thetaTemp[tile_x/2 +1 + k*F/2].x * thetaTemp[tile_y/2 + k*F/2].x; temp21 += thetaTemp[tile_x/2 +1 + k*F/2].x * thetaTemp[tile_y/2 + k*F/2].y; temp22 += thetaTemp[tile_x/2 +1 + k*F/2].x * thetaTemp[tile_y/2 +1 + k*F/2].x; temp23 += thetaTemp[tile_x/2 +1 + k*F/2].x * thetaTemp[tile_y/2 +1 + k*F/2].y; temp24 += thetaTemp[tile_x/2 +1 + k*F/2].x * thetaTemp[tile_y/2 +2 + k*F/2].x; temp25 += thetaTemp[tile_x/2 +1 + k*F/2].x * thetaTemp[tile_y/2 +2 + k*F/2].y; temp26 += thetaTemp[tile_x/2 +1 + k*F/2].x * thetaTemp[tile_y/2 +3 + k*F/2].x; temp27 += thetaTemp[tile_x/2 +1 + k*F/2].x * thetaTemp[tile_y/2 +3 + k*F/2].y; temp28 += thetaTemp[tile_x/2 +1 + k*F/2].x * thetaTemp[tile_y/2 +4 + k*F/2].x; temp29 += thetaTemp[tile_x/2 +1 + k*F/2].x * thetaTemp[tile_y/2 +4 + k*F/2].y; temp30 += thetaTemp[tile_x/2 +1 + k*F/2].y * thetaTemp[tile_y/2 + k*F/2].x; temp31 += thetaTemp[tile_x/2 +1 + k*F/2].y * thetaTemp[tile_y/2 + k*F/2].y; temp32 += thetaTemp[tile_x/2 +1 + k*F/2].y * thetaTemp[tile_y/2 +1 + k*F/2].x; temp33 += thetaTemp[tile_x/2 +1 + k*F/2].y * thetaTemp[tile_y/2 +1 + k*F/2].y; temp34 += thetaTemp[tile_x/2 +1 + k*F/2].y * thetaTemp[tile_y/2 +2 + k*F/2].x; temp35 += thetaTemp[tile_x/2 +1 + k*F/2].y * thetaTemp[tile_y/2 +2 + k*F/2].y; temp36 += thetaTemp[tile_x/2 +1 + k*F/2].y * thetaTemp[tile_y/2 +3 + k*F/2].x; temp37 += thetaTemp[tile_x/2 +1 + k*F/2].y * thetaTemp[tile_y/2 +3 + k*F/2].y; temp38 += thetaTemp[tile_x/2 +1 + k*F/2].y * thetaTemp[tile_y/2 +4 + k*F/2].x; temp39 += thetaTemp[tile_x/2 +1 + k*F/2].y * thetaTemp[tile_y/2 +4 + k*F/2].y; temp40 += thetaTemp[tile_x/2 +2 + k*F/2].x * thetaTemp[tile_y/2 + k*F/2].x; temp41 += thetaTemp[tile_x/2 +2 + k*F/2].x * thetaTemp[tile_y/2 + k*F/2].y; temp42 += thetaTemp[tile_x/2 +2 + k*F/2].x * thetaTemp[tile_y/2 +1 + k*F/2].x; temp43 += thetaTemp[tile_x/2 +2 + k*F/2].x * thetaTemp[tile_y/2 +1 + k*F/2].y; temp44 += thetaTemp[tile_x/2 +2 + k*F/2].x * thetaTemp[tile_y/2 +2 + k*F/2].x; temp45 += thetaTemp[tile_x/2 +2 + k*F/2].x * thetaTemp[tile_y/2 +2 + k*F/2].y; temp46 += thetaTemp[tile_x/2 +2 + k*F/2].x * thetaTemp[tile_y/2 +3 + k*F/2].x; temp47 += thetaTemp[tile_x/2 +2 + k*F/2].x * thetaTemp[tile_y/2 +3 + k*F/2].y; temp48 += thetaTemp[tile_x/2 +2 + k*F/2].x * thetaTemp[tile_y/2 +4 + k*F/2].x; temp49 += thetaTemp[tile_x/2 +2 + k*F/2].x * thetaTemp[tile_y/2 +4 + k*F/2].y; temp50 += thetaTemp[tile_x/2 +2 + k*F/2].y * thetaTemp[tile_y/2 + k*F/2].x; temp51 += thetaTemp[tile_x/2 +2 + k*F/2].y * thetaTemp[tile_y/2 + k*F/2].y; temp52 += thetaTemp[tile_x/2 +2 + k*F/2].y * thetaTemp[tile_y/2 +1 + k*F/2].x; temp53 += thetaTemp[tile_x/2 +2 + k*F/2].y * thetaTemp[tile_y/2 +1 + k*F/2].y; temp54 += thetaTemp[tile_x/2 +2 + k*F/2].y * thetaTemp[tile_y/2 +2 + k*F/2].x; temp55 += thetaTemp[tile_x/2 +2 + k*F/2].y * thetaTemp[tile_y/2 +2 + k*F/2].y; temp56 += thetaTemp[tile_x/2 +2 + k*F/2].y * thetaTemp[tile_y/2 +3 + k*F/2].x; temp57 += thetaTemp[tile_x/2 +2 + k*F/2].y * thetaTemp[tile_y/2 +3 + k*F/2].y; temp58 += thetaTemp[tile_x/2 +2 + k*F/2].y * 
thetaTemp[tile_y/2 +4 + k*F/2].x; temp59 += thetaTemp[tile_x/2 +2 + k*F/2].y * thetaTemp[tile_y/2 +4 + k*F/2].y; temp60 += thetaTemp[tile_x/2 +3 + k*F/2].x * thetaTemp[tile_y/2 + k*F/2].x; temp61 += thetaTemp[tile_x/2 +3 + k*F/2].x * thetaTemp[tile_y/2 + k*F/2].y; temp62 += thetaTemp[tile_x/2 +3 + k*F/2].x * thetaTemp[tile_y/2 +1 + k*F/2].x; temp63 += thetaTemp[tile_x/2 +3 + k*F/2].x * thetaTemp[tile_y/2 +1 + k*F/2].y; temp64 += thetaTemp[tile_x/2 +3 + k*F/2].x * thetaTemp[tile_y/2 +2 + k*F/2].x; temp65 += thetaTemp[tile_x/2 +3 + k*F/2].x * thetaTemp[tile_y/2 +2 + k*F/2].y; temp66 += thetaTemp[tile_x/2 +3 + k*F/2].x * thetaTemp[tile_y/2 +3 + k*F/2].x; temp67 += thetaTemp[tile_x/2 +3 + k*F/2].x * thetaTemp[tile_y/2 +3 + k*F/2].y; temp68 += thetaTemp[tile_x/2 +3 + k*F/2].x * thetaTemp[tile_y/2 +4 + k*F/2].x; temp69 += thetaTemp[tile_x/2 +3 + k*F/2].x * thetaTemp[tile_y/2 +4 + k*F/2].y; temp70 += thetaTemp[tile_x/2 +3 + k*F/2].y * thetaTemp[tile_y/2 + k*F/2].x; temp71 += thetaTemp[tile_x/2 +3 + k*F/2].y * thetaTemp[tile_y/2 + k*F/2].y; temp72 += thetaTemp[tile_x/2 +3 + k*F/2].y * thetaTemp[tile_y/2 +1 + k*F/2].x; temp73 += thetaTemp[tile_x/2 +3 + k*F/2].y * thetaTemp[tile_y/2 +1 + k*F/2].y; temp74 += thetaTemp[tile_x/2 +3 + k*F/2].y * thetaTemp[tile_y/2 +2 + k*F/2].x; temp75 += thetaTemp[tile_x/2 +3 + k*F/2].y * thetaTemp[tile_y/2 +2 + k*F/2].y; temp76 += thetaTemp[tile_x/2 +3 + k*F/2].y * thetaTemp[tile_y/2 +3 + k*F/2].x; temp77 += thetaTemp[tile_x/2 +3 + k*F/2].y * thetaTemp[tile_y/2 +3 + k*F/2].y; temp78 += thetaTemp[tile_x/2 +3 + k*F/2].y * thetaTemp[tile_y/2 +4 + k*F/2].x; temp79 += thetaTemp[tile_x/2 +3 + k*F/2].y * thetaTemp[tile_y/2 +4 + k*F/2].y; temp80 += thetaTemp[tile_x/2 +4 + k*F/2].x * thetaTemp[tile_y/2 + k*F/2].x; temp81 += thetaTemp[tile_x/2 +4 + k*F/2].x * thetaTemp[tile_y/2 + k*F/2].y; temp82 += thetaTemp[tile_x/2 +4 + k*F/2].x * thetaTemp[tile_y/2 +1 + k*F/2].x; temp83 += thetaTemp[tile_x/2 +4 + k*F/2].x * thetaTemp[tile_y/2 +1 + k*F/2].y; temp84 += thetaTemp[tile_x/2 +4 + k*F/2].x * thetaTemp[tile_y/2 +2 + k*F/2].x; temp85 += thetaTemp[tile_x/2 +4 + k*F/2].x * thetaTemp[tile_y/2 +2 + k*F/2].y; temp86 += thetaTemp[tile_x/2 +4 + k*F/2].x * thetaTemp[tile_y/2 +3 + k*F/2].x; temp87 += thetaTemp[tile_x/2 +4 + k*F/2].x * thetaTemp[tile_y/2 +3 + k*F/2].y; temp88 += thetaTemp[tile_x/2 +4 + k*F/2].x * thetaTemp[tile_y/2 +4 + k*F/2].x; temp89 += thetaTemp[tile_x/2 +4 + k*F/2].x * thetaTemp[tile_y/2 +4 + k*F/2].y; temp90 += thetaTemp[tile_x/2 +4 + k*F/2].y * thetaTemp[tile_y/2 + k*F/2].x; temp91 += thetaTemp[tile_x/2 +4 + k*F/2].y * thetaTemp[tile_y/2 + k*F/2].y; temp92 += thetaTemp[tile_x/2 +4 + k*F/2].y * thetaTemp[tile_y/2 +1 + k*F/2].x; temp93 += thetaTemp[tile_x/2 +4 + k*F/2].y * thetaTemp[tile_y/2 +1 + k*F/2].y; temp94 += thetaTemp[tile_x/2 +4 + k*F/2].y * thetaTemp[tile_y/2 +2 + k*F/2].x; temp95 += thetaTemp[tile_x/2 +4 + k*F/2].y * thetaTemp[tile_y/2 +2 + k*F/2].y; temp96 += thetaTemp[tile_x/2 +4 + k*F/2].y * thetaTemp[tile_y/2 +3 + k*F/2].x; temp97 += thetaTemp[tile_x/2 +4 + k*F/2].y * thetaTemp[tile_y/2 +3 + k*F/2].y; temp98 += thetaTemp[tile_x/2 +4 + k*F/2].y * thetaTemp[tile_y/2 +4 + k*F/2].x; temp99 += thetaTemp[tile_x/2 +4 + k*F/2].y * thetaTemp[tile_y/2 +4 + k*F/2].y; } } } //end of iteration in copying from smem and aggregating in register /////////////////////////////////////////////////////////////////////////////////////////////////////////// __syncthreads(); ///* if(threadIdx.x < 55 ){ //copy output to gmem int index = blockIdx.x*F*F; tt[index + tile_x + 
tile_y*F] = temp0; tt[index + tile_x + (tile_y + 1)*F] = temp1; tt[index + tile_x + (tile_y + 2)*F] = temp2; tt[index + tile_x + (tile_y + 3)*F] = temp3; tt[index + tile_x + (tile_y + 4)*F] = temp4; tt[index + tile_x + (tile_y + 5)*F] = temp5; tt[index + tile_x + (tile_y + 6)*F] = temp6; tt[index + tile_x + (tile_y + 7)*F] = temp7; tt[index + tile_x + (tile_y + 8)*F] = temp8; tt[index + tile_x + (tile_y + 9)*F] = temp9; tt[index + tile_x + 1 + tile_y*F] = temp10; tt[index + tile_x + 1 + (tile_y + 1)*F] = temp11; tt[index + tile_x + 1 + (tile_y + 2)*F] = temp12; tt[index + tile_x + 1 + (tile_y + 3)*F] = temp13; tt[index + tile_x + 1 + (tile_y + 4)*F] = temp14; tt[index + tile_x + 1 + (tile_y + 5)*F] = temp15; tt[index + tile_x + 1 + (tile_y + 6)*F] = temp16; tt[index + tile_x + 1 + (tile_y + 7)*F] = temp17; tt[index + tile_x + 1 + (tile_y + 8)*F] = temp18; tt[index + tile_x + 1 + (tile_y + 9)*F] = temp19; tt[index + tile_x + 2 + tile_y*F] = temp20; tt[index + tile_x + 2 + (tile_y + 1)*F] = temp21; tt[index + tile_x + 2 + (tile_y + 2)*F] = temp22; tt[index + tile_x + 2 + (tile_y + 3)*F] = temp23; tt[index + tile_x + 2 + (tile_y + 4)*F] = temp24; tt[index + tile_x + 2 + (tile_y + 5)*F] = temp25; tt[index + tile_x + 2 + (tile_y + 6)*F] = temp26; tt[index + tile_x + 2 + (tile_y + 7)*F] = temp27; tt[index + tile_x + 2 + (tile_y + 8)*F] = temp28; tt[index + tile_x + 2 + (tile_y + 9)*F] = temp29; tt[index + tile_x + 3 + tile_y*F] = temp30; tt[index + tile_x + 3 + (tile_y + 1)*F] = temp31; tt[index + tile_x + 3 + (tile_y + 2)*F] = temp32; tt[index + tile_x + 3 + (tile_y + 3)*F] = temp33; tt[index + tile_x + 3 + (tile_y + 4)*F] = temp34; tt[index + tile_x + 3 + (tile_y + 5)*F] = temp35; tt[index + tile_x + 3 + (tile_y + 6)*F] = temp36; tt[index + tile_x + 3 + (tile_y + 7)*F] = temp37; tt[index + tile_x + 3 + (tile_y + 8)*F] = temp38; tt[index + tile_x + 3 + (tile_y + 9)*F] = temp39; tt[index + tile_x + 4 + tile_y*F] = temp40; tt[index + tile_x + 4 + (tile_y + 1)*F] = temp41; tt[index + tile_x + 4 + (tile_y + 2)*F] = temp42; tt[index + tile_x + 4 + (tile_y + 3)*F] = temp43; tt[index + tile_x + 4 + (tile_y + 4)*F] = temp44; tt[index + tile_x + 4 + (tile_y + 5)*F] = temp45; tt[index + tile_x + 4 + (tile_y + 6)*F] = temp46; tt[index + tile_x + 4 + (tile_y + 7)*F] = temp47; tt[index + tile_x + 4 + (tile_y + 8)*F] = temp48; tt[index + tile_x + 4 + (tile_y + 9)*F] = temp49; tt[index + tile_x + 5 + tile_y*F] = temp50; tt[index + tile_x + 5 + (tile_y + 1)*F] = temp51; tt[index + tile_x + 5 + (tile_y + 2)*F] = temp52; tt[index + tile_x + 5 + (tile_y + 3)*F] = temp53; tt[index + tile_x + 5 + (tile_y + 4)*F] = temp54; tt[index + tile_x + 5 + (tile_y + 5)*F] = temp55; tt[index + tile_x + 5 + (tile_y + 6)*F] = temp56; tt[index + tile_x + 5 + (tile_y + 7)*F] = temp57; tt[index + tile_x + 5 + (tile_y + 8)*F] = temp58; tt[index + tile_x + 5 + (tile_y + 9)*F] = temp59; tt[index + tile_x + 6 + tile_y*F] = temp60; tt[index + tile_x + 6 + (tile_y + 1)*F] = temp61; tt[index + tile_x + 6 + (tile_y + 2)*F] = temp62; tt[index + tile_x + 6 + (tile_y + 3)*F] = temp63; tt[index + tile_x + 6 + (tile_y + 4)*F] = temp64; tt[index + tile_x + 6 + (tile_y + 5)*F] = temp65; tt[index + tile_x + 6 + (tile_y + 6)*F] = temp66; tt[index + tile_x + 6 + (tile_y + 7)*F] = temp67; tt[index + tile_x + 6 + (tile_y + 8)*F] = temp68; tt[index + tile_x + 6 + (tile_y + 9)*F] = temp69; tt[index + tile_x + 7 + tile_y*F] = temp70; tt[index + tile_x + 7 + (tile_y + 1)*F] = temp71; tt[index + tile_x + 7 + (tile_y + 2)*F] = temp72; tt[index + tile_x + 7 
+ (tile_y + 3)*F] = temp73; tt[index + tile_x + 7 + (tile_y + 4)*F] = temp74; tt[index + tile_x + 7 + (tile_y + 5)*F] = temp75; tt[index + tile_x + 7 + (tile_y + 6)*F] = temp76; tt[index + tile_x + 7 + (tile_y + 7)*F] = temp77; tt[index + tile_x + 7 + (tile_y + 8)*F] = temp78; tt[index + tile_x + 7 + (tile_y + 9)*F] = temp79; tt[index + tile_x + 8 + tile_y*F] = temp80; tt[index + tile_x + 8 + (tile_y + 1)*F] = temp81; tt[index + tile_x + 8 + (tile_y + 2)*F] = temp82; tt[index + tile_x + 8 + (tile_y + 3)*F] = temp83; tt[index + tile_x + 8 + (tile_y + 4)*F] = temp84; tt[index + tile_x + 8 + (tile_y + 5)*F] = temp85; tt[index + tile_x + 8 + (tile_y + 6)*F] = temp86; tt[index + tile_x + 8 + (tile_y + 7)*F] = temp87; tt[index + tile_x + 8 + (tile_y + 8)*F] = temp88; tt[index + tile_x + 8 + (tile_y + 9)*F] = temp89; tt[index + tile_x + 9 + tile_y*F] = temp90; tt[index + tile_x + 9 + (tile_y + 1)*F] = temp91; tt[index + tile_x + 9 + (tile_y + 2)*F] = temp92; tt[index + tile_x + 9 + (tile_y + 3)*F] = temp93; tt[index + tile_x + 9 + (tile_y + 4)*F] = temp94; tt[index + tile_x + 9 + (tile_y + 5)*F] = temp95; tt[index + tile_x + 9 + (tile_y + 6)*F] = temp96; tt[index + tile_x + 9 + (tile_y + 7)*F] = temp97; tt[index + tile_x + 9 + (tile_y + 8)*F] = temp98; tt[index + tile_x + 9 + (tile_y + 9)*F] = temp99; //symmetric if(tile_x!=tile_y){ tt[index + tile_y + 0+ (tile_x + 0)*F]= temp0; tt[index + tile_y + 1+ (tile_x + 0)*F]= temp1; tt[index + tile_y + 2+ (tile_x + 0)*F]= temp2; tt[index + tile_y + 3+ (tile_x + 0)*F]= temp3; tt[index + tile_y + 4+ (tile_x + 0)*F]= temp4; tt[index + tile_y + 5+ (tile_x + 0)*F]= temp5; tt[index + tile_y + 6+ (tile_x + 0)*F]= temp6; tt[index + tile_y + 7+ (tile_x + 0)*F]= temp7; tt[index + tile_y + 8+ (tile_x + 0)*F]= temp8; tt[index + tile_y + 9+ (tile_x + 0)*F]= temp9; tt[index + tile_y + 0+ (tile_x + 1)*F]= temp10; tt[index + tile_y + 1+ (tile_x + 1)*F]= temp11; tt[index + tile_y + 2+ (tile_x + 1)*F]= temp12; tt[index + tile_y + 3+ (tile_x + 1)*F]= temp13; tt[index + tile_y + 4+ (tile_x + 1)*F]= temp14; tt[index + tile_y + 5+ (tile_x + 1)*F]= temp15; tt[index + tile_y + 6+ (tile_x + 1)*F]= temp16; tt[index + tile_y + 7+ (tile_x + 1)*F]= temp17; tt[index + tile_y + 8+ (tile_x + 1)*F]= temp18; tt[index + tile_y + 9+ (tile_x + 1)*F]= temp19; tt[index + tile_y + 0+ (tile_x + 2)*F]= temp20; tt[index + tile_y + 1+ (tile_x + 2)*F]= temp21; tt[index + tile_y + 2+ (tile_x + 2)*F]= temp22; tt[index + tile_y + 3+ (tile_x + 2)*F]= temp23; tt[index + tile_y + 4+ (tile_x + 2)*F]= temp24; tt[index + tile_y + 5+ (tile_x + 2)*F]= temp25; tt[index + tile_y + 6+ (tile_x + 2)*F]= temp26; tt[index + tile_y + 7+ (tile_x + 2)*F]= temp27; tt[index + tile_y + 8+ (tile_x + 2)*F]= temp28; tt[index + tile_y + 9+ (tile_x + 2)*F]= temp29; tt[index + tile_y + 0+ (tile_x + 3)*F]= temp30; tt[index + tile_y + 1+ (tile_x + 3)*F]= temp31; tt[index + tile_y + 2+ (tile_x + 3)*F]= temp32; tt[index + tile_y + 3+ (tile_x + 3)*F]= temp33; tt[index + tile_y + 4+ (tile_x + 3)*F]= temp34; tt[index + tile_y + 5+ (tile_x + 3)*F]= temp35; tt[index + tile_y + 6+ (tile_x + 3)*F]= temp36; tt[index + tile_y + 7+ (tile_x + 3)*F]= temp37; tt[index + tile_y + 8+ (tile_x + 3)*F]= temp38; tt[index + tile_y + 9+ (tile_x + 3)*F]= temp39; tt[index + tile_y + 0+ (tile_x + 4)*F]= temp40; tt[index + tile_y + 1+ (tile_x + 4)*F]= temp41; tt[index + tile_y + 2+ (tile_x + 4)*F]= temp42; tt[index + tile_y + 3+ (tile_x + 4)*F]= temp43; tt[index + tile_y + 4+ (tile_x + 4)*F]= temp44; tt[index + tile_y + 5+ (tile_x + 4)*F]= temp45; tt[index 
+ tile_y + 6+ (tile_x + 4)*F]= temp46; tt[index + tile_y + 7+ (tile_x + 4)*F]= temp47; tt[index + tile_y + 8+ (tile_x + 4)*F]= temp48; tt[index + tile_y + 9+ (tile_x + 4)*F]= temp49; tt[index + tile_y + 0+ (tile_x + 5)*F]= temp50; tt[index + tile_y + 1+ (tile_x + 5)*F]= temp51; tt[index + tile_y + 2+ (tile_x + 5)*F]= temp52; tt[index + tile_y + 3+ (tile_x + 5)*F]= temp53; tt[index + tile_y + 4+ (tile_x + 5)*F]= temp54; tt[index + tile_y + 5+ (tile_x + 5)*F]= temp55; tt[index + tile_y + 6+ (tile_x + 5)*F]= temp56; tt[index + tile_y + 7+ (tile_x + 5)*F]= temp57; tt[index + tile_y + 8+ (tile_x + 5)*F]= temp58; tt[index + tile_y + 9+ (tile_x + 5)*F]= temp59; tt[index + tile_y + 0+ (tile_x + 6)*F]= temp60; tt[index + tile_y + 1+ (tile_x + 6)*F]= temp61; tt[index + tile_y + 2+ (tile_x + 6)*F]= temp62; tt[index + tile_y + 3+ (tile_x + 6)*F]= temp63; tt[index + tile_y + 4+ (tile_x + 6)*F]= temp64; tt[index + tile_y + 5+ (tile_x + 6)*F]= temp65; tt[index + tile_y + 6+ (tile_x + 6)*F]= temp66; tt[index + tile_y + 7+ (tile_x + 6)*F]= temp67; tt[index + tile_y + 8+ (tile_x + 6)*F]= temp68; tt[index + tile_y + 9+ (tile_x + 6)*F]= temp69; tt[index + tile_y + 0+ (tile_x + 7)*F]= temp70; tt[index + tile_y + 1+ (tile_x + 7)*F]= temp71; tt[index + tile_y + 2+ (tile_x + 7)*F]= temp72; tt[index + tile_y + 3+ (tile_x + 7)*F]= temp73; tt[index + tile_y + 4+ (tile_x + 7)*F]= temp74; tt[index + tile_y + 5+ (tile_x + 7)*F]= temp75; tt[index + tile_y + 6+ (tile_x + 7)*F]= temp76; tt[index + tile_y + 7+ (tile_x + 7)*F]= temp77; tt[index + tile_y + 8+ (tile_x + 7)*F]= temp78; tt[index + tile_y + 9+ (tile_x + 7)*F]= temp79; tt[index + tile_y + 0+ (tile_x + 8)*F]= temp80; tt[index + tile_y + 1+ (tile_x + 8)*F]= temp81; tt[index + tile_y + 2+ (tile_x + 8)*F]= temp82; tt[index + tile_y + 3+ (tile_x + 8)*F]= temp83; tt[index + tile_y + 4+ (tile_x + 8)*F]= temp84; tt[index + tile_y + 5+ (tile_x + 8)*F]= temp85; tt[index + tile_y + 6+ (tile_x + 8)*F]= temp86; tt[index + tile_y + 7+ (tile_x + 8)*F]= temp87; tt[index + tile_y + 8+ (tile_x + 8)*F]= temp88; tt[index + tile_y + 9+ (tile_x + 8)*F]= temp89; tt[index + tile_y + 0+ (tile_x + 9)*F]= temp90; tt[index + tile_y + 1+ (tile_x + 9)*F]= temp91; tt[index + tile_y + 2+ (tile_x + 9)*F]= temp92; tt[index + tile_y + 3+ (tile_x + 9)*F]= temp93; tt[index + tile_y + 4+ (tile_x + 9)*F]= temp94; tt[index + tile_y + 5+ (tile_x + 9)*F]= temp95; tt[index + tile_y + 6+ (tile_x + 9)*F]= temp96; tt[index + tile_y + 7+ (tile_x + 9)*F]= temp97; tt[index + tile_y + 8+ (tile_x + 9)*F]= temp98; tt[index + tile_y + 9+ (tile_x + 9)*F]= temp99; } //regularization if(tile_x == tile_y){ for(int k = 0; k < tile; k++) tt[index + (tile_x+k)*(1+F)] += (end - start) * lambda; } } //*/ } } __global__ void __launch_bounds__(64, 6) get_hermitian_theta(float* xx, const int* cscRowIndex, const int* cscColIndex, const float lambda, const float * XT) { __shared__ float2 xTemp[SCAN_BATCH * F/2]; int col = blockIdx.x; if (col < N) { //this block needs to handle end - start thetaT columns int start = cscColIndex[col]; int end = cscColIndex[col + 1]; //slide through [start, end] by window size SCAN_BATCH int iterations = (end - start - 1)/SCAN_BATCH + 1; float temp0= 0, temp1= 0, temp2= 0, temp3= 0, temp4= 0, temp5= 0, temp6= 0, temp7= 0, temp8= 0, temp9 = 0; float temp10= 0, temp11= 0, temp12= 0, temp13= 0, temp14= 0, temp15= 0, temp16= 0, temp17= 0, temp18= 0, temp19 = 0; float temp20= 0, temp21= 0, temp22= 0, temp23= 0, temp24= 0, temp25= 0, temp26= 0, temp27= 0, temp28= 0, temp29 = 0; float temp30= 0, temp31= 
0, temp32= 0, temp33= 0, temp34= 0, temp35= 0, temp36= 0, temp37= 0, temp38= 0, temp39 = 0; float temp40= 0, temp41= 0, temp42= 0, temp43= 0, temp44= 0, temp45= 0, temp46= 0, temp47= 0, temp48= 0, temp49 = 0; float temp50= 0, temp51= 0, temp52= 0, temp53= 0, temp54= 0, temp55= 0, temp56= 0, temp57= 0, temp58= 0, temp59 = 0; float temp60= 0, temp61= 0, temp62= 0, temp63= 0, temp64= 0, temp65= 0, temp66= 0, temp67= 0, temp68= 0, temp69 = 0; float temp70= 0, temp71= 0, temp72= 0, temp73= 0, temp74= 0, temp75= 0, temp76= 0, temp77= 0, temp78= 0, temp79 = 0; float temp80= 0, temp81= 0, temp82= 0, temp83= 0, temp84= 0, temp85= 0, temp86= 0, temp87= 0, temp88= 0, temp89 = 0; float temp90= 0, temp91= 0, temp92= 0, temp93= 0, temp94= 0, temp95= 0, temp96= 0, temp97= 0, temp98= 0, temp99 = 0; //int tile_x = (threadIdx.x/tile) * tile;//start x of this tile //int tile_y = (threadIdx.x%tile) * tile;//start y of this tile int tile_x = 0; int tile_y = 0; int tile = F/10; for ( int i = 0; i < 10; i++){ int end = ((20-i)*(i+1))/2; if(threadIdx.x < end){ tile_x = i * tile; tile_y = (10 + threadIdx.x - end) * tile; break; } } //iteration: copy gmem-->smem; aggregate smem-->register for (int iter = 0; iter < iterations; iter ++){ float2 x; //copy texture --> smem, and sync /* if(threadIdx.x < SCAN_BATCH){ if(iter*SCAN_BATCH + threadIdx.x < end - start){ for (int k = 0; k < F; k += 2){ theta.x = tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + threadIdx.x] + k); theta.y = tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + threadIdx.x] + k+1); thetaTemp[threadIdx.x * F/2 + k/2] = theta; } } //must be the last iteration; no need to check //not enough theta to copy, set zero else memset(&thetaTemp[threadIdx.x*F/2], 0, F*sizeof(float)); } */ //two layers: warp divergence unless we split at 32 //32 > SCAN_BATCH if(threadIdx.x < 2*32 ){ //int index = threadIdx.x; int index = threadIdx.x - (threadIdx.x/32)*32; //0 to 31; if(index < SCAN_BATCH){ if(iter*SCAN_BATCH + index < end - start){ //for (int k = 50*(threadIdx.x/32); k < 50*(threadIdx.x/32) + 50; k += 2){ //IMPORTANT: for loop has constant and identical start and end if(threadIdx.x < 32){ for (int k = 0; k < 50; k += 2){ x.x = XT[ F * cscRowIndex[start + iter*SCAN_BATCH + index] + k ]; x.y = XT[ F * cscRowIndex[start + iter*SCAN_BATCH + index] + k+1]; xTemp[index * F/2 + k/2] = x; } } else { for (int k = 0; k < 50; k += 2){ x.x = XT[ F * cscRowIndex[start + iter*SCAN_BATCH + index] + k + 50]; x.y = XT[ F * cscRowIndex[start + iter*SCAN_BATCH + index] + k + 51]; xTemp[index * F/2 + k/2 + 25] = x; } } } //must be the last iteration; no need to check //not enough theta to copy, set zero else memset(&xTemp[index*F/2], 0, F*sizeof(float)); } } /* //issue: not coalesced access to csrColIndex if(threadIdx.x < F && threadIdx.x%2 == 0){ for(int k = 0; k< SCAN_BATCH; k++){ if(iter*SCAN_BATCH + k < end - start){ theta.x = tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + k] + threadIdx.x); theta.y = tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + k] + threadIdx.x +1); thetaTemp[k * F/2 + threadIdx.x/2] = theta; } //must be the last iteration; no need to check //not enough theta to copy, set zero else memset(&thetaTemp[k*F/2 + threadIdx.x/2], 0, 2*sizeof(float)); } } */ /* int layers = blockDim.x/SCAN_BATCH; //100/30 = 3 //int height = blockDim.x/layers; //30 int y = threadIdx.x/SCAN_BATCH;//0,1,2,3; y==3 is not viable //min(y, (layers-1)) * height int y_start = y * 30;//0-29:0;30-59:30;60-89:60 int y_end 
= y_start + 30; //0-29:30;30-59:60;60-89:90 if(y >= layers - 1) y_end = F; //60-89:100 if(threadIdx.x - y_start < SCAN_BATCH){ if(iter*SCAN_BATCH + (threadIdx.x - y_start) < end - start){ for (int k = y_start; k < y_end; k += 2){ theta.x = tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + (threadIdx.x - y_start)] + k); theta.y = tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + (threadIdx.x - y_start)] + k+1); thetaTemp[(threadIdx.x - y_start)* F/2 + k/2] = theta; } } //must be the last iteration; no need to check //not enough theta to copy, set zero else memset(&thetaTemp[(threadIdx.x - y_start)*F/2 + y_start/2], 0, (y_end - y_start)*sizeof(float)); } */ __syncthreads(); /////////////////////////////////////////////////////////////////////////////////////////////////////////// //tile: 10*10 if(threadIdx.x < 55 ){ for(int k = 0; k < SCAN_BATCH; k++){ temp0 += xTemp[tile_x/2 + k*F/2].x * xTemp[tile_y/2 + k*F/2].x; temp1 += xTemp[tile_x/2 + k*F/2].x * xTemp[tile_y/2 + k*F/2].y; temp2 += xTemp[tile_x/2 + k*F/2].x * xTemp[tile_y/2 +1 + k*F/2].x; temp3 += xTemp[tile_x/2 + k*F/2].x * xTemp[tile_y/2 +1 + k*F/2].y; temp4 += xTemp[tile_x/2 + k*F/2].x * xTemp[tile_y/2 +2 + k*F/2].x; temp5 += xTemp[tile_x/2 + k*F/2].x * xTemp[tile_y/2 +2 + k*F/2].y; temp6 += xTemp[tile_x/2 + k*F/2].x * xTemp[tile_y/2 +3 + k*F/2].x; temp7 += xTemp[tile_x/2 + k*F/2].x * xTemp[tile_y/2 +3 + k*F/2].y; temp8 += xTemp[tile_x/2 + k*F/2].x * xTemp[tile_y/2 +4 + k*F/2].x; temp9 += xTemp[tile_x/2 + k*F/2].x * xTemp[tile_y/2 +4 + k*F/2].y; temp10 += xTemp[tile_x/2 + k*F/2].y * xTemp[tile_y/2 + k*F/2].x; temp11 += xTemp[tile_x/2 + k*F/2].y * xTemp[tile_y/2 + k*F/2].y; temp12 += xTemp[tile_x/2 + k*F/2].y * xTemp[tile_y/2 +1 + k*F/2].x; temp13 += xTemp[tile_x/2 + k*F/2].y * xTemp[tile_y/2 +1 + k*F/2].y; temp14 += xTemp[tile_x/2 + k*F/2].y * xTemp[tile_y/2 +2 + k*F/2].x; temp15 += xTemp[tile_x/2 + k*F/2].y * xTemp[tile_y/2 +2 + k*F/2].y; temp16 += xTemp[tile_x/2 + k*F/2].y * xTemp[tile_y/2 +3 + k*F/2].x; temp17 += xTemp[tile_x/2 + k*F/2].y * xTemp[tile_y/2 +3 + k*F/2].y; temp18 += xTemp[tile_x/2 + k*F/2].y * xTemp[tile_y/2 +4 + k*F/2].x; temp19 += xTemp[tile_x/2 + k*F/2].y * xTemp[tile_y/2 +4 + k*F/2].y; temp20 += xTemp[tile_x/2 +1 + k*F/2].x * xTemp[tile_y/2 + k*F/2].x; temp21 += xTemp[tile_x/2 +1 + k*F/2].x * xTemp[tile_y/2 + k*F/2].y; temp22 += xTemp[tile_x/2 +1 + k*F/2].x * xTemp[tile_y/2 +1 + k*F/2].x; temp23 += xTemp[tile_x/2 +1 + k*F/2].x * xTemp[tile_y/2 +1 + k*F/2].y; temp24 += xTemp[tile_x/2 +1 + k*F/2].x * xTemp[tile_y/2 +2 + k*F/2].x; temp25 += xTemp[tile_x/2 +1 + k*F/2].x * xTemp[tile_y/2 +2 + k*F/2].y; temp26 += xTemp[tile_x/2 +1 + k*F/2].x * xTemp[tile_y/2 +3 + k*F/2].x; temp27 += xTemp[tile_x/2 +1 + k*F/2].x * xTemp[tile_y/2 +3 + k*F/2].y; temp28 += xTemp[tile_x/2 +1 + k*F/2].x * xTemp[tile_y/2 +4 + k*F/2].x; temp29 += xTemp[tile_x/2 +1 + k*F/2].x * xTemp[tile_y/2 +4 + k*F/2].y; temp30 += xTemp[tile_x/2 +1 + k*F/2].y * xTemp[tile_y/2 + k*F/2].x; temp31 += xTemp[tile_x/2 +1 + k*F/2].y * xTemp[tile_y/2 + k*F/2].y; temp32 += xTemp[tile_x/2 +1 + k*F/2].y * xTemp[tile_y/2 +1 + k*F/2].x; temp33 += xTemp[tile_x/2 +1 + k*F/2].y * xTemp[tile_y/2 +1 + k*F/2].y; temp34 += xTemp[tile_x/2 +1 + k*F/2].y * xTemp[tile_y/2 +2 + k*F/2].x; temp35 += xTemp[tile_x/2 +1 + k*F/2].y * xTemp[tile_y/2 +2 + k*F/2].y; temp36 += xTemp[tile_x/2 +1 + k*F/2].y * xTemp[tile_y/2 +3 + k*F/2].x; temp37 += xTemp[tile_x/2 +1 + k*F/2].y * xTemp[tile_y/2 +3 + k*F/2].y; temp38 += xTemp[tile_x/2 +1 + k*F/2].y * 
xTemp[tile_y/2 +4 + k*F/2].x; temp39 += xTemp[tile_x/2 +1 + k*F/2].y * xTemp[tile_y/2 +4 + k*F/2].y; temp40 += xTemp[tile_x/2 +2 + k*F/2].x * xTemp[tile_y/2 + k*F/2].x; temp41 += xTemp[tile_x/2 +2 + k*F/2].x * xTemp[tile_y/2 + k*F/2].y; temp42 += xTemp[tile_x/2 +2 + k*F/2].x * xTemp[tile_y/2 +1 + k*F/2].x; temp43 += xTemp[tile_x/2 +2 + k*F/2].x * xTemp[tile_y/2 +1 + k*F/2].y; temp44 += xTemp[tile_x/2 +2 + k*F/2].x * xTemp[tile_y/2 +2 + k*F/2].x; temp45 += xTemp[tile_x/2 +2 + k*F/2].x * xTemp[tile_y/2 +2 + k*F/2].y; temp46 += xTemp[tile_x/2 +2 + k*F/2].x * xTemp[tile_y/2 +3 + k*F/2].x; temp47 += xTemp[tile_x/2 +2 + k*F/2].x * xTemp[tile_y/2 +3 + k*F/2].y; temp48 += xTemp[tile_x/2 +2 + k*F/2].x * xTemp[tile_y/2 +4 + k*F/2].x; temp49 += xTemp[tile_x/2 +2 + k*F/2].x * xTemp[tile_y/2 +4 + k*F/2].y; temp50 += xTemp[tile_x/2 +2 + k*F/2].y * xTemp[tile_y/2 + k*F/2].x; temp51 += xTemp[tile_x/2 +2 + k*F/2].y * xTemp[tile_y/2 + k*F/2].y; temp52 += xTemp[tile_x/2 +2 + k*F/2].y * xTemp[tile_y/2 +1 + k*F/2].x; temp53 += xTemp[tile_x/2 +2 + k*F/2].y * xTemp[tile_y/2 +1 + k*F/2].y; temp54 += xTemp[tile_x/2 +2 + k*F/2].y * xTemp[tile_y/2 +2 + k*F/2].x; temp55 += xTemp[tile_x/2 +2 + k*F/2].y * xTemp[tile_y/2 +2 + k*F/2].y; temp56 += xTemp[tile_x/2 +2 + k*F/2].y * xTemp[tile_y/2 +3 + k*F/2].x; temp57 += xTemp[tile_x/2 +2 + k*F/2].y * xTemp[tile_y/2 +3 + k*F/2].y; temp58 += xTemp[tile_x/2 +2 + k*F/2].y * xTemp[tile_y/2 +4 + k*F/2].x; temp59 += xTemp[tile_x/2 +2 + k*F/2].y * xTemp[tile_y/2 +4 + k*F/2].y; temp60 += xTemp[tile_x/2 +3 + k*F/2].x * xTemp[tile_y/2 + k*F/2].x; temp61 += xTemp[tile_x/2 +3 + k*F/2].x * xTemp[tile_y/2 + k*F/2].y; temp62 += xTemp[tile_x/2 +3 + k*F/2].x * xTemp[tile_y/2 +1 + k*F/2].x; temp63 += xTemp[tile_x/2 +3 + k*F/2].x * xTemp[tile_y/2 +1 + k*F/2].y; temp64 += xTemp[tile_x/2 +3 + k*F/2].x * xTemp[tile_y/2 +2 + k*F/2].x; temp65 += xTemp[tile_x/2 +3 + k*F/2].x * xTemp[tile_y/2 +2 + k*F/2].y; temp66 += xTemp[tile_x/2 +3 + k*F/2].x * xTemp[tile_y/2 +3 + k*F/2].x; temp67 += xTemp[tile_x/2 +3 + k*F/2].x * xTemp[tile_y/2 +3 + k*F/2].y; temp68 += xTemp[tile_x/2 +3 + k*F/2].x * xTemp[tile_y/2 +4 + k*F/2].x; temp69 += xTemp[tile_x/2 +3 + k*F/2].x * xTemp[tile_y/2 +4 + k*F/2].y; temp70 += xTemp[tile_x/2 +3 + k*F/2].y * xTemp[tile_y/2 + k*F/2].x; temp71 += xTemp[tile_x/2 +3 + k*F/2].y * xTemp[tile_y/2 + k*F/2].y; temp72 += xTemp[tile_x/2 +3 + k*F/2].y * xTemp[tile_y/2 +1 + k*F/2].x; temp73 += xTemp[tile_x/2 +3 + k*F/2].y * xTemp[tile_y/2 +1 + k*F/2].y; temp74 += xTemp[tile_x/2 +3 + k*F/2].y * xTemp[tile_y/2 +2 + k*F/2].x; temp75 += xTemp[tile_x/2 +3 + k*F/2].y * xTemp[tile_y/2 +2 + k*F/2].y; temp76 += xTemp[tile_x/2 +3 + k*F/2].y * xTemp[tile_y/2 +3 + k*F/2].x; temp77 += xTemp[tile_x/2 +3 + k*F/2].y * xTemp[tile_y/2 +3 + k*F/2].y; temp78 += xTemp[tile_x/2 +3 + k*F/2].y * xTemp[tile_y/2 +4 + k*F/2].x; temp79 += xTemp[tile_x/2 +3 + k*F/2].y * xTemp[tile_y/2 +4 + k*F/2].y; temp80 += xTemp[tile_x/2 +4 + k*F/2].x * xTemp[tile_y/2 + k*F/2].x; temp81 += xTemp[tile_x/2 +4 + k*F/2].x * xTemp[tile_y/2 + k*F/2].y; temp82 += xTemp[tile_x/2 +4 + k*F/2].x * xTemp[tile_y/2 +1 + k*F/2].x; temp83 += xTemp[tile_x/2 +4 + k*F/2].x * xTemp[tile_y/2 +1 + k*F/2].y; temp84 += xTemp[tile_x/2 +4 + k*F/2].x * xTemp[tile_y/2 +2 + k*F/2].x; temp85 += xTemp[tile_x/2 +4 + k*F/2].x * xTemp[tile_y/2 +2 + k*F/2].y; temp86 += xTemp[tile_x/2 +4 + k*F/2].x * xTemp[tile_y/2 +3 + k*F/2].x; temp87 += xTemp[tile_x/2 +4 + k*F/2].x * xTemp[tile_y/2 +3 + k*F/2].y; temp88 += xTemp[tile_x/2 +4 + k*F/2].x * xTemp[tile_y/2 +4 + k*F/2].x; 
temp89 += xTemp[tile_x/2 +4 + k*F/2].x * xTemp[tile_y/2 +4 + k*F/2].y; temp90 += xTemp[tile_x/2 +4 + k*F/2].y * xTemp[tile_y/2 + k*F/2].x; temp91 += xTemp[tile_x/2 +4 + k*F/2].y * xTemp[tile_y/2 + k*F/2].y; temp92 += xTemp[tile_x/2 +4 + k*F/2].y * xTemp[tile_y/2 +1 + k*F/2].x; temp93 += xTemp[tile_x/2 +4 + k*F/2].y * xTemp[tile_y/2 +1 + k*F/2].y; temp94 += xTemp[tile_x/2 +4 + k*F/2].y * xTemp[tile_y/2 +2 + k*F/2].x; temp95 += xTemp[tile_x/2 +4 + k*F/2].y * xTemp[tile_y/2 +2 + k*F/2].y; temp96 += xTemp[tile_x/2 +4 + k*F/2].y * xTemp[tile_y/2 +3 + k*F/2].x; temp97 += xTemp[tile_x/2 +4 + k*F/2].y * xTemp[tile_y/2 +3 + k*F/2].y; temp98 += xTemp[tile_x/2 +4 + k*F/2].y * xTemp[tile_y/2 +4 + k*F/2].x; temp99 += xTemp[tile_x/2 +4 + k*F/2].y * xTemp[tile_y/2 +4 + k*F/2].y; } } } //end of iteration in copying from smem and aggregating in register /////////////////////////////////////////////////////////////////////////////////////////////////////////// __syncthreads(); ///* if(threadIdx.x < 55 ){ //copy output to gmem int index = blockIdx.x*F*F; xx[index + tile_x + tile_y*F] = temp0; xx[index + tile_x + (tile_y + 1)*F] = temp1; xx[index + tile_x + (tile_y + 2)*F] = temp2; xx[index + tile_x + (tile_y + 3)*F] = temp3; xx[index + tile_x + (tile_y + 4)*F] = temp4; xx[index + tile_x + (tile_y + 5)*F] = temp5; xx[index + tile_x + (tile_y + 6)*F] = temp6; xx[index + tile_x + (tile_y + 7)*F] = temp7; xx[index + tile_x + (tile_y + 8)*F] = temp8; xx[index + tile_x + (tile_y + 9)*F] = temp9; xx[index + tile_x + 1 + tile_y*F] = temp10; xx[index + tile_x + 1 + (tile_y + 1)*F] = temp11; xx[index + tile_x + 1 + (tile_y + 2)*F] = temp12; xx[index + tile_x + 1 + (tile_y + 3)*F] = temp13; xx[index + tile_x + 1 + (tile_y + 4)*F] = temp14; xx[index + tile_x + 1 + (tile_y + 5)*F] = temp15; xx[index + tile_x + 1 + (tile_y + 6)*F] = temp16; xx[index + tile_x + 1 + (tile_y + 7)*F] = temp17; xx[index + tile_x + 1 + (tile_y + 8)*F] = temp18; xx[index + tile_x + 1 + (tile_y + 9)*F] = temp19; xx[index + tile_x + 2 + tile_y*F] = temp20; xx[index + tile_x + 2 + (tile_y + 1)*F] = temp21; xx[index + tile_x + 2 + (tile_y + 2)*F] = temp22; xx[index + tile_x + 2 + (tile_y + 3)*F] = temp23; xx[index + tile_x + 2 + (tile_y + 4)*F] = temp24; xx[index + tile_x + 2 + (tile_y + 5)*F] = temp25; xx[index + tile_x + 2 + (tile_y + 6)*F] = temp26; xx[index + tile_x + 2 + (tile_y + 7)*F] = temp27; xx[index + tile_x + 2 + (tile_y + 8)*F] = temp28; xx[index + tile_x + 2 + (tile_y + 9)*F] = temp29; xx[index + tile_x + 3 + tile_y*F] = temp30; xx[index + tile_x + 3 + (tile_y + 1)*F] = temp31; xx[index + tile_x + 3 + (tile_y + 2)*F] = temp32; xx[index + tile_x + 3 + (tile_y + 3)*F] = temp33; xx[index + tile_x + 3 + (tile_y + 4)*F] = temp34; xx[index + tile_x + 3 + (tile_y + 5)*F] = temp35; xx[index + tile_x + 3 + (tile_y + 6)*F] = temp36; xx[index + tile_x + 3 + (tile_y + 7)*F] = temp37; xx[index + tile_x + 3 + (tile_y + 8)*F] = temp38; xx[index + tile_x + 3 + (tile_y + 9)*F] = temp39; xx[index + tile_x + 4 + tile_y*F] = temp40; xx[index + tile_x + 4 + (tile_y + 1)*F] = temp41; xx[index + tile_x + 4 + (tile_y + 2)*F] = temp42; xx[index + tile_x + 4 + (tile_y + 3)*F] = temp43; xx[index + tile_x + 4 + (tile_y + 4)*F] = temp44; xx[index + tile_x + 4 + (tile_y + 5)*F] = temp45; xx[index + tile_x + 4 + (tile_y + 6)*F] = temp46; xx[index + tile_x + 4 + (tile_y + 7)*F] = temp47; xx[index + tile_x + 4 + (tile_y + 8)*F] = temp48; xx[index + tile_x + 4 + (tile_y + 9)*F] = temp49; xx[index + tile_x + 5 + tile_y*F] = temp50; xx[index + tile_x + 5 + (tile_y + 
1)*F] = temp51; xx[index + tile_x + 5 + (tile_y + 2)*F] = temp52; xx[index + tile_x + 5 + (tile_y + 3)*F] = temp53; xx[index + tile_x + 5 + (tile_y + 4)*F] = temp54; xx[index + tile_x + 5 + (tile_y + 5)*F] = temp55; xx[index + tile_x + 5 + (tile_y + 6)*F] = temp56; xx[index + tile_x + 5 + (tile_y + 7)*F] = temp57; xx[index + tile_x + 5 + (tile_y + 8)*F] = temp58; xx[index + tile_x + 5 + (tile_y + 9)*F] = temp59; xx[index + tile_x + 6 + tile_y*F] = temp60; xx[index + tile_x + 6 + (tile_y + 1)*F] = temp61; xx[index + tile_x + 6 + (tile_y + 2)*F] = temp62; xx[index + tile_x + 6 + (tile_y + 3)*F] = temp63; xx[index + tile_x + 6 + (tile_y + 4)*F] = temp64; xx[index + tile_x + 6 + (tile_y + 5)*F] = temp65; xx[index + tile_x + 6 + (tile_y + 6)*F] = temp66; xx[index + tile_x + 6 + (tile_y + 7)*F] = temp67; xx[index + tile_x + 6 + (tile_y + 8)*F] = temp68; xx[index + tile_x + 6 + (tile_y + 9)*F] = temp69; xx[index + tile_x + 7 + tile_y*F] = temp70; xx[index + tile_x + 7 + (tile_y + 1)*F] = temp71; xx[index + tile_x + 7 + (tile_y + 2)*F] = temp72; xx[index + tile_x + 7 + (tile_y + 3)*F] = temp73; xx[index + tile_x + 7 + (tile_y + 4)*F] = temp74; xx[index + tile_x + 7 + (tile_y + 5)*F] = temp75; xx[index + tile_x + 7 + (tile_y + 6)*F] = temp76; xx[index + tile_x + 7 + (tile_y + 7)*F] = temp77; xx[index + tile_x + 7 + (tile_y + 8)*F] = temp78; xx[index + tile_x + 7 + (tile_y + 9)*F] = temp79; xx[index + tile_x + 8 + tile_y*F] = temp80; xx[index + tile_x + 8 + (tile_y + 1)*F] = temp81; xx[index + tile_x + 8 + (tile_y + 2)*F] = temp82; xx[index + tile_x + 8 + (tile_y + 3)*F] = temp83; xx[index + tile_x + 8 + (tile_y + 4)*F] = temp84; xx[index + tile_x + 8 + (tile_y + 5)*F] = temp85; xx[index + tile_x + 8 + (tile_y + 6)*F] = temp86; xx[index + tile_x + 8 + (tile_y + 7)*F] = temp87; xx[index + tile_x + 8 + (tile_y + 8)*F] = temp88; xx[index + tile_x + 8 + (tile_y + 9)*F] = temp89; xx[index + tile_x + 9 + tile_y*F] = temp90; xx[index + tile_x + 9 + (tile_y + 1)*F] = temp91; xx[index + tile_x + 9 + (tile_y + 2)*F] = temp92; xx[index + tile_x + 9 + (tile_y + 3)*F] = temp93; xx[index + tile_x + 9 + (tile_y + 4)*F] = temp94; xx[index + tile_x + 9 + (tile_y + 5)*F] = temp95; xx[index + tile_x + 9 + (tile_y + 6)*F] = temp96; xx[index + tile_x + 9 + (tile_y + 7)*F] = temp97; xx[index + tile_x + 9 + (tile_y + 8)*F] = temp98; xx[index + tile_x + 9 + (tile_y + 9)*F] = temp99; //symmetric if(tile_x!=tile_y){ xx[index + tile_y + 0+ (tile_x + 0)*F]= temp0; xx[index + tile_y + 1+ (tile_x + 0)*F]= temp1; xx[index + tile_y + 2+ (tile_x + 0)*F]= temp2; xx[index + tile_y + 3+ (tile_x + 0)*F]= temp3; xx[index + tile_y + 4+ (tile_x + 0)*F]= temp4; xx[index + tile_y + 5+ (tile_x + 0)*F]= temp5; xx[index + tile_y + 6+ (tile_x + 0)*F]= temp6; xx[index + tile_y + 7+ (tile_x + 0)*F]= temp7; xx[index + tile_y + 8+ (tile_x + 0)*F]= temp8; xx[index + tile_y + 9+ (tile_x + 0)*F]= temp9; xx[index + tile_y + 0+ (tile_x + 1)*F]= temp10; xx[index + tile_y + 1+ (tile_x + 1)*F]= temp11; xx[index + tile_y + 2+ (tile_x + 1)*F]= temp12; xx[index + tile_y + 3+ (tile_x + 1)*F]= temp13; xx[index + tile_y + 4+ (tile_x + 1)*F]= temp14; xx[index + tile_y + 5+ (tile_x + 1)*F]= temp15; xx[index + tile_y + 6+ (tile_x + 1)*F]= temp16; xx[index + tile_y + 7+ (tile_x + 1)*F]= temp17; xx[index + tile_y + 8+ (tile_x + 1)*F]= temp18; xx[index + tile_y + 9+ (tile_x + 1)*F]= temp19; xx[index + tile_y + 0+ (tile_x + 2)*F]= temp20; xx[index + tile_y + 1+ (tile_x + 2)*F]= temp21; xx[index + tile_y + 2+ (tile_x + 2)*F]= temp22; xx[index + tile_y + 3+ (tile_x + 
2)*F]= temp23; xx[index + tile_y + 4+ (tile_x + 2)*F]= temp24; xx[index + tile_y + 5+ (tile_x + 2)*F]= temp25; xx[index + tile_y + 6+ (tile_x + 2)*F]= temp26; xx[index + tile_y + 7+ (tile_x + 2)*F]= temp27; xx[index + tile_y + 8+ (tile_x + 2)*F]= temp28; xx[index + tile_y + 9+ (tile_x + 2)*F]= temp29; xx[index + tile_y + 0+ (tile_x + 3)*F]= temp30; xx[index + tile_y + 1+ (tile_x + 3)*F]= temp31; xx[index + tile_y + 2+ (tile_x + 3)*F]= temp32; xx[index + tile_y + 3+ (tile_x + 3)*F]= temp33; xx[index + tile_y + 4+ (tile_x + 3)*F]= temp34; xx[index + tile_y + 5+ (tile_x + 3)*F]= temp35; xx[index + tile_y + 6+ (tile_x + 3)*F]= temp36; xx[index + tile_y + 7+ (tile_x + 3)*F]= temp37; xx[index + tile_y + 8+ (tile_x + 3)*F]= temp38; xx[index + tile_y + 9+ (tile_x + 3)*F]= temp39; xx[index + tile_y + 0+ (tile_x + 4)*F]= temp40; xx[index + tile_y + 1+ (tile_x + 4)*F]= temp41; xx[index + tile_y + 2+ (tile_x + 4)*F]= temp42; xx[index + tile_y + 3+ (tile_x + 4)*F]= temp43; xx[index + tile_y + 4+ (tile_x + 4)*F]= temp44; xx[index + tile_y + 5+ (tile_x + 4)*F]= temp45; xx[index + tile_y + 6+ (tile_x + 4)*F]= temp46; xx[index + tile_y + 7+ (tile_x + 4)*F]= temp47; xx[index + tile_y + 8+ (tile_x + 4)*F]= temp48; xx[index + tile_y + 9+ (tile_x + 4)*F]= temp49; xx[index + tile_y + 0+ (tile_x + 5)*F]= temp50; xx[index + tile_y + 1+ (tile_x + 5)*F]= temp51; xx[index + tile_y + 2+ (tile_x + 5)*F]= temp52; xx[index + tile_y + 3+ (tile_x + 5)*F]= temp53; xx[index + tile_y + 4+ (tile_x + 5)*F]= temp54; xx[index + tile_y + 5+ (tile_x + 5)*F]= temp55; xx[index + tile_y + 6+ (tile_x + 5)*F]= temp56; xx[index + tile_y + 7+ (tile_x + 5)*F]= temp57; xx[index + tile_y + 8+ (tile_x + 5)*F]= temp58; xx[index + tile_y + 9+ (tile_x + 5)*F]= temp59; xx[index + tile_y + 0+ (tile_x + 6)*F]= temp60; xx[index + tile_y + 1+ (tile_x + 6)*F]= temp61; xx[index + tile_y + 2+ (tile_x + 6)*F]= temp62; xx[index + tile_y + 3+ (tile_x + 6)*F]= temp63; xx[index + tile_y + 4+ (tile_x + 6)*F]= temp64; xx[index + tile_y + 5+ (tile_x + 6)*F]= temp65; xx[index + tile_y + 6+ (tile_x + 6)*F]= temp66; xx[index + tile_y + 7+ (tile_x + 6)*F]= temp67; xx[index + tile_y + 8+ (tile_x + 6)*F]= temp68; xx[index + tile_y + 9+ (tile_x + 6)*F]= temp69; xx[index + tile_y + 0+ (tile_x + 7)*F]= temp70; xx[index + tile_y + 1+ (tile_x + 7)*F]= temp71; xx[index + tile_y + 2+ (tile_x + 7)*F]= temp72; xx[index + tile_y + 3+ (tile_x + 7)*F]= temp73; xx[index + tile_y + 4+ (tile_x + 7)*F]= temp74; xx[index + tile_y + 5+ (tile_x + 7)*F]= temp75; xx[index + tile_y + 6+ (tile_x + 7)*F]= temp76; xx[index + tile_y + 7+ (tile_x + 7)*F]= temp77; xx[index + tile_y + 8+ (tile_x + 7)*F]= temp78; xx[index + tile_y + 9+ (tile_x + 7)*F]= temp79; xx[index + tile_y + 0+ (tile_x + 8)*F]= temp80; xx[index + tile_y + 1+ (tile_x + 8)*F]= temp81; xx[index + tile_y + 2+ (tile_x + 8)*F]= temp82; xx[index + tile_y + 3+ (tile_x + 8)*F]= temp83; xx[index + tile_y + 4+ (tile_x + 8)*F]= temp84; xx[index + tile_y + 5+ (tile_x + 8)*F]= temp85; xx[index + tile_y + 6+ (tile_x + 8)*F]= temp86; xx[index + tile_y + 7+ (tile_x + 8)*F]= temp87; xx[index + tile_y + 8+ (tile_x + 8)*F]= temp88; xx[index + tile_y + 9+ (tile_x + 8)*F]= temp89; xx[index + tile_y + 0+ (tile_x + 9)*F]= temp90; xx[index + tile_y + 1+ (tile_x + 9)*F]= temp91; xx[index + tile_y + 2+ (tile_x + 9)*F]= temp92; xx[index + tile_y + 3+ (tile_x + 9)*F]= temp93; xx[index + tile_y + 4+ (tile_x + 9)*F]= temp94; xx[index + tile_y + 5+ (tile_x + 9)*F]= temp95; xx[index + tile_y + 6+ (tile_x + 9)*F]= temp96; xx[index + tile_y + 7+ (tile_x + 
9)*F]= temp97; xx[index + tile_y + 8+ (tile_x + 9)*F]= temp98; xx[index + tile_y + 9+ (tile_x + 9)*F]= temp99; } //regularization if(tile_x == tile_y){ for(int k = 0; k < tile; k++) xx[index + (tile_x+k)*(1+F)] += (end - start) * lambda; } } //*/ } } __global__ void __launch_bounds__(100, 4) updateXByBlock2pRegDsmemTile(float* tt, const int* csrRowIndex, const int* csrColIndex, const float lambda) { __shared__ float2 thetaTemp[SCAN_BATCH * F/2]; int row = blockIdx.x; if (row < M) { //this block needs to handle end - start thetaT columns int start = csrRowIndex[row]; int end = csrRowIndex[row + 1]; //slide through [start, end] by window size SCAN_BATCH int iterations = (end - start - 1)/SCAN_BATCH + 1; float temp0= 0, temp1= 0, temp2= 0, temp3= 0, temp4= 0, temp5= 0, temp6= 0, temp7= 0, temp8= 0, temp9 = 0; float temp10= 0, temp11= 0, temp12= 0, temp13= 0, temp14= 0, temp15= 0, temp16= 0, temp17= 0, temp18= 0, temp19 = 0; float temp20= 0, temp21= 0, temp22= 0, temp23= 0, temp24= 0, temp25= 0, temp26= 0, temp27= 0, temp28= 0, temp29 = 0; float temp30= 0, temp31= 0, temp32= 0, temp33= 0, temp34= 0, temp35= 0, temp36= 0, temp37= 0, temp38= 0, temp39 = 0; float temp40= 0, temp41= 0, temp42= 0, temp43= 0, temp44= 0, temp45= 0, temp46= 0, temp47= 0, temp48= 0, temp49 = 0; float temp50= 0, temp51= 0, temp52= 0, temp53= 0, temp54= 0, temp55= 0, temp56= 0, temp57= 0, temp58= 0, temp59 = 0; float temp60= 0, temp61= 0, temp62= 0, temp63= 0, temp64= 0, temp65= 0, temp66= 0, temp67= 0, temp68= 0, temp69 = 0; float temp70= 0, temp71= 0, temp72= 0, temp73= 0, temp74= 0, temp75= 0, temp76= 0, temp77= 0, temp78= 0, temp79 = 0; float temp80= 0, temp81= 0, temp82= 0, temp83= 0, temp84= 0, temp85= 0, temp86= 0, temp87= 0, temp88= 0, temp89 = 0; float temp90= 0, temp91= 0, temp92= 0, temp93= 0, temp94= 0, temp95= 0, temp96= 0, temp97= 0, temp98= 0, temp99 = 0; float2 theta; int tile = F/10; int tile_x = (threadIdx.x/tile) * tile;//start x of this tile int tile_y = (threadIdx.x%tile) * tile;//start y of this tile for (int iter = 0; iter < iterations; iter ++){ //copy texture --> smem, and sync if(threadIdx.x < SCAN_BATCH){ if(iter*SCAN_BATCH + threadIdx.x < end - start){ for (int k = 0; k < F; k += 2){ theta.x = tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + threadIdx.x] + k); theta.y = tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + threadIdx.x] + k+1); thetaTemp[threadIdx.x * F/2 + k/2] = theta; } } //must be the last iteration; no need to check //not enough theta to copy, set zero else memset(&thetaTemp[threadIdx.x*F/2], 0, F*sizeof(float)); } __syncthreads(); /////////////////////////////////////////////////////////////////////////////////////////////////////////// //tile: 10*10 for(int k = 0; k < SCAN_BATCH; k++){ temp0 += thetaTemp[tile_x/2 + k*F/2].x * thetaTemp[tile_y/2 + k*F/2].x; temp1 += thetaTemp[tile_x/2 + k*F/2].x * thetaTemp[tile_y/2 + k*F/2].y; temp2 += thetaTemp[tile_x/2 + k*F/2].x * thetaTemp[tile_y/2 +1 + k*F/2].x; temp3 += thetaTemp[tile_x/2 + k*F/2].x * thetaTemp[tile_y/2 +1 + k*F/2].y; temp4 += thetaTemp[tile_x/2 + k*F/2].x * thetaTemp[tile_y/2 +2 + k*F/2].x; temp5 += thetaTemp[tile_x/2 + k*F/2].x * thetaTemp[tile_y/2 +2 + k*F/2].y; temp6 += thetaTemp[tile_x/2 + k*F/2].x * thetaTemp[tile_y/2 +3 + k*F/2].x; temp7 += thetaTemp[tile_x/2 + k*F/2].x * thetaTemp[tile_y/2 +3 + k*F/2].y; temp8 += thetaTemp[tile_x/2 + k*F/2].x * thetaTemp[tile_y/2 +4 + k*F/2].x; temp9 += thetaTemp[tile_x/2 + k*F/2].x * thetaTemp[tile_y/2 +4 + k*F/2].y; temp10 += 
thetaTemp[tile_x/2 + k*F/2].y * thetaTemp[tile_y/2 + k*F/2].x; temp11 += thetaTemp[tile_x/2 + k*F/2].y * thetaTemp[tile_y/2 + k*F/2].y; temp12 += thetaTemp[tile_x/2 + k*F/2].y * thetaTemp[tile_y/2 +1 + k*F/2].x; temp13 += thetaTemp[tile_x/2 + k*F/2].y * thetaTemp[tile_y/2 +1 + k*F/2].y; temp14 += thetaTemp[tile_x/2 + k*F/2].y * thetaTemp[tile_y/2 +2 + k*F/2].x; temp15 += thetaTemp[tile_x/2 + k*F/2].y * thetaTemp[tile_y/2 +2 + k*F/2].y; temp16 += thetaTemp[tile_x/2 + k*F/2].y * thetaTemp[tile_y/2 +3 + k*F/2].x; temp17 += thetaTemp[tile_x/2 + k*F/2].y * thetaTemp[tile_y/2 +3 + k*F/2].y; temp18 += thetaTemp[tile_x/2 + k*F/2].y * thetaTemp[tile_y/2 +4 + k*F/2].x; temp19 += thetaTemp[tile_x/2 + k*F/2].y * thetaTemp[tile_y/2 +4 + k*F/2].y; temp20 += thetaTemp[tile_x/2 +1 + k*F/2].x * thetaTemp[tile_y/2 + k*F/2].x; temp21 += thetaTemp[tile_x/2 +1 + k*F/2].x * thetaTemp[tile_y/2 + k*F/2].y; temp22 += thetaTemp[tile_x/2 +1 + k*F/2].x * thetaTemp[tile_y/2 +1 + k*F/2].x; temp23 += thetaTemp[tile_x/2 +1 + k*F/2].x * thetaTemp[tile_y/2 +1 + k*F/2].y; temp24 += thetaTemp[tile_x/2 +1 + k*F/2].x * thetaTemp[tile_y/2 +2 + k*F/2].x; temp25 += thetaTemp[tile_x/2 +1 + k*F/2].x * thetaTemp[tile_y/2 +2 + k*F/2].y; temp26 += thetaTemp[tile_x/2 +1 + k*F/2].x * thetaTemp[tile_y/2 +3 + k*F/2].x; temp27 += thetaTemp[tile_x/2 +1 + k*F/2].x * thetaTemp[tile_y/2 +3 + k*F/2].y; temp28 += thetaTemp[tile_x/2 +1 + k*F/2].x * thetaTemp[tile_y/2 +4 + k*F/2].x; temp29 += thetaTemp[tile_x/2 +1 + k*F/2].x * thetaTemp[tile_y/2 +4 + k*F/2].y; temp30 += thetaTemp[tile_x/2 +1 + k*F/2].y * thetaTemp[tile_y/2 + k*F/2].x; temp31 += thetaTemp[tile_x/2 +1 + k*F/2].y * thetaTemp[tile_y/2 + k*F/2].y; temp32 += thetaTemp[tile_x/2 +1 + k*F/2].y * thetaTemp[tile_y/2 +1 + k*F/2].x; temp33 += thetaTemp[tile_x/2 +1 + k*F/2].y * thetaTemp[tile_y/2 +1 + k*F/2].y; temp34 += thetaTemp[tile_x/2 +1 + k*F/2].y * thetaTemp[tile_y/2 +2 + k*F/2].x; temp35 += thetaTemp[tile_x/2 +1 + k*F/2].y * thetaTemp[tile_y/2 +2 + k*F/2].y; temp36 += thetaTemp[tile_x/2 +1 + k*F/2].y * thetaTemp[tile_y/2 +3 + k*F/2].x; temp37 += thetaTemp[tile_x/2 +1 + k*F/2].y * thetaTemp[tile_y/2 +3 + k*F/2].y; temp38 += thetaTemp[tile_x/2 +1 + k*F/2].y * thetaTemp[tile_y/2 +4 + k*F/2].x; temp39 += thetaTemp[tile_x/2 +1 + k*F/2].y * thetaTemp[tile_y/2 +4 + k*F/2].y; temp40 += thetaTemp[tile_x/2 +2 + k*F/2].x * thetaTemp[tile_y/2 + k*F/2].x; temp41 += thetaTemp[tile_x/2 +2 + k*F/2].x * thetaTemp[tile_y/2 + k*F/2].y; temp42 += thetaTemp[tile_x/2 +2 + k*F/2].x * thetaTemp[tile_y/2 +1 + k*F/2].x; temp43 += thetaTemp[tile_x/2 +2 + k*F/2].x * thetaTemp[tile_y/2 +1 + k*F/2].y; temp44 += thetaTemp[tile_x/2 +2 + k*F/2].x * thetaTemp[tile_y/2 +2 + k*F/2].x; temp45 += thetaTemp[tile_x/2 +2 + k*F/2].x * thetaTemp[tile_y/2 +2 + k*F/2].y; temp46 += thetaTemp[tile_x/2 +2 + k*F/2].x * thetaTemp[tile_y/2 +3 + k*F/2].x; temp47 += thetaTemp[tile_x/2 +2 + k*F/2].x * thetaTemp[tile_y/2 +3 + k*F/2].y; temp48 += thetaTemp[tile_x/2 +2 + k*F/2].x * thetaTemp[tile_y/2 +4 + k*F/2].x; temp49 += thetaTemp[tile_x/2 +2 + k*F/2].x * thetaTemp[tile_y/2 +4 + k*F/2].y; temp50 += thetaTemp[tile_x/2 +2 + k*F/2].y * thetaTemp[tile_y/2 + k*F/2].x; temp51 += thetaTemp[tile_x/2 +2 + k*F/2].y * thetaTemp[tile_y/2 + k*F/2].y; temp52 += thetaTemp[tile_x/2 +2 + k*F/2].y * thetaTemp[tile_y/2 +1 + k*F/2].x; temp53 += thetaTemp[tile_x/2 +2 + k*F/2].y * thetaTemp[tile_y/2 +1 + k*F/2].y; temp54 += thetaTemp[tile_x/2 +2 + k*F/2].y * thetaTemp[tile_y/2 +2 + k*F/2].x; temp55 += thetaTemp[tile_x/2 +2 + k*F/2].y * thetaTemp[tile_y/2 +2 + 
k*F/2].y; temp56 += thetaTemp[tile_x/2 +2 + k*F/2].y * thetaTemp[tile_y/2 +3 + k*F/2].x; temp57 += thetaTemp[tile_x/2 +2 + k*F/2].y * thetaTemp[tile_y/2 +3 + k*F/2].y; temp58 += thetaTemp[tile_x/2 +2 + k*F/2].y * thetaTemp[tile_y/2 +4 + k*F/2].x; temp59 += thetaTemp[tile_x/2 +2 + k*F/2].y * thetaTemp[tile_y/2 +4 + k*F/2].y; temp60 += thetaTemp[tile_x/2 +3 + k*F/2].x * thetaTemp[tile_y/2 + k*F/2].x; temp61 += thetaTemp[tile_x/2 +3 + k*F/2].x * thetaTemp[tile_y/2 + k*F/2].y; temp62 += thetaTemp[tile_x/2 +3 + k*F/2].x * thetaTemp[tile_y/2 +1 + k*F/2].x; temp63 += thetaTemp[tile_x/2 +3 + k*F/2].x * thetaTemp[tile_y/2 +1 + k*F/2].y; temp64 += thetaTemp[tile_x/2 +3 + k*F/2].x * thetaTemp[tile_y/2 +2 + k*F/2].x; temp65 += thetaTemp[tile_x/2 +3 + k*F/2].x * thetaTemp[tile_y/2 +2 + k*F/2].y; temp66 += thetaTemp[tile_x/2 +3 + k*F/2].x * thetaTemp[tile_y/2 +3 + k*F/2].x; temp67 += thetaTemp[tile_x/2 +3 + k*F/2].x * thetaTemp[tile_y/2 +3 + k*F/2].y; temp68 += thetaTemp[tile_x/2 +3 + k*F/2].x * thetaTemp[tile_y/2 +4 + k*F/2].x; temp69 += thetaTemp[tile_x/2 +3 + k*F/2].x * thetaTemp[tile_y/2 +4 + k*F/2].y; temp70 += thetaTemp[tile_x/2 +3 + k*F/2].y * thetaTemp[tile_y/2 + k*F/2].x; temp71 += thetaTemp[tile_x/2 +3 + k*F/2].y * thetaTemp[tile_y/2 + k*F/2].y; temp72 += thetaTemp[tile_x/2 +3 + k*F/2].y * thetaTemp[tile_y/2 +1 + k*F/2].x; temp73 += thetaTemp[tile_x/2 +3 + k*F/2].y * thetaTemp[tile_y/2 +1 + k*F/2].y; temp74 += thetaTemp[tile_x/2 +3 + k*F/2].y * thetaTemp[tile_y/2 +2 + k*F/2].x; temp75 += thetaTemp[tile_x/2 +3 + k*F/2].y * thetaTemp[tile_y/2 +2 + k*F/2].y; temp76 += thetaTemp[tile_x/2 +3 + k*F/2].y * thetaTemp[tile_y/2 +3 + k*F/2].x; temp77 += thetaTemp[tile_x/2 +3 + k*F/2].y * thetaTemp[tile_y/2 +3 + k*F/2].y; temp78 += thetaTemp[tile_x/2 +3 + k*F/2].y * thetaTemp[tile_y/2 +4 + k*F/2].x; temp79 += thetaTemp[tile_x/2 +3 + k*F/2].y * thetaTemp[tile_y/2 +4 + k*F/2].y; temp80 += thetaTemp[tile_x/2 +4 + k*F/2].x * thetaTemp[tile_y/2 + k*F/2].x; temp81 += thetaTemp[tile_x/2 +4 + k*F/2].x * thetaTemp[tile_y/2 + k*F/2].y; temp82 += thetaTemp[tile_x/2 +4 + k*F/2].x * thetaTemp[tile_y/2 +1 + k*F/2].x; temp83 += thetaTemp[tile_x/2 +4 + k*F/2].x * thetaTemp[tile_y/2 +1 + k*F/2].y; temp84 += thetaTemp[tile_x/2 +4 + k*F/2].x * thetaTemp[tile_y/2 +2 + k*F/2].x; temp85 += thetaTemp[tile_x/2 +4 + k*F/2].x * thetaTemp[tile_y/2 +2 + k*F/2].y; temp86 += thetaTemp[tile_x/2 +4 + k*F/2].x * thetaTemp[tile_y/2 +3 + k*F/2].x; temp87 += thetaTemp[tile_x/2 +4 + k*F/2].x * thetaTemp[tile_y/2 +3 + k*F/2].y; temp88 += thetaTemp[tile_x/2 +4 + k*F/2].x * thetaTemp[tile_y/2 +4 + k*F/2].x; temp89 += thetaTemp[tile_x/2 +4 + k*F/2].x * thetaTemp[tile_y/2 +4 + k*F/2].y; temp90 += thetaTemp[tile_x/2 +4 + k*F/2].y * thetaTemp[tile_y/2 + k*F/2].x; temp91 += thetaTemp[tile_x/2 +4 + k*F/2].y * thetaTemp[tile_y/2 + k*F/2].y; temp92 += thetaTemp[tile_x/2 +4 + k*F/2].y * thetaTemp[tile_y/2 +1 + k*F/2].x; temp93 += thetaTemp[tile_x/2 +4 + k*F/2].y * thetaTemp[tile_y/2 +1 + k*F/2].y; temp94 += thetaTemp[tile_x/2 +4 + k*F/2].y * thetaTemp[tile_y/2 +2 + k*F/2].x; temp95 += thetaTemp[tile_x/2 +4 + k*F/2].y * thetaTemp[tile_y/2 +2 + k*F/2].y; temp96 += thetaTemp[tile_x/2 +4 + k*F/2].y * thetaTemp[tile_y/2 +3 + k*F/2].x; temp97 += thetaTemp[tile_x/2 +4 + k*F/2].y * thetaTemp[tile_y/2 +3 + k*F/2].y; temp98 += thetaTemp[tile_x/2 +4 + k*F/2].y * thetaTemp[tile_y/2 +4 + k*F/2].x; temp99 += thetaTemp[tile_x/2 +4 + k*F/2].y * thetaTemp[tile_y/2 +4 + k*F/2].y; } 
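// Illustrative note on the tile above (assuming F == 100, tile == 10): each thread owns the
// 10x10 sub-block of the F x F Gram matrix whose top-left corner is (tile_x, tile_y). For every
// theta column k staged in this SCAN_BATCH window, the k-loop adds the outer-product
// contribution theta_k[tile_x..tile_x+9] * theta_k[tile_y..tile_y+9]^T into the registers
// temp0..temp99; each float2 read covers two consecutive features, which is why the shared
// memory indices step by tile_x/2 and tile_y/2.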
/////////////////////////////////////////////////////////////////////////////////////////////////////////// __syncthreads(); } int index = blockIdx.x*F*F; ///* //copy output to gmem tt[index + tile_x + tile_y*F] = temp0; tt[index + tile_x + (tile_y + 1)*F] = temp1; tt[index + tile_x + (tile_y + 2)*F] = temp2; tt[index + tile_x + (tile_y + 3)*F] = temp3; tt[index + tile_x + (tile_y + 4)*F] = temp4; tt[index + tile_x + (tile_y + 5)*F] = temp5; tt[index + tile_x + (tile_y + 6)*F] = temp6; tt[index + tile_x + (tile_y + 7)*F] = temp7; tt[index + tile_x + (tile_y + 8)*F] = temp8; tt[index + tile_x + (tile_y + 9)*F] = temp9; tt[index + tile_x + 1 + tile_y*F] = temp10; tt[index + tile_x + 1 + (tile_y + 1)*F] = temp11; tt[index + tile_x + 1 + (tile_y + 2)*F] = temp12; tt[index + tile_x + 1 + (tile_y + 3)*F] = temp13; tt[index + tile_x + 1 + (tile_y + 4)*F] = temp14; tt[index + tile_x + 1 + (tile_y + 5)*F] = temp15; tt[index + tile_x + 1 + (tile_y + 6)*F] = temp16; tt[index + tile_x + 1 + (tile_y + 7)*F] = temp17; tt[index + tile_x + 1 + (tile_y + 8)*F] = temp18; tt[index + tile_x + 1 + (tile_y + 9)*F] = temp19; tt[index + tile_x + 2 + tile_y*F] = temp20; tt[index + tile_x + 2 + (tile_y + 1)*F] = temp21; tt[index + tile_x + 2 + (tile_y + 2)*F] = temp22; tt[index + tile_x + 2 + (tile_y + 3)*F] = temp23; tt[index + tile_x + 2 + (tile_y + 4)*F] = temp24; tt[index + tile_x + 2 + (tile_y + 5)*F] = temp25; tt[index + tile_x + 2 + (tile_y + 6)*F] = temp26; tt[index + tile_x + 2 + (tile_y + 7)*F] = temp27; tt[index + tile_x + 2 + (tile_y + 8)*F] = temp28; tt[index + tile_x + 2 + (tile_y + 9)*F] = temp29; tt[index + tile_x + 3 + tile_y*F] = temp30; tt[index + tile_x + 3 + (tile_y + 1)*F] = temp31; tt[index + tile_x + 3 + (tile_y + 2)*F] = temp32; tt[index + tile_x + 3 + (tile_y + 3)*F] = temp33; tt[index + tile_x + 3 + (tile_y + 4)*F] = temp34; tt[index + tile_x + 3 + (tile_y + 5)*F] = temp35; tt[index + tile_x + 3 + (tile_y + 6)*F] = temp36; tt[index + tile_x + 3 + (tile_y + 7)*F] = temp37; tt[index + tile_x + 3 + (tile_y + 8)*F] = temp38; tt[index + tile_x + 3 + (tile_y + 9)*F] = temp39; tt[index + tile_x + 4 + tile_y*F] = temp40; tt[index + tile_x + 4 + (tile_y + 1)*F] = temp41; tt[index + tile_x + 4 + (tile_y + 2)*F] = temp42; tt[index + tile_x + 4 + (tile_y + 3)*F] = temp43; tt[index + tile_x + 4 + (tile_y + 4)*F] = temp44; tt[index + tile_x + 4 + (tile_y + 5)*F] = temp45; tt[index + tile_x + 4 + (tile_y + 6)*F] = temp46; tt[index + tile_x + 4 + (tile_y + 7)*F] = temp47; tt[index + tile_x + 4 + (tile_y + 8)*F] = temp48; tt[index + tile_x + 4 + (tile_y + 9)*F] = temp49; tt[index + tile_x + 5 + tile_y*F] = temp50; tt[index + tile_x + 5 + (tile_y + 1)*F] = temp51; tt[index + tile_x + 5 + (tile_y + 2)*F] = temp52; tt[index + tile_x + 5 + (tile_y + 3)*F] = temp53; tt[index + tile_x + 5 + (tile_y + 4)*F] = temp54; tt[index + tile_x + 5 + (tile_y + 5)*F] = temp55; tt[index + tile_x + 5 + (tile_y + 6)*F] = temp56; tt[index + tile_x + 5 + (tile_y + 7)*F] = temp57; tt[index + tile_x + 5 + (tile_y + 8)*F] = temp58; tt[index + tile_x + 5 + (tile_y + 9)*F] = temp59; tt[index + tile_x + 6 + tile_y*F] = temp60; tt[index + tile_x + 6 + (tile_y + 1)*F] = temp61; tt[index + tile_x + 6 + (tile_y + 2)*F] = temp62; tt[index + tile_x + 6 + (tile_y + 3)*F] = temp63; tt[index + tile_x + 6 + (tile_y + 4)*F] = temp64; tt[index + tile_x + 6 + (tile_y + 5)*F] = temp65; tt[index + tile_x + 6 + (tile_y + 6)*F] = temp66; tt[index + tile_x + 6 + (tile_y + 7)*F] = temp67; tt[index + tile_x + 6 + (tile_y + 8)*F] = temp68; tt[index + 
tile_x + 6 + (tile_y + 9)*F] = temp69; tt[index + tile_x + 7 + tile_y*F] = temp70; tt[index + tile_x + 7 + (tile_y + 1)*F] = temp71; tt[index + tile_x + 7 + (tile_y + 2)*F] = temp72; tt[index + tile_x + 7 + (tile_y + 3)*F] = temp73; tt[index + tile_x + 7 + (tile_y + 4)*F] = temp74; tt[index + tile_x + 7 + (tile_y + 5)*F] = temp75; tt[index + tile_x + 7 + (tile_y + 6)*F] = temp76; tt[index + tile_x + 7 + (tile_y + 7)*F] = temp77; tt[index + tile_x + 7 + (tile_y + 8)*F] = temp78; tt[index + tile_x + 7 + (tile_y + 9)*F] = temp79; tt[index + tile_x + 8 + tile_y*F] = temp80; tt[index + tile_x + 8 + (tile_y + 1)*F] = temp81; tt[index + tile_x + 8 + (tile_y + 2)*F] = temp82; tt[index + tile_x + 8 + (tile_y + 3)*F] = temp83; tt[index + tile_x + 8 + (tile_y + 4)*F] = temp84; tt[index + tile_x + 8 + (tile_y + 5)*F] = temp85; tt[index + tile_x + 8 + (tile_y + 6)*F] = temp86; tt[index + tile_x + 8 + (tile_y + 7)*F] = temp87; tt[index + tile_x + 8 + (tile_y + 8)*F] = temp88; tt[index + tile_x + 8 + (tile_y + 9)*F] = temp89; tt[index + tile_x + 9 + tile_y*F] = temp90; tt[index + tile_x + 9 + (tile_y + 1)*F] = temp91; tt[index + tile_x + 9 + (tile_y + 2)*F] = temp92; tt[index + tile_x + 9 + (tile_y + 3)*F] = temp93; tt[index + tile_x + 9 + (tile_y + 4)*F] = temp94; tt[index + tile_x + 9 + (tile_y + 5)*F] = temp95; tt[index + tile_x + 9 + (tile_y + 6)*F] = temp96; tt[index + tile_x + 9 + (tile_y + 7)*F] = temp97; tt[index + tile_x + 9 + (tile_y + 8)*F] = temp98; tt[index + tile_x + 9 + (tile_y + 9)*F] = temp99; //*/ //regularization if(tile_x == tile_y){ for(int k = 0; k < tile; k++) tt[index + (tile_x+k)*(1+F)] += (end - start) * lambda; } } } void loadCSRSparseMatrix(const char* dataFile, const char* rowFile, const char* colFile, float* data, unsigned int* row, int* col) { printf("\n loading CSR...\n"); FILE *dFile = fopen(dataFile,"rb"); FILE *rFile = fopen(rowFile,"rb"); FILE *cFile = fopen(colFile,"rb"); if (!rFile||!dFile||!cFile) { printf("Unable to open file!"); return; } fread(&row[0], 4*(M+1) ,1, rFile); fread(&col[0], 4*NNZ ,1, cFile); fread(&data[0], 4*NNZ ,1, dFile); fclose(rFile); fclose(dFile); fclose(cFile); } void loadCSCSparseMatrix(const char* dataFile, const char* rowFile, const char* colFile, float * data, int* row, int* col) { printf("\n loading CSC...\n"); FILE *dFile = fopen(dataFile,"rb"); FILE *rFile = fopen(rowFile,"rb"); FILE *cFile = fopen(colFile,"rb"); if (!rFile||!dFile||!dFile) { printf("Unable to open file!"); return; } fread(&data[0], 4*NNZ ,1, dFile); fread(&row[0], 4*NNZ ,1, rFile); fread(&col[0], 4*(N+1) ,1, cFile); fclose(rFile); fclose(dFile); fclose(cFile); } void loadCSCSparseMatrixInBatch(const std::string dataFile, const std::string rowFile, const std::string colFile, float * data, int* row, int* col, long csc_nnz, int n) { printf("\n loading CSC from %s, %s, %s \n", dataFile.c_str(), rowFile.c_str(), colFile.c_str()); FILE *dFile = fopen(dataFile.c_str(),"rb"); FILE *rFile = fopen(rowFile.c_str(),"rb"); FILE *cFile = fopen(colFile.c_str(),"rb"); if (!rFile||!dFile||!dFile) { printf("Unable to open file!"); return; } fread(&data[0], 4*csc_nnz ,1, dFile); fread(&row[0], 4*csc_nnz ,1, rFile); fread(&col[0], 4*(n+1) ,1, cFile); fclose(rFile); fclose(dFile); fclose(cFile); } void loadCooSparseMatrixRowPtr(const char* rowFile, int* row) { printf("\n loading COO...\n"); FILE *rfile = fopen(rowFile,"rb"); fread(&row[0], 4*NNZ ,1, rfile); fclose(rfile); //FILE *file = fopen("./hugewiki_R_train_coo.row.bin", "wb"); //fwrite(row, 4*NNZ, 1, file); //fclose(file); } void 
loadCooSparseMatrix(const char* dataFile, const char* rowFile, const char* colFile, float* data, int* row, int* col, int nnz) { std::ifstream dfile(dataFile); std::ifstream rfile(rowFile); std::ifstream cfile(colFile); float d; int d_i = 0; while (dfile >> d) { //printf("%f ",d); data[d_i++] = d; } int r; int r_i = 0; while (rfile >> r) { //printf("%d ",r); row[r_i++] = r; } int c; int c_i = 0; while (cfile >> c) { //printf("%d ",c); col[c_i++] = c; } } inline void updateX(const int batch_id, const int batch_size, const long batch_offset, float * ythetaT, float * tt, float * XT_h, cublasHandle_t handle, const int m, const int n, const int f, const int nnz, float** devPtrTTHost, float **devPtrYthetaTHost, float **devPtrTT, float **devPtrYthetaT, int *P, int *INFO){ double t0 = seconds(); //left-hand side pointers for (int k = 0; k < batch_size; k++) { devPtrTTHost[k] = &tt[k * F * F]; } cudacall(cudaMemcpy(devPtrTT, devPtrTTHost, batch_size * sizeof(*devPtrTT),cudaMemcpyHostToDevice)); int * info2 = (int *) malloc(sizeof(int)); //right-hand side pointer for (int k = 0; k < batch_size; k++) { devPtrYthetaTHost[k] = &ythetaT[k * F]; } cudacall(cudaMemcpy(devPtrYthetaT, devPtrYthetaTHost, batch_size * sizeof(*devPtrYthetaT), cudaMemcpyHostToDevice)); //getrf then getrs //printf("\t\t\tbatch %d, prepare in secs: %f\n", batch_id, seconds() - t0); //t0 = seconds(); cublasSgetrfBatched(handle, F, devPtrTT, F, P, INFO, batch_size); //cudaDeviceSynchronize(); //cudaCheckError(); //printf("\t\t\tbatch %d, LU factorization of tt in secs: %f\n", batch_id, seconds() - t0); //t0 = seconds(); cublasSgetrsBatched(handle, CUBLAS_OP_N, F, 1, (const float ** ) devPtrTT, F, P, devPtrYthetaT, F, info2, batch_size); //cudaDeviceSynchronize(); //cudaCheckError(); //printf("\t\t\tbatch %d, solve after LU in secs: %f\n", batch_id, seconds() - t0); //t0 = seconds(); cudacall( cudaMemcpy(&XT_h[batch_offset * F], ythetaT, batch_size * F * sizeof(float), cudaMemcpyDeviceToHost) ); //printf("\t\t\tbatch %d, copy to host XT_h secs: %f\n", batch_id, seconds() - t0); } int updateTheta(const int batch_size, const int batch_offset, float * xx, float * yTXT, float * thetaT, cublasHandle_t handle, const int n, const int f){ float ** devPtrXXHost = (float**) malloc(batch_size * sizeof(devPtrXXHost[0])); float **devPtrXX = 0; for (int k = 0; k < batch_size; k++) { devPtrXXHost[k] = &xx[k * F * F]; } cudacall(cudaMalloc((void** ) &devPtrXX, batch_size * sizeof(*devPtrXX))); cudacall(cudaMemcpy(devPtrXX, devPtrXXHost, batch_size * sizeof(*devPtrXX), cudaMemcpyHostToDevice)); int *P, *INFO; cudacall(cudaMalloc(&P, f * batch_size * sizeof(int))); cudacall(cudaMalloc(&INFO, batch_size * sizeof(int))); cublasSgetrfBatched(handle, F, devPtrXX, F, P, INFO, batch_size); cudaDeviceSynchronize(); cudaCheckError(); //gettimeofday(&tv1, NULL); //elapsed = (tv1.tv_sec - tv0.tv_sec) // + (tv1.tv_usec - tv0.tv_usec) / 1000000.0; //printf("\t %f seconds. 
\n", elapsed); //printf("******* solve xx * thetaT = yTXT with CUDA 7.\n"); float **devPtrYTXTHost = 0; float **devPtrYTXT = 0; devPtrYTXTHost = (float**) malloc(batch_size * sizeof(devPtrYTXTHost[0])); for (int k = 0; k < batch_size; k++) { devPtrYTXTHost[k] = &yTXT[k * F]; } cudacall(cudaMalloc((void** ) &devPtrYTXT, batch_size * sizeof(*devPtrYTXT))); cudacall(cudaMemcpy(devPtrYTXT, devPtrYTXTHost, batch_size * sizeof(*devPtrYTXT),cudaMemcpyHostToDevice)); int * info2 = (int *) malloc(sizeof(int)); cublasSgetrsBatched(handle, CUBLAS_OP_N, F, 1, (const float ** ) devPtrXX, F, P, devPtrYTXT, F, info2, batch_size); cudaDeviceSynchronize(); cudaCheckError(); cudacall( cudaMemcpy( &thetaT[batch_offset * F], yTXT, batch_size * F * sizeof(float), cudaMemcpyDeviceToDevice) ); //gettimeofday(&tv2, NULL); //elapsed = (tv2.tv_sec - tv1.tv_sec) // + (tv2.tv_usec - tv1.tv_usec) / 1000000.0; //printf("\t %f seconds. \n", elapsed); /* //testing purpose float* yTXHost = (float *) malloc(f * n * sizeof(yTXHost[0])); cudacall(cudaMemcpy(yTXHost, yTXT, n * f * sizeof(float), cudaMemcpyDeviceToHost)); printf("\n*********yTXT***\n"); for (int i = 0; i < n * f; i++) { printf("%f\t", yTXHost[i]); } printf("\n"); */ /* float* thetaTHost = (float *) malloc(f * n * sizeof(thetaTHost[0])); cudacall( cudaMemcpy(thetaTHost, thetaT, n * f * sizeof(float),cudaMemcpyDeviceToHost)); printf("\n*********ThetaT***\n"); for (int i = 0; i < n * f; i++) { printf("%f\t", thetaTHost[i]); } printf("\n"); */ free(devPtrXXHost); cudaFree(devPtrXX); cudaFree(P); cudaFree(INFO); free(info2); free(devPtrYTXTHost); cudaFree(devPtrYTXT); return 0; } __global__ void RMSE(const float * csrVal, const int* cooRowIndex, const int* csrColIndex, const float * thetaT, const float * XT, float * error, const int nnz, const int error_size) { int i = blockDim.x*blockIdx.x + threadIdx.x; if (i < nnz) { int row = cooRowIndex[i]; int col = csrColIndex[i]; float e = csrVal[i]; //if(i%1000000==0) printf("row: %d, col: %d, csrVal[%d]: %f.\t", row, col, i, e); for (int k = 0; k < F; k++) { e -= tex1Dfetch(thetaTTexRef, F * col + k) * tex1Dfetch(xTTexRef, F * row + k); } atomicAdd(&error[i%error_size], e*e); //error[i] = e*e; //if(i%1000000==0) printf("error[%d]: %f.\n", i, e); } } __global__ void RMSE_CSC(const float * cscVal, const int* cscRowIndex, const int* cscColIndex, const float * thetaT, const float * XT, float * error, const int error_size, int* nan) { int col = blockIdx.x; int start = cscColIndex[col]; int end = cscColIndex[col + 1]; if (col < N && threadIdx.x < end - start) { for (int i = 0; threadIdx.x + i*blockDim.x < end - start; i++) { int index = start + i*blockDim.x + threadIdx.x; float e0 = cscVal[index]; float e = e0; //if(isnan(e)) printf("ERROR: NAN***\n"); int row = cscRowIndex[index]; //if(isfinite(((double)row))) printf("ERROR: NAN@@@\n"); for (int k = 0; k < F; k++) { e -= tex1Dfetch(thetaTTexRef, F * col + k) * XT[ F * row + k]; //TODO: fix this, a user/item does not show up in training //if(isnan(e1)) printf("e1: NAN!!!%d, %d, %d\n", index, col, row); //if(isnan(e2)) printf("e2: NAN!!!%d, %d, %d\n", index, col, row); } if(isnan(e)) { e = 0; atomicAdd(&nan[0],1); } //if(isnan(e)) printf("ERROR: NAN!!!%d, %d, %d\n", index, col, row); atomicAdd(&error[row%error_size], e*e); } } } int main() { printf("enable p2p among %d GPUs if available.\n", GPU_COUNT); enableP2P(GPU_COUNT); //initialize cublas, cusparse cublasHandle_t handle[GPU_COUNT]; cusparseHandle_t cushandle[GPU_COUNT]; for(int gpu_id = 0; gpu_id < GPU_COUNT; gpu_id ++){ 
cudacall(cudaSetDevice(gpu_id)); cublascall(cublasCreate(&handle[gpu_id])); cusparsecall(cusparseCreate(&cushandle[gpu_id])); } cudaSetDevice(DEVICEID); long m = M; long n = N; long f = F; long nnz = NNZ; float lambda = LAMBDA; unsigned int* csrRowIndexHostPtr; cudacall(cudaMallocHost( (void** ) &csrRowIndexHostPtr, (m + 1) * sizeof(int)) ); int* csrColIndexHostPtr; cudacall(cudaMallocHost( (void** ) &csrColIndexHostPtr, nnz * sizeof(int)) ); float* csrValHostPtr; cudacall(cudaMallocHost( (void** ) &csrValHostPtr, nnz * sizeof(float)) ); long csc_nnz[GPU_COUNT] = {777607310, 773335400, 777305655, 772895948}; long csc_m[GPU_COUNT] = {12520650, 12520650, 12520650, 12520653}; long csc_nnz_test[GPU_COUNT] = {86418516, 85913272, 86357875, 85883667}; float* cscValHostPtr[GPU_COUNT]; int* cscRowIndexHostPtr[GPU_COUNT]; int* cscColIndexHostPtr[GPU_COUNT]; for(int gpu_id = 0; gpu_id < GPU_COUNT; gpu_id ++){ cudacall(cudaMallocHost( (void** ) &cscValHostPtr[gpu_id], csc_nnz[gpu_id] * sizeof(float)) ); cudacall(cudaMallocHost( (void** ) &cscRowIndexHostPtr[gpu_id], csc_nnz[gpu_id] * sizeof(int)) ); cudacall(cudaMallocHost( (void** ) &cscColIndexHostPtr[gpu_id], (n+1) * sizeof(int)) ); } float* testCscValHostPtr[GPU_COUNT]; int* testCscRowIndexHostPtr[GPU_COUNT]; int* testCscColIndexHostPtr[GPU_COUNT]; for(int gpu_id = 0; gpu_id < GPU_COUNT; gpu_id ++){ cudacall(cudaMallocHost( (void** ) &testCscValHostPtr[gpu_id], csc_nnz_test[gpu_id] * sizeof(float)) ); cudacall(cudaMallocHost( (void** ) &testCscRowIndexHostPtr[gpu_id], csc_nnz_test[gpu_id] * sizeof(int)) ); cudacall(cudaMallocHost( (void** ) &testCscColIndexHostPtr[gpu_id], (n+1) * sizeof(int)) ); } //calculate X from thetaT first, need to initialize thetaT float* thetaTHost; cudacall(cudaMallocHost( (void** ) &thetaTHost, n * f * sizeof(float)) ); //index of XT_h need a long -- beyond what int32 can handle (2^31 or 2^32) float * XT_h; //cudacall (cudaHostAlloc((void **)&XT_h, f * m * sizeof(XT_h[0]), cudaHostAllocMapped) ); cudacall (cudaMallocHost((void **)&XT_h, f * m * sizeof(XT_h[0])) ); //initialize thetaT on host srand (time(0)); for (int k = 0; k < n * f; k++) thetaTHost[k] = 0.5*((float) rand() / (RAND_MAX)); //thetaTHost[k] = 0.1*((float) rand() / (float)RAND_MAX); //thetaTHost[k] = 0; //CG needs an initial value of XT memset(XT_h,0,m*f*sizeof(float)); //for (long k = 0; k < m * f; k++) // XT_h[k] = 0.5*((float) rand() / (RAND_MAX)); //device pointers int * csrRowIndex[GPU_COUNT]; int * csrColIndex[GPU_COUNT]; float * csrVal[GPU_COUNT]; float * thetaT[GPU_COUNT]; float * XT_d[GPU_COUNT]; float * cscVal[GPU_COUNT]; int * cscRowIndex[GPU_COUNT]; int * cscColIndex[GPU_COUNT]; printf("*******starting loading training and testing sets to host.\n"); loadCSRSparseMatrix("../data/hugewiki/hugewiki_R_train_csr.data", "../data/hugewiki/hugewiki_R_train_csr.indptr", "../data/hugewiki/hugewiki_R_train_csr.indices", csrValHostPtr, csrRowIndexHostPtr, csrColIndexHostPtr); omp_set_num_threads(GPU_COUNT); #pragma omp parallel { int gpu_id = omp_get_thread_num(); std::string str1("../data/hugewiki/hugewiki_R_train_csc.data.bin"); std::string str2("../data/hugewiki/hugewiki_R_train_csc.indices.bin"); std::string str3("../data/hugewiki/hugewiki_R_train_csc.indptr.bin"); //printf("%s",(str+to_string(gpu_id)).c_str()); loadCSCSparseMatrixInBatch((str1 + to_string(gpu_id)).c_str(), (str2 + to_string(gpu_id)).c_str(), (str3 + to_string(gpu_id)).c_str(), cscValHostPtr[gpu_id], cscRowIndexHostPtr[gpu_id], cscColIndexHostPtr[gpu_id], csc_nnz[gpu_id], n); } #pragma 
omp parallel { int gpu_id = omp_get_thread_num(); std::string str1("../data/hugewiki/hugewiki_R_test_csc.data.bin"); std::string str2("../data/hugewiki/hugewiki_R_test_csc.indices.bin"); std::string str3("../data/hugewiki/hugewiki_R_test_csc.indptr.bin"); //printf("%s",(str+to_string(gpu_id)).c_str()); loadCSCSparseMatrixInBatch((str1 + to_string(gpu_id)).c_str(), (str2 + to_string(gpu_id)).c_str(), (str3 + to_string(gpu_id)).c_str(), testCscValHostPtr[gpu_id], testCscRowIndexHostPtr[gpu_id], testCscColIndexHostPtr[gpu_id], csc_nnz_test[gpu_id], n); } printf("\n loaded csr to host; print data, row and col array\n"); for (int i = 0; i < nnz && i < 10; i++) { printf("%f ", csrValHostPtr[i]); } printf("\n"); for (int i = 0; i < nnz && i < 10; i++) { printf("%d ", csrRowIndexHostPtr[i]); } printf("\n"); for (int i = 0; i < nnz && i < 10; i++) { printf("%d ", csrColIndexHostPtr[i]); } printf("\n"); printf("\n loaded csc to host; print data, row and col array\n"); for (int i = 0; i < nnz && i < 10; i++) { printf("%f ", cscValHostPtr[0][i]); } printf("\n"); for (int i = 0; i < nnz && i < 10; i++) { printf("%d ", cscRowIndexHostPtr[0][i]); } printf("\n"); for (int i = 0; i < nnz && i < 10; i++) { printf("%d ", cscColIndexHostPtr[0][i]); } printf("\n"); printf("\n loaded csc test to host; print data, row and col array\n"); for (int i = 0; i < nnz && i < 10; i++) { printf("%f ", testCscValHostPtr[0][i]); } printf("\n"); for (int i = 0; i < nnz && i < 10; i++) { printf("%d ", testCscRowIndexHostPtr[0][i]); } printf("\n"); for (int i = 0; i < nnz && i < 10; i++) { printf("%d ", testCscColIndexHostPtr[0][i]); } printf("\n"); cudacall(cudaDeviceSetCacheConfig(cudaFuncCachePreferShared)); //64-bit smem access //http://acceleware.com/blog/maximizing-shared-memory-bandwidth-nvidia-kepler-gpus cudaDeviceSetSharedMemConfig(cudaSharedMemBankSizeEightByte); cudaSharedMemConfig pConfig; cudaDeviceGetSharedMemConfig (&pConfig); //printf("%d\n", pConfig); cudacall(cudaSetDevice(DEVICEID)); cusparseMatDescr_t descr; cusparsecall( cusparseCreateMatDescr(&descr)); cusparseSetMatType(descr, CUSPARSE_MATRIX_TYPE_GENERAL); cusparseSetMatIndexBase(descr, CUSPARSE_INDEX_BASE_ZERO); using namespace std; //variable used to time double t0; double elapsed = 0.0; struct timeval tv; struct timeval start_tv; const float alpha = 1.0f; const float beta = 0.0f; for(int gpu_id = 0; gpu_id < GPU_COUNT; gpu_id ++){ cudacall(cudaSetDevice(gpu_id)); cudacall(cudaMalloc((void** ) &thetaT[gpu_id], f * n * sizeof(float))); printf("*******copy memory to GPU %d...\n", gpu_id); cudacall(cudaMemcpy(thetaT[gpu_id], thetaTHost, (size_t ) (n * f * sizeof(float)), cudaMemcpyHostToDevice)); } //host pointers for cublas batch operations float ** devPtrTTHost[GPU_COUNT]; float **devPtrYthetaTHost[GPU_COUNT]; for(int iter = 0; iter < ITERS ; iter ++){ printf("---------------------------update X iteration %d ----------------------------------\n", iter); t0 = seconds(); //parallel in all GPUs, or only 1 int parallelism_level = GPU_COUNT; omp_set_num_threads(parallelism_level); //gpu memory to be used across batches //last batch size, the largest among batches int batch_size_max = m - (X_BATCH - 1)*(m/X_BATCH); int counter = 0; #pragma omp parallel shared (counter) { //this is the code on one gpu int gpu_id = omp_get_thread_num(); cudacall(cudaSetDevice(gpu_id)); //for batch solvers cudacall(cudaMallocHost( (void** ) &devPtrTTHost[gpu_id], batch_size_max * sizeof(*devPtrTTHost) ) ); cudacall(cudaMallocHost( (void** ) &devPtrYthetaTHost[gpu_id], 
batch_size_max * sizeof(*devPtrYthetaTHost) ) ); float * thetaT_local = thetaT[gpu_id]; cudacall (cudaBindTexture(NULL, thetaTTexRef, thetaT_local, n * f * sizeof(float))); float * tt = 0; //last batch size, the largest among batches int batch_size = m - (X_BATCH - 1)*(m/X_BATCH); //TODO: to get batch_nnz_max from csrRowIndexHostPtr int batch_nnz_max = 16000000; long batch_offset; cudacall(cudaMalloc((void** ) &csrRowIndex[gpu_id],(batch_size + 1) * sizeof(csrRowIndex[0][0]))); cudacall(cudaMalloc((void** ) &csrColIndex[gpu_id], batch_nnz_max * sizeof(csrColIndex[0][0]))); cudacall(cudaMalloc((void** ) &csrVal[gpu_id], batch_nnz_max * sizeof(csrVal[0][0]))); float * ytheta = 0; float * ythetaT = 0; cudacall(cudaMalloc((void** ) &ytheta, f * batch_size * sizeof(ytheta[0]))); cudacall(cudaMalloc((void** ) &ythetaT, f * batch_size * sizeof(ythetaT[0]))); #ifdef CUMF_TT_FP16 cudacall(cudaMalloc((void** ) &tt, f/2 * f * batch_size * sizeof(float))); #else cudacall(cudaMalloc((void** ) &tt, f * f * batch_size * sizeof(float))); #endif //for batch solvers float **devPtrTT = 0; float **devPtrYthetaT = 0; int *P, *INFO; cudacall(cudaMalloc((void** ) &devPtrTT, batch_size * sizeof(*devPtrTT))); cudacall(cudaMalloc(&P, f * batch_size * sizeof(int)) ); cudacall(cudaMalloc(&INFO, batch_size * sizeof(int) )); cudacall(cudaMalloc((void** ) &devPtrYthetaT, batch_size * sizeof(*devPtrYthetaT))); int batch_id = 0; //gpu 0 handles batches 0, 4, 8 ... //for(int batch_id = gpu_id; batch_id < X_BATCH; batch_id += parallelism_level) while(counter < X_BATCH) { #pragma omp critical { batch_id = counter; counter = counter + 1; } double t2 = 0; t2 = seconds(); if(batch_id != X_BATCH - 1) batch_size = m/X_BATCH; batch_offset = batch_id * (m/X_BATCH); int batch_nnz = csrRowIndexHostPtr[batch_offset + batch_size] - csrRowIndexHostPtr[batch_offset]; printf("\tbatch %d of %d; size: %d, offset: %d, batch_nnz %d, on gpu %d\n", batch_id, X_BATCH, batch_size, batch_offset, batch_nnz, gpu_id); //copy CSR rating matrices in cudacall(cudaMemcpy(csrRowIndex[gpu_id], &csrRowIndexHostPtr[batch_offset], (batch_size + 1) * sizeof(csrRowIndex[0][0]), cudaMemcpyHostToDevice)); //in place update: csrRowIndex --> csrRowIndex - csrRowIndex[0] zeroIndex<<<(batch_size + 1 - 1)/1024 + 1, 1024>>> (csrRowIndex[gpu_id], csrRowIndexHostPtr[batch_offset], batch_size + 1); cudacall(cudaMemcpy(csrColIndex[gpu_id], &csrColIndexHostPtr[csrRowIndexHostPtr[batch_offset]], batch_nnz * sizeof(csrColIndex[0][0]), cudaMemcpyHostToDevice)); cudacall(cudaMemcpy(csrVal[gpu_id], &csrValHostPtr[csrRowIndexHostPtr[batch_offset]], batch_nnz * sizeof(csrVal[0][0]),cudaMemcpyHostToDevice)); //process right hand: Y*theta cusparseScsrmm2(cushandle[gpu_id], CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_TRANSPOSE, batch_size, f, n, batch_nnz, &alpha, descr, csrVal[gpu_id], csrRowIndex[gpu_id], csrColIndex[gpu_id], thetaT[gpu_id], f, &beta, ytheta, batch_size); //transpose ytheta: ytheta: m*f; need ythetaT = (ytheta).T = f*m cublasSgeam(handle[gpu_id], CUBLAS_OP_T, CUBLAS_OP_N, f, batch_size, &alpha, (const float * ) ytheta, batch_size, &beta, ythetaT, f, ythetaT, f); cudaDeviceSynchronize(); cudaCheckError(); //generate left-hand: tt: batch_size*(F*F) printf("\t\t batch %d before tt kernel gpu: %d, seconds: %f \n", batch_id, gpu_id, seconds() - t2); double t1 = seconds(); #ifdef CUMF_TT_FP16 get_hermitian100_tt_fp16<<<batch_size, 64, SCAN_BATCH * f/2*sizeof(float2)>>> (0, (half2*) tt, csrRowIndex[gpu_id], csrColIndex[gpu_id], lambda, batch_size, 
thetaT[gpu_id]); #else //get_hermitian_x<<<batch_size, 64>>> // (tt, csrRowIndex[gpu_id], csrColIndex[gpu_id], lambda); //updateXByBlock2pRegDsmemTile<<<batch_size, F>>> // (tt, csrRowIndex[gpu_id], csrColIndex[gpu_id], lambda); get_hermitian100<<<batch_size, 64, SCAN_BATCH * f/2*sizeof(float2)>>> (0, tt, csrRowIndex[gpu_id], csrColIndex[gpu_id], lambda, batch_size, thetaT[gpu_id]); #endif cudaDeviceSynchronize(); cudaCheckError(); printf("\t\t batch %d tt kernel gpu: %d, seconds: %f \n", batch_id, gpu_id, seconds() - t1); t1 = seconds(); /* #ifdef CUMF_SAVE_MODEL if(iter==0&&batch_id==0) saveDeviceFloatArrayToFile(std::string("../log/0904/hugewiki.tt.hermitkernel"), f * f * batch_size, tt); #endif updateX(batch_id, batch_size, batch_offset, ythetaT, tt, XT_h, handle[gpu_id], m, n, f, nnz, devPtrTTHost[gpu_id], devPtrYthetaTHost[gpu_id], devPtrTT, devPtrYthetaT, P, INFO); #ifdef CUMF_SAVE_MODEL if(iter==0&&batch_id==0) saveDeviceFloatArrayToFile(std::string("../log/0904/hugewiki.lu.hermitkernel.xt"), f * batch_size, ythetaT); #endif */ ///* float * XT = 0; cudacall(cudaMalloc((void** ) &XT, f * batch_size * sizeof(XT[0]))); cudacall( cudaMemcpy(XT, &XT_h[batch_offset * F], batch_size * F * sizeof(float), cudaMemcpyHostToDevice) ); #ifdef CUMF_TT_FP16 printf("CG solver with fp16.\n"); updateXWithCGHost_tt_fp16(tt, XT, ythetaT, batch_size, f, 6); #else printf("CG solver with fp32.\n"); updateXWithCGHost(tt, XT, ythetaT, batch_size, 100, 100); #endif cudacall( cudaMemcpy(&XT_h[batch_offset * F], XT, batch_size * F * sizeof(float), cudaMemcpyDeviceToHost) ); #ifdef CUMF_SAVE_MODEL if(batch_id==0) saveDeviceFloatArrayToFile(std::string("../log/0903/hugewiki.cg.xt.")+ to_string(iter), f * batch_size, XT); #endif cudacall(cudaFree(XT)); //*/ printf("\t\t batch %d updateX by solving tt , gpu: %d, seconds: %f \n", batch_id, gpu_id, seconds() - t1); printf("\tbatch %d on gpu %d, runs %f \n", batch_id, gpu_id, seconds() - t2); }//end of update x batch printf("update X run %f seconds at gpu %d.\n", seconds() - t0, gpu_id); cudacall(cudaFree(ytheta)); cudacall(cudaFree(tt)); cudacall(cudaFree(csrVal[gpu_id])); cudacall(cudaFree(csrRowIndex[gpu_id])); cudacall(cudaFree(csrColIndex[gpu_id])); cudacall(cudaFree(ythetaT)); cudaFree(P); cudaFree(INFO); cudaFree(devPtrTT); cudaFree(devPtrYthetaT); cudacall(cudaFreeHost(devPtrTTHost[gpu_id])); cudacall(cudaFreeHost(devPtrYthetaTHost[gpu_id])); }//end of omp parallel loop printf("update X run %f seconds, gridSize: %d \n", seconds() - t0, m); gettimeofday(&start_tv, NULL); printf("---------------------------------- update theta iteration %d----------------------------------\n", iter); //in batches, when N is huge for(int batch_id = 0; batch_id< THETA_BATCH; batch_id ++){ int batch_size = 0; if(batch_id != THETA_BATCH - 1) batch_size = n/THETA_BATCH; else batch_size = n - batch_id*(n/THETA_BATCH); int batch_offset = batch_id * (n/THETA_BATCH); printf("batch %d / %d, size: %d\n", batch_id + 1, THETA_BATCH, batch_size); float * yTX[GPU_COUNT]; float * yTXT[GPU_COUNT]; const float alpha = 1.0f; const float beta = 0.0f; float * xx[GPU_COUNT]; omp_set_num_threads(GPU_COUNT); t0 = seconds(); #pragma omp parallel { int gpu_id = omp_get_thread_num(); long offset = 0; for(int k = 0; k < gpu_id; k ++) offset += csc_m[k]; cudacall(cudaSetDevice(gpu_id)); printf("\tGather xx on GPU %d.\n",gpu_id); double t1 = seconds(); //distribute XT[] to XT_d[i] cudacall(cudaMalloc((void** ) &XT_d[gpu_id], f * csc_m[gpu_id] * sizeof(float))); //printf("offset: %lld, copy XT_h[%lld] to 
XT_d[%d]:\n", offset, offset*f, gpu_id); cudacall(cudaMemcpy(XT_d[gpu_id], &XT_h[offset*f], f * csc_m[gpu_id] * sizeof(float), cudaMemcpyHostToDevice)); //copy csc to GPU int batch_nnz = cscColIndexHostPtr[gpu_id][batch_offset + batch_size] - cscColIndexHostPtr[gpu_id][batch_offset]; cudacall(cudaMalloc((void** ) &cscRowIndex[gpu_id],batch_nnz * sizeof(int))); cudacall(cudaMalloc((void** ) &cscColIndex[gpu_id], (batch_size + 1) * sizeof(int))); cudacall(cudaMalloc((void** ) &cscVal[gpu_id], batch_nnz * sizeof(float))); cudaMemcpyAsync(cscRowIndex[gpu_id], &cscRowIndexHostPtr[gpu_id][cscColIndexHostPtr[gpu_id][batch_offset]], batch_nnz * sizeof(cscRowIndex[0][0]), cudaMemcpyHostToDevice); cudaMemcpy(cscColIndex[gpu_id], &cscColIndexHostPtr[gpu_id][batch_offset], (batch_size + 1) * sizeof(cscColIndex[0][0]), cudaMemcpyHostToDevice); cudaMemcpy(cscVal[gpu_id], &cscValHostPtr[gpu_id][cscColIndexHostPtr[gpu_id][batch_offset]], batch_nnz * sizeof(cscVal[0][0]), cudaMemcpyHostToDevice); cudacall(cudaMalloc((void** ) &yTXT[gpu_id], f * batch_size * sizeof(float))); cudacall(cudaMalloc((void** ) &yTX[gpu_id], f * batch_size * sizeof(float))); cudacall(cudaMalloc((void** ) &xx[gpu_id], f * f * batch_size * sizeof(float))); printf("\t\tbatch %d memory alloc and cpy gpu %d seconds: %f.\n", batch_id, gpu_id, seconds() - t1); //in place update: cscColIndex --> cscColIndex - cscColIndex[0] zeroIndex<<<(batch_size + 1 - 1)/256 + 1, 256>>> (cscColIndex[gpu_id], cscColIndexHostPtr[gpu_id][batch_offset], batch_size + 1); //process right-hand side: (Y'*X)' cudaDeviceSynchronize(); cudaCheckError(); t1 = seconds(); cusparseScsrmm2(cushandle[gpu_id], CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_TRANSPOSE, batch_size, f, csc_m[gpu_id], batch_nnz, &alpha, descr, cscVal[gpu_id], cscColIndex[gpu_id], cscRowIndex[gpu_id], XT_d[gpu_id], f, &beta, yTX[gpu_id], batch_size); cublasSgeam(handle[gpu_id], CUBLAS_OP_T, CUBLAS_OP_N, f, batch_size, &alpha, (const float * ) yTX[gpu_id], batch_size, &beta, yTXT[gpu_id], f, yTXT[gpu_id], f); cudaDeviceSynchronize(); cudaCheckError(); printf("\t\tbatch %d right-hand side gpu %d seconds: %f.\n", batch_id, gpu_id, seconds() - t1); //process left-hand side: generate hessian matrix xx t1 = seconds(); get_hermitian_theta<<<batch_size, 64>>> (xx[gpu_id], cscRowIndex[gpu_id], cscColIndex[gpu_id], lambda, XT_d[gpu_id]); //get_hermitian100<<<batch_size, 64, SCAN_BATCH * f/2*sizeof(float2)>>> // (0, xx[gpu_id], cscColIndex[gpu_id], cscRowIndex[gpu_id], lambda, batch_size, XT_d[gpu_id]); //updateThetaByBlock2pRegDsmemTile<<<batch_size, F>>> // (xx[gpu_id], cscRowIndex[gpu_id], cscColIndex[gpu_id], lambda, XT_d[gpu_id]); cudaDeviceSynchronize(); cudaCheckError(); printf("\t\tbatch %d xx kernel gpu %d seconds: %f.\n", batch_id, gpu_id, seconds() - t1); t1 = seconds(); cudacall(cudaFree(yTX[gpu_id])); cudacall(cudaFree(cscRowIndex[gpu_id])); cudacall(cudaFree(cscColIndex[gpu_id])); cudacall(cudaFree(cscVal[gpu_id])); printf("\t\tbatch %d cudaFree gpu %d seconds: %f.\n", batch_id, gpu_id, seconds() - t1); } printf("\tbatch %d gather xx in %d GPUs run %f seconds.\n", batch_id, GPU_COUNT, seconds() - t0); t0 = seconds(); printf("\t\tadd xx before updateTheta on a given GPU.\n"); //xx[0] += xx[1] + xx[2] + xx[3] cudacall(cudaSetDevice(0)); float * xx_hotel; cudacall(cudaMalloc((void** ) &xx_hotel, f * f * batch_size * sizeof(float))); cudaCheckError(); for(int gpu_id = 1; gpu_id < GPU_COUNT; gpu_id ++){ //printf("copy from gpu:%d.\n", gpu_id); cudacall(cudaMemcpy(xx_hotel, xx[gpu_id], f * 
f * batch_size * sizeof(float), cudaMemcpyDefault)); cudaDeviceSynchronize(); cudaCheckError(); //printf("add.\n"); cublasSaxpy(handle[0], f * f * batch_size, &alpha, xx_hotel, 1, xx[0], 1); cudaDeviceSynchronize(); cudaCheckError(); } cudacall(cudaFree(xx_hotel)); printf("\t\tadd yTXT before updateTheta on a given GPU.\n"); //xx[0] += xx[1] + xx[2] + xx[3] float * yTXT_hotel; cudacall(cudaMalloc((void** ) &yTXT_hotel, f * batch_size * sizeof(float))); for(int gpu_id = 1; gpu_id < GPU_COUNT; gpu_id ++){ cudacall(cudaMemcpy(yTXT_hotel, yTXT[gpu_id], f * batch_size * sizeof(float), cudaMemcpyDefault)); cublasSaxpy(handle[0], f * batch_size, &alpha, yTXT_hotel, 1, yTXT[0], 1); cudaDeviceSynchronize(); cudaCheckError(); } cudacall(cudaFree(yTXT_hotel)); //printf("*******invoke updateTheta with batch_size: %d, batch_offset: %d.\n", batch_size, batch_offset); updateTheta(batch_size, batch_offset, xx[0], yTXT[0], thetaT[0], handle[0], n, f); printf("\tbatch: %d gather and updateTheta in one GPU run %f seconds.\n", batch_id, seconds() - t0); for(int gpu_id = 0; gpu_id < GPU_COUNT; gpu_id ++){ cudacall(cudaFree(xx[gpu_id])); cudacall(cudaFree(yTXT[gpu_id])); cudacall(cudaFree(XT_d[gpu_id])); } }//end of update theta batches //propagate thetaT[0] to non-anchor devices for(int gpu_id = 1; gpu_id < GPU_COUNT; gpu_id ++) cudacall( cudaMemcpy(thetaT[gpu_id], thetaT[0], n * F * sizeof(float), cudaMemcpyDeviceToDevice) ); gettimeofday(&tv, NULL); elapsed = (tv.tv_sec - start_tv.tv_sec) + (tv.tv_usec - start_tv.tv_usec) / 1000000.0; printf("update theta run %f seconds, gridSize: %d.\n", elapsed, n); ////////////////////////////////////////////////////////////////////////////////////////////////// printf("Calculate RMSE in batches.\n"); //has to calculate in batches since cooRowIndex + csrColIndex + csrVal is so big cudacall(cudaSetDevice(0)); float * errors_train = 0; float * errors_test = 0; int error_size = 4096; int* nan_train = 0; int* nan_test = 0; cudacall(cudaMalloc((void** ) &errors_train, error_size * sizeof(errors_train[0]))); cudacall(cudaMemset(errors_train, 0, error_size*sizeof(float)) ); cudacall(cudaMalloc((void** ) &errors_test, error_size * sizeof(errors_test[0]))); cudacall(cudaMemset(errors_test, 0, error_size*sizeof(float)) ); for(int batch_id = 0; batch_id < GPU_COUNT; batch_id ++){ printf("iteration: %d\n", batch_id); int row_offset = 0; for(int k = 0; k < batch_id; k ++){ row_offset += csc_m[k]; } float * XT_small; int * cscRowIndex_small; int * cscColIndex_small; float * cscVal_small; cudacall(cudaMalloc((void** ) &XT_small, f * csc_m[batch_id] * sizeof(float))); cudacall(cudaMemcpy(XT_small, &XT_h[(long) row_offset*f], f * csc_m[batch_id] * sizeof(float), cudaMemcpyHostToDevice)); printf("cal train rmse in batch: %d/%d, nnz:%d, n(col): %d, \n", batch_id, GPU_COUNT, csc_nnz[batch_id], n); cudacall(cudaMalloc((void** ) &cscRowIndex_small,csc_nnz[batch_id] * sizeof(int))); cudacall(cudaMalloc((void** ) &cscColIndex_small, (n + 1) * sizeof(int))); cudacall(cudaMalloc((void** ) &cscVal_small, csc_nnz[batch_id] * sizeof(float))); cudacall(cudaMemcpy(cscRowIndex_small, cscRowIndexHostPtr[batch_id], csc_nnz[batch_id] * sizeof(int), cudaMemcpyHostToDevice)); cudacall(cudaMemcpy(cscColIndex_small, cscColIndexHostPtr[batch_id], (n + 1) * sizeof(int), cudaMemcpyHostToDevice)); cudacall(cudaMemcpy(cscVal_small, cscValHostPtr[batch_id], csc_nnz[batch_id] * sizeof(float), cudaMemcpyHostToDevice)); cudacall(cudaMalloc((void** ) &nan_train, sizeof(int))); cudacall( cudaMemset(nan_train, 0, 
sizeof(int)) ); cudacall(cudaMalloc((void** ) &nan_test, sizeof(int))); cudacall( cudaMemset(nan_test, 0, sizeof(int)) ); RMSE_CSC<<<n, 512>>>(cscVal_small, cscRowIndex_small, cscColIndex_small, thetaT[0], XT_small, errors_train, error_size, nan_train); cudaDeviceSynchronize(); cudaCheckError(); cudacall(cudaFree(cscRowIndex_small)); cudacall(cudaFree(cscColIndex_small)); cudacall(cudaFree(cscVal_small)); printf("cal test rmse in batch: %d/%d, nnz_test:%d, n(col): %d, \n", batch_id, GPU_COUNT, csc_nnz_test[batch_id], n); cudacall(cudaMalloc((void** ) &cscRowIndex_small,csc_nnz_test[batch_id] * sizeof(int))); cudacall(cudaMalloc((void** ) &cscColIndex_small, (n + 1) * sizeof(int))); cudacall(cudaMalloc((void** ) &cscVal_small, csc_nnz_test[batch_id] * sizeof(float))); cudacall(cudaMemcpy(cscRowIndex_small, testCscRowIndexHostPtr[batch_id], csc_nnz_test[batch_id] * sizeof(int), cudaMemcpyHostToDevice)); cudacall(cudaMemcpy(cscColIndex_small, testCscColIndexHostPtr[batch_id], (n + 1) * sizeof(int), cudaMemcpyHostToDevice)); cudacall(cudaMemcpy(cscVal_small, testCscValHostPtr[batch_id], csc_nnz_test[batch_id] * sizeof(float), cudaMemcpyHostToDevice)); RMSE_CSC<<<n, 512>>>(cscVal_small, cscRowIndex_small, cscColIndex_small, thetaT[0], XT_small, errors_test, error_size, nan_test); cudaDeviceSynchronize(); cudaCheckError(); int* nan_train_host = (int*) malloc (sizeof(int)); int* nan_test_host = (int*) malloc (sizeof(int)); cudaMemcpy(nan_train_host, nan_train, sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(nan_test_host, nan_test, sizeof(int), cudaMemcpyDeviceToHost); printf("train #nan: %d\n", *nan_train_host); printf("test #nan: %d\n", *nan_test_host); cudacall(cudaFree(nan_train)); cudacall(cudaFree(nan_test)); cudacall(cudaFree(cscRowIndex_small)); cudacall(cudaFree(cscColIndex_small)); cudacall(cudaFree(cscVal_small)); cudacall(cudaFree(XT_small)); } printf("summarize RMSE: \n"); float* rmse_train = (float*) malloc (sizeof(float)); cublascall( cublasSasum(handle[0], error_size, errors_train, 1, rmse_train) ); cudaDeviceSynchronize(); cudaCheckError(); float* rmse_test = (float*) malloc (sizeof(float)); cublascall( cublasSasum(handle[0], error_size, errors_test, 1, rmse_test) ); cudaDeviceSynchronize(); cudaCheckError(); printf("@@@@@@@@@@@@@@@@@@@ Train RMSE in iter %d: %f\n", iter, sqrt((*rmse_train)/nnz)); printf("@@@@@@@@@@@@@@@@@@@ Test RMSE in iter %d: %f\n", iter, sqrt((*rmse_test)/(NNZ_TEST - 12750))); cudacall(cudaFree(errors_train)); cudacall(cudaFree(errors_test)); //*/ } /* //save model to a file cudacall(cudaMemcpy(thetaTHost, thetaT[0], n * f * sizeof(float), cudaMemcpyDeviceToHost) ); FILE * xfile = fopen("XT.data", "wb"); FILE * thetafile = fopen("thetaT.data", "wb"); fwrite(XT_h, sizeof(float), m*f, xfile); fwrite(thetaTHost, sizeof(float), n*f, thetafile); fclose(xfile); fclose(thetafile); */ cudacall(cudaFreeHost(XT_h)); cudacall(cudaFreeHost(csrRowIndexHostPtr)); cudacall(cudaFreeHost(csrColIndexHostPtr)); cudacall(cudaFreeHost(csrValHostPtr)); cudaFreeHost(thetaTHost); for(int gpu_id = 0; gpu_id < GPU_COUNT; gpu_id ++){ cudacall(cudaFreeHost(cscValHostPtr[gpu_id])); cudacall(cudaFreeHost(cscRowIndexHostPtr[gpu_id])); cudacall(cudaFreeHost(cscColIndexHostPtr[gpu_id])); cudacall(cudaSetDevice(gpu_id)); //cudacall(cudaDeviceReset()); } printf("ALS Done.\n"); return 0; }
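// ---------------------------------------------------------------------------
// Illustrative sketch (hypothetical helper, not taken from the code above): every batched
// LU/CG solve in the X and theta updates amounts to the regularized ALS normal equation
//     (Theta_u^T * Theta_u + nnz_u * lambda * I) * x_u = Theta_u^T * r_u,
// where Theta_u stacks the F-dimensional factors of the items rated by user u and
// nnz_u = csrRowIndex[u+1] - csrRowIndex[u]. The function below solves one such dense
// F x F system on the host with plain Gaussian elimination; it is a minimal reference,
// not the method used by the kernels above.
static void solve_single_user_host(float *A /* F x F, row-major */, float *b /* F */, int f)
{
	// forward elimination without pivoting; the lambda term on the diagonal keeps the
	// system nonsingular for users with at least one rating
	for (int p = 0; p < f; p++) {
		float piv = A[p * f + p];
		for (int r = p + 1; r < f; r++) {
			float m = A[r * f + p] / piv;
			for (int c = p; c < f; c++)
				A[r * f + c] -= m * A[p * f + c];
			b[r] -= m * b[p];
		}
	}
	// back substitution: b is overwritten with the solution x_u
	for (int r = f - 1; r >= 0; r--) {
		for (int c = r + 1; c < f; c++)
			b[r] -= A[r * f + c] * b[c];
		b[r] /= A[r * f + r];
	}
}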
typedef enum test_enum { test_set_1, test_swap_1, test_add_1, test_negate_1, test_sub_1, test_mul_1, test_mul_high_1, test_sqr_1, test_sqr_high_1, test_div_1, test_rem_1, test_div_rem_1, test_sqrt_1, test_sqrt_rem_1, test_equals_1, test_equals_2, test_equals_3, test_compare_1, test_compare_2, test_compare_3, test_compare_4, test_extract_bits_1, test_insert_bits_1, test_get_ui32_set_ui32_1, test_add_ui32_1, test_sub_ui32_1, test_mul_ui32_1, test_div_ui32_1, test_rem_ui32_1, test_equals_ui32_1, test_equals_ui32_2, test_equals_ui32_3, test_equals_ui32_4, test_compare_ui32_1, test_compare_ui32_2, test_extract_bits_ui32_1, test_insert_bits_ui32_1, test_binary_inverse_ui32_1, test_gcd_ui32_1, test_mul_wide_1, test_sqr_wide_1, test_div_wide_1, test_rem_wide_1, test_div_rem_wide_1, test_sqrt_wide_1, test_sqrt_rem_wide_1, test_bitwise_and_1, test_bitwise_ior_1, test_bitwise_xor_1, test_bitwise_complement_1, test_bitwise_select_1, test_bitwise_mask_copy_1, test_bitwise_mask_and_1, test_bitwise_mask_ior_1, test_bitwise_mask_xor_1, test_bitwise_mask_select_1, test_shift_left_1, test_shift_right_1, test_rotate_left_1, test_rotate_right_1, test_pop_count_1, test_clz_1, test_ctz_1, test_accumulator_1, test_accumulator_2, test_binary_inverse_1, test_gcd_1, test_modular_inverse_1, test_modular_power_1, test_bn2mont_1, test_mont2bn_1, test_mont_mul_1, test_mont_sqr_1, test_mont_reduce_wide_1, test_barrett_div_1, test_barrett_rem_1, test_barrett_div_rem_1, test_barrett_div_wide_1, test_barrett_rem_wide_1, test_barrett_div_rem_wide_1 } test_t; template<test_t test, class params> struct implementation { public: __device__ __forceinline__ static void run(typename types<params>::input_t *inputs, typename types<params>::output_t *outputs, int32_t instance) { printf("TEST NOT IMPLEMENTED! 
FIX ME!\n"); } }; #include "tests/tests.h" static gmp_randstate_t _state; static uint32_t _seed=0; static uint32_t _bits=0; static uint32_t _count=0; static void *_cpu_data=NULL; static void *_gpu_data=NULL; #define $GPU(call) if((call)!=0) { printf("\nCall \"" #call "\" failed from %s, line %d\n", __FILE__, __LINE__); exit(1); } void zero_words(uint32_t *x, uint32_t count) { int index; for(index=0;index<count;index++) x[index]=0; } void print_words(uint32_t *x, uint32_t count) { int index; for(index=count-1;index>=0;index--) printf("%08X", x[index]); printf("\n"); } void copy_words(uint32_t *from, uint32_t *to, uint32_t count) { int index; for(index=0;index<count;index++) to[index]=from[index]; } int compare_words(uint32_t *x, uint32_t *y, uint32_t count) { int index; for(index=count-1;index>=0;index--) { if(x[index]!=y[index]) { if(x[index]>y[index]) return 1; else return -1; } } return 0; } void random_words(uint32_t *x, uint32_t count, gmp_randstate_t state) { int32_t index; for(index=0;index<count;index++) x[index]=gmp_urandomb_ui(state, 32); } void hard_random_words(uint32_t *x, uint32_t count, gmp_randstate_t state) { uint32_t values[6]={0x0, 0x1, 0x7FFFFFFF, 0x80000000, 0x80000001, 0xFFFFFFFF}; int32_t offset, bit, bits, index; switch(gmp_urandomb_ui(state, 16)%3) { case 0: for(index=0;index<count;index++) x[index]=gmp_urandomb_ui(state, 32); break; case 1: for(index=0;index<count;index++) x[index]=values[gmp_urandomb_ui(state, 16)%6]; break; case 2: zero_words(x, count); offset=0; while(offset<count*32) { bit=gmp_urandomb_ui(state, 16)%2; bits=gmp_urandomb_ui(state, 32)%(32*count/2)+16; if(bit==1) { if(bits>count*32-offset) bits=count*32-offset; while(bits>0) { if(offset%32==0 && bits>=32) { while(bits>=32) { x[offset/32]=0xFFFFFFFF; bits-=32; offset+=32; } } else { x[offset/32]=x[offset/32] + (1<<offset%32); bits--; offset++; } } } else offset+=bits; } break; } } template<class params> static void generate_data(uint32_t count) { typename types<params>::input_t *inputs; int32_t instance; // printf("generating %d\n", params::size); if(_cpu_data!=NULL) { free(_cpu_data); _cpu_data=NULL; } if(_gpu_data!=NULL) { $GPU(cudaFree(_gpu_data)); _gpu_data=NULL; } _cpu_data=malloc(sizeof(typename types<params>::input_t)*count); inputs=(typename types<params>::input_t *)_cpu_data; gmp_randseed_ui(_state, _seed); for(instance=0;instance<count;instance++) { hard_random_words(inputs[instance].h1._limbs, params::size/32, _state); hard_random_words(inputs[instance].h2._limbs, params::size/32, _state); random_words(inputs[instance].x1._limbs, params::size/32, _state); random_words(inputs[instance].x2._limbs, params::size/32, _state); random_words(inputs[instance].x3._limbs, params::size/32, _state); random_words(inputs[instance].u, 32, _state); } $GPU(cudaMalloc((void **)&_gpu_data, sizeof(typename types<params>::input_t)*count)); $GPU(cudaMemcpy(_gpu_data, _cpu_data, sizeof(typename types<params>::input_t)*count, cudaMemcpyHostToDevice)); } template<class params> static typename types<params>::input_t *cpu_data(uint32_t count) { if(params::size!=_bits || count>_count || _gpu_data==NULL) { if(_seed==0) { _seed=time(NULL); gmp_randinit_default(_state); } generate_data<params>(count); _bits=params::size; _count=count; } return (typename types<params>::input_t *)_cpu_data; } template<class params> static typename types<params>::input_t *gpu_data(uint32_t count) { if(params::size!=_bits || count>_count || _gpu_data==NULL) { if(_seed==0) { _seed=time(NULL); gmp_randinit_default(_state); } 
generate_data<params>(count); _bits=params::size; _count=count; } return (typename types<params>::input_t *)_gpu_data; } template<test_t TEST, class params> __global__ void gpu_kernel(typename types<params>::input_t *inputs, typename types<params>::output_t *outputs, uint32_t count) { implementation<TEST, params> impl; int32_t instance=(blockIdx.x * blockDim.x + threadIdx.x)/params::TPI; if(instance>=count) return; impl.run(inputs, outputs, instance); } template<test_t TEST, class params> void gpu_run(typename types<params>::input_t *inputs, typename types<params>::output_t *outputs, uint32_t count) { uint32_t TPB=(params::TPB==0) ? 128 : params::TPB; uint32_t TPI=params::TPI, IPB=TPB/TPI; uint32_t blocks=(count+IPB+1)/IPB; gpu_kernel<TEST, params><<<blocks, TPB>>>(inputs, outputs, count); } template<test_t TEST, class params> void cpu_run(typename types<params>::input_t *inputs, typename types<params>::output_t *outputs, uint32_t count) { implementation<TEST, params> impl; #pragma omp parallel for for(int index=0;index<count;index++) impl.run(inputs, outputs, index); } template<test_t TEST, class params> bool run_test(uint32_t count) { typename types<params>::input_t *cpu_inputs, *gpu_inputs; typename types<params>::output_t *compare, *cpu_outputs, *gpu_outputs; int instance; if(params::size>1024) count=count*(1024*1024/params::size)/1024; cpu_inputs=cpu_data<params>(count); gpu_inputs=gpu_data<params>(count); compare=(typename types<params>::output_t *)malloc(sizeof(typename types<params>::output_t)*count); cpu_outputs=(typename types<params>::output_t *)malloc(sizeof(typename types<params>::output_t)*count); memset(cpu_outputs, 0, sizeof(typename types<params>::output_t)*count); $GPU(cudaMalloc((void **)&gpu_outputs, sizeof(typename types<params>::output_t)*count)); $GPU(cudaMemset(gpu_outputs, 0, sizeof(typename types<params>::output_t)*count)); cpu_run<TEST, params>(cpu_inputs, cpu_outputs, count); gpu_run<TEST, params>(gpu_inputs, gpu_outputs, count); $GPU(cudaMemcpy(compare, gpu_outputs, sizeof(typename types<params>::output_t)*count, cudaMemcpyDeviceToHost)); for(instance=0;instance<count;instance++) { if(compare_words(cpu_outputs[instance].r1._limbs, compare[instance].r1._limbs, params::size/32)!=0 || compare_words(cpu_outputs[instance].r2._limbs, compare[instance].r2._limbs, params::size/32)!=0) { printf("Test failed at index %d\n", instance); printf("h1: "); print_words(cpu_inputs[instance].h1._limbs, params::size/32); printf("\n"); printf("h2: "); print_words(cpu_inputs[instance].h2._limbs, params::size/32); printf("\n"); printf("x1: "); print_words(cpu_inputs[instance].x1._limbs, params::size/32); printf("\n"); // printf("x2: "); // print_words(cpu_inputs[instance].x2._limbs, params::size/32); // printf("\n"); // printf("x3: "); // print_words(cpu_inputs[instance].x3._limbs, params::size/32); // printf("\n"); printf("u0: %08X u1: %08X u2: %08X\n\n", cpu_inputs[instance].u[0], cpu_inputs[instance].u[1], cpu_inputs[instance].u[2]); printf("CPU R1: "); print_words(cpu_outputs[instance].r1._limbs, params::size/32); printf("\n"); printf("GPU R1: "); print_words(compare[instance].r1._limbs, params::size/32); printf("\n"); printf("CPU R2: "); print_words(cpu_outputs[instance].r2._limbs, params::size/32); printf("\n"); printf("GPU R2: "); print_words(compare[instance].r2._limbs, params::size/32); printf("\n"); return false; } } free(compare); free(cpu_outputs); $GPU(cudaFree(gpu_outputs)); return true; } #define LONG_TEST 1000000 #define MEDIUM_TEST 100000 #define SHORT_TEST 10000 
#define TINY_TEST 1000
#define SINGLE_TEST 1

/*
int main() {
  run_test<test_add_1, 2048>(LONG_TEST);
  run_test<test_sub_1, 2048>(LONG_TEST);
}
*/

#include "gtest/gtest.h"
#include "unit_tests.cc"

int main(int argc, char **argv) {
  int nDevice=-1, result;

  cudaGetDeviceCount(&nDevice);
  if(nDevice<=0) {
    printf("Error no cuda device found. Aborting tests\n");
    exit(EXIT_FAILURE);
  }

  testing::InitGoogleTest(&argc, argv);
  result=RUN_ALL_TESTS();
  if(result!=0)
    printf("Please report random seed %08X along with failure\n", _seed);
  return result;
}
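// --- Hedged usage sketch (added for illustration; not from the original file) ---
// run_test<TEST, params>() above only reads params::size (operand width in bits),
// params::TPI (threads per instance) and params::TPB (threads per block), so a
// minimal parameter class plus a gtest case could look like the sketch below. The
// class name and the concrete 1024/8/128 values are invented here; the real
// parameter sets and TEST() cases live in unit_tests.cc, and types<params> may
// require members not visible in this file.
class test_params_1024_8 {
  public:
  static const uint32_t size=1024;  // bits per operand => params::size/32 limbs
  static const uint32_t TPI=8;      // threads cooperating on one instance
  static const uint32_t TPB=128;    // 0 would fall back to the 128 default in gpu_run

};

// A case wiring this into the framework would then look like:
//   TEST(sketch, add_1024) {
//     EXPECT_TRUE((run_test<test_add_1, test_params_1024_8>(TINY_TEST)));
//   }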
the_stack
#include <google/protobuf/text_format.h> #include <google/protobuf/io/zero_copy_stream_impl.h> #include <google/protobuf/io/coded_stream.h> #include <cmath> #include "caffe/common.hpp" #include "caffe/util/util_img.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/util/im2col.hpp" #include "caffe/proto/caffe.pb.h" namespace caffe { template <typename Dtype> __global__ void kernel_BiLinearResize(const int nthreads, const Dtype* src_data, const int src_height, const int src_width, Dtype* dst_data, const int dst_height, const int dst_width, const Dtype scale_h, const Dtype scale_w) { CUDA_KERNEL_LOOP(i, nthreads) { int dst_h = i /dst_width; Dtype fh = dst_h * scale_h; const int src_h = floor(fh); fh -= src_h; const Dtype w_h0 = std::abs(1.0f - fh); const Dtype w_h1 = std::abs(fh); const int dst_offset_1 = dst_h * dst_width; const int src_offset_1 = src_h * src_width; int dst_w = i %dst_width; Dtype fw = dst_w * scale_w; const int src_w = floor(fw); fw -= src_w; const Dtype w_w0 = std::abs(1.0f - fw); const Dtype w_w1 = std::abs(fw); const int dst_idx = dst_offset_1 + dst_w; const int src_idx = src_offset_1 + src_w; Dtype res = (w_h0 * w_w0 * src_data[src_idx]); if (src_w + 1 < src_width) res += (w_h0 * w_w1 * src_data[src_idx + 1]); if (src_h + 1 < src_height) res += (w_h1 * w_w0 * src_data[src_idx + src_width]); if (src_w + 1 < src_width && src_h + 1 < src_height) res += (w_h1 * w_w1 * src_data[src_idx + src_width + 1]); dst_data[dst_idx] = res; } } template <typename Dtype> void BiLinearResizeMat_gpu(const Dtype* src, const int src_height, const int src_width, Dtype* dst, const int dst_height, const int dst_width) { const Dtype scale_w = src_width / (Dtype)dst_width; const Dtype scale_h = src_height / (Dtype)dst_height; int loop_n = dst_height * dst_width; kernel_BiLinearResize<Dtype> <<<CAFFE_GET_BLOCKS(loop_n), CAFFE_CUDA_NUM_THREADS >>>( loop_n,src, src_height, src_width, dst, dst_height, dst_width, scale_h, scale_w); //CUDA_POST_KERNEL_CHECK; } template void BiLinearResizeMat_gpu(const float* src, const int src_height, const int src_width, float* dst, const int dst_height, const int dst_width); template void BiLinearResizeMat_gpu(const double* src, const int src_height, const int src_width, double* dst, const int dst_height, const int dst_width); template <typename Dtype> void ResizeBlob_gpu(const Blob<Dtype>* src, const int src_n, const int src_c, Blob<Dtype>* dst, const int dst_n, const int dst_c) { const int src_channels = src->channels(); const int src_height = src->height(); const int src_width = src->width(); const int src_offset = (src_n * src_channels + src_c) * src_height * src_width; const int dst_channels = dst->channels(); const int dst_height = dst->height(); const int dst_width = dst->width(); const int dst_offset = (dst_n * dst_channels + dst_c) * dst_height * dst_width; const Dtype* src_data = &(src->gpu_data()[src_offset]); Dtype* dst_data = &(dst->mutable_gpu_data()[dst_offset]); BiLinearResizeMat_gpu(src_data, src_height, src_width, dst_data, dst_height, dst_width); CUDA_POST_KERNEL_CHECK; } template void ResizeBlob_gpu(const Blob<float>* src, const int src_n, const int src_c, Blob<float>* dst, const int dst_n, const int dst_c); template void ResizeBlob_gpu(const Blob<double>* src, const int src_n, const int src_c, Blob<double>* dst, const int dst_n, const int dst_c); template <typename Dtype> __global__ void kernel_GetBiLinearResizeMatRules(const int nthreads, const int src_height, const int src_width, const int dst_height, const int dst_width, const 
Dtype scale_h, const Dtype scale_w, Dtype* loc1, Dtype* weight1, Dtype* loc2, Dtype* weight2, Dtype* loc3, Dtype* weight3, Dtype* loc4, Dtype* weight4) { CUDA_KERNEL_LOOP(index, nthreads) { int dst_h = index /dst_width; Dtype fh = dst_h * scale_h; const int src_h = floor(fh); fh -= src_h; const Dtype w_h0 = std::abs(1.0f - fh); const Dtype w_h1 = std::abs(fh); const int dst_offset_1 = dst_h * dst_width; const int src_offset_1 = src_h * src_width; int dst_w = index %dst_width; Dtype fw = dst_w * scale_w; const int src_w = floor(fw); fw -= src_w; const Dtype w_w0 = std::abs(1.0f - fw); const Dtype w_w1 = std::abs(fw); const int dst_idx = dst_offset_1 + dst_w; // dst_data[dst_idx] = 0; const int src_idx = src_offset_1 + src_w; loc1[dst_idx] = src_idx; weight1[dst_idx] = w_h0 * w_w0; loc2[dst_idx] = 0; weight2[dst_idx] = 0; weight3[dst_idx] = 0; loc3[dst_idx] = 0; loc4[dst_idx] = 0; weight4[dst_idx] = 0; if (src_w + 1 < src_width) { loc2[dst_idx] = src_idx + 1; weight2[dst_idx] = w_h0 * w_w1; // dst_data[dst_idx] += (w_h0 * w_w1 * src_data[src_idx + 1]); } if (src_h + 1 < src_height) { // dst_data[dst_idx] += (w_h1 * w_w0 * src_data[src_idx + src_width]); weight3[dst_idx] = w_h1 * w_w0; loc3[dst_idx] = src_idx + src_width; } if (src_w + 1 < src_width && src_h + 1 < src_height) { loc4[dst_idx] = src_idx + src_width + 1; weight4[dst_idx] = w_h1 * w_w1; // dst_data[dst_idx] += (w_h1 * w_w1 * src_data[src_idx + src_width + 1]); } } } template <typename Dtype> __global__ void kernel_ResizeBlob(const int nthreads,const int num,const int channels, const Dtype* src, const int src_height, const int src_width, Dtype* dst, const int dst_height, const int dst_width, const Dtype scale_h, const Dtype scale_w) { CUDA_KERNEL_LOOP(index, nthreads) { int i = index %( dst_height * dst_width); int c = (index/(dst_height * dst_width))%channels; int n = (index/(dst_height * dst_width))/channels; int src_offset = (n * channels + c) * src_height * src_width; int dst_offset = (n * channels + c) * dst_height * dst_width; const Dtype* src_data = src+src_offset; Dtype* dst_data = dst+dst_offset; int dst_h = i /dst_width; Dtype fh = dst_h * scale_h; const int src_h = floor(fh); fh -= src_h; const Dtype w_h0 = std::abs(1.0f - fh); const Dtype w_h1 = std::abs(fh); const int dst_offset_1 = dst_h * dst_width; const int src_offset_1 = src_h * src_width; int dst_w = i %dst_width; Dtype fw = dst_w * scale_w; const int src_w = floor(fw); fw -= src_w; const Dtype w_w0 = std::abs(1.0f - fw); const Dtype w_w1 = std::abs(fw); const int dst_idx = dst_offset_1 + dst_w; const int src_idx = src_offset_1 + src_w; Dtype res = (w_h0 * w_w0 * src_data[src_idx]); if (src_w + 1 < src_width) res += (w_h0 * w_w1 * src_data[src_idx + 1]); if (src_h + 1 < src_height) res += (w_h1 * w_w0 * src_data[src_idx + src_width]); if (src_w + 1 < src_width && src_h + 1 < src_height) res += (w_h1 * w_w1 * src_data[src_idx + src_width + 1]); dst_data[dst_idx] = res; } } /* // new version by Sifei Liu template <typename Dtype> __global__ void kernel_ResizeBlob(const int nthreads,const int num,const int channels, const Dtype* src, const int src_height, const int src_width, Dtype* dst, const int dst_height, const int dst_width, const Dtype scale_h, const Dtype scale_w) { CUDA_KERNEL_LOOP(index, nthreads) { int d_i = index %( dst_height * dst_width); int d_c = (index/(dst_height * dst_width))%channels; int d_n = (index/(dst_height * dst_width))/channels; int s_c = (index/(src_height * src_width))%channels; int s_n = (index/(src_height * src_width))/channels; int 
src_offset = (s_n * channels + s_c) * src_height * src_width; int dst_offset = (d_n * channels + d_c) * dst_height * dst_width; const Dtype* src_data = src+src_offset; Dtype* dst_data = dst+dst_offset; int dst_h = d_i /dst_width; Dtype fh = dst_h * scale_h; const int src_h = floor(fh); fh -= src_h; const Dtype w_h0 = std::abs(1.0f - fh); const Dtype w_h1 = std::abs(fh); const int dst_offset_1 = dst_h * dst_width; const int src_offset_1 = src_h * src_width; int dst_w = d_i %dst_width; Dtype fw = dst_w * scale_w; const int src_w = floor(fw); fw -= src_w; const Dtype w_w0 = std::abs(1.0f - fw); const Dtype w_w1 = std::abs(fw); const int dst_idx = dst_offset_1 + dst_w; const int src_idx = src_offset_1 + src_w; Dtype res = (w_h0 * w_w0 * src_data[src_idx]); if (src_w + 1 < src_width) res += (w_h0 * w_w1 * src_data[src_idx + 1]); if (src_h + 1 < src_height) res += (w_h1 * w_w0 * src_data[src_idx + src_width]); if (src_w + 1 < src_width && src_h + 1 < src_height) res += (w_h1 * w_w1 * src_data[src_idx + src_width + 1]); dst_data[dst_idx] = res; } }*/ template <typename Dtype> void ResizeBlob_gpu(const Blob<Dtype>* src,Blob<Dtype>* dst) { CHECK(src->num() == dst->num())<<"src->num() == dst->num()"; CHECK(src->channels() == dst->channels())<< "src->channels() == dst->channels()"; const int src_num = src->num(); const int src_channels = src->channels(); const int src_height = src->height(); const int src_width = src->width(); const int dst_channels = dst->channels(); const int dst_height = dst->height(); const int dst_width = dst->width(); const Dtype scale_w = src_width / (Dtype)dst_width; const Dtype scale_h = src_height / (Dtype)dst_height; int loop_n = dst_height * dst_width*dst_channels*src_num; const Dtype* src_data = src->gpu_data(); Dtype* dst_data = dst->mutable_gpu_data(); kernel_ResizeBlob<Dtype> <<<CAFFE_GET_BLOCKS(loop_n), CAFFE_CUDA_NUM_THREADS >>>(loop_n,src_num,src_channels, src_data, src_height,src_width, dst_data, dst_height, dst_width, scale_h,scale_w); CUDA_POST_KERNEL_CHECK; } template void ResizeBlob_gpu(const Blob<float>* src, Blob<float>* dst); template void ResizeBlob_gpu(const Blob<double>* src, Blob<double>* dst); template <typename Dtype> void GetBiLinearResizeMatRules_gpu( const int src_height, const int src_width, const int dst_height, const int dst_width, Dtype* loc1, Dtype* weight1, Dtype* loc2, Dtype* weight2, Dtype* loc3, Dtype* weight3, Dtype* loc4, Dtype* weight4) { const Dtype scale_w = src_width / (Dtype)dst_width; const Dtype scale_h = src_height / (Dtype)dst_height; int loop_n = dst_height * dst_width; kernel_GetBiLinearResizeMatRules<Dtype> <<<CAFFE_GET_BLOCKS(loop_n), CAFFE_CUDA_NUM_THREADS >>>( loop_n, src_height, src_width, dst_height, dst_width, scale_h, scale_w, loc1, weight1, loc2, weight2, loc3, weight3, loc4, weight4); CUDA_POST_KERNEL_CHECK; } template void GetBiLinearResizeMatRules_gpu( const int src_height, const int src_width, const int dst_height, const int dst_width, float* loc1, float* weight1, float* loc2, float* weight2, float* loc3, float* weight3, float* loc4, float* weight4); template void GetBiLinearResizeMatRules_gpu( const int src_height, const int src_width, const int dst_height, const int dst_width, double* loc1, double* weight1, double* loc2, double* weight2, double* loc3, double* weight3, double* loc4, double* weight4); template <typename Dtype> void ResizeBlob_gpu(const Blob<Dtype>* src,Blob<Dtype>* dst, Blob<Dtype>* loc1, Blob<Dtype>* loc2, Blob<Dtype>* loc3, Blob<Dtype>* loc4){ CHECK(src->num() == dst->num())<<"src->num() == 
dst->num()"; CHECK(src->channels() == dst->channels())<< "src->channels() == dst->channels()"; GetBiLinearResizeMatRules_gpu( src->height(),src->width(), dst->height(), dst->width(), loc1->mutable_gpu_data(), loc1->mutable_gpu_diff(), loc2->mutable_gpu_data(), loc2->mutable_gpu_diff(), loc3->mutable_gpu_data(), loc3->mutable_gpu_diff(), loc4->mutable_gpu_data(), loc4->mutable_gpu_diff()); ResizeBlob_gpu( src, dst) ; } template void ResizeBlob_gpu(const Blob<float>* src,Blob<float>* dst, Blob<float>* loc1, Blob<float>* loc2, Blob<float>* loc3, Blob<float>* loc4); template void ResizeBlob_gpu(const Blob<double>* src,Blob<double>* dst, Blob<double>* loc1, Blob<double>* loc2, Blob<double>* loc3, Blob<double>* loc4); /* template <typename Dtype> void GenerateSubBlobs_gpu(const Blob<Dtype>& src, Blob<Dtype>& dst,const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w) { const int nums_ = src.num(); const int channels_ = src.channels(); const int height_ = src.height(); const int width_ = src.width(); const int height_col_ =(height_ + 2 * pad_h - kernel_h) / stride_h + 1; const int width_col_ = (width_ + 2 * pad_w - kernel_w) / stride_w + 1; * * actually after im2col_v2, data is stored as * col_buffer_.Reshape(1*height_out_*width_out_, channels_ , kernel_h_ , kernel_w_); * * dst.Reshape(height_col_*width_col_*nums_,channels_, kernel_h, kernel_w); caffe::caffe_gpu_set(dst.count(),Dtype(0),dst.mutable_gpu_data()); for(int n = 0; n < nums_; n++){ const Dtype* src_data = src.gpu_data() + src.offset(n); Dtype* dst_data = dst.mutable_gpu_data() + dst.offset(n*height_col_*width_col_); caffe::im2col_v2_gpu(src_data, channels_, height_, width_, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dst_data); } } template void GenerateSubBlobs_gpu(const Blob<float>& src, Blob<float>& dst,const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w); template void GenerateSubBlobs_gpu(const Blob<double>& src, Blob<double>& dst,const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w); */ template <typename Dtype> __global__ void kernel_CropBlob(const int nthreads, const Dtype* src_data, Dtype* dst_data, const int num, const int channels, const int in_h, const int in_w, const int out_h, const int out_w, const int start_h, const int start_w) { CUDA_KERNEL_LOOP(index, nthreads) { int n = index/channels/out_h/out_w; int c = (index/(out_h*out_w))% channels; int h = (index%(out_h*out_w))/out_w; int w = (index%(out_h*out_w))%out_w; Dtype* dst_data_ptr =dst_data+ ((n* channels+c)*out_h )*out_w ; const Dtype* src_data_ptr = src_data + ((n* channels+c)*in_h )*in_w ; dst_data_ptr[h*out_w+w] = src_data_ptr[(h+start_h)*in_w + w+start_w]; } } template <typename Dtype> void CropBlobs_gpu( const Blob<Dtype>&src, const int start_h, const int start_w, const int end_h, const int end_w, Blob<Dtype>&dst) { const int in_h = src.height(); const int in_w = src.width(); const int num = src.num(); const int channels = src.channels(); const int out_h = end_h - start_h; const int out_w = end_w - start_w; CHECK(out_h > 0) <<" end_h should be larger than start_h"; CHECK(out_w > 0) <<" end_w should be larger than start_w"; CHECK(out_h <=in_h) <<" out_h should nor be larger than input_height"; CHECK(out_w <=in_w) <<" out_w should nor be larger than input_width"; dst.Reshape(num,channels,out_h,out_w); if((out_h != in_h) || (out_w != in_w)){ const int loop_n = 
num*channels*out_h*out_w; kernel_CropBlob <Dtype> <<< CAFFE_GET_BLOCKS(loop_n), CAFFE_CUDA_NUM_THREADS >>> (loop_n, src.gpu_data(), dst.mutable_gpu_data(), num, channels, in_h, in_w, out_h, out_w, start_h, start_w); } else { caffe::caffe_copy(src.count(),src.gpu_data(),dst.mutable_gpu_data()); } } template void CropBlobs_gpu( const Blob<float>&src, const int start_h, const int start_w, const int end_h, const int end_w, Blob<float>&dst); template void CropBlobs_gpu( const Blob<double>&src, const int start_h, const int start_w, const int end_h, const int end_w, Blob<double>&dst); template <typename Dtype> __global__ void kernel_CropBlob(const int nthreads, const Dtype* src_data, Dtype* dst_data, const int num, const int channels, const int in_h, const int in_w, const int dst_num, const int dst_h, const int dst_w, const int src_num_id, const int dst_num_id,const int out_h, const int out_w, const int start_h, const int start_w, const int dst_start_h, const int dst_start_w){ CUDA_KERNEL_LOOP(index, nthreads) { int c = (index/(out_h*out_w))% channels; int h = (index%(out_h*out_w))/out_w; int w = (index%(out_h*out_w))%out_w; Dtype* dst_data_ptr =dst_data+ ((dst_num_id* channels+c)*dst_h )*dst_w ; const Dtype* src_data_ptr = src_data + ((src_num_id* channels+c)*in_h )*in_w ; int true_src_h = h+start_h; int true_dst_h = h+dst_start_h; int true_src_w = w+start_w; int true_dst_w = w + dst_start_w; if(true_src_h >= 0 && true_src_h < in_h && true_src_w >= 0 && true_src_w < in_w && true_dst_h >= 0 && true_dst_h < dst_h && true_dst_w>= 0 && true_dst_w< dst_w ) dst_data_ptr[true_dst_h *dst_w + true_dst_w] = src_data_ptr[true_src_h * in_w + true_src_w]; } } template <typename Dtype> void CropBlobs_gpu( const Blob<Dtype>&src, const int src_num_id, const int start_h, const int start_w, const int end_h, const int end_w, Blob<Dtype>&dst, const int dst_num_id,const int dst_start_h , const int dst_start_w ){ const int in_h = src.height(); const int in_w = src.width(); const int dst_h = dst.height(); const int dst_w = dst.width(); const int channels = src.channels(); const int out_h = end_h - start_h; const int out_w = end_w - start_w; CHECK(out_h > 0) <<" end_h should be larger than start_h"; CHECK(out_w > 0) <<" end_w should be larger than start_w"; // CHECK(out_h <=in_h) <<" out_h should nor be larger than input_height"; // CHECK(out_w <=in_w) <<" out_w should nor be larger than input_width"; CHECK_GT(src.num(), src_num_id); CHECK_GT(dst.num(), dst_num_id); CHECK_EQ(channels, dst.channels()); // CHECK_GE(dst.height(), end_h); // CHECK_GE(dst.width(), end_w); const int loop_n = channels*out_h*out_w; kernel_CropBlob <Dtype> <<< CAFFE_GET_BLOCKS(loop_n), CAFFE_CUDA_NUM_THREADS >>> (loop_n, src.gpu_data(), dst.mutable_gpu_data(), src.num(), channels, in_h, in_w, dst.num(),dst_h,dst_w, src_num_id,dst_num_id, out_h, out_w, start_h, start_w, dst_start_h, dst_start_w); } template void CropBlobs_gpu( const Blob<float>&src, const int src_num_id, const int start_h, const int start_w, const int end_h, const int end_w, Blob<float>&dst, const int dst_num_id,const int dst_start_h , const int dst_start_w ); template void CropBlobs_gpu( const Blob<double>&src, const int src_num_id, const int start_h, const int start_w, const int end_h, const int end_w, Blob<double>&dst, const int dst_num_id,const int dst_start_h , const int dst_start_w ); /* template <typename Dtype> void ConcateSubImagesInBlobs_gpu(const Blob<Dtype>& src, Blob<Dtype>& dst,const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int 
stride_h, const int stride_w, const int out_img_h, const int out_img_w) { const int in_nums = src.num(); const int height_col_ =(out_img_h + 2 * pad_h - kernel_h) / stride_h + 1; const int width_col_ = (out_img_w + 2 * pad_w - kernel_w) / stride_w + 1; // std::cout<<"in_nums:"<<in_nums<<" kernel_h:"<<kernel_h<<" kernel_w:"<<kernel_w // <<" pad_h:"<<pad_h<<" pad_w:"<<pad_w<<" stride_h:"<<stride_h<< // " stride_w:"<<stride_w<<" out_img_h:"<<out_img_h <<" out_img_w:"<<out_img_w // << " height_col:"<<height_col_<<" width_col:"<<width_col_<<std::endl; dst.Reshape(in_nums/height_col_/width_col_,src.channels(), out_img_h, out_img_w); // std::cout<<"in_nums/height_col_/width_col_,src.channels(), out_img_h, out_img_w: "<< // in_nums/height_col_/width_col_<< " "<<src.channels()<<" "<<out_img_h<<" "<< // out_img_w<<std::endl; const int channels_ = dst.channels(); const int height_ = dst.height(); const int width_ = dst.width(); const int out_num = dst.num(); for(int n = 0; n < out_num; n++){ const Dtype* src_data = src.gpu_data() + src.offset(n*height_col_*width_col_); Dtype* dst_data = dst.mutable_gpu_data() + dst.offset(n); caffe::col2im_v2_gpu(src_data, channels_, height_, width_, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dst_data); } return; } template void ConcateSubImagesInBlobs_gpu(const Blob<float>& src, Blob<float>& dst,const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int out_img_h, const int out_img_w); template void ConcateSubImagesInBlobs_gpu(const Blob<double>& src, Blob<double>& dst,const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int out_img_h, const int out_img_w); */ // namespace caffe } #endif
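// --- Hedged host-side reference (added for illustration; not from the original file) ---
// Plain C++ version of the per-pixel arithmetic used by kernel_BiLinearResize /
// kernel_ResizeBlob above: floor() splits the scaled source coordinate into an integer
// tap position and a fractional part, the fractional parts become the four bilinear
// weights, and out-of-range right/bottom taps are skipped. The function name
// bilinear_at is invented here; it is meant only as a CPU check of the GPU output.
#include <cmath>

static float bilinear_at(const float* src, int src_height, int src_width,
                         int dst_height, int dst_width, int dst_h, int dst_w) {
  const float scale_h = src_height / (float)dst_height;  // same convention as BiLinearResizeMat_gpu
  const float scale_w = src_width  / (float)dst_width;

  float fh = dst_h * scale_h;
  const int src_h = (int)std::floor(fh);
  fh -= src_h;                                            // vertical fraction -> w_h1
  const float w_h0 = 1.0f - fh, w_h1 = fh;

  float fw = dst_w * scale_w;
  const int src_w = (int)std::floor(fw);
  fw -= src_w;                                            // horizontal fraction -> w_w1
  const float w_w0 = 1.0f - fw, w_w1 = fw;

  const float* tap = src + src_h * src_width + src_w;     // top-left tap, always valid
  float res = w_h0 * w_w0 * tap[0];
  if (src_w + 1 < src_width)                            res += w_h0 * w_w1 * tap[1];
  if (src_h + 1 < src_height)                           res += w_h1 * w_w0 * tap[src_width];
  if (src_w + 1 < src_width && src_h + 1 < src_height)  res += w_h1 * w_w1 * tap[src_width + 1];
  return res;
}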
the_stack
// workaround issue between gcc >= 4.7 and cuda 5.5 #if (defined __GNUC__) && (__GNUC__>4 || __GNUC_MINOR__>=7) #undef _GLIBCXX_ATOMIC_BUILTINS #undef _GLIBCXX_USE_INT128 #endif #define EIGEN_TEST_NO_LONGDOUBLE #define EIGEN_DEFAULT_DENSE_INDEX_TYPE int #include "main.h" #include "gpu_common.h" // Check that dense modules can be properly parsed by nvcc #include <Eigen/Dense> // struct Foo{ // EIGEN_DEVICE_FUNC // void operator()(int i, const float* mats, float* vecs) const { // using namespace Eigen; // // Matrix3f M(data); // // Vector3f x(data+9); // // Map<Vector3f>(data+9) = M.inverse() * x; // Matrix3f M(mats+i/16); // Vector3f x(vecs+i*3); // // using std::min; // // using std::sqrt; // Map<Vector3f>(vecs+i*3) << x.minCoeff(), 1, 2;// / x.dot(x);//(M.inverse() * x) / x.x(); // //x = x*2 + x.y() * x + x * x.maxCoeff() - x / x.sum(); // } // }; template<typename T> struct coeff_wise { EIGEN_DEVICE_FUNC void operator()(int i, const typename T::Scalar* in, typename T::Scalar* out) const { using namespace Eigen; T x1(in+i); T x2(in+i+1); T x3(in+i+2); Map<T> res(out+i*T::MaxSizeAtCompileTime); res.array() += (in[0] * x1 + x2).array() * x3.array(); } }; template<typename T> struct complex_sqrt { EIGEN_DEVICE_FUNC void operator()(int i, const typename T::Scalar* in, typename T::Scalar* out) const { using namespace Eigen; typedef typename T::Scalar ComplexType; typedef typename T::Scalar::value_type ValueType; const int num_special_inputs = 18; if (i == 0) { const ValueType nan = std::numeric_limits<ValueType>::quiet_NaN(); typedef Eigen::Vector<ComplexType, num_special_inputs> SpecialInputs; SpecialInputs special_in; special_in.setZero(); int idx = 0; special_in[idx++] = ComplexType(0, 0); special_in[idx++] = ComplexType(-0, 0); special_in[idx++] = ComplexType(0, -0); special_in[idx++] = ComplexType(-0, -0); // GCC's fallback sqrt implementation fails for inf inputs. // It is called when _GLIBCXX_USE_C99_COMPLEX is false or if // clang includes the GCC header (which temporarily disables // _GLIBCXX_USE_C99_COMPLEX) #if !defined(_GLIBCXX_COMPLEX) || \ (_GLIBCXX_USE_C99_COMPLEX && !defined(__CLANG_CUDA_WRAPPERS_COMPLEX)) const ValueType inf = std::numeric_limits<ValueType>::infinity(); special_in[idx++] = ComplexType(1.0, inf); special_in[idx++] = ComplexType(nan, inf); special_in[idx++] = ComplexType(1.0, -inf); special_in[idx++] = ComplexType(nan, -inf); special_in[idx++] = ComplexType(-inf, 1.0); special_in[idx++] = ComplexType(inf, 1.0); special_in[idx++] = ComplexType(-inf, -1.0); special_in[idx++] = ComplexType(inf, -1.0); special_in[idx++] = ComplexType(-inf, nan); special_in[idx++] = ComplexType(inf, nan); #endif special_in[idx++] = ComplexType(1.0, nan); special_in[idx++] = ComplexType(nan, 1.0); special_in[idx++] = ComplexType(nan, -1.0); special_in[idx++] = ComplexType(nan, nan); Map<SpecialInputs> special_out(out); special_out = special_in.cwiseSqrt(); } T x1(in + i); Map<T> res(out + num_special_inputs + i*T::MaxSizeAtCompileTime); res = x1.cwiseSqrt(); } }; template<typename T> struct complex_operators { EIGEN_DEVICE_FUNC void operator()(int i, const typename T::Scalar* in, typename T::Scalar* out) const { using namespace Eigen; typedef typename T::Scalar ComplexType; typedef typename T::Scalar::value_type ValueType; const int num_scalar_operators = 24; const int num_vector_operators = 23; // no unary + operator. int out_idx = i * (num_scalar_operators + num_vector_operators * T::MaxSizeAtCompileTime); // Scalar operators. 
const ComplexType a = in[i]; const ComplexType b = in[i + 1]; out[out_idx++] = +a; out[out_idx++] = -a; out[out_idx++] = a + b; out[out_idx++] = a + numext::real(b); out[out_idx++] = numext::real(a) + b; out[out_idx++] = a - b; out[out_idx++] = a - numext::real(b); out[out_idx++] = numext::real(a) - b; out[out_idx++] = a * b; out[out_idx++] = a * numext::real(b); out[out_idx++] = numext::real(a) * b; out[out_idx++] = a / b; out[out_idx++] = a / numext::real(b); out[out_idx++] = numext::real(a) / b; out[out_idx] = a; out[out_idx++] += b; out[out_idx] = a; out[out_idx++] -= b; out[out_idx] = a; out[out_idx++] *= b; out[out_idx] = a; out[out_idx++] /= b; const ComplexType true_value = ComplexType(ValueType(1), ValueType(0)); const ComplexType false_value = ComplexType(ValueType(0), ValueType(0)); out[out_idx++] = (a == b ? true_value : false_value); out[out_idx++] = (a == numext::real(b) ? true_value : false_value); out[out_idx++] = (numext::real(a) == b ? true_value : false_value); out[out_idx++] = (a != b ? true_value : false_value); out[out_idx++] = (a != numext::real(b) ? true_value : false_value); out[out_idx++] = (numext::real(a) != b ? true_value : false_value); // Vector versions. T x1(in + i); T x2(in + i + 1); const int res_size = T::MaxSizeAtCompileTime * num_scalar_operators; const int size = T::MaxSizeAtCompileTime; int block_idx = 0; Map<VectorX<ComplexType>> res(out + out_idx, res_size); res.segment(block_idx, size) = -x1; block_idx += size; res.segment(block_idx, size) = x1 + x2; block_idx += size; res.segment(block_idx, size) = x1 + x2.real(); block_idx += size; res.segment(block_idx, size) = x1.real() + x2; block_idx += size; res.segment(block_idx, size) = x1 - x2; block_idx += size; res.segment(block_idx, size) = x1 - x2.real(); block_idx += size; res.segment(block_idx, size) = x1.real() - x2; block_idx += size; res.segment(block_idx, size) = x1.array() * x2.array(); block_idx += size; res.segment(block_idx, size) = x1.array() * x2.real().array(); block_idx += size; res.segment(block_idx, size) = x1.real().array() * x2.array(); block_idx += size; res.segment(block_idx, size) = x1.array() / x2.array(); block_idx += size; res.segment(block_idx, size) = x1.array() / x2.real().array(); block_idx += size; res.segment(block_idx, size) = x1.real().array() / x2.array(); block_idx += size; res.segment(block_idx, size) = x1; res.segment(block_idx, size) += x2; block_idx += size; res.segment(block_idx, size) = x1; res.segment(block_idx, size) -= x2; block_idx += size; res.segment(block_idx, size) = x1; res.segment(block_idx, size).array() *= x2.array(); block_idx += size; res.segment(block_idx, size) = x1; res.segment(block_idx, size).array() /= x2.array(); block_idx += size; // Equality comparisons currently not functional on device // (std::equal_to<T> is host-only). // const T true_vector = T::Constant(true_value); // const T false_vector = T::Constant(false_value); // res.segment(block_idx, size) = (x1 == x2 ? true_vector : false_vector); // block_idx += size; // res.segment(block_idx, size) = (x1 == x2.real() ? true_vector : false_vector); // block_idx += size; // res.segment(block_idx, size) = (x1.real() == x2 ? true_vector : false_vector); // block_idx += size; // res.segment(block_idx, size) = (x1 != x2 ? true_vector : false_vector); // block_idx += size; // res.segment(block_idx, size) = (x1 != x2.real() ? true_vector : false_vector); // block_idx += size; // res.segment(block_idx, size) = (x1.real() != x2 ? 
true_vector : false_vector); // block_idx += size; } }; template<typename T> struct replicate { EIGEN_DEVICE_FUNC void operator()(int i, const typename T::Scalar* in, typename T::Scalar* out) const { using namespace Eigen; T x1(in+i); int step = x1.size() * 4; int stride = 3 * step; typedef Map<Array<typename T::Scalar,Dynamic,Dynamic> > MapType; MapType(out+i*stride+0*step, x1.rows()*2, x1.cols()*2) = x1.replicate(2,2); MapType(out+i*stride+1*step, x1.rows()*3, x1.cols()) = in[i] * x1.colwise().replicate(3); MapType(out+i*stride+2*step, x1.rows(), x1.cols()*3) = in[i] * x1.rowwise().replicate(3); } }; template<typename T> struct alloc_new_delete { EIGEN_DEVICE_FUNC void operator()(int i, const typename T::Scalar* in, typename T::Scalar* out) const { int offset = 2*i*T::MaxSizeAtCompileTime; T* x = new T(in + offset); Eigen::Map<T> u(out + offset); u = *x; delete x; offset += T::MaxSizeAtCompileTime; T* y = new T[1]; y[0] = T(in + offset); Eigen::Map<T> v(out + offset); v = y[0]; delete[] y; } }; template<typename T> struct redux { EIGEN_DEVICE_FUNC void operator()(int i, const typename T::Scalar* in, typename T::Scalar* out) const { using namespace Eigen; int N = 10; T x1(in+i); out[i*N+0] = x1.minCoeff(); out[i*N+1] = x1.maxCoeff(); out[i*N+2] = x1.sum(); out[i*N+3] = x1.prod(); out[i*N+4] = x1.matrix().squaredNorm(); out[i*N+5] = x1.matrix().norm(); out[i*N+6] = x1.colwise().sum().maxCoeff(); out[i*N+7] = x1.rowwise().maxCoeff().sum(); out[i*N+8] = x1.matrix().colwise().squaredNorm().sum(); } }; template<typename T1, typename T2> struct prod_test { EIGEN_DEVICE_FUNC void operator()(int i, const typename T1::Scalar* in, typename T1::Scalar* out) const { using namespace Eigen; typedef Matrix<typename T1::Scalar, T1::RowsAtCompileTime, T2::ColsAtCompileTime> T3; T1 x1(in+i); T2 x2(in+i+1); Map<T3> res(out+i*T3::MaxSizeAtCompileTime); res += in[i] * x1 * x2; } }; template<typename T1, typename T2> struct diagonal { EIGEN_DEVICE_FUNC void operator()(int i, const typename T1::Scalar* in, typename T1::Scalar* out) const { using namespace Eigen; T1 x1(in+i); Map<T2> res(out+i*T2::MaxSizeAtCompileTime); res += x1.diagonal(); } }; template<typename T> struct eigenvalues_direct { EIGEN_DEVICE_FUNC void operator()(int i, const typename T::Scalar* in, typename T::Scalar* out) const { using namespace Eigen; typedef Matrix<typename T::Scalar, T::RowsAtCompileTime, 1> Vec; T M(in+i); Map<Vec> res(out+i*Vec::MaxSizeAtCompileTime); T A = M*M.adjoint(); SelfAdjointEigenSolver<T> eig; eig.computeDirect(A); res = eig.eigenvalues(); } }; template<typename T> struct eigenvalues { EIGEN_DEVICE_FUNC void operator()(int i, const typename T::Scalar* in, typename T::Scalar* out) const { using namespace Eigen; typedef Matrix<typename T::Scalar, T::RowsAtCompileTime, 1> Vec; T M(in+i); Map<Vec> res(out+i*Vec::MaxSizeAtCompileTime); T A = M*M.adjoint(); SelfAdjointEigenSolver<T> eig; eig.compute(A); res = eig.eigenvalues(); } }; template<typename T> struct matrix_inverse { EIGEN_DEVICE_FUNC void operator()(int i, const typename T::Scalar* in, typename T::Scalar* out) const { using namespace Eigen; T M(in+i); Map<T> res(out+i*T::MaxSizeAtCompileTime); res = M.inverse(); } }; template<typename T> struct numeric_limits_test { EIGEN_DEVICE_FUNC void operator()(int i, const typename T::Scalar* in, typename T::Scalar* out) const { EIGEN_UNUSED_VARIABLE(in) int out_idx = i * 5; out[out_idx++] = numext::numeric_limits<float>::epsilon(); out[out_idx++] = (numext::numeric_limits<float>::max)(); out[out_idx++] = 
(numext::numeric_limits<float>::min)(); out[out_idx++] = numext::numeric_limits<float>::infinity(); out[out_idx++] = numext::numeric_limits<float>::quiet_NaN(); } }; template<typename Type1, typename Type2> bool verifyIsApproxWithInfsNans(const Type1& a, const Type2& b, typename Type1::Scalar* = 0) // Enabled for Eigen's type only { if (a.rows() != b.rows()) { return false; } if (a.cols() != b.cols()) { return false; } for (Index r = 0; r < a.rows(); ++r) { for (Index c = 0; c < a.cols(); ++c) { if (a(r, c) != b(r, c) && !((numext::isnan)(a(r, c)) && (numext::isnan)(b(r, c))) && !test_isApprox(a(r, c), b(r, c))) { return false; } } } return true; } template<typename Kernel, typename Input, typename Output> void test_with_infs_nans(const Kernel& ker, int n, const Input& in, Output& out) { Output out_ref, out_gpu; #if !defined(EIGEN_GPU_COMPILE_PHASE) out_ref = out_gpu = out; #else EIGEN_UNUSED_VARIABLE(in); EIGEN_UNUSED_VARIABLE(out); #endif run_on_cpu (ker, n, in, out_ref); run_on_gpu(ker, n, in, out_gpu); #if !defined(EIGEN_GPU_COMPILE_PHASE) verifyIsApproxWithInfsNans(out_ref, out_gpu); #endif } EIGEN_DECLARE_TEST(gpu_basic) { ei_test_init_gpu(); int nthreads = 100; Eigen::VectorXf in, out; Eigen::VectorXcf cfin, cfout; #if !defined(EIGEN_GPU_COMPILE_PHASE) int data_size = nthreads * 512; in.setRandom(data_size); out.setConstant(data_size, -1); cfin.setRandom(data_size); cfout.setConstant(data_size, -1); #endif CALL_SUBTEST( run_and_compare_to_gpu(coeff_wise<Vector3f>(), nthreads, in, out) ); CALL_SUBTEST( run_and_compare_to_gpu(coeff_wise<Array44f>(), nthreads, in, out) ); #if !defined(EIGEN_USE_HIP) // FIXME // These subtests result in a compile failure on the HIP platform // // eigen-upstream/Eigen/src/Core/Replicate.h:61:65: error: // base class 'internal::dense_xpr_base<Replicate<Array<float, 4, 1, 0, 4, 1>, -1, -1> >::type' // (aka 'ArrayBase<Eigen::Replicate<Eigen::Array<float, 4, 1, 0, 4, 1>, -1, -1> >') has protected default constructor CALL_SUBTEST( run_and_compare_to_gpu(replicate<Array4f>(), nthreads, in, out) ); CALL_SUBTEST( run_and_compare_to_gpu(replicate<Array33f>(), nthreads, in, out) ); // HIP does not support new/delete on device. CALL_SUBTEST( run_and_compare_to_gpu(alloc_new_delete<Vector3f>(), nthreads, in, out) ); #endif CALL_SUBTEST( run_and_compare_to_gpu(redux<Array4f>(), nthreads, in, out) ); CALL_SUBTEST( run_and_compare_to_gpu(redux<Matrix3f>(), nthreads, in, out) ); CALL_SUBTEST( run_and_compare_to_gpu(prod_test<Matrix3f,Matrix3f>(), nthreads, in, out) ); CALL_SUBTEST( run_and_compare_to_gpu(prod_test<Matrix4f,Vector4f>(), nthreads, in, out) ); CALL_SUBTEST( run_and_compare_to_gpu(diagonal<Matrix3f,Vector3f>(), nthreads, in, out) ); CALL_SUBTEST( run_and_compare_to_gpu(diagonal<Matrix4f,Vector4f>(), nthreads, in, out) ); CALL_SUBTEST( run_and_compare_to_gpu(matrix_inverse<Matrix2f>(), nthreads, in, out) ); CALL_SUBTEST( run_and_compare_to_gpu(matrix_inverse<Matrix3f>(), nthreads, in, out) ); CALL_SUBTEST( run_and_compare_to_gpu(matrix_inverse<Matrix4f>(), nthreads, in, out) ); CALL_SUBTEST( run_and_compare_to_gpu(eigenvalues_direct<Matrix3f>(), nthreads, in, out) ); CALL_SUBTEST( run_and_compare_to_gpu(eigenvalues_direct<Matrix2f>(), nthreads, in, out) ); // Test std::complex. 
CALL_SUBTEST( run_and_compare_to_gpu(complex_operators<Vector3cf>(), nthreads, cfin, cfout) ); CALL_SUBTEST( test_with_infs_nans(complex_sqrt<Vector3cf>(), nthreads, cfin, cfout) ); // numeric_limits CALL_SUBTEST( test_with_infs_nans(numeric_limits_test<Vector3f>(), 1, in, out) ); #if defined(__NVCC__) // FIXME // These subtests compiles only with nvcc and fail with HIPCC and clang-cuda CALL_SUBTEST( run_and_compare_to_gpu(eigenvalues<Matrix4f>(), nthreads, in, out) ); typedef Matrix<float,6,6> Matrix6f; CALL_SUBTEST( run_and_compare_to_gpu(eigenvalues<Matrix6f>(), nthreads, in, out) ); #endif }
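// --- Hedged sketch (added for illustration; not part of the original test) ---
// One more functor in the same shape as coeff_wise / redux above: an
// EIGEN_DEVICE_FUNC operator()(i, in, out) that reads fixed-size vectors from `in`
// and writes plain scalars to `out`, so run_and_compare_to_gpu can diff the CPU and
// GPU results. The functor name and the two quantities it computes are invented here.
template<typename T>
struct dot_and_distance_sketch {
  EIGEN_DEVICE_FUNC
  void operator()(int i, const typename T::Scalar* in, typename T::Scalar* out) const
  {
    using namespace Eigen;
    T x1(in + i);
    T x2(in + i + 1);
    out[i*2 + 0] = x1.dot(x2);               // device-side reduction
    out[i*2 + 1] = (x1 - x2).squaredNorm();  // coefficient-wise op followed by a reduction
  }
};
// Inside EIGEN_DECLARE_TEST(gpu_basic) this would be exercised as, e.g.:
//   CALL_SUBTEST( run_and_compare_to_gpu(dot_and_distance_sketch<Vector3f>(), nthreads, in, out) );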
the_stack
#pragma once #include <cuda_runtime.h> #include <libvis/cuda/cuda_buffer.cuh> #include <libvis/libvis.h> #include <libvis/opengl.h> #include "badslam/cost_function.cuh" #include "badslam/cuda_matrix.cuh" #include "badslam/cuda_util.cuh" #include "badslam/surfel_projection.cuh" #include "badslam/util.cuh" #include "badslam/util_nvcc_only.cuh" namespace vis { // Tests whether a pixel corresponds to a surfel (i.e., probably represents the // same surface). template <bool return_free_space_violations, bool return_surfel_normal> __forceinline__ __device__ bool IsAssociatedWithPixel( const CUDABuffer_<float>& surfels, u32 surfel_index, const float3& surfel_local_position, const CUDAMatrix3x4& frame_T_global, const CUDABuffer_<u16>& normals_buffer, int px, int py, const DepthParameters& depth_params, u16 measured_depth, float depth_tukey_parameter, const PixelCenterUnprojector& center_unprojector, bool* is_free_space_violation, float3* surfel_normal, float* out_calibrated_depth) { if (measured_depth & kInvalidDepthBit) { return false; } float calibrated_depth = RawToCalibratedDepth( depth_params.a, depth_params.cfactor_buffer(py / depth_params.sparse_surfel_cell_size, px / depth_params.sparse_surfel_cell_size), depth_params.raw_to_float_depth, measured_depth); if (out_calibrated_depth) { *out_calibrated_depth = calibrated_depth; } float3 surfel_local_normal; if (return_surfel_normal) { *surfel_normal = SurfelGetNormal(surfels, surfel_index); surfel_local_normal = frame_T_global.Rotate(*surfel_normal); } else { surfel_local_normal = frame_T_global.Rotate(SurfelGetNormal(surfels, surfel_index)); } // Compute association depth difference threshold float depth_residual_stddev_estimate = ComputeDepthResidualStddevEstimate( center_unprojector.nx(px), center_unprojector.ny(py), calibrated_depth, surfel_local_normal, depth_params.baseline_fx); // TODO: return to caller if useful const float depth_difference_threshold = depth_tukey_parameter * depth_residual_stddev_estimate; // Check whether the depth is similar enough to consider the measurement to belong to the surfel if (return_free_space_violations) { float depth_difference = calibrated_depth - surfel_local_position.z; if (depth_difference > depth_difference_threshold) { *is_free_space_violation = true; return false; } else if (depth_difference < -depth_difference_threshold) { return false; } } else { if (fabs(surfel_local_position.z - calibrated_depth) > depth_difference_threshold) { return false; } } // Check whether the surfel normal looks towards the camera (instead of away from it). float surfel_distance = Norm(surfel_local_position); float surfel_vs_camera_dir_dot_angle = (1.0f / surfel_distance) * Dot(surfel_local_position, surfel_local_normal); if (surfel_vs_camera_dir_dot_angle > 0) { return false; } // Check whether the surfel normal is compatible with the measurement normal. float3 local_normal = U16ToImageSpaceNormal(normals_buffer(py, px)); // TODO: export this to calling functions if they need it to improve performance float surfel_vs_measurement_dot_angle = Dot(surfel_local_normal, local_normal); if (surfel_vs_measurement_dot_angle < cos_normal_compatibility_threshold) { // if (return_free_space_violations && surfel_local_position.z < calibrated_depth) { // // NOTE: Careful here, this might lead to a bias since we only remove // // surfels on one side of the surface. // *is_free_space_violation = true; // } return false; } return true; } // Version of IsAssociatedWithPixel() for using a surfel that is implicitly // defined by a pixel. 
template <bool return_free_space_violations> __forceinline__ __device__ bool IsAssociatedWithPixel( const float3& surfel_local_position, const CUDABuffer_<u16>& surfel_normals_buffer, int x, int y, const CUDAMatrix3x4& test_frame_T_surfel_frame, const CUDABuffer_<u16>& test_normals_buffer, int px, int py, const DepthParameters& depth_params, u16 pixel_measured_depth, float depth_tukey_parameter, const PixelCenterUnprojector& center_unprojector, bool* is_free_space_violation) { if (pixel_measured_depth & kInvalidDepthBit) { return false; } float pixel_calibrated_depth = RawToCalibratedDepth( depth_params.a, depth_params.cfactor_buffer(py / depth_params.sparse_surfel_cell_size, px / depth_params.sparse_surfel_cell_size), depth_params.raw_to_float_depth, pixel_measured_depth); float3 surfel_local_normal; return IsAssociatedWithPixel<return_free_space_violations>( surfel_local_position, surfel_normals_buffer, x, y, test_frame_T_surfel_frame, test_normals_buffer, px, py, pixel_calibrated_depth, depth_tukey_parameter, depth_params.baseline_fx, center_unprojector, is_free_space_violation, &surfel_local_normal); } // Version of IsAssociatedWithPixel() for using a surfel that is implicitly // defined by a pixel. template <bool return_free_space_violations> __forceinline__ __device__ bool IsAssociatedWithPixel( const float3& surfel_local_position, const CUDABuffer_<u16>& surfel_normals_buffer, int x, int y, const CUDAMatrix3x4& test_frame_T_surfel_frame, const CUDABuffer_<u16>& test_normals_buffer, int px, int py, float pixel_calibrated_depth, float depth_tukey_parameter, float baseline_fx, const PixelCenterUnprojector& center_unprojector, bool* is_free_space_violation, float3* surfel_local_normal) { *surfel_local_normal = test_frame_T_surfel_frame.Rotate(U16ToImageSpaceNormal(surfel_normals_buffer(y, x))); // Compute association depth difference threshold float depth_residual_stddev_estimate = ComputeDepthResidualStddevEstimate( center_unprojector.nx(px), center_unprojector.ny(py), pixel_calibrated_depth, *surfel_local_normal, baseline_fx); // TODO: return to caller if useful const float depth_difference_threshold = depth_tukey_parameter * depth_residual_stddev_estimate; // Check whether the depth is similar enough to consider the measurement to belong to the surfel if (return_free_space_violations) { float depth_difference = pixel_calibrated_depth - surfel_local_position.z; if (depth_difference > depth_difference_threshold) { *is_free_space_violation = true; return false; } else if (depth_difference < -depth_difference_threshold) { return false; } } else { if (fabs(surfel_local_position.z - pixel_calibrated_depth) > depth_difference_threshold) { return false; } } // Check whether the surfel normal looks towards the camera (instead of away from it). float surfel_distance = Norm(surfel_local_position); float surfel_vs_camera_dir_dot_angle = (1.0f / surfel_distance) * Dot(surfel_local_position, *surfel_local_normal); if (surfel_vs_camera_dir_dot_angle > 0) { return false; } // Check whether the surfel normal is compatible with the measurement normal. float3 local_normal = U16ToImageSpaceNormal(test_normals_buffer(py, px)); float surfel_vs_measurement_dot_angle = Dot(*surfel_local_normal, local_normal); if (surfel_vs_measurement_dot_angle < cos_normal_compatibility_threshold) { // if (return_free_space_violations && surfel_local_position.z < pixel_calibrated_depth) { // // NOTE: Careful here, this might lead to a bias since we only remove // // surfels on one side of the surface. 
// *is_free_space_violation = true; // } return false; } return true; } // ----------------------------------------------------------------------------- // Groups result variables of surfel projection. struct SurfelProjectionResult6 { float3 surfel_global_position; // Local position of the surfel in the keyframe coordinate system. float3 surfel_local_position; // Global normal vector of the surfel. float3 surfel_normal; // Calibrated depth value of the pixel the surfel projects to. float pixel_calibrated_depth; // Integer coordinates of the pixel the surfel projects to. int px; int py; // Float coordinates of the pixel the surfel projects to ("pixel corner" convention). float2 pxy; }; // Groups result variables of surfel projection. struct SurfelProjectionResult5 { // Local position of the surfel in the keyframe coordinate system. float3 surfel_local_position; // Global normal vector of the surfel. float3 surfel_normal; // Calibrated depth value of the pixel the surfel projects to. float pixel_calibrated_depth; // Integer coordinates of the pixel the surfel projects to. int px; int py; }; // Groups result variables of surfel projection. struct SurfelProjectionResultXY { // Integer coordinates of the pixel the surfel projects to. int px; int py; }; // Groups result variables of surfel projection. struct SurfelProjectionResultXYFloat { float2 pxy; }; // Groups result variables of surfel projection. struct SurfelProjectionResultXYFreeSpace { // Integer coordinates of the pixel the surfel projects to. int px; int py; bool is_free_space_violation; }; // Projects a surfel to a pixel. Returns true if it projects to a corresponding // pixel, and outputs the projection result. __forceinline__ __device__ bool SurfelProjectsToAssociatedPixel( unsigned int surfel_index, const SurfelProjectionParameters& surfel_projection, SurfelProjectionResult5* result) { if (surfel_index < surfel_projection.surfels_size) { // Project the surfel onto depth_buffer to find the corresponding pixel float3 global_position = SurfelGetPosition(surfel_projection.surfels, surfel_index); if (surfel_projection.frame_T_global.MultiplyIfResultZIsPositive(global_position, &result->surfel_local_position)) { if (ProjectSurfelToImage( surfel_projection.depth_buffer.width(), surfel_projection.depth_buffer.height(), surfel_projection.projector, result->surfel_local_position, &result->px, &result->py)) { // Check whether the surfel gets associated with the pixel. if (IsAssociatedWithPixel<false, true>( surfel_projection.surfels, surfel_index, result->surfel_local_position, surfel_projection.frame_T_global, surfel_projection.normals_buffer, result->px, result->py, surfel_projection.depth_params, surfel_projection.depth_buffer(result->py, result->px), kDepthResidualDefaultTukeyParam, surfel_projection.center_unprojector, nullptr, &result->surfel_normal, &result->pixel_calibrated_depth)) { return true; } } } } return false; } // Projects a surfel to a pixel. Returns true if it projects to a corresponding // pixel, and outputs the projection result. 
__forceinline__ __device__ bool SurfelProjectsToAssociatedPixel( unsigned int surfel_index, const SurfelProjectionParameters& surfel_projection, SurfelProjectionResult6* result) { if (surfel_index < surfel_projection.surfels_size) { // Project the surfel onto depth_buffer to find the corresponding pixel result->surfel_global_position = SurfelGetPosition(surfel_projection.surfels, surfel_index); if (surfel_projection.frame_T_global.MultiplyIfResultZIsPositive(result->surfel_global_position, &result->surfel_local_position)) { if (ProjectSurfelToImage( surfel_projection.depth_buffer.width(), surfel_projection.depth_buffer.height(), surfel_projection.projector, result->surfel_local_position, &result->px, &result->py, &result->pxy)) { // Check whether the surfel gets associated with the pixel. if (IsAssociatedWithPixel<false, true>( surfel_projection.surfels, surfel_index, result->surfel_local_position, surfel_projection.frame_T_global, surfel_projection.normals_buffer, result->px, result->py, surfel_projection.depth_params, surfel_projection.depth_buffer(result->py, result->px), kDepthResidualDefaultTukeyParam, surfel_projection.center_unprojector, nullptr, &result->surfel_normal, &result->pixel_calibrated_depth)) { return true; } } } } return false; } // Projects a surfel to a pixel. Returns true if any surfel from the thread's // CUDA block projects to a corresponding pixel, and false otherwise. Outputs // the projection result. __forceinline__ __device__ bool AnySurfelProjectsToAssociatedPixel( unsigned int* surfel_index, const SurfelProjectionParameters& surfel_projection, bool* visible, SurfelProjectionResult6* result) { *visible = *surfel_index < surfel_projection.surfels_size; if (!*visible) { *surfel_index = 0; } // Project the surfel onto depth_buffer to find the corresponding pixel result->surfel_global_position = SurfelGetPosition(surfel_projection.surfels, *surfel_index); if (!surfel_projection.frame_T_global.MultiplyIfResultZIsPositive(result->surfel_global_position, &result->surfel_local_position)) { *visible = false; } if (!*visible || !ProjectSurfelToImage( surfel_projection.depth_buffer.width(), surfel_projection.depth_buffer.height(), surfel_projection.projector, result->surfel_local_position, &result->px, &result->py, &result->pxy)) { result->px = 0; result->py = 0; *visible = false; } // Early exit if all threads within the block (!) have invisible surfels. // Checking for invisible threads within the warp (using __all(), for example) // is not sufficient since we do block-wide collective operations later. if (__syncthreads_or(*visible) == 0) { return false; } // Check for depth compatibility. if (!IsAssociatedWithPixel<false, true>( surfel_projection.surfels, *surfel_index, result->surfel_local_position, surfel_projection.frame_T_global, surfel_projection.normals_buffer, result->px, result->py, surfel_projection.depth_params, surfel_projection.depth_buffer(result->py, result->px), kDepthResidualDefaultTukeyParam, surfel_projection.center_unprojector, nullptr, &result->surfel_normal, &result->pixel_calibrated_depth)) { *visible = false; } // Second early exit test (see above) if (__syncthreads_or(*visible) == 0) { return false; } return true; } // Projects a surfel to a pixel. Returns true if it projects to a corresponding // pixel, and outputs the projection result. 
__forceinline__ __device__ bool SurfelProjectsToAssociatedPixel( unsigned int surfel_index, const SurfelProjectionParameters& surfel_projection, SurfelProjectionResultXY* result) { if (surfel_index < surfel_projection.surfels_size) { // Project the surfel onto depth_buffer to find the corresponding pixel float3 global_position = SurfelGetPosition(surfel_projection.surfels, surfel_index); float3 surfel_local_position; if (surfel_projection.frame_T_global.MultiplyIfResultZIsPositive(global_position, &surfel_local_position)) { if (ProjectSurfelToImage( surfel_projection.depth_buffer.width(), surfel_projection.depth_buffer.height(), surfel_projection.projector, surfel_local_position, &result->px, &result->py)) { // Check whether the surfel gets associated with the pixel. if (IsAssociatedWithPixel<false, false>( surfel_projection.surfels, surfel_index, surfel_local_position, surfel_projection.frame_T_global, surfel_projection.normals_buffer, result->px, result->py, surfel_projection.depth_params, surfel_projection.depth_buffer(result->py, result->px), kDepthResidualDefaultTukeyParam, surfel_projection.center_unprojector, nullptr, nullptr, nullptr)) { return true; } } } } return false; } // Projects a surfel to a pixel. Returns true if it projects to a corresponding // pixel, and outputs the projection result. __forceinline__ __device__ bool SurfelProjectsToAssociatedPixel( unsigned int surfel_index, const SurfelProjectionParameters& surfel_projection, SurfelProjectionResultXYFloat* result) { if (surfel_index < surfel_projection.surfels_size) { // Project the surfel onto depth_buffer to find the corresponding pixel float3 global_position = SurfelGetPosition(surfel_projection.surfels, surfel_index); float3 surfel_local_position; if (surfel_projection.frame_T_global.MultiplyIfResultZIsPositive(global_position, &surfel_local_position)) { int px, py; if (ProjectSurfelToImage( surfel_projection.depth_buffer.width(), surfel_projection.depth_buffer.height(), surfel_projection.projector, surfel_local_position, &px, &py, &result->pxy)) { // Check whether the surfel gets associated with the pixel. if (IsAssociatedWithPixel<false, false>( surfel_projection.surfels, surfel_index, surfel_local_position, surfel_projection.frame_T_global, surfel_projection.normals_buffer, px, py, surfel_projection.depth_params, surfel_projection.depth_buffer(py, px), kDepthResidualDefaultTukeyParam, surfel_projection.center_unprojector, nullptr, nullptr, nullptr)) { return true; } } } } return false; } // Projects a surfel to a pixel. Returns true if it projects to a corresponding // pixel, and outputs the projection result. __forceinline__ __device__ bool SurfelProjectsToAssociatedPixel( unsigned int surfel_index, const SurfelProjectionParameters& surfel_projection, SurfelProjectionResultXYFreeSpace* result) { result->is_free_space_violation = false; if (surfel_index < surfel_projection.surfels_size) { // Project the surfel onto depth_buffer to find the corresponding pixel float3 global_position = SurfelGetPosition(surfel_projection.surfels, surfel_index); float3 surfel_local_position; if (surfel_projection.frame_T_global.MultiplyIfResultZIsPositive(global_position, &surfel_local_position)) { if (ProjectSurfelToImage( surfel_projection.depth_buffer.width(), surfel_projection.depth_buffer.height(), surfel_projection.projector, surfel_local_position, &result->px, &result->py)) { // Check whether the surfel gets associated with the pixel. 
if (IsAssociatedWithPixel<true, false>( surfel_projection.surfels, surfel_index, surfel_local_position, surfel_projection.frame_T_global, surfel_projection.normals_buffer, result->px, result->py, surfel_projection.depth_params, surfel_projection.depth_buffer(result->py, result->px), kDepthResidualDefaultTukeyParam, surfel_projection.center_unprojector, &result->is_free_space_violation, nullptr, nullptr)) { return true; } } } } return false; } }
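// --- Hedged usage sketch (added for illustration; not part of the original header) ---
// Minimal kernel showing how the SurfelProjectsToAssociatedPixel() overload taking a
// SurfelProjectionResultXY is typically consumed: one thread per surfel, and surfels
// that project to an associated pixel of the current keyframe are counted atomically.
// The kernel name, the by-value parameter struct, and the bare counter are invented
// here; real kernels would do more with r.px / r.py than just counting.
namespace vis {

__global__ void CountAssociatedSurfelsKernelSketch(
    SurfelProjectionParameters s,
    u32* associated_count) {
  const unsigned int surfel_index = blockIdx.x * blockDim.x + threadIdx.x;
  // The bounds check against s.surfels_size happens inside the helper.
  SurfelProjectionResultXY r;
  if (SurfelProjectsToAssociatedPixel(surfel_index, s, &r)) {
    atomicAdd(associated_count, 1u);
  }
}

}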
the_stack
#include "./physlib/R2grid.h" // Grid2d #include "./physlib/dev_R2grid.h" // dev_Grid2d #include "./commonlib/surfObj2d.h" // SurfObj2d #include "./commonlib/checkerror.h" // checkCudaErrors constexpr const int L_X { 128 } ; // WIDTH constexpr const int L_Y { 64 } ; // HEIGHT // Simple copy kernel /* http://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#surface-object-api * 3.2.11.2.1. Surface Object API */ __global__ void copyKernel( cudaSurfaceObject_t inputSurfObj, cudaSurfaceObject_t outputSurfObj, int width, int height) { // Calculate surface coordinates const int k_x = threadIdx.x + blockIdx.x * blockDim.x ; const int k_y = threadIdx.y + blockIdx.y * blockDim.y ; if ((k_x >= width) || (k_y >= height)) { return; } float data; // Read from input surface surf2Dread(&data, inputSurfObj, k_x * 4, k_y, cudaBoundaryModeClamp ); // surf2Dread(&data, inputSurfObj, (k_x+1) * 4, k_y, cudaBoundaryModeClamp ); surf2Dwrite(data, outputSurfObj, k_x * 4, k_y ); } __global__ void addrKernel( cudaSurfaceObject_t inputSurfObj, cudaSurfaceObject_t outputSurfObj, int L_x, int L_y) { // Calculate surface coordinates const int k_x = threadIdx.x + blockIdx.x * blockDim.x ; const int k_y = threadIdx.y + blockIdx.y * blockDim.y ; if ((k_x >= L_x) || (k_y >= L_y)) { return ; } float c, r, tempval; surf2Dread(&c, inputSurfObj, k_x * 4, k_y ) ; surf2Dread(&r, inputSurfObj, (k_x+1) * 4, k_y , cudaBoundaryModeClamp) ; tempval = c+r; surf2Dwrite( tempval, outputSurfObj, k_x * 4, k_y); } __global__ void addlKernel( cudaSurfaceObject_t inputSurfObj, cudaSurfaceObject_t outputSurfObj, int L_x, int L_y) { // Calculate surface coordinates const int k_x = threadIdx.x + blockIdx.x * blockDim.x ; const int k_y = threadIdx.y + blockIdx.y * blockDim.y ; if ((k_x >= L_x) || (k_y >= L_y)) { return ; } float c, l, tempval; surf2Dread(&c, inputSurfObj, k_x * 4, k_y ) ; surf2Dread(&l, inputSurfObj, (k_x-1) * 4, k_y , cudaBoundaryModeClamp) ; tempval = c+l; surf2Dwrite( tempval, outputSurfObj, k_x * 4, k_y); } void addlrKernels_launch( cudaSurfaceObject_t inputSurfObj, cudaSurfaceObject_t outputSurfObj, const int L_x, const int L_y, dim3 M_in, const int NITERS =1) { volatile bool dstOut = true; dim3 dimGrid((L_x + M_in.x - 1)/ M_in.x , (L_y + M_in.y - 1) / M_in.y); for (auto iter = 0; iter < NITERS; ++iter) { cudaSurfaceObject_t inSurfObj, outSurfObj; if (dstOut) { inSurfObj = inputSurfObj ; outSurfObj = outputSurfObj ; addrKernel<<<dimGrid,M_in>>>( inSurfObj, outSurfObj, L_x,L_y) ; } else { outSurfObj = inputSurfObj; inSurfObj = outputSurfObj; addlKernel<<<dimGrid,M_in>>>( inSurfObj, outSurfObj, L_x,L_y) ; } dstOut = !dstOut; } } // addrxyf2Kernel - add the right element and do it for both x,y components for float2 __global__ void addrxyf2Kernel( cudaSurfaceObject_t inputSurfObj, cudaSurfaceObject_t outputSurfObj, int L_x, int L_y) { // Calculate surface coordinates const int k_x = threadIdx.x + blockIdx.x * blockDim.x ; const int k_y = threadIdx.y + blockIdx.y * blockDim.y ; if ((k_x >= L_x) || (k_y >= L_y)) { return ; } float2 c, r, tempval; /* * CUDA Toolkit documentation said byte-addressing in Surface Object API was 4 bytes for a float, as an example. * But when someone does sizeof(float2), one gets 4; so what's the number of bytes to do Data Alignment * when using float2? * * cf. 
http://www.cs.nthu.edu.tw/~cherung/teaching/2010gpucell/CUDA02.pdf * gave a very clear answer on Data Alignment: 8 for float2 * */ const int RADIUS = 1; int stencilindex_x = k_x + RADIUS; stencilindex_x = min( max( stencilindex_x,0), L_x-1) ; surf2Dread(&c, inputSurfObj, k_x * 8, k_y ) ; surf2Dread(&r, inputSurfObj, ( stencilindex_x ) * 8, k_y , cudaBoundaryModeClamp) ; // tempval = c+r; tempval.x = c.x + r.x ; tempval.y = c.y + r.y ; surf2Dwrite( tempval, outputSurfObj, k_x * 8, k_y); } int main(int argc, char* argv[]) { // sanity check - surface memory read/writes use byte-addressing, so what's the number of bytes of a float? std::cout << "\n sizeof(float) : " << sizeof(float) << std::endl ; std::cout << "\n sizeof(float2) : " << sizeof(float) << std::endl ; // boilerplate // constexpr const int DISPLAY_SIZE { 14 }; constexpr const int NITERS { 2 }; // physics; Euclidean space constexpr std::array<int,2> LdS { L_X, L_Y }; constexpr std::array<float,2> ldS { 1.f , 1.f }; Grid2d grid2d{ LdS, ldS }; dim3 dev_L2 { static_cast<unsigned int>(L_X), static_cast<unsigned int>(L_Y) }; dev_Grid2d dev_grid2d( dev_L2 ); // initial condition for (auto j = 0; j < grid2d.Ld[1] ; ++j) { for (auto i = 0; i < grid2d.Ld[0]; ++i ) { grid2d.f[ grid2d.flatten(i,j) ] = static_cast<float>( grid2d.flatten(i,j) ) + 0.1f ; } } // Copy to device memory some data located at address grid2d.rho in host memory checkCudaErrors( cudaMemcpy(dev_grid2d.dev_f, (grid2d.f).data(), sizeof(float)*grid2d.NFLAT(), cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpyToArray(dev_grid2d.cuArr_f, 0, 0, dev_grid2d.dev_f, sizeof(float)*dev_grid2d.NFLAT(), cudaMemcpyDeviceToDevice) ); // Create surface object SurfObj2d surf_f( dev_grid2d.cuArr_f ) ; SurfObj2d surf_f_out( dev_grid2d.cuArr_f_out ) ; // Invoke kernel // MANUALLY CHANGE M_i here constexpr const int M_X { 8 }; constexpr const int M_Y { 4 }; dim3 M_i(M_X,M_Y); dim3 dimGrid((L_X + M_i.x - 1)/ M_i.x , (L_Y + M_i.y - 1) / M_i.y); copyKernel<<<dimGrid, M_i>>>( surf_f.surfObj, surf_f_out.surfObj, L_X,L_Y); // copy result, output array from device to host memory checkCudaErrors( cudaMemcpyFromArray( grid2d.f_out.data(), dev_grid2d.cuArr_f_out, 0,0, dev_grid2d.NFLAT()*sizeof(float), cudaMemcpyDeviceToHost) ); // C++ file Input/Output <fstream> std::ofstream simplecpy_file; simplecpy_file.open("./dataout/simplecpy.csv"); for (auto j=0; j<grid2d.Ld[1]; ++j) { // print first column's element simplecpy_file << grid2d.f_out[ 0 + j * grid2d.Ld[0] ] ; // print remaining columns for (auto i=1; i<grid2d.Ld[0]; ++i) { simplecpy_file << ", " << grid2d.f_out[i+j*grid2d.Ld[0] ] ; } // print new line between rows simplecpy_file << std::endl; } simplecpy_file.close(); // testing out once (1) addrKernel addrKernel<<<dimGrid, M_i>>>( surf_f.surfObj, surf_f_out.surfObj, L_X,L_Y); // copy result, output array from device to host memory checkCudaErrors( cudaMemcpyFromArray( grid2d.f_out.data(), dev_grid2d.cuArr_f_out, 0,0, dev_grid2d.NFLAT()*sizeof(float), cudaMemcpyDeviceToHost) ); // C++ file Input/Output <fstream> std::ofstream addr01_file; addr01_file.open("./dataout/addr01.csv"); for (auto j=0; j<grid2d.Ld[1]; ++j) { // print first column's element addr01_file << grid2d.f_out[ 0 + j * grid2d.Ld[0] ] ; // print remaining columns for (auto i=1; i<grid2d.Ld[0]; ++i) { addr01_file << ", " << grid2d.f_out[i+j*grid2d.Ld[0] ] ; } // print new line between rows addr01_file << std::endl; } addr01_file.close(); // testing out addlrKernels_launch addlrKernels_launch( surf_f.surfObj, surf_f_out.surfObj, L_X, 
L_Y, M_i, NITERS ) ; // copy results, output arrays from device to host memory checkCudaErrors( cudaMemcpyFromArray( grid2d.f.data(), dev_grid2d.cuArr_f, 0,0, dev_grid2d.NFLAT()*sizeof(float), cudaMemcpyDeviceToHost) ); checkCudaErrors( cudaMemcpyFromArray( grid2d.f_out.data(), dev_grid2d.cuArr_f_out, 0,0, dev_grid2d.NFLAT()*sizeof(float), cudaMemcpyDeviceToHost) ); // C++ file Input/Output <fstream> std::ofstream addlrin_file, addlrout_file ; addlrin_file.open("./dataout/addlrin.csv"); addlrout_file.open("./dataout/addlrout.csv"); for (auto j=0; j<grid2d.Ld[1]; ++j) { // print first column's element addlrin_file << grid2d.f[ 0 + j * grid2d.Ld[0] ] ; addlrout_file << grid2d.f_out[ 0 + j * grid2d.Ld[0] ] ; // print remaining columns for (auto i=1; i<grid2d.Ld[0]; ++i) { addlrin_file << ", " << grid2d.f[i+j*grid2d.Ld[0] ] ; addlrout_file << ", " << grid2d.f_out[i+j*grid2d.Ld[0] ] ; } // print new line between rows addlrin_file << std::endl; addlrout_file << std::endl; } addlrin_file.close(); addlrout_file.close(); // Create surface object to test the "binding" to the same cudaArray SurfObj2d surf_p( dev_grid2d.cuArr_f ) ; // testing out once (1) addrKernel addrKernel<<<dimGrid, M_i>>>( surf_p.surfObj, surf_f.surfObj, L_X,L_Y); // copy results, output arrays from device to host memory checkCudaErrors( cudaMemcpyFromArray( grid2d.f.data(), dev_grid2d.cuArr_f, 0,0, dev_grid2d.NFLAT()*sizeof(float), cudaMemcpyDeviceToHost) ); // C++ file Input/Output <fstream> std::ofstream addr_p_file ; addr_p_file.open("./dataout/addr_p.csv"); for (auto j=0; j<grid2d.Ld[1]; ++j) { // print first column's element addr_p_file << grid2d.f[ 0 + j * grid2d.Ld[0] ] ; // print remaining columns for (auto i=1; i<grid2d.Ld[0]; ++i) { addr_p_file << ", " << grid2d.f[i+j*grid2d.Ld[0] ] ; } // print new line between rows addr_p_file << std::endl; } addr_p_file.close(); // initial condition for u for (auto j = 0; j < grid2d.Ld[1] ; ++j) { for (auto i = 0; i < grid2d.Ld[0]; ++i ) { grid2d.u[ grid2d.flatten(i,j) ].x = static_cast<float>( grid2d.flatten(i,j))*10.f+0.1f; grid2d.u[ grid2d.flatten(i,j) ].y = static_cast<float>( grid2d.flatten(i,j))*0.001f + 0.00001f ; } } // Copy to device memory some data located at address grid2d.u in host memory checkCudaErrors( cudaMemcpyToArray(dev_grid2d.cuArr_u, 0, 0, (grid2d.u).data(), sizeof(float2)*dev_grid2d.NFLAT(), cudaMemcpyHostToDevice) ); // Create surface object to test the "binding" to cudaArray for float2 SurfObj2d surf_u( dev_grid2d.cuArr_u ) ; SurfObj2d surf_u_out( dev_grid2d.cuArr_u_out ) ; // Invoke kernel // testing out once (1) addrxyKernel addrxyf2Kernel<<<dimGrid, M_i>>>( surf_u.surfObj, surf_u_out.surfObj, L_X,L_Y); /* * misaligned address * FIXED (keep in mind the byte-addressing for float2, i.e. not float */ // copy results, output arrays from device to host memory checkCudaErrors( cudaMemcpyFromArray( (grid2d.u).data(), dev_grid2d.cuArr_u, 0,0, dev_grid2d.NFLAT()*sizeof(float2), cudaMemcpyDeviceToHost) ); /* * misaligned address cudaMemcpyFromArray( (grid2d.u).data(), dev_grid2d.cuArr_u, 0,0, dev_grid2d.NFLAT()*sizeof(float2), cudaMemcpyDeviceToHost) * FIXED (mind the byte-addressing for float2, i.e. 
not float) checkCudaErrors( cudaMemcpyFromArray( dev_grid2d.dev_u, dev_grid2d.cuArr_u, 0,0, sizeof(float2)*dev_grid2d.NFLAT(), cudaMemcpyDeviceToDevice) ); float2 tempu[ dev_grid2d.NFLAT() ]; checkCudaErrors( cudaMemcpy( tempu, dev_grid2d.dev_u, sizeof(float2)*dev_grid2d.NFLAT(), cudaMemcpyDeviceToHost) ); */ // C++ file Input/Output <fstream> std::ofstream addrxyf2_u_x_file, addrxyf2_u_y_file ; addrxyf2_u_x_file.open("./dataout/addrxyf2_u_x.csv"); addrxyf2_u_y_file.open("./dataout/addrxyf2_u_y.csv"); for (auto j=0; j<grid2d.Ld[1]; ++j) { // print first column's element addrxyf2_u_x_file << grid2d.u[ 0 + j * grid2d.Ld[0] ].x ; addrxyf2_u_y_file << grid2d.u[ 0 + j * grid2d.Ld[0] ].y ; // print remaining columns for (auto i=1; i<grid2d.Ld[0]; ++i) { addrxyf2_u_x_file << ", " << grid2d.u[i+j*grid2d.Ld[0] ].x ; addrxyf2_u_y_file << ", " << grid2d.u[i+j*grid2d.Ld[0] ].y ; } // print new line between rows addrxyf2_u_x_file << std::endl; addrxyf2_u_y_file << std::endl; } addrxyf2_u_x_file.close(); addrxyf2_u_y_file.close(); // copy results, output arrays from device to host memory checkCudaErrors( cudaMemcpyFromArray( grid2d.u.data(), dev_grid2d.cuArr_u_out, 0,0, dev_grid2d.NFLAT()*sizeof(float2), cudaMemcpyDeviceToHost) ); // C++ file Input/Output <fstream> std::ofstream addrxyf2_u_out_x_file, addrxyf2_u_out_y_file ; addrxyf2_u_out_x_file.open("./dataout/addrxyf2_u_out_x.csv"); addrxyf2_u_out_y_file.open("./dataout/addrxyf2_u_out_y.csv"); for (auto j=0; j<grid2d.Ld[1]; ++j) { // print first column's element addrxyf2_u_out_x_file << grid2d.u[ 0 + j * grid2d.Ld[0] ].x ; addrxyf2_u_out_y_file << grid2d.u[ 0 + j * grid2d.Ld[0] ].y ; // print remaining columns for (auto i=1; i<grid2d.Ld[0]; ++i) { addrxyf2_u_out_x_file << ", " << grid2d.u[i+j*grid2d.Ld[0] ].x ; addrxyf2_u_out_y_file << ", " << grid2d.u[i+j*grid2d.Ld[0] ].y ; } // print new line between rows addrxyf2_u_out_x_file << std::endl; addrxyf2_u_out_y_file << std::endl; } addrxyf2_u_out_x_file.close(); addrxyf2_u_out_y_file.close(); // checkCudaErrors( // cudaFree( dev_grid2d.dev_f ) ); return 0; }
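/* A minimal sketch, assuming a cudaArray (such as dev_grid2d.cuArr_f above) created with a
 * float channel description, of how a SurfObj2d-style wrapper can build its
 * cudaSurfaceObject_t; makeSurfaceObject is only an illustrative name, not the actual
 * SurfObj2d implementation. It also restates the byte-addressing rule the kernels above
 * rely on: the x coordinate passed to surf2Dread/surf2Dwrite is in bytes, i.e.
 * k_x * sizeof(float) (k_x * 4) for float data and k_x * sizeof(float2) (k_x * 8) for
 * float2 data. Note that sizeof(float2) is 8; the second std::cout in main() prints
 * sizeof(float) again, which is why it reports 4. */
cudaSurfaceObject_t makeSurfaceObject(cudaArray* cuArr) {
	cudaResourceDesc resDesc{};                 // zero-initialized resource descriptor
	resDesc.resType = cudaResourceTypeArray;    // the surface is backed by a CUDA array
	resDesc.res.array.array = cuArr;

	cudaSurfaceObject_t surfObj = 0;
	checkCudaErrors( cudaCreateSurfaceObject(&surfObj, &resDesc) );
	return surfObj;   // release later with cudaDestroySurfaceObject(surfObj)
}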
the_stack
#include <ops/declarable/helpers/random.h> //#include <NativeOps.h> #include <vector> #include <memory> #include <graph/Context.h> #include <helpers/RandomLauncher.h> #include <helpers/ShapeUtils.h> #include <array/NDArrayFactory.h> #include <exceptions/cuda_exception.h> #include <helpers/ConstantTadHelper.h> #include <helpers/PointersManager.h> namespace sd { namespace ops { namespace helpers { /* * fillGammaKernel - fill up output with gamma distributed values * * uList - uniformly distributed values set * uLength - length of uList * alpha - alpha param * beta - beta param * output - distributed output. * */ template <typename T> static __global__ void fillGammaKernel(T* uList, Nd4jLong uLength, T* alpha, const Nd4jLong* alphaShape, T* beta, const Nd4jLong* betaShape, T* output, const Nd4jLong* outputShape) { // fill up __shared__ Nd4jLong aLength; if (threadIdx.x == 0) { aLength = shape::length(alphaShape); } __syncthreads(); for (auto k = blockIdx.x; k < (int)uLength; k += gridDim.x) { auto pos = k * aLength; auto u = uList[k]; // this is a vector for (auto e = threadIdx.x; e < (int)aLength; e += blockDim.x) { auto aIndex = shape::getIndexOffset(e, alphaShape); auto bIndex = betaShape?shape::getIndexOffset(e, betaShape):-1LL; auto betaV = T(beta != nullptr ? beta[bIndex] * u : u); auto zIndex = shape::getIndexOffset(e + pos, outputShape); output[zIndex] = math::nd4j_igamma<T, T, T>(alpha[aIndex], betaV); } } } template <typename T> static void fillRandomGamma_(LaunchContext* context, graph::RandomGenerator& rng, NDArray* alpha, NDArray* beta, NDArray* output) { // To fill up output need to broadcast alpha and beta to the same shape and in const Nd4jLong* broadcasted = nullptr; if (beta != nullptr) ShapeUtils::evalBroadcastShapeInfo(*alpha, *beta, true, broadcasted, context->getWorkspace()); else broadcasted = alpha->shapeInfo(); auto step = shape::length(broadcasted); auto shift = output->lengthOf() / step; auto copyAlpha = alpha; auto copyBeta = beta; if (beta != nullptr) { NDArray alphaBroadcasted(broadcasted, alpha->dataType(), true, context); NDArray betaBroadcasted(broadcasted, beta->dataType(), true, context); copyAlpha = new NDArray(alphaBroadcasted.applyTrueBroadcast(BroadcastOpsTuple::Assign(), *alpha)); copyBeta = new NDArray(betaBroadcasted.applyTrueBroadcast(BroadcastOpsTuple::Assign(), *beta)); copyAlpha->tickWriteDevice(); copyBeta->tickWriteDevice(); } auto stream = context->getCudaStream(); NDArray uniform = NDArrayFactory::create<T>('c', {shift}, context); uniform.syncToDevice(); // fill up uniform with given length RandomLauncher::fillUniform(context, rng, &uniform, 0., 1.); fillGammaKernel<T><<<128, 128, 256, *stream>>>(uniform.dataBuffer()->specialAsT<T>(), shift, copyAlpha->dataBuffer()->specialAsT<T>(), copyAlpha->specialShapeInfo(), beta?copyBeta->dataBuffer()->specialAsT<T>():(T*)nullptr, beta?copyBeta->specialShapeInfo():(Nd4jLong*)nullptr, output->dataBuffer()->specialAsT<T>(), output->specialShapeInfo()); if (beta != nullptr) { delete copyAlpha; delete copyBeta; //delete broadcasted; } } void fillRandomGamma(LaunchContext* context, graph::RandomGenerator& rng, NDArray* alpha, NDArray* beta, NDArray* output) { if (beta) NDArray::prepareSpecialUse({output}, {alpha, beta}); else NDArray::prepareSpecialUse({output}, {alpha}); BUILD_SINGLE_SELECTOR(output->dataType(), fillRandomGamma_, (context, rng, alpha, beta, output), FLOAT_NATIVE); if (beta) NDArray::registerSpecialUse({output}, {alpha, beta}); else NDArray::prepareSpecialUse({output}, {alpha}); } 
BUILD_SINGLE_TEMPLATE(template void fillRandomGamma_, (LaunchContext* context, graph::RandomGenerator& rng, NDArray* alpha, NDArray* beta, NDArray* output), FLOAT_NATIVE); /* * algorithm Poisson generator based upon the inversion by sequential search * init: Let x ← 0, p ← e−λ, s ← p. using uniformly random sequence U (u in U) distributed at [0, 1]. while u > s do: x ← x + 1. p ← p * λ / x. s ← s + p. return x. * */ template <typename T> static __global__ void fillPoissonKernel(T* uList, Nd4jLong uLength, T* lambda, const Nd4jLong* lambdaShape, T* output, const Nd4jLong* outputShape) { __shared__ Nd4jLong step; if (threadIdx.x == 0) { step = shape::length(lambdaShape); } __syncthreads(); for (auto k = blockIdx.x; k < (int)uLength; k += gridDim.x) { auto pos = k * step; auto u = uList[k]; for (auto e = threadIdx.x; e < step; e += blockDim.x) { auto p = math::nd4j_exp<T,T>(-lambda[e]); auto s = p; auto x = T(0.f); auto lIndex = shape::getIndexOffset(e, lambdaShape); auto zIndex = shape::getIndexOffset(e + pos, outputShape); while (u > s) { x += T(1.); p *= lambda[lIndex] / x; s += p; } output[zIndex] = x; } } } template <typename T> static void fillRandomPoisson_(LaunchContext* context, graph::RandomGenerator& rng, NDArray* lambda, NDArray* output) { auto shift = output->lengthOf() / lambda->lengthOf(); NDArray uniform('c', {shift}, output->dataType()); auto stream = context->getCudaStream(); // fill up uniform with given length RandomLauncher::fillUniform(context, rng, &uniform, 0., 1.); fillPoissonKernel<T><<<128, 256, 128, *stream>>>(uniform.dataBuffer()->specialAsT<T>(), uniform.lengthOf(), lambda->dataBuffer()->specialAsT<T>(), lambda->specialShapeInfo(), output->dataBuffer()->specialAsT<T>(), output->specialShapeInfo()); } void fillRandomPoisson(LaunchContext* context, graph::RandomGenerator& rng, NDArray* lambda, NDArray* output) { NDArray::prepareSpecialUse({output}, {lambda}); BUILD_SINGLE_SELECTOR(output->dataType(), fillRandomPoisson_, (context, rng, lambda, output), FLOAT_NATIVE); NDArray::registerSpecialUse({output}, {lambda}); } BUILD_SINGLE_TEMPLATE(template void fillRandomPoisson_, (LaunchContext* context, graph::RandomGenerator& rng, NDArray* lambda, NDArray* output), FLOAT_NATIVE); template <typename T> static __global__ void fillUniformKernel(graph::RandomGenerator* devRng, T from, T to, T* output, const Nd4jLong* outputShape) { auto start = blockIdx.x * blockDim.x + threadIdx.x; auto step = blockDim.x * gridDim.x; __shared__ Nd4jLong outputLen; if (0 == threadIdx.x) { outputLen = shape::length(outputShape); } __syncthreads(); for (auto i = start; i < outputLen; i += step) { auto zIndex = shape::getIndexOffset(i, outputShape); output[zIndex] = devRng->relativeT<T>(i, from, to); } } template <typename T> static void fillRandomUniform_(LaunchContext* context, graph::RandomGenerator& rng, NDArray* min, NDArray* max, NDArray* output) { T minVal = T(0); T maxVal = DataTypeUtils::infOrMax<T>(); if (min) minVal = min->t<T>(0); if (max) maxVal = max->t<T>(0); if (output->isR()) RandomLauncher::fillUniform(context, rng, output, minVal, maxVal); else { auto stream = context->getCudaStream(); graph::RandomGenerator *devRng; auto err = cudaMalloc(&devRng, sizeof(graph::RandomGenerator)); if (err != 0) { cuda_exception::build("fillRandomUniform_: Cannot allocate device memory for random generator due error", err); } err = cudaMemcpy(devRng, &rng, sizeof(graph::RandomGenerator), cudaMemcpyHostToDevice); if (err != 0) { cuda_exception::build("fillRandomUniform_: Cannot copy random 
generator to device", err); } auto outputBuf = output->dataBuffer()->specialAsT<T>(); auto outputShape = output->specialShapeInfo(); fillUniformKernel<T><<<128, 128, 128, *stream>>>(devRng, minVal, maxVal, outputBuf, outputShape); err = cudaStreamSynchronize(*stream); if (err != 0) { cuda_exception::build("fillRandomUniform_: Cannot successfully finish kernel call", err); } err = cudaFree(devRng); if (err != 0) { cuda_exception::build("fillRandomUniform_: Cannot deallocate device memory for random generator", err); } } } void fillRandomUniform(LaunchContext* context, graph::RandomGenerator& rng, NDArray* min, NDArray* max, NDArray* output) { BUILD_SINGLE_SELECTOR(output->dataType(), fillRandomUniform_, (context, rng, min, max, output), NUMERIC_TYPES); } /////////////////////////////////////////////////////////////////// // used https://en.wikipedia.org/wiki/Categorical_distribution // methods: gumbel trick + softmax + argmax template<typename X, typename Z> __global__ static void fillMultiNomialCuda_(graph::RandomGenerator* devRng, const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const Nd4jLong batchValue, const Nd4jLong numOfSamples, const Nd4jLong numOfClassX, const Nd4jLong dimA, const X minVal, const X maxVal) { const X* x = reinterpret_cast<const X*>(vx); Z* z = reinterpret_cast<Z*>(vz); __shared__ Nd4jLong xDimAstride, zDimAstride, xDimCstride, zDimCstride, dimC; if (0 == threadIdx.x) { dimC = (0 == dimA) ? 1 : 0; zDimAstride = shape::stride(zShapeInfo)[dimA]; xDimAstride = shape::stride(xShapeInfo)[dimA]; zDimCstride = shape::stride(zShapeInfo)[dimC]; xDimCstride = shape::stride(xShapeInfo)[dimC]; } __syncthreads(); const auto tid = blockIdx.x * blockDim.x + threadIdx.x; for (Nd4jLong index = tid; index < batchValue*numOfSamples; index += gridDim.x * blockDim.x) { Nd4jLong nBatchIndex = index / numOfSamples; Nd4jLong nSampleIndexInBatch = index - (nBatchIndex * numOfSamples); const X* xTad = x + (nBatchIndex * xDimCstride); Z* zTad = z + (nBatchIndex * zDimCstride); Z& arg = zTad[nSampleIndexInBatch * zDimAstride]; X Max = -minVal; Nd4jLong nSamplesPerBatch = nBatchIndex * numOfClassX * numOfSamples; Nd4jLong nClassPerSamples = nSampleIndexInBatch * numOfClassX; for (Nd4jLong nClass = 0; nClass < numOfClassX; nClass++) { Nd4jLong nIndex = nSamplesPerBatch + nClassPerSamples + nClass; X tValue = (xTad[nClass * xDimAstride] - sd::math::nd4j_log<X, X>(-sd::math::nd4j_log<X, X>(devRng->relativeT<X>(nIndex, minVal, maxVal)))); if (tValue > Max) { Max = tValue; arg = nClass; } } } } ////////////////////////////////////////////////////////////////////////// template<typename X, typename Z> __host__ static void fillMultiNomialCudaLauncher( const int blocksPerGrid, const int threadsPerBlock, const cudaStream_t* stream, graph::RandomGenerator* devRng, const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const Nd4jLong batchValue, const Nd4jLong numOfSamples, const Nd4jLong numOfClassX, const Nd4jLong dimA){ const X minVal = DataTypeUtils::min<X>(); const X maxVal = 1.0; fillMultiNomialCuda_<X, Z> <<< blocksPerGrid, threadsPerBlock, 256, * stream >>> ( devRng, vx, xShapeInfo, vz, zShapeInfo, batchValue, numOfSamples, numOfClassX, dimA, minVal, maxVal); } /////////////////////////////////////////////////////////////////// void fillRandomMultiNomial(LaunchContext* context, graph::RandomGenerator& rng, NDArray& input, NDArray& output, const Nd4jLong numOfSamples, const int dimC) { Nd4jLong dimA = (0 == dimC) ? 
1 : 0; const Nd4jLong batchValue = output.sizeAt(dimC); const Nd4jLong numOfClassX = input.sizeAt(dimA); const int threadsPerBlock = MAX_NUM_THREADS / 2; const int blocksPerGrid = (batchValue * numOfSamples + threadsPerBlock - 1) / threadsPerBlock; PointersManager manager(context, "fillMultinomial"); graph::RandomGenerator *devRng; auto err = cudaMalloc(&devRng, sizeof(graph::RandomGenerator)); if (err != 0) { cuda_exception::build("fillRandomMultiNomial: Cannot allocate device memory for random generator due error", err); } err = cudaStreamSynchronize(*context->getCudaStream()); if (err != 0) { cuda_exception::build("fillRandomMultiNomial: Cannot synchronize stream for random generator due error", err); } err = cudaMemcpyAsync(devRng, &rng, sizeof(graph::RandomGenerator), cudaMemcpyHostToDevice, *context->getCudaStream()); if (err != 0) { cuda_exception::build("fillRandomMultiNomial: Cannot copy random generator to device", err); } NDArray::prepareSpecialUse({ &output }, { &input }); BUILD_DOUBLE_SELECTOR(input.dataType(), output.dataType(), fillMultiNomialCudaLauncher, (blocksPerGrid, threadsPerBlock, context->getCudaStream(), devRng, input.specialBuffer(), input.specialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), batchValue, numOfSamples, numOfClassX, dimA), FLOAT_TYPES, INDEXING_TYPES); NDArray::registerSpecialUse({ &output }, { &input }); manager.synchronize(); err = cudaFree(devRng); if (err != 0) { cuda_exception::build("fillRandomMultiNomial: Cannot deallocate device memory for random generator", err); } rng.rewindH(output.lengthOf() * numOfClassX); } } } }
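/* A minimal host-side sketch of the "inversion by sequential search" Poisson generator
 * described in the comment above fillPoissonKernel: given one uniform draw u in [0,1)
 * and a rate lambda, walk the CDF term by term until it exceeds u. fillMultiNomialCuda_
 * above uses the related Gumbel-max trick cited in its comment: it picks
 * argmax_k ( x_k - log(-log(u_k)) ) over the classes. poissonBySequentialSearch is only
 * an illustrative standalone function, not part of the libnd4j API. */
#include <cmath>

static int poissonBySequentialSearch(double lambda, double u) {
  int    x = 0;
  double p = std::exp(-lambda);   // P(X = 0)
  double s = p;                   // running CDF
  while (u > s) {
    ++x;
    p *= lambda / x;              // P(X = x) from P(X = x - 1)
    s += p;
  }
  return x;
}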
the_stack
#include <nvbio/basic/numbers.h> #include <nvbio/basic/algorithms.h> #include <nvbio/basic/timer.h> #include <nvbio/basic/transform_iterator.h> #include <nvbio/basic/vector_view.h> #include <nvbio/basic/primitives.h> #include <nvbio/alignment/alignment.h> #include <nvbio/alignment/batched.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/iterator/transform_iterator.h> using namespace nvbio; // initialize the alignment pipeline // void align_init(struct pipeline_state *pipeline, const io::SequenceDataDevice *batch) { struct chains_state<device_tag> *chn = &pipeline->chn; struct alignment_state<device_tag> *aln = &pipeline->aln; const uint32 n_reads = pipeline->chunk.read_end - pipeline->chunk.read_begin; const uint32 n_chains = chn->n_chains; // initially, target the device pipeline->system = DEVICE; // reserve enough storage if (aln->stencil.size() < n_reads) { aln->begin_chains.clear(); aln->begin_chains.resize( n_reads ); aln->end_chains.clear(); aln->end_chains.resize( n_reads ); aln->stencil.clear(); aln->stencil.resize( n_reads ); aln->temp_queue.clear(); aln->temp_queue.resize( n_reads ); aln->query_spans.clear(); aln->query_spans.resize( n_reads ); aln->ref_spans.clear(); aln->ref_spans.resize( n_reads ); aln->sinks.clear(); aln->sinks.resize( n_reads ); } // find the first chain for each read thrust::lower_bound( chn->chain_reads.begin(), chn->chain_reads.begin() + n_chains, thrust::make_counting_iterator<uint32>( pipeline->chunk.read_begin ), thrust::make_counting_iterator<uint32>( pipeline->chunk.read_end ), aln->begin_chains.begin() ); // find the ending chain for each read thrust::upper_bound( chn->chain_reads.begin(), chn->chain_reads.begin() + n_chains, thrust::make_counting_iterator<uint32>( pipeline->chunk.read_begin ), thrust::make_counting_iterator<uint32>( pipeline->chunk.read_end ), aln->end_chains.begin() ); aln->n_active = n_reads; } #define MEM_SHORT_EXT 50 #define MEM_SHORT_LEN 200 // a functor to compute the size of a span // struct span_size { typedef uint2 argument_type; typedef uint32 result_type; NVBIO_FORCEINLINE NVBIO_HOST_DEVICE uint32 operator() (const uint2 span) const { return span.y - span.x; } }; // a functor to extract the reference span from a chain // struct span_functor { typedef io::SequenceDataAccess<DNA_N> reads_access_type; typedef io::SequenceDataAccess<DNA> reference_access_type; NVBIO_HOST_DEVICE span_functor( const runtime_options _options, const reads_access_type _reads, const reference_access_type _reference, const chains_view _chains, const uint32* _active_chains, uint2* _query_spans, uint2* _ref_spans, uint8* _flags) : options ( _options ), reads ( _reads ), reference ( _reference ), chains ( _chains ), active_chains ( _active_chains ), query_spans ( _query_spans ), ref_spans ( _ref_spans ), flags ( _flags ) {} // the functor operator NVBIO_HOST_DEVICE void operator() (const uint32 idx) const { const uint32 chain_idx = active_chains[idx]; const chain_reference chain = chains[chain_idx]; const uint32 len = chain.size(); uint2 qspan = make_uint2( uint32(-1), 0u ); uint2 rspan = make_uint2( uint32(-1), 0u ); // loop through all seeds in this chain for (uint32 i = 0; i < len; ++i) { // fetch the i-th seed const chains_view::mem_type seed = chain[i]; qspan.x = nvbio::min( qspan.x, seed.span().x ); qspan.y = nvbio::max( qspan.y, seed.span().y ); rspan.x = nvbio::min( rspan.x, seed.index_pos() ); rspan.y = nvbio::max( rspan.y, seed.index_pos() + seed.span().y - seed.span().x ); } const uint32 read_id = chain.read(); const uint2 
read_range = reads.get_range( read_id ); const uint32 read_len = read_range.y - read_range.x; qspan.x = qspan.x > MEM_SHORT_EXT ? qspan.x - MEM_SHORT_EXT : 0u; qspan.y = qspan.y + MEM_SHORT_EXT < read_len ? qspan.y + MEM_SHORT_EXT : read_len; rspan.x = rspan.x > MEM_SHORT_EXT ? rspan.x - MEM_SHORT_EXT : 0u; rspan.y = rspan.y + MEM_SHORT_EXT < reference.bps() ? rspan.y + MEM_SHORT_EXT : reference.bps(); const uint32 qdelta = qspan.y - qspan.x; const uint32 rdelta = rspan.y - rspan.x; if ((qspan.x <= 10 || qspan.y >= read_len - 10) || // because ksw_align() does not support end-to-end alignment (rdelta > qdelta + MEM_SHORT_EXT || qdelta > rdelta + MEM_SHORT_EXT) || (qdelta >= options.w * 4 || rdelta >= options.w * 4)) { flags[idx] = 0; // because ksw_align() does not support end-to-end alignment return; } // save the resulting spans query_spans[idx] = make_uint2( qspan.x + read_range.x, qspan.y + read_range.x ); ref_spans[idx] = rspan; // flag to perform short alignment flags[idx] = 1; } const runtime_options options; const reads_access_type reads; const reference_access_type reference; const chains_view chains; const uint32* active_chains; uint2* query_spans; uint2* ref_spans; uint8* flags; }; // perform banded alignment // template <typename system_tag> uint32 align_short( chains_state<system_tag> *chn, alignment_state<system_tag> *aln, const io::SequenceData *reference, const io::SequenceData *reads) { typedef io::SequenceDataAccess<DNA_N> read_access_type; typedef io::SequenceDataAccess<DNA> reference_access_type; // prepare POD access pointers to the reads and reference const read_access_type reads_access( *reads ); const reference_access_type reference_access( *reference ); // // During alignment, we essentially keep a queue of "active" reads, corresponding // to those reads for which there's more chains to process; at every step, we select // one new chain from each read as an alignment candidate, removing it from the set. 
// This is done keeping a set of (begin,end) pointers per read and advancing the // begin field - when a range becomes empty, it's removed // uint32 n_active = aln->n_active; // build a stencil of the active reads, stencil[i] = (begin_chains[i] != end_chains[i]) transform<system_tag>( n_active, aln->begin_chains.begin(), aln->end_chains.begin(), aln->stencil.begin(), nvbio::not_equal_functor<uint32>() ); nvbio::vector<system_tag,uint8> temp_storage; // filter away reads that are done processing because there's no more chains copy_flagged( n_active, aln->begin_chains.begin(), aln->stencil.begin(), aln->temp_queue.begin(), temp_storage ); aln->begin_chains.swap( aln->temp_queue ); n_active = copy_flagged( n_active, aln->end_chains.begin(), aln->stencil.begin(), aln->temp_queue.begin(), temp_storage ); aln->end_chains.swap( aln->temp_queue ); // reset the number of active reads aln->n_active = n_active; // check whether there's no more work to do if (n_active == 0) return 0u; // now build a view of the chains const chains_view chains( *chn ); typedef typename alignment_state<system_tag>::sink_type sink_type; const nvbio::vector<system_tag,uint32>& cur_chains = aln->begin_chains; nvbio::vector<system_tag,uint2>& query_spans = aln->query_spans; nvbio::vector<system_tag,uint2>& ref_spans = aln->ref_spans; nvbio::vector<system_tag,uint8>& stencil = aln->stencil; nvbio::vector<system_tag,uint32>& list = aln->temp_queue; nvbio::vector<system_tag,sink_type>& sinks = aln->sinks; // compute the chain query-spans for_each<system_tag>( n_active, thrust::make_counting_iterator<uint32>(0u), span_functor( command_line_options, reads_access, reference_access, chains, raw_pointer( cur_chains ), raw_pointer( query_spans ), raw_pointer( ref_spans ), raw_pointer( stencil ) ) ); // copy the list of indices to the short alignment problems const uint32 n_alns = copy_flagged( n_active, thrust::make_counting_iterator<uint32>(0u), stencil.begin(), list.begin(), temp_storage ); if (n_alns) { // // perform a Gotoh batched alignment between two string-sets: // the string-sets here are sparse subsets of the symbol streams holding // the reads and the reference data // typedef read_access_type::sequence_stream_type read_stream_type; typedef reference_access_type::sequence_stream_type reference_stream_type; typedef thrust::permutation_iterator<const uint2*, const uint32*> infix_iterator; const infix_iterator reads_infixes = thrust::make_permutation_iterator( raw_pointer( query_spans ), raw_pointer( list ) ); const infix_iterator reference_infixes = thrust::make_permutation_iterator( raw_pointer( ref_spans ), raw_pointer( list ) ); // build the sparse subset of the reads sequence const SparseStringSet<read_stream_type,infix_iterator> read_infix_set( n_alns, reads_access.sequence_stream(), reads_infixes ); // build the sparse subset of the reference sequence const SparseStringSet<reference_stream_type,infix_iterator> reference_infix_set( n_alns, reference_access.sequence_stream(), reference_infixes ); // compute the largest reference span const uint32 max_rspan = nvbio::reduce( n_alns, thrust::make_transform_iterator( reference_infixes, span_size() ), thrust::maximum<uint32>(), temp_storage ); const aln::SimpleGotohScheme gotoh( 2, -2, -5, -3 ); // TODO: assign the correct scores here // invoke the parallel alignment aln::batch_alignment_score( aln::make_gotoh_aligner<aln::LOCAL>( gotoh ), read_infix_set, reference_infix_set, sinks.begin(), aln::DeviceThreadScheduler(), reads_access.max_sequence_len(), max_rspan ); // TODO: 
// - check which alignments were successful // - perform a reverse alignment to find the source cell of each alignment } // add one to the processed chains nvbio::transform<system_tag>( n_active, aln->begin_chains.begin(), thrust::make_constant_iterator<uint32>( 1u ), aln->begin_chains.begin(), nvbio::add_functor() ); return n_active; } // perform banded alignment // uint32 align( struct pipeline_state *pipeline, const nvbio::io::SequenceDataHost *reads_host, const nvbio::io::SequenceDataDevice *reads_device) { if (pipeline->system == DEVICE && // if currently on the device, pipeline->aln.n_active < 16*1024) // but too little parallelism... { // copy the state of the pipeline to the host pipeline->system = HOST; pipeline->h_chn = pipeline->chn; pipeline->h_aln = pipeline->aln; } if (pipeline->system == HOST) { return align_short<host_tag>( &pipeline->h_chn, &pipeline->h_aln, (const io::SequenceData*)pipeline->mem.reference_data_host, (const io::SequenceData*)reads_host ); } else { return align_short<device_tag>( &pipeline->chn, &pipeline->aln, (const io::SequenceData*)pipeline->mem.reference_data_device, (const io::SequenceData*)reads_device ); } }
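// A minimal standalone Thrust sketch of the active-read compaction described in the
// comment above: each read keeps a (begin, end) chain range, and once a range becomes
// empty the read is dropped from the queue. align_short() achieves the same effect with
// nvbio's transform/copy_flagged primitives and a uint8 stencil; compact_active below is
// only an illustrative function, not nvbio code.
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/tuple.h>
#include <thrust/iterator/zip_iterator.h>

struct non_empty_range
{
    __host__ __device__
    bool operator() (const thrust::tuple<unsigned,unsigned> t) const
    {
        return thrust::get<0>(t) != thrust::get<1>(t);   // begin != end -> still active
    }
};

inline unsigned compact_active(
    thrust::device_vector<unsigned>& begin_chains,
    thrust::device_vector<unsigned>& end_chains)
{
    const unsigned n = begin_chains.size();
    thrust::device_vector<unsigned> new_begin(n), new_end(n);

    const auto first = thrust::make_zip_iterator( thrust::make_tuple( begin_chains.begin(), end_chains.begin() ) );
    const auto out   = thrust::make_zip_iterator( thrust::make_tuple( new_begin.begin(),    new_end.begin() ) );

    // keep only the (begin,end) pairs that are still non-empty
    const auto out_end      = thrust::copy_if( first, first + n, out, non_empty_range() );
    const unsigned n_active = static_cast<unsigned>( out_end - out );

    begin_chains.swap( new_begin );
    end_chains.swap( new_end );
    return n_active;
}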
the_stack
#include "HugeCTR/include/embeddings/hybrid_embedding/hybrid_indices.hpp" #include "HugeCTR/include/embeddings/hybrid_embedding/utils.cuh" #include "HugeCTR/include/utils.cuh" namespace indices_kernels { template <typename dtype> __global__ void fused_cache_masks(const dtype* __restrict__ samples, const dtype* __restrict__ category_frequent_index, bool* __restrict__ model_cache_mask, bool* __restrict__ network_cache_mask, uint32_t offset, uint32_t samples_size, uint32_t local_samples_size, uint32_t num_frequent, uint32_t num_frequent_per_model, uint32_t model_id) { uint32_t tid = blockDim.x * blockIdx.x + threadIdx.x; if (tid < samples_size) { dtype category = __ldg(samples + tid); dtype frequent_index = __ldg(category_frequent_index + category); if (frequent_index < num_frequent && frequent_index / num_frequent_per_model == model_id) model_cache_mask[(tid / local_samples_size) * num_frequent_per_model + frequent_index % num_frequent_per_model] = true; } if (tid < local_samples_size) { dtype category = __ldg(samples + offset + tid); dtype frequent_index = __ldg(category_frequent_index + category); if (frequent_index < num_frequent) network_cache_mask[frequent_index] = true; } } __global__ void mask_indices_to_buffer_indices( uint32_t* __restrict__ model_cache_indices, const uint32_t* __restrict__ model_cache_indices_offsets, uint32_t num_instances, uint32_t num_frequent_per_model, uint32_t model_id) { const uint32_t num_selected = __ldg(model_cache_indices_offsets + num_instances); for (uint32_t i = blockIdx.x * blockDim.x + threadIdx.x; i < num_selected; i += blockDim.x * gridDim.x) model_cache_indices[i] = model_cache_indices[i] % num_frequent_per_model + num_frequent_per_model * model_id; } template <typename dtype> __global__ void calculate_network_indices_mask(const dtype* __restrict__ local_samples, const dtype* __restrict__ category_location, bool* mask, uint32_t local_samples_size, uint32_t num_instances) { for (uint32_t i = blockIdx.x * blockDim.x + threadIdx.x; i < local_samples_size; i += gridDim.x * blockDim.x) { dtype category = local_samples[i]; uint32_t model_id = static_cast<uint32_t>(category_location[2 * category]); for (uint32_t section_id = 0; section_id < num_instances; section_id++) { mask[local_samples_size * section_id + i] = (model_id == section_id); } } } } // namespace indices_kernels namespace HugeCTR { namespace hybrid_embedding { // =========================================================================================== // Frequent Compression // =========================================================================================== template <typename dtype> FrequentEmbeddingCompression<dtype>::FrequentEmbeddingCompression( size_t max_num_frequent_categories, const Data<dtype>& data, const Model<dtype>& model) : data_(data), model_(model) { const int num_tables = data_.table_sizes.size(); std::shared_ptr<GeneralBuffer2<CudaAllocator>> buf = GeneralBuffer2<CudaAllocator>::create(); buf->reserve({max_num_frequent_categories, 1}, &model_cache_indices_); buf->reserve({model.num_instances + 1, 1}, &model_cache_indices_offsets_); buf->reserve({max_num_frequent_categories, 1}, &network_cache_indices_); buf->reserve({model.num_instances + 1, 1}, &network_cache_indices_offsets_); buf->reserve({2 * max_num_frequent_categories, 1}, &cache_masks_); buf->reserve({ceildiv<size_t>(data_.batch_size, model.num_instances) * num_tables, 1}, &frequent_sample_indices_); buf->reserve({1}, &d_num_frequent_sample_indices_); // Temporary storage 
calculate_frequent_sample_indices_temp_storage_bytes((data_.batch_size / model.num_instances) * num_tables); calculate_model_cache_indices_temp_storage_bytes(max_num_frequent_categories); calculate_network_cache_indices_temp_storage_bytes(max_num_frequent_categories); buf->reserve({frequent_sample_indices_temp_storage_bytes_, 1}, &frequent_sample_indices_temp_storage_); buf->reserve({model_cache_indices_temp_storage_bytes_, 1}, &model_cache_indices_temp_storage_); buf->reserve({network_cache_indices_temp_storage_bytes_, 1}, &network_cache_indices_temp_storage_); buf->allocate(); FrequentEmbeddingCompressionView<dtype> view = {data_.samples.get_ptr(), cache_masks_.get_ptr(), model_cache_indices_.get_ptr(), model_cache_indices_offsets_.get_ptr(), network_cache_indices_.get_ptr(), network_cache_indices_offsets_.get_ptr(), d_num_frequent_sample_indices_.get_ptr(), frequent_sample_indices_.get_ptr()}; HCTR_LIB_THROW(cudaMalloc(&device_indices_view_, sizeof(view))); HCTR_LIB_THROW(cudaMemcpy(device_indices_view_, &view, sizeof(view), cudaMemcpyHostToDevice)); } template <typename dtype> struct FrequentSampleIndicesSelectOp { const dtype* samples; const dtype* category_frequent_index; uint32_t offset; dtype num_frequent; __host__ __device__ __forceinline__ FrequentSampleIndicesSelectOp(const dtype* samples, const dtype* category_frequent_index, uint32_t offset, dtype num_frequent) : samples(samples), category_frequent_index(category_frequent_index), offset(offset), num_frequent(num_frequent) {} __device__ __forceinline__ bool operator()(const uint32_t& idx) const { dtype category = __ldg(samples + offset + idx); dtype frequent_index = __ldg(category_frequent_index + category); return frequent_index < num_frequent; } }; template <typename dtype> void FrequentEmbeddingCompression<dtype>::calculate_frequent_sample_indices_temp_storage_bytes( const size_t local_samples_size) { cub::CountingInputIterator<uint32_t> counting(0); FrequentSampleIndicesSelectOp<dtype> select_op(nullptr, nullptr, 0, 0); cub::DeviceSelect::If(nullptr, frequent_sample_indices_temp_storage_bytes_, counting, (uint32_t*)nullptr, (uint32_t*)nullptr, local_samples_size, select_op, 0); } template <typename dtype> void FrequentEmbeddingCompression<dtype>::calculate_model_cache_indices_temp_storage_bytes( const size_t num_frequent) { size_t select_bytes = 0; cub::CountingInputIterator<uint32_t> counting(0); cub::DeviceSelect::Flagged(nullptr, select_bytes, counting, (bool*)nullptr, (uint32_t*)nullptr, (uint32_t*)nullptr, num_frequent, 0); constexpr uint32_t align = 256; model_cache_indices_temp_storage_bytes_ = alignTo<size_t>(num_frequent, align) + select_bytes; } template <typename dtype> void FrequentEmbeddingCompression<dtype>::calculate_network_cache_indices_temp_storage_bytes( const size_t num_frequent) { size_t select_bytes = (size_t)0; cub::CountingInputIterator<uint32_t> counting(0); cub::DeviceSelect::Flagged(nullptr, select_bytes, counting, (bool*)nullptr, (uint32_t*)nullptr, (uint32_t*)nullptr, num_frequent, 0); network_cache_indices_temp_storage_bytes_ = select_bytes; } template <typename dtype> void FrequentEmbeddingCompression<dtype>::calculate_frequent_sample_indices(cudaStream_t stream) { const size_t num_networks = model_.num_instances; size_t local_samples_size = (data_.batch_size / num_networks) * data_.table_sizes.size(); // Select indices of frequent categories appearing in the local MLP batch cub::CountingInputIterator<uint32_t> counting(0); FrequentSampleIndicesSelectOp<dtype> select_op( 
data_.samples.get_ptr(), model_.category_frequent_index.get_ptr(), model_.global_instance_id * local_samples_size, model_.num_frequent); cub::DeviceSelect::If( reinterpret_cast<void*>(frequent_sample_indices_temp_storage_.get_ptr()), frequent_sample_indices_temp_storage_bytes_, counting, frequent_sample_indices_.get_ptr(), d_num_frequent_sample_indices_.get_ptr(), local_samples_size, select_op, stream); } template <typename dtype> void FrequentEmbeddingCompression<dtype>::calculate_model_cache_indices(size_t sm_count, cudaStream_t stream) { const size_t num_instances = model_.num_instances; const size_t num_frequent = model_.num_frequent; const size_t samples_size = data_.batch_size * data_.table_sizes.size(); size_t local_samples_size = ceildiv<size_t>(data_.batch_size, num_instances) * data_.table_sizes.size(); // Note: we assume that the number of frequent categories is a // multiple of the number of models! const size_t num_frequent_per_model = num_frequent / num_instances; /** * Explanation of the mask: * The model owns num_frequent_per_model categories. For each network, * we want to know the categories that appear in their local batch and * belong to this model. The mask is the concatenation of num_network * sections of size num_frequent_per_model. * It has a size num_frequent but does not represent all the frequent * categories, only num_networks repetitions of the same categories. */ // Temporary storage char* scratch_ptr = model_cache_indices_temp_storage_.get_ptr(); void* d_temp_storage = reinterpret_cast<void*>(scratch_ptr); size_t temp_storage_bytes = model_cache_indices_temp_storage_bytes_; const bool* d_model_cache_mask = cache_masks_.get_ptr() + num_frequent; /* Select categories according to the mask */ cub::CountingInputIterator<uint32_t> counting(0); cub::DeviceSelect::Flagged(d_temp_storage, temp_storage_bytes, counting, d_model_cache_mask, model_cache_indices_.get_ptr(), model_cache_indices_offsets_.get_ptr() + num_instances, num_frequent, stream); /* Compute offsets */ constexpr size_t TPB_offsets = 256; size_t n_blocks = ceildiv<size_t>(num_instances, TPB_offsets); offsets_kernel<<<n_blocks, TPB_offsets, 0, stream>>>(model_cache_indices_.get_ptr(), model_cache_indices_offsets_.get_ptr(), num_instances, num_frequent_per_model); HCTR_LIB_THROW(cudaPeekAtLastError()); /* Convert to buffer indices */ constexpr size_t TPB_convert = 256; n_blocks = sm_count; indices_kernels::mask_indices_to_buffer_indices<<<n_blocks, TPB_convert, 0, stream>>>( model_cache_indices_.get_ptr(), model_cache_indices_offsets_.get_ptr(), num_instances, num_frequent_per_model, model_.global_instance_id); HCTR_LIB_THROW(cudaPeekAtLastError()); } template <typename dtype> void FrequentEmbeddingCompression<dtype>::calculate_cache_masks(cudaStream_t stream) { const size_t num_instances = model_.num_instances; const size_t num_frequent = model_.num_frequent; size_t samples_size = data_.batch_size * data_.table_sizes.size(); size_t local_samples_size = ceildiv<size_t>(samples_size, num_instances); const size_t num_frequent_per_model = num_frequent / num_instances; bool* d_network_cache_mask = cache_masks_.get_ptr(); bool* d_model_cache_mask = cache_masks_.get_ptr() + num_frequent; /* Initialize the masks to false */ // // PROFILE_RECORD("fre_calculate_cache_masks.memset.start", stream); HCTR_LIB_THROW(cudaMemsetAsync(cache_masks_.get_ptr(), 0, 2 * num_frequent, stream)); // // PROFILE_RECORD("fre_calculate_cache_masks.memset.stop", stream); /* Compute the model cache mask */ constexpr size_t TPB_mask = 
256; size_t n_blocks = ceildiv<size_t>(samples_size, TPB_mask); // // PROFILE_RECORD("fre_calculate_cache_masks.start", stream); indices_kernels::fused_cache_masks<<<n_blocks, TPB_mask, 0, stream>>>( data_.samples.get_ptr(), model_.category_frequent_index.get_ptr(), d_model_cache_mask, d_network_cache_mask, model_.global_instance_id * local_samples_size, samples_size, local_samples_size, num_frequent, num_frequent_per_model, model_.global_instance_id); HCTR_LIB_THROW(cudaPeekAtLastError()); // // PROFILE_RECORD("fre_calculate_cache_masks.stop", stream); } template <typename dtype> void FrequentEmbeddingCompression<dtype>::calculate_network_cache_indices(cudaStream_t stream) { const size_t num_instances = model_.num_instances; const size_t num_frequent = model_.num_frequent; size_t local_samples_size = ceildiv<size_t>(data_.batch_size, num_instances) * data_.table_sizes.size(); // Note: we assume that the number of frequent categories is a // multiple of the number of models! const size_t num_frequent_per_model = num_frequent / num_instances; // Temporary storage char* scratch_ptr = network_cache_indices_temp_storage_.get_ptr(); void* d_temp_storage = reinterpret_cast<void*>(scratch_ptr); size_t temp_storage_bytes = network_cache_indices_temp_storage_bytes_; const bool* d_network_cache_mask = cache_masks_.get_ptr(); /* Select categories according to the mask */ cub::CountingInputIterator<uint32_t> counting(0); // // PROFILE_RECORD("fre_calculate_network_cache_indices.device_select_flagged.start", stream); cub::DeviceSelect::Flagged(d_temp_storage, temp_storage_bytes, counting, d_network_cache_mask, network_cache_indices_.get_ptr(), network_cache_indices_offsets_.get_ptr() + num_instances, num_frequent, stream); // // PROFILE_RECORD("fre_calculate_network_cache_indices.device_select_flagged.stop", stream); /* Compute offsets */ constexpr size_t TPB_offsets = 256; size_t n_blocks = ceildiv<size_t>(num_instances, TPB_offsets); // // PROFILE_RECORD("fre_calculate_network_cache_indices.offsets_kernel.start", stream); offsets_kernel<<<n_blocks, TPB_offsets, 0, stream>>>(network_cache_indices_.get_ptr(), network_cache_indices_offsets_.get_ptr(), num_instances, num_frequent_per_model); HCTR_LIB_THROW(cudaPeekAtLastError()); // // PROFILE_RECORD("fre_calculate_network_cache_indices.offsets_kernel.stop", stream); } // =========================================================================================== // Inrequent Selection // =========================================================================================== template <typename dtype> InfrequentEmbeddingSelection<dtype>::InfrequentEmbeddingSelection(const Data<dtype>& data, const Model<dtype>& model) : data_(data), model_(model) { const size_t num_tables = data_.table_sizes.size(); auto buf = GeneralBuffer2<CudaAllocator>::create(); buf->reserve({data_.batch_size, num_tables}, &model_indices_); buf->reserve({ceildiv<size_t>(data_.batch_size, model.num_instances), num_tables}, &network_indices_); // buf->reserve({model.num_instances}, &model_indices_sizes_); // buf->reserve({model.num_instances}, &model_indices_sizes_ptrs_); // buf->reserve({model.num_instances}, &network_indices_sizes_); // buf->reserve({model.num_instances}, &network_indices_sizes_ptrs_); // Temporary storage calculate_model_indices_temp_storage_bytes(data_.batch_size, num_tables); calculate_network_indices_temp_storage_bytes(data_.batch_size, num_tables, model.num_instances); buf->reserve({model_indices_temp_storage_bytes_, 1}, &model_indices_temp_storage_); 
buf->reserve({network_indices_temp_storage_bytes_, 1}, &network_indices_temp_storage_); buf->allocate(); auto managed_buf = GeneralBuffer2<CudaManagedAllocator>::create(); managed_buf->reserve({model.num_instances + 1, 1}, &model_indices_offsets_); managed_buf->reserve({model.num_instances + 1, 1}, &network_indices_offsets_); managed_buf->allocate(); int current_device; HCTR_LIB_THROW(cudaGetDevice(&current_device)); HCTR_LIB_THROW(cudaMemAdvise(managed_buf->get_ptr(), managed_buf->get_size_in_bytes(), cudaMemAdviseSetReadMostly, current_device)); InfrequentEmbeddingSelectionView<dtype> view = { data_.samples.get_ptr(), model_indices_.get_ptr(), model_indices_offsets_.get_ptr(), network_indices_.get_ptr(), network_indices_offsets_.get_ptr()}; HCTR_LIB_THROW(cudaMalloc(&device_indices_view_, sizeof(view))); HCTR_LIB_THROW(cudaMemcpy(device_indices_view_, &view, sizeof(view), cudaMemcpyHostToDevice)); } template <typename dtype> struct ModelIndicesSelectOp { const dtype* samples; const dtype* category_location; uint32_t my_model_id; __host__ __device__ __forceinline__ ModelIndicesSelectOp(const dtype* samples, const dtype* category_location, uint32_t my_model_id) : samples(samples), category_location(category_location), my_model_id(my_model_id) {} __device__ __forceinline__ bool operator()(const uint32_t& idx) const { dtype category = __ldg(samples + idx); dtype model_id = __ldg(category_location + 2 * category); return model_id == my_model_id; } }; template <typename dtype> void InfrequentEmbeddingSelection<dtype>::calculate_model_indices_temp_storage_bytes( size_t max_batch_size, size_t table_size) { cub::CountingInputIterator<uint32_t> counting(0); ModelIndicesSelectOp<dtype> select_op(nullptr, nullptr, 0); cub::DeviceSelect::If(nullptr, model_indices_temp_storage_bytes_, counting, (uint32_t*)nullptr, (uint32_t*)nullptr, max_batch_size * table_size, select_op, 0); } template <typename dtype> void InfrequentEmbeddingSelection<dtype>::calculate_network_indices_temp_storage_bytes( size_t max_batch_size, size_t table_size, const uint32_t num_instances) { uint32_t samples_size = max_batch_size * table_size; uint32_t local_samples_size = ceildiv<uint32_t>(samples_size, num_instances); // Calculate select bytes size_t select_bytes = 0; cub::CountingInputIterator<uint32_t> counting(0); cub::DeviceSelect::Flagged(nullptr, select_bytes, counting, (bool*)nullptr, (uint32_t*)nullptr, (uint32_t*)nullptr, samples_size, 0); // Total size constexpr uint32_t align = 256; network_indices_temp_storage_bytes_ = alignTo<size_t>(sizeof(bool) * samples_size, align) + select_bytes; } template <typename dtype> void InfrequentEmbeddingSelection<dtype>::calculate_model_indices(cudaStream_t stream) { const uint32_t& num_instances = model_.num_instances; size_t local_batch_size = ceildiv<size_t>(data_.batch_size, num_instances); // Select indices of infrequent categories belonging to this model cub::CountingInputIterator<uint32_t> counting(0); ModelIndicesSelectOp<dtype> select_op(data_.samples.get_ptr(), model_.category_location.get_ptr(), model_.global_instance_id); // // PROFILE_RECORD("inf_calculate_model_indices.device_select_if.start", stream); cub::DeviceSelect::If(reinterpret_cast<void*>(model_indices_temp_storage_.get_ptr()), model_indices_temp_storage_bytes_, counting, model_indices_.get_ptr(), model_indices_offsets_.get_ptr() + num_instances, data_.batch_size * data_.table_sizes.size(), select_op, stream); // // PROFILE_RECORD("inf_calculate_model_indices.device_select_if.stop", stream); // Compute offsets 
constexpr size_t TPB = 256; const size_t n_blocks = ceildiv<size_t>(num_instances, TPB); // // PROFILE_RECORD("inf_calculate_model_indices.offsets_kernel.start", stream); offsets_kernel<<<n_blocks, TPB, 0, stream>>>(model_indices_.get_ptr(), model_indices_offsets_.get_ptr(), num_instances, local_batch_size * data_.table_sizes.size()); // // PROFILE_RECORD("inf_calculate_model_indices.offsets_kernel.stop", stream); HCTR_LIB_THROW(cudaPeekAtLastError()); } template <typename dtype> void InfrequentEmbeddingSelection<dtype>::calculate_network_indices(size_t sm_count, cudaStream_t stream) { const uint32_t num_instances = model_.num_instances; uint32_t samples_size = data_.batch_size * data_.table_sizes.size(); uint32_t local_samples_size = ceildiv<uint32_t>(samples_size, num_instances); // Temporary storage constexpr uint32_t align = 256; char* scratch_ptr = network_indices_temp_storage_.get_ptr(); size_t scratch_offset = 0; bool* d_mask = reinterpret_cast<bool*>(scratch_ptr + scratch_offset); scratch_offset += alignTo<size_t>(sizeof(bool) * samples_size, align); void* d_temp_storage = reinterpret_cast<void*>(scratch_ptr + scratch_offset); size_t temp_storage_bytes = network_indices_temp_storage_bytes_ - scratch_offset; // Compute mask (for each source GPU, whether each element in the batch is located there) constexpr uint32_t TPB_mask = 256; uint32_t n_blocks_mask = ceildiv<uint32_t>(local_samples_size, TPB_mask); // // PROFILE_RECORD("inf_calculate_network_indices.calculate_network_indices_mask.start", // stream); indices_kernels::calculate_network_indices_mask<<<n_blocks_mask, TPB_mask, 0, stream>>>( data_.samples.get_ptr() + model_.global_instance_id * local_samples_size, model_.category_location.get_ptr(), d_mask, local_samples_size, num_instances); HCTR_LIB_THROW(cudaPeekAtLastError()); // // PROFILE_RECORD("inf_calculate_network_indices.calculate_network_indices_mask.stop", stream); // Select indices according to the mask cub::CountingInputIterator<uint32_t> counting(0); // // PROFILE_RECORD("inf_calculate_network_indices.device_select_flagged.start", stream); cub::DeviceSelect::Flagged( d_temp_storage, temp_storage_bytes, counting, d_mask, network_indices_.get_ptr(), network_indices_offsets_.get_ptr() + num_instances, samples_size, stream); // // PROFILE_RECORD("inf_calculate_network_indices.device_select_flagged.stop", stream); // Compute offsets constexpr uint32_t TPB_offsets = 256; uint32_t n_blocks_offsets = ceildiv<uint32_t>(num_instances, TPB_offsets); // // PROFILE_RECORD("inf_calculate_network_indices.offsets_kernel.start", stream); offsets_kernel<<<n_blocks_offsets, TPB_offsets, 0, stream>>>(network_indices_.get_ptr(), network_indices_offsets_.get_ptr(), num_instances, local_samples_size); HCTR_LIB_THROW(cudaPeekAtLastError()); // // PROFILE_RECORD("inf_calculate_network_indices.offsets_kernel.stop", stream); // Re-map indices between 0 and local_samples_size - 1 uint32_t TPB_remap = 256; uint32_t n_blocks_remap = sm_count; // // PROFILE_RECORD("inf_calculate_network_indices.modulo_kernel.start", stream); modulo_kernel<<<n_blocks_remap, TPB_remap, 0, stream>>>( network_indices_.get_ptr(), network_indices_offsets_.get_ptr() + num_instances, local_samples_size); HCTR_LIB_THROW(cudaPeekAtLastError()); // // PROFILE_RECORD("inf_calculate_network_indices.modulo_kernel.stop", stream); } // template <typename dtype> // void InfrequentEmbeddingSelection<dtype>::calculate_model_indices_sizes_from_offsets( // size_t embedding_vec_bytes, cudaStream_t stream) { // constexpr size_t TPB = 
256; // const size_t n_blocks = ceildiv<size_t>(model_.num_instances, TPB); // offsets_to_sizes<<<n_blocks, TPB, 0, stream>>>( // model_indices_sizes_.get_ptr(), model_indices_offsets_.get_ptr(), // embedding_vec_bytes, model_.num_instances); // } // template <typename dtype> // void InfrequentEmbeddingSelection<dtype>::calculate_network_indices_sizes_from_offsets( // size_t embedding_vec_bytes, cudaStream_t stream) { // constexpr size_t TPB = 256; // const size_t n_blocks = ceildiv<size_t>(model_.num_instances, TPB); // offsets_to_sizes<<<n_blocks, TPB, 0, stream>>>( // network_indices_sizes_.get_ptr(), network_indices_offsets_.get_ptr(), // embedding_vec_bytes, model_.num_instances); // } template <typename dtype> void compute_indices(FrequentEmbeddingCompression<dtype>& compression, InfrequentEmbeddingSelection<dtype>& selection, CommunicationType communication_type, bool compute_network_cache_indices, cudaStream_t stream, int sm_count) { compression.calculate_frequent_sample_indices(stream); selection.calculate_model_indices(stream); if (communication_type != CommunicationType::NVLink_SingleNode) { selection.calculate_network_indices(sm_count, stream); } else { compression.calculate_cache_masks(stream); if (compute_network_cache_indices) { compression.calculate_network_cache_indices(stream); } compression.calculate_model_cache_indices(sm_count, stream); } } template void compute_indices<uint32_t>(FrequentEmbeddingCompression<uint32_t>& compression, InfrequentEmbeddingSelection<uint32_t>& selection, CommunicationType communication_type, bool compute_network_cache_indices, cudaStream_t stream, int sm_count); template void compute_indices<long long>(FrequentEmbeddingCompression<long long>& compression, InfrequentEmbeddingSelection<long long>& selection, CommunicationType communication_type, bool compute_network_cache_indices, cudaStream_t stream, int sm_count); template class FrequentEmbeddingCompression<uint32_t>; template class FrequentEmbeddingCompression<long long>; template class InfrequentEmbeddingSelection<uint32_t>; template class InfrequentEmbeddingSelection<long long>; } // namespace hybrid_embedding } // namespace HugeCTR
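// A minimal standalone sketch of the two-phase cub::DeviceSelect::Flagged idiom used
// throughout this file: the calculate_*_temp_storage_bytes() methods call it once with a
// null d_temp_storage pointer purely to query the required scratch size, and the
// calculate_*_indices() methods call it again with real storage to compact the indices of
// the set flags. select_flagged_indices is only an illustrative helper (HugeCTR instead
// pre-reserves the scratch buffer once in the constructor and reuses it).
#include <cub/cub.cuh>
#include <cuda_runtime.h>
#include <cstdint>

inline void select_flagged_indices(const bool* d_flags, uint32_t* d_out,
                                   uint32_t* d_num_selected_out, int num_items,
                                   cudaStream_t stream) {
  cub::CountingInputIterator<uint32_t> counting(0);

  // Phase 1: query how many bytes of temporary storage the selection needs.
  void* d_temp_storage = nullptr;
  size_t temp_storage_bytes = 0;
  cub::DeviceSelect::Flagged(d_temp_storage, temp_storage_bytes, counting, d_flags, d_out,
                             d_num_selected_out, num_items, stream);

  // Phase 2: allocate the scratch space and perform the actual compaction.
  cudaMalloc(&d_temp_storage, temp_storage_bytes);
  cub::DeviceSelect::Flagged(d_temp_storage, temp_storage_bytes, counting, d_flags, d_out,
                             d_num_selected_out, num_items, stream);
  cudaFree(d_temp_storage);
}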
the_stack
#include <vector_types.h> #include <thrust/copy.h> #include <thrust/count.h> #include <stdio.h> #include <limits> namespace pcl { namespace cuda { ////////////////////////////////////////////////////////////////////////// template <template <typename> class Storage> SampleConsensusModelPlane<Storage>::SampleConsensusModelPlane ( const PointCloudConstPtr &cloud) : SampleConsensusModel<Storage> (cloud) { } ////////////////////////////////////////////////////////////////////////// template <template <typename> class Storage> void SampleConsensusModelPlane<Storage>::getSamples (int &iterations, Indices &samples) { samples.resize (3); float trand = indices_->size () / (RAND_MAX + 1.0f); for (int i = 0; i < 3; ++i) { int idx = (int)(rngl_ () * trand); samples[i] = (*indices_)[idx]; } } ////////////////////////////////////////////////////////////////////////// template <template <typename> class Storage> bool SampleConsensusModelPlane<Storage>::computeModelCoefficients ( const Indices &samples, Coefficients &model_coefficients) { if (samples.size () != 3) { return (false); } // Compute the segment values (in 3d) between p1 and p0 float3 p1p0 = ((PointXYZRGB)input_->points[samples[1]]).xyz - ((PointXYZRGB)input_->points[samples[0]]).xyz; // Compute the segment values (in 3d) between p2 and p0 float3 p2p0 = ((PointXYZRGB)input_->points[samples[2]]).xyz - ((PointXYZRGB)input_->points[samples[0]]).xyz; // Avoid some crashes by checking for collinearity here float3 dy1dy2 = p1p0 / p2p0; if ( (dy1dy2.x == dy1dy2.y) && (dy1dy2.z == dy1dy2.y) ) // Check for collinearity return (false); // Compute the plane coefficients from the 3 given points in a straightforward manner // calculate the plane normal n = (p2-p1) x (p3-p1) = cross (p2-p1, p3-p1) float3 mc = normalize (cross (p1p0, p2p0)); if (model_coefficients.size () != 4) model_coefficients.resize (4); model_coefficients[0] = mc.x; model_coefficients[1] = mc.y; model_coefficients[2] = mc.z; // ... 
+ d = 0 model_coefficients[3] = -1 * dot (mc, ((PointXYZRGB)input_->points[samples[0]]).xyz); return (true); } ////////////////////////////////////////////////////////////////////////// template <template <typename> class Storage> //template <typename Tuple> float4 CreatePlaneHypothesis<Storage>::operator () (int t) { float4 coeff; coeff.x = coeff.y = coeff.z = coeff.w = bad_value; int3 samples; float trand = nr_indices / (RAND_MAX + 1.0f); thrust::default_random_engine rng (t); // rng.discard (10); samples.x = indices[(int)(rng () * trand)]; // rng.discard (20); samples.y = indices[(int)(rng () * trand)]; // rng.discard (30); samples.z = indices[(int)(rng () * trand)]; /* samples.x = indices[(int)(thrust::get<0>(t) * trand)]; samples.y = indices[(int)(thrust::get<1>(t) * trand)]; samples.z = indices[(int)(thrust::get<2>(t) * trand)];*/ if (isnan (input[samples.x].x) || isnan (input[samples.y].x) || isnan (input[samples.z].x)) return (coeff); // Compute the segment values (in 3d) between p1 and p0 float3 p1p0 = input[samples.y].xyz - input[samples.x].xyz; // Compute the segment values (in 3d) between p2 and p0 float3 p2p0 = input[samples.z].xyz - input[samples.x].xyz; // Avoid some crashes by checking for collinearity here float3 dy1dy2 = p1p0 / p2p0; if ( (dy1dy2.x == dy1dy2.y) && (dy1dy2.z == dy1dy2.y) ) // Check for collinearity return (coeff); // Compute the plane coefficients from the 3 given points in a straightforward manner // calculate the plane normal n = (p2-p1) x (p3-p1) = cross (p2-p1, p3-p1) float3 mc = normalize (cross (p1p0, p2p0)); coeff.x = mc.x; coeff.y = mc.y; coeff.z = mc.z; // ... + d = 0 coeff.w = -1 * dot (mc, input[samples.x].xyz); return (coeff); } ////////////////////////////////////////////////////////////////////////// template <template <typename> class Storage> bool SampleConsensusModelPlane<Storage>::generateModelHypotheses ( Hypotheses &h, int max_iterations) { using namespace thrust; // Create a vector of how many samples/coefficients do we want to get h.resize (max_iterations); typename Storage<int>::type randoms (max_iterations); // a sequence counting up from 0 thrust::counting_iterator<int> index_sequence_begin (0); // transform the range [0,1,2,...N] // to a range of random numbers thrust::transform (index_sequence_begin, index_sequence_begin + max_iterations, randoms.begin (), parallel_random_generator ((int) time (0))); thrust::counting_iterator<int> first (0); // Input: Point Cloud, Indices // Output: Hypotheses transform (//first, first + max_iterations, randoms.begin (), randoms.begin () + max_iterations, h.begin (), CreatePlaneHypothesis<Storage> (thrust::raw_pointer_cast (&input_->points[0]), thrust::raw_pointer_cast (&(*indices_)[0]), (int) indices_->size (), std::numeric_limits<float>::quiet_NaN ())); return (true); } ////////////////////////////////////////////////////////////////////////// template <typename Tuple> bool CountPlanarInlier::operator () (const Tuple &t) { if (!isfinite (thrust::get<0>(t).x)) return (false); return (fabs (thrust::get<0>(t).x * coefficients.x + thrust::get<0>(t).y * coefficients.y + thrust::get<0>(t).z * coefficients.z + coefficients.w) < threshold); } ////////////////////////////////////////////////////////////////////////// template <typename Tuple> int CheckPlanarInlier::operator () (const Tuple &t) { if (isnan (thrust::get<0>(t).x)) return (-1); // Fill in XYZ (and copy NaNs with it) float4 pt; pt.x = thrust::get<0>(t).x; pt.y = thrust::get<0>(t).y; pt.z = thrust::get<0>(t).z; pt.w = 1; if (fabs (dot (pt, 
coefficients)) < threshold) // If inlier, return its position in the vector return (thrust::get<1>(t)); else // If outlier, return -1 return (-1); } ////////////////////////////////////////////////////////////////////////// template <template <typename> class Storage> int SampleConsensusModelPlane<Storage>::countWithinDistance ( const Coefficients &model_coefficients, float threshold) { using namespace thrust; // Needs a valid set of model coefficients if (model_coefficients.size () != 4) { fprintf (stderr, "[pcl::cuda::SampleConsensusModelPlane::countWithinDistance] Invalid number of model coefficients given (%lu)!\n", (unsigned long) model_coefficients.size ()); return 0; } float4 coefficients; coefficients.x = model_coefficients[0]; coefficients.y = model_coefficients[1]; coefficients.z = model_coefficients[2]; coefficients.w = model_coefficients[3]; return (int) count_if ( make_zip_iterator (make_tuple (input_->points.begin (), indices_->begin ())), make_zip_iterator (make_tuple (input_->points.begin (), indices_->begin ())) + indices_->size (), CountPlanarInlier (coefficients, threshold)); } ////////////////////////////////////////////////////////////////////////// template <template <typename> class Storage> int SampleConsensusModelPlane<Storage>::countWithinDistance ( const Hypotheses &h, int idx, float threshold) { if (isnan (((float4)h[idx]).x)) return (0); return (int) (thrust::count_if ( thrust::make_zip_iterator (thrust::make_tuple (input_->points.begin (), indices_->begin ())), thrust::make_zip_iterator (thrust::make_tuple (input_->points.begin (), indices_->begin ())) + indices_->size (), CountPlanarInlier (h[idx], threshold))); } ////////////////////////////////////////////////////////////////////////// template <template <typename> class Storage> int SampleConsensusModelPlane<Storage>::selectWithinDistance ( const Coefficients &model_coefficients, float threshold, IndicesPtr &inliers, IndicesPtr &inliers_stencil) { using namespace thrust; // Needs a valid set of model coefficients if (model_coefficients.size () != 4) { fprintf (stderr, "[pcl::cuda::SampleConsensusModelPlane::selectWithinDistance] Invalid number of model coefficients given (%lu)!\n", (unsigned long) model_coefficients.size ()); return 0; } int nr_points = (int) indices_->size (); if (!inliers_stencil) inliers_stencil.reset (new Indices()); inliers_stencil->resize (nr_points); float4 coefficients; coefficients.x = model_coefficients[0]; coefficients.y = model_coefficients[1]; coefficients.z = model_coefficients[2]; coefficients.w = model_coefficients[3]; // Send the data to the device transform ( make_zip_iterator (make_tuple (input_->points.begin (), indices_->begin ())), make_zip_iterator (make_tuple (input_->points.begin (), indices_->begin ())) + nr_points, inliers_stencil->begin (), CheckPlanarInlier (coefficients, threshold)); if (!inliers) inliers.reset (new Indices()); inliers->resize (nr_points); typename Indices::iterator it = copy_if (inliers_stencil->begin (), inliers_stencil->end (), inliers->begin (), isInlier ()); // Copy data //it = remove_copy (inliers_stencil->begin (), inliers_stencil->end (), inliers->begin (), -1); inliers->resize (it - inliers->begin ()); return (int) inliers->size(); } ////////////////////////////////////////////////////////////////////////// template <template <typename> class Storage> int SampleConsensusModelPlane<Storage>::selectWithinDistance ( const Hypotheses &h, int idx, float threshold, IndicesPtr &inliers, IndicesPtr &inliers_stencil) { using namespace thrust; 
// Needs a valid set of model coefficients /* if (model_coefficients.size () != 4) { fprintf (stderr, "[pcl::cuda::SampleConsensusModelPlane::selectWithinDistance] Invalid number of model coefficients given (%lu)!\n", (unsigned long) model_coefficients.size ()); return; }*/ int nr_points = (int) indices_->size (); if (!inliers_stencil) inliers_stencil.reset (new Indices()); inliers_stencil->resize (nr_points); float4 coefficients; coefficients.x = ((float4)h[idx]).x; coefficients.y = ((float4)h[idx]).y; coefficients.z = ((float4)h[idx]).z; coefficients.w = ((float4)h[idx]).w; // Send the data to the device transform ( make_zip_iterator (make_tuple (input_->points.begin (), indices_->begin ())), make_zip_iterator (make_tuple (input_->points.begin (), indices_->begin ())) + nr_points, inliers_stencil->begin (), CheckPlanarInlier (coefficients, threshold)); if (!inliers) inliers.reset (new Indices()); inliers->resize (nr_points); // Copy data typename Indices::iterator it = copy_if (inliers_stencil->begin (), inliers_stencil->end (), inliers->begin (), isInlier ()); inliers->resize (it - inliers->begin ()); return (int) inliers->size (); } ////////////////////////////////////////////////////////////////////////// template <template <typename> class Storage> int SampleConsensusModelPlane<Storage>::selectWithinDistance ( Hypotheses &h, int idx, float threshold, IndicesPtr &inliers_stencil, float3 & centroid) { using namespace thrust; // Needs a valid set of model coefficients /* if (model_coefficients.size () != 4) { fprintf (stderr, "[pcl::cuda::SampleConsensusModelPlane::selectWithinDistance] Invalid number of model coefficients given (%lu)!\n", (unsigned long) model_coefficients.size ()); return; }*/ int nr_points = (int) indices_->size (); if (!inliers_stencil) inliers_stencil.reset (new Indices()); inliers_stencil->resize (nr_points); float4 coefficients; coefficients.x = ((float4)h[idx]).x; coefficients.y = ((float4)h[idx]).y; coefficients.z = ((float4)h[idx]).z; coefficients.w = ((float4)h[idx]).w; transform ( make_zip_iterator (make_tuple (input_->points.begin (), indices_->begin ())), make_zip_iterator (make_tuple (input_->points.begin (), indices_->begin ())) + nr_points, inliers_stencil->begin (), CheckPlanarInlier (coefficients, threshold)); return nr_points - (int) thrust::count (inliers_stencil->begin (), inliers_stencil->end (), -1); } template class SampleConsensusModelPlane<Device>; template class SampleConsensusModelPlane<Host>; } // namespace } // namespace
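The plane fit above is the standard three-point construction: n = normalize(cross(p1 - p0, p2 - p0)) and d = -dot(n, p0), so that dot(n, p) + d = 0 for points p on the plane. Below is a minimal host-side sketch of the same arithmetic for reference; the vec3 type and helper names are local to the sketch and are not PCL CUDA types. The sketch uses the squared norm of the cross product as the degeneracy test, which is a more direct collinearity check than the component-ratio comparison in the device code.

// Host-side sketch of the three-point plane fit used above.
// vec3 and the helpers are local illustrations, not PCL types.
#include <cmath>
#include <cstdio>

struct vec3 { float x, y, z; };

static vec3 sub(vec3 a, vec3 b)   { return {a.x - b.x, a.y - b.y, a.z - b.z}; }
static vec3 cross(vec3 a, vec3 b) { return {a.y * b.z - a.z * b.y,
                                            a.z * b.x - a.x * b.z,
                                            a.x * b.y - a.y * b.x}; }
static float dot(vec3 a, vec3 b)  { return a.x * b.x + a.y * b.y + a.z * b.z; }
static vec3 normalize(vec3 a)     { float n = std::sqrt(dot(a, a));
                                    return {a.x / n, a.y / n, a.z / n}; }

// Returns false for (near-)collinear samples; otherwise fills coeff = {nx, ny, nz, d}
// so that dot(n, p) + d == 0 on the plane.
static bool planeFromThreePoints(vec3 p0, vec3 p1, vec3 p2, float coeff[4])
{
  vec3 n = cross(sub(p1, p0), sub(p2, p0));
  if (dot(n, n) < 1e-12f)          // degenerate: the three points are collinear
    return false;
  n = normalize(n);
  coeff[0] = n.x; coeff[1] = n.y; coeff[2] = n.z;
  coeff[3] = -dot(n, p0);          // same "... + d = 0" convention as the kernels
  return true;
}

int main()
{
  float c[4];
  // Plane z = 2: expect n = (0, 0, 1), d = -2.
  if (planeFromThreePoints({0, 0, 2}, {1, 0, 2}, {0, 1, 2}, c))
    std::printf("plane: %.2f %.2f %.2f %.2f\n", c[0], c[1], c[2], c[3]);
  return 0;
}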
__device__ __forceinline__ float getMinTime (const float3& volume_max, const float3& origin, const float3& dir) { float txmin = ( (dir.x > 0 ? 0.f : volume_max.x) - origin.x) / dir.x; float tymin = ( (dir.y > 0 ? 0.f : volume_max.y) - origin.y) / dir.y; float tzmin = ( (dir.z > 0 ? 0.f : volume_max.z) - origin.z) / dir.z; return fmax ( fmax (txmin, tymin), tzmin); } __device__ __forceinline__ float getMaxTime (const float3& volume_max, const float3& origin, const float3& dir) { float txmax = ( (dir.x > 0 ? volume_max.x : 0.f) - origin.x) / dir.x; float tymax = ( (dir.y > 0 ? volume_max.y : 0.f) - origin.y) / dir.y; float tzmax = ( (dir.z > 0 ? volume_max.z : 0.f) - origin.z) / dir.z; return fmin (fmin (txmax, tymax), tzmax); } struct RayCaster { enum { CTA_SIZE_X = 32, CTA_SIZE_Y = 8 }; Mat33 Rcurr; float3 tcurr; float time_step; float3 volume_size; float3 cell_size; int cols, rows; PtrStep<short> volume; Intr intr; mutable PtrStep<float> nmap; mutable PtrStep<float> vmap; int3 voxelWrap; mutable PtrStep<uchar4> vmap_curr_color; PtrStep<uchar4> color_volume; __device__ __forceinline__ float3 get_ray_next (int x, int y) const { float3 ray_next; ray_next.x = (x - intr.cx) / intr.fx; ray_next.y = (y - intr.cy) / intr.fy; ray_next.z = 1; return ray_next; } __device__ __forceinline__ bool checkInds (const int3& g) const { return (g.x >= 0 && g.y >= 0 && g.z >= 0 && g.x < VOLUME_X && g.y < VOLUME_Y && g.z < VOLUME_X); } __device__ __forceinline__ float readTsdf (int x, int y, int z) const { const short * pos = &volume.ptr(0)[((x + voxelWrap.x) % VOLUME_X) + ((y + voxelWrap.y) % VOLUME_Y) * VOLUME_X + ((z + voxelWrap.z) % VOLUME_Z) * VOLUME_X * VOLUME_Y]; return unpack_tsdf (*pos); } __device__ __forceinline__ float readHeat (int x, int y, int z) const { const uchar4 * ptrColor = &color_volume.ptr(0)[((x + voxelWrap.x) % VOLUME_X) + ((y + voxelWrap.y) % VOLUME_Y) * VOLUME_X + ((z + voxelWrap.z) % VOLUME_Z) * VOLUME_X * VOLUME_Y]; return ptrColor->w; } __device__ __forceinline__ float readRed (int x, int y, int z) const { const uchar4 * ptrColor = &color_volume.ptr(0)[((x + voxelWrap.x) % VOLUME_X) + ((y + voxelWrap.y) % VOLUME_Y) * VOLUME_X + ((z + voxelWrap.z) % VOLUME_Z) * VOLUME_X * VOLUME_Y]; return ptrColor->x; } __device__ __forceinline__ float readGreen (int x, int y, int z) const { const uchar4 * ptrColor = &color_volume.ptr(0)[((x + voxelWrap.x) % VOLUME_X) + ((y + voxelWrap.y) % VOLUME_Y) * VOLUME_X + ((z + voxelWrap.z) % VOLUME_Z) * VOLUME_X * VOLUME_Y]; return ptrColor->y; } __device__ __forceinline__ float readBlue (int x, int y, int z) const { const uchar4 * ptrColor = &color_volume.ptr(0)[((x + voxelWrap.x) % VOLUME_X) + ((y + voxelWrap.y) % VOLUME_Y) * VOLUME_X + ((z + voxelWrap.z) % VOLUME_Z) * VOLUME_X * VOLUME_Y]; return ptrColor->z; } __device__ __forceinline__ int3 getVoxel (float3 point) const { int vx = __float2int_rd (point.x / cell_size.x); // round to negative infinity int vy = __float2int_rd (point.y / cell_size.y); int vz = __float2int_rd (point.z / cell_size.z); return make_int3 (vx, vy, vz); } __device__ __forceinline__ float interpolateTrilineary (const float3& origin, const float3& dir, float time) const { return interpolateTrilineary (origin + dir * time); } __device__ __forceinline__ float interpolateTrilineary (const float3& point) const { int3 g = getVoxel (point); if (g.x <= 0 || g.x >= VOLUME_X - 1) return numeric_limits<float>::quiet_NaN (); if (g.y <= 0 || g.y >= VOLUME_Y - 1) return numeric_limits<float>::quiet_NaN (); if (g.z <= 0 || g.z >= VOLUME_Z - 1) 
return numeric_limits<float>::quiet_NaN (); float vx = (g.x + 0.5f) * cell_size.x; float vy = (g.y + 0.5f) * cell_size.y; float vz = (g.z + 0.5f) * cell_size.z; g.x = (point.x < vx) ? (g.x - 1) : g.x; g.y = (point.y < vy) ? (g.y - 1) : g.y; g.z = (point.z < vz) ? (g.z - 1) : g.z; float a = (point.x - (g.x + 0.5f) * cell_size.x) / cell_size.x; float b = (point.y - (g.y + 0.5f) * cell_size.y) / cell_size.y; float c = (point.z - (g.z + 0.5f) * cell_size.z) / cell_size.z; float res = readTsdf (g.x + 0, g.y + 0, g.z + 0) * (1 - a) * (1 - b) * (1 - c) + readTsdf (g.x + 0, g.y + 0, g.z + 1) * (1 - a) * (1 - b) * c + readTsdf (g.x + 0, g.y + 1, g.z + 0) * (1 - a) * b * (1 - c) + readTsdf (g.x + 0, g.y + 1, g.z + 1) * (1 - a) * b * c + readTsdf (g.x + 1, g.y + 0, g.z + 0) * a * (1 - b) * (1 - c) + readTsdf (g.x + 1, g.y + 0, g.z + 1) * a * (1 - b) * c + readTsdf (g.x + 1, g.y + 1, g.z + 0) * a * b * (1 - c) + readTsdf (g.x + 1, g.y + 1, g.z + 1) * a * b * c; return res; } __device__ __forceinline__ uchar3 interpolateColorTrilineary (const float3& point) const { int3 g = getVoxel (point); uchar3 black = {0, 0, 0}; if (g.x <= 0 || g.x >= VOLUME_X - 1) return black; if (g.y <= 0 || g.y >= VOLUME_Y - 1) return black; if (g.z <= 0 || g.z >= VOLUME_Z - 1) return black; float vx = (g.x + 0.5f) * cell_size.x; float vy = (g.y + 0.5f) * cell_size.y; float vz = (g.z + 0.5f) * cell_size.z; g.x = (point.x < vx) ? (g.x - 1) : g.x; g.y = (point.y < vy) ? (g.y - 1) : g.y; g.z = (point.z < vz) ? (g.z - 1) : g.z; float a = (point.x - (g.x + 0.5f) * cell_size.x) / cell_size.x; float b = (point.y - (g.y + 0.5f) * cell_size.y) / cell_size.y; float c = (point.z - (g.z + 0.5f) * cell_size.z) / cell_size.z; uchar3 res = {readRed (g.x + 0, g.y + 0, g.z + 0) * (1 - a) * (1 - b) * (1 - c) + readRed (g.x + 0, g.y + 0, g.z + 1) * (1 - a) * (1 - b) * c + readRed (g.x + 0, g.y + 1, g.z + 0) * (1 - a) * b * (1 - c) + readRed (g.x + 0, g.y + 1, g.z + 1) * (1 - a) * b * c + readRed (g.x + 1, g.y + 0, g.z + 0) * a * (1 - b) * (1 - c) + readRed (g.x + 1, g.y + 0, g.z + 1) * a * (1 - b) * c + readRed (g.x + 1, g.y + 1, g.z + 0) * a * b * (1 - c) + readRed (g.x + 1, g.y + 1, g.z + 1) * a * b * c, readGreen (g.x + 0, g.y + 0, g.z + 0) * (1 - a) * (1 - b) * (1 - c) + readGreen (g.x + 0, g.y + 0, g.z + 1) * (1 - a) * (1 - b) * c + readGreen (g.x + 0, g.y + 1, g.z + 0) * (1 - a) * b * (1 - c) + readGreen (g.x + 0, g.y + 1, g.z + 1) * (1 - a) * b * c + readGreen (g.x + 1, g.y + 0, g.z + 0) * a * (1 - b) * (1 - c) + readGreen (g.x + 1, g.y + 0, g.z + 1) * a * (1 - b) * c + readGreen (g.x + 1, g.y + 1, g.z + 0) * a * b * (1 - c) + readGreen (g.x + 1, g.y + 1, g.z + 1) * a * b * c, readBlue (g.x + 0, g.y + 0, g.z + 0) * (1 - a) * (1 - b) * (1 - c) + readBlue (g.x + 0, g.y + 0, g.z + 1) * (1 - a) * (1 - b) * c + readBlue (g.x + 0, g.y + 1, g.z + 0) * (1 - a) * b * (1 - c) + readBlue (g.x + 0, g.y + 1, g.z + 1) * (1 - a) * b * c + readBlue (g.x + 1, g.y + 0, g.z + 0) * a * (1 - b) * (1 - c) + readBlue (g.x + 1, g.y + 0, g.z + 1) * a * (1 - b) * c + readBlue (g.x + 1, g.y + 1, g.z + 0) * a * b * (1 - c) + readBlue (g.x + 1, g.y + 1, g.z + 1) * a * b * c}; return res; } __device__ __forceinline__ float interpolateHeatTrilineary (const float3& point) const { int3 g = getVoxel (point); if (g.x <= 0 || g.x >= VOLUME_X - 1) return numeric_limits<float>::quiet_NaN (); if (g.y <= 0 || g.y >= VOLUME_Y - 1) return numeric_limits<float>::quiet_NaN (); if (g.z <= 0 || g.z >= VOLUME_Z - 1) return numeric_limits<float>::quiet_NaN (); float vx = (g.x + 0.5f) * 
cell_size.x; float vy = (g.y + 0.5f) * cell_size.y; float vz = (g.z + 0.5f) * cell_size.z; g.x = (point.x < vx) ? (g.x - 1) : g.x; g.y = (point.y < vy) ? (g.y - 1) : g.y; g.z = (point.z < vz) ? (g.z - 1) : g.z; float a = (point.x - (g.x + 0.5f) * cell_size.x) / cell_size.x; float b = (point.y - (g.y + 0.5f) * cell_size.y) / cell_size.y; float c = (point.z - (g.z + 0.5f) * cell_size.z) / cell_size.z; float res = readHeat (g.x + 0, g.y + 0, g.z + 0) * (1 - a) * (1 - b) * (1 - c) + readHeat (g.x + 0, g.y + 0, g.z + 1) * (1 - a) * (1 - b) * c + readHeat (g.x + 0, g.y + 1, g.z + 0) * (1 - a) * b * (1 - c) + readHeat (g.x + 0, g.y + 1, g.z + 1) * (1 - a) * b * c + readHeat (g.x + 1, g.y + 0, g.z + 0) * a * (1 - b) * (1 - c) + readHeat (g.x + 1, g.y + 0, g.z + 1) * a * (1 - b) * c + readHeat (g.x + 1, g.y + 1, g.z + 0) * a * b * (1 - c) + readHeat (g.x + 1, g.y + 1, g.z + 1) * a * b * c; return res; } __device__ __forceinline__ void operator () () const { int x = threadIdx.x + blockIdx.x * CTA_SIZE_X; int y = threadIdx.y + blockIdx.y * CTA_SIZE_Y; if (x >= cols || y >= rows) return; vmap.ptr (y)[x] = numeric_limits<float>::quiet_NaN (); nmap.ptr (y)[x] = numeric_limits<float>::quiet_NaN (); float3 ray_start = tcurr; float3 ray_next = Rcurr * get_ray_next (x, y) + tcurr; float3 ray_dir = normalized (ray_next - ray_start); //ensure that it isn't a degenerate case ray_dir.x = (ray_dir.x == 0.f) ? 1e-15 : ray_dir.x; ray_dir.y = (ray_dir.y == 0.f) ? 1e-15 : ray_dir.y; ray_dir.z = (ray_dir.z == 0.f) ? 1e-15 : ray_dir.z; // computer time when entry and exit volume float time_start_volume = getMinTime (volume_size, ray_start, ray_dir); float time_exit_volume = getMaxTime (volume_size, ray_start, ray_dir); const float min_dist = 0.f; //in meters time_start_volume = fmax (time_start_volume, min_dist); if (time_start_volume >= time_exit_volume) return; float time_curr = time_start_volume; int3 g = getVoxel (ray_start + ray_dir * time_curr); g.x = max (0, min (g.x, VOLUME_X - 1)); g.y = max (0, min (g.y, VOLUME_Y - 1)); g.z = max (0, min (g.z, VOLUME_Z - 1)); float tsdf = readTsdf (g.x, g.y, g.z); //infinite loop guard const float max_time = 3 * (volume_size.x + volume_size.y + volume_size.z); for (; time_curr < max_time; time_curr += time_step) { float tsdf_prev = tsdf; int3 g = getVoxel ( ray_start + ray_dir * (time_curr + time_step) ); if (!checkInds (g)) break; tsdf = readTsdf (g.x, g.y, g.z); if (tsdf_prev < 0.f && tsdf > 0.f) break; if (tsdf_prev > 0.f && tsdf < 0.f) //zero crossing { float Ftdt = interpolateTrilineary (ray_start, ray_dir, time_curr + time_step); if (isnan (Ftdt)) break; float Ft = interpolateTrilineary (ray_start, ray_dir, time_curr); if (isnan (Ft)) break; float Ts = time_curr - time_step * Ft / (Ftdt - Ft); float3 vetex_found = ray_start + ray_dir * Ts; vmap.ptr (y )[x] = vetex_found.x; vmap.ptr (y + rows)[x] = vetex_found.y; vmap.ptr (y + 2 * rows)[x] = vetex_found.z; int3 g = getVoxel ( ray_start + ray_dir * time_curr ); uchar3 pointColor = interpolateColorTrilineary(vetex_found); vmap_curr_color.ptr(y)[x].x = pointColor.x; vmap_curr_color.ptr(y)[x].y = pointColor.y; vmap_curr_color.ptr(y)[x].z = pointColor.z; vmap_curr_color.ptr(y)[x].w = interpolateHeatTrilineary(vetex_found); if (g.x > 1 && g.y > 1 && g.z > 1 && g.x < VOLUME_X - 2 && g.y < VOLUME_Y - 2 && g.z < VOLUME_Z - 2) { float3 t; float3 n; t = vetex_found; t.x += cell_size.x; float Fx1 = interpolateTrilineary (t); t = vetex_found; t.x -= cell_size.x; float Fx2 = interpolateTrilineary (t); n.x = (Fx1 - Fx2); t = 
vetex_found; t.y += cell_size.y; float Fy1 = interpolateTrilineary (t); t = vetex_found; t.y -= cell_size.y; float Fy2 = interpolateTrilineary (t); n.y = (Fy1 - Fy2); t = vetex_found; t.z += cell_size.z; float Fz1 = interpolateTrilineary (t); t = vetex_found; t.z -= cell_size.z; float Fz2 = interpolateTrilineary (t); n.z = (Fz1 - Fz2); n = normalized (n); nmap.ptr (y )[x] = n.x; nmap.ptr (y + rows)[x] = n.y; nmap.ptr (y + 2 * rows)[x] = n.z; } break; } } /* for(;;) */ } }; __global__ void rayCastKernel (const RayCaster rc) { rc (); } void raycast (const Intr& intr, const Mat33& Rcurr, const float3& tcurr, float tranc_dist, const float3& volume_size, const PtrStep<short>& volume, DeviceArray2D<float>& vmap, DeviceArray2D<float>& nmap, const int3 & voxelWrap, DeviceArray2D<uchar4> & vmap_curr_color, PtrStep<uchar4> color_volume) { RayCaster rc; rc.Rcurr = Rcurr; rc.tcurr = tcurr; rc.time_step = tranc_dist * 0.8f; rc.volume_size = volume_size; rc.cell_size.x = volume_size.x / VOLUME_X; rc.cell_size.y = volume_size.y / VOLUME_Y; rc.cell_size.z = volume_size.z / VOLUME_Z; rc.cols = vmap.cols (); rc.rows = vmap.rows () / 3; rc.intr = intr; rc.volume = volume; rc.vmap = vmap; rc.nmap = nmap; rc.voxelWrap = voxelWrap; rc.vmap_curr_color = vmap_curr_color; rc.color_volume = color_volume; dim3 block (RayCaster::CTA_SIZE_X, RayCaster::CTA_SIZE_Y); dim3 grid (divUp (rc.cols, block.x), divUp (rc.rows, block.y)); rayCastKernel<<<grid, block>>>(rc); cudaSafeCall (cudaGetLastError ()); }
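The marching loop above stops at the first positive-to-negative TSDF transition and refines the hit with a one-step linear (secant) interpolation between the two bracketing samples, Ts = t - dt * Ft / (Ftdt - Ft). A minimal host-side sketch of that refinement, with illustrative names:

// Host-side sketch of the zero-crossing refinement used in the ray caster:
// given TSDF samples Ft at time t and Ftdt at time t + dt that bracket a sign
// change (Ft > 0, Ftdt < 0), linearly interpolate the crossing time.
// The device code does the same with interpolateTrilineary samples.
#include <cstdio>

static float refineCrossing(float t, float dt, float Ft, float Ftdt)
{
  // Linear model F(t + s) ~= Ft + s * (Ftdt - Ft) / dt, solved for F = 0.
  return t - dt * Ft / (Ftdt - Ft);
}

int main()
{
  float t = 1.0f, dt = 0.1f;
  float Ft = 0.03f, Ftdt = -0.01f;   // sign change inside [t, t + dt]
  std::printf("surface at t = %f\n", refineCrossing(t, dt, Ft, Ftdt)); // 1.075
  return 0;
}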
extern "C" { // CUDA version of the components in // "ai_economist.foundation.components.covid19_components.py" __global__ void CudaControlUSStateOpenCloseStatusStep( int * stringency_level, const int kActionCooldownPeriod, int * action_in_cooldown_until, const int * kDefaultAgentActionMask, const int * kNoOpAgentActionMask, const int kNumStringencyLevels, int * actions, float * obs_a_stringency_policy_indicators, float * obs_a_action_mask, float * obs_p_stringency_policy_indicators, int * env_timestep_arr, const int kNumAgents, const int kEpisodeLength ) { const int kEnvId = blockIdx.x; const int kAgentId = threadIdx.x; // Increment time ONCE -- only 1 thread can do this. if (kAgentId == 0) { env_timestep_arr[kEnvId] += 1; } // Wait here until timestep has been updated __syncthreads(); assert(env_timestep_arr[kEnvId] > 0 && env_timestep_arr[kEnvId] <= kEpisodeLength); assert (kAgentId <= kNumAgents - 1); // Update the stringency levels for the US states if (kAgentId < (kNumAgents - 1)) { // Indices for time-dependent and time-independent arrays // Time dependent arrays have shapes // (num_envs, kEpisodeLength + 1, kNumAgents - 1) // Time independent arrays have shapes (num_envs, kNumAgents - 1) const int kArrayIdxOffset = kEnvId * (kEpisodeLength + 1) * (kNumAgents - 1); int time_dependent_array_index_curr_t = kArrayIdxOffset + env_timestep_arr[kEnvId] * (kNumAgents - 1) + kAgentId; int time_dependent_array_index_prev_t = kArrayIdxOffset + (env_timestep_arr[kEnvId] - 1) * (kNumAgents - 1) + kAgentId; const int time_independent_array_index = kEnvId * (kNumAgents - 1) + kAgentId; // action is not a NO-OP if (actions[time_independent_array_index] != 0) { stringency_level[time_dependent_array_index_curr_t] = actions[time_independent_array_index]; } else { stringency_level[time_dependent_array_index_curr_t] = stringency_level[time_dependent_array_index_prev_t]; } if (env_timestep_arr[kEnvId] == action_in_cooldown_until[ time_independent_array_index] + 1) { if (actions[time_independent_array_index] != 0) { assert(0 <= actions[time_independent_array_index] <= kNumStringencyLevels); action_in_cooldown_until[time_independent_array_index] += kActionCooldownPeriod; } else { action_in_cooldown_until[time_independent_array_index] += 1; } } obs_a_stringency_policy_indicators[ time_independent_array_index ] = stringency_level[time_dependent_array_index_curr_t] / static_cast<float>(kNumStringencyLevels); // CUDA version of generate_masks() for (int action_id = 0; action_id < (kNumStringencyLevels + 1); action_id++) { int action_mask_array_index = kEnvId * (kNumStringencyLevels + 1) * (kNumAgents - 1) + action_id * (kNumAgents - 1) + kAgentId; if (env_timestep_arr[kEnvId] < action_in_cooldown_until[ time_independent_array_index] ) { obs_a_action_mask[action_mask_array_index] = kNoOpAgentActionMask[action_id]; } else { obs_a_action_mask[action_mask_array_index] = kDefaultAgentActionMask[action_id]; } } } // Update planner obs after all the agents' obs are updated __syncthreads(); if (kAgentId == kNumAgents - 1) { for (int ag_id = 0; ag_id < (kNumAgents - 1); ag_id++) { const int kIndex = kEnvId * (kNumAgents - 1) + ag_id; obs_p_stringency_policy_indicators[ kIndex ] = obs_a_stringency_policy_indicators[ kIndex ]; } } } __global__ void CudaFederalGovernmentSubsidyStep( int * subsidy_level, float * subsidy, const int kSubsidyInterval, const int kNumSubsidyLevels, const float * KMaxDailySubsidyPerState, const int * kDefaultPlannerActionMask, const int * kNoOpPlannerActionMask, int * actions, float * 
obs_a_time_until_next_subsidy, float * obs_a_current_subsidy_level, float * obs_p_time_until_next_subsidy, float * obs_p_current_subsidy_level, float * obs_p_action_mask, int * env_timestep_arr, const int kNumAgents, const int kEpisodeLength ) { const int kEnvId = blockIdx.x; const int kAgentId = threadIdx.x; assert(env_timestep_arr[kEnvId] > 0 && env_timestep_arr[kEnvId] <= kEpisodeLength); assert (kAgentId <= kNumAgents - 1); int t_since_last_subsidy = env_timestep_arr[kEnvId] % kSubsidyInterval; // Setting the (federal government) planner's subsidy level // to be the subsidy level for all the US states if (kAgentId < kNumAgents - 1) { // Indices for time-dependent and time-independent arrays // Time dependent arrays have shapes (num_envs, // kEpisodeLength + 1, kNumAgents - 1) // Time independent arrays have shapes (num_envs, kNumAgents - 1) const int kArrayIdxOffset = kEnvId * (kEpisodeLength + 1) * (kNumAgents - 1); int time_dependent_array_index_curr_t = kArrayIdxOffset + env_timestep_arr[kEnvId] * (kNumAgents - 1) + kAgentId; int time_dependent_array_index_prev_t = kArrayIdxOffset + (env_timestep_arr[kEnvId] - 1) * (kNumAgents - 1) + kAgentId; const int time_independent_array_index = kEnvId * (kNumAgents - 1) + kAgentId; if ((env_timestep_arr[kEnvId] - 1) % kSubsidyInterval == 0) { assert(0 <= actions[kEnvId] <= kNumSubsidyLevels); subsidy_level[time_dependent_array_index_curr_t] = actions[kEnvId]; } else { subsidy_level[time_dependent_array_index_curr_t] = subsidy_level[time_dependent_array_index_prev_t]; } // Setting the subsidies for the US states // based on the federal government's subsidy level subsidy[time_dependent_array_index_curr_t] = subsidy_level[time_dependent_array_index_curr_t] * KMaxDailySubsidyPerState[kAgentId] / kNumSubsidyLevels; obs_a_time_until_next_subsidy[ time_independent_array_index] = 1 - (t_since_last_subsidy / static_cast<float>(kSubsidyInterval)); obs_a_current_subsidy_level[ time_independent_array_index] = subsidy_level[time_dependent_array_index_curr_t] / static_cast<float>(kNumSubsidyLevels); } else if (kAgentId == (kNumAgents - 1)) { for (int action_id = 0; action_id < kNumSubsidyLevels + 1; action_id++) { int action_mask_array_index = kEnvId * (kNumSubsidyLevels + 1) + action_id; if (env_timestep_arr[kEnvId] % kSubsidyInterval == 0) { obs_p_action_mask[action_mask_array_index] = kDefaultPlannerActionMask[action_id]; } else { obs_p_action_mask[action_mask_array_index] = kNoOpPlannerActionMask[action_id]; } } // Update planner obs after the agent's obs are updated __syncthreads(); if (kAgentId == (kNumAgents - 1)) { // Just use the values for agent id 0 obs_p_time_until_next_subsidy[kEnvId] = obs_a_time_until_next_subsidy[ kEnvId * (kNumAgents - 1) ]; obs_p_current_subsidy_level[kEnvId] = obs_a_current_subsidy_level[ kEnvId * (kNumAgents - 1) ]; } } } __global__ void CudaVaccinationCampaignStep( int * vaccinated, const int * kNumVaccinesPerDelivery, int * num_vaccines_available_t, const int kDeliveryInterval, const int kTimeWhenVaccineDeliveryBegins, float * obs_a_vaccination_campaign_t_until_next_vaccines, float * obs_p_vaccination_campaign_t_until_next_vaccines, int * env_timestep_arr, int kNumAgents, int kEpisodeLength ) { const int kEnvId = blockIdx.x; const int kAgentId = threadIdx.x; assert(env_timestep_arr[kEnvId] > 0 && env_timestep_arr[kEnvId] <= kEpisodeLength); assert(kTimeWhenVaccineDeliveryBegins > 0); assert (kAgentId <= kNumAgents - 1); // CUDA version of generate observations() int t_first_delivery = kTimeWhenVaccineDeliveryBegins + 
kTimeWhenVaccineDeliveryBegins % kDeliveryInterval; int next_t = env_timestep_arr[kEnvId] + 1; float t_until_next_vac; if (next_t <= t_first_delivery) { t_until_next_vac = min( 1, (t_first_delivery - next_t) / kDeliveryInterval); } else { float t_since_last_vac = next_t % kDeliveryInterval; t_until_next_vac = 1 - (t_since_last_vac / kDeliveryInterval); } // Update the vaccinated numbers for just the US states if (kAgentId < (kNumAgents - 1)) { const int time_independent_array_index = kEnvId * (kNumAgents - 1) + kAgentId; if ((env_timestep_arr[kEnvId] >= kTimeWhenVaccineDeliveryBegins) && (env_timestep_arr[kEnvId] % kDeliveryInterval == 0)) { num_vaccines_available_t[time_independent_array_index] = kNumVaccinesPerDelivery[kAgentId]; } else { num_vaccines_available_t[time_independent_array_index] = 0; } obs_a_vaccination_campaign_t_until_next_vaccines[ time_independent_array_index] = t_until_next_vac; } else if (kAgentId == kNumAgents - 1) { obs_p_vaccination_campaign_t_until_next_vaccines[kEnvId] = t_until_next_vac; } } }
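The vaccination kernel derives its per-agent observation purely from the delivery schedule: the fraction of the delivery interval remaining before the next shipment. A host-side sketch of that computation follows, written with explicit float casts (the device code evaluates the pre-delivery branch with integer operands, so the fraction truncates there); the function name is illustrative and not part of the ai-economist host API.

// Host-side sketch of the "time until next vaccine delivery" observation
// computed in CudaVaccinationCampaignStep. Illustrative helper, not the real API.
#include <algorithm>
#include <cstdio>

static float timeUntilNextVaccines(int t, int deliveryBegins, int deliveryInterval)
{
  // Same schedule as the kernel: first delivery is deliveryBegins rounded up
  // onto the delivery grid, then every deliveryInterval steps after that.
  int t_first_delivery = deliveryBegins + deliveryBegins % deliveryInterval;
  int next_t = t + 1;
  if (next_t <= t_first_delivery)
    return std::min(1.0f,
        static_cast<float>(t_first_delivery - next_t) / deliveryInterval);
  float t_since_last_vac = static_cast<float>(next_t % deliveryInterval);
  return 1.0f - t_since_last_vac / deliveryInterval;
}

int main()
{
  // E.g. deliveries begin at t = 30 and repeat every 7 steps.
  const int ts[] = {5, 29, 30, 33, 36};
  for (int t : ts)
    std::printf("t = %2d -> obs = %.3f\n", t, timeUntilNextVaccines(t, 30, 7));
  return 0;
}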
#include <glog/logging.h> #include <cmath> #include <memory> #include <vector> namespace dietgpu { template <FloatType FT, int Threads> struct SplitFloatNonAligned { static __device__ void split( const typename FloatTypeInfo<FT>::WordT* in, uint32_t size, typename FloatTypeInfo<FT>::CompT* compOut, typename FloatTypeInfo<FT>::NonCompT* nonCompOut, uint32_t* warpHistogram) { using FTI = FloatTypeInfo<FT>; using CompT = typename FTI::CompT; using NonCompT = typename FTI::NonCompT; for (uint32_t i = blockIdx.x * blockDim.x + threadIdx.x; i < size; i += gridDim.x * blockDim.x) { CompT comp; NonCompT nonComp; FTI::split(in[i], comp, nonComp); atomicAdd(&warpHistogram[comp], 1); compOut[i] = comp; nonCompOut[i] = nonComp; } } }; template <int Threads> struct SplitFloatNonAligned<FloatType::kFloat32, Threads> { static __device__ void split( const typename FloatTypeInfo<FloatType::kFloat32>::WordT* in, uint32_t size, typename FloatTypeInfo<FloatType::kFloat32>::CompT* compOut, typename FloatTypeInfo<FloatType::kFloat32>::NonCompT* nonCompOut, uint32_t* warpHistogram) { using FTI = FloatTypeInfo<FloatType::kFloat32>; using CompT = typename FTI::CompT; using NonCompT = typename FTI::NonCompT; // Where the low order 2 bytes are written uint16_t* nonComp2Out = (uint16_t*)nonCompOut; // Where the high order byte is written uint8_t* nonComp1Out = (uint8_t*)(nonComp2Out + roundUp(size, 8)); for (uint32_t i = blockIdx.x * blockDim.x + threadIdx.x; i < size; i += gridDim.x * blockDim.x) { CompT comp; NonCompT nonComp; FTI::split(in[i], comp, nonComp); nonComp2Out[i] = nonComp & 0xffffU; nonComp1Out[i] = nonComp >> 16; compOut[i] = comp; atomicAdd(&warpHistogram[comp], 1); } } }; template <FloatType FT, int Threads> struct SplitFloatAligned16 { static __device__ void split( const typename FloatTypeInfo<FT>::WordT* __restrict__ in, uint32_t size, typename FloatTypeInfo<FT>::CompT* __restrict__ compOut, typename FloatTypeInfo<FT>::NonCompT* __restrict__ nonCompOut, uint32_t* warpHistogram) { using FTI = FloatTypeInfo<FT>; using WordT = typename FTI::WordT; using CompT = typename FTI::CompT; using NonCompT = typename FTI::NonCompT; using VecT = typename FTI::VecT; using CompVecT = typename FTI::CompVecT; using NonCompVecT = typename FTI::NonCompVecT; constexpr int kOuterUnroll = 2; constexpr int kInnerUnroll = sizeof(VecT) / sizeof(WordT); const VecT* inV = (const VecT*)in; CompVecT* compOutV = (CompVecT*)compOut; NonCompVecT* nonCompOutV = (NonCompVecT*)nonCompOut; // Each block handles Threads * kOuterUnroll * kInnerUnroll inputs/outputs // at a time, or Threads * kOuterUnroll 16-byte words at a time constexpr int kWordsPerBlock = Threads * kOuterUnroll; constexpr int kFloatsPerBlock = kWordsPerBlock * kInnerUnroll; uint32_t fullBlocks = divDown(size, kFloatsPerBlock); // Handle by block uint32_t startBlock = blockIdx.x * kWordsPerBlock; inV += startBlock + threadIdx.x; compOutV += startBlock + threadIdx.x; nonCompOutV += startBlock + threadIdx.x; for (uint32_t b = blockIdx.x; b < fullBlocks; b += gridDim.x, inV += gridDim.x * kWordsPerBlock, compOutV += gridDim.x * kWordsPerBlock, nonCompOutV += gridDim.x * kWordsPerBlock) { VecT v[kOuterUnroll]; #pragma unroll for (uint32_t i = 0; i < kOuterUnroll; ++i) { v[i] = inV[i * Threads]; } CompVecT compV[kOuterUnroll]; NonCompVecT nonCompV[kOuterUnroll]; #pragma unroll for (uint32_t i = 0; i < kOuterUnroll; ++i) { #pragma unroll for (int j = 0; j < kInnerUnroll; ++j) { CompT comp; NonCompT nonComp; FTI::split(v[i].x[j], comp, nonComp); 
atomicAdd(&warpHistogram[comp], 1); compV[i].x[j] = comp; nonCompV[i].x[j] = nonComp; } } #pragma unroll for (uint32_t i = 0; i < kOuterUnroll; ++i) { compOutV[i * Threads] = compV[i]; nonCompOutV[i * Threads] = nonCompV[i]; } } // Handle last (partial) block for (uint32_t i = fullBlocks * kFloatsPerBlock + blockIdx.x * Threads + threadIdx.x; i < size; i += gridDim.x * Threads) { CompT comp; NonCompT nonComp; FTI::split(in[i], comp, nonComp); atomicAdd(&warpHistogram[comp], 1); compOut[i] = comp; nonCompOut[i] = nonComp; } } }; // float32 specialization template <int Threads> struct SplitFloatAligned16<FloatType::kFloat32, Threads> { static __device__ void split( const typename FloatTypeInfo<FloatType::kFloat32>::WordT* __restrict__ in, uint32_t size, typename FloatTypeInfo<FloatType::kFloat32>::CompT* __restrict__ compOut, typename FloatTypeInfo< FloatType::kFloat32>::NonCompT* __restrict__ nonCompOut, uint32_t* warpHistogram) { using FTI = FloatTypeInfo<FloatType::kFloat32>; using WordT = typename FTI::WordT; using CompT = typename FTI::CompT; using NonCompT = typename FTI::NonCompT; constexpr int kOuterUnroll = 1; constexpr int kInnerUnroll = sizeof(uint32x4) / sizeof(uint32_t); auto inV = (const uint32x4*)in; auto compOutV = (uint8x4*)compOut; auto nonCompOut2 = (uint16_t*)nonCompOut; auto nonCompOut1 = (uint8_t*)(nonCompOut2 + roundUp(size, 8)); auto nonCompOutV2 = (uint16x4*)nonCompOut2; auto nonCompOutV1 = (uint8x4*)nonCompOut1; // Each block handles Threads * kOuterUnroll * kInnerUnroll inputs/outputs // at a time, or Threads * kOuterUnroll 16-byte words at a time constexpr int kWordsPerBlock = Threads * kOuterUnroll; constexpr int kFloatsPerBlock = kWordsPerBlock * kInnerUnroll; uint32_t fullBlocks = divDown(size, kFloatsPerBlock); // Handle by block uint32_t startBlock = blockIdx.x * kWordsPerBlock; inV += startBlock + threadIdx.x; compOutV += startBlock + threadIdx.x; nonCompOutV2 += startBlock + threadIdx.x; nonCompOutV1 += startBlock + threadIdx.x; for (uint32_t b = blockIdx.x; b < fullBlocks; b += gridDim.x, inV += gridDim.x * kWordsPerBlock, compOutV += gridDim.x * kWordsPerBlock, nonCompOutV2 += gridDim.x * kWordsPerBlock, nonCompOutV1 += gridDim.x * kWordsPerBlock) { uint32x4 v[kOuterUnroll]; #pragma unroll for (uint32_t i = 0; i < kOuterUnroll; ++i) { v[i] = inV[i * Threads]; } uint8x4 compV[kOuterUnroll]; uint32x4 nonCompV[kOuterUnroll]; #pragma unroll for (uint32_t i = 0; i < kOuterUnroll; ++i) { #pragma unroll for (int j = 0; j < kInnerUnroll; ++j) { CompT comp; NonCompT nonComp; FTI::split(v[i].x[j], comp, nonComp); atomicAdd(&warpHistogram[comp], 1); compV[i].x[j] = comp; nonCompV[i].x[j] = nonComp; } } #pragma unroll for (uint32_t i = 0; i < kOuterUnroll; ++i) { compOutV[i * Threads] = compV[i]; uint16x4 nonCompV2; uint8x4 nonCompV1; for (int j = 0; j < kInnerUnroll; ++j) { nonCompV2.x[j] = nonCompV[i].x[j] & 0xffffU; nonCompV1.x[j] = nonCompV[i].x[j] >> 16; } nonCompOutV2[i * Threads] = nonCompV2; nonCompOutV1[i * Threads] = nonCompV1; } } // Handle last (partial) block for (uint32_t i = fullBlocks * kFloatsPerBlock + blockIdx.x * Threads + threadIdx.x; i < size; i += gridDim.x * Threads) { CompT comp; NonCompT nonComp; FTI::split(in[i], comp, nonComp); atomicAdd(&warpHistogram[comp], 1); compOut[i] = comp; nonCompOut2[i] = nonComp & 0xffffU; nonCompOut1[i] = nonComp >> 16; } } }; template < typename InProvider, typename NonCompProvider, FloatType FT, int Threads> __global__ void splitFloat( InProvider inProvider, void* __restrict__ compOut, uint32_t compOutStride, 
NonCompProvider nonCompProvider, uint32_t* __restrict__ histogramOut) { using WordT = typename FloatTypeInfo<FT>::WordT; using CompT = typename FloatTypeInfo<FT>::CompT; using NonCompT = typename FloatTypeInfo<FT>::NonCompT; constexpr int kWarps = Threads / kWarpSize; static_assert(Threads == kNumSymbols, ""); int batch = blockIdx.y; int warpId = threadIdx.x / kWarpSize; histogramOut += batch * kNumSymbols; // +1 in order to force very common symbols that could overlap into different // banks between different warps __shared__ uint32_t histogram[kWarps][kNumSymbols + 1]; #pragma unroll for (int i = 0; i < kWarps; ++i) { histogram[i][threadIdx.x] = 0; } __syncthreads(); uint32_t* warpHistogram = histogram[warpId]; auto curIn = (const WordT*)inProvider.getBatchStart(batch); auto headerOut = (GpuFloatHeader*)nonCompProvider.getBatchStart(batch); auto curCompOut = (CompT*)compOut + compOutStride * batch; auto curSize = inProvider.getBatchSize(batch); // Write size as a header if (blockIdx.x == 0 && threadIdx.x == 0) { GpuFloatHeader h; h.magic = kGpuFloatHeaderMagic; h.size = curSize; h.floatType = FT; *headerOut = h; } auto curNonCompOut = (NonCompT*)(headerOut + 1); // How many bytes are before the point where we are 16 byte aligned? auto nonAlignedBytes = getAlignmentRoundUp<sizeof(uint4)>(curIn); if (nonAlignedBytes > 0) { SplitFloatNonAligned<FT, Threads>::split( curIn, curSize, curCompOut, curNonCompOut, warpHistogram); } else { SplitFloatAligned16<FT, Threads>::split( curIn, curSize, curCompOut, curNonCompOut, warpHistogram); } // Accumulate warp histogram data and write into the gmem histogram __syncthreads(); uint32_t sum = histogram[0][threadIdx.x]; #pragma unroll for (int j = 1; j < kWarps; ++j) { sum += histogram[j][threadIdx.x]; } // The count for the thread's bucket could be 0 if (sum) { atomicAdd(&histogramOut[threadIdx.x], sum); } } // Update the final byte counts for the batch to take into account the // uncompressed and compressed portions template <FloatType FT, typename InProvider> __global__ void incOutputSizes(InProvider inProvider, uint32_t* outSize, uint32_t numInBatch) { uint32_t batch = blockIdx.x * blockDim.x + threadIdx.x; if (batch < numInBatch) { outSize[batch] += sizeof(GpuFloatHeader) + FloatTypeInfo<FT>::getUncompDataSize(inProvider.getBatchSize(batch)); } } // Provides the input data to ANS compression template <typename SizeProvider> struct FloatANSInProvider { using Writer = BatchWriter; __host__ FloatANSInProvider(void* ptr_dev, uint32_t stride, SizeProvider& sizeProvider) : ptr_dev_(ptr_dev), stride_(stride), sizeProvider_(sizeProvider) {} __device__ void* getBatchStart(uint32_t batch) { return (uint8_t*)ptr_dev_ + batch * stride_; } __device__ const void* getBatchStart(uint32_t batch) const { return (uint8_t*)ptr_dev_ + batch * stride_; } __device__ BatchWriter getWriter(uint32_t batch) { return BatchWriter(getBatchStart(batch)); } __device__ uint32_t getBatchSize(uint32_t batch) { return sizeProvider_.getBatchSize(batch); } void* ptr_dev_; uint32_t stride_; SizeProvider sizeProvider_; }; // Provides the output data to ANS compression template <FloatType FT, typename OutProvider, typename SizeProvider> struct FloatANSOutProvider { using Writer = BatchWriter; using FTI = FloatTypeInfo<FT>; __host__ FloatANSOutProvider( OutProvider& outProvider, SizeProvider& sizeProvider) : outProvider_(outProvider), sizeProvider_(sizeProvider) {} __device__ void* getBatchStart(uint32_t batch) { uint8_t* p = (uint8_t*)outProvider_.getBatchStart(batch); // Increment the 
pointer to past the floating point data assert(((GpuFloatHeader*)p)->magic == kGpuFloatHeaderMagic); return p + sizeof(GpuFloatHeader) + FTI::getUncompDataSize(sizeProvider_.getBatchSize(batch)); } __device__ const void* getBatchStart(uint32_t batch) const { const uint8_t* p = (const uint8_t*)outProvider_.getBatchStart(batch); // Increment the pointer to past the floating point data assert(((GpuFloatHeader*)p)->magic == kGpuFloatHeaderMagic); return p + sizeof(GpuFloatHeader) + FTI::getUncompDataSize(sizeProvider_.getBatchSize(batch)); } __device__ BatchWriter getWriter(uint32_t batch) { return BatchWriter(getBatchStart(batch)); } OutProvider outProvider_; SizeProvider sizeProvider_; }; template <typename InProvider, typename OutProvider> void floatCompressDevice( StackDeviceMemory& res, const FloatCompressConfig& config, uint32_t numInBatch, InProvider& inProvider, uint32_t maxSize, OutProvider& outProvider, uint32_t* outSize_dev, cudaStream_t stream) { auto maxUncompressedWords = maxSize / sizeof(ANSDecodedT); uint32_t maxNumCompressedBlocks = divUp(maxUncompressedWords, kDefaultBlockSize); // Temporary space for the extracted exponents; all rows must be 16 byte // aligned uint32_t compRowStride = roundUp(maxSize, sizeof(uint4)); auto toComp_dev = res.alloc<uint8_t>(stream, numInBatch * compRowStride); // We calculate a histogram of the symbols to be compressed as part of // extracting the compressible symbol from the float auto histogram_dev = res.alloc<uint32_t>(stream, numInBatch * kNumSymbols); // zero out buckets before proceeding, as we aggregate with atomic adds CUDA_VERIFY(cudaMemsetAsync( histogram_dev.data(), 0, sizeof(uint32_t) * numInBatch * kNumSymbols, stream)); #define RUN_SPLIT(FLOAT_TYPE) \ do { \ constexpr int kBlock = 256; \ auto& props = getCurrentDeviceProperties(); \ int maxBlocksPerSM = 0; \ CUDA_VERIFY(cudaOccupancyMaxActiveBlocksPerMultiprocessor( \ &maxBlocksPerSM, \ splitFloat<InProvider, OutProvider, FLOAT_TYPE, kBlock>, \ kBlock, \ 0)); \ uint32_t maxGrid = maxBlocksPerSM * props.multiProcessorCount; \ uint32_t perBatchGrid = 4 * divUp(maxGrid, numInBatch); \ auto grid = dim3(perBatchGrid, numInBatch); \ \ splitFloat<InProvider, OutProvider, FLOAT_TYPE, kBlock> \ <<<grid, kBlock, 0, stream>>>( \ inProvider, \ toComp_dev.data(), \ compRowStride, \ outProvider, \ histogram_dev.data()); \ } while (false) switch (config.floatType) { case kFloat16: RUN_SPLIT(FloatType::kFloat16); break; case kBFloat16: RUN_SPLIT(FloatType::kBFloat16); break; case kFloat32: RUN_SPLIT(FloatType::kFloat32); break; default: assert(false); break; } #undef RUN_SPLIT // outSize as reported by ansEncode is just the ANS-encoded portion of the // data. // We need to increment the sizes by the uncompressed portion (header plus // uncompressed float data) with incOutputSizes #define RUN_ANS(FT) \ do { \ auto inProviderANS = FloatANSInProvider<InProvider>( \ toComp_dev.data(), compRowStride, inProvider); \ \ auto outProviderANS = FloatANSOutProvider<FT, OutProvider, InProvider>( \ outProvider, inProvider); \ \ ansEncodeBatchDevice( \ res, \ config.ansConfig, \ numInBatch, \ inProviderANS, \ histogram_dev.data(), \ maxSize, \ outProviderANS, \ outSize_dev, \ stream); \ \ incOutputSizes<FT><<<divUp(numInBatch, 128), 128, 0, stream>>>( \ inProvider, outSize_dev, numInBatch); \ \ } while (false) // We have written the non-compressed portions of the floats into the output, // along with a header that indicates how many floats there are. 
// For compression, we need to increment the address in which the compressed // outputs are written. switch (config.floatType) { case kFloat16: RUN_ANS(FloatType::kFloat16); break; case kBFloat16: RUN_ANS(FloatType::kBFloat16); break; case kFloat32: RUN_ANS(FloatType::kFloat32); break; default: assert(false); break; } #undef RUN_ANS CUDA_TEST_ERROR(); } } // namespace dietgpu
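splitFloat builds its symbol histogram with one padded row of shared memory per warp (the +1 column keeps very common symbols in different banks across warps), then has each thread sum its own bucket across the rows and issue a single atomicAdd per bucket into the global histogram. The stand-alone kernel below sketches that pattern on plain bytes; the constants and names are illustrative, not dietgpu's.

// Stand-alone sketch of the per-warp shared-memory histogram pattern used by
// splitFloat. Each warp accumulates into its own padded row; the rows are
// summed once at the end with one global atomicAdd per bucket.
#include <cstdint>
#include <cstdio>
#include <cuda_runtime.h>

constexpr int kSymbols  = 256;   // one bucket per byte value
constexpr int kThreads  = 256;   // == kSymbols, so threadIdx.x indexes a bucket
constexpr int kWarpSize = 32;
constexpr int kWarps    = kThreads / kWarpSize;

__global__ void byteHistogram(const uint8_t* in, uint32_t size, uint32_t* histOut)
{
  __shared__ uint32_t hist[kWarps][kSymbols + 1];   // +1 padding column

  int warpId = threadIdx.x / kWarpSize;
  for (int w = 0; w < kWarps; ++w)
    hist[w][threadIdx.x] = 0;
  __syncthreads();

  // Grid-stride accumulation into this warp's private row.
  for (uint32_t i = blockIdx.x * blockDim.x + threadIdx.x; i < size;
       i += gridDim.x * blockDim.x)
    atomicAdd(&hist[warpId][in[i]], 1u);

  __syncthreads();

  // Thread t owns bucket t: sum across warp rows, then one global atomic.
  uint32_t sum = 0;
  for (int w = 0; w < kWarps; ++w)
    sum += hist[w][threadIdx.x];
  if (sum)
    atomicAdd(&histOut[threadIdx.x], sum);
}

int main()
{
  const uint32_t n = 1 << 20;
  uint8_t*  d_in   = nullptr;
  uint32_t* d_hist = nullptr;
  cudaMalloc((void**)&d_in, n);
  cudaMalloc((void**)&d_hist, kSymbols * sizeof(uint32_t));
  cudaMemset(d_in, 42, n);                          // every input byte is 42
  cudaMemset(d_hist, 0, kSymbols * sizeof(uint32_t));

  byteHistogram<<<64, kThreads>>>(d_in, n, d_hist);

  uint32_t h[kSymbols];
  cudaMemcpy(h, d_hist, sizeof(h), cudaMemcpyDeviceToHost);
  std::printf("bucket 42 = %u (expect %u)\n", h[42], n);
  cudaFree(d_in);
  cudaFree(d_hist);
  return 0;
}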
#include <curand.h> #include <curand_kernel.h> #define TPB 32 namespace { // FORWARD KERNELS extern "C" __global__ void logsum_kernel0( float* __restrict__ A, float* __restrict__ B, float* __restrict__ C) { float M[64]; __shared__ float A_shared[4096]; __shared__ float B_shared[4096]; float A_shared_local[8]; float B_shared_local[8]; float M2[64]; float A_shared_local1[8]; float B_shared_local1[8]; #pragma unroll for (int ii_init = 0; ii_init < 8; ++ii_init) { #pragma unroll for (int jj_init = 0; jj_init < 8; ++jj_init) { M[((ii_init * 8) + jj_init)] = -3.402823e+38f; } } for (int k_outer = 0; k_outer < 16; ++k_outer) { __syncthreads(); #pragma unroll for (int ax1_inner = 0; ax1_inner < 8; ++ax1_inner) { #pragma unroll for (int ax2_inner = 0; ax2_inner < 2; ++ax2_inner) { A_shared[((((((int)threadIdx.y) * 256) + (ax1_inner * 32)) + (((int)threadIdx.x) * 2)) + ax2_inner)] = A[(((((((((int)blockIdx.z) * 262144) + (((int)blockIdx.x) * 65536)) + (((int)threadIdx.y) * 4096)) + (ax1_inner * 512)) + (k_outer * 32)) + (((int)threadIdx.x) * 2)) + ax2_inner)]; } } #pragma unroll for (int ax1_inner1 = 0; ax1_inner1 < 8; ++ax1_inner1) { #pragma unroll for (int ax2_inner1 = 0; ax2_inner1 < 2; ++ax2_inner1) { B_shared[((((((int)threadIdx.y) * 256) + (ax1_inner1 * 32)) + (((int)threadIdx.x) * 2)) + ax2_inner1)] = B[(((((((((int)blockIdx.z) * 262144) + (((int)blockIdx.y) * 65536)) + (((int)threadIdx.y) * 4096)) + (ax1_inner1 * 512)) + (k_outer * 32)) + (((int)threadIdx.x) * 2)) + ax2_inner1)]; } } __syncthreads(); for (int k_inner = 0; k_inner < 32; ++k_inner) { #pragma unroll for (int ax1 = 0; ax1 < 8; ++ax1) { A_shared_local[ax1] = A_shared[(((((int)threadIdx.x) * 256) + (ax1 * 32)) + k_inner)]; } #pragma unroll for (int ax11 = 0; ax11 < 8; ++ax11) { B_shared_local[ax11] = B_shared[(((((int)threadIdx.y) * 256) + (ax11 * 32)) + k_inner)]; } #pragma unroll for (int ii = 0; ii < 8; ++ii) { #pragma unroll for (int jj = 0; jj < 8; ++jj) { M[((ii * 8) + jj)] = (M[((ii * 8) + jj)]) > ((A_shared_local[jj] + B_shared_local[ii])) ? 
(M[((ii * 8) + jj)]) : ((A_shared_local[jj] + B_shared_local[ii])); } } } } #pragma unroll for (int ii_init1 = 0; ii_init1 < 8; ++ii_init1) { #pragma unroll for (int jj_init1 = 0; jj_init1 < 8; ++jj_init1) { M2[((ii_init1 * 8) + jj_init1)] = 0.000000e+00f; } } for (int k2_outer = 0; k2_outer < 16; ++k2_outer) { __syncthreads(); #pragma unroll for (int ax1_inner2 = 0; ax1_inner2 < 8; ++ax1_inner2) { #pragma unroll for (int ax2_inner2 = 0; ax2_inner2 < 2; ++ax2_inner2) { A_shared[((((((int)threadIdx.y) * 256) + (ax1_inner2 * 32)) + (((int)threadIdx.x) * 2)) + ax2_inner2)] = A[(((((((((int)blockIdx.z) * 262144) + (((int)blockIdx.x) * 65536)) + (((int)threadIdx.y) * 4096)) + (ax1_inner2 * 512)) + (k2_outer * 32)) + (((int)threadIdx.x) * 2)) + ax2_inner2)]; } } #pragma unroll for (int ax1_inner3 = 0; ax1_inner3 < 8; ++ax1_inner3) { #pragma unroll for (int ax2_inner3 = 0; ax2_inner3 < 2; ++ax2_inner3) { B_shared[((((((int)threadIdx.y) * 256) + (ax1_inner3 * 32)) + (((int)threadIdx.x) * 2)) + ax2_inner3)] = B[(((((((((int)blockIdx.z) * 262144) + (((int)blockIdx.y) * 65536)) + (((int)threadIdx.y) * 4096)) + (ax1_inner3 * 512)) + (k2_outer * 32)) + (((int)threadIdx.x) * 2)) + ax2_inner3)]; } } __syncthreads(); for (int k2_inner = 0; k2_inner < 32; ++k2_inner) { #pragma unroll for (int ax12 = 0; ax12 < 8; ++ax12) { A_shared_local1[ax12] = A_shared[(((((int)threadIdx.x) * 256) + (ax12 * 32)) + k2_inner)]; } #pragma unroll for (int ax13 = 0; ax13 < 8; ++ax13) { B_shared_local1[ax13] = B_shared[(((((int)threadIdx.y) * 256) + (ax13 * 32)) + k2_inner)]; } #pragma unroll for (int ii1 = 0; ii1 < 8; ++ii1) { #pragma unroll for (int jj1 = 0; jj1 < 8; ++jj1) { M2[((ii1 * 8) + jj1)] = (M2[((ii1 * 8) + jj1)] + __expf(((A_shared_local1[jj1] + B_shared_local1[ii1]) - M[((ii1 * 8) + jj1)]))); } } } } #pragma unroll for (int ii_inner = 0; ii_inner < 8; ++ii_inner) { #pragma unroll for (int jj_inner = 0; jj_inner < 8; ++jj_inner) { C[(((((((((int)blockIdx.z) * 262144) + (((int)blockIdx.y) * 65536)) + (((int)threadIdx.y) * 4096)) + (ii_inner * 512)) + (((int)blockIdx.x) * 128)) + (((int)threadIdx.x) * 8)) + jj_inner)] = (__logf(M2[((ii_inner * 8) + jj_inner)]) + M[((ii_inner * 8) + jj_inner)]); } } } template <typename scalar_t> __global__ void matmul_cuda_forward_kernel( const torch::PackedTensorAccessor32<scalar_t,3,torch::RestrictPtrTraits> a, const torch::PackedTensorAccessor32<scalar_t,3,torch::RestrictPtrTraits> b, torch::PackedTensorAccessor32<scalar_t,3,torch::RestrictPtrTraits> out, const int in_size, const int a_size, const int b_size) { __shared__ scalar_t sA[TPB * TPB]; __shared__ scalar_t sB[TPB * TPB]; const int batch = blockIdx.z; const int row = threadIdx.x + blockIdx.x * blockDim.x; const int col = threadIdx.y + blockIdx.y * blockDim.y; const int tx = threadIdx.x; const int ty = threadIdx.y; const int inner_blocks = int(in_size / TPB) + 1; if (row >= a_size && col >= b_size) return; scalar_t m = -1e9; __syncthreads(); for (int q = 0; q < inner_blocks; q++) { if (ty + q * TPB < in_size) { sA[tx * TPB + ty] = a[batch][row][ty + q * TPB]; } else { sA[tx * TPB + ty] = -1e9; } if (tx + q * TPB < in_size) { sB[tx * TPB + ty] = b[batch][tx + q * TPB][col]; } else { sB[tx * TPB + ty] = -1e9; } __syncthreads(); for (int i = 0; i < TPB; ++i) { scalar_t v = sA[tx * TPB + i] + sB[i * TPB + ty]; if (v > m) m = v; } __syncthreads(); } scalar_t val = 0.0; for (int q = 0; q < inner_blocks; q++) { if (ty + q * TPB < in_size) { sA[tx * TPB + ty] = a[batch][row][ty + q * TPB]; } else { sA[tx * TPB + ty] = -1e9; } if 
(tx + q * TPB < in_size) { sB[tx * TPB + ty] = b[batch][tx + q * TPB][col]; } else { sB[tx * TPB + ty] = -1e9; } __syncthreads(); for (int i = 0; i < TPB; ++i) { scalar_t v = sA[tx * TPB + i] + sB[i * TPB + ty]; val += exp(v - m); } __syncthreads(); } if (row < a_size && col < b_size) out[batch][row][col] = log(val) + m; return; } template <typename scalar_t> __global__ void max_cuda_forward_kernel( const torch::PackedTensorAccessor32<scalar_t,3,torch::RestrictPtrTraits> a, const torch::PackedTensorAccessor32<scalar_t,3,torch::RestrictPtrTraits> b, torch::PackedTensorAccessor32<scalar_t,3,torch::RestrictPtrTraits> out, torch::PackedTensorAccessor32<int,3,torch::RestrictPtrTraits> indices, const int in_size, const int a_size, const int b_size ) { const int n = blockIdx.z; const int row = threadIdx.x + blockIdx.x * blockDim.x; const int col = threadIdx.y + blockIdx.y * blockDim.y; scalar_t val = 0.0; scalar_t m = -1e9; int ind = -1; if (row < a_size && col < b_size) { for (int i = 0; i < in_size; ++i) { scalar_t v = a[n][row][i] + b[n][i][col]; if (v > m) { m = v; ind = i; } } out[n][row][col] = m; indices[n][row][col] = ind; } } template <typename scalar_t> __global__ void sample_cuda_forward_kernel( const torch::PackedTensorAccessor32<scalar_t,3,torch::RestrictPtrTraits> a, const torch::PackedTensorAccessor32<scalar_t,3,torch::RestrictPtrTraits> b, const torch::PackedTensorAccessor32<scalar_t,3,torch::RestrictPtrTraits> rand, torch::PackedTensorAccessor32<scalar_t,3,torch::RestrictPtrTraits> out, torch::PackedTensorAccessor32<int,3,torch::RestrictPtrTraits> indices, const int in_size, const int a_size, const int b_size ) { const int n = blockIdx.z; const int row = threadIdx.x + blockIdx.x * blockDim.x; const int col = threadIdx.y + blockIdx.y * blockDim.y; scalar_t val = 0.0; scalar_t m = -1e9; int ind = -1; if (row < a_size && col < b_size) { for (int i = 0; i < in_size; ++i) { scalar_t v = a[n][row][i] + b[n][i][col]; if (v > m) { m = v; } } for (int i = 0; i < in_size; ++i) { scalar_t v = a[n][row][i] + b[n][i][col]; val += exp(v - m); } out[n][row][col] = log(val) + m; scalar_t total = 0.0; auto r = rand[n][row][col]; for (int i = 0; i < in_size; ++i) { scalar_t v = a[n][row][i] + b[n][i][col] - out[n][row][col]; if (total < r && total + exp(v) > r ){ indices[n][row][col] = i; break; } total += exp(v); } } } // BACKWARD KERNELS // LOGSUM template <typename scalar_t> __global__ void matmul_cuda_backward_kernel_A( torch::PackedTensorAccessor32<scalar_t,3,torch::RestrictPtrTraits> grad_a, const torch::PackedTensorAccessor32<scalar_t,3,torch::RestrictPtrTraits> a, const torch::PackedTensorAccessor32<scalar_t,3,torch::RestrictPtrTraits> b, const torch::PackedTensorAccessor32<scalar_t,3,torch::RestrictPtrTraits> part, const torch::PackedTensorAccessor32<scalar_t,3,torch::RestrictPtrTraits> grad_output, const int in_size, const int a_size, const int b_size ) { const int n = blockIdx.z; const int row = threadIdx.x + blockIdx.x * blockDim.x; const int col = threadIdx.y + blockIdx.y * blockDim.y; if (row < a_size && col < in_size) { scalar_t val = 0.0; for (int k = 0; k < b_size; ++k) { scalar_t v = a[n][row][col] + b[n][col][k] - part[n][row][k]; val += exp(v) * grad_output[n][row][k]; } grad_a[n][row][col] = val; } } template <typename scalar_t> __global__ void matmul_cuda_backward_kernel_B( torch::PackedTensorAccessor32<scalar_t,3,torch::RestrictPtrTraits> grad_b, const torch::PackedTensorAccessor32<scalar_t,3,torch::RestrictPtrTraits> a, const 
torch::PackedTensorAccessor32<scalar_t,3,torch::RestrictPtrTraits> b, const torch::PackedTensorAccessor32<scalar_t,3,torch::RestrictPtrTraits> part, const torch::PackedTensorAccessor32<scalar_t,3,torch::RestrictPtrTraits> grad_output, const int in_size, const int a_size, const int b_size ) { const int n = blockIdx.z; const int row = threadIdx.x + blockIdx.x * blockDim.x; const int col = threadIdx.y + blockIdx.y * blockDim.y; if (row < in_size && col < b_size) { scalar_t val = 0.0; for (int k = 0; k < a_size; ++k) { scalar_t v = a[n][k][row] + b[n][row][col] - part[n][k][col]; val += exp(v) * grad_output[n][k][col]; } grad_b[n][row][col] = val; } } // MAX / SAMPLE template <typename scalar_t> __global__ void max_cuda_backward_kernel_A( torch::PackedTensorAccessor32<scalar_t,3,torch::RestrictPtrTraits> grad_a, const torch::PackedTensorAccessor32<scalar_t,3,torch::RestrictPtrTraits> a, const torch::PackedTensorAccessor32<scalar_t,3,torch::RestrictPtrTraits> b, const torch::PackedTensorAccessor32<scalar_t,3,torch::RestrictPtrTraits> part, const torch::PackedTensorAccessor32<scalar_t,3,torch::RestrictPtrTraits> grad_output, const int in_size, const int a_size, const int b_size ) { const int n = blockIdx.z; const int row = threadIdx.x + blockIdx.x * blockDim.x; const int col = threadIdx.y + blockIdx.y * blockDim.y; if (row < a_size && col < in_size) { scalar_t val = 0.0; for (int k = 0; k < b_size; ++k) { scalar_t v = (col == part[n][row][k]) ? 1 : 0; val += v * grad_output[n][row][k]; } grad_a[n][row][col] = val; } } template <typename scalar_t> __global__ void max_cuda_backward_kernel_B( torch::PackedTensorAccessor32<scalar_t,3,torch::RestrictPtrTraits> grad_b, const torch::PackedTensorAccessor32<scalar_t,3,torch::RestrictPtrTraits> a, const torch::PackedTensorAccessor32<scalar_t,3,torch::RestrictPtrTraits> b, const torch::PackedTensorAccessor32<scalar_t,3,torch::RestrictPtrTraits> part, const torch::PackedTensorAccessor32<scalar_t,3,torch::RestrictPtrTraits> grad_output, const int in_size, const int a_size, const int b_size ) { const int n = blockIdx.z; const int row = threadIdx.x + blockIdx.x * blockDim.x; const int col = threadIdx.y + blockIdx.y * blockDim.y; if (row < in_size && col < b_size) { scalar_t val = 0.0; for (int k = 0; k < a_size; ++k) { scalar_t v = (row == part[n][k][col]) ? 1 : 0; val += v * grad_output[n][k][col]; } grad_b[n][row][col] = val; } } // BANDED KERNELS template <typename scalar_t> __global__ void banded_cuda_forward_kernel_mul( const torch::PackedTensorAccessor32<scalar_t,3,torch::RestrictPtrTraits> a, const torch::PackedTensorAccessor32<scalar_t,3,torch::RestrictPtrTraits> b, torch::PackedTensorAccessor32<scalar_t,3,torch::RestrictPtrTraits> out, torch::PackedTensorAccessor32<int,3,torch::RestrictPtrTraits> indices, const int n, const int a_lu, const int a_lb, const int b_lu, const int b_lb, const int c_lu, const int c_lb, const int mode ) { __shared__ scalar_t sA[TPB * TPB]; __shared__ scalar_t sB[TPB * TPB]; const int batch = blockIdx.z; const int i = threadIdx.x + blockIdx.x * blockDim.x; const int j = threadIdx.y + blockIdx.y * blockDim.y; const int tx = threadIdx.x; const int ty = threadIdx.y; const int a_width = a_lu + a_lb + 1; const int b_width = b_lu + b_lb + 1; const int c_width = c_lu + c_lb + 1; // b position. 
const int o = i + (j - c_lu); if (mode == 3) { int k2, pos; if (o < 0 || o >= n) return; __syncthreads(); int q = 0; int load_a = ty + q * TPB; if (load_a < a_width) { sA[tx * TPB + ty] = a[batch][i][load_a]; } else { sA[tx * TPB + ty] = 0; } int load_b = ty + q * TPB; if (load_b < b_width) { sB[tx * TPB + ty] = b[batch][i][load_b]; } else { sB[tx * TPB + ty] = 0; } /* pos = (i + (load_b - a_lu)); */ /* k2 = (pos - o) + b_lu; */ /* if ((k2 < 0 || k2 >= b_width) || (pos < 0 || pos >= n)) { */ /* sB[tx * TPB + ty] = 0; */ /* } else { */ /* sB[tx * TPB + ty] = b[batch][o][k2]; */ /* } */ __syncthreads(); scalar_t val = 0.0; for (int k = 0; k < a_width; ++k) { pos = (tx + (k - a_lu)); k2 = (pos - o) + b_lu; if (k2 < 0 || k2 >= b_width) continue; if (pos < 0 || pos >= n) continue; /* val += a[batch][i][k] * b[batch][o][k2]; */ val += sA[tx * TPB + k] * sB[o * TPB + k2]; } __syncthreads(); if (i < n && j < c_width) out[batch][i][j] = val; return; } if (i < n && j < c_lu + c_lb + 1) { int k2 = 0; int pos = 0; if (o < 0 || o >= n) return; if (mode == 1) { scalar_t val = 0.0; scalar_t m = -1e9; int ind = -1; for (int k = 0; k < a_width; ++k) { pos = (i + (k - a_lu)); k2 = (pos - o) + b_lu; if (k2 < 0 || k2 >= b_width) continue; if (pos < 0 || pos >= n) continue; scalar_t v = a[batch][i][k] + b[batch][o][k2]; if (v > m) { m = v; ind = k; } } out[batch][i][j] = m; indices[batch][i][j] = ind; } else if (mode == 0) { scalar_t val = 0.0; scalar_t m = -1e9; for (int k = 0; k < a_width; ++k) { pos = (i + (k - a_lu)); if (pos < 0 || pos >= n) continue; k2 = (pos - o) + b_lu; if (k2 < 0 || k2 >= b_width) continue; scalar_t v = a[batch][i][k] + b[batch][o][k2]; if (v > m) m = v; } for (int k = 0; k < a_width; ++k) { pos = (i + (k - a_lu)); if (pos < 0 || pos >= n) continue; k2 = (pos - o) + b_lu; if (k2 < 0 || k2 >= b_width) continue; val += exp(a[batch][i][k] + b[batch][o][k2] - m); } out[batch][i][j] = log(val) + m; } } } template <typename scalar_t> __global__ void banded_cuda_backward_kernel_mul( torch::PackedTensorAccessor32<scalar_t,3,torch::RestrictPtrTraits> grad_a, const torch::PackedTensorAccessor32<scalar_t,3,torch::RestrictPtrTraits> a, const torch::PackedTensorAccessor32<scalar_t,3,torch::RestrictPtrTraits> b, const torch::PackedTensorAccessor32<scalar_t,3,torch::RestrictPtrTraits> part, const torch::PackedTensorAccessor32<scalar_t,3,torch::RestrictPtrTraits> grad_output, const int n, const int a_lu, const int a_lb, const int b_lu, const int b_lb, const int c_lu, const int c_lb, const int mode) { const int batch = blockIdx.z; const int i = threadIdx.x + blockIdx.x * blockDim.x; const int j = threadIdx.y + blockIdx.y * blockDim.y; if (i < n && j < a_lu + a_lb + 1) { const int o = i + (j - a_lu); scalar_t val = 0.0; const int gradout_width = c_lu + c_lb + 1; if (mode == 3) { for (int k = 0; k < gradout_width; ++k) { const int pos = i + (k - c_lu); const int k2 = (o - pos) + b_lu; if (k2 < 0 || k2 >= b_lu + b_lb +1) continue; if (pos < 0 || pos >= n) continue; val += b[batch][pos][k2] * grad_output[batch][i][k]; } } else if (mode == 1) { // Max for (int k = 0; k < gradout_width; ++k) { const int pos = i + (k - c_lu); const int k2 = (o - pos) + b_lu; if (k2 < 0 || k2 >= b_lu + b_lb +1) continue; if (pos < 0 || pos >= n) continue; scalar_t v = (j == part[batch][i][k]) ? 
1 : 0; val += v * grad_output[batch][i][k]; } } else if (mode == 0) { for (int k = 0; k < gradout_width; ++k) { const int pos = i + (k - c_lu); if (pos < 0 || pos >= n) continue; const int k2 = (o - pos) + b_lu; if (k2 < 0 || k2 >= b_lu + b_lb +1) continue; scalar_t v = a[batch][i][j] + b[batch][pos][k2] - part[batch][i][k]; val += exp(v) * grad_output[batch][i][k]; } } grad_a[batch][i][j] = val; } } } // namespace // MATMUL FORWARD DISPATCH std::vector<torch::Tensor> matmul_cuda_forward( torch::Tensor a, torch::Tensor b, int mode) { const int batch_size = a.size(0); const int a_size = a.size(1); const int b_size = b.size(2); auto options = torch::TensorOptions() .dtype(a.dtype()) .device(torch::kCUDA, a.device().index()); auto out = torch::zeros({batch_size, a_size, b_size}, options); const int in_size = a.size(2); const int threads = 32; const dim3 threads_per_block(threads, threads, 1); const dim3 blocks(a_size / threads + 1, b_size / threads + 1, batch_size); // Dispatch if (mode == 0) { const int threads = 16; const dim3 threads_per_block(threads, threads, 1); const dim3 blocks(4, 4, batch_size); logsum_kernel0<<<blocks, threads_per_block>>>(a.data<float>(), b.data<float>(), out.data<float>()); /* AT_DISPATCH_FLOATING_TYPES(a.type(), "matmul_forward_cuda", ([&] { */ /* matmul_cuda_forward_kernel<scalar_t><<<blocks, threads_per_block>>>( */ /* a.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>(), */ /* b.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>(), */ /* out.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>(), */ /* in_size, a_size, b_size); */ /* } ) ); */ return {out}; } else if (mode == 1) { auto options2 = torch::TensorOptions() .dtype(torch::kInt) .device(torch::kCUDA, a.device().index()); auto indices = torch::zeros({batch_size, a_size, b_size}, options2); AT_DISPATCH_FLOATING_TYPES(a.type(), "matmul_forward_cuda", ([&] { max_cuda_forward_kernel<scalar_t><<<blocks, threads_per_block>>>( a.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>(), b.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>(), out.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>(), indices.packed_accessor32<int,3,torch::RestrictPtrTraits>(), in_size, a_size, b_size); } ) ); return {out, indices}; } else if (mode == 2) { auto options2 = torch::TensorOptions() .dtype(torch::kInt) .device(torch::kCUDA, a.device().index()); auto indices = torch::zeros({batch_size, a_size, b_size}, options2); auto rand = torch::rand({batch_size, a_size, b_size}, options); AT_DISPATCH_FLOATING_TYPES(a.type(), "matmul_forward_cuda", ([&] { sample_cuda_forward_kernel<scalar_t><<<blocks, threads_per_block>>>( a.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>(), b.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>(), rand.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>(), out.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>(), indices.packed_accessor32<int,3,torch::RestrictPtrTraits>(), in_size, a_size, b_size); } ) ); return {out, indices}; } } // MATMUL BACKWARD DISPATCH std::vector<torch::Tensor> matmul_cuda_backward( torch::Tensor a, torch::Tensor b, torch::Tensor grad_out, torch::Tensor part, int mode) { const auto batch_size = a.size(0); const auto in_size = a.size(2); const int a_size = a.size(1); const int b_size = b.size(2); const int threads = 32; const dim3 blocks(a_size / threads + 1, in_size / threads + 1, batch_size); const dim3 threads_per_block(threads, threads, 1); auto grad_a = torch::zeros_like(a); auto grad_b = torch::zeros_like(b); auto grad_bp = 
grad_b.packed_accessor32<float,3,torch::RestrictPtrTraits>(); const int threads2 = 32; const dim3 blocks2(in_size / threads2 + 1, b_size / threads2 + 1, batch_size); if (mode == 0) { AT_DISPATCH_FLOATING_TYPES(a.type(), "matmul_forward_cuda", ([&] { matmul_cuda_backward_kernel_A<scalar_t><<<blocks, threads_per_block>>>( grad_a.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>(), a.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>(), b.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>(), part.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>(), grad_out.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>(), in_size, a_size, b_size ); })); AT_DISPATCH_FLOATING_TYPES(a.type(), "matmul_forward_cuda", ([&] { matmul_cuda_backward_kernel_B<scalar_t><<<blocks2, threads_per_block>>>( grad_b.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>(), a.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>(), b.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>(), part.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>(), grad_out.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>(), in_size, a_size, b_size); })); } else if (mode == 1 or mode == 2) { AT_DISPATCH_FLOATING_TYPES(a.type(), "matmul_forward_cuda", ([&] { max_cuda_backward_kernel_A<scalar_t><<<blocks, threads_per_block>>>( grad_a.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>(), a.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>(), b.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>(), part.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>(), grad_out.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>(), in_size, a_size, b_size); })); AT_DISPATCH_FLOATING_TYPES(a.type(), "matmul_forward_cuda", ([&] { max_cuda_backward_kernel_B<scalar_t><<<blocks2, threads_per_block>>>( grad_b.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>(), a.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>(), b.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>(), part.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>(), grad_out.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>(), in_size, a_size, b_size); })); } return {grad_a, grad_b}; } // BANDED FORWARD std::vector<torch::Tensor> banded_cuda_forward( torch::Tensor a, int a_lu, int a_lb, torch::Tensor b, int b_lu, int b_lb, int mode) { const int batch_size = a.size(0); const int out_lu = a_lu + b_lb; const int out_lb = a_lb + b_lu; const int a_size = a.size(1); const int new_size = out_lu + out_lb + 1; auto options = torch::TensorOptions() .dtype(a.dtype()) .device(torch::kCUDA, a.device().index()); auto out = torch::zeros({batch_size, a_size, new_size}, options); const int in_size = a.size(2); const int threads = 32; const dim3 threads_per_block(threads, threads, 1); const dim3 blocks(a_size / threads + 1, new_size / threads + 1, batch_size); auto options2 = torch::TensorOptions() .dtype(torch::kInt) .device(torch::kCUDA, a.device().index()); auto indices = torch::zeros({batch_size, a_size, new_size}, options2); AT_DISPATCH_FLOATING_TYPES(a.type(), "banded_forward_cuda", ([&] { banded_cuda_forward_kernel_mul<scalar_t><<<blocks, threads_per_block>>>( a.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>(), b.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>(), out.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>(), indices.packed_accessor32<int,3,torch::RestrictPtrTraits>(), a_size, a_lu, a_lb, b_lu, b_lb, out_lu, out_lb, mode); } ) ); return {out, indices}; } std::vector<torch::Tensor> banded_cuda_backward( 
torch::Tensor a, int a_lu, int a_lb, torch::Tensor b, int b_lu, int b_lb, torch::Tensor grad_output, torch::Tensor part, int mode) { const int batch_size = a.size(0); const int out_lu = a_lu + b_lb; const int out_lb = a_lb + b_lu; const int a_size = a.size(1); const int new_size = out_lu + out_lb + 1; auto options = torch::TensorOptions() .dtype(a.dtype()) .device(torch::kCUDA, a.device().index()); auto out = torch::zeros({batch_size, a_size, new_size}, options); const int in_size = a.size(2); const int threads = 32; const dim3 blocks(a_size / threads + 1, in_size / threads + 1, batch_size); const dim3 threads_per_block(threads, threads, 1); auto grad_a = torch::zeros_like(a); AT_DISPATCH_FLOATING_TYPES(a.type(), "matmul_forward_cuda", ([&] { banded_cuda_backward_kernel_mul<scalar_t><<<blocks, threads_per_block>>>( grad_a.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>(), a.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>(), b.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>(), part.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>(), grad_output.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>(), a_size, a_lu, a_lb, b_lu, b_lb, out_lu, out_lb, mode ); })); return {grad_a}; }
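// ----------------------------------------------------------------------------
// Reference sketch (not part of the original extension): a host-side,
// single-batch re-implementation of the band indexing used by
// banded_cuda_forward_kernel_mul in mode 3 (plain multiply-accumulate). It
// mirrors the dense accumulation spelled out in the kernel's commented-out
// lines,
//     o = i + (j - c_lu),  pos = i + (k - a_lu),  k2 = (pos - o) + b_lu,
// and is only meant as a CPU oracle for testing. The function name and the
// row-major std::vector layout are assumptions of this sketch.
// ----------------------------------------------------------------------------
#include <vector>

// a is an n x (a_lu + a_lb + 1) band, b is an n x (b_lu + b_lb + 1) band; the
// result is an n x (c_lu + c_lb + 1) band with c_lu = a_lu + b_lb and
// c_lb = a_lb + b_lu, matching banded_cuda_forward above.
static std::vector<float> banded_mul_reference(
    const std::vector<float> &a, int n, int a_lu, int a_lb,
    const std::vector<float> &b, int b_lu, int b_lb)
{
    const int a_width = a_lu + a_lb + 1;
    const int b_width = b_lu + b_lb + 1;
    const int c_lu = a_lu + b_lb;
    const int c_lb = a_lb + b_lu;
    const int c_width = c_lu + c_lb + 1;

    std::vector<float> out(size_t(n) * c_width, 0.f);
    for (int i = 0; i < n; ++i) {
        for (int j = 0; j < c_width; ++j) {
            // Row of b paired with band column j of row i.
            const int o = i + (j - c_lu);
            if (o < 0 || o >= n)
                continue;
            float val = 0.f;
            for (int k = 0; k < a_width; ++k) {
                // Dense column touched by a[i][k], and its offset in b's band.
                const int pos = i + (k - a_lu);
                const int k2 = (pos - o) + b_lu;
                if (pos < 0 || pos >= n || k2 < 0 || k2 >= b_width)
                    continue;
                val += a[size_t(i) * a_width + k] * b[size_t(o) * b_width + k2];
            }
            out[size_t(i) * c_width + j] = val;
        }
    }
    return out;
}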
#include "TorusSegmentation.h" // 宏:DEF_BLOCK_1D // 定义了默认的 1D 线程块的尺寸。 #define DEF_BLOCK_1D 256 // 宏:DEF_BLOCK_X 和 DEF_BLOCK_Y // 定义了二维结构的并行线程块默认的尺寸。 #define DEF_BLOCK_X 32 #define DEF_BLOCK_Y 8 // 宏:DEF_BLACK 和 DEF_WHITE // 定义了黑色和白色的像素值。 #define DEF_BLACK 0 #define DEF_WHITE 255 // 核函数:_initLblMatToZeroKer(初始化标记矩阵为全 0) // 在设备端初始化标记矩阵为全零。 static __global__ void // Kernel 函数无返回值。 _initLblMatToZeroKer( ImageCuda outlblimgcud // 坐标集位置标记矩阵 ); // 核函数:_initLblMatKer(根据坐标集初始化标记矩阵) // 在设备端初始化标记矩阵,坐标集内部的点初始化标记为 1。 static __global__ void // Kernel 函数无返回值。 _initLblMatKer( CoordiSet incoordiset, // 输入坐标集 ImageCuda outlblimgcud // 坐标集位置标记矩阵 ); // 核函数:_torusSegLblKer(标记坐标集实现二分类) // 在设备端通过考查当前像素邻域是否都在坐标集内,标记分类。 static __global__ void // Kernel 函数无返回值。 _torusSegLblKer( ImageCuda inlblimgcud, // 输入坐标集位置标记矩阵 CoordiSet incoordiset, // 输入坐标集 TorusSegmentation ts, // 分割操作类 unsigned char *outlbl // 输出标记数组 ); // 核函数:_labelToImgKer(将分割结果反映到图像上) // 该核函数,根据之前分割得到的标记数组,将分割结果映射到图像上去,在 // CoordiSet 中记录了在图像中该点的位置,将对应位置的像素二值化为标记值。 static __global__ void _labelToImgKer( // Kernel 函数没有返回值。 CoordiSet incoordiset, // 输入坐标集 unsigned char *inlabel, // 输入的分类结果数组 ImageCuda outimgcud // 用于标记的图像 ); // 核函数:_initLblMatToZeroKer(初始化标记矩阵为全 0) static __global__ void _initLblMatToZeroKer( ImageCuda outlblimgcud) { // 计算线程对应的输出点的位置,其中 c 和 r 分别表示线程处理的像素点的坐标 // 的 x 和 y 分量(其中,c 表示 column;r 表示 row)。由于我们采用了并行度 // 缩减的策略,令一个线程处理 4 个输出像素,这四个像素位于统一列的相邻 4 行 // 上,因此,对于 r 需要进行乘 4 计算。 int c = blockIdx.x * blockDim.x + threadIdx.x; int r = (blockIdx.y * blockDim.y + threadIdx.y) * 4; // 检查第一个像素点是否越界,如果越界,则不进行处理,一方面节省计算资源,一 // 方面防止由于段错误导致的程序崩溃。 if (c >= outlblimgcud.imgMeta.width || r >= outlblimgcud.imgMeta.height) return; for (int i = 0; i < 4; i++) { // 给当前标记矩阵标记为零。 outlblimgcud.imgMeta.imgData[r * outlblimgcud.pitchBytes + c] = 0; // 继续处理该线程中下一行同一列的点。 r++; // 检查是否越界 if (r >= outlblimgcud.imgMeta.height) return; } } // 核函数:_initLblMatKer(根据坐标集初始化标记矩阵) static __global__ void _initLblMatKer( CoordiSet incoordiset, ImageCuda outlblimgcud) { // 计算当前 Thread 所对应的坐标集中的点的位置。 int index = blockIdx.x * blockDim.x + threadIdx.x; // 如果当前索引超过了坐标集中的点的个数,直接返回。 if(index >= incoordiset.count) return; // 计算该点在原图像中的位置。 int xcrd = incoordiset.tplData[2 * index]; int ycrd = incoordiset.tplData[2 * index + 1]; // 将标记矩阵中对应位置的点标记为 1。 outlblimgcud.imgMeta.imgData[ycrd * outlblimgcud.pitchBytes + xcrd] = 1; } // 核函数:_torusSegLblKer(标记坐标集实现二分类) static __global__ void _torusSegLblKer( ImageCuda inlblimgcud, CoordiSet incoordiset, TorusSegmentation ts, unsigned char *outlbl) { // 计算当前 Thread 所对应的坐标集中的点的位置。 int index = blockIdx.x * blockDim.x + threadIdx.x; // 如果当前索引超过了坐标集中的点的个数,直接返回。 if (index >= incoordiset.count) return; // 计算该点在原图像中的位置。 int xcrd = incoordiset.tplData[2 * index]; int ycrd = incoordiset.tplData[2 * index + 1]; // 获取邻域宽度。 int neighborsize = ts.getNeighborSize(); // 获取标记图像尺寸。 int width = inlblimgcud.imgMeta.width; int height = inlblimgcud.imgMeta.height; int pitch = inlblimgcud.pitchBytes; // 如果当前像素邻域宽度超过了物理范围,直接标记为 1 类别,核函数返回。 if (xcrd + neighborsize >= width || xcrd - neighborsize < 0 || ycrd + neighborsize >= height || ycrd - neighborsize < 0) { outlbl[index] = 1; return; } // 遍历当前点的 neighborsize 邻域,发现坐标集外的点即将当前点标记为 1 类别。 for (int i = ycrd - neighborsize; i <= ycrd + neighborsize; i++) { for (int j = xcrd - neighborsize; j <= xcrd + neighborsize; j++) { if (inlblimgcud.imgMeta.imgData[i * pitch + j] == 0) { outlbl[index] = 1; return; } } } // 其余的情况标记为 2 类别。 outlbl[index] = 2; } // 核函数:_labelToImgKer(将分割结果反映到图像上) static __global__ void _labelToImgKer( CoordiSet 
incoordiset, unsigned char *inlabel, ImageCuda outimgcud) { // 计算当前 Thread 所对应的坐标集中的点的位置。 int index = blockIdx.x * blockDim.x + threadIdx.x; // 如果当前索引超过了坐标集中的点的个数,直接返回。 if (index >= incoordiset.count) return; // 计算该点在原图像中的位置。 int xcrd = incoordiset.tplData[2 * index]; int ycrd = incoordiset.tplData[2 * index + 1]; // 获取标记图像 pitch。 int pitch = outimgcud.pitchBytes; // 根据标记值设置对应图像的像素值。 if (inlabel[index] == 1) outimgcud.imgMeta.imgData[ycrd * pitch + xcrd] = DEF_WHITE; else outimgcud.imgMeta.imgData[ycrd * pitch + xcrd] = DEF_BLACK; } // 宏:FREE_LOCAL_MEMORY_TORUS_SEGREGATE(清理局部申请的设备端或者主机端内存) // 该宏用于清理在 torusSegregate 过程中申请的设备端或者主机端内存空间。 #define FREE_LOCAL_MEMORY_TORUS_SEGREGATE do { \ if ((lblimg) != NULL) \ ImageBasicOp::deleteImage((lblimg)); \ if ((outlbldev) != NULL) \ cudaFree((outlbldev)); \ } while (0) // Host 成员函数:torusSegregate(对圆环进行二分割) __host__ int TorusSegmentation::torusSegregate( int width, int height, CoordiSet *incoordiset, unsigned char *outlbl) { // 检查指针是否为空。 if (incoordiset == NULL || outlbl == NULL) return NULL_POINTER; // 检查参数是否合法。 if (width <= 0 || height <= 0 || incoordiset->count <= 0) return INVALID_DATA; // 局部变量 count, 简化代码书写。 int count = incoordiset->count; // 申明局部变量。 unsigned char *outlbldev; // 设备端标记数组 Image *lblimg; // 坐标集范围二维标记矩阵 int errcode; // 错误码 // 将坐标集拷贝到 Device 内存中。 errcode = CoordiSetBasicOp::copyToCurrentDevice(incoordiset); if (errcode != NO_ERROR) return errcode; // 创建坐标集范围二维标记矩阵指针。 errcode = ImageBasicOp:: newImage(&lblimg); if (errcode != NO_ERROR) { FREE_LOCAL_MEMORY_TORUS_SEGREGATE; return CUDA_ERROR; } // 在设备端坐标集范围二维标记矩阵指针。 errcode = ImageBasicOp::makeAtCurrentDevice(lblimg, width, height); if (errcode != NO_ERROR) { FREE_LOCAL_MEMORY_TORUS_SEGREGATE; return CUDA_ERROR; } // 获取设备端标记矩阵。 ImageCuda lblimgcud; // 坐标集范围设备端标记矩阵 errcode = ImageBasicOp::roiSubImage(lblimg, &lblimgcud); if (errcode != NO_ERROR) { FREE_LOCAL_MEMORY_TORUS_SEGREGATE; return CUDA_ERROR; } // 计算调用初始化矩阵的核函数的线程块的尺寸和线程块的数量。 dim3 blocksize, gridsize; blocksize.x = DEF_BLOCK_X; blocksize.y = DEF_BLOCK_Y; gridsize.x = (width + blocksize.x - 1) / blocksize.x; gridsize.y = (height + blocksize.y * 4 - 1) / (blocksize.y * 4); // 调用核函数,初始化标记矩阵为零。 _initLblMatToZeroKer<<<gridsize, blocksize>>>(lblimgcud); // 计算调用 Kernel 函数的线程块的尺寸和线程块的数量。 blocksize.x = DEF_BLOCK_1D; blocksize.y = 1; gridsize.x = count / blocksize.x + 1; gridsize.y = 1; // 调用核函数,使用坐标集初始化标记矩阵对应位置为 1。 _initLblMatKer<<<gridsize, blocksize>>>(*incoordiset, lblimgcud); // 在设备端申请标记数组。 errcode = cudaMalloc((void **)&outlbldev, count * sizeof(unsigned char)); if (errcode != NO_ERROR) { FREE_LOCAL_MEMORY_TORUS_SEGREGATE; return CUDA_ERROR; } // 调用核函数,进行圆环区域二分类。 _torusSegLblKer<<<gridsize, blocksize>>>(lblimgcud, *incoordiset, *this, outlbldev); // 将标记值拷贝到主机端。 errcode = cudaMemcpy(outlbl, outlbldev, count * sizeof(unsigned char), cudaMemcpyDeviceToHost); if (errcode != NO_ERROR) { FREE_LOCAL_MEMORY_TORUS_SEGREGATE; return CUDA_ERROR; } // 内存清理。 FREE_LOCAL_MEMORY_TORUS_SEGREGATE; return NO_ERROR; } // 宏:FREE_LOCAL_MEMORY_TORUS_SEGREGATE_TO_IMG(清理申请的设备端或主机端内存) // 该宏用于清理在 torusSegregateToImg 过程中申请的设备端或者主机端内存空间。 #define FREE_LOCAL_MEMORY_TORUS_SEGREGATE_TO_IMG do { \ if ((outlabel) != NULL) \ delete [] (outlabel); \ if ((outlabeldev) != NULL) \ cudaFree((outlabeldev)); \ }while (0) // Host 成员函数:torusSegregateToImg(对圆环进行二分割,结果体现到图像上) __host__ int TorusSegmentation::torusSegregateToImg( int width, int height, CoordiSet *incoordiset, Image *outimg) { // 检查指针是否为空。 if (incoordiset == NULL || outimg == NULL) return NULL_POINTER; // 检查参数是否合法。 if 
(width <= 0 || height <= 0 || incoordiset->count <= 0) return INVALID_DATA; // 局部变量 count, 简化代码书写。 int count = incoordiset->count; // 定义局部变量。 int errcode; // 错误码 cudaError_t cuerrcode; // CUDA 错误码 unsigned char *outlabel = NULL; // 主机端标记数组 unsigned char *outlabeldev = NULL; // 设备端标记数组 ImageCuda insubimgCud; // ImgCuda 对象 // 将坐标集拷贝到 Device 内存中。 errcode = CoordiSetBasicOp::copyToCurrentDevice(incoordiset); if (errcode != NO_ERROR) return errcode; // 申请主机端标记数组空间。 outlabel = new unsigned char[count]; if (outlabel == NULL) { FREE_LOCAL_MEMORY_TORUS_SEGREGATE_TO_IMG; return OUT_OF_MEM; } // 调用圆环分割 host 函数。 errcode = torusSegregate(width, height, incoordiset, outlabel); if (errcode != NO_ERROR) { FREE_LOCAL_MEMORY_TORUS_SEGREGATE_TO_IMG; return errcode; } // 申请设备端标记数组空间。 cuerrcode = cudaMalloc((void **)&outlabeldev, sizeof(unsigned char) * count); if (cuerrcode != NO_ERROR) { FREE_LOCAL_MEMORY_TORUS_SEGREGATE_TO_IMG; return cuerrcode; } // 将标记数组拷贝到设备端。 cuerrcode = cudaMemcpy(outlabeldev, outlabel, sizeof(unsigned char) * count, cudaMemcpyHostToDevice); if (cuerrcode != NO_ERROR) { FREE_LOCAL_MEMORY_TORUS_SEGREGATE_TO_IMG; return cuerrcode; } // 将输出图像拷贝到 Device 内存中。 errcode = ImageBasicOp::copyToCurrentDevice(outimg); if (errcode != NO_ERROR) { FREE_LOCAL_MEMORY_TORUS_SEGREGATE_TO_IMG; return errcode; } // 提取输入图像的 ROI 子图像。 errcode = ImageBasicOp::roiSubImage(outimg, &insubimgCud); if (errcode != NO_ERROR) { FREE_LOCAL_MEMORY_TORUS_SEGREGATE_TO_IMG; return errcode; } // 计算调用 Kernel 函数的线程块的尺寸和线程块的数量。 dim3 blocksize, gridsize; blocksize.x = DEF_BLOCK_X; blocksize.y = DEF_BLOCK_Y; gridsize.x = (count + blocksize.x - 1) / blocksize.x; gridsize.y = 1; // 调用核函数,将标记数组映射到图像上。 _labelToImgKer<<<gridsize, blocksize>>>(*incoordiset, outlabeldev, insubimgCud); // 若调用 CUDA 出错返回错误代码。 if (cudaGetLastError() != cudaSuccess) { // 释放申请的内存,防止内存泄漏。 FREE_LOCAL_MEMORY_TORUS_SEGREGATE_TO_IMG; return CUDA_ERROR; } // 释放内存。 FREE_LOCAL_MEMORY_TORUS_SEGREGATE_TO_IMG; return NO_ERROR; }
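// ----------------------------------------------------------------------------
// Illustrative sketch (not part of the original class): a host-only version of
// the decision rule implemented by _torusSegLblKer, handy as a reference when
// unit-testing the kernel. It assumes the coordinate set has already been
// rasterized into a width x height mask whose set pixels are 1 and whose other
// pixels are 0 (the job done by _initLblMatToZeroKer and _initLblMatKer); the
// function and parameter names are hypothetical.
// ----------------------------------------------------------------------------
#include <vector>

static unsigned char classifyTorusPointHost(
        const std::vector<unsigned char> &mask,  // rasterized coordinate-set mask
        int width, int height,                   // mask dimensions
        int x, int y,                            // point taken from the coordinate set
        int neighborsize)                        // same meaning as getNeighborSize()
{
    // Points whose neighborhood window leaves the image are class 1 (boundary).
    if (x - neighborsize < 0 || x + neighborsize >= width ||
        y - neighborsize < 0 || y + neighborsize >= height)
        return 1;

    // If any pixel of the window lies outside the coordinate set, the point is
    // class 1 (boundary); otherwise it is class 2 (interior).
    for (int i = y - neighborsize; i <= y + neighborsize; i++)
        for (int j = x - neighborsize; j <= x + neighborsize; j++)
            if (mask[i * width + j] == 0)
                return 1;
    return 2;
}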
* \file * The cub::BlockExchange class provides [<em>collective</em>](index.html#sec0) methods for rearranging data partitioned across a CUDA thread block. */ #pragma once #include "../util_ptx.cuh" #include "../util_arch.cuh" #include "../util_macro.cuh" #include "../util_type.cuh" #include "../util_namespace.cuh" /// Optional outer namespace(s) CUB_NS_PREFIX /// CUB namespace namespace cub { /** * \brief The BlockExchange class provides [<em>collective</em>](index.html#sec0) methods for rearranging data partitioned across a CUDA thread block. ![](transpose_logo.png) * \ingroup BlockModule * * \tparam T The data type to be exchanged. * \tparam BLOCK_DIM_X The thread block length in threads along the X dimension * \tparam ITEMS_PER_THREAD The number of items partitioned onto each thread. * \tparam WARP_TIME_SLICING <b>[optional]</b> When \p true, only use enough shared memory for a single warp's worth of tile data, time-slicing the block-wide exchange over multiple synchronized rounds. Yields a smaller memory footprint at the expense of decreased parallelism. (Default: false) * \tparam BLOCK_DIM_Y <b>[optional]</b> The thread block length in threads along the Y dimension (default: 1) * \tparam BLOCK_DIM_Z <b>[optional]</b> The thread block length in threads along the Z dimension (default: 1) * \tparam PTX_ARCH <b>[optional]</b> \ptxversion * * \par Overview * - It is commonplace for blocks of threads to rearrange data items between * threads. For example, the device-accessible memory subsystem prefers access patterns * where data items are "striped" across threads (where consecutive threads access consecutive items), * yet most block-wide operations prefer a "blocked" partitioning of items across threads * (where consecutive items belong to a single thread). * - BlockExchange supports the following types of data exchanges: * - Transposing between [<em>blocked</em>](index.html#sec5sec3) and [<em>striped</em>](index.html#sec5sec3) arrangements * - Transposing between [<em>blocked</em>](index.html#sec5sec3) and [<em>warp-striped</em>](index.html#sec5sec3) arrangements * - Scattering ranked items to a [<em>blocked arrangement</em>](index.html#sec5sec3) * - Scattering ranked items to a [<em>striped arrangement</em>](index.html#sec5sec3) * - \rowmajor * * \par A Simple Example * \blockcollective{BlockExchange} * \par * The code snippet below illustrates the conversion from a "blocked" to a "striped" arrangement * of 512 integer items partitioned across 128 threads where each thread owns 4 items. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/block/block_exchange.cuh> * * __global__ void ExampleKernel(int *d_data, ...) * { * // Specialize BlockExchange for a 1D block of 128 threads owning 4 integer items each * typedef cub::BlockExchange<int, 128, 4> BlockExchange; * * // Allocate shared memory for BlockExchange * __shared__ typename BlockExchange::TempStorage temp_storage; * * // Load a tile of data striped across threads * int thread_data[4]; * cub::LoadDirectStriped<128>(threadIdx.x, d_data, thread_data); * * // Collectively exchange data into a blocked arrangement across threads * BlockExchange(temp_storage).StripedToBlocked(thread_data); * * \endcode * \par * Suppose the set of striped input \p thread_data across the block of threads is * <tt>{ [0,128,256,384], [1,129,257,385], ..., [127,255,383,511] }</tt>. * The corresponding output \p thread_data in those threads will be * <tt>{ [0,1,2,3], [4,5,6,7], [8,9,10,11], ..., [508,509,510,511] }</tt>. 
* * \par Performance Considerations * - Proper device-specific padding ensures zero bank conflicts for most types. * */ template < typename InputT, int BLOCK_DIM_X, int ITEMS_PER_THREAD, bool WARP_TIME_SLICING = false, int BLOCK_DIM_Y = 1, int BLOCK_DIM_Z = 1, int PTX_ARCH = CUB_PTX_ARCH> class BlockExchange { private: /****************************************************************************** * Constants ******************************************************************************/ /// Constants enum { /// The thread block size in threads BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z, LOG_WARP_THREADS = CUB_LOG_WARP_THREADS(PTX_ARCH), WARP_THREADS = 1 << LOG_WARP_THREADS, WARPS = (BLOCK_THREADS + WARP_THREADS - 1) / WARP_THREADS, LOG_SMEM_BANKS = CUB_LOG_SMEM_BANKS(PTX_ARCH), SMEM_BANKS = 1 << LOG_SMEM_BANKS, TILE_ITEMS = BLOCK_THREADS * ITEMS_PER_THREAD, TIME_SLICES = (WARP_TIME_SLICING) ? WARPS : 1, TIME_SLICED_THREADS = (WARP_TIME_SLICING) ? CUB_MIN(BLOCK_THREADS, WARP_THREADS) : BLOCK_THREADS, TIME_SLICED_ITEMS = TIME_SLICED_THREADS * ITEMS_PER_THREAD, WARP_TIME_SLICED_THREADS = CUB_MIN(BLOCK_THREADS, WARP_THREADS), WARP_TIME_SLICED_ITEMS = WARP_TIME_SLICED_THREADS * ITEMS_PER_THREAD, // Insert padding to avoid bank conflicts during raking when items per thread is a power of two and > 4 (otherwise we can typically use 128b loads) INSERT_PADDING = (ITEMS_PER_THREAD > 4) && (PowerOfTwo<ITEMS_PER_THREAD>::VALUE), PADDING_ITEMS = (INSERT_PADDING) ? (TIME_SLICED_ITEMS >> LOG_SMEM_BANKS) : 0, }; /****************************************************************************** * Type definitions ******************************************************************************/ /// Shared memory storage layout type struct __align__(16) _TempStorage { InputT buff[TIME_SLICED_ITEMS + PADDING_ITEMS]; }; public: /// \smemstorage{BlockExchange} struct TempStorage : Uninitialized<_TempStorage> {}; private: /****************************************************************************** * Thread fields ******************************************************************************/ /// Shared storage reference _TempStorage &temp_storage; /// Linear thread-id unsigned int linear_tid; unsigned int lane_id; unsigned int warp_id; unsigned int warp_offset; /****************************************************************************** * Utility methods ******************************************************************************/ /// Internal storage allocator __device__ __forceinline__ _TempStorage& PrivateStorage() { __shared__ _TempStorage private_storage; return private_storage; } /** * Transposes data items from <em>blocked</em> arrangement to <em>striped</em> arrangement. Specialized for no timeslicing. */ template <typename OutputT> __device__ __forceinline__ void BlockedToStriped( InputT input_items[ITEMS_PER_THREAD], ///< [in] Items to exchange, converting between <em>blocked</em> and <em>striped</em> arrangements. OutputT output_items[ITEMS_PER_THREAD], ///< [out] Items to exchange, converting between <em>blocked</em> and <em>striped</em> arrangements. 
Int2Type<false> /*time_slicing*/) { #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) { int item_offset = (linear_tid * ITEMS_PER_THREAD) + ITEM; if (INSERT_PADDING) item_offset += item_offset >> LOG_SMEM_BANKS; temp_storage.buff[item_offset] = input_items[ITEM]; } CTA_SYNC(); #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) { int item_offset = int(ITEM * BLOCK_THREADS) + linear_tid; if (INSERT_PADDING) item_offset += item_offset >> LOG_SMEM_BANKS; output_items[ITEM] = temp_storage.buff[item_offset]; } } /** * Transposes data items from <em>blocked</em> arrangement to <em>striped</em> arrangement. Specialized for warp-timeslicing. */ template <typename OutputT> __device__ __forceinline__ void BlockedToStriped( InputT input_items[ITEMS_PER_THREAD], ///< [in] Items to exchange, converting between <em>blocked</em> and <em>striped</em> arrangements. OutputT output_items[ITEMS_PER_THREAD], ///< [out] Items to exchange, converting between <em>blocked</em> and <em>striped</em> arrangements. Int2Type<true> /*time_slicing*/) { InputT temp_items[ITEMS_PER_THREAD]; #pragma unroll for (int SLICE = 0; SLICE < TIME_SLICES; SLICE++) { const int SLICE_OFFSET = SLICE * TIME_SLICED_ITEMS; const int SLICE_OOB = SLICE_OFFSET + TIME_SLICED_ITEMS; CTA_SYNC(); if (warp_id == SLICE) { #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) { int item_offset = (lane_id * ITEMS_PER_THREAD) + ITEM; if (INSERT_PADDING) item_offset += item_offset >> LOG_SMEM_BANKS; temp_storage.buff[item_offset] = input_items[ITEM]; } } CTA_SYNC(); #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) { // Read a strip of items const int STRIP_OFFSET = ITEM * BLOCK_THREADS; const int STRIP_OOB = STRIP_OFFSET + BLOCK_THREADS; if ((SLICE_OFFSET < STRIP_OOB) && (SLICE_OOB > STRIP_OFFSET)) { int item_offset = STRIP_OFFSET + linear_tid - SLICE_OFFSET; if ((item_offset >= 0) && (item_offset < TIME_SLICED_ITEMS)) { if (INSERT_PADDING) item_offset += item_offset >> LOG_SMEM_BANKS; temp_items[ITEM] = temp_storage.buff[item_offset]; } } } } // Copy #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) { output_items[ITEM] = temp_items[ITEM]; } } /** * Transposes data items from <em>blocked</em> arrangement to <em>warp-striped</em> arrangement. Specialized for no timeslicing */ template <typename OutputT> __device__ __forceinline__ void BlockedToWarpStriped( InputT input_items[ITEMS_PER_THREAD], ///< [in] Items to exchange, converting between <em>blocked</em> and <em>striped</em> arrangements. OutputT output_items[ITEMS_PER_THREAD], ///< [out] Items to exchange, converting between <em>blocked</em> and <em>striped</em> arrangements. Int2Type<false> /*time_slicing*/) { #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) { int item_offset = warp_offset + ITEM + (lane_id * ITEMS_PER_THREAD); if (INSERT_PADDING) item_offset += item_offset >> LOG_SMEM_BANKS; temp_storage.buff[item_offset] = input_items[ITEM]; } WARP_SYNC(0xffffffff); #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) { int item_offset = warp_offset + (ITEM * WARP_TIME_SLICED_THREADS) + lane_id; if (INSERT_PADDING) item_offset += item_offset >> LOG_SMEM_BANKS; output_items[ITEM] = temp_storage.buff[item_offset]; } } /** * Transposes data items from <em>blocked</em> arrangement to <em>warp-striped</em> arrangement. 
Specialized for warp-timeslicing */ template <typename OutputT> __device__ __forceinline__ void BlockedToWarpStriped( InputT input_items[ITEMS_PER_THREAD], ///< [in] Items to exchange, converting between <em>blocked</em> and <em>striped</em> arrangements. OutputT output_items[ITEMS_PER_THREAD], ///< [out] Items to exchange, converting between <em>blocked</em> and <em>striped</em> arrangements. Int2Type<true> /*time_slicing*/) { if (warp_id == 0) { #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) { int item_offset = ITEM + (lane_id * ITEMS_PER_THREAD); if (INSERT_PADDING) item_offset += item_offset >> LOG_SMEM_BANKS; temp_storage.buff[item_offset] = input_items[ITEM]; } WARP_SYNC(0xffffffff); #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) { int item_offset = (ITEM * WARP_TIME_SLICED_THREADS) + lane_id; if (INSERT_PADDING) item_offset += item_offset >> LOG_SMEM_BANKS; output_items[ITEM] = temp_storage.buff[item_offset]; } } #pragma unroll for (unsigned int SLICE = 1; SLICE < TIME_SLICES; ++SLICE) { CTA_SYNC(); if (warp_id == SLICE) { #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) { int item_offset = ITEM + (lane_id * ITEMS_PER_THREAD); if (INSERT_PADDING) item_offset += item_offset >> LOG_SMEM_BANKS; temp_storage.buff[item_offset] = input_items[ITEM]; } WARP_SYNC(0xffffffff); #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) { int item_offset = (ITEM * WARP_TIME_SLICED_THREADS) + lane_id; if (INSERT_PADDING) item_offset += item_offset >> LOG_SMEM_BANKS; output_items[ITEM] = temp_storage.buff[item_offset]; } } } } /** * Transposes data items from <em>striped</em> arrangement to <em>blocked</em> arrangement. Specialized for no timeslicing. */ template <typename OutputT> __device__ __forceinline__ void StripedToBlocked( InputT input_items[ITEMS_PER_THREAD], ///< [in] Items to exchange, converting between <em>blocked</em> and <em>striped</em> arrangements. OutputT output_items[ITEMS_PER_THREAD], ///< [out] Items to exchange, converting between <em>blocked</em> and <em>striped</em> arrangements. Int2Type<false> /*time_slicing*/) { #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) { int item_offset = int(ITEM * BLOCK_THREADS) + linear_tid; if (INSERT_PADDING) item_offset += item_offset >> LOG_SMEM_BANKS; temp_storage.buff[item_offset] = input_items[ITEM]; } CTA_SYNC(); // No timeslicing #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) { int item_offset = (linear_tid * ITEMS_PER_THREAD) + ITEM; if (INSERT_PADDING) item_offset += item_offset >> LOG_SMEM_BANKS; output_items[ITEM] = temp_storage.buff[item_offset]; } } /** * Transposes data items from <em>striped</em> arrangement to <em>blocked</em> arrangement. Specialized for warp-timeslicing. */ template <typename OutputT> __device__ __forceinline__ void StripedToBlocked( InputT input_items[ITEMS_PER_THREAD], ///< [in] Items to exchange, converting between <em>blocked</em> and <em>striped</em> arrangements. OutputT output_items[ITEMS_PER_THREAD], ///< [out] Items to exchange, converting between <em>blocked</em> and <em>striped</em> arrangements. 
Int2Type<true> /*time_slicing*/) { // Warp time-slicing InputT temp_items[ITEMS_PER_THREAD]; #pragma unroll for (int SLICE = 0; SLICE < TIME_SLICES; SLICE++) { const int SLICE_OFFSET = SLICE * TIME_SLICED_ITEMS; const int SLICE_OOB = SLICE_OFFSET + TIME_SLICED_ITEMS; CTA_SYNC(); #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) { // Write a strip of items const int STRIP_OFFSET = ITEM * BLOCK_THREADS; const int STRIP_OOB = STRIP_OFFSET + BLOCK_THREADS; if ((SLICE_OFFSET < STRIP_OOB) && (SLICE_OOB > STRIP_OFFSET)) { int item_offset = STRIP_OFFSET + linear_tid - SLICE_OFFSET; if ((item_offset >= 0) && (item_offset < TIME_SLICED_ITEMS)) { if (INSERT_PADDING) item_offset += item_offset >> LOG_SMEM_BANKS; temp_storage.buff[item_offset] = input_items[ITEM]; } } } CTA_SYNC(); if (warp_id == SLICE) { #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) { int item_offset = (lane_id * ITEMS_PER_THREAD) + ITEM; if (INSERT_PADDING) item_offset += item_offset >> LOG_SMEM_BANKS; temp_items[ITEM] = temp_storage.buff[item_offset]; } } } // Copy #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) { output_items[ITEM] = temp_items[ITEM]; } } /** * Transposes data items from <em>warp-striped</em> arrangement to <em>blocked</em> arrangement. Specialized for no timeslicing */ template <typename OutputT> __device__ __forceinline__ void WarpStripedToBlocked( InputT input_items[ITEMS_PER_THREAD], ///< [in] Items to exchange, converting between <em>blocked</em> and <em>striped</em> arrangements. OutputT output_items[ITEMS_PER_THREAD], ///< [out] Items to exchange, converting between <em>blocked</em> and <em>striped</em> arrangements. Int2Type<false> /*time_slicing*/) { #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) { int item_offset = warp_offset + (ITEM * WARP_TIME_SLICED_THREADS) + lane_id; if (INSERT_PADDING) item_offset += item_offset >> LOG_SMEM_BANKS; temp_storage.buff[item_offset] = input_items[ITEM]; } WARP_SYNC(0xffffffff); #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) { int item_offset = warp_offset + ITEM + (lane_id * ITEMS_PER_THREAD); if (INSERT_PADDING) item_offset += item_offset >> LOG_SMEM_BANKS; output_items[ITEM] = temp_storage.buff[item_offset]; } } /** * Transposes data items from <em>warp-striped</em> arrangement to <em>blocked</em> arrangement. Specialized for warp-timeslicing */ template <typename OutputT> __device__ __forceinline__ void WarpStripedToBlocked( InputT input_items[ITEMS_PER_THREAD], ///< [in] Items to exchange, converting between <em>blocked</em> and <em>striped</em> arrangements. OutputT output_items[ITEMS_PER_THREAD], ///< [out] Items to exchange, converting between <em>blocked</em> and <em>striped</em> arrangements. Int2Type<true> /*time_slicing*/) { #pragma unroll for (unsigned int SLICE = 0; SLICE < TIME_SLICES; ++SLICE) { CTA_SYNC(); if (warp_id == SLICE) { #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) { int item_offset = (ITEM * WARP_TIME_SLICED_THREADS) + lane_id; if (INSERT_PADDING) item_offset += item_offset >> LOG_SMEM_BANKS; temp_storage.buff[item_offset] = input_items[ITEM]; } WARP_SYNC(0xffffffff); #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) { int item_offset = ITEM + (lane_id * ITEMS_PER_THREAD); if (INSERT_PADDING) item_offset += item_offset >> LOG_SMEM_BANKS; output_items[ITEM] = temp_storage.buff[item_offset]; } } } } /** * Exchanges data items annotated by rank into <em>blocked</em> arrangement. Specialized for no timeslicing. 
*/ template <typename OutputT, typename OffsetT> __device__ __forceinline__ void ScatterToBlocked( InputT input_items[ITEMS_PER_THREAD], ///< [in] Items to exchange, converting between <em>blocked</em> and <em>striped</em> arrangements. OutputT output_items[ITEMS_PER_THREAD], ///< [out] Items to exchange, converting between <em>blocked</em> and <em>striped</em> arrangements. OffsetT ranks[ITEMS_PER_THREAD], ///< [in] Corresponding scatter ranks Int2Type<false> /*time_slicing*/) { #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) { int item_offset = ranks[ITEM]; if (INSERT_PADDING) item_offset = SHR_ADD(item_offset, LOG_SMEM_BANKS, item_offset); temp_storage.buff[item_offset] = input_items[ITEM]; } CTA_SYNC(); #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) { int item_offset = (linear_tid * ITEMS_PER_THREAD) + ITEM; if (INSERT_PADDING) item_offset = SHR_ADD(item_offset, LOG_SMEM_BANKS, item_offset); output_items[ITEM] = temp_storage.buff[item_offset]; } } /** * Exchanges data items annotated by rank into <em>blocked</em> arrangement. Specialized for warp-timeslicing. */ template <typename OutputT, typename OffsetT> __device__ __forceinline__ void ScatterToBlocked( InputT input_items[ITEMS_PER_THREAD], ///< [in] Items to exchange, converting between <em>blocked</em> and <em>striped</em> arrangements. OutputT output_items[ITEMS_PER_THREAD], ///< [out] Items to exchange, converting between <em>blocked</em> and <em>striped</em> arrangements. OffsetT ranks[ITEMS_PER_THREAD], ///< [in] Corresponding scatter ranks Int2Type<true> /*time_slicing*/) { InputT temp_items[ITEMS_PER_THREAD]; #pragma unroll for (int SLICE = 0; SLICE < TIME_SLICES; SLICE++) { CTA_SYNC(); const int SLICE_OFFSET = TIME_SLICED_ITEMS * SLICE; #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) { int item_offset = ranks[ITEM] - SLICE_OFFSET; if ((item_offset >= 0) && (item_offset < WARP_TIME_SLICED_ITEMS)) { if (INSERT_PADDING) item_offset = SHR_ADD(item_offset, LOG_SMEM_BANKS, item_offset); temp_storage.buff[item_offset] = input_items[ITEM]; } } CTA_SYNC(); if (warp_id == SLICE) { #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) { int item_offset = (lane_id * ITEMS_PER_THREAD) + ITEM; if (INSERT_PADDING) item_offset = SHR_ADD(item_offset, LOG_SMEM_BANKS, item_offset); temp_items[ITEM] = temp_storage.buff[item_offset]; } } } // Copy #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) { output_items[ITEM] = temp_items[ITEM]; } } /** * Exchanges data items annotated by rank into <em>striped</em> arrangement. Specialized for no timeslicing. */ template <typename OutputT, typename OffsetT> __device__ __forceinline__ void ScatterToStriped( InputT input_items[ITEMS_PER_THREAD], ///< [in] Items to exchange, converting between <em>blocked</em> and <em>striped</em> arrangements. OutputT output_items[ITEMS_PER_THREAD], ///< [out] Items to exchange, converting between <em>blocked</em> and <em>striped</em> arrangements. 
OffsetT ranks[ITEMS_PER_THREAD], ///< [in] Corresponding scatter ranks Int2Type<false> /*time_slicing*/) { #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) { int item_offset = ranks[ITEM]; if (INSERT_PADDING) item_offset = SHR_ADD(item_offset, LOG_SMEM_BANKS, item_offset); temp_storage.buff[item_offset] = input_items[ITEM]; } CTA_SYNC(); #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) { int item_offset = int(ITEM * BLOCK_THREADS) + linear_tid; if (INSERT_PADDING) item_offset = SHR_ADD(item_offset, LOG_SMEM_BANKS, item_offset); output_items[ITEM] = temp_storage.buff[item_offset]; } } /** * Exchanges data items annotated by rank into <em>striped</em> arrangement. Specialized for warp-timeslicing. */ template <typename OutputT, typename OffsetT> __device__ __forceinline__ void ScatterToStriped( InputT input_items[ITEMS_PER_THREAD], ///< [in] Items to exchange, converting between <em>blocked</em> and <em>striped</em> arrangements. OutputT output_items[ITEMS_PER_THREAD], ///< [out] Items to exchange, converting between <em>blocked</em> and <em>striped</em> arrangements. OffsetT ranks[ITEMS_PER_THREAD], ///< [in] Corresponding scatter ranks Int2Type<true> /*time_slicing*/) { InputT temp_items[ITEMS_PER_THREAD]; #pragma unroll for (int SLICE = 0; SLICE < TIME_SLICES; SLICE++) { const int SLICE_OFFSET = SLICE * TIME_SLICED_ITEMS; const int SLICE_OOB = SLICE_OFFSET + TIME_SLICED_ITEMS; CTA_SYNC(); #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) { int item_offset = ranks[ITEM] - SLICE_OFFSET; if ((item_offset >= 0) && (item_offset < WARP_TIME_SLICED_ITEMS)) { if (INSERT_PADDING) item_offset = SHR_ADD(item_offset, LOG_SMEM_BANKS, item_offset); temp_storage.buff[item_offset] = input_items[ITEM]; } } CTA_SYNC(); #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) { // Read a strip of items const int STRIP_OFFSET = ITEM * BLOCK_THREADS; const int STRIP_OOB = STRIP_OFFSET + BLOCK_THREADS; if ((SLICE_OFFSET < STRIP_OOB) && (SLICE_OOB > STRIP_OFFSET)) { int item_offset = STRIP_OFFSET + linear_tid - SLICE_OFFSET; if ((item_offset >= 0) && (item_offset < TIME_SLICED_ITEMS)) { if (INSERT_PADDING) item_offset += item_offset >> LOG_SMEM_BANKS; temp_items[ITEM] = temp_storage.buff[item_offset]; } } } } // Copy #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) { output_items[ITEM] = temp_items[ITEM]; } } public: /******************************************************************//** * \name Collective constructors *********************************************************************/ //@{ /** * \brief Collective constructor using a private static allocation of shared memory as temporary storage. */ __device__ __forceinline__ BlockExchange() : temp_storage(PrivateStorage()), linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z)), warp_id((WARPS == 1) ? 0 : linear_tid / WARP_THREADS), lane_id(LaneId()), warp_offset(warp_id * WARP_TIME_SLICED_ITEMS) {} /** * \brief Collective constructor using the specified memory allocation as temporary storage. */ __device__ __forceinline__ BlockExchange( TempStorage &temp_storage) ///< [in] Reference to memory allocation having layout type TempStorage : temp_storage(temp_storage.Alias()), linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z)), lane_id(LaneId()), warp_id((WARPS == 1) ? 
0 : linear_tid / WARP_THREADS), warp_offset(warp_id * WARP_TIME_SLICED_ITEMS) {} //@} end member group /******************************************************************//** * \name Structured exchanges *********************************************************************/ //@{ /** * \brief Transposes data items from <em>striped</em> arrangement to <em>blocked</em> arrangement. * * \par * - \smemreuse * * \par Snippet * The code snippet below illustrates the conversion from a "striped" to a "blocked" arrangement * of 512 integer items partitioned across 128 threads where each thread owns 4 items. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/block/block_exchange.cuh> * * __global__ void ExampleKernel(int *d_data, ...) * { * // Specialize BlockExchange for a 1D block of 128 threads owning 4 integer items each * typedef cub::BlockExchange<int, 128, 4> BlockExchange; * * // Allocate shared memory for BlockExchange * __shared__ typename BlockExchange::TempStorage temp_storage; * * // Load a tile of ordered data into a striped arrangement across block threads * int thread_data[4]; * cub::LoadDirectStriped<128>(threadIdx.x, d_data, thread_data); * * // Collectively exchange data into a blocked arrangement across threads * BlockExchange(temp_storage).StripedToBlocked(thread_data, thread_data); * * \endcode * \par * Suppose the set of striped input \p thread_data across the block of threads is * <tt>{ [0,128,256,384], [1,129,257,385], ..., [127,255,383,511] }</tt> after loading from device-accessible memory. * The corresponding output \p thread_data in those threads will be * <tt>{ [0,1,2,3], [4,5,6,7], [8,9,10,11], ..., [508,509,510,511] }</tt>. * */ template <typename OutputT> __device__ __forceinline__ void StripedToBlocked( InputT input_items[ITEMS_PER_THREAD], ///< [in] Items to exchange, converting between <em>striped</em> and <em>blocked</em> arrangements. OutputT output_items[ITEMS_PER_THREAD]) ///< [out] Items from exchange, converting between <em>striped</em> and <em>blocked</em> arrangements. { StripedToBlocked(input_items, output_items, Int2Type<WARP_TIME_SLICING>()); } /** * \brief Transposes data items from <em>blocked</em> arrangement to <em>striped</em> arrangement. * * \par * - \smemreuse * * \par Snippet * The code snippet below illustrates the conversion from a "blocked" to a "striped" arrangement * of 512 integer items partitioned across 128 threads where each thread owns 4 items. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/block/block_exchange.cuh> * * __global__ void ExampleKernel(int *d_data, ...) * { * // Specialize BlockExchange for a 1D block of 128 threads owning 4 integer items each * typedef cub::BlockExchange<int, 128, 4> BlockExchange; * * // Allocate shared memory for BlockExchange * __shared__ typename BlockExchange::TempStorage temp_storage; * * // Obtain a segment of consecutive items that are blocked across threads * int thread_data[4]; * ... * * // Collectively exchange data into a striped arrangement across threads * BlockExchange(temp_storage).BlockedToStriped(thread_data, thread_data); * * // Store data striped across block threads into an ordered tile * cub::StoreDirectStriped<STORE_DEFAULT, 128>(threadIdx.x, d_data, thread_data); * * \endcode * \par * Suppose the set of blocked input \p thread_data across the block of threads is * <tt>{ [0,1,2,3], [4,5,6,7], [8,9,10,11], ..., [508,509,510,511] }</tt>. 
* The corresponding output \p thread_data in those threads will be * <tt>{ [0,128,256,384], [1,129,257,385], ..., [127,255,383,511] }</tt> in * preparation for storing to device-accessible memory. * */ template <typename OutputT> __device__ __forceinline__ void BlockedToStriped( InputT input_items[ITEMS_PER_THREAD], ///< [in] Items to exchange, converting between <em>striped</em> and <em>blocked</em> arrangements. OutputT output_items[ITEMS_PER_THREAD]) ///< [out] Items from exchange, converting between <em>striped</em> and <em>blocked</em> arrangements. { BlockedToStriped(input_items, output_items, Int2Type<WARP_TIME_SLICING>()); } /** * \brief Transposes data items from <em>warp-striped</em> arrangement to <em>blocked</em> arrangement. * * \par * - \smemreuse * * \par Snippet * The code snippet below illustrates the conversion from a "warp-striped" to a "blocked" arrangement * of 512 integer items partitioned across 128 threads where each thread owns 4 items. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/block/block_exchange.cuh> * * __global__ void ExampleKernel(int *d_data, ...) * { * // Specialize BlockExchange for a 1D block of 128 threads owning 4 integer items each * typedef cub::BlockExchange<int, 128, 4> BlockExchange; * * // Allocate shared memory for BlockExchange * __shared__ typename BlockExchange::TempStorage temp_storage; * * // Load a tile of ordered data into a warp-striped arrangement across warp threads * int thread_data[4]; * cub::LoadSWarptriped<LOAD_DEFAULT>(threadIdx.x, d_data, thread_data); * * // Collectively exchange data into a blocked arrangement across threads * BlockExchange(temp_storage).WarpStripedToBlocked(thread_data); * * \endcode * \par * Suppose the set of warp-striped input \p thread_data across the block of threads is * <tt>{ [0,32,64,96], [1,33,65,97], [2,34,66,98], ..., [415,447,479,511] }</tt> * after loading from device-accessible memory. (The first 128 items are striped across * the first warp of 32 threads, the second 128 items are striped across the second warp, etc.) * The corresponding output \p thread_data in those threads will be * <tt>{ [0,1,2,3], [4,5,6,7], [8,9,10,11], ..., [508,509,510,511] }</tt>. * */ template <typename OutputT> __device__ __forceinline__ void WarpStripedToBlocked( InputT input_items[ITEMS_PER_THREAD], ///< [in] Items to exchange, converting between <em>striped</em> and <em>blocked</em> arrangements. OutputT output_items[ITEMS_PER_THREAD]) ///< [out] Items from exchange, converting between <em>striped</em> and <em>blocked</em> arrangements. { WarpStripedToBlocked(input_items, output_items, Int2Type<WARP_TIME_SLICING>()); } /** * \brief Transposes data items from <em>blocked</em> arrangement to <em>warp-striped</em> arrangement. * * \par * - \smemreuse * * \par Snippet * The code snippet below illustrates the conversion from a "blocked" to a "warp-striped" arrangement * of 512 integer items partitioned across 128 threads where each thread owns 4 items. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/block/block_exchange.cuh> * * __global__ void ExampleKernel(int *d_data, ...) * { * // Specialize BlockExchange for a 1D block of 128 threads owning 4 integer items each * typedef cub::BlockExchange<int, 128, 4> BlockExchange; * * // Allocate shared memory for BlockExchange * __shared__ typename BlockExchange::TempStorage temp_storage; * * // Obtain a segment of consecutive items that are blocked across threads * int thread_data[4]; * ... 
* * // Collectively exchange data into a warp-striped arrangement across threads * BlockExchange(temp_storage).BlockedToWarpStriped(thread_data, thread_data); * * // Store data striped across warp threads into an ordered tile * cub::StoreDirectStriped<STORE_DEFAULT, 128>(threadIdx.x, d_data, thread_data); * * \endcode * \par * Suppose the set of blocked input \p thread_data across the block of threads is * <tt>{ [0,1,2,3], [4,5,6,7], [8,9,10,11], ..., [508,509,510,511] }</tt>. * The corresponding output \p thread_data in those threads will be * <tt>{ [0,32,64,96], [1,33,65,97], [2,34,66,98], ..., [415,447,479,511] }</tt> * in preparation for storing to device-accessible memory. (The first 128 items are striped across * the first warp of 32 threads, the second 128 items are striped across the second warp, etc.) * */ template <typename OutputT> __device__ __forceinline__ void BlockedToWarpStriped( InputT input_items[ITEMS_PER_THREAD], ///< [in] Items to exchange, converting between <em>striped</em> and <em>blocked</em> arrangements. OutputT output_items[ITEMS_PER_THREAD]) ///< [out] Items from exchange, converting between <em>striped</em> and <em>blocked</em> arrangements. { BlockedToWarpStriped(input_items, output_items, Int2Type<WARP_TIME_SLICING>()); } //@} end member group /******************************************************************//** * \name Scatter exchanges *********************************************************************/ //@{ /** * \brief Exchanges data items annotated by rank into <em>blocked</em> arrangement. * * \par * - \smemreuse * * \tparam OffsetT <b>[inferred]</b> Signed integer type for local offsets */ template <typename OutputT, typename OffsetT> __device__ __forceinline__ void ScatterToBlocked( InputT input_items[ITEMS_PER_THREAD], ///< [in] Items to exchange, converting between <em>striped</em> and <em>blocked</em> arrangements. OutputT output_items[ITEMS_PER_THREAD], ///< [out] Items from exchange, converting between <em>striped</em> and <em>blocked</em> arrangements. OffsetT ranks[ITEMS_PER_THREAD]) ///< [in] Corresponding scatter ranks { ScatterToBlocked(input_items, output_items, ranks, Int2Type<WARP_TIME_SLICING>()); } /** * \brief Exchanges data items annotated by rank into <em>striped</em> arrangement. * * \par * - \smemreuse * * \tparam OffsetT <b>[inferred]</b> Signed integer type for local offsets */ template <typename OutputT, typename OffsetT> __device__ __forceinline__ void ScatterToStriped( InputT input_items[ITEMS_PER_THREAD], ///< [in] Items to exchange, converting between <em>striped</em> and <em>blocked</em> arrangements. OutputT output_items[ITEMS_PER_THREAD], ///< [out] Items from exchange, converting between <em>striped</em> and <em>blocked</em> arrangements. OffsetT ranks[ITEMS_PER_THREAD]) ///< [in] Corresponding scatter ranks { ScatterToStriped(input_items, output_items, ranks, Int2Type<WARP_TIME_SLICING>()); } /** * \brief Exchanges data items annotated by rank into <em>striped</em> arrangement. Items with rank -1 are not exchanged. * * \par * - \smemreuse * * \tparam OffsetT <b>[inferred]</b> Signed integer type for local offsets */ template <typename OutputT, typename OffsetT> __device__ __forceinline__ void ScatterToStripedGuarded( InputT input_items[ITEMS_PER_THREAD], ///< [in] Items to exchange, converting between <em>striped</em> and <em>blocked</em> arrangements. OutputT output_items[ITEMS_PER_THREAD], ///< [out] Items from exchange, converting between <em>striped</em> and <em>blocked</em> arrangements. 
OffsetT ranks[ITEMS_PER_THREAD]) ///< [in] Corresponding scatter ranks { #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) { int item_offset = ranks[ITEM]; if (INSERT_PADDING) item_offset = SHR_ADD(item_offset, LOG_SMEM_BANKS, item_offset); if (ranks[ITEM] >= 0) temp_storage.buff[item_offset] = input_items[ITEM]; } CTA_SYNC(); #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) { int item_offset = int(ITEM * BLOCK_THREADS) + linear_tid; if (INSERT_PADDING) item_offset = SHR_ADD(item_offset, LOG_SMEM_BANKS, item_offset); output_items[ITEM] = temp_storage.buff[item_offset]; } } /** * \brief Exchanges valid data items annotated by rank into <em>striped</em> arrangement. * * \par * - \smemreuse * * \tparam OffsetT <b>[inferred]</b> Signed integer type for local offsets * \tparam ValidFlag <b>[inferred]</b> FlagT type denoting which items are valid */ template <typename OutputT, typename OffsetT, typename ValidFlag> __device__ __forceinline__ void ScatterToStripedFlagged( InputT input_items[ITEMS_PER_THREAD], ///< [in] Items to exchange, converting between <em>striped</em> and <em>blocked</em> arrangements. OutputT output_items[ITEMS_PER_THREAD], ///< [out] Items from exchange, converting between <em>striped</em> and <em>blocked</em> arrangements. OffsetT ranks[ITEMS_PER_THREAD], ///< [in] Corresponding scatter ranks ValidFlag is_valid[ITEMS_PER_THREAD]) ///< [in] Corresponding flag denoting item validity { #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) { int item_offset = ranks[ITEM]; if (INSERT_PADDING) item_offset = SHR_ADD(item_offset, LOG_SMEM_BANKS, item_offset); if (is_valid[ITEM]) temp_storage.buff[item_offset] = input_items[ITEM]; } CTA_SYNC(); #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) { int item_offset = int(ITEM * BLOCK_THREADS) + linear_tid; if (INSERT_PADDING) item_offset = SHR_ADD(item_offset, LOG_SMEM_BANKS, item_offset); output_items[ITEM] = temp_storage.buff[item_offset]; } } //@} end member group #ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document __device__ __forceinline__ void StripedToBlocked( InputT items[ITEMS_PER_THREAD]) ///< [in-out] Items to exchange, converting between <em>striped</em> and <em>blocked</em> arrangements. { StripedToBlocked(items, items); } __device__ __forceinline__ void BlockedToStriped( InputT items[ITEMS_PER_THREAD]) ///< [in-out] Items to exchange, converting between <em>striped</em> and <em>blocked</em> arrangements. { BlockedToStriped(items, items); } __device__ __forceinline__ void WarpStripedToBlocked( InputT items[ITEMS_PER_THREAD]) ///< [in-out] Items to exchange, converting between <em>striped</em> and <em>blocked</em> arrangements. { WarpStripedToBlocked(items, items); } __device__ __forceinline__ void BlockedToWarpStriped( InputT items[ITEMS_PER_THREAD]) ///< [in-out] Items to exchange, converting between <em>striped</em> and <em>blocked</em> arrangements. { BlockedToWarpStriped(items, items); } template <typename OffsetT> __device__ __forceinline__ void ScatterToBlocked( InputT items[ITEMS_PER_THREAD], ///< [in-out] Items to exchange, converting between <em>striped</em> and <em>blocked</em> arrangements. OffsetT ranks[ITEMS_PER_THREAD]) ///< [in] Corresponding scatter ranks { ScatterToBlocked(items, items, ranks); } template <typename OffsetT> __device__ __forceinline__ void ScatterToStriped( InputT items[ITEMS_PER_THREAD], ///< [in-out] Items to exchange, converting between <em>striped</em> and <em>blocked</em> arrangements. 
OffsetT ranks[ITEMS_PER_THREAD]) ///< [in] Corresponding scatter ranks { ScatterToStriped(items, items, ranks); } template <typename OffsetT> __device__ __forceinline__ void ScatterToStripedGuarded( InputT items[ITEMS_PER_THREAD], ///< [in-out] Items to exchange, converting between <em>striped</em> and <em>blocked</em> arrangements. OffsetT ranks[ITEMS_PER_THREAD]) ///< [in] Corresponding scatter ranks { ScatterToStripedGuarded(items, items, ranks); } template <typename OffsetT, typename ValidFlag> __device__ __forceinline__ void ScatterToStripedFlagged( InputT items[ITEMS_PER_THREAD], ///< [in-out] Items to exchange, converting between <em>striped</em> and <em>blocked</em> arrangements. OffsetT ranks[ITEMS_PER_THREAD], ///< [in] Corresponding scatter ranks ValidFlag is_valid[ITEMS_PER_THREAD]) ///< [in] Corresponding flag denoting item validity { ScatterToStriped(items, items, ranks, is_valid); } #endif // DOXYGEN_SHOULD_SKIP_THIS }; #ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document template < typename T, int ITEMS_PER_THREAD, int LOGICAL_WARP_THREADS = CUB_PTX_WARP_THREADS, int PTX_ARCH = CUB_PTX_ARCH> class WarpExchange { private: /****************************************************************************** * Constants ******************************************************************************/ /// Constants enum { // Whether the logical warp size and the PTX warp size coincide IS_ARCH_WARP = (LOGICAL_WARP_THREADS == CUB_WARP_THREADS(PTX_ARCH)), WARP_ITEMS = (ITEMS_PER_THREAD * LOGICAL_WARP_THREADS) + 1, LOG_SMEM_BANKS = CUB_LOG_SMEM_BANKS(PTX_ARCH), SMEM_BANKS = 1 << LOG_SMEM_BANKS, // Insert padding if the number of items per thread is a power of two and > 4 (otherwise we can typically use 128b loads) INSERT_PADDING = (ITEMS_PER_THREAD > 4) && (PowerOfTwo<ITEMS_PER_THREAD>::VALUE), PADDING_ITEMS = (INSERT_PADDING) ? (WARP_ITEMS >> LOG_SMEM_BANKS) : 0, }; /****************************************************************************** * Type definitions ******************************************************************************/ /// Shared memory storage layout type struct _TempStorage { T buff[WARP_ITEMS + PADDING_ITEMS]; }; public: /// \smemstorage{WarpExchange} struct TempStorage : Uninitialized<_TempStorage> {}; private: /****************************************************************************** * Thread fields ******************************************************************************/ _TempStorage &temp_storage; int lane_id; public: /****************************************************************************** * Construction ******************************************************************************/ /// Constructor __device__ __forceinline__ WarpExchange( TempStorage &temp_storage) : temp_storage(temp_storage.Alias()), lane_id(IS_ARCH_WARP ? LaneId() : LaneId() % LOGICAL_WARP_THREADS) {} /****************************************************************************** * Interface ******************************************************************************/ /** * \brief Exchanges valid data items annotated by rank into <em>striped</em> arrangement. 
* * \par * - \smemreuse * * \tparam OffsetT <b>[inferred]</b> Signed integer type for local offsets */ template <typename OffsetT> __device__ __forceinline__ void ScatterToStriped( T items[ITEMS_PER_THREAD], ///< [in-out] Items to exchange OffsetT ranks[ITEMS_PER_THREAD]) ///< [in] Corresponding scatter ranks { #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) { if (INSERT_PADDING) ranks[ITEM] = SHR_ADD(ranks[ITEM], LOG_SMEM_BANKS, ranks[ITEM]); temp_storage.buff[ranks[ITEM]] = items[ITEM]; } WARP_SYNC(0xffffffff); #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) { int item_offset = (ITEM * LOGICAL_WARP_THREADS) + lane_id; if (INSERT_PADDING) item_offset = SHR_ADD(item_offset, LOG_SMEM_BANKS, item_offset); items[ITEM] = temp_storage.buff[item_offset]; } } }; #endif // DOXYGEN_SHOULD_SKIP_THIS } // CUB namespace CUB_NS_POSTFIX // Optional outer namespace(s)
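// ----------------------------------------------------------------------------
// Usage sketch (not part of the header): the scatter exchanges above carry no
// \par Snippet block, so the kernel below shows ScatterToStriped as it might
// appear in a user's .cu file. With ranks equal to each item's blocked offset
// (linear_tid * ITEMS_PER_THREAD + ITEM), ScatterToStriped reproduces
// BlockedToStriped; d_data and the 128-thread / 4-item tile shape are
// assumptions of this example only.
// ----------------------------------------------------------------------------
#include <cub/cub.cuh>   // or equivalently <cub/block/block_exchange.cuh>

__global__ void ScatterToStripedExampleKernel(int *d_data)
{
    // Specialize BlockExchange for a 1D block of 128 threads owning 4 integer items each
    typedef cub::BlockExchange<int, 128, 4> BlockExchangeT;

    // Allocate shared memory for BlockExchange
    __shared__ typename BlockExchangeT::TempStorage temp_storage;

    // Obtain a blocked segment of consecutive items and choose scatter ranks.
    int thread_data[4];
    int ranks[4];
    for (int ITEM = 0; ITEM < 4; ++ITEM)
    {
        thread_data[ITEM] = d_data[threadIdx.x * 4 + ITEM];
        ranks[ITEM] = threadIdx.x * 4 + ITEM;   // blocked offset of this item
    }

    // Scatter items to the ranks' positions, then read the tile back striped.
    BlockExchangeT(temp_storage).ScatterToStriped(thread_data, thread_data, ranks);

    // Store the tile in striped order: consecutive threads hold consecutive items.
    for (int ITEM = 0; ITEM < 4; ++ITEM)
        d_data[ITEM * 128 + threadIdx.x] = thread_data[ITEM];
}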
#include <nvbio/basic/numbers.h> #include <nvbio/basic/algorithms.h> #include <nvbio/basic/priority_queue.h> #include <nvbio/basic/timer.h> #include <nvbio/basic/transform_iterator.h> #include <nvbio/basic/vector_view.h> #include <nvbio/basic/primitives.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/iterator/transform_iterator.h> #include <thrust/sort.h> using namespace nvbio; // a functor to extract the read id from a mem struct mem_read_id_functor { typedef mem_state::mem_type argument_type; typedef uint32 result_type; NVBIO_HOST_DEVICE uint32 operator() (const argument_type mem) const { return mem.string_id(); } }; // a class to keep track of a chain struct chain { // construct an empty chain NVBIO_FORCEINLINE NVBIO_HOST_DEVICE chain() : id(uint32(-1)) {} // construct a new chain from a single seed NVBIO_FORCEINLINE NVBIO_HOST_DEVICE chain(const uint32 _id, const mem_state::mem_type seed) : id( _id ), ref( seed.index_pos() ), span_beg( seed.span().x ), last_ref( seed.index_pos() ), last_span( seed.span() ) {} // test whether we can merge the given mem into this chain NVBIO_FORCEINLINE NVBIO_HOST_DEVICE bool merge(const mem_state::mem_type seed, const uint32 w, const uint32 max_chain_gap) { const uint32 seed_len = seed.span().y - seed.span().x; const uint32 last_len = last_span.y - last_span.x; const uint32 rbeg = ref; const uint32 rend = last_ref + last_len; // check whether seed is contained in the chain if (seed.span().x >= span_beg && seed.span().y <= last_span.y && seed.index_pos() >= rbeg && seed.index_pos() + seed_len <= rend) return true; // contained seed; do nothing const int32 x = seed.span().x - last_span.x; // always non-negative const int32 y = seed.index_pos() - last_ref; if ((y >= 0) && (x - y <= w) && (x - last_len < max_chain_gap) && (y - last_len < max_chain_gap)) { // grow the chain last_span = seed.span(); last_ref = seed.index_pos(); return true; } return false; } uint32 id; // chain id uint32 ref; // reference coordinate of the first seed in the chain uint32 span_beg; // read span begin uint32 last_ref; // the reference coordinate of the last seed in the chain uint2 last_span; // the read span of the last seed in the chain }; struct chain_compare { NVBIO_FORCEINLINE NVBIO_HOST_DEVICE bool operator() (const chain& chain1, const chain& chain2) const { // compare by the reference coordinate of the first seed of each chain return chain1.ref < chain2.ref; } }; // assign a chain id to all MEMs for the current pipeline::chunk of reads __global__ void build_chains_kernel( const read_chunk chunk, // the current sub-batch const uint32 pass_number, // the pass number - we process up to N seeds per pass const uint32 n_active, // the number of active reads in this pass const uint32* active_reads, // the set of active reads uint8* active_flags, // the output set of active read flags const uint32 w, // w parameter const uint32 max_chain_gap, // max chain gap parameter const uint32 n_mems, // the total number of MEMs for this chunk of reads const mem_state::mem_type* mems, // the MEMs for this chunk of reads const uint32* mems_index, // a sorting index into the MEMs specifying the processing order uint64* mems_chains) // the output chain IDs corresponding to the sorted MEMs { const uint32 thread_id = threadIdx.x + blockIdx.x * blockDim.x; if (thread_id >= n_active) return; const uint32 read_id = active_reads[ thread_id ]; // find the first seed belonging to this read const uint32 mem_begin = uint32( nvbio::lower_bound( read_id, nvbio::make_transform_iterator( 
mems, mem_read_id_functor() ), n_mems ) - nvbio::make_transform_iterator( mems, mem_read_id_functor() ) ); // find the first seed belonging to the next read const uint32 mem_end = uint32( nvbio::lower_bound( read_id+1u, nvbio::make_transform_iterator( mems, mem_read_id_functor() ), n_mems ) - nvbio::make_transform_iterator( mems, mem_read_id_functor() ) ); // the maximum amount of chains we can output in one pass const uint32 MAX_CHAINS = 128; // keep a priority queue of the chains organized by the reference coordinate of their leftmost seed typedef nvbio::vector_view<chain*> chain_vector_type; typedef nvbio::priority_queue<chain, chain_vector_type, chain_compare> chain_queue_type; chain chain_queue_storage[MAX_CHAINS+1]; chain_queue_type chain_queue( chain_vector_type( 0u, chain_queue_storage ) ); // keep a counter tracking the number of chains that get created // // NOTE: here we conservatively assume that in the previous passes we have // created the maximum number of chains, so as to avoid assigning an already // taken ID to a new chain (which would result in merging potentially unrelated // chains) uint64 n_chains = pass_number * MAX_CHAINS; // compute the first and ending MEM to process in this pass const uint32 mem_batch_begin = mem_begin + pass_number * MAX_CHAINS; const uint32 mem_batch_end = nvbio::min( mem_batch_begin + MAX_CHAINS, mem_end ); // process the seeds in order for (uint32 i = mem_batch_begin; i < mem_batch_end; ++i) { const uint32 seed_idx = mems_index[i]; const mem_state::mem_type seed = mems[ seed_idx ]; // the chain id for this seed, to be determined uint32 chain_id; // insert seed if (chain_queue.empty()) { // get a new chain id chain_id = n_chains++; // build a new chain chain_queue.push( chain( chain_id, seed ) ); } else { // find the closest chain... chain_queue_type::iterator chain_it = chain_queue.upper_bound( chain( 0u, seed ) ); // and test whether we can merge this seed into it if (chain_it != chain_queue.end() && chain_it->merge( seed, w, max_chain_gap ) == false) { // get a new chain id chain_id = n_chains++; // build a new chain chain_queue.push( chain( chain_id, seed ) ); } else { // merge with the existing chain chain_id = chain_it->id; } } // write out the chain id (OR'd with the read id) mems_chains[i] = chain_id | (uint64( read_id ) << 32); } // write out whether we need more passes active_flags[ thread_id ] = (mem_batch_begin < mem_end) ? 1u : 0u; } // build chains for the current pipeline::chunk of reads void build_chains(pipeline_state *pipeline, const io::SequenceDataDevice *reads) { const ScopedTimer<float> timer( &pipeline->stats.chain_time ); // keep track of the time spent here struct chains_state<device_tag> *chn = &pipeline->chn; const uint32 n_reads = pipeline->chunk.read_end - pipeline->chunk.read_begin; const uint32 n_mems = pipeline->chunk.mem_end - pipeline->chunk.mem_begin; // skip pathological cases if (n_mems == 0u) return; // // Here we are going to run multiple passes of the same kernel, as we cannot fit // all chains in local memory at once... 
// // prepare some ping-pong queues for tracking active reads that need more passes nvbio::vector<device_tag,uint32> active_reads( n_reads ); nvbio::vector<device_tag,uint8> active_flags( n_reads ); nvbio::vector<device_tag,uint32> out_reads( n_reads ); nvbio::vector<device_tag,uint8> temp_storage; // initialize the active reads queue thrust::copy( thrust::make_counting_iterator<uint32>(0u) + pipeline->chunk.read_begin, thrust::make_counting_iterator<uint32>(0u) + pipeline->chunk.read_end, active_reads.begin() ); uint32 n_active = n_reads; for (uint32 pass_number = 0u; n_active; ++pass_number) { const uint32 block_dim = 128; const uint32 n_blocks = util::divide_ri( n_active, block_dim ); // assign a chain id to each mem build_chains_kernel<<<n_blocks, block_dim>>>( pipeline->chunk, pass_number, n_active, nvbio::plain_view( active_reads ), nvbio::plain_view( active_flags ), command_line_options.w, command_line_options.max_chain_gap, n_mems, nvbio::plain_view( chn->mems ), nvbio::plain_view( chn->mems_index ), nvbio::plain_view( chn->mems_chain ) ); optional_device_synchronize(); cuda::check_error("build-chains kernel"); // shrink the set of active reads n_active = copy_flagged( n_active, // the number of input elements active_reads.begin(), // the input sequence of elements to copy active_flags.begin(), // the input sequence of copy flags out_reads.begin(), // the output sequence of copied elements temp_storage ); // some temporary storage active_reads.swap( out_reads ); } // sort mems by chain id // NOTE: it's important here to use a stable-sort, so as to guarantee preserving // the ordering by left-coordinate of the MEMs thrust::sort_by_key( // TODO: this is slow, switch to nvbio::cuda::SortEnactor chn->mems_chain.begin(), chn->mems_chain.begin() + n_mems, chn->mems_index.begin() ); optional_device_synchronize(); nvbio::cuda::check_error("build-chains kernel"); }
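// ----------------------------------------------------------------------------
// Editor's note: the chaining kernel above merges each seed into the chain
// whose leftmost reference coordinate is closest (found via a bounded
// priority queue), provided the diagonal and gap constraints hold. The
// host-side sketch below reproduces just that merge predicate and a greedy
// chaining loop on a few hand-made seeds; the toy_seed struct, the constants
// and the linear scan over chain tails are illustrative assumptions, not
// nvbio API.
// ----------------------------------------------------------------------------
#include <vector>
#include <cstdint>
#include <cstdio>

struct toy_seed { uint32_t span_beg, span_end, ref_pos; };

// same test as chain::merge(), minus the "contained seed" shortcut
static bool toy_can_merge(const toy_seed& last, const toy_seed& s,
                          int32_t w, int32_t max_chain_gap)
{
    const int32_t last_len = int32_t(last.span_end - last.span_beg);
    const int32_t x = int32_t(s.span_beg) - int32_t(last.span_beg); // read-space gap
    const int32_t y = int32_t(s.ref_pos)  - int32_t(last.ref_pos);  // reference-space gap
    return (y >= 0) && (x - y <= w) &&
           (x - last_len < max_chain_gap) && (y - last_len < max_chain_gap);
}

static int toy_chaining_demo()
{
    const int32_t w = 5, max_chain_gap = 50;
    // seeds already ordered by read coordinate, as the sorted mems_index guarantees
    const std::vector<toy_seed> seeds = { {0,20,100}, {18,40,118}, {60,80,500} };

    std::vector<std::vector<toy_seed>> chains;
    for (const toy_seed& s : seeds) {
        bool merged = false;
        for (auto& c : chains) {
            if (toy_can_merge(c.back(), s, w, max_chain_gap)) { c.push_back(s); merged = true; break; }
        }
        if (!merged) chains.push_back({ s });
    }
    std::printf("built %zu chains\n", chains.size()); // first two seeds merge, the third starts a new chain
    return int(chains.size());
}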
#include <taskflow/cuda/cudaflow.hpp> #include <iomanip> #include <cfloat> #include <climits> #define L2(x1, y1, x2, y2) ((x1-x2)*(x1-x2) + (y1-y2)*(y1-y2)) // ---------------------------------------------------------------------------- // CPU (sequential) implementation // ---------------------------------------------------------------------------- // run k-means on cpu std::pair<std::vector<float>, std::vector<float>> cpu_seq( const int N, const int K, const int M, const std::vector<float>& px, const std::vector<float>& py ) { std::vector<int> c(K); std::vector<float> sx(K), sy(K), mx(K), my(K); // initial centroids for(int i=0; i<K; ++i) { mx[i] = px[i]; my[i] = py[i]; } for(int m=0; m<M; m++) { // clear the storage for(int k=0; k<K; ++k) { sx[k] = 0.0f; sy[k] = 0.0f; c [k] = 0; } // find the best k (cluster id) for each point for(int i=0; i<N; ++i) { float x = px[i]; float y = py[i]; float best_d = std::numeric_limits<float>::max(); int best_k = 0; for (int k = 0; k < K; ++k) { const float d = L2(x, y, mx[k], my[k]); if (d < best_d) { best_d = d; best_k = k; } } sx[best_k] += x; sy[best_k] += y; c [best_k] += 1; } // update the centroid for(int k=0; k<K; k++) { const int count = max(1, c[k]); // turn 0/0 to 0/1 mx[k] = sx[k] / count; my[k] = sy[k] / count; } } return {mx, my}; } // ---------------------------------------------------------------------------- // CPU (parallel) implementation // ---------------------------------------------------------------------------- // run k-means on cpu (parallel) std::pair<std::vector<float>, std::vector<float>> cpu_par( const int N, const int K, const int M, const std::vector<float>& px, const std::vector<float>& py ) { const auto num_threads = std::thread::hardware_concurrency(); tf::Executor executor; tf::Taskflow taskflow("K-Means"); std::vector<int> c(K), best_ks(N); std::vector<float> sx(K), sy(K), mx(K), my(K); // initial centroids auto init = taskflow.emplace([&](){ for(int i=0; i<K; ++i) { mx[i] = px[i]; my[i] = py[i]; } }).name("init"); // clear the storage auto clean_up = taskflow.emplace([&](){ for(int k=0; k<K; ++k) { sx[k] = 0.0f; sy[k] = 0.0f; c [k] = 0; } }).name("clean_up"); tf::Task pf; // update cluster pf = taskflow.for_each_index(0, N, 1, [&](int i){ float x = px[i]; float y = py[i]; float best_d = std::numeric_limits<float>::max(); int best_k = 0; for (int k = 0; k < K; ++k) { const float d = L2(x, y, mx[k], my[k]); if (d < best_d) { best_d = d; best_k = k; } } best_ks[i] = best_k; }); pf.name("parallel-for"); auto update_cluster = taskflow.emplace([&](){ for(int i=0; i<N; i++) { sx[best_ks[i]] += px[i]; sy[best_ks[i]] += py[i]; c [best_ks[i]] += 1; } for(int k=0; k<K; ++k) { auto count = max(1, c[k]); // turn 0/0 to 0/1 mx[k] = sx[k] / count; my[k] = sy[k] / count; } }).name("update_cluster"); auto condition = taskflow.emplace([m=0, M]() mutable { return (m++ < M) ? 0 : 1; }).name("converged?"); init.precede(clean_up); clean_up.precede(pf); pf.precede(update_cluster); condition.precede(clean_up) .succeed(update_cluster); executor.run(taskflow).wait(); return {mx, my}; } // ---------------------------------------------------------------------------- // GPU implementation // ---------------------------------------------------------------------------- // Each point (thread) computes its distance to each centroid // and adds its x and y values to the sum of its closest // centroid, as well as incrementing that centroid's count of assigned points. 
__global__ void assign_clusters( const float* px, const float* py, int N, const float* mx, const float* my, float* sx, float* sy, int k, int* c ) { const int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= N) { return; } // Make global loads once. const float x = px[index]; const float y = py[index]; float best_distance = FLT_MAX; int best_cluster = 0; for (int cluster = 0; cluster < k; ++cluster) { const float distance = L2(x, y, mx[cluster], my[cluster]); if (distance < best_distance) { best_distance = distance; best_cluster = cluster; } } atomicAdd(&sx[best_cluster], x); atomicAdd(&sy[best_cluster], y); atomicAdd(&c [best_cluster], 1); } // Each thread is one cluster, which just recomputes its coordinates as the mean // of all points assigned to it. __global__ void compute_new_means( float* mx, float* my, const float* sx, const float* sy, const int* c ) { const int cluster = threadIdx.x; const int count = max(1, c[cluster]); // turn 0/0 to 0/1 mx[cluster] = sx[cluster] / count; my[cluster] = sy[cluster] / count; } // Runs k-means on gpu using conditional tasking std::pair<std::vector<float>, std::vector<float>> gpu( const int N, const int K, const int M, const std::vector<float>& h_px, const std::vector<float>& h_py ) { std::vector<float> h_mx, h_my; float *d_px, *d_py, *d_mx, *d_my, *d_sx, *d_sy, *d_c; for(int i=0; i<K; ++i) { h_mx.push_back(h_px[i]); h_my.push_back(h_py[i]); } // create a taskflow graph tf::Executor executor; tf::Taskflow taskflow("K-Means"); auto allocate_px = taskflow.emplace([&](){ TF_CHECK_CUDA(cudaMalloc(&d_px, N*sizeof(float)), "failed to allocate d_px"); }).name("allocate_px"); auto allocate_py = taskflow.emplace([&](){ TF_CHECK_CUDA(cudaMalloc(&d_py, N*sizeof(float)), "failed to allocate d_py"); }).name("allocate_py"); auto allocate_mx = taskflow.emplace([&](){ TF_CHECK_CUDA(cudaMalloc(&d_mx, K*sizeof(float)), "failed to allocate d_mx"); }).name("allocate_mx"); auto allocate_my = taskflow.emplace([&](){ TF_CHECK_CUDA(cudaMalloc(&d_my, K*sizeof(float)), "failed to allocate d_my"); }).name("allocate_my"); auto allocate_sx = taskflow.emplace([&](){ TF_CHECK_CUDA(cudaMalloc(&d_sx, K*sizeof(float)), "failed to allocate d_sx"); }).name("allocate_sx"); auto allocate_sy = taskflow.emplace([&](){ TF_CHECK_CUDA(cudaMalloc(&d_sy, K*sizeof(float)), "failed to allocate d_sy"); }).name("allocate_sy"); auto allocate_c = taskflow.emplace([&](){ TF_CHECK_CUDA(cudaMalloc(&d_c, K*sizeof(float)), "failed to allocate dc"); }).name("allocate_c"); auto h2d = taskflow.emplace([&](tf::cudaFlow& cf){ cf.copy(d_px, h_px.data(), N).name("h2d_px"); cf.copy(d_py, h_py.data(), N).name("h2d_py"); cf.copy(d_mx, h_mx.data(), K).name("h2d_mx"); cf.copy(d_my, h_my.data(), K).name("h2d_my"); }).name("h2d"); auto kmeans = taskflow.emplace([&](tf::cudaFlow& cf){ auto zero_c = cf.zero(d_c, K).name("zero_c"); auto zero_sx = cf.zero(d_sx, K).name("zero_sx"); auto zero_sy = cf.zero(d_sy, K).name("zero_sy"); auto cluster = cf.kernel( (N+512-1) / 512, 512, 0, assign_clusters, d_px, d_py, N, d_mx, d_my, d_sx, d_sy, K, d_c ).name("cluster"); auto new_centroid = cf.kernel( 1, K, 0, compute_new_means, d_mx, d_my, d_sx, d_sy, d_c ).name("new_centroid"); cluster.precede(new_centroid) .succeed(zero_c, zero_sx, zero_sy); }).name("update_means"); auto condition = taskflow.emplace([i=0, M] () mutable { return i++ < M ? 
0 : 1; }).name("converged?"); auto stop = taskflow.emplace([&](tf::cudaFlow& cf){ cf.copy(h_mx.data(), d_mx, K).name("d2h_mx"); cf.copy(h_my.data(), d_my, K).name("d2h_my"); }).name("d2h"); auto free = taskflow.emplace([&](){ TF_CHECK_CUDA(cudaFree(d_px), "failed to free d_px"); TF_CHECK_CUDA(cudaFree(d_py), "failed to free d_py"); TF_CHECK_CUDA(cudaFree(d_mx), "failed to free d_mx"); TF_CHECK_CUDA(cudaFree(d_my), "failed to free d_my"); TF_CHECK_CUDA(cudaFree(d_sx), "failed to free d_sx"); TF_CHECK_CUDA(cudaFree(d_sy), "failed to free d_sy"); TF_CHECK_CUDA(cudaFree(d_c), "failed to free d_c"); }).name("free"); // build up the dependency h2d.succeed(allocate_px, allocate_py, allocate_mx, allocate_my); kmeans.succeed(allocate_sx, allocate_sy, allocate_c, h2d) .precede(condition); condition.precede(kmeans, stop); stop.precede(free); //taskflow.dump(std::cout); // run the taskflow executor.run(taskflow).wait(); //std::cout << "dumping kmeans graph ...\n"; //taskflow.dump(std::cout); return {h_mx, h_my}; } // Runs k-means on gpu without using conditional tasking std::pair<std::vector<float>, std::vector<float>> gpu_predicate( const int N, const int K, const int M, const std::vector<float>& h_px, const std::vector<float>& h_py ) { std::vector<float> h_mx, h_my; float *d_px, *d_py, *d_mx, *d_my, *d_sx, *d_sy, *d_c; for(int i=0; i<K; ++i) { h_mx.push_back(h_px[i]); h_my.push_back(h_py[i]); } // create a taskflow graph tf::Executor executor; tf::Taskflow taskflow("K-Means"); auto allocate_px = taskflow.emplace([&](){ TF_CHECK_CUDA(cudaMalloc(&d_px, N*sizeof(float)), "failed to allocate d_px"); }).name("allocate_px"); auto allocate_py = taskflow.emplace([&](){ TF_CHECK_CUDA(cudaMalloc(&d_py, N*sizeof(float)), "failed to allocate d_py"); }).name("allocate_py"); auto allocate_mx = taskflow.emplace([&](){ TF_CHECK_CUDA(cudaMalloc(&d_mx, K*sizeof(float)), "failed to allocate d_mx"); }).name("allocate_mx"); auto allocate_my = taskflow.emplace([&](){ TF_CHECK_CUDA(cudaMalloc(&d_my, K*sizeof(float)), "failed to allocate d_my"); }).name("allocate_my"); auto allocate_sx = taskflow.emplace([&](){ TF_CHECK_CUDA(cudaMalloc(&d_sx, K*sizeof(float)), "failed to allocate d_sx"); }).name("allocate_sx"); auto allocate_sy = taskflow.emplace([&](){ TF_CHECK_CUDA(cudaMalloc(&d_sy, K*sizeof(float)), "failed to allocate d_sy"); }).name("allocate_sy"); auto allocate_c = taskflow.emplace([&](){ TF_CHECK_CUDA(cudaMalloc(&d_c, K*sizeof(float)), "failed to allocate dc"); }).name("allocate_c"); auto h2d = taskflow.emplace([&](tf::cudaFlow& cf){ cf.copy(d_px, h_px.data(), N).name("h2d_px"); cf.copy(d_py, h_py.data(), N).name("h2d_py"); cf.copy(d_mx, h_mx.data(), K).name("h2d_mx"); cf.copy(d_my, h_my.data(), K).name("h2d_my"); }).name("h2d"); auto kmeans = taskflow.emplace([&](tf::cudaFlow& cf){ auto zero_c = cf.zero(d_c, K).name("zero_c"); auto zero_sx = cf.zero(d_sx, K).name("zero_sx"); auto zero_sy = cf.zero(d_sy, K).name("zero_sy"); auto cluster = cf.kernel( (N+512-1) / 512, 512, 0, assign_clusters, d_px, d_py, N, d_mx, d_my, d_sx, d_sy, K, d_c ).name("cluster"); auto new_centroid = cf.kernel( 1, K, 0, compute_new_means, d_mx, d_my, d_sx, d_sy, d_c ).name("new_centroid"); cluster.precede(new_centroid) .succeed(zero_c, zero_sx, zero_sy); cf.offload_n(M); }).name("update_means"); auto stop = taskflow.emplace([&](tf::cudaFlow& cf){ cf.copy(h_mx.data(), d_mx, K).name("d2h_mx"); cf.copy(h_my.data(), d_my, K).name("d2h_my"); }).name("d2h"); auto free = taskflow.emplace([&](){ TF_CHECK_CUDA(cudaFree(d_px), "failed to free d_px"); 
TF_CHECK_CUDA(cudaFree(d_py), "failed to free d_py"); TF_CHECK_CUDA(cudaFree(d_mx), "failed to free d_mx"); TF_CHECK_CUDA(cudaFree(d_my), "failed to free d_my"); TF_CHECK_CUDA(cudaFree(d_sx), "failed to free d_sx"); TF_CHECK_CUDA(cudaFree(d_sy), "failed to free d_sy"); TF_CHECK_CUDA(cudaFree(d_c), "failed to free d_c"); }).name("free"); // build up the dependency h2d.succeed(allocate_px, allocate_py, allocate_mx, allocate_my); kmeans.succeed(allocate_sx, allocate_sy, allocate_c, h2d) .precede(stop); stop.precede(free); //taskflow.dump(std::cout); // run the taskflow executor.run(taskflow).wait(); //std::cout << "dumping kmeans graph ...\n"; //taskflow.dump(std::cout); return {h_mx, h_my}; } // Function: main int main(int argc, const char* argv[]) { if(argc != 4) { std::cerr << "usage: ./kmeans num_points k num_iterations\n"; std::exit(EXIT_FAILURE); } const int N = std::atoi(argv[1]); const int K = std::atoi(argv[2]); const int M = std::atoi(argv[3]); if(N < 1) { throw std::runtime_error("num_points must be at least one"); } if(K >= N) { throw std::runtime_error("k must be smaller than the number of points"); } if(M < 1) { throw std::runtime_error("num_iterations must be larger than 0"); } std::vector<float> h_px, h_py, mx, my; // Randomly generate N points std::cout << "generating " << N << " random points ...\n"; for(int i=0; i<N; ++i) { h_px.push_back(rand()%1000 - 500); h_py.push_back(rand()%1000 - 500); } // k-means on cpu_seq std::cout << "running k-means on cpu (sequential) ... "; auto sbeg = std::chrono::steady_clock::now(); std::tie(mx, my) = cpu_seq(N, K, M, h_px, h_py); auto send = std::chrono::steady_clock::now(); std::cout << "completed with " << std::chrono::duration_cast<std::chrono::milliseconds>(send-sbeg).count() << " ms\n"; std::cout << "k centroids found by cpu (sequential)\n"; for(int k=0; k<K; ++k) { std::cout << "centroid " << k << ": " << std::setw(10) << mx[k] << ' ' << std::setw(10) << my[k] << '\n'; } // k-means on cpu_par std::cout << "running k-means on cpu (parallel) ... "; auto pbeg = std::chrono::steady_clock::now(); std::tie(mx, my) = cpu_par(N, K, M, h_px, h_py); auto pend = std::chrono::steady_clock::now(); std::cout << "completed with " << std::chrono::duration_cast<std::chrono::milliseconds>(pend-pbeg).count() << " ms\n"; std::cout << "k centroids found by cpu (parallel)\n"; for(int k=0; k<K; ++k) { std::cout << "centroid " << k << ": " << std::setw(10) << mx[k] << ' ' << std::setw(10) << my[k] << '\n'; } // k-means on gpu with conditional tasking std::cout << "running k-means on gpu (with conditional tasking) ... "; auto gbeg = std::chrono::steady_clock::now(); std::tie(mx, my) = gpu(N, K, M, h_px, h_py); auto gend = std::chrono::steady_clock::now(); std::cout << "completed with " << std::chrono::duration_cast<std::chrono::milliseconds>(gend-gbeg).count() << " ms\n"; std::cout << "k centroids found by gpu (with conditional tasking)\n"; for(int k=0; k<K; ++k) { std::cout << "centroid " << k << ": " << std::setw(10) << mx[k] << ' ' << std::setw(10) << my[k] << '\n'; } // k-means on gpu without conditional tasking std::cout << "running k-means on gpu (without conditional tasking) ... 
"; auto rbeg = std::chrono::steady_clock::now(); std::tie(mx, my) = gpu_predicate(N, K, M, h_px, h_py); auto rend = std::chrono::steady_clock::now(); std::cout << "completed with " << std::chrono::duration_cast<std::chrono::milliseconds>(rend-rbeg).count() << " ms\n"; std::cout << "k centroids found by gpu (without conditional tasking)\n"; for(int k=0; k<K; ++k) { std::cout << "centroid " << k << ": " << std::setw(10) << mx[k] << ' ' << std::setw(10) << my[k] << '\n'; } return 0; }
namespace CUDAKernel{ Norm Norm::mean_std(const float mean[3], const float std[3], float alpha, ChannelType channel_type){ Norm out; out.type = NormType::MeanStd; out.alpha = alpha; out.channel_type = channel_type; memcpy(out.mean, mean, sizeof(out.mean)); memcpy(out.std, std, sizeof(out.std)); return out; } Norm Norm::alpha_beta(float alpha, float beta, ChannelType channel_type){ Norm out; out.type = NormType::AlphaBeta; out.alpha = alpha; out.beta = beta; out.channel_type = channel_type; return out; } Norm Norm::None(){ return Norm(); } #define INTER_RESIZE_COEF_BITS 11 #define INTER_RESIZE_COEF_SCALE (1 << INTER_RESIZE_COEF_BITS) #define CAST_BITS (INTER_RESIZE_COEF_BITS << 1) template<typename _T> static __inline__ __device__ _T limit(_T value, _T low, _T high){ return value < low ? low : (value > high ? high : value); } static __inline__ __device__ int resize_cast(int value){ return (value + (1 << (CAST_BITS - 1))) >> CAST_BITS; } // same to opencv // reference: https://github.com/opencv/opencv/blob/24fcb7f8131f707717a9f1871b17d95e7cf519ee/modules/imgproc/src/resize.cpp // reference: https://github.com/openppl-public/ppl.cv/blob/04ef4ca48262601b99f1bb918dcd005311f331da/src/ppl/cv/cuda/resize.cu /* 可以考虑用同样实现的resize函数进行训练,python代码在:tools/test_resize.py */ __global__ void resize_bilinear_and_normalize_kernel( uint8_t* src, int src_line_size, int src_width, int src_height, float* dst, int dst_width, int dst_height, float sx, float sy, Norm norm, int edge ){ int position = blockDim.x * blockIdx.x + threadIdx.x; if (position >= edge) return; int dx = position % dst_width; int dy = position / dst_width; float src_x = (dx + 0.5f) * sx - 0.5f; float src_y = (dy + 0.5f) * sy - 0.5f; float c0, c1, c2; int y_low = floorf(src_y); int x_low = floorf(src_x); int y_high = limit(y_low + 1, 0, src_height - 1); int x_high = limit(x_low + 1, 0, src_width - 1); y_low = limit(y_low, 0, src_height - 1); x_low = limit(x_low, 0, src_width - 1); int ly = rint((src_y - y_low) * INTER_RESIZE_COEF_SCALE); int lx = rint((src_x - x_low) * INTER_RESIZE_COEF_SCALE); int hy = INTER_RESIZE_COEF_SCALE - ly; int hx = INTER_RESIZE_COEF_SCALE - lx; int w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; float* pdst = dst + dy * dst_width + dx * 3; uint8_t* v1 = src + y_low * src_line_size + x_low * 3; uint8_t* v2 = src + y_low * src_line_size + x_high * 3; uint8_t* v3 = src + y_high * src_line_size + x_low * 3; uint8_t* v4 = src + y_high * src_line_size + x_high * 3; c0 = resize_cast(w1 * v1[0] + w2 * v2[0] + w3 * v3[0] + w4 * v4[0]); c1 = resize_cast(w1 * v1[1] + w2 * v2[1] + w3 * v3[1] + w4 * v4[1]); c2 = resize_cast(w1 * v1[2] + w2 * v2[2] + w3 * v3[2] + w4 * v4[2]); if(norm.channel_type == ChannelType::Invert){ float t = c2; c2 = c0; c0 = t; } if(norm.type == NormType::MeanStd){ c0 = (c0 * norm.alpha - norm.mean[0]) / norm.std[0]; c1 = (c1 * norm.alpha - norm.mean[1]) / norm.std[1]; c2 = (c2 * norm.alpha - norm.mean[2]) / norm.std[2]; }else if(norm.type == NormType::AlphaBeta){ c0 = c0 * norm.alpha + norm.beta; c1 = c1 * norm.alpha + norm.beta; c2 = c2 * norm.alpha + norm.beta; } int area = dst_width * dst_height; float* pdst_c0 = dst + dy * dst_width + dx; float* pdst_c1 = pdst_c0 + area; float* pdst_c2 = pdst_c1 + area; *pdst_c0 = c0; *pdst_c1 = c1; *pdst_c2 = c2; } __global__ void warp_perspective_kernel(uint8_t* src, int src_line_size, int src_width, int src_height, float* dst, int dst_width, int dst_height, uint8_t const_value_st, float* warp_affine_matrix_3_3, Norm norm, int edge){ int position = blockDim.x * 
blockIdx.x + threadIdx.x; if (position >= edge) return; float m_x1 = warp_affine_matrix_3_3[0]; float m_y1 = warp_affine_matrix_3_3[1]; float m_z1 = warp_affine_matrix_3_3[2]; float m_x2 = warp_affine_matrix_3_3[3]; float m_y2 = warp_affine_matrix_3_3[4]; float m_z2 = warp_affine_matrix_3_3[5]; float m_x3 = warp_affine_matrix_3_3[6]; float m_y3 = warp_affine_matrix_3_3[7]; float m_z3 = warp_affine_matrix_3_3[8]; int dx = position % dst_width; int dy = position / dst_width; // 原图位置 float src_x = (m_x1 * dx + m_y1 * dy + m_z1)/(m_x3 * dx + m_y3 * dy + m_z3); float src_y = (m_x2 * dx + m_y2 * dy + m_z2)/(m_x3 * dx + m_y3 * dy + m_z3); float c0, c1, c2; if(src_x <= -1 || src_x >= src_width || src_y <= -1 || src_y >= src_height){ // out of range c0 = const_value_st; c1 = const_value_st; c2 = const_value_st; }else{ int y_low = floorf(src_y); int x_low = floorf(src_x); int y_high = y_low + 1; int x_high = x_low + 1; uint8_t const_value[] = {const_value_st, const_value_st, const_value_st}; float ly = src_y - y_low; float lx = src_x - x_low; float hy = 1 - ly; float hx = 1 - lx; float w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; uint8_t* v1 = const_value; uint8_t* v2 = const_value; uint8_t* v3 = const_value; uint8_t* v4 = const_value; if(y_low >= 0){ if (x_low >= 0) v1 = src + y_low * src_line_size + x_low * 3; if (x_high < src_width) v2 = src + y_low * src_line_size + x_high * 3; } if(y_high < src_height){ if (x_low >= 0) v3 = src + y_high * src_line_size + x_low * 3; if (x_high < src_width) v4 = src + y_high * src_line_size + x_high * 3; } // same to opencv c0 = floorf(w1 * v1[0] + w2 * v2[0] + w3 * v3[0] + w4 * v4[0] + 0.5f); c1 = floorf(w1 * v1[1] + w2 * v2[1] + w3 * v3[1] + w4 * v4[1] + 0.5f); c2 = floorf(w1 * v1[2] + w2 * v2[2] + w3 * v3[2] + w4 * v4[2] + 0.5f); } if(norm.channel_type == ChannelType::Invert){ float t = c2; c2 = c0; c0 = t; } if(norm.type == NormType::MeanStd){ c0 = (c0 * norm.alpha - norm.mean[0]) / norm.std[0]; c1 = (c1 * norm.alpha - norm.mean[1]) / norm.std[1]; c2 = (c2 * norm.alpha - norm.mean[2]) / norm.std[2]; }else if(norm.type == NormType::AlphaBeta){ c0 = c0 * norm.alpha + norm.beta; c1 = c1 * norm.alpha + norm.beta; c2 = c2 * norm.alpha + norm.beta; } int area = dst_width * dst_height; float* pdst_c0 = dst + dy * dst_width + dx; float* pdst_c1 = pdst_c0 + area; float* pdst_c2 = pdst_c1 + area; *pdst_c0 = c0; *pdst_c1 = c1; *pdst_c2 = c2; } __global__ void warp_affine_bilinear_and_normalize_plane_kernel(uint8_t* src, int src_line_size, int src_width, int src_height, float* dst, int dst_width, int dst_height, uint8_t const_value_st, float* warp_affine_matrix_2_3, Norm norm, int edge){ int position = blockDim.x * blockIdx.x + threadIdx.x; if (position >= edge) return; float m_x1 = warp_affine_matrix_2_3[0]; float m_y1 = warp_affine_matrix_2_3[1]; float m_z1 = warp_affine_matrix_2_3[2]; float m_x2 = warp_affine_matrix_2_3[3]; float m_y2 = warp_affine_matrix_2_3[4]; float m_z2 = warp_affine_matrix_2_3[5]; int dx = position % dst_width; int dy = position / dst_width; float src_x = m_x1 * dx + m_y1 * dy + m_z1; float src_y = m_x2 * dx + m_y2 * dy + m_z2; float c0, c1, c2; if(src_x <= -1 || src_x >= src_width || src_y <= -1 || src_y >= src_height){ // out of range c0 = const_value_st; c1 = const_value_st; c2 = const_value_st; }else{ int y_low = floorf(src_y); int x_low = floorf(src_x); int y_high = y_low + 1; int x_high = x_low + 1; uint8_t const_value[] = {const_value_st, const_value_st, const_value_st}; float ly = src_y - y_low; float lx = src_x - x_low; float 
hy = 1 - ly; float hx = 1 - lx; float w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; uint8_t* v1 = const_value; uint8_t* v2 = const_value; uint8_t* v3 = const_value; uint8_t* v4 = const_value; if(y_low >= 0){ if (x_low >= 0) v1 = src + y_low * src_line_size + x_low * 3; if (x_high < src_width) v2 = src + y_low * src_line_size + x_high * 3; } if(y_high < src_height){ if (x_low >= 0) v3 = src + y_high * src_line_size + x_low * 3; if (x_high < src_width) v4 = src + y_high * src_line_size + x_high * 3; } // same to opencv c0 = floorf(w1 * v1[0] + w2 * v2[0] + w3 * v3[0] + w4 * v4[0] + 0.5f); c1 = floorf(w1 * v1[1] + w2 * v2[1] + w3 * v3[1] + w4 * v4[1] + 0.5f); c2 = floorf(w1 * v1[2] + w2 * v2[2] + w3 * v3[2] + w4 * v4[2] + 0.5f); } if(norm.channel_type == ChannelType::Invert){ float t = c2; c2 = c0; c0 = t; } if(norm.type == NormType::MeanStd){ c0 = (c0 * norm.alpha - norm.mean[0]) / norm.std[0]; c1 = (c1 * norm.alpha - norm.mean[1]) / norm.std[1]; c2 = (c2 * norm.alpha - norm.mean[2]) / norm.std[2]; }else if(norm.type == NormType::AlphaBeta){ c0 = c0 * norm.alpha + norm.beta; c1 = c1 * norm.alpha + norm.beta; c2 = c2 * norm.alpha + norm.beta; } int area = dst_width * dst_height; float* pdst_c0 = dst + dy * dst_width + dx; float* pdst_c1 = pdst_c0 + area; float* pdst_c2 = pdst_c1 + area; *pdst_c0 = c0; *pdst_c1 = c1; *pdst_c2 = c2; } __global__ void warp_affine_bilinear_and_normalize_focus_kernel(uint8_t* src, int src_line_size, int src_width, int src_height, float* dst, int dst_width, int dst_height, uint8_t const_value_st, float* warp_affine_matrix_1_3, Norm norm, int edge){ int position = blockDim.x * blockIdx.x + threadIdx.x; if (position >= edge) return; float m_k = *warp_affine_matrix_1_3++; float m_b0 = *warp_affine_matrix_1_3++; float m_b1 = *warp_affine_matrix_1_3++; int dx = position % dst_width; int dy = position / dst_width; float src_x = m_k * dx + m_b0; float src_y = m_k * dy + m_b1; float c0, c1, c2; if(src_x <= -1 || src_x >= src_width || src_y <= -1 || src_y >= src_height){ // out of range c0 = const_value_st; c1 = const_value_st; c2 = const_value_st; }else{ int y_low = floorf(src_y); int x_low = floorf(src_x); int y_high = y_low + 1; int x_high = x_low + 1; uint8_t const_value[] = {const_value_st, const_value_st, const_value_st}; float ly = src_y - y_low; float lx = src_x - x_low; float hy = 1 - ly; float hx = 1 - lx; float w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; uint8_t* v1 = const_value; uint8_t* v2 = const_value; uint8_t* v3 = const_value; uint8_t* v4 = const_value; if(y_low >= 0){ if (x_low >= 0) v1 = src + y_low * src_line_size + x_low * 3; if (x_high < src_width) v2 = src + y_low * src_line_size + x_high * 3; } if(y_high < src_height){ if (x_low >= 0) v3 = src + y_high * src_line_size + x_low * 3; if (x_high < src_width) v4 = src + y_high * src_line_size + x_high * 3; } // same to opencv c0 = floorf(w1 * v1[0] + w2 * v2[0] + w3 * v3[0] + w4 * v4[0] + 0.5f); c1 = floorf(w1 * v1[1] + w2 * v2[1] + w3 * v3[1] + w4 * v4[1] + 0.5f); c2 = floorf(w1 * v1[2] + w2 * v2[2] + w3 * v3[2] + w4 * v4[2] + 0.5f); } if(norm.channel_type == ChannelType::Invert){ float t = c2; c2 = c0; c0 = t; } if(norm.type == NormType::MeanStd){ c0 = (c0 * norm.alpha - norm.mean[0]) / norm.std[0]; c1 = (c1 * norm.alpha - norm.mean[1]) / norm.std[1]; c2 = (c2 * norm.alpha - norm.mean[2]) / norm.std[2]; }else if(norm.type == NormType::AlphaBeta){ c0 = c0 * norm.alpha + norm.beta; c1 = c1 * norm.alpha + norm.beta; c2 = c2 * norm.alpha + norm.beta; } int after_focus_width = 
dst_width / 2; int after_focus_height = dst_height / 2; int fdx = dx / 2; int fdy = dy / 2; int fc = ((dx % 2) << 1) | (dy % 2); /** * x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2] * 4 fc * 3 [0, 1, 2] * after_focus_height fdy * after_focus_width fdx * 左乘右加 **/ float* pdst_c0 = dst + ((fc * 3 + 0) * after_focus_height + fdy) * after_focus_width + fdx; float* pdst_c1 = dst + ((fc * 3 + 1) * after_focus_height + fdy) * after_focus_width + fdx; float* pdst_c2 = dst + ((fc * 3 + 2) * after_focus_height + fdy) * after_focus_width + fdx; *pdst_c0 = c0; *pdst_c1 = c1; *pdst_c2 = c2; } __global__ void normalize_feature_kernel(float* feature_array, int num_feature, int feature_length, int edge){ /* & 1 gz bi.z 0 * 1 gy bi.y 0 * N NF bi.x ~ * 1 1 ti.z 0 * F FL / 32 ti.y ~ * Q 32 ti.x ~ */ int position = (blockIdx.x * blockDim.y + threadIdx.y) * blockDim.x + threadIdx.x; if (position >= edge) return; extern __shared__ float l2_norm[]; int irow = position / feature_length; int icol = position % feature_length; if(icol == 0) l2_norm[irow] = 0; __syncthreads(); float value = feature_array[position]; atomicAdd(l2_norm + irow, value * value); __syncthreads(); if(icol == 0) l2_norm[irow] = sqrt(l2_norm[irow]); __syncthreads(); feature_array[position] = value / l2_norm[irow]; } static __device__ uint8_t cast(float value){ return value < 0 ? 0 : (value > 255 ? 255 : value); } static __global__ void convert_nv12_to_bgr_kernel(const uint8_t* y, const uint8_t* uv, int width, int height, int linesize, uint8_t* dst_bgr, int edge){ int position = blockDim.x * blockIdx.x + threadIdx.x; if (position >= edge) return; int ox = position % width; int oy = position / width; const uint8_t& yvalue = y[oy * linesize + ox]; int offset_uv = (oy >> 1) * linesize + (ox & 0xFFFFFFFE); const uint8_t& u = uv[offset_uv + 0]; const uint8_t& v = uv[offset_uv + 1]; dst_bgr[position * 3 + 0] = 1.164f * (yvalue - 16.0f) + 2.018f * (u - 128.0f); dst_bgr[position * 3 + 1] = 1.164f * (yvalue - 16.0f) - 0.813f * (v - 128.0f) - 0.391f * (u - 128.0f); dst_bgr[position * 3 + 2] = 1.164f * (yvalue - 16.0f) + 1.596f * (v - 128.0f); } ///////////////////////////////////////////////////////////////////////// void convert_nv12_to_bgr_invoke( const uint8_t* y, const uint8_t* uv, int width, int height, int linesize, uint8_t* dst, cudaStream_t stream){ int total = width * height; dim3 grid = CUDATools::grid_dims(total); dim3 block = CUDATools::block_dims(total); checkCudaKernel(convert_nv12_to_bgr_kernel<<<grid, block, 0, stream>>>( y, uv, width, height, linesize, dst, total )); } void warp_affine_bilinear_and_normalize_plane( uint8_t* src, int src_line_size, int src_width, int src_height, float* dst, int dst_width, int dst_height, float* matrix_2_3, uint8_t const_value, const Norm& norm, cudaStream_t stream) { int jobs = dst_width * dst_height; auto grid = CUDATools::grid_dims(jobs); auto block = CUDATools::block_dims(jobs); checkCudaKernel(warp_affine_bilinear_and_normalize_plane_kernel << <grid, block, 0, stream >> > ( src, src_line_size, src_width, src_height, dst, dst_width, dst_height, const_value, matrix_2_3, norm, jobs )); } void warp_affine_bilinear_and_normalize_focus( uint8_t* src, int src_line_size, int src_width, int src_height, float* dst , int dst_width, int dst_height, float* matrix_1_3, uint8_t const_value, const Norm& norm, cudaStream_t stream){ int jobs = dst_width * dst_height; auto grid = CUDATools::grid_dims(jobs); auto block = CUDATools::block_dims(jobs); 
checkCudaKernel(warp_affine_bilinear_and_normalize_focus_kernel << <grid, block, 0, stream >> > ( src, src_line_size, src_width, src_height, dst, dst_width, dst_height, const_value, matrix_1_3, norm, jobs )); } void warp_perspective( uint8_t* src, int src_line_size, int src_width, int src_height, float* dst, int dst_width, int dst_height, float* matrix_3_3, uint8_t const_value, const Norm& norm, cudaStream_t stream ) { int jobs = dst_width * dst_height; auto grid = CUDATools::grid_dims(jobs); auto block = CUDATools::block_dims(jobs); checkCudaKernel(warp_perspective_kernel << <grid, block, 0, stream >> > ( src, src_line_size, src_width, src_height, dst, dst_width, dst_height, const_value, matrix_3_3, norm, jobs )); } void resize_bilinear_and_normalize( uint8_t* src, int src_line_size, int src_width, int src_height, float* dst, int dst_width, int dst_height, const Norm& norm, cudaStream_t stream) { int jobs = dst_width * dst_height; auto grid = CUDATools::grid_dims(jobs); auto block = CUDATools::block_dims(jobs); checkCudaKernel(resize_bilinear_and_normalize_kernel << <grid, block, 0, stream >> > ( src, src_line_size, src_width, src_height, dst, dst_width, dst_height, src_width/(float)dst_width, src_height/(float)dst_height, norm, jobs )); } void norm_feature( float* feature_array, int num_feature, int feature_length, cudaStream_t stream ){ Assert(feature_length % 32 == 0); int jobs = num_feature * feature_length; auto grid = dim3(num_feature); auto block = dim3(feature_length / 32, 32); checkCudaKernel(normalize_feature_kernel << <grid, block, num_feature * sizeof(float), stream >> > ( feature_array, num_feature, feature_length, jobs )); } };
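// ----------------------------------------------------------------------------
// Editor's note: a host-side usage sketch for the warp-affine path above.
// It builds a letterbox-style dst->src 2x3 matrix (uniform scale, centered,
// pixel-center refinement omitted for brevity) and calls
// warp_affine_bilinear_and_normalize_plane with a MeanStd Norm. Norm,
// ChannelType and the invoker are assumed to be declared in this file's
// accompanying header; the helper names, the mean/std values and the
// assumption that d_matrix_2_3 is a device buffer of 6 floats are
// illustrative only.
// ----------------------------------------------------------------------------
static void make_letterbox_matrix_2x3(int src_w, int src_h, int dst_w, int dst_h, float* m)
{
    // dst->src mapping used by the kernel: src = M * [dx, dy, 1]^T
    const float scale = fminf(dst_w / (float)src_w, dst_h / (float)src_h);
    m[0] = 1.0f / scale; m[1] = 0.0f;         m[2] = (src_w - dst_w / scale) * 0.5f;
    m[3] = 0.0f;         m[4] = 1.0f / scale; m[5] = (src_h - dst_h / scale) * 0.5f;
}

void preprocess_bgr_to_tensor_example(
    uint8_t* d_src, int src_w, int src_h,   // device BGR HWC image
    float* d_dst, int dst_w, int dst_h,     // device CHW output tensor
    float* d_matrix_2_3,                    // device buffer of 6 floats
    cudaStream_t stream)
{
    // ImageNet-style normalization; ChannelType::Invert swaps BGR -> RGB in-kernel
    const float mean[3] = {0.485f, 0.456f, 0.406f};
    const float stdv[3] = {0.229f, 0.224f, 0.225f};
    CUDAKernel::Norm norm = CUDAKernel::Norm::mean_std(
        mean, stdv, 1.0f / 255.0f, CUDAKernel::ChannelType::Invert);

    float h_matrix[6];
    make_letterbox_matrix_2x3(src_w, src_h, dst_w, dst_h, h_matrix);
    cudaMemcpyAsync(d_matrix_2_3, h_matrix, sizeof(h_matrix),
                    cudaMemcpyHostToDevice, stream);

    CUDAKernel::warp_affine_bilinear_and_normalize_plane(
        d_src, src_w * 3, src_w, src_h,
        d_dst, dst_w, dst_h,
        d_matrix_2_3, /*const_value=*/114, norm, stream);
}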
#include "octnet/gpu/pool.h" #include "octnet/gpu/gpu.h" #include <cstdlib> __global__ void kernel_gridpool2x2x2_struct(octree out, int n_blocks, ot_size_t feature_size, const octree in) { CUDA_KERNEL_LOOP(out_grid_idx, n_blocks) { ot_tree_t* otree = octree_get_tree(&out, out_grid_idx); int gn,ogd,ogh,ogw; octree_split_grid_idx(&out, out_grid_idx, &gn, &ogd, &ogh, &ogw); // first bit is always set, because out block consists of 8 in blocks tree_set_bit(otree, 0); int obit_idx_l1 = 1; for(int dgd = 0; dgd < 2; ++dgd) { for(int hgh = 0; hgh < 2; ++hgh) { for(int wgw = 0; wgw < 2; ++wgw) { int igd = 2*ogd + dgd; int igh = 2*ogh + hgh; int igw = 2*ogw + wgw; int in_grid_idx = octree_grid_idx(&in, gn, igd, igh, igw); ot_tree_t* itree = octree_get_tree(&in, in_grid_idx); //check if first bit in in blocks is set if(tree_isset_bit(itree, 0)) { tree_set_bit(otree, obit_idx_l1); int obit_idx_l2 = tree_child_bit_idx(obit_idx_l1); for(int ibit_idx_l1 = 1; ibit_idx_l1 < 9; ++ibit_idx_l1) { //check if l1 bits are set in in blocks if(tree_isset_bit(itree, ibit_idx_l1)) { tree_set_bit(otree, obit_idx_l2); } obit_idx_l2++; } } obit_idx_l1++; } } } } } template <int pool_fcn> __global__ void kernel_gridpool2x2x2_data(octree out, int n_leafs, const octree in) { CUDA_KERNEL_LOOP(leaf_idx, n_leafs) { const int out_grid_idx = out.data[leaf_idx * out.feature_size]; const ot_tree_t* out_tree = octree_get_tree(&out, out_grid_idx); // const int cum_n_leafs = n_leafs_upto(&out, out_grid_idx); const int cum_n_leafs = out.prefix_leafs[out_grid_idx]; const int out_data_idx = leaf_idx - cum_n_leafs; const int out_bit_idx = data_idx_to_bit_idx(out_tree, out_data_idx); // ot_data_t* out_data = out.data_ptrs[out_grid_idx] + out_data_idx * out.feature_size; ot_data_t* out_data = octree_get_data(&out, out_grid_idx) + out_data_idx * out.feature_size; const int depth = depth_from_bit_idx(out_bit_idx); int gn,gd,gh,gw; octree_split_grid_idx(&out, out_grid_idx, &gn, &gd, &gh, &gw); int bd = 0; int bh = 0; int bw = 0; if(depth == 1) { bdhw_from_idx_l1(out_bit_idx, &bd,&bh,&bw); } else if(depth == 2) { bdhw_from_idx_l2(out_bit_idx, &bd,&bh,&bw); } else if(depth == 3) { bdhw_from_idx_l3(out_bit_idx, &bd,&bh,&bw); } const int in_gd = (gd * 2) + (bd > 3); const int in_gh = (gh * 2) + (bh > 3); const int in_gw = (gw * 2) + (bw > 3); const int in_grid_idx = octree_grid_idx(&in, gn,in_gd,in_gh,in_gw); // printf(" in_grid_idx %d <= %d,%d,%d, %d,%d,%d, %d,%d,%d\n", in_grid_idx, in_gd,in_gh,in_gw, gd,gh,gw, bd,bh,bw); const ot_tree_t* in_tree = octree_get_tree(&in, in_grid_idx); int in_bit_idx = 0; if(depth == 2) { in_bit_idx = (out_bit_idx - tree_child_bit_idx(tree_parent_bit_idx(out_bit_idx))) + 1; } else if(depth == 3) { in_bit_idx = (out_bit_idx - tree_child_bit_idx(tree_parent_bit_idx(out_bit_idx))) + (tree_parent_bit_idx(out_bit_idx) - tree_child_bit_idx(tree_parent_bit_idx(tree_parent_bit_idx(out_bit_idx)))) * 8 + 9; } // printf(" leaf_idx %d, out_grid_idx %d, out_bit_idx %d, in_grid_idx %d (%d,%d,%d), in_bit_idx %d (%d,%d,%d)\n", leaf_idx, out_grid_idx, out_bit_idx, in_grid_idx, gd,gh,gw, in_bit_idx, bd,bh,bw); if(tree_isset_bit(in_tree, in_bit_idx)) { in_bit_idx = tree_child_bit_idx(in_bit_idx); // const ot_data_t* in_data = in.data_ptrs[in_grid_idx] + tree_data_idx(in_tree, in_bit_idx, in.feature_size); const ot_data_t* in_data = octree_get_data(&in, in_grid_idx) + tree_data_idx(in_tree, in_bit_idx, in.feature_size); octree_pool2x2x2<pool_fcn>(in_data, in.feature_size, out_data); } else { // const ot_data_t* in_data = 
in.data_ptrs[in_grid_idx] + tree_data_idx(in_tree, in_bit_idx, in.feature_size); const ot_data_t* in_data = octree_get_data(&in, in_grid_idx) + tree_data_idx(in_tree, in_bit_idx, in.feature_size); octree_cpy_leaf(in_data, in.feature_size, out_data); } } } template <int pool_fcn> void octree_gridpool2x2x2_gpu(const octree* in, octree* out) { if(in->grid_depth % 2 != 0 || in->grid_height % 2 != 0 || in->grid_width % 2 != 0) { printf("[ERROR] octree_gridpool2x2x2_gpu grid dimension should be a multiply of 2 (are %d,%d,%d)\n", in->grid_depth, in->grid_height, in->grid_width); exit(-1); } if(in->grid_depth / 2 == 0 || in->grid_height / 2 == 0 || in->grid_width / 2 == 0) { printf("[ERROR] octree_gridpool2x2x2_gpu grid dimension have to be at least 2x2x2\n"); exit(-1); } //copy scalars out->n = in->n; out->grid_depth = in->grid_depth / 2; out->grid_height = in->grid_height / 2; out->grid_width = in->grid_width / 2; out->feature_size = in->feature_size; int n_blocks = octree_num_blocks(out); int feature_size = in->feature_size; //compute out structure octree_resize_as_gpu(out, out); octree_clr_trees_gpu(out); kernel_gridpool2x2x2_struct<<<GET_BLOCKS(n_blocks), CUDA_NUM_THREADS>>>( *out, n_blocks, feature_size, *in ); CUDA_POST_KERNEL_CHECK; //pool/copy data octree_upd_n_leafs_gpu(out); octree_resize_as_gpu(out, out); octree_upd_prefix_leafs_gpu(out); octree_leaf_idx_to_grid_idx_gpu(out, out->feature_size, out->data_capacity, out->data); kernel_gridpool2x2x2_data<pool_fcn><<<GET_BLOCKS(out->n_leafs), CUDA_NUM_THREADS>>>( *out, out->n_leafs, *in ); CUDA_POST_KERNEL_CHECK; } template <int pool_fcn> __global__ void kernel_gridpool2x2x2_bwd(octree grad_in, int n_leafs, const octree grad_out, const octree in) { CUDA_KERNEL_LOOP(leaf_idx, n_leafs) { // const int out_grid_idx = grad_in.trees[leaf_idx * N_TREE_INTS]; const int out_grid_idx = leaf_idx_to_grid_idx(&grad_out, leaf_idx); const ot_tree_t* out_tree = octree_get_tree(&grad_out, out_grid_idx); // const int cum_n_leafs = n_leafs_upto(&grad_out, out_grid_idx); const int cum_n_leafs = grad_out.prefix_leafs[out_grid_idx]; const int out_data_idx = leaf_idx - cum_n_leafs; const int out_bit_idx = data_idx_to_bit_idx(out_tree, out_data_idx); // const ot_data_t* grad_out_data = grad_out.data_ptrs[out_grid_idx] + out_data_idx * grad_out.feature_size; const ot_data_t* grad_out_data = octree_get_data(&grad_out, out_grid_idx) + out_data_idx * grad_out.feature_size; const int depth = depth_from_bit_idx(out_bit_idx); int gn,gd,gh,gw; octree_split_grid_idx(&grad_out, out_grid_idx, &gn, &gd, &gh, &gw); int bd = 0; int bh = 0; int bw = 0; if(depth == 1) { bdhw_from_idx_l1(out_bit_idx, &bd,&bh,&bw); } else if(depth == 2) { bdhw_from_idx_l2(out_bit_idx, &bd,&bh,&bw); } else if(depth == 3) { bdhw_from_idx_l3(out_bit_idx, &bd,&bh,&bw); } const int in_gd = (gd * 2) + (bd > 3); const int in_gh = (gh * 2) + (bh > 3); const int in_gw = (gw * 2) + (bw > 3); const int in_grid_idx = octree_grid_idx(&in, gn,in_gd,in_gh,in_gw); const ot_tree_t* in_tree = octree_get_tree(&in, in_grid_idx); int in_bit_idx = 0; if(depth == 2) { in_bit_idx = (out_bit_idx - tree_child_bit_idx(tree_parent_bit_idx(out_bit_idx))) + 1; } else if(depth == 3) { in_bit_idx = (out_bit_idx - tree_child_bit_idx(tree_parent_bit_idx(out_bit_idx))) + (tree_parent_bit_idx(out_bit_idx) - tree_child_bit_idx(tree_parent_bit_idx(tree_parent_bit_idx(out_bit_idx)))) * 8 + 9; } if(tree_isset_bit(in_tree, in_bit_idx)) { in_bit_idx = tree_child_bit_idx(in_bit_idx); const int in_data_idx = tree_data_idx(in_tree, 
in_bit_idx, in.feature_size); // const ot_data_t* in_data = in.data_ptrs[in_grid_idx] + in_data_idx; const ot_data_t* in_data = octree_get_data(&in, in_grid_idx) + in_data_idx; // ot_data_t* grad_in_data = grad_in.data_ptrs[in_grid_idx] + in_data_idx; ot_data_t* grad_in_data = octree_get_data(&grad_in, in_grid_idx) + in_data_idx; octree_pool2x2x2_bwd<pool_fcn>(in_data, grad_out_data, in.feature_size, grad_in_data); } else { const int in_data_idx = tree_data_idx(in_tree, in_bit_idx, in.feature_size); // ot_data_t* grad_in_data = grad_in.data_ptrs[in_grid_idx] + in_data_idx; ot_data_t* grad_in_data = octree_get_data(&grad_in, in_grid_idx) + in_data_idx; octree_cpy_leaf(grad_out_data, in.feature_size, grad_in_data); } } } template <int pool_fcn> void octree_gridpool2x2x2_bwd_gpu(const octree* in, const octree* grad_out, octree* grad_in) { octree_cpy_scalars(in, grad_in); octree_resize_as_gpu(in, grad_in); octree_cpy_trees_gpu_gpu(in, grad_in); octree_cpy_prefix_leafs_gpu_gpu(in, grad_in); int n_blocks = octree_num_blocks(grad_out); // octree_leaf_idx_to_grid_idx_gpu(grad_in, N_TREE_INTS, grad_in->trees); kernel_gridpool2x2x2_bwd<pool_fcn><<<GET_BLOCKS(grad_out->n_leafs), CUDA_NUM_THREADS>>>( *grad_in, grad_out->n_leafs, *grad_out, *in ); CUDA_POST_KERNEL_CHECK; } void octree_gridpool2x2x2_avg_gpu(const octree* in, octree* out) { if(DEBUG) { printf("[DEBUG] octree_gridpool2x2x2_avg_gpu\n"); } octree_gridpool2x2x2_gpu<REDUCE_AVG>(in, out); } void octree_gridpool2x2x2_max_gpu(const octree* in, octree* out){ if(DEBUG) { printf("[DEBUG] octree_gridpool2x2x2_max_gpu\n"); } octree_gridpool2x2x2_gpu<REDUCE_MAX>(in, out); } void octree_gridpool2x2x2_avg_bwd_gpu(const octree* in, const octree* grad_out, octree* grad_in) { if(DEBUG) { printf("[DEBUG] octree_gridpool2x2x2_avg_bwd_gpu\n"); } octree_gridpool2x2x2_bwd_gpu<REDUCE_AVG>(in, grad_out, grad_in); } void octree_gridpool2x2x2_max_bwd_gpu(const octree* in, const octree* grad_out, octree* grad_in) { if(DEBUG) { printf("[DEBUG] octree_gridpool2x2x2_max_bwd_gpu\n"); } octree_gridpool2x2x2_bwd_gpu<REDUCE_MAX>(in, grad_out, grad_in); }
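// ----------------------------------------------------------------------------
// Editor's note: octree_pool2x2x2<pool_fcn> / octree_pool2x2x2_bwd<pool_fcn>
// are defined elsewhere in octnet; they reduce the 8 child leaves of a cell
// into a single output leaf. As a self-contained illustration of what the
// pool_fcn template switch selects, the kernel below performs the same
// 2x2x2 max/average reduction on a dense D x H x W x F volume. It reuses the
// REDUCE_* constants and the CUDA_KERNEL_LOOP / GET_BLOCKS / CUDA_NUM_THREADS
// helpers from this file's headers, assumes <cfloat> for FLT_MAX, and is
// otherwise ad hoc.
// Launch example: dense_pool2x2x2_kernel<REDUCE_MAX>
//   <<<GET_BLOCKS(n_out), CUDA_NUM_THREADS>>>(in, out, od, oh, ow, fs);
// ----------------------------------------------------------------------------
template <int pool_fcn>
__global__ void dense_pool2x2x2_kernel(
    const float* in, float* out,
    int out_d, int out_h, int out_w, int feature_size)
{
  const int n_out = out_d * out_h * out_w * feature_size;
  CUDA_KERNEL_LOOP(idx, n_out) {
    const int f  =  idx % feature_size;
    const int ow = (idx / feature_size) % out_w;
    const int oh = (idx / (feature_size * out_w)) % out_h;
    const int od =  idx / (feature_size * out_w * out_h);

    const int in_h = out_h * 2;
    const int in_w = out_w * 2;

    float acc = (pool_fcn == REDUCE_MAX) ? -FLT_MAX : 0.f;
    for (int dd = 0; dd < 2; ++dd)
    for (int dh = 0; dh < 2; ++dh)
    for (int dw = 0; dw < 2; ++dw) {
      const int id = 2 * od + dd, ih = 2 * oh + dh, iw = 2 * ow + dw;
      const float v = in[((id * in_h + ih) * in_w + iw) * feature_size + f];
      acc = (pool_fcn == REDUCE_MAX) ? fmaxf(acc, v) : acc + v;
    }
    out[idx] = (pool_fcn == REDUCE_MAX) ? acc : acc / 8.f;
  }
}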
#include "fringe/cuda/cudaUtils.h" #include "fringe/cuda/ulongmask.h" #include "KS2sample_cuda.h" #include <math.h> #include <iostream> #define THRD_PER_BLOCK 96 //Constant memory for constant input values //Done this way in topozero. Need to understand //why this cannot be put in part of the struct __constant__ double d_inpts_pval[1]; __constant__ int d_inpts_int[6]; //0: numCols //1: numLines //2: nBands //3: Nx //4: Ny //5: wtslen /*******Advance function declarations***********/ __global__ void runSortAmp(float *amp); __global__ void findNeighbors(const float *amp, const unsigned char *mask, unsigned int *wts, int * count); /**********Inplace sorting algorithm************/ __device__ inline void bruteForceSort(float *arr) { int ii,jj; float temp; for(jj=0; jj < (d_inpts_int[2]-1); jj++) { for (ii=0; ii < (d_inpts_int[2]-jj-1); ii++) { temp = fminf(arr[ii], arr[ii+1]); arr[ii+1] = fmaxf(arr[ii], arr[ii+1]); arr[ii] = temp; } } }; /***** Structure for all GPU/Host handling data *****/ struct gpuParams { //These are meant to be inputs //Pointers refer to pointers on device float *amplitude; //Flattened array of amplitude values unsigned char *mask; //Flattened mask array //These are host end variables int numCols; //Number of cols in a line int numLines; //Number of lines in a block int nBands; //Number of bands int numWtsBands; //Number of uint32 bands for nmap int Nx; //Half window width in X int Ny; //Half window width in Y //These are meant to be outputs //Pointers refer to pointers on device int *count; //Count of number of neighbors unsigned int *wts; //Weights //Constructor and destructor gpuParams(int cols, int lines, int bands, int nx, int ny, int nwts); ~gpuParams(); //Methods to help void allocateArrays(); void setConstants(); void deallocateArrays(); void setInputs(float *, unsigned char *); void getOutputs(int *, unsigned int *); void sortAmplitude(); void process(double pval); }; gpuParams::gpuParams(int cols, int lines, int bands, int nx, int ny, int nwts): numCols(cols),numLines(lines), nBands(bands), Nx(nx), Ny(ny), numWtsBands(nwts) { //Ensure the memory on GPU is allocated for this allocateArrays(); //Assign constants to the global arrays setConstants(); } gpuParams::~gpuParams() { //Ensure memory on GPU is released deallocateArrays(); } //Allocate memory on the GPU void gpuParams::allocateArrays() { size_t nPix = numCols * numLines; //Allocate memory for input amplitude gpuErrChk ( cudaMalloc((float**)&amplitude, (nBands*nPix)*sizeof(float))); gpuErrChk( cudaMemset(amplitude, 0, (nBands*nPix)*sizeof(float))); //Allocate memory for input mask gpuErrChk( cudaMalloc((unsigned char**)&mask, (nPix)*sizeof(unsigned char))); gpuErrChk( cudaMemset(mask, 0, nPix*sizeof(unsigned char))); //Allocate memory for output count gpuErrChk( cudaMalloc((int**)&count, (nPix)*sizeof(int))); gpuErrChk( cudaMemset(count, 0, nPix*sizeof(int))); //Allocate memory for weights gpuErrChk( cudaMalloc((unsigned int**)&wts, (nPix*numWtsBands)*sizeof(unsigned int))); gpuErrChk( cudaMemset(wts, 0, nPix*numWtsBands*sizeof(unsigned int)) ); } void gpuParams::setConstants() { int constants[6]; constants[0] = numCols; constants[1] = numLines; constants[2] = nBands; constants[3] = Nx; constants[4] = Ny; constants[5] = numWtsBands; gpuErrChk( cudaMemcpyToSymbol(d_inpts_int, constants, (6*sizeof(int)))); //int readback[6]; //gpuErrChk( cudaMemcpyFromSymbol(readback, d_inpts_int, (6*sizeof(int)))); //std::cout << "Ncols = " << readback[0] << "\n" // << "Nlines = " << readback[1] << "\n" // << "Nbands = " << 
readback[2] << "\n" // << "Nx = " << readback[3] << "\n" // << "Ny = " << readback[4] << "\n" // << "Nwts = " << readback[5] << "\n"; } //Deallocate memory on GPU void gpuParams::deallocateArrays() { //Free amplitude gpuErrChk( cudaFree(amplitude)); //Free mask gpuErrChk( cudaFree(mask)); //Free output gpuErrChk( cudaFree(count)); //Free weights gpuErrChk( cudaFree(wts)); } //Pass amplitude and mask to GPU void gpuParams::setInputs(float *amp, unsigned char *msk) { size_t nPix = numCols * numLines; //Copy amplitude to GPU gpuErrChk( cudaMemcpy(amplitude, amp, (nPix*nBands*sizeof(float)), cudaMemcpyHostToDevice)); //Copy mask to GPU gpuErrChk( cudaMemcpy(mask, msk, (nPix*sizeof(unsigned char)), cudaMemcpyHostToDevice)); } //Get count and wts from GPU void gpuParams::getOutputs(int *cnt, unsigned int *wmask) { size_t nPix = numCols * numLines; //Copy count to host gpuErrChk( cudaMemcpy(cnt, count, (nPix*sizeof(int)), cudaMemcpyDeviceToHost)); //copy wts to host gpuErrChk( cudaMemcpy(wmask, wts, (nPix*numWtsBands*sizeof(unsigned int)), cudaMemcpyDeviceToHost)); } //Sort amplitudes void gpuParams::sortAmplitude() { int numPix = numCols * numLines; dim3 block(THRD_PER_BLOCK); dim3 grid((numPix + (THRD_PER_BLOCK-1))/THRD_PER_BLOCK); /*if ((grid.x * THRD_PER_BLOCK) > numPix) { std::cout << " Number of empty threads = " << ((grid.x * THRD_PER_BLOCK) - numPix) << "\n"; }*/ runSortAmp <<<grid, block>>>(amplitude); //Track errors and synchronize gpuErrChk( cudaGetLastError()); gpuErrChk( cudaDeviceSynchronize()); } //Find neighbors void gpuParams::process(double pval) { //Copy the threshold value to device gpuErrChk( cudaMemcpyToSymbol(d_inpts_pval, &pval, sizeof(double))); int numPix = numCols * numLines; dim3 block(THRD_PER_BLOCK); dim3 grid((numPix + (THRD_PER_BLOCK-1))/THRD_PER_BLOCK); /*if ((grid.x * THRD_PER_BLOCK) > numPix) { std::cout << "Number of empty threads = " << ((grid.x * THRD_PER_BLOCK) - numPix) << "\n"; }*/ findNeighbors <<<grid, block>>>(amplitude, mask, wts, count); //Track errors and synchronize gpuErrChk(cudaGetLastError()); gpuErrChk(cudaDeviceSynchronize()); } /*****End of structure************/ /********** Actual Kernel function *************/ //This method is to sort a single pixel __global__ void runSortAmp(float *amp) { //Pixel number int pixel = (blockDim.x * blockIdx.x) + threadIdx.x; //Make sure count is within limits //i.e, pixel < numCols * numLines if (pixel < (d_inpts_int[0] * d_inpts_int[1])) { //Offset to pixel = pixel * nbands bruteForceSort(amp+(pixel*d_inpts_int[2])); } } //This method is to identify neighbors for a single pixel __global__ void findNeighbors(const float *amp, const unsigned char *mask, unsigned int *wts, int *count) { //Temporary variables needed int refii, refjj; int qq,ii,jj; double prob; const float *refpix; const float *cenpix; unsigned int *weight; //Pixel number int pp = (blockDim.x * blockIdx.x) + threadIdx.x; //Make sure count is within limits //i.e, pixel < numCols * numLines if (pp < (d_inpts_int[0] * d_inpts_int[1])) { if( mask[pp] != 0) { cenpix = amp + (d_inpts_int[2] * pp); weight = wts + (d_inpts_int[5] * pp); for(ii=-d_inpts_int[4]; ii<=d_inpts_int[4]; ii++) { refii = (pp/d_inpts_int[0]) + ii; for (jj=-d_inpts_int[3]; jj<=d_inpts_int[3]; jj++) { refjj = (pp%d_inpts_int[0]) + jj; if ((refii < d_inpts_int[1]) && (refii >=0) && (refjj < d_inpts_int[0]) && (refjj >=0)) { qq = refii * d_inpts_int[0] + refjj; refpix = amp + (d_inpts_int[2] * qq); if (mask[qq] != 0) { //Count same pix as neighbor if (pp == qq) { count[pp] += 1; 
setBit(weight, 0, 0, d_inpts_int[3], d_inpts_int[4], true); } else { prob = KS2test(cenpix, refpix, d_inpts_int[2]); if (prob >= d_inpts_pval[0]) { count[pp] += 1; setBit( weight, ii,jj, d_inpts_int[3], d_inpts_int[4], true); } //if prob > thresh } //if not same pixel } //if ref pixel is not masked } //if ref pixel is within limits } //loop over jj } //loop over ii /*count[pp] = 0; for(ii=0; ii< d_inpts_int[2]; ii++) count[pp] += (cenpix[ii] == 0);*/ } //if pixel is not masked } //if pixel is within limits } /********* End of actual kernel function **********/ //Actual interface to nmap.cpp //This is the only function that is used by parent code directly. void nmapProcessBlock(float *amp, unsigned char *msk, int cols, int lines, int bands, int *cnt, unsigned int *wmask, int wtslen, double pval, int Nx, int Ny) { //Create structure to handle interaction with GPU struct gpuParams pars( cols,lines, bands, Nx, Ny, wtslen); //Copy inputs to GPU pars.setInputs(amp, msk); //Sort amplitudes since stats is between histograms pars.sortAmplitude(); //Process the block pars.process(pval); //Get outputs from GPU pars.getOutputs(cnt, wmask); } //Wrappers for GPU access void lockGPU() { getGPUDevice(0); } void unlockGPU() { releaseGPUDevice(); }
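// ----------------------------------------------------------------------------
// Editor's note: KS2test() comes from KS2sample_cuda.h and returns a
// significance level that findNeighbors() compares against d_inpts_pval.
// The host-side sketch below computes the underlying two-sample
// Kolmogorov-Smirnov statistic D = max |F1(x) - F2(x)| for two already-sorted
// amplitude stacks of equal length (as guaranteed by runSortAmp), which is
// the quantity that significance level is derived from. The function name is
// illustrative and not part of the fringe API.
// ----------------------------------------------------------------------------
static double ks_two_sample_statistic(const float* a, const float* b, int n)
{
    int ia = 0, ib = 0;
    double d = 0.0;
    while (ia < n && ib < n) {
        const float va = a[ia];
        const float vb = b[ib];
        if (va <= vb) ++ia;           // step the empirical CDF of a
        if (vb <= va) ++ib;           // step the empirical CDF of b
        const double fa = ia / (double)n;
        const double fb = ib / (double)n;
        const double diff = fa > fb ? fa - fb : fb - fa;
        if (diff > d) d = diff;
    }
    return d;   // large D -> the two pixels are unlikely to be statistically homogeneous
}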
#pragma once #include <gunrock/app/problem_base.cuh> namespace gunrock { namespace app { namespace bfs { enum Direction { FORWARD = 0, BACKWARD = 1, UNDECIDED = 2, }; /** * @brief Speciflying parameters for BFS Problem * @param parameters The util::Parameter<...> structure holding all parameter * info \return cudaError_t error message(s), if any */ cudaError_t UseParameters_problem(util::Parameters &parameters) { cudaError_t retval = cudaSuccess; GUARD_CU(gunrock::app::UseParameters_problem(parameters)); GUARD_CU(parameters.Use<bool>( "mark-pred", util::OPTIONAL_ARGUMENT | util::MULTI_VALUE | util::OPTIONAL_PARAMETER, false, "Whether to mark predecessor info.", __FILE__, __LINE__)); return retval; } /** * @brief Breadth-First Search Problem structure * @tparam _GraphT Type of the graph * @tparam _LabelT Type of labels used in sssp * @tparam _ValueT Type of per-vertex distance values * @tparam _FLAG Problem flags */ template <typename _GraphT, typename _LabelT = typename _GraphT::VertexT, typename _ValueT = typename _GraphT::ValueT, ProblemFlag _FLAG = Problem_None> struct Problem : ProblemBase<_GraphT, _FLAG> { typedef _GraphT GraphT; static const ProblemFlag FLAG = _FLAG; typedef typename GraphT::VertexT VertexT; typedef typename GraphT::SizeT SizeT; typedef typename GraphT::CsrT CsrT; typedef typename GraphT::CscT CscT; typedef typename GraphT::GpT GpT; typedef _LabelT LabelT; typedef _ValueT ValueT; typedef unsigned char MaskT; typedef ProblemBase<GraphT, FLAG> BaseProblem; typedef DataSliceBase<GraphT, FLAG> BaseDataSlice; // Helper structures /** * @brief Data slice structure containing BFS-specific data on indiviual GPU */ struct DataSlice : BaseDataSlice { // util::Array1D<SizeT, VertexT> original_vertex; util::Array1D<SizeT, LabelT> labels; // labels to mark latest iteration the vertex been visited util::Array1D<SizeT, VertexT> preds; // predecessors of vertices util::Array1D<SizeT, VertexT> temp_preds; // predecessors of vertices util::Array1D<SizeT, SizeT> vertex_markers[2]; util::Array1D<SizeT, VertexT> unvisited_vertices[2]; util::Array1D<SizeT, SizeT, util::PINNED> split_lengths; util::Array1D<SizeT, VertexT> local_vertices; util::Array1D<SizeT, MaskT> visited_masks; util::Array1D<SizeT, MaskT> old_mask; util::Array1D<SizeT, MaskT *> in_masks; util::Array1D<SizeT, Direction> direction_votes; SizeT num_visited_vertices, num_unvisited_vertices; bool been_in_backward; Direction current_direction, previous_direction; /* * @brief Default constructor */ DataSlice() : BaseDataSlice() { // original_vertex .SetName("original_vertex" ); labels.SetName("labels"); preds.SetName("preds"); temp_preds.SetName("temp_preds"); vertex_markers[0].SetName("vertex_markers[0]"); vertex_markers[1].SetName("vertex_markers[1]"); unvisited_vertices[0].SetName("unvisited_vertices[0]"); unvisited_vertices[1].SetName("unvisited_vertices[1]"); local_vertices.SetName("local_vertices"); split_lengths.SetName("split_length"); direction_votes.SetName("direction_votes"); visited_masks.SetName("visited_masks"); old_mask.SetName("old_mask"); in_masks.SetName("in_masks"); } /* * @brief Default destructor */ virtual ~DataSlice() { Release(); } cudaError_t Release(util::Location target = util::LOCATION_ALL) { cudaError_t retval = cudaSuccess; if (target & util::DEVICE) GUARD_CU(util::SetDevice(this->gpu_idx)); // GUARD_CU(original_vertex .Release(target)); GUARD_CU(labels.Release(target)); GUARD_CU(preds.Release(target)); GUARD_CU(temp_preds.Release(target)); GUARD_CU(vertex_markers[0].Release(target)); 
GUARD_CU(vertex_markers[1].Release(target)); GUARD_CU(unvisited_vertices[0].Release(target)); GUARD_CU(unvisited_vertices[1].Release(target)); GUARD_CU(split_lengths.Release(target)); GUARD_CU(local_vertices.Release(target)); GUARD_CU(direction_votes.Release(target)); GUARD_CU(visited_masks.Release(target)); GUARD_CU(old_mask.Release(target)); GUARD_CU(in_masks.Release(target)); GUARD_CU(BaseDataSlice ::Release(target)); return retval; } /** * @brief initializing sssp-specific data on each gpu * @param sub_graph Sub graph on the GPU. * @param[in] num_gpus Number of GPUs * @param[in] gpu_idx GPU device index * @param[in] target Targeting device location * @param[in] flag Problem flag containling options * \return cudaError_t Error message(s), if any */ cudaError_t Init(GraphT &sub_graph, int num_gpus = 1, int gpu_idx = 0, util::Location target = util::DEVICE, ProblemFlag flag = Problem_None) { cudaError_t retval = cudaSuccess; GUARD_CU(BaseDataSlice::Init(sub_graph, num_gpus, gpu_idx, target, flag)); GUARD_CU(labels.Allocate(sub_graph.nodes, target)); if (flag & Mark_Predecessors) { GUARD_CU(preds.Allocate(sub_graph.nodes, target)); // GUARD_CU(temp_preds .Allocate(sub_graph.nodes, target)); } GUARD_CU(unvisited_vertices[0].Allocate(sub_graph.nodes, target)); GUARD_CU(unvisited_vertices[1].Allocate(sub_graph.nodes, target)); GUARD_CU(split_lengths.Allocate(2, util::HOST | target)); GUARD_CU(direction_votes.Allocate(4, util::HOST)); if (flag & Enable_Idempotence) { GUARD_CU(visited_masks.Allocate( sub_graph.nodes / (sizeof(MaskT) * 8) + 2 * sizeof(VertexT), target)); } if (num_gpus > 1) { /*if (flag & Mark_Predecessors) { this->vertex_associate_orgs[0] = preds.GetPointer(target); if (!keep_node_num) { original_vertex.SetPointer( graph_slice->original_vertex.GetPointer(target), graph_slice->original_vertex.GetSize(), target); } } GUARD_CU(this->vertex_associate_orgs.Move(util::HOST, target)); */ SizeT local_counter = 0; for (VertexT v = 0; v < sub_graph.nodes; v++) if (sub_graph.GpT::partition_table[v] == 0) local_counter++; GUARD_CU(local_vertices.Allocate(local_counter, util::HOST | target)); local_counter = 0; for (VertexT v = 0; v < sub_graph.nodes; v++) { if (sub_graph.GpT::partition_table[v] == 0) { local_vertices[local_counter] = v; local_counter++; } } GUARD_CU(local_vertices.Move(util::HOST, target)); } GUARD_CU(sub_graph.Move(util::HOST, target, this->stream)); return retval; } // end of Init /** * @brief Reset problem function. Must be called prior to each run. * @param[in] src Source vertex to start. 
* @param[in] location Memory location to work on * \return cudaError_t Error message(s), if any */ cudaError_t Reset(VertexT src, util::Location target = util::DEVICE) { cudaError_t retval = cudaSuccess; SizeT nodes = this->sub_graph->nodes; num_visited_vertices = 0; num_unvisited_vertices = 0; been_in_backward = false; current_direction = FORWARD; previous_direction = FORWARD; GUARD_CU(util::SetDevice(this->gpu_idx)); for (int i = 0; i < 4; i++) direction_votes[i] = UNDECIDED; // Allocate output labels if necessary GUARD_CU(labels.EnsureSize_(nodes, target)); GUARD_CU(labels.ForEach( [] __host__ __device__(LabelT & label) { label = util::PreDefinedValues<LabelT>::MaxValue; }, nodes, target, this->stream)); if (this->flag & Mark_Predecessors) { // Allocate preds if necessary GUARD_CU(preds.EnsureSize_(nodes, target)); GUARD_CU(preds.ForEach( [] __host__ __device__(VertexT & pred) { pred = util::PreDefinedValues<VertexT>::InvalidValue; }, nodes, target, this->stream)); } if (this->flag & Enable_Idempotence) { GUARD_CU(visited_masks.ForEach( [] __host__ __device__(MaskT & mask) { mask = 0; }, util::PreDefinedValues<SizeT>::InvalidValue, target, this->stream)); } return retval; } // end of Reset }; // end of DataSlice // Members // Set of data slices (one for each GPU) util::Array1D<SizeT, DataSlice> *data_slices; // Methods /** * @brief BFSProblem default constructor */ Problem(util::Parameters &_parameters, ProblemFlag _flag = Problem_None) : BaseProblem(_parameters, _flag), data_slices(NULL) {} /** * @brief BFSProblem default destructor */ virtual ~Problem() { Release(); } /* * @brief Releasing allocated memory space * @param[in] target The location to release memory from * \return cudaError_t Error message(s), if any */ cudaError_t Release(util::Location target = util::LOCATION_ALL) { cudaError_t retval = cudaSuccess; if (data_slices == NULL) return retval; for (int gpu = 0; gpu < this->num_gpus; gpu++) { GUARD_CU(data_slices[gpu].Release(target)); } if ((target & util::HOST) != 0 && data_slices[0].GetPointer(util::DEVICE) == NULL) { delete[] data_slices; data_slices = NULL; } GUARD_CU(BaseProblem::Release(target)); return retval; } /** * \addtogroup PublicInterface * @{ */ /** * @brief Copy result labels and/or predecessors computed on the GPU back to *host-side vectors. 
* @param[out] h_labels Host array to store computed vertex labels * @param[out] h_preds Host array to store computed vertex predecessors * @param[in] target where the results are stored * \return cudaError_t Error message(s), if any */ cudaError_t Extract(LabelT *h_labels, VertexT *h_preds = NULL, util::Location target = util::DEVICE) { cudaError_t retval = cudaSuccess; SizeT nodes = this->org_graph->nodes; if (this->num_gpus == 1) { auto &data_slice = data_slices[0][0]; if (target == util::DEVICE) { // Set device GUARD_CU(util::SetDevice(this->gpu_idx[0])); GUARD_CU(data_slice.labels.SetPointer(h_labels, nodes, util::HOST)); GUARD_CU(data_slice.labels.Move(util::DEVICE, util::HOST)); if (this->flag & Mark_Predecessors) { GUARD_CU(data_slice.preds.SetPointer(h_preds, nodes, util::HOST)); GUARD_CU(data_slice.preds.Move(util::DEVICE, util::HOST)); } } else if (target == util::HOST) { GUARD_CU(data_slice.labels.ForAll( [h_labels] __host__ __device__(const LabelT *labels, const VertexT &v) { h_labels[v] = labels[v]; }, nodes, util::HOST)); if (this->flag & Mark_Predecessors) GUARD_CU(data_slice.preds.ForAll( [h_preds] __host__ __device__(const VertexT *preds, const VertexT &v) { h_preds[v] = preds[v]; }, nodes, util::HOST)); } } else { // num_gpus != 1 util::Array1D<SizeT, LabelT *> th_labels; util::Array1D<SizeT, VertexT *> th_preds; th_labels.SetName("bfs::Problem::Extract::th_labels"); th_preds.SetName("bfs::Problem::Extract::th_preds"); GUARD_CU(th_labels.Allocate(this->num_gpus, util::HOST)); GUARD_CU(th_preds.Allocate(this->num_gpus, util::HOST)); for (int gpu = 0; gpu < this->num_gpus; gpu++) { auto &data_slice = data_slices[gpu][0]; if (target == util::DEVICE) { GUARD_CU(util::SetDevice(this->gpu_idx[gpu])); GUARD_CU(data_slice.labels.Move(util::DEVICE, util::HOST)); if (this->flag & Mark_Predecessors) GUARD_CU(data_slice.preds.Move(util::DEVICE, util::HOST)); } th_labels[gpu] = data_slice.labels.GetPointer(util::HOST); th_preds[gpu] = data_slice.preds.GetPointer(util::HOST); } // end for(gpu) for (VertexT v = 0; v < nodes; v++) { int gpu = this->org_graph->GpT::partition_table[v]; VertexT v_ = v; if ((GraphT::FLAG & gunrock::partitioner::Keep_Node_Num) == 0) v_ = this->org_graph->GpT::convertion_table[v]; h_labels[v] = th_labels[gpu][v_]; if (this->flag & Mark_Predecessors) h_preds[v] = th_preds[gpu][v_]; } GUARD_CU(th_labels.Release()); GUARD_CU(th_preds.Release()); } // end if (num_gpus ==1) return retval; } /** * @brief initialization function. * @param graph The graph that SSSP processes on * @param[in] Location Memory location to work on * \return cudaError_t Error message(s), if any */ cudaError_t Init(GraphT &graph, util::Location target = util::DEVICE) { cudaError_t retval = cudaSuccess; GUARD_CU(BaseProblem::Init(graph, target)); data_slices = new util::Array1D<SizeT, DataSlice>[this->num_gpus]; if (this->parameters.template Get<bool>("mark-pred")) this->flag = this->flag | Mark_Predecessors; for (int gpu = 0; gpu < this->num_gpus; gpu++) { data_slices[gpu].SetName("data_slices[" + std::to_string(gpu) + "]"); if (target & util::DEVICE) GUARD_CU(util::SetDevice(this->gpu_idx[gpu])); GUARD_CU(data_slices[gpu].Allocate(1, target | util::HOST)); auto &data_slice = data_slices[gpu][0]; GUARD_CU(data_slice.Init(this->sub_graphs[gpu], this->num_gpus, this->gpu_idx[gpu], target, this->flag)); } // end for(gpu) return retval; } /** * @brief Reset problem function. Must be called prior to each run. * @param[in] src Source vertex to start. 
* @param[in] location Memory location to work on * \return cudaError_t Error message(s), if any */ cudaError_t Reset(VertexT src, util::Location target = util::DEVICE) { cudaError_t retval = cudaSuccess; for (int gpu = 0; gpu < this->num_gpus; ++gpu) { // Set device if (target & util::DEVICE) GUARD_CU(util::SetDevice(this->gpu_idx[gpu])); GUARD_CU(data_slices[gpu]->Reset(target)); GUARD_CU(data_slices[gpu].Move(util::HOST, target)); } // Fillin the initial input_queue for BFS problem int gpu; VertexT src_; if (this->num_gpus <= 1) { gpu = 0; src_ = src; } else { gpu = this->org_graph->partition_table[src]; if (this->flag & partitioner::Keep_Node_Num) src_ = src; else src_ = this->org_graph->GpT::convertion_table[src]; } if (target & util::HOST) { data_slices[gpu]->labels[src_] = 0; if (this->flag & Mark_Predecessors) data_slices[gpu]->preds[src_] = util::PreDefinedValues<VertexT>::InvalidValue; if (this->flag & Enable_Idempotence) { VertexT mask_pos = src_ / (8 * sizeof(MaskT)); data_slices[gpu]->visited_masks[mask_pos] = 1 << (src_ % (8 * sizeof(MaskT))); } } if (target & util::DEVICE) { GUARD_CU(util::SetDevice(this->gpu_idx[gpu])); GUARD_CU2(cudaDeviceSynchronize(), "cudaDeviceSynchronize failed"); GUARD_CU(data_slices[gpu]->labels.ForAll( [src_] __host__ __device__(LabelT * labels, const SizeT &v) { labels[src_] = 0; }, 1, util::DEVICE)); if (this->flag & Mark_Predecessors) { GUARD_CU(data_slices[gpu]->preds.ForAll( [src_] __host__ __device__(VertexT * preds, const SizeT &v) { preds[src_] = util::PreDefinedValues<VertexT>::InvalidValue; }, 1, util::DEVICE)); } if (this->flag & Enable_Idempotence) { VertexT mask_pos = src_ / (8 * sizeof(MaskT)); GUARD_CU(data_slices[gpu]->visited_masks.ForAll( [mask_pos, src_] __host__ __device__(MaskT * masks, const SizeT &v) { masks[mask_pos] = 1 << (src_ % (8 * sizeof(MaskT))); }, 1, util::DEVICE)); } GUARD_CU2(cudaDeviceSynchronize(), "cudaDeviceSynchronize failed"); } return retval; } // end of reset /** @} */ }; // end of problem } // namespace bfs } // namespace app } // namespace gunrock // Leave this at the end of the file // Local Variables: // mode:c++ // c-file-style: "NVIDIA" // End:
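// ----------------------------------------------------------------------------
// Illustrative usage sketch (added for clarity, not part of the original
// header): the intended call order for the Problem structure defined above,
// using only members declared in this file (Init / Reset / Extract / Release).
// The enactor that actually advances the BFS frontiers lives elsewhere in
// Gunrock and is omitted; GraphT, the parameter setup and the host arrays are
// assumed to be provided by the surrounding application code.
template <typename GraphT, typename LabelT, typename VertexT>
cudaError_t RunBfsProblemSketch(gunrock::util::Parameters &parameters,
                                GraphT &graph, VertexT src,
                                LabelT *h_labels, VertexT *h_preds) {
  cudaError_t retval = cudaSuccess;
  gunrock::app::bfs::Problem<GraphT, LabelT> problem(parameters);
  GUARD_CU(problem.Init(graph, gunrock::util::DEVICE));   // allocate per-GPU data slices
  GUARD_CU(problem.Reset(src, gunrock::util::DEVICE));    // labels = MaxValue, labels[src] = 0
  // ... run the BFS enactor here (not defined in this header) ...
  GUARD_CU(problem.Extract(h_labels, h_preds));           // copy labels/preds back to host
  GUARD_CU(problem.Release());
  return retval;
}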
namespace cgbn { #if 1 template<class env> __device__ __forceinline__ void core_t<env>::mont_mul(uint32_t r[LIMBS], const uint32_t a[LIMBS], const uint32_t b[LIMBS], const uint32_t n[LIMBS], const uint32_t np0) { uint32_t sync=sync_mask(), group_thread=threadIdx.x & TPI-1; uint32_t t, t0, t1, q, r1, ra[LIMBS+2], ru[LIMBS+1], c=0; #pragma unroll for(int32_t index=0;index<=LIMBS;index++) { ra[index]=0; ru[index]=0; } ra[LIMBS+1]=0; #pragma nounroll for(int32_t thread=0;thread<TPI;thread+=2) { #pragma unroll for(int32_t word=0;word<2*LIMBS;word+=2) { if(word<LIMBS) t0=__shfl_sync(sync, b[word], thread, TPI); else t0=__shfl_sync(sync, b[word-LIMBS], thread+1, TPI); if(word+1<LIMBS) t1=__shfl_sync(sync, b[word+1], thread, TPI); else t1=__shfl_sync(sync, b[word+1-LIMBS], thread+1, TPI); /* FIRST HALF */ chain_t<> chain1; // unaligned: T0 * A_odd ra[0]=chain1.add(ra[0], c); #pragma unroll for(int32_t index=0;index<LIMBS-1;index+=2) { ru[index]=chain1.madlo(a[index+1], t0, ru[index]); ru[index+1]=chain1.madhi(a[index+1], t0, ru[index+1]); } if(LIMBS%2==1) ru[LIMBS-1]=chain1.add(ru[LIMBS-1], 0); chain_t<> chain2; // aligned: T0 * A_even #pragma unroll for(int32_t index=0;index<LIMBS;index+=2) { ra[index]=chain2.madlo(a[index], t0, ra[index]); ra[index+1]=chain2.madhi(a[index], t0, ra[index+1]); } if(LIMBS%2==0) ra[LIMBS]=chain2.add(ra[LIMBS], 0); chain_t<> chain3; // aligned: Q0 * N_even q=__shfl_sync(sync, ra[0], 0, TPI)*np0; #pragma unroll for(int32_t index=0;index<LIMBS;index+=2) { ra[index]=chain3.madlo(n[index], q, ra[index]); ra[index+1]=chain3.madhi(n[index], q, ra[index+1]); } ra[LIMBS+LIMBS%2]=chain3.add(ra[LIMBS+LIMBS%2], 0); chain_t<> chain4; // unaligned: Q0 * N_odd #pragma unroll for(int32_t index=0;index<LIMBS-1;index+=2) { ru[index]=chain4.madlo(n[index+1], q, ru[index]); ru[index+1]=chain4.madhi(n[index+1], q, ru[index+1]); } ru[LIMBS-LIMBS%2]=chain4.add(ru[LIMBS-LIMBS%2], 0); /* SECOND HALF */ t0=ra[0]; chain_t<> chain5; // unaigned: T1 * A_even #pragma unroll for(int32_t index=0;index<LIMBS;index+=2) { ru[index]=chain5.madlo(a[index], t1, ru[index]); ru[index+1]=chain5.madhi(a[index], t1, ru[index+1]); } ru[LIMBS-LIMBS%2]=chain5.add(ru[LIMBS-LIMBS%2], 0); chain_t<> chain6; // aligned: T1 * A_odd #pragma unroll for(int32_t index=0;index<LIMBS-1;index+=2) { ra[index+2]=chain6.madlo(a[index+1], t1, ra[index+2]); ra[index+3]=chain6.madhi(a[index+1], t1, ra[index+3]); } if(LIMBS%2==1) ra[LIMBS+1]=chain3.add(ra[LIMBS+1], 0); chain_t<> chain7; // aligned: Q1 * N_odd ru[0]=chain7.add(ru[0], ra[1]); q=__shfl_sync(sync, ru[0], 0, TPI)*np0; #pragma unroll for(int32_t index=0;index<(int32_t)LIMBS-3;index+=2) { ra[index]=chain7.madlo(n[index+1], q, ra[index+2]); ra[index+1]=chain7.madhi(n[index+1], q, ra[index+3]); } ra[LIMBS-2-LIMBS%2]=chain3.madlo(n[LIMBS-1-LIMBS%2], q, ra[LIMBS-LIMBS%2]); ra[LIMBS-1-LIMBS%2]=chain3.madhi(n[LIMBS-1-LIMBS%2], q, ra[LIMBS+1-LIMBS%2]); if(LIMBS%2==1) { ra[LIMBS-1]=chain3.add(ra[LIMBS+1], 0); ra[LIMBS]=0; } else ra[LIMBS-LIMBS%2]=chain3.add(0, 0); chain_t<> chain8; // unaigned: Q1 * N_even t1=chain8.madlo(n[0], q, ru[0]); c=chain8.madhi(n[0], q, ru[1]); #pragma unroll for(int32_t index=0;index<LIMBS-2;index+=2) { ru[index]=chain8.madlo(n[index+2], q, ru[index+2]); ru[index+1]=chain8.madhi(n[index+2], q, ru[index+3]); } if(LIMBS%2==1) ru[LIMBS-1]=chain8.add(0, 0); else { ru[LIMBS-2]=chain8.add(ru[LIMBS], 0); ru[LIMBS-1]=0; } ru[LIMBS]=0; t0=__shfl_sync(sync, t0, threadIdx.x+1, TPI); t1=__shfl_sync(sync, t1, threadIdx.x+1, TPI); ra[LIMBS-2]=add_cc(ra[LIMBS-2], 
t0); ra[LIMBS-1]=addc_cc(ra[LIMBS-1], t1); ra[LIMBS]=addc(ra[LIMBS], 0); ra[LIMBS+1]=0; } } chain_t<LIMBS+1> chain9; r[0]=chain9.add(ra[0], c); #pragma unroll for(int32_t index=1;index<LIMBS;index++) r[index]=chain9.add(ra[index], ru[index-1]); r1=chain9.add(ra[LIMBS], ru[LIMBS-1]); // r1:r0 <= 0x00000002 0xFFFFFFFD t=__shfl_up_sync(sync, r1, 1, TPI); // all but most significant thread clears r1 if(group_thread!=TPI-1) r1=0; if(group_thread==0) t=0; chain_t<LIMBS+1> chain10; r[0]=chain10.add(r[0], t); #pragma unroll for(int32_t index=1;index<LIMBS;index++) r[index]=chain10.add(r[index], 0); c=chain10.add(r1, 0); c=-fast_propagate_add(c, r); // compute -n t=n[0]-(group_thread==0); // n must be odd, so there is no chance for a carry ripple chain_t<LIMBS+1> chain11; r[0]=chain11.add(r[0], ~t & c); #pragma unroll for(int32_t index=1;index<LIMBS;index++) r[index]=chain11.add(r[index], ~n[index] & c); c=chain11.add(0, 0); fast_propagate_add(c, r); clear_padding(r); } #endif template<class env> __device__ __forceinline__ void core_t<env>::mont_reduce_wide(uint32_t r[LIMBS], const uint32_t lo[LIMBS], const uint32_t hi[LIMBS], const uint32_t n[LIMBS], const uint32_t np0, const bool zero) { uint32_t sync=sync_mask(), group_thread=threadIdx.x & TPI-1; uint32_t t0, t1, q, ra[LIMBS+2], ru[LIMBS+1], c=0, top; #pragma unroll for(int32_t index=0;index<LIMBS;index++) { ra[index]=lo[index]; ru[index]=0; } ra[LIMBS]=0; ru[LIMBS]=0; ra[LIMBS+1]=0; c=0; #pragma nounroll for(int32_t thread=0;thread<TPI;thread+=2) { #pragma unroll for(int32_t l=0;l<2*LIMBS;l+=2) { chain_t<> chain1; // unaligned: Q0 * N_odd ra[0]=chain1.add(ra[0], c); q=__shfl_sync(sync, ra[0], 0, TPI)*np0; #pragma unroll for(int32_t index=0;index<LIMBS-1;index+=2) { ru[index]=chain1.madlo(n[index+1], q, ru[index]); ru[index+1]=chain1.madhi(n[index+1], q, ru[index+1]); } if(LIMBS%2==1) ru[LIMBS-1]=chain1.add(0, 0); chain_t<> chain2; // aligned: Q0 * N_even #pragma unroll for(int32_t index=0;index<LIMBS;index+=2) { ra[index]=chain2.madlo(n[index], q, ra[index]); ra[index+1]=chain2.madhi(n[index], q, ra[index+1]); } if(LIMBS%2==0) ra[LIMBS]=chain2.add(ra[LIMBS], 0); t0=__shfl_sync(sync, ra[0], threadIdx.x+1, TPI); if(!zero) { if(l<LIMBS) top=__shfl_sync(sync, hi[l], thread, TPI); else top=__shfl_sync(sync, hi[l-LIMBS], thread+1, TPI); t0=(group_thread==TPI-1) ? top : t0; } chain_t<> chain3; // aligned: Q1 * N_odd ru[0]=chain3.add(ru[0], ra[1]); q=__shfl_sync(sync, ru[0], 0, TPI)*np0; #pragma unroll for(int32_t index=0;index<(int32_t)LIMBS-3;index+=2) { ra[index]=chain3.madlo(n[index+1], q, ra[index+2]); ra[index+1]=chain3.madhi(n[index+1], q, ra[index+3]); } ra[LIMBS-2-LIMBS%2]=chain3.madlo(n[LIMBS-1-LIMBS%2], q, ra[LIMBS-LIMBS%2]); ra[LIMBS-1-LIMBS%2]=chain3.madhi(n[LIMBS-1-LIMBS%2], q, ra[LIMBS+1-LIMBS%2]); if(LIMBS%2==1) ra[LIMBS-1]=chain3.add(0, 0); chain_t<> chain4; // unaligned: Q1 * N_even t1=chain4.madlo(n[0], q, ru[0]); c=chain4.madhi(n[0], q, ru[1]); #pragma unroll for(int32_t index=0;index<LIMBS-2;index+=2) { ru[index]=chain4.madlo(n[index+2], q, ru[index+2]); ru[index+1]=chain4.madhi(n[index+2], q, ru[index+3]); } if(LIMBS%2==1) ru[LIMBS-1]=0; else ru[LIMBS-2]=chain4.add(0, 0); ru[LIMBS-1+LIMBS%2]=0; t1=__shfl_sync(sync, t1, threadIdx.x+1, TPI); if(!zero) { if(l+1<LIMBS) top=__shfl_sync(sync, hi[l+1], thread, TPI); else top=__shfl_sync(sync, hi[l+1-LIMBS], thread+1, TPI); t1=(group_thread==TPI-1) ? 
top : t1; } ra[LIMBS-2]=add_cc(ra[LIMBS-2], t0); ra[LIMBS-1]=addc_cc(ra[LIMBS-1], t1); ra[LIMBS]=addc(0, 0); } } chain_t<> chain5; ra[0]=chain5.add(ra[0], c); #pragma unroll for(int32_t index=1;index<LIMBS;index++) ra[index]=chain5.add(ra[index], ru[index-1]); c=chain5.add(ra[LIMBS], 0); c=fast_propagate_add(c, ra); if(!zero && c!=0) { t0=n[0]-(group_thread==0); // n must be odd, so there is no chance for a carry ripple chain_t<LIMBS+1> chain3; ra[0]=chain3.add(ra[0], ~t0); #pragma unroll for(int32_t index=1;index<LIMBS;index++) ra[index]=chain3.add(ra[index], ~n[index]); c=chain3.add(0, 0); fast_propagate_add(c, ra); clear_padding(ra); } mpset<LIMBS>(r, ra); } #if 0 template<uint32_t LIMBS> __device__ __forceinline__ void fwmont_mul(uint32_t r[LIMBS], const uint32_t a[LIMBS], const uint32_t b[LIMBS], const uint32_t n[LIMBS], const uint32_t np0) { uint32_t sync=sync_mask(), group_thread=threadIdx.x & TPI-1; uint32_t t, t0, t1, q, r1, ra[LIMBS+2], ru[LIMBS+1], c=0; #pragma unroll for(int32_t index=0;index<=LIMBS;index++) { ra[index]=0; ru[index]=0; } ra[LIMBS+1]=0; #pragma nounroll for(int32_t thread=0;thread<TPI;thread+=2) { #pragma unroll for(int32_t word=0;word<2*LIMBS;word+=2) { if(word<LIMBS) t0=__shfl_sync(sync, b[word], thread, TPI); else t0=__shfl_sync(sync, b[word-LIMBS], thread+1, TPI); if(word+1<LIMBS) t1=__shfl_sync(sync, b[word+1], thread, TPI); else t1=__shfl_sync(sync, b[word+1-LIMBS], thread+1, TPI); /* FIRST HALF */ chain_t<> chain1; // aligned: T0 * A_even #pragma unroll for(int32_t index=0;index<LIMBS;index+=2) { ra[index]=chain1.madlo(a[index], t0, ra[index]); ra[index+1]=chain1.madhi(a[index], t0, ra[index+1]); } ra[LIMBS+LIMBS%2]=chain1.add(ra[LIMBS+LIMBS%2], 0); chain_t<> chain2; // unaligned: T0 * A_odd ra[0]=chain2.add(ra[0], c); #pragma unroll for(int32_t index=0;index<LIMBS-1;index+=2) { ru[index]=chain2.madlo(a[index+1], t0, ru[index]); ru[index+1]=chain2.madhi(a[index+1], t0, ru[index+1]); } ru[LIMBS-LIMBS%2]=chain2.add(ru[LIMBS-LIMBS%2], 0); q=__shfl_sync(sync, ra[0], 0, TPI)*np0; chain_t<> chain3; // aligned: Q0 * N_even #pragma unroll for(int32_t index=0;index<LIMBS;index+=2) { ra[index]=chain3.madlo(n[index], q, ra[index]); ra[index+1]=chain3.madhi(n[index], q, ra[index+1]); } t=__shfl_sync(sync, ra[0], threadIdx.x+1, TPI); if(LIMBS%2==0) ra[LIMBS]=chain3.add(ra[LIMBS], t); ra[LIMBS+1]=chain3.add(ra[LIMBS+1], 0); chain_t<> chain4; // unaligned: Q0 * N_odd #pragma unroll for(int32_t index=0;index<LIMBS-1;index+=2) { ru[index]=chain4.madlo(n[index+1], q, ru[index]); ru[index+1]=chain4.madhi(n[index+1], q, ru[index+1]); } if(LIMBS%2==1) ru[LIMBS-1]=chain4.add(ru[LIMBS-1], t); ru[LIMBS]=chain4.add(ru[LIMBS], 0); /* SECOND HALF */ chain_t<> chain5; // unaigned: T1 * A_even #pragma unroll for(int32_t index=0;index<LIMBS;index+=2) { ru[index]=chain5.madlo(a[index], t1, ru[index]); ru[index+1]=chain5.madhi(a[index], t1, ru[index+1]); } if(LIMBS%2==0) ru[LIMBS]=chain5.add(ru[LIMBS], 0); chain_t<> chain6; // aligned: T1 * A_odd ru[0]=chain6.add(ru[0], ra[1]); #pragma unroll for(int32_t index=0;index<LIMBS-1;index+=2) { ra[index+2]=chain6.madlo(a[index+1], t1, ra[index+2]); ra[index+3]=chain6.madhi(a[index+1], t1, ra[index+3]); } if(LIMBS%2==1) ra[LIMBS+1]=chain6.add(ra[LIMBS+1], 0); q=__shfl_sync(sync, ru[0], 0, TPI)*np0; chain_t<> chain7; // unaigned: Q1 * N_even t=chain7.madlo(n[0], q, ru[0]); c=chain7.madhi(n[0], q, ru[1]); #pragma unroll for(int32_t index=0;index<LIMBS-2;index+=2) { ru[index]=chain7.madlo(n[index+2], q, ru[index+2]); 
ru[index+1]=chain7.madhi(n[index+2], q, ru[index+3]); } t=__shfl_sync(sync, t, threadIdx.x+1, TPI); if(LIMBS%2==0) ru[LIMBS-2]=chain7.add(ru[LIMBS], t); ru[LIMBS-1]=chain7.add(0, 0); ru[LIMBS]=0; chain_t<> chain8; // aligned: Q1 * N_odd #pragma unroll for(int32_t index=0;index<LIMBS-1;index+=2) { ra[index]=chain8.madlo(n[index+1], q, ra[index+2]); ra[index+1]=chain8.madhi(n[index+1], q, ra[index+3]); } if(LIMBS%2==1) ra[LIMBS-1]=chain8.add(ra[LIMBS+1], t); ra[LIMBS]=chain8.add(0, 0); ra[LIMBS+1]=0; } } chain_t<LIMBS+1> chain9; r[0]=chain9.add(ra[0], c); #pragma unroll for(int32_t index=1;index<LIMBS;index++) r[index]=chain9.add(ra[index], ru[index-1]); r1=chain9.add(ra[LIMBS], ru[LIMBS-1]); // r1:r0 <= 0x00000002 0xFFFFFFFD t=__shfl_up_sync(sync, r1, 1, TPI); // all but most significant thread clears r1 if(group_thread!=TPI-1) r1=0; if(group_thread==0) t=0; chain_t<LIMBS+1> chain10; r[0]=chain10.add(r[0], t); #pragma unroll for(int32_t index=1;index<LIMBS;index++) r[index]=chain10.add(r[index], 0); c=chain10.add(r1, 0); c=-fast_propagate_add(c, r); // compute -n t=n[0]-(group_thread==0); // n must be odd, so there is no chance for a carry ripple chain_t<LIMBS+1> chain11; r[0]=chain11.add(r[0], ~t & c); #pragma unroll for(int32_t index=1;index<LIMBS;index++) r[index]=chain11.add(r[index], ~n[index] & c); c=chain11.add(0, 0); fast_propagate_add(c, r); clear_padding(r); } #endif } /* namespace cgbn */
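// ----------------------------------------------------------------------------
// Reference sketch (added for clarity, not part of CGBN): a scalar, one-word
// Montgomery multiplication on the host, showing what the cooperative mont_mul
// above computes for a thread group. With R = 2^64 the result is
// a * b * R^{-1} mod n, and np0 plays the role of -n^{-1} mod 2^64 (the real
// kernel works on 32-bit limbs with warp shuffles and carry chains, all
// omitted here). Relies on the compiler's unsigned __int128 extension and on
// uint64_t being available in this translation unit.
static inline uint64_t mont_mul_ref64(uint64_t a, uint64_t b, uint64_t n,
                                      uint64_t np0 /* -n^{-1} mod 2^64 */) {
  unsigned __int128 t = (unsigned __int128)a * b;              // double-wide product
  uint64_t m = (uint64_t)t * np0;                              // m = t * n' mod R
  unsigned __int128 u = (t + (unsigned __int128)m * n) >> 64;  // (t + m*n) / R
  uint64_t r = (uint64_t)u;
  return (u >= n) ? r - n : r;                                 // final conditional subtract
}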
namespace cg = cooperative_groups; #include <helper_cuda.h> //////////////////////////////////////////////////////////////////////////////// // A structure of 2D points (structure of arrays). //////////////////////////////////////////////////////////////////////////////// class Points { float *m_x; float *m_y; public: // Constructor. __host__ __device__ Points() : m_x(NULL), m_y(NULL) {} // Constructor. __host__ __device__ Points(float *x, float *y) : m_x(x), m_y(y) {} // Get a point. __host__ __device__ __forceinline__ float2 get_point(int idx) const { return make_float2(m_x[idx], m_y[idx]); } // Set a point. __host__ __device__ __forceinline__ void set_point(int idx, const float2 &p) { m_x[idx] = p.x; m_y[idx] = p.y; } // Set the pointers. __host__ __device__ __forceinline__ void set(float *x, float *y) { m_x = x; m_y = y; } }; //////////////////////////////////////////////////////////////////////////////// // A 2D bounding box //////////////////////////////////////////////////////////////////////////////// class Bounding_box { // Extreme points of the bounding box. float2 m_p_min; float2 m_p_max; public: // Constructor. Create a unit box. __host__ __device__ Bounding_box() { m_p_min = make_float2(0.0f, 0.0f); m_p_max = make_float2(1.0f, 1.0f); } // Compute the center of the bounding-box. __host__ __device__ void compute_center(float2 &center) const { center.x = 0.5f * (m_p_min.x + m_p_max.x); center.y = 0.5f * (m_p_min.y + m_p_max.y); } // The points of the box. __host__ __device__ __forceinline__ const float2 &get_max() const { return m_p_max; } __host__ __device__ __forceinline__ const float2 &get_min() const { return m_p_min; } // Does a box contain a point. __host__ __device__ bool contains(const float2 &p) const { return p.x >= m_p_min.x && p.x < m_p_max.x && p.y >= m_p_min.y && p.y < m_p_max.y; } // Define the bounding box. __host__ __device__ void set(float min_x, float min_y, float max_x, float max_y) { m_p_min.x = min_x; m_p_min.y = min_y; m_p_max.x = max_x; m_p_max.y = max_y; } }; //////////////////////////////////////////////////////////////////////////////// // A node of a quadree. //////////////////////////////////////////////////////////////////////////////// class Quadtree_node { // The identifier of the node. int m_id; // The bounding box of the tree. Bounding_box m_bounding_box; // The range of points. int m_begin, m_end; public: // Constructor. __host__ __device__ Quadtree_node() : m_id(0), m_begin(0), m_end(0) {} // The ID of a node at its level. __host__ __device__ int id() const { return m_id; } // The ID of a node at its level. __host__ __device__ void set_id(int new_id) { m_id = new_id; } // The bounding box. __host__ __device__ __forceinline__ const Bounding_box &bounding_box() const { return m_bounding_box; } // Set the bounding box. __host__ __device__ __forceinline__ void set_bounding_box(float min_x, float min_y, float max_x, float max_y) { m_bounding_box.set(min_x, min_y, max_x, max_y); } // The number of points in the tree. __host__ __device__ __forceinline__ int num_points() const { return m_end - m_begin; } // The range of points in the tree. __host__ __device__ __forceinline__ int points_begin() const { return m_begin; } __host__ __device__ __forceinline__ int points_end() const { return m_end; } // Define the range for that node. __host__ __device__ __forceinline__ void set_range(int begin, int end) { m_begin = begin; m_end = end; } }; //////////////////////////////////////////////////////////////////////////////// // Algorithm parameters. 
//////////////////////////////////////////////////////////////////////////////// struct Parameters { // Choose the right set of points to use as in/out. int point_selector; // The number of nodes at a given level (2^k for level k). int num_nodes_at_this_level; // The recursion depth. int depth; // The max value for depth. const int max_depth; // The minimum number of points in a node to stop recursion. const int min_points_per_node; // Constructor set to default values. __host__ __device__ Parameters(int max_depth, int min_points_per_node) : point_selector(0), num_nodes_at_this_level(1), depth(0), max_depth(max_depth), min_points_per_node(min_points_per_node) {} // Copy constructor. Changes the values for next iteration. __host__ __device__ Parameters(const Parameters &params, bool) : point_selector((params.point_selector + 1) % 2), num_nodes_at_this_level(4 * params.num_nodes_at_this_level), depth(params.depth + 1), max_depth(params.max_depth), min_points_per_node(params.min_points_per_node) {} }; //////////////////////////////////////////////////////////////////////////////// // Build a quadtree on the GPU. Use CUDA Dynamic Parallelism. // // The algorithm works as follows. The host (CPU) launches one block of // NUM_THREADS_PER_BLOCK threads. That block will do the following steps: // // 1- Check the number of points and its depth. // // We impose a maximum depth to the tree and a minimum number of points per // node. If the maximum depth is exceeded or the minimum number of points is // reached. The threads in the block exit. // // Before exiting, they perform a buffer swap if it is needed. Indeed, the // algorithm uses two buffers to permute the points and make sure they are // properly distributed in the quadtree. By design we want all points to be // in the first buffer of points at the end of the algorithm. It is the reason // why we may have to swap the buffer before leavin (if the points are in the // 2nd buffer). // // 2- Count the number of points in each child. // // If the depth is not too high and the number of points is sufficient, the // block has to dispatch the points into four geometrical buckets: Its // children. For that purpose, we compute the center of the bounding box and // count the number of points in each quadrant. // // The set of points is divided into sections. Each section is given to a // warp of threads (32 threads). Warps use __ballot and __popc intrinsics // to count the points. See the Programming Guide for more information about // those functions. // // 3- Scan the warps' results to know the "global" numbers. // // Warps work independently from each other. At the end, each warp knows the // number of points in its section. To know the numbers for the block, the // block has to run a scan/reduce at the block level. It's a traditional // approach. The implementation in that sample is not as optimized as what // could be found in fast radix sorts, for example, but it relies on the same // idea. // // 4- Move points. // // Now that the block knows how many points go in each of its 4 children, it // remains to dispatch the points. It is straightforward. // // 5- Launch new blocks. // // The block launches four new blocks: One per children. Each of the four blocks // will apply the same algorithm. 
//////////////////////////////////////////////////////////////////////////////// template <int NUM_THREADS_PER_BLOCK> __global__ void build_quadtree_kernel(Quadtree_node *nodes, Points *points, Parameters params) { // Handle to thread block group cg::thread_block cta = cg::this_thread_block(); // The number of warps in a block. const int NUM_WARPS_PER_BLOCK = NUM_THREADS_PER_BLOCK / warpSize; // Shared memory to store the number of points. extern __shared__ int smem[]; // s_num_pts[4][NUM_WARPS_PER_BLOCK]; // Addresses of shared memory. volatile int *s_num_pts[4]; for (int i = 0; i < 4; ++i) s_num_pts[i] = (volatile int *)&smem[i * NUM_WARPS_PER_BLOCK]; // Compute the coordinates of the threads in the block. const int warp_id = threadIdx.x / warpSize; const int lane_id = threadIdx.x % warpSize; // Mask for compaction. // Same as: asm( "mov.u32 %0, %%lanemask_lt;" : "=r"(lane_mask_lt) ); int lane_mask_lt = (1 << lane_id) - 1; // The current node. Quadtree_node &node = nodes[blockIdx.x]; // The number of points in the node. int num_points = node.num_points(); float2 center; int range_begin, range_end; int warp_cnts[4] = {0, 0, 0, 0}; // // 1- Check the number of points and its depth. // // Stop the recursion here. Make sure points[0] contains all the points. if (params.depth >= params.max_depth || num_points <= params.min_points_per_node) { if (params.point_selector == 1) { int it = node.points_begin(), end = node.points_end(); for (it += threadIdx.x; it < end; it += NUM_THREADS_PER_BLOCK) if (it < end) points[0].set_point(it, points[1].get_point(it)); } return; } // Compute the center of the bounding box of the points. const Bounding_box &bbox = node.bounding_box(); bbox.compute_center(center); // Find how many points to give to each warp. int num_points_per_warp = max( warpSize, (num_points + NUM_WARPS_PER_BLOCK - 1) / NUM_WARPS_PER_BLOCK); // Each warp of threads will compute the number of points to move to each // quadrant. range_begin = node.points_begin() + warp_id * num_points_per_warp; range_end = min(range_begin + num_points_per_warp, node.points_end()); // // 2- Count the number of points in each child. // // Input points. const Points &in_points = points[params.point_selector]; cg::thread_block_tile<32> tile32 = cg::tiled_partition<32>(cta); // Compute the number of points. for (int range_it = range_begin + tile32.thread_rank(); tile32.any(range_it < range_end); range_it += warpSize) { // Is it still an active thread? bool is_active = range_it < range_end; // Load the coordinates of the point. float2 p = is_active ? in_points.get_point(range_it) : make_float2(0.0f, 0.0f); // Count top-left points. int num_pts = __popc(tile32.ballot(is_active && p.x < center.x && p.y >= center.y)); warp_cnts[0] += tile32.shfl(num_pts, 0); // Count top-right points. num_pts = __popc(tile32.ballot(is_active && p.x >= center.x && p.y >= center.y)); warp_cnts[1] += tile32.shfl(num_pts, 0); // Count bottom-left points. num_pts = __popc(tile32.ballot(is_active && p.x < center.x && p.y < center.y)); warp_cnts[2] += tile32.shfl(num_pts, 0); // Count bottom-right points. num_pts = __popc(tile32.ballot(is_active && p.x >= center.x && p.y < center.y)); warp_cnts[3] += tile32.shfl(num_pts, 0); } if (tile32.thread_rank() == 0) { s_num_pts[0][warp_id] = warp_cnts[0]; s_num_pts[1][warp_id] = warp_cnts[1]; s_num_pts[2][warp_id] = warp_cnts[2]; s_num_pts[3][warp_id] = warp_cnts[3]; } // Make sure warps have finished counting. cg::sync(cta); // // 3- Scan the warps' results to know the "global" numbers. 
// // First 4 warps scan the numbers of points per child (inclusive scan). if (warp_id < 4) { int num_pts = tile32.thread_rank() < NUM_WARPS_PER_BLOCK ? s_num_pts[warp_id][tile32.thread_rank()] : 0; #pragma unroll for (int offset = 1; offset < NUM_WARPS_PER_BLOCK; offset *= 2) { int n = tile32.shfl_up(num_pts, offset); if (tile32.thread_rank() >= offset) num_pts += n; } if (tile32.thread_rank() < NUM_WARPS_PER_BLOCK) s_num_pts[warp_id][tile32.thread_rank()] = num_pts; } cg::sync(cta); // Compute global offsets. if (warp_id == 0) { int sum = s_num_pts[0][NUM_WARPS_PER_BLOCK - 1]; for (int row = 1; row < 4; ++row) { int tmp = s_num_pts[row][NUM_WARPS_PER_BLOCK - 1]; cg::sync(tile32); if (tile32.thread_rank() < NUM_WARPS_PER_BLOCK) s_num_pts[row][tile32.thread_rank()] += sum; cg::sync(tile32); sum += tmp; } } cg::sync(cta); // Make the scan exclusive. int val = 0; if (threadIdx.x < 4 * NUM_WARPS_PER_BLOCK) { val = threadIdx.x == 0 ? 0 : smem[threadIdx.x - 1]; val += node.points_begin(); } cg::sync(cta); if (threadIdx.x < 4 * NUM_WARPS_PER_BLOCK) { smem[threadIdx.x] = val; } cg::sync(cta); // // 4- Move points. // if (!(params.depth >= params.max_depth || num_points <= params.min_points_per_node)) { // Output points. Points &out_points = points[(params.point_selector + 1) % 2]; warp_cnts[0] = s_num_pts[0][warp_id]; warp_cnts[1] = s_num_pts[1][warp_id]; warp_cnts[2] = s_num_pts[2][warp_id]; warp_cnts[3] = s_num_pts[3][warp_id]; const Points &in_points = points[params.point_selector]; // Reorder points. for (int range_it = range_begin + tile32.thread_rank(); tile32.any(range_it < range_end); range_it += warpSize) { // Is it still an active thread? bool is_active = range_it < range_end; // Load the coordinates of the point. float2 p = is_active ? in_points.get_point(range_it) : make_float2(0.0f, 0.0f); // Count top-left points. bool pred = is_active && p.x < center.x && p.y >= center.y; int vote = tile32.ballot(pred); int dest = warp_cnts[0] + __popc(vote & lane_mask_lt); if (pred) out_points.set_point(dest, p); warp_cnts[0] += tile32.shfl(__popc(vote), 0); // Count top-right points. pred = is_active && p.x >= center.x && p.y >= center.y; vote = tile32.ballot(pred); dest = warp_cnts[1] + __popc(vote & lane_mask_lt); if (pred) out_points.set_point(dest, p); warp_cnts[1] += tile32.shfl(__popc(vote), 0); // Count bottom-left points. pred = is_active && p.x < center.x && p.y < center.y; vote = tile32.ballot(pred); dest = warp_cnts[2] + __popc(vote & lane_mask_lt); if (pred) out_points.set_point(dest, p); warp_cnts[2] += tile32.shfl(__popc(vote), 0); // Count bottom-right points. pred = is_active && p.x >= center.x && p.y < center.y; vote = tile32.ballot(pred); dest = warp_cnts[3] + __popc(vote & lane_mask_lt); if (pred) out_points.set_point(dest, p); warp_cnts[3] += tile32.shfl(__popc(vote), 0); } } cg::sync(cta); if (tile32.thread_rank() == 0) { s_num_pts[0][warp_id] = warp_cnts[0]; s_num_pts[1][warp_id] = warp_cnts[1]; s_num_pts[2][warp_id] = warp_cnts[2]; s_num_pts[3][warp_id] = warp_cnts[3]; } cg::sync(cta); // // 5- Launch new blocks. // if (!(params.depth >= params.max_depth || num_points <= params.min_points_per_node)) { // The last thread launches new blocks. if (threadIdx.x == NUM_THREADS_PER_BLOCK - 1) { // The children. Quadtree_node *children = &nodes[params.num_nodes_at_this_level - (node.id() & ~3)]; // The offsets of the children at their level. int child_offset = 4 * node.id(); // Set IDs. 
children[child_offset + 0].set_id(4 * node.id() + 0); children[child_offset + 1].set_id(4 * node.id() + 1); children[child_offset + 2].set_id(4 * node.id() + 2); children[child_offset + 3].set_id(4 * node.id() + 3); const Bounding_box &bbox = node.bounding_box(); // Points of the bounding-box. const float2 &p_min = bbox.get_min(); const float2 &p_max = bbox.get_max(); // Set the bounding boxes of the children. children[child_offset + 0].set_bounding_box(p_min.x, center.y, center.x, p_max.y); // Top-left. children[child_offset + 1].set_bounding_box(center.x, center.y, p_max.x, p_max.y); // Top-right. children[child_offset + 2].set_bounding_box(p_min.x, p_min.y, center.x, center.y); // Bottom-left. children[child_offset + 3].set_bounding_box(center.x, p_min.y, p_max.x, center.y); // Bottom-right. // Set the ranges of the children. children[child_offset + 0].set_range(node.points_begin(), s_num_pts[0][warp_id]); children[child_offset + 1].set_range(s_num_pts[0][warp_id], s_num_pts[1][warp_id]); children[child_offset + 2].set_range(s_num_pts[1][warp_id], s_num_pts[2][warp_id]); children[child_offset + 3].set_range(s_num_pts[2][warp_id], s_num_pts[3][warp_id]); // Launch 4 children. build_quadtree_kernel<NUM_THREADS_PER_BLOCK><<< 4, NUM_THREADS_PER_BLOCK, 4 * NUM_WARPS_PER_BLOCK * sizeof(int)>>>( &children[child_offset], points, Parameters(params, true)); } } } //////////////////////////////////////////////////////////////////////////////// // Make sure a Quadtree is properly defined. //////////////////////////////////////////////////////////////////////////////// bool check_quadtree(const Quadtree_node *nodes, int idx, int num_pts, Points *pts, Parameters params) { const Quadtree_node &node = nodes[idx]; int num_points = node.num_points(); if (!(params.depth == params.max_depth || num_points <= params.min_points_per_node)) { int num_points_in_children = 0; num_points_in_children += nodes[params.num_nodes_at_this_level + 4 * idx + 0].num_points(); num_points_in_children += nodes[params.num_nodes_at_this_level + 4 * idx + 1].num_points(); num_points_in_children += nodes[params.num_nodes_at_this_level + 4 * idx + 2].num_points(); num_points_in_children += nodes[params.num_nodes_at_this_level + 4 * idx + 3].num_points(); if (num_points_in_children != node.num_points()) return false; return check_quadtree(&nodes[params.num_nodes_at_this_level], 4 * idx + 0, num_pts, pts, Parameters(params, true)) && check_quadtree(&nodes[params.num_nodes_at_this_level], 4 * idx + 1, num_pts, pts, Parameters(params, true)) && check_quadtree(&nodes[params.num_nodes_at_this_level], 4 * idx + 2, num_pts, pts, Parameters(params, true)) && check_quadtree(&nodes[params.num_nodes_at_this_level], 4 * idx + 3, num_pts, pts, Parameters(params, true)); } const Bounding_box &bbox = node.bounding_box(); for (int it = node.points_begin(); it < node.points_end(); ++it) { if (it >= num_pts) return false; float2 p = pts->get_point(it); if (!bbox.contains(p)) return false; } return true; } //////////////////////////////////////////////////////////////////////////////// // Parallel random number generator. 
//////////////////////////////////////////////////////////////////////////////// struct Random_generator { int count; __host__ __device__ Random_generator() : count(0) {} __host__ __device__ unsigned int hash(unsigned int a) { a = (a + 0x7ed55d16) + (a << 12); a = (a ^ 0xc761c23c) ^ (a >> 19); a = (a + 0x165667b1) + (a << 5); a = (a + 0xd3a2646c) ^ (a << 9); a = (a + 0xfd7046c5) + (a << 3); a = (a ^ 0xb55a4f09) ^ (a >> 16); return a; } __host__ __device__ __forceinline__ thrust::tuple<float, float> operator()() { #ifdef __CUDA_ARCH__ unsigned seed = hash(blockIdx.x * blockDim.x + threadIdx.x + count); // thrust::generate may call operator() more than once per thread. // Hence, increment count by grid size to ensure uniqueness of seed count += blockDim.x * gridDim.x; #else unsigned seed = hash(0); #endif thrust::default_random_engine rng(seed); thrust::random::uniform_real_distribution<float> distrib; return thrust::make_tuple(distrib(rng), distrib(rng)); } }; //////////////////////////////////////////////////////////////////////////////// // Allocate GPU structs, launch kernel and clean up //////////////////////////////////////////////////////////////////////////////// bool cdpQuadtree(int warp_size) { // Constants to control the algorithm. const int num_points = 1024; const int max_depth = 8; const int min_points_per_node = 16; // Allocate memory for points. thrust::device_vector<float> x_d0(num_points); thrust::device_vector<float> x_d1(num_points); thrust::device_vector<float> y_d0(num_points); thrust::device_vector<float> y_d1(num_points); // Generate random points. Random_generator rnd; thrust::generate( thrust::make_zip_iterator(thrust::make_tuple(x_d0.begin(), y_d0.begin())), thrust::make_zip_iterator(thrust::make_tuple(x_d0.end(), y_d0.end())), rnd); // Host structures to analyze the device ones. Points points_init[2]; points_init[0].set(thrust::raw_pointer_cast(&x_d0[0]), thrust::raw_pointer_cast(&y_d0[0])); points_init[1].set(thrust::raw_pointer_cast(&x_d1[0]), thrust::raw_pointer_cast(&y_d1[0])); // Allocate memory to store points. Points *points; checkCudaErrors(cudaMalloc((void **)&points, 2 * sizeof(Points))); checkCudaErrors(cudaMemcpy(points, points_init, 2 * sizeof(Points), cudaMemcpyHostToDevice)); // We could use a close form... int max_nodes = 0; for (int i = 0, num_nodes_at_level = 1; i < max_depth; ++i, num_nodes_at_level *= 4) max_nodes += num_nodes_at_level; // Allocate memory to store the tree. Quadtree_node root; root.set_range(0, num_points); Quadtree_node *nodes; checkCudaErrors( cudaMalloc((void **)&nodes, max_nodes * sizeof(Quadtree_node))); checkCudaErrors( cudaMemcpy(nodes, &root, sizeof(Quadtree_node), cudaMemcpyHostToDevice)); // We set the recursion limit for CDP to max_depth. cudaDeviceSetLimit(cudaLimitDevRuntimeSyncDepth, max_depth); // Build the quadtree. Parameters params(max_depth, min_points_per_node); std::cout << "Launching CDP kernel to build the quadtree" << std::endl; const int NUM_THREADS_PER_BLOCK = 128; // Do not use less than 128 threads. const int NUM_WARPS_PER_BLOCK = NUM_THREADS_PER_BLOCK / warp_size; const size_t smem_size = 4 * NUM_WARPS_PER_BLOCK * sizeof(int); build_quadtree_kernel< NUM_THREADS_PER_BLOCK><<<1, NUM_THREADS_PER_BLOCK, smem_size>>>( nodes, points, params); checkCudaErrors(cudaGetLastError()); // Copy points to CPU. thrust::host_vector<float> x_h(x_d0); thrust::host_vector<float> y_h(y_d0); Points host_points; host_points.set(thrust::raw_pointer_cast(&x_h[0]), thrust::raw_pointer_cast(&y_h[0])); // Copy nodes to CPU. 
Quadtree_node *host_nodes = new Quadtree_node[max_nodes]; checkCudaErrors(cudaMemcpy(host_nodes, nodes, max_nodes * sizeof(Quadtree_node), cudaMemcpyDeviceToHost)); // Validate the results. bool ok = check_quadtree(host_nodes, 0, num_points, &host_points, params); std::cout << "Results: " << (ok ? "OK" : "FAILED") << std::endl; // Free CPU memory. delete[] host_nodes; // Free memory. checkCudaErrors(cudaFree(nodes)); checkCudaErrors(cudaFree(points)); return ok; } //////////////////////////////////////////////////////////////////////////////// // Main entry point. //////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { // Find/set the device. // The test requires an architecture SM35 or greater (CDP capable). int cuda_device = findCudaDevice(argc, (const char **)argv); cudaDeviceProp deviceProps; checkCudaErrors(cudaGetDeviceProperties(&deviceProps, cuda_device)); int cdpCapable = (deviceProps.major == 3 && deviceProps.minor >= 5) || deviceProps.major >= 4; printf("GPU device %s has compute capabilities (SM %d.%d)\n", deviceProps.name, deviceProps.major, deviceProps.minor); if (!cdpCapable) { std::cerr << "cdpQuadTree requires SM 3.5 or higher to use CUDA Dynamic " "Parallelism. Exiting...\n" << std::endl; exit(EXIT_WAIVED); } bool ok = cdpQuadtree(deviceProps.warpSize); return (ok ? EXIT_SUCCESS : EXIT_FAILURE); }
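// ----------------------------------------------------------------------------
// Note (added for clarity, not part of the original sample): the loop in
// cdpQuadtree that accumulates max_nodes sums the geometric series
//   1 + 4 + 4^2 + ... + 4^(max_depth-1) = (4^max_depth - 1) / 3,
// so the "closed form" hinted at in that comment could be written as:
static inline int max_quadtree_nodes(int max_depth) {
  // 4^max_depth == 1 << (2 * max_depth); fine for the small depths used here.
  return ((1 << (2 * max_depth)) - 1) / 3;
}
// For max_depth = 8 this yields (65536 - 1) / 3 = 21845 nodes, matching the loop.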
// DownSampleImage.cu // 实现对图像的缩小处理 #include <iostream> using namespace std; #include "DownSampleImage.h" #include "ErrorCode.h" #include "stdio.h" #include "time.h" #include "stdlib.h" #include "curand.h" #include "curand_kernel.h" // 宏:DEF_BLOCK_X 和 DEF_BLOCK_Y // 定义了默认的线程块尺寸。 #define DEF_BLOCK_X 32 #define DEF_BLOCK_Y 8 // 宏:METHODSELECT_THRES // 定义了图像缩小倍数 3,根据这个临界值采用不同的 Kernel 函数处理方式。 #define METHODSELECT_THRES 3 // Kernel 函数:_downImgbyDomLessKer(优势法缩小图像) // 根据给定的缩小倍数 N,将输入图像缩小,将其尺寸从 width * height 变成 // (width / N) * (height / N) static __global__ void // Kernel 函数无返回值 _downImgbyDomLessKer( ImageCuda inimg, // 输入图像 ImageCuda outimg, // 输出图像 int times, // 缩小倍数 int *alldevicepointer // 数组指针 ); // Kernel 函数:_downImgbyDomGreaterKer(优势法缩小图像) // 根据给定的缩小倍数 N,将输入图像缩小,将其尺寸从 width * height 变成 // (width / N) * (height / N) static __global__ void // Kernel 函数无返回值 _downImgbyDomGreaterKer( ImageCuda inimg, // 输入图像 ImageCuda outimg, // 输出图像 int times // 缩小倍数 ); // Kernel 函数:_genRandomKer(生成随机数) // 在 Device 端生成一个跟输出图片大小一样的随机数矩阵,用于概率法 // 缩小图像。 static __global__ void _genRandomKer( int *randnumdev, // 随机数矩阵 int times, // 缩小倍数 int time, // 时间参数 int width // 随机数矩阵的宽度 ); // Kernel 函数:_downImgbyProKer(概率法缩小图像) // 根据给定的缩小倍数 N,用概率法将输入图像缩小,将其尺寸从 width * height 变成 // (width / N) * (height / N)。 static __global__ void // Kernel 函数无返回值 _downImgbyProKer( ImageCuda inimg, // 输入图像 ImageCuda outimg, // 输出图像 int *randnumdev, // 随机数矩阵 int times // 缩小倍数 ); // Kernel 函数:_downImgbyDomLessKer(用优势法缩小图像) static __global__ void _downImgbyDomLessKer( ImageCuda inimg, ImageCuda outimg, int times, int *alldevicepointer) { // 计算当前线程的位置。 int c = blockIdx.x * blockDim.x + threadIdx.x; int r = blockIdx.y * blockDim.y + threadIdx.y; // 对应输入图像的位置 int inc = times * (c + 1) - 1; int inr = times * (r + 1) - 1; // 计算当前像素的领域大小 int pixnum = (2 * times - 1) * (2 * times - 1); // 获取当前线程中存放像素值的区域指针。 int *pixel = alldevicepointer + (r * outimg.imgMeta.width + c) * pixnum; // 获取当前线程中存放像素值个数的区域指针。 int *count = alldevicepointer + pixnum * outimg.imgMeta.width * outimg.imgMeta.height + (r * outimg.imgMeta.width + c) * pixnum; // 定义变量 int i, j; unsigned char *outptr; unsigned char curvalue; // 检查第一个像素点是否越界,如果越界,则不进行处理,一方面节省计算 // 资源,另一方面防止由于段错误导致程序崩溃。 if (r >= outimg.imgMeta.height || c >= outimg.imgMeta.width) return; // 得到输出图像当前像素的位置。 outptr = outimg.imgMeta.imgData + c + r * outimg.pitchBytes; // 中间数组,用来临时记录下标 int flag[256]; // 为数组 flag 赋初值 for(int i = 0; i < 256; i++) flag[i] = -1; // pixel 数组的下标 int m = 0; // 对当前像素的 (2 * times - 1) * (2 * times - 1)领域,计算出 // 各像素值以及相应的个数。 for (j = inr - (times - 1); j <= inr + (times - 1); j++) { for (i = inc - (times - 1); i <= inc + (times - 1); i++) { // 判断当前像素是否越界。 if (j >= 0 && j < inimg.imgMeta.height && i >= 0 && i < inimg.imgMeta.width) { // 得到当前位置的像素值。 curvalue = *(inimg.imgMeta.imgData + i + j * inimg.pitchBytes); // 如果当前像素值的 flag 为 -1,即说明该像素值在邻域 // 中没有出现过,则对该像素值进行数量统计,并把标记 flag // 赋值为当前位置,从而建立一个索引。 if (flag[curvalue] == -1) { pixel[m] = curvalue; flag[curvalue] = m; count[m]++; m++; } else { // 如果当年像素值的 flag 不为 -1,则找到当前像素值 // 对应的索引值,并把计数器中该位置的数字加 1。 count[flag[curvalue]]++; } } } } // 选出领域内像素值个数最多的三个。 // 声明局部变量。 int p, q; int maxest; int maxindex; int tempmax[3], tempindex[3]; // 使用选择排序,找到最大的 3 个像素值。 for (p = 0; p < 3; p++) { // 暂存计数器中的第一个数据,以及对应的索引。 maxest = count[0]; maxindex = 0; // 对于邻域中所有值,与暂存的值进行比较,从而找到最大值。 for (q = 1; q < pixnum; q++) { if (count[q] > maxest) { maxest = count[q]; maxindex = q; } } // 记录下找到的最大值以及索引,并把计数器中的 // 最大值位置清 0。 tempmax[p] = maxest; tempindex[p] = maxindex; 
count[maxindex] = 0; } // 求这 3 个像素峰值的加权平均值,并四舍五入取整。 int v; int sum = tempmax[0] + tempmax[1] + tempmax[2]; v = (pixel[tempindex[0]] * tempmax[0] + pixel[tempindex[1]] * tempmax[1] + pixel[tempindex[2]] * tempmax[2] + (sum >> 1)) / sum; // 将用优势法计算出的像素值赋给输出图像 *outptr = v; } // Kernel 函数:_downImgbyDomGreaterKer(用优势法缩小图像) static __global__ void _downImgbyDomGreaterKer( ImageCuda inimg, ImageCuda outimg, int times) { // 计算当前线程的位置。 int c = blockIdx.x * blockDim.x + threadIdx.x; int r = blockIdx.y * blockDim.y + threadIdx.y; // 对应输入图像的位置 int inc = times * (c + 1) - 1; int inr = times * (r + 1) - 1; // 定义变量 int i, j; unsigned char *outptr; unsigned char curvalue; // 检查第一个像素点是否越界,如果越界,则不进行处理, // 一方面节省计算资源,另一方面防止由于段错误导致程序崩溃。 if (r >= outimg.imgMeta.height || c >= outimg.imgMeta.width) return; // 得到输出图像当前像素的位置。 outptr = outimg.imgMeta.imgData + c + r * outimg.pitchBytes; // 定义数组,下标代表图像灰度值,数组里存相应的个数 int count[256] = { 0 }; // 对当前像素的 (2 * times - 1) * (2 * times - 1)领域,计算出 // 各像素值以及相应的个数。 for (j = inr - (times - 1); j <= inr + (times - 1); j++) { for (i = inc - (times - 1); i <= inc + (times - 1); i++) { // 判断当前像素是否越界。 if (j >= 0 && j < inimg.imgMeta.height && i >= 0 && i < inimg.imgMeta.width) { // 得到当前位置的像素值。 curvalue = *(inimg.imgMeta.imgData + i + j * inimg.pitchBytes); // 当前像素值的计数器加 1。 count[curvalue]++; } } } // 选出领域内像素值个数最多的三个。 // 声明局部变量。 int p, q; int maxest; int maxindex; int tempmax[3], tempindex[3]; // 使用选择排序,找到最大的 3 个像素值。 for (p = 0; p < 3; p++) { // 暂存计数器中的第一个数据,以及对应的索引。 maxest = count[0]; maxindex = 0; // 对于邻域中所有值,与暂存的值进行比较,从而找到最大值。 for (q = 1; q < 256; q++) { if (count[q] > maxest) { maxest = count[q]; maxindex = q; } } // 记录下找到的最大值以及索引,并把计数器中的最大值位置清 0。 tempmax[p] = maxest; tempindex[p] = maxindex; count[maxindex] = 0; } // 求这 3 个像素峰值的加权平均值,并四舍五入取整。 int v; int sum = tempmax[0] + tempmax[1] + tempmax[2]; v = (tempindex[0] * tempmax[0] + tempindex[1] * tempmax[1] + tempindex[2] * tempmax[2] + (sum >> 1)) / sum; // 将用优势法计算出的像素值赋给输出图像 *outptr = v; } // Host 成员方法:dominanceDownSImg(优势法图像缩小处理) __host__ int DownSampleImage::dominanceDownSImg(Image *inimg, Image *outimg) { // 检查输入图像,输出图像是否为空 if (inimg == NULL || outimg == NULL ) return NULL_POINTER; // 判断缩小倍数是否合理 if (times <= 1) return INVALID_DATA; // 这一段代码进行图像的预处理工作。图像的预处理主要完成 // 在 Device 内存上为输入和输出图像准备内存空间,以便盛放数据。 int errcode; // 局部变量,错误码 // 将输入图像拷贝到 Device 内存中。 errcode = ImageBasicOp::copyToCurrentDevice(inimg); if (errcode != NO_ERROR) return errcode; // 将输出图像拷贝入 Device 内存。 errcode = ImageBasicOp::copyToCurrentDevice(outimg); if (errcode != NO_ERROR) { // 如果输出图像无数据(故上面的拷贝函数会失败),则会创建 // 一个和输入图像的 ROI 子图像缩小 times 倍后尺寸相同的图像。 int outwidth = (inimg->roiX2 - inimg->roiX1) / times; int outheight = (inimg->roiY2 - inimg->roiY1) / times; errcode = ImageBasicOp::makeAtCurrentDevice( outimg, outwidth, outheight); // 如果创建图像也操作失败,则说明操作彻底失败,报错退出。 if (errcode != NO_ERROR) return errcode; } // 提取输入图像的 ROI 子图像。 ImageCuda insubimgCud; errcode = ImageBasicOp::roiSubImage(inimg, &insubimgCud); if (errcode != NO_ERROR) return errcode; // 提取输出图像的 ROI 子图像。 ImageCuda outsubimgCud; errcode = ImageBasicOp::roiSubImage(outimg, &outsubimgCud); if (errcode != NO_ERROR) return errcode; // 根据子图像的大小对长,宽进行调整,选择长度小的长,宽进行子图像的统一 if (insubimgCud.imgMeta.width > outsubimgCud.imgMeta.width * times) insubimgCud.imgMeta.width = outsubimgCud.imgMeta.width * times; else outsubimgCud.imgMeta.width = insubimgCud.imgMeta.width / times; if (insubimgCud.imgMeta.height > outsubimgCud.imgMeta.height * times) insubimgCud.imgMeta.height = outsubimgCud.imgMeta.height * times; else 
outsubimgCud.imgMeta.height = insubimgCud.imgMeta.height / times; // 计算每个像素点需要处理的邻域的大小。 int devsize = (2 * times - 1) * (2 * times - 1); // 得到输出图像的大小。 int size = outsubimgCud.imgMeta.width * outsubimgCud.imgMeta.height; // 声明局部变量。 int *alldevicepointer; cudaError_t cudaerrcode; // 一次性申请全部的 device 端的内存空间。 cudaerrcode = cudaMalloc( (void **)&alldevicepointer, 2 * devsize * size * sizeof (int)); if (cudaerrcode != cudaSuccess) return cudaerrcode; // 初始化所有 Device 上的内存空间。 cudaerrcode = cudaMemset( alldevicepointer, 0, 2 * devsize * size * sizeof (int)); if (cudaerrcode != cudaSuccess) { cudaFree(alldevicepointer); return cudaerrcode; } // 计算调用 Kernel 函数的线程块的尺寸和线程块的数量。 dim3 blocksize, gridsize; blocksize.x = DEF_BLOCK_X; blocksize.y = DEF_BLOCK_Y; gridsize.x = (outsubimgCud.imgMeta.width + blocksize.x - 1) / blocksize.x; gridsize.y = (outsubimgCud.imgMeta.height + blocksize.y - 1) / blocksize.y; if (times < METHODSELECT_THRES) { // 调用核函数,根据缩小倍数 times 进行图像缩小处理。 _downImgbyDomLessKer<<<gridsize, blocksize>>>( insubimgCud, outsubimgCud, times, alldevicepointer); if (cudaerrcode != cudaSuccess) { cudaFree(alldevicepointer); return cudaerrcode; } } else { // 调用核函数,根据缩小倍数 times 进行图像缩小处理。 _downImgbyDomGreaterKer<<<gridsize, blocksize>>>( insubimgCud, outsubimgCud, times); if (cudaerrcode != cudaSuccess) { cudaFree(alldevicepointer); return cudaerrcode; } } // 释放内存空间。 cudaFree(alldevicepointer); // 处理完毕,退出。 return NO_ERROR; } // Kernel 函数:_genRandomKer(生成随机数) static __global__ void _genRandomKer(int *randnumdev, int times, int time, int width) { // 使用一个线程生成 4 行随机数。 // 计算当前线程的位置。 int index = blockIdx.x * 4; // 获取当前的时间参数 int position; // curand随机函数初始化 curandState state; curand_init(time, index, 0, &state); // 得到当前行在随机数矩阵中的偏移 position = index * width; // 一次性生成 4 行随机数。 for (int k = 0; k < 4; k ++) { for (int i = 0; i < width; i++) { // 生成一个随机数。 *(randnumdev + position + i) = curand(&state) % times; } // 获取下一行的偏移。 position += width; } } // Kernel 函数:_downImgbyProKer(概率法缩小图像) static __global__ void _downImgbyProKer( ImageCuda inimg, ImageCuda outimg, int *randnumdev, int times) { // 计算线程对应的输出点的位置,其中 c 和 r 分别表示线程处理的像素点的坐标 // 的 x 和 y 分量(其中,c 表示 column;r 表示 row)。由于我们采用了并行度 // 缩减的策略,令一个线程处理 4 个输出像素,这四个像素位于统一列的相邻 4 行 // 上,因此,对于 r 需要进行乘 4 计算。 int c = (blockIdx.x * blockDim.x + threadIdx.x); int r = (blockIdx.y * blockDim.y + threadIdx.y) * 4; // 声明局部变量。 unsigned char *inptr, *outptr; int randnum, index; int rex, rey, x, y; // 检查第一个像素点是否越界,如果越界,则不进行处理,一方面节省计算 // 资源,另一方面防止由于段错误导致程序崩溃。 if (r >= outimg.imgMeta.height || c >= outimg.imgMeta.width) return; // 得到当前线程在随机数矩阵中的位置 index。 index = r * outimg.imgMeta.width + c; // 得到当前 index 位置上对应的随机数。 randnum = *(randnumdev + index); // 得到在输入图像的偏移量。 rex = randnum % (2 * times - 1); rey = randnum / (2 * times - 1); // 对应输入图像的位置 x = times * c + rex; //n * (c + 1) - 1 + rex - (n - 1); y = times * r + rey; //n * (r + 1) - 1 + rey - (n - 1); // 处理边界点的特殊情况 if (x >= inimg.imgMeta.width) x = x - (times - 1); if (y >= inimg.imgMeta.height) y = y - (times - 1); if (x < 0) x = x + (times - 1); if (y < 0) y = y + (times - 1); // 第一个像素点 inptr = inimg.imgMeta.imgData + x + y * inimg.pitchBytes; outptr = outimg.imgMeta.imgData + c + r * outimg.pitchBytes; // 为输出图像当前点赋值。 *outptr = *inptr; // 处理剩下的 3 个像素点。 for (int k = 1; k < 4; k++) { // 判断是否越界。 if (++r >= outimg.imgMeta.height) return; // 得到下一个随机数的位置以及获取随机数。 index += outimg.imgMeta.width; randnum = *(randnumdev + index); // 得到在输入图像的偏移量 rex = randnum % (2 * times - 1); rey = randnum / (2 * times - 1); // 对应输入图像的位置 x = times * c + rex; y = times 
* r + rey; // 处理边界点 if (x >= inimg.imgMeta.width) x = x - (times - 1); if (y >= inimg.imgMeta.height) y = y - (times - 1); if (x < 0) x = x + (times - 1); if (y < 0) y = y + (times - 1); // 为后面 3 个像素点赋值。 inptr = inimg.imgMeta.imgData + x + y * inimg.pitchBytes; outptr = outimg.imgMeta.imgData + c + r * outimg.pitchBytes; *outptr = *inptr; } } // Host 成员方法:probabilityShrink(概率法图像缩小处理) __host__ int DownSampleImage::probabilityDownSImg(Image *inimg, Image *outimg) { // 定义变量 int errcode; cudaError_t cudaerrcode; // 检查输入图像,输出图像是否为空 if (inimg == NULL || outimg == NULL ) return NULL_POINTER; // 判断缩小倍数是否合理 if (times <= 1) return INVALID_DATA; // 将输入图像拷贝到 Device 内存中。 errcode = ImageBasicOp::copyToCurrentDevice(inimg); if (errcode != NO_ERROR) return errcode; // 将输出图像拷贝入 Device 内存。 errcode = ImageBasicOp::copyToCurrentDevice(outimg); if (errcode != NO_ERROR) { // 如果输出图像无数据(故上面的拷贝函数会失败),则会创建一个和输入图 // 像的 ROI 子图像缩小 times 倍后尺寸相同的图像。 int outwidth = (inimg->roiX2 - inimg->roiX1) / times; int outheight = (inimg->roiY2 - inimg->roiY1) / times; errcode = ImageBasicOp::makeAtCurrentDevice( outimg, outwidth, outheight); // 如果创建图像也操作失败,则说明操作彻底失败,报错退出。 if (errcode != NO_ERROR) return errcode; } // 提取输入图像的 ROI 子图像。 ImageCuda insubimgCud; errcode = ImageBasicOp::roiSubImage(inimg, &insubimgCud); if (errcode != NO_ERROR) return errcode; // 提取输出图像的 ROI 子图像。 ImageCuda outsubimgCud; errcode = ImageBasicOp::roiSubImage(outimg, &outsubimgCud); if (errcode != NO_ERROR) return errcode; // 根据子图像的大小对长,宽进行调整,选择长度小的长,宽进行子图像的统一 if (insubimgCud.imgMeta.width > outsubimgCud.imgMeta.width * times) insubimgCud.imgMeta.width = outsubimgCud.imgMeta.width * times; else outsubimgCud.imgMeta.width = insubimgCud.imgMeta.width / times; if (insubimgCud.imgMeta.height > outsubimgCud.imgMeta.height * times) insubimgCud.imgMeta.height = outsubimgCud.imgMeta.height * times; else outsubimgCud.imgMeta.height = insubimgCud.imgMeta.height / times; // 计算随机数矩阵的大小,以及随机数的范围。 int positionsize, randomsize; positionsize = outsubimgCud.imgMeta.width * outsubimgCud.imgMeta.height; randomsize = (2 * times - 1) * (2 * times - 1); // 在 Device 端一次性申请所需要的空间 int *randnumdev; // 为 Device 端分配内存空间。 cudaerrcode = cudaMalloc((void**)&randnumdev, positionsize * sizeof (int)); if (cudaerrcode != cudaSuccess) { return cudaerrcode; } // 在 Host 端获取时间。由于使用标准 C++ 库获得的时间是精确到秒的,这个时间 // 精度是远远大于两次可能的调用间隔,因此,我们只在程序启动时取当前时间,之 // 之后对程序的时间直接进行自加,以使得每次的时间都是不同的,这样来保证种子 // 在各次调用之间的不同,从而获得不同的随机数。 static int timehost = (int)time(NULL); timehost++; // 计算调用 Kernel 函数的线程块的尺寸和线程块的数量。 dim3 gridsize, blocksize; gridsize.x = (outsubimgCud.imgMeta.height + 3) / 4; blocksize.x = 1; // 随机数矩阵的宽度。 int width = outsubimgCud.imgMeta.width; // 调用生成随机数的 Kernel 函数。 _genRandomKer<<<gridsize, blocksize>>>(randnumdev, randomsize, timehost, width); if (cudaGetLastError() != cudaSuccess) { cudaFree(randnumdev); return CUDA_ERROR; } // 计算调用 Kernel 函数的线程块的尺寸和线程块的数量。 blocksize.x = DEF_BLOCK_X; blocksize.y = DEF_BLOCK_Y; gridsize.x = (outsubimgCud.imgMeta.width + blocksize.x - 1) / blocksize.x; gridsize.y = (outsubimgCud.imgMeta.height + blocksize.y * 4 - 1) / (blocksize.y * 4); // 调用核函数,根据缩小倍数 times 进行图像缩小处理。 _downImgbyProKer<<<gridsize, blocksize>>>(insubimgCud, outsubimgCud, randnumdev, times); if (cudaGetLastError() != cudaSuccess) { cudaFree(randnumdev); return CUDA_ERROR; } // 释放 Device 内存中的数据。 cudaFree(randnumdev); // 处理完毕,退出。 return NO_ERROR; }
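// ----------------------------------------------------------------------------
// Host-side reference sketch (added for clarity, not part of the original
// class): the index mapping used by _downImgbyProKer for one output pixel.
// Given the shrink factor `times` and a random value in [0, (2*times-1)^2)
// as produced by _genRandomKer, the kernel derives an (rex, rey) offset and
// samples the input at (times*c + rex, times*r + rey), shifting coordinates
// that land outside the image back inside. Pitched memory, ROI handling and
// error codes of the real implementation are omitted; the input here is a
// plain row-major buffer.
static inline unsigned char ProbabilitySampleRef(const unsigned char *in,
                                                 int in_w, int in_h,
                                                 int c, int r, int times,
                                                 int randnum) {
  int side = 2 * times - 1;
  int rex = randnum % side;
  int rey = randnum / side;
  int x = times * c + rex;
  int y = times * r + rey;
  if (x >= in_w) x -= (times - 1);   // same boundary fix-up as the kernel
  if (y >= in_h) y -= (times - 1);
  if (x < 0) x += (times - 1);
  if (y < 0) y += (times - 1);
  return in[y * in_w + x];           // row-major, unpitched for simplicity
}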
#include <opencv2/core.hpp> #include "labeling_algorithms.h" #include "labels_solver.h" #include "memory_tester.h" #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <cuda.h> #include <cstdio> #include <stdlib.h> #include <math.h> #include <time.h> #include <iostream> #include <opencv2/core.hpp> #include <opencv2/cudafeatures2d.hpp> #include <map> // Questo algoritmo � una modifica del Komura Equivalence (KE) che esegue le operazioni in due livelli (stage). // Inizialmente esegue le operazioni nel blocco usando la shared memory e poi merga le etichette sui bordi dei // blocchi. Varie prove hanno mostrato che sulla quadro va peggio della versione BUF. // Il minimo per entrambi � 4 #define BLOCK_ROWS 16 #define BLOCK_COLS 16 using namespace cv; // Algorithm itself has good performances, but memory allocation is a problem. // I will try to reduce it. namespace { // Only use it with unsigned numeric types template <typename T> __device__ __forceinline__ unsigned char HasBit(T bitmap, unsigned char pos) { return (bitmap >> pos) & 1; } __device__ __forceinline__ void SetBit(unsigned char &bitmap, unsigned char pos) { bitmap |= (1 << pos); } // Risale alla radice dell'albero a partire da un suo nodo n __device__ unsigned Find(const int *s_buf, unsigned n) { // Attenzione: non invocare la find su un pixel di background while (s_buf[n] != n) { n = s_buf[n]; } return n; } // Unisce gli alberi contenenti i nodi a e b, collegandone le radici __device__ void Union(int *s_buf, unsigned a, unsigned b) { bool done; do { a = Find(s_buf, a); b = Find(s_buf, b); if (a < b) { int old = atomicMin(s_buf + b, a); done = (old == b); b = old; } else if (b < a) { int old = atomicMin(s_buf + a, b); done = (old == a); a = old; } else { done = true; } } while (!done); } __global__ void LocalMerge(const cuda::PtrStepSzb img, cuda::PtrStepSzi labels) { unsigned row = (blockIdx.y * BLOCK_ROWS + threadIdx.y) * 2; unsigned col = (blockIdx.x * BLOCK_COLS + threadIdx.x) * 2; unsigned img_index = row * img.step + col; unsigned labels_index = row * (labels.step / labels.elem_size) + col; __shared__ int buf[BLOCK_ROWS * BLOCK_COLS]; unsigned buf_index = threadIdx.y * BLOCK_COLS + threadIdx.x; if (row < labels.rows && col < labels.cols) { buf[buf_index] = buf_index; } __syncthreads(); if (row < labels.rows && col < labels.cols) { // 0|1 2|3 // --+---+-- // 4|A B| // 5|C D| // --+---+ unsigned char P = 0; if ((threadIdx.x > 0 || threadIdx.y > 0)) { if (img[img_index]) { P |= 0x37; // 00110111 } } if ((threadIdx.y > 0 || threadIdx.x < BLOCK_COLS - 1) && (col + 1 < img.cols)) { if (img[img_index + 1]) { P |= 0x0E; // 00001110 } } if ((threadIdx.x > 0) && (row + 1 < img.rows)) { if (img[img_index + img.step]) { P |= 0x30; // 00110000 } } if (threadIdx.x == 0) { P &= 0xCE; // 11001110 } if (col + 1 >= img.cols) { P &= 0xF3; // 11110011 } else if ((threadIdx.x + 1 == BLOCK_COLS) || (col + 2 >= img.cols)) { P &= 0xF7; // 11110111 } if (threadIdx.y == 0) { P &= 0xF0; // 11110000 } if (row + 1 >= img.rows) { P &= 0xDF; // 11011111 } // P is now ready to be used to find neighbour blocks (or it should be) // P value avoids range errors if (P > 0) { if (HasBit(P, 0) && img[img_index - img.step - 1]) { Union(buf, buf_index, buf_index - BLOCK_COLS - 1); } if ((HasBit(P, 1) && img[img_index - img.step]) || (HasBit(P, 2) && img[img_index + 1 - img.step])) { Union(buf, buf_index, buf_index - BLOCK_COLS); } if (HasBit(P, 3) && img[img_index + 2 - img.step]) { Union(buf, buf_index, buf_index + 1 - BLOCK_COLS); } if 
((HasBit(P, 4) && img[img_index - 1]) || (HasBit(P, 5) && img[img_index + img.step - 1])) { Union(buf, buf_index, buf_index - 1); } } } __syncthreads(); // Local compression if (row < labels.rows && col < labels.cols) { unsigned f = Find(buf, buf_index); unsigned f_row = f / BLOCK_COLS; unsigned f_col = f % BLOCK_COLS; unsigned global_f = 2 * (blockIdx.y * BLOCK_ROWS + f_row) * (labels.step / labels.elem_size) + 2 * (blockIdx.x * BLOCK_COLS + f_col); labels.data[labels_index] = global_f; } } __global__ void GlobalMerge(cuda::PtrStepSzb img, cuda::PtrStepSzi labels) { unsigned row = (blockIdx.y * BLOCK_ROWS + threadIdx.y) * 2; unsigned col = (blockIdx.x * BLOCK_COLS + threadIdx.x) * 2; unsigned img_index = row * img.step + col; unsigned labels_index = row * (labels.step / labels.elem_size) + col; if (row < labels.rows && col < labels.cols) { unsigned char P = 0; if (((threadIdx.x == 0 && col > 0) || (threadIdx.y == 0 && row > 0))) { if (img[img_index]) { P |= 0x37; // 00110111 } } if (((threadIdx.y == 0 && row > 0) || (threadIdx.x == BLOCK_COLS - 1 && col + 2 < img.cols)) && (col + 1 < img.cols)) { if (img[img_index + 1]) { P |= 0x0E; // 00001110 } } if ((threadIdx.x == 0 && col > 0) && (row + 1 < img.rows)) { if (img[img_index + img.step]) { P |= 0x30; // 00110000 } } if (col == 0) { P &= 0xCE; // 11001110 } if (col + 1 >= img.cols) { P &= 0xF3; // 11110011 } else if (col + 2 >= img.cols) { P &= 0xF7; // 11110111 } if (row == 0) { P &= 0xF0; // 11110000 } if (row + 1 >= img.rows) { P &= 0xDF; // 11011111 } // P is now ready to be used to find neighbour blocks (or it should be) // P value avoids range errors if (P > 0) { if (HasBit(P, 0) && img[img_index - img.step - 1]) { Union(labels.data, labels_index, labels_index - 2 * (labels.step / labels.elem_size) - 2); } if ((HasBit(P, 1) && img[img_index - img.step]) || (HasBit(P, 2) && img[img_index + 1 - img.step])) { Union(labels.data, labels_index, labels_index - 2 * (labels.step / labels.elem_size)); } if (HasBit(P, 3) && img[img_index + 2 - img.step]) { Union(labels.data, labels_index, labels_index - 2 * (labels.step / labels.elem_size) + 2); } if ((HasBit(P, 4) && img[img_index - 1]) || (HasBit(P, 5) && img[img_index + img.step - 1])) { Union(labels.data, labels_index, labels_index - 2); } } } } __global__ void Compression(cuda::PtrStepSzi labels) { unsigned row = (blockIdx.y * BLOCK_ROWS + threadIdx.y) * 2; unsigned col = (blockIdx.x * BLOCK_COLS + threadIdx.x) * 2; unsigned labels_index = row * (labels.step / labels.elem_size) + col; if (row < labels.rows && col < labels.cols) { labels[labels_index] = Find(labels.data, labels_index); } } __global__ void FinalLabeling(const cuda::PtrStepSzb img, cuda::PtrStepSzi labels) { unsigned row = (blockIdx.y * BLOCK_ROWS + threadIdx.y) * 2; unsigned col = (blockIdx.x * BLOCK_COLS + threadIdx.x) * 2; unsigned labels_index = row * (labels.step / labels.elem_size) + col; unsigned img_index = row * (img.step / img.elem_size) + col; if (row < labels.rows && col < labels.cols) { int label = labels[labels_index] + 1; if (img[img_index]) labels[labels_index] = label; else { labels[labels_index] = 0; } if (col + 1 < labels.cols) { if (img[img_index + 1]) labels[labels_index + 1] = label; else { labels[labels_index + 1] = 0; } if (row + 1 < labels.rows) { if (img[img_index + img.step + 1]) labels[labels_index + (labels.step / labels.elem_size) + 1] = label; else { labels[labels_index + (labels.step / labels.elem_size) + 1] = 0; } } } if (row + 1 < labels.rows) { if (img[img_index + img.step]) 
labels[labels_index + (labels.step / labels.elem_size)] = label; else { labels[labels_index + (labels.step / labels.elem_size)] = 0; } } } } } class CUDA_BKE_2S : public GpuLabeling2D<CONN_8> { private: dim3 grid_size_; dim3 block_size_; unsigned char *last_pixel_; bool last_pixel_allocated_; public: CUDA_BKE_2S() {} void PerformLabeling() { d_img_labels_.create(d_img_.size(), CV_32SC1); last_pixel_allocated_ = false; if ((d_img_.rows == 1 || d_img_.cols == 1) && !((d_img_.rows + d_img_.cols) % 2)) { cudaMalloc(&last_pixel_, sizeof(unsigned char)); last_pixel_allocated_ = true; } else { // last_pixel_ = d_img_labels_.data + d_img_labels_.step + sizeof(unsigned int); last_pixel_ = d_img_labels_.data + ((d_img_labels_.rows - 2) * d_img_labels_.step) + (d_img_labels_.cols - 2) * d_img_labels_.elemSize(); } grid_size_ = dim3((((d_img_.cols + 1) / 2) + BLOCK_COLS - 1) / BLOCK_COLS, (((d_img_.rows + 1) / 2) + BLOCK_ROWS - 1) / BLOCK_ROWS, 1); block_size_ = dim3(BLOCK_COLS, BLOCK_ROWS, 1); LocalMerge << <grid_size_, block_size_ >> > (d_img_, d_img_labels_); //cuda::GpuMat d_expanded_connections; //d_expanded_connections.create(d_connections_.rows * 3, d_connections_.cols * 3, CV_8UC1); //ExpandConnections << <grid_size_, block_size_ >> > (d_connections_, d_expanded_connections); //Mat1b expanded_connections; //d_expanded_connections.download(expanded_connections); //d_expanded_connections.release(); //Mat1i local_labels; //cuda::GpuMat d_local_merge; //d_img_labels_.copyTo(d_local_merge); //FinalLabeling << <grid_size_, block_size_ >> > (d_img_, d_local_merge); //d_local_merge.download(local_labels); GlobalMerge << <grid_size_, block_size_ >> > (d_img_, d_img_labels_); //Mat1i block_info_final; //d_img_labels_.download(block_info_final); Compression << <grid_size_, block_size_ >> > (d_img_labels_); FinalLabeling << <grid_size_, block_size_ >> > (d_img_, d_img_labels_); //d_img_labels_.download(img_labels_); if (last_pixel_allocated_) { cudaFree(last_pixel_); } cudaDeviceSynchronize(); } private: void Alloc() { d_img_labels_.create(d_img_.size(), CV_32SC1); } void Dealloc() { } double MemoryTransferHostToDevice() { perf_.start(); d_img_.upload(img_); perf_.stop(); return perf_.last(); } void MemoryTransferDeviceToHost() { d_img_labels_.download(img_labels_); } void AllScans() { grid_size_ = dim3((((d_img_.cols + 1) / 2) + BLOCK_COLS - 1) / BLOCK_COLS, (((d_img_.rows + 1) / 2) + BLOCK_ROWS - 1) / BLOCK_ROWS, 1); block_size_ = dim3(BLOCK_COLS, BLOCK_ROWS, 1); LocalMerge << <grid_size_, block_size_ >> > (d_img_, d_img_labels_); //cuda::GpuMat d_expanded_connections; //d_expanded_connections.create(d_connections_.rows * 3, d_connections_.cols * 3, CV_8UC1); //ExpandConnections << <grid_size_, block_size_ >> > (d_connections_, d_expanded_connections); //Mat1b expanded_connections; //d_expanded_connections.download(expanded_connections); //d_expanded_connections.release(); //Mat1i init_labels; //d_block_labels_.download(init_labels); GlobalMerge << <grid_size_, block_size_ >> > (d_img_, d_img_labels_); //Mat1i block_info_final; //d_img_labels_.download(block_info_final); Compression << <grid_size_, block_size_ >> > (d_img_labels_); FinalLabeling << <grid_size_, block_size_ >> > (d_img_, d_img_labels_); // d_img_labels_.download(img_labels_); cudaDeviceSynchronize(); } public: void PerformLabelingWithSteps() { perf_.start(); Alloc(); perf_.stop(); double alloc_timing = perf_.last(); perf_.start(); AllScans(); perf_.stop(); perf_.store(Step(StepType::ALL_SCANS), perf_.last()); perf_.start(); Dealloc(); 
perf_.stop(); double dealloc_timing = perf_.last(); perf_.store(Step(StepType::ALLOC_DEALLOC), alloc_timing + dealloc_timing); } }; REGISTER_LABELING(CUDA_BKE_2S);
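// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original labeling file above): a
// sequential, host-only analogue of the Find/Union primitives used by
// LocalMerge and GlobalMerge. Linking the larger root to the smaller one
// plays the role of atomicMin on the device, so every merged pixel resolves
// to the same minimum label after compression. All names below (HostFind,
// HostUnion, HostUnionFindExample) are hypothetical and exist only for this
// example.
#include <cassert>
#include <vector>

static unsigned HostFind(const std::vector<int> &buf, unsigned n) {
    while (buf[n] != static_cast<int>(n)) { n = buf[n]; } // climb to the root
    return n;
}

static void HostUnion(std::vector<int> &buf, unsigned a, unsigned b) {
    a = HostFind(buf, a);
    b = HostFind(buf, b);
    if (a < b) { buf[b] = a; }      // the smaller root wins, as with atomicMin
    else if (b < a) { buf[a] = b; }
}

static void HostUnionFindExample() {
    std::vector<int> buf{ 0, 1, 2, 3, 4 }; // every element starts as its own root
    HostUnion(buf, 1, 3);                  // {1, 3} now share root 1
    HostUnion(buf, 3, 4);                  // {1, 3, 4} share root 1
    assert(HostFind(buf, 4) == 1);
    assert(HostFind(buf, 2) == 2);         // an untouched element keeps its own label
}
// ---------------------------------------------------------------------------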
#ifdef __INTELLISENSE__ //#define __CUDA_ARCH__ 210 #define __CUDACC__ #include <cuda_helper.h> #include <cuda_texture_types.h> #define __byte_perm(a,b,c) (a) #define tex1Dfetch(t, n) (n) #endif #define USE_SHARED 1 static unsigned int *d_textures[MAX_GPUS][8]; #define PC32up(j, r) ((uint32_t)((j) + (r))) #define PC32dn(j, r) 0 #define QC32up(j, r) 0xFFFFFFFF #define QC32dn(j, r) (((uint32_t)(r) << 24) ^ SPH_T32(~((uint32_t)(j) << 24))) #define B32_0(x) __byte_perm(x, 0, 0x4440) //((x) & 0xFF) #define B32_1(x) __byte_perm(x, 0, 0x4441) //(((x) >> 8) & 0xFF) #define B32_2(x) __byte_perm(x, 0, 0x4442) //(((x) >> 16) & 0xFF) #define B32_3(x) __byte_perm(x, 0, 0x4443) //((x) >> 24) #define T0up(x) (*((uint32_t*)mixtabs + ( (x)))) #define T0dn(x) (*((uint32_t*)mixtabs + ( 256+(x)))) #define T1up(x) (*((uint32_t*)mixtabs + ( 512+(x)))) #define T1dn(x) (*((uint32_t*)mixtabs + ( 768+(x)))) #define T2up(x) (*((uint32_t*)mixtabs + (1024+(x)))) #define T2dn(x) (*((uint32_t*)mixtabs + (1280+(x)))) #define T3up(x) (*((uint32_t*)mixtabs + (1536+(x)))) #define T3dn(x) (*((uint32_t*)mixtabs + (1792+(x)))) texture<unsigned int, 1, cudaReadModeElementType> t0up1; texture<unsigned int, 1, cudaReadModeElementType> t0dn1; texture<unsigned int, 1, cudaReadModeElementType> t1up1; texture<unsigned int, 1, cudaReadModeElementType> t1dn1; texture<unsigned int, 1, cudaReadModeElementType> t2up1; texture<unsigned int, 1, cudaReadModeElementType> t2dn1; texture<unsigned int, 1, cudaReadModeElementType> t3up1; texture<unsigned int, 1, cudaReadModeElementType> t3dn1; extern uint32_t T0up_cpu[]; extern uint32_t T0dn_cpu[]; extern uint32_t T1up_cpu[]; extern uint32_t T1dn_cpu[]; extern uint32_t T2up_cpu[]; extern uint32_t T2dn_cpu[]; extern uint32_t T3up_cpu[]; extern uint32_t T3dn_cpu[]; #if __CUDA_ARCH__ < 300 || defined(_DEBUG) #if (!USE_SHARED) #include "groestl_simple.cuh" #endif __device__ __forceinline__ void quark_groestl512_perm_P(uint32_t *a, char *mixtabs) { #pragma unroll 1 for(int r=0; r<14; r++) { uint32_t t[32]; #pragma unroll 16 for (int k=0; k<16; k++) a[(k*2)+0] ^= PC32up(k<< 4, r); #pragma unroll 16 for(int k=0;k<32;k+=2) { uint32_t t0_0 = B32_0(a[(k ) & 0x1f]), t9_0 = B32_0(a[(k + 9) & 0x1f]); uint32_t t2_1 = B32_1(a[(k + 2) & 0x1f]), t11_1 = B32_1(a[(k + 11) & 0x1f]); uint32_t t4_2 = B32_2(a[(k + 4) & 0x1f]), t13_2 = B32_2(a[(k + 13) & 0x1f]); uint32_t t6_3 = B32_3(a[(k + 6) & 0x1f]), t23_3 = B32_3(a[(k + 23) & 0x1f]); t[k + 0] = T0up( t0_0 ) ^ T1up( t2_1 ) ^ T2up( t4_2 ) ^ T3up( t6_3 ) ^ T0dn( t9_0 ) ^ T1dn( t11_1 ) ^ T2dn( t13_2 ) ^ T3dn( t23_3 ); t[k + 1] = T0dn( t0_0 ) ^ T1dn( t2_1 ) ^ T2dn( t4_2 ) ^ T3dn( t6_3 ) ^ T0up( t9_0 ) ^ T1up( t11_1 ) ^ T2up( t13_2 ) ^ T3up( t23_3 ); } #pragma unroll 32 for(int k=0; k<32; k++) a[k] = t[k]; } } __device__ __forceinline__ void quark_groestl512_perm_Q(uint32_t *a, char *mixtabs) { #pragma unroll 1 for(int r=0; r<14; r++) { uint32_t t[32]; #pragma unroll 16 for (int k=0; k<16; k++) { a[(k*2)+0] ^= QC32up(k << 4, r); a[(k*2)+1] ^= QC32dn(k << 4, r); } #pragma unroll 16 for(int k=0;k<32;k+=2) { uint32_t t2_0 = B32_0(a[(k + 2) & 0x1f]), t1_0 = B32_0(a[(k + 1) & 0x1f]); uint32_t t6_1 = B32_1(a[(k + 6) & 0x1f]), t5_1 = B32_1(a[(k + 5) & 0x1f]); uint32_t t10_2 = B32_2(a[(k + 10) & 0x1f]), t9_2 = B32_2(a[(k + 9) & 0x1f]); uint32_t t22_3 = B32_3(a[(k + 22) & 0x1f]), t13_3 = B32_3(a[(k + 13) & 0x1f]); t[k + 0] = T0up( t2_0 ) ^ T1up( t6_1 ) ^ T2up( t10_2 ) ^ T3up( t22_3 ) ^ T0dn( t1_0 ) ^ T1dn( t5_1 ) ^ T2dn( t9_2 ) ^ T3dn( t13_3 ); t[k + 1] = T0dn( t2_0 ) ^ T1dn( t6_1 
) ^ T2dn( t10_2 ) ^ T3dn( t22_3 ) ^ T0up( t1_0 ) ^ T1up( t5_1 ) ^ T2up( t9_2 ) ^ T3up( t13_3 ); } #pragma unroll 32 for(int k=0; k<32; k++) a[k] = t[k]; } } #endif __global__ void quark_groestl512_gpu_hash_64(uint32_t threads, uint32_t startNounce, uint32_t *g_hash, uint32_t *g_nonceVector) { #if __CUDA_ARCH__ < 300 || defined(_DEBUG) #if USE_SHARED __shared__ char mixtabs[8 * 1024]; if (threadIdx.x < 256) { *((uint32_t*)mixtabs + ( threadIdx.x)) = tex1Dfetch(t0up1, threadIdx.x); *((uint32_t*)mixtabs + ( 256+threadIdx.x)) = tex1Dfetch(t0dn1, threadIdx.x); *((uint32_t*)mixtabs + ( 512+threadIdx.x)) = tex1Dfetch(t1up1, threadIdx.x); *((uint32_t*)mixtabs + ( 768+threadIdx.x)) = tex1Dfetch(t1dn1, threadIdx.x); *((uint32_t*)mixtabs + (1024+threadIdx.x)) = tex1Dfetch(t2up1, threadIdx.x); *((uint32_t*)mixtabs + (1280+threadIdx.x)) = tex1Dfetch(t2dn1, threadIdx.x); *((uint32_t*)mixtabs + (1536+threadIdx.x)) = tex1Dfetch(t3up1, threadIdx.x); *((uint32_t*)mixtabs + (1792+threadIdx.x)) = tex1Dfetch(t3dn1, threadIdx.x); } __syncthreads(); #endif uint32_t thread = (blockDim.x * blockIdx.x + threadIdx.x); if (thread < threads) { // GROESTL uint32_t message[32]; uint32_t state[32]; uint32_t nounce = (g_nonceVector != NULL) ? g_nonceVector[thread] : (startNounce + thread); off_t hashPosition = nounce - startNounce; uint32_t *pHash = &g_hash[hashPosition * 16]; #pragma unroll 4 for (int i=0; i<16; i += 4) AS_UINT4(&message[i]) = AS_UINT4(&pHash[i]); message[16] = 0x80U; #pragma unroll 14 for(int i=17; i<31; i++) message[i] = 0; message[31] = 0x01000000U; #pragma unroll 32 for(int i=0; i<32; i++) state[i] = message[i]; state[31] ^= 0x20000U; // Perm #if USE_SHARED quark_groestl512_perm_P(state, mixtabs); state[31] ^= 0x20000U; quark_groestl512_perm_Q(message, mixtabs); #pragma unroll 32 for(int i=0; i<32; i++) state[i] ^= message[i]; #pragma unroll 16 for(int i=16; i<32; i++) message[i] = state[i]; quark_groestl512_perm_P(state, mixtabs); #else tex_groestl512_perm_P(state); state[31] ^= 0x20000U; tex_groestl512_perm_Q(message); #pragma unroll 32 for(int i=0; i<32; i++) state[i] ^= message[i]; #pragma unroll 16 for(int i=16; i<32; i++) message[i] = state[i]; tex_groestl512_perm_P(state); #endif #pragma unroll 16 for(int i=16; i<32; i++) state[i] ^= message[i]; uint4 *outpt = (uint4*)(pHash); uint4 *phash = (uint4*)(&state[16]); outpt[0] = phash[0]; outpt[1] = phash[1]; outpt[2] = phash[2]; outpt[3] = phash[3]; } #endif } #define texDef(id, texname, texmem, texsource, texsize) { \ unsigned int *texmem; \ cudaMalloc(&texmem, texsize); \ d_textures[thr_id][id] = texmem; \ cudaMemcpy(texmem, texsource, texsize, cudaMemcpyHostToDevice); \ texname.normalized = 0; \ texname.filterMode = cudaFilterModePoint; \ texname.addressMode[0] = cudaAddressModeClamp; \ { cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<unsigned int>(); \ cudaBindTexture(NULL, &texname, texmem, &channelDesc, texsize ); \ } \ } __host__ void quark_groestl512_sm20_init(int thr_id, uint32_t threads) { // Initialize the textures with the macro above texDef(0, t0up1, d_T0up, T0up_cpu, sizeof(uint32_t)*256); texDef(1, t0dn1, d_T0dn, T0dn_cpu, sizeof(uint32_t)*256); texDef(2, t1up1, d_T1up, T1up_cpu, sizeof(uint32_t)*256); texDef(3, t1dn1, d_T1dn, T1dn_cpu, sizeof(uint32_t)*256); texDef(4, t2up1, d_T2up, T2up_cpu, sizeof(uint32_t)*256); texDef(5, t2dn1, d_T2dn, T2dn_cpu, sizeof(uint32_t)*256); texDef(6, t3up1, d_T3up, T3up_cpu, sizeof(uint32_t)*256); texDef(7, t3dn1, d_T3dn, T3dn_cpu, sizeof(uint32_t)*256); } __host__ void 
quark_groestl512_sm20_free(int thr_id) { if (!d_textures[thr_id][0]) return; for (int i=0; i<8; i++) cudaFree(d_textures[thr_id][i]); d_textures[thr_id][0] = NULL; } __host__ void quark_groestl512_sm20_hash_64(int thr_id, uint32_t threads, uint32_t startNounce, uint32_t *d_nonceVector, uint32_t *d_hash, int order) { int threadsperblock = 512; dim3 grid((threads + threadsperblock-1)/threadsperblock); dim3 block(threadsperblock); quark_groestl512_gpu_hash_64<<<grid, block>>>(threads, startNounce, d_hash, d_nonceVector); } __host__ void quark_doublegroestl512_sm20_hash_64(int thr_id, uint32_t threads, uint32_t startNounce, uint32_t *d_nonceVector, uint32_t *d_hash, int order) { int threadsperblock = 512; dim3 grid((threads + threadsperblock-1)/threadsperblock); dim3 block(threadsperblock); quark_groestl512_gpu_hash_64<<<grid, block>>>(threads, startNounce, d_hash, d_nonceVector); quark_groestl512_gpu_hash_64<<<grid, block>>>(threads, startNounce, d_hash, d_nonceVector); } // -------------------------------------------------------------------------------------------------------------------------------------------- #ifdef WANT_GROESTL80 // defined in groest512.cu // __constant__ static uint32_t c_Message80[20]; __global__ //__launch_bounds__(256) void groestl512_gpu_hash_80_sm2(const uint32_t threads, const uint32_t startNounce, uint32_t * g_outhash) { #if __CUDA_ARCH__ < 300 || defined(_DEBUG) #if USE_SHARED __shared__ char mixtabs[8 * 1024]; if (threadIdx.x < 256) { *((uint32_t*)mixtabs + ( threadIdx.x)) = tex1Dfetch(t0up1, threadIdx.x); *((uint32_t*)mixtabs + ( 256+threadIdx.x)) = tex1Dfetch(t0dn1, threadIdx.x); *((uint32_t*)mixtabs + ( 512+threadIdx.x)) = tex1Dfetch(t1up1, threadIdx.x); *((uint32_t*)mixtabs + ( 768+threadIdx.x)) = tex1Dfetch(t1dn1, threadIdx.x); *((uint32_t*)mixtabs + (1024+threadIdx.x)) = tex1Dfetch(t2up1, threadIdx.x); *((uint32_t*)mixtabs + (1280+threadIdx.x)) = tex1Dfetch(t2dn1, threadIdx.x); *((uint32_t*)mixtabs + (1536+threadIdx.x)) = tex1Dfetch(t3up1, threadIdx.x); *((uint32_t*)mixtabs + (1792+threadIdx.x)) = tex1Dfetch(t3dn1, threadIdx.x); } __syncthreads(); #endif const uint32_t thread = (blockDim.x * blockIdx.x + threadIdx.x); if (thread < threads) { uint32_t message[32]; #pragma unroll 5 for (int i=0; i < 20; i += 4) AS_UINT4(&message[i]) = AS_UINT4(&c_Message80[i]); message[19] = cuda_swab32(startNounce + thread); message[20] = 0x80U; // end tag #pragma unroll for(int i=21; i<31; i++) message[i] = 0U; message[31] = 0x01000000U; // end block uint32_t state[32]; #pragma unroll for(int i=0; i<32; i++) state[i] = message[i]; state[31] ^= 0x00020000U; // "...00000201" #if USE_SHARED quark_groestl512_perm_P(state, mixtabs); quark_groestl512_perm_Q(message, mixtabs); state[31] ^= 0x00020000U; #pragma unroll 32 for(int i=0; i<32; i++) state[i] ^= message[i]; #pragma unroll 16 for(int i=16; i<32; i++) message[i] = state[i]; quark_groestl512_perm_P(state, mixtabs); #else tex_groestl512_perm_P(state); tex_groestl512_perm_Q(message); state[31] ^= 0x00020000U; #pragma unroll 32 for(int i=0; i<32; i++) state[i] ^= message[i]; #pragma unroll 16 for(int i=16; i<32; i++) message[i] = state[i]; tex_groestl512_perm_P(state); #endif #pragma unroll 16 for(int i=16; i<32; i++) state[i] ^= message[i]; // uint4 = 4 x uint32_t = 16 bytes, x 4 => 64 bytes const off_t hashPosition = thread; uint4 *outpt = (uint4*) (&g_outhash[hashPosition << 4]); uint4 *phash = (uint4*) (&state[16]); outpt[0] = phash[0]; outpt[1] = phash[1]; outpt[2] = phash[2]; outpt[3] = phash[3]; } #endif } #endif // 
WANT_GROESTL80
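// ---------------------------------------------------------------------------
// Illustrative sketch (not from the original file above): the B32_0..B32_3
// macros use __byte_perm with selectors 0x4440..0x4443 to extract one byte of
// a 32-bit word on the device; the commented-out shift/mask forms are the
// portable equivalent. The host-only helper below demonstrates that
// equivalence with plain shifts; host_byte and host_byte_example are
// hypothetical names used only here.
#include <cassert>
#include <cstdint>

static inline uint32_t host_byte(uint32_t x, int n) { // n = 0..3, byte 0 is the least significant
    return (x >> (8 * n)) & 0xFFu;
}

static void host_byte_example() {
    uint32_t w = 0xAABBCCDDu;
    assert(host_byte(w, 0) == 0xDDu); // same value B32_0(w) yields on the device
    assert(host_byte(w, 3) == 0xAAu); // same value B32_3(w) yields on the device
}
// ---------------------------------------------------------------------------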
#include "base_strategy.cuh" #include <cuco/static_map.cuh> // this is needed by cuco as key, value must be bitwise comparable. // compilers don't declare float/double as bitwise comparable // but that is too strict // for example, the following is true (or 0): // float a = 5; // float b = 5; // memcmp(&a, &b, sizeof(float)); CUCO_DECLARE_BITWISE_COMPARABLE(float); CUCO_DECLARE_BITWISE_COMPARABLE(double); namespace raft { namespace sparse { namespace distance { namespace detail { template <typename value_idx, typename value_t, int tpb> class hash_strategy : public coo_spmv_strategy<value_idx, value_t, tpb> { public: using insert_type = typename cuco::static_map<value_idx, value_t, cuda::thread_scope_block>::device_mutable_view; using smem_type = typename insert_type::slot_type*; using find_type = typename cuco::static_map<value_idx, value_t, cuda::thread_scope_block>::device_view; hash_strategy(const distances_config_t<value_idx, value_t>& config_, float capacity_threshold_ = 0.5, int map_size_ = get_map_size()) : coo_spmv_strategy<value_idx, value_t, tpb>(config_), capacity_threshold(capacity_threshold_), map_size(map_size_) { } void chunking_needed(const value_idx* indptr, const value_idx n_rows, rmm::device_uvector<value_idx>& mask_indptr, std::tuple<value_idx, value_idx>& n_rows_divided, cudaStream_t stream) { auto policy = this->config.handle.get_thrust_policy(); auto less = thrust::copy_if(policy, thrust::make_counting_iterator(value_idx(0)), thrust::make_counting_iterator(n_rows), mask_indptr.data(), fits_in_hash_table(indptr, 0, capacity_threshold * map_size)); std::get<0>(n_rows_divided) = less - mask_indptr.data(); auto more = thrust::copy_if( policy, thrust::make_counting_iterator(value_idx(0)), thrust::make_counting_iterator(n_rows), less, fits_in_hash_table( indptr, capacity_threshold * map_size, std::numeric_limits<value_idx>::max())); std::get<1>(n_rows_divided) = more - less; } template <typename product_f, typename accum_f, typename write_f> void dispatch(value_t* out_dists, value_idx* coo_rows_b, product_f product_func, accum_f accum_func, write_f write_func, int chunk_size) { auto n_blocks_per_row = raft::ceildiv(this->config.b_nnz, chunk_size * tpb); rmm::device_uvector<value_idx> mask_indptr(this->config.a_nrows, this->config.handle.get_stream()); std::tuple<value_idx, value_idx> n_rows_divided; chunking_needed(this->config.a_indptr, this->config.a_nrows, mask_indptr, n_rows_divided, this->config.handle.get_stream()); auto less_rows = std::get<0>(n_rows_divided); if (less_rows > 0) { mask_row_it<value_idx> less(this->config.a_indptr, less_rows, mask_indptr.data()); auto n_less_blocks = less_rows * n_blocks_per_row; this->_dispatch_base(*this, map_size, less, out_dists, coo_rows_b, product_func, accum_func, write_func, chunk_size, n_less_blocks, n_blocks_per_row); } auto more_rows = std::get<1>(n_rows_divided); if (more_rows > 0) { rmm::device_uvector<value_idx> n_chunks_per_row(more_rows + 1, this->config.handle.get_stream()); rmm::device_uvector<value_idx> chunk_indices(0, this->config.handle.get_stream()); chunked_mask_row_it<value_idx>::init(this->config.a_indptr, mask_indptr.data() + less_rows, more_rows, capacity_threshold * map_size, n_chunks_per_row, chunk_indices, this->config.handle.get_stream()); chunked_mask_row_it<value_idx> more(this->config.a_indptr, more_rows, mask_indptr.data() + less_rows, capacity_threshold * map_size, n_chunks_per_row.data(), chunk_indices.data(), this->config.handle.get_stream()); auto n_more_blocks = more.total_row_blocks * 
n_blocks_per_row; this->_dispatch_base(*this, map_size, more, out_dists, coo_rows_b, product_func, accum_func, write_func, chunk_size, n_more_blocks, n_blocks_per_row); } } template <typename product_f, typename accum_f, typename write_f> void dispatch_rev(value_t* out_dists, value_idx* coo_rows_a, product_f product_func, accum_f accum_func, write_f write_func, int chunk_size) { auto n_blocks_per_row = raft::ceildiv(this->config.a_nnz, chunk_size * tpb); rmm::device_uvector<value_idx> mask_indptr(this->config.b_nrows, this->config.handle.get_stream()); std::tuple<value_idx, value_idx> n_rows_divided; chunking_needed(this->config.b_indptr, this->config.b_nrows, mask_indptr, n_rows_divided, this->config.handle.get_stream()); auto less_rows = std::get<0>(n_rows_divided); if (less_rows > 0) { mask_row_it<value_idx> less(this->config.b_indptr, less_rows, mask_indptr.data()); auto n_less_blocks = less_rows * n_blocks_per_row; this->_dispatch_base_rev(*this, map_size, less, out_dists, coo_rows_a, product_func, accum_func, write_func, chunk_size, n_less_blocks, n_blocks_per_row); } auto more_rows = std::get<1>(n_rows_divided); if (more_rows > 0) { rmm::device_uvector<value_idx> n_chunks_per_row(more_rows + 1, this->config.handle.get_stream()); rmm::device_uvector<value_idx> chunk_indices(0, this->config.handle.get_stream()); chunked_mask_row_it<value_idx>::init(this->config.b_indptr, mask_indptr.data() + less_rows, more_rows, capacity_threshold * map_size, n_chunks_per_row, chunk_indices, this->config.handle.get_stream()); chunked_mask_row_it<value_idx> more(this->config.b_indptr, more_rows, mask_indptr.data() + less_rows, capacity_threshold * map_size, n_chunks_per_row.data(), chunk_indices.data(), this->config.handle.get_stream()); auto n_more_blocks = more.total_row_blocks * n_blocks_per_row; this->_dispatch_base_rev(*this, map_size, more, out_dists, coo_rows_a, product_func, accum_func, write_func, chunk_size, n_more_blocks, n_blocks_per_row); } } __device__ inline insert_type init_insert(smem_type cache, const value_idx& cache_size) { return insert_type::make_from_uninitialized_slots( cooperative_groups::this_thread_block(), cache, cache_size, -1, 0); } __device__ inline void insert(insert_type cache, const value_idx& key, const value_t& value) { auto success = cache.insert(cuco::pair<value_idx, value_t>(key, value)); } __device__ inline find_type init_find(smem_type cache, const value_idx& cache_size) { return find_type(cache, cache_size, -1, 0); } __device__ inline value_t find(find_type cache, const value_idx& key) { auto a_pair = cache.find(key); value_t a_col = 0.0; if (a_pair != cache.end()) { a_col = a_pair->second; } return a_col; } struct fits_in_hash_table { public: fits_in_hash_table(const value_idx* indptr_, value_idx degree_l_, value_idx degree_r_) : indptr(indptr_), degree_l(degree_l_), degree_r(degree_r_) { } __host__ __device__ bool operator()(const value_idx& i) { auto degree = indptr[i + 1] - indptr[i]; return degree >= degree_l && degree < degree_r; } private: const value_idx* indptr; const value_idx degree_l, degree_r; }; inline static int get_map_size() { return (raft::getSharedMemPerBlock() - ((tpb / raft::warp_size()) * sizeof(value_t))) / sizeof(typename insert_type::slot_type); } private: float capacity_threshold; int map_size; }; } // namespace detail } // namespace distance } // namespace sparse } // namespace raft
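// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the strategy above): get_map_size() divides
// the shared memory left after the per-warp accumulators by the size of one
// cuco slot (a key/value pair). The figures below are assumptions chosen only
// to make the arithmetic concrete: 48 KB of shared memory per block, tpb = 128,
// 32 threads per warp, int32 keys and float values (an 8-byte slot).
#include <cstdint>

namespace hash_strategy_sizing_example {
constexpr int shared_mem_per_block = 48 * 1024;                       // assumed raft::getSharedMemPerBlock()
constexpr int tpb_example          = 128;                             // assumed threads per block
constexpr int warp_size_example    = 32;
constexpr int value_bytes          = sizeof(float);
constexpr int slot_bytes           = sizeof(int32_t) + sizeof(float); // assumed packed slot layout
constexpr int map_size_example =
    (shared_mem_per_block - (tpb_example / warp_size_example) * value_bytes) / slot_bytes;
static_assert(map_size_example == 6142, "(49152 - 4 * 4) / 8 == 6142 slots");
}  // namespace hash_strategy_sizing_example
// ---------------------------------------------------------------------------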
#include <cudf_test/column_utilities.hpp> #include <cudf_test/column_wrapper.hpp> #include <cudf_test/cudf_gtest.hpp> #include <cudf_test/detail/column_utilities.hpp> #include <jit/type.hpp> #include <rmm/cuda_stream_view.hpp> #include <rmm/exec_policy.hpp> #include <thrust/copy.h> #include <thrust/distance.h> #include <thrust/equal.h> #include <thrust/execution_policy.h> #include <thrust/generate.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/iterator/transform_iterator.h> #include <thrust/logical.h> #include <thrust/reduce.h> #include <thrust/scan.h> #include <thrust/scatter.h> #include <thrust/sequence.h> #include <thrust/transform.h> #include <numeric> #include <sstream> namespace cudf { namespace test { namespace { // expand all non-null rows in a list column into a column of child row indices. std::unique_ptr<column> generate_child_row_indices(lists_column_view const& c, column_view const& row_indices) { // Example input // List<int32_t>: // Length : 7 // Offsets : 0, 3, 6, 8, 11, 14, 16, 19 // | | <-- non-null input rows // Null count: 5 // 0010100 // 1, 1, 1, 2, 2, 2, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 7, 7, 7 // | | | | | <-- child rows of non-null rows // // Desired output: [6, 7, 11, 12, 13] // compute total # of child row indices we will be emitting. auto row_size_iter = cudf::detail::make_counting_transform_iterator( 0, [row_indices = row_indices.begin<size_type>(), validity = c.null_mask(), offsets = c.offsets().begin<offset_type>(), offset = c.offset()] __device__(int index) { // both null mask and offsets data are not pre-sliced. so we need to add the column offset to // every incoming index. auto const true_index = row_indices[index] + offset; return !validity || cudf::bit_is_set(validity, true_index) ? (offsets[true_index + 1] - offsets[true_index]) : 0; }); auto const output_size = thrust::reduce(rmm::exec_policy(), row_size_iter, row_size_iter + row_indices.size()); // no output. done. auto result = cudf::make_fixed_width_column(data_type{type_id::INT32}, output_size, mask_state::UNALLOCATED); if (output_size == 0) { return result; } // for all input rows, what position in the output column they will start at. 
// // output_row_start = [0, 0, 0, 2, 2, 5, 5] // | | <-- non-null input rows // auto output_row_start = cudf::make_fixed_width_column( data_type{type_id::INT32}, row_indices.size(), mask_state::UNALLOCATED); thrust::exclusive_scan(rmm::exec_policy(), row_size_iter, row_size_iter + row_indices.size(), output_row_start->mutable_view().begin<size_type>()); // fill result column with 1s // // result = [1, 1, 1, 1, 1] // thrust::generate(rmm::exec_policy(), result->mutable_view().begin<size_type>(), result->mutable_view().end<size_type>(), [] __device__() { return 1; }); // scatter the output row positions into result buffer // // result = [6, 1, 11, 1, 1] // auto output_row_iter = cudf::detail::make_counting_transform_iterator( 0, [row_indices = row_indices.begin<size_type>(), offsets = c.offsets().begin<offset_type>(), offset = c.offset(), first_offset = cudf::detail::get_value<offset_type>( c.offsets(), c.offset(), rmm::cuda_stream_default)] __device__(int index) { auto const true_index = row_indices[index] + offset; return offsets[true_index] - first_offset; }); thrust::scatter_if(rmm::exec_policy(), output_row_iter, output_row_iter + row_indices.size(), output_row_start->view().begin<size_type>(), row_size_iter, result->mutable_view().begin<size_type>(), [] __device__(auto row_size) { return row_size != 0; }); // generate keys for each output row // // result = [1, 1, 2, 2, 2] // auto keys = cudf::make_fixed_width_column(data_type{type_id::INT32}, output_size, mask_state::UNALLOCATED); thrust::generate(rmm::exec_policy(), keys->mutable_view().begin<size_type>(), keys->mutable_view().end<size_type>(), [] __device__() { return 0; }); thrust::scatter_if(rmm::exec_policy(), row_size_iter, row_size_iter + row_indices.size(), output_row_start->view().begin<size_type>(), row_size_iter, keys->mutable_view().begin<size_type>(), [] __device__(auto row_size) { return row_size != 0; }); thrust::inclusive_scan(rmm::exec_policy(), keys->view().begin<size_type>(), keys->view().end<size_type>(), keys->mutable_view().begin<size_type>()); // scan by key to generate final child row indices. // input // result = [6, 1, 11, 1, 1] // keys = [1, 1, 2, 2, 2] // // output // result = [6, 7, 11, 12, 13] // thrust::inclusive_scan_by_key(rmm::exec_policy(), keys->view().begin<size_type>(), keys->view().end<size_type>(), result->view().begin<size_type>(), result->mutable_view().begin<size_type>()); return result; } #define PROP_EXPECT_EQ(a, b) \ do { \ if (verbosity == debug_output_level::QUIET) { \ if (a != b) { return false; } \ } else { \ EXPECT_EQ(a, b); \ if (a != b) { \ if (verbosity == debug_output_level::FIRST_ERROR) { \ return false; \ } else { \ result = false; \ } \ } \ } \ } while (0) template <bool check_exact_equality> struct column_property_comparator { bool types_equivalent(cudf::data_type const& lhs, cudf::data_type const& rhs) { return is_fixed_point(lhs) ? lhs.id() == rhs.id() : lhs == rhs; } size_type count_nulls(cudf::column_view const& c, cudf::column_view const& row_indices) { auto validity_iter = cudf::detail::make_counting_transform_iterator( 0, [row_indices = row_indices.begin<size_type>(), validity = c.null_mask(), offset = c.offset()] __device__(int index) { // both null mask and offsets data are not pre-sliced. so we need to add the column offset // to every incoming index. auto const true_index = row_indices[index] + offset; return !validity || cudf::bit_is_set(validity, true_index) ? 
0 : 1; }); return thrust::reduce(rmm::exec_policy(), validity_iter, validity_iter + row_indices.size()); } bool compare_common(cudf::column_view const& lhs, cudf::column_view const& rhs, cudf::column_view const& lhs_row_indices, cudf::column_view const& rhs_row_indices, debug_output_level verbosity) { bool result = true; if (check_exact_equality) { PROP_EXPECT_EQ(lhs.type(), rhs.type()); } else { PROP_EXPECT_EQ(types_equivalent(lhs.type(), rhs.type()), true); } // DISCUSSION: does this make sense, semantically? auto const lhs_size = check_exact_equality ? lhs.size() : lhs_row_indices.size(); auto const rhs_size = check_exact_equality ? rhs.size() : rhs_row_indices.size(); PROP_EXPECT_EQ(lhs_size, rhs_size); if (lhs_size > 0 && check_exact_equality) { PROP_EXPECT_EQ(lhs.nullable(), rhs.nullable()); } // DISCUSSION: does this make sense, semantically? auto const lhs_null_count = check_exact_equality ? lhs.null_count() : count_nulls(lhs, lhs_row_indices); auto const rhs_null_count = check_exact_equality ? rhs.null_count() : count_nulls(rhs, rhs_row_indices); PROP_EXPECT_EQ(lhs_null_count, rhs_null_count); // equivalent, but not exactly equal columns can have a different number of children if their // sizes are both 0. Specifically, empty string columns may or may not have children. if (check_exact_equality || (lhs.size() > 0 && lhs.null_count() < lhs.size())) { PROP_EXPECT_EQ(lhs.num_children(), rhs.num_children()); } return result; } template <typename T, std::enable_if_t<!std::is_same_v<T, cudf::list_view> && !std::is_same_v<T, cudf::struct_view>>* = nullptr> bool operator()(cudf::column_view const& lhs, cudf::column_view const& rhs, cudf::column_view const& lhs_row_indices, cudf::column_view const& rhs_row_indices, debug_output_level verbosity) { return compare_common(lhs, rhs, lhs_row_indices, rhs_row_indices, verbosity); } template <typename T, std::enable_if_t<std::is_same_v<T, cudf::list_view>>* = nullptr> bool operator()(cudf::column_view const& lhs, cudf::column_view const& rhs, cudf::column_view const& lhs_row_indices, cudf::column_view const& rhs_row_indices, debug_output_level verbosity) { if (!compare_common(lhs, rhs, lhs_row_indices, rhs_row_indices, verbosity)) { return false; } cudf::lists_column_view lhs_l(lhs); cudf::lists_column_view rhs_l(rhs); // recurse auto lhs_child = lhs_l.get_sliced_child(rmm::cuda_stream_default); // note: if a column is all nulls or otherwise empty, no indices are generated and no recursion // happens auto lhs_child_indices = generate_child_row_indices(lhs_l, lhs_row_indices); if (lhs_child_indices->size() > 0) { auto rhs_child = rhs_l.get_sliced_child(rmm::cuda_stream_default); auto rhs_child_indices = generate_child_row_indices(rhs_l, rhs_row_indices); return cudf::type_dispatcher(lhs_child.type(), column_property_comparator<check_exact_equality>{}, lhs_child, rhs_child, *lhs_child_indices, *rhs_child_indices, verbosity); } return true; } template <typename T, std::enable_if_t<std::is_same_v<T, cudf::struct_view>>* = nullptr> bool operator()(cudf::column_view const& lhs, cudf::column_view const& rhs, cudf::column_view const& lhs_row_indices, cudf::column_view const& rhs_row_indices, debug_output_level verbosity) { if (!compare_common(lhs, rhs, lhs_row_indices, rhs_row_indices, verbosity)) { return false; } structs_column_view l_scv(lhs); structs_column_view r_scv(rhs); for (size_type i = 0; i < lhs.num_children(); i++) { column_view lhs_child = l_scv.get_sliced_child(i); column_view rhs_child = r_scv.get_sliced_child(i); if 
(!cudf::type_dispatcher(lhs_child.type(), column_property_comparator<check_exact_equality>{}, lhs_child, rhs_child, lhs_row_indices, rhs_row_indices, verbosity)) { return false; } } return true; } }; class corresponding_rows_unequal { public: corresponding_rows_unequal(table_device_view d_lhs, table_device_view d_rhs, column_device_view lhs_row_indices_, column_device_view rhs_row_indices_, size_type /*fp_ulps*/) : comp(cudf::nullate::YES{}, d_lhs, d_rhs, cudf::null_equality::EQUAL), lhs_row_indices(lhs_row_indices_), rhs_row_indices(rhs_row_indices_) { } cudf::row_equality_comparator<cudf::nullate::YES> comp; __device__ bool operator()(size_type index) { return !comp(lhs_row_indices.element<size_type>(index), rhs_row_indices.element<size_type>(index)); } column_device_view lhs_row_indices; column_device_view rhs_row_indices; }; class corresponding_rows_not_equivalent { table_device_view d_lhs; table_device_view d_rhs; column_device_view lhs_row_indices; column_device_view rhs_row_indices; size_type const fp_ulps; public: corresponding_rows_not_equivalent(table_device_view d_lhs, table_device_view d_rhs, column_device_view lhs_row_indices_, column_device_view rhs_row_indices_, size_type fp_ulps_) : d_lhs(d_lhs), d_rhs(d_rhs), comp(cudf::nullate::YES{}, d_lhs, d_rhs, null_equality::EQUAL), lhs_row_indices(lhs_row_indices_), rhs_row_indices(rhs_row_indices_), fp_ulps(fp_ulps_) { CUDF_EXPECTS(d_lhs.num_columns() == 1 and d_rhs.num_columns() == 1, "Unsupported number of columns"); } struct typed_element_not_equivalent { template <typename T> __device__ std::enable_if_t<std::is_floating_point_v<T>, bool> operator()( column_device_view const& lhs, column_device_view const& rhs, size_type lhs_index, size_type rhs_index, size_type fp_ulps) { if (lhs.is_valid(lhs_index) and rhs.is_valid(rhs_index)) { T const x = lhs.element<T>(lhs_index); T const y = rhs.element<T>(rhs_index); // Must handle inf and nan separately if (std::isinf(x) || std::isinf(y)) { return x != y; // comparison of (inf==inf) returns true } else if (std::isnan(x) || std::isnan(y)) { return std::isnan(x) != std::isnan(y); // comparison of (nan==nan) returns false } else { T const abs_x_minus_y = std::abs(x - y); return abs_x_minus_y >= std::numeric_limits<T>::min() && abs_x_minus_y > std::numeric_limits<T>::epsilon() * std::abs(x + y) * fp_ulps; } } else { // if either is null, then the inequality was checked already return true; } } template <typename T, typename... Args> __device__ std::enable_if_t<not std::is_floating_point_v<T>, bool> operator()(Args...) 
{ // Non-floating point inequality is checked already return true; } }; cudf::row_equality_comparator<cudf::nullate::YES> comp; __device__ bool operator()(size_type index) { auto const lhs_index = lhs_row_indices.element<size_type>(index); auto const rhs_index = rhs_row_indices.element<size_type>(index); if (not comp(lhs_index, rhs_index)) { auto lhs_col = this->d_lhs.column(0); auto rhs_col = this->d_rhs.column(0); return type_dispatcher(lhs_col.type(), typed_element_not_equivalent{}, lhs_col, rhs_col, lhs_index, rhs_index, fp_ulps); } return false; } }; // Stringify the inconsistent values resulted from the comparison of two columns element-wise std::string stringify_column_differences(cudf::device_span<int const> differences, column_view const& lhs, column_view const& rhs, column_view const& lhs_row_indices, column_view const& rhs_row_indices, debug_output_level verbosity, int depth) { CUDF_EXPECTS(not differences.empty(), "Shouldn't enter this function if `differences` is empty"); std::string const depth_str = depth > 0 ? "depth " + std::to_string(depth) + '\n' : ""; // move the differences to the host. auto h_differences = cudf::detail::make_host_vector_sync(differences); if (verbosity == debug_output_level::ALL_ERRORS) { std::ostringstream buffer; buffer << depth_str << "differences:" << std::endl; auto source_table = cudf::table_view({lhs, rhs}); auto diff_column = fixed_width_column_wrapper<int32_t>(h_differences.begin(), h_differences.end()); auto diff_table = cudf::gather(source_table, diff_column); // Need to pull back the differences auto const h_left_strings = to_strings(diff_table->get_column(0)); auto const h_right_strings = to_strings(diff_table->get_column(1)); for (size_t i = 0; i < h_differences.size(); ++i) buffer << depth_str << "lhs[" << h_differences[i] << "] = " << h_left_strings[i] << ", rhs[" << h_differences[i] << "] = " << h_right_strings[i] << std::endl; return buffer.str(); } else { auto const index = h_differences[0]; // only stringify first difference auto const lhs_index = cudf::detail::get_value<size_type>(lhs_row_indices, index, rmm::cuda_stream_default); auto const rhs_index = cudf::detail::get_value<size_type>(rhs_row_indices, index, rmm::cuda_stream_default); auto diff_lhs = cudf::detail::slice(lhs, lhs_index, lhs_index + 1); auto diff_rhs = cudf::detail::slice(rhs, rhs_index, rhs_index + 1); return depth_str + "first difference: " + "lhs[" + std::to_string(index) + "] = " + to_string(diff_lhs, "") + ", rhs[" + std::to_string(index) + "] = " + to_string(diff_rhs, ""); } } // non-nested column types template <typename T, bool check_exact_equality> struct column_comparator_impl { bool operator()(column_view const& lhs, column_view const& rhs, column_view const& lhs_row_indices, column_view const& rhs_row_indices, debug_output_level verbosity, size_type fp_ulps, int depth) { auto d_lhs = cudf::table_device_view::create(table_view{{lhs}}); auto d_rhs = cudf::table_device_view::create(table_view{{rhs}}); auto d_lhs_row_indices = cudf::column_device_view::create(lhs_row_indices); auto d_rhs_row_indices = cudf::column_device_view::create(rhs_row_indices); using ComparatorType = std::conditional_t<check_exact_equality, corresponding_rows_unequal, corresponding_rows_not_equivalent>; auto differences = rmm::device_uvector<int>( lhs.size(), rmm::cuda_stream_default); // worst case: everything different auto input_iter = thrust::make_counting_iterator(0); auto diff_iter = thrust::copy_if( rmm::exec_policy(), input_iter, input_iter + lhs_row_indices.size(), 
differences.begin(), ComparatorType(*d_lhs, *d_rhs, *d_lhs_row_indices, *d_rhs_row_indices, fp_ulps)); differences.resize(thrust::distance(differences.begin(), diff_iter), rmm::cuda_stream_default); // shrink back down if (not differences.is_empty()) { if (verbosity != debug_output_level::QUIET) { // GTEST_FAIL() does a return that conflicts with our return type. so hide it in a lambda. [&]() { GTEST_FAIL() << stringify_column_differences( differences, lhs, rhs, lhs_row_indices, rhs_row_indices, verbosity, depth); }(); } return false; } return true; } }; // forward declaration for nested-type recursion. template <bool check_exact_equality> struct column_comparator; // specialization for list columns template <bool check_exact_equality> struct column_comparator_impl<list_view, check_exact_equality> { bool operator()(column_view const& lhs, column_view const& rhs, column_view const& lhs_row_indices, column_view const& rhs_row_indices, debug_output_level verbosity, size_type fp_ulps, int depth) { lists_column_view lhs_l(lhs); lists_column_view rhs_l(rhs); CUDF_EXPECTS(lhs_row_indices.size() == rhs_row_indices.size(), "List column size mismatch"); if (lhs_row_indices.is_empty()) { return true; } // worst case - everything is different rmm::device_uvector<int> differences(lhs_row_indices.size(), rmm::cuda_stream_default); // compare offsets, taking slicing into account // left side size_type lhs_shift = cudf::detail::get_value<size_type>(lhs_l.offsets(), lhs_l.offset(), rmm::cuda_stream_default); auto lhs_offsets = thrust::make_transform_iterator( lhs_l.offsets().begin<size_type>() + lhs_l.offset(), [lhs_shift] __device__(size_type offset) { return offset - lhs_shift; }); auto lhs_valids = thrust::make_transform_iterator( thrust::make_counting_iterator(0), [mask = lhs_l.null_mask(), offset = lhs_l.offset()] __device__(size_type index) { return mask == nullptr ? true : cudf::bit_is_set(mask, index + offset); }); // right side size_type rhs_shift = cudf::detail::get_value<size_type>(rhs_l.offsets(), rhs_l.offset(), rmm::cuda_stream_default); auto rhs_offsets = thrust::make_transform_iterator( rhs_l.offsets().begin<size_type>() + rhs_l.offset(), [rhs_shift] __device__(size_type offset) { return offset - rhs_shift; }); auto rhs_valids = thrust::make_transform_iterator( thrust::make_counting_iterator(0), [mask = rhs_l.null_mask(), offset = rhs_l.offset()] __device__(size_type index) { return mask == nullptr ? true : cudf::bit_is_set(mask, index + offset); }); // when checking for equivalency, we can't compare offset values directly, we can only // compare lengths of the rows, and only if valid. as a concrete example, you could have two // equivalent columns with the following data: // // column A // offsets = [0, 3, 5, 7] // validity = [0, 1, 1, 1] // // column B // offsets = [0, 0, 2, 4] // validity = [0, 1, 1, 1] // // Row 0 in column A happens to have a positive length, even though the row is null, but column // B does not. So the offsets for the remaining valid rows are fundamentally different even // though the row lengths are the same. 
// auto input_iter = thrust::make_counting_iterator(0); auto diff_iter = thrust::copy_if( rmm::exec_policy(), input_iter, input_iter + lhs_row_indices.size(), differences.begin(), [lhs_offsets, rhs_offsets, lhs_valids, rhs_valids, lhs_indices = lhs_row_indices.begin<size_type>(), rhs_indices = rhs_row_indices.begin<size_type>()] __device__(size_type index) { auto const lhs_index = lhs_indices[index]; auto const rhs_index = rhs_indices[index]; // check for validity match if (lhs_valids[lhs_index] != rhs_valids[rhs_index]) { return true; } // if the row is valid, check that the length of the list is the same. do this // for both the equivalency and exact equality checks. if (lhs_valids[lhs_index] && ((lhs_offsets[lhs_index + 1] - lhs_offsets[lhs_index]) != (rhs_offsets[rhs_index + 1] - rhs_offsets[rhs_index]))) { return true; } // if validity matches -and- is false, we can ignore the actual offset values. this // is technically not checking "equal()", but it's how the non-list code path handles it if (!lhs_valids[lhs_index]) { return false; } // if checking exact equality, compare the actual offset values if (check_exact_equality && lhs_offsets[lhs_index] != rhs_offsets[rhs_index]) { return true; } return false; }); differences.resize(thrust::distance(differences.begin(), diff_iter), rmm::cuda_stream_default); // shrink back down if (not differences.is_empty()) { if (verbosity != debug_output_level::QUIET) { // GTEST_FAIL() does a return that conflicts with our return type. so hide it in a lambda. [&]() { GTEST_FAIL() << stringify_column_differences( differences, lhs, rhs, lhs_row_indices, rhs_row_indices, verbosity, depth); }(); } return false; } // recurse. auto lhs_child = lhs_l.get_sliced_child(rmm::cuda_stream_default); // note: if a column is all nulls or otherwise empty, no indices are generated and no recursion // happens auto lhs_child_indices = generate_child_row_indices(lhs_l, lhs_row_indices); if (lhs_child_indices->size() > 0) { auto rhs_child = rhs_l.get_sliced_child(rmm::cuda_stream_default); auto rhs_child_indices = generate_child_row_indices(rhs_l, rhs_row_indices); return cudf::type_dispatcher(lhs_child.type(), column_comparator<check_exact_equality>{}, lhs_child, rhs_child, *lhs_child_indices, *rhs_child_indices, verbosity, fp_ulps, depth + 1); } return true; } }; template <bool check_exact_equality> struct column_comparator_impl<struct_view, check_exact_equality> { bool operator()(column_view const& lhs, column_view const& rhs, column_view const& lhs_row_indices, column_view const& rhs_row_indices, debug_output_level verbosity, size_type fp_ulps, int depth) { structs_column_view l_scv(lhs); structs_column_view r_scv(rhs); for (size_type i = 0; i < lhs.num_children(); i++) { column_view lhs_child = l_scv.get_sliced_child(i); column_view rhs_child = r_scv.get_sliced_child(i); if (!cudf::type_dispatcher(lhs_child.type(), column_comparator<check_exact_equality>{}, lhs_child, rhs_child, lhs_row_indices, rhs_row_indices, verbosity, fp_ulps, depth + 1)) { return false; } } return true; } }; template <bool check_exact_equality> struct column_comparator { template <typename T> bool operator()(column_view const& lhs, column_view const& rhs, column_view const& lhs_row_indices, column_view const& rhs_row_indices, debug_output_level verbosity, size_type fp_ulps, int depth = 0) { CUDF_EXPECTS(lhs_row_indices.size() == rhs_row_indices.size(), "Mismatch in row counts to compare"); // compare properties if (!cudf::type_dispatcher(lhs.type(), 
column_property_comparator<check_exact_equality>{}, lhs, rhs, lhs_row_indices, rhs_row_indices, verbosity)) { return false; } // compare values column_comparator_impl<T, check_exact_equality> comparator{}; return comparator(lhs, rhs, lhs_row_indices, rhs_row_indices, verbosity, fp_ulps, depth); } }; std::unique_ptr<column> generate_all_row_indices(size_type num_rows) { auto indices = cudf::make_fixed_width_column(data_type{type_id::INT32}, num_rows, mask_state::UNALLOCATED); thrust::sequence(rmm::exec_policy(), indices->mutable_view().begin<size_type>(), indices->mutable_view().end<size_type>(), 0); return indices; } } // namespace /** * @copydoc cudf::test::expect_column_properties_equal */ bool expect_column_properties_equal(column_view const& lhs, column_view const& rhs, debug_output_level verbosity) { auto indices = generate_all_row_indices(lhs.size()); return cudf::type_dispatcher( lhs.type(), column_property_comparator<true>{}, lhs, rhs, *indices, *indices, verbosity); } /** * @copydoc cudf::test::expect_column_properties_equivalent */ bool expect_column_properties_equivalent(column_view const& lhs, column_view const& rhs, debug_output_level verbosity) { auto indices = generate_all_row_indices(lhs.size()); return cudf::type_dispatcher( lhs.type(), column_property_comparator<false>{}, lhs, rhs, *indices, *indices, verbosity); } /** * @copydoc cudf::test::expect_columns_equal */ bool expect_columns_equal(cudf::column_view const& lhs, cudf::column_view const& rhs, debug_output_level verbosity) { auto indices = generate_all_row_indices(lhs.size()); return cudf::type_dispatcher(lhs.type(), column_comparator<true>{}, lhs, rhs, *indices, *indices, verbosity, cudf::test::default_ulp); } /** * @copydoc cudf::test::expect_columns_equivalent */ bool expect_columns_equivalent(cudf::column_view const& lhs, cudf::column_view const& rhs, debug_output_level verbosity, size_type fp_ulps) { auto indices = generate_all_row_indices(lhs.size()); return cudf::type_dispatcher( lhs.type(), column_comparator<false>{}, lhs, rhs, *indices, *indices, verbosity, fp_ulps); } /** * @copydoc cudf::test::expect_equal_buffers */ void expect_equal_buffers(void const* lhs, void const* rhs, std::size_t size_bytes) { if (size_bytes > 0) { EXPECT_NE(nullptr, lhs); EXPECT_NE(nullptr, rhs); } auto typed_lhs = static_cast<char const*>(lhs); auto typed_rhs = static_cast<char const*>(rhs); EXPECT_TRUE(thrust::equal(thrust::device, typed_lhs, typed_lhs + size_bytes, typed_rhs)); } /** * @copydoc cudf::test::bitmask_to_host */ std::vector<bitmask_type> bitmask_to_host(cudf::column_view const& c) { if (c.nullable()) { auto num_bitmasks = num_bitmask_words(c.size()); std::vector<bitmask_type> host_bitmask(num_bitmasks); if (c.offset() == 0) { CUDF_CUDA_TRY(cudaMemcpy(host_bitmask.data(), c.null_mask(), num_bitmasks * sizeof(bitmask_type), cudaMemcpyDeviceToHost)); } else { auto mask = copy_bitmask(c.null_mask(), c.offset(), c.offset() + c.size()); CUDF_CUDA_TRY(cudaMemcpy(host_bitmask.data(), mask.data(), num_bitmasks * sizeof(bitmask_type), cudaMemcpyDeviceToHost)); } return host_bitmask; } else { return std::vector<bitmask_type>{}; } } namespace { template <typename T, std::enable_if_t<std::is_integral_v<T>>* = nullptr> static auto numeric_to_string_precise(T value) { return std::to_string(value); } template <typename T, std::enable_if_t<std::is_floating_point_v<T>>* = nullptr> static auto numeric_to_string_precise(T value) { std::ostringstream o; o << std::setprecision(std::numeric_limits<T>::max_digits10) << value; return 
o.str(); } static auto duration_suffix(cudf::duration_D) { return " days"; } static auto duration_suffix(cudf::duration_s) { return " seconds"; } static auto duration_suffix(cudf::duration_ms) { return " milliseconds"; } static auto duration_suffix(cudf::duration_us) { return " microseconds"; } static auto duration_suffix(cudf::duration_ns) { return " nanoseconds"; } std::string get_nested_type_str(cudf::column_view const& view) { if (view.type().id() == cudf::type_id::LIST) { lists_column_view lcv(view); return cudf::jit::get_type_name(view.type()) + "<" + (get_nested_type_str(lcv.child())) + ">"; } if (view.type().id() == cudf::type_id::STRUCT) { std::ostringstream out; out << cudf::jit::get_type_name(view.type()) + "<"; std::transform(view.child_begin(), view.child_end(), std::ostream_iterator<std::string>(out, ","), [&out](auto const col) { return get_nested_type_str(col); }); out << ">"; return out.str(); } return cudf::jit::get_type_name(view.type()); } template <typename NestedColumnView> std::string nested_offsets_to_string(NestedColumnView const& c, std::string const& delimiter = ", ") { column_view offsets = (c.parent()).child(NestedColumnView::offsets_column_index); CUDF_EXPECTS(offsets.type().id() == type_id::INT32, "Column does not appear to be an offsets column"); CUDF_EXPECTS(offsets.offset() == 0, "Offsets column has an internal offset!"); size_type output_size = c.size() + 1; // the first offset value to normalize everything against size_type first = cudf::detail::get_value<size_type>(offsets, c.offset(), rmm::cuda_stream_default); rmm::device_uvector<size_type> shifted_offsets(output_size, rmm::cuda_stream_default); // normalize the offset values for the column offset size_type const* d_offsets = offsets.head<size_type>() + c.offset(); thrust::transform( rmm::exec_policy(), d_offsets, d_offsets + output_size, shifted_offsets.begin(), [first] __device__(int32_t offset) { return static_cast<size_type>(offset - first); }); auto const h_shifted_offsets = cudf::detail::make_host_vector_sync(shifted_offsets); std::ostringstream buffer; for (size_t idx = 0; idx < h_shifted_offsets.size(); idx++) { buffer << h_shifted_offsets[idx]; if (idx < h_shifted_offsets.size() - 1) { buffer << delimiter; } } return buffer.str(); } struct column_view_printer { template <typename Element, std::enable_if_t<is_numeric<Element>()>* = nullptr> void operator()(cudf::column_view const& col, std::vector<std::string>& out, std::string const&) { auto h_data = cudf::test::to_host<Element>(col); out.resize(col.size()); if (col.nullable()) { std::transform(thrust::make_counting_iterator(size_type{0}), thrust::make_counting_iterator(col.size()), out.begin(), [&h_data](auto idx) { return bit_is_set(h_data.second.data(), idx) ? 
numeric_to_string_precise(h_data.first[idx]) : std::string("NULL"); }); } else { std::transform(h_data.first.begin(), h_data.first.end(), out.begin(), [](Element el) { return numeric_to_string_precise(el); }); } } template <typename Element, std::enable_if_t<is_timestamp<Element>()>* = nullptr> void operator()(cudf::column_view const& col, std::vector<std::string>& out, std::string const& indent) { // For timestamps, convert timestamp column to column of strings, then // call string version std::string format = [&]() { if constexpr (std::is_same_v<cudf::timestamp_s, Element>) { return std::string{"%Y-%m-%dT%H:%M:%SZ"}; } else if constexpr (std::is_same_v<cudf::timestamp_ms, Element>) { return std::string{"%Y-%m-%dT%H:%M:%S.%3fZ"}; } else if constexpr (std::is_same_v<cudf::timestamp_us, Element>) { return std::string{"%Y-%m-%dT%H:%M:%S.%6fZ"}; } else if constexpr (std::is_same_v<cudf::timestamp_ns, Element>) { return std::string{"%Y-%m-%dT%H:%M:%S.%9fZ"}; } return std::string{"%Y-%m-%d"}; }(); auto col_as_strings = cudf::strings::from_timestamps(col, format); if (col_as_strings->size() == 0) { return; } this->template operator()<cudf::string_view>(*col_as_strings, out, indent); } template <typename Element, std::enable_if_t<cudf::is_fixed_point<Element>()>* = nullptr> void operator()(cudf::column_view const& col, std::vector<std::string>& out, std::string const&) { auto const h_data = cudf::test::to_host<Element>(col); if (col.nullable()) { std::transform(thrust::make_counting_iterator(size_type{0}), thrust::make_counting_iterator(col.size()), std::back_inserter(out), [&h_data](auto idx) { return h_data.second.empty() || bit_is_set(h_data.second.data(), idx) ? static_cast<std::string>(h_data.first[idx]) : std::string("NULL"); }); } else { std::transform(std::cbegin(h_data.first), std::cend(h_data.first), std::back_inserter(out), [col](auto const& fp) { return static_cast<std::string>(fp); }); } } template <typename Element, std::enable_if_t<std::is_same_v<Element, cudf::string_view>>* = nullptr> void operator()(cudf::column_view const& col, std::vector<std::string>& out, std::string const&) { // // Implementation for strings, call special to_host variant // if (col.is_empty()) return; auto h_data = cudf::test::to_host<std::string>(col); out.resize(col.size()); std::transform(thrust::make_counting_iterator(size_type{0}), thrust::make_counting_iterator(col.size()), out.begin(), [&h_data](auto idx) { return h_data.second.empty() || bit_is_set(h_data.second.data(), idx) ? 
h_data.first[idx] : std::string("NULL"); }); } template <typename Element, std::enable_if_t<std::is_same_v<Element, cudf::dictionary32>>* = nullptr> void operator()(cudf::column_view const& col, std::vector<std::string>& out, std::string const&) { cudf::dictionary_column_view dictionary(col); if (col.is_empty()) return; std::vector<std::string> keys = to_strings(dictionary.keys()); std::vector<std::string> indices = to_strings({dictionary.indices().type(), dictionary.size(), dictionary.indices().head(), dictionary.null_mask(), dictionary.null_count(), dictionary.offset()}); out.insert(out.end(), keys.begin(), keys.end()); if (!indices.empty()) { std::string first = "\x08 : " + indices.front(); // use : as delimiter out.push_back(first); // between keys and indices out.insert(out.end(), indices.begin() + 1, indices.end()); } } // Print the tick counts with the units template <typename Element, std::enable_if_t<is_duration<Element>()>* = nullptr> void operator()(cudf::column_view const& col, std::vector<std::string>& out, std::string const&) { auto h_data = cudf::test::to_host<Element>(col); out.resize(col.size()); if (col.nullable()) { std::transform(thrust::make_counting_iterator(size_type{0}), thrust::make_counting_iterator(col.size()), out.begin(), [&h_data](auto idx) { return bit_is_set(h_data.second.data(), idx) ? numeric_to_string_precise(h_data.first[idx].count()) + duration_suffix(h_data.first[idx]) : std::string("NULL"); }); } else { std::transform(h_data.first.begin(), h_data.first.end(), out.begin(), [](Element el) { return numeric_to_string_precise(el.count()) + duration_suffix(el); }); } } template <typename Element, std::enable_if_t<std::is_same_v<Element, cudf::list_view>>* = nullptr> void operator()(cudf::column_view const& col, std::vector<std::string>& out, std::string const& indent) { lists_column_view lcv(col); // propagate slicing to the child if necessary column_view child = lcv.get_sliced_child(rmm::cuda_stream_default); bool const is_sliced = lcv.offset() > 0 || child.offset() > 0; std::string tmp = get_nested_type_str(col) + (is_sliced ? "(sliced)" : "") + ":\n" + indent + "Length : " + std::to_string(lcv.size()) + "\n" + indent + "Offsets : " + (lcv.size() > 0 ? nested_offsets_to_string(lcv) : "") + "\n" + (lcv.parent().nullable() ? indent + "Null count: " + std::to_string(lcv.null_count()) + "\n" + detail::to_string(bitmask_to_host(col), col.size(), indent) + "\n" : "") + // non-nested types don't typically display their null masks, so do it here for convenience. (!is_nested(child.type()) && child.nullable() ? 
" " + detail::to_string(bitmask_to_host(child), child.size(), indent) + "\n" : "") + (detail::to_string(child, ", ", indent + " ")) + "\n"; out.push_back(tmp); } template <typename Element, std::enable_if_t<std::is_same_v<Element, cudf::struct_view>>* = nullptr> void operator()(cudf::column_view const& col, std::vector<std::string>& out, std::string const& indent) { structs_column_view view{col}; std::ostringstream out_stream; out_stream << get_nested_type_str(col) << ":\n" << indent << "Length : " << view.size() << ":\n"; if (view.nullable()) { out_stream << indent << "Null count: " << view.null_count() << "\n" << detail::to_string(bitmask_to_host(col), col.size(), indent) << "\n"; } auto iter = thrust::make_counting_iterator(0); std::transform( iter, iter + view.num_children(), std::ostream_iterator<std::string>(out_stream, "\n"), [&](size_type index) { auto child = view.get_sliced_child(index); // non-nested types don't typically display their null masks, so do it here for convenience. return (!is_nested(child.type()) && child.nullable() ? " " + detail::to_string(bitmask_to_host(child), child.size(), indent) + "\n" : "") + detail::to_string(child, ", ", indent + " "); }); out.push_back(out_stream.str()); } }; } // namespace namespace detail { /** * @copydoc cudf::test::detail::to_strings */ std::vector<std::string> to_strings(cudf::column_view const& col, std::string const& indent) { std::vector<std::string> reply; cudf::type_dispatcher(col.type(), column_view_printer{}, col, reply, indent); return reply; } /** * @copydoc cudf::test::detail::to_string(cudf::column_view, std::string, std::string) * * @param indent Indentation for all output */ std::string to_string(cudf::column_view const& col, std::string const& delimiter, std::string const& indent) { std::ostringstream buffer; std::vector<std::string> h_data = to_strings(col, indent); buffer << indent; std::copy(h_data.begin(), h_data.end() - (!h_data.empty()), std::ostream_iterator<std::string>(buffer, delimiter.c_str())); if (!h_data.empty()) buffer << h_data.back(); return buffer.str(); } /** * @copydoc cudf::test::detail::to_string(std::vector<bitmask_type>, size_type, std::string) * * @param indent Indentation for all output. See comment in `to_strings` for * a detailed description. */ std::string to_string(std::vector<bitmask_type> const& null_mask, size_type null_mask_size, std::string const& indent) { std::ostringstream buffer; buffer << indent; for (int idx = null_mask_size - 1; idx >= 0; idx--) { buffer << (cudf::bit_is_set(null_mask.data(), idx) ? 
"1" : "0"); } return buffer.str(); } } // namespace detail /** * @copydoc cudf::test::to_strings */ std::vector<std::string> to_strings(cudf::column_view const& col) { return detail::to_strings(col); } /** * @copydoc cudf::test::to_string(cudf::column_view, std::string) */ std::string to_string(cudf::column_view const& col, std::string const& delimiter) { return detail::to_string(col, delimiter); } /** * @copydoc cudf::test::to_string(std::vector<bitmask_type>, size_type) */ std::string to_string(std::vector<bitmask_type> const& null_mask, size_type null_mask_size) { return detail::to_string(null_mask, null_mask_size); } /** * @copydoc cudf::test::print */ void print(cudf::column_view const& col, std::ostream& os, std::string const& delimiter) { os << to_string(col, delimiter) << std::endl; } /** * @copydoc cudf::test::validate_host_masks */ bool validate_host_masks(std::vector<bitmask_type> const& expected_mask, std::vector<bitmask_type> const& got_mask, size_type number_of_elements) { return std::all_of(thrust::make_counting_iterator(0), thrust::make_counting_iterator(number_of_elements), [&expected_mask, &got_mask](auto index) { return cudf::bit_is_set(expected_mask.data(), index) == cudf::bit_is_set(got_mask.data(), index); }); } } // namespace test } // namespace cudf
#include "common.cuh" #include <kat/on_device/sequence_ops/grid.cuh> #include <kat/on_device/sequence_ops/block.cuh> #include <kat/on_device/sequence_ops/warp.cuh> #include <cuda/api_wrappers.hpp> #include <limits> #include <algorithm> #include <numeric> #include <iostream> #include <iomanip> using std::size_t; using fake_bool = int8_t; // so as not to have trouble with vector<bool> static_assert(sizeof(bool) == sizeof(fake_bool), "unexpected size mismatch"); namespace klcg = kat::linear_grid::collaborative::grid; namespace klcb = kat::linear_grid::collaborative::block; // namespace kcg = kat::collaborative::grid; namespace kcb = kat::collaborative::block; namespace kcw = kat::collaborative::warp; #if __cplusplus < 201701L #include <experimental/optional> template <typename T> using optional = std::experimental::optional<T>; #else template <typename T> #include <optional> using optional = std::optional<T>; #endif template <typename T> const auto make_exact_comparison { optional<T>{} }; namespace kcw = ::kat::collaborative::warp; namespace klcw = ::kat::linear_grid::collaborative::warp; std::ostream& operator<<(std::ostream& os, klcw::detail::predicate_computation_length_slack_t ss) { switch(ss) { case klcw::detail::predicate_computation_length_slack_t::has_no_slack: os << "has_no_slack"; break; case klcw::detail::predicate_computation_length_slack_t::may_have_arbitrary_slack: os << "may_have_arbitrary_slack"; break; case klcw::detail::predicate_computation_length_slack_t::may_have_full_warps_of_slack: default: os << "may_have_full_warps_of_slack:"; } return os; } namespace kernels { template <typename F, typename T, typename... Is> __global__ void execute_testcase( F testcase_device_function, size_t num_values_to_populate, T* __restrict__ values_to_populate, const Is* __restrict__ ... inputs ) { testcase_device_function(num_values_to_populate, values_to_populate, inputs...); } } // namespace kernels namespace kat { namespace linear_grid { namespace collaborative { namespace warp { template <typename T> std::ostream& operator<<(std::ostream& os, search_result_t<T> sr) { if (not sr.is_set()) { return os << "(not found)"; } return os << "value " << sr.value << " in lane " << sr.lane_index; } template <typename T> KAT_FHD bool operator==(const search_result_t<T>& lhs, const search_result_t<T>& rhs) { return (lhs.lane_index == rhs.lane_index) and ( (not lhs.is_set() ) or (lhs.value == rhs.value) ); } } // namespace warp } // namespace collaborative } // namespace linear_grid } // namespace kat template <typename T> std::size_t set_width_for_up_to(T max) { // assert(std::is_integral<I>::value, "Only integer types supported for now"); std::stringstream ss; ss << std::dec << max; return ss.str().length(); } namespace detail { template <typename T> auto tolerance_gadget(std::true_type, T x, optional<T> tolerance) { auto eps = tolerance.value_or(0); return doctest::Approx(x).epsilon(eps); } template <typename T> T tolerance_gadget(std::false_type, T x, optional<T>) { return x; } } // namespace detail template <typename T> auto tolerance_gadget(T x, optional<T> tolerance) { constexpr const auto is_arithmetic = std::is_arithmetic< std::decay_t<T> >::value; return detail::tolerance_gadget(std::integral_constant<bool, is_arithmetic>{}, x, tolerance); } template <typename T, typename F, typename... Is> void check_results( std::string title, size_t num_values_to_check, const T* __restrict__ actual_values, F expected_value_retriever, optional<T> comparison_tolerance_fraction, const Is* __restrict__... 
inputs) { std::stringstream ss; auto index_width = set_width_for_up_to(num_values_to_check); // TODO: Consider using the maximum/minimum result values to set field widths. for(size_t i = 0; i < num_values_to_check; i++) { ss.str(""); ss << "Assertion " << std::setw(index_width) << (i+1) << " for " << title // << " :\n" << "(" << std::make_tuple(inputs[i]...) << ")" ; auto mismatch_message { ss.str() }; if (comparison_tolerance_fraction) { const auto& actual = actual_values[i]; const auto expected = tolerance_gadget(expected_value_retriever(i), comparison_tolerance_fraction); CHECK_MESSAGE(actual == expected, mismatch_message); } else { const auto& ev = expected_value_retriever(i); const auto& actual = actual_values[i]; const auto expected = expected_value_retriever(i); CHECK_MESSAGE(actual == expected, mismatch_message); } } } template <typename T, typename F, typename... Is> void check_results( size_t num_values_to_check, const T* __restrict__ actual_values, F expected_value_retriever, optional<T> comparison_tolerance_fraction, const Is* __restrict__... inputs) { return check_results( std::string("testcase ") + doctest::current_test_name(), num_values_to_check, actual_values, expected_value_retriever, comparison_tolerance_fraction, inputs...); } template <typename T> struct tag {}; /** * @brief Executes a testcase intended to make certain checks using a GPU kernel * which produces the values to check for. * * @note The actual checks are eventually conducted on the host side, since doctest * code can't actually do anything useful on the GPU. So on the GPU side we "merely" * compute the values to check and let the test logic peform the actual comparison later * on. */ template <typename F, typename K, typename T, typename... Is, size_t... Indices> auto execute_testcase_on_gpu( tag<T>, std::index_sequence<Indices...>, K testcase_kernel, F testcase_device_function, cuda::launch_configuration_t launch_config, size_t num_values_to_populate, Is* __restrict__ ... inputs) { cuda::device_t device { cuda::device::current::get() }; auto device_side_results { cuda::memory::device::make_unique<T[]>(device, num_values_to_populate) }; cuda::memory::device::zero(device_side_results.get(), num_values_to_populate * sizeof(T)); // just to be on the safe side auto host_side_results { std::vector<T>(num_values_to_populate) }; auto make_device_side_input = [&device, num_values_to_populate](auto input, size_t n) { using input_type = std::remove_reference_t<decltype(*input)>; auto device_side_input = cuda::memory::device::make_unique<input_type[]>(device, n); cuda::memory::copy(device_side_input.get(), input, num_values_to_populate * sizeof(input_type)); return std::move(device_side_input); }; auto device_side_inputs = std::make_tuple( make_device_side_input(inputs, num_values_to_populate)... ); ignore(device_side_inputs); // for the case of no inputs cuda::launch( testcase_kernel, launch_config, testcase_device_function, num_values_to_populate, device_side_results.get(), std::get<Indices>(device_side_inputs).get()... ); cuda::memory::copy(host_side_results.data(), device_side_results.get(), sizeof(T) * num_values_to_populate); return host_side_results; } template <typename F, typename ExpectedResultRetriever, typename T, typename... 
Is> void execute_non_uniform_testcase_on_gpu_and_check( F testcase_device_function, ExpectedResultRetriever expected_value_retriever, size_t num_values_to_populate, cuda::grid::dimensions_t grid_dimensions, cuda::grid::block_dimensions_t block_dimensions, optional<T> comparison_tolerance_fraction, Is* __restrict__ ... inputs) { auto launch_config { cuda::make_launch_config(grid_dimensions, block_dimensions) }; auto host_side_results = execute_testcase_on_gpu( tag<T>{}, typename std::make_index_sequence<sizeof...(Is)> {}, kernels::execute_testcase<F, T, Is...>, testcase_device_function, launch_config, num_values_to_populate, inputs... ); check_results ( num_values_to_populate, // perhaps add another parameter for specific testcase details? host_side_results.data(), expected_value_retriever, comparison_tolerance_fraction, inputs...); } template <typename F, typename T, typename... Is> auto execute_non_uniform_testcase_on_gpu( tag<T>, F testcase_device_function, size_t num_values_to_populate, cuda::grid::dimensions_t grid_dimensions, cuda::grid::block_dimensions_t block_dimensions, Is* __restrict__ ... inputs) { auto launch_config { cuda::make_launch_config(grid_dimensions, block_dimensions) }; return execute_testcase_on_gpu( tag<T>{}, typename std::make_index_sequence<sizeof...(Is)> {}, kernels::execute_testcase<F, T, Is...>, testcase_device_function, launch_config, num_values_to_populate, inputs... ); } template <typename T1, typename T2> struct poor_mans_pair { T1 first; T2 second; }; template <typename T1, typename T2> bool operator==(const poor_mans_pair<T1, T2>& lhs, const poor_mans_pair<T1, T2>& rhs) { return lhs.first == rhs.first and lhs.second == rhs.second; } TEST_SUITE("warp-to-grid") { TEST_CASE("append_to_global_memory") { // template <typename T, typename Size = size_t> // KAT_FD void collaborative_append_to_global_memory( // T* __restrict__ global_output, // Size* __restrict__ global_output_length, // T* __restrict__ fragment_to_append, // Size __restrict__ fragment_length) } } // TEST_SUITE("warp-to-grid") TEST_SUITE("block-level - linear grid") { TEST_CASE("fill") { using checked_value_type = int32_t; cuda::grid::dimension_t num_grid_blocks { 2 }; cuda::grid::block_dimension_t num_threads_per_block { kat::warp_size * 3 }; size_t length_to_cover_per_block { num_threads_per_block * 2 + 7 }; auto num_values_to_populate = length_to_cover_per_block * num_grid_blocks; auto resolve_fill_value = [] KAT_HD (unsigned block_id) -> checked_value_type { constexpr const checked_value_type fill_value_base { 456 }; return fill_value_base + (block_id + 1) * 10000; }; auto testcase_device_function = [=] KAT_DEV ( size_t, checked_value_type* buffer_to_fill_by_entire_grid ) { namespace gi = kat::linear_grid::grid_info; auto start = buffer_to_fill_by_entire_grid + length_to_cover_per_block * gi::block::id(); auto end = start + length_to_cover_per_block; auto fill_value = resolve_fill_value(gi::block::id()); klcb::fill(start, end, fill_value); }; auto expected_value_retriever = [=] (size_t pos) { auto processing_block_id = pos / length_to_cover_per_block; return resolve_fill_value(processing_block_id); }; execute_non_uniform_testcase_on_gpu_and_check( testcase_device_function, expected_value_retriever, num_values_to_populate, num_grid_blocks, num_threads_per_block, make_exact_comparison<checked_value_type> ); } TEST_CASE("fill_n") { using checked_value_type = int32_t; cuda::grid::dimension_t num_grid_blocks { 2 }; cuda::grid::block_dimension_t num_threads_per_block { kat::warp_size * 3 }; size_t 
length_to_cover_per_block { num_threads_per_block * 2 + 7 }; auto num_values_to_populate = length_to_cover_per_block * num_grid_blocks; auto resolve_fill_value = [] KAT_HD (unsigned block_id) -> checked_value_type { constexpr const checked_value_type fill_value_base { 456 }; return fill_value_base + (block_id + 1) * 10000; }; auto testcase_device_function = [=] KAT_DEV ( size_t, checked_value_type* buffer_to_fill_by_entire_grid ) { namespace gi = kat::linear_grid::grid_info; auto start = buffer_to_fill_by_entire_grid + length_to_cover_per_block * gi::block::id(); auto fill_value = resolve_fill_value(gi::block::id()); klcb::fill_n(start, length_to_cover_per_block, fill_value); }; auto expected_value_retriever = [=] (size_t pos) { auto processing_block_id = pos / length_to_cover_per_block; return resolve_fill_value(processing_block_id); }; execute_non_uniform_testcase_on_gpu_and_check( testcase_device_function, expected_value_retriever, num_values_to_populate, num_grid_blocks, num_threads_per_block, make_exact_comparison<checked_value_type> ); } TEST_CASE("memzero") { using checked_value_type = int32_t; cuda::grid::dimension_t num_grid_blocks { 2 }; cuda::grid::block_dimension_t num_threads_per_block { kat::warp_size * 3 }; size_t length_to_cover_per_block { num_threads_per_block * 2 + 7 }; auto num_values_to_populate = length_to_cover_per_block * num_grid_blocks; auto testcase_device_function = [=] KAT_DEV ( size_t, checked_value_type* buffer_to_fill_by_entire_grid ) { namespace gi = kat::linear_grid::grid_info; auto start = buffer_to_fill_by_entire_grid + length_to_cover_per_block * gi::block::id(); auto end = start + length_to_cover_per_block; klcb::memzero(start, end); }; auto expected_value_retriever = [=] (size_t pos) { return checked_value_type(0); }; execute_non_uniform_testcase_on_gpu_and_check( testcase_device_function, expected_value_retriever, num_values_to_populate, num_grid_blocks, num_threads_per_block, make_exact_comparison<checked_value_type> ); } TEST_CASE("memzero_n") { using checked_value_type = int32_t; cuda::grid::dimension_t num_grid_blocks { 2 }; cuda::grid::block_dimension_t num_threads_per_block { kat::warp_size * 3 }; size_t length_to_cover_per_block { num_threads_per_block * 2 + 7 }; auto num_values_to_populate = length_to_cover_per_block * num_grid_blocks; auto testcase_device_function = [=] KAT_DEV ( size_t, checked_value_type* buffer_to_fill_by_entire_grid ) { namespace gi = kat::linear_grid::grid_info; auto start = buffer_to_fill_by_entire_grid + length_to_cover_per_block * gi::block::id(); klcb::memzero_n(start, length_to_cover_per_block); }; auto expected_value_retriever = [=] (size_t pos) { return checked_value_type(0); }; execute_non_uniform_testcase_on_gpu_and_check( testcase_device_function, expected_value_retriever, num_values_to_populate, num_grid_blocks, num_threads_per_block, make_exact_comparison<checked_value_type> ); } TEST_CASE("transform") { using checked_value_type = int32_t; using input_value_type = uint8_t; cuda::grid::dimension_t num_grid_blocks { 2 }; cuda::grid::block_dimension_t num_threads_per_block { kat::warp_size * 3 }; size_t length_to_cover_per_block { num_threads_per_block * 2 + 7 }; auto num_values_to_populate = length_to_cover_per_block * num_grid_blocks; std::vector<input_value_type> input; auto generator = [](size_t pos) -> uint8_t { return uint8_t('a' + pos % ('z'-'a' + 1)); }; size_t pos = 0; std::generate_n(std::back_inserter(input), num_values_to_populate, [&]() { return generator(pos++); }); auto op = [] KAT_HD 
(input_value_type x) -> checked_value_type { return -x; }; auto testcase_device_function = [=] KAT_DEV ( size_t, checked_value_type* __restrict target, const input_value_type* __restrict input ) { namespace gi = kat::linear_grid::grid_info; auto source_start = input + length_to_cover_per_block * gi::block::id(); auto source_end = source_start + length_to_cover_per_block; auto block_target_start = target + length_to_cover_per_block * gi::block::id(); klcb::transform(source_start, source_end, block_target_start, op); }; auto expected_value_retriever = [=] (size_t pos) { return op(generator(pos)); }; execute_non_uniform_testcase_on_gpu_and_check( testcase_device_function, expected_value_retriever, num_values_to_populate, num_grid_blocks, num_threads_per_block, make_exact_comparison<checked_value_type>, input.data() ); } TEST_CASE("transform_n") { using checked_value_type = int32_t; using input_value_type = uint8_t; cuda::grid::dimension_t num_grid_blocks { 2 }; cuda::grid::block_dimension_t num_threads_per_block { kat::warp_size * 3 }; size_t length_to_cover_per_block { num_threads_per_block * 2 + 7 }; auto num_values_to_populate = length_to_cover_per_block * num_grid_blocks; std::vector<input_value_type> input; auto generator = [](size_t pos) -> uint8_t { return uint8_t('a' + pos % ('z'-'a' + 1)); }; size_t pos = 0; std::generate_n(std::back_inserter(input), num_values_to_populate, [&]() { return generator(pos++); }); auto op = [] KAT_HD (input_value_type x) -> checked_value_type { return -x; }; auto testcase_device_function = [=] KAT_DEV ( size_t, checked_value_type* __restrict target, const input_value_type* __restrict input ) { namespace gi = kat::linear_grid::grid_info; auto source_start = input + length_to_cover_per_block * gi::block::id(); auto block_target_start = target + length_to_cover_per_block * gi::block::id(); klcb::transform_n(source_start, length_to_cover_per_block, block_target_start, op); }; auto expected_value_retriever = [=] (size_t pos) { return op(generator(pos)); }; execute_non_uniform_testcase_on_gpu_and_check( testcase_device_function, expected_value_retriever, num_values_to_populate, num_grid_blocks, num_threads_per_block, make_exact_comparison<checked_value_type>, input.data() ); } TEST_CASE("cast_and_copy") { using checked_value_type = int32_t; using input_value_type = float; cuda::grid::dimension_t num_grid_blocks { 2 }; cuda::grid::block_dimension_t num_threads_per_block { kat::warp_size * 3 }; size_t length_to_cover_per_block { num_threads_per_block * 2 + 7 }; auto num_values_to_populate = length_to_cover_per_block * num_grid_blocks; std::vector<input_value_type> input; auto generator = [](size_t pos) -> input_value_type { return 10 + pos % 80 + 0.123; }; size_t pos = 0; std::generate_n(std::back_inserter(input), num_values_to_populate, [&]() { return generator(pos++); }); auto testcase_device_function = [=] KAT_DEV ( size_t, checked_value_type* __restrict target, const input_value_type* __restrict input ) { namespace gi = kat::linear_grid::grid_info; auto source_start = input + length_to_cover_per_block * gi::block::id(); auto source_end = source_start + length_to_cover_per_block; auto block_target_start = target + length_to_cover_per_block * gi::block::id(); klcb::cast_and_copy(source_start, source_end, block_target_start); }; auto expected_value_retriever = [=] (size_t pos) -> checked_value_type { return generator(pos); }; execute_non_uniform_testcase_on_gpu_and_check( testcase_device_function, expected_value_retriever, num_values_to_populate, 
num_grid_blocks, num_threads_per_block, make_exact_comparison<checked_value_type>, input.data() ); } TEST_CASE("cast_and_copy_n") { using checked_value_type = int32_t; using input_value_type = float; cuda::grid::dimension_t num_grid_blocks { 2 }; cuda::grid::block_dimension_t num_threads_per_block { kat::warp_size * 3 }; size_t length_to_cover_per_block { num_threads_per_block * 2 + 7 }; auto num_values_to_populate = length_to_cover_per_block * num_grid_blocks; std::vector<input_value_type> input; auto generator = [](size_t pos) -> input_value_type { return 10 + pos % 80 + 0.123; }; size_t pos = 0; std::generate_n(std::back_inserter(input), num_values_to_populate, [&]() { return generator(pos++); }); auto testcase_device_function = [=] KAT_DEV ( size_t, checked_value_type* __restrict target, const input_value_type* __restrict input ) { namespace gi = kat::linear_grid::grid_info; auto start = input + length_to_cover_per_block * gi::block::id(); auto block_target_start = target + length_to_cover_per_block * gi::block::id(); klcb::cast_and_copy_n(start, length_to_cover_per_block, block_target_start); }; auto expected_value_retriever = [=] (size_t pos) -> checked_value_type { return generator(pos); }; execute_non_uniform_testcase_on_gpu_and_check( testcase_device_function, expected_value_retriever, num_values_to_populate, num_grid_blocks, num_threads_per_block, make_exact_comparison<checked_value_type>, input.data() ); } TEST_CASE("copy") { using checked_value_type = int32_t; using input_value_type = checked_value_type; cuda::grid::dimension_t num_grid_blocks { 2 }; cuda::grid::block_dimension_t num_threads_per_block { kat::warp_size * 3 }; size_t length_to_cover_per_block { num_threads_per_block * 2 + 7 }; auto num_values_to_populate = length_to_cover_per_block * num_grid_blocks; std::vector<input_value_type> input; auto generator = [](size_t pos) -> input_value_type { return 10 + pos % 80; }; size_t pos = 0; std::generate_n(std::back_inserter(input), num_values_to_populate, [&]() { return generator(pos++); }); auto testcase_device_function = [=] KAT_DEV ( size_t, checked_value_type* __restrict target, const input_value_type* __restrict input ) { namespace gi = kat::linear_grid::grid_info; auto source_start = input + length_to_cover_per_block * gi::block::id(); auto source_end = source_start + length_to_cover_per_block; auto block_target_start = target + length_to_cover_per_block * gi::block::id(); klcb::copy(source_start, source_end, block_target_start); }; auto expected_value_retriever = [=] (size_t pos) -> checked_value_type { return generator(pos); }; execute_non_uniform_testcase_on_gpu_and_check( testcase_device_function, expected_value_retriever, num_values_to_populate, num_grid_blocks, num_threads_per_block, make_exact_comparison<checked_value_type>, input.data() ); } TEST_CASE("copy_n") { using checked_value_type = int32_t; using input_value_type = checked_value_type; cuda::grid::dimension_t num_grid_blocks { 2 }; cuda::grid::block_dimension_t num_threads_per_block { kat::warp_size * 3 }; size_t length_to_cover_per_block { num_threads_per_block * 2 + 7 }; auto num_values_to_populate = length_to_cover_per_block * num_grid_blocks; std::vector<input_value_type> input; auto generator = [](size_t pos) -> input_value_type { return 10 + pos % 80; }; size_t pos = 0; std::generate_n(std::back_inserter(input), num_values_to_populate, [&]() { return generator(pos++); }); auto testcase_device_function = [=] KAT_DEV ( size_t, checked_value_type* __restrict target, const input_value_type* 
__restrict input ) { namespace gi = kat::linear_grid::grid_info; auto start = input + length_to_cover_per_block * gi::block::id(); auto block_target_start = target + length_to_cover_per_block * gi::block::id(); klcb::copy_n(start, length_to_cover_per_block, block_target_start); }; auto expected_value_retriever = [=] (size_t pos) -> checked_value_type { return generator(pos); }; execute_non_uniform_testcase_on_gpu_and_check( testcase_device_function, expected_value_retriever, num_values_to_populate, num_grid_blocks, num_threads_per_block, make_exact_comparison<checked_value_type>, input.data() ); } TEST_CASE("lookup") { using checked_value_type = int32_t; using index_type = uint32_t; cuda::grid::dimension_t num_grid_blocks { 2 }; cuda::grid::block_dimension_t num_threads_per_block { kat::warp_size * 3 }; size_t num_indices_per_block { num_threads_per_block * 2 + 7 }; auto num_values_to_populate = num_indices_per_block * num_grid_blocks; std::vector<checked_value_type> data = { 101, 202, 303, 404, 505, 606, 707, 808, 909, 1010 }; std::vector<index_type> indices; auto generator = [](size_t pos) -> index_type { return (7 * pos) % 10; }; size_t pos = 0; std::generate_n(std::back_inserter(indices), num_values_to_populate, [&]() { return generator(pos++); }); auto testcase_device_function = [=] KAT_DEV ( size_t, checked_value_type* __restrict target, const checked_value_type* __restrict data, const index_type* __restrict indices ) { namespace gi = kat::linear_grid::grid_info; auto block_indices_start = indices + num_indices_per_block * gi::block::id(); auto block_target_start = target + num_indices_per_block * gi::block::id(); klcb::lookup(block_target_start, data, block_indices_start, num_indices_per_block); }; auto expected_value_retriever = [=] (size_t pos) -> checked_value_type { return data[generator(pos)]; }; execute_non_uniform_testcase_on_gpu_and_check( testcase_device_function, expected_value_retriever, num_values_to_populate, num_grid_blocks, num_threads_per_block, make_exact_comparison<checked_value_type>, data.data(), indices.data() ); } TEST_CASE("reduce - all threads obtain result") { constexpr const bool all_threads_do_obtain_result { true }; using checked_value_type = int32_t; // TODO: Try with some other types, e.g. int64_t using input_value_type = int16_t; cuda::grid::dimension_t num_grid_blocks { 3 }; cuda::grid::block_dimension_t num_threads_per_block { kat::warp_size * 3 }; auto num_values_to_populate = num_threads_per_block * num_grid_blocks; std::vector<input_value_type> input; input.reserve(num_values_to_populate); auto make_thread_value = []( cuda::grid::dimension_t block_id, cuda::grid::block_dimension_t thread_id) -> input_value_type { return ((1+block_id) * 1000) + (10 + 10 * thread_id % 9); }; size_t pos = 0; std::generate_n(std::back_inserter(input), num_values_to_populate, [&]() { auto block_id = pos / num_threads_per_block; auto thread_id = pos % num_threads_per_block; pos++; return make_thread_value(block_id, thread_id); } ); auto testcase_device_function = [=] KAT_DEV ( size_t, checked_value_type* __restrict target, const input_value_type* __restrict input ) { // Note: Every thread will set a target value, but there is still just one reduction result // per block. In this variant of reduce, all block threads must obtain the result. 
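// klcb::reduce combines the per-thread inputs across the whole block; with the
// all-threads-obtain-result variant requested here, every thread can store the
// block-wide sum, which the host-side check verifies at every position.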
namespace gi = kat::linear_grid::grid_info; auto thread_input = input[gi::thread::global_id()]; const auto plus = [](checked_value_type& x, checked_value_type y) { x += y; }; target[gi::thread::global_id()] = klcb::reduce< checked_value_type, decltype(plus), all_threads_do_obtain_result >(thread_input, plus); }; std::vector<checked_value_type> block_sums; block_sums.reserve(num_grid_blocks); for(int block_id = 0; block_id < num_grid_blocks; block_id++ ) { checked_value_type block_sum = 0; for(int thread_id = 0; thread_id < num_threads_per_block; thread_id++ ) { block_sum += make_thread_value(block_id,thread_id); } block_sums.push_back(block_sum); } auto expected_value_retriever = [=] (size_t pos) -> checked_value_type { auto block_id = pos / num_threads_per_block; return block_sums[block_id]; }; execute_non_uniform_testcase_on_gpu_and_check( testcase_device_function, expected_value_retriever, num_values_to_populate, num_grid_blocks, num_threads_per_block, make_exact_comparison<checked_value_type>, input.data() ); } TEST_CASE("reduce - not all threads obtain result") { constexpr const bool not_all_threads_obtain_result { true }; using checked_value_type = int32_t; // TODO: Try with some other types, e.g. int64_t using input_value_type = int16_t; cuda::grid::dimension_t num_grid_blocks { 3 }; cuda::grid::block_dimension_t num_threads_per_block { kat::warp_size * 3 }; auto num_values_to_populate = num_threads_per_block * num_grid_blocks; std::vector<input_value_type> input; input.reserve(num_values_to_populate); auto make_thread_value = []( cuda::grid::dimension_t block_id, cuda::grid::block_dimension_t thread_id) -> input_value_type { return ((1+block_id) * 1000) + (10 + 10 * thread_id % 9); }; size_t pos = 0; std::generate_n(std::back_inserter(input), num_values_to_populate, [&]() { auto block_id = pos / num_threads_per_block; auto thread_id = pos % num_threads_per_block; pos++; return make_thread_value(block_id, thread_id); } ); auto testcase_device_function = [=] KAT_DEV ( size_t, checked_value_type* __restrict target, const input_value_type* __restrict input ) { // Note: Every thread will set a target value, but there is still just one reduction result // per block. In this variant of reduce, all block threads must obtain the result. 
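// In this variant of reduce, not every thread is guaranteed to receive the result,
// so the host-side CHECKs further down only inspect the first thread of each block.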
namespace gi = kat::linear_grid::grid_info; auto thread_input = input[gi::thread::global_id()]; auto plus = [](checked_value_type& x, checked_value_type y) { x += y; }; target[gi::thread::global_id()] = klcb::reduce<checked_value_type, decltype(plus), not_all_threads_obtain_result>(thread_input, plus); }; std::vector<checked_value_type> block_sums; block_sums.reserve(num_grid_blocks); for(int block_id = 0; block_id < num_grid_blocks; block_id++ ) { checked_value_type block_sum = 0; for(int thread_id = 0; thread_id < num_threads_per_block; thread_id++ ) { block_sum += make_thread_value(block_id,thread_id); } block_sums.push_back(block_sum); } auto host_results = execute_non_uniform_testcase_on_gpu( tag<checked_value_type>{}, testcase_device_function, num_values_to_populate, num_grid_blocks, num_threads_per_block, input.data() ); CHECK(host_results.size() == num_values_to_populate); for(int block_id = 0; block_id < num_grid_blocks; block_id++ ) { auto first_block_thread_glbal_id = block_id * num_threads_per_block; CHECK(host_results[first_block_thread_glbal_id] == block_sums[block_id]); } } TEST_CASE("sum - all threads obtain result") { constexpr const bool all_threads_do_obtain_result { true }; using checked_value_type = int32_t; // TODO: Try with some other types, e.g. int64_t using input_value_type = int16_t; cuda::grid::dimension_t num_grid_blocks { 3 }; cuda::grid::block_dimension_t num_threads_per_block { kat::warp_size * 3 }; auto num_values_to_populate = num_threads_per_block * num_grid_blocks; std::vector<input_value_type> input; input.reserve(num_values_to_populate); auto make_thread_value = []( cuda::grid::dimension_t block_id, cuda::grid::block_dimension_t thread_id) -> input_value_type { return ((1+block_id) * 1000) + (10 + 10 * thread_id % 9); }; size_t pos = 0; std::generate_n(std::back_inserter(input), num_values_to_populate, [&]() { auto block_id = pos / num_threads_per_block; auto thread_id = pos % num_threads_per_block; pos++; return make_thread_value(block_id, thread_id); } ); auto testcase_device_function = [=] KAT_DEV ( size_t, checked_value_type* __restrict target, const input_value_type* __restrict input ) { // Note: Every thread will set a target value, but there is still just one reduction result // per block. In this variant of reduce, all block threads must obtain the result. namespace gi = kat::linear_grid::grid_info; auto thread_input = input[gi::thread::global_id()]; target[gi::thread::global_id()] = klcb::sum<checked_value_type, all_threads_do_obtain_result>(thread_input); }; std::vector<checked_value_type> block_sums; block_sums.reserve(num_grid_blocks); for(int block_id = 0; block_id < num_grid_blocks; block_id++ ) { checked_value_type block_sum = 0; for(int thread_id = 0; thread_id < num_threads_per_block; thread_id++ ) { block_sum += make_thread_value(block_id,thread_id); } block_sums.push_back(block_sum); } auto expected_value_retriever = [=] (size_t pos) -> checked_value_type { auto block_id = pos / num_threads_per_block; return block_sums[block_id]; }; execute_non_uniform_testcase_on_gpu_and_check( testcase_device_function, expected_value_retriever, num_values_to_populate, num_grid_blocks, num_threads_per_block, make_exact_comparison<checked_value_type>, input.data() ); } TEST_CASE("inclusive scan with specified scratch area") { using checked_value_type = int32_t; // TODO: Try with some other types, e.g. 
int64_t using input_value_type = int16_t; cuda::grid::dimension_t num_grid_blocks { 3 }; cuda::grid::block_dimension_t num_threads_per_block { kat::warp_size * 3 }; auto num_values_to_populate = num_threads_per_block * num_grid_blocks; std::vector<input_value_type> input; input.reserve(num_values_to_populate); auto make_thread_value = []( cuda::grid::dimension_t block_id, cuda::grid::block_dimension_t thread_id) -> input_value_type { return ((1+block_id) * 1000) + (10 + 10 * thread_id % 9); }; size_t pos = 0; std::generate_n(std::back_inserter(input), num_values_to_populate, [&]() { auto block_id = pos / num_threads_per_block; auto thread_id = pos % num_threads_per_block; pos++; return make_thread_value(block_id, thread_id); } ); auto testcase_device_function = [=] KAT_DEV ( size_t, checked_value_type* __restrict target, const input_value_type* __restrict input ) { // Note: Every thread will set a target value, but there is still just one reduction result // per block. In this variant of reduce, all block threads must obtain the result. namespace gi = kat::linear_grid::grid_info; auto thread_input = input[gi::thread::global_id()]; const auto plus = [](checked_value_type& x, checked_value_type y) { x += y; }; static __shared__ checked_value_type scratch[kat::warp_size]; // assumes that there are no than warp_size warps per block target[gi::thread::global_id()] = klcb::scan<checked_value_type, decltype(plus), kat::collaborative::inclusivity_t::Inclusive>(checked_value_type(thread_input), plus, scratch); }; std::vector<checked_value_type> scans; scans.reserve(num_values_to_populate); for(int block_id = 0; block_id < num_grid_blocks; block_id++ ) { checked_value_type block_scan = 0; for(int thread_id = 0; thread_id < num_threads_per_block; thread_id++ ) { block_scan += make_thread_value(block_id,thread_id); scans.push_back(block_scan); } } auto expected_value_retriever = [=] (size_t pos) -> checked_value_type { return scans[pos]; }; execute_non_uniform_testcase_on_gpu_and_check( testcase_device_function, expected_value_retriever, num_values_to_populate, num_grid_blocks, num_threads_per_block, make_exact_comparison<checked_value_type>, input.data() ); } TEST_CASE("inclusive scan without specified scratch area") { using checked_value_type = int32_t; // TODO: Try with some other types, e.g. int64_t using input_value_type = int16_t; cuda::grid::dimension_t num_grid_blocks { 3 }; cuda::grid::block_dimension_t num_threads_per_block { kat::warp_size * 3 }; auto num_values_to_populate = num_threads_per_block * num_grid_blocks; std::vector<input_value_type> input; input.reserve(num_values_to_populate); auto make_thread_value = []( cuda::grid::dimension_t block_id, cuda::grid::block_dimension_t thread_id) -> input_value_type { return ((1+block_id) * 1000) + (10 + 10 * thread_id % 9); }; size_t pos = 0; std::generate_n(std::back_inserter(input), num_values_to_populate, [&]() { auto block_id = pos / num_threads_per_block; auto thread_id = pos % num_threads_per_block; pos++; return make_thread_value(block_id, thread_id); } ); auto testcase_device_function = [=] KAT_DEV ( size_t, checked_value_type* __restrict target, const input_value_type* __restrict input ) { // Note: Every thread will set a target value, but there is still just one reduction result // per block. In this variant of reduce, all block threads must obtain the result. 
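// Block-wide inclusive scan without a caller-provided scratch buffer: each thread
// obtains the running sum of all block inputs up to and including its own, matching
// the per-position reference computed on the host below.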
namespace gi = kat::linear_grid::grid_info; auto thread_input = input[gi::thread::global_id()]; const auto plus = [](checked_value_type& x, checked_value_type y) { x += y; }; target[gi::thread::global_id()] = klcb::scan<checked_value_type, decltype(plus), kat::collaborative::inclusivity_t::Inclusive>(checked_value_type(thread_input), plus); }; std::vector<checked_value_type> scans; scans.reserve(num_values_to_populate); for(int block_id = 0; block_id < num_grid_blocks; block_id++ ) { checked_value_type block_scan = 0; for(int thread_id = 0; thread_id < num_threads_per_block; thread_id++ ) { block_scan += make_thread_value(block_id,thread_id); scans.push_back(block_scan); } } auto expected_value_retriever = [=] (size_t pos) -> checked_value_type { return scans[pos]; }; execute_non_uniform_testcase_on_gpu_and_check( testcase_device_function, expected_value_retriever, num_values_to_populate, num_grid_blocks, num_threads_per_block, make_exact_comparison<checked_value_type>, input.data() ); } TEST_CASE("exclusive scan with specified scratch area") { using checked_value_type = int32_t; // TODO: Try with some other types, e.g. int64_t using input_value_type = int16_t; cuda::grid::dimension_t num_grid_blocks { 3 }; cuda::grid::block_dimension_t num_threads_per_block { kat::warp_size * 3 }; auto num_values_to_populate = num_threads_per_block * num_grid_blocks; std::vector<input_value_type> input; input.reserve(num_values_to_populate); auto make_thread_value = []( cuda::grid::dimension_t block_id, cuda::grid::block_dimension_t thread_id) -> input_value_type { return ((1+block_id) * 1000) + (10 + 10 * thread_id % 9); }; size_t pos = 0; std::generate_n(std::back_inserter(input), num_values_to_populate, [&]() { auto block_id = pos / num_threads_per_block; auto thread_id = pos % num_threads_per_block; pos++; return make_thread_value(block_id, thread_id); } ); auto testcase_device_function = [=] KAT_DEV ( size_t, checked_value_type* __restrict target, const input_value_type* __restrict input ) { // Note: Every thread will set a target value, but there is still just one reduction result // per block. In this variant of reduce, all block threads must obtain the result. namespace gi = kat::linear_grid::grid_info; auto thread_input = input[gi::thread::global_id()]; static __shared__ checked_value_type scratch[kat::warp_size]; // assumes that there are no than warp_size warps per block const auto plus = [](checked_value_type& x, checked_value_type y) { x += y; }; target[gi::thread::global_id()] = klcb::scan<checked_value_type, decltype(plus), kat::collaborative::inclusivity_t::Exclusive>(checked_value_type(thread_input), plus, scratch); }; std::vector<checked_value_type> scans; scans.reserve(num_values_to_populate); for(int block_id = 0; block_id < num_grid_blocks; block_id++ ) { checked_value_type block_scan = 0; for(int thread_id = 0; thread_id < num_threads_per_block; thread_id++ ) { scans.push_back(block_scan); block_scan += make_thread_value(block_id,thread_id); } } auto expected_value_retriever = [=] (size_t pos) -> checked_value_type { return scans[pos]; }; execute_non_uniform_testcase_on_gpu_and_check( testcase_device_function, expected_value_retriever, num_values_to_populate, num_grid_blocks, num_threads_per_block, make_exact_comparison<checked_value_type>, input.data() ); } TEST_CASE("exclusive scan without specified scratch area") { using checked_value_type = int32_t; // TODO: Try with some other types, e.g. 
int64_t using input_value_type = int16_t; cuda::grid::dimension_t num_grid_blocks { 3 }; cuda::grid::block_dimension_t num_threads_per_block { kat::warp_size * 3 }; auto num_values_to_populate = num_threads_per_block * num_grid_blocks; std::vector<input_value_type> input; input.reserve(num_values_to_populate); auto make_thread_value = []( cuda::grid::dimension_t block_id, cuda::grid::block_dimension_t thread_id) -> input_value_type { return ((1+block_id) * 1000) + (10 + 10 * thread_id % 9); }; size_t pos = 0; std::generate_n(std::back_inserter(input), num_values_to_populate, [&]() { auto block_id = pos / num_threads_per_block; auto thread_id = pos % num_threads_per_block; pos++; return make_thread_value(block_id, thread_id); } ); auto testcase_device_function = [=] KAT_DEV ( size_t, checked_value_type* __restrict target, const input_value_type* __restrict input ) { // Note: Every thread will set a target value, but there is still just one reduction result // per block. In this variant of reduce, all block threads must obtain the result. namespace gi = kat::linear_grid::grid_info; auto thread_input = input[gi::thread::global_id()]; const auto plus = [](checked_value_type& x, checked_value_type y) { x += y; }; target[gi::thread::global_id()] = klcb::scan<checked_value_type, decltype(plus), kat::collaborative::inclusivity_t::Exclusive>(checked_value_type(thread_input), plus); }; std::vector<checked_value_type> scans; scans.reserve(num_values_to_populate); for(int block_id = 0; block_id < num_grid_blocks; block_id++ ) { checked_value_type block_scan = 0; for(int thread_id = 0; thread_id < num_threads_per_block; thread_id++ ) { scans.push_back(block_scan); block_scan += make_thread_value(block_id,thread_id); } } auto expected_value_retriever = [=] (size_t pos) -> checked_value_type { return scans[pos]; }; execute_non_uniform_testcase_on_gpu_and_check( testcase_device_function, expected_value_retriever, num_values_to_populate, num_grid_blocks, num_threads_per_block, make_exact_comparison<checked_value_type>, input.data() ); } TEST_CASE("inclusive scan_and_reduce with specified scratch area") { using scan_result_type = int32_t; using reduction_result_type = int32_t; using checked_value_type = poor_mans_pair<scan_result_type, reduction_result_type >; using input_value_type = int16_t; cuda::grid::dimension_t num_grid_blocks { 3 }; cuda::grid::block_dimension_t num_threads_per_block { kat::warp_size * 3 }; auto num_values_to_populate = num_threads_per_block * num_grid_blocks; std::vector<input_value_type> input; input.reserve(num_values_to_populate); auto make_thread_value = []( cuda::grid::dimension_t block_id, cuda::grid::block_dimension_t thread_id) -> input_value_type { return ((1+block_id) * 1000) + (10 + 10 * thread_id % 9); }; size_t pos = 0; std::generate_n(std::back_inserter(input), num_values_to_populate, [&]() { auto block_id = pos / num_threads_per_block; auto thread_id = pos % num_threads_per_block; pos++; return make_thread_value(block_id, thread_id); } ); auto testcase_device_function = [=] KAT_DEV ( size_t, checked_value_type* __restrict target, const input_value_type* __restrict input ) { // Note: Every thread will set a target value, but there is still just one reduction result // per block. In this variant of reduce, all block threads must obtain the result. 
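// scan_and_reduce yields two values per thread: result.first is the inclusive
// running sum within the block and result.second is the block-wide total; both
// are compared against the host-computed pairs below.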
namespace gi = kat::linear_grid::grid_info; auto thread_input = input[gi::thread::global_id()]; static __shared__ scan_result_type scratch[kat::warp_size]; // assumes that there are no than warp_size warps per block const auto plus = [](scan_result_type& x, scan_result_type y) { x += y; }; checked_value_type result; klcb::scan_and_reduce<scan_result_type, decltype(plus), kat::collaborative::inclusivity_t::Inclusive>( scratch, scan_result_type(thread_input), plus, result.first, result.second); target[gi::thread::global_id()] = result; }; std::vector<checked_value_type> scans_and_reductions; scans_and_reductions.reserve(num_values_to_populate); for(int block_id = 0; block_id < num_grid_blocks; block_id++ ) { scan_result_type block_reduction = 0; for(int thread_id = 0; thread_id < num_threads_per_block; thread_id++ ) { block_reduction += make_thread_value(block_id,thread_id); } scan_result_type block_scan = 0; for(int thread_id = 0; thread_id < num_threads_per_block; thread_id++ ) { block_scan += make_thread_value(block_id,thread_id); checked_value_type p { block_scan, block_reduction }; scans_and_reductions.push_back(p); } } auto expected_value_retriever = [=] (size_t pos) -> checked_value_type { return scans_and_reductions[pos]; }; execute_non_uniform_testcase_on_gpu_and_check( testcase_device_function, expected_value_retriever, num_values_to_populate, num_grid_blocks, num_threads_per_block, make_exact_comparison<checked_value_type>, input.data() ); } TEST_CASE("exclusive scan_and_reduce with specified scratch area") { using scan_result_type = int32_t; using reduction_result_type = int32_t; using checked_value_type = poor_mans_pair<scan_result_type, reduction_result_type >; using input_value_type = int16_t; cuda::grid::dimension_t num_grid_blocks { 3 }; cuda::grid::block_dimension_t num_threads_per_block { kat::warp_size * 3 }; auto num_values_to_populate = num_threads_per_block * num_grid_blocks; std::vector<input_value_type> input; input.reserve(num_values_to_populate); auto make_thread_value = []( cuda::grid::dimension_t block_id, cuda::grid::block_dimension_t thread_id) -> input_value_type { return ((1+block_id) * 1000) + (10 + 10 * thread_id % 9); }; size_t pos = 0; std::generate_n(std::back_inserter(input), num_values_to_populate, [&]() { auto block_id = pos / num_threads_per_block; auto thread_id = pos % num_threads_per_block; pos++; return make_thread_value(block_id, thread_id); } ); auto testcase_device_function = [=] KAT_DEV ( size_t, checked_value_type* __restrict target, const input_value_type* __restrict input ) { // Note: Every thread will set a target value, but there is still just one reduction result // per block. In this variant of reduce, all block threads must obtain the result. 
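// Exclusive variant: result.first is the sum of the preceding threads' inputs only
// (zero for a block's first thread), while result.second is still the full block total.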
namespace gi = kat::linear_grid::grid_info; auto thread_input = input[gi::thread::global_id()]; static __shared__ scan_result_type scratch[kat::warp_size]; // assumes that there are no than warp_size warps per block const auto plus = [](scan_result_type& x, scan_result_type y) { x += y; }; checked_value_type result; klcb::scan_and_reduce<scan_result_type, decltype(plus), kat::collaborative::inclusivity_t::Exclusive>( scratch, scan_result_type(thread_input), plus, result.first, result.second); target[gi::thread::global_id()] = result; }; std::vector<checked_value_type> scans_and_reductions; scans_and_reductions.reserve(num_values_to_populate); for(int block_id = 0; block_id < num_grid_blocks; block_id++ ) { scan_result_type block_reduction = 0; for(int thread_id = 0; thread_id < num_threads_per_block; thread_id++ ) { block_reduction += make_thread_value(block_id,thread_id); } scan_result_type block_scan = 0; for(int thread_id = 0; thread_id < num_threads_per_block; thread_id++ ) { checked_value_type p { block_scan, block_reduction }; scans_and_reductions.push_back(p); block_scan += make_thread_value(block_id,thread_id); } } auto expected_value_retriever = [=] (size_t pos) -> checked_value_type { return scans_and_reductions[pos]; }; execute_non_uniform_testcase_on_gpu_and_check( testcase_device_function, expected_value_retriever, num_values_to_populate, num_grid_blocks, num_threads_per_block, make_exact_comparison<checked_value_type>, input.data() ); } TEST_CASE("inclusive scan_and_reduce with specified scratch area") { using scan_result_type = int32_t; using reduction_result_type = int32_t; using checked_value_type = poor_mans_pair<scan_result_type, reduction_result_type >; using input_value_type = int16_t; cuda::grid::dimension_t num_grid_blocks { 3 }; cuda::grid::block_dimension_t num_threads_per_block { kat::warp_size * 3 }; auto num_values_to_populate = num_threads_per_block * num_grid_blocks; std::vector<input_value_type> input; input.reserve(num_values_to_populate); auto make_thread_value = []( cuda::grid::dimension_t block_id, cuda::grid::block_dimension_t thread_id) -> input_value_type { return ((1+block_id) * 1000) + (10 + 10 * thread_id % 9); }; size_t pos = 0; std::generate_n(std::back_inserter(input), num_values_to_populate, [&]() { auto block_id = pos / num_threads_per_block; auto thread_id = pos % num_threads_per_block; pos++; return make_thread_value(block_id, thread_id); } ); auto testcase_device_function = [=] KAT_DEV ( size_t, checked_value_type* __restrict target, const input_value_type* __restrict input ) { // Note: Every thread will set a target value, but there is still just one reduction result // per block. In this variant of reduce, all block threads must obtain the result. 
namespace gi = kat::linear_grid::grid_info; auto thread_input = input[gi::thread::global_id()]; static __shared__ scan_result_type scratch[kat::warp_size]; // assumes that there are no than warp_size warps per block const auto plus = [](scan_result_type& x, scan_result_type y) { x += y; }; checked_value_type result; klcb::scan_and_reduce<scan_result_type, decltype(plus), kat::collaborative::inclusivity_t::Inclusive>( scratch, scan_result_type(thread_input), plus, result.first, result.second); target[gi::thread::global_id()] = result; }; std::vector<checked_value_type> scans_and_reductions; scans_and_reductions.reserve(num_values_to_populate); for(int block_id = 0; block_id < num_grid_blocks; block_id++ ) { scan_result_type block_reduction = 0; for(int thread_id = 0; thread_id < num_threads_per_block; thread_id++ ) { block_reduction += make_thread_value(block_id,thread_id); } scan_result_type block_scan = 0; for(int thread_id = 0; thread_id < num_threads_per_block; thread_id++ ) { block_scan += make_thread_value(block_id,thread_id); checked_value_type p { block_scan, block_reduction }; scans_and_reductions.push_back(p); } } auto expected_value_retriever = [=] (size_t pos) -> checked_value_type { return scans_and_reductions[pos]; }; execute_non_uniform_testcase_on_gpu_and_check( testcase_device_function, expected_value_retriever, num_values_to_populate, num_grid_blocks, num_threads_per_block, make_exact_comparison<checked_value_type>, input.data() ); } TEST_CASE("exclusive scan_and_reduce without specified scratch area") { using scan_result_type = int32_t; using reduction_result_type = int32_t; using checked_value_type = poor_mans_pair<scan_result_type, reduction_result_type >; using input_value_type = int16_t; cuda::grid::dimension_t num_grid_blocks { 3 }; cuda::grid::block_dimension_t num_threads_per_block { kat::warp_size * 3 }; auto num_values_to_populate = num_threads_per_block * num_grid_blocks; std::vector<input_value_type> input; input.reserve(num_values_to_populate); auto make_thread_value = []( cuda::grid::dimension_t block_id, cuda::grid::block_dimension_t thread_id) -> input_value_type { return ((1+block_id) * 1000) + (10 + 10 * thread_id % 9); }; size_t pos = 0; std::generate_n(std::back_inserter(input), num_values_to_populate, [&]() { auto block_id = pos / num_threads_per_block; auto thread_id = pos % num_threads_per_block; pos++; return make_thread_value(block_id, thread_id); } ); auto testcase_device_function = [=] KAT_DEV ( size_t, checked_value_type* __restrict target, const input_value_type* __restrict input ) { // Note: Every thread will set a target value, but there is still just one reduction result // per block. In this variant of reduce, all block threads must obtain the result. 
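// Same exclusive scan_and_reduce, but via the overload that does not take an
// explicit shared-memory scratch buffer; the expected values are unchanged.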
namespace gi = kat::linear_grid::grid_info; auto thread_input = input[gi::thread::global_id()]; const auto plus = [](scan_result_type& x, scan_result_type y) { x += y; }; checked_value_type result; klcb::scan_and_reduce<scan_result_type, decltype(plus), kat::collaborative::inclusivity_t::Exclusive>( scan_result_type(thread_input), plus, result.first, result.second); target[gi::thread::global_id()] = result; }; std::vector<checked_value_type> scans_and_reductions; scans_and_reductions.reserve(num_values_to_populate); for(int block_id = 0; block_id < num_grid_blocks; block_id++ ) { scan_result_type block_reduction = 0; for(int thread_id = 0; thread_id < num_threads_per_block; thread_id++ ) { block_reduction += make_thread_value(block_id,thread_id); } scan_result_type block_scan = 0; for(int thread_id = 0; thread_id < num_threads_per_block; thread_id++ ) { checked_value_type p { block_scan, block_reduction }; scans_and_reductions.push_back(p); block_scan += make_thread_value(block_id,thread_id); } } auto expected_value_retriever = [=] (size_t pos) -> checked_value_type { return scans_and_reductions[pos]; }; execute_non_uniform_testcase_on_gpu_and_check( testcase_device_function, expected_value_retriever, num_values_to_populate, num_grid_blocks, num_threads_per_block, make_exact_comparison<checked_value_type>, input.data() ); } TEST_CASE("elementwise accumulate_n") { using checked_value_type = int32_t; using input_value_type = checked_value_type; cuda::grid::dimension_t num_grid_blocks { 2 }; cuda::grid::block_dimension_t num_threads_per_block { kat::warp_size * 3 }; size_t length_to_cover_per_block { num_threads_per_block * 2 + 7 }; auto num_values_to_populate = length_to_cover_per_block * num_grid_blocks; std::vector<checked_value_type> input_dest; auto dest_generator = [](size_t pos) -> checked_value_type { return 1000 + pos % 8000; }; size_t pos = 0; std::generate_n(std::back_inserter(input_dest), num_values_to_populate, [&]() { return dest_generator(pos++); }); std::vector<input_value_type> input_src; auto src_generator = [](size_t pos) -> input_value_type { return 10 + pos % 80; }; pos = 0; std::generate_n(std::back_inserter(input_src), num_values_to_populate, [&]() { return src_generator(pos++); }); auto testcase_device_function = [=] KAT_DEV ( size_t, checked_value_type* __restrict result, const checked_value_type* __restrict input_dest, const input_value_type* __restrict input_src ) { namespace gi = kat::linear_grid::grid_info; auto block_result = result + length_to_cover_per_block * gi::block::id(); auto block_dest = input_dest + length_to_cover_per_block * gi::block::id(); klcb::copy_n(block_dest, length_to_cover_per_block, block_result); auto block_src = input_src + length_to_cover_per_block * gi::block::id(); const auto plus = [](checked_value_type& x, input_value_type y) { x += y; }; // So, you might think we should be accumulating into _dest - but we can't do that since it's // read-only. So first let's make a copy of it into the result column, then accumulate there. 
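// elementwise_accumulate_n applies `plus` pairwise over the block's slice:
// block_result[i] += block_src[i] for i in [0, length_to_cover_per_block).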
klcb::elementwise_accumulate_n(plus, block_result, block_src, length_to_cover_per_block); }; auto expected_value_retriever = [=] (size_t pos) -> checked_value_type { return dest_generator(pos) + src_generator(pos); }; execute_non_uniform_testcase_on_gpu_and_check( testcase_device_function, expected_value_retriever, num_values_to_populate, num_grid_blocks, num_threads_per_block, make_exact_comparison<checked_value_type>, input_dest.data(), input_src.data() ); } TEST_CASE("elementwise accumulate") { using checked_value_type = int32_t; using input_value_type = checked_value_type; cuda::grid::dimension_t num_grid_blocks { 2 }; cuda::grid::block_dimension_t num_threads_per_block { kat::warp_size * 3 }; size_t length_to_cover_per_block { num_threads_per_block * 2 + 7 }; auto num_values_to_populate = length_to_cover_per_block * num_grid_blocks; std::vector<checked_value_type> input_dest; auto dest_generator = [](size_t pos) -> checked_value_type { return 1000 + pos % 8000; }; size_t pos = 0; std::generate_n(std::back_inserter(input_dest), num_values_to_populate, [&]() { return dest_generator(pos++); }); std::vector<input_value_type> input_src; auto src_generator = [](size_t pos) -> input_value_type { return 10 + pos % 80; }; pos = 0; std::generate_n(std::back_inserter(input_src), num_values_to_populate, [&]() { return src_generator(pos++); }); auto testcase_device_function = [=] KAT_DEV ( size_t, checked_value_type* __restrict result, const checked_value_type* __restrict input_dest, const input_value_type* __restrict input_src ) { namespace gi = kat::linear_grid::grid_info; auto block_result = result + length_to_cover_per_block * gi::block::id(); auto block_dest = input_dest + length_to_cover_per_block * gi::block::id(); klcb::copy_n(block_dest, length_to_cover_per_block, block_result); auto block_src = input_src + length_to_cover_per_block * gi::block::id(); const auto plus = [](checked_value_type& x, input_value_type y) { x += y; }; // So, you might think we should be accumulating into _dest - but we can't do that since it's // read-only. So first let's make a copy of it into the result column, then accumulate there. klcb::elementwise_accumulate(plus, block_result, block_src, block_src + length_to_cover_per_block); }; auto expected_value_retriever = [=] (size_t pos) -> checked_value_type { return dest_generator(pos) + src_generator(pos); }; execute_non_uniform_testcase_on_gpu_and_check( testcase_device_function, expected_value_retriever, num_values_to_populate, num_grid_blocks, num_threads_per_block, make_exact_comparison<checked_value_type>, input_dest.data(), input_src.data() ); } } // TEST_SUITE("block-level - linear grid") TEST_SUITE("warp-level") { TEST_CASE("reduce") { using checked_value_type = int32_t; // TODO: Try with some other types, e.g. int64_t cuda::grid::dimension_t num_grid_blocks { 2 }; cuda::grid::block_dimension_t num_warps_per_block { 3 }; cuda::grid::block_dimension_t num_threads_per_block { num_warps_per_block * kat::warp_size }; // TODO: What about non-full warps? 
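// Each warp reduces its kat::warp_size lane values independently, so the host
// reference below computes one expected sum per global warp.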
auto num_values_to_populate = num_threads_per_block * num_grid_blocks; std::vector<checked_value_type> input; input.reserve(num_values_to_populate); auto make_thread_value = []( cuda::grid::dimension_t global_warp_id, cuda::grid::block_dimension_t lane_id) -> checked_value_type { return ((1+global_warp_id) * 1000) + (10 + 10 * lane_id % 9); }; size_t pos = 0; std::generate_n(std::back_inserter(input), num_values_to_populate, [&]() { auto global_warp_id = pos / kat::warp_size; auto lane_id = pos % kat::warp_size; pos++; return make_thread_value(global_warp_id, lane_id); } ); auto testcase_device_function = [=] KAT_DEV ( size_t, checked_value_type* __restrict target, const checked_value_type* __restrict input ) { // Note: Every thread will set a target value, but there is still just one reduction result // per block. In this variant of reduce, all block threads must obtain the result. namespace gi = kat::linear_grid::grid_info; auto thread_input = input[gi::thread::global_id()]; // printf("Warp %u Lane %2u input is %4d\n", (unsigned) gi::warp::global_id(), (unsigned) gi::lane::id(), (int) thread_input); const auto plus = [](checked_value_type& x, checked_value_type y) { x += y; }; auto warp_reduction_result = kcw::reduce(thread_input, plus); // printf("Warp %u reduction result is %d\n", (unsigned) gi::warp::global_id(), (int) warp_reduction_result); target[gi::thread::global_id()] = warp_reduction_result; }; std::vector<checked_value_type> warp_sums; warp_sums.reserve(num_grid_blocks); for(int global_warp_id = 0; global_warp_id < num_grid_blocks * num_warps_per_block; global_warp_id++ ) { checked_value_type warp_sum = 0; for(int lane_id = 0; lane_id < kat::warp_size; lane_id++ ) { warp_sum += make_thread_value(global_warp_id, lane_id); } warp_sums.push_back(warp_sum); } auto expected_value_retriever = [=] (size_t pos) -> checked_value_type { auto global_warp_id = pos / kat::warp_size; return warp_sums[global_warp_id]; }; execute_non_uniform_testcase_on_gpu_and_check( testcase_device_function, expected_value_retriever, num_values_to_populate, num_grid_blocks, num_threads_per_block, make_exact_comparison<checked_value_type>, input.data() ); } TEST_CASE("sum") { using checked_value_type = int32_t; // TODO: Try with some other types, e.g. int64_t cuda::grid::dimension_t num_grid_blocks { 2 }; cuda::grid::block_dimension_t num_warps_per_block { 3 }; cuda::grid::block_dimension_t num_threads_per_block { num_warps_per_block * kat::warp_size }; // TODO: What about non-full warps? auto num_values_to_populate = num_threads_per_block * num_grid_blocks; std::vector<checked_value_type> input; input.reserve(num_values_to_populate); auto make_thread_value = []( cuda::grid::dimension_t global_warp_id, cuda::grid::block_dimension_t lane_id) -> checked_value_type { return ((1+global_warp_id) * 1000) + (10 + 10 * lane_id % 9); }; size_t pos = 0; std::generate_n(std::back_inserter(input), num_values_to_populate, [&]() { auto global_warp_id = pos / kat::warp_size; auto lane_id = pos % kat::warp_size; pos++; return make_thread_value(global_warp_id, lane_id); } ); auto testcase_device_function = [=] KAT_DEV ( size_t, checked_value_type* __restrict target, const checked_value_type* __restrict input ) { // Note: Every thread will set a target value, but there is still just one reduction result // per block. In this variant of reduce, all block threads must obtain the result. 
namespace gi = kat::linear_grid::grid_info; auto thread_input = input[gi::thread::global_id()]; auto warp_sum = kcw::sum(thread_input); target[gi::thread::global_id()] = warp_sum; }; std::vector<checked_value_type> warp_sums; warp_sums.reserve(num_grid_blocks); for(int global_warp_id = 0; global_warp_id < num_grid_blocks * num_warps_per_block; global_warp_id++ ) { checked_value_type warp_sum = 0; for(int lane_id = 0; lane_id < kat::warp_size; lane_id++ ) { warp_sum += make_thread_value(global_warp_id, lane_id); } warp_sums.push_back(warp_sum); } auto expected_value_retriever = [=] (size_t pos) -> checked_value_type { auto global_warp_id = pos / kat::warp_size; return warp_sums[global_warp_id]; }; execute_non_uniform_testcase_on_gpu_and_check( testcase_device_function, expected_value_retriever, num_values_to_populate, num_grid_blocks, num_threads_per_block, make_exact_comparison<checked_value_type>, input.data() ); } TEST_CASE("inclusive scan") { using checked_value_type = int32_t; // TODO: Try with some other types, e.g. int64_t cuda::grid::dimension_t num_grid_blocks { 1 }; cuda::grid::block_dimension_t num_warps_per_block { 1 }; cuda::grid::block_dimension_t num_threads_per_block { num_warps_per_block * kat::warp_size }; // TODO: What about non-full warps? auto num_values_to_populate = num_threads_per_block * num_grid_blocks; std::vector<checked_value_type> input; input.reserve(num_values_to_populate); auto make_thread_value = []( cuda::grid::dimension_t global_warp_id, cuda::grid::block_dimension_t lane_id) -> checked_value_type { return ((1 + global_warp_id) * 1000) + (10 + 10 * lane_id % 9); }; size_t pos = 0; std::generate_n(std::back_inserter(input), num_values_to_populate, [&]() { auto global_warp_id = pos / kat::warp_size; auto lane_id = pos % kat::warp_size; pos++; return make_thread_value(global_warp_id, lane_id); } ); auto testcase_device_function = [=] KAT_DEV ( size_t, checked_value_type* __restrict target, const checked_value_type* __restrict input ) { // Note: Every thread will set a target value, but there is still just one reduction result // per block. In this variant of reduce, all block threads must obtain the result. namespace gi = kat::linear_grid::grid_info; auto thread_input = input[gi::thread::global_id()]; const auto plus = [](checked_value_type& x, checked_value_type y) { x += y; }; auto warp_scan_result = kcw::scan<checked_value_type, decltype(plus), kat::collaborative::inclusivity_t::Inclusive>(thread_input, plus); target[gi::thread::global_id()] = warp_scan_result; }; std::vector<checked_value_type> scans; scans.reserve(num_values_to_populate); for(int global_warp_id = 0; global_warp_id < num_grid_blocks * num_warps_per_block; global_warp_id++ ) { checked_value_type warp_scan = 0; for(int lane_id = 0; lane_id < kat::warp_size; lane_id++ ) { warp_scan += make_thread_value(global_warp_id, lane_id); scans.push_back(warp_scan); } } auto expected_value_retriever = [=] (size_t pos) -> checked_value_type { return scans[pos]; }; execute_non_uniform_testcase_on_gpu_and_check( testcase_device_function, expected_value_retriever, num_values_to_populate, num_grid_blocks, num_threads_per_block, make_exact_comparison<checked_value_type>, input.data() ); } TEST_CASE("exclusive scan") { using checked_value_type = int32_t; // TODO: Try with some other types, e.g. 
int64_t cuda::grid::dimension_t num_grid_blocks { 1 }; cuda::grid::block_dimension_t num_warps_per_block { 1 }; cuda::grid::block_dimension_t num_threads_per_block { num_warps_per_block * kat::warp_size }; // TODO: What about non-full warps? auto num_values_to_populate = num_threads_per_block * num_grid_blocks; std::vector<checked_value_type> input; input.reserve(num_values_to_populate); auto make_thread_value = []( cuda::grid::dimension_t global_warp_id, cuda::grid::block_dimension_t lane_id) -> checked_value_type { return ((1 + global_warp_id) * 1000) + (10 + 10 * lane_id % 9); }; size_t pos = 0; std::generate_n(std::back_inserter(input), num_values_to_populate, [&]() { auto global_warp_id = pos / kat::warp_size; auto lane_id = pos % kat::warp_size; pos++; return make_thread_value(global_warp_id, lane_id); } ); auto testcase_device_function = [=] KAT_DEV ( size_t, checked_value_type* __restrict target, const checked_value_type* __restrict input ) { // Note: Every thread will set a target value, but there is still just one reduction result // per block. In this variant of reduce, all block threads must obtain the result. namespace gi = kat::linear_grid::grid_info; auto thread_input = input[gi::thread::global_id()]; const auto plus = [](checked_value_type& x, checked_value_type y) { x += y; }; auto warp_scan_result = kcw::scan<checked_value_type, decltype(plus), kat::collaborative::inclusivity_t::Exclusive>(thread_input, plus); target[gi::thread::global_id()] = warp_scan_result; }; std::vector<checked_value_type> scans; scans.reserve(num_values_to_populate); for(int global_warp_id = 0; global_warp_id < num_grid_blocks * num_warps_per_block; global_warp_id++ ) { checked_value_type warp_scan = 0; for(int lane_id = 0; lane_id < kat::warp_size; lane_id++ ) { scans.push_back(warp_scan); warp_scan += make_thread_value(global_warp_id, lane_id); } } auto expected_value_retriever = [=] (size_t pos) -> checked_value_type { return scans[pos]; }; execute_non_uniform_testcase_on_gpu_and_check( testcase_device_function, expected_value_retriever, num_values_to_populate, num_grid_blocks, num_threads_per_block, make_exact_comparison<checked_value_type>, input.data() ); } TEST_CASE("exclusive_prefix_sum") { using checked_value_type = int32_t; // TODO: Try with some other types, e.g. int64_t cuda::grid::dimension_t num_grid_blocks { 1 }; cuda::grid::block_dimension_t num_warps_per_block { 1 }; cuda::grid::block_dimension_t num_threads_per_block { num_warps_per_block * kat::warp_size }; // TODO: What about non-full warps? auto num_values_to_populate = num_threads_per_block * num_grid_blocks; std::vector<checked_value_type> input; input.reserve(num_values_to_populate); auto make_thread_value = []( cuda::grid::dimension_t global_warp_id, cuda::grid::block_dimension_t lane_id) -> checked_value_type { return ((1 + global_warp_id) * 1000) + (10 + 10 * lane_id % 9); }; size_t pos = 0; std::generate_n(std::back_inserter(input), num_values_to_populate, [&]() { auto global_warp_id = pos / kat::warp_size; auto lane_id = pos % kat::warp_size; pos++; return make_thread_value(global_warp_id, lane_id); } ); auto testcase_device_function = [=] KAT_DEV ( size_t, checked_value_type* __restrict target, const checked_value_type* __restrict input ) { // Note: Every thread will set a target value, but there is still just one reduction result // per block. In this variant of reduce, all block threads must obtain the result. 
namespace gi = kat::linear_grid::grid_info; auto thread_input = input[gi::thread::global_id()]; auto warp_exclusive_prefix_sum = kcw::exclusive_prefix_sum(thread_input); target[gi::thread::global_id()] = warp_exclusive_prefix_sum; }; std::vector<checked_value_type> scans; scans.reserve(num_values_to_populate); for(int global_warp_id = 0; global_warp_id < num_grid_blocks * num_warps_per_block; global_warp_id++ ) { checked_value_type warp_scan = 0; for(int lane_id = 0; lane_id < kat::warp_size; lane_id++ ) { scans.push_back(warp_scan); warp_scan += make_thread_value(global_warp_id, lane_id); } } auto expected_value_retriever = [=] (size_t pos) -> checked_value_type { return scans[pos]; }; execute_non_uniform_testcase_on_gpu_and_check( testcase_device_function, expected_value_retriever, num_values_to_populate, num_grid_blocks, num_threads_per_block, make_exact_comparison<checked_value_type>, input.data() ); } TEST_CASE("prefix_sum") { using checked_value_type = int32_t; // TODO: Try with some other types, e.g. int64_t cuda::grid::dimension_t num_grid_blocks { 1 }; cuda::grid::block_dimension_t num_warps_per_block { 1 }; cuda::grid::block_dimension_t num_threads_per_block { num_warps_per_block * kat::warp_size }; // TODO: What about non-full warps? auto num_values_to_populate = num_threads_per_block * num_grid_blocks; std::vector<checked_value_type> input; input.reserve(num_values_to_populate); auto make_thread_value = []( cuda::grid::dimension_t global_warp_id, cuda::grid::block_dimension_t lane_id) -> checked_value_type { return ((1 + global_warp_id) * 1000) + (10 + 10 * lane_id % 9); }; size_t pos = 0; std::generate_n(std::back_inserter(input), num_values_to_populate, [&]() { auto global_warp_id = pos / kat::warp_size; auto lane_id = pos % kat::warp_size; pos++; return make_thread_value(global_warp_id, lane_id); } ); auto testcase_device_function = [=] KAT_DEV ( size_t, checked_value_type* __restrict target, const checked_value_type* __restrict input ) { // Note: Every thread will set a target value, but there is still just one reduction result // per block. In this variant of reduce, all block threads must obtain the result. 
namespace gi = kat::linear_grid::grid_info; auto thread_input = input[gi::thread::global_id()]; auto warp_prefix_sum = kcw::prefix_sum(thread_input); target[gi::thread::global_id()] = warp_prefix_sum; }; std::vector<checked_value_type> scans; scans.reserve(num_values_to_populate); for(int global_warp_id = 0; global_warp_id < num_grid_blocks * num_warps_per_block; global_warp_id++ ) { checked_value_type warp_scan = 0; for(int lane_id = 0; lane_id < kat::warp_size; lane_id++ ) { warp_scan += make_thread_value(global_warp_id, lane_id); scans.push_back(warp_scan); } } auto expected_value_retriever = [=] (size_t pos) -> checked_value_type { return scans[pos]; }; execute_non_uniform_testcase_on_gpu_and_check( testcase_device_function, expected_value_retriever, num_values_to_populate, num_grid_blocks, num_threads_per_block, make_exact_comparison<checked_value_type>, input.data() ); } TEST_CASE("cast_and_copy_n") { using checked_value_type = int32_t; using input_value_type = float; cuda::grid::dimension_t num_grid_blocks { 1 }; cuda::grid::block_dimension_t num_warps_per_block { 3 }; cuda::grid::block_dimension_t num_threads_per_block { num_warps_per_block * kat::warp_size }; size_t length_to_cover_per_warp { kat::warp_size * 2 + 7 }; auto num_values_to_populate = length_to_cover_per_warp * num_warps_per_block * num_grid_blocks; std::vector<input_value_type> input; auto generator = [](size_t pos) -> input_value_type { return 10 + pos % 80 + 0.123; }; size_t pos = 0; std::generate_n(std::back_inserter(input), num_values_to_populate, [&]() { return generator(pos++); }); auto testcase_device_function = [=] KAT_DEV ( size_t, checked_value_type* __restrict target, const input_value_type* __restrict input ) { namespace gi = kat::linear_grid::grid_info; auto source_start = input + length_to_cover_per_warp * gi::warp::global_id(); auto warp_target_start = target + length_to_cover_per_warp * gi::warp::global_id(); kcw::cast_and_copy_n(source_start, length_to_cover_per_warp, warp_target_start); }; auto expected_value_retriever = [=] (size_t pos) -> checked_value_type { return generator(pos); }; execute_non_uniform_testcase_on_gpu_and_check( testcase_device_function, expected_value_retriever, num_values_to_populate, num_grid_blocks, num_threads_per_block, make_exact_comparison<checked_value_type>, input.data() ); } TEST_CASE("cast_and_copy") { using checked_value_type = int32_t; using input_value_type = float; cuda::grid::dimension_t num_grid_blocks { 1 }; cuda::grid::block_dimension_t num_warps_per_block { 3 }; cuda::grid::block_dimension_t num_threads_per_block { num_warps_per_block * kat::warp_size }; size_t length_to_cover_per_warp { kat::warp_size * 2 + 7 }; auto num_values_to_populate = length_to_cover_per_warp * num_warps_per_block * num_grid_blocks; std::vector<input_value_type> input; auto generator = [](size_t pos) -> input_value_type { return 10 + pos % 80 + 0.123; }; size_t pos = 0; std::generate_n(std::back_inserter(input), num_values_to_populate, [&]() { return generator(pos++); }); auto testcase_device_function = [=] KAT_DEV ( size_t, checked_value_type* __restrict target, const input_value_type* __restrict input ) { namespace gi = kat::linear_grid::grid_info; auto source_start = input + length_to_cover_per_warp * gi::warp::global_id(); auto source_end = source_start + length_to_cover_per_warp; auto warp_target_start = target + length_to_cover_per_warp * gi::warp::global_id(); kcw::cast_and_copy(source_start, source_end, warp_target_start); }; auto expected_value_retriever = [=] (size_t pos) 
-> checked_value_type { return generator(pos); }; execute_non_uniform_testcase_on_gpu_and_check( testcase_device_function, expected_value_retriever, num_values_to_populate, num_grid_blocks, num_threads_per_block, make_exact_comparison<checked_value_type>, input.data() ); } TEST_CASE("copy_n") { using checked_value_type = int32_t; cuda::grid::dimension_t num_grid_blocks { 1 }; cuda::grid::block_dimension_t num_warps_per_block { 3 }; cuda::grid::block_dimension_t num_threads_per_block { num_warps_per_block * kat::warp_size }; size_t length_to_cover_per_warp { kat::warp_size * 2 + 7 }; auto num_values_to_populate = length_to_cover_per_warp * num_warps_per_block * num_grid_blocks; std::vector<checked_value_type> input; auto generator = [](size_t pos) -> checked_value_type { return 10 + pos % 80; }; size_t pos = 0; std::generate_n(std::back_inserter(input), num_values_to_populate, [&]() { return generator(pos++); }); auto testcase_device_function = [=] KAT_DEV ( size_t, checked_value_type* __restrict target, const checked_value_type* __restrict input ) { namespace gi = kat::linear_grid::grid_info; auto source_start = input + length_to_cover_per_warp * gi::warp::global_id(); auto warp_target_start = target + length_to_cover_per_warp * gi::warp::global_id(); kcw::copy_n(source_start, length_to_cover_per_warp, warp_target_start); }; auto expected_value_retriever = [=] (size_t pos) -> checked_value_type { return generator(pos); }; execute_non_uniform_testcase_on_gpu_and_check( testcase_device_function, expected_value_retriever, num_values_to_populate, num_grid_blocks, num_threads_per_block, make_exact_comparison<checked_value_type>, input.data() ); } TEST_CASE("copy") { using checked_value_type = int32_t; cuda::grid::dimension_t num_grid_blocks { 1 }; cuda::grid::block_dimension_t num_warps_per_block { 3 }; cuda::grid::block_dimension_t num_threads_per_block { num_warps_per_block * kat::warp_size }; size_t length_to_cover_per_warp { kat::warp_size * 2 + 7 }; auto num_values_to_populate = length_to_cover_per_warp * num_warps_per_block * num_grid_blocks; std::vector<checked_value_type> input; auto generator = [](size_t pos) -> checked_value_type { return 10 + pos % 80; }; size_t pos = 0; std::generate_n(std::back_inserter(input), num_values_to_populate, [&]() { return generator(pos++); }); auto testcase_device_function = [=] KAT_DEV ( size_t, checked_value_type* __restrict target, const checked_value_type* __restrict input ) { namespace gi = kat::linear_grid::grid_info; auto source_start = input + length_to_cover_per_warp * gi::warp::global_id(); auto source_end = source_start + length_to_cover_per_warp; auto warp_target_start = target + length_to_cover_per_warp * gi::warp::global_id(); kcw::copy(source_start, source_end, warp_target_start); }; auto expected_value_retriever = [=] (size_t pos) -> checked_value_type { return generator(pos); }; execute_non_uniform_testcase_on_gpu_and_check( testcase_device_function, expected_value_retriever, num_values_to_populate, num_grid_blocks, num_threads_per_block, make_exact_comparison<checked_value_type>, input.data() ); } TEST_CASE("fill") { using checked_value_type = int32_t; cuda::grid::dimension_t num_grid_blocks { 1 }; cuda::grid::block_dimension_t num_warps_per_block { 3 }; cuda::grid::block_dimension_t num_threads_per_block { num_warps_per_block * kat::warp_size }; size_t length_to_cover_per_warp { kat::warp_size * 2 + 7 }; auto num_values_to_populate = length_to_cover_per_warp * num_warps_per_block * num_grid_blocks; auto resolve_fill_value = [] KAT_HD 
(unsigned warp_id) -> checked_value_type { constexpr const checked_value_type fill_value_base { 456 }; return fill_value_base + (warp_id + 1) * 10000; }; auto testcase_device_function = [=] KAT_DEV ( size_t, checked_value_type* buffer_to_fill_by_entire_grid ) { namespace gi = kat::linear_grid::grid_info; auto start = buffer_to_fill_by_entire_grid + length_to_cover_per_warp * gi::warp::global_id(); auto end = start + length_to_cover_per_warp; auto fill_value = resolve_fill_value(gi::warp::global_id()); kcw::fill(start, end, fill_value); }; auto expected_value_retriever = [=] (size_t pos) { auto processing_warp_global_id = pos / length_to_cover_per_warp; return resolve_fill_value(processing_warp_global_id); }; execute_non_uniform_testcase_on_gpu_and_check( testcase_device_function, expected_value_retriever, num_values_to_populate, num_grid_blocks, num_threads_per_block, make_exact_comparison<checked_value_type> ); } TEST_CASE("fill_n") { using checked_value_type = int32_t; cuda::grid::dimension_t num_grid_blocks { 1 }; cuda::grid::block_dimension_t num_warps_per_block { 3 }; cuda::grid::block_dimension_t num_threads_per_block { num_warps_per_block * kat::warp_size }; size_t length_to_cover_per_warp { kat::warp_size * 2 + 7 }; auto num_values_to_populate = length_to_cover_per_warp * num_warps_per_block * num_grid_blocks; auto resolve_fill_value = [] KAT_HD (unsigned warp_id) -> checked_value_type { constexpr const checked_value_type fill_value_base { 456 }; return fill_value_base + (warp_id + 1) * 10000; }; auto testcase_device_function = [=] KAT_DEV ( size_t, checked_value_type* buffer_to_fill_by_entire_grid ) { namespace gi = kat::linear_grid::grid_info; auto start = buffer_to_fill_by_entire_grid + length_to_cover_per_warp * gi::warp::global_id(); auto fill_value = resolve_fill_value(gi::warp::global_id()); kcw::fill_n(start, length_to_cover_per_warp, fill_value); }; auto expected_value_retriever = [=] (size_t pos) { auto processing_warp_global_id = pos / length_to_cover_per_warp; return resolve_fill_value(processing_warp_global_id); }; execute_non_uniform_testcase_on_gpu_and_check( testcase_device_function, expected_value_retriever, num_values_to_populate, num_grid_blocks, num_threads_per_block, make_exact_comparison<checked_value_type> ); } TEST_CASE("lookup") { using checked_value_type = int32_t; using index_type = uint32_t; cuda::grid::dimension_t num_grid_blocks { 2 }; cuda::grid::block_dimension_t num_warps_per_block { 3 }; cuda::grid::block_dimension_t num_threads_per_block { num_warps_per_block * kat::warp_size }; size_t num_indices_per_warp { kat::warp_size * 2 + 7 }; auto num_values_to_populate = num_indices_per_warp * num_warps_per_block * num_grid_blocks; std::vector<checked_value_type> data = { 101, 202, 303, 404, 505, 606, 707, 808, 909, 1010 }; std::vector<index_type> indices; auto generator = [](size_t pos) -> index_type { return (7 * pos) % 10; }; size_t pos = 0; std::generate_n(std::back_inserter(indices), num_values_to_populate, [&]() { return generator(pos++); }); auto testcase_device_function = [=] KAT_DEV ( size_t, checked_value_type* __restrict target, const checked_value_type* __restrict data, const index_type* __restrict indices ) { namespace gi = kat::linear_grid::grid_info; auto warp_indices_start = indices + num_indices_per_warp * gi::warp::global_id(); auto warp_target_start = target + num_indices_per_warp * gi::warp::global_id(); kcw::lookup(warp_target_start, data, warp_indices_start, num_indices_per_warp); }; auto expected_value_retriever = [=] (size_t pos) 
-> checked_value_type { return data[generator(pos)]; }; execute_non_uniform_testcase_on_gpu_and_check( testcase_device_function, expected_value_retriever, num_values_to_populate, num_grid_blocks, num_threads_per_block, make_exact_comparison<checked_value_type>, data.data(), indices.data() ); } TEST_CASE("elementwise accumulate_n") { using checked_value_type = int32_t; using input_value_type = checked_value_type; cuda::grid::dimension_t num_grid_blocks { 1 }; cuda::grid::block_dimension_t num_warps_per_block { 3 }; cuda::grid::block_dimension_t num_threads_per_block { num_warps_per_block * kat::warp_size }; size_t length_to_cover_per_warp { kat::warp_size * 2 + 7 }; auto num_values_to_populate = length_to_cover_per_warp * num_warps_per_block * num_grid_blocks; std::vector<checked_value_type> input_dest; auto dest_generator = [](size_t pos) -> checked_value_type { return 1000 + pos % 8000; }; size_t pos = 0; std::generate_n(std::back_inserter(input_dest), num_values_to_populate, [&]() { return dest_generator(pos++); }); std::vector<input_value_type> input_src; auto src_generator = [](size_t pos) -> input_value_type { return 10 + pos % 80; }; pos = 0; std::generate_n(std::back_inserter(input_src), num_values_to_populate, [&]() { return src_generator(pos++); }); auto testcase_device_function = [=] KAT_DEV ( size_t, checked_value_type* __restrict result, const checked_value_type* __restrict input_dest, const input_value_type* __restrict input_src ) { namespace gi = kat::linear_grid::grid_info; auto warp_result = result + length_to_cover_per_warp * gi::warp::global_id(); auto warp_dest = input_dest + length_to_cover_per_warp * gi::warp::global_id(); kcw::copy_n(warp_dest, length_to_cover_per_warp, warp_result); auto warp_src = input_src + length_to_cover_per_warp * gi::warp::global_id(); const auto plus = [](checked_value_type& x, input_value_type y) { x += y; }; // So, you might think we should be accumulating into _dest - but we can't do that since it's // read-only. So first let's make a copy of it into the result column, then accumulate there. 
kcw::elementwise_accumulate_n(plus, warp_result, warp_src, length_to_cover_per_warp); }; auto expected_value_retriever = [=] (size_t pos) -> checked_value_type { return dest_generator(pos) + src_generator(pos); }; execute_non_uniform_testcase_on_gpu_and_check( testcase_device_function, expected_value_retriever, num_values_to_populate, num_grid_blocks, num_threads_per_block, make_exact_comparison<checked_value_type>, input_dest.data(), input_src.data() ); } TEST_CASE("elementwise accumulate") { using checked_value_type = int32_t; using input_value_type = checked_value_type; cuda::grid::dimension_t num_grid_blocks { 1 }; cuda::grid::block_dimension_t num_warps_per_block { 3 }; cuda::grid::block_dimension_t num_threads_per_block { num_warps_per_block * kat::warp_size }; size_t length_to_cover_per_warp { kat::warp_size * 2 + 7 }; auto num_values_to_populate = length_to_cover_per_warp * num_warps_per_block * num_grid_blocks; std::vector<checked_value_type> input_dest; auto dest_generator = [](size_t pos) -> checked_value_type { return 1000 + pos % 8000; }; size_t pos = 0; std::generate_n(std::back_inserter(input_dest), num_values_to_populate, [&]() { return dest_generator(pos++); }); std::vector<input_value_type> input_src; auto src_generator = [](size_t pos) -> input_value_type { return 10 + pos % 80; }; pos = 0; std::generate_n(std::back_inserter(input_src), num_values_to_populate, [&]() { return src_generator(pos++); }); auto testcase_device_function = [=] KAT_DEV ( size_t, checked_value_type* __restrict result, const checked_value_type* __restrict input_dest, const input_value_type* __restrict input_src ) { namespace gi = kat::linear_grid::grid_info; auto warp_result = result + length_to_cover_per_warp * gi::warp::global_id(); auto warp_dest = input_dest + length_to_cover_per_warp * gi::warp::global_id(); kcw::copy_n(warp_dest, length_to_cover_per_warp, warp_result); auto warp_src = input_src + length_to_cover_per_warp * gi::warp::global_id(); const auto plus = [](checked_value_type& x, input_value_type y) { x += y; }; // So, you might think we should be accumulating into _dest - but we can't do that since it's // read-only. So first let's make a copy of it into the result column, then accumulate there. kcw::elementwise_accumulate(plus, warp_result, warp_src, warp_src+length_to_cover_per_warp); }; auto expected_value_retriever = [=] (size_t pos) -> checked_value_type { return dest_generator(pos) + src_generator(pos); }; execute_non_uniform_testcase_on_gpu_and_check( testcase_device_function, expected_value_retriever, num_values_to_populate, num_grid_blocks, num_threads_per_block, make_exact_comparison<checked_value_type>, input_dest.data(), input_src.data() ); } } // TEST_SUITE("warp-level")
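// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the test suite above): the inclusive and
// exclusive scan test cases all derive their expected values from the same
// host-side reference - an inclusive scan records the running total *after*
// adding a lane's value, an exclusive scan records it *before*, and the total
// restarts at every warp boundary. The standalone helper below restates that
// reference; the function name reference_warp_scans and the default warp size
// of 32 are assumptions made for this sketch (the tests use kat::warp_size).
#include <cstddef>
#include <cstdint>
#include <vector>

std::vector<int32_t> reference_warp_scans(const std::vector<int32_t>& input,
                                          bool inclusive,
                                          std::size_t warp_size = 32)
{
    std::vector<int32_t> result;
    result.reserve(input.size());
    for (std::size_t base = 0; base < input.size(); base += warp_size) {
        int32_t running = 0;  // the scan restarts independently in each warp
        for (std::size_t lane = 0;
             lane < warp_size && base + lane < input.size(); ++lane) {
            if (inclusive) {
                running += input[base + lane];   // add first, then record
                result.push_back(running);
            } else {
                result.push_back(running);       // record first, then add
                running += input[base + lane];
            }
        }
    }
    return result;
}
// ---------------------------------------------------------------------------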
#pragma once #include <gunrock/util/device_intrinsics.cuh> #include <gunrock/util/track_utils.cuh> #include <gunrock/util/sort_device.cuh> #include <gunrock/app/enactor_base.cuh> #include <gunrock/app/enactor_iteration.cuh> #include <gunrock/app/enactor_loop.cuh> #include <gunrock/app/pr/pr_problem.cuh> #include <gunrock/oprtr/oprtr.cuh> namespace gunrock { namespace app { namespace pr { /** * @brief Speciflying parameters for SSSP Enactor * @param parameters The util::Parameter<...> structure holding all parameter * info \return cudaError_t error message(s), if any */ cudaError_t UseParameters_enactor(util::Parameters &parameters) { cudaError_t retval = cudaSuccess; GUARD_CU(app::UseParameters_enactor(parameters)); GUARD_CU(parameters.Use<bool>( "pull", util::OPTIONAL_ARGUMENT | util::MULTI_VALUE | util::OPTIONAL_PARAMETER, false, "Whether to use pull direction PageRank.", __FILE__, __LINE__)); return retval; } /** * @brief defination of SSSP iteration loop * @tparam EnactorT Type of enactor */ template <typename EnactorT> struct PRIterationLoop : public IterationLoopBase<EnactorT, Use_FullQ | Push> { typedef typename EnactorT::VertexT VertexT; typedef typename EnactorT::SizeT SizeT; typedef typename EnactorT::ValueT ValueT; typedef typename EnactorT::Problem::GraphT::GpT GpT; typedef IterationLoopBase<EnactorT, Use_FullQ | Push> BaseIterationLoop; PRIterationLoop() : BaseIterationLoop() {} /** * @brief Core computation of PageRank, one iteration * @param[in] peer_ Which GPU peers to work on, 0 means local * \return cudaError_t error message(s), if any */ cudaError_t Core(int peer_ = 0) { // Data PageRank that works on auto &enactor = this->enactor[0]; auto &gpu_num = this->gpu_num; auto &data_slice = enactor.problem->data_slices[gpu_num][0]; auto &enactor_slice = enactor.enactor_slices[gpu_num * enactor.num_gpus + peer_]; auto &enactor_stats = enactor_slice.enactor_stats; auto &graph = data_slice.sub_graph[0]; auto &rank_curr = data_slice.rank_curr; auto &rank_next = data_slice.rank_next; auto &rank_temp = data_slice.rank_temp; auto &rank_temp2 = data_slice.rank_temp2; auto &degrees = data_slice.degrees; auto &local_vertices = data_slice.local_vertices; auto &delta = data_slice.delta; auto &threshold = data_slice.threshold; auto &reset_value = data_slice.reset_value; auto &frontier = enactor_slice.frontier; auto &oprtr_parameters = enactor_slice.oprtr_parameters; auto &retval = enactor_stats.retval; auto &iteration = enactor_stats.iteration; auto null_ptr = &local_vertices; null_ptr = NULL; if (iteration != 0) { if (enactor.flag & Debug) util::cpu_mt::PrintMessage("Filter start.", gpu_num, iteration, peer_); auto filter_op = [rank_curr, rank_next, degrees, delta, threshold, reset_value] __host__ __device__(const VertexT &src, VertexT &dest, const SizeT &edge_id, const VertexT &input_item, const SizeT &input_pos, SizeT &output_pos) -> bool { ValueT old_value = rank_curr[dest]; ValueT new_value = delta * rank_next[dest]; new_value = reset_value + new_value; if (degrees[dest] != 0) new_value /= degrees[dest]; if (!isfinite(new_value)) new_value = 0; rank_curr[dest] = new_value; // if (util::isTracking(dest)) // printf("rank[%d] = %f -> %f = (%f + %f * %f) / %d\n", // dest, old_value, new_value, reset_value, // delta, rank_next[dest], degrees[dest]); return (fabs(new_value - old_value) > (threshold * old_value)); }; frontier.queue_length = data_slice.local_vertices.GetSize(); enactor_stats.nodes_queued[0] += frontier.queue_length; frontier.queue_reset = true; oprtr_parameters.filter_mode = 
"BY_PASS"; // filter kernel GUARD_CU(oprtr::Filter<oprtr::OprtrType_V2V>( graph.coo(), &local_vertices, null_ptr, oprtr_parameters, filter_op)); if (enactor.flag & Debug) util::cpu_mt::PrintMessage("Filter end.", gpu_num, iteration, peer_); frontier.queue_index++; // Get back the resulted frontier length GUARD_CU(frontier.work_progress.GetQueueLength( frontier.queue_index, frontier.queue_length, false, oprtr_parameters.stream, true)); if (!data_slice.pull) { GUARD_CU(rank_next.ForEach( [] __host__ __device__(ValueT & rank) { rank = 0.0; }, graph.nodes, util::DEVICE, oprtr_parameters.stream)); } GUARD_CU2(cudaStreamSynchronize(oprtr_parameters.stream), "cudaStreamSynchronize failed"); data_slice.num_updated_vertices = frontier.queue_length; } if (data_slice.pull) { if (enactor.flag & Debug) util::cpu_mt::PrintMessage("NeighborReduce start.", gpu_num, iteration, peer_); auto advance_op = [rank_curr, graph] __host__ __device__( const VertexT &src, VertexT &dest, const SizeT &edge_id, const VertexT &input_item, const SizeT &input_pos, SizeT &output_pos) -> ValueT { return rank_curr[dest]; }; oprtr_parameters.reduce_values_out = &rank_next; oprtr_parameters.reduce_reset = true; oprtr_parameters.reduce_values_temp = &rank_temp; oprtr_parameters.reduce_values_temp2 = &rank_temp2; oprtr_parameters.advance_mode = "ALL_EDGES"; frontier.queue_length = graph.nodes; frontier.queue_reset = true; GUARD_CU(oprtr::NeighborReduce<oprtr::OprtrType_V2V | oprtr::OprtrMode_REDUCE_TO_SRC | oprtr::ReduceOp_Plus>( graph.csc(), null_ptr, null_ptr, oprtr_parameters, advance_op, [] __host__ __device__(const ValueT &a, const ValueT &b) { return a + b; }, (ValueT)0)); } else { if (enactor.flag & Debug) util::cpu_mt::PrintMessage("Advance start.", gpu_num, iteration, peer_); auto advance_op = [rank_curr, rank_next] __host__ __device__( const VertexT &src, VertexT &dest, const SizeT &edge_id, const VertexT &input_item, const SizeT &input_pos, SizeT &output_pos) -> bool { // printf("%d -> %d\n", src, dest); ValueT add_value = rank_curr[src]; if (isfinite(add_value)) { atomicAdd(rank_next + dest, add_value); // ValueT old_val = atomicAdd(rank_next + dest, add_value); // if (dest == 42029) // printf("rank[%d] = %f = %f (rank[%d]) + %f\n", // dest, old_val + add_value, add_value, src, old_val); } return true; }; // Edge Map frontier.queue_length = local_vertices.GetSize(); frontier.queue_reset = true; oprtr_parameters.advance_mode = "ALL_EDGES"; GUARD_CU(oprtr::Advance<oprtr::OprtrType_V2V>( graph.coo(), &local_vertices, null_ptr, oprtr_parameters, advance_op)); } enactor_stats.edges_queued[0] += graph.edges; return retval; } cudaError_t Compute_OutputLength(int peer_) { // No need to load balance or get output size return cudaSuccess; } cudaError_t Check_Queue_Size(int peer_) { // no need to check queue size for PR return cudaSuccess; } /** * @brief Routine to combine received data and local data * @tparam NUM_VERTEX_ASSOCIATES Number of data associated with each * transmition item, typed VertexT * @tparam NUM_VALUE__ASSOCIATES Number of data associated with each * transmition item, typed ValueT * @param received_length The numver of transmition items received * @param[in] peer_ which peer GPU the data came from * \return cudaError_t error message(s), if any */ template <int NUM_VERTEX_ASSOCIATES, int NUM_VALUE__ASSOCIATES> cudaError_t ExpandIncoming(SizeT &received_length, int peer_) { auto &data_slice = this->enactor->problem->data_slices[this->gpu_num][0]; auto &rank_next = data_slice.rank_next; auto expand_op = 
[rank_next] __host__ __device__( VertexT & key, const SizeT &in_pos, VertexT *vertex_associate_ins, ValueT *value__associate_ins) -> bool { ValueT in_val = value__associate_ins[in_pos]; atomicAdd(rank_next + key, in_val); return false; }; cudaError_t retval = BaseIterationLoop::template ExpandIncomingBase<NUM_VERTEX_ASSOCIATES, NUM_VALUE__ASSOCIATES>( received_length, peer_, expand_op); return retval; } cudaError_t UpdatePreds(SizeT num_elements) { // No need to update predecessors return cudaSuccess; } /* * @brief Make_Output function. * @tparam NUM_VERTEX_ASSOCIATES * @tparam NUM_VALUE__ASSOCIATES */ template <int NUM_VERTEX_ASSOCIATES, int NUM_VALUE__ASSOCIATES> cudaError_t MakeOutput(SizeT num_elements) { cudaError_t retval = cudaSuccess; int num_gpus = this->enactor->num_gpus; int gpu_num = this->gpu_num; auto &enactor = this->enactor[0]; auto &enactor_slice = enactor.enactor_slices[gpu_num * num_gpus + ((enactor.flag & Size_Check) ? 0 : num_gpus)]; auto &mgpu_slice = enactor.mgpu_slices[gpu_num]; auto &data_slice = enactor.problem->data_slices[gpu_num][0]; auto &rank_next = data_slice.rank_next; cudaStream_t stream = enactor_slice.stream; if (num_gpus < 2) return retval; for (int peer_ = 1; peer_ < num_gpus; peer_++) { auto &remote_vertices_out = data_slice.remote_vertices_out[peer_]; mgpu_slice.out_length[peer_] = remote_vertices_out.GetSize(); GUARD_CU(mgpu_slice.value__associate_out[peer_].ForAll( [remote_vertices_out, rank_next] __host__ __device__( ValueT * values_out, const SizeT &pos) { values_out[pos] = rank_next[remote_vertices_out[pos]]; }, mgpu_slice.out_length[peer_], util::DEVICE, stream)); } return retval; } bool Stop_Condition(int gpu_num = 0) { auto &enactor_slices = this->enactor->enactor_slices; int num_gpus = this->enactor->num_gpus; for (int gpu = 0; gpu < num_gpus * num_gpus; gpu++) { auto &retval = enactor_slices[gpu].enactor_stats.retval; if (retval == cudaSuccess) continue; printf("(CUDA error %d @ GPU %d: %s\n", retval, gpu % num_gpus, cudaGetErrorString(retval)); fflush(stdout); return true; } auto &data_slices = this->enactor->problem->data_slices; bool all_zero = true; for (int gpu = 0; gpu < num_gpus; gpu++) if (data_slices[gpu]->num_updated_vertices) // PR_queue_length > 0) { // printf("data_slice[%d].PR_queue_length = %d\n", gpu, // data_slice[gpu]->PR_queue_length); all_zero = false; } if (all_zero) return true; for (int gpu = 0; gpu < num_gpus; gpu++) if (enactor_slices[gpu * num_gpus].enactor_stats.iteration < data_slices[0]->max_iter) { // printf("enactor_stats[%d].iteration = %lld\n", gpu, enactor_stats[gpu // * num_gpus].iteration); return false; } return true; } }; /** * @brief PageRank enactor class. 
* @tparam _Problem Problem type we process on * @tparam ARRAY_FLAG Flags for util::Array1D used in the enactor * @tparam cudaHostRegisterFlag Flags for util::Array1D used in the enactor */ template <typename _Problem, util::ArrayFlag ARRAY_FLAG = util::ARRAY_NONE, unsigned int cudaHostRegisterFlag = cudaHostRegisterDefault> class Enactor : public EnactorBase<typename _Problem::GraphT, typename _Problem::VertexT, // LabelT typename _Problem::ValueT, ARRAY_FLAG, cudaHostRegisterFlag> { public: // Definations typedef _Problem Problem; typedef typename Problem::SizeT SizeT; typedef typename Problem::VertexT VertexT; typedef typename Problem::ValueT ValueT; typedef typename Problem::GraphT GraphT; typedef EnactorBase<GraphT, VertexT, ValueT, ARRAY_FLAG, cudaHostRegisterFlag> BaseEnactor; typedef Enactor<Problem, ARRAY_FLAG, cudaHostRegisterFlag> EnactorT; typedef PRIterationLoop<EnactorT> IterationT; // Members Problem *problem; IterationT *iterations; // Methods /** * \addtogroup PublicInterface * @{ */ /** * @brief PREnactor constructor */ Enactor() : BaseEnactor("pr"), problem(NULL) { this->max_num_vertex_associates = 0; this->max_num_value__associates = 1; } /** * @brief PREnactor destructor */ virtual ~Enactor() { // Release(); } /* * @brief Releasing allocated memory space * @param target The location to release memory from * \return cudaError_t error message(s), if any */ cudaError_t Release(util::Location target = util::LOCATION_ALL) { cudaError_t retval = cudaSuccess; GUARD_CU(BaseEnactor::Release(target)); delete[] iterations; iterations = NULL; problem = NULL; return retval; } /** * @brief Initialize the enactor. * @param[in] problem The problem object. * @param[in] target Target location of data * \return cudaError_t error message(s), if any */ cudaError_t Init(Problem &problem, util::Location target = util::DEVICE) { cudaError_t retval = cudaSuccess; this->problem = &problem; GUARD_CU(BaseEnactor::Init(problem, Enactor_None, 2, NULL, target, false)); iterations = new IterationT[this->num_gpus]; for (int gpu = 0; gpu < this->num_gpus; gpu++) { GUARD_CU(iterations[gpu].Init(this, gpu)); } if (this->num_gpus == 1) { GUARD_CU(this->Init_Threads( this, (CUT_THREADROUTINE) & (GunrockThread<EnactorT>))); return retval; } auto &data_slices = problem.data_slices; for (int gpu = 0; gpu < this->num_gpus; gpu++) { auto &data_slice_l = data_slices[gpu][0]; if (target & util::DEVICE) GUARD_CU(util::SetDevice(this->gpu_idx[gpu])); for (int peer = 0; peer < this->num_gpus; peer++) { if (peer == gpu) continue; int peer_ = (peer < gpu) ? peer + 1 : peer; int gpu_ = (peer < gpu) ? 
gpu : gpu + 1; auto &data_slice_p = data_slices[peer][0]; data_slice_l.in_counters[peer_] = data_slice_p.out_counters[gpu_]; if (gpu != 0) { data_slice_l.remote_vertices_in[peer_].SetPointer( data_slice_p.remote_vertices_out[gpu_].GetPointer(util::HOST), data_slice_p.remote_vertices_out[gpu_].GetSize(), util::HOST); } else { data_slice_l.remote_vertices_in[peer_].SetPointer( data_slice_p.remote_vertices_out[gpu_].GetPointer(util::HOST), max(data_slice_p.remote_vertices_out[gpu_].GetSize(), data_slice_p.local_vertices.GetSize()), util::HOST); } GUARD_CU(data_slice_l.remote_vertices_in[peer_].Move( util::HOST, target, data_slice_p.remote_vertices_out[gpu_].GetSize())); for (int t = 0; t < 2; t++) { GUARD_CU( this->mgpu_slices[gpu].value__associate_in[t][peer_].EnsureSize_( data_slice_l.in_counters[peer_], target)); } } } for (int gpu = 1; gpu < this->num_gpus; gpu++) { if (target & util::DEVICE) GUARD_CU(util::SetDevice(this->gpu_idx[gpu])); GUARD_CU(this->mgpu_slices[gpu].value__associate_out[1].EnsureSize_( problem.data_slices[gpu]->local_vertices.GetSize(), target)); } if (target & util::DEVICE) GUARD_CU(util::SetDevice(this->gpu_idx[0])); for (int peer = 1; peer < this->num_gpus; peer++) { GUARD_CU(this->mgpu_slices[0].value__associate_in[0][peer].EnsureSize_( problem.data_slices[peer]->local_vertices.GetSize(), target)); } GUARD_CU(this->Init_Threads( this, (CUT_THREADROUTINE) & (GunrockThread<EnactorT>))); return retval; } /** * @brief Reset enactor * @param[in] src Source node to start primitive. * @param[in] target Target location of data * \return cudaError_t error message(s), if any */ cudaError_t Reset(VertexT src, util::Location target = util::DEVICE) { cudaError_t retval = cudaSuccess; GUARD_CU(BaseEnactor::Reset(target)); for (int gpu = 0; gpu < this->num_gpus; gpu++) { /*thread_slices[gpu].status = ThreadSlice::Status::Wait; if (retval = util::SetDevice(problem -> gpu_idx[gpu])) return retval; if (AdvanceKernelPolicy::ADVANCE_MODE == gunrock::oprtr::advance::TWC_FORWARD) { //return retval; } else { bool over_sized = false; if (retval = Check_Size<SizeT, SizeT> ( this -> size_check, "scanned_edges", problem -> data_slices[gpu] -> local_vertices.GetSize() + 2, problem -> data_slices[gpu] -> scanned_edges, over_sized, -1, -1, -1, false)) return retval; this -> frontier_attribute [gpu * this -> num_gpus].queue_length = problem -> data_slices[gpu] -> local_vertices.GetSize(); retval = gunrock::oprtr::advance::ComputeOutputLength <AdvanceKernelPolicy, Problem, PRFunctor<VertexId, SizeT, Value, Problem>, gunrock::oprtr::advance::V2V>( this -> frontier_attribute + gpu * this -> num_gpus,//frontier_attribute, problem -> graph_slices[gpu] -> row_offsets.GetPointer(util::DEVICE),//d_offsets, problem -> graph_slices[gpu] -> column_indices.GetPointer(util::DEVICE),//d_indices, (SizeT *)NULL, ///d_inv_offsets, (VertexId*)NULL,//d_inv_indices, problem -> data_slices[gpu] -> local_vertices.GetPointer(util::DEVICE),//d_in_key_queue, problem -> data_slices[gpu] -> scanned_edges[0].GetPointer(util::DEVICE),//partitioned_scanned_edges->GetPointer(util::DEVICE), problem -> graph_slices[gpu] -> nodes,//max_in, problem -> graph_slices[gpu] -> edges,//max_out, thread_slices[gpu].context[0][0], problem -> data_slices[gpu] -> streams[0], //ADVANCE_TYPE, false, false, false); if (retval = this -> frontier_attribute[gpu * this -> num_gpus].output_length.Move(util::DEVICE, util::HOST, 1, 0, problem -> data_slices[gpu] -> streams[0])) return retval; if (retval = util::GRError(cudaStreamSynchronize(problem 
-> data_slices[gpu] -> streams[0]), "cudaStreamSynchronize failed", __FILE__, __LINE__)) return retval; }*/ for (int peer = 0; peer < this->num_gpus; peer++) { auto &frontier = this->enactor_slices[gpu * this->num_gpus + peer].frontier; frontier.queue_length = (peer != 0) ? 0 : this->problem->data_slices[gpu]->local_vertices.GetSize(); frontier.queue_index = 0; // Work queue index frontier.queue_reset = true; this->enactor_slices[gpu * this->num_gpus + peer] .enactor_stats.iteration = 0; } if (this->num_gpus > 1) { if (target & util::DEVICE) GUARD_CU(util::SetDevice(this->gpu_idx[gpu])); this->mgpu_slices[gpu].value__associate_orgs[0] = this->problem->data_slices[gpu]->rank_next.GetPointer(target); GUARD_CU(this->mgpu_slices[gpu].value__associate_orgs.Move(util::HOST, target)); } } GUARD_CU(BaseEnactor::Sync()); return retval; } /** * @brief one run of sssp, to be called within GunrockThread * @param thread_data Data for the CPU thread * \return cudaError_t error message(s), if any */ cudaError_t Run(ThreadSlice &thread_data) { gunrock::app::Iteration_Loop<0, 1, IterationT>( thread_data, iterations[thread_data.thread_num]); return cudaSuccess; } /** * @brief Enacts a PR computing on the specified graph. * @param[in] src Source node to start primitive. * \return cudaError_t error message(s), if any */ cudaError_t Enact(VertexT src) { cudaError_t retval = cudaSuccess; GUARD_CU(this->Run_Threads(this)); util::PrintMsg("GPU PageRank Done.", this->flag & Debug); return retval; } cudaError_t Extract() { cudaError_t retval = cudaSuccess; auto &data_slices = this->problem->data_slices; int num_gpus = this->num_gpus; for (int gpu_num = 1; gpu_num < num_gpus; gpu_num++) { GUARD_CU(util::SetDevice(this->gpu_idx[gpu_num])); auto &data_slice = data_slices[gpu_num][0]; auto &enactor_slice = this->enactor_slices[gpu_num * num_gpus]; auto &degrees = data_slice.degrees; auto &rank_curr = data_slice.rank_curr; auto &rank_out = this->mgpu_slices[gpu_num].value__associate_out[1]; auto &enactor_stats = enactor_slice.enactor_stats; auto &stream = enactor_slice.stream2; GUARD_CU(data_slice.local_vertices.ForAll( [rank_curr, degrees, rank_out] __host__ __device__(VertexT * vertices, const SizeT &pos) { VertexT v = vertices[pos]; ValueT rank = rank_curr[v]; if (degrees[v] != 0) rank *= degrees[v]; rank_out[pos] = rank; }, data_slice.local_vertices.GetSize(), util::DEVICE, stream)); enactor_stats.iteration = 0; PushNeighbor<EnactorT, 0, 1>(*this, gpu_num, 0); SetRecord(this->mgpu_slices[gpu_num], enactor_stats.iteration, 1, 0, stream); data_slice.final_event_set = true; } GUARD_CU(util::SetDevice(this->gpu_idx[0])); auto &data_slice = data_slices[0][0]; auto &enactor_slice = this->enactor_slices[0]; auto &degrees = data_slice.degrees; auto &rank_curr = data_slice.rank_curr; auto &stream = enactor_slice.stream2; GUARD_CU(data_slice.local_vertices.ForAll( [rank_curr, degrees] __host__ __device__(VertexT * vertices, const SizeT &pos) { VertexT v = vertices[pos]; ValueT rank = rank_curr[v]; if (degrees[v] != 0) rank *= degrees[v]; rank_curr[v] = rank; }, data_slice.local_vertices.GetSize(), util::DEVICE, stream)); for (int peer = 1; peer < num_gpus; peer++) { GUARD_CU2( cudaMemcpyAsync( data_slice.remote_vertices_in[peer].GetPointer(util::DEVICE), data_slices[peer]->local_vertices.GetPointer(util::HOST), sizeof(VertexT) * data_slices[peer]->local_vertices.GetSize(), cudaMemcpyHostToDevice, this->enactor_slices[peer].stream), "cudaMemcpyAsync failed"); } for (int peer = 1; peer < num_gpus; peer++) { int peer_iteration = 
this->enactor_slices[peer * num_gpus].enactor_stats.iteration; GUARD_CU2( cudaStreamWaitEvent( this->enactor_slices[peer].stream, this->mgpu_slices[peer].events[peer_iteration % 4][0][0], 0), "cudaStreamWaitEvent failed"); auto &rank_in = this->mgpu_slices[0].value__associate_in[peer_iteration % 2][peer]; GUARD_CU(data_slice.remote_vertices_in[peer].ForAll( [rank_curr, rank_in] __host__ __device__(VertexT * keys_in, SizeT & pos) { VertexT v = keys_in[pos]; rank_curr[v] = rank_in[pos]; }, data_slices[peer]->local_vertices.GetSize(), util::DEVICE, this->enactor_slices[peer].stream)); GUARD_CU2( cudaEventRecord( this->mgpu_slices[0] .events[enactor_slice.enactor_stats.iteration % 4][peer][0], this->enactor_slices[peer].stream), "cudaEventRecord failed"); GUARD_CU2( cudaStreamWaitEvent( this->enactor_slices[0].stream, this->mgpu_slices[0] .events[enactor_slice.enactor_stats.iteration % 4][peer][0], 0), "cudaStreamWaitEvent failed"); } SizeT nodes = data_slice.org_nodes; GUARD_CU(data_slice.node_ids.EnsureSize_(nodes, util::DEVICE)); GUARD_CU(data_slice.temp_vertex.EnsureSize_(nodes, util::DEVICE)); GUARD_CU(data_slice.node_ids.ForAll( [] __host__ __device__(VertexT * ids, const SizeT &pos) { ids[pos] = pos; }, nodes, util::DEVICE, this->enactor_slices[0].stream)); // util::PrintMsg("#nodes = " + std::to_string(nodes)); /*size_t cub_required_size = 0; void* temp_storage = NULL; cub::DoubleBuffer<ValueT > key_buffer( data_slice.rank_curr.GetPointer(util::DEVICE), data_slice.rank_next.GetPointer(util::DEVICE)); cub::DoubleBuffer<VertexT> value_buffer( data_slice.node_ids .GetPointer(util::DEVICE), data_slice.temp_vertex.GetPointer(util::DEVICE)); GUARD_CU2(cub::DeviceRadixSort::SortPairsDescending( temp_storage, cub_required_size, key_buffer, value_buffer, nodes, 0, sizeof(ValueT) * 8, this -> enactor_slices[0].stream), "cubDeviceRadixSort failed"); GUARD_CU(data_slice.cub_sort_storage.EnsureSize_( cub_required_size, util::DEVICE)); GUARD_CU2(cudaDeviceSynchronize(), "cudaDeviceSynchronize failed."); printf("cub_sort_stoarge = %p, size = %d\n", data_slice.cub_sort_storage.GetPointer(util::DEVICE), data_slice.cub_sort_storage.GetSize()); // sort according to the rank of nodes GUARD_CU2(cub::DeviceRadixSort::SortPairsDescending( data_slice.cub_sort_storage.GetPointer(util::DEVICE), cub_required_size, key_buffer, value_buffer, nodes, 0, sizeof(ValueT) * 8, this -> enactor_slices[0].stream), "cubDeviceRadixSort failed"); GUARD_CU2(cudaDeviceSynchronize(), "cudaDeviceSynchronize failed."); if (key_buffer.Current() != data_slice.rank_curr.GetPointer(util::DEVICE)) { ValueT *keys = key_buffer.Current(); GUARD_CU(data_slice.rank_curr.ForEach(keys, []__host__ __device__(ValueT &rank, const ValueT &key) { rank = key; }, nodes, util::DEVICE, this -> enactor_slices[0].stream)); } if (value_buffer.Current() != data_slice.node_ids.GetPointer(util::DEVICE)) { VertexT *values = value_buffer.Current(); GUARD_CU(data_slice.node_ids.ForEach(values, []__host__ __device__(VertexT &node_id, const VertexT &val) { node_id = val; }, nodes, util::DEVICE, this -> enactor_slices[0].stream)); }*/ // util::Array1D<SizeT, char> cub_temp_space; GUARD_CU(util::cubSortPairsDescending( data_slice.cub_sort_storage, data_slice.rank_curr, data_slice.rank_next, data_slice.node_ids, data_slice.temp_vertex, nodes, 0, sizeof(ValueT) * 8, this->enactor_slices[0].stream)); // GUARD_CU2(cudaDeviceSynchronize(), // "cudaDeviceSynchronize failed."); auto &temp_vertex = data_slice.temp_vertex; // auto &rank_curr = data_slice.rank_curr; auto 
&rank_next = data_slice.rank_next; GUARD_CU(data_slice.node_ids.ForAll( [temp_vertex, rank_curr, rank_next] __host__ __device__( VertexT * ids, const SizeT &v) { ids[v] = temp_vertex[v]; rank_curr[v] = rank_next[v]; }, nodes, util::DEVICE, this->enactor_slices[0].stream)); if (data_slice.scale) { ValueT a = 1.0 / (ValueT)nodes; GUARD_CU(data_slice.rank_curr.ForEach( [a] __host__ __device__(ValueT & rank) { rank *= a; }, nodes, util::DEVICE, this->enactor_slices[0].stream)); } GUARD_CU2(cudaStreamSynchronize(this->enactor_slices[0].stream), "cudaStreamSynchronize failed"); return retval; } /** @} */ }; } // namespace pr } // namespace app } // namespace gunrock // Leave this at the end of the file // Local Variables: // mode:c++ // c-file-style: "NVIDIA" // End:
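// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the Gunrock sources above): per iteration,
// the Core() routine applies, for every vertex v,
//     rank_curr[v] = (reset_value + delta * rank_next[v]) / degree(v)
// and keeps v active while |new - old| > threshold * old, where rank_next was
// accumulated by pushing the (already degree-normalized) rank_curr of each
// neighbor. The host-side reference below restates that update rule on a toy
// CSR graph; the CsrGraph layout and the name pagerank_reference are
// assumptions made for this sketch, not Gunrock data structures.
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <vector>

struct CsrGraph {
    std::vector<std::size_t> row_offsets;   // size = num_nodes + 1
    std::vector<std::size_t> col_indices;   // size = num_edges
    std::size_t num_nodes() const { return row_offsets.size() - 1; }
};

// rank[] holds the degree-normalized rank, mirroring rank_curr in the enactor.
void pagerank_reference(const CsrGraph& g, std::vector<double>& rank,
                        double delta, double reset_value, double threshold,
                        int max_iter)
{
    std::vector<double> next(g.num_nodes(), 0.0);
    for (int iter = 0; iter < max_iter; ++iter) {
        // "Advance" phase: every vertex pushes its rank to all of its neighbors.
        std::fill(next.begin(), next.end(), 0.0);
        for (std::size_t v = 0; v < g.num_nodes(); ++v)
            for (std::size_t e = g.row_offsets[v]; e < g.row_offsets[v + 1]; ++e)
                next[g.col_indices[e]] += rank[v];
        // "Filter" phase: damped update, degree normalization, convergence test.
        bool any_active = false;
        for (std::size_t v = 0; v < g.num_nodes(); ++v) {
            std::size_t degree = g.row_offsets[v + 1] - g.row_offsets[v];
            double new_rank = reset_value + delta * next[v];
            if (degree != 0) new_rank /= degree;
            if (!std::isfinite(new_rank)) new_rank = 0;
            if (std::fabs(new_rank - rank[v]) > threshold * rank[v]) any_active = true;
            rank[v] = new_rank;
        }
        if (!any_active) break;   // no vertex changed beyond the threshold
    }
}
// ---------------------------------------------------------------------------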
#include "Template.h" #include <iostream> #include <fstream> using namespace std; #include "ErrorCode.h" // Host 静态方法:newTemplate(创建模板) __host__ int TemplateBasicOp::newTemplate(Template **outtpl) { // 检查用于盛放新模板的指针是否为 NULL。 if (outtpl == NULL) return NULL_POINTER; // 申请一个新的 TemplateCuda 型数据,本方法最后会将其中的 tplMeta 域返回给 // outtpl,这样 outtpl 就有了一个对应的 TemplateCuda 型伴随数据。 TemplateCuda *tplCud = new TemplateCuda; // 初始化各种元数据。 tplCud->tplMeta.count = 0; tplCud->tplMeta.tplData = NULL; tplCud->attachedData = NULL; tplCud->deviceId = -1; // 将 TemplateCuda 型数据中的 tplMeta 赋值给输出参数。 *outtpl = &(tplCud->tplMeta); // 处理完毕,退出。 return NO_ERROR; } // Host 静态方法:deleteTemplate(销毁模板) __host__ int TemplateBasicOp::deleteTemplate(Template *intpl) { // 检查模板的指针是否为 NULL。 if (intpl == NULL) return NULL_POINTER; // 根据输入参数的 Template 指针,得到对应的 TemplateCuda 型数据。 TemplateCuda *intplCud = TEMPLATE_CUDA(intpl); // 检查模板所在的地址空间是否合法,如果模板所在地址空间不属于 Host 或任何一 // 个 Device,则该函数报“数据溢出”错误,表示无法处理。 int devcnt; cudaGetDeviceCount(&devcnt); if (intplCud->deviceId >= devcnt) return OP_OVERFLOW; // 获取当前 Device ID。 int curdevid; cudaGetDevice(&curdevid); // 释放模板数据,即坐标数据。 if (intpl->tplData == NULL || intpl->count == 0) { // 如果输入模板是空的,则不进行模板数据释放操作(因为本来也没有数据可被 // 释放)。 // Do Nothing; } if (intplCud->deviceId < 0) { // 对于数据存储于 Host 内存,直接利用 delete 关键字释放图像数据。 delete[] intpl->tplData; delete[] intplCud->attachedData; } else { // 对于数据存储于 Device 内存中,则需要首先切换设备,将该设备作为当前 // Device 设备,然后释放之,最后还需要将设备切换回来以保证后续处理的正 // 确性。 cudaSetDevice(intplCud->deviceId); cudaFree(intpl->tplData); cudaFree(intplCud->attachedData); cudaSetDevice(curdevid); } // 最后还需要释放模板的元数据 delete intplCud; // 处理完毕,返回。 return NO_ERROR; } // Host 静态方法:makeAtCurrentDevice(在当前 Device 内存中构建数据) __host__ int TemplateBasicOp::makeAtCurrentDevice(Template *tpl, size_t count) { // 检查输入模板是否为 NULL if (tpl == NULL) return NULL_POINTER; // 检查给定的模板中坐标点数量 if (count < 1) return INVALID_DATA; // 检查模板是否为空模板 if (tpl->tplData != NULL) return UNMATCH_IMG; // 获取 tpl 对应的 TemplateCuda 型数据。 TemplateCuda *tplCud = TEMPLATE_CUDA(tpl); // 在当前的 Device 上申请存储指定坐标数量的模板所需要的内存空间。 cudaError_t cuerrcode; cuerrcode = cudaMalloc((void **)(&tpl->tplData), 2 * count * sizeof (int)); if (cuerrcode != cudaSuccess) { tpl->tplData = NULL; return CUDA_ERROR; } // 为附属数据在当前的 Device 上申请内存空间。 cuerrcode = cudaMalloc((void **)(&tplCud->attachedData), count * sizeof (float)); if (cuerrcode != cudaSuccess) { // 如果附属数据空间申请失败,需要释放掉先前申请的坐标数据的内存空间。 cudaFree(tpl->tplData); tpl->tplData = NULL; tplCud->attachedData = NULL; return CUDA_ERROR; } // 获取当前 Device ID。 int curdevid; cudaGetDevice(&curdevid); // 修改模板的元数据。 tpl->count = count; tplCud->deviceId = curdevid; // 处理完毕,退出。 return NO_ERROR; } // Host 静态方法:makeAtHost(在 Host 内存中构建数据) __host__ int TemplateBasicOp::makeAtHost(Template *tpl, size_t count) { // 检查输入模板是否为 NULL if (tpl == NULL) return NULL_POINTER; // 检查给定的模板中坐标点数量 if (count < 1) return INVALID_DATA; // 检查模板是否为空模板 if (tpl->tplData != NULL) return UNMATCH_IMG; // 获取 tpl 对应的 TemplateCuda 型数据。 TemplateCuda *tplCud = TEMPLATE_CUDA(tpl); // 为图像数据在 Host 内存中申请空间 tpl->tplData = new int[count * 2]; if (tpl->tplData == NULL) return OUT_OF_MEM; // 为附属数据在 Host 内存中申请空间。 tplCud->attachedData = new float[count]; if (tplCud->attachedData == NULL) { delete[] tpl->tplData; tpl->tplData = NULL; return OUT_OF_MEM; } // 设置模板中的元数据 tpl->count = count; tplCud->deviceId = -1; // 处理完毕,返回。 return NO_ERROR; } // Host 静态方法:readFromFile(从文件读取模板) __host__ int TemplateBasicOp::readFromFile(const char *filepath, Template *outtpl) { // 这段代码仅支持 int 型尺寸为 2、4、8 三种情况。目前绝大部分的系统,采用了 
// sizeof (int) == 4 的情况,少数早期的 DOS 和 Windows 系统中 sizeof (int) // == 2。 if (sizeof (int) != 2 && sizeof (int) != 4 && sizeof (int) != 8) return UNIMPLEMENT; // 检查文件路径和模板是否为 NULL。 if (filepath == NULL || outtpl == NULL) return NULL_POINTER; // 根据输入参数的 Template 型指针,得到对应的 TemplateCuda 型数据。 TemplateCuda *outtplCud = TEMPLATE_CUDA(outtpl); // 检查模板所在的地址空间是否合法,如果模板所在地址空间不属于 Host 或任何一 // 个 Device,则该函数报“数据溢出”错误,表示无法处理。 int devcnt; cudaGetDeviceCount(&devcnt); if (outtplCud->deviceId >= devcnt) return OP_OVERFLOW; // 获取当前 Device ID。 int curdevid; cudaGetDevice(&curdevid); // 打开模板文件。 ifstream tplfile(filepath, ios::in | ios::binary); if (!tplfile) return NO_FILE; // 将文件读指针挪到文件的开头处。该步骤虽然显得多余,但是却可以确保操作的正 // 确。 tplfile.seekg(0, ios::beg); // 读取文件的前四个字节,这是文件的类型头,如果类型头为 TPLT,则说明该文件 // 是模板文件。 char typestr[5] = { '\0' }; tplfile.read(typestr, 4); if (strcmp(typestr, "TPLT") != 0) return WRONG_FILE; // 从文件中获取模板中包含的坐标点的数量。如果坐标点数量小于 1,则报错。 size_t count = 0; tplfile.read(reinterpret_cast<char *>(&count), 4); if (count < 1) return WRONG_FILE; // 读取并丢弃掉 20 个字节的保留位。 char disdata[21] = { '\0' }; tplfile.read(disdata, 20); // 为在内存中保存模板的坐标点而申请新的数据空间。为了避免频繁的数据申请与释 // 放,如果发现原来模板中的坐标点数量和新的数据中坐标点数量相同,且原来的数 // 据存储于 Host 内存,则会重用这段内存空间,不去重新申请内存。 int *newdata; float *newattach; bool reusedata; if (outtpl->tplData != NULL && outtpl->count == count && outtplCud->deviceId == -1) { // 若数据可以重用,则使用原来的内存空间。 newdata = outtpl->tplData; newattach = outtplCud->attachedData; reusedata = true; } else { // 若数据不能重用,则重新申请合适的内存空间。 newdata = new int[count * 2]; newattach = new float[count]; reusedata = false; if (newdata == NULL || newattach == NULL) { delete[] newdata; delete[] newattach; return OUT_OF_MEM; } } // 读取坐标点数据。因为文件中存储的坐标点采用了 32 位有符号整形数,这里需要 // 根据系统中 int 型数据的尺寸采取不同的转换策略。 if (sizeof (int) == 2) { // 对于 sizeof (int) == 2 的系统通常 long 型数据为 32 位,因此需要逐个 // 读取后转成 int 型存放到数据数组中。 long tmp; for (int i = 0; i < count * 2; i++) { tplfile.read(reinterpret_cast<char *>(&tmp), 4); newdata[i] = (int)tmp; } } else if (sizeof (int) == 8) { // 对于 sizeof (int) == 8 的系统通常 short 型数据为 32 位,因此需要逐个 // 读取后转成 int 型存放到数据数组中。 short tmp; for (int i = 0; i < count * 2; i++) { tplfile.read(reinterpret_cast<char *>(&tmp), 4); newdata[i] = (int)tmp; } } else { // 对于 sizeof (int) == 4 的系统,不需要进行任何的转换,读取后的数据可 // 读取存放到数据数组中。 tplfile.read(reinterpret_cast<char *>(newdata), count * 2 * 4); } // 根据 IEEE 的规定 float 型数据为 4 字节,因此这里就采用了直接读取,而没有 // 像处理坐标点数据那样做了很多的数据尺寸判断。 tplfile.read(reinterpret_cast<char *>(newattach), count * 4); // 当数据已经成功的读取后,释放原来数据占用的内存空间,防止内存泄漏。 if (outtpl->tplData != NULL && !reusedata) { if (outtplCud->deviceId == -1) { // 如果原来的数据存放在 Host 内存中,则直接通过 delete 关键字释放。 delete[] outtpl->tplData; delete[] outtplCud->attachedData; } else { // 如果原来的数据存放在 Device 内存中,则切换到相应的 Device 后,使 // 用 cudaFree 释放。 cudaSetDevice(outtplCud->deviceId); cudaFree(outtpl->tplData); cudaFree(outtplCud->attachedData); cudaSetDevice(curdevid); } } // 使用新的数据更新模板的元数据。 outtpl->count = count; outtpl->tplData = newdata; outtplCud->attachedData = newattach; outtplCud->deviceId = -1; // 处理完毕,返回。 return NO_ERROR; } // Host 静态方法:writeToFile(将模板写入文件) __host__ int TemplateBasicOp::writeToFile(const char *filepath, Template *intpl) { // 这段代码仅支持 int 型尺寸为 2、4、8 三种情况。目前绝大部分的系统,采用了 // sizeof (int) == 4 的情况,少数早期的 DOS 和 Windows 系统中 sizeof (int) // == 2。 if (sizeof (int) != 2 && sizeof (int) != 4 && sizeof (int) != 8) return UNIMPLEMENT; // 检查文件路径和模板是否为 NULL。 if (filepath == NULL || intpl == NULL) return NULL_POINTER; // 打开需要写入的文件。 ofstream tplfile(filepath, ios::out | ios::binary); if (!tplfile) return 
NO_FILE; // 根据输入参数的 Template 型指针,得到对应的 TemplateCuda 型数据。 TemplateCuda *intplCud = TEMPLATE_CUDA(intpl); // 将模板的数据拷贝回 Host 内存中,这样模板就可以被下面的代码所读取,然后将 // 模板的数据写入到磁盘中。这里需要注意的是,安排模板的拷贝过程在文件打开之 // 后是因为,如果一旦文件打开失败,则不会改变模板在内存中的存储状态,这可能 // 会对后续处理更加有利。 int errcode; errcode = TemplateBasicOp::copyToHost(intpl); if (errcode < 0) return errcode; // 向文件中写入文件类型字符串 static char typestr[] = "TPLT"; tplfile.write(typestr, 4); // 向文件中写入模板含有的坐标点数量。 tplfile.write(reinterpret_cast<char *>(&intpl->count), 4); // 向文件中写入 20 个字节的保留位 static char reserved[20] = { '\0' }; tplfile.write(reserved, 20); // 向文件中写入坐标数据,因为考虑到。为了保证每个整型数据占用 4 个字节,这 // 里对不同的情况进行了处理。不过针对目前绝大部分系统来说,sizeof (int) == // 4,因此绝大部分情况下,编译器会选择 else 分支。如果委托方认为系统是运行 // 在 sizeof (int) == 4 的系统之上,也可以删除前面的两个分支,直接使用最后 // 的 else 分支。 if (sizeof (int) == 2) { // 对于 sizeof (int) == 2 的系统来说,long 通常是 32 位的,因此,需要逐 // 个的将数据转换成 32 位的 long 型,然后进行处理。 long tmp; for (int i = 0; i < intpl->count * 2; i++) { tmp = (long)(intpl->tplData[i]); tplfile.write(reinterpret_cast<char *>(&tmp), 4); } } else if (sizeof (int) == 8) { // 对于 sizeof (int) == 8 的系统来说,short 通常是 32 位的,因此,需要 // 逐个的将数据转换成 32 位的 short 型,然后进行处理。 short tmp; for (int i = 0; i < intpl->count * 2; i++) { tmp = (short)(intpl->tplData[i]); tplfile.write(reinterpret_cast<char *>(&tmp), 4); } } else { // 如果 sizeof (int) == 4,则可以直接将数据写入磁盘,而不需要任何的转换 // 过程。 tplfile.write(reinterpret_cast<char *>(intpl->tplData), intpl->count * 2 * 4); } // 根据 IEEE 的规定,float 型数据通常采用 4 字节的形式,因此这里没有做数据 // 长度的判断,而是直接使用了 4 字节存储数据到磁盘。 tplfile.write(reinterpret_cast<char *>(intplCud->attachedData), intpl->count * 4); // 处理完毕,返回。 return NO_ERROR; } // Host 静态方法:copyToCurrentDevice(将模板拷贝到当前 Device 内存上) __host__ int TemplateBasicOp::copyToCurrentDevice(Template *tpl) { // 检查模板是否为 NULL。 if (tpl == NULL) return NULL_POINTER; // 根据输入参数的 Template 型指针,得到对应的 TemplateCuda 型数据。 TemplateCuda *tplCud = TEMPLATE_CUDA(tpl); // 检查模板所在的地址空间是否合法,如果模板所在地址空间不属于 Host 或任何一 // 个 Device,则该函数报“数据溢出”错误,表示无法处理。 int devcnt; cudaGetDeviceCount(&devcnt); if (tplCud->deviceId >= devcnt) return OP_OVERFLOW; // 获取当前 Device ID。 int curdevid; cudaGetDevice(&curdevid); // 如果模板是一个不包含数据的空模板,则报错。 if (tpl->tplData == NULL || tpl->count == 0) return UNMATCH_IMG; // 对于不同的情况,将模板数据拷贝到当前设备上。 if (tplCud->deviceId < 0) { // 如果模板的数据位于 Host 内存上,则需要在当前 Device 的内存空间上申请 // 空间,然后将 Host 内存上的数据拷贝到当前 Device 上。 int *devptr; // 新的坐标数据空间,在当前 Device 上。 float *attachptr; // 新的附属数据空间,在当前 Device 上。 cudaError_t cuerrcode; // CUDA 调用返回的错误码。 // 在当前设备上申请坐标数据的空间。 cuerrcode = cudaMalloc((void **)(&devptr), tpl->count * 2 * sizeof (int)); if (cuerrcode != cudaSuccess) return CUDA_ERROR; // 当前设备商申请附属数据的空间。 cuerrcode = cudaMalloc((void **)(&attachptr), tpl->count * sizeof (float)); if (cuerrcode != cudaSuccess) { cudaFree(devptr); return CUDA_ERROR; } // 将原来存储在 Host 上坐标数据拷贝到当前 Device 上。 cuerrcode = cudaMemcpy(devptr, tpl->tplData, tpl->count * 2 * sizeof (int), cudaMemcpyHostToDevice); if (cuerrcode != cudaSuccess) { cudaFree(devptr); cudaFree(attachptr); return CUDA_ERROR; } // 将原来存储在 Host 上附属数据拷贝到当前 Device 上。 cuerrcode = cudaMemcpy(attachptr, tplCud->attachedData, tpl->count * sizeof (float), cudaMemcpyHostToDevice); if (cuerrcode != cudaSuccess) { cudaFree(devptr); cudaFree(attachptr); return CUDA_ERROR; } // 释放掉原来存储于 Host 内存上的数据。 delete[] tpl->tplData; delete[] tplCud->attachedData; // 更新模版数据,把新的在当前 Device 上申请的数据和相关数据写入模版元数 // 据中。 tpl->tplData = devptr; tplCud->attachedData = attachptr; tplCud->deviceId = curdevid; // 操作完毕,返回。 return NO_ERROR; } else if (tplCud->deviceId != curdevid) { // 对于数据存在其他 Device 
的情况,仍旧要在当前 Device 上申请数据空间, // 并从另一个 Device 上拷贝数据到新申请的当前 Device 的数据空间中。 int *devptr; // 新申请的当前 Device 上的坐标数据。 float *attachptr; // 新申请的当前 Device 上的附属数据。 cudaError_t cuerrcode; // CUDA 调用返回的错误码。 // 在当前 Device 上申请坐标数据空间。 cuerrcode = cudaMalloc((void **)(&devptr), tpl->count * 2 * sizeof (int)); if (cuerrcode != cudaSuccess) return CUDA_ERROR; // 在当前 Device 上申请附属数据空间。 cuerrcode = cudaMalloc((void **)(&attachptr), tpl->count * sizeof (float)); if (cuerrcode != cudaSuccess) { cudaFree(devptr); return CUDA_ERROR; } // 将数据从模板原来的存储位置拷贝到当前的 Device 上。 cuerrcode = cudaMemcpyPeer(devptr, curdevid, tpl->tplData, tplCud->deviceId, tpl->count * 2 * sizeof (int)); if (cuerrcode != cudaSuccess) { cudaFree(devptr); cudaFree(attachptr); return CUDA_ERROR; } // 将附属数据从模板原来的存储位置拷贝到当前的 Device 上。 cuerrcode = cudaMemcpyPeer(attachptr, curdevid, tplCud->attachedData, tplCud->deviceId, tpl->count * sizeof (float)); if (cuerrcode != cudaSuccess) { cudaFree(devptr); cudaFree(attachptr); return CUDA_ERROR; } // 释放掉模板在原来的 Device 上的数据。 cudaFree(tpl->tplData); cudaFree(tplCud->attachedData); // 将新的图像数据信息写入到图像元数据中。 tpl->tplData = devptr; tplCud->attachedData = attachptr; tplCud->deviceId = curdevid; // 操作完成,返回。 return NO_ERROR; } // 对于其他情况,即模板数据本来就在当前 Device 上,则直接返回,不进行任何的 // 操作。 return NO_ERROR; } // Host 静态方法:copyToCurrentDevice(将模板拷贝到当前 Device 内存上) __host__ int TemplateBasicOp::copyToCurrentDevice( Template *srctpl, Template *dsttpl) { // 检查输入模板是否为 NULL。 if (srctpl == NULL || dsttpl == NULL) return NULL_POINTER; // 如果输出模板为 NULL 或者和输入模板为同一个模板,则转而调用对应的 // In-place 版本的函数。 if (dsttpl == NULL || dsttpl == srctpl) return copyToCurrentDevice(srctpl); // 获取 srctpl 和 dsttpl 对应的 TemplateCuda 型指针。 TemplateCuda *srctplCud = TEMPLATE_CUDA(srctpl); TemplateCuda *dsttplCud = TEMPLATE_CUDA(dsttpl); // 用来存放旧的 dsttpl 数据,使得在拷贝操作失败时可以恢复为原来的可用的数据 // 信息,防止系统进入一个混乱的状态。 TemplateCuda olddsttplCud = *dsttplCud; // 旧的 dsttpl 数据 bool reusedata = true; // 记录是否重用了原来的模板数据空间。 // 该值为 ture,则原来的数据空间被重 // 用,不需要在之后释放数据,否则需要 // 在最后释放旧的空间。 // 如果源模板是一个空模板,则不进行任何操作,直接报错。 if (srctpl->tplData == NULL || srctpl->count == 0) return INVALID_DATA; // 检查模板所在的地址空间是否合法,如果模板所在地址空间不属于 Host 或任何一 // 个 Device,则该函数报“数据溢出”错误,表示无法处理。 int devcnt; cudaGetDeviceCount(&devcnt); if (srctplCud->deviceId >= devcnt || dsttplCud->deviceId >= devcnt) return OP_OVERFLOW; // 获取当前 Device ID。 int curdevid; cudaGetDevice(&curdevid); // 如果目标模板中存在有数据,则需要根据情况,若原来的数据不存储在当前的 // Device 上,或者即使存储在当前的 Device 上,但数据尺寸不匹配,则需要释放 // 掉原来申请的空间,以便重新申请合适的内存空间。此处不进行真正的释放操作, // 其目的在于当后续操作出现错误时,可以很快的恢复 dsttpl 中原来的信息,使得 // 整个系统不会处于一个混乱的状态,本函数会在最后,确定 dsttpl 被成功的更换 // 为了新的数据以后,才会真正的将原来的模板数据释放掉。 if (dsttplCud->deviceId != curdevid) { // 对于数据存在 Host 与其他的 Device 上,则直接释放掉原来的数据空间。 reusedata = false; dsttpl->tplData = NULL; dsttplCud->attachedData = NULL; } else if (dsttpl->count != srctpl->count) { // 对于数据存在于当前 Device 上,则需要检查数据的尺寸是否和源图像相匹 // 配。如果目标模板和源模板的尺寸不匹配则仍旧需要释放目标图像原来的数据 // 空间。 reusedata = false; dsttpl->tplData = NULL; dsttplCud->attachedData = NULL; } // 将目标模板的尺寸更改为源模板的尺寸。 dsttpl->count = srctpl->count; // 更改目标模板的数据存储位置为当前 Device。 dsttplCud->deviceId = curdevid; // 如果目标模板需要重新申请空间(因为上一步将无法重用原来内存空间的情况的 // dsttpl->tplData 都置为 NULL,因此此处通过检查 dsttpl->tplData == NULL来 // 确定是否需要重新申请空间),则在当前的 Device 内存中申请空间。 cudaError_t cuerrcode; if (dsttpl->tplData == NULL) { // 申请坐标数据的内存空间 cuerrcode = cudaMalloc((void **)(&dsttpl->tplData), srctpl->count * 2 * sizeof (int)); if (cuerrcode != cudaSuccess) { // 如果空间申请操作失败,则恢复原来的目标模板的数据,以防止系统进入 // 混乱状态。 *dsttplCud = olddsttplCud; return CUDA_ERROR; } // 申请附属数据的内存空间 cuerrcode = 
cudaMalloc((void **)(&dsttplCud->attachedData), srctpl->count * sizeof (float)); if (cuerrcode != cudaSuccess) { // 如果空间申请操作失败,则恢复原来的目标模板的数据,以防止系统进入 // 混乱状态。 cudaFree(dsttpl->tplData); *dsttplCud = olddsttplCud; return CUDA_ERROR; } } // 将数据拷贝如目标模板中。 if (srctplCud->deviceId < 0) { // 如果源模板存储于 Host,则通过 cudaMemcpy 将数据从 Host 拷贝到 Device // 上。 // 拷贝坐标数据 cuerrcode = cudaMemcpy(dsttpl->tplData, srctpl->tplData, srctpl->count * 2 * sizeof (int), cudaMemcpyHostToDevice); // 拷贝附属数据 if (cuerrcode == cudaSuccess) { cuerrcode = cudaMemcpy(dsttplCud->attachedData, srctplCud->attachedData, srctpl->count * sizeof (float), cudaMemcpyHostToDevice); } } else { // 如果源模板存储于 Device,则通过 cudaMemcpyPeer 进行设备间的数据拷 // 贝。 // 拷贝坐标数据 cuerrcode = cudaMemcpyPeer(dsttpl->tplData, curdevid, srctpl->tplData, srctplCud->deviceId, srctpl->count * 2 * sizeof (int)); // 拷贝附属数据 if (cuerrcode == cudaSuccess) { cuerrcode = cudaMemcpyPeer(dsttplCud->attachedData, curdevid, srctplCud->attachedData, srctplCud->deviceId, srctpl->count * sizeof (float)); } } // 如果上述的数据拷贝过程失败,进入这个 if 分支进行报错处理。 if (cuerrcode != cudaSuccess) { // 报错处理分为两个步骤:第一步,如果数据空间不是重用原来的数据空间时, // 则需要释放掉新申请的数据空间;第二步,恢复原来的目标模板的元数据。 if (!reusedata) { cudaFree(dsttpl->tplData); cudaFree(dsttplCud->attachedData); } *dsttplCud = olddsttplCud; return CUDA_ERROR; } // 到此步骤已经说明新的模板数据空间已经成功的申请并拷贝了新的数据,因此,旧 // 的数据空间已毫无用处。本步骤就是释放掉旧的数据空间以防止内存泄漏。这里, // 作为拷贝的 olddsttplCud 是局部变量,因此相应的元数据会在本函数退出后自动 // 释放,不用理会。 if (olddsttplCud.tplMeta.tplData != NULL) { if (olddsttplCud.deviceId < 0) { // 如果旧数据空间是 Host 内存上的,则需要无条件释放。 delete[] olddsttplCud.tplMeta.tplData; delete[] olddsttplCud.attachedData; } else if (!reusedata) { // 如果旧数据空间不是当前 Device 内存上的其他 Device 内存上的数据, // 则也需要无条件的释放。 cudaSetDevice(olddsttplCud.deviceId); cudaFree(olddsttplCud.tplMeta.tplData); cudaFree(olddsttplCud.attachedData); cudaSetDevice(curdevid); } } return NO_ERROR; } // Host 静态方法:copyToHost(将模板拷贝到 Host 内存上) __host__ int TemplateBasicOp::copyToHost(Template *tpl) { // 检查模板是否为 NULL。 if (tpl == NULL) return NULL_POINTER; // 根据输入参数的 Template 型指针,得到对应的 TemplateCuda 型数据。 TemplateCuda *tplCud = TEMPLATE_CUDA(tpl); // 检查模板所在的地址空间是否合法,如果模板所在地址空间不属于 Host 或任何一 // 个 Device,则该函数报“数据溢出”错误,表示无法处理。 int devcnt; cudaGetDeviceCount(&devcnt); if (tplCud->deviceId >= devcnt) return OP_OVERFLOW; // 获取当前 Device ID。 int curdevid; cudaGetDevice(&curdevid); // 如果模板是一个不好含数据的空模板,则报错。 if (tpl->tplData == NULL || tpl->count == 0) return UNMATCH_IMG; // 对于不同的情况,将模板数据拷贝到当前设备上。 if (tplCud->deviceId < 0) { // 如果模板位于 Host 内存上,则不需要进行任何操作。 return NO_ERROR; } else { // 如果模板的数据位于 Device 内存上,则需要在 Host 的内存空间上申请空 // 间,然后将数据拷贝到 Host 上。 int *hostptr; // 新的数据空间,在 Host 上。 float *attachptr; // 新的附属数据空间,在 Host 上。 cudaError_t cuerrcode; // CUDA 调用返回的错误码。 // 在 Host 上申请坐标数据空间。 hostptr = new int[tpl->count * 2]; if (hostptr == NULL) return OUT_OF_MEM; // 在 Host 上申请附属数据空间。 attachptr = new float[tpl->count]; if (attachptr == NULL) { delete[] hostptr; return OUT_OF_MEM; } // 将设备切换到数据所在的 Device 上。 cudaSetDevice(tplCud->deviceId); // 拷贝坐标数据 cuerrcode = cudaMemcpy(hostptr, tpl->tplData, tpl->count * 2 * sizeof (int), cudaMemcpyDeviceToHost); if (cuerrcode != cudaSuccess) { // 如果拷贝失败,则需要释放掉刚刚申请的内存空间,以防止内存泄漏。之 // 后报错返回。 delete[] hostptr; delete[] attachptr; return CUDA_ERROR; } // 拷贝附属数据 cuerrcode = cudaMemcpy(attachptr, tplCud->attachedData, tpl->count * sizeof (float), cudaMemcpyDeviceToHost); if (cuerrcode != cudaSuccess) { // 如果拷贝失败,则需要释放掉刚刚申请的内存空间,以防止内存泄漏。之 // 后报错返回。 delete[] hostptr; delete[] attachptr; return CUDA_ERROR; } // 释放掉原来存储于 Device 内存上的模板数据。 
cudaFree(tpl->tplData); cudaFree(tplCud->attachedData); // 对 Device 内存的操作完毕,将设备切换回当前 Device。 cudaSetDevice(curdevid); // 更新模板数据,把新的在当前 Device 上申请的数据和相关数据写入模板元数 // 据中。 tpl->tplData = hostptr; tplCud->attachedData = attachptr; tplCud->deviceId = -1; // 操作完毕,返回。 return NO_ERROR; } // 程序永远也不会到达这个分支,因此如果到达这个分支,则说明系统紊乱。对于多 // 数编译器来说,会对此句报出不可达语句的 Warning,因此这里将其注释掉,以防 // 止不必要的 Warning。 //return UNKNOW_ERROR; } // Host 静态方法:copyToHost(将模板拷贝到 Host 内存上) __host__ int TemplateBasicOp::copyToHost( Template *srctpl, Template *dsttpl) { // 检查输入模板是否为 NULL。 if (srctpl == NULL || dsttpl == NULL) return NULL_POINTER; // 如果输出模板为 NULL 或者和输入模板同为一个模板,则调用对应的 In-place 版 // 本的函数。 if (dsttpl == NULL || dsttpl == srctpl) return copyToHost(srctpl); // 获取 srctpl 和 dsttpl 对应的 TemplateCuda 型指针。 TemplateCuda *srctplCud = TEMPLATE_CUDA(srctpl); TemplateCuda *dsttplCud = TEMPLATE_CUDA(dsttpl); // 用来存放旧的 dsttpl 数据,使得在拷贝操作失败时可以恢复为原来的可用的数据 // 信息,防止系统进入一个混乱的状态。 TemplateCuda olddsttplCud = *dsttplCud; // 旧的 dsttpl 数据 bool reusedata = true; // 记录是否重用了原来的图像数据空间。 // 该值为 true,则原来的数据空间被重 // 用。不需要在之后释放数据,否则需要 // 释放就的空间。 // 如果源模板是一个空模板,则不进行任何操作,直接报错。 if (srctpl->tplData == NULL || srctpl->count == 0) return INVALID_DATA; // 检查模板所在的地址空间是否合法,如果模板所在地址空间不属于 Host 或任何一 // 个 Device,则该函数报“数据溢出”错误,表示无法处理。 int devcnt; cudaGetDeviceCount(&devcnt); if (srctplCud->deviceId >= devcnt || dsttplCud->deviceId >= devcnt) return OP_OVERFLOW; // 获取当前 Device ID。 int curdevid; cudaGetDevice(&curdevid); // 如果目标模板中存在有数据,则需要根据情况,若原来的数据不存储在 Host 上, // 或者即使存储在 Host 上,但数据尺寸不匹配,则需要释放掉原来申请的空间,以 // 便重新申请合适的内存空间。此处不进行真正的释放操作,其目的在于当后续操作 // 出现错误时,可以很快的恢复 dsttpl 中原来的信息,使得整个系统不会处于一个 // 混乱的状态,本函数会在最后,确定 dsttpl 被成功的更换为了新的数据以后,才 // 会真正的将原来的模板数据释放掉。 if (dsttplCud->deviceId >= 0) { // 对于数据存在于 Device 上,则亦直接释放掉原来的数据空间。 reusedata = false; dsttpl->tplData = NULL; dsttplCud->attachedData = NULL; } else if (srctpl->count != dsttpl->count) { // 对于数据存在于 Host 上,则需要检查数据的尺寸是否和源模板相匹配。检查 // 的标准:源模板和目标模板的尺寸相同时,可重用原来的空间。 reusedata = false; dsttpl->tplData = NULL; dsttplCud->attachedData = NULL; } // 将目标模板的尺寸修改为源模板的尺寸。 dsttpl->count = srctpl->count; // 更改目标模板的数据存储位置为 Host。 dsttplCud->deviceId = -1; // 如果目标模板的 tplData == NULL,说明目标模板原本要么是一个空图像,要么目 // 标模板原本的数据空间不合适,需要重新申请。这时,需要为目标模板重新在 // Host 上申请一个合适的数据空间。 if (dsttpl->tplData == NULL) { // 申请坐标数据 dsttpl->tplData = new int[srctpl->count * 2]; if (dsttpl->tplData == NULL) { // 如果申请内存的操作失败,则再报错返回前需要将旧的目标模板数据 // 恢复到目标模板中,以保证系统接下的操作不至于混乱。 *dsttplCud = olddsttplCud; return OUT_OF_MEM; } // 申请附属数据 dsttplCud->attachedData = new float[srctpl->count]; if (dsttplCud->attachedData == NULL) { // 如果申请内存的操作失败,则再报错返回前需要将旧的目标模板数据 // 恢复到目标模板中,以保证系统接下的操作不至于混乱。 delete[] dsttpl->tplData; *dsttplCud = olddsttplCud; return OUT_OF_MEM; } } // 将坐标数据从源模板中拷贝到目标模板中。 if (srctplCud->deviceId < 0) { // 如果源模板数据存储于 Host 内存,则直接使用 C 标准支持库中的 memcpy // 完成拷贝。 // 将 srctpl 内的坐标数据拷贝到 dsttpl 中。memcpy 不返回错误,因此,没 // 有进行错误检查。 memcpy(dsttpl->tplData, srctpl->tplData, srctpl->count * 2 * sizeof (int)); memcpy(dsttplCud->attachedData, srctplCud->attachedData, srctpl->count * sizeof (float)); } else { // 如果源模板数据存储于 Device 内存(无论是当前 Device 还是其他的 // Device),都是通过 CUDA 提供的函数进行拷贝。。 cudaError_t cuerrcode; // CUDA 调用返回的错误码。 // 首先切换到 srctpl 坐标数据所在的 Device,以方便进行内存操作。 cudaSetDevice(srctplCud->deviceId); // 这里使用 cudaMemcpy 将 srctpl 中处于 Device 上的数据拷贝到 dsttpl 中 // 位于 Host 的内存空间上面。 // 拷贝坐标数据 cuerrcode = cudaMemcpy(dsttpl->tplData, srctpl->tplData, srctpl->count * 2 * sizeof (int), cudaMemcpyDeviceToHost); // 拷贝附属数据 if (cuerrcode == cudaSuccess) { cuerrcode = cudaMemcpy(dsttplCud->attachedData, 
srctplCud->attachedData, srctpl->count * sizeof (float), cudaMemcpyDeviceToHost); } if (cuerrcode != cudaSuccess) { // 如果拷贝操作失败,则再报错退出前,需要将旧的目标模板数据恢复到目 // 标模板中。此外,如果数据不是重用的,则需要释放新申请的数据空间, // 防止内存泄漏。最后,还需要把 Device 切换回来,以免整个程序乱套。 if (!reusedata) { delete[] dsttpl->tplData; delete[] dsttplCud->attachedData; } *dsttplCud = olddsttplCud; cudaSetDevice(curdevid); return CUDA_ERROR; } // 对内存操作完毕后,将设备切换回当前的 Device。 cudaSetDevice(curdevid); } // 到此步骤已经说明新的模板数据空间已经成功的申请并拷贝了新的数据,因此,旧 // 的数据空间已毫无用处。本步骤就是释放掉旧的数据空间以防止内存泄漏。这里, // 作为拷贝的 olddsttplCud 是局部变量,因此相应的元数据会在本函数退出后自动 // 释放,不用理会。 if (olddsttplCud.tplMeta.tplData != NULL) { if (olddsttplCud.deviceId > 0) { // 如果旧数据是存储于 Device 内存上的数据,则需要无条件的释放。 cudaSetDevice(olddsttplCud.deviceId); cudaFree(olddsttplCud.tplMeta.tplData); cudaFree(olddsttplCud.attachedData); cudaSetDevice(curdevid); } else if (!reusedata) { // 如果旧数据就在 Host 内存上,则对于 reusedata 未置位的情况进行释 // 放,因为一旦置位,旧的数据空间就被用于承载新的数据,则不能释放。 delete[] olddsttplCud.tplMeta.tplData; delete[] olddsttplCud.attachedData; } } // 处理完毕,退出。 return NO_ERROR; }
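// Minimal usage sketch (illustrative only): drives the TemplateBasicOp
// interface defined above through a Host -> current Device -> Host round
// trip. Only members and error codes already used in this file are assumed;
// the helper name demoTemplateRoundTrip is hypothetical.
static int demoTemplateRoundTrip()
{
    Template *tpl = NULL;

    // Create an empty template (metadata only, no coordinate data yet).
    int errcode = TemplateBasicOp::newTemplate(&tpl);
    if (errcode != NO_ERROR)
        return errcode;

    // Allocate room for 16 coordinate pairs in Host memory and fill them.
    errcode = TemplateBasicOp::makeAtHost(tpl, 16);
    if (errcode != NO_ERROR) {
        TemplateBasicOp::deleteTemplate(tpl);
        return errcode;
    }
    for (size_t i = 0; i < tpl->count * 2; i++)
        tpl->tplData[i] = (int)i;

    // Move the data to the current Device, then bring it back to Host.
    errcode = TemplateBasicOp::copyToCurrentDevice(tpl);
    if (errcode == NO_ERROR)
        errcode = TemplateBasicOp::copyToHost(tpl);

    // Release the coordinate data, the attached data, and the metadata.
    TemplateBasicOp::deleteTemplate(tpl);
    return errcode;
}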
void test_pp_stringize() { ASSERT_EQUAL( std::string(THRUST_PP_STRINGIZE(int)) , "int" ); ASSERT_EQUAL( std::string(THRUST_PP_STRINGIZE(hello world)) , "hello world" ); ASSERT_EQUAL( std::string(THRUST_PP_STRINGIZE(hello world)) , "hello world" ); ASSERT_EQUAL( std::string(THRUST_PP_STRINGIZE( hello world)) , "hello world" ); ASSERT_EQUAL( std::string(THRUST_PP_STRINGIZE(hello world )) , "hello world" ); ASSERT_EQUAL( std::string(THRUST_PP_STRINGIZE( hello world )) , "hello world" ); ASSERT_EQUAL( std::string(THRUST_PP_STRINGIZE(hello world)) , "hello world" ); ASSERT_EQUAL( std::string(THRUST_PP_STRINGIZE("hello world")) , "\"hello world\"" ); ASSERT_EQUAL( std::string(THRUST_PP_STRINGIZE('hello world')) , "'hello world'" ); ASSERT_EQUAL( std::string(THRUST_PP_STRINGIZE($%!&<->)) , "$%!&<->" ); ASSERT_EQUAL( std::string(THRUST_PP_STRINGIZE($%!&""<->)) , "$%!&\"\"<->" ); ASSERT_EQUAL( std::string(THRUST_PP_STRINGIZE(THRUST_PP_STRINGIZE)) , "THRUST_PP_STRINGIZE" ); ASSERT_EQUAL( std::string(THRUST_PP_STRINGIZE(THRUST_PP_STRINGIZE(int))) , "\"int\"" ); } DECLARE_UNITTEST(test_pp_stringize); void test_pp_cat2() { ASSERT_EQUAL( std::string(THRUST_PP_STRINGIZE(THRUST_PP_CAT2(i, nt))) , "int" ); ASSERT_EQUAL( std::string(THRUST_PP_STRINGIZE(THRUST_PP_CAT2(hello, world))) , "helloworld" ); ASSERT_EQUAL( std::string(THRUST_PP_STRINGIZE(THRUST_PP_CAT2(hello , world))) , "helloworld" ); ASSERT_EQUAL( std::string(THRUST_PP_STRINGIZE(THRUST_PP_CAT2( hello, world))) , "helloworld" ); ASSERT_EQUAL( std::string(THRUST_PP_STRINGIZE(THRUST_PP_CAT2(hello, world))) , "helloworld" ); ASSERT_EQUAL( std::string(THRUST_PP_STRINGIZE(THRUST_PP_CAT2(hello, world ))) , "helloworld" ); ASSERT_EQUAL( std::string(THRUST_PP_STRINGIZE(THRUST_PP_CAT2(hello, world ))) , "helloworld" ); ASSERT_EQUAL( std::string(THRUST_PP_STRINGIZE(THRUST_PP_CAT2(hello world, from thrust!))) , "hello worldfrom thrust!" 
); ASSERT_EQUAL( std::string(THRUST_PP_STRINGIZE(THRUST_PP_CAT2(-, >))) , "->" ); } DECLARE_UNITTEST(test_pp_cat2); #define THRUST_TEST_PP_EXPAND_TARGET() success #define THRUST_TEST_PP_EXPAND_ARGS() () void test_pp_expand() { ASSERT_EQUAL( std::string(THRUST_PP_STRINGIZE(THRUST_PP_EXPAND(int))) , "int" ); ASSERT_EQUAL( std::string(THRUST_PP_STRINGIZE(THRUST_PP_EXPAND(hello world))) , "hello world" ); ASSERT_EQUAL( std::string(THRUST_PP_STRINGIZE(THRUST_PP_EXPAND(hello world))) , "hello world" ); ASSERT_EQUAL( std::string(THRUST_PP_STRINGIZE(THRUST_PP_EXPAND( hello world))) , "hello world" ); ASSERT_EQUAL( std::string(THRUST_PP_STRINGIZE(THRUST_PP_EXPAND(hello world ))) , "hello world" ); ASSERT_EQUAL( std::string(THRUST_PP_STRINGIZE(THRUST_PP_EXPAND( hello world ))) , "hello world" ); ASSERT_EQUAL( std::string(THRUST_PP_STRINGIZE(THRUST_PP_EXPAND(hello world))) , "hello world" ); ASSERT_EQUAL( std::string(THRUST_PP_STRINGIZE(THRUST_PP_EXPAND("hello world"))) , "\"hello world\"" ); ASSERT_EQUAL( std::string(THRUST_PP_STRINGIZE(THRUST_PP_EXPAND('hello world'))) , "'hello world'" ); ASSERT_EQUAL( std::string(THRUST_PP_STRINGIZE(THRUST_PP_EXPAND($%!&<->))) , "$%!&<->" ); ASSERT_EQUAL( std::string(THRUST_PP_STRINGIZE(THRUST_PP_EXPAND($%!&""<->))) , "$%!&\"\"<->" ); ASSERT_EQUAL( std::string(THRUST_PP_STRINGIZE(THRUST_PP_EXPAND(THRUST_PP_EXPAND))) , "THRUST_PP_EXPAND" ); ASSERT_EQUAL( std::string(THRUST_PP_STRINGIZE(THRUST_PP_EXPAND(THRUST_PP_EXPAND(int)))) , "int" ); ASSERT_EQUAL( std::string(THRUST_PP_STRINGIZE(THRUST_PP_EXPAND( THRUST_PP_CAT2(THRUST_TEST_, PP_EXPAND_TARGET)() ))) , "success" ); ASSERT_EQUAL( std::string(THRUST_PP_STRINGIZE(THRUST_PP_EXPAND( THRUST_TEST_PP_EXPAND_TARGET THRUST_TEST_PP_EXPAND_ARGS() ))) , "success" ); } DECLARE_UNITTEST(test_pp_expand); #undef THRUST_TEST_PP_EXPAND_TARGET #undef THRUST_TEST_PP_EXPAND_ARGS void test_pp_arity() { ASSERT_EQUAL( THRUST_PP_ARITY() , 0 ); /* This bash script was used to generate these tests: for arity in {0..62} do echo " ASSERT_EQUAL(" echo " THRUST_PP_ARITY(" echo " `bash -c \"echo {0..${arity}} | tr ' ' ,\"`" echo " )" echo " , $((${arity} + 1))" echo " );" echo done */ ASSERT_EQUAL( THRUST_PP_ARITY( 0 ) , 1 ); ASSERT_EQUAL( THRUST_PP_ARITY( 0,1 ) , 2 ); ASSERT_EQUAL( THRUST_PP_ARITY( 0,1,2 ) , 3 ); ASSERT_EQUAL( THRUST_PP_ARITY( 0,1,2,3 ) , 4 ); ASSERT_EQUAL( THRUST_PP_ARITY( 0,1,2,3,4 ) , 5 ); ASSERT_EQUAL( THRUST_PP_ARITY( 0,1,2,3,4,5 ) , 6 ); ASSERT_EQUAL( THRUST_PP_ARITY( 0,1,2,3,4,5,6 ) , 7 ); ASSERT_EQUAL( THRUST_PP_ARITY( 0,1,2,3,4,5,6,7 ) , 8 ); ASSERT_EQUAL( THRUST_PP_ARITY( 0,1,2,3,4,5,6,7,8 ) , 9 ); ASSERT_EQUAL( THRUST_PP_ARITY( 0,1,2,3,4,5,6,7,8,9 ) , 10 ); ASSERT_EQUAL( THRUST_PP_ARITY( 0,1,2,3,4,5,6,7,8,9,10 ) , 11 ); ASSERT_EQUAL( THRUST_PP_ARITY( 0,1,2,3,4,5,6,7,8,9,10,11 ) , 12 ); ASSERT_EQUAL( THRUST_PP_ARITY( 0,1,2,3,4,5,6,7,8,9,10,11,12 ) , 13 ); ASSERT_EQUAL( THRUST_PP_ARITY( 0,1,2,3,4,5,6,7,8,9,10,11,12,13 ) , 14 ); ASSERT_EQUAL( THRUST_PP_ARITY( 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14 ) , 15 ); ASSERT_EQUAL( THRUST_PP_ARITY( 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 ) , 16 ); ASSERT_EQUAL( THRUST_PP_ARITY( 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16 ) , 17 ); ASSERT_EQUAL( THRUST_PP_ARITY( 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17 ) , 18 ); ASSERT_EQUAL( THRUST_PP_ARITY( 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18 ) , 19 ); ASSERT_EQUAL( THRUST_PP_ARITY( 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19 ) , 20 ); ASSERT_EQUAL( THRUST_PP_ARITY( 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20 ) , 21 ); 
ASSERT_EQUAL( THRUST_PP_ARITY( 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21 ) , 22 ); ASSERT_EQUAL( THRUST_PP_ARITY( 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22 ) , 23 ); ASSERT_EQUAL( THRUST_PP_ARITY( 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23 ) , 24 ); ASSERT_EQUAL( THRUST_PP_ARITY( 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24 ) , 25 ); ASSERT_EQUAL( THRUST_PP_ARITY( 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25 ) , 26 ); ASSERT_EQUAL( THRUST_PP_ARITY( 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26 ) , 27 ); ASSERT_EQUAL( THRUST_PP_ARITY( 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27 ) , 28 ); ASSERT_EQUAL( THRUST_PP_ARITY( 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28 ) , 29 ); ASSERT_EQUAL( THRUST_PP_ARITY( 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29 ) , 30 ); ASSERT_EQUAL( THRUST_PP_ARITY( 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30 ) , 31 ); ASSERT_EQUAL( THRUST_PP_ARITY( 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31 ) , 32 ); ASSERT_EQUAL( THRUST_PP_ARITY( 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32 ) , 33 ); ASSERT_EQUAL( THRUST_PP_ARITY( 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33 ) , 34 ); ASSERT_EQUAL( THRUST_PP_ARITY( 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34 ) , 35 ); ASSERT_EQUAL( THRUST_PP_ARITY( 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35 ) , 36 ); ASSERT_EQUAL( THRUST_PP_ARITY( 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36 ) , 37 ); ASSERT_EQUAL( THRUST_PP_ARITY( 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37 ) , 38 ); ASSERT_EQUAL( THRUST_PP_ARITY( 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38 ) , 39 ); ASSERT_EQUAL( THRUST_PP_ARITY( 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39 ) , 40 ); ASSERT_EQUAL( THRUST_PP_ARITY( 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40 ) , 41 ); ASSERT_EQUAL( THRUST_PP_ARITY( 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41 ) , 42 ); ASSERT_EQUAL( THRUST_PP_ARITY( 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42 ) , 43 ); ASSERT_EQUAL( THRUST_PP_ARITY( 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43 ) , 44 ); ASSERT_EQUAL( THRUST_PP_ARITY( 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44 ) , 45 ); ASSERT_EQUAL( THRUST_PP_ARITY( 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45 ) , 46 ); ASSERT_EQUAL( THRUST_PP_ARITY( 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46 ) , 47 ); ASSERT_EQUAL( THRUST_PP_ARITY( 
0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47 ) , 48 ); ASSERT_EQUAL( THRUST_PP_ARITY( 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48 ) , 49 ); ASSERT_EQUAL( THRUST_PP_ARITY( 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49 ) , 50 ); ASSERT_EQUAL( THRUST_PP_ARITY( 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50 ) , 51 ); ASSERT_EQUAL( THRUST_PP_ARITY( 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51 ) , 52 ); ASSERT_EQUAL( THRUST_PP_ARITY( 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52 ) , 53 ); ASSERT_EQUAL( THRUST_PP_ARITY( 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53 ) , 54 ); ASSERT_EQUAL( THRUST_PP_ARITY( 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54 ) , 55 ); ASSERT_EQUAL( THRUST_PP_ARITY( 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55 ) , 56 ); ASSERT_EQUAL( THRUST_PP_ARITY( 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56 ) , 57 ); ASSERT_EQUAL( THRUST_PP_ARITY( 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57 ) , 58 ); ASSERT_EQUAL( THRUST_PP_ARITY( 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58 ) , 59 ); ASSERT_EQUAL( THRUST_PP_ARITY( 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59 ) , 60 ); ASSERT_EQUAL( THRUST_PP_ARITY( 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60 ) , 61 ); ASSERT_EQUAL( THRUST_PP_ARITY( 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61 ) , 62 ); ASSERT_EQUAL( THRUST_PP_ARITY( 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62 ) , 63 ); } DECLARE_UNITTEST(test_pp_arity); #define THRUST_TEST_PP_DISPATCH_PLUS(...) 
\ THRUST_PP_DISPATCH(THRUST_TEST_PP_DISPATCH_PLUS, __VA_ARGS__) \ /**/ #define THRUST_TEST_PP_DISPATCH_PLUS0() 0 #define THRUST_TEST_PP_DISPATCH_PLUS1(x) x #define THRUST_TEST_PP_DISPATCH_PLUS2(x, y) x + y #define THRUST_TEST_PP_DISPATCH_PLUS3(x, y, z) x + y + z void test_pp_dispatch() { ASSERT_EQUAL( THRUST_TEST_PP_DISPATCH_PLUS() , 0 ); ASSERT_EQUAL( THRUST_TEST_PP_DISPATCH_PLUS(0) , 0 ); ASSERT_EQUAL( THRUST_TEST_PP_DISPATCH_PLUS(1, 2) , 3 ); ASSERT_EQUAL( THRUST_TEST_PP_DISPATCH_PLUS(1, 2, 3) , 6 ); } DECLARE_UNITTEST(test_pp_dispatch); #undef THRUST_TEST_PP_DISPATCH_PLUS #undef THRUST_TEST_PP_DISPATCH_PLUS0 #undef THRUST_TEST_PP_DISPATCH_PLUS1 #undef THRUST_TEST_PP_DISPATCH_PLUS2 #undef THRUST_TEST_PP_DISPATCH_PLUS3
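// Illustrative sketch: the dispatch pattern exercised by test_pp_dispatch
// above selects an implementation by pasting the argument count
// (THRUST_PP_ARITY) onto a base macro name. The EXAMPLE_MAX* macros below are
// hypothetical and only repeat the same technique for a second operation.
#define EXAMPLE_MAX(...) THRUST_PP_DISPATCH(EXAMPLE_MAX, __VA_ARGS__)
#define EXAMPLE_MAX1(x) x
#define EXAMPLE_MAX2(x, y) ((x) > (y) ? (x) : (y))

void test_pp_dispatch_example()
{
  ASSERT_EQUAL(EXAMPLE_MAX(3), 3);
  ASSERT_EQUAL(EXAMPLE_MAX(3, 7), 7);
}
DECLARE_UNITTEST(test_pp_dispatch_example);

#undef EXAMPLE_MAX
#undef EXAMPLE_MAX1
#undef EXAMPLE_MAX2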
using namespace std; namespace std { template <typename _CharT, typename _Traits> inline basic_ostream<_CharT, _Traits> & tab(basic_ostream<_CharT, _Traits> &__os) { return __os.put(__os.widen('\t')); } } std::string stringPadding(std::string original, size_t charCount) { original.resize(charCount, ' '); return original; } /*************Error Handling**************/ bool check(cudaError_t e, int iLine, const char *szFile) { if (e != cudaSuccess) { cout << "CUDA runtime API error " << cudaGetErrorName(e) << " at line " << iLine << " in file " << szFile << endl; exit(0); return false; } return true; } const char* cublasGetErrorString(cublasStatus_t status) { switch(status) { case CUBLAS_STATUS_SUCCESS: return "CUBLAS_STATUS_SUCCESS"; case CUBLAS_STATUS_NOT_INITIALIZED: return "CUBLAS_STATUS_NOT_INITIALIZED"; case CUBLAS_STATUS_ALLOC_FAILED: return "CUBLAS_STATUS_ALLOC_FAILED"; case CUBLAS_STATUS_INVALID_VALUE: return "CUBLAS_STATUS_INVALID_VALUE"; case CUBLAS_STATUS_ARCH_MISMATCH: return "CUBLAS_STATUS_ARCH_MISMATCH"; case CUBLAS_STATUS_MAPPING_ERROR: return "CUBLAS_STATUS_MAPPING_ERROR"; case CUBLAS_STATUS_EXECUTION_FAILED: return "CUBLAS_STATUS_EXECUTION_FAILED"; case CUBLAS_STATUS_INTERNAL_ERROR: return "CUBLAS_STATUS_INTERNAL_ERROR"; } return "unknown error"; } bool check(cublasStatus_t e, int iLine, const char *szFile) { if (e !=CUBLAS_STATUS_SUCCESS) { cout << "CUDA runtime API error " << cublasGetErrorString(e) << " at line " << iLine << " in file " << szFile << endl; exit(0); return false; } return true; } /*************Time Handling**************/ CudaTimer::CudaTimer(cudaStream_t stream){ this->stream=stream; } void CudaTimer::start(){ ck(cudaEventCreate(&event_start)); ck(cudaEventCreate(&event_stop)); ck(cudaEventRecord(event_start, stream)); } float CudaTimer::stop(){ ck(cudaEventRecord(event_stop,stream)); ck(cudaEventSynchronize(event_stop)); ck(cudaEventElapsedTime(&time, event_start, event_stop)); ck(cudaEventDestroy(event_start)); ck(cudaEventDestroy(event_stop)); return time; } CudaTimer:: ~CudaTimer(){ } /*************Useful functions***********************/ int blockNum(int size, int blockSize){ int nblock= (size-1)/blockSize+1; return nblock; } int next_pow2(int a){ int rval=32; if(a>32){ while(rval<a) rval<<=1; } return rval; } template<typename T> int numPerThread(){ return sizeof(float)/sizeof(T); } template <typename T> void deviceMalloc(T** ptr, int size) { ck(cudaMalloc((void**)ptr, sizeof(T) * size)); } template <typename T> void deviceMemset(T* ptr, int value, int size) { ck(cudaMemset((void*)ptr,0, sizeof(T) * size)); } template <typename T> void deviceFree(T* & ptr){ if(ptr!=NULL){ ck(cudaFree(ptr)); ptr=NULL; } } template <typename T> void deviceMemcpyHtoD(cudaStream_t stream, T* d_ptr,T* h_ptr, int size) { ck(cudaMemcpyAsync(d_ptr, h_ptr,size *sizeof(T),cudaMemcpyHostToDevice,stream)); } template <typename T> float castToFloat(T input){ float output=(T)(input); return output; } template<> float castToFloat(__half input){ float output=__half2float(input); return output; } /*********************Npz &Npy File Process functions***********************/ std::string paraName(int i_layer, std::string sub_para){ std::ostringstream s; s<<"model/transformer/layer_"<<i_layer<<sub_para; std::string str= s.str(); return str; } std::string paraName(std::string s){ std::string str= s; return str; } template <typename T> void setByNpz(cnpy::npz_t & my_npz, std::string name, T* h_ptr, int size, int offset){ //printKey(my_npz); //check that the loaded myVar1 matches myVar1 
cnpy::NpyArray arr = my_npz[name]; //load it into a new array T* loaded_data = arr.data<T>(); memcpy (h_ptr, loaded_data+offset, sizeof(T)*size); } template<> void setByNpz<__half>(cnpy::npz_t & my_npz, std::string name, __half* h_ptr, int size, int offset){ //check that the loaded myVar1 matches myVar1 cnpy::NpyArray arr = my_npz[name]; //load it into a new array float* loaded_data = arr.data<float>(); __half* half_data=(__half*)malloc(sizeof(__half)*size); loaded_data=loaded_data+offset; for(int i=0;i<size;i++){ half_data[i]=__float2half_rn(loaded_data[i]); } memcpy (h_ptr, half_data, sizeof(__half)*size); free(half_data); } void printKey(cnpy::npz_t & npz){ std::map<std::string,cnpy::NpyArray>::iterator iter; for(iter = npz.begin(); iter != npz.end(); iter++){ std::cout<<iter->first<<std::endl; } } void setByNpz(cudaStream_t stream,cnpy::npz_t & my_npz, std::string name, int* d_ptr, int size, int offset){ //check that the loaded myVar1 matches myVar1 cnpy::NpyArray arr = my_npz[name]; //load it into a new array int* loaded_data = arr.data<int>(); ck(cudaMemcpyAsync(d_ptr, loaded_data+offset, sizeof(int)*size, cudaMemcpyHostToDevice,stream)); cudaDeviceSynchronize(); ck(cudaGetLastError()); } void setByNpz(cudaStream_t stream,cnpy::npz_t & my_npz, std::string name, float* d_ptr, int size, int offset){ //check that the loaded myVar1 matches myVar1 cnpy::NpyArray arr = my_npz[name]; //load it into a new array float* loaded_data = arr.data<float>(); //std::cout<<name<<" "<<size<<" "<<d_ptr<<" "<<loaded_data<<std::endl; ck(cudaMemcpyAsync(d_ptr, loaded_data+offset, sizeof(float)*size, cudaMemcpyHostToDevice,stream)); cudaDeviceSynchronize(); ck(cudaGetLastError()); } void setByNpz(cudaStream_t stream,cnpy::npz_t & my_npz, std::string name, __half* d_ptr, int size, int offset){ //check that the loaded myVar1 matches myVar1 cnpy::NpyArray arr = my_npz[name]; //load it into a new array float* loaded_data = arr.data<float>(); __half* half_data=(__half*)malloc(sizeof(__half)*size); loaded_data=loaded_data+offset; for(int i=0;i<size;i++){ half_data[i]=__float2half_rn(loaded_data[i]); } ck(cudaMemcpyAsync(d_ptr, half_data, sizeof(__half)*size, cudaMemcpyHostToDevice,stream)); free(half_data); cudaDeviceSynchronize(); ck(cudaGetLastError()); } void setByNpy(cudaStream_t stream,float* d_ptr, int size,std::string dir, std::string fname){ std::ostringstream s; s<<dir<<fname; std::string fullFname= s.str(); //load it into a new array cnpy::NpyArray arr = cnpy::npy_load(fullFname); float* loaded_data = arr.data<float>(); ck(cudaMemcpy(d_ptr, loaded_data, sizeof(__half)*size, cudaMemcpyHostToDevice)); } void setByNpy(cudaStream_t stream, __half* d_ptr, int size,std::string dir, std::string fname){ std::ostringstream s; s<<dir<<fname; std::string fullFname= s.str(); //load it into a new array cnpy::NpyArray arr = cnpy::npy_load(fullFname); float* loaded_data = arr.data<float>(); __half* half_data=(__half*)malloc(sizeof(__half)*size); for(int i=0;i<size;i++){ half_data[i]=__float2half_rn(loaded_data[i]); } ck(cudaMemcpy(d_ptr, half_data, sizeof(__half)*size, cudaMemcpyHostToDevice)); free(half_data); } void checkByNpy(cudaStream_t stream,float* d_ptr, int size,std::string dir, std::string fname){ //load it into a new array std::ostringstream s; s<<dir<<fname; std::string fullFname= s.str(); float* h_ptr=(float*)malloc(sizeof(float)*size); ck(cudaMemcpyAsync(h_ptr, d_ptr,sizeof(float)*size, cudaMemcpyDeviceToHost,stream)); FILE * test=fopen(fullFname.c_str(), "r"); if(test){ fclose(test); cnpy::NpyArray arr 
= cnpy::npy_load(fullFname); float* loaded_data = arr.data<float>(); double err=0; double max=-1e30f; int loc_err=0; int i=0; for(;i<size;i++){ double sub=abs(h_ptr[i]-loaded_data[i]); if(sub>err){ err=sub; loc_err=i; } if(h_ptr[i]>max){ max=h_ptr[i]; } } if(i==size){ std::cout<<stringPadding(fname,30)<<" ,Max Abs-Err: " << std::fixed << std::setw(11)<<err<<" ,Err Loc: " << std::fixed << std::setw(11)<<loc_err<<" ,Max Value: " << std::fixed << std::setw(11)<<max<<" ,Rel-Err: " << std::fixed << std::setw(11)<<err/max*100<<"%"<<std::endl; } }else{ std::cout<<"Can not find file: "<<fullFname<<std::endl; } free(h_ptr); } void checkByNpy(cudaStream_t stream, __half* d_ptr, int size,std::string dir, std::string fname){ std::ostringstream s; s<<dir<<fname; std::string fullFname= s.str(); __half* h_ptr=(__half*)malloc(sizeof(__half)*size); ck(cudaMemcpyAsync(h_ptr, d_ptr,sizeof(float)*size, cudaMemcpyDeviceToHost,stream)); FILE * test=fopen(fullFname.c_str(), "r"); if(test){ fclose(test); //load it into a new array cnpy::NpyArray arr = cnpy::npy_load(fullFname); float* loaded_data = arr.data<float>(); double max=-1e30f; double err=0; int loc_err=0; int i=0; for(;i<size;i++){ double tmp=__half2float(h_ptr[i]); double sub=abs(tmp-loaded_data[i]); if(sub>err){ err=sub; loc_err=i; } if(tmp>max){ max=tmp; } } if(i==size){ std::cout<<stringPadding(fname,30)<<" ,Max Abs-Err: " << std::fixed << std::setw(11)<<err<<" ,Err Loc: " << std::fixed << std::setw(11)<<loc_err<<" ,Max Value: " << std::fixed << std::setw(11)<<max<<" ,Rel-Err: " << std::fixed << std::setw(11)<<err/max*100<<"%"<<std::endl; //Filename, error, max error location, max value, relative error } } free(h_ptr); } template <typename T> bool checkByNpz(cnpy::npz_t& data_npz,cudaStream_t stream,std::string name, T* d_ptr, int size){ std::cout<<name<<" "<<size<<std::endl; bool ifCorrect=1; cnpy::NpyArray arr = data_npz[name]; T* loaded_data = arr.data<T>(); T * h_ptr=(T*)malloc(size*sizeof(T)); ck(cudaMemcpyAsync(h_ptr, d_ptr,sizeof(T)*size, cudaMemcpyDeviceToHost,stream)); double err=0; double max=castToFloat(h_ptr[0]); int i=0; for(i=0;i<size;i++){ double sub=abs(castToFloat(h_ptr[i])-castToFloat(loaded_data[i])); if(sub>err){ err=sub; } if(max<castToFloat(h_ptr[i])){ max=castToFloat(h_ptr[i]); } } if(err/max>0.05){ ifCorrect=0; std::cout<<"Wrong: "<< std::setw(20)<<name<<" Max err :"<<err <<" Max value :"<<max<<" Ralative error rate: "<< err/max <<std::endl; }else{ ifCorrect=1; //std::cout<<"Correct: "<< std::setw(20)<<name<<" Max err :"<<err <<" Max value :"<<max<<" Ralative error rate: "<< err/max <<std::endl; } free(h_ptr); return ifCorrect; } void checkByNpz(cudaStream_t stream,string data_fname, string name, float* d_ptr, int size ){ cnpy::npz_t data_npz=cnpy::npz_load(data_fname); cnpy::NpyArray arr = data_npz[name]; float* loaded_data = arr.data<float>(); float * h_ptr=(float*)malloc(size*sizeof(float)); //ck(cudaMemcpy(h_ptr,d_ptr, sizeof(float)*size, cudaMemcpyDeviceToHost)); ck(cudaMemcpyAsync(h_ptr, d_ptr,sizeof(float)*size, cudaMemcpyDeviceToHost,stream)); double err=0; int i=0; for(i=0;i<size;i++){ double sub=abs(h_ptr[i]-loaded_data[i]); if(sub>err){ err=sub; } if(sub>0.01){ std::cout<<data_fname<<" "<<name<<" Got error at: "<<i<<" Calculated="<<h_ptr[i]<<" Ori="<<loaded_data[i]<<" Err: "<<sub<<std::endl; break; } } if(i==size){ std::cout<<"Correct: "<< data_fname<<" Max err :"<<err<<std::endl; } free(h_ptr); } void checkByNpz(cudaStream_t stream,string data_fname, string name, __half* d_ptr, int size ){ cnpy::npz_t 
data_npz=cnpy::npz_load(data_fname); cnpy::NpyArray arr = data_npz[name]; float* loaded_data = arr.data<float>(); __half * h_ptr=(__half*)malloc(size*sizeof(float)); //ck(cudaMemcpy(h_ptr,d_ptr, sizeof(__half)*size, cudaMemcpyDeviceToHost)); ck(cudaMemcpyAsync(h_ptr, d_ptr,sizeof(float)*size, cudaMemcpyDeviceToHost,stream)); double err=0; int i=0; for(;i<size;i++){ float tmp=__half2float(h_ptr[i]); double sub=abs(tmp-loaded_data[i]); if(sub>err){ err=sub; } if(sub>10){ std::cout<<data_fname<<" Got error at: "<<i<<" value: calculated="<<tmp<<" tensorRT="<<loaded_data[i]<<" Err: "<<sub<<std::endl; break; } } if(i==size){ std::cout<<"Correct: "<< data_fname<<" Max err :"<<err<<std::endl; } free(h_ptr); } /*********************The explicit instantiation part***********************/ template int numPerThread<float>(); template int numPerThread<__half>(); template float castToFloat<float>(float input); template float castToFloat<__half>(__half input); template void deviceMalloc<float>(float** ptr, int size); template void deviceMemset<float>(float* ptr, int value, int size); template void deviceFree<float>(float* & ptr); template void deviceMemcpyHtoD<float>(cudaStream_t stream, float* d_ptr,float* h_ptr, int size); template void deviceMalloc<int>(int** ptr, int size); template void deviceMemset<int>(int* ptr, int value, int size); template void deviceFree<int>(int* & ptr); template void deviceMemcpyHtoD<int>(cudaStream_t stream, int* d_ptr,int* h_ptr, int size); template void deviceMalloc<__half>(__half** ptr, int size); template void deviceMemset<__half>(__half* ptr, int value, int size); template void deviceFree<__half>(__half* & ptr); template void deviceMemcpyHtoD<__half>(cudaStream_t stream, __half* d_ptr,__half* h_ptr, int size); template void setByNpz<int>(cnpy::npz_t & my_npz, std::string name, int* h_ptr, int size, int offset); template void setByNpz<float>(cnpy::npz_t & my_npz, std::string name, float* h_ptr, int size, int offset); template void setByNpz<__half>(cnpy::npz_t & my_npz, std::string name, __half* h_ptr, int size, int offset); template bool checkByNpz<float>(cnpy::npz_t& data_npz,cudaStream_t stream,std::string name, float* d_ptr, int size); template bool checkByNpz<__half>(cnpy::npz_t& data_npz,cudaStream_t stream,std::string name, __half* d_ptr, int size);
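// Minimal usage sketch (illustrative only): times an asynchronous H2D copy
// with the CudaTimer and device helpers defined above. The function name
// demoTimedCopy and the buffer size are hypothetical; only utilities from
// this file plus malloc/free and std::cout are assumed.
void demoTimedCopy(cudaStream_t stream)
{
    const int size = 1 << 20;

    // Host source buffer.
    float *h_data = (float *)malloc(sizeof(float) * size);
    for (int i = 0; i < size; i++)
        h_data[i] = 1.0f;

    // Device destination buffer.
    float *d_data = NULL;
    deviceMalloc<float>(&d_data, size);

    // Time the copy on the given stream; stop() synchronizes on the stop event.
    CudaTimer timer(stream);
    timer.start();
    deviceMemcpyHtoD<float>(stream, d_data, h_data, size);
    float ms = timer.stop();

    std::cout << "H2D copy of " << size << " floats took " << ms << " ms" << std::endl;

    deviceFree<float>(d_data);
    free(h_data);
}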
#include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/reverse.h> #include <thrust/sequence.h> template <typename InputT, typename OutputT, int LogicalWarpThreads, int ItemsPerThread, int BlockThreads, typename ActionT> __global__ void kernel(const InputT *input_data, OutputT *output_data, ActionT action, cub::Int2Type<true> /* same_type */) { using WarpExchangeT = cub::WarpExchange<InputT, ItemsPerThread, LogicalWarpThreads>; constexpr int tile_size = ItemsPerThread * LogicalWarpThreads; constexpr int warps_per_block = BlockThreads / LogicalWarpThreads; __shared__ typename WarpExchangeT::TempStorage temp_storage[warps_per_block]; const int warp_id = threadIdx.x / LogicalWarpThreads; const int lane_id = threadIdx.x % LogicalWarpThreads; WarpExchangeT exchange(temp_storage[warp_id]); InputT input[ItemsPerThread]; input_data += warp_id * tile_size; output_data += warp_id * tile_size; for (int item = 0; item < ItemsPerThread; item++) { input[item] = input_data[lane_id * ItemsPerThread + item]; } action(input, input, exchange); for (int item = 0; item < ItemsPerThread; item++) { output_data[lane_id * ItemsPerThread + item] = input[item]; } } template <typename InputT, typename OutputT, int LogicalWarpThreads, int ItemsPerThread, int BlockThreads, typename ActionT> __global__ void kernel(const InputT *input_data, OutputT *output_data, ActionT action, cub::Int2Type<false> /* different_types */) { using WarpExchangeT = cub::WarpExchange<InputT, ItemsPerThread, LogicalWarpThreads>; constexpr int tile_size = ItemsPerThread * LogicalWarpThreads; constexpr int warps_per_block = BlockThreads / LogicalWarpThreads; __shared__ typename WarpExchangeT::TempStorage temp_storage[warps_per_block]; const int warp_id = threadIdx.x / LogicalWarpThreads; const int lane_id = threadIdx.x % LogicalWarpThreads; WarpExchangeT exchange(temp_storage[warp_id]); InputT input[ItemsPerThread]; OutputT output[ItemsPerThread]; input_data += warp_id * tile_size; output_data += warp_id * tile_size; for (int item = 0; item < ItemsPerThread; item++) { input[item] = input_data[lane_id * ItemsPerThread + item]; } action(input, output, exchange); for (int item = 0; item < ItemsPerThread; item++) { output_data[lane_id * ItemsPerThread + item] = output[item]; } } struct StripedToBlocked { template <typename InputT, typename OutputT, int LogicalWarpThreads, int ItemsPerThread, int ITEMS_PER_THREAD> __device__ void operator()( InputT (&input)[ITEMS_PER_THREAD], OutputT (&output)[ITEMS_PER_THREAD], cub::WarpExchange<InputT, ItemsPerThread, LogicalWarpThreads> &exchange) { exchange.StripedToBlocked(input, output); } }; struct BlockedToStriped { template <typename InputT, typename OutputT, int LogicalWarpThreads, int ItemsPerThread, int ITEMS_PER_THREAD> __device__ void operator()( InputT (&input)[ITEMS_PER_THREAD], OutputT (&output)[ITEMS_PER_THREAD], cub::WarpExchange<InputT, ItemsPerThread, LogicalWarpThreads> &exchange) { exchange.BlockedToStriped(input, output); } }; template <typename T> bool Compare( const thrust::device_vector<T> &lhs, const thrust::device_vector<T> &rhs) { auto err = thrust::mismatch(lhs.begin(), lhs.end(), rhs.begin()); if (err.first != lhs.end()) { auto i = thrust::distance(lhs.begin(), err.first); std::cerr << "Mismatch at " << i << ": " << lhs[i] << " != " << rhs[i] << std::endl; return false; } return true; } template <typename InputT, typename OutputT, int LogicalWarpThreads, int ItemsPerThread, int BlockThreads> void TestStripedToBlocked(thrust::device_vector<InputT> &input, 
thrust::device_vector<OutputT> &output) { thrust::fill(output.begin(), output.end(), OutputT{0}); thrust::host_vector<InputT> h_input(input.size()); FillStriped<LogicalWarpThreads, ItemsPerThread, BlockThreads>( h_input.begin()); input = h_input; kernel<InputT, OutputT, LogicalWarpThreads, ItemsPerThread, BlockThreads> <<<1, BlockThreads>>>(thrust::raw_pointer_cast(input.data()), thrust::raw_pointer_cast(output.data()), StripedToBlocked{}, cub::Int2Type<std::is_same<InputT, OutputT>::value>{}); cudaDeviceSynchronize(); thrust::device_vector<OutputT> expected_output(output.size()); thrust::sequence(expected_output.begin(), expected_output.end()); AssertTrue(Compare(expected_output, output)); } template <typename InputT, typename OutputT, int LogicalWarpThreads, int ItemsPerThread, int BlockThreads> void TestBlockedToStriped(thrust::device_vector<InputT> &input, thrust::device_vector<OutputT> &output) { thrust::fill(output.begin(), output.end(), OutputT{0}); thrust::host_vector<OutputT> expected_output(input.size()); FillStriped<LogicalWarpThreads, ItemsPerThread, BlockThreads>( expected_output.begin()); thrust::sequence(input.begin(), input.end()); kernel<InputT, OutputT, LogicalWarpThreads, ItemsPerThread, BlockThreads> <<<1, BlockThreads>>>(thrust::raw_pointer_cast(input.data()), thrust::raw_pointer_cast(output.data()), BlockedToStriped{}, cub::Int2Type<std::is_same<InputT, OutputT>::value>{}); cudaDeviceSynchronize(); thrust::device_vector<OutputT> d_expected_output(expected_output); AssertTrue(Compare(d_expected_output, output)); } template <typename InputT, typename OutputT, int LogicalWarpThreads, int ItemsPerThread, int BlockThreads> __global__ void scatter_kernel(const InputT *input_data, OutputT *output_data, cub::Int2Type<true> /* same_type */) { using WarpExchangeT = cub::WarpExchange<InputT, ItemsPerThread, LogicalWarpThreads>; constexpr int tile_size = ItemsPerThread * LogicalWarpThreads; constexpr int warps_per_block = BlockThreads / LogicalWarpThreads; __shared__ typename WarpExchangeT::TempStorage temp_storage[warps_per_block]; const int warp_id = threadIdx.x / LogicalWarpThreads; const int lane_id = threadIdx.x % LogicalWarpThreads; WarpExchangeT exchange(temp_storage[warp_id]); InputT input[ItemsPerThread]; // Reverse data int ranks[ItemsPerThread]; input_data += warp_id * tile_size; output_data += warp_id * tile_size; for (int item = 0; item < ItemsPerThread; item++) { const auto item_idx = lane_id * ItemsPerThread + item; input[item] = input_data[item_idx]; ranks[item] = tile_size - 1 - item_idx; } exchange.ScatterToStriped(input, ranks); // Striped to blocked for (int item = 0; item < ItemsPerThread; item++) { output_data[item * LogicalWarpThreads + lane_id] = input[item]; } } template <typename InputT, typename OutputT, int LogicalWarpThreads, int ItemsPerThread, int BlockThreads> __global__ void scatter_kernel(const InputT *input_data, OutputT *output_data, cub::Int2Type<false> /* different_types */) { using WarpExchangeT = cub::WarpExchange<InputT, ItemsPerThread, LogicalWarpThreads>; constexpr int tile_size = ItemsPerThread * LogicalWarpThreads; constexpr int warps_per_block = BlockThreads / LogicalWarpThreads; __shared__ typename WarpExchangeT::TempStorage temp_storage[warps_per_block]; const int warp_id = threadIdx.x / LogicalWarpThreads; const int lane_id = threadIdx.x % LogicalWarpThreads; WarpExchangeT exchange(temp_storage[warp_id]); InputT input[ItemsPerThread]; OutputT output[ItemsPerThread]; // Reverse data int ranks[ItemsPerThread]; input_data += warp_id * 
tile_size; output_data += warp_id * tile_size; for (int item = 0; item < ItemsPerThread; item++) { const auto item_idx = lane_id * ItemsPerThread + item; input[item] = input_data[item_idx]; ranks[item] = tile_size - 1 - item_idx; } exchange.ScatterToStriped(input, output, ranks); // Striped to blocked for (int item = 0; item < ItemsPerThread; item++) { output_data[item * LogicalWarpThreads + lane_id] = output[item]; } } template <typename InputT, typename OutputT, int LogicalWarpThreads, int ItemsPerThread, int BlockThreads> void TestScatterToStriped(thrust::device_vector<InputT> &input, thrust::device_vector<OutputT> &output) { thrust::fill(output.begin(), output.end(), OutputT{0}); thrust::sequence(input.begin(), input.end()); scatter_kernel<InputT, OutputT, LogicalWarpThreads, ItemsPerThread, BlockThreads> <<<1, BlockThreads>>>(thrust::raw_pointer_cast(input.data()), thrust::raw_pointer_cast(output.data()), cub::Int2Type<std::is_same<InputT, OutputT>::value>{}); thrust::device_vector<OutputT> d_expected_output(input); constexpr int tile_size = LogicalWarpThreads * ItemsPerThread; for (int warp_id = 0; warp_id < BlockThreads / LogicalWarpThreads; warp_id++) { const int warp_data_begin = tile_size * warp_id; const int warp_data_end = warp_data_begin + tile_size; thrust::reverse(d_expected_output.begin() + warp_data_begin, d_expected_output.begin() + warp_data_end); } AssertTrue(Compare(d_expected_output, output)); } template <typename InputT, typename OutputT, int LogicalWarpThreads, int ItemsPerThread, int BlockThreads> void Test() { static_assert(BlockThreads % LogicalWarpThreads == 0, "BlockThreads must be a multiple of LogicalWarpThreads"); const int warps_in_block = BlockThreads / LogicalWarpThreads; const int items_per_warp = LogicalWarpThreads * ItemsPerThread; const int items_per_block = items_per_warp * warps_in_block; thrust::device_vector<InputT> input(items_per_block); thrust::device_vector<OutputT> output(items_per_block); TestStripedToBlocked<InputT, OutputT, LogicalWarpThreads, ItemsPerThread, BlockThreads>(input, output); TestBlockedToStriped<InputT, OutputT, LogicalWarpThreads, ItemsPerThread, BlockThreads>(input, output); TestScatterToStriped<InputT, OutputT, LogicalWarpThreads, ItemsPerThread, BlockThreads>(input, output); } template <int WarpThreads, int ItemsPerThread, int BlockThreads> void Test() { Test<std::uint16_t, std::uint32_t, WarpThreads, ItemsPerThread, BlockThreads>(); Test<std::uint32_t, std::uint32_t, WarpThreads, ItemsPerThread, BlockThreads>(); Test<std::uint64_t, std::uint32_t, WarpThreads, ItemsPerThread, BlockThreads>(); } template <int LogicalWarpThreads, int ItemsPerThread> void Test() { Test<LogicalWarpThreads, ItemsPerThread, 128>(); Test<LogicalWarpThreads, ItemsPerThread, 256>(); } template <int LogicalWarpThreads> void Test() { Test<LogicalWarpThreads, 1>(); Test<LogicalWarpThreads, 4>(); Test<LogicalWarpThreads, 7>(); } int main(int argc, char** argv) { CommandLineArgs args(argc, argv); // Initialize device CubDebugExit(args.DeviceInit()); Test<4>(); Test<16>(); Test<32>(); return 0; }
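// Single-warp sketch (illustrative only, not part of the test driver above):
// shows the blocked <-> striped layouts that the tests verify, using one full
// 32-thread logical warp and 2 items per thread. The kernel name
// example_blocked_to_striped is hypothetical; launch as
// example_blocked_to_striped<<<1, 32>>>(d_out).
__global__ void example_blocked_to_striped(int *out)
{
  constexpr int items_per_thread = 2;
  constexpr int warp_threads     = 32;
  using WarpExchangeT = cub::WarpExchange<int, items_per_thread, warp_threads>;

  __shared__ typename WarpExchangeT::TempStorage temp_storage;
  WarpExchangeT exchange(temp_storage);

  // Blocked input: lane i owns {2 * i, 2 * i + 1}.
  int items[items_per_thread];
  for (int item = 0; item < items_per_thread; item++)
  {
    items[item] = threadIdx.x * items_per_thread + item;
  }

  exchange.BlockedToStriped(items, items);

  // Striped result: lane i now owns {i, i + 32}, so this write restores the
  // original 0..63 sequence in out.
  for (int item = 0; item < items_per_thread; item++)
  {
    out[item * warp_threads + threadIdx.x] = items[item];
  }
}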
#include <doctest.h> #include <taskflow/taskflow.hpp> #include <taskflow/cuda/cudaflow.hpp> #include <taskflow/cuda/algorithm/for_each.hpp> #include <taskflow/cuda/algorithm/transform.hpp> #include <taskflow/cuda/algorithm/reduce.hpp> #include <taskflow/cuda/algorithm/sort.hpp> #include <taskflow/cuda/algorithm/find.hpp> #include <taskflow/cuda/algorithm/scan.hpp> constexpr float eps = 0.0001f; // -------------------------------------------------------- // Testcase: add2 // -------------------------------------------------------- template <typename T, typename F> void add2() { //const unsigned N = 1<<20; tf::Taskflow taskflow; tf::Executor executor; for(size_t N=1; N<=(1<<20); N <<= 1) { taskflow.clear(); T v1 = ::rand() % 100; T v2 = ::rand() % 100; std::vector<T> hx, hy; T* dx {nullptr}; T* dy {nullptr}; // allocate x auto allocate_x = taskflow.emplace([&]() { hx.resize(N, v1); REQUIRE(cudaMalloc(&dx, N*sizeof(T)) == cudaSuccess); }).name("allocate_x"); // allocate y auto allocate_y = taskflow.emplace([&]() { hy.resize(N, v2); REQUIRE(cudaMalloc(&dy, N*sizeof(T)) == cudaSuccess); }).name("allocate_y"); // axpy auto cudaflow = taskflow.emplace([&](F& cf) { auto h2d_x = cf.copy(dx, hx.data(), N).name("h2d_x"); auto h2d_y = cf.copy(dy, hy.data(), N).name("h2d_y"); auto d2h_x = cf.copy(hx.data(), dx, N).name("d2h_x"); auto d2h_y = cf.copy(hy.data(), dy, N).name("d2h_y"); //auto kernel = cf.add(dx, N, dx, dy); auto kernel = cf.transform(dx, dx+N, dy, [] __device__ (T x) { return x + 2; } ); kernel.succeed(h2d_x, h2d_y) .precede(d2h_x, d2h_y); }).name("saxpy"); cudaflow.succeed(allocate_x, allocate_y); // Add a verification task auto verifier = taskflow.emplace([&](){ for (size_t i = 0; i < N; i++) { REQUIRE(std::fabs(hx[i] - v1) < eps); REQUIRE(std::fabs(hy[i] - (hx[i] + 2)) < eps); } }).succeed(cudaflow).name("verify"); // free memory auto deallocate_x = taskflow.emplace([&](){ REQUIRE(cudaFree(dx) == cudaSuccess); }).name("deallocate_x"); auto deallocate_y = taskflow.emplace([&](){ REQUIRE(cudaFree(dy) == cudaSuccess); }).name("deallocate_y"); verifier.precede(deallocate_x, deallocate_y); executor.run(taskflow).wait(); // standalone tramsform tf::cudaDefaultExecutionPolicy p; auto input = tf::cuda_malloc_shared<T>(N); auto output = tf::cuda_malloc_shared<T>(N); for(size_t n=0; n<N; n++) { input[n] = 1; } tf::cuda_transform(p, input, input + N, output, [] __device__ (T i) { return i+2; } ); cudaStreamSynchronize(0); for(size_t n=0; n<N; n++) { REQUIRE(output[n] == 3); } } } TEST_CASE("add2.int" * doctest::timeout(300)) { add2<int, tf::cudaFlow>(); } TEST_CASE("add2.float" * doctest::timeout(300)) { add2<float, tf::cudaFlow>(); } TEST_CASE("add2.double" * doctest::timeout(300)) { add2<double, tf::cudaFlow>(); } TEST_CASE("capture_add2.int" * doctest::timeout(300)) { add2<int, tf::cudaFlowCapturer>(); } TEST_CASE("capture_add2.float" * doctest::timeout(300)) { add2<float, tf::cudaFlowCapturer>(); } TEST_CASE("capture_add2.double" * doctest::timeout(300)) { add2<double, tf::cudaFlowCapturer>(); } // ---------------------------------------------------------------------------- // for_each // ---------------------------------------------------------------------------- template <typename T, typename F> void for_each() { tf::Taskflow taskflow; tf::Executor executor; for(int n=1; n<=1234567; n = n*2 + 1) { taskflow.clear(); T* cpu = nullptr; T* gpu = nullptr; auto cputask = taskflow.emplace([&](){ cpu = static_cast<T*>(std::calloc(n, sizeof(T))); REQUIRE(cudaMalloc(&gpu, n*sizeof(T)) == cudaSuccess); 
}); tf::Task gputask; gputask = taskflow.emplace([&](F& cf) { auto d2h = cf.copy(cpu, gpu, n); auto h2d = cf.copy(gpu, cpu, n); auto kernel = cf.for_each( gpu, gpu+n, [] __device__ (T& val) { val = 65536; } ); h2d.precede(kernel); d2h.succeed(kernel); }); cputask.precede(gputask); executor.run(taskflow).wait(); for(int i=0; i<n; i++) { REQUIRE(std::fabs(cpu[i] - (T)65536) < eps); } std::free(cpu); REQUIRE(cudaFree(gpu) == cudaSuccess); // standard algorithm: for_each auto g_data = tf::cuda_malloc_shared<T>(n); for(int i=0; i<n; i++) { g_data[i] = 0; } tf::cuda_for_each(tf::cudaDefaultExecutionPolicy{}, g_data, g_data + n, [] __device__ (T& val) { val = 12222; } ); cudaStreamSynchronize(0); for(int i=0; i<n; i++) { REQUIRE(std::fabs(g_data[i] - (T)12222) < eps); } tf::cuda_free(g_data); } } TEST_CASE("cudaflow.for_each.int" * doctest::timeout(300)) { for_each<int, tf::cudaFlow>(); } TEST_CASE("cudaflow.for_each.float" * doctest::timeout(300)) { for_each<float, tf::cudaFlow>(); } TEST_CASE("cudaflow.for_each.double" * doctest::timeout(300)) { for_each<double, tf::cudaFlow>(); } TEST_CASE("capture.for_each.int" * doctest::timeout(300)) { for_each<int, tf::cudaFlowCapturer>(); } TEST_CASE("capture.for_each.float" * doctest::timeout(300)) { for_each<float, tf::cudaFlowCapturer>(); } TEST_CASE("capture.for_each.double" * doctest::timeout(300)) { for_each<double, tf::cudaFlowCapturer>(); } // -------------------------------------------------------- // Testcase: for_each_index // -------------------------------------------------------- template <typename T, typename F> void for_each_index() { for(int n=10; n<=1234567; n = n*2 + 1) { tf::Taskflow taskflow; tf::Executor executor; T* cpu = nullptr; T* gpu = nullptr; auto cputask = taskflow.emplace([&](){ cpu = static_cast<T*>(std::calloc(n, sizeof(T))); REQUIRE(cudaMalloc(&gpu, n*sizeof(T)) == cudaSuccess); }); auto gputask = taskflow.emplace([&](F& cf) { auto d2h = cf.copy(cpu, gpu, n); auto h2d = cf.copy(gpu, cpu, n); //auto kernel = cf.for_each_index(gpu, n, [] __device__ (T& value){ value = 17; }); auto kernel1 = cf.for_each_index( 0, n, 2, [gpu] __device__ (int i) { gpu[i] = 17; } ); auto kernel2 = cf.for_each_index( 1, n, 2, [=] __device__ (int i) { gpu[i] = -17; } ); h2d.precede(kernel1, kernel2); d2h.succeed(kernel1, kernel2); }); cputask.precede(gputask); executor.run(taskflow).wait(); for(int i=0; i<n; i++) { if(i % 2 == 0) { REQUIRE(std::fabs(cpu[i] - (T)17) < eps); } else { REQUIRE(std::fabs(cpu[i] - (T)(-17)) < eps); } } std::free(cpu); REQUIRE(cudaFree(gpu) == cudaSuccess); } } TEST_CASE("cudaflow.for_each_index.int" * doctest::timeout(300)) { for_each_index<int, tf::cudaFlow>(); } TEST_CASE("cudaflow.for_each_index.float" * doctest::timeout(300)) { for_each_index<float, tf::cudaFlow>(); } TEST_CASE("cudaflow.for_each_index.double" * doctest::timeout(300)) { for_each_index<double, tf::cudaFlow>(); } TEST_CASE("capture.for_each_index.int" * doctest::timeout(300)) { for_each_index<int, tf::cudaFlowCapturer>(); } TEST_CASE("capture.for_each_index.float" * doctest::timeout(300)) { for_each_index<float, tf::cudaFlowCapturer>(); } TEST_CASE("capture.for_each_index.double" * doctest::timeout(300)) { for_each_index<double, tf::cudaFlowCapturer>(); } // ---------------------------------------------------------------------------- // transform // ---------------------------------------------------------------------------- template <typename F> void transform() { F cudaflow; for(unsigned n=1; n<=1234567; n = n*2 + 1) { cudaflow.clear(); auto src1 = 
tf::cuda_malloc_shared<int>(n); auto src2 = tf::cuda_malloc_shared<int>(n); auto dest = tf::cuda_malloc_shared<int>(n); for(unsigned i=0; i<n; i++) { src1[i] = 10; src2[i] = 90; dest[i] = 0; } cudaflow.transform(src1, src1+n, src2, dest, []__device__(int s1, int s2) { return s1 + s2; } ); cudaflow.offload(); for(unsigned i=0; i<n; i++){ REQUIRE(dest[i] == src1[i] + src2[i]); } } } TEST_CASE("cudaflow.transform" * doctest::timeout(300)) { transform<tf::cudaFlow>(); } TEST_CASE("capture.transform" * doctest::timeout(300) ) { transform<tf::cudaFlowCapturer>(); } // ---------------------------------------------------------------------------- // reduce // ---------------------------------------------------------------------------- template <typename T, typename F> void reduce() { for(int n=1; n<=1234567; n = n*2 + 1) { tf::Taskflow taskflow; tf::Executor executor; T sum = 0; std::vector<T> cpu(n); for(auto& i : cpu) { i = ::rand()%100-50; sum += i; } T sol; T* gpu = nullptr; T* res = nullptr; auto cputask = taskflow.emplace([&](){ REQUIRE(cudaMalloc(&gpu, n*sizeof(T)) == cudaSuccess); REQUIRE(cudaMalloc(&res, 1*sizeof(T)) == cudaSuccess); }); tf::Task gputask; gputask = taskflow.emplace([&](F& cf) { auto d2h = cf.copy(&sol, res, 1); auto h2d = cf.copy(gpu, cpu.data(), n); auto set = cf.single_task([res] __device__ () mutable { *res = 1000; }); auto kernel = cf.reduce( gpu, gpu+n, res, [] __device__ (T a, T b) mutable { return a + b; } ); kernel.succeed(h2d, set); d2h.succeed(kernel); }); cputask.precede(gputask); executor.run(taskflow).wait(); REQUIRE(std::fabs(sum-sol+1000) < 0.0001); REQUIRE(cudaFree(gpu) == cudaSuccess); REQUIRE(cudaFree(res) == cudaSuccess); } } TEST_CASE("cudaflow.reduce.int" * doctest::timeout(300)) { reduce<int, tf::cudaFlow>(); } TEST_CASE("cudaflow.reduce.float" * doctest::timeout(300)) { reduce<float, tf::cudaFlow>(); } TEST_CASE("cudaflow.reduce.double" * doctest::timeout(300)) { reduce<double, tf::cudaFlow>(); } TEST_CASE("capture.reduce.int" * doctest::timeout(300)) { reduce<int, tf::cudaFlowCapturer>(); } TEST_CASE("capture.reduce.float" * doctest::timeout(300)) { reduce<float, tf::cudaFlowCapturer>(); } TEST_CASE("capture.reduce.double" * doctest::timeout(300)) { reduce<double, tf::cudaFlowCapturer>(); } // ---------------------------------------------------------------------------- // uninitialized_reduce // ---------------------------------------------------------------------------- template <typename T, typename F> void uninitialized_reduce() { for(int n=1; n<=1234567; n = n*2 + 1) { tf::Taskflow taskflow; tf::Executor executor; T sum = 0; std::vector<T> cpu(n); for(auto& i : cpu) { i = ::rand()%100-50; sum += i; } T sol; T* gpu = nullptr; T* res = nullptr; auto cputask = taskflow.emplace([&](){ REQUIRE(cudaMalloc(&gpu, n*sizeof(T)) == cudaSuccess); REQUIRE(cudaMalloc(&res, 1*sizeof(T)) == cudaSuccess); }); tf::Task gputask; gputask = taskflow.emplace([&](F& cf) { auto d2h = cf.copy(&sol, res, 1); auto h2d = cf.copy(gpu, cpu.data(), n); auto set = cf.single_task([res] __device__ () mutable { *res = 1000; }); auto kernel = cf.uninitialized_reduce( gpu, gpu+n, res, [] __device__ (T a, T b) { return a + b; } ); kernel.succeed(h2d, set); d2h.succeed(kernel); }); cputask.precede(gputask); executor.run(taskflow).wait(); REQUIRE(std::fabs(sum-sol) < 0.0001); REQUIRE(cudaFree(gpu) == cudaSuccess); REQUIRE(cudaFree(res) == cudaSuccess); } } TEST_CASE("cudaflow.uninitialized_reduce.int" * doctest::timeout(300)) { uninitialized_reduce<int, tf::cudaFlow>(); }
TEST_CASE("cudaflow.uninitialized_reduce.float" * doctest::timeout(300)) { uninitialized_reduce<float, tf::cudaFlow>(); } TEST_CASE("cudaflow.uninitialized_reduce.double" * doctest::timeout(300)) { uninitialized_reduce<double, tf::cudaFlow>(); } TEST_CASE("capture.uninitialized_reduce.int" * doctest::timeout(300)) { uninitialized_reduce<int, tf::cudaFlowCapturer>(); } TEST_CASE("capture.uninitialized_reduce.float" * doctest::timeout(300)) { uninitialized_reduce<float, tf::cudaFlowCapturer>(); } TEST_CASE("capture.uninitialized_reduce.double" * doctest::timeout(300)) { uninitialized_reduce<double, tf::cudaFlowCapturer>(); } // ---------------------------------------------------------------------------- // transform_reduce // ---------------------------------------------------------------------------- template <typename T, typename F> void transform_reduce() { tf::Executor executor; for(int n=1; n<=1234567; n = n*2 + 1) { tf::Taskflow taskflow; T sum = 0; std::vector<T> cpu(n); for(auto& i : cpu) { i = ::rand()%100-50; sum += i; } T sol; T* gpu = nullptr; T* res = nullptr; auto cputask = taskflow.emplace([&](){ REQUIRE(cudaMalloc(&gpu, n*sizeof(T)) == cudaSuccess); REQUIRE(cudaMalloc(&res, 1*sizeof(T)) == cudaSuccess); }); tf::Task gputask; gputask = taskflow.emplace([&](F& cf) { auto d2h = cf.copy(&sol, res, 1); auto h2d = cf.copy(gpu, cpu.data(), n); auto set = cf.single_task([res] __device__ () mutable { *res = 1000; }); auto kernel = cf.transform_reduce( gpu, gpu+n, res, [] __device__ (T a, T b) { return a + b; }, [] __device__ (T a) { return a + 1; } ); kernel.succeed(h2d, set); d2h.succeed(kernel); }); cputask.precede(gputask); executor.run(taskflow).wait(); REQUIRE(std::fabs(sum+n+1000-sol) < 0.0001); REQUIRE(cudaFree(gpu) == cudaSuccess); REQUIRE(cudaFree(res) == cudaSuccess); } } TEST_CASE("cudaflow.transform_reduce.int" * doctest::timeout(300)) { transform_reduce<int, tf::cudaFlow>(); } TEST_CASE("cudaflow.transform_reduce.float" * doctest::timeout(300)) { transform_reduce<float, tf::cudaFlow>(); } TEST_CASE("cudaflow.transform_reduce.double" * doctest::timeout(300)) { transform_reduce<double, tf::cudaFlow>(); } TEST_CASE("capture.transform_reduce.int" * doctest::timeout(300)) { transform_reduce<int, tf::cudaFlowCapturer>(); } TEST_CASE("capture.transform_reduce.float" * doctest::timeout(300)) { transform_reduce<float, tf::cudaFlowCapturer>(); } TEST_CASE("capture.transform_reduce.double" * doctest::timeout(300)) { transform_reduce<double, tf::cudaFlowCapturer>(); } // ---------------------------------------------------------------------------- // transform_uninitialized_reduce // ---------------------------------------------------------------------------- template <typename T, typename F> void transform_uninitialized_reduce() { tf::Executor executor; for(int n=1; n<=1234567; n = n*2 + 1) { tf::Taskflow taskflow; T sum = 0; std::vector<T> cpu(n); for(auto& i : cpu) { i = ::rand()%100-50; sum += i; } T sol; T* gpu = nullptr; T* res = nullptr; auto cputask = taskflow.emplace([&](){ REQUIRE(cudaMalloc(&gpu, n*sizeof(T)) == cudaSuccess); REQUIRE(cudaMalloc(&res, 1*sizeof(T)) == cudaSuccess); }); tf::Task gputask; gputask = taskflow.emplace([&](F& cf) { auto d2h = cf.copy(&sol, res, 1); auto h2d = cf.copy(gpu, cpu.data(), n); auto set = cf.single_task([res] __device__ () mutable { *res = 1000; }); auto kernel = cf.transform_uninitialized_reduce( gpu, gpu+n, res, [] __device__ (T a, T b) { return a + b; }, [] __device__ (T a) { return a + 1; } ); kernel.succeed(h2d, set); d2h.succeed(kernel);
}); cputask.precede(gputask); executor.run(taskflow).wait(); REQUIRE(std::fabs(sum+n-sol) < 0.0001); REQUIRE(cudaFree(gpu) == cudaSuccess); REQUIRE(cudaFree(res) == cudaSuccess); } } TEST_CASE("cudaflow.transform_uninitialized_reduce.int" * doctest::timeout(300)) { transform_uninitialized_reduce<int, tf::cudaFlow>(); } TEST_CASE("cudaflow.transform_uninitialized_reduce.float" * doctest::timeout(300)) { transform_uninitialized_reduce<float, tf::cudaFlow>(); } TEST_CASE("cudaflow.transform_uninitialized_reduce.double" * doctest::timeout(300)) { transform_uninitialized_reduce<double, tf::cudaFlow>(); } TEST_CASE("capture.transform_uninitialized_reduce.int" * doctest::timeout(300)) { transform_uninitialized_reduce<int, tf::cudaFlowCapturer>(); } TEST_CASE("capture.transform_uninitialized_reduce.float" * doctest::timeout(300)) { transform_uninitialized_reduce<float, tf::cudaFlowCapturer>(); } TEST_CASE("capture.transform_uninitialized_reduce.double" * doctest::timeout(300)) { transform_uninitialized_reduce<double, tf::cudaFlowCapturer>(); } // ---------------------------------------------------------------------------- // scan // ---------------------------------------------------------------------------- template <typename T, typename F> void scan() { tf::Executor executor; tf::Taskflow taskflow; for(int N=1; N<=1234567; N = N*2 + 1) { taskflow.clear(); auto data1 = tf::cuda_malloc_shared<T>(N); auto data2 = tf::cuda_malloc_shared<T>(N); auto scan1 = tf::cuda_malloc_shared<T>(N); auto scan2 = tf::cuda_malloc_shared<T>(N); // initialize the data for(int i=0; i<N; i++) { data1[i] = T(i); data2[i] = T(i); scan1[i] = 0; scan2[i] = 0; } // perform reduction taskflow.emplace([&](F& cudaflow){ // inclusive scan cudaflow.inclusive_scan( data1, data1+N, scan1, [] __device__ (T a, T b){ return a+b; } ); // exclusive scan cudaflow.exclusive_scan( data2, data2+N, scan2, [] __device__ (T a, T b){ return a+b; } ); }); executor.run(taskflow).wait(); // inspect for(int i=1; i<N; i++) { REQUIRE(scan1[i] == (scan1[i-1]+data1[i])); REQUIRE(scan2[i] == (scan2[i-1]+data2[i-1])); } // test standalone algorithms // initialize the data for(int i=0; i<N; i++) { data1[i] = T(i); data2[i] = T(i); scan1[i] = 0; scan2[i] = 0; } // allocate temporary buffer tf::cudaDeviceVector<std::byte> temp( tf::cuda_scan_buffer_size<tf::cudaDefaultExecutionPolicy, T>(N) ); tf::cuda_inclusive_scan(tf::cudaDefaultExecutionPolicy{}, data1, data1+N, scan1, tf::cuda_plus<T>{}, temp.data() ); cudaStreamSynchronize(0); tf::cuda_exclusive_scan(tf::cudaDefaultExecutionPolicy{}, data2, data2+N, scan2, tf::cuda_plus<T>{}, temp.data() ); cudaStreamSynchronize(0); // inspect for(int i=1; i<N; i++) { REQUIRE(scan1[i] == (scan1[i-1]+data1[i])); REQUIRE(scan2[i] == (scan2[i-1]+data2[i-1])); } REQUIRE(cudaFree(data1) == cudaSuccess); REQUIRE(cudaFree(data2) == cudaSuccess); REQUIRE(cudaFree(scan1) == cudaSuccess); REQUIRE(cudaFree(scan2) == cudaSuccess); } } TEST_CASE("cudaflow.scan.int" * doctest::timeout(300)) { scan<int, tf::cudaFlow>(); } TEST_CASE("cudaflow.scan.size_t" * doctest::timeout(300)) { scan<size_t, tf::cudaFlow>(); } TEST_CASE("capture.scan.int" * doctest::timeout(300)) { scan<int, tf::cudaFlowCapturer>(); } TEST_CASE("capture.scan.size_t" * doctest::timeout(300)) { scan<size_t, tf::cudaFlowCapturer>(); } // ---------------------------------------------------------------------------- // transofrm scan // ---------------------------------------------------------------------------- template <typename T, typename F> void transform_scan() { 
tf::Executor executor; tf::Taskflow taskflow; for(int N=1; N<=1234567; N = N*2 + 1) { taskflow.clear(); auto data1 = tf::cuda_malloc_shared<T>(N); auto data2 = tf::cuda_malloc_shared<T>(N); auto scan1 = tf::cuda_malloc_shared<T>(N); auto scan2 = tf::cuda_malloc_shared<T>(N); // initialize the data for(int i=0; i<N; i++) { data1[i] = T(i); data2[i] = T(i); scan1[i] = 0; scan2[i] = 0; } // perform reduction taskflow.emplace([&](F& cudaflow){ // inclusive scan cudaflow.transform_inclusive_scan( data1, data1+N, scan1, [] __device__ (T a, T b){ return a+b; }, [] __device__ (T a) { return a*10; } ); // exclusive scan cudaflow.transform_exclusive_scan( data2, data2+N, scan2, [] __device__ (T a, T b){ return a+b; }, [] __device__ (T a) { return a*10; } ); }); executor.run(taskflow).wait(); // inspect for(int i=1; i<N; i++) { REQUIRE(scan1[i] == (scan1[i-1]+data1[i]*10)); REQUIRE(scan2[i] == (scan2[i-1]+data2[i-1]*10)); } // standalone algorithms // initialize the data for(int i=0; i<N; i++) { data1[i] = T(i); data2[i] = T(i); scan1[i] = 0; scan2[i] = 0; } // allocate temporary buffer tf::cudaDeviceVector<std::byte> temp( tf::cuda_scan_buffer_size<tf::cudaDefaultExecutionPolicy, T>(N) ); tf::cuda_transform_inclusive_scan(tf::cudaDefaultExecutionPolicy{}, data1, data1+N, scan1, [] __device__ (T a, T b){ return a+b; }, [] __device__ (T a) { return a*10; }, temp.data() ); cudaStreamSynchronize(0); tf::cuda_transform_exclusive_scan(tf::cudaDefaultExecutionPolicy{}, data2, data2+N, scan2, [] __device__ (T a, T b){ return a+b; }, [] __device__ (T a) { return a*10; }, temp.data() ); cudaStreamSynchronize(0); // inspect for(int i=1; i<N; i++) { REQUIRE(scan1[i] == (scan1[i-1]+data1[i]*10)); REQUIRE(scan2[i] == (scan2[i-1]+data2[i-1]*10)); } REQUIRE(cudaFree(data1) == cudaSuccess); REQUIRE(cudaFree(data2) == cudaSuccess); REQUIRE(cudaFree(scan1) == cudaSuccess); REQUIRE(cudaFree(scan2) == cudaSuccess); } } TEST_CASE("cudaflow.transform_scan.int" * doctest::timeout(300)) { transform_scan<int, tf::cudaFlow>(); } TEST_CASE("cudaflow.transform_scan.size_t" * doctest::timeout(300)) { transform_scan<size_t, tf::cudaFlow>(); } TEST_CASE("capture.transform_scan.int" * doctest::timeout(300)) { transform_scan<int, tf::cudaFlowCapturer>(); } TEST_CASE("capture.transform_scan.size_t" * doctest::timeout(300)) { transform_scan<size_t, tf::cudaFlowCapturer>(); } // ---------------------------------------------------------------------------- // merge // ---------------------------------------------------------------------------- template <typename T, typename F> void merge_keys() { tf::Executor executor; tf::Taskflow taskflow; for(int N=0; N<=1234567; N = N*2 + 1) { taskflow.clear(); auto a = tf::cuda_malloc_shared<T>(N); auto b = tf::cuda_malloc_shared<T>(N); auto c = tf::cuda_malloc_shared<T>(2*N); tf::cudaStream s; auto p = tf::cudaDefaultExecutionPolicy{s}; // ----------------- standalone algorithms // initialize the data for(int i=0; i<N; i++) { a[i] = T(rand()%100); b[i] = T(rand()%100); } std::sort(a, a+N); std::sort(b, b+N); auto bufsz = tf::cuda_merge_buffer_size<decltype(p)>(N, N); tf::cudaDeviceVector<std::byte> buf(bufsz); tf::cuda_merge(p, a, a+N, b, b+N, c, tf::cuda_less<T>{}, buf.data()); s.synchronize(); REQUIRE(std::is_sorted(c, c+2*N)); // ----------------- cudaFlow capturer for(int i=0; i<N*2; i++) { c[i] = rand(); } taskflow.emplace([&](F& cudaflow){ cudaflow.merge(a, a+N, b, b+N, c, tf::cuda_less<T>{}); }); executor.run(taskflow).wait(); REQUIRE(std::is_sorted(c, c+2*N)); REQUIRE(cudaFree(a) == cudaSuccess); REQUIRE(cudaFree(b) == cudaSuccess); REQUIRE(cudaFree(c) == cudaSuccess); } } TEST_CASE("cudaflow.merge_keys.int" * doctest::timeout(300)) {
merge_keys<int, tf::cudaFlow>(); } TEST_CASE("cudaflow.merge_keys.float" * doctest::timeout(300)) { merge_keys<float, tf::cudaFlow>(); } TEST_CASE("capture.merge_keys.int" * doctest::timeout(300)) { merge_keys<int, tf::cudaFlowCapturer>(); } TEST_CASE("capture.merge_keys.float" * doctest::timeout(300)) { merge_keys<float, tf::cudaFlowCapturer>(); } // ---------------------------------------------------------------------------- // merge_by_keys // ---------------------------------------------------------------------------- template <typename T, typename F> void merge_keys_values() { tf::Executor executor; tf::Taskflow taskflow; for(int N=0; N<=1234567; N = N*2 + 1) { taskflow.clear(); auto a_k = tf::cuda_malloc_shared<T>(N); auto b_k = tf::cuda_malloc_shared<T>(N); auto c_k = tf::cuda_malloc_shared<T>(2*N); auto a_v = tf::cuda_malloc_shared<int>(N); auto b_v = tf::cuda_malloc_shared<int>(N); auto c_v = tf::cuda_malloc_shared<int>(2*N); tf::cudaStream s; auto p = tf::cudaDefaultExecutionPolicy{s}; // ----------------- standalone algorithms // initialize the data for(int i=0; i<N; i++) { a_k[i] = (i*2+1); a_v[i] = -(i*2+1); b_k[i] = (i+1)*2; b_v[i] = -(i+1)*2; c_k[i] = c_k[i+N] = c_v[i] = c_v[i+N] = 0; } auto bufsz = tf::cuda_merge_buffer_size<decltype(p)>(N, N); tf::cudaDeviceVector<std::byte> buf(bufsz); tf::cuda_merge_by_key( p, a_k, a_k+N, a_v, b_k, b_k+N, b_v, c_k, c_v, tf::cuda_less<T>{}, buf.data() ); s.synchronize(); for(int i=0; i<2*N; i++) { REQUIRE(c_k[i] == (i+1)); REQUIRE(c_v[i] == -(i+1)); } // ----------------- cudaFlow capturer // initialize the data for(int i=0; i<N; i++) { a_k[i] = (i*2+1); a_v[i] = -(i*2+1); b_k[i] = (i+1)*2; b_v[i] = -(i+1)*2; c_k[i] = c_k[i+N] = c_v[i] = c_v[i+N] = 0; } taskflow.emplace([&](F& cudaflow){ cudaflow.merge_by_key( a_k, a_k+N, a_v, b_k, b_k+N, b_v, c_k, c_v, tf::cuda_less<T>{} ); }); executor.run(taskflow).wait(); for(int i=0; i<2*N; i++) { REQUIRE(c_k[i] == (i+1)); REQUIRE(c_v[i] == -(i+1)); } REQUIRE(cudaFree(a_k) == cudaSuccess); REQUIRE(cudaFree(b_k) == cudaSuccess); REQUIRE(cudaFree(c_k) == cudaSuccess); REQUIRE(cudaFree(a_v) == cudaSuccess); REQUIRE(cudaFree(b_v) == cudaSuccess); REQUIRE(cudaFree(c_v) == cudaSuccess); } } TEST_CASE("cudaflow.merge_keys_values.int" * doctest::timeout(300)) { merge_keys_values<int, tf::cudaFlow>(); } TEST_CASE("cudaflow.merge_keys_values.float" * doctest::timeout(300)) { merge_keys_values<float, tf::cudaFlow>(); } TEST_CASE("capturer.merge_keys_values.int" * doctest::timeout(300)) { merge_keys_values<int, tf::cudaFlowCapturer>(); } TEST_CASE("capturer.merge_keys_values.float" * doctest::timeout(300)) { merge_keys_values<float, tf::cudaFlowCapturer>(); } // ---------------------------------------------------------------------------- // sort // ---------------------------------------------------------------------------- template <typename T, typename F> void sort_keys() { tf::Executor executor; tf::Taskflow taskflow; for(int N=0; N<=1234567; N = N*2 + 1) { taskflow.clear(); auto a = tf::cuda_malloc_shared<T>(N); tf::cudaStream s; auto p = tf::cudaDefaultExecutionPolicy{s}; // ----------------- standalone asynchronous algorithms // initialize the data for(int i=0; i<N; i++) { a[i] = T(rand()%1000); } auto bufsz = tf::cuda_sort_buffer_size<decltype(p), T>(N); tf::cudaDeviceVector<std::byte> buf(bufsz); tf::cuda_sort(p, a, a+N, tf::cuda_less<T>{}, buf.data()); s.synchronize(); REQUIRE(std::is_sorted(a, a+N)); // ----------------- cudaflow capturer for(int i=0; i<N; i++) { a[i] = T(rand()%1000); }
taskflow.emplace([&](F& cudaflow){ cudaflow.sort(a, a+N, tf::cuda_less<T>{}); }); executor.run(taskflow).wait(); REQUIRE(std::is_sorted(a, a+N)); REQUIRE(cudaFree(a) == cudaSuccess); } } TEST_CASE("cudaflow.sort_keys.int" * doctest::timeout(300)) { sort_keys<int, tf::cudaFlow>(); } TEST_CASE("cudaflow.sort_keys.float" * doctest::timeout(300)) { sort_keys<float, tf::cudaFlow>(); } TEST_CASE("capture.sort_keys.int" * doctest::timeout(300)) { sort_keys<int, tf::cudaFlowCapturer>(); } TEST_CASE("capture.sort_keys.float" * doctest::timeout(300)) { sort_keys<float, tf::cudaFlowCapturer>(); } // ---------------------------------------------------------------------------- // sort key-value // ---------------------------------------------------------------------------- template <typename T, typename F> void sort_keys_values() { std::random_device rd; std::mt19937 g(rd()); tf::Executor executor; tf::Taskflow taskflow; for(int N=1; N<=1234567; N = N*2 + 1) { taskflow.clear(); auto a = tf::cuda_malloc_shared<T>(N); auto b = tf::cuda_malloc_shared<int>(N); tf::cudaStream s; auto p = tf::cudaDefaultExecutionPolicy{s}; std::vector<int> indices(N); // ----------------- standalone asynchronous algorithms // initialize the data for(int i=0; i<N; i++) { a[i] = i; b[i] = i; indices[i] = i; //printf("a[%d]=%d, b[%d]=%d\n", i, a[i], i, b[i]); } std::shuffle(a, a+N, g); std::sort(indices.begin(), indices.end(), [&](auto i, auto j){ return a[i] < a[j]; }); auto bufsz = tf::cuda_sort_buffer_size<decltype(p), T, int>(N); tf::cudaDeviceVector<std::byte> buf(bufsz); tf::cuda_sort_by_key(p, a, a+N, b, tf::cuda_less<T>{}, buf.data()); s.synchronize(); REQUIRE(std::is_sorted(a, a+N)); for(int i=0; i<N; i++) { REQUIRE(indices[i] == b[i]); } // ----------------- cudaflow capturer // initialize the data for(int i=0; i<N; i++) { b[i] = i; indices[i] = i; //printf("a[%d]=%d, b[%d]=%d\n", i, a[i], i, b[i]); } std::shuffle(a, a+N, g); std::sort(indices.begin(), indices.end(), [&](auto i, auto j){ return a[i] > a[j]; }); taskflow.emplace([&](F& cudaflow){ cudaflow.sort_by_key(a, a+N, b, tf::cuda_greater<T>{}); }); executor.run(taskflow).wait(); REQUIRE(std::is_sorted(a, a+N, std::greater<T>{})); for(int i=0; i<N; i++) { REQUIRE(indices[i] == b[i]); } REQUIRE(cudaFree(a) == cudaSuccess); REQUIRE(cudaFree(b) == cudaSuccess); } } TEST_CASE("cudaflow.sort_keys_values.int" * doctest::timeout(300)) { sort_keys_values<int, tf::cudaFlow>(); } TEST_CASE("cudaflow.sort_keys_values.float" * doctest::timeout(300)) { sort_keys_values<float, tf::cudaFlow>(); } TEST_CASE("capture.sort_keys_values.int" * doctest::timeout(300)) { sort_keys_values<int, tf::cudaFlowCapturer>(); } TEST_CASE("capture.sort_keys_values.float" * doctest::timeout(300)) { sort_keys_values<float, tf::cudaFlowCapturer>(); } // ---------------------------------------------------------------------------- // find-if // ---------------------------------------------------------------------------- template <typename T, typename F> void find_if() { tf::Executor executor; tf::Taskflow taskflow; for(int N=0; N<=1234567; N += std::max(N/100, 1)) { taskflow.clear(); auto a = tf::cuda_malloc_shared<T>(N); auto r = tf::cuda_malloc_shared<unsigned>(1); tf::cudaStream s; auto p = tf::cudaDefaultExecutionPolicy{s}; // initialize the data for(int i=0; i<N; i++) { a[i] = i; } *r = 1234; // ----------------- standalone asynchronous algorithms tf::cuda_find_if(p, a, a+N, r, []__device__(int v){ return v == 5000; }); s.synchronize(); if(N <= 5000) { REQUIRE(*r == N); } else { REQUIRE(*r == 
5000); } // ----------------- cudaflow capturer *r = 1234; taskflow.emplace([&](F& cudaflow){ cudaflow.find_if(a, a+N, r, []__device__(int v){ return v == 5000; }); }); executor.run(taskflow).wait(); if(N <= 5000) { REQUIRE(*r == N); } else { REQUIRE(*r == 5000); } REQUIRE(cudaFree(a) == cudaSuccess); REQUIRE(cudaFree(r) == cudaSuccess); } } TEST_CASE("cudaflow.find_if.int" * doctest::timeout(300)) { find_if<int, tf::cudaFlow>(); } TEST_CASE("cudaflow.find_if.float" * doctest::timeout(300)) { find_if<float, tf::cudaFlow>(); } TEST_CASE("capture.find_if.int" * doctest::timeout(300)) { find_if<int, tf::cudaFlowCapturer>(); } TEST_CASE("capture.find_if.float" * doctest::timeout(300)) { find_if<float, tf::cudaFlowCapturer>(); } // ---------------------------------------------------------------------------- // min_element // ---------------------------------------------------------------------------- template <typename T, typename F> void min_element() { tf::Executor executor; tf::Taskflow taskflow; for(int N=0; N<=1234567; N += std::max(N/10, 1)) { taskflow.clear(); auto a = tf::cuda_malloc_shared<T>(N); auto r = tf::cuda_malloc_shared<unsigned>(1); auto min = std::numeric_limits<T>::max(); tf::cudaStream s; auto p = tf::cudaDefaultExecutionPolicy{s}; // initialize the data for(int i=0; i<N; i++) { a[i] = rand(); min = std::min(min, a[i]); } *r = 1234; // ----------------- standalone asynchronous algorithms tf::cudaDeviceVector<std::byte> buf( tf::cuda_min_element_buffer_size<decltype(p), T>(N) ); tf::cuda_min_element( p, a, a+N, r, tf::cuda_less<T>{}, buf.data() ); s.synchronize(); if(min != std::numeric_limits<T>::max()) { REQUIRE(a[*r] == min); } else { REQUIRE(*r == N); } // ----------------- cudaflow *r = 1234; taskflow.emplace([&](F& cudaflow){ cudaflow.min_element(a, a+N, r, tf::cuda_less<T>{}); }); executor.run(taskflow).wait(); if(min != std::numeric_limits<T>::max()) { REQUIRE(a[*r] == min); } else { REQUIRE(*r == N); } REQUIRE(cudaFree(a) == cudaSuccess); REQUIRE(cudaFree(r) == cudaSuccess); } } TEST_CASE("cudaflow.min_element.int" * doctest::timeout(300)) { min_element<int, tf::cudaFlow>(); } TEST_CASE("cudaflow.min_element.float" * doctest::timeout(300)) { min_element<float, tf::cudaFlow>(); } TEST_CASE("capturer.min_element.int" * doctest::timeout(300)) { min_element<int, tf::cudaFlowCapturer>(); } TEST_CASE("capturer.min_element.float" * doctest::timeout(300)) { min_element<float, tf::cudaFlowCapturer>(); } // ---------------------------------------------------------------------------- // max_element // ---------------------------------------------------------------------------- template <typename T, typename F> void max_element() { tf::Executor executor; tf::Taskflow taskflow; for(int N=0; N<=1234567; N += std::max(N/10, 1)) { taskflow.clear(); auto a = tf::cuda_malloc_shared<T>(N); auto r = tf::cuda_malloc_shared<unsigned>(1); auto max = std::numeric_limits<T>::lowest(); tf::cudaStream s; auto p = tf::cudaDefaultExecutionPolicy{s}; // initialize the data for(int i=0; i<N; i++) { a[i] = rand(); max = std::max(max, a[i]); } *r = 1234; // ----------------- standalone asynchronous algorithms tf::cudaDeviceVector<std::byte> buf( tf::cuda_max_element_buffer_size<decltype(p), T>(N) ); tf::cuda_max_element(p, a, a+N, r, tf::cuda_less<T>{}, buf.data()); s.synchronize(); if(max != std::numeric_limits<T>::lowest()) { REQUIRE(a[*r] == max); } else { REQUIRE(*r == N); } // ----------------- cudaflow *r = 1234; taskflow.emplace([&](F& cudaflow){ cudaflow.max_element(a, a+N, r, 
tf::cuda_less<T>{}); }); executor.run(taskflow).wait(); if(max != std::numeric_limits<T>::lowest()) { REQUIRE(a[*r] == max); } else { REQUIRE(*r == N); } REQUIRE(cudaFree(a) == cudaSuccess); REQUIRE(cudaFree(r) == cudaSuccess); } } TEST_CASE("cudaflow.max_element.int" * doctest::timeout(300)) { max_element<int, tf::cudaFlow>(); } TEST_CASE("cudaflow.max_element.float" * doctest::timeout(300)) { max_element<float, tf::cudaFlow>(); } TEST_CASE("capturer.max_element.int" * doctest::timeout(300)) { max_element<int, tf::cudaFlowCapturer>(); } TEST_CASE("capturer.max_element.float" * doctest::timeout(300)) { max_element<float, tf::cudaFlowCapturer>(); } /*// -------------------------------------------------------------------------- // row-major transpose // ---------------------------------------------------------------------------- // Disable for now - better to use cublasFlowCapturer template <typename T> __global__ void verify(const T* din_mat, const T* dout_mat, bool* check, size_t rows, size_t cols) { size_t tid = blockDim.x * blockIdx.x + threadIdx.x; size_t size = rows * cols; for(; tid < size; tid += gridDim.x * blockDim.x) { if(din_mat[tid] != dout_mat[tid / cols + (tid % cols) * rows]) { *check = false; return; } } } template <typename T> void transpose() { tf::Executor executor; for(size_t rows = 1; rows <= 7999; rows*=2+3) { for(size_t cols = 1; cols <= 8021; cols*=3+5) { tf::Taskflow taskflow; std::vector<T> hinput_mat(rows * cols); std::generate_n(hinput_mat.begin(), rows * cols, [](){ return ::rand(); }); T* dinput_mat {nullptr}; T* doutput_mat {nullptr}; bool* check {nullptr}; //allocate auto allocate = taskflow.emplace([&]() { REQUIRE(cudaMalloc(&dinput_mat, (rows * cols) * sizeof(T)) == cudaSuccess); REQUIRE(cudaMalloc(&doutput_mat, (rows * cols) * sizeof(T)) == cudaSuccess); REQUIRE(cudaMallocManaged(&check, sizeof(bool)) == cudaSuccess); *check = true; }).name("allocate"); //transpose auto cudaflow = taskflow.emplace([&](tf::cudaFlow& cf) { auto h2d_input_t = cf.copy(dinput_mat, hinput_mat.data(), rows * cols).name("h2d"); auto kernel_t = tf::cudaBLAF(cf).transpose( dinput_mat, doutput_mat, rows, cols ); auto verify_t = cf.kernel( 32, 512, 0, verify<T>, dinput_mat, doutput_mat, check, rows, cols ); h2d_input_t.precede(kernel_t); kernel_t.precede(verify_t); }).name("transpose"); //free memory auto deallocate = taskflow.emplace([&](){ REQUIRE(cudaFree(dinput_mat) == cudaSuccess); REQUIRE(cudaFree(doutput_mat) == cudaSuccess); }).name("deallocate"); allocate.precede(cudaflow); cudaflow.precede(deallocate); executor.run(taskflow).wait(); REQUIRE(*check); } } } TEST_CASE("transpose.int" * doctest::timeout(300) ) { transpose<int>(); } TEST_CASE("transpose.float" * doctest::timeout(300) ) { transpose<float>(); } TEST_CASE("transpose.double" * doctest::timeout(300) ) { transpose<double>(); } // ---------------------------------------------------------------------------- // row-major matrix multiplication // ---------------------------------------------------------------------------- template <typename T> void matmul() { tf::Taskflow taskflow; tf::Executor executor; std::vector<T> a, b, c; for(int m=1; m<=1992; m=2*m+1) { for(int k=1; k<=1012; k=2*k+3) { for(int n=1; n<=1998; n=2*n+8) { taskflow.clear(); T* ha {nullptr}; T* hb {nullptr}; T* hc {nullptr}; T* da {nullptr}; T* db {nullptr}; T* dc {nullptr}; T val_a = ::rand()%5-1; T val_b = ::rand()%7-3; auto hosta = taskflow.emplace([&](){ a.resize(m*k); std::fill_n(a.begin(), m*k, val_a); ha = a.data(); REQUIRE(cudaMalloc(&da, 
m*k*sizeof(T)) == cudaSuccess); }).name("ha"); auto hostb = taskflow.emplace([&](){ b.resize(k*n); std::fill_n(b.begin(), k*n, val_b); hb = b.data(); REQUIRE(cudaMalloc(&db, k*n*sizeof(T)) == cudaSuccess); }).name("hb"); auto hostc = taskflow.emplace([&](){ c.resize(m*n); hc = c.data(); REQUIRE(cudaMalloc(&dc, m*n*sizeof(T)) == cudaSuccess); }).name("hc"); auto cuda = taskflow.emplace([&](tf::cudaFlow& cf){ auto pa = cf.copy(da, ha, m*k); auto pb = cf.copy(db, hb, k*n); auto op = tf::cudaBLAF(cf).matmul( da, db, dc, m, k, n ).name("op"); auto cc = cf.copy(hc, dc, m*n).name("cc"); op.precede(cc).succeed(pa, pb); }); cuda.succeed(hosta, hostb, hostc); executor.run(taskflow).wait(); int ans = val_a*val_b*k; for(const auto& x : c) { REQUIRE((int)x == ans); } REQUIRE(cudaFree(da) == cudaSuccess); REQUIRE(cudaFree(db) == cudaSuccess); REQUIRE(cudaFree(dc) == cudaSuccess); } } } } TEST_CASE("matmul.int" * doctest::timeout(300) ) { matmul<int>(); } TEST_CASE("matmul.float" * doctest::timeout(300) ) { matmul<float>(); } TEST_CASE("matmul.double" * doctest::timeout(300) ) { matmul<double>(); }*/
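// ----------------------------------------------------------------------------
// Illustrative sketch only (not part of the original test suite). Every test
// above follows the same pattern: allocate memory the host can inspect,
// describe the GPU work inside a cudaFlow or cudaFlowCapturer task, run the
// taskflow, then verify on the host. The helper below restates that pattern
// using only APIs already exercised above (tf::cuda_malloc_shared,
// Taskflow::emplace, for_each, tf::cuda_free, REQUIRE, and the test suite's
// `eps` tolerance); the name `fill_and_verify` is made up for illustration
// and the function is never called by the tests.
// ----------------------------------------------------------------------------
template <typename T, typename F>
void fill_and_verify(int n, T value) {
  tf::Executor executor;
  tf::Taskflow taskflow;
  auto data = tf::cuda_malloc_shared<T>(n);       // unified memory, host-visible
  for(int i=0; i<n; i++) { data[i] = 0; }
  taskflow.emplace([&](F& cudaflow) {             // F is tf::cudaFlow or tf::cudaFlowCapturer
    cudaflow.for_each(data, data+n,
      [value] __device__ (T& v) { v = value; }    // device-side fill
    );
  });
  executor.run(taskflow).wait();                  // blocks until the GPU work completes
  for(int i=0; i<n; i++) { REQUIRE(std::fabs(data[i] - value) < eps); }
  tf::cuda_free(data);
}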
#include <ATen/cuda/CUDAApplyUtils.cuh> #define CHECK_CUDA(x) \ TORCH_CHECK(x.device().is_cuda(), #x " must be a CUDA tensor") #define CHECK_CONTIGUOUS(x) \ TORCH_CHECK(x.is_contiguous(), #x " must be contiguous") #define CHECK_INPUT(x) \ CHECK_CUDA(x); \ CHECK_CONTIGUOUS(x) namespace { int const threadsPerBlock = sizeof(unsigned long long) * 8; } template <typename T, typename T_int> __global__ void scatter_point_to_voxel_kernel( const T* points, T_int* coor, T_int* point_to_voxelidx, T_int* coor_to_voxelidx, T* voxels, T_int* coors, const int num_features, const int num_points, const int max_points, const int NDim) { const int index = blockIdx.x * threadsPerBlock + threadIdx.x; if (index >= num_points) return; int num = point_to_voxelidx[index]; int voxelidx = coor_to_voxelidx[index]; if (num > -1 && voxelidx > -1) { const int feature_per_thread = num_features / 4; int start = threadIdx.y * feature_per_thread; auto voxels_offset = voxels + voxelidx * max_points * num_features + num * num_features; auto points_offset = points + index * num_features; for (int k = start; k < start + feature_per_thread; k++) { voxels_offset[k] = points_offset[k]; } if (num == 0 && start < NDim) { auto coors_offset = coors + voxelidx * NDim; auto coor_offset = coor + index * NDim; for (int k = start; k < NDim; k++) { coors_offset[k] = coor_offset[k]; } } } } template <typename T, typename T_int> __global__ void map_voxel_to_point_kernel( T* points, T* voxels, T_int* point_to_voxelidx, T_int* coor_to_voxelidx, const int num_features, const int num_points, const int max_points) { const int index = blockIdx.x * threadsPerBlock + threadIdx.x; if (index >= num_points) return; auto num = point_to_voxelidx[index]; if (num > -1) { const int feature_per_thread = num_features / 4; auto voxelidx = coor_to_voxelidx[index]; int start = threadIdx.y * feature_per_thread; auto voxels_offset = voxels + voxelidx * max_points * num_features + num * num_features; auto points_offset = points + index * num_features; for (int k = start; k < start + feature_per_thread; k++) { points_offset[k] = voxels_offset[k]; } } } template <typename T_int> __global__ void point_to_voxelidx_kernel(const T_int* coor, T_int* point_to_voxelidx, T_int* point_to_pointidx, const int num_points, const int NDim) { const int index = blockIdx.x * threadsPerBlock + threadIdx.x; auto coor_offset = coor + index * NDim; // skip invalid points if ((index >= num_points) || (coor_offset[0] == -1)) return; int num = 0; int coor_x = coor_offset[0]; int coor_y = coor_offset[1]; int coor_z = coor_offset[2]; // only calculate the coors before this coor[index] for (int i = 0; i < index; ++i) { auto prev_coor = coor + i * NDim; if (prev_coor[0] == -1) continue; // record voxel if ((prev_coor[0] == coor_x) && (prev_coor[1] == coor_y) && (prev_coor[2] == coor_z)) { num++; if (num == 1) { point_to_pointidx[index] = i; } } } if (num == 0) { point_to_pointidx[index] = index; } point_to_voxelidx[index] = num; } template <typename T_int> __global__ void determin_voxel_num( const T_int* coor, T_int* num_points_per_voxel, T_int* point_to_voxelidx, T_int* point_to_pointidx, T_int* coor_to_voxelidx, T_int* voxel_num, T_int* max_points, const int num_points, const int NDim) { // only calculate the coors before this coor[index] for (int i = 0; i < num_points; ++i) { auto coor_offset = coor + i * NDim; if (coor_offset[0] == -1) continue; int point_pos_in_voxel = point_to_voxelidx[i]; // record voxel if (point_pos_in_voxel == -1) { // out of max_points or invalid point 
printf("point_pos_in_voxel == -1, point:%d", i); continue; } else if (point_pos_in_voxel == 0) { // record new voxel int voxelidx = voxel_num[0]; voxel_num[0] += 1; coor_to_voxelidx[i] = voxelidx; num_points_per_voxel[voxelidx] = 1; } else { int point_idx = point_to_pointidx[i]; int voxelidx = coor_to_voxelidx[point_idx]; if (voxelidx != -1) { num_points_per_voxel[voxelidx] += 1; coor_to_voxelidx[i] = voxelidx; max_points[0] = max(max_points[0], point_pos_in_voxel + 1); } else { printf("voxelidx = -1, point:%d", i); } } } } namespace voxelization { std::vector<at::Tensor> dynamic_point_to_voxel_forward_gpu( const at::Tensor& points, const at::Tensor& voxel_mapping, const std::vector<float> voxel_size, const std::vector<float> coors_range) { CHECK_INPUT(points); at::cuda::CUDAGuard device_guard(points.device()); const int NDim = voxel_mapping.size(1); const int num_points = points.size(0); const int num_features = points.size(1); std::vector<int> grid_size(NDim); for (int i = 0; i < NDim; ++i) { grid_size[i] = round((coors_range[NDim + i] - coors_range[i]) / voxel_size[i]); } // assume the mapping is already given auto point_to_pointidx = -at::ones( { num_points, }, voxel_mapping.options()); auto point_to_voxelidx = -at::ones( { num_points, }, voxel_mapping.options()); auto max_points = at::zeros( { 1, }, voxel_mapping.options()); // must be zero from the begining int col_blocks = at::cuda::ATenCeilDiv(num_points, threadsPerBlock); dim3 blocks(col_blocks); dim3 threads(threadsPerBlock); cudaStream_t map_stream = at::cuda::getCurrentCUDAStream(); AT_DISPATCH_ALL_TYPES( voxel_mapping.scalar_type(), "determin_duplicate", ([&] { point_to_voxelidx_kernel<int><<<blocks, threads, 0, map_stream>>>( voxel_mapping.data_ptr<int>(), point_to_voxelidx.data_ptr<int>(), point_to_pointidx.data_ptr<int>(), num_points, NDim); })); cudaDeviceSynchronize(); AT_CUDA_CHECK(cudaGetLastError()); // make the logic in the CUDA device could accelerate about 10 times auto num_points_per_voxel = at::zeros( { num_points, }, voxel_mapping.options()); auto coor_to_voxelidx = -at::ones( { num_points, }, voxel_mapping.options()); auto voxel_num = at::zeros( { 1, }, voxel_mapping.options()); // must be zero from the begining cudaStream_t logic_stream = at::cuda::getCurrentCUDAStream(); AT_DISPATCH_ALL_TYPES( voxel_mapping.scalar_type(), "determin_duplicate", ([&] { determin_voxel_num<int><<<1, 1, 0, logic_stream>>>( voxel_mapping.data_ptr<int>(), num_points_per_voxel.data_ptr<int>(), point_to_voxelidx.data_ptr<int>(), point_to_pointidx.data_ptr<int>(), coor_to_voxelidx.data_ptr<int>(), voxel_num.data_ptr<int>(), max_points.data_ptr<int>(), num_points, NDim); })); cudaDeviceSynchronize(); AT_CUDA_CHECK(cudaGetLastError()); // some temporary data auto max_points_cpu = max_points.to(at::kCPU); int max_points_int = max_points_cpu.data_ptr<int>()[0]; auto voxel_num_cpu = voxel_num.to(at::kCPU); int voxel_num_int = voxel_num_cpu.data_ptr<int>()[0]; at::Tensor coors = at::zeros({voxel_num_int, NDim}, points.options().dtype(at::kInt)); at::Tensor voxels = at::zeros({voxel_num_int, max_points_int, num_features}, points.options()); // copy point features to voxels dim3 cp_threads(threadsPerBlock, 4); cudaStream_t cp_stream = at::cuda::getCurrentCUDAStream(); AT_DISPATCH_ALL_TYPES( points.scalar_type(), "scatter_point_to_voxel", ([&] { scatter_point_to_voxel_kernel<float, int> <<<blocks, cp_threads, 0, cp_stream>>>( points.data_ptr<float>(), voxel_mapping.data_ptr<int>(), point_to_voxelidx.data_ptr<int>(), 
coor_to_voxelidx.data_ptr<int>(), voxels.data_ptr<float>(), coors.data_ptr<int>(), num_features, num_points, max_points_int, NDim); })); cudaDeviceSynchronize(); AT_CUDA_CHECK(cudaGetLastError()); at::Tensor num_points_per_voxel_out = num_points_per_voxel.slice(/*dim=*/0, /*start=*/0, /*end=*/voxel_num_int); return {voxels, coors, num_points_per_voxel_out, point_to_voxelidx, coor_to_voxelidx}; } void dynamic_point_to_voxel_backward_gpu(at::Tensor& grad_input_points, const at::Tensor& grad_output_voxels, const at::Tensor& point_to_voxelidx, const at::Tensor& coor_to_voxelidx) { CHECK_INPUT(grad_input_points); CHECK_INPUT(grad_output_voxels); CHECK_INPUT(point_to_voxelidx); CHECK_INPUT(coor_to_voxelidx); at::cuda::CUDAGuard device_guard(grad_input_points.device()); const int num_points = grad_input_points.size(0); const int num_features = grad_input_points.size(1); const int max_points = grad_output_voxels.size(1); // copy voxel grad to points int col_blocks = at::cuda::ATenCeilDiv(num_points, threadsPerBlock); dim3 blocks(col_blocks); dim3 cp_threads(threadsPerBlock, 4); cudaStream_t cp_stream = at::cuda::getCurrentCUDAStream(); AT_DISPATCH_ALL_TYPES(grad_input_points.scalar_type(), "scatter_point_to_voxel", ([&] { map_voxel_to_point_kernel<float, int> <<<blocks, cp_threads, 0, cp_stream>>>( grad_input_points.data_ptr<float>(), grad_output_voxels.data_ptr<float>(), point_to_voxelidx.data_ptr<int>(), coor_to_voxelidx.data_ptr<int>(), num_features, num_points, max_points); })); cudaDeviceSynchronize(); AT_CUDA_CHECK(cudaGetLastError()); return; } } // namespace voxelization
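// ----------------------------------------------------------------------------
// Illustrative usage sketch only (not part of the original file). It assumes
// `points` is an (N, C) float CUDA tensor and `voxel_mapping` an (N, NDim) int
// CUDA tensor of precomputed voxel coordinates, with -1 in the first
// coordinate marking out-of-range points, which is exactly what the kernels
// above expect. The helper name `dynamic_voxelize_roundtrip_example` is made
// up here purely for illustration.
// ----------------------------------------------------------------------------
namespace voxelization {
inline at::Tensor dynamic_voxelize_roundtrip_example(
    const at::Tensor& points, const at::Tensor& voxel_mapping,
    const std::vector<float>& voxel_size, const std::vector<float>& coors_range) {
  // Forward pass: returns {voxels, coors, num_points_per_voxel,
  //                        point_to_voxelidx, coor_to_voxelidx}.
  auto outs = dynamic_point_to_voxel_forward_gpu(points, voxel_mapping,
                                                 voxel_size, coors_range);
  // Backward pass: scatter per-voxel values (here the voxels themselves stand
  // in for a gradient) back onto the points; points that were never voxelized
  // keep their zero entries.
  at::Tensor grad_points = at::zeros_like(points);
  dynamic_point_to_voxel_backward_gpu(grad_points, outs[0].contiguous(),
                                      outs[3], outs[4]);
  return grad_points;
}
}  // namespace voxelization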
#include <cassert> #include <cuda_runtime.h> #pragma GCC diagnostic ignored "-Wunknown-pragmas" #include <cudf/column/column_device_view.cuh> #include <cudf/datetime.hpp> #include <cudf/scalar/scalar_device_view.cuh> #include <cudf/strings/string_view.cuh> #include <cudf/table/table_device_view.cuh> #include <cudf/types.hpp> #include <cudf/utilities/bit.hpp> #include <cudf/utilities/error.hpp> #include <cudf/utilities/traits.hpp> #include <cudf/utilities/type_dispatcher.hpp> #pragma GCC diagnostic pop #include <limits> #include <numeric> #include <cuda/std/chrono> #include <type_traits> #include <curand_kernel.h> #include "interpreter_cpp.h" #include "utilities/error.hpp" namespace interops { typedef int64_t temp_gdf_valid_type; // until its an int32 in cudf typedef int16_t column_index_type; template <typename T> CUDA_DEVICE_CALLABLE T getMagicNumber() { return T{}; } template <> CUDA_DEVICE_CALLABLE int64_t getMagicNumber<int64_t>() { return std::numeric_limits<int64_t>::max() - 13ll; } template <> CUDA_DEVICE_CALLABLE double getMagicNumber<double>() { return 1.7976931348623123e+308; } __global__ void setup_rand_kernel(curandState *state, unsigned long long seed ) { int id = threadIdx.x + blockIdx.x * blockDim.x; /* Each thread gets same seed, a different sequence number, no offset */ curand_init(seed, id, 0, &state[id]); } enum class datetime_component { INVALID = 0, YEAR, MONTH, DAY, WEEKDAY, HOUR, MINUTE, SECOND, }; template <typename Timestamp, datetime_component Component> struct extract_component_operator { static_assert(cudf::is_timestamp<Timestamp>(), ""); CUDA_DEVICE_CALLABLE int16_t operator()(Timestamp const ts) const { using namespace cuda::std::chrono; auto days_since_epoch = floor<days>(ts); auto time_since_midnight = ts - days_since_epoch; if(time_since_midnight.count() < 0) { time_since_midnight += days(1); } auto hrs_ = duration_cast<hours>(time_since_midnight); auto mins_ = duration_cast<minutes>(time_since_midnight - hrs_); auto secs_ = duration_cast<seconds>(time_since_midnight - hrs_ - mins_); switch(Component) { case datetime_component::YEAR: return static_cast<int>(year_month_day(days_since_epoch).year()); case datetime_component::MONTH: return static_cast<unsigned>(year_month_day(days_since_epoch).month()); case datetime_component::DAY: return static_cast<unsigned>(year_month_day(days_since_epoch).day()); case datetime_component::WEEKDAY: return year_month_weekday(days_since_epoch).weekday().iso_encoding(); case datetime_component::HOUR: return hrs_.count(); case datetime_component::MINUTE: return mins_.count(); case datetime_component::SECOND: return secs_.count(); default: return 0; } } }; template <datetime_component Component> struct launch_extract_component { template <typename Element, std::enable_if_t<!cudf::is_timestamp<Element>()> * = nullptr> CUDA_DEVICE_CALLABLE int16_t operator()(int64_t val) { assert(false); return 0; } template <typename Timestamp, std::enable_if_t<cudf::is_timestamp<Timestamp>()> * = nullptr> CUDA_DEVICE_CALLABLE int16_t operator()(int64_t val) { return extract_component_operator<Timestamp, Component>{}(Timestamp{static_cast<typename Timestamp::duration>(val)}); } }; struct cast_to_timestamp_ns { template <typename Element, std::enable_if_t<!cudf::is_timestamp<Element>()> * = nullptr> CUDA_DEVICE_CALLABLE cudf::timestamp_ns operator()(int64_t val) { assert(false); return cudf::timestamp_ns{}; } template <typename Timestamp, std::enable_if_t<cudf::is_timestamp<Timestamp>()> * = nullptr> CUDA_DEVICE_CALLABLE cudf::timestamp_ns 
operator()(int64_t val) { return cudf::timestamp_ns{Timestamp{static_cast<typename Timestamp::duration>(val)}}; } }; struct cast_to_duration_ns { template <typename Element, std::enable_if_t<!cudf::is_duration<Element>()> * = nullptr> CUDA_DEVICE_CALLABLE cudf::duration_ns operator()(int64_t val) { assert(false); return cudf::duration_ns{}; } template <typename Duration, std::enable_if_t<cudf::is_duration<Duration>()> * = nullptr> CUDA_DEVICE_CALLABLE cudf::duration_ns operator()(int64_t val) { return cudf::duration_ns{Duration{static_cast<typename Duration::duration>(val)}}; } }; CUDA_DEVICE_CALLABLE bool is_float_type(cudf::type_id type) { return (cudf::type_id::FLOAT32 == type || cudf::type_id::FLOAT64 == type); } CUDA_DEVICE_CALLABLE bool is_timestamp_type(cudf::type_id type) { return (cudf::type_id::TIMESTAMP_DAYS == type || cudf::type_id::TIMESTAMP_SECONDS == type || cudf::type_id::TIMESTAMP_MILLISECONDS == type || cudf::type_id::TIMESTAMP_MICROSECONDS == type || cudf::type_id::TIMESTAMP_NANOSECONDS == type); } CUDA_DEVICE_CALLABLE bool is_duration_type(cudf::type_id type) { return (cudf::type_id::DURATION_DAYS == type || cudf::type_id::DURATION_SECONDS == type || cudf::type_id::DURATION_MILLISECONDS == type || cudf::type_id::DURATION_MICROSECONDS == type || cudf::type_id::DURATION_NANOSECONDS == type); } CUDA_DEVICE_CALLABLE bool is_string_type(cudf::type_id type) { return (cudf::type_id::STRING == type); } /** * every element that is stored in the local buffer is 8 bytes in local, so signed ints are cast to int64, unsigned to * uint64, and floating points are all doubles */ class InterpreterFunctor { public: ~InterpreterFunctor() = default; InterpreterFunctor(InterpreterFunctor && other) = default; InterpreterFunctor(InterpreterFunctor const & other) = default; InterpreterFunctor & operator=(InterpreterFunctor const & other) = delete; InterpreterFunctor & operator=(InterpreterFunctor && other) = delete; InterpreterFunctor(cudf::mutable_table_device_view out_table, cudf::table_device_view table, cudf::size_type num_operations, const column_index_type * left_input_positions, const column_index_type * right_input_positions, const column_index_type * output_positions, const column_index_type * final_output_positions, const cudf::type_id * input_types_left, const cudf::type_id * input_types_right, const operator_type * operations, cudf::detail::scalar_device_view_base ** scalars_left, cudf::detail::scalar_device_view_base ** scalars_right, void * temp_valids_in_buffer, void * temp_valids_out_buffer) : out_table{out_table}, table{table}, num_operations{num_operations}, left_input_positions{left_input_positions}, right_input_positions{right_input_positions}, output_positions{output_positions}, final_output_positions{final_output_positions}, input_types_left{input_types_left}, input_types_right{input_types_right}, operations{operations}, scalars_left{scalars_left}, scalars_right{scalars_right}, temp_valids_in_buffer{static_cast<cudf::bitmask_type *>(temp_valids_in_buffer)}, temp_valids_out_buffer{static_cast<cudf::bitmask_type *>(temp_valids_out_buffer)} { } CUDA_DEVICE_CALLABLE void operator()( cudf::size_type row_index, int64_t total_buffer[], cudf::size_type size, curandState & state) { cudf::bitmask_type * valids_in_buffer = temp_valids_in_buffer + (blockIdx.x * blockDim.x + threadIdx.x) * table.num_columns(); cudf::bitmask_type * valids_out_buffer = temp_valids_out_buffer + (blockIdx.x * blockDim.x + threadIdx.x) * out_table.num_columns(); for(cudf::size_type column_index = 0; 
column_index < table.num_columns(); column_index++) { read_valid_data(column_index, valids_in_buffer, row_index); } // NOTE: Currently interops does not support plans with an input or output index greater than 63 // This is a limitation by using an uint64_t to store all the valids in the plan uint64_t cur_row_valids; for(cudf::size_type row = 0; row < 32 && row_index + row < size; row++) { // load current row valids and data for(cudf::size_type column_index = 0; column_index < table.num_columns(); column_index++) { setColumnValid(cur_row_valids, column_index, getColumnValid(valids_in_buffer[column_index], row)); read_data(column_index, total_buffer, row_index + row); } for(int16_t op_index = 0; op_index < num_operations; op_index++) { process_operator(op_index, total_buffer, row_index + row, cur_row_valids,state ); } // copy data and row valids into buffer for(cudf::size_type column_index = 0; column_index < out_table.num_columns(); column_index++) { write_data(column_index, final_output_positions[column_index], total_buffer, row_index + row); setColumnValid(valids_out_buffer[column_index], row, getColumnValid(cur_row_valids, this->final_output_positions[column_index])); } } // copy row valids into global for(cudf::size_type column_index = 0; column_index < out_table.num_columns(); column_index++) { write_valid_data(column_index, valids_out_buffer[column_index], row_index); } } private: /** * @param buffer the local buffer which storse the information that is to be processed * @param position the position in the local buffer where this data needs to be written */ CUDA_DEVICE_CALLABLE void get_data_from_buffer(int64_t * data, int64_t * buffer, int position) { *data = *(buffer + (position * blockDim.x + threadIdx.x)); } CUDA_DEVICE_CALLABLE void get_data_from_buffer(double * data, int64_t * buffer, int position) { *data = __longlong_as_double(*(buffer + (position * blockDim.x + threadIdx.x))); } CUDA_DEVICE_CALLABLE void store_data_in_buffer(int64_t data, int64_t * buffer, int position) { *(buffer + (position * blockDim.x + threadIdx.x)) = data; } CUDA_DEVICE_CALLABLE void store_data_in_buffer(double data, int64_t * buffer, int position) { *(buffer + (position * blockDim.x + threadIdx.x)) = __double_as_longlong(data); } /** * @param buffer the local buffer which storse the information that is to be processed */ struct device_ptr_read_into_buffer { template <typename ColType, std::enable_if_t<std::is_integral<ColType>::value> * = nullptr> CUDA_DEVICE_CALLABLE void operator() (cudf::table_device_view& table, cudf::size_type col_index, cudf::size_type row, int64_t * buffer) { *(buffer + (col_index * blockDim.x + threadIdx.x)) = static_cast<int64_t>(table.column(col_index).element<ColType>(row)); } template <typename ColType, std::enable_if_t<cudf::is_fixed_point<ColType>()> * = nullptr> CUDA_DEVICE_CALLABLE void operator() (cudf::table_device_view& table, cudf::size_type col_index, cudf::size_type row, int64_t * buffer) { //TODO: implement fixed point //*(buffer + (col_index * blockDim.x + threadIdx.x)) = static_cast<int64_t>(table.column(col_index).element<ColType>(row)); } template <typename ColType, std::enable_if_t<std::is_floating_point<ColType>::value> * = nullptr> CUDA_DEVICE_CALLABLE void operator() (cudf::table_device_view& table, cudf::size_type col_index, cudf::size_type row, int64_t * buffer) { *(buffer + (col_index * blockDim.x + threadIdx.x)) = __double_as_longlong(static_cast<double>(table.column(col_index).element<ColType>(row))); } template <typename ColType, 
std::enable_if_t<cudf::is_timestamp<ColType>()> * = nullptr> CUDA_DEVICE_CALLABLE void operator() (cudf::table_device_view& table, cudf::size_type col_index, cudf::size_type row, int64_t * buffer) { *(buffer + (col_index * blockDim.x + threadIdx.x)) = static_cast<int64_t>(table.column(col_index).element<ColType>(row).time_since_epoch().count()); } template <typename ColType, std::enable_if_t<cudf::is_compound<ColType>()> * = nullptr> CUDA_DEVICE_CALLABLE void operator() (cudf::table_device_view& table, cudf::size_type col_index, cudf::size_type row, int64_t * buffer) { } template <typename ColType, std::enable_if_t<cudf::is_duration<ColType>()> * = nullptr> CUDA_DEVICE_CALLABLE void operator() (cudf::table_device_view& table, cudf::size_type col_index, cudf::size_type row, int64_t * buffer) { *(buffer + (col_index * blockDim.x + threadIdx.x)) = static_cast<int64_t>(table.column(col_index).element<ColType>(row).count()); } }; CUDA_DEVICE_CALLABLE void read_data(cudf::size_type cur_column, int64_t * buffer, cudf::size_type row_index) { cudf::type_dispatcher(table.column(cur_column).type(), device_ptr_read_into_buffer{}, table, cur_column, row_index, buffer); } /** * @param buffer the local buffer which storse the information that is to be processed */ struct device_ptr_write_from_buffer { template <typename ColType, std::enable_if_t<std::is_integral<ColType>::value> * = nullptr> CUDA_DEVICE_CALLABLE void operator() (cudf::mutable_table_device_view & out_table, cudf::size_type col_index, cudf::size_type row, int64_t * buffer, int position) { out_table.column(col_index).element<ColType>(row) = static_cast<ColType>(*(buffer + (position * blockDim.x + threadIdx.x))); } template <typename ColType, std::enable_if_t<cudf::is_fixed_point<ColType>()> * = nullptr> CUDA_DEVICE_CALLABLE void operator() (cudf::mutable_table_device_view & out_table, cudf::size_type col_index, cudf::size_type row, int64_t * buffer, int position) { //TODO: implement fixed point //out_table.column(col_index).element<ColType>(row) = static_cast<ColType>(*(buffer + (position * blockDim.x + threadIdx.x))); } template <typename ColType, std::enable_if_t<std::is_floating_point<ColType>::value> * = nullptr> CUDA_DEVICE_CALLABLE void operator() (cudf::mutable_table_device_view & out_table, cudf::size_type col_index, cudf::size_type row, int64_t * buffer, int position) { out_table.column(col_index).element<ColType>(row) = static_cast<ColType>(__longlong_as_double(*(buffer + (position * blockDim.x + threadIdx.x)))); } template <typename ColType, std::enable_if_t<cudf::is_timestamp<ColType>()> * = nullptr> CUDA_DEVICE_CALLABLE void operator() (cudf::mutable_table_device_view & out_table, cudf::size_type col_index, cudf::size_type row, int64_t * buffer, int position) { out_table.column(col_index).element<ColType>(row) = ColType{typename ColType::duration{*(buffer + (position * blockDim.x + threadIdx.x))}}; } template <typename ColType, std::enable_if_t<cudf::is_duration<ColType>()> * = nullptr> CUDA_DEVICE_CALLABLE void operator() (cudf::mutable_table_device_view & out_table, cudf::size_type col_index, cudf::size_type row, int64_t * buffer, int position) { out_table.column(col_index).element<ColType>(row) = ColType{typename ColType::duration{*(buffer + (position * blockDim.x + threadIdx.x))}}; } template <typename ColType, std::enable_if_t<cudf::is_compound<ColType>()> * = nullptr> CUDA_DEVICE_CALLABLE void operator() (cudf::mutable_table_device_view & out_table, cudf::size_type col_index, cudf::size_type row, int64_t * buffer, int 
position) { } }; CUDA_DEVICE_CALLABLE void write_data(cudf::size_type cur_column, int cur_buffer, int64_t * buffer, cudf::size_type row_index) { cudf::type_dispatcher(out_table.column(cur_column).type(), device_ptr_write_from_buffer{}, out_table, cur_column, row_index, buffer, cur_buffer); } CUDA_DEVICE_CALLABLE void read_valid_data(cudf::size_type column_idx, cudf::bitmask_type * buffer, cudf::size_type row_index) { const cudf::bitmask_type * valid_in = table.column(column_idx).null_mask(); if(valid_in != nullptr) { buffer[column_idx] = valid_in[cudf::word_index(row_index)]; } else { buffer[column_idx] = 0xffffffff; } } CUDA_DEVICE_CALLABLE void write_valid_data(cudf::size_type column_idx, cudf::bitmask_type valid_data, cudf::size_type row_index) { if(out_table.column(column_idx).nullable()) { cudf::bitmask_type * valid_out = out_table.column(column_idx).null_mask(); valid_out[cudf::word_index(row_index)] = valid_data; } } CUDA_DEVICE_CALLABLE bool getColumnValid(uint64_t row_valid, int bit_idx) { assert(bit_idx < sizeof(uint64_t)*8); return (row_valid >> bit_idx) & uint64_t{1}; } CUDA_DEVICE_CALLABLE void setColumnValid(cudf::bitmask_type & row_valid, int bit_idx, bool value) { assert(bit_idx < sizeof(cudf::bitmask_type)*8); row_valid ^= ((-value) ^ row_valid) & (cudf::bitmask_type{1} << bit_idx); } CUDA_DEVICE_CALLABLE void setColumnValid(uint64_t & row_valid, int bit_idx, bool value) { assert(bit_idx < sizeof(uint64_t)*8); row_valid ^= ((-value) ^ row_valid) & (uint64_t{1} << bit_idx); } template <typename LeftType> CUDA_DEVICE_CALLABLE LeftType get_scalar_value(cudf::detail::scalar_device_view_base * scalar_ptr) { switch (scalar_ptr->type().id()) { case cudf::type_id::BOOL8: return static_cast<LeftType>(static_cast<cudf::numeric_scalar_device_view<bool>*>(scalar_ptr)->value()); case cudf::type_id::INT8: return static_cast<LeftType>(static_cast<cudf::numeric_scalar_device_view<int8_t>*>(scalar_ptr)->value()); case cudf::type_id::UINT8: return static_cast<LeftType>(static_cast<cudf::numeric_scalar_device_view<uint8_t>*>(scalar_ptr)->value()); case cudf::type_id::INT16: return static_cast<LeftType>(static_cast<cudf::numeric_scalar_device_view<int16_t>*>(scalar_ptr)->value()); case cudf::type_id::UINT16: return static_cast<LeftType>(static_cast<cudf::numeric_scalar_device_view<uint16_t>*>(scalar_ptr)->value()); case cudf::type_id::INT32: return static_cast<LeftType>(static_cast<cudf::numeric_scalar_device_view<int32_t>*>(scalar_ptr)->value()); case cudf::type_id::UINT32: return static_cast<LeftType>(static_cast<cudf::numeric_scalar_device_view<uint32_t>*>(scalar_ptr)->value()); case cudf::type_id::INT64: return static_cast<LeftType>(static_cast<cudf::numeric_scalar_device_view<int64_t>*>(scalar_ptr)->value()); case cudf::type_id::UINT64: return static_cast<LeftType>(static_cast<cudf::numeric_scalar_device_view<uint64_t>*>(scalar_ptr)->value()); case cudf::type_id::FLOAT32: return static_cast<LeftType>(static_cast<cudf::numeric_scalar_device_view<float>*>(scalar_ptr)->value()); case cudf::type_id::FLOAT64: return static_cast<LeftType>(static_cast<cudf::numeric_scalar_device_view<double>*>(scalar_ptr)->value()); case cudf::type_id::TIMESTAMP_DAYS: return static_cast<LeftType>(static_cast<cudf::timestamp_scalar_device_view<cudf::timestamp_D>*>(scalar_ptr)->value().time_since_epoch().count()); case cudf::type_id::TIMESTAMP_SECONDS: return static_cast<LeftType>(static_cast<cudf::timestamp_scalar_device_view<cudf::timestamp_s>*>(scalar_ptr)->value().time_since_epoch().count()); case 
cudf::type_id::TIMESTAMP_MILLISECONDS: return static_cast<LeftType>(static_cast<cudf::timestamp_scalar_device_view<cudf::timestamp_ms>*>(scalar_ptr)->value().time_since_epoch().count()); case cudf::type_id::TIMESTAMP_MICROSECONDS: return static_cast<LeftType>(static_cast<cudf::timestamp_scalar_device_view<cudf::timestamp_us>*>(scalar_ptr)->value().time_since_epoch().count()); case cudf::type_id::TIMESTAMP_NANOSECONDS: return static_cast<LeftType>(static_cast<cudf::timestamp_scalar_device_view<cudf::timestamp_ns>*>(scalar_ptr)->value().time_since_epoch().count()); case cudf::type_id::DURATION_DAYS: return static_cast<LeftType>(static_cast<cudf::duration_scalar_device_view<cudf::duration_D>*>(scalar_ptr)->value().count()); case cudf::type_id::DURATION_SECONDS: return static_cast<LeftType>(static_cast<cudf::duration_scalar_device_view<cudf::duration_s>*>(scalar_ptr)->value().count()); case cudf::type_id::DURATION_MILLISECONDS: return static_cast<LeftType>(static_cast<cudf::duration_scalar_device_view<cudf::duration_ms>*>(scalar_ptr)->value().count()); case cudf::type_id::DURATION_MICROSECONDS: return static_cast<LeftType>(static_cast<cudf::duration_scalar_device_view<cudf::duration_us>*>(scalar_ptr)->value().count()); case cudf::type_id::DURATION_NANOSECONDS: return static_cast<LeftType>(static_cast<cudf::duration_scalar_device_view<cudf::duration_ns>*>(scalar_ptr)->value().count()); default: return LeftType{}; } } CUDA_DEVICE_CALLABLE void process_operator( size_t op_index, int64_t * buffer, cudf::size_type row_index, uint64_t & row_valids, curandState & state) { cudf::type_id type = input_types_left[op_index]; if(is_float_type(type)) { process_operator_1<double>(op_index, buffer, row_index, row_valids,state); } else { process_operator_1<int64_t>(op_index, buffer, row_index, row_valids,state); } } template <typename LeftType> CUDA_DEVICE_CALLABLE void process_operator_1( size_t op_index, int64_t * buffer, cudf::size_type row_index, uint64_t & row_valids, curandState & state) { cudf::type_id type = input_types_right[op_index]; if(is_float_type(type)) { process_operator_2<LeftType, double>(op_index, buffer, row_index, row_valids,state); } else { process_operator_2<LeftType, int64_t>(op_index, buffer, row_index, row_valids,state); } } template <typename LeftType, typename RightType> CUDA_DEVICE_CALLABLE void process_operator_2( size_t op_index, int64_t * buffer, cudf::size_type row_index, uint64_t & row_valids, curandState & state) { column_index_type left_position = left_input_positions[op_index]; column_index_type right_position = right_input_positions[op_index]; column_index_type output_position = output_positions[op_index]; operator_type oper = operations[op_index]; if(right_position != UNARY_INDEX && right_position != NULLARY_INDEX) { // It's a binary operation cudf::type_id left_type_id = input_types_left[op_index]; LeftType left_value; cudf::string_view left_str_view; bool left_valid; if(left_position >= 0) { if (is_string_type(left_type_id)) { // string values always come from the table input, // intermediate string result not supported left_str_view = table.column(left_position).element<cudf::string_view>(row_index); } else { get_data_from_buffer(&left_value, buffer, left_position); } left_valid = getColumnValid(row_valids, left_position); } else if(left_position == SCALAR_INDEX) { if (is_string_type(left_type_id)) { left_str_view = static_cast<cudf::string_scalar_device_view*>(scalars_left[op_index])->value(); } else { left_value = get_scalar_value<LeftType>(scalars_left[op_index]); } 
left_valid = true; } else { // if(left_position == SCALAR_NULL_INDEX) left_valid = false; } cudf::type_id right_type_id = input_types_right[op_index]; RightType right_value; cudf::string_view right_str_view; bool right_valid; if(right_position >= 0) { if (is_string_type(right_type_id)) { // string values always come from the table input, // intermediate string result not supported right_str_view = table.column(right_position).element<cudf::string_view>(row_index); } else { get_data_from_buffer(&right_value, buffer, right_position); } right_valid = getColumnValid(row_valids, right_position); } else if(right_position == SCALAR_INDEX) { if (is_string_type(right_type_id)) { right_str_view = static_cast<cudf::string_scalar_device_view*>(scalars_right[op_index])->value(); } else { right_value = get_scalar_value<RightType>(scalars_right[op_index]); } right_valid = true; } else { // if(right_position == SCALAR_NULL_INDEX) right_valid = false; } if(oper == operator_type::BLZ_MAGIC_IF_NOT) { if(left_valid && left_value) { store_data_in_buffer(right_value, buffer, output_position); setColumnValid(row_valids, output_position, right_valid); } else { // we want to indicate to first_non_magic to use the second value store_data_in_buffer(getMagicNumber<RightType>(), buffer, output_position); } } else if(oper == operator_type::BLZ_FIRST_NON_MAGIC) { if(left_value == getMagicNumber<LeftType>()) { store_data_in_buffer(right_value, buffer, output_position); setColumnValid(row_valids, output_position, right_valid); } else { store_data_in_buffer(left_value, buffer, output_position); setColumnValid(row_valids, output_position, left_valid); } } else if(oper == operator_type::BLZ_LOGICAL_OR) { if(left_valid && right_valid) { store_data_in_buffer(static_cast<int64_t>(left_value || right_value), buffer, output_position); setColumnValid(row_valids, output_position, true); } else if(left_valid) { store_data_in_buffer(left_value, buffer, output_position); setColumnValid(row_valids, output_position, !!left_value); } else if(right_valid) { store_data_in_buffer(right_value, buffer, output_position); setColumnValid(row_valids, output_position, !!right_valid); } else { setColumnValid(row_valids, output_position, false); } } else if(oper == operator_type::BLZ_IS_NOT_DISTINCT_FROM) { bool result_value; if(!left_valid && !right_valid) { result_value = true; } else if(!left_valid || !right_valid) { result_value = false; } else { result_value = left_value == right_value; } store_data_in_buffer(static_cast<int64_t>(result_value), buffer, output_position); setColumnValid(row_valids, output_position, true); } else if (left_valid && right_valid) { if(oper == operator_type::BLZ_LOGICAL_AND) { store_data_in_buffer(static_cast<int64_t>(left_value && right_value), buffer, output_position); } else if(oper == operator_type::BLZ_ADD) { store_data_in_buffer(left_value + right_value, buffer, output_position); } else if(oper == operator_type::BLZ_SUB) { store_data_in_buffer(left_value - right_value, buffer, output_position); } else if(oper == operator_type::BLZ_MUL) { store_data_in_buffer(left_value * right_value, buffer, output_position); } else if(oper == operator_type::BLZ_DIV) { store_data_in_buffer(left_value / right_value, buffer, output_position); } else if(oper == operator_type::BLZ_MOD) { if (!is_float_type(left_type_id) && !is_float_type(right_type_id)) { store_data_in_buffer( static_cast<int64_t>(left_value) % static_cast<int64_t>(right_value), buffer, output_position); } else { store_data_in_buffer( 
fmod(static_cast<double>(left_value), static_cast<double>(right_value)), buffer, output_position); } } else if(oper == operator_type::BLZ_POW) { store_data_in_buffer(pow(static_cast<double>(left_value), static_cast<double>(right_value)), buffer, output_position); } else if(oper == operator_type::BLZ_ROUND) { double factor = pow(10, right_value); store_data_in_buffer(round(static_cast<double>(left_value) * factor) / factor, buffer, output_position); } else if(oper == operator_type::BLZ_EQUAL) { int64_t computed; if (is_string_type(left_type_id) && is_string_type(right_type_id)) { computed = left_str_view == right_str_view; } else if(is_timestamp_type(left_type_id) && is_timestamp_type(right_type_id)) { cudf::timestamp_ns left_ts = cudf::type_dispatcher(cudf::data_type{left_type_id}, cast_to_timestamp_ns{}, static_cast<int64_t>(left_value)); cudf::timestamp_ns right_ts = cudf::type_dispatcher(cudf::data_type{right_type_id}, cast_to_timestamp_ns{}, static_cast<int64_t>(right_value)); computed = left_ts == right_ts; } else if (is_duration_type(left_type_id) && is_duration_type(right_type_id)) { cudf::duration_ns left_ts = cudf::type_dispatcher(cudf::data_type{left_type_id}, cast_to_duration_ns{}, static_cast<int64_t>(left_value)); cudf::duration_ns right_ts = cudf::type_dispatcher(cudf::data_type{right_type_id}, cast_to_duration_ns{}, static_cast<int64_t>(right_value)); computed = left_value == right_value; } else { computed = left_value == right_value; } store_data_in_buffer(computed, buffer, output_position); } else if(oper == operator_type::BLZ_NOT_EQUAL) { int64_t computed; if (is_string_type(left_type_id) && is_string_type(right_type_id)) { computed = left_str_view != right_str_view; } else if(is_timestamp_type(left_type_id) && is_timestamp_type(right_type_id)) { cudf::timestamp_ns left_ts = cudf::type_dispatcher(cudf::data_type{left_type_id}, cast_to_timestamp_ns{}, static_cast<int64_t>(left_value)); cudf::timestamp_ns right_ts = cudf::type_dispatcher(cudf::data_type{right_type_id}, cast_to_timestamp_ns{}, static_cast<int64_t>(right_value)); computed = left_ts != right_ts; } else if (is_duration_type(left_type_id) && is_duration_type(right_type_id)) { cudf::duration_ns left_ts = cudf::type_dispatcher(cudf::data_type{left_type_id}, cast_to_duration_ns{}, static_cast<int64_t>(left_value)); cudf::duration_ns right_ts = cudf::type_dispatcher(cudf::data_type{right_type_id}, cast_to_duration_ns{}, static_cast<int64_t>(right_value)); computed = left_value != right_value; } else { computed = left_value != right_value; } store_data_in_buffer(computed, buffer, output_position); } else if(oper == operator_type::BLZ_LESS) { int64_t computed; if (is_string_type(left_type_id) && is_string_type(right_type_id)) { computed = left_str_view < right_str_view; } else if(is_timestamp_type(left_type_id) && is_timestamp_type(right_type_id)) { cudf::timestamp_ns left_ts = cudf::type_dispatcher(cudf::data_type{left_type_id}, cast_to_timestamp_ns{}, static_cast<int64_t>(left_value)); cudf::timestamp_ns right_ts = cudf::type_dispatcher(cudf::data_type{right_type_id}, cast_to_timestamp_ns{}, static_cast<int64_t>(right_value)); computed = left_ts < right_ts; } else if (is_duration_type(left_type_id) && is_duration_type(right_type_id)) { cudf::duration_ns left_ts = cudf::type_dispatcher(cudf::data_type{left_type_id}, cast_to_duration_ns{}, static_cast<int64_t>(left_value)); cudf::duration_ns right_ts = cudf::type_dispatcher(cudf::data_type{right_type_id}, cast_to_duration_ns{}, static_cast<int64_t>(right_value)); 
computed = left_value < right_value; } else { computed = left_value < right_value; } store_data_in_buffer(computed, buffer, output_position); } else if(oper == operator_type::BLZ_GREATER) { int64_t computed; if (is_string_type(left_type_id) && is_string_type(right_type_id)) { computed = left_str_view > right_str_view; } else if(is_timestamp_type(left_type_id) && is_timestamp_type(right_type_id)) { cudf::timestamp_ns left_ts = cudf::type_dispatcher(cudf::data_type{left_type_id}, cast_to_timestamp_ns{}, static_cast<int64_t>(left_value)); cudf::timestamp_ns right_ts = cudf::type_dispatcher(cudf::data_type{right_type_id}, cast_to_timestamp_ns{}, static_cast<int64_t>(right_value)); computed = left_ts > right_ts; } else if (is_duration_type(left_type_id) && is_duration_type(right_type_id)) { cudf::duration_ns left_ts = cudf::type_dispatcher(cudf::data_type{left_type_id}, cast_to_duration_ns{}, static_cast<int64_t>(left_value)); cudf::duration_ns right_ts = cudf::type_dispatcher(cudf::data_type{right_type_id}, cast_to_duration_ns{}, static_cast<int64_t>(right_value)); computed = left_value > right_value; } else { computed = left_value > right_value; } store_data_in_buffer(computed, buffer, output_position); } else if(oper == operator_type::BLZ_LESS_EQUAL) { int64_t computed; if (is_string_type(left_type_id) && is_string_type(right_type_id)) { computed = left_str_view <= right_str_view; } else if(is_timestamp_type(left_type_id) && is_timestamp_type(right_type_id)) { cudf::timestamp_ns left_ts = cudf::type_dispatcher(cudf::data_type{left_type_id}, cast_to_timestamp_ns{}, static_cast<int64_t>(left_value)); cudf::timestamp_ns right_ts = cudf::type_dispatcher(cudf::data_type{right_type_id}, cast_to_timestamp_ns{}, static_cast<int64_t>(right_value)); computed = left_ts <= right_ts; } else if (is_duration_type(left_type_id) && is_duration_type(right_type_id)) { cudf::duration_ns left_ts = cudf::type_dispatcher(cudf::data_type{left_type_id}, cast_to_duration_ns{}, static_cast<int64_t>(left_value)); cudf::duration_ns right_ts = cudf::type_dispatcher(cudf::data_type{right_type_id}, cast_to_duration_ns{}, static_cast<int64_t>(right_value)); computed = left_value <= right_value; } else { computed = left_value <= right_value; } store_data_in_buffer(computed, buffer, output_position); } else if(oper == operator_type::BLZ_GREATER_EQUAL) { int64_t computed; if (is_string_type(left_type_id) && is_string_type(right_type_id)) { computed = left_str_view >= right_str_view; } else if(is_timestamp_type(left_type_id) && is_timestamp_type(right_type_id)) { cudf::timestamp_ns left_ts = cudf::type_dispatcher(cudf::data_type{left_type_id}, cast_to_timestamp_ns{}, static_cast<int64_t>(left_value)); cudf::timestamp_ns right_ts = cudf::type_dispatcher(cudf::data_type{right_type_id}, cast_to_timestamp_ns{}, static_cast<int64_t>(right_value)); computed = left_ts >= right_ts; } else if (is_duration_type(left_type_id) && is_duration_type(right_type_id)) { cudf::duration_ns left_ts = cudf::type_dispatcher(cudf::data_type{left_type_id}, cast_to_duration_ns{}, static_cast<int64_t>(left_value)); cudf::duration_ns right_ts = cudf::type_dispatcher(cudf::data_type{right_type_id}, cast_to_duration_ns{}, static_cast<int64_t>(right_value)); computed = left_value >= right_value; } else { computed = left_value >= right_value; } store_data_in_buffer(computed, buffer, output_position); } if(oper == operator_type::BLZ_DIV && right_value == 0) //if div by zero = null setColumnValid(row_valids, output_position, false); else 
setColumnValid(row_valids, output_position, true); } else { setColumnValid(row_valids, output_position, false); } } else if( right_position != NULLARY_INDEX) { // It's a unary operation, scalar inputs are not allowed assert(left_position >= 0); cudf::type_id left_type_id = input_types_left[op_index]; LeftType left_value; cudf::string_view left_str_view; if (is_string_type(left_type_id)) { // string values always come from the table input, // intermediate string result not supported left_str_view = table.column(left_position).element<cudf::string_view>(row_index); } else { get_data_from_buffer(&left_value, buffer, left_position); } bool left_valid = getColumnValid(row_valids, left_position); if (oper == operator_type::BLZ_IS_NOT_TRUE) { bool val = (left_valid == true && left_value == true) ? false : true; left_valid = true; store_data_in_buffer(static_cast<int64_t>(val), buffer, output_position); } else if (oper == operator_type::BLZ_IS_NOT_FALSE) { bool val = (left_valid == true && left_value == false) ? false : true; left_valid = true; store_data_in_buffer(static_cast<int64_t>(val), buffer, output_position); } else if(oper == operator_type::BLZ_IS_NULL) { store_data_in_buffer(static_cast<int64_t>(!left_valid), buffer, output_position); } else if(oper == operator_type::BLZ_IS_NOT_NULL) { store_data_in_buffer(static_cast<int64_t>(left_valid), buffer, output_position); } else if (left_valid) { if(oper == operator_type::BLZ_FLOOR) { double val = static_cast<double>(left_value); store_data_in_buffer(floor(val), buffer, output_position); } else if(oper == operator_type::BLZ_CEIL) { double val = static_cast<double>(left_value); store_data_in_buffer(ceil(val), buffer, output_position); } else if(oper == operator_type::BLZ_SIN) { double val = static_cast<double>(left_value); store_data_in_buffer(sin(val), buffer, output_position); } else if(oper == operator_type::BLZ_COS) { double val = static_cast<double>(left_value); store_data_in_buffer(cos(val), buffer, output_position); } else if(oper == operator_type::BLZ_ASIN) { double val = static_cast<double>(left_value); store_data_in_buffer(asin(val), buffer, output_position); } else if(oper == operator_type::BLZ_ACOS) { double val = static_cast<double>(left_value); store_data_in_buffer(acos(val), buffer, output_position); } else if(oper == operator_type::BLZ_TAN) { double val = static_cast<double>(left_value); store_data_in_buffer(tan(val), buffer, output_position); } else if(oper == operator_type::BLZ_COTAN) { double val = static_cast<double>(left_value); double sin_, cos_; sincos(val, &sin_, &cos_); store_data_in_buffer(cos_ / sin_, buffer, output_position); } else if(oper == operator_type::BLZ_ATAN) { double val = static_cast<double>(left_value); store_data_in_buffer(atan(val), buffer, output_position); } else if(oper == operator_type::BLZ_ABS) { if (is_float_type(left_type_id)){ double val = static_cast<double>(left_value); store_data_in_buffer(fabs(val), buffer, output_position); } else { int64_t val = static_cast<int64_t>(left_value); store_data_in_buffer(abs(val), buffer, output_position); } } else if(oper == operator_type::BLZ_NOT) { store_data_in_buffer(static_cast<int64_t>(!left_value), buffer, output_position); } else if(oper == operator_type::BLZ_IS_TRUE) { store_data_in_buffer(static_cast<int64_t>(left_value), buffer, output_position); } else if(oper == operator_type::BLZ_LN) { double val = static_cast<double>(left_value); store_data_in_buffer(log(val), buffer, output_position); } else if(oper == operator_type::BLZ_LOG) { double val = 
static_cast<double>(left_value); store_data_in_buffer(log10(val), buffer, output_position); } else if(oper == operator_type::BLZ_YEAR) { int64_t computed = cudf::type_dispatcher(cudf::data_type{left_type_id}, launch_extract_component<datetime_component::YEAR>{}, static_cast<int64_t>(left_value)); store_data_in_buffer(computed, buffer, output_position); } else if(oper == operator_type::BLZ_MONTH) { int64_t computed = cudf::type_dispatcher(cudf::data_type{left_type_id}, launch_extract_component<datetime_component::MONTH>{}, static_cast<int64_t>(left_value)); store_data_in_buffer(computed, buffer, output_position); } else if(oper == operator_type::BLZ_DAY) { int64_t computed = cudf::type_dispatcher(cudf::data_type{left_type_id}, launch_extract_component<datetime_component::DAY>{}, static_cast<int64_t>(left_value)); store_data_in_buffer(computed, buffer, output_position); } else if(oper == operator_type::BLZ_DAYOFWEEK) { int64_t computed = cudf::type_dispatcher(cudf::data_type{left_type_id}, launch_extract_component<datetime_component::WEEKDAY>{}, static_cast<int64_t>(left_value)); store_data_in_buffer(computed, buffer, output_position); } else if(oper == operator_type::BLZ_HOUR) { int64_t computed = cudf::type_dispatcher(cudf::data_type{left_type_id}, launch_extract_component<datetime_component::HOUR>{}, static_cast<int64_t>(left_value)); store_data_in_buffer(computed, buffer, output_position); } else if(oper == operator_type::BLZ_MINUTE) { int64_t computed = cudf::type_dispatcher(cudf::data_type{left_type_id}, launch_extract_component<datetime_component::MINUTE>{}, static_cast<int64_t>(left_value)); store_data_in_buffer(computed, buffer, output_position); } else if(oper == operator_type::BLZ_SECOND) { int64_t computed = cudf::type_dispatcher(cudf::data_type{left_type_id}, launch_extract_component<datetime_component::SECOND>{}, static_cast<int64_t>(left_value)); store_data_in_buffer(computed, buffer, output_position); } else if(oper == operator_type::BLZ_CAST_TINYINT || oper == operator_type::BLZ_CAST_SMALLINT || oper == operator_type::BLZ_CAST_INTEGER || oper == operator_type::BLZ_CAST_BIGINT) { store_data_in_buffer(static_cast<int64_t>(left_value), buffer, output_position); } else if(oper == operator_type::BLZ_CAST_FLOAT || oper == operator_type::BLZ_CAST_DOUBLE) { store_data_in_buffer(static_cast<double>(left_value), buffer, output_position); } else if(oper == operator_type::BLZ_CAST_DATE) { int64_t val = static_cast<int64_t>(left_value); cudf::timestamp_D computed; switch (left_type_id) { case cudf::type_id::INT8: case cudf::type_id::UINT8: case cudf::type_id::INT16: case cudf::type_id::UINT16: case cudf::type_id::INT32: case cudf::type_id::UINT32: case cudf::type_id::INT64: case cudf::type_id::UINT64: case cudf::type_id::FLOAT32: case cudf::type_id::FLOAT64: case cudf::type_id::TIMESTAMP_DAYS: computed = cudf::timestamp_D{static_cast<cudf::timestamp_D::duration>(val)}; break; case cudf::type_id::TIMESTAMP_SECONDS: computed = cuda::std::chrono::time_point_cast<cuda::std::chrono::days>(cudf::timestamp_s{static_cast<cudf::timestamp_s::duration>(val)}); break; case cudf::type_id::TIMESTAMP_MILLISECONDS: computed = cuda::std::chrono::time_point_cast<cuda::std::chrono::days>(cudf::timestamp_ms{static_cast<cudf::timestamp_ms::duration>(val)}); break; case cudf::type_id::TIMESTAMP_MICROSECONDS: computed = cuda::std::chrono::time_point_cast<cuda::std::chrono::days>(cudf::timestamp_us{static_cast<cudf::timestamp_us::duration>(val)}); break; case cudf::type_id::TIMESTAMP_NANOSECONDS: computed = 
cuda::std::chrono::time_point_cast<cuda::std::chrono::days>(cudf::timestamp_ns{static_cast<cudf::timestamp_ns::duration>(val)}); break; default: // should not reach here, invalid conversion assert(false); break; } store_data_in_buffer(static_cast<int64_t>(computed.time_since_epoch().count()), buffer, output_position); } else if(oper == operator_type::BLZ_CAST_TIMESTAMP || oper == operator_type::BLZ_CAST_TIMESTAMP_MICROSECONDS || oper == operator_type::BLZ_CAST_TIMESTAMP_MILLISECONDS || oper == operator_type::BLZ_CAST_TIMESTAMP_SECONDS) { int64_t val = static_cast<int64_t>(left_value); cudf::timestamp_ns computed; switch (left_type_id) { case cudf::type_id::INT8: case cudf::type_id::UINT8: case cudf::type_id::INT16: case cudf::type_id::UINT16: case cudf::type_id::INT32: case cudf::type_id::UINT32: case cudf::type_id::INT64: case cudf::type_id::UINT64: case cudf::type_id::FLOAT32: case cudf::type_id::FLOAT64: case cudf::type_id::TIMESTAMP_NANOSECONDS: computed = cudf::timestamp_ns{static_cast<cudf::timestamp_ns::duration>(val)}; break; case cudf::type_id::TIMESTAMP_DAYS: computed = cuda::std::chrono::time_point_cast<cuda::std::chrono::nanoseconds>(cudf::timestamp_D{static_cast<cudf::timestamp_D::duration>(val)}); break; case cudf::type_id::TIMESTAMP_SECONDS: computed = cuda::std::chrono::time_point_cast<cuda::std::chrono::nanoseconds>(cudf::timestamp_s{static_cast<cudf::timestamp_s::duration>(val)}); break; case cudf::type_id::TIMESTAMP_MILLISECONDS: computed = cuda::std::chrono::time_point_cast<cuda::std::chrono::nanoseconds>(cudf::timestamp_ms{static_cast<cudf::timestamp_ms::duration>(val)}); break; case cudf::type_id::TIMESTAMP_MICROSECONDS: computed = cuda::std::chrono::time_point_cast<cuda::std::chrono::nanoseconds>(cudf::timestamp_us{static_cast<cudf::timestamp_us::duration>(val)}); break; default: // should not reach here, invalid conversion assert(false); break; } store_data_in_buffer(static_cast<int64_t>(computed.time_since_epoch().count()), buffer, output_position); } else if(oper == operator_type::BLZ_CHAR_LENGTH) { int64_t computed = left_str_view.length(); store_data_in_buffer(computed, buffer, output_position); } } bool out_valid = (oper == operator_type::BLZ_IS_NULL || oper == operator_type::BLZ_IS_NOT_NULL) ? true : left_valid; setColumnValid(row_valids, output_position, out_valid); }else{ if(oper == operator_type::BLZ_RAND) { double out = curand_uniform_double(&state); store_data_in_buffer(out, buffer, output_position); } setColumnValid(row_valids, output_position, true); } } private: cudf::mutable_table_device_view out_table; cudf::table_device_view table; cudf::size_type num_operations; const column_index_type * left_input_positions; const column_index_type * right_input_positions; const column_index_type * output_positions; const column_index_type * final_output_positions; // should be same size as output_data, e.g. 
num_outputs const cudf::type_id * input_types_left; const cudf::type_id * input_types_right; const operator_type * operations; cudf::detail::scalar_device_view_base ** scalars_left; cudf::detail::scalar_device_view_base ** scalars_right; cudf::bitmask_type * temp_valids_in_buffer; cudf::bitmask_type * temp_valids_out_buffer; }; __global__ void transformKernel(InterpreterFunctor op, cudf::size_type size, curandState *state) { extern __shared__ int64_t total_buffer[]; int id = threadIdx.x + blockIdx.x * blockDim.x; curandState localState = state[id]; for(cudf::size_type i = (blockIdx.x * blockDim.x + threadIdx.x) * 32; i < size; i += blockDim.x * gridDim.x * 32) { op(i, total_buffer, size, localState); } state[id] = localState; } } // namespace interops
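// A minimal host-side sketch of the two-stage dispatch used above by
// process_operator / process_operator_1 / process_operator_2: the runtime
// type ids of the left and right operands are collapsed onto two storage
// types (double for floating point, int64_t for everything else), so each
// operator body only needs four template instantiations. The type_id enum,
// is_float(), apply_binary() and dispatch() below are illustrative
// stand-ins, not part of the interpreter above.
#include <cstdint>
#include <cstdio>

enum class type_id { INT32, INT64, FLOAT32, FLOAT64 };

static bool is_float(type_id t) {
    return t == type_id::FLOAT32 || t == type_id::FLOAT64;
}

template <typename LeftType, typename RightType>
static double apply_binary(double lhs, double rhs) {
    // The real kernel switches on operator_type here; addition stands in.
    return static_cast<double>(static_cast<LeftType>(lhs) +
                               static_cast<RightType>(rhs));
}

template <typename LeftType>
static double dispatch_right(type_id right, double lhs, double rhs) {
    return is_float(right) ? apply_binary<LeftType, double>(lhs, rhs)
                           : apply_binary<LeftType, int64_t>(lhs, rhs);
}

static double dispatch(type_id left, type_id right, double lhs, double rhs) {
    return is_float(left) ? dispatch_right<double>(right, lhs, rhs)
                          : dispatch_right<int64_t>(right, lhs, rhs);
}

int main() {
    // An INT64 left operand and a FLOAT64 right operand route through the
    // <int64_t, double> instantiation: 2 + 0.5 = 2.5.
    std::printf("%f\n", dispatch(type_id::INT64, type_id::FLOAT64, 2.0, 0.5));
    return 0;
}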
static inline void THNN_(VolumetricFullDilatedConvolution_shapeCheck)( THCState *state, THCTensor *input, THCTensor *gradOutput, THCTensor *weight, THCTensor *bias, int dT, int dW, int dH, int padT, int padW, int padH, int dilationT, int dilationW, int dilationH, int adjT, int adjW, int adjH) { THCUNN_argCheck(state, input->nDimension == 4 || input->nDimension == 5, 2, input, "4D or 5D (batch mode) tensor expected for input, but got: %s"); // number of input & output planes and kernel size is indirectly defined by the weight tensor THCUNN_argCheck(state, weight->nDimension == 5, 4, weight, "5D (nOutputPlane x nInputPlane x kT x kH x kW) tensor " "expected for weight, but got: %s"); THArgCheck(THCTensor_(isContiguous)(state, weight), 4, "weight tensor has to be contiguous"); THArgCheck(!bias || THCTensor_(isContiguous)(state, bias), 5, "bias tensor has to be contiguous"); THArgCheck(dT > 0 && dW > 0 && dH > 0, 8, "stride should be greater than zero, but got dT: %d dH: %d dW: %d", dT, dH, dW); THArgCheck(dilationT > 0 && dilationW > 0 && dilationH > 0, 15, "dilation should be greater than zero, but got dilationT: %d, dilationH: %d, dilationW: %d", dilationT, dilationH, dilationW); THArgCheck((adjT < dT || adjT < dilationT) && (adjW < dW || adjW < dilationW) && (adjH < dH || adjH < dilationH), 15, "output padding must be smaller than either stride or dilation," " but got adjT: %d adjH: %d adjW: %d dT: %d dH: %d dW: %d " "dilationT: %d dilationH: %d dilationW: %d", adjT, adjH, adjW, dT, dH, dW, dilationT, dilationH, dilationW); int ndim = input->nDimension; int nInputPlane = THCTensor_(size)(state, weight, 0); int nOutputPlane = THCTensor_(size)(state, weight, 1); const int kT = (int)weight->size[2]; const int kH = (int)weight->size[3]; const int kW = (int)weight->size[4]; if (bias != NULL) { THCUNN_check_dim_size(state, bias, 1, 0, weight->size[1]); } int dimf = 0; int dimd = 1; int dimh = 2; int dimw = 3; if (ndim == 5) { dimf++; dimd++; dimh++; dimw++; } long inputWidth = input->size[dimw]; long inputHeight = input->size[dimh]; long inputDepth = input->size[dimd]; long outputDepth = (inputDepth - 1) * dT - 2*padT + (dilationT * (kT - 1) + 1) + adjT; long outputHeight = (inputHeight - 1) * dH - 2*padH + (dilationH * (kH - 1) + 1) + adjH; long outputWidth = (inputWidth - 1) * dW - 2*padW + (dilationW * (kW - 1) + 1) + adjW; if (outputDepth < 1 || outputWidth < 1 || outputHeight < 1) THError("Given input size: (%dx%dx%dx%d). Calculated output size: (%dx%dx%dx%d). 
Output size is too small", nInputPlane,inputDepth,inputHeight,inputWidth,nOutputPlane,outputDepth,outputHeight,outputWidth); THCUNN_check_dim_size(state, input, ndim, dimf, nInputPlane); if (gradOutput != NULL) { THCUNN_check_dim_size(state, gradOutput, ndim, dimf, nOutputPlane); THCUNN_check_dim_size(state, gradOutput, ndim, dimd, outputDepth); THCUNN_check_dim_size(state, gradOutput, ndim, dimh, outputHeight); THCUNN_check_dim_size(state, gradOutput, ndim, dimw, outputWidth); } } void THNN_(VolumetricFullDilatedConvolution_updateOutput)( THCState *state, THCTensor *input, THCTensor *output, THCTensor *weight, THCTensor *bias, THCTensor *finput, THCTensor *fgradInput, int dT, int dW, int dH, int padT, int padW, int padH, int dilationT, int dilationW, int dilationH, int adjT, int adjW, int adjH) { THCTensor *columns = finput; THCTensor *ones = fgradInput; int nInputPlane = THCTensor_(size)(state, weight, 0); int nOutputPlane = THCTensor_(size)(state, weight, 1); const int kT = (int)weight->size[2]; const int kH = (int)weight->size[3]; const int kW = (int)weight->size[4]; THCUNN_assertSameGPU(state, 6, input, output, weight, bias, columns, ones); THNN_(VolumetricFullDilatedConvolution_shapeCheck)( state, input, NULL, weight, bias, dT, dW, dH, padT, padW, padH, dilationT, dilationW, dilationH, adjT, adjW, adjH); input = THCTensor_(newContiguous)(state, input); weight = THCTensor_(newContiguous)(state, weight); bias = bias ? THCTensor_(newContiguous)(state, bias) : bias; int batch = 1; if (input->nDimension == 4) { // Force batch batch = 0; THCTensor_(resize5d)(state, input, 1, input->size[0], input->size[1], input->size[2], input->size[3]); } long inputWidth = input->size[4]; long inputHeight = input->size[3]; long inputDepth = input->size[2]; long outputDepth = (inputDepth - 1) * dT - 2*padT + (dilationT * (kT - 1) + 1) + adjT; long outputHeight = (inputHeight - 1) * dH - 2*padH + (dilationH * (kH - 1) + 1) + adjH; long outputWidth = (inputWidth - 1) * dW - 2*padW + (dilationW * (kW - 1) + 1) + adjW; // Batch size + input planes long batchSize = input->size[0]; // Resize output THCTensor_(resize5d)(state, output, batchSize, nOutputPlane, outputDepth, outputHeight, outputWidth); // Resize temporary columns THCTensor_(resize2d)(state, columns, nOutputPlane*kW*kH*kT, inputDepth*inputHeight*inputWidth); // Define a buffer of ones, for bias accumulation // Note: this buffer can be shared with other modules, it only ever gets increased, // and always contains ones. if (ones->nDimension != 3 || ones->size[0]*ones->size[1]*ones->size[2] < outputDepth*outputHeight*outputWidth) { // Resize plane and fill with ones... 
THCTensor_(resize3d)(state, ones, outputDepth, outputHeight, outputWidth); THCTensor_(fill)(state, ones, ScalarConvert<int, real>::to(1)); } // Helpers THCTensor *input_n = THCTensor_(new)(state); THCTensor *output_n = THCTensor_(new)(state); // For each elt in batch, do: for (int elt = 0; elt < batchSize; elt ++) { // Matrix mulitply per output: THCTensor_(select)(state, input_n, input, 0, elt); THCTensor_(select)(state, output_n, output, 0, elt); // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) long m = weight->size[1] * weight->size[2] * weight->size[3] * weight->size[4]; long n = columns->size[1]; long k = weight->size[0]; // Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices) #ifdef THC_REAL_IS_FLOAT THCudaBlas_Sgemm( #elif defined(THC_REAL_IS_HALF) THCudaBlas_Hgemm( #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_Dgemm( #endif state, 'n', 't', n, m, k, ScalarConvert<int, real>::to(1), THCTensor_(data)(state, input_n), n, THCTensor_(data)(state, weight), m, ScalarConvert<int, real>::to(0), THCTensor_(data)(state, columns), n ); // Unpack columns back into input: col2vol<real, accreal>( THCState_getCurrentStream(state), THCTensor_(data)(state, columns), nOutputPlane, outputDepth, outputHeight, outputWidth, inputDepth, inputHeight, inputWidth, kT, kH, kW, padT, padH, padW, dT, dH, dW, dilationT, dilationH, dilationW, THCTensor_(data)(state, output_n) ); // Do Bias after: // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) long m_ = nOutputPlane; long n_ = outputDepth * outputHeight * outputWidth; long k_ = 1; // Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices) if (bias) { #ifdef THC_REAL_IS_FLOAT THCudaBlas_Sgemm( #elif defined(THC_REAL_IS_HALF) THCudaBlas_Hgemm( #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_Dgemm( #endif state, 't', 'n', n_, m_, k_, ScalarConvert<int, real>::to(1), THCTensor_(data)(state, ones), k_, THCTensor_(data)(state, bias), k_, ScalarConvert<int, real>::to(1), THCTensor_(data)(state, output_n), n_ ); } } // Free THCTensor_(free)(state, input_n); THCTensor_(free)(state, output_n); // Resize output if (batch == 0) { THCTensor_(resize4d)(state, output, nOutputPlane, outputDepth, outputHeight, outputWidth); THCTensor_(resize4d)(state, input, nInputPlane, inputDepth, inputHeight, inputWidth); } THCTensor_(free)(state, input); THCTensor_(free)(state, weight); if (bias) THCTensor_(free)(state, bias); } void THNN_(VolumetricFullDilatedConvolution_updateGradInput)( THCState *state, THCTensor *input, THCTensor *gradOutput, THCTensor *gradInput, THCTensor *weight, THCTensor *finput, THCTensor *fgradInput, int dT, int dW, int dH, int padT, int padW, int padH, int dilationT, int dilationW, int dilationH, int adjT, int adjW, int adjH) { THCTensor *gradColumns = finput; int nInputPlane = THCTensor_(size)(state, weight, 0); int nOutputPlane = THCTensor_(size)(state, weight, 1); const int kT = (int)weight->size[2]; const int kH = (int)weight->size[3]; const int kW = (int)weight->size[4]; THCUNN_assertSameGPU(state, 5, input, gradOutput, weight, gradColumns, gradInput); THNN_(VolumetricFullDilatedConvolution_shapeCheck)( state, input, gradOutput, weight, NULL, dT, dW, dH, padT, padW, padH, dilationT, dilationW, dilationH, adjT, adjW, adjH); input = THCTensor_(newContiguous)(state, input); gradOutput = THCTensor_(newContiguous)(state, gradOutput); weight = THCTensor_(newContiguous)(state, weight); int batch = 1; if 
(input->nDimension == 4) { // Force batch batch = 0; THCTensor_(resize5d)(state, input, 1, input->size[0], input->size[1], input->size[2], input->size[3]); THCTensor_(resize5d)(state, gradOutput, 1, gradOutput->size[0], gradOutput->size[1], gradOutput->size[2], gradOutput->size[3]); } long inputWidth = input->size[4]; long inputHeight = input->size[3]; long inputDepth = input->size[2]; long outputDepth = (inputDepth - 1) * dT - 2*padT + (dilationT * (kT - 1) + 1) + adjT; long outputHeight = (inputHeight - 1) * dH - 2*padH + (dilationH * (kH - 1) + 1) + adjH; long outputWidth = (inputWidth - 1) * dW - 2*padW + (dilationW * (kW - 1) + 1) + adjW; // Batch size + input planes long batchSize = input->size[0]; // Resize output THCTensor_(resize5d)(state, gradInput, batchSize, nInputPlane, inputDepth, inputHeight, inputWidth); // Resize temporary columns THCTensor_(resize2d)(state, gradColumns, nOutputPlane*kW*kH*kT, inputDepth*inputHeight*inputWidth); // Helpers THCTensor *gradInput_n = THCTensor_(new)(state); THCTensor *gradOutput_n = THCTensor_(new)(state); // For each elt in batch, do: for (int elt = 0; elt < batchSize; elt ++) { // Matrix mulitply per sample: THCTensor_(select)(state, gradInput_n, gradInput, 0, elt); THCTensor_(select)(state, gradOutput_n, gradOutput, 0, elt); // Extract columns: vol2col( THCState_getCurrentStream(state), THCTensor_(data)(state, gradOutput_n), nOutputPlane, outputDepth, outputHeight, outputWidth, kT, kH, kW, padT, padH, padW, dT, dH, dW, dilationT, dilationH, dilationW, THCTensor_(data)(state, gradColumns) ); // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) long m = weight->size[0]; long n = gradColumns->size[1]; long k = weight->size[1] * weight->size[2] * weight->size[3] * weight->size[4]; // Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices) #ifdef THC_REAL_IS_FLOAT THCudaBlas_Sgemm( #elif defined(THC_REAL_IS_HALF) THCudaBlas_Hgemm( #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_Dgemm( #endif state, 'n', 'n', n, m, k, ScalarConvert<int, real>::to(1), THCTensor_(data)(state, gradColumns), n, THCTensor_(data)(state, weight), k, ScalarConvert<int, real>::to(0), THCTensor_(data)(state, gradInput_n), n ); } // Free THCTensor_(free)(state, gradInput_n); THCTensor_(free)(state, gradOutput_n); // Resize output if (batch == 0) { THCTensor_(resize4d)(state, gradOutput, nOutputPlane, outputDepth, outputHeight, outputWidth); THCTensor_(resize4d)(state, input, nInputPlane, inputDepth, inputHeight, inputWidth); THCTensor_(resize4d)(state, gradInput, nInputPlane, inputDepth, inputHeight, inputWidth); } THCTensor_(free)(state, input); THCTensor_(free)(state, gradOutput); THCTensor_(free)(state, weight); } void THNN_(VolumetricFullDilatedConvolution_accGradParameters)( THCState *state, THCTensor *input, THCTensor *gradOutput, THCTensor *gradWeight, THCTensor *gradBias, THCTensor *finput, THCTensor *fgradInput, int dT, int dW, int dH, int padT, int padW, int padH, int dilationT, int dilationW, int dilationH, int adjT, int adjW, int adjH, accreal scale_) { real scale = ScalarConvert<accreal, real>::to(scale_); THCTensor *columns = finput; THCTensor *ones = fgradInput; int nInputPlane = THCTensor_(size)(state, gradWeight, 0); int nOutputPlane = THCTensor_(size)(state, gradWeight, 1); const int kT = (int)gradWeight->size[2]; const int kH = (int)gradWeight->size[3]; const int kW = (int)gradWeight->size[4]; THCUNN_assertSameGPU(state, 6, input, gradOutput, gradWeight, gradBias, columns, ones); 
THNN_(VolumetricFullDilatedConvolution_shapeCheck)( state, input, gradOutput, gradWeight, gradBias, dT, dW, dH, padT, padW, padH, dilationT, dilationW, dilationH, adjT, adjW, adjH); THArgCheck(THCTensor_(isContiguous)(state, gradWeight), 4, "gradWeight needs to be contiguous"); if (gradBias) THArgCheck(THCTensor_(isContiguous)(state, gradBias), 5, "gradBias needs to be contiguous"); input = THCTensor_(newContiguous)(state, input); gradOutput = THCTensor_(newContiguous)(state, gradOutput); int batch = 1; if (input->nDimension == 4) { // Force batch batch = 0; THCTensor_(resize5d)(state, input, 1, input->size[0], input->size[1], input->size[2], input->size[3]); THCTensor_(resize5d)(state, gradOutput, 1, gradOutput->size[0], gradOutput->size[1], gradOutput->size[2], gradOutput->size[3]); } long inputWidth = input->size[4]; long inputHeight = input->size[3]; long inputDepth = input->size[2]; long outputDepth = (inputDepth - 1) * dT - 2*padT + (dilationT * (kT - 1) + 1) + adjT; long outputHeight = (inputHeight - 1) * dH - 2*padH + (dilationH * (kH - 1) + 1) + adjH; long outputWidth = (inputWidth - 1) * dW - 2*padW + (dilationW * (kW - 1) + 1) + adjW; // Batch size + input planes long batchSize = input->size[0]; // Define a buffer of ones, for bias accumulation if (ones->nDimension != 3 || ones->size[0]*ones->size[1]*ones->size[2] < outputDepth*outputHeight*outputWidth) { // Resize plane and fill with ones... THCTensor_(resize3d)(state, ones, outputDepth, outputHeight, outputWidth); THCTensor_(fill)(state, ones, ScalarConvert<int, real>::to(1)); } // Resize temporary columns THCTensor_(resize2d)(state, columns, nOutputPlane*kW*kH*kT, inputDepth*inputHeight*inputWidth); // Helpers THCTensor *input_n = THCTensor_(new)(state); THCTensor *gradOutput_n = THCTensor_(new)(state); // For each elt in batch, do: for (int elt = 0; elt < batchSize; elt ++) { // Matrix mulitply per output: THCTensor_(select)(state, input_n, input, 0, elt); THCTensor_(select)(state, gradOutput_n, gradOutput, 0, elt); // Extract columns: vol2col( THCState_getCurrentStream(state), THCTensor_(data)(state, gradOutput_n), nOutputPlane, outputDepth, outputHeight, outputWidth, kT, kH, kW, padT, padH, padW, dT, dH, dW, dilationT, dilationH, dilationW, THCTensor_(data)(state, columns) ); // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) long n = columns->size[0]; // nOutputPlane * kt * kh * kw long m = input_n->size[0]; // nInputPlane long k = columns->size[1]; // inputHeight * inputWidth // Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices) #ifdef THC_REAL_IS_FLOAT THCudaBlas_Sgemm( #elif defined(THC_REAL_IS_HALF) THCudaBlas_Hgemm( #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_Dgemm( #endif state, 't', 'n', n, m, k, scale, THCTensor_(data)(state, columns), k, THCTensor_(data)(state, input_n), k, ScalarConvert<int, real>::to(1), THCTensor_(data)(state, gradWeight), n ); // Do Bias: // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) long m_ = nOutputPlane; long k_ = outputDepth * outputHeight * outputWidth; // Do GEMV (note: this is a bit confusing because gemv assumes column-major matrices) if (gradBias) { #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) #ifdef THC_REAL_IS_FLOAT THCudaBlas_Sgemv( #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_Dgemv( #endif state, 't', k_, m_, scale, THCTensor_(data)(state, gradOutput_n), k_, THCTensor_(data)(state, ones), 1, ScalarConvert<int, real>::to(1), 
THCTensor_(data)(state, gradBias), 1 ); #endif #ifdef THC_REAL_IS_HALF THCudaBlas_Hgemm( state, 't', 'n', m_, 1, k_, scale, THCTensor_(data)(state, gradOutput_n), k_, THCTensor_(data)(state, ones), k_, ScalarConvert<int, real>::to(1), THCTensor_(data)(state, gradBias), m_ ); #endif } } // Free THCTensor_(free)(state, input_n); THCTensor_(free)(state, gradOutput_n); // Resize if (batch == 0) { THCTensor_(resize4d)(state, gradOutput, nOutputPlane, outputDepth, outputHeight, outputWidth); THCTensor_(resize4d)(state, input, nInputPlane, inputDepth, inputHeight, inputWidth); } THCTensor_(free)(state, input); THCTensor_(free)(state, gradOutput); } #endif
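// A small host-side sketch of the output-size rule that the shape check and
// all three entry points above apply per spatial axis of the transposed
// (full) dilated convolution:
//   out = (in - 1) * stride - 2 * pad + dilation * (k - 1) + 1 + adj
// full_conv_out_size() and the sample numbers are illustrative only.
#include <cstdio>

static long full_conv_out_size(long in, int k, int stride, int pad,
                               int dilation, int adj) {
    return (in - 1) * stride - 2 * pad + dilation * (k - 1) + 1 + adj;
}

int main() {
    // e.g. input depth 8, kernel 3, stride 2, pad 1, dilation 1, output
    // padding 1 gives (8-1)*2 - 2 + 3 + 1 = 16, i.e. the size is doubled.
    std::printf("%ld\n", full_conv_out_size(8, 3, 2, 1, 1, 1));
    return 0;
}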
namespace anakin { namespace saber { const int TRANS_BLOCK_SIZE = 16; template <typename Dtype> __global__ void ker_permute_fwd(Dtype * out_data, const int num_axes,\ const int count, const int * permute_order,\ const int * new_steps, const int * old_steps,\ const Dtype* in_data) { CUDA_KERNEL_LOOP(tid, count){ int org_idx = tid; int in_idx = 0; #pragma unroll for (int i = 0; i < num_axes; i++) { int order = permute_order[i]; int new_step = new_steps[i]; int old_step = old_steps[order]; in_idx += (org_idx / new_step) * old_step; org_idx %= new_step; } out_data[tid] = in_data[in_idx]; } } template <typename Dtype> __global__ void ker_permute_fwd(Dtype * out_data, const int num_axes,\ const int count, const int * permute_order,\ const int * new_steps, const int * old_steps,\ const int * new_valid_shape, const Dtype* in_data) { CUDA_KERNEL_LOOP(tid, count){ int in_idx = 0; int out_idx = 0; int new_valid_stride = 1; #pragma unroll for (int i = num_axes - 1; i >= 0; --i) { int order = permute_order[i]; int new_step = new_steps[i]; int old_step = old_steps[order]; int id = (tid / new_valid_stride) % new_valid_shape[i]; in_idx += id * old_step; out_idx += id * new_step; new_valid_stride *= new_valid_shape[i]; } out_data[out_idx] = in_data[in_idx]; } } /*in this kernel, we suppose img with format (1, h, w, c) tranform to (1, c, h, w), and c = 3. out_h = c, out_w = h * w. each thread process one pixel*/ template<typename Dtype> __global__ void ker_permute_fwd_transpose(Dtype * out_data, \ const int out_h, const int out_w, \ const Dtype* in_data) { int tid = threadIdx.x + blockIdx.x * blockDim.x; __shared__ float tile[3][CUDA_NUM_THREADS]; if (tid < out_w) { int offset = tid * out_h; tile[0][threadIdx.x] = in_data[offset]; tile[1][threadIdx.x] = in_data[offset + 1]; tile[2][threadIdx.x] = in_data[offset + 2]; } __syncthreads(); if (tid < out_w) { out_data[0 *out_w + tid] = tile[0][threadIdx.x]; out_data[1 *out_w + tid] = tile[1][threadIdx.x]; out_data[2 *out_w + tid] = tile[2][threadIdx.x]; } } template<typename Dtype> __global__ void ker_permute_fwd_transpose(Dtype * out_data, \ const int n, const int c, const int h, const int w, const int * out_stride, const int * in_stride, const Dtype* in_data) { int tid = threadIdx.x + blockIdx.x * blockDim.x; __shared__ float tile[3][CUDA_NUM_THREADS]; int out_w_id = tid % w; int out_h_id = (tid / w) % h; int out_n_id = tid / (h * w); int out_offset = out_n_id * out_stride[0] + out_h_id * out_stride[2] + out_w_id * out_stride[3]; int in_offset = out_n_id * in_stride[0] + out_h_id * in_stride[1] + out_w_id * in_stride[2]; if (tid < n * h * w) { tile[0][threadIdx.x] = in_data[in_offset]; tile[1][threadIdx.x] = in_data[in_offset + 1]; tile[2][threadIdx.x] = in_data[in_offset + 2]; } __syncthreads(); if (tid < n * h * w ){ out_data[out_offset + out_stride[1] * 0] = tile[0][threadIdx.x]; out_data[out_offset + out_stride[1] * 1] = tile[1][threadIdx.x]; out_data[out_offset + out_stride[1] * 2] = tile[2][threadIdx.x]; } } /*in this kernel, we suppose img with format (1, c, h, w) tranform to (1, h, w, c), and out_h = h*w, out_w = c. each thread process one data. 
we use share memory*/ template<typename Dtype> __global__ void ker_transpose(Dtype * out_data, \ const int out_h, const int out_w, \ const Dtype* in_data) { __shared__ float tile[TRANS_BLOCK_SIZE][TRANS_BLOCK_SIZE]; int tid_x = threadIdx.x + blockIdx.x * blockDim.x;//in index int tid_y = threadIdx.y + blockIdx.y * blockDim.y;//in index if (tid_x < out_h && tid_y < out_w) { tile[threadIdx.x][threadIdx.y] = in_data[tid_x + tid_y * out_h]; } __syncthreads(); if (tid_x < out_h && tid_y < out_w) { out_data[tid_x * out_w + tid_y] = tile[threadIdx.x][threadIdx.y]; } } template<typename Dtype> __global__ void ker_nchw_to_nhwc(Dtype * out_data, const int n, const int c, const int h, const int w, const int * out_stride, const int * in_stride, const Dtype* in_data) { __shared__ float tile[TRANS_BLOCK_SIZE][TRANS_BLOCK_SIZE]; int tid_x = threadIdx.x + blockIdx.x * blockDim.x;//in index int tid_y = threadIdx.y + blockIdx.y * blockDim.y;//in index int w_id = tid_y % w; int h_id = tid_y / w; int c_id = tid_x % c; int n_id = tid_x / c; int in_offset = n_id * in_stride[0] + c_id * in_stride[1] \ + h_id * in_stride[2] + w_id * in_stride[3]; int out_offset = n_id * out_stride[0] + h_id * out_stride[1] + \ w_id * out_stride[2] + c_id * out_stride[3]; if (tid_x < n*c && tid_y < h*w) { tile[threadIdx.x][threadIdx.y] = in_data[in_offset]; } __syncthreads(); if (tid_x < n*c && tid_y < h*w) { out_data[out_offset] = tile[threadIdx.x][threadIdx.y]; } } template <> SaberStatus SaberPermute<NV, AK_FLOAT>::dispatch(\ const std::vector<Tensor<NV> *>& inputs, \ std::vector<Tensor<NV> *>& outputs, \ PermuteParam<NV>& param) { cudaStream_t cuda_stream = this->_ctx->get_compute_stream(); const float* in_data =static_cast<const float*>(inputs[0]->data()); float* out_data = static_cast<float*>(outputs[0]->mutable_data()); int count = outputs[0]->valid_size(); const int* permute_order = static_cast<const int*>(_permute_order.data()); const int* new_steps = static_cast<const int*>(_out_steps.data()); const int* old_steps = static_cast<const int*>(_in_steps.data()); const int* out_valid_shape = static_cast<const int*>(_out_valid_shape.data()); std::vector<int> permute_order_nhwc_to_nchw = {0, 3, 1, 2}; PermuteParam<NV> param_nhwc_to_nchw(permute_order_nhwc_to_nchw); std::vector<int> permute_order_nchw_to_nhwc = {0, 2, 3, 1}; PermuteParam<NV> param_nchw_to_nhwc(permute_order_nchw_to_nhwc); if (inputs[0]->is_continue_mem() && outputs[0]->is_continue_mem()) { if (_need_permute) { if (inputs[0]->num() == 1 && inputs[0]->width() == 3 && param == param_nhwc_to_nchw) { int out_w = outputs[0]->width() * outputs[0]->height(); int out_h = outputs[0]->channel(); ker_permute_fwd_transpose<float>\ <<<CUDA_GET_BLOCKS(out_w), CUDA_NUM_THREADS, 0, cuda_stream>>>(\ out_data, out_h, out_w, in_data); } else if (inputs[0]->num() == 1 && param == param_nchw_to_nhwc) { int out_h = inputs[0]->width() * inputs[0]->height(); int out_w = inputs[0]->channel(); dim3 block_size(TRANS_BLOCK_SIZE, TRANS_BLOCK_SIZE); dim3 grid_size((out_h + TRANS_BLOCK_SIZE - 1) / TRANS_BLOCK_SIZE, (out_w + TRANS_BLOCK_SIZE - 1) / TRANS_BLOCK_SIZE); ker_transpose<float>\ <<<grid_size, block_size, 0, cuda_stream>>>(\ out_data, out_h, out_w, in_data); } else { ker_permute_fwd<float>\ <<<CUDA_GET_BLOCKS(count), CUDA_NUM_THREADS, 0, cuda_stream>>>(\ out_data, _num_axes, count, permute_order, \ new_steps, old_steps, in_data); } } else { outputs[0]->copy_from(*inputs[0]); //outputs[0]->share_from(inputs[0]); } } else { if (_need_permute) { if (inputs[0]->num() == 1 && 
inputs[0]->width() == 3 && param == param_nhwc_to_nchw) { int out_w = outputs[0]->width() * outputs[0]->height(); int out_h = outputs[0]->channel(); ker_permute_fwd_transpose<float>\ <<<CUDA_GET_BLOCKS(out_w), CUDA_NUM_THREADS, 0, cuda_stream>>>(\ out_data, outputs[0]->num(), outputs[0]->channel(), \ outputs[0]->height(), outputs[0]->width(), new_steps, old_steps, in_data); } else if (inputs[0]->num() == 1 && param == param_nchw_to_nhwc) { dim3 block_size(TRANS_BLOCK_SIZE, TRANS_BLOCK_SIZE); dim3 grid_size((inputs[0]->num() * inputs[0]->channel() + TRANS_BLOCK_SIZE - 1) / TRANS_BLOCK_SIZE, (inputs[0]->height() * inputs[0]->width() + TRANS_BLOCK_SIZE - 1) / TRANS_BLOCK_SIZE); ker_nchw_to_nhwc<float>\ <<<grid_size, block_size, 0, cuda_stream>>>(\ out_data, inputs[0]->num(), inputs[0]->channel(),\ inputs[0]->height(), inputs[0]->width(),\ new_steps, old_steps, in_data); } else { ker_permute_fwd<float>\ <<<CUDA_GET_BLOCKS(count), CUDA_NUM_THREADS, 0, cuda_stream>>>(\ out_data, _num_axes, count, permute_order, \ new_steps, old_steps, in_data); } } else { outputs[0]->copy_from(*inputs[0]); //outputs[0]->share_from(inputs[0]); } } return SaberSuccess; } DEFINE_OP_TEMPLATE(SaberPermute, PermuteParam, NV, AK_HALF); DEFINE_OP_TEMPLATE(SaberPermute, PermuteParam, NV, AK_INT8); } }
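// A host-side sketch of the index arithmetic in ker_permute_fwd above: each
// output linear index is peeled apart with the output strides (new_steps)
// and reassembled with the input strides of the permuted axes (old_steps),
// exactly what the CUDA_KERNEL_LOOP body does per thread. cpu_permute() and
// the 2x3 transpose example are illustrative only.
#include <cstdio>
#include <vector>

static void cpu_permute(const std::vector<float>& in, std::vector<float>& out,
                        const std::vector<int>& order,
                        const std::vector<int>& new_steps,
                        const std::vector<int>& old_steps) {
    for (size_t tid = 0; tid < out.size(); ++tid) {
        int org_idx = static_cast<int>(tid);
        int in_idx = 0;
        for (size_t i = 0; i < order.size(); ++i) {
            in_idx += (org_idx / new_steps[i]) * old_steps[order[i]];
            org_idx %= new_steps[i];
        }
        out[tid] = in[in_idx];
    }
}

int main() {
    // Transpose a 2x3 matrix (input strides {3,1}) into a 3x2 matrix
    // (output strides {2,1}) with permute order {1,0}.
    std::vector<float> in = {0, 1, 2, 3, 4, 5}, out(6);
    cpu_permute(in, out, /*order=*/{1, 0}, /*new_steps=*/{2, 1},
                /*old_steps=*/{3, 1});
    for (float v : out) std::printf("%g ", v);  // prints: 0 3 1 4 2 5
    std::printf("\n");
    return 0;
}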
#pragma once #include <Cuda/Common/Palatte.h> #include <math_constants.h> #include "ScalableTSDFVolumeCudaDevice.cuh" namespace open3d { namespace cuda { __global__ void CreateKernel(ScalableTSDFVolumeCudaDevice server) { const size_t index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= server.value_capacity_) return; int N = server.N_; const size_t offset = (N * N * N) * index; UniformTSDFVolumeCudaDevice &subvolume = server.hash_table_.memory_heap_value_.value_at(index); /** Assign dimension **/ subvolume.N_ = server.N_; /** Assign memory **/ subvolume.tsdf_ = &server.tsdf_memory_pool_[offset]; subvolume.logit_ = &server.logit_memory_pool_[offset]; subvolume.weight_ = &server.weight_memory_pool_[offset]; subvolume.color_ = &server.color_memory_pool_[offset]; /** Assign property **/ subvolume.voxel_length_ = server.voxel_length_; subvolume.inv_voxel_length_ = server.inv_voxel_length_; subvolume.sdf_trunc_ = server.sdf_trunc_; subvolume.transform_volume_to_world_ = server.transform_volume_to_world_; subvolume.transform_world_to_volume_ = server.transform_world_to_volume_; } __host__ void ScalableTSDFVolumeCudaKernelCaller::Create( ScalableTSDFVolumeCuda &volume) { const dim3 threads(THREAD_1D_UNIT); const dim3 blocks(DIV_CEILING(volume.value_capacity_, THREAD_1D_UNIT)); CreateKernel<<<blocks, threads>>>(*volume.device_); CheckCuda(cudaDeviceSynchronize()); CheckCuda(cudaGetLastError()); } __global__ void TouchSubvolumesKernel(ScalableTSDFVolumeCudaDevice server, ImageCudaDevice<float, 1> depth, PinholeCameraIntrinsicCuda camera, TransformCuda transform_camera_to_world) { const int x = threadIdx.x + blockIdx.x * blockDim.x; const int y = threadIdx.y + blockIdx.y * blockDim.y; if (x >= depth.width_ || y >= depth.height_) return; const Vector2i p = Vector2i(x, y); server.TouchSubvolume(p, depth, camera, transform_camera_to_world); } __host__ void ScalableTSDFVolumeCudaKernelCaller::TouchSubvolumes( ScalableTSDFVolumeCuda &volume, ImageCuda<float, 1> &depth, PinholeCameraIntrinsicCuda &camera, TransformCuda &transform_camera_to_world) { const dim3 blocks(DIV_CEILING(depth.width_, THREAD_2D_UNIT), DIV_CEILING(depth.height_, THREAD_2D_UNIT)); const dim3 threads(THREAD_2D_UNIT, THREAD_2D_UNIT); TouchSubvolumesKernel<<<blocks, threads>>>( *volume.device_, *depth.device_, camera, transform_camera_to_world); CheckCuda(cudaDeviceSynchronize()); CheckCuda(cudaGetLastError()); } __global__ void IntegrateSubvolumesKernel( ScalableTSDFVolumeCudaDevice server, RGBDImageCudaDevice rgbd, PinholeCameraIntrinsicCuda camera, TransformCuda transform_camera_to_world) { const size_t entry_idx = blockIdx.x; // 1070 supports up to 1024 threads per block // Each thread processes 4 blocks, so 1024 * 4 = 4096 = 16^3 can be achieved for (int workload = 0; workload < 4; ++workload) { const Vector3i Xlocal = Vector3i(threadIdx.x, threadIdx.y, threadIdx.z + blockDim.z * workload); #ifdef CUDA_DEBUG_ENABLE_ASSERTION assert(entry_idx < server.active_subvolume_entry_array().size() && Xlocal(0) < N && Xlocal(1) < N && Xlocal(2) < N); #endif HashEntry<Vector3i> &entry = server.active_subvolume_entry_array_.at(entry_idx); #ifdef CUDA_DEBUG_ENABLE_ASSERTION assert(entry.internal_addr >= 0); #endif server.Integrate(Xlocal, entry, rgbd, camera, transform_camera_to_world); } } __host__ void ScalableTSDFVolumeCudaKernelCaller::IntegrateSubvolumes( ScalableTSDFVolumeCuda &volume, RGBDImageCuda &rgbd, PinholeCameraIntrinsicCuda &camera, TransformCuda &transform_camera_to_world) { const dim3 
blocks(volume.active_subvolume_entry_array_.size()); const dim3 threads(volume.N_, volume.N_, volume.N_ / 4); IntegrateSubvolumesKernel<<<blocks, threads>>>( *volume.device_, *rgbd.device_, camera, transform_camera_to_world); CheckCuda(cudaDeviceSynchronize()); CheckCuda(cudaGetLastError()); } __global__ void GetSubvolumesInFrustumKernel( ScalableTSDFVolumeCudaDevice server, PinholeCameraIntrinsicCuda camera, TransformCuda transform_camera_to_world) { const int bucket_idx = threadIdx.x + blockIdx.x * blockDim.x; if (bucket_idx >= server.bucket_count_) return; auto &hash_table = server.hash_table_; int bucket_base_idx = bucket_idx * BUCKET_SIZE; #pragma unroll 1 for (size_t i = 0; i < BUCKET_SIZE; ++i) { HashEntry<Vector3i> &entry = hash_table.entry_array_.at(bucket_base_idx + i); if (entry.internal_addr != NULLPTR_CUDA) { Vector3f X = server.voxelf_local_to_global(Vector3f(0), entry.key); if (camera.IsPointInFrustum(transform_camera_to_world.Inverse() * server.voxelf_to_world(X))) { server.ActivateSubvolume(entry); } } } LinkedListCudaDevice<HashEntry<Vector3i>> &linked_list = hash_table.entry_list_array_.at(bucket_idx); int node_ptr = linked_list.head_node_ptr(); while (node_ptr != NULLPTR_CUDA) { LinkedListNodeCuda<HashEntry<Vector3i>> &linked_list_node = linked_list.get_node(node_ptr); HashEntry<Vector3i> &entry = linked_list_node.data; Vector3f X = server.voxelf_local_to_global(Vector3f(0), entry.key); if (camera.IsPointInFrustum(transform_camera_to_world.Inverse() * server.voxelf_to_world(X))) { server.ActivateSubvolume(entry); } node_ptr = linked_list_node.next_node_ptr; } } __host__ void ScalableTSDFVolumeCudaKernelCaller::GetSubvolumesInFrustum( ScalableTSDFVolumeCuda &volume, PinholeCameraIntrinsicCuda &camera, TransformCuda &transform_camera_to_world) { const dim3 blocks(volume.bucket_count_); const dim3 threads(THREAD_1D_UNIT); GetSubvolumesInFrustumKernel<<<blocks, threads>>>( *volume.device_, camera, transform_camera_to_world); CheckCuda(cudaDeviceSynchronize()); CheckCuda(cudaGetLastError()); } __global__ void GetAllSubvolumesKernel(ScalableTSDFVolumeCudaDevice server) { const int bucket_idx = threadIdx.x + blockIdx.x * blockDim.x; if (bucket_idx >= server.bucket_count_) return; auto &hash_table = server.hash_table_; int bucket_base_idx = bucket_idx * BUCKET_SIZE; #pragma unroll 1 for (size_t i = 0; i < BUCKET_SIZE; ++i) { HashEntry<Vector3i> &entry = hash_table.entry_array_.at(bucket_base_idx + i); if (entry.internal_addr != NULLPTR_CUDA) { server.ActivateSubvolume(entry); } } LinkedListCudaDevice<HashEntry<Vector3i>> &linked_list = hash_table.entry_list_array_.at(bucket_idx); int node_ptr = linked_list.head_node_ptr(); while (node_ptr != NULLPTR_CUDA) { LinkedListNodeCuda<HashEntry<Vector3i>> &linked_list_node = linked_list.get_node(node_ptr); server.ActivateSubvolume(linked_list_node.data); node_ptr = linked_list_node.next_node_ptr; } } __host__ void ScalableTSDFVolumeCudaKernelCaller::GetAllSubvolumes( ScalableTSDFVolumeCuda &volume) { const dim3 blocks(volume.bucket_count_); const dim3 threads(THREAD_1D_UNIT); GetAllSubvolumesKernel<<<blocks, threads>>>(*volume.device_); CheckCuda(cudaDeviceSynchronize()); CheckCuda(cudaGetLastError()); } __global__ void RayCastingKernel(ScalableTSDFVolumeCudaDevice server, ImageCudaDevice<float, 3> vertex, ImageCudaDevice<float, 3> normal, ImageCudaDevice<uchar, 3> color, PinholeCameraIntrinsicCuda camera, TransformCuda transform_camera_to_world) { const int x = threadIdx.x + blockIdx.x * blockDim.x; const int y = threadIdx.y + 
blockIdx.y * blockDim.y; if (x >= vertex.width_ || y >= vertex.height_) return; Vector2i p = Vector2i(x, y); Vector3f v, n; Vector3b c; bool mask = server.RayCasting(p, v, n, c, camera, transform_camera_to_world); if (!mask) { vertex.at(x, y) = Vector3f(nanf("nan")); normal.at(x, y) = Vector3f(nanf("nan")); color.at(x, y) = Vector3b(0); return; } vertex.at(x, y) = v; normal.at(x, y) = n; color.at(x, y) = c; } __host__ void ScalableTSDFVolumeCudaKernelCaller::RayCasting( ScalableTSDFVolumeCuda &volume, ImageCuda<float, 3> &vertex, ImageCuda<float, 3> &normal, ImageCuda<uchar, 3> &color, PinholeCameraIntrinsicCuda &camera, TransformCuda &transform_camera_to_world) { const dim3 blocks(DIV_CEILING(vertex.width_, THREAD_2D_UNIT), DIV_CEILING(vertex.height_, THREAD_2D_UNIT)); const dim3 threads(THREAD_2D_UNIT, THREAD_2D_UNIT); RayCastingKernel<<<blocks, threads>>>(*volume.device_, *vertex.device_, *normal.device_, *color.device_, camera, transform_camera_to_world); CheckCuda(cudaDeviceSynchronize()); CheckCuda(cudaGetLastError()); } __global__ void VolumeRenderingKernel(ScalableTSDFVolumeCudaDevice server, ImageCudaDevice<float, 3> vertex, PinholeCameraIntrinsicCuda camera, TransformCuda transform_camera_to_world) { const int x = threadIdx.x + blockIdx.x * blockDim.x; const int y = threadIdx.y + blockIdx.y * blockDim.y; if (x >= vertex.width_ || y >= vertex.height_) return; Vector2i p = Vector2i(x, y); Vector3f v = server.VolumeRendering(p, camera, transform_camera_to_world); vertex.at(x, y) = v; } __host__ void ScalableTSDFVolumeCudaKernelCaller::VolumeRendering( ScalableTSDFVolumeCuda &volume, ImageCuda<float, 3> &image, PinholeCameraIntrinsicCuda &camera, TransformCuda &transform_camera_to_world) { const dim3 blocks(DIV_CEILING(image.width_, THREAD_2D_UNIT), DIV_CEILING(image.height_, THREAD_2D_UNIT)); const dim3 threads(THREAD_2D_UNIT, THREAD_2D_UNIT); VolumeRenderingKernel<<<blocks, threads>>>( *volume.device_, *image.device_, camera, transform_camera_to_world); CheckCuda(cudaDeviceSynchronize()); CheckCuda(cudaGetLastError()); } __global__ void DownSampleKernel(ScalableTSDFVolumeCudaDevice volume, ScalableTSDFVolumeCudaDevice volume_down) { HashEntry<Vector3i> &entry = volume.active_subvolume_entry_array_[blockIdx.x]; UniformTSDFVolumeCudaDevice *subvolume = volume.QuerySubvolume(entry.key); UniformTSDFVolumeCudaDevice *subvolume_down = volume_down.QuerySubvolume(entry.key); assert(subvolume != nullptr && subvolume_down != nullptr); int x = 2 * threadIdx.x, y = 2 * threadIdx.y, z = 2 * threadIdx.z; float sum_tsdf = 0; float sum_weight = 0; Vector3f sum_color = Vector3f(0); for (int i = 0; i < 8; ++i) { int idx = subvolume->IndexOf( Vector3i(x + (i & 4), y + (i & 2), z + (i & 1))); sum_tsdf += subvolume->tsdf_[idx]; sum_weight += (float)subvolume->weight_[idx]; const Vector3b &color = subvolume->color_[idx]; sum_color(0) += (float)color(0); sum_color(1) += (float)color(1); sum_color(2) += (float)color(2); } int idx = subvolume_down->IndexOf( Vector3i(threadIdx.x, threadIdx.y, threadIdx.z)); subvolume_down->tsdf_[idx] = 0.125f * sum_tsdf; subvolume_down->weight_[idx] = uchar(0.125f * sum_weight); sum_color *= 0.125f; subvolume_down->color_[idx] = sum_color.template saturate_cast<uchar>(); } void ScalableTSDFVolumeCudaKernelCaller::DownSample( ScalableTSDFVolumeCuda &volume, ScalableTSDFVolumeCuda &volume_down) { const dim3 blocks(volume.active_subvolume_entry_array_.size()); const dim3 threads(volume.N_ / 2, volume.N_ / 2, volume.N_ / 2); DownSampleKernel<<<blocks, 
threads>>>(*volume.device_, *volume_down.device_); CheckCuda(cudaDeviceSynchronize()); CheckCuda(cudaGetLastError()); } } // namespace cuda } // namespace open3d
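// A host-side sketch of the DIV_CEILING-style launch configuration used by
// the kernel callers above: the grid is rounded up so blocks * threads
// covers the whole image even when its extent is not a multiple of the tile
// size, which is why every kernel re-checks its coordinates before touching
// memory. div_ceiling() and the sample sizes stand in for the project's
// DIV_CEILING / THREAD_2D_UNIT macros and are illustrative only.
#include <cstdio>

static int div_ceiling(int a, int b) { return (a + b - 1) / b; }

int main() {
    const int width = 640, height = 481;  // height not divisible by 16
    const int tile = 16;                   // THREAD_2D_UNIT-like tile edge
    const int grid_x = div_ceiling(width, tile);
    const int grid_y = div_ceiling(height, tile);
    // 40 x 31 blocks of 16x16 threads; the last block row is only partially
    // used, so kernels guard with "if (x >= width || y >= height) return;".
    std::printf("grid = %d x %d, block = %d x %d\n", grid_x, grid_y, tile, tile);
    return 0;
}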
#define NUM_THREADS 64 #define AE_DIM 32 __device__ __forceinline__ float sigmoid(float x) { return exp(x) / (exp(x) + 1.0); } __device__ __forceinline__ void se3_transform_point_inplace(const float T[7], float X[3]) { const float tx=T[0], ty=T[1], tz=T[2]; const float qx=T[3], qy=T[4], qz=T[5], qw=T[6]; float uv[3]; uv[0] = 2.0 * (qy*X[2] - qz*X[1]); uv[1] = 2.0 * (qz*X[0] - qx*X[2]); uv[2] = 2.0 * (qx*X[1] - qy*X[0]); X[0] += qw*uv[0] + (qy*uv[2] - qz*uv[1]) + tx; X[1] += qw*uv[1] + (qz*uv[0] - qx*uv[2]) + ty; X[2] += qw*uv[2] + (qx*uv[1] - qy*uv[0]) + tz; } __device__ __forceinline__ void pinhole_jacobians(const float p[3], const float fx, const float fy, float Ju[6], float Jv[6], float Jz[6]) { const float X1=p[0], Y1=p[1], Z1=p[2]; const float d = 1.0 / Z1; const float d2 = d * d; // x-jacobians Ju[0] = fx * d; Ju[1] = fx * 0.0; Ju[2] = fx * (-X1*d2); Ju[3] = fx * (-X1*Y1*d2); Ju[4] = fx * (1 + X1*X1*d2); Ju[5] = fx * (-Y1*d); // y-jacobians Jv[0] = fy * 0.0; Jv[1] = fy * d; Jv[2] = fy * (-Y1*d2); Jv[3] = fy * -1 * (1+Y1*Y1*d2); Jv[4] = fy * X1*Y1*d2; Jv[5] = fy * X1*d; // z-jacobians Jz[0] = 0.0; Jz[1] = 0.0; Jz[2] = -d2; Jz[3] = d * Y1; Jz[4] = -d * X1; Jz[5] = 0.0; } __global__ void dense_se3_forward_kernel( const torch::PackedTensorAccessor32<float,4,torch::RestrictPtrTraits> transforms, const torch::PackedTensorAccessor32<float,4,torch::RestrictPtrTraits> embeddings, const torch::PackedTensorAccessor32<float,4,torch::RestrictPtrTraits> points, const torch::PackedTensorAccessor32<float,4,torch::RestrictPtrTraits> targets, const torch::PackedTensorAccessor32<float,4,torch::RestrictPtrTraits> weights, const torch::PackedTensorAccessor32<float,2,torch::RestrictPtrTraits> intrinsics, torch::PackedTensorAccessor32<float,5,torch::RestrictPtrTraits> Hx, torch::PackedTensorAccessor32<float,5,torch::RestrictPtrTraits> bx, int radius) { int batch_id = blockIdx.x; // batch_index int tx = threadIdx.x; int ix = blockIdx.y * NUM_THREADS + tx; // image_index const int ht = transforms.size(2); const int wd = transforms.size(3); const int ae_dim = embeddings.size(1); const int dim = ht * wd; const int h1 = ix / wd; const int w1 = ix % wd; const float* Xdata = points[batch_id].data(); const float* rdata = targets[batch_id].data(); const float* wdata = weights[batch_id].data(); const float* ae_data = embeddings[batch_id].data(); __shared__ float fx, fy, cx, cy; if (tx == 0) { fx = intrinsics[batch_id][0]; fy = intrinsics[batch_id][1]; cx = intrinsics[batch_id][2]; cy = intrinsics[batch_id][3]; } // transformation float G[7]; float ae1[AE_DIM]; // linear system float H[6][6], b[6]; if (ix < dim) { G[0] = transforms[batch_id][0][h1][w1]; // tx G[1] = transforms[batch_id][1][h1][w1]; // ty G[2] = transforms[batch_id][2][h1][w1]; // tz G[3] = transforms[batch_id][3][h1][w1]; // qx G[4] = transforms[batch_id][4][h1][w1]; // qy G[5] = transforms[batch_id][5][h1][w1]; // qz G[6] = transforms[batch_id][6][h1][w1]; // qw for (int ii=0; ii<ae_dim; ii++) { ae1[ii] = embeddings[batch_id][ii][h1][w1]; } } for (int ii=0; ii<6; ii++) { b[ii] = 0; } for (int ii=0; ii<6; ii++) { for (int jj=0; jj<6; jj++) { H[ii][jj] = 0; } } // jacobians float Ju[6], Jv[6], Jz[6]; __shared__ float X0[3][NUM_THREADS]; __shared__ float ae2[AE_DIM][NUM_THREADS]; __shared__ float rvec[3][NUM_THREADS]; __shared__ float wvec[3][NUM_THREADS]; __syncthreads(); for (int i=0; i<dim; i+=NUM_THREADS) { // load in data int jx = i + tx; if (jx < dim) { X0[0][tx] = Xdata[jx+0*dim]; X0[1][tx] = Xdata[jx+1*dim]; X0[2][tx] = Xdata[jx+2*dim]; 
rvec[0][tx] = rdata[jx+0*dim]; rvec[1][tx] = rdata[jx+1*dim]; rvec[2][tx] = rdata[jx+2*dim]; wvec[0][tx] = wdata[jx+0*dim]; wvec[1][tx] = wdata[jx+1*dim]; wvec[2][tx] = wdata[jx+2*dim]; for (int k=0; k<ae_dim; k++) ae2[k][tx] = ae_data[jx + k*dim]; } __syncthreads(); for (int j=0; j<NUM_THREADS; j++) { jx = i + j; if (ix<dim && jx<dim) { int h2 = jx / wd; int w2 = jx % wd; int r = max(abs(h1-h2), abs(w1-w2)); if (r > radius) continue; float p[3] = { X0[0][j], X0[1][j], X0[2][j] }; se3_transform_point_inplace(G, p); // residual vectors const float X1=p[0], Y1=p[1], Z1=p[2]; const float u = fx * (X1 / Z1) + cx; const float v = fy * (Y1 / Z1) + cy; const float ru = rvec[0][j] - u; const float rv = rvec[1][j] - v; const float rz = rvec[2][j] - 1.0 / Z1; // exclude pixels too close or errors too big if (Z1 < 0.1 || abs(ru) > 128 || abs(rv) > 128) continue; float s=0.0; for (int k=0; k<ae_dim; k++) { s += (ae1[k] - ae2[k][j]) * (ae1[k] - ae2[k][j]); } const float w = sigmoid(-s); const float wu = w * wvec[0][j]; const float wv = w * wvec[1][j]; const float wz = w * wvec[2][j]; pinhole_jacobians(p, fx, fy, Ju, Jv, Jz); for (int ii=0; ii<6; ii++) { b[ii] += wu*ru*Ju[ii] + wv*rv*Jv[ii] + wz*rz*Jz[ii]; } for (int ii=0; ii<6; ii++) { for (int jj=0; jj<6; jj++) { H[ii][jj] += wu*Ju[ii]*Ju[jj] + wv*Jv[ii]*Jv[jj] + wz*Jz[ii]*Jz[jj]; } } } } __syncthreads(); } if (ix < dim) { for (int ii=0; ii<6; ii++) { bx[batch_id][ii][0][h1][w1] = b[ii]; } for (int ii=0; ii<6; ii++) { for (int jj=0; jj<6; jj++) { Hx[batch_id][ii][jj][h1][w1] = H[ii][jj]; } } } } __global__ void dense_se3_backward_kernel1( const torch::PackedTensorAccessor32<float,4,torch::RestrictPtrTraits> transforms, const torch::PackedTensorAccessor32<float,4,torch::RestrictPtrTraits> embeddings, const torch::PackedTensorAccessor32<float,4,torch::RestrictPtrTraits> points, const torch::PackedTensorAccessor32<float,4,torch::RestrictPtrTraits> targets, const torch::PackedTensorAccessor32<float,4,torch::RestrictPtrTraits> weights, const torch::PackedTensorAccessor32<float,2,torch::RestrictPtrTraits> intrinsics, const torch::PackedTensorAccessor32<float,5,torch::RestrictPtrTraits> Hx_grad, const torch::PackedTensorAccessor32<float,5,torch::RestrictPtrTraits> bx_grad, torch::PackedTensorAccessor32<float,4,torch::RestrictPtrTraits> embedding_grad, torch::PackedTensorAccessor32<float,4,torch::RestrictPtrTraits> targets_grad, torch::PackedTensorAccessor32<float,4,torch::RestrictPtrTraits> weights_grad, int radius) { int batch_id = blockIdx.x; // batch_index int tx = threadIdx.x; int ix = blockIdx.y * NUM_THREADS + tx; // image_index const int ht = transforms.size(2); const int wd = transforms.size(3); const int dim = ht * wd; const int ae_dim = embeddings.size(1); int h2 = ix / wd; int w2 = ix % wd; const float* transform_data = transforms[batch_id].data(); const float* ae_data = embeddings[batch_id].data(); const float* diffH_data = Hx_grad[batch_id].data(); const float* diffb_data = bx_grad[batch_id].data(); __shared__ float fx, fy, cx, cy; if (tx == 0) { fx = intrinsics[batch_id][0]; fy = intrinsics[batch_id][1]; cx = intrinsics[batch_id][2]; cy = intrinsics[batch_id][3]; } float X0[3]; float target_u, target_v, target_z; float wu, wv, wz; float ae2[AE_DIM]; float diff_ae2[AE_DIM]; if (ix < dim) { X0[0] = points[batch_id][0][h2][w2]; X0[1] = points[batch_id][1][h2][w2]; X0[2] = points[batch_id][2][h2][w2]; target_u = targets[batch_id][0][h2][w2]; target_v = targets[batch_id][1][h2][w2]; target_z = targets[batch_id][2][h2][w2]; wu = 
weights[batch_id][0][h2][w2]; wv = weights[batch_id][1][h2][w2]; wz = weights[batch_id][2][h2][w2]; for (int ii=0; ii<ae_dim; ii++) { ae2[ii] = ae_data[ix + ii*dim]; diff_ae2[ii] = 0; } } // jacobians float Ju[6], Jv[6], Jz[6]; float diff_ru = 0; float diff_rv = 0; float diff_rz = 0; float diff_wu = 0; float diff_wv = 0; float diff_wz = 0; __shared__ float Gs[NUM_THREADS][7]; __shared__ float dH[6][6][NUM_THREADS]; __shared__ float db[6][NUM_THREADS]; __shared__ float ae1[AE_DIM][NUM_THREADS]; __syncthreads(); for (int i=0; i<dim; i+=NUM_THREADS) { int jx = i + tx; // read from global if (jx < dim) { Gs[tx][0] = transform_data[jx + 0*dim]; Gs[tx][1] = transform_data[jx + 1*dim]; Gs[tx][2] = transform_data[jx + 2*dim]; Gs[tx][3] = transform_data[jx + 3*dim]; Gs[tx][4] = transform_data[jx + 4*dim]; Gs[tx][5] = transform_data[jx + 5*dim]; Gs[tx][6] = transform_data[jx + 6*dim]; for (int ii=0; ii<ae_dim; ii++) { ae1[ii][tx] = ae_data[jx + ii*dim]; } for (int ii=0; ii<6; ii++) { for (int jj=0; jj<6; jj++) { dH[ii][jj][tx] = diffH_data[jx + (ii*6+jj)*dim]; } } for (int ii=0; ii<6; ii++) { db[ii][tx] = diffb_data[jx + ii*dim]; } } __syncthreads(); for (int j=0; j<NUM_THREADS; j++) { jx = i + j; if (ix<dim && jx<dim) { int h1 = jx / wd; int w1 = jx % wd; int r = max(abs(h1-h2), abs(w1-w2)); if (r > radius) continue; float p[3] = { X0[0], X0[1], X0[2] }; se3_transform_point_inplace(&Gs[j][0], p); // residual vectors const float X1=p[0], Y1=p[1], Z1=p[2]; const float u = fx * (X1 / Z1) + cx; const float v = fy * (Y1 / Z1) + cy; const float ru = target_u - u; const float rv = target_v - v; const float rz = target_z - 1.0 / Z1; float s=0.0; for (int k=0; k<ae_dim; k++) { s += (ae1[k][j] - ae2[k]) * (ae1[k][j] - ae2[k]); } float diff_w = 0.0f; const float w = sigmoid(-s); // exclude pixels too close or errors too big if (Z1 < 0.1 || abs(ru) > 128 || abs(rv) > 128) continue; pinhole_jacobians(p, fx, fy, Ju, Jv, Jz); for (int ii=0; ii<6; ii++) { const float db_i = db[ii][j]; // residual gradients diff_ru += w*wu*Ju[ii] * db_i; diff_rv += w*wv*Jv[ii] * db_i; diff_rz += w*wz*Jz[ii] * db_i; // weights gradients diff_wu += w*ru*Ju[ii] * db_i; diff_wv += w*rv*Jv[ii] * db_i; diff_wz += w*rz*Jz[ii] * db_i; // embedding weight diff_w += (wu*ru*Ju[ii] + wv*rv*Jv[ii] + wz*rz*Jz[ii]) * db_i; for (int jj=0; jj<6; jj++) { const float dH_ij = dH[ii][jj][j]; diff_wu += w*Ju[ii]*Ju[jj] * dH_ij; diff_wv += w*Jv[ii]*Jv[jj] * dH_ij; diff_wz += w*Jz[ii]*Jz[jj] * dH_ij; diff_w += (wu*Ju[ii]*Ju[jj] + wv*Jv[ii]*Jv[jj] + wz*Jz[ii]*Jz[jj]) * dH_ij; } } float diff_s = -diff_w * sigmoid(-s) * (1.0f - sigmoid(-s)); for (int k=0; k<ae_dim; k++) { diff_ae2[k] += -2 * diff_s * (ae1[k][j] - ae2[k]); } } } __syncthreads(); } if (ix < dim) { targets_grad[batch_id][0][h2][w2] = diff_ru; targets_grad[batch_id][1][h2][w2] = diff_rv; targets_grad[batch_id][2][h2][w2] = diff_rz; weights_grad[batch_id][0][h2][w2] = diff_wu; weights_grad[batch_id][1][h2][w2] = diff_wv; weights_grad[batch_id][2][h2][w2] = diff_wz; for (int k=0; k<ae_dim; k++) embedding_grad[batch_id][k][h2][w2] += diff_ae2[k]; } } __global__ void dense_se3_backward_kernel2( const torch::PackedTensorAccessor32<float,4,torch::RestrictPtrTraits> transforms, const torch::PackedTensorAccessor32<float,4,torch::RestrictPtrTraits> embeddings, const torch::PackedTensorAccessor32<float,4,torch::RestrictPtrTraits> points, const torch::PackedTensorAccessor32<float,4,torch::RestrictPtrTraits> targets, const torch::PackedTensorAccessor32<float,4,torch::RestrictPtrTraits> weights, const 
torch::PackedTensorAccessor32<float,2,torch::RestrictPtrTraits> intrinsics, const torch::PackedTensorAccessor32<float,5,torch::RestrictPtrTraits> Hx_grad, const torch::PackedTensorAccessor32<float,5,torch::RestrictPtrTraits> bx_grad, torch::PackedTensorAccessor32<float,4,torch::RestrictPtrTraits> embedding_grad, torch::PackedTensorAccessor32<float,4,torch::RestrictPtrTraits> targets_grad, torch::PackedTensorAccessor32<float,4,torch::RestrictPtrTraits> weights_grad, int radius) { int batch_id = blockIdx.x; // batch_index int tx = threadIdx.x; int ix = blockIdx.y * NUM_THREADS + tx; // image_index const int ht = transforms.size(2); const int wd = transforms.size(3); const int ae_dim = embeddings.size(1); const int dim = ht * wd; const int h1 = ix / wd; const int w1 = ix % wd; const float* transform_data = transforms[batch_id].data(); const float* Xdata = points[batch_id].data(); const float* rdata = targets[batch_id].data(); const float* wdata = weights[batch_id].data(); const float* ae_data = embeddings[batch_id].data(); __shared__ float fx, fy, cx, cy; if (tx == 0) { fx = intrinsics[batch_id][0]; fy = intrinsics[batch_id][1]; cx = intrinsics[batch_id][2]; cy = intrinsics[batch_id][3]; } // transformation float G[7]; float ae1[AE_DIM]; float diff_ae1[AE_DIM]; float db[6], dH[6][6]; if (ix < dim) { G[0] = transform_data[ix + 0*dim]; // tx G[1] = transform_data[ix + 1*dim]; // ty G[2] = transform_data[ix + 2*dim]; // tz G[3] = transform_data[ix + 3*dim]; // qx G[4] = transform_data[ix + 4*dim]; // qy G[5] = transform_data[ix + 5*dim]; // qz G[6] = transform_data[ix + 6*dim]; // qw for (int ii=0; ii<ae_dim; ii++) { ae1[ii] = embeddings[batch_id][ii][h1][w1]; diff_ae1[ii] = 0; } for (int ii=0; ii<6; ii++) { db[ii] = bx_grad[batch_id][ii][0][h1][w1]; } for (int ii=0; ii<6; ii++) { for (int jj=0; jj<6; jj++) { dH[ii][jj] = Hx_grad[batch_id][ii][jj][h1][w1]; } } } // jacobians float Ju[6], Jv[6], Jz[6]; __shared__ float X0[3][NUM_THREADS]; __shared__ float ae2[AE_DIM][NUM_THREADS]; __shared__ float rvec[3][NUM_THREADS]; __shared__ float wvec[3][NUM_THREADS]; __syncthreads(); for (int i=0; i<dim; i+=NUM_THREADS) { // load in data int jx = i + tx; if (jx < dim) { X0[0][tx] = Xdata[jx+0*dim]; X0[1][tx] = Xdata[jx+1*dim]; X0[2][tx] = Xdata[jx+2*dim]; rvec[0][tx] = rdata[jx+0*dim]; rvec[1][tx] = rdata[jx+1*dim]; rvec[2][tx] = rdata[jx+2*dim]; wvec[0][tx] = wdata[jx+0*dim]; wvec[1][tx] = wdata[jx+1*dim]; wvec[2][tx] = wdata[jx+2*dim]; for (int k=0; k<ae_dim; k++) ae2[k][tx] = ae_data[jx + k*dim]; } __syncthreads(); for (int j=0; j<NUM_THREADS; j++) { jx = i + j; if (ix<dim && jx<dim) { int h2 = jx / wd; int w2 = jx % wd; int r = max(abs(h1-h2), abs(w1-w2)); if (r > radius) continue; float p[3] = { X0[0][j], X0[1][j], X0[2][j] }; se3_transform_point_inplace(G, p); // residual vectors const float X1=p[0], Y1=p[1], Z1=p[2]; const float u = fx * (X1 / Z1) + cx; const float v = fy * (Y1 / Z1) + cy; const float ru = rvec[0][j] - u; const float rv = rvec[1][j] - v; const float rz = rvec[2][j] - 1.0 / Z1; float s=0.0; for (int k=0; k<ae_dim; k++) { s += (ae1[k] - ae2[k][j]) * (ae1[k] - ae2[k][j]); } const float w = sigmoid(-s); float diff_w = 0; const float wu = wvec[0][j]; const float wv = wvec[1][j]; const float wz = wvec[2][j]; // exclude pixels too close or errors too big if (Z1 < 0.1 || abs(ru) > 128 || abs(rv) > 128) continue; pinhole_jacobians(p, fx, fy, Ju, Jv, Jz); for (int ii=0; ii<6; ii++) { diff_w += (wu*ru*Ju[ii] + wv*rv*Jv[ii] + wz*rz*Jz[ii]) * db[ii]; for (int jj=0; jj<6; jj++) { diff_w += 
(wu*Ju[ii]*Ju[jj] + wv*Jv[ii]*Jv[jj] + wz*Jz[ii]*Jz[jj]) * dH[ii][jj]; } } float diff_s = -diff_w * sigmoid(-s) * (1.0f - sigmoid(-s)); for (int k=0; k<ae_dim; k++) { diff_ae1[k] += 2 * diff_s * (ae1[k] - ae2[k][j]); } } } __syncthreads(); } if (ix < dim) { for (int k=0; k<ae_dim; k++) embedding_grad[batch_id][k][h1][w1] += diff_ae1[k]; } } std::vector<torch::Tensor> dense_se3_forward_cuda( torch::Tensor transforms, torch::Tensor embeddings, torch::Tensor points, torch::Tensor targets, torch::Tensor weights, torch::Tensor intrinsics, int radius) { int batch_size = transforms.size(0); int ht = transforms.size(2); int wd = transforms.size(3); dim3 grid = dim3(batch_size, (ht*wd + NUM_THREADS-1) / NUM_THREADS); auto opts = targets.options(); torch::Tensor H = torch::zeros({batch_size, 6, 6, ht, wd}, opts); torch::Tensor b = torch::zeros({batch_size, 6, 1, ht, wd}, opts); dense_se3_forward_kernel<<<grid, NUM_THREADS>>>( transforms.packed_accessor32<float,4,torch::RestrictPtrTraits>(), embeddings.packed_accessor32<float,4,torch::RestrictPtrTraits>(), points.packed_accessor32<float,4,torch::RestrictPtrTraits>(), targets.packed_accessor32<float,4,torch::RestrictPtrTraits>(), weights.packed_accessor32<float,4,torch::RestrictPtrTraits>(), intrinsics.packed_accessor32<float,2,torch::RestrictPtrTraits>(), H.packed_accessor32<float,5,torch::RestrictPtrTraits>(), b.packed_accessor32<float,5,torch::RestrictPtrTraits>(), radius); return {H, b}; } std::vector<torch::Tensor> dense_se3_backward_cuda( torch::Tensor transforms, torch::Tensor embeddings, torch::Tensor points, torch::Tensor targets, torch::Tensor weights, torch::Tensor intrinsics, torch::Tensor H_grad, torch::Tensor b_grad, int radius) { int batch_size = transforms.size(0); int ht = transforms.size(2); int wd = transforms.size(3); dim3 grid = dim3(batch_size, (ht*wd + NUM_THREADS-1) / NUM_THREADS); torch::Tensor embedding_grad = torch::zeros_like(embeddings); torch::Tensor targets_grad = torch::zeros_like(targets); torch::Tensor weights_grad = torch::zeros_like(weights); // backward pass split into two kernels to avoid atomics dense_se3_backward_kernel1<<<grid, NUM_THREADS>>>( transforms.packed_accessor32<float,4,torch::RestrictPtrTraits>(), embeddings.packed_accessor32<float,4,torch::RestrictPtrTraits>(), points.packed_accessor32<float,4,torch::RestrictPtrTraits>(), targets.packed_accessor32<float,4,torch::RestrictPtrTraits>(), weights.packed_accessor32<float,4,torch::RestrictPtrTraits>(), intrinsics.packed_accessor32<float,2,torch::RestrictPtrTraits>(), H_grad.packed_accessor32<float,5,torch::RestrictPtrTraits>(), b_grad.packed_accessor32<float,5,torch::RestrictPtrTraits>(), embedding_grad.packed_accessor32<float,4,torch::RestrictPtrTraits>(), targets_grad.packed_accessor32<float,4,torch::RestrictPtrTraits>(), weights_grad.packed_accessor32<float,4,torch::RestrictPtrTraits>(), radius); dense_se3_backward_kernel2<<<grid, NUM_THREADS>>>( transforms.packed_accessor32<float,4,torch::RestrictPtrTraits>(), embeddings.packed_accessor32<float,4,torch::RestrictPtrTraits>(), points.packed_accessor32<float,4,torch::RestrictPtrTraits>(), targets.packed_accessor32<float,4,torch::RestrictPtrTraits>(), weights.packed_accessor32<float,4,torch::RestrictPtrTraits>(), intrinsics.packed_accessor32<float,2,torch::RestrictPtrTraits>(), H_grad.packed_accessor32<float,5,torch::RestrictPtrTraits>(), b_grad.packed_accessor32<float,5,torch::RestrictPtrTraits>(), embedding_grad.packed_accessor32<float,4,torch::RestrictPtrTraits>(), 
    targets_grad.packed_accessor32<float,4,torch::RestrictPtrTraits>(),
    weights_grad.packed_accessor32<float,4,torch::RestrictPtrTraits>(),
    radius);

  return {embedding_grad, targets_grad, weights_grad};
}
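// A minimal host-side sketch (plain C++, toy values, hypothetical names) of the
// embedding-attention weight used by dense_se3_forward_kernel above: every
// candidate pixel pair is gated by w = sigmoid(-||ae1 - ae2||^2), so pairs with
// similar embeddings receive a weight close to 0.5 (the maximum of sigmoid(-s),
// reached at s = 0) while dissimilar pairs are suppressed toward 0 before their
// residuals and Jacobians enter H and b.
#include <cmath>
#include <cstdio>

// same functional form as the device sigmoid(): exp(x) / (exp(x) + 1)
static float sigmoidf(float x) { return std::exp(x) / (std::exp(x) + 1.0f); }

int main() {
    const int ae_dim = 4;                                   // AE_DIM is 32 in the kernels; 4 here for brevity
    const float ae1[ae_dim] = {0.1f, -0.3f, 0.7f, 0.2f};
    const float ae2[ae_dim] = {0.1f, -0.2f, 0.6f, 0.3f};    // a "similar" embedding

    float s = 0.0f;
    for (int k = 0; k < ae_dim; ++k)
        s += (ae1[k] - ae2[k]) * (ae1[k] - ae2[k]);

    // this scalar multiplies the per-pair weights wu, wv, wz inside the kernel
    const float w = sigmoidf(-s);
    std::printf("squared embedding distance %.3f -> attention weight %.3f\n", s, w);
    return 0;
}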
#include <vector> #include <iostream> #include <ATen/ATen.h> #include <ATen/NativeFunctions.h> #include <ATen/Parallel.h> // #include "utils.cuh" #include <Eigen/Sparse> #include <Eigen/SparseCore> #include <Eigen/SparseCholesky> typedef Eigen::SparseMatrix<double> SpMat; typedef Eigen::Triplet<double> T; typedef std::vector<std::vector<long>> graph_t; typedef std::vector<torch::Tensor> tensor_list_t; #define MIN_DEPTH 0.25 #define THREADS 256 #define NUM_BLOCKS(batch_size) ((batch_size + THREADS - 1) / THREADS) #define GPU_1D_KERNEL_LOOP(k, n) \ for (size_t k = threadIdx.x; k<n; k += blockDim.x) __device__ void warpReduce(volatile float *sdata, unsigned int tid) { sdata[tid] += sdata[tid + 32]; sdata[tid] += sdata[tid + 16]; sdata[tid] += sdata[tid + 8]; sdata[tid] += sdata[tid + 4]; sdata[tid] += sdata[tid + 2]; sdata[tid] += sdata[tid + 1]; } __device__ void blockReduce(volatile float *sdata) { unsigned int tid = threadIdx.x; __syncthreads(); // if (threadIdx.x < 256) {sdata[tid] += sdata[tid + 256]; } __syncthreads(); if (threadIdx.x < 128) {sdata[tid] += sdata[tid + 128]; } __syncthreads(); if (threadIdx.x < 64) {sdata[tid] += sdata[tid + 64]; } __syncthreads(); if (tid < 32) warpReduce(sdata, tid); __syncthreads(); } __device__ void actSO3(const float *q, const float *X, float *Y) { float uv[3]; uv[0] = 2.0 * (q[1]*X[2] - q[2]*X[1]); uv[1] = 2.0 * (q[2]*X[0] - q[0]*X[2]); uv[2] = 2.0 * (q[0]*X[1] - q[1]*X[0]); Y[0] = X[0] + q[3]*uv[0] + (q[1]*uv[2] - q[2]*uv[1]); Y[1] = X[1] + q[3]*uv[1] + (q[2]*uv[0] - q[0]*uv[2]); Y[2] = X[2] + q[3]*uv[2] + (q[0]*uv[1] - q[1]*uv[0]); } __device__ void actSE3(const float *t, const float *q, const float *X, float *Y) { actSO3(q, X, Y); Y[3] = X[3]; Y[0] += X[3] * t[0]; Y[1] += X[3] * t[1]; Y[2] += X[3] * t[2]; } __device__ void adjSE3(const float *t, const float *q, const float *X, float *Y) { float qinv[4] = {-q[0], -q[1], -q[2], q[3]}; actSO3(qinv, &X[0], &Y[0]); actSO3(qinv, &X[3], &Y[3]); float u[3], v[3]; u[0] = t[2]*X[1] - t[1]*X[2]; u[1] = t[0]*X[2] - t[2]*X[0]; u[2] = t[1]*X[0] - t[0]*X[1]; actSO3(qinv, u, v); Y[3] += v[0]; Y[4] += v[1]; Y[5] += v[2]; } __device__ void relSE3(const float *ti, const float *qi, const float *tj, const float *qj, float *tij, float *qij) { qij[0] = -qj[3] * qi[0] + qj[0] * qi[3] - qj[1] * qi[2] + qj[2] * qi[1], qij[1] = -qj[3] * qi[1] + qj[1] * qi[3] - qj[2] * qi[0] + qj[0] * qi[2], qij[2] = -qj[3] * qi[2] + qj[2] * qi[3] - qj[0] * qi[1] + qj[1] * qi[0], qij[3] = qj[3] * qi[3] + qj[0] * qi[0] + qj[1] * qi[1] + qj[2] * qi[2], actSO3(qij, ti, tij); tij[0] = tj[0] - tij[0]; tij[1] = tj[1] - tij[1]; tij[2] = tj[2] - tij[2]; } __device__ void expSO3(const float *phi, float* q) { // SO3 exponential map float theta_sq = phi[0]*phi[0] + phi[1]*phi[1] + phi[2]*phi[2]; float theta_p4 = theta_sq * theta_sq; float theta = sqrtf(theta_sq); float imag, real; if (theta_sq < 1e-8) { imag = 0.5 - (1.0/48.0)*theta_sq + (1.0/3840.0)*theta_p4; real = 1.0 - (1.0/ 8.0)*theta_sq + (1.0/ 384.0)*theta_p4; } else { imag = sinf(0.5 * theta) / theta; real = cosf(0.5 * theta); } q[0] = imag * phi[0]; q[1] = imag * phi[1]; q[2] = imag * phi[2]; q[3] = real; } __device__ void crossInplace(const float* a, float *b) { float x[3] = { a[1]*b[2] - a[2]*b[1], a[2]*b[0] - a[0]*b[2], a[0]*b[1] - a[1]*b[0], }; b[0] = x[0]; b[1] = x[1]; b[2] = x[2]; } __device__ void expSE3(const float *xi, float* t, float* q) { // SE3 exponential map expSO3(xi + 3, q); float tau[3] = {xi[0], xi[1], xi[2]}; float phi[3] = {xi[3], xi[4], xi[5]}; float theta_sq = 
phi[0]*phi[0] + phi[1]*phi[1] + phi[2]*phi[2]; float theta = sqrtf(theta_sq); t[0] = tau[0]; t[1] = tau[1]; t[2] = tau[2]; if (theta > 1e-4) { float a = (1 - cosf(theta)) / theta_sq; crossInplace(phi, tau); t[0] += a * tau[0]; t[1] += a * tau[1]; t[2] += a * tau[2]; float b = (theta - sinf(theta)) / (theta * theta_sq); crossInplace(phi, tau); t[0] += b * tau[0]; t[1] += b * tau[1]; t[2] += b * tau[2]; } } __global__ void projective_transform_kernel( const torch::PackedTensorAccessor32<float,4,torch::RestrictPtrTraits> target, const torch::PackedTensorAccessor32<float,4,torch::RestrictPtrTraits> weight, const torch::PackedTensorAccessor32<float,2,torch::RestrictPtrTraits> poses, const torch::PackedTensorAccessor32<float,3,torch::RestrictPtrTraits> disps, const torch::PackedTensorAccessor32<float,1,torch::RestrictPtrTraits> intrinsics, const torch::PackedTensorAccessor32<long,1,torch::RestrictPtrTraits> ii, const torch::PackedTensorAccessor32<long,1,torch::RestrictPtrTraits> jj, torch::PackedTensorAccessor32<float,4,torch::RestrictPtrTraits> Hs, torch::PackedTensorAccessor32<float,3,torch::RestrictPtrTraits> vs, torch::PackedTensorAccessor32<float,3,torch::RestrictPtrTraits> Eii, torch::PackedTensorAccessor32<float,3,torch::RestrictPtrTraits> Eij, torch::PackedTensorAccessor32<float,2,torch::RestrictPtrTraits> Cii, torch::PackedTensorAccessor32<float,2,torch::RestrictPtrTraits> bz) { const int block_id = blockIdx.x; const int thread_id = threadIdx.x; const int ht = disps.size(1); const int wd = disps.size(2); int ix = static_cast<int>(ii[block_id]); int jx = static_cast<int>(jj[block_id]); __shared__ float fx; __shared__ float fy; __shared__ float cx; __shared__ float cy; __shared__ float ti[3], tj[3], tij[3]; __shared__ float qi[4], qj[4], qij[4]; // load intrinsics from global memory if (thread_id == 0) { fx = intrinsics[0]; fy = intrinsics[1]; cx = intrinsics[2]; cy = intrinsics[3]; } __syncthreads(); // stereo frames if (ix == jx) { if (thread_id == 0) { tij[0] = -0.1; tij[1] = 0; tij[2] = 0; qij[0] = 0; qij[1] = 0; qij[2] = 0; qij[3] = 1; } } else { // load poses from global memory if (thread_id < 3) { ti[thread_id] = poses[ix][thread_id]; tj[thread_id] = poses[jx][thread_id]; } if (thread_id < 4) { qi[thread_id] = poses[ix][thread_id+3]; qj[thread_id] = poses[jx][thread_id+3]; } __syncthreads(); if (thread_id == 0) { relSE3(ti, qi, tj, qj, tij, qij); } } __syncthreads(); //points float Xi[4]; float Xj[4]; // jacobians float Jx[12]; float Jz; float* Ji = &Jx[0]; float* Jj = &Jx[6]; // hessians float hij[12*(12+1)/2]; float vi[6], vj[6]; int l; for (l=0; l<12*(12+1)/2; l++) { hij[l] = 0; } for (int n=0; n<6; n++) { vi[n] = 0; vj[n] = 0; } __syncthreads(); GPU_1D_KERNEL_LOOP(k, ht*wd) { const int i = k / wd; const int j = k % wd; const float u = static_cast<float>(j); const float v = static_cast<float>(i); // homogenous coordinates Xi[0] = (u - cx) / fx; Xi[1] = (v - cy) / fy; Xi[2] = 1; Xi[3] = disps[ix][i][j]; // transform homogenous point actSE3(tij, qij, Xi, Xj); const float x = Xj[0]; const float y = Xj[1]; const float h = Xj[3]; const float d = (Xj[2] < MIN_DEPTH) ? 0.0 : 1.0 / Xj[2]; const float d2 = d * d; float wu = (Xj[2] < MIN_DEPTH) ? 0.0 : .001 * weight[block_id][0][i][j]; float wv = (Xj[2] < MIN_DEPTH) ? 
0.0 : .001 * weight[block_id][1][i][j]; const float ru = target[block_id][0][i][j] - (fx * d * x + cx); const float rv = target[block_id][1][i][j] - (fy * d * y + cy); // x - coordinate Jj[0] = fx * (h*d); Jj[1] = fx * 0; Jj[2] = fx * (-x*h*d2); Jj[3] = fx * (-x*y*d2); Jj[4] = fx * (1 + x*x*d2); Jj[5] = fx * (-y*d); Jz = fx * (tij[0] * d - tij[2] * (x * d2)); Cii[block_id][k] = wu * Jz * Jz; bz[block_id][k] = wu * ru * Jz; if (ix == jx) wu = 0; adjSE3(tij, qij, Jj, Ji); for (int n=0; n<6; n++) Ji[n] *= -1; l=0; for (int n=0; n<12; n++) { for (int m=0; m<=n; m++) { hij[l] += wu * Jx[n] * Jx[m]; l++; } } for (int n=0; n<6; n++) { vi[n] += wu * ru * Ji[n]; vj[n] += wu * ru * Jj[n]; Eii[block_id][n][k] = wu * Jz * Ji[n]; Eij[block_id][n][k] = wu * Jz * Jj[n]; } Jj[0] = fy * 0; Jj[1] = fy * (h*d); Jj[2] = fy * (-y*h*d2); Jj[3] = fy * (-1 - y*y*d2); Jj[4] = fy * (x*y*d2); Jj[5] = fy * (x*d); Jz = fy * (tij[1] * d - tij[2] * (y * d2)); Cii[block_id][k] += wv * Jz * Jz; bz[block_id][k] += wv * rv * Jz; if (ix == jx) wv = 0; adjSE3(tij, qij, Jj, Ji); for (int n=0; n<6; n++) Ji[n] *= -1; l=0; for (int n=0; n<12; n++) { for (int m=0; m<=n; m++) { hij[l] += wv * Jx[n] * Jx[m]; l++; } } for (int n=0; n<6; n++) { vi[n] += wv * rv * Ji[n]; vj[n] += wv * rv * Jj[n]; Eii[block_id][n][k] += wv * Jz * Ji[n]; Eij[block_id][n][k] += wv * Jz * Jj[n]; } } __syncthreads(); __shared__ float sdata[THREADS]; for (int n=0; n<6; n++) { sdata[threadIdx.x] = vi[n]; blockReduce(sdata); if (threadIdx.x == 0) { vs[0][block_id][n] = sdata[0]; } __syncthreads(); sdata[threadIdx.x] = vj[n]; blockReduce(sdata); if (threadIdx.x == 0) { vs[1][block_id][n] = sdata[0]; } } l=0; for (int n=0; n<12; n++) { for (int m=0; m<=n; m++) { sdata[threadIdx.x] = hij[l]; blockReduce(sdata); if (threadIdx.x == 0) { if (n<6 && m<6) { Hs[0][block_id][n][m] = sdata[0]; Hs[0][block_id][m][n] = sdata[0]; } else if (n >=6 && m<6) { Hs[1][block_id][m][n-6] = sdata[0]; Hs[2][block_id][n-6][m] = sdata[0]; } else { Hs[3][block_id][n-6][m-6] = sdata[0]; Hs[3][block_id][m-6][n-6] = sdata[0]; } } l++; } } } __global__ void projmap_kernel( const torch::PackedTensorAccessor32<float,2,torch::RestrictPtrTraits> poses, const torch::PackedTensorAccessor32<float,3,torch::RestrictPtrTraits> disps, const torch::PackedTensorAccessor32<float,1,torch::RestrictPtrTraits> intrinsics, const torch::PackedTensorAccessor32<long,1,torch::RestrictPtrTraits> ii, const torch::PackedTensorAccessor32<long,1,torch::RestrictPtrTraits> jj, torch::PackedTensorAccessor32<float,4,torch::RestrictPtrTraits> coords, torch::PackedTensorAccessor32<float,4,torch::RestrictPtrTraits> valid) { const int block_id = blockIdx.x; const int thread_id = threadIdx.x; const int ht = disps.size(1); const int wd = disps.size(2); __shared__ int ix; __shared__ int jx; __shared__ float fx; __shared__ float fy; __shared__ float cx; __shared__ float cy; __shared__ float ti[3], tj[3], tij[3]; __shared__ float qi[4], qj[4], qij[4]; // load intrinsics from global memory if (thread_id == 0) { ix = static_cast<int>(ii[block_id]); jx = static_cast<int>(jj[block_id]); fx = intrinsics[0]; fy = intrinsics[1]; cx = intrinsics[2]; cy = intrinsics[3]; } __syncthreads(); // load poses from global memory if (thread_id < 3) { ti[thread_id] = poses[ix][thread_id]; tj[thread_id] = poses[jx][thread_id]; } if (thread_id < 4) { qi[thread_id] = poses[ix][thread_id+3]; qj[thread_id] = poses[jx][thread_id+3]; } __syncthreads(); if (thread_id == 0) { relSE3(ti, qi, tj, qj, tij, qij); } //points float Xi[4]; float Xj[4]; 
__syncthreads(); GPU_1D_KERNEL_LOOP(k, ht*wd) { const int i = k / wd; const int j = k % wd; const float u = static_cast<float>(j); const float v = static_cast<float>(i); // homogenous coordinates Xi[0] = (u - cx) / fx; Xi[1] = (v - cy) / fy; Xi[2] = 1; Xi[3] = disps[ix][i][j]; // transform homogenous point actSE3(tij, qij, Xi, Xj); coords[block_id][i][j][0] = u; coords[block_id][i][j][1] = v; if (Xj[2] > 0.01) { coords[block_id][i][j][0] = fx * (Xj[0] / Xj[2]) + cx; coords[block_id][i][j][1] = fy * (Xj[1] / Xj[2]) + cy; } valid[block_id][i][j][0] = (Xj[2] > MIN_DEPTH) ? 1.0 : 0.0; } } __global__ void frame_distance_kernel( const torch::PackedTensorAccessor32<float,2,torch::RestrictPtrTraits> poses, const torch::PackedTensorAccessor32<float,3,torch::RestrictPtrTraits> disps, const torch::PackedTensorAccessor32<float,1,torch::RestrictPtrTraits> intrinsics, const torch::PackedTensorAccessor32<long,1,torch::RestrictPtrTraits> ii, const torch::PackedTensorAccessor32<long,1,torch::RestrictPtrTraits> jj, torch::PackedTensorAccessor32<float,1,torch::RestrictPtrTraits> dist, const float beta) { const int block_id = blockIdx.x; const int thread_id = threadIdx.x; const int ht = disps.size(1); const int wd = disps.size(2); __shared__ int ix; __shared__ int jx; __shared__ float fx; __shared__ float fy; __shared__ float cx; __shared__ float cy; __shared__ float ti[3], tj[3], tij[3]; __shared__ float qi[4], qj[4], qij[4]; // load intrinsics from global memory if (thread_id == 0) { ix = static_cast<int>(ii[block_id]); jx = static_cast<int>(jj[block_id]); fx = intrinsics[0]; fy = intrinsics[1]; cx = intrinsics[2]; cy = intrinsics[3]; } __syncthreads(); //points float Xi[4]; float Xj[4]; __shared__ float accum[THREADS]; accum[thread_id] = 0; __shared__ float valid[THREADS]; valid[thread_id] = 0; __shared__ float total[THREADS]; total[thread_id] = 0; __syncthreads(); for (int n=0; n<1; n++) { if (thread_id < 3) { ti[thread_id] = poses[ix][thread_id]; tj[thread_id] = poses[jx][thread_id]; } if (thread_id < 4) { qi[thread_id] = poses[ix][thread_id+3]; qj[thread_id] = poses[jx][thread_id+3]; } __syncthreads(); relSE3(ti, qi, tj, qj, tij, qij); float d, du, dv; GPU_1D_KERNEL_LOOP(k, ht*wd) { const int i = k / wd; const int j = k % wd; const float u = static_cast<float>(j); const float v = static_cast<float>(i); // if (disps[ix][i][j] < 0.01) { // continue; // } // homogenous coordinates Xi[0] = (u - cx) / fx; Xi[1] = (v - cy) / fy; Xi[2] = 1; Xi[3] = disps[ix][i][j]; // transform homogenous point actSE3(tij, qij, Xi, Xj); du = fx * (Xj[0] / Xj[2]) + cx - u; dv = fy * (Xj[1] / Xj[2]) + cy - v; d = sqrtf(du*du + dv*dv); total[threadIdx.x] += beta; if (Xj[2] > MIN_DEPTH) { accum[threadIdx.x] += beta * d; valid[threadIdx.x] += beta; } Xi[0] = (u - cx) / fx; Xi[1] = (v - cy) / fy; Xi[2] = 1; Xi[3] = disps[ix][i][j]; Xj[0] = Xi[0] + Xi[3] * tij[0]; Xj[1] = Xi[1] + Xi[3] * tij[1]; Xj[2] = Xi[2] + Xi[3] * tij[2]; du = fx * (Xj[0] / Xj[2]) + cx - u; dv = fy * (Xj[1] / Xj[2]) + cy - v; d = sqrtf(du*du + dv*dv); total[threadIdx.x] += (1 - beta); if (Xj[2] > MIN_DEPTH) { accum[threadIdx.x] += (1 - beta) * d; valid[threadIdx.x] += (1 - beta); } } if (threadIdx.x == 0) { int tmp = ix; ix = jx; jx = tmp; } __syncthreads(); } __syncthreads(); blockReduce(accum); __syncthreads(); blockReduce(total); __syncthreads(); blockReduce(valid); __syncthreads(); if (thread_id == 0) { dist[block_id] = (valid[0] / (total[0] + 1e-8) < 0.75) ? 
1000.0 : accum[0] / valid[0]; } } __global__ void depth_filter_kernel( const torch::PackedTensorAccessor32<float,2,torch::RestrictPtrTraits> poses, const torch::PackedTensorAccessor32<float,3,torch::RestrictPtrTraits> disps, const torch::PackedTensorAccessor32<float,1,torch::RestrictPtrTraits> intrinsics, const torch::PackedTensorAccessor32<long,1,torch::RestrictPtrTraits> inds, const torch::PackedTensorAccessor32<float,1,torch::RestrictPtrTraits> thresh, torch::PackedTensorAccessor32<float,3,torch::RestrictPtrTraits> counter) { const int block_id = blockIdx.x; const int neigh_id = blockIdx.y; const int index = blockIdx.z * blockDim.x + threadIdx.x; // if (threadIdx.x == 0) { // printf("%d %d %d %d\n", blockIdx.x, blockIdx.y, blockDim.x, threadIdx.x); // } const int num = disps.size(0); const int ht = disps.size(1); const int wd = disps.size(2); __shared__ int ix; __shared__ int jx; __shared__ float fx; __shared__ float fy; __shared__ float cx; __shared__ float cy; __shared__ float ti[3], tj[3], tij[3]; __shared__ float qi[4], qj[4], qij[4]; if (threadIdx.x == 0) { ix = static_cast<int>(inds[block_id]); jx = (neigh_id < 3) ? ix - neigh_id - 1 : ix + neigh_id; fx = intrinsics[0]; fy = intrinsics[1]; cx = intrinsics[2]; cy = intrinsics[3]; } __syncthreads(); if (jx < 0 || jx >= num) { return; } const float t = thresh[block_id]; // load poses from global memory if (threadIdx.x < 3) { ti[threadIdx.x] = poses[ix][threadIdx.x]; tj[threadIdx.x] = poses[jx][threadIdx.x]; } if (threadIdx.x < 4) { qi[threadIdx.x] = poses[ix][threadIdx.x+3]; qj[threadIdx.x] = poses[jx][threadIdx.x+3]; } __syncthreads(); if (threadIdx.x == 0) { relSE3(ti, qi, tj, qj, tij, qij); } //points float Xi[4]; float Xj[4]; __syncthreads(); if (index < ht*wd) { const int i = index / wd; const int j = index % wd; const float ui = static_cast<float>(j); const float vi = static_cast<float>(i); const float di = disps[ix][i][j]; // homogenous coordinates Xi[0] = (ui - cx) / fx; Xi[1] = (vi - cy) / fy; Xi[2] = 1; Xi[3] = di; // transform homogenous point actSE3(tij, qij, Xi, Xj); const float uj = fx * (Xj[0] / Xj[2]) + cx; const float vj = fy * (Xj[1] / Xj[2]) + cy; const float dj = Xj[3] / Xj[2]; const int u0 = static_cast<int>(floor(uj)); const int v0 = static_cast<int>(floor(vj)); if (u0 >= 0 && v0 >= 0 && u0 < wd-1 && v0 < ht-1) { const float wx = ceil(uj) - uj; const float wy = ceil(vj) - vj; const float d00 = disps[jx][v0+0][u0+0]; const float d01 = disps[jx][v0+0][u0+1]; const float d10 = disps[jx][v0+1][u0+0]; const float d11 = disps[jx][v0+1][u0+1]; const float dj_hat = wy*wx*d00 + wy*(1-wx)*d01 + (1-wy)*wx*d10 + (1-wy)*(1-wx)*d11; const float err = abs(1.0/dj - 1.0/dj_hat); if (abs(1.0/dj - 1.0/d00) < t) atomicAdd(&counter[block_id][i][j], 1.0f); else if (abs(1.0/dj - 1.0/d01) < t) atomicAdd(&counter[block_id][i][j], 1.0f); else if (abs(1.0/dj - 1.0/d10) < t) atomicAdd(&counter[block_id][i][j], 1.0f); else if (abs(1.0/dj - 1.0/d11) < t) atomicAdd(&counter[block_id][i][j], 1.0f); } } } __global__ void iproj_kernel( const torch::PackedTensorAccessor32<float,2,torch::RestrictPtrTraits> poses, const torch::PackedTensorAccessor32<float,3,torch::RestrictPtrTraits> disps, const torch::PackedTensorAccessor32<float,1,torch::RestrictPtrTraits> intrinsics, torch::PackedTensorAccessor32<float,4,torch::RestrictPtrTraits> points) { const int block_id = blockIdx.x; const int index = blockIdx.y * blockDim.x + threadIdx.x; const int num = disps.size(0); const int ht = disps.size(1); const int wd = disps.size(2); __shared__ float fx; 
__shared__ float fy; __shared__ float cx; __shared__ float cy; __shared__ float t[3]; __shared__ float q[4]; if (threadIdx.x == 0) { fx = intrinsics[0]; fy = intrinsics[1]; cx = intrinsics[2]; cy = intrinsics[3]; } __syncthreads(); // load poses from global memory if (threadIdx.x < 3) { t[threadIdx.x] = poses[block_id][threadIdx.x]; } if (threadIdx.x < 4) { q[threadIdx.x] = poses[block_id][threadIdx.x+3]; } __syncthreads(); //points float Xi[4]; float Xj[4]; if (index < ht*wd) { const int i = index / wd; const int j = index % wd; const float ui = static_cast<float>(j); const float vi = static_cast<float>(i); const float di = disps[block_id][i][j]; // homogenous coordinates Xi[0] = (ui - cx) / fx; Xi[1] = (vi - cy) / fy; Xi[2] = 1; Xi[3] = di; // transform homogenous point actSE3(t, q, Xi, Xj); points[block_id][i][j][0] = Xj[0] / Xj[3]; points[block_id][i][j][1] = Xj[1] / Xj[3]; points[block_id][i][j][2] = Xj[2] / Xj[3]; } } __global__ void accum_kernel( const torch::PackedTensorAccessor32<float,2,torch::RestrictPtrTraits> inps, const torch::PackedTensorAccessor32<long,1,torch::RestrictPtrTraits> ptrs, const torch::PackedTensorAccessor32<long,1,torch::RestrictPtrTraits> idxs, torch::PackedTensorAccessor32<float,2,torch::RestrictPtrTraits> outs) { const int block_id = blockIdx.x; const int D = inps.size(2); const int start = ptrs[block_id]; const int end = ptrs[block_id+1]; for (int k=threadIdx.x; k<D; k+=blockDim.x) { float x = 0; for (int i=start; i<end; i++) { x += inps[idxs[i]][k]; } outs[block_id][k] = x; } } __device__ void retrSE3(const float *xi, const float* t, const float* q, float* t1, float* q1) { // retraction on SE3 manifold float dt[3] = {0, 0, 0}; float dq[4] = {0, 0, 0, 1}; expSE3(xi, dt, dq); q1[0] = dq[3] * q[0] + dq[0] * q[3] + dq[1] * q[2] - dq[2] * q[1]; q1[1] = dq[3] * q[1] + dq[1] * q[3] + dq[2] * q[0] - dq[0] * q[2]; q1[2] = dq[3] * q[2] + dq[2] * q[3] + dq[0] * q[1] - dq[1] * q[0]; q1[3] = dq[3] * q[3] - dq[0] * q[0] - dq[1] * q[1] - dq[2] * q[2]; actSO3(dq, t, t1); t1[0] += dt[0]; t1[1] += dt[1]; t1[2] += dt[2]; } __global__ void pose_retr_kernel( torch::PackedTensorAccessor32<float,2,torch::RestrictPtrTraits> poses, const torch::PackedTensorAccessor32<float,2,torch::RestrictPtrTraits> dx, const int t0, const int t1) { for (int k=t0+threadIdx.x; k<t1; k+=blockDim.x) { float xi[6], q[4], q1[4], t[3], t1[3]; t[0] = poses[k][0]; t[1] = poses[k][1]; t[2] = poses[k][2]; q[0] = poses[k][3]; q[1] = poses[k][4]; q[2] = poses[k][5]; q[3] = poses[k][6]; for (int n=0; n<6; n++) { xi[n] = dx[k-t0][n]; } retrSE3(xi, t, q, t1, q1); poses[k][0] = t1[0]; poses[k][1] = t1[1]; poses[k][2] = t1[2]; poses[k][3] = q1[0]; poses[k][4] = q1[1]; poses[k][5] = q1[2]; poses[k][6] = q1[3]; } } __global__ void disp_retr_kernel( torch::PackedTensorAccessor32<float,3,torch::RestrictPtrTraits> disps, const torch::PackedTensorAccessor32<float,2,torch::RestrictPtrTraits> dz, const torch::PackedTensorAccessor32<long,1,torch::RestrictPtrTraits> inds) { const int i = inds[blockIdx.x]; const int ht = disps.size(1); const int wd = disps.size(2); for (int k=threadIdx.x; k<ht*wd; k+=blockDim.x) { float d = disps[i][k/wd][k%wd] + dz[blockIdx.x][k]; disps[i][k/wd][k%wd] = d; } } torch::Tensor accum_cuda(torch::Tensor data, torch::Tensor ix, torch::Tensor jx) { torch::Tensor ix_cpu = ix.to(torch::kCPU); torch::Tensor jx_cpu = jx.to(torch::kCPU); torch::Tensor inds = torch::argsort(ix_cpu); long* ix_data = ix_cpu.data_ptr<long>(); long* jx_data = jx_cpu.data_ptr<long>(); long* kx_data = inds.data_ptr<long>(); 
int count = jx.size(0); std::vector<int> cols; torch::Tensor ptrs_cpu = torch::zeros({count+1}, torch::TensorOptions().dtype(torch::kInt64)); long* ptrs_data = ptrs_cpu.data_ptr<long>(); ptrs_data[0] = 0; int i = 0; for (int j=0; j<count; j++) { while (i < ix.size(0) && ix_data[kx_data[i]] <= jx_data[j]) { if (ix_data[kx_data[i]] == jx_data[j]) cols.push_back(kx_data[i]); i++; } ptrs_data[j+1] = cols.size(); } torch::Tensor idxs_cpu = torch::zeros({long(cols.size())}, torch::TensorOptions().dtype(torch::kInt64)); long* idxs_data = idxs_cpu.data_ptr<long>(); for (int i=0; i<cols.size(); i++) { idxs_data[i] = cols[i]; } torch::Tensor ptrs = ptrs_cpu.to(torch::kCUDA); torch::Tensor idxs = idxs_cpu.to(torch::kCUDA); torch::Tensor out = torch::zeros({jx.size(0), data.size(1)}, torch::TensorOptions().dtype(torch::kFloat32).device(torch::kCUDA)); accum_kernel<<<count, THREADS>>>( data.packed_accessor32<float,2,torch::RestrictPtrTraits>(), ptrs.packed_accessor32<long,1,torch::RestrictPtrTraits>(), idxs.packed_accessor32<long,1,torch::RestrictPtrTraits>(), out.packed_accessor32<float,2,torch::RestrictPtrTraits>()); return out; } __global__ void EEt6x6_kernel( const torch::PackedTensorAccessor32<float,3,torch::RestrictPtrTraits> E, const torch::PackedTensorAccessor32<float,2,torch::RestrictPtrTraits> Q, const torch::PackedTensorAccessor32<long,2,torch::RestrictPtrTraits> idx, torch::PackedTensorAccessor32<float,3,torch::RestrictPtrTraits> S) { // indicices const int ix = idx[blockIdx.x][0]; const int jx = idx[blockIdx.x][1]; const int kx = idx[blockIdx.x][2]; const int D = E.size(2); float dS[6][6]; float ei[6]; float ej[6]; for (int i=0; i<6; i++) { for (int j=0; j<6; j++) { dS[i][j] = 0; } } for (int k=threadIdx.x; k<D; k+=blockDim.x) { const float q = Q[kx][k]; // coalesced memory read for (int n=0; n<6; n++) { ei[n] = E[ix][n][k] * q; ej[n] = E[jx][n][k]; } // block EEt for (int n=0; n<6; n++) { for (int m=0; m<6; m++) { dS[n][m] += ei[n] * ej[m]; } } } __syncthreads(); __shared__ float sdata[THREADS]; for (int n=0; n<6; n++) { for (int m=0; m<6; m++) { sdata[threadIdx.x] = dS[n][m]; blockReduce(sdata); if (threadIdx.x == 0) { S[blockIdx.x][n][m] = sdata[0]; } } } } __global__ void Ev6x1_kernel( const torch::PackedTensorAccessor32<float, 3, torch::RestrictPtrTraits> E, const torch::PackedTensorAccessor32<float, 2,torch::RestrictPtrTraits> Q, const torch::PackedTensorAccessor32<float,2,torch::RestrictPtrTraits> w, const torch::PackedTensorAccessor32<long,2,torch::RestrictPtrTraits> idx, torch::PackedTensorAccessor32<float,2,torch::RestrictPtrTraits> v) { const int D = E.size(2); const int kx = idx[blockIdx.x][0]; float b[6]; for (int n=0; n<6; n++) { b[n] = 0.0; } for (int k=threadIdx.x; k<D; k+=blockDim.x) { const float q_w = Q[kx][k] * w[kx][k]; for (int n=0; n<6; n++) { b[n] += q_w * E[blockIdx.x][n][k]; } } __syncthreads(); __shared__ float sdata[THREADS]; for (int n=0; n<6; n++) { sdata[threadIdx.x] = b[n]; blockReduce(sdata); if (threadIdx.x == 0) { v[blockIdx.x][n] += sdata[0]; } } } __global__ void EvT6x1_kernel( const torch::PackedTensorAccessor32<float,3,torch::RestrictPtrTraits> E, const torch::PackedTensorAccessor32<float,2,torch::RestrictPtrTraits> x, const torch::PackedTensorAccessor32<long,1,torch::RestrictPtrTraits> idx, torch::PackedTensorAccessor32<float,2,torch::RestrictPtrTraits> w) { const int D = E.size(2); const int ix = idx[blockIdx.x]; if (idx[blockIdx.x] <= 0 || idx[blockIdx.x] >= x.size(0)) return; for (int k=threadIdx.x; k<D; k+=blockDim.x) { float dw = 0; for (int 
n=0; n<6; n++) { dw += E[blockIdx.x][n][k] * x[ix][n]; } w[blockIdx.x][k] = dw; } } class SparseBlock { public: Eigen::SparseMatrix<double> A; Eigen::VectorX<double> b; SparseBlock(int N, int M) : N(N), M(M) { A = Eigen::SparseMatrix<double>(N*M, N*M); b = Eigen::VectorXd::Zero(N*M); } SparseBlock(Eigen::SparseMatrix<double> const& A, Eigen::VectorX<double> const& b, int N, int M) : A(A), b(b), N(N), M(M) {} void update_lhs(torch::Tensor As, torch::Tensor ii, torch::Tensor jj) { auto As_cpu = As.to(torch::kCPU).to(torch::kFloat64); auto ii_cpu = ii.to(torch::kCPU).to(torch::kInt64); auto jj_cpu = jj.to(torch::kCPU).to(torch::kInt64); auto As_acc = As_cpu.accessor<double,3>(); auto ii_acc = ii_cpu.accessor<long,1>(); auto jj_acc = jj_cpu.accessor<long,1>(); std::vector<T> tripletList; for (int n=0; n<ii.size(0); n++) { const int i = ii_acc[n]; const int j = jj_acc[n]; if (i >= 0 && j >= 0) { for (int k=0; k<M; k++) { for (int l=0; l<M; l++) { double val = As_acc[n][k][l]; tripletList.push_back(T(M*i + k, M*j + l, val)); } } } } A.setFromTriplets(tripletList.begin(), tripletList.end()); } void update_rhs(torch::Tensor bs, torch::Tensor ii) { auto bs_cpu = bs.to(torch::kCPU).to(torch::kFloat64); auto ii_cpu = ii.to(torch::kCPU).to(torch::kInt64); auto bs_acc = bs_cpu.accessor<double,2>(); auto ii_acc = ii_cpu.accessor<long,1>(); for (int n=0; n<ii.size(0); n++) { const int i = ii_acc[n]; if (i >= 0) { for (int j=0; j<M; j++) { b(i*M + j) += bs_acc[n][j]; } } } } SparseBlock operator-(const SparseBlock& S) { return SparseBlock(A - S.A, b - S.b, N, M); } std::tuple<torch::Tensor, torch::Tensor> get_dense() { Eigen::MatrixXd Ad = Eigen::MatrixXd(A); torch::Tensor H = torch::from_blob(Ad.data(), {N*M, N*M}, torch::TensorOptions() .dtype(torch::kFloat64)).to(torch::kCUDA).to(torch::kFloat32); torch::Tensor v = torch::from_blob(b.data(), {N*M, 1}, torch::TensorOptions() .dtype(torch::kFloat64)).to(torch::kCUDA).to(torch::kFloat32); return std::make_tuple(H, v); } torch::Tensor solve(const float lm=0.0001, const float ep=0.1) { torch::Tensor dx; Eigen::SparseMatrix<double> L(A); L.diagonal().array() += ep + lm * L.diagonal().array(); Eigen::SimplicialLLT<Eigen::SparseMatrix<double>> solver; solver.compute(L); if (solver.info() == Eigen::Success) { Eigen::VectorXd x = solver.solve(b); dx = torch::from_blob(x.data(), {N, M}, torch::TensorOptions() .dtype(torch::kFloat64)).to(torch::kCUDA).to(torch::kFloat32); } else { dx = torch::zeros({N, M}, torch::TensorOptions() .device(torch::kCUDA).dtype(torch::kFloat32)); } return dx; } private: const int N; const int M; }; SparseBlock schur_block(torch::Tensor E, torch::Tensor Q, torch::Tensor w, torch::Tensor ii, torch::Tensor jj, torch::Tensor kk, const int t0, const int t1) { torch::Tensor ii_cpu = ii.to(torch::kCPU); torch::Tensor jj_cpu = jj.to(torch::kCPU); torch::Tensor kk_cpu = kk.to(torch::kCPU); const int P = t1 - t0; const long* ii_data = ii_cpu.data_ptr<long>(); const long* jj_data = jj_cpu.data_ptr<long>(); const long* kk_data = kk_cpu.data_ptr<long>(); std::vector<std::vector<long>> graph(P); std::vector<std::vector<long>> index(P); for (int n=0; n<ii_cpu.size(0); n++) { const int j = jj_data[n]; const int k = kk_data[n]; if (j >= t0 && j <= t1) { const int t = j - t0; graph[t].push_back(k); index[t].push_back(n); } } std::vector<long> ii_list, jj_list, idx, jdx; for (int i=0; i<P; i++) { for (int j=0; j<P; j++) { for (int k=0; k < graph[i].size(); k++) { for (int l=0; l < graph[j].size(); l++) { if (graph[i][k] == graph[j][l]) { 
ii_list.push_back(i); jj_list.push_back(j); idx.push_back(index[i][k]); idx.push_back(index[j][l]); idx.push_back(graph[i][k]); } } } } } torch::Tensor ix_cuda = torch::from_blob(idx.data(), {long(idx.size())}, torch::TensorOptions().dtype(torch::kInt64)).to(torch::kCUDA).view({-1, 3}); torch::Tensor jx_cuda = torch::stack({kk_cpu}, -1) .to(torch::kCUDA).to(torch::kInt64); torch::Tensor ii2_cpu = torch::from_blob(ii_list.data(), {long(ii_list.size())}, torch::TensorOptions().dtype(torch::kInt64)).view({-1}); torch::Tensor jj2_cpu = torch::from_blob(jj_list.data(), {long(jj_list.size())}, torch::TensorOptions().dtype(torch::kInt64)).view({-1}); torch::Tensor S = torch::zeros({ix_cuda.size(0), 6, 6}, torch::TensorOptions().dtype(torch::kFloat32).device(torch::kCUDA)); torch::Tensor v = torch::zeros({jx_cuda.size(0), 6}, torch::TensorOptions().dtype(torch::kFloat32).device(torch::kCUDA)); EEt6x6_kernel<<<ix_cuda.size(0), THREADS>>>( E.packed_accessor32<float,3,torch::RestrictPtrTraits>(), Q.packed_accessor32<float,2,torch::RestrictPtrTraits>(), ix_cuda.packed_accessor32<long,2,torch::RestrictPtrTraits>(), S.packed_accessor32<float,3,torch::RestrictPtrTraits>()); Ev6x1_kernel<<<jx_cuda.size(0), THREADS>>>( E.packed_accessor32<float,3,torch::RestrictPtrTraits>(), Q.packed_accessor32<float,2,torch::RestrictPtrTraits>(), w.packed_accessor32<float,2,torch::RestrictPtrTraits>(), jx_cuda.packed_accessor32<long,2,torch::RestrictPtrTraits>(), v.packed_accessor32<float,2,torch::RestrictPtrTraits>()); // schur block SparseBlock A(P, 6); A.update_lhs(S, ii2_cpu, jj2_cpu); A.update_rhs(v, jj_cpu - t0); return A; } std::vector<torch::Tensor> ba_cuda( torch::Tensor poses, torch::Tensor disps, torch::Tensor intrinsics, torch::Tensor disps_sens, torch::Tensor targets, torch::Tensor weights, torch::Tensor eta, torch::Tensor ii, torch::Tensor jj, const int t0, const int t1, const int iterations, const float lm, const float ep, const bool motion_only) { auto opts = poses.options(); const int num = ii.size(0); const int ht = disps.size(1); const int wd = disps.size(2); torch::Tensor ts = torch::arange(t0, t1).to(torch::kCUDA); torch::Tensor ii_exp = torch::cat({ts, ii}, 0); torch::Tensor jj_exp = torch::cat({ts, jj}, 0); std::tuple<torch::Tensor, torch::Tensor> kuniq = torch::_unique(ii_exp, true, true); torch::Tensor kx = std::get<0>(kuniq); torch::Tensor kk_exp = std::get<1>(kuniq); torch::Tensor dx; torch::Tensor dz; // initialize buffers torch::Tensor Hs = torch::zeros({4, num, 6, 6}, opts); torch::Tensor vs = torch::zeros({2, num, 6}, opts); torch::Tensor Eii = torch::zeros({num, 6, ht*wd}, opts); torch::Tensor Eij = torch::zeros({num, 6, ht*wd}, opts); torch::Tensor Cii = torch::zeros({num, ht*wd}, opts); torch::Tensor wi = torch::zeros({num, ht*wd}, opts); for (int itr=0; itr<iterations; itr++) { projective_transform_kernel<<<num, THREADS>>>( targets.packed_accessor32<float,4,torch::RestrictPtrTraits>(), weights.packed_accessor32<float,4,torch::RestrictPtrTraits>(), poses.packed_accessor32<float,2,torch::RestrictPtrTraits>(), disps.packed_accessor32<float,3,torch::RestrictPtrTraits>(), intrinsics.packed_accessor32<float,1,torch::RestrictPtrTraits>(), ii.packed_accessor32<long,1,torch::RestrictPtrTraits>(), jj.packed_accessor32<long,1,torch::RestrictPtrTraits>(), Hs.packed_accessor32<float,4,torch::RestrictPtrTraits>(), vs.packed_accessor32<float,3,torch::RestrictPtrTraits>(), Eii.packed_accessor32<float,3,torch::RestrictPtrTraits>(), Eij.packed_accessor32<float,3,torch::RestrictPtrTraits>(), 
Cii.packed_accessor32<float,2,torch::RestrictPtrTraits>(), wi.packed_accessor32<float,2,torch::RestrictPtrTraits>()); // pose x pose block SparseBlock A(t1 - t0, 6); A.update_lhs(Hs.reshape({-1, 6, 6}), torch::cat({ii, ii, jj, jj}) - t0, torch::cat({ii, jj, ii, jj}) - t0); A.update_rhs(vs.reshape({-1, 6}), torch::cat({ii, jj}) - t0); if (motion_only) { dx = A.solve(lm, ep); // update poses pose_retr_kernel<<<1, THREADS>>>( poses.packed_accessor32<float,2,torch::RestrictPtrTraits>(), dx.packed_accessor32<float,2,torch::RestrictPtrTraits>(), t0, t1); } else { // add depth residual if there are depth sensor measurements const float alpha = 0.05; torch::Tensor m = (disps_sens.index({kx, "..."}) > 0).to(torch::TensorOptions().dtype(torch::kFloat32)).view({-1, ht*wd}); torch::Tensor C = accum_cuda(Cii, ii, kx) + m * alpha + (1 - m) * eta.view({-1, ht*wd}); torch::Tensor w = accum_cuda(wi, ii, kx) - m * alpha * (disps.index({kx, "..."}) - disps_sens.index({kx, "..."})).view({-1, ht*wd}); torch::Tensor Q = 1.0 / C; torch::Tensor Ei = accum_cuda(Eii.view({num, 6*ht*wd}), ii, ts).view({t1-t0, 6, ht*wd}); torch::Tensor E = torch::cat({Ei, Eij}, 0); SparseBlock S = schur_block(E, Q, w, ii_exp, jj_exp, kk_exp, t0, t1); dx = (A - S).solve(lm, ep); torch::Tensor ix = jj_exp - t0; torch::Tensor dw = torch::zeros({ix.size(0), ht*wd}, opts); EvT6x1_kernel<<<ix.size(0), THREADS>>>( E.packed_accessor32<float,3,torch::RestrictPtrTraits>(), dx.packed_accessor32<float,2,torch::RestrictPtrTraits>(), ix.packed_accessor32<long,1,torch::RestrictPtrTraits>(), dw.packed_accessor32<float,2,torch::RestrictPtrTraits>()); dz = Q * (w - accum_cuda(dw, ii_exp, kx)); // update poses pose_retr_kernel<<<1, THREADS>>>( poses.packed_accessor32<float,2,torch::RestrictPtrTraits>(), dx.packed_accessor32<float,2,torch::RestrictPtrTraits>(), t0, t1); // update disparity maps disp_retr_kernel<<<kx.size(0), THREADS>>>( disps.packed_accessor32<float,3,torch::RestrictPtrTraits>(), dz.packed_accessor32<float,2,torch::RestrictPtrTraits>(), kx.packed_accessor32<long,1,torch::RestrictPtrTraits>()); } } return {dx, dz}; } torch::Tensor frame_distance_cuda( torch::Tensor poses, torch::Tensor disps, torch::Tensor intrinsics, torch::Tensor ii, torch::Tensor jj, const float beta) { auto opts = poses.options(); const int num = ii.size(0); torch::Tensor dist = torch::zeros({num}, opts); frame_distance_kernel<<<num, THREADS>>>( poses.packed_accessor32<float,2,torch::RestrictPtrTraits>(), disps.packed_accessor32<float,3,torch::RestrictPtrTraits>(), intrinsics.packed_accessor32<float,1,torch::RestrictPtrTraits>(), ii.packed_accessor32<long,1,torch::RestrictPtrTraits>(), jj.packed_accessor32<long,1,torch::RestrictPtrTraits>(), dist.packed_accessor32<float,1,torch::RestrictPtrTraits>(), beta); return dist; } std::vector<torch::Tensor> projmap_cuda( torch::Tensor poses, torch::Tensor disps, torch::Tensor intrinsics, torch::Tensor ii, torch::Tensor jj) { auto opts = poses.options(); const int num = ii.size(0); const int ht = disps.size(1); const int wd = disps.size(2); torch::Tensor coords = torch::zeros({num, ht, wd, 3}, opts); torch::Tensor valid = torch::zeros({num, ht, wd, 1}, opts); projmap_kernel<<<num, THREADS>>>( poses.packed_accessor32<float,2,torch::RestrictPtrTraits>(), disps.packed_accessor32<float,3,torch::RestrictPtrTraits>(), intrinsics.packed_accessor32<float,1,torch::RestrictPtrTraits>(), ii.packed_accessor32<long,1,torch::RestrictPtrTraits>(), jj.packed_accessor32<long,1,torch::RestrictPtrTraits>(), 
coords.packed_accessor32<float,4,torch::RestrictPtrTraits>(), valid.packed_accessor32<float,4,torch::RestrictPtrTraits>()); return {coords, valid}; } torch::Tensor depth_filter_cuda( torch::Tensor poses, torch::Tensor disps, torch::Tensor intrinsics, torch::Tensor ix, torch::Tensor thresh) { const int num = ix.size(0); const int ht = disps.size(1); const int wd = disps.size(2); torch::Tensor counter = torch::zeros({num, ht, wd}, disps.options()); dim3 blocks(num, 6, NUM_BLOCKS(ht * wd)); depth_filter_kernel<<<blocks, THREADS>>>( poses.packed_accessor32<float,2,torch::RestrictPtrTraits>(), disps.packed_accessor32<float,3,torch::RestrictPtrTraits>(), intrinsics.packed_accessor32<float,1,torch::RestrictPtrTraits>(), ix.packed_accessor32<long,1,torch::RestrictPtrTraits>(), thresh.packed_accessor32<float,1,torch::RestrictPtrTraits>(), counter.packed_accessor32<float,3,torch::RestrictPtrTraits>()); return counter; } torch::Tensor iproj_cuda( torch::Tensor poses, torch::Tensor disps, torch::Tensor intrinsics) { const int nm = disps.size(0); const int ht = disps.size(1); const int wd = disps.size(2); auto opts = disps.options(); torch::Tensor points = torch::zeros({nm, ht, wd, 3}, opts); dim3 blocks(nm, NUM_BLOCKS(ht * wd)); iproj_kernel<<<blocks, THREADS>>>( poses.packed_accessor32<float,2,torch::RestrictPtrTraits>(), disps.packed_accessor32<float,3,torch::RestrictPtrTraits>(), intrinsics.packed_accessor32<float,1,torch::RestrictPtrTraits>(), points.packed_accessor32<float,4,torch::RestrictPtrTraits>()); return points; }
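// A minimal dense sketch (toy sizes and values assumed) of the Schur-complement
// elimination that ba_cuda and schur_block perform in block-sparse form: the
// diagonal per-pixel depth block C (with inverse Q) is eliminated so the pose
// update dx is solved first, and the depth update dz is recovered by
// back-substitution as dz = Q (w - E^T dx). Both printed norms should be ~0,
// confirming the reduced solve matches solving the full pose/depth system.
#include <Eigen/Dense>
#include <iostream>

int main() {
    const int np = 2;                       // toy number of pose parameters
    const int nd = 3;                       // toy number of per-pixel depth parameters

    Eigen::MatrixXd B(np, np);              // pose-pose block (Hs in the CUDA code)
    B << 3.0, 0.5,
         0.5, 2.0;
    Eigen::MatrixXd E(np, nd);              // pose-depth coupling block (Eii/Eij)
    E << 0.2, -0.1,  0.3,
         0.0,  0.4, -0.2;
    Eigen::VectorXd Cdiag = Eigen::VectorXd::Constant(nd, 4.0);  // diagonal depth block (Cii + damping)
    Eigen::VectorXd v(np);  v << 1.0, -2.0;                      // pose right-hand side (vs)
    Eigen::VectorXd w(nd);  w << 0.5, 0.0, -1.0;                 // depth right-hand side (w)

    // Schur complement: (B - E Q E^T) dx = v - E Q w, then dz = Q (w - E^T dx)
    Eigen::VectorXd Q = Cdiag.cwiseInverse();
    Eigen::MatrixXd S = B - E * Q.asDiagonal() * E.transpose();
    Eigen::VectorXd dx = S.ldlt().solve(v - E * (Q.asDiagonal() * w));
    Eigen::VectorXd dz = Q.asDiagonal() * (w - E.transpose() * dx);

    // reference: solve the full (np+nd) x (np+nd) system in one shot
    Eigen::MatrixXd H(np + nd, np + nd);
    H << B, E,
         E.transpose(), Eigen::MatrixXd(Cdiag.asDiagonal());
    Eigen::VectorXd rhs(np + nd);
    rhs << v, w;
    Eigen::VectorXd full = H.ldlt().solve(rhs);

    std::cout << "pose diff "   << (full.head(np) - dx).norm()
              << ", depth diff " << (full.tail(nd) - dz).norm() << std::endl;
    return 0;
}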
#pragma once #include <ptx_primitives.cuh> namespace IndexQueueAccessControl { struct AtomicCheckedAbortOnOverflow { template<unsigned int SIZE, class T> __device__ static int enqueue(const T& element, int& count, unsigned int& back, T* indices, T UNUSED) { int fill = atomicAdd(&count, 1); if (fill < static_cast<int>(SIZE-1)) { unsigned int pos = atomicInc(&back, SIZE - 1U); while (atomicCAS(indices + pos, UNUSED, element) != UNUSED) __threadfence(); return fill; } else __trap(); } template<unsigned int SIZE, class T> __device__ static void read(T& localElement, T* readElement, T UNUSED) { while ((localElement = atomicExch(readElement, UNUSED)) == UNUSED) __threadfence(); } }; struct AtomicCheckedWaitOnOverflow { template<unsigned int SIZE, class T> __device__ static int enqueue(const T& element, int& count, unsigned int& back, T* indices, T UNUSED) { int fill = atomicAdd(&count, 1); unsigned int pos = atomicInc(&back, SIZE - 1U); while (atomicCAS(indices + pos, UNUSED, element) != UNUSED) __threadfence(); return fill; } template<unsigned int SIZE, class T> __device__ static void read(T& localElement, T* readElement, T UNUSED) { while ((localElement = atomicExch(readElement, UNUSED)) == UNUSED) __threadfence(); } }; struct NonAtomicCheckedAbortOnOverflow { template<unsigned int SIZE, class T> __device__ static int enqueue(const T& element, int& count, unsigned int& back, T* indices, T UNUSED) { int fill = atomicAdd(&count, 1); if (fill < static_cast<int>(SIZE-1)) { unsigned int pos = atomicInc(&back, SIZE - 1U); while (ldg_cg(indices + pos) != UNUSED) __threadfence(); stg_cg(indices + pos, element); } else __trap(); return fill; } template<unsigned int SIZE, class T> __device__ static void read(T& localElement, T* readElement, T UNUSED) { while ((localElement = ldg_cg(readElement)) == UNUSED) __threadfence(); stg_cg(readElement, UNUSED); } }; struct NonAtomicCheckedWaitOnOverflow { template<unsigned int SIZE, class T> __device__ static int enqueue(const T& element, int& count, unsigned int& back, T* indices, T UNUSED) { int fill = atomicAdd(&count, 1); //while (fill >= static_cast<int>(SIZE)) // fill = ldg_cg(&count); unsigned int pos = atomicInc(&back, SIZE - 1U); while (ldg_cg(indices + pos) != UNUSED) __threadfence(); stg_cg(indices + pos, element); return fill; } template<unsigned int SIZE, class T> __device__ static void read(T& localElement, T* readElement, T UNUSED) { while ((localElement = ldg_cg(readElement)) == UNUSED) __threadfence(); stg_cg(readElement, UNUSED); } }; template <bool AtomicAccess, bool AbortOnOverflow> class EnumAccessControl; template <> class EnumAccessControl<true, true> : public AtomicCheckedAbortOnOverflow{}; template <> class EnumAccessControl<true, false> : public AtomicCheckedWaitOnOverflow{}; template <> class EnumAccessControl<false, true> : public NonAtomicCheckedAbortOnOverflow{}; template <> class EnumAccessControl<false, false> : public NonAtomicCheckedWaitOnOverflow{}; } template <unsigned int NUMQUEUES, unsigned int SIZE, typename T = unsigned int, class AccessControl = IndexQueueAccessControl::AtomicCheckedAbortOnOverflow, T UNUSED = static_cast<T>(-1), bool TRACK_FILL_LEVEL = false> class MultiIndexQueue { static constexpr int InternalCounterSize = (NUMQUEUES + 1023U) / 1024U * 1024U; //static constexpr T UNUSED = static_cast<T>(-1); int count[InternalCounterSize]; unsigned int front[InternalCounterSize]; unsigned int back[InternalCounterSize]; int max_fill_level[InternalCounterSize]; T indices[NUMQUEUES][SIZE]; public: typedef T Type; 
__device__ void init() { int linid = blockIdx.x * blockDim.x + threadIdx.x; if (linid < InternalCounterSize) { count[linid] = 0; front[linid] = back[linid] = 0U; if (TRACK_FILL_LEVEL) max_fill_level[linid] = 0; } for (int q = 0; q < NUMQUEUES; ++q) for (int i = linid; i < SIZE; i += blockDim.x * gridDim.x) indices[q][i] = UNUSED; } __device__ void writeMaxFillLevel(int* dest) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < NUMQUEUES; i += blockDim.x * gridDim.x) dest[i] = max_fill_level[i]; } __device__ void enqueue(int q, T i) { int fill_level = AccessControl:: template enqueue<SIZE,T>(i, count[q], back[q], indices[q], UNUSED); if (TRACK_FILL_LEVEL) atomicMax(&max_fill_level[q], fill_level); } __device__ int singleThreadReserveRead(int q, int num) { int readable = atomicSub(&count[q], num); if (readable < num) { int putback = min(num - readable, num); atomicAdd(&count[q], putback); num = num - putback; } return num; } __device__ int singleThreadTake(int q, int num) { return atomicAdd(&front[q], num); } __device__ void multiThreadRead(int q, T* localElement, int tid, int offset) { int pos = (offset + tid) % SIZE; T el; AccessControl:: template read<SIZE, T>(el, indices[q] + pos, UNUSED); *localElement = el; } __device__ int dequeueBlock(int q, T* localElement, int num) { __shared__ int take, offset; if (threadIdx.x == 0) { int ttake = singleThreadReserveRead(q, num); offset = singleThreadTake(q, ttake); take = ttake; } __syncthreads(); if (threadIdx.x < take) multiThreadRead(q, localElement, threadIdx.x, offset); return take; } __device__ int dequeueBlockUnsave(int q, T* localElement, int num) { __shared__ int take, offset; if (threadIdx.x == 0) { int ttake = singleThreadReserveRead(q, num); take = ttake; unsigned int toffset = ldg_cg(&front[q]); offset = toffset; stg_cg(&front[q], toffset + ttake); } __syncthreads(); if (threadIdx.x < take) multiThreadRead(q, localElement, threadIdx.x, offset); return take; } __device__ int dequeueWarp(int q, T* localElement, int num) { int take, offset, lid = laneid(); if (lid == 0) { take = singleThreadReserveRead(q, num); offset = atomicAdd(&front[q], take); } take = __shfl_sync(~0U, take, 0); if (lid < take) { offset = __shfl_sync(~0U, offset, 0); multiThreadRead(q, localElement, threadIdx.x, offset); } return take; } __device__ int dequeue(int q, T& element) { int readable = atomicSub(&count[q], 1); if (readable <= 0) { atomicAdd(&count[q], 1); return 0; } unsigned int pos = atomicAdd(&front[q], 1) % SIZE; T el; AccessControl:: template readread<SIZE, T>(el, indices[q] + pos, UNUSED); element = el; return 1; } __device__ int size(int q) { return *const_cast<volatile int*>(&count[q]); } struct QueuePos { unsigned int pos; public: QueuePos() = default; __device__ QueuePos(unsigned int pos) : pos(pos) {} __device__ const QueuePos& operator += (unsigned int n) { pos = (pos + n) % SIZE; return *this; } __device__ QueuePos operator + (unsigned int n) { return QueuePos((pos + n) % SIZE); } __device__ bool operator == (const QueuePos& other) { return pos == other.pos; } __device__ bool operator != (const QueuePos& other) { return pos != other.pos; } __device__ T read(MultiIndexQueue& qs, int q) { T copy; while ((copy = ldg_cg(qs.indices[q] + pos)) == UNUSED) __threadfence(); return copy; } __device__ void write(MultiIndexQueue& qs, int q, const T& val) { stg_cg(qs.indices[q] + pos, val); } __device__ int until(const QueuePos& other) { return (other.pos - pos + SIZE) % SIZE; } }; __device__ QueuePos begin(int q) { return QueuePos(ldg_cg(&front[q])); 
} __device__ QueuePos end(int q) { return QueuePos(ldg_cg(&back[q])); } __device__ void readState(int q, int &count, unsigned int &front, unsigned int &back) { count = this->count[q]; front = this->front[q]; back = this->back[q]; } }; //template <unsigned int SIZE, typename T = unsigned int, bool OVERFLOWCHECK = true, bool WAITFORFREE = false, bool AVOIDATOMICS = false> //class IndexQueue //{ //private: // static constexpr T UNUSED = static_cast<T>(-1); // // T indices[SIZE]; // // int count; // // unsigned int front; // unsigned int back; // //public: // __device__ // void init() // { // if (blockIdx.x == 0 && threadIdx.x == 0) // { // count = 0; // front = back = 0U; // } // // for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < SIZE; i += blockDim.x * gridDim.x) // indices[i] = UNUSED; // } // // __device__ // void enqueue(T i) // { // int fill = atomicAdd(&count, 1); // if (!OVERFLOWCHECK || fill < static_cast<int>(SIZE)) // { // unsigned int pos = atomicInc(&back, SIZE - 1U); // if (OVERFLOWCHECK || WAITFORFREE) // { // if (AVOIDATOMICS) // { // while (ldg_cg(indices + pos) != UNUSED) // __threadfence(); // stg_cg(indices + pos, i); // __threadfence(); // } // else // { // while (atomicCAS(indices + pos, UNUSED, i) != UNUSED) // __threadfence(); // } // } // else if (AVOIDATOMICS) // { // stg_cg(indices + pos, i); // __threadfence(); // } // else // atomicExch(indices + pos, i); // } // else // __trap(); // } // // __device__ // int singleThreadReserveRead(int& offset, int num) // { // int readable = atomicSub(&count, num); // if (readable < num) // { // int putback = min(num - readable, num); // atomicAdd(&count, putback); // num = num - putback; // // note: if could be removed -> trade useless atomicAdd vs if // if (num == 0) // return 0; // } // offset = atomicAdd(&front, num); // return num; // } // // __device__ // void multiThreadRead(T* localElement, int tid, int offset) // { // int pos = (offset + tid) % SIZE; // T el; // if (AVOIDATOMICS) // { // while ((el = ldg_cg(indices + pos)) == UNUSED) // __threadfence(); // stg_cg(indices + pos, UNUSED); // __threadfence(); // } // else // { // while ((el = atomicExch(indices + pos, UNUSED)) == UNUSED) // __threadfence(); // } // *localElement = el; // } // // __device__ // int dequeueBlock(T* localElement, int num) // { // __shared__ int take, offset; // if (threadIdx.x == 0) // take = singleThreadReserveRead(offset, num); // __syncthreads(); // if (threadIdx.x < take) // multiThreadRead(localElement, threadIdx.x, offset); // return take; // } // // __device__ // int dequeueWarp(T* localElement, int num) // { // int take, offset, lid = laneid(); // if (lid == 0) // take = singleThreadReserveRead(offset, num); // take = __shfl_sync(~0U, take, 0); // if (lid < take) // { // offset = __shfl_sync(~0U, offset, 0); // multiThreadRead(localElement, threadIdx.x, offset); // } // return take; // } // // __device__ // int dequeue(T& element) // { // int readable = atomicSub(&count, 1); // if (readable <= 0) // { // atomicAdd(&count, 1); // return 0; // } // unsigned int pos = atomicAdd(&front, 1) % SIZE; // element = atomicExch(indices + pos, UNUSED); // return 1; // } // // __device__ int size() // { // return *const_cast <volatile int*>(&count); // } // // __device__ // void readState(int &count, unsigned int &front, unsigned int &back) // { // count = this->count; // front = this->front; // back = this->back; // } //}; #endif // INCLUDED_CURE_INDEX_QUEUE
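// --------------------------------------------------------------------------
// Illustrative usage sketch (not part of the original header).  The exact
// template parameters of MultiIndexQueue (capacity, number of queues, element
// type, AccessControl policy, fill-level tracking) are defined elsewhere in
// this project, so the kernels below are written against a generic queue type
// Q and only rely on the members shown above: enqueue() and dequeueBlock().
// The element type is assumed to be unsigned int, matching the default used
// by the commented-out IndexQueue and its UNUSED = static_cast<T>(-1)
// convention.  Kept as a comment so it has no effect on compilation.
//
// template <typename Q>
// __global__ void exampleProduce(Q* queue, const unsigned int* work, int n)
// {
//     int i = blockIdx.x * blockDim.x + threadIdx.x;
//     if (i < n)
//         queue->enqueue(0, work[i]);          // push every item into queue 0
// }
//
// template <typename Q>
// __global__ void exampleConsume(Q* queue, unsigned int* out, int* outCount)
// {
//     unsigned int item;
//     for (;;)
//     {
//         // every thread of the block must call dequeueBlock(): thread 0
//         // reserves up to blockDim.x elements, and the first 'got' threads
//         // each receive one element in 'item'
//         int got = queue->dequeueBlock(0, &item, blockDim.x);
//         if (got == 0)
//             break;                            // queue drained
//         if (threadIdx.x < got)
//             out[atomicAdd(outCount, 1)] = item;
//         __syncthreads();                      // keep iterations in lockstep
//     }
// }
// --------------------------------------------------------------------------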
#include <stdlib.h> #include <stdio.h> #include "cuda.h" #include <cufft.h> extern int nblock_size; extern int maxgsx; static cudaError_t crc; static cufftResult cfrc = CUFFT_SUCCESS; static cufftHandle planrx = 0, planxr = 0, planrxn = 0, planxrn = 0; static cufftHandle plany = 0, planyn = 0; __global__ void gpuppmtposes(float2 f[], float2 sm[], int nx, int kxp, int kyps, int kstrt, int nvp, int kxyp, int nxv, int kypd); __global__ void gpuppmtposer(float2 g[], float2 tm[], int ny, int kyp, int kxps, int kstrt, int nvp, int kxyp, int nyv, int kxpd); __global__ void gpuppmtposesn(float2 fn[], float2 sm[], int nx, int kxp, int kyps, int kstrt, int nvp, int ndim, int kxyp, int nxv, int kypd); __global__ void gpuppmtposern(float2 gn[], float2 tm[], int ny, int kyp, int kxps, int kstrt, int nvp, int ndim, int kxyp, int nyv, int kxpd); /*--------------------------------------------------------------------*/ __global__ void gpuppsmtposes(float2 f[], float2 sm[], float ani, int nx, int kxp, int kyps, int kstrt, int nvp, int kxyp, int nxv, int kypd) { /* extract data to send and normalize */ /* local data */ int ks, j, k, n, nn, id, joff, ld; float2 a; ks = kstrt - 1; /* for (n = 0; n < nvp; n++) { */ n = blockIdx.y; if (n < nvp) { id = n - ks; if (id < 0) id += nvp; /* find which node sends to itself */ nn = 2*ks; if (nn >= nvp) nn -= nvp; /* adjust counter */ if (n > nn) n -= 1; /* do not send local data */ if (id != ks) { joff = kxp*id; ld = nx - joff; ld = 0 > ld ? 0 : ld; ld = kxp < ld ? kxp : ld; /* for (k = 0; k < kyps; k++) { */ k = blockIdx.x; if (k < kyps) { /* for (j = 0; j < ld; j++) { */ j = threadIdx.x; while (j < ld) { a = f[j+joff+nxv*k]; a.x = ani*a.x; a.y = ani*a.y; sm[j+ld*k+kxyp*n] = a; j += blockDim.x; } } } } return; } /*--------------------------------------------------------------------*/ __global__ void gpuppsmtposesn(float2 fn[], float2 sm[], float ani, int nx, int kxp, int kyps, int kstrt, int nvp, int ndim, int kxyp, int nxv, int kypd) { /* extract vector data to send and normalize */ /* local data */ int ks, i, j, k, n, nn, id, joff, ld, nnxv, nkxyp; float2 a; ks = kstrt - 1; nnxv = ndim*nxv; nkxyp = ndim*kxyp; /* for (n = 0; n < nvp; n++) { */ n = blockIdx.y; if (n < nvp) { id = n - ks; if (id < 0) id += nvp; /* find which node sends to itself */ nn = 2*ks; if (nn >= nvp) nn -= nvp; /* adjust counter */ if (n > nn) n -= 1; /* do not send local data */ if (id != ks) { joff = kxp*id; ld = nx - joff; ld = 0 > ld ? 0 : ld; ld = kxp < ld ? kxp : ld; /* for (k = 0; k < kyps; k++) { */ k = blockIdx.x; if (k < kyps) { /* for (j = 0; j < ld; j++) { */ j = threadIdx.x; while (j < ld) { for (i = 0; i < ndim; i++) { a = fn[j+joff+nxv*i+nnxv*k]; a.x = ani*a.x; a.y = ani*a.y; sm[j+ld*(i+ndim*k)+nkxyp*n] = a; } j += blockDim.x; } } } } return; } /*--------------------------------------------------------------------*/ __global__ void gpuppsltpose(float2 f[], float2 g[], float ani, int nx, int ny, int kxp, int kyp, int kstrt, int nxv, int nyv) { /* transpose local data with scaling */ /* local data */ int mxv, j, k, ks, kxps, kyps, joff, koff, js, jj, kk; float2 a; /* The size of the shared memory array is as follows: */ /* float2 s2[(mx + 1)*mx]; */ extern __shared__ float2 s2[]; mxv = blockDim.x + 1; ks = kstrt - 1; joff = kxp*ks; koff = kyp*ks; kxps = nx - joff; kxps = 0 > kxps ? 0 : kxps; kxps = kxp < kxps ? kxp : kxps; kyps = ny - koff; kyps = 0 > kyps ? 0 : kyps; kyps = kyp < kyps ? 
kyp : kyps; js = threadIdx.x; ks = threadIdx.y; jj = blockDim.x*blockIdx.x; kk = blockDim.y*blockIdx.y; j = js + jj; k = ks + kk; if ((j < kxps) && (k < kyps)) { s2[js+mxv*ks] = f[j+joff+nxv*k]; } /* synchronize threads */ __syncthreads(); j = ks + jj; k = js + kk; if ((j < kxps) && (k < kyps)) { a = s2[ks+mxv*js]; a.x = ani*a.x; a.y = ani*a.y; g[k+koff+nyv*j] = a; } return; } /*--------------------------------------------------------------------*/ __global__ void gpuppsltposen(float2 fn[], float2 gn[], float ani, int nx, int ny, int kxp, int kyp, int kstrt, int ndim, int nxv, int nyv) { /* transpose local vector data with scaling */ /* local data */ int mxv, i, j, k, ks, kxps, kyps, joff, koff, js, jj, kk; int nnxv, nnyv; float2 a; /* The size of the shared memory array is as follows: */ /* float2 s2n[ndim*(mx + 1)*mx]; */ extern __shared__ float2 s2n[]; mxv = blockDim.x + 1; ks = kstrt - 1; nnxv = ndim*nxv; nnyv = ndim*nyv; joff = kxp*ks; koff = kyp*ks; kxps = nx - joff; kxps = 0 > kxps ? 0 : kxps; kxps = kxp < kxps ? kxp : kxps; kyps = ny - koff; kyps = 0 > kyps ? 0 : kyps; kyps = kyp < kyps ? kyp : kyps; js = threadIdx.x; ks = threadIdx.y; jj = blockDim.x*blockIdx.x; kk = blockDim.y*blockIdx.y; j = js + jj; k = ks + kk; if ((j < kxps) && (k < kyps)) { for (i = 0; i < ndim; i++) { s2n[js+mxv*(i+ndim*ks)] = fn[j+joff+nxv*i+nnxv*k]; } } /* synchronize threads */ __syncthreads(); j = ks + jj; k = js + kk; if ((j < kxps) && (k < kyps)) { for (i = 0; i < ndim; i++) { a = s2n[ks+mxv*(i+ndim*js)]; a.x = ani*a.x; a.y = ani*a.y; gn[k+koff+nyv*i+nnyv*j] = a; } } return; } /*--------------------------------------------------------------------*/ extern "C" void gpupfft2rrcuinit(int nx, int kypp, int ndim) { if (kypp <= 0) return; cfrc = cufftPlan1d(&planrx,nx,CUFFT_R2C,kypp); if (cfrc) { printf("cufftPlan1d planrx error=%d\n",cfrc); exit(1); } cfrc = cufftPlan1d(&planxr,nx,CUFFT_C2R,kypp); if (cfrc) { printf("cufftPlan1d planxr error=%d\n",cfrc); exit(1); } cfrc = cufftPlan1d(&planrxn,nx,CUFFT_R2C,ndim*kypp); if (cfrc) { printf("cufftPlan1d planrxn error=%d\n",cfrc); exit(1); } cfrc = cufftPlan1d(&planxrn,nx,CUFFT_C2R,ndim*kypp); if (cfrc) { printf("cufftPlan1d planxrn error=%d\n",cfrc); exit(1); } return; } /*--------------------------------------------------------------------*/ extern "C" void gpupfft2cuinit(int kxpp, int ny, int ndim) { if (kxpp <= 0) return; cfrc = cufftPlan1d(&plany,ny,CUFFT_C2C,kxpp); if (cfrc) { printf("cufftPlan1d plany error=%d\n",cfrc); exit(1); } cfrc = cufftPlan1d(&planyn,ny,CUFFT_C2C,ndim*kxpp); if (cfrc) { printf("cufftPlan1d planyn error=%d\n",cfrc); exit(1); } return; } /*--------------------------------------------------------------------*/ extern "C" void gpupfft2rrcudel() { if (planrx != 0) cfrc = cufftDestroy(planrx); if (cfrc) { printf("cufftPlan1d planrx error=%d\n",cfrc); exit(1); } if (planxr != 0) cfrc = cufftDestroy(planxr); if (cfrc) { printf("cufftPlan1d planxr error=%d\n",cfrc); exit(1); } if (planrxn != 0) cfrc = cufftDestroy(planrxn); if (cfrc) { printf("cufftPlan1d planrxn error=%d\n",cfrc); exit(1); } if (planxr != 0) cfrc = cufftDestroy(planxrn); if (cfrc) { printf("cufftPlan1d planxrn error=%d\n",cfrc); exit(1); } return; } /*--------------------------------------------------------------------*/ extern "C" void gpupfft2cudel() { if (plany != 0) cfrc = cufftDestroy(plany); if (cfrc) { printf("cufftPlan1d plany error=%d\n",cfrc); exit(1); } if (planyn != 0) cfrc = cufftDestroy(planyn); if (cfrc) { printf("cufftPlan1d planyn error=%d\n",cfrc); 
exit(1); } return; } /*--------------------------------------------------------------------*/ extern "C" void gpupfft2rrcux(float2 *f, float2 *bsm, int isign, int indx, int indy, int kstrt, int nvp, int kxp1, int kyp, int nxh1d, int kypd) { /* wrapper function for real to complex fft in x, */ /* without packed data */ /* uses 1D real to complex and complex to complex NVIDIA FFTs */ /* nxh1d must be = nx/2+1 */ /* local data */ int nx, nxh1, ny, ks, kypp, kxyp, ns; int mx = 16; float ani; dim3 dimBlock(nblock_size); dim3 dimBlockt(mx,mx); /* calculate range of indices */ nx = 1L<<indx; nxh1 = nx/2 + 1; ny = 1L<<indy; ks = kstrt - 1; kypp = ny - kyp*ks; kypp = 0 > kypp ? 0 : kypp; kypp = kyp < kypp ? kyp : kypp; if (kypp <= 0) return; kxyp = kxp1*kyp; dim3 dimGrids(kypp,nvp); dim3 dimGridty((kyp-1)/mx+1,(kxp1-1)/mx+1,nvp); ns = (mx+1)*mx*sizeof(float2); /* inverse fourier transform */ if (isign < 0) { /* perform x fft */ cfrc = cufftExecR2C(planrx,(cufftReal *)f,(cufftComplex *)f); /* cudaThreadSynchronize(); */ if (cfrc) { printf("cufftExecR2C(-1) planrx error=%d\n",cfrc); exit(1); } /* extract data to send and normalize */ ani = 1.0f/(((float) nx)*((float) ny)); crc = cudaGetLastError(); gpuppsmtposes<<<dimGrids,dimBlock>>>(f,bsm,ani,nxh1,kxp1,kypp, kstrt,nvp,kxyp,nxh1d,kypd); cudaThreadSynchronize(); crc = cudaGetLastError(); if (crc) { printf("gpuppsmtposes error=%d:%s\n",crc, cudaGetErrorString(crc)); exit(1); } } /* forward fourier transform */ else if (isign > 0) { /* transpose data received */ crc = cudaGetLastError(); gpuppmtposer<<<dimGridty,dimBlockt,ns>>>(f,bsm,nxh1,kxp1,kypp, kstrt,nvp,kxyp,nxh1d, kypd); /* cudaThreadSynchronize(); */ crc = cudaGetLastError(); if (crc) { printf("gpuppmtposer error=%d:%s\n",crc, cudaGetErrorString(crc)); exit(1); } /* perform x fft */ cfrc = cufftExecC2R(planxr,(cufftComplex *)f,(cufftReal *)f); cudaThreadSynchronize(); if (cfrc) { printf("cufftExecC2R(1) planxr error=%d\n",cfrc); exit(1); } } return; } /*--------------------------------------------------------------------*/ extern "C" void gpupfft2rrcuy(float2 *g, float2 *brm, int isign, int indx, int indy, int kstrt, int nvp, int kxp1, int kyp, int nyd) { /* wrapper function for real to complex fft in y, */ /* without packed data */ /* uses 1D real to complex and complex to complex NVIDIA FFTs */ /* local data */ int nx, nxh1, ny, ks, kxpp, kxyp, ns; int mx = 16; dim3 dimBlock(nblock_size); dim3 dimBlockt(mx,mx); /* calculate range of indices */ nx = 1L<<indx; nxh1 = nx/2 + 1; ny = 1L<<indy; ks = kstrt - 1; kxpp = nxh1 - kxp1*ks; kxpp = 0 > kxpp ? 0 : kxpp; kxpp = kxp1 < kxpp ? 
kxp1 : kxpp; if (kxpp <= 0) return; kxyp = kxp1*kyp; dim3 dimGrids(kxpp,nvp); dim3 dimGridtx((kxp1-1)/mx+1,(kyp-1)/mx+1,nvp); ns = (mx+1)*mx*sizeof(float2); /* inverse fourier transform */ if (isign < 0) { /* transpose data received */ crc = cudaGetLastError(); gpuppmtposer<<<dimGridtx,dimBlockt,ns>>>(g,brm,ny,kyp,kxpp,kstrt, nvp,kxyp,nyd,kxp1); /* cudaThreadSynchronize(); */ crc = cudaGetLastError(); if (crc) { printf("gpuppmtposer error=%d:%s\n",crc, cudaGetErrorString(crc)); exit(1); } /* perform y fft */ cfrc = cufftExecC2C(plany,(cufftComplex *)g,(cufftComplex *)g, CUFFT_FORWARD); cudaThreadSynchronize(); if (cfrc) { printf("cufftExecC2C(-1) plany error=%d\n",cfrc); exit(1); } } /* forward fourier transform */ else if (isign > 0) { /* perform y fft */ cfrc = cufftExecC2C(plany,(cufftComplex *)g,(cufftComplex *)g, CUFFT_INVERSE); /* cudaThreadSynchronize(); */ if (cfrc) { printf("cufftExecC2C(1) plany error=%d\n",cfrc); exit(1); } /* extract data to send */ crc = cudaGetLastError(); gpuppmtposes<<<dimGrids,dimBlock>>>(g,brm,ny,kyp,kxpp,kstrt, nvp,kxyp,nyd,kxp1); cudaThreadSynchronize(); crc = cudaGetLastError(); if (crc) { printf("gpuppmtposes error=%d:%s\n",crc, cudaGetErrorString(crc)); exit(1); } } return; } /*--------------------------------------------------------------------*/ extern "C" void gpupfft2rrcuxn(float2 *fn, float2 *bsm, int isign, int indx, int indy, int ndim, int kstrt, int nvp, int kxp1, int kyp, int nxh1d, int kypd) { /* wrapper function for real to complex fft in x, */ /* without packed data */ /* uses 1D real to complex and complex to complex NVIDIA FFTs */ /* ndim = vector dimension */ /* nxh1d must be = nx/2+1 */ /* local data */ int nx, nxh1, ny, ks, kypp, kxyp, ns; int mx = 16; float ani; dim3 dimBlock(nblock_size); dim3 dimBlockt(mx,mx); /* calculate range of indices */ nx = 1L<<indx; nxh1 = nx/2 + 1; ny = 1L<<indy; ks = kstrt - 1; kypp = ny - kyp*ks; kypp = 0 > kypp ? 0 : kypp; kypp = kyp < kypp ? 
kyp : kypp; if (kypp <= 0) return; kxyp = kxp1*kyp; dim3 dimGrids(kypp,nvp); dim3 dimGridty((kyp-1)/mx+1,(kxp1-1)/mx+1,nvp); ns = ndim*(mx+1)*mx*sizeof(float2); /* inverse fourier transform */ if (isign < 0) { /* perform x fft */ cfrc = cufftExecR2C(planrxn,(cufftReal *)fn,(cufftComplex *)fn); /* cudaThreadSynchronize(); */ if (cfrc) { printf("cufftExecR2C(-1) planrxn error=%d\n",cfrc); exit(1); } /* extract data to send and normalize */ ani = 1.0f/(((float) nx)*((float) ny)); crc = cudaGetLastError(); gpuppsmtposesn<<<dimGrids,dimBlock>>>(fn,bsm,ani,nxh1,kxp1,kypp, kstrt,nvp,ndim,kxyp,nxh1d, kypd); cudaThreadSynchronize(); crc = cudaGetLastError(); if (crc) { printf("gpuppsmtposesn error=%d:%s\n",crc, cudaGetErrorString(crc)); exit(1); } } /* forward fourier transform */ else if (isign > 0) { /* transpose data received */ crc = cudaGetLastError(); gpuppmtposern<<<dimGridty,dimBlockt,ns>>>(fn,bsm,nxh1,kxp1,kypp, kstrt,nvp,ndim,kxyp, nxh1d,kypd); /* cudaThreadSynchronize(); */ crc = cudaGetLastError(); if (crc) { printf("gpuppmtposern error=%d:%s\n",crc, cudaGetErrorString(crc)); exit(1); } /* perform x fft */ cfrc = cufftExecC2R(planxrn,(cufftComplex *)fn,(cufftReal *)fn); cudaThreadSynchronize(); if (cfrc) { printf("cufftExecC2R(1) planxrn error=%d\n",cfrc); exit(1); } } return; } /*--------------------------------------------------------------------*/ extern "C" void gpupfft2rrcuyn(float2 *gn, float2 *brm, int isign, int indx, int indy, int ndim, int kstrt, int nvp, int kxp1, int kyp, int nyd) { /* wrapper function for real to complex fft in y, */ /* without packed data */ /* uses 1D real to complex and complex to complex NVIDIA FFTs */ /* ndim = vector dimension */ /* local data */ int nx, nxh1, ny, ks, kxpp, kxyp, ns; int mx = 16; dim3 dimBlock(nblock_size); dim3 dimBlockt(mx,mx); /* calculate range of indices */ nx = 1L<<indx; nxh1 = nx/2 + 1; ny = 1L<<indy; ks = kstrt - 1; kxpp = nxh1 - kxp1*ks; kxpp = 0 > kxpp ? 0 : kxpp; kxpp = kxp1 < kxpp ? 
kxp1 : kxpp; if (kxpp <= 0) return; kxyp = kxp1*kyp; dim3 dimGrids(kxpp,nvp); dim3 dimGridtx((kxp1-1)/mx+1,(kyp-1)/mx+1,nvp); ns = ndim*(mx+1)*mx*sizeof(float2); /* inverse fourier transform */ if (isign < 0) { /* transpose data received */ crc = cudaGetLastError(); gpuppmtposern<<<dimGridtx,dimBlockt,ns>>>(gn,brm,ny,kyp,kxpp,kstrt, nvp,ndim,kxyp,nyd,kxp1); /* cudaThreadSynchronize(); */ crc = cudaGetLastError(); if (crc) { printf("gpuppmtposern error=%d:%s\n",crc, cudaGetErrorString(crc)); exit(1); } /* perform y fft */ cfrc = cufftExecC2C(planyn,(cufftComplex *)gn,(cufftComplex *)gn, CUFFT_FORWARD); cudaThreadSynchronize(); if (cfrc) { printf("cufftExecC2C(-1) planyn error=%d\n",cfrc); exit(1); } } /* forward fourier transform */ else if (isign > 0) { /* perform y fft */ cfrc = cufftExecC2C(planyn,(cufftComplex *)gn,(cufftComplex *)gn, CUFFT_INVERSE); /* cudaThreadSynchronize(); */ if (cfrc) { printf("cufftExecC2C(1) planyn error=%d\n",cfrc); exit(1); } /* extract data to send */ crc = cudaGetLastError(); gpuppmtposesn<<<dimGrids,dimBlock>>>(gn,brm,ny,kyp,kxpp,kstrt, nvp,ndim,kxyp,nyd,kxp1); cudaThreadSynchronize(); crc = cudaGetLastError(); if (crc) { printf("gpuppmtposesn error=%d:%s\n",crc, cudaGetErrorString(crc)); exit(1); } } return; } /*--------------------------------------------------------------------*/ extern "C" void cgpuppsltpose(float2 *f, float2 *g, float ani, int nx, int ny, int kxp, int kyp, int kstrt, int nxv, int nyv) { /* local complex transpose with scaling */ /* input = f, output = g */ /* local data */ int ns; static int mx = 16; dim3 dimBlockt(mx,mx); /* calculate range of indices */ dim3 dimGridtx((kxp-1)/mx+1,(kyp-1)/mx+1); ns = (mx+1)*mx*sizeof(float2); /* local transpose f to g */ crc = cudaGetLastError(); gpuppsltpose<<<dimGridtx,dimBlockt,ns>>>(f,g,ani,nx,ny,kxp,kyp,kstrt, nxv,nyv); cudaThreadSynchronize(); crc = cudaGetLastError(); if (crc) { printf("gpuppsltpose error=%d:%s\n",crc,cudaGetErrorString(crc)); exit(1); } return; } /*--------------------------------------------------------------------*/ extern "C" void cgpuppsltposen(float2 *fn, float2 *gn, float ani, int nx, int ny, int kxp, int kyp, int kstrt, int ndim, int nxv, int nyv) { /* local complex vector transpose with scaling */ /* input = fn, output = gn */ /* local data */ int ns; static int mx = 16; dim3 dimBlockt(mx,mx); /* calculate range of indices */ dim3 dimGridtx((kxp-1)/mx+1,(kyp-1)/mx+1); ns = ndim*(mx+1)*mx*sizeof(float2); /* local transpose f to g */ crc = cudaGetLastError(); gpuppsltposen<<<dimGridtx,dimBlockt,ns>>>(fn,gn,ani,nx,ny,kxp,kyp, kstrt,ndim,nxv,nyv); cudaThreadSynchronize(); crc = cudaGetLastError(); if (crc) { printf("gpuppsltposen error=%d:%s\n",crc,cudaGetErrorString(crc)); exit(1); } return; } /* Interfaces to Fortran */ /*--------------------------------------------------------------------*/ extern "C" void gpupfft2rrcuinit_(int *nx, int *kypp, int *ndim) { gpupfft2rrcuinit(*nx,*kypp,*ndim); return; } /*--------------------------------------------------------------------*/ extern "C" void gpupfft2cuinit_(int *nx, int *ny, int *ndim) { gpupfft2cuinit(*nx,*ny,*ndim); return; } /*--------------------------------------------------------------------*/ extern "C" void gpupfft2rrcudel_() { gpupfft2rrcudel(); return; } /*--------------------------------------------------------------------*/ extern "C" void gpupfft2cudel_() { gpupfft2cudel(); return; } /*--------------------------------------------------------------------*/ extern "C" void gpupfft2rrcux_(unsigned long *gp_f, 
unsigned long *gp_bsm, int *isign, int *indx, int *indy, int *kstrt, int *nvp, int *kxp1, int *kyp, int *nxh1d, int *kypd) { float2 *f, *bsm; f = (float2 *)*gp_f; bsm = (float2 *)*gp_bsm; gpupfft2rrcux(f,bsm,*isign,*indx,*indy,*kstrt,*nvp,*kxp1,*kyp,*nxh1d, *kypd); return; } /*--------------------------------------------------------------------*/ extern "C" void gpupfft2rrcuy_(unsigned long *gp_g, unsigned long *gp_brm, int *isign, int *indx, int *indy, int *kstrt, int *nvp, int *kxp1, int *kyp, int *nyd) { float2 *g, *brm; g = (float2 *)*gp_g; brm = (float2 *)*gp_brm; gpupfft2rrcuy(g,brm,*isign,*indx,*indy,*kstrt,*nvp, *kxp1,*kyp,*nyd); return; } /*--------------------------------------------------------------------*/ extern "C" void gpupfft2rrcuxn_(unsigned long *gp_fn, unsigned long *gp_bsm, int *isign, int *indx, int *indy, int *ndim, int *kstrt, int *nvp, int *kxp1, int *kyp, int *nxh1d, int *kypd) { float2 *fn, *bsm; fn = (float2 *)*gp_fn; bsm = (float2 *)*gp_bsm; gpupfft2rrcuxn(fn,bsm,*isign,*indx,*indy,*ndim,*kstrt,*nvp,*kxp1, *kyp,*nxh1d,*kypd); return; } /*--------------------------------------------------------------------*/ extern "C" void gpupfft2rrcuyn_(unsigned long *gp_gn, unsigned long *gp_brm, int *isign, int *indx, int *indy, int *ndim, int *kstrt, int *nvp, int *kxp1, int *kyp, int *nyd) { float2 *gn, *brm; gn = (float2 *)*gp_gn; brm = (float2 *)*gp_brm; gpupfft2rrcuyn(gn,brm,*isign,*indx,*indy,*ndim,*kstrt,*nvp,*kxp1, *kyp,*nyd); return; } /*--------------------------------------------------------------------*/ extern "C" void cgpuppsltpose_(unsigned long *gp_f, unsigned long *gp_g, float *ani, int *nx, int *ny, int *kxp, int *kyp, int *kstrt, int *nxv, int *nyv) { float2 *f, *g; f = (float2 *)*gp_f; g = (float2 *)*gp_g; cgpuppsltpose(f,g,*ani,*nx,*ny,*kxp,*kyp,*kstrt,*nxv,*nyv); return; } /*--------------------------------------------------------------------*/ extern "C" void cgpuppsltposen_(unsigned long *gp_fn, unsigned long *gp_gn, float *ani, int *nx, int *ny, int *kxp, int *kyp, int *kstrt, int *ndim, int *nxv, int *nyv) { float2 *fn, *gn; fn = (float2 *)*gp_fn; gn = (float2 *)*gp_gn; cgpuppsltposen(fn,gn,*ani,*nx,*ny,*kxp,*kyp,*kstrt,*ndim,*nxv,*nyv); return; }
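/* --------------------------------------------------------------------------
   Illustrative driver sketch (not part of the original file).  It shows the
   order in which the wrappers above are combined for one isign = -1 pass of
   the distributed 2D real-to-complex FFT: an x pass on the real data, an
   inter-node exchange of the send buffer, then a y pass on the transposed
   data.  The exchange itself (bsm -> brm, e.g. via an all-to-all) and the
   handling of the purely local block (see cgpuppsltpose above; the extract
   kernels skip the data a node keeps for itself) live in the calling PIC
   code and are only indicated here.  Dimensions follow the conventions used
   above: nx = 2^indx, ny = 2^indy, nxh1d >= nx/2+1, and kxp1/kyp are the
   per-node partition sizes (used here as the plan batch counts for
   simplicity).  Wrapped in #if 0 so it does not affect compilation. */
#if 0
void fft2_x_then_y_sketch(float2 *f, float2 *g, float2 *bsm, float2 *brm,
                          int indx, int indy, int kstrt, int nvp,
                          int kxp1, int kyp, int nxh1d, int kypd, int nyd)
{
/* create the cuFFT plans once, before the first transform */
   gpupfft2rrcuinit(1<<indx,kyp,1);
   gpupfft2cuinit(kxp1,1<<indy,1);
/* x pass: real-to-complex FFT of f, normalized results packed into bsm */
   gpupfft2rrcux(f,bsm,-1,indx,indy,kstrt,nvp,kxp1,kyp,nxh1d,kypd);
/* ... exchange bsm across the nvp nodes into brm, transpose local block ... */
/* y pass: unpack brm into g and perform the complex-to-complex FFT in y */
   gpupfft2rrcuy(g,brm,-1,indx,indy,kstrt,nvp,kxp1,kyp,nyd);
/* release the plans when no further transforms are needed */
   gpupfft2rrcudel();
   gpupfft2cudel();
}
#endif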
#include <iostream> //headers in local files #include "lidar_point_pillars/common.h" #include "lidar_point_pillars/preprocess_points_cuda.h" __global__ void make_pillar_histo_kernel( const float* dev_points, float* dev_pillar_x_in_coors, float* dev_pillar_y_in_coors, float* dev_pillar_z_in_coors, float* dev_pillar_i_in_coors, int* pillar_count_histo, const int num_points, const int max_points_per_pillar, const int GRID_X_SIZE, const int GRID_Y_SIZE, const int GRID_Z_SIZE, const float MIN_X_RANGE, const float MIN_Y_RANGE, const float MIN_Z_RANGE, const float PILLAR_X_SIZE, const float PILLAR_Y_SIZE, const float PILLAR_Z_SIZE, const int NUM_BOX_CORNERS ) { int th_i = threadIdx.x + blockIdx.x * blockDim.x; if(th_i >= num_points) { return; } int y_coor = floor((dev_points[th_i*NUM_BOX_CORNERS + 1] - MIN_Y_RANGE)/PILLAR_Y_SIZE); int x_coor = floor((dev_points[th_i*NUM_BOX_CORNERS + 0] - MIN_X_RANGE)/PILLAR_X_SIZE); int z_coor = floor((dev_points[th_i*NUM_BOX_CORNERS + 2] - MIN_Z_RANGE)/PILLAR_Z_SIZE); if(x_coor >= 0 && x_coor < GRID_X_SIZE && y_coor >= 0 && y_coor < GRID_Y_SIZE && z_coor >= 0 && z_coor < GRID_Z_SIZE) { int count = atomicAdd(&pillar_count_histo[y_coor*GRID_X_SIZE + x_coor], 1); if(count < max_points_per_pillar) { int ind = y_coor*GRID_X_SIZE*max_points_per_pillar + x_coor*max_points_per_pillar + count; dev_pillar_x_in_coors[ind] = dev_points[th_i*NUM_BOX_CORNERS + 0]; dev_pillar_y_in_coors[ind] = dev_points[th_i*NUM_BOX_CORNERS + 1]; dev_pillar_z_in_coors[ind] = dev_points[th_i*NUM_BOX_CORNERS + 2]; dev_pillar_i_in_coors[ind] = dev_points[th_i*NUM_BOX_CORNERS + 3]; } } } __global__ void make_pillar_index_kernel( int* dev_pillar_count_histo, int* dev_counter, int* dev_pillar_count, int* dev_x_coors, int* dev_y_coors, float* dev_x_coors_for_sub, float* dev_y_coors_for_sub, float* dev_num_points_per_pillar, int* dev_sparse_pillar_map, const int max_pillars, const int max_points_per_pillar, const int GRID_X_SIZE, const float PILLAR_X_SIZE, const float PILLAR_Y_SIZE, const int NUM_INDS_FOR_SCAN) { int x = blockIdx.x; int y = threadIdx.x; int num_points_at_this_pillar = dev_pillar_count_histo[y*GRID_X_SIZE + x]; if(num_points_at_this_pillar == 0) { return; } int count = atomicAdd(dev_counter, 1); if(count < max_pillars) { atomicAdd(dev_pillar_count, 1); if(num_points_at_this_pillar >= max_points_per_pillar) { dev_num_points_per_pillar[count] = max_points_per_pillar; } else { dev_num_points_per_pillar[count] = num_points_at_this_pillar; } dev_x_coors[count] = x; dev_y_coors[count] = y; //TODO Need to be modified after making properly trained weight // Will be modified in ver 1.1 // x_offset = self.vx / 2 + pc_range[0] // y_offset = self.vy / 2 + pc_range[1] // x_sub = coors_x.unsqueeze(1) * 0.16 + x_offset // y_sub = coors_y.unsqueeze(1) * 0.16 + y_offset dev_x_coors_for_sub[count] = x* PILLAR_X_SIZE + 0.1f; dev_y_coors_for_sub[count] = y* PILLAR_Y_SIZE + -39.9f; dev_sparse_pillar_map[y*NUM_INDS_FOR_SCAN + x] = 1; } } __global__ void make_pillar_feature_kernel( float* dev_pillar_x_in_coors, float* dev_pillar_y_in_coors, float* dev_pillar_z_in_coors, float* dev_pillar_i_in_coors, float* dev_pillar_x, float* dev_pillar_y, float* dev_pillar_z, float* dev_pillar_i, int* dev_x_coors, int* dev_y_coors, float* dev_num_points_per_pillar, const int max_points, const int GRID_X_SIZE) { int ith_pillar = blockIdx.x; int num_points_at_this_pillar = dev_num_points_per_pillar[ith_pillar]; int ith_point = threadIdx.x; if(ith_point >= num_points_at_this_pillar) { return; } int x_ind = 
dev_x_coors[ith_pillar]; int y_ind = dev_y_coors[ith_pillar]; int pillar_ind = ith_pillar*max_points + ith_point; int coors_ind = y_ind*GRID_X_SIZE*max_points + x_ind*max_points + ith_point; dev_pillar_x[pillar_ind] = dev_pillar_x_in_coors[coors_ind]; dev_pillar_y[pillar_ind] = dev_pillar_y_in_coors[coors_ind]; dev_pillar_z[pillar_ind] = dev_pillar_z_in_coors[coors_ind]; dev_pillar_i[pillar_ind] = dev_pillar_i_in_coors[coors_ind]; } __global__ void make_extra_network_input_kernel(float* dev_x_coors_for_sub, float* dev_y_coors_for_sub, float* dev_num_points_per_pillar, float* dev_x_coors_for_sub_shaped, float* dev_y_coors_for_sub_shaped, float* dev_pillar_feature_mask, const int MAX_NUM_POINTS_PER_PILLAR) { int ith_pillar = blockIdx.x; int ith_point = threadIdx.x; float x = dev_x_coors_for_sub[ith_pillar]; float y = dev_y_coors_for_sub[ith_pillar]; int num_points_for_a_pillar = dev_num_points_per_pillar[ith_pillar]; int ind = ith_pillar*MAX_NUM_POINTS_PER_PILLAR + ith_point; dev_x_coors_for_sub_shaped[ind] = x; dev_y_coors_for_sub_shaped[ind] = y; if(ith_point < num_points_for_a_pillar) { dev_pillar_feature_mask[ind] = 1.0; } else { dev_pillar_feature_mask[ind] = 0.0; } } PreprocessPointsCuda::PreprocessPointsCuda(const int NUM_THREADS, const int MAX_NUM_PILLARS, const int MAX_POINTS_PER_PILLAR, const int NUM_INDS_FOR_SCAN, const int GRID_X_SIZE, const int GRID_Y_SIZE, const int GRID_Z_SIZE, const float PILLAR_X_SIZE, const float PILLAR_Y_SIZE, const float PILLAR_Z_SIZE, const float MIN_X_RANGE, const float MIN_Y_RANGE, const float MIN_Z_RANGE, const int NUM_BOX_CORNERS) : NUM_THREADS_(NUM_THREADS), MAX_NUM_PILLARS_(MAX_NUM_PILLARS), MAX_NUM_POINTS_PER_PILLAR_(MAX_POINTS_PER_PILLAR), NUM_INDS_FOR_SCAN_(NUM_INDS_FOR_SCAN), GRID_X_SIZE_(GRID_X_SIZE), GRID_Y_SIZE_(GRID_Y_SIZE), GRID_Z_SIZE_(GRID_Z_SIZE), PILLAR_X_SIZE_(PILLAR_X_SIZE), PILLAR_Y_SIZE_(PILLAR_Y_SIZE), PILLAR_Z_SIZE_(PILLAR_Z_SIZE), MIN_X_RANGE_(MIN_X_RANGE), MIN_Y_RANGE_(MIN_Y_RANGE), MIN_Z_RANGE_(MIN_Z_RANGE), NUM_BOX_CORNERS_(NUM_BOX_CORNERS) { GPU_CHECK(cudaMalloc((void**)&dev_pillar_x_in_coors_, GRID_Y_SIZE_*GRID_X_SIZE_*MAX_NUM_POINTS_PER_PILLAR_* sizeof(float))); GPU_CHECK(cudaMalloc((void**)&dev_pillar_y_in_coors_, GRID_Y_SIZE_*GRID_X_SIZE_*MAX_NUM_POINTS_PER_PILLAR_* sizeof(float))); GPU_CHECK(cudaMalloc((void**)&dev_pillar_z_in_coors_, GRID_Y_SIZE_*GRID_X_SIZE_*MAX_NUM_POINTS_PER_PILLAR_* sizeof(float))); GPU_CHECK(cudaMalloc((void**)&dev_pillar_i_in_coors_, GRID_Y_SIZE_*GRID_X_SIZE_*MAX_NUM_POINTS_PER_PILLAR_* sizeof(float))); GPU_CHECK(cudaMalloc((void**)&dev_pillar_count_histo_, GRID_Y_SIZE_*GRID_X_SIZE_*sizeof(int))); GPU_CHECK(cudaMalloc((void**)&dev_counter_, sizeof(int))); GPU_CHECK(cudaMalloc((void**)&dev_pillar_count_, sizeof(int))); GPU_CHECK(cudaMalloc((void**)&dev_x_coors_for_sub_, MAX_NUM_PILLARS_* sizeof(float))); GPU_CHECK(cudaMalloc((void**)&dev_y_coors_for_sub_, MAX_NUM_PILLARS_* sizeof(float))); } PreprocessPointsCuda::~PreprocessPointsCuda() { GPU_CHECK(cudaFree(dev_pillar_x_in_coors_)); GPU_CHECK(cudaFree(dev_pillar_y_in_coors_)); GPU_CHECK(cudaFree(dev_pillar_z_in_coors_)); GPU_CHECK(cudaFree(dev_pillar_i_in_coors_)); GPU_CHECK(cudaFree(dev_pillar_count_histo_)); GPU_CHECK(cudaFree(dev_counter_)); GPU_CHECK(cudaFree(dev_pillar_count_)); GPU_CHECK(cudaFree(dev_x_coors_for_sub_)); GPU_CHECK(cudaFree(dev_y_coors_for_sub_)); } void PreprocessPointsCuda::doPreprocessPointsCuda(const float* dev_points, const int in_num_points, int* dev_x_coors, int* dev_y_coors, float* dev_num_points_per_pillar, float* 
dev_pillar_x, float* dev_pillar_y, float* dev_pillar_z, float* dev_pillar_i, float* dev_x_coors_for_sub_shaped, float* dev_y_coors_for_sub_shaped, float* dev_pillar_feature_mask, int* dev_sparse_pillar_map, int* host_pillar_count) { GPU_CHECK(cudaMemset(dev_pillar_count_histo_, 0, GRID_Y_SIZE_*GRID_X_SIZE_*sizeof(int))); GPU_CHECK(cudaMemset(dev_counter_, 0, sizeof(int))); GPU_CHECK(cudaMemset(dev_pillar_count_, 0, sizeof(int))); int num_block = DIVUP(in_num_points, NUM_THREADS_); make_pillar_histo_kernel<<<num_block, NUM_THREADS_>>>( dev_points, dev_pillar_x_in_coors_, dev_pillar_y_in_coors_, dev_pillar_z_in_coors_, dev_pillar_i_in_coors_, dev_pillar_count_histo_, in_num_points, MAX_NUM_POINTS_PER_PILLAR_, GRID_X_SIZE_, GRID_Y_SIZE_, GRID_Z_SIZE_, MIN_X_RANGE_, MIN_Y_RANGE_, MIN_Z_RANGE_, PILLAR_X_SIZE_, PILLAR_Y_SIZE_, PILLAR_Z_SIZE_, NUM_BOX_CORNERS_); make_pillar_index_kernel<<<GRID_X_SIZE_, GRID_Y_SIZE_>>>( dev_pillar_count_histo_, dev_counter_, dev_pillar_count_, dev_x_coors, dev_y_coors, dev_x_coors_for_sub_, dev_y_coors_for_sub_, dev_num_points_per_pillar, dev_sparse_pillar_map, MAX_NUM_PILLARS_, MAX_NUM_POINTS_PER_PILLAR_, GRID_X_SIZE_, PILLAR_X_SIZE_, PILLAR_Y_SIZE_, NUM_INDS_FOR_SCAN_); GPU_CHECK(cudaMemcpy(host_pillar_count, dev_pillar_count_, sizeof(int), cudaMemcpyDeviceToHost ) ); make_pillar_feature_kernel<<<host_pillar_count[0], MAX_NUM_POINTS_PER_PILLAR_>>>( dev_pillar_x_in_coors_, dev_pillar_y_in_coors_, dev_pillar_z_in_coors_, dev_pillar_i_in_coors_, dev_pillar_x, dev_pillar_y, dev_pillar_z, dev_pillar_i, dev_x_coors, dev_y_coors, dev_num_points_per_pillar, MAX_NUM_POINTS_PER_PILLAR_, GRID_X_SIZE_); make_extra_network_input_kernel<<<MAX_NUM_PILLARS_, MAX_NUM_POINTS_PER_PILLAR_>>>( dev_x_coors_for_sub_, dev_y_coors_for_sub_, dev_num_points_per_pillar, dev_x_coors_for_sub_shaped, dev_y_coors_for_sub_shaped, dev_pillar_feature_mask, MAX_NUM_POINTS_PER_PILLAR_); }
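// --------------------------------------------------------------------------
// Illustrative caller sketch (not part of the original file).  It shows how
// the output buffers passed to doPreprocessPointsCuda() are typically sized,
// derived from the indexing used by the kernels above: per-pillar arrays hold
// max_pillars entries, per-point arrays hold max_pillars * max_points_per_pillar
// entries, and the sparse pillar map is indexed as y * num_inds_for_scan + x.
// All names below are local to the sketch; the concrete values come from the
// trained network configuration.  Wrapped in #if 0 so it does not affect
// compilation.
#if 0
void examplePreprocess(PreprocessPointsCuda& pre, const float* dev_points,
                       int num_points, int max_pillars,
                       int max_points_per_pillar, int num_inds_for_scan)
{
  int*   dev_x_coors;               // pillar grid x index, one per pillar
  int*   dev_y_coors;               // pillar grid y index, one per pillar
  float* dev_num_points;            // points actually stored in each pillar
  float *dev_px, *dev_py, *dev_pz, *dev_pi;          // per-point coordinates
  float *dev_x_sub, *dev_y_sub, *dev_mask;           // extra network inputs
  int*   dev_sparse_pillar_map;     // occupancy map consumed by the scan
  int    host_pillar_count[1] = {0};

  GPU_CHECK(cudaMalloc((void**)&dev_x_coors, max_pillars * sizeof(int)));
  GPU_CHECK(cudaMalloc((void**)&dev_y_coors, max_pillars * sizeof(int)));
  GPU_CHECK(cudaMalloc((void**)&dev_num_points, max_pillars * sizeof(float)));
  size_t per_point = (size_t)max_pillars * max_points_per_pillar * sizeof(float);
  GPU_CHECK(cudaMalloc((void**)&dev_px, per_point));
  GPU_CHECK(cudaMalloc((void**)&dev_py, per_point));
  GPU_CHECK(cudaMalloc((void**)&dev_pz, per_point));
  GPU_CHECK(cudaMalloc((void**)&dev_pi, per_point));
  GPU_CHECK(cudaMalloc((void**)&dev_x_sub, per_point));
  GPU_CHECK(cudaMalloc((void**)&dev_y_sub, per_point));
  GPU_CHECK(cudaMalloc((void**)&dev_mask, per_point));
  GPU_CHECK(cudaMalloc((void**)&dev_sparse_pillar_map,
                       num_inds_for_scan * num_inds_for_scan * sizeof(int)));
  // the index kernel only writes 1s, so the map must start out cleared
  GPU_CHECK(cudaMemset(dev_sparse_pillar_map, 0,
                       num_inds_for_scan * num_inds_for_scan * sizeof(int)));

  pre.doPreprocessPointsCuda(dev_points, num_points,
                             dev_x_coors, dev_y_coors, dev_num_points,
                             dev_px, dev_py, dev_pz, dev_pi,
                             dev_x_sub, dev_y_sub, dev_mask,
                             dev_sparse_pillar_map, host_pillar_count);
  // host_pillar_count[0] now holds the number of non-empty pillars
  // (capped at max_pillars by make_pillar_index_kernel)
}
#endif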
using namespace mgpu; using namespace thrust::placeholders; vector<void*> alloced_mem; template<typename T> struct distinct : public binary_function<T,T,T> { __host__ __device__ T operator()(const T &lhs, const T &rhs) const { return lhs != rhs; } }; struct gpu_getyear { const int_type *source; int_type *dest; gpu_getyear(const int_type *_source, int_type *_dest): source(_source), dest(_dest) {} template <typename IndexType> __host__ __device__ void operator()(const IndexType & i) { unsigned long long int sec; uint quadricentennials, centennials, quadrennials, annuals/*1-ennial?*/; uint year, leap; uint yday; uint month, mday; const uint daysSinceJan1st[2][13]= { {0,31,59,90,120,151,181,212,243,273,304,334,365}, // 365 days, non-leap {0,31,60,91,121,152,182,213,244,274,305,335,366} // 366 days, leap }; unsigned long long int SecondsSinceEpoch = source[i]/1000; sec = SecondsSinceEpoch + 11644473600; //wday = (uint)((sec / 86400 + 1) % 7); // day of week quadricentennials = (uint)(sec / 12622780800ULL); // 400*365.2425*24*3600 sec %= 12622780800ULL; centennials = (uint)(sec / 3155673600ULL); // 100*(365+24/100)*24*3600 if (centennials > 3) { centennials = 3; } sec -= centennials * 3155673600ULL; quadrennials = (uint)(sec / 126230400); // 4*(365+1/4)*24*3600 if (quadrennials > 24) { quadrennials = 24; } sec -= quadrennials * 126230400ULL; annuals = (uint)(sec / 31536000); // 365*24*3600 if (annuals > 3) { annuals = 3; } sec -= annuals * 31536000ULL; year = 1601 + quadricentennials * 400 + centennials * 100 + quadrennials * 4 + annuals; leap = !(year % 4) && (year % 100 || !(year % 400)); // Calculate the day of the year and the time yday = sec / 86400; sec %= 86400; //hour = sec / 3600; sec %= 3600; //min = sec / 60; sec %= 60; // Calculate the month for (mday = month = 1; month < 13; month++) { if (yday < daysSinceJan1st[leap][month]) { mday += yday - daysSinceJan1st[leap][month - 1]; break; } } dest[i] = year; } }; struct gpu_getmonth { const int_type *source; int_type *dest; gpu_getmonth(const int_type *_source, int_type *_dest): source(_source), dest(_dest) {} template <typename IndexType> __host__ __device__ void operator()(const IndexType & i) { unsigned long long int sec; uint quadricentennials, centennials, quadrennials, annuals/*1-ennial?*/; uint year, leap; uint yday; uint month, mday; const uint daysSinceJan1st[2][13]= { {0,31,59,90,120,151,181,212,243,273,304,334,365}, // 365 days, non-leap {0,31,60,91,121,152,182,213,244,274,305,335,366} // 366 days, leap }; unsigned long long int SecondsSinceEpoch = source[i]/1000; sec = SecondsSinceEpoch + 11644473600; //wday = (uint)((sec / 86400 + 1) % 7); // day of week quadricentennials = (uint)(sec / 12622780800ULL); // 400*365.2425*24*3600 sec %= 12622780800ULL; centennials = (uint)(sec / 3155673600ULL); // 100*(365+24/100)*24*3600 if (centennials > 3) { centennials = 3; } sec -= centennials * 3155673600ULL; quadrennials = (uint)(sec / 126230400); // 4*(365+1/4)*24*3600 if (quadrennials > 24) { quadrennials = 24; } sec -= quadrennials * 126230400ULL; annuals = (uint)(sec / 31536000); // 365*24*3600 if (annuals > 3) { annuals = 3; } sec -= annuals * 31536000ULL; year = 1601 + quadricentennials * 400 + centennials * 100 + quadrennials * 4 + annuals; leap = !(year % 4) && (year % 100 || !(year % 400)); // Calculate the day of the year and the time yday = sec / 86400; sec %= 86400; //hour = sec / 3600; sec %= 3600; //min = sec / 60; sec %= 60; // Calculate the month for (mday = month = 1; month < 13; month++) { if (yday < 
daysSinceJan1st[leap][month]) { mday += yday - daysSinceJan1st[leap][month - 1]; break; } } dest[i] = year*100+month; } }; struct gpu_getday { const int_type *source; int_type *dest; gpu_getday(const int_type *_source, int_type *_dest): source(_source), dest(_dest) {} template <typename IndexType> __host__ __device__ void operator()(const IndexType & i) { unsigned long long int sec; uint quadricentennials, centennials, quadrennials, annuals/*1-ennial?*/; uint year, leap; uint yday; uint month, mday; const uint daysSinceJan1st[2][13]= { {0,31,59,90,120,151,181,212,243,273,304,334,365}, // 365 days, non-leap {0,31,60,91,121,152,182,213,244,274,305,335,366} // 366 days, leap }; unsigned long long int SecondsSinceEpoch = source[i]/1000; sec = SecondsSinceEpoch + 11644473600; //wday = (uint)((sec / 86400 + 1) % 7); // day of week quadricentennials = (uint)(sec / 12622780800ULL); // 400*365.2425*24*3600 sec %= 12622780800ULL; centennials = (uint)(sec / 3155673600ULL); // 100*(365+24/100)*24*3600 if (centennials > 3) { centennials = 3; } sec -= centennials * 3155673600ULL; quadrennials = (uint)(sec / 126230400); // 4*(365+1/4)*24*3600 if (quadrennials > 24) { quadrennials = 24; } sec -= quadrennials * 126230400ULL; annuals = (uint)(sec / 31536000); // 365*24*3600 if (annuals > 3) { annuals = 3; } sec -= annuals * 31536000ULL; year = 1601 + quadricentennials * 400 + centennials * 100 + quadrennials * 4 + annuals; leap = !(year % 4) && (year % 100 || !(year % 400)); // Calculate the day of the year and the time yday = sec / 86400; sec %= 86400; //hour = sec / 3600; sec %= 3600; //min = sec / 60; sec %= 60; // Calculate the month for (mday = month = 1; month < 13; month++) { if (yday < daysSinceJan1st[leap][month]) { mday += yday - daysSinceJan1st[leap][month - 1]; break; } } dest[i] = year*10000+month*100+mday; } }; void make_calc_columns(queue<string> op_type, queue<string> op_value, CudaSet* a, set<string>& order_field_names) { string ss, s1_val; stack<string> exe_type, exe_value; string op_t, op_v; unsigned int bits; for(int i=0; !op_type.empty(); ++i, op_type.pop()) { ss = op_type.front(); if (ss.compare("NAME") == 0) { if(!op_value.empty()) { exe_value.push(op_value.front()); op_value.pop(); }; } else if (ss.compare("CAST") == 0 || ss.compare("YEAR") == 0) { op_v = exe_value.top(); exe_value.pop(); op_t = ss; } else if (ss.compare("emit sel_name") == 0) { if(!op_t.empty()) { if(cpy_bits.empty()) bits = 0; else bits = cpy_bits[op_v]; if(order_field_names.find(op_value.front()) == order_field_names.end()) { order_field_names.insert(op_value.front()); order_field_names.erase(op_v); }; a->columnNames.push_back(op_value.front()); a->cols[a->cols.size()+1] = op_value.front(); a->type[op_value.front()] = 0; a->decimal[op_value.front()] = 0; a->decimal_zeroes[op_value.front()] = 0; a->h_columns_int[op_value.front()] = thrust::host_vector<int_type, pinned_allocator<int_type> >(a->mRecCount); a->d_columns_int[op_value.front()] = thrust::device_vector<int_type>(a->mRecCount); if (op_t.compare("CAST") == 0) { cpy_bits[op_value.front()] = bits; cpy_init_val[op_value.front()] = cpy_init_val[op_v]/100; if(bits == 8) { thrust::device_ptr<unsigned char> src((unsigned char*)thrust::raw_pointer_cast(a->d_columns_int[op_v].data())); thrust::device_ptr<unsigned char> dest((unsigned char*)thrust::raw_pointer_cast(a->d_columns_int[op_value.front()].data())); thrust::transform(src, src + a->mRecCount, dest, _1/100); } else if(bits == 16) { thrust::device_ptr<unsigned short int> src((unsigned short 
int*)thrust::raw_pointer_cast(a->d_columns_int[op_v].data())); thrust::device_ptr<unsigned short int> dest((unsigned short int*)thrust::raw_pointer_cast(a->d_columns_int[op_value.front()].data())); thrust::transform(src, src + a->mRecCount, dest, _1/100); } else if(bits == 32) { thrust::device_ptr<unsigned int> src((unsigned int*)thrust::raw_pointer_cast(a->d_columns_int[op_v].data())); thrust::device_ptr<unsigned int> dest((unsigned int*)thrust::raw_pointer_cast(a->d_columns_int[op_value.front()].data())); thrust::transform(src, src + a->mRecCount, dest, _1/100); } else thrust::transform(a->d_columns_int[op_v].begin(), a->d_columns_int[op_v].begin() + a->mRecCount, a->d_columns_int[op_value.front()].begin(), _1/100); } else { cpy_init_val[op_value.front()] = 0; cpy_bits[op_value.front()] = 0; if(bits == 8) { thrust::device_ptr<unsigned char> src((unsigned char*)thrust::raw_pointer_cast(a->d_columns_int[op_v].data())); thrust::device_ptr<unsigned char> dest((unsigned char*)thrust::raw_pointer_cast(a->d_columns_int[op_value.front()].data())); thrust::copy(src, src + a->mRecCount, a->d_columns_int[op_value.front()].begin()); thrust::transform(a->d_columns_int[op_value.front()].begin(), a->d_columns_int[op_value.front()].begin() + a->mRecCount, thrust::make_constant_iterator(cpy_init_val[op_v]), a->d_columns_int[op_value.front()].begin(), thrust::plus<int_type>()); thrust::transform(a->d_columns_int[op_value.front()].begin(), a->d_columns_int[op_value.front()].begin() + a->mRecCount, a->d_columns_int[op_value.front()].begin(), _1/10000); } else if(bits == 16) { thrust::device_ptr<unsigned short int> src((unsigned short int*)thrust::raw_pointer_cast(a->d_columns_int[op_v].data())); thrust::device_ptr<unsigned short int> dest((unsigned short int*)thrust::raw_pointer_cast(a->d_columns_int[op_value.front()].data())); thrust::transform(src, src + a->mRecCount, thrust::make_constant_iterator(10000), dest, thrust::divides<unsigned short int>()); } else if(bits == 32) { thrust::device_ptr<unsigned int> src((unsigned int*)thrust::raw_pointer_cast(a->d_columns_int[op_v].data())); thrust::device_ptr<unsigned int> dest((unsigned int*)thrust::raw_pointer_cast(a->d_columns_int[op_value.front()].data())); thrust::transform(src, src + a->mRecCount, thrust::make_constant_iterator(10000), dest, thrust::divides<unsigned int>()); } else thrust::transform(a->d_columns_int[op_v].begin(), a->d_columns_int[op_v].begin() + a->mRecCount, thrust::make_constant_iterator(10000), a->d_columns_int[op_value.front()].begin(), thrust::divides<int_type>()); }; op_t.clear(); }; op_value.pop(); } else if (ss.compare("MUL") == 0 || ss.compare("ADD") == 0 || ss.compare("DIV") == 0 || ss.compare("MINUS") == 0) { if(!exe_value.empty()) exe_value.pop(); if(!exe_value.empty()) exe_value.pop(); }; }; } bool select(queue<string> op_type, queue<string> op_value, queue<int_type> op_nums, queue<float_type> op_nums_f, queue<unsigned int> op_nums_precision, CudaSet* a, CudaSet* b, vector<thrust::device_vector<int_type> >& distinct_tmp) { stack<string> exe_type, exe_value; stack<int_type*> exe_vectors, exe_vectors1; stack<int_type> exe_nums, exe_nums1; string s1, s2, s1_val, s2_val, grp_type; int_type n1, n2, res; unsigned int colCount = 0, dist_processed = 0; stack<int> col_type; stack<string> grp_type1, col_val, exe_value1; size_t res_size = 0; stack<float_type*> exe_vectors1_d; stack<unsigned int> exe_precision, exe_precision1; stack<bool> exe_ts; bool one_line = 0, ts, free_mem, free_mem1; //thrust::device_ptr<bool> 
d_di(thrust::raw_pointer_cast(a->grp.data())); if (a->grp_count && (a->mRecCount != 0)) res_size = a->grp_count; std::clock_t start1 = std::clock(); for(int i=0; !op_type.empty(); ++i, op_type.pop()) { string ss = op_type.front(); cout << ss << endl; if(ss.compare("emit sel_name") != 0) { grp_type = "NULL"; if (ss.compare("COUNT") == 0 || ss.compare("SUM") == 0 || ss.compare("AVG") == 0 || ss.compare("MIN") == 0 || ss.compare("MAX") == 0 || ss.compare("DISTINCT") == 0 || ss.compare("YEAR") == 0 || ss.compare("MONTH") == 0 || ss.compare("DAY") == 0 || ss.compare("CAST") == 0) { if(!a->grp_count && ss.compare("YEAR") && ss.compare("MONTH") && ss.compare("DAY") && ss.compare("CAST")) { one_line = 1; }; if (ss.compare("CAST") == 0) { exe_type.push(ss); exe_value.push(op_value.front()); } else if (ss.compare("YEAR") == 0) { s1_val = exe_value.top(); exe_value.pop(); exe_type.pop(); thrust::device_ptr<int_type> res = thrust::device_malloc<int_type>(a->mRecCount); if(a->ts_cols[s1_val]) { thrust::counting_iterator<unsigned int> begin(0); gpu_getyear ff((const int_type*)thrust::raw_pointer_cast(a->d_columns_int[s1_val].data()), thrust::raw_pointer_cast(res)); thrust::for_each(begin, begin + a->mRecCount, ff); exe_precision.push(0); exe_vectors.push(thrust::raw_pointer_cast(res)); exe_type.push("NAME"); exe_value.push(""); } else { exe_type.push(ss); exe_value.push(op_value.front()); exe_precision.push(a->decimal_zeroes[s1_val]); }; } else if (ss.compare("MONTH") == 0) { s1_val = exe_value.top(); exe_value.pop(); exe_type.pop(); thrust::device_ptr<int_type> res = thrust::device_malloc<int_type>(a->mRecCount); thrust::counting_iterator<unsigned int> begin(0); gpu_getmonth ff((const int_type*)thrust::raw_pointer_cast(a->d_columns_int[s1_val].data()), thrust::raw_pointer_cast(res)); thrust::for_each(begin, begin + a->mRecCount, ff); exe_precision.push(0); exe_vectors.push(thrust::raw_pointer_cast(res)); exe_type.push("NAME"); exe_value.push(""); } else if (ss.compare("DAY") == 0) { s1_val = exe_value.top(); exe_value.pop(); exe_type.pop(); thrust::device_ptr<int_type> res = thrust::device_malloc<int_type>(a->mRecCount); thrust::counting_iterator<unsigned int> begin(0); gpu_getday ff((const int_type*)thrust::raw_pointer_cast(a->d_columns_int[s1_val].data()), thrust::raw_pointer_cast(res)); thrust::for_each(begin, begin + a->mRecCount, ff); exe_precision.push(0); exe_vectors.push(thrust::raw_pointer_cast(res)); exe_type.push("NAME"); exe_value.push(""); } else if (ss.compare("DISTINCT") == 0) { s1_val = exe_value.top(); exe_type.pop(); exe_value.pop(); if(a->type[s1_val] == 0) { thrust::copy(a->d_columns_int[s1_val].begin(), a->d_columns_int[s1_val].begin() + a->mRecCount, distinct_tmp[dist_processed].begin()); dist_processed++; thrust::device_ptr<int_type> res = thrust::device_malloc<int_type>(res_size); exe_vectors.push(thrust::raw_pointer_cast(res)); exe_type.push("NAME"); exe_value.push(""); } else if(a->type[s1_val] == 2) { //will add a DISTINCT on strings if anyone needs it cout << "DISTINCT on strings is not supported yet" << endl; exit(0); } else { cout << "DISTINCT on float is not supported yet" << endl; exit(0); }; } else if (ss.compare("COUNT") == 0) { s1 = exe_type.top(); //if(s1.compare("NAME") != 0) { // non distinct grp_type = "COUNT"; exe_type.pop(); s1_val = exe_value.top(); exe_value.pop(); if (a->grp_count > 1) { thrust::device_ptr<int_type> count_diff = thrust::device_malloc<int_type>(res_size); if(alloced_mem.empty()) { alloc_pool(a->maxRecs); }; thrust::device_ptr<int_type> 
const_seq((int_type*)alloced_mem.back()); thrust::fill(const_seq, const_seq+a->mRecCount, (int_type)1); segreduce(thrust::raw_pointer_cast(const_seq), a->mRecCount, (int*)thrust::raw_pointer_cast(a->grp.data()), a->grp.size(), thrust::raw_pointer_cast(count_diff), plus_t<int_type>(), (int_type)0, context); exe_vectors.push(thrust::raw_pointer_cast(count_diff)); exe_type.push("NAME"); } else { thrust::device_ptr<int_type> dest = thrust::device_malloc<int_type>(1); dest[0] = a->mRecCount; exe_vectors.push(thrust::raw_pointer_cast(dest)); exe_type.push("NAME"); }; // } // else // grp_type = "COUNTD"; exe_precision.push(0); exe_value.push(""); } else if (ss.compare("SUM") == 0) { /*if(op_case) { cout << "found case " << endl; op_case = 0; while(!exe_type.empty()) { cout << "CASE type " << exe_type.top() << endl; exe_type.pop(); exit(0); } }; */ grp_type = "SUM"; s1 = exe_type.top(); exe_type.pop(); s1_val = exe_value.top(); exe_value.pop(); if(std::find(a->columnNames.begin(), a->columnNames.end(), s1_val) == a->columnNames.end()) { int_type* s3 = exe_vectors.top(); exe_vectors.pop(); if (a->grp_count > 1) { thrust::device_ptr<int_type> count_diff = thrust::device_malloc<int_type>(res_size); segreduce(s3, a->mRecCount, (int*)thrust::raw_pointer_cast(a->grp.data()), a->grp.size(), thrust::raw_pointer_cast(count_diff), plus_t<int_type>(), (int_type)0, context); exe_vectors.push(thrust::raw_pointer_cast(count_diff)); } else { thrust::device_ptr<int_type> count_diff = thrust::device_malloc<int_type>(1); reduce(s3, a->mRecCount, thrust::raw_pointer_cast(count_diff), plus_t<int_type>(), context); exe_vectors.push(thrust::raw_pointer_cast(count_diff)); }; cudaFree(s3); } else { if (a->grp_count > 1) { thrust::device_ptr<int_type> count_diff = thrust::device_malloc<int_type>(res_size); segreduce(thrust::raw_pointer_cast(a->d_columns_int[s1_val].data()), a->mRecCount, (int*)thrust::raw_pointer_cast(a->grp.data()), a->grp.size(), thrust::raw_pointer_cast(count_diff), plus_t<int_type>(), (int_type)0, context); exe_vectors.push(thrust::raw_pointer_cast(count_diff)); } else { thrust::device_ptr<int_type> dest; thrust::device_ptr<int_type> count_diff = thrust::device_malloc<int_type>(res_size); reduce(thrust::raw_pointer_cast(a->d_columns_int[s1_val].data()), a->mRecCount, thrust::raw_pointer_cast(count_diff), plus_t<int_type>(), context); if (one_line) { dest = thrust::device_malloc<int_type>(1); dest[0] = count_diff[0]; } else { dest = thrust::device_malloc<int_type>(a->mRecCount); int_type cc = count_diff[0]; thrust::sequence(dest, dest+(a->mRecCount), cc, (int_type)0); }; exe_vectors.push(thrust::raw_pointer_cast(dest)); }; exe_precision.push(get_decimals(a, s1_val, exe_precision)); } exe_type.push("NAME"); exe_value.push(""); } else if (ss.compare("MIN") == 0) { grp_type = "MIN"; s1 = exe_type.top(); exe_type.pop(); s1_val = exe_value.top(); exe_value.pop(); thrust::device_ptr<int_type> count_diff = thrust::device_malloc<int_type>(res_size); if(std::find(a->columnNames.begin(), a->columnNames.end(), s1_val) != a->columnNames.end()) { if (a->grp_count > 1) { segreduce(thrust::raw_pointer_cast(a->d_columns_int[s1_val].data()), a->mRecCount, (int*)thrust::raw_pointer_cast(a->grp.data()), a->grp.size(), thrust::raw_pointer_cast(count_diff), minimum_t<int_type>(), (int_type)0, context); } else { reduce(thrust::raw_pointer_cast(a->d_columns_int[s1_val].data()), a->mRecCount, thrust::raw_pointer_cast(count_diff), minimum_t<int_type>(), context); }; } else { int_type* s3 = exe_vectors.top(); exe_vectors.pop(); 
if (a->grp_count > 1) { segreduce(s3, a->mRecCount, (int*)thrust::raw_pointer_cast(a->grp.data()), a->grp.size(), thrust::raw_pointer_cast(count_diff), minimum_t<int_type>(), (int_type)0, context); } else { reduce(s3, a->mRecCount, thrust::raw_pointer_cast(count_diff), minimum_t<int_type>(), context); }; cudaFree(s3); }; exe_vectors.push(thrust::raw_pointer_cast(count_diff)); exe_type.push("NAME"); exe_value.push(""); exe_precision.push(get_decimals(a, s1_val, exe_precision)); } else if (ss.compare("MAX") == 0) { grp_type = "MAX"; s1 = exe_type.top(); exe_type.pop(); s1_val = exe_value.top(); exe_value.pop(); thrust::device_ptr<int_type> count_diff = thrust::device_malloc<int_type>(res_size); if(std::find(a->columnNames.begin(), a->columnNames.end(), s1_val) != a->columnNames.end()) { if (a->grp_count > 1) { segreduce(thrust::raw_pointer_cast(a->d_columns_int[s1_val].data()), a->mRecCount, (int*)thrust::raw_pointer_cast(a->grp.data()), a->grp.size(), thrust::raw_pointer_cast(count_diff), maximum_t<int_type>(), (int_type)0, context); } else { reduce(thrust::raw_pointer_cast(a->d_columns_int[s1_val].data()), a->mRecCount, thrust::raw_pointer_cast(count_diff), maximum_t<int_type>(), context); }; } else { int_type* s3 = exe_vectors.top(); exe_vectors.pop(); if (a->grp_count > 1) { segreduce(s3, a->mRecCount, (int*)thrust::raw_pointer_cast(a->grp.data()), a->grp.size(), thrust::raw_pointer_cast(count_diff), maximum_t<int_type>(), (int_type)0, context); } else { reduce(s3, a->mRecCount, thrust::raw_pointer_cast(count_diff), maximum_t<int_type>(), context); }; cudaFree(s3); }; exe_vectors.push(thrust::raw_pointer_cast(count_diff)); exe_type.push("NAME"); exe_value.push(""); exe_precision.push(get_decimals(a, s1_val, exe_precision)); } else if (ss.compare("AVG") == 0) { grp_type = "AVG"; s1 = exe_type.top(); exe_type.pop(); s1_val = exe_value.top(); exe_value.pop(); thrust::device_ptr<int_type> count_diff = thrust::device_malloc<int_type>(res_size); if(std::find(a->columnNames.begin(), a->columnNames.end(), s1_val) != a->columnNames.end()) { if (a->grp_count > 1) { segreduce(thrust::raw_pointer_cast(a->d_columns_int[s1_val].data()), a->mRecCount, (int*)thrust::raw_pointer_cast(a->grp.data()), a->grp.size(), thrust::raw_pointer_cast(count_diff), plus_t<int_type>(), (int_type)0, context); } else { reduce(thrust::raw_pointer_cast(a->d_columns_int[s1_val].data()), a->mRecCount, thrust::raw_pointer_cast(count_diff), plus_t<int_type>(), context); }; } else { int_type* s3 = exe_vectors.top(); exe_vectors.pop(); if (a->grp_count > 1) { segreduce(s3, a->mRecCount, (int*)thrust::raw_pointer_cast(a->grp.data()), a->grp.size(), thrust::raw_pointer_cast(count_diff), plus_t<int_type>(), (int_type)0, context); } else { reduce(s3, a->mRecCount, thrust::raw_pointer_cast(count_diff), plus_t<int_type>(), context); } cudaFree(s3); }; exe_vectors.push(thrust::raw_pointer_cast(count_diff)); exe_type.push("NAME"); exe_value.push(""); exe_precision.push(get_decimals(a, s1_val, exe_precision)); }; }; if (ss.compare("NAME") == 0 || ss.compare("NUMBER") == 0 ) { exe_type.push(ss); if (ss.compare("NUMBER") == 0) { exe_nums.push(op_nums.front()); op_nums.pop(); exe_precision.push(op_nums_precision.front()); op_nums_precision.pop(); } else if (ss.compare("NAME") == 0) { exe_value.push(op_value.front()); ts = a->ts_cols[op_value.front()]; op_value.pop(); } } else { if (ss.compare("MUL") == 0 || ss.compare("ADD") == 0 || ss.compare("DIV") == 0 || ss.compare("MINUS") == 0) { // get 2 values from the stack s1 = exe_type.top(); 
exe_type.pop(); s2 = exe_type.top(); exe_type.pop(); if (s1.compare("NUMBER") == 0 && s2.compare("NUMBER") == 0) { n1 = exe_nums.top(); exe_nums.pop(); n2 = exe_nums.top(); exe_nums.pop(); auto p1 = exe_precision.top(); exe_precision.pop(); auto p2 = exe_precision.top(); exe_precision.pop(); auto pres = precision_func(p1, p2, ss); exe_precision.push(pres); if(p1) n1 = n1*(unsigned int)pow(10,p1); if(p2) n2 = n2*(unsigned int)pow(10,p2); if (ss.compare("ADD") == 0 ) res = n1+n2; else if (ss.compare("MUL") == 0 ) res = n1*n2; else if (ss.compare("DIV") == 0 ) res = n1/n2; else res = n1-n2; thrust::device_ptr<int_type> p = thrust::device_malloc<int_type>(a->mRecCount); thrust::sequence(p, p+(a->mRecCount),res,(int_type)0); exe_type.push("NAME"); exe_value.push(""); exe_vectors.push(thrust::raw_pointer_cast(p)); } else if (s1.compare("NAME") == 0 && s2.compare("NUMBER") == 0) { s1_val = exe_value.top(); exe_value.pop(); n1 = exe_nums.top(); exe_nums.pop(); auto p1 = exe_precision.top(); exe_precision.pop(); auto p2 = get_decimals(a, s1_val, exe_precision); int_type* t = get_vec(a, s1_val, exe_vectors, free_mem); auto pres = precision_func(p2, p1, ss); exe_precision.push(pres); exe_type.push("NAME"); exe_value.push(""); exe_vectors.push(a->op(t,n1,ss,1, p2, p1)); if(free_mem) cudaFree(t); } else if (s1.compare("NUMBER") == 0 && s2.compare("NAME") == 0) { n1 = exe_nums.top(); exe_nums.pop(); s1_val = exe_value.top(); exe_value.pop(); auto p1 = exe_precision.top(); exe_precision.pop(); auto p2 = get_decimals(a, s1_val, exe_precision); int_type* t = get_vec(a, s1_val, exe_vectors, free_mem); auto pres = precision_func(p2, p1, ss); exe_precision.push(pres); exe_type.push("NAME"); exe_value.push(""); exe_vectors.push(a->op(t,n1,ss,0, p2, p1)); if(free_mem) cudaFree(t); } else if (s1.compare("NAME") == 0 && s2.compare("NAME") == 0) { s1_val = exe_value.top(); exe_value.pop(); s2_val = exe_value.top(); exe_value.pop(); int_type* t1 = get_vec(a, s1_val, exe_vectors, free_mem); int_type* t = get_vec(a, s2_val, exe_vectors, free_mem1); auto p1 = get_decimals(a, s1_val, exe_precision); auto p2 = get_decimals(a, s2_val, exe_precision); auto pres = precision_func(p1, p2, ss); exe_precision.push(pres); exe_type.push("NAME"); exe_value.push(""); exe_vectors.push(a->op(t,t1,ss,0,p2,p1)); if(free_mem) cudaFree(t1); if(free_mem1) cudaFree(t); } } } } else { // here we need to save what is where col_val.push(op_value.front()); op_value.pop(); grp_type1.push(grp_type); if(!exe_nums.empty()) { //number col_type.push(0); exe_nums1.push(exe_nums.top()); exe_nums.pop(); exe_precision1.push(exe_precision.top()); exe_precision.pop(); }; if(!exe_value.empty() && exe_value.top() != "") { //field name col_type.push(1); exe_precision1.push(a->decimal_zeroes[exe_value.top()]); exe_value1.push(exe_value.top()); exe_ts.push(ts); exe_value.pop(); }; if(!exe_vectors.empty()) { //vector int exe_vectors1.push(exe_vectors.top()); exe_vectors.pop(); col_type.push(2); exe_precision1.push(exe_precision.top()); exe_precision.pop(); exe_value.pop(); }; colCount++; }; }; for(unsigned int j=0; j < colCount; j++) { if ((grp_type1.top()).compare("COUNT") == 0 ) b->grp_type[col_val.top()] = 0; else if ((grp_type1.top()).compare("AVG") == 0 ) b->grp_type[col_val.top()] = 1; else if ((grp_type1.top()).compare("SUM") == 0 ) b->grp_type[col_val.top()] = 2; else if ((grp_type1.top()).compare("NULL") == 0 ) b->grp_type[col_val.top()] = 3; else if ((grp_type1.top()).compare("MIN") == 0 ) b->grp_type[col_val.top()] = 4; else if 
((grp_type1.top()).compare("MAX") == 0 ) b->grp_type[col_val.top()] = 5; else if ((grp_type1.top()).compare("COUNTD") == 0 ) { b->grp_type[col_val.top()] = 6; }; if(col_type.top() == 0) { // create a vector if (a->grp_count) { thrust::device_ptr<int_type> count_diff = thrust::device_malloc<int_type>(res_size); thrust::gather(a->grp.begin(), a->grp.end(), thrust::make_constant_iterator((int)exe_nums1.top()), count_diff); b->addDeviceColumn(thrust::raw_pointer_cast(count_diff) , col_val.top(), res_size); thrust::device_free(count_diff); } else { thrust::device_ptr<int_type> s = thrust::device_malloc<int_type>(a->mRecCount); thrust::sequence(s, s+(a->mRecCount), (int)exe_nums1.top(), 0); b->addDeviceColumn(thrust::raw_pointer_cast(s), col_val.top(), a->mRecCount); } exe_nums1.pop(); b->decimal_zeroes[col_val.top()] = exe_precision1.top(); exe_precision1.pop(); } else if(col_type.top() == 1) { //modify what we push there in case of a grouping if (a->grp_count) { thrust::device_ptr<int_type> count_diff = thrust::device_malloc<int_type>(res_size); if(!exe_ts.top()) { thrust::gather(a->grp.begin(), a->grp.end(), a->d_columns_int[exe_value1.top()].begin(), count_diff); } else { thrust::device_vector<unsigned int> dd_tmp(res_size); thrust::gather(a->grp.begin(), a->grp.end(), rcol_matches.begin(), count_diff); thrust::gather(dd_tmp.begin(), dd_tmp.end(), rcol_dev.begin(), count_diff); }; b->addDeviceColumn(thrust::raw_pointer_cast(count_diff) , col_val.top(), res_size); thrust::device_free(count_diff); } else b->addDeviceColumn(thrust::raw_pointer_cast(a->d_columns_int[exe_value1.top()].data()) , col_val.top(), a->mRecCount); if(a->type[exe_value1.top()] == 0) { b->decimal_zeroes[col_val.top()] = exe_precision1.top(); b->ts_cols[col_val.top()] = exe_ts.top(); }; if(a->type[exe_value1.top()] == 2 || (a->type[exe_value1.top()] == 0 && a->string_map.find(exe_value1.top()) != a->string_map.end())) { b->string_map[col_val.top()] = a->string_map[exe_value1.top()]; }; exe_precision1.pop(); exe_ts.pop(); exe_value1.pop(); } else if(col_type.top() == 2) { // int if (a->grp_count) b->addDeviceColumn(exe_vectors1.top() , col_val.top(), res_size); else { if(!one_line) b->addDeviceColumn(exe_vectors1.top() , col_val.top(), a->mRecCount); else b->addDeviceColumn(exe_vectors1.top() , col_val.top(), 1); }; cudaFree(exe_vectors1.top()); exe_vectors1.pop(); b->decimal_zeroes[col_val.top()] = exe_precision1.top(); exe_precision1.pop(); } col_type.pop(); col_val.pop(); grp_type1.pop(); }; if (!a->grp_count) { if(!one_line) b->mRecCount = a->mRecCount; else b->mRecCount = 1; return one_line; } else { b->mRecCount = res_size; return 0; }; }
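// --------------------------------------------------------------------------
// Illustrative sanity check (not part of the original source).  The date
// functors above interpret their input as milliseconds since the Unix epoch:
// they divide by 1000 and add 11644473600 s (the 1601 -> 1970 offset) before
// decomposing into a civil date.  Because operator() is __host__ __device__,
// the decomposition can be exercised on the host; int_type is assumed to be
// the project-wide 64-bit integer type.  Wrapped in #if 0 so it does not
// affect compilation.
#if 0
#include <cassert>
static void test_date_functors()
{
    int_type src[2] = { 0LL,               // 1970-01-01T00:00:00Z
                        951782400000LL };  // 2000-02-29T00:00:00Z (leap day)
    int_type out[2] = { 0, 0 };

    gpu_getday day_of(src, out);           // yields year*10000 + month*100 + day
    day_of(0);  day_of(1);
    assert(out[0] == 19700101);
    assert(out[1] == 20000229);

    gpu_getmonth month_of(src, out);       // yields year*100 + month
    month_of(0); month_of(1);
    assert(out[0] == 197001);
    assert(out[1] == 200002);

    gpu_getyear year_of(src, out);         // yields the year only
    year_of(0); year_of(1);
    assert(out[0] == 1970);
    assert(out[1] == 2000);
}
#endif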
__constant__ size_t CUDA_minChunkY; __constant__ size_t CUDA_lookupSize; __device__ Vec3* CUDA_vertices; __device__ Vec2* CUDA_texCoords; __device__ Triangle* CUDA_triangles; __device__ color* CUDA_lookupColors; __device__ uint16_t* CUDA_lookupIndices; namespace CUDA { void setMinChunkY(size_t chunkY) { checkCUDA(cudaMemcpyToSymbol(CUDA_minChunkY, (const void*)&chunkY, sizeof(size_t), 0, cudaMemcpyHostToDevice)); } void setLookupSize(size_t size) { checkCUDA(cudaMemcpyToSymbol(CUDA_lookupSize, (const void*)&size, sizeof(size_t), 0, cudaMemcpyHostToDevice)); } cudaTextureObject_t* createTexture(Image& img) { cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindUnsigned); cudaArray_t cuArray; cudaMallocArray(&cuArray, &channelDesc, img.width, img.height); const size_t spitch = (size_t)img.width * img.numChannels * sizeof(uint8_t); size_t texAtlasSize = 0; const uint8_t* pixels = img.pixels(texAtlasSize); cudaMemcpy2DToArray(cuArray, 0, 0, pixels, spitch, spitch, img.height, cudaMemcpyHostToDevice); cudaResourceDesc resDesc{ cudaResourceTypeArray, { cuArray } }; cudaTextureDesc texDesc; memset(&texDesc, 0, sizeof(texDesc)); texDesc.addressMode[0] = cudaAddressModeClamp; texDesc.addressMode[1] = cudaAddressModeClamp; texDesc.filterMode = cudaFilterModePoint; texDesc.readMode = cudaReadModeElementType; texDesc.normalizedCoords = 1; cudaTextureObject_t* texture = new cudaTextureObject_t{}; cudaCreateTextureObject(texture, &resDesc, &texDesc, NULL); return texture; } //-----------------/ buffer init /-----------------// void initVertexBuffer(std::vector<Vec3> &buffer) { Vec3* tmp_devicePtr; cudaMalloc(&tmp_devicePtr, buffer.size() * sizeof(Vec3)); cudaMemcpy(tmp_devicePtr, buffer.data(), buffer.size() * sizeof(Vec3), cudaMemcpyHostToDevice); cudaMemcpyToSymbol(CUDA_vertices, &tmp_devicePtr, sizeof(Vec3*)); } void initTexCoordBuffer(std::vector<Vec2> &buffer) { Vec3* tmp_devicePtr; cudaMalloc(&tmp_devicePtr, buffer.size() * sizeof(Vec2)); cudaMemcpy(tmp_devicePtr, buffer.data(), buffer.size() * sizeof(Vec2), cudaMemcpyHostToDevice); cudaMemcpyToSymbol(CUDA_texCoords, &tmp_devicePtr, sizeof(Vec2*)); } void initTriangleBuffer(std::vector<Triangle> &buffer) { Vec3* tmp_devicePtr; cudaMalloc(&tmp_devicePtr, buffer.size() * sizeof(Triangle)); cudaMemcpy(tmp_devicePtr, buffer.data(), buffer.size() * sizeof(Triangle), cudaMemcpyHostToDevice); cudaMemcpyToSymbol(CUDA_triangles, &tmp_devicePtr, sizeof(Triangle*)); } void initLookupColorBuffer(std::vector<color> &buffer) { uint8_t* tmp_devicePtr; cudaMalloc(&tmp_devicePtr, buffer.size() * sizeof(uint8_t)); cudaMemcpy(tmp_devicePtr, buffer.data(), buffer.size() * sizeof(uint8_t), cudaMemcpyHostToDevice); cudaMemcpyToSymbol(CUDA_lookupColors, &tmp_devicePtr, sizeof(uint8_t*)); } void initLookupIndexBuffer(std::vector<uint16_t> &buffer) { uint16_t* tmp_devicePtr; cudaMalloc(&tmp_devicePtr, buffer.size() * sizeof(uint16_t)); cudaMemcpy(tmp_devicePtr, buffer.data(), buffer.size() * sizeof(uint16_t), cudaMemcpyHostToDevice); cudaMemcpyToSymbol(CUDA_lookupIndices, &tmp_devicePtr, sizeof(uint16_t*)); } //-----------------/ buffer cleanup /-----------------// void freeVertexBuffer() { checkCUDA(cudaFree(CUDA_vertices)); } void freeTexCoordBuffer() { checkCUDA(cudaFree(CUDA_texCoords)); } void freeTriangleBuffer() { checkCUDA(cudaFree(CUDA_triangles)); } void freeLookupColorBuffer() { checkCUDA(cudaFree(CUDA_lookupColors)); } void freeLookupIndexBuffer() { checkCUDA(cudaFree(CUDA_lookupIndices)); } } //-----------------/ vector 
math /-----------------// __device__ Vec3 add(const Vec3& p1, const Vec3& p2) { return { p1.x + p2.x, p1.y + p2.y, p1.z + p2.z }; } __device__ Vec3 sub(const Vec3& p1, const Vec3& p2) { return { p1.x - p2.x, p1.y - p2.y, p1.z - p2.z }; } __device__ Vec3 cross(const Vec3& p1, const Vec3& p2) { return { p1.y * p2.z - p2.y * p1.z, p1.z * p2.x - p2.z * p1.x, p1.x * p2.y - p2.x * p1.y }; } __device__ float dot(const Vec3& p1, const Vec3& p2) { return p1.x * p2.x + p1.y * p2.y + p1.z * p2.z; } //-----------------/ AABB triangle collision /-----------------// __device__ bool cuda_approxTriBoxOverlap(const Vec3& min, const Vec3& max, const Vec3& v0, const Vec3& v1, const Vec3& v2) { return !((v0.x > max.x && v1.x > max.x && v2.x > max.x) || (v0.y > max.y && v1.y > max.y && v2.y > max.y) || (v0.z > max.z && v1.z > max.z && v2.z > max.z) || (v0.x < min.x&& v1.x < min.x&& v2.x < min.x) || (v0.y < min.y&& v1.y < min.y&& v2.y < min.y) || (v0.z < min.z&& v1.z < min.z&& v2.z < min.z)); } __device__ void cuda_findMinMax(float x0, float x1, float x2, float& min, float& max) { min = max = x0; if (x1 < min) min = x1; if (x1 > max) max = x1; if (x2 < min) min = x2; if (x2 > max) max = x2; } __device__ bool cuda_planeBoxOverlap(const Vec3& normal, const Vec3& vert, const Vec3& maxbox) { Vec3 vmin{ 0, 0, 0 }; Vec3 vmax{ 0, 0, 0 }; if (normal.x > 0.0f) { vmin.x = -maxbox.x - vert.x; vmax.x = maxbox.x - vert.x; } else { vmin.x = maxbox.x - vert.x; vmax.x = -maxbox.x - vert.x; } if (normal.y > 0.0f) { vmin.y = -maxbox.y - vert.y; vmax.y = maxbox.y - vert.y; } else { vmin.y = maxbox.y - vert.y; vmax.y = -maxbox.y - vert.y; } if (normal.z > 0.0f) { vmin.z = -maxbox.z - vert.z; vmax.z = maxbox.z - vert.z; } else { vmin.z = maxbox.z - vert.z; vmax.z = -maxbox.z - vert.z; } if (dot(normal, vmin) > 0.0f) return false; if (dot(normal, vmax) >= 0.0f) return true; return false; } __device__ bool cuda_axisTestX01(float a, float b, float fa, float fb, const Vec3& v0, const Vec3& v2, const Vec3& boxhalfsize, float& rad, float& min, float& max, float& p0, float& p2) { p0 = a * v0.y - b * v0.z; p2 = a * v2.y - b * v2.z; if (p0 < p2) { min = p0; max = p2; } else { min = p2; max = p0; } rad = fa * boxhalfsize.y + fb * boxhalfsize.z; if (min > rad || max < -rad) return false; return true; } __device__ bool cuda_axisTestX2(float a, float b, float fa, float fb, const Vec3& v0, const Vec3& v1, const Vec3& boxhalfsize, float& rad, float& min, float& max, float& p0, float& p1) { p0 = a * v0.y - b * v0.z; p1 = a * v1.y - b * v1.z; if (p0 < p1) { min = p0; max = p1; } else { min = p1; max = p0; } rad = fa * boxhalfsize.y + fb * boxhalfsize.z; if (min > rad || max < -rad) return false; return true; } __device__ bool cuda_axisTestY02(float a, float b, float fa, float fb, const Vec3& v0, const Vec3& v2, const Vec3& boxhalfsize, float& rad, float& min, float& max, float& p0, float& p2) { p0 = -a * v0.x + b * v0.z; p2 = -a * v2.x + b * v2.z; if (p0 < p2) { min = p0; max = p2; } else { min = p2; max = p0; } rad = fa * boxhalfsize.x + fb * boxhalfsize.z; if (min > rad || max < -rad) return false; return true; } __device__ bool cuda_axisTestY1(float a, float b, float fa, float fb, const Vec3& v0, const Vec3& v1, const Vec3& boxhalfsize, float& rad, float& min, float& max, float& p0, float& p1) { p0 = -a * v0.x + b * v0.z; p1 = -a * v1.x + b * v1.z; if (p0 < p1) { min = p0; max = p1; } else { min = p1; max = p0; } rad = fa * boxhalfsize.x + fb * boxhalfsize.z; if (min > rad || max < -rad) return false; return true; } __device__ bool 
cuda_axisTestZ12(float a, float b, float fa, float fb, const Vec3& v1, const Vec3& v2, const Vec3& boxhalfsize, float& rad, float& min, float& max, float& p1, float& p2) { p1 = a * v1.x - b * v1.y; p2 = a * v2.x - b * v2.y; if (p1 < p2) { min = p1; max = p2; } else { min = p2; max = p1; } rad = fa * boxhalfsize.x + fb * boxhalfsize.y; if (min > rad || max < -rad) return false; return true; } __device__ bool cuda_axisTestZ0(float a, float b, float fa, float fb, const Vec3& v0, const Vec3& v1, const Vec3& boxhalfsize, float& rad, float& min, float& max, float& p0, float& p1) { p0 = a * v0.x - b * v0.y; p1 = a * v1.x - b * v1.y; if (p0 < p1) { min = p0; max = p1; } else { min = p1; max = p0; } rad = fa * boxhalfsize.x + fb * boxhalfsize.y; if (min > rad || max < -rad) return false; return true; } __device__ bool cuda_triBoxOverlap(const Vec3& boxcenter, const Vec3& boxhalfsize, const Vec3& tv0, const Vec3& tv1, const Vec3& tv2) { Vec3 v0, v1, v2; float min, max, p0, p1, p2, rad, fex, fey, fez; Vec3 normal, e0, e1, e2; v0 = sub(tv0, boxcenter); v1 = sub(tv1, boxcenter); v2 = sub(tv2, boxcenter); e0 = sub(v1, v0); e1 = sub(v2, v1); e2 = sub(v0, v2); fex = fabsf(e0.x); fey = fabsf(e0.y); fez = fabsf(e0.z); if (!cuda_axisTestX01(e0.z, e0.y, fez, fey, v0, v2, boxhalfsize, rad, min, max, p0, p2)) return false; if (!cuda_axisTestY02(e0.z, e0.x, fez, fex, v0, v2, boxhalfsize, rad, min, max, p0, p2)) return false; if (!cuda_axisTestZ12(e0.y, e0.x, fey, fex, v1, v2, boxhalfsize, rad, min, max, p1, p2)) return false; fex = fabsf(e1.x); fey = fabsf(e1.y); fez = fabsf(e1.z); if (!cuda_axisTestX01(e1.z, e1.y, fez, fey, v0, v2, boxhalfsize, rad, min, max, p0, p2)) return false; if (!cuda_axisTestY02(e1.z, e1.x, fez, fex, v0, v2, boxhalfsize, rad, min, max, p0, p2)) return false; if (!cuda_axisTestZ0(e1.y, e1.x, fey, fex, v0, v1, boxhalfsize, rad, min, max, p0, p1)) return false; fex = fabsf(e2.x); fey = fabsf(e2.y); fez = fabsf(e2.z); if (!cuda_axisTestX2(e2.z, e2.y, fez, fey, v0, v1, boxhalfsize, rad, min, max, p0, p1)) return false; if (!cuda_axisTestY1(e2.z, e2.x, fez, fex, v0, v1, boxhalfsize, rad, min, max, p0, p1)) return false; if (!cuda_axisTestZ12(e2.y, e2.x, fey, fex, v1, v2, boxhalfsize, rad, min, max, p1, p2)) return false; cuda_findMinMax(v0.x, v1.x, v2.x, min, max); if (min > boxhalfsize.x || max < -boxhalfsize.x) return false; cuda_findMinMax(v0.y, v1.y, v2.y, min, max); if (min > boxhalfsize.y || max < -boxhalfsize.y) return false; cuda_findMinMax(v0.z, v1.z, v2.z, min, max); if (min > boxhalfsize.z || max < -boxhalfsize.z) return false; normal = cross(e0, e1); if (!cuda_planeBoxOverlap(normal, v0, boxhalfsize)) return false; return true; } //-----------------/ main kernel /-----------------// __global__ void chunkInserter(const size_t* indexBuffer, const size_t numIndices, cudaTextureObject_t tex, uint16_t* blockBuffer, const int chunkX, const int chunkZ) { const size_t index = (size_t)threadIdx.x + (size_t)blockIdx.x * (size_t)blockDim.x; const Vec3 boxCenter = { chunkX * 16.0f + index % 16 + 0.5f, (index / 4096 + CUDA_minChunkY) * 16.0f + (index % 4096) / 256 + 0.5f, chunkZ * 16.0f + (index / 16) % 16 + 0.5f }; const Vec3 boxHalfSize = { 0.5f, 0.5f, 0.5f }; const Vec3 boxMin = sub(boxCenter, boxHalfSize); const Vec3 boxMax = add(boxCenter, boxHalfSize); for (size_t i = 0; i < numIndices; i++) { const Triangle* tri = &CUDA_triangles[indexBuffer[i]]; if (cuda_approxTriBoxOverlap(boxMin, boxMax, CUDA_vertices[tri->vertexIndices[0]], CUDA_vertices[tri->vertexIndices[1]], 
CUDA_vertices[tri->vertexIndices[2]]) && cuda_triBoxOverlap(boxCenter, boxHalfSize, CUDA_vertices[tri->vertexIndices[0]], CUDA_vertices[tri->vertexIndices[1]], CUDA_vertices[tri->vertexIndices[2]])){ if (tri->blockID == UINT16_MAX) { const Vec3 s = sub(CUDA_vertices[tri->vertexIndices[1]], CUDA_vertices[tri->vertexIndices[0]]); const Vec3 t = sub(CUDA_vertices[tri->vertexIndices[2]], CUDA_vertices[tri->vertexIndices[0]]); const Vec3 n = cross(s, t); const Vec3 delta = sub(boxCenter, CUDA_vertices[tri->vertexIndices[0]]); const float nnInv = 1.0f / dot(n, n); const float w = dot(cross(s, delta), n) * nnInv; const float v = dot(cross(delta, t), n) * nnInv; const float u = 1.0f - w - v; const float x = u * CUDA_texCoords[tri->vertexIndices[0]].u + v * CUDA_texCoords[tri->vertexIndices[1]].u + w * CUDA_texCoords[tri->vertexIndices[2]].u; const float y = u * CUDA_texCoords[tri->vertexIndices[0]].v + v * CUDA_texCoords[tri->vertexIndices[1]].v + w * CUDA_texCoords[tri->vertexIndices[2]].v; const uchar4 color = tex2D<uchar4>(tex, x, y); size_t closestMatch = 0; uint64_t minDelta = UINT64_MAX; for (size_t j = 0; j < CUDA_lookupSize; j++) { const uint64_t delta = (uint64_t)abs(color.x - CUDA_lookupColors[j].r) + (uint64_t)abs(color.y - CUDA_lookupColors[j].g) + (uint64_t)abs(color.z - CUDA_lookupColors[j].b) + (uint64_t)abs(color.w - CUDA_lookupColors[j].a); if (delta < minDelta) { minDelta = delta; closestMatch = j; } } blockBuffer[index] = CUDA_lookupIndices[closestMatch]; } else { blockBuffer[index] = tri->blockID; } break; } } } namespace CUDA { void insertBlocks(size_t numBlocks, size_t numThreads, size_t* indexBuffer, size_t numIndices, cudaTextureObject_t tex, uint16_t* blockBuffer, int chunkX, int chunkZ) { chunkInserter <<<numBlocks, numThreads>>> (indexBuffer, numIndices, tex, blockBuffer, chunkX, chunkZ); } }
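// --------------------------------------------------------------------------
// Hedged corrected sketch (not from the original source) of the buffer
// init/cleanup pattern used above. Two details differ from the code as
// written: the temporary host pointer should match the element type of the
// buffer it allocates (the Vec2/Triangle initializers reuse a Vec3*, and the
// color initializer sizes its allocation with sizeof(uint8_t) rather than
// sizeof(color)), and cudaFree must be given the device pointer read back out
// of the __device__ symbol, not the symbol itself. CUDA_exampleBuffer /
// initExampleBuffer / freeExampleBuffer are hypothetical names; checkCUDA is
// assumed to be the project's existing error-check macro and Vec2 a POD type.
#include <vector>
#include <cuda_runtime.h>

__device__ Vec2* CUDA_exampleBuffer;   // mirrors CUDA_texCoords

void initExampleBuffer(const std::vector<Vec2>& buffer) {
    Vec2* devPtr = nullptr;            // element type matches the data
    checkCUDA(cudaMalloc(&devPtr, buffer.size() * sizeof(Vec2)));
    checkCUDA(cudaMemcpy(devPtr, buffer.data(),
                         buffer.size() * sizeof(Vec2), cudaMemcpyHostToDevice));
    // Publish the device pointer through the __device__ symbol.
    checkCUDA(cudaMemcpyToSymbol(CUDA_exampleBuffer, &devPtr, sizeof(Vec2*)));
}

void freeExampleBuffer() {
    Vec2* devPtr = nullptr;
    // Read the pointer back out of the symbol before freeing it.
    checkCUDA(cudaMemcpyFromSymbol(&devPtr, CUDA_exampleBuffer, sizeof(Vec2*)));
    checkCUDA(cudaFree(devPtr));
}
// The same shape applies to the vertex, triangle, and lookup buffers above;
// only the element type changes.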
#include "bsp.h" #include "cudabsp.h" #include "cudarad.h" #include "cudautils.h" static __device__ inline float luma_from_rgb(float3 rgb) { //return sqrt(dot(rgb / 255.0, make_float3(0.299, 0.587, 0.114))); return sqrt(dot(rgb / 255.0f, make_float3(1.0f))); } static __device__ inline float clamp(float x, float lower, float upper) { return fmaxf(lower, fminf(upper, x)); } static __device__ float3 sample( CUDABSP::CUDABSP& cudaBSP, CUDARAD::FaceInfo& faceInfo, float3* samplesIn, size_t width, size_t height, float s, float t ) { if (0.0f <= s && s < width) { if (0.0f <= t && t < height) { return samplesIn[static_cast<size_t>(t * width + s)]; } } // If we're not in bounds, we have no choice but to take another light // sample. return DirectLighting::sample_at(cudaBSP, faceInfo, s, t); } static __device__ float3 subsample( CUDABSP::CUDABSP& cudaBSP, CUDARAD::FaceInfo& faceInfo, float3* samplesIn, size_t width, size_t height, float s, float t ) { const float EPSILON = 1e-3; s = fminf(fmaxf(0.0f, s), width - EPSILON); t = fminf(fmaxf(0.0f, t), height - EPSILON); int s0 = static_cast<int>(floorf(s)); int t0 = static_cast<int>(floorf(t)); int s1 = s0 + 1; int t1 = t0 + 1; float rWeight = s - floorf(s); float lWeight = 1.0f - rWeight; float dWeight = t - floorf(t); float uWeight = 1.0f - dWeight; auto get_sample = [&] (float s, float t) -> float3 { return sample(cudaBSP, faceInfo, samplesIn, width, height, s, t); }; float3 sampleUL = get_sample(s0, t0); float3 sampleUR = get_sample(s1, t0); float3 sampleDL = get_sample(s0, t1); float3 sampleDR = get_sample(s1, t1); float3 sampleU = lWeight * sampleUL + rWeight * sampleUR; float3 sampleD = lWeight * sampleDL + rWeight * sampleDR; return uWeight * sampleU + dWeight * sampleD; } static __device__ const float EDGE_THRESHOLD = 0.125; // 1/8 static __device__ const float EDGE_THRESHOLD_MIN = 0.03125; // 1/32 static __device__ const size_t MAX_ITERATIONS = 12; static __device__ const float SUBPIXEL_QUALITY = 0.75; /** * CUDA FXAA implementation based on shader code at: * http://blog.simonrodriguez.fr/articles/30-07-2016_implementing_fxaa.html * and also: * http://developer.download.nvidia.com/assets/gamedev/files/sdk/11/FXAA_WhitePaper.pdf */ __global__ void map_samples_fxaa( CUDABSP::CUDABSP* pCudaBSP, CUDARAD::FaceInfo* pFaceInfo, float3* samplesIn, /* output */ float3* samplesOut, size_t width, size_t height ) { float s = static_cast<float>(blockIdx.x * blockDim.x + threadIdx.x); float t = static_cast<float>(blockIdx.y * blockDim.y + threadIdx.y); if (s >= width || t >= height) { return; } auto get_sample = [&] (float s, float t) -> float3 { return sample(*pCudaBSP, *pFaceInfo, samplesIn, width, height, s, t); }; auto get_subsample = [&] (float s, float t) -> float3 { return subsample(*pCudaBSP, *pFaceInfo, samplesIn, width, height, s, t); }; float3 rgbSample = get_sample(s, t); float lumaCenter = luma_from_rgb(rgbSample); /* Grab the lumas of our four direct neighbors. */ float lumaUp = luma_from_rgb(get_sample(s, t - 1)); float lumaDown = luma_from_rgb(get_sample(s, t + 1)); float lumaLeft = luma_from_rgb(get_sample(s - 1, t)); float lumaRight = luma_from_rgb(get_sample(s + 1, t)); /* Determine the color contrast between ourselves and our neighbors. */ float lumaMin = fminf( lumaCenter, fminf( fminf(lumaUp, lumaDown), fminf(lumaLeft, lumaRight) ) ); float lumaMax = fmaxf( lumaCenter, fmaxf( fmaxf(lumaUp, lumaDown), fmaxf(lumaLeft, lumaRight) ) ); float lumaRange = lumaMax - lumaMin; /* * Luma contrast too low (or this is a really dark spot). 
* Don't perform AA. */ if (lumaRange < fmaxf(EDGE_THRESHOLD_MIN, lumaMax * EDGE_THRESHOLD)) { samplesOut[static_cast<size_t>(t * width + s)] = rgbSample; return; } //else { // samplesOut[t * width + s] = make_float3(255.0, 0.0, 0.0); // return; //} /* Grab the lumas of our remaining corner neighbors. */ float lumaUL = luma_from_rgb(get_sample(s - 1, t - 1)); float lumaUR = luma_from_rgb(get_sample(s + 1, t - 1)); float lumaDL = luma_from_rgb(get_sample(s - 1, t + 1)); float lumaDR = luma_from_rgb(get_sample(s + 1, t + 1)); /* Combine the edge lumas. */ float lumaUD = lumaUp + lumaDown; float lumaLR = lumaLeft + lumaRight; /* Combine the corner lumas. */ float lumaULUR = lumaUL + lumaUR; float lumaDLDR = lumaDL + lumaDR; float lumaULDL = lumaUL + lumaDL; float lumaURDR = lumaUR + lumaDR; /* Estimate horizontal and vertical gradients. */ float gradientHoriz = ( fabsf(-2.0f * lumaLeft + lumaULDL) + fabsf(-2.0f * lumaCenter + lumaUD) * 2.0f + fabsf(-2.0f * lumaRight + lumaURDR) ); float gradientVerti = ( fabsf(-2.0f * lumaUp + lumaULUR) + fabsf(-2.0f * lumaCenter + lumaLR) * 2.0f + fabsf(-2.0f * lumaDown + lumaDLDR) ); /* Are we at a horizontal or vertical edge? */ bool isHoriz = (gradientHoriz >= gradientVerti); //if (isHoriz) { // samplesOut[t * width + s] = make_float3(255.0, 0.0, 0.0); //} //else { // samplesOut[t * width + s] = make_float3(0.0, 255.0, 0.0); //} //return; /* Choose two lumas in the direction opposite of the edge. */ float luma1 = isHoriz ? lumaUp : lumaLeft; float luma2 = isHoriz ? lumaDown : lumaRight; /* Compute their gradients. */ float gradient1 = luma1 - lumaCenter; float gradient2 = luma2 - lumaCenter; /* Choose the steeper gradient. */ bool grad1Steeper = fabsf(gradient1) >= fabsf(gradient2); /* Normalize the gradients. */ float gradientNorm = 0.25f * fmaxf(fabsf(gradient1), fabsf(gradient2)); /* Determine directional luma average. */ float lumaLocalAvg; if (grad1Steeper) { lumaLocalAvg = 0.5f * (luma1 + lumaCenter); } else { lumaLocalAvg = 0.5f * (luma2 + lumaCenter); } /* Subsample locations for each iteration. */ float iteration1S = static_cast<float>(s); float iteration1T = static_cast<float>(t); float iteration2S = iteration1S; float iteration2T = iteration1T; /* Offset our sample locations toward the edge by half a pixel. */ if (isHoriz) { iteration1T += grad1Steeper ? -0.5f : 0.5f; iteration2T += grad1Steeper ? -0.5f : 0.5f; } else { iteration1S += grad1Steeper ? -0.5f : 0.5f; iteration2S += grad1Steeper ? -0.5f : 0.5f; } /* Determine iteration offsets. */ size_t offsetS = isHoriz ? 1 : 0; size_t offsetT = isHoriz ? 0 : 1; iteration1S -= offsetS; iteration1T -= offsetT; iteration2S += offsetS; iteration2T += offsetT; /* Iterate! */ float lumaEnd1; float lumaEnd2; float lumaDiff1; float lumaDiff2; bool reached1 = false; bool reached2 = false; for (size_t i=0; i<MAX_ITERATIONS; i++) { /* Sample lumas in both directions along the edge. */ if (!reached1) { lumaEnd1 = luma_from_rgb(get_subsample(iteration1S, iteration1T)); lumaDiff1 = lumaEnd1 - lumaLocalAvg; } if (!reached2) { lumaEnd2 = luma_from_rgb(get_subsample(iteration2S, iteration2T)); lumaDiff2 = lumaEnd2 - lumaLocalAvg; } /* Did we reach the end of the edge? */ reached1 = lumaDiff1 < 0.0f && (fabsf(lumaDiff1) >= gradientNorm); reached2 = lumaDiff2 < 0.0f && (fabsf(lumaDiff2) >= gradientNorm); /* If we've reached the end, stop iteration. */ if (reached1 && reached2) { break; } /* But if we HAVEN'T reached the end, continue... 
*/ if (!reached1) { iteration1S -= offsetS; iteration1T -= offsetT; } if (!reached2) { iteration2S += offsetS; iteration2T += offsetT; } } /* Determine how far we've traveled along the edge. */ float dist1 = isHoriz ? (s - iteration1S) : (t - iteration1T); float dist2 = isHoriz ? (iteration2S - s) : (iteration2T - t); /* Which way is closer? */ bool dir1Closer = dist1 < dist2; float closerDist = fminf(dist1, dist2); /* Total length of the edge. */ float edgeLen = dist1 + dist2; /* * The pixel offset where we should subsample, in the direction of the * closer edge endpoint. */ float pixelOffset; if ((lumaCenter < lumaLocalAvg) != ((dir1Closer ? lumaEnd1 : lumaEnd2) < 0.0f)) { pixelOffset = 0.0f; } else { pixelOffset = -closerDist / edgeLen + 0.5f; } //printf( // "(%u, %u) %s distance: %f / %f (%f) Offset: %f\n", // static_cast<unsigned int>(s), static_cast<unsigned int>(t), // isHoriz ? "horizontal" : "vertical", // closerDist, edgeLen, closerDist / edgeLen, // pixelOffset //); ///* // * Subpixel antialiasing // */ ///* Weighted average of all the lumas in our local 3x3 grid. */ //float lumaAvg = ( // (1.0 / 12.0) * (2.0 * (lumaUD + lumaLR) + lumaULDL + lumaURDR) //); //float subpixelOffset1 = clamp( // fabsf(lumaAvg - lumaCenter) / lumaRange, // 0.0, 1.0 //); //float subpixelOffset2 = ( // (-2.0 * subpixelOffset1 + 3.0) * subpixelOffset1 * subpixelOffset1 //); //float subpixelOffset = ( // subpixelOffset2 * subpixelOffset2 * SUBPIXEL_QUALITY //); //float finalOffset = fmaxf(subpixelOffset, pixelOffset); float finalOffset = pixelOffset; if (grad1Steeper) { finalOffset = -finalOffset; } /* Determine the final subsample coordinates. */ float finalS = static_cast<float>(s); float finalT = static_cast<float>(t); if (isHoriz) { finalT += finalOffset; } else { finalS += finalOffset; } /* Final subsample... */ float3 color = get_subsample(finalS, finalT); //{ // int s0 = static_cast<int>(floorf(s)); // int t0 = static_cast<int>(floorf(t)); // int s1 = s0 + 1; // int t1 = t0 + 1; // if (s0 < 0) { // s0 = 0; // } // if (t0 < 0) { // t0 = 0; // } // if (s1 >= width) { // s1 = width - 1; // } // if (t1 >= height) { // t1 = height - 1; // } // float3 sampleUL = samplesIn[t0 * width + s0]; // float3 sampleUR = samplesIn[t0 * width + s1]; // float3 sampleDL = samplesIn[t1 * width + s0]; // float3 sampleDR = samplesIn[t1 * width + s1]; // printf( // "(%u, %u) sampled at (%f, %f)\n" // "\tUL(%f, %f, %f) UR(%f, %f, %f)\n" // "\tDL(%f, %f, %f) DR(%f, %f, %f)\n" // "\tyields (%f, %f, %f)\n", // static_cast<unsigned int>(s), static_cast<unsigned int>(t), // finalS, finalT, // sampleUL.x, sampleUL.y, sampleUL.z, // sampleUR.x, sampleUR.y, sampleUR.z, // sampleDL.x, sampleDL.y, sampleDL.z, // sampleDR.x, sampleDR.y, sampleDR.z, // color.x, color.y, color.z // ); //} //color = isHoriz ? // make_float3(color.x * 10.0, color.y, color.z) : // make_float3(color.x, color.y * 10.0, color.z); /* ... and we're done! 
*/ samplesOut[static_cast<size_t>(t * width + s)] = color; } __global__ void map_faces( CUDABSP::CUDABSP* pCudaBSP, CUDARAD::FaceInfo* faceInfos ) { size_t faceIndex = blockIdx.x * blockDim.x + threadIdx.x; if (faceIndex >= pCudaBSP->numFaces) { return; } faceInfos[faceIndex] = CUDARAD::FaceInfo(*pCudaBSP, faceIndex); CUDARAD::FaceInfo& faceInfo = faceInfos[faceIndex]; size_t width = faceInfo.lightmapWidth; size_t height = faceInfo.lightmapHeight; size_t numSamples = faceInfo.lightmapSize; size_t startIndex = faceInfo.lightmapStartIndex; float3* lightSamples = &pCudaBSP->lightSamples[startIndex]; float3* results; CUDA_CHECK_ERROR_DEVICE( cudaMalloc(&results, sizeof(float3) * numSamples) ); const size_t BLOCK_WIDTH = 16; const size_t BLOCK_HEIGHT = 16; dim3 gridDim( div_ceil(width, BLOCK_WIDTH), div_ceil(height, BLOCK_HEIGHT) ); dim3 blockDim(BLOCK_WIDTH, BLOCK_HEIGHT); KERNEL_LAUNCH_DEVICE( map_samples_fxaa, gridDim, blockDim, pCudaBSP, &faceInfo, lightSamples, results, width, height ); CUDA_CHECK_ERROR_DEVICE(cudaDeviceSynchronize()); /* Transfer the AA'd results back into the light sample buffer. */ memcpy(lightSamples, results, sizeof(float3) * numSamples); //CUDA_CHECK_ERROR_DEVICE(cudaFree(faceInfoBuffer)); CUDA_CHECK_ERROR_DEVICE(cudaFree(results)); } namespace CUDAFXAA { void antialias_lightsamples(CUDABSP::CUDABSP* pCudaBSP) { size_t numFaces; CUDA_CHECK_ERROR( cudaMemcpy( &numFaces, &pCudaBSP->numFaces, sizeof(size_t), cudaMemcpyDeviceToHost ) ); /* * Allocate an array of FaceInfo structures, which will be needed for * each round of FXAA. */ CUDARAD::FaceInfo* faceInfos; CUDA_CHECK_ERROR( cudaMalloc(&faceInfos, sizeof(CUDARAD::FaceInfo) * numFaces) ); const size_t BLOCK_WIDTH = 1; size_t numBlocks = div_ceil(numFaces, BLOCK_WIDTH); KERNEL_LAUNCH( map_faces, numBlocks, BLOCK_WIDTH, pCudaBSP, faceInfos ); CUDA_CHECK_ERROR(cudaDeviceSynchronize()); CUDA_CHECK_ERROR(cudaFree(faceInfos)); } }
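// --------------------------------------------------------------------------
// Hedged reference sketch (not from the original source): luma_from_rgb at
// the top of this file intentionally uses an unweighted average, with the
// Rec. 601 weighted form left in a comment. The weighted variant would look
// like the following, assuming the same float3 dot()/operator/ helpers that
// the rest of this file already relies on.
static __device__ inline float luma_rec601(float3 rgb) {
    // Same sqrt-compressed form as luma_from_rgb, but with perceptual weights.
    return sqrtf(dot(rgb / 255.0f, make_float3(0.299f, 0.587f, 0.114f)));
}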
// Make sure we don't allow dynamic initialization for device // variables, but accept empty constructors allowed by CUDA. // RUN: %clang_cc1 -verify %s -triple nvptx64-nvidia-cuda -fcuda-is-device -std=c++11 %s #ifdef __clang__ #include "Inputs/cuda.h" #endif // Use the types we share with CodeGen tests. #include "Inputs/cuda-initializers.h" __shared__ int s_v_i = 1; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __device__ int d_v_f = f(); // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __shared__ int s_v_f = f(); // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ int c_v_f = f(); // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __shared__ T s_t_i = {2}; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __device__ T d_t_i = {2}; __constant__ T c_t_i = {2}; __device__ ECD d_ecd_i{}; __shared__ ECD s_ecd_i{}; __constant__ ECD c_ecd_i{}; __device__ EC d_ec_i(3); // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __shared__ EC s_ec_i(3); // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ EC c_ec_i(3); // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __device__ EC d_ec_i2 = {3}; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __shared__ EC s_ec_i2 = {3}; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ EC c_ec_i2 = {3}; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __device__ ETC d_etc_i(3); // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __shared__ ETC s_etc_i(3); // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ ETC c_etc_i(3); // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __device__ ETC d_etc_i2 = {3}; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __shared__ ETC s_etc_i2 = {3}; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ ETC c_etc_i2 = {3}; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __device__ UC d_uc; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __shared__ UC s_uc; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ UC c_uc; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __device__ UD d_ud; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __shared__ UD s_ud; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} 
__constant__ UD c_ud; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __device__ ECI d_eci; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __shared__ ECI s_eci; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ ECI c_eci; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __device__ NEC d_nec; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __shared__ NEC s_nec; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ NEC c_nec; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __device__ NED d_ned; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __shared__ NED s_ned; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ NED c_ned; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __device__ NCV d_ncv; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __shared__ NCV s_ncv; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ NCV c_ncv; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __device__ VD d_vd; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __shared__ VD s_vd; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ VD c_vd; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __device__ NCF d_ncf; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __shared__ NCF s_ncf; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ NCF c_ncf; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __shared__ NCFS s_ncfs; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __device__ UTC d_utc; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __shared__ UTC s_utc; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ UTC c_utc; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __device__ UTC d_utc_i(3); // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __shared__ UTC s_utc_i(3); // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ UTC c_utc_i(3); // expected-error@-1 {{dynamic initialization is not supported 
for __device__, __constant__, __shared__, and __managed__ variables.}} __device__ NETC d_netc; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __shared__ NETC s_netc; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ NETC c_netc; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __device__ NETC d_netc_i(3); // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __shared__ NETC s_netc_i(3); // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ NETC c_netc_i(3); // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __device__ EC_I_EC1 d_ec_i_ec1; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __shared__ EC_I_EC1 s_ec_i_ec1; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ EC_I_EC1 c_ec_i_ec1; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __device__ T_V_T d_t_v_t; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __shared__ T_V_T s_t_v_t; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ T_V_T c_t_v_t; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __device__ T_B_NEC d_t_b_nec; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __shared__ T_B_NEC s_t_b_nec; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ T_B_NEC c_t_b_nec; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __device__ T_F_NEC d_t_f_nec; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __shared__ T_F_NEC s_t_f_nec; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ T_F_NEC c_t_f_nec; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __device__ T_FA_NEC d_t_fa_nec; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __shared__ T_FA_NEC s_t_fa_nec; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ T_FA_NEC c_t_fa_nec; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __device__ T_B_NED d_t_b_ned; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __shared__ T_B_NED s_t_b_ned; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ T_B_NED c_t_b_ned; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, 
__shared__, and __managed__ variables.}} __device__ T_F_NED d_t_f_ned; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __shared__ T_F_NED s_t_f_ned; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ T_F_NED c_t_f_ned; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __device__ T_FA_NED d_t_fa_ned; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} __shared__ T_FA_NED s_t_fa_ned; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ T_FA_NED c_t_fa_ned; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} // Verify that local variables may be static on device // side and that they conform to the initialization constraints. // __shared__ can't be initialized at all and others don't support dynamic initialization. __device__ void df_sema() { static __device__ int ds; static __constant__ int dc; static int v; static const int cv = 1; static const __device__ int cds = 1; static const __constant__ int cdc = 1; // __shared__ does not need to be explicitly static. __shared__ int lsi; // __constant__, __device__, and __managed__ can not be non-static local __constant__ int lci; // expected-error@-1 {{__constant__, __device__, and __managed__ are not allowed on non-static local variables}} __device__ int ldi; // expected-error@-1 {{__constant__, __device__, and __managed__ are not allowed on non-static local variables}} // Same test cases as for the globals above. 
static __device__ int d_v_f = f(); // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __shared__ int s_v_f = f(); // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __constant__ int c_v_f = f(); // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __shared__ T s_t_i = {2}; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __device__ T d_t_i = {2}; static __constant__ T c_t_i = {2}; static __device__ ECD d_ecd_i; static __shared__ ECD s_ecd_i; static __constant__ ECD c_ecd_i; static __device__ EC d_ec_i(3); // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __shared__ EC s_ec_i(3); // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __constant__ EC c_ec_i(3); // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __device__ EC d_ec_i2 = {3}; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __shared__ EC s_ec_i2 = {3}; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __constant__ EC c_ec_i2 = {3}; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __device__ ETC d_etc_i(3); // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __shared__ ETC s_etc_i(3); // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __constant__ ETC c_etc_i(3); // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __device__ ETC d_etc_i2 = {3}; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __shared__ ETC s_etc_i2 = {3}; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __constant__ ETC c_etc_i2 = {3}; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __device__ UC d_uc; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __shared__ UC s_uc; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __constant__ UC c_uc; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __device__ UD d_ud; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __shared__ UD s_ud; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __constant__ UD c_ud; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __device__ ECI d_eci; // expected-error@-1 {{dynamic initialization is not supported for __device__, 
__constant__, __shared__, and __managed__ variables.}} static __shared__ ECI s_eci; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __constant__ ECI c_eci; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __device__ NEC d_nec; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __shared__ NEC s_nec; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __constant__ NEC c_nec; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __device__ NED d_ned; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __shared__ NED s_ned; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __constant__ NED c_ned; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __device__ NCV d_ncv; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __shared__ NCV s_ncv; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __constant__ NCV c_ncv; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __device__ VD d_vd; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __shared__ VD s_vd; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __constant__ VD c_vd; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __device__ NCF d_ncf; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __shared__ NCF s_ncf; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __constant__ NCF c_ncf; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __shared__ NCFS s_ncfs; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __device__ UTC d_utc; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __shared__ UTC s_utc; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __constant__ UTC c_utc; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __device__ UTC d_utc_i(3); // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __shared__ UTC s_utc_i(3); // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __constant__ UTC c_utc_i(3); // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __device__ 
NETC d_netc; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __shared__ NETC s_netc; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __constant__ NETC c_netc; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __device__ NETC d_netc_i(3); // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __shared__ NETC s_netc_i(3); // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __constant__ NETC c_netc_i(3); // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __device__ EC_I_EC1 d_ec_i_ec1; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __shared__ EC_I_EC1 s_ec_i_ec1; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __constant__ EC_I_EC1 c_ec_i_ec1; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __device__ T_V_T d_t_v_t; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __shared__ T_V_T s_t_v_t; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __constant__ T_V_T c_t_v_t; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __device__ T_B_NEC d_t_b_nec; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __shared__ T_B_NEC s_t_b_nec; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __constant__ T_B_NEC c_t_b_nec; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __device__ T_F_NEC d_t_f_nec; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __shared__ T_F_NEC s_t_f_nec; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __constant__ T_F_NEC c_t_f_nec; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __device__ T_FA_NEC d_t_fa_nec; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __shared__ T_FA_NEC s_t_fa_nec; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __constant__ T_FA_NEC c_t_fa_nec; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __device__ T_B_NED d_t_b_ned; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __shared__ T_B_NED s_t_b_ned; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __constant__ T_B_NED c_t_b_ned; // 
expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __device__ T_F_NED d_t_f_ned; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __shared__ T_F_NED s_t_f_ned; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __constant__ T_F_NED c_t_f_ned; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __device__ T_FA_NED d_t_fa_ned; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} static __shared__ T_FA_NED s_t_fa_ned; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __constant__ T_FA_NED c_t_fa_ned; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}} } __host__ __device__ void hd_sema() { static int x = 42; } inline __host__ __device__ void hd_emitted_host_only() { static int x = 42; // no error on device because this is never codegen'ed there. } void call_hd_emitted_host_only() { hd_emitted_host_only(); } // Verify that we also check field initializers in instantiated structs. struct NontrivialInitializer { __host__ __device__ NontrivialInitializer() : x(43) {} int x; }; template <typename T> __global__ void bar() { __shared__ T bad; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} } void instantiate() { bar<NontrivialInitializer><<<1, 1>>>(); // expected-note@-1 {{in instantiation of function template specialization 'bar<NontrivialInitializer>' requested here}} }
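// --------------------------------------------------------------------------
// Hedged illustrative additions (not part of the original test): examples of
// initializations the rules above do accept, i.e. they should produce no
// diagnostics under -verify. ECD comes from Inputs/cuda-initializers.h; the
// ok_* names are introduced here for illustration only.
__device__ int ok_d = 1;        // constant initialization: allowed
__constant__ int ok_c = 2;      // constant initialization: allowed
__device__ ECD ok_ecd;          // empty constructor: allowed by CUDA
__global__ void ok_kernel() {
  __shared__ int ok_s;          // no initializer, so no error
  ok_s = threadIdx.x;           // plain assignment is not initialization
}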
#include "dcn_v2_psroi_pooling_cuda_double.h" #include <cstdio> #include <algorithm> #include <cstring> #define CUDA_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; \ i < (n); \ i += blockDim.x * gridDim.x) const int CUDA_NUM_THREADS = 1024; inline int GET_BLOCKS(const int N) { return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS; } #if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600 #else __device__ double atomicAdd(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); } while (assumed != old); return __longlong_as_double(old); } #endif __device__ double bilinear_interp( const double *data, const double x, const double y, const int width, const int height) { int x1 = floor(x); int x2 = ceil(x); int y1 = floor(y); int y2 = ceil(y); double dist_x = (double)(x - x1); double dist_y = (double)(y - y1); double value11 = data[y1 * width + x1]; double value12 = data[y2 * width + x1]; double value21 = data[y1 * width + x2]; double value22 = data[y2 * width + x2]; double value = (1 - dist_x) * (1 - dist_y) * value11 + (1 - dist_x) * dist_y * value12 + dist_x * (1 - dist_y) * value21 + dist_x * dist_y * value22; return value; } __global__ void DeformablePSROIPoolForwardKernel( const int count, const double *bottom_data, const double spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const double *bottom_rois, const double *bottom_trans, const int no_trans, const double trans_std, const int sample_per_part, const int output_dim, const int group_size, const int part_size, const int num_classes, const int channels_each_class, double *top_data, double *top_count) { CUDA_KERNEL_LOOP(index, count) { // The output is in order (n, ctop, ph, pw) int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int ctop = (index / pooled_width / pooled_height) % output_dim; int n = index / pooled_width / pooled_height / output_dim; // [start, end) interval for spatial sampling const double *offset_bottom_rois = bottom_rois + n * 5; int roi_batch_ind = offset_bottom_rois[0]; double roi_start_w = (double)(round(offset_bottom_rois[1])) * spatial_scale - 0.5; double roi_start_h = (double)(round(offset_bottom_rois[2])) * spatial_scale - 0.5; double roi_end_w = (double)(round(offset_bottom_rois[3]) + 1.) * spatial_scale - 0.5; double roi_end_h = (double)(round(offset_bottom_rois[4]) + 1.) * spatial_scale - 0.5; // Force too small ROIs to be 1x1 double roi_width = max(roi_end_w - roi_start_w, 0.1); //avoid 0 double roi_height = max(roi_end_h - roi_start_h, 0.1); // Compute w and h at bottom double bin_size_h = roi_height / (double)(pooled_height); double bin_size_w = roi_width / (double)(pooled_width); double sub_bin_size_h = bin_size_h / (double)(sample_per_part); double sub_bin_size_w = bin_size_w / (double)(sample_per_part); int part_h = floor((double)(ph) / pooled_height * part_size); int part_w = floor((double)(pw) / pooled_width * part_size); int class_id = ctop / channels_each_class; double trans_x = no_trans ? (double)(0) : bottom_trans[(((n * num_classes + class_id) * 2) * part_size + part_h) * part_size + part_w] * trans_std; double trans_y = no_trans ? 
(double)(0) : bottom_trans[(((n * num_classes + class_id) * 2 + 1) * part_size + part_h) * part_size + part_w] * trans_std; double wstart = (double)(pw)*bin_size_w + roi_start_w; wstart += trans_x * roi_width; double hstart = (double)(ph)*bin_size_h + roi_start_h; hstart += trans_y * roi_height; double sum = 0; int count = 0; int gw = floor((double)(pw)*group_size / pooled_width); int gh = floor((double)(ph)*group_size / pooled_height); gw = min(max(gw, 0), group_size - 1); gh = min(max(gh, 0), group_size - 1); const double *offset_bottom_data = bottom_data + (roi_batch_ind * channels) * height * width; for (int ih = 0; ih < sample_per_part; ih++) { for (int iw = 0; iw < sample_per_part; iw++) { double w = wstart + iw * sub_bin_size_w; double h = hstart + ih * sub_bin_size_h; // bilinear interpolation if (w < -0.5 || w > width - 0.5 || h < -0.5 || h > height - 0.5) { continue; } w = min(max(w, 0.), width - 1.); h = min(max(h, 0.), height - 1.); int c = (ctop * group_size + gh) * group_size + gw; double val = bilinear_interp(offset_bottom_data + c * height * width, w, h, width, height); sum += val; count++; } } top_data[index] = count == 0 ? (double)(0) : sum / count; top_count[index] = count; } } __global__ void DeformablePSROIPoolBackwardAccKernel( const int count, const double *top_diff, const double *top_count, const int num_rois, const double spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int output_dim, double *bottom_data_diff, double *bottom_trans_diff, const double *bottom_data, const double *bottom_rois, const double *bottom_trans, const int no_trans, const double trans_std, const int sample_per_part, const int group_size, const int part_size, const int num_classes, const int channels_each_class) { CUDA_KERNEL_LOOP(index, count) { // The output is in order (n, ctop, ph, pw) int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int ctop = (index / pooled_width / pooled_height) % output_dim; int n = index / pooled_width / pooled_height / output_dim; // [start, end) interval for spatial sampling const double *offset_bottom_rois = bottom_rois + n * 5; int roi_batch_ind = offset_bottom_rois[0]; double roi_start_w = (double)(round(offset_bottom_rois[1])) * spatial_scale - 0.5; double roi_start_h = (double)(round(offset_bottom_rois[2])) * spatial_scale - 0.5; double roi_end_w = (double)(round(offset_bottom_rois[3]) + 1.) * spatial_scale - 0.5; double roi_end_h = (double)(round(offset_bottom_rois[4]) + 1.) * spatial_scale - 0.5; // Force too small ROIs to be 1x1 double roi_width = max(roi_end_w - roi_start_w, 0.1); //avoid 0 double roi_height = max(roi_end_h - roi_start_h, 0.1); // Compute w and h at bottom double bin_size_h = roi_height / (double)(pooled_height); double bin_size_w = roi_width / (double)(pooled_width); double sub_bin_size_h = bin_size_h / (double)(sample_per_part); double sub_bin_size_w = bin_size_w / (double)(sample_per_part); int part_h = floor((double)(ph) / pooled_height * part_size); int part_w = floor((double)(pw) / pooled_width * part_size); int class_id = ctop / channels_each_class; double trans_x = no_trans ? (double)(0) : bottom_trans[(((n * num_classes + class_id) * 2) * part_size + part_h) * part_size + part_w] * trans_std; double trans_y = no_trans ? 
(double)(0) : bottom_trans[(((n * num_classes + class_id) * 2 + 1) * part_size + part_h) * part_size + part_w] * trans_std; double wstart = (double)(pw)*bin_size_w + roi_start_w; wstart += trans_x * roi_width; double hstart = (double)(ph)*bin_size_h + roi_start_h; hstart += trans_y * roi_height; if (top_count[index] <= 0) { continue; } double diff_val = top_diff[index] / top_count[index]; const double *offset_bottom_data = bottom_data + roi_batch_ind * channels * height * width; double *offset_bottom_data_diff = bottom_data_diff + roi_batch_ind * channels * height * width; int gw = floor((double)(pw)*group_size / pooled_width); int gh = floor((double)(ph)*group_size / pooled_height); gw = min(max(gw, 0), group_size - 1); gh = min(max(gh, 0), group_size - 1); for (int ih = 0; ih < sample_per_part; ih++) { for (int iw = 0; iw < sample_per_part; iw++) { double w = wstart + iw * sub_bin_size_w; double h = hstart + ih * sub_bin_size_h; // bilinear interpolation if (w < -0.5 || w > width - 0.5 || h < -0.5 || h > height - 0.5) { continue; } w = min(max(w, 0.), width - 1.); h = min(max(h, 0.), height - 1.); int c = (ctop * group_size + gh) * group_size + gw; // backward on feature int x0 = floor(w); int x1 = ceil(w); int y0 = floor(h); int y1 = ceil(h); double dist_x = w - x0, dist_y = h - y0; double q00 = (1 - dist_x) * (1 - dist_y); double q01 = (1 - dist_x) * dist_y; double q10 = dist_x * (1 - dist_y); double q11 = dist_x * dist_y; int bottom_index_base = c * height * width; atomicAdd(offset_bottom_data_diff + bottom_index_base + y0 * width + x0, q00 * diff_val); atomicAdd(offset_bottom_data_diff + bottom_index_base + y1 * width + x0, q01 * diff_val); atomicAdd(offset_bottom_data_diff + bottom_index_base + y0 * width + x1, q10 * diff_val); atomicAdd(offset_bottom_data_diff + bottom_index_base + y1 * width + x1, q11 * diff_val); if (no_trans) { continue; } double U00 = offset_bottom_data[bottom_index_base + y0 * width + x0]; double U01 = offset_bottom_data[bottom_index_base + y1 * width + x0]; double U10 = offset_bottom_data[bottom_index_base + y0 * width + x1]; double U11 = offset_bottom_data[bottom_index_base + y1 * width + x1]; double diff_x = (U11 * dist_y + U10 * (1 - dist_y) - U01 * dist_y - U00 * (1 - dist_y)) * trans_std * diff_val; diff_x *= roi_width; double diff_y = (U11 * dist_x + U01 * (1 - dist_x) - U10 * dist_x - U00 * (1 - dist_x)) * trans_std * diff_val; diff_y *= roi_height; atomicAdd(bottom_trans_diff + (((n * num_classes + class_id) * 2) * part_size + part_h) * part_size + part_w, diff_x); atomicAdd(bottom_trans_diff + (((n * num_classes + class_id) * 2 + 1) * part_size + part_h) * part_size + part_w, diff_y); } } } } void DeformablePSROIPoolForward(cudaStream_t stream, const double *data, const double *bbox, const double *trans, double *out, double *top_count, const int batch, const int channels, const int height, const int width, const int num_bbox, const int channels_trans, const int no_trans, const double spatial_scale, const int output_dim, const int group_size, const int pooled_size, const int part_size, const int sample_per_part, const double trans_std) { const double *bottom_data = data; const double *bottom_rois = bbox; const double *bottom_trans = no_trans ? NULL : trans; double *top_data = out; double *top_count_data = top_count; const int pooled_height = pooled_size; const int pooled_width = pooled_size; const int count = num_bbox * output_dim * pooled_height * pooled_width; const int num_classes = no_trans ? 
1 : channels_trans / 2;
  const int channels_each_class = no_trans ? output_dim : output_dim / num_classes;

  DeformablePSROIPoolForwardKernel<<<GET_BLOCKS(count), CUDA_NUM_THREADS, 0, stream>>>(
      count, bottom_data, spatial_scale, channels, height, width,
      pooled_height, pooled_width, bottom_rois, bottom_trans, no_trans,
      trans_std, sample_per_part, output_dim, group_size, part_size,
      num_classes, channels_each_class, top_data, top_count_data);

  cudaError_t err = cudaGetLastError();
  if (err != cudaSuccess) {
    printf("error in DeformablePSROIPoolForward: %s\n", cudaGetErrorString(err));
  }
}

void DeformablePSROIPoolBackwardAcc(cudaStream_t stream, const double *out_grad,
                                    const double *data, const double *bbox,
                                    const double *trans, const double *top_count,
                                    double *in_grad, double *trans_grad,
                                    const int batch, const int channels,
                                    const int height, const int width,
                                    const int num_bbox, const int channels_trans,
                                    const int no_trans, const double spatial_scale,
                                    const int output_dim, const int group_size,
                                    const int pooled_size, const int part_size,
                                    const int sample_per_part, const double trans_std) {
  // LOG(INFO) << "DeformablePSROIPoolBackward";
  const double *top_diff = out_grad;
  const double *bottom_data = data;
  const double *bottom_rois = bbox;
  const double *bottom_trans = no_trans ? NULL : trans;
  double *bottom_data_diff = in_grad;
  double *bottom_trans_diff = no_trans ? NULL : trans_grad;
  const double *top_count_data = top_count;
  const int num_rois = num_bbox;
  const int pooled_height = pooled_size;
  const int pooled_width = pooled_size;
  const int count = num_bbox * output_dim * pooled_height * pooled_width;
  const int num_classes = no_trans ? 1 : channels_trans / 2;
  const int channels_each_class = no_trans ? output_dim : output_dim / num_classes;

  DeformablePSROIPoolBackwardAccKernel<<<GET_BLOCKS(count), CUDA_NUM_THREADS, 0, stream>>>(
      count, top_diff, top_count_data, num_rois, spatial_scale, channels, height,
      width, pooled_height, pooled_width, output_dim, bottom_data_diff,
      bottom_trans_diff, bottom_data, bottom_rois, bottom_trans, no_trans,
      trans_std, sample_per_part, group_size, part_size, num_classes,
      channels_each_class);

  cudaError_t err = cudaGetLastError();
  if (err != cudaSuccess) {
    // The original message named the forward pass here; report the backward pass instead.
    printf("error in DeformablePSROIPoolBackwardAcc: %s\n", cudaGetErrorString(err));
  }
}
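// Hypothetical host-side usage sketch (not part of the original source): it
// shows how the forward wrapper above might be driven. The buffer shapes, the
// no_trans configuration and the helper name example_deform_psroi_forward are
// illustrative assumptions only.
void example_deform_psroi_forward(cudaStream_t stream,
                                  const double *d_features, // device, [1, output_dim*group_size*group_size, H, W]
                                  const double *d_rois,     // device, [R, 5]: (batch_idx, x1, y1, x2, y2)
                                  int H, int W, int R) {
  const int output_dim = 10, group_size = 7, pooled_size = 7;
  const int part_size = 7, sample_per_part = 4;
  const int channels = output_dim * group_size * group_size; // position-sensitive score maps
  const double spatial_scale = 1.0 / 16.0, trans_std = 0.1;

  // One output value and one sampling count per (roi, output channel, bin).
  const size_t out_elems = (size_t)R * output_dim * pooled_size * pooled_size;
  double *d_out = NULL, *d_top_count = NULL;
  cudaMalloc(&d_out, out_elems * sizeof(double));
  cudaMalloc(&d_top_count, out_elems * sizeof(double));

  // no_trans = 1: the deformation offsets are ignored, so trans may be NULL
  // and channels_trans is unused.
  DeformablePSROIPoolForward(stream, d_features, d_rois, /*trans=*/NULL,
                             d_out, d_top_count,
                             /*batch=*/1, channels, H, W, R,
                             /*channels_trans=*/0, /*no_trans=*/1,
                             spatial_scale, output_dim, group_size,
                             pooled_size, part_size, sample_per_part, trans_std);

  cudaStreamSynchronize(stream);
  cudaFree(d_out);
  cudaFree(d_top_count);
}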
__global__ void copy_to_fft_input(volatile float *__restrict__ fft_input, const float *w_coefficients_device, const int n_fft_coeffs, const int n_fft_coeffs_half, const int n_terms) { register int i, j; register int TID = threadIdx.x + blockIdx.x * blockDim.x; if (TID >= n_terms * n_fft_coeffs_half * n_fft_coeffs_half) return; register int current_term = TID / (n_fft_coeffs_half * n_fft_coeffs_half); register int current_loc = TID % (n_fft_coeffs_half * n_fft_coeffs_half); i = current_loc / n_fft_coeffs_half; j = current_loc % n_fft_coeffs_half; fft_input[current_term * (n_fft_coeffs * n_fft_coeffs) + i * n_fft_coeffs + j] = w_coefficients_device[current_term + current_loc * n_terms]; } __global__ void copy_from_fft_output(volatile float *__restrict__ y_tilde_values, const float *fft_output, const int n_fft_coeffs, const int n_fft_coeffs_half, const int n_terms) { register int i, j; register int TID = threadIdx.x + blockIdx.x * blockDim.x; if (TID >= n_terms * n_fft_coeffs_half * n_fft_coeffs_half) return; register int current_term = TID / (n_fft_coeffs_half * n_fft_coeffs_half); register int current_loc = TID % (n_fft_coeffs_half * n_fft_coeffs_half); i = current_loc / n_fft_coeffs_half + n_fft_coeffs_half; j = current_loc % n_fft_coeffs_half + n_fft_coeffs_half; y_tilde_values[current_term + n_terms * current_loc] = fft_output[current_term * (n_fft_coeffs * n_fft_coeffs) + i * n_fft_coeffs + j] / (float)(n_fft_coeffs * n_fft_coeffs); } __global__ void compute_point_box_idx(volatile int *__restrict__ point_box_idx, volatile float *__restrict__ x_in_box, volatile float *__restrict__ y_in_box, const float *const xs, const float *const ys, const float *const box_lower_bounds, const float coord_min, const float box_width, const int n_boxes, const int n_total_boxes, const int N) { register int TID = threadIdx.x + blockIdx.x * blockDim.x; if (TID >= N) return; register int x_idx = (int)((xs[TID] - coord_min) / box_width); register int y_idx = (int)((ys[TID] - coord_min) / box_width); x_idx = max(0, x_idx); x_idx = min(n_boxes - 1, x_idx); y_idx = max(0, y_idx); y_idx = min(n_boxes - 1, y_idx); register int box_idx = y_idx * n_boxes + x_idx; point_box_idx[TID] = box_idx; x_in_box[TID] = (xs[TID] - box_lower_bounds[box_idx]) / box_width; y_in_box[TID] = (ys[TID] - box_lower_bounds[n_total_boxes + box_idx]) / box_width; } __global__ void interpolate_device( volatile float *__restrict__ interpolated_values, const float *const y_in_box, const float *const y_tilde_spacings, const float *const denominator, const int n_interpolation_points, const int N) { register int TID, i, j, k; register float value, ybox_i; TID = threadIdx.x + blockIdx.x * blockDim.x; if (TID >= N * n_interpolation_points) return; i = TID % N; j = TID / N; value = 1; ybox_i = y_in_box[i]; for (k = 0; k < n_interpolation_points; k++) { if (j != k) { value *= ybox_i - y_tilde_spacings[k]; } } interpolated_values[j * N + i] = value / denominator[j]; } __global__ void compute_interpolated_indices( float *__restrict__ w_coefficients_device, const int *const point_box_indices, const float *const chargesQij, const float *const x_interpolated_values, const float *const y_interpolated_values, const int N, const int n_interpolation_points, const int n_boxes, const int n_terms) { register int TID, current_term, i, interp_i, interp_j, box_idx, box_i, box_j, idx; TID = threadIdx.x + blockIdx.x * blockDim.x; if (TID >= n_terms * n_interpolation_points * n_interpolation_points * N) return; current_term = TID % n_terms; i = (TID / n_terms) % N; 
interp_j = ((TID / n_terms) / N) % n_interpolation_points; interp_i = ((TID / n_terms) / N) / n_interpolation_points; box_idx = point_box_indices[i]; box_i = box_idx % n_boxes; box_j = box_idx / n_boxes; // interpolated_values[TID] = x_interpolated_values[i + interp_i * N] * y_interpolated_values[i + interp_j * N] * chargesQij[i * n_terms + current_term]; idx = (box_i * n_interpolation_points + interp_i) * (n_boxes * n_interpolation_points) + (box_j * n_interpolation_points) + interp_j; // interpolated_indices[TID] = idx * n_terms + current_term; atomicAdd( w_coefficients_device + idx * n_terms + current_term, x_interpolated_values[i + interp_i * N] * y_interpolated_values[i + interp_j * N] * chargesQij[i * n_terms + current_term]); } __global__ void compute_potential_indices( float *__restrict__ potentialsQij, const int *const point_box_indices, const float *const y_tilde_values, const float *const x_interpolated_values, const float *const y_interpolated_values, const int N, const int n_interpolation_points, const int n_boxes, const int n_terms) { register int TID, current_term, i, interp_i, interp_j, box_idx, box_i, box_j, idx; TID = threadIdx.x + blockIdx.x * blockDim.x; if (TID >= n_terms * n_interpolation_points * n_interpolation_points * N) return; current_term = TID % n_terms; i = (TID / n_terms) % N; interp_j = ((TID / n_terms) / N) % n_interpolation_points; interp_i = ((TID / n_terms) / N) / n_interpolation_points; box_idx = point_box_indices[i]; box_i = box_idx % n_boxes; box_j = box_idx / n_boxes; idx = (box_i * n_interpolation_points + interp_i) * (n_boxes * n_interpolation_points) + (box_j * n_interpolation_points) + interp_j; // interpolated_values[TID] = x_interpolated_values[i + interp_i * N] * y_interpolated_values[i + interp_j * N] * y_tilde_values[idx * n_terms + current_term]; // interpolated_indices[TID] = i * n_terms + current_term; atomicAdd( potentialsQij + i * n_terms + current_term, x_interpolated_values[i + interp_i * N] * y_interpolated_values[i + interp_j * N] * y_tilde_values[idx * n_terms + current_term]); } __host__ __device__ float squared_cauchy_2d(float x1, float x2, float y1, float y2) { return pow(1.0 + pow(x1 - y1, 2) + pow(x2 - y2, 2), -2); } __global__ void compute_kernel_tilde( volatile float *__restrict__ kernel_tilde, const float x_min, const float y_min, const float h, const int n_interpolation_points_1d, const int n_fft_coeffs) { register int TID, i, j; register float tmp; TID = threadIdx.x + blockIdx.x * blockDim.x; if (TID >= n_interpolation_points_1d * n_interpolation_points_1d) return; i = TID / n_interpolation_points_1d; j = TID % n_interpolation_points_1d; // TODO: Possibly issuing a memory pre-fetch here could help the code. 
tmp = squared_cauchy_2d(y_min + h / 2, x_min + h / 2, y_min + h / 2 + i * h, x_min + h / 2 + j * h); kernel_tilde[(n_interpolation_points_1d + i) * n_fft_coeffs + (n_interpolation_points_1d + j)] = tmp; kernel_tilde[(n_interpolation_points_1d - i) * n_fft_coeffs + (n_interpolation_points_1d + j)] = tmp; kernel_tilde[(n_interpolation_points_1d + i) * n_fft_coeffs + (n_interpolation_points_1d - j)] = tmp; kernel_tilde[(n_interpolation_points_1d - i) * n_fft_coeffs + (n_interpolation_points_1d - j)] = tmp; } __global__ void compute_upper_and_lower_bounds( volatile float *__restrict__ box_upper_bounds, volatile float *__restrict__ box_lower_bounds, const float box_width, const float x_min, const float y_min, const int n_boxes, const int n_total_boxes) { register int TID, i, j; TID = threadIdx.x + blockIdx.x * blockDim.x; if (TID >= n_boxes * n_boxes) return; i = TID / n_boxes; j = TID % n_boxes; box_lower_bounds[i * n_boxes + j] = j * box_width + x_min; box_upper_bounds[i * n_boxes + j] = (j + 1) * box_width + x_min; box_lower_bounds[n_total_boxes + i * n_boxes + j] = i * box_width + y_min; box_upper_bounds[n_total_boxes + i * n_boxes + j] = (i + 1) * box_width + y_min; } __global__ void copy_to_w_coefficients( volatile float *__restrict__ w_coefficients_device, const int *const output_indices, const float *const output_values, const int num_elements) { register int TID = threadIdx.x + blockIdx.x * blockDim.x; if (TID >= num_elements) return; w_coefficients_device[output_indices[TID]] = output_values[TID]; } void tsnecuda::PrecomputeFFT2D( cufftHandle &plan_kernel_tilde, float x_max, float x_min, float y_max, float y_min, int n_boxes, int n_interpolation_points, thrust::device_vector<float> &box_lower_bounds_device, thrust::device_vector<float> &box_upper_bounds_device, thrust::device_vector<float> &kernel_tilde_device, thrust::device_vector<thrust::complex<float>> &fft_kernel_tilde_device) { const int num_threads = 32; int num_blocks = (n_boxes * n_boxes + num_threads - 1) / num_threads; /* * Set up the boxes */ int n_total_boxes = n_boxes * n_boxes; float box_width = (x_max - x_min) / (float)n_boxes; // Left and right bounds of each box, first the lower bounds in the x direction, then in the y direction compute_upper_and_lower_bounds<<<num_blocks, num_threads>>>( thrust::raw_pointer_cast(box_upper_bounds_device.data()), thrust::raw_pointer_cast(box_lower_bounds_device.data()), box_width, x_min, y_min, n_boxes, n_total_boxes); // Coordinates of all the equispaced interpolation points int n_interpolation_points_1d = n_interpolation_points * n_boxes; int n_fft_coeffs = 2 * n_interpolation_points_1d; float h = box_width / (float)n_interpolation_points; /* * Evaluate the kernel at the interpolation nodes and form the embedded generating kernel vector for a circulant * matrix */ // thrust::device_vector<float> kernel_tilde_device(n_fft_coeffs * n_fft_coeffs); num_blocks = (n_interpolation_points_1d * n_interpolation_points_1d + num_threads - 1) / num_threads; compute_kernel_tilde<<<num_blocks, num_threads>>>( thrust::raw_pointer_cast(kernel_tilde_device.data()), x_min, y_min, h, n_interpolation_points_1d, n_fft_coeffs); GpuErrorCheck(cudaDeviceSynchronize()); // Precompute the FFT of the kernel generating matrix cufftExecR2C(plan_kernel_tilde, reinterpret_cast<cufftReal *>(thrust::raw_pointer_cast(kernel_tilde_device.data())), reinterpret_cast<cufftComplex *>(thrust::raw_pointer_cast(fft_kernel_tilde_device.data()))); } void tsnecuda::NbodyFFT2D( cufftHandle &plan_dft, cufftHandle &plan_idft, int 
N, int n_terms, int n_boxes, int n_interpolation_points, thrust::device_vector<thrust::complex<float>> &fft_kernel_tilde_device, int n_total_boxes, int total_interpolation_points, float coord_min, float box_width, int n_fft_coeffs_half, int n_fft_coeffs, thrust::device_vector<float> &fft_input, thrust::device_vector<thrust::complex<float>> &fft_w_coefficients, thrust::device_vector<float> &fft_output, thrust::device_vector<int> &point_box_idx_device, thrust::device_vector<float> &x_in_box_device, thrust::device_vector<float> &y_in_box_device, thrust::device_vector<float> &points_device, thrust::device_vector<float> &box_lower_bounds_device, thrust::device_vector<float> &y_tilde_spacings_device, thrust::device_vector<float> &denominator_device, thrust::device_vector<float> &y_tilde_values, thrust::device_vector<float> &all_interpolated_values_device, thrust::device_vector<float> &output_values, thrust::device_vector<int> &all_interpolated_indices, thrust::device_vector<int> &output_indices, thrust::device_vector<float> &w_coefficients_device, thrust::device_vector<float> &chargesQij_device, thrust::device_vector<float> &x_interpolated_values_device, thrust::device_vector<float> &y_interpolated_values_device, thrust::device_vector<float> &potentialsQij_device) { // std::cout << "start" << std::endl; const int num_threads = 128; int num_blocks = (N + num_threads - 1) / num_threads; // Compute box indices and the relative position of each point in its box in the interval [0, 1] compute_point_box_idx<<<num_blocks, num_threads>>>( thrust::raw_pointer_cast(point_box_idx_device.data()), thrust::raw_pointer_cast(x_in_box_device.data()), thrust::raw_pointer_cast(y_in_box_device.data()), thrust::raw_pointer_cast(points_device.data()), thrust::raw_pointer_cast(points_device.data() + N), thrust::raw_pointer_cast(box_lower_bounds_device.data()), coord_min, box_width, n_boxes, n_total_boxes, N); GpuErrorCheck(cudaDeviceSynchronize()); /* * Step 1: Interpolate kernel using Lagrange polynomials and compute the w coefficients */ // TODO: We can stream-parallelize these two interpolation functions // Compute the interpolated values at each real point with each Lagrange polynomial in the `x` direction num_blocks = (N * n_interpolation_points + num_threads - 1) / num_threads; interpolate_device<<<num_blocks, num_threads>>>( thrust::raw_pointer_cast(x_interpolated_values_device.data()), thrust::raw_pointer_cast(x_in_box_device.data()), thrust::raw_pointer_cast(y_tilde_spacings_device.data()), thrust::raw_pointer_cast(denominator_device.data()), n_interpolation_points, N); GpuErrorCheck(cudaDeviceSynchronize()); // TODO: Remove the synchronization here // Compute the interpolated values at each real point with each Lagrange polynomial in the `y` direction interpolate_device<<<num_blocks, num_threads>>>( thrust::raw_pointer_cast(y_interpolated_values_device.data()), thrust::raw_pointer_cast(y_in_box_device.data()), thrust::raw_pointer_cast(y_tilde_spacings_device.data()), thrust::raw_pointer_cast(denominator_device.data()), n_interpolation_points, N); GpuErrorCheck(cudaDeviceSynchronize()); //TODO: Synchronization required here // TODO: This section has an atomic-add, can we remove it? 
num_blocks = (n_terms * n_interpolation_points * n_interpolation_points * N + num_threads - 1) / num_threads; compute_interpolated_indices<<<num_blocks, num_threads>>>( thrust::raw_pointer_cast(w_coefficients_device.data()), thrust::raw_pointer_cast(point_box_idx_device.data()), thrust::raw_pointer_cast(chargesQij_device.data()), thrust::raw_pointer_cast(x_interpolated_values_device.data()), thrust::raw_pointer_cast(y_interpolated_values_device.data()), N, n_interpolation_points, n_boxes, n_terms); GpuErrorCheck(cudaDeviceSynchronize()); /* * Step 2: Compute the values v_{m, n} at the equispaced nodes, multiply the kernel matrix with the coefficients w */ num_blocks = ((n_terms * n_fft_coeffs_half * n_fft_coeffs_half) + num_threads - 1) / num_threads; copy_to_fft_input<<<num_blocks, num_threads>>>( thrust::raw_pointer_cast(fft_input.data()), thrust::raw_pointer_cast(w_coefficients_device.data()), n_fft_coeffs, n_fft_coeffs_half, n_terms); GpuErrorCheck(cudaDeviceSynchronize()); // Compute fft values at interpolated nodes cufftExecR2C(plan_dft, reinterpret_cast<cufftReal *>(thrust::raw_pointer_cast(fft_input.data())), reinterpret_cast<cufftComplex *>(thrust::raw_pointer_cast(fft_w_coefficients.data()))); GpuErrorCheck(cudaDeviceSynchronize()); // Take the broadcasted Hadamard product of a complex matrix and a complex vector // TODO: Check timing on this kernel tsnecuda::util::BroadcastMatrixVector( fft_w_coefficients, fft_kernel_tilde_device, n_fft_coeffs * (n_fft_coeffs / 2 + 1), n_terms, thrust::multiplies<thrust::complex<float>>(), 0, thrust::complex<float>(1.0)); // Invert the computed values at the interpolated nodes cufftExecC2R(plan_idft, reinterpret_cast<cufftComplex *>(thrust::raw_pointer_cast(fft_w_coefficients.data())), reinterpret_cast<cufftReal *>(thrust::raw_pointer_cast(fft_output.data()))); GpuErrorCheck(cudaDeviceSynchronize()); copy_from_fft_output<<<num_blocks, num_threads>>>( thrust::raw_pointer_cast(y_tilde_values.data()), thrust::raw_pointer_cast(fft_output.data()), n_fft_coeffs, n_fft_coeffs_half, n_terms); GpuErrorCheck(cudaDeviceSynchronize()); /* * Step 3: Compute the potentials \tilde{\phi} */ // TODO: Depending on the profiling here, we should check to see if we can split this code num_blocks = (n_terms * n_interpolation_points * n_interpolation_points * N + num_threads - 1) / num_threads; compute_potential_indices<<<num_blocks, num_threads>>>( thrust::raw_pointer_cast(potentialsQij_device.data()), thrust::raw_pointer_cast(point_box_idx_device.data()), thrust::raw_pointer_cast(y_tilde_values.data()), thrust::raw_pointer_cast(x_interpolated_values_device.data()), thrust::raw_pointer_cast(y_interpolated_values_device.data()), N, n_interpolation_points, n_boxes, n_terms); GpuErrorCheck(cudaDeviceSynchronize()); }
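// Hypothetical plan-creation sketch (an assumption, not code from the original
// file): the transform sizes follow from the call sites above, where
// n_fft_coeffs = 2 * n_interpolation_points * n_boxes, plan_kernel_tilde is a
// single 2D R2C transform, and plan_dft / plan_idft are batched over the
// n_terms charge components. The helper name example_make_fft_plans is made up.
#include <cufft.h>

void example_make_fft_plans(int n_boxes, int n_interpolation_points, int n_terms,
                            cufftHandle *plan_kernel_tilde,
                            cufftHandle *plan_dft, cufftHandle *plan_idft) {
  const int n_fft_coeffs = 2 * n_interpolation_points * n_boxes;
  int fft_dims[2] = {n_fft_coeffs, n_fft_coeffs};

  // 2D R2C transform of the embedded circulant generating kernel.
  cufftPlan2d(plan_kernel_tilde, n_fft_coeffs, n_fft_coeffs, CUFFT_R2C);

  // Batched forward/inverse transforms of the zero-padded w coefficients, one
  // contiguous n_fft_coeffs x n_fft_coeffs plane per term (matching the layout
  // written by copy_to_fft_input and read by copy_from_fft_output).
  cufftPlanMany(plan_dft, 2, fft_dims,
                NULL, 1, n_fft_coeffs * n_fft_coeffs,
                NULL, 1, n_fft_coeffs * (n_fft_coeffs / 2 + 1),
                CUFFT_R2C, n_terms);
  cufftPlanMany(plan_idft, 2, fft_dims,
                NULL, 1, n_fft_coeffs * (n_fft_coeffs / 2 + 1),
                NULL, 1, n_fft_coeffs * n_fft_coeffs,
                CUFFT_C2R, n_terms);
}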
#include <stdlib.h> #include <stdio.h> #include "cuda.h" int nblock_size = 64; int ngrid_size = 1; int maxgsx = 65535; int mmcc = 0; static int devid; static cudaError_t crc; __global__ void emptyKernel() {} /*--------------------------------------------------------------------*/ extern "C" void gpu_setgbsize(int nblock) { /* set blocksize */ nblock_size = nblock; return; } /*--------------------------------------------------------------------*/ extern "C" int getmmcc() { /* get major and minor computer capability */ return mmcc; } /*--------------------------------------------------------------------*/ extern "C" void gpu_fallocate(float **g_f, int nsize, int *irc) { /* allocate global float memory on GPU, return pointer to C */ void *gptr; crc = cudaMalloc(&gptr,sizeof(float)*nsize); if (crc) { printf("cudaMalloc float Error=%d:%s,l=%d\n",crc, cudaGetErrorString(crc),nsize); *irc = 1; } *g_f = (float *)gptr; return; } /*--------------------------------------------------------------------*/ extern "C" void gpu_iallocate(int **g_i, int nsize, int *irc) { /* allocate global integer memory on GPU, return pointer to C */ void *gptr; crc = cudaMalloc(&gptr,sizeof(int)*nsize); if (crc) { printf("cudaMalloc int Error=%d:%s,l=%d\n",crc, cudaGetErrorString(crc),nsize); *irc = 1; } *g_i = (int *)gptr; return; } /*--------------------------------------------------------------------*/ extern "C" void gpu_callocate(float2 **g_c, int nsize, int *irc) { /* allocate global float2 memory on GPU, return pointer to C */ void *gptr; crc = cudaMalloc(&gptr,sizeof(float2)*nsize); if (crc) { printf("cudaMalloc float2 Error=%d:%s,l=%d\n",crc, cudaGetErrorString(crc),nsize); *irc = 1; } *g_c = (float2 *)gptr; return; } /*--------------------------------------------------------------------*/ extern "C" void gpu_deallocate(void *g_d, int *irc) { /* deallocate global memory on GPU */ crc = cudaFree(g_d); if (crc) { printf("cudaFree Error=%d:%s\n",crc,cudaGetErrorString(crc)); *irc = 1; } return; } /*--------------------------------------------------------------------*/ extern "C" void gpu_fcopyin(float *f, float *g_f, int nsize) { /* copy float array from host memory to global GPU memory */ crc = cudaMemcpy((void *)g_f,f,sizeof(float)*nsize, cudaMemcpyHostToDevice); if (crc) { printf("cudaMemcpyHostToDevice float Error=%d:%s\n",crc, cudaGetErrorString(crc)); exit(1); } return; } /*--------------------------------------------------------------------*/ extern "C" void gpu_fcopyout(float *f, float *g_f, int nsize) { /* copy float array from global GPU memory to host memory */ crc = cudaMemcpy(f,(void *)g_f,sizeof(float)*nsize, cudaMemcpyDeviceToHost); if (crc) { printf("cudaMemcpyDeviceToHost float Error=%d:%s\n",crc, cudaGetErrorString(crc)); exit(1); } return; } /*--------------------------------------------------------------------*/ extern "C" void gpu_icopyin(int *f, int *g_f, int nsize) { /* copy int array from host memory to global GPU memory */ crc = cudaMemcpy((void *)g_f,f,sizeof(int)*nsize, cudaMemcpyHostToDevice); if (crc) { printf("cudaMemcpyHostToDevice int Error=%d:%s\n",crc, cudaGetErrorString(crc)); exit(1); } return; } /*--------------------------------------------------------------------*/ extern "C" void gpu_icopyout(int *f, int *g_f, int nsize) { /* copy int array from global GPU memory to host memory */ crc = cudaMemcpy(f,(void *)g_f,sizeof(int)*nsize, cudaMemcpyDeviceToHost); if (crc) { printf("cudaMemcpyDeviceToHost int Error=%d:%s\n",crc, cudaGetErrorString(crc)); exit(1); } return; } 
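/*--------------------------------------------------------------------*/
/* Hypothetical round-trip example (illustration only, not part of the */
/* original file): exercises the float wrappers defined above. The     */
/* function name example_roundtrip and the fill pattern are made up.   */
void example_roundtrip(int n) {
   int irc = 0;
   int i;
   float *g_buf = NULL;
   float *h_in = (float *) malloc(n*sizeof(float));
   float *h_out = (float *) malloc(n*sizeof(float));
   for (i = 0; i < n; i++)
      h_in[i] = (float) i;
/* allocate a device buffer, then copy host -> device -> host */
   gpu_fallocate(&g_buf,n,&irc);
   if (irc == 0) {
      gpu_fcopyin(h_in,g_buf,n);
      gpu_fcopyout(h_out,g_buf,n);
      gpu_deallocate((void *)g_buf,&irc);
   }
   free(h_in);
   free(h_out);
   return;
}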
/*--------------------------------------------------------------------*/ extern "C" void gpu_ccopyin(float2 *f, float2 *g_f, int nsize) { /* copy float2 array from host memory to global GPU memory */ crc = cudaMemcpy((void *)g_f,f,sizeof(float2)*nsize, cudaMemcpyHostToDevice); if (crc) { printf("cudaMemcpyHostToDevice float2 Error=%d:%s\n",crc, cudaGetErrorString(crc)); exit(1); } return; } /*--------------------------------------------------------------------*/ extern "C" void gpu_ccopyout(float2 *f, float2 *g_f, int nsize) { /* copy float2 array from global GPU memory to host memory */ crc = cudaMemcpy(f,(void *)g_f,sizeof(float2)*nsize, cudaMemcpyDeviceToHost); if (crc) { printf("cudaMemcpyDeviceToHost float2 Error=%d:%s\n",crc, cudaGetErrorString(crc)); exit(1); } return; } /*--------------------------------------------------------------------*/ extern "C" void gpu_zfmem(float *g_f, int nsize) { /* initialize float array in global GPU memory to zero */ crc = cudaMemset((void *)g_f,0,sizeof(float)*nsize); if (crc) { printf("cudaMemset Error=%d:%s\n",crc,cudaGetErrorString(crc)); exit(1); } return; } /*--------------------------------------------------------------------*/ extern "C" void gpu_zcmem(float2 *g_f, int nsize) { /* initialize float2 array in global GPU memory to zero */ crc = cudaMemset((void *)g_f,0,sizeof(float2)*nsize); if (crc) { printf("cudaMemset Error=%d:%s\n",crc,cudaGetErrorString(crc)); exit(1); } return; } /*--------------------------------------------------------------------*/ extern "C" void gpu_set_cache_size(int nscache) { /* request preferred cache size, requires CUDA 3.2 or higher */ /* nscache = (0,1,2) = (no,small,big) cache size */ cudaFuncCache cpref; if ((nscache < 0) || (nscache > 2)) return; if (nscache==0) cpref = cudaFuncCachePreferNone; else if (nscache==1) cpref = cudaFuncCachePreferShared; else if (nscache==2) cpref = cudaFuncCachePreferL1; crc = cudaThreadSetCacheConfig(cpref); /* crc = cudaDeviceSetCacheConfig(cpref); */ if (crc) { printf("cudaThreadSetCacheConfig error=%d:%s\n",crc, cudaGetErrorString(crc)); } return; } /*--------------------------------------------------------------------*/ extern "C" void emptykernel() { int ngx, ngy; ngx = nblock_size < 32768 ? 
nblock_size : 32768; ngy = (ngrid_size - 1)/ngx + 1; dim3 dimBlock(nblock_size,1); dim3 dimGrid(ngx,ngy); crc = cudaGetLastError(); emptyKernel<<<dimGrid,dimBlock>>>(); cudaThreadSynchronize(); crc = cudaGetLastError(); if (crc) { printf("emptyKernel error=%d:%s\n",crc,cudaGetErrorString(crc)); exit(1); } return; } /*--------------------------------------------------------------------*/ extern "C" void init_cu(int dev, int *irc) { /* initialize CUDA with device dev or selects best GPU available */ /* searches throughs devices, selects the device with the most compute */ /* units, and saves the device id devid */ /* if dev is a valid device, it is used, otherwise the GPU with the */ /* most multi-processors is selected */ /* error code is modified only if there is an error */ int maxcpus = 0, jm = -1; int j, ndevs, maxunits; unsigned long msize; double z; struct cudaDeviceProp prop; /* returns number of device */ crc = cudaGetDeviceCount(&ndevs); if (crc) { printf("cudaGetDeviceCount Error=%i:%s\n",crc, cudaGetErrorString(crc)); *irc = 1; return; } /* get information about devices */ for (j = 0; j < ndevs; j++) { crc = cudaGetDeviceProperties(&prop,j); if (crc) { printf("cudaGetDeviceProperties Error=%i:%s\n",crc, cudaGetErrorString(crc)); prop.name[0] = 0; } maxunits = prop.multiProcessorCount; if (dev <= 0) { printf("j=%i:CUDA_DEVICE_NAME=%s,CUDA_MULTIPROCESSOR_COUNT=%i\n", j,prop.name,maxunits); msize = prop.totalGlobalMem; z = ((double) msize)/1073741824.0; mmcc = 10*prop.major + prop.minor; printf(" CUDA_GLOBAL_MEM_SIZE=%lu(%f GB),Capability=%d\n", msize,(float) z,mmcc); if (maxunits > maxcpus) { maxcpus = maxunits; jm = j; } } } devid = jm; if (dev >= 0) devid = dev % ndevs; printf("using device j=%i\n",devid); /* get properties for this device */ crc = cudaGetDeviceProperties(&prop,devid); maxgsx = prop.maxGridSize[0]; mmcc = 10*prop.major + prop.minor; /* set device */ crc = cudaSetDevice(devid); if (crc) { printf("cudaSetDevice Error=%i:%s\n",crc, cudaGetErrorString(crc)); *irc = 1; return; } /* run empty kernel */ emptykernel(); return; } /*--------------------------------------------------------------------*/ extern "C" void end_cu() { /* terminate CUDA */ crc = cudaThreadExit(); if (crc) { printf("cudaThreadExit Error=%d:%s\n",crc,cudaGetErrorString(crc)); } return; } /* Interfaces to Fortran */ /*--------------------------------------------------------------------*/ extern "C" void gpu_setgbsize_(int *nblock) { gpu_setgbsize(*nblock); return; } /*--------------------------------------------------------------------*/ extern "C" int getmmcc_() { /* get major and minor computer capability */ return getmmcc(); } /*--------------------------------------------------------------------*/ extern "C" void gpu_fallocate_(unsigned long *gp_f, int *nsize, int *irc) { /* allocate global float memory on GPU, return pointer to Fortran */ float *fptr; gpu_fallocate(&fptr,*nsize,irc); *gp_f = (long )fptr; return; } /*--------------------------------------------------------------------*/ extern "C" void gpu_iallocate_(unsigned long *gp_i, int *nsize, int *irc) { /* allocate global integer memory on GPU, return pointer to Fortran */ int *iptr; gpu_iallocate(&iptr,*nsize,irc); *gp_i = (long )iptr; return; } /*--------------------------------------------------------------------*/ extern "C" void gpu_callocate_(unsigned long *gp_f, int *nsize, int *irc) { /* allocate global float2 memory on GPU, return pointer */ /* to Fortran */ float2 *fptr; gpu_callocate(&fptr,*nsize,irc); *gp_f = (long )fptr; 
return; } /*--------------------------------------------------------------------*/ extern "C" void gpu_deallocate_(unsigned long *gp_d, int *irc) { /* deallocate global memory on GPU, return pointer to Fortran */ void *d; d = (void *)*gp_d; gpu_deallocate(d,irc); *gp_d = 0; return; } /*--------------------------------------------------------------------*/ extern "C" void gpu_fcopyin_(float *f, unsigned long *gp_f, int *nsize) { /* copy float array from main memory to global GPU memory */ float *g_f; g_f = (float *)*gp_f; gpu_fcopyin(f,g_f,*nsize); return; } /*--------------------------------------------------------------------*/ extern "C" void gpu_fcopyout_(float *f, unsigned long *gp_f, int *nsize) { /* copy float array from global GPU memory to main memory */ float *g_f; g_f = (float *)*gp_f; gpu_fcopyout(f,g_f,*nsize); return; } /*--------------------------------------------------------------------*/ extern "C" void gpu_icopyin_(int *f, unsigned long *gp_f, int *nsize) { /* copy int array from main memory to global GPU memory */ int *g_f; g_f = (int *)*gp_f; gpu_icopyin(f,g_f,*nsize); return; } /*--------------------------------------------------------------------*/ extern "C" void gpu_icopyout_(int *f, unsigned long *gp_f, int *nsize) { /* copy int array from global GPU memory to main memory */ int *g_f; g_f = (int *)*gp_f; gpu_icopyout(f,g_f,*nsize); return; } /*--------------------------------------------------------------------*/ extern "C" void gpu_ccopyin_(float2 *f, unsigned long *gp_f, int *nsize) { /* copy float2 array from main memory to global GPU memory */ float2 *g_f; g_f = (float2 *)*gp_f; gpu_ccopyin(f,g_f,*nsize); return; } /*--------------------------------------------------------------------*/ extern "C" void gpu_ccopyout_(float2 *f, unsigned long *gp_f, int *nsize) { /* copy float2 array from global GPU memory to main memory */ float2 *g_f; g_f = (float2 *)*gp_f; gpu_ccopyout(f,g_f,*nsize); return; } /*--------------------------------------------------------------------*/ extern "C" void gpu_zcmem_(unsigned long *gp_f, int *nsize) { float2 *g_f; g_f = (float2 *)*gp_f; gpu_zcmem(g_f,*nsize); return; } /*--------------------------------------------------------------------*/ extern "C" void gpu_zfmem_(unsigned long *gp_f, int *nsize) { float *g_f; g_f = (float *)*gp_f; gpu_zfmem(g_f,*nsize); return; } /*--------------------------------------------------------------------*/ extern "C" void gpu_set_cache_size_(int *nscache) { gpu_set_cache_size(*nscache); return; } /*--------------------------------------------------------------------*/ extern "C" void emptykernel_() { emptykernel(); return; } /*--------------------------------------------------------------------*/ extern "C" void init_cu_(int *dev, int *irc) { init_cu(*dev,irc); return; } /*--------------------------------------------------------------------*/ extern "C" void end_cu_() { end_cu(); return; }
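/*--------------------------------------------------------------------*/
/* Hypothetical initialization/teardown sketch (illustration only, not */
/* part of the original file): picks the GPU with the most             */
/* multiprocessors, sets the cache preference, and shuts CUDA down.    */
/* The function name example_startup is made up.                       */
int example_startup() {
   int irc = 0;
/* dev < 0: init_cu scans devices and keeps the one with most units */
   init_cu(-1,&irc);
   if (irc != 0)
      return 1;
/* nscache = 2 requests the large (L1-preferring) cache configuration */
   gpu_set_cache_size(2);
/* init_cu already ran the empty kernel once; running it again is harmless */
   emptykernel();
   end_cu();
   return 0;
}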
#include <cub/cub.cuh> #include <limits> #include <raft/cuda_utils.cuh> #include <raft/distance/detail/pairwise_distance_base.cuh> #include <raft/linalg/contractions.cuh> #include <stdint.h> namespace raft { namespace distance { namespace detail { #if (ENABLE_MEMCPY_ASYNC == 1) #include <cuda_pipeline.h> using namespace nvcuda::experimental; #endif template <typename LabelT, typename DataT> struct KVPMinReduceImpl { typedef cub::KeyValuePair<LabelT, DataT> KVP; DI KVP operator()(LabelT rit, const KVP& a, const KVP& b) { return b.value < a.value ? b : a; } }; // KVPMinReduce template <typename LabelT, typename DataT> struct MinAndDistanceReduceOpImpl { typedef typename cub::KeyValuePair<LabelT, DataT> KVP; DI void operator()(LabelT rid, KVP* out, const KVP& other) { if (other.value < out->value) { out->key = other.key; out->value = other.value; } } DI void init(KVP* out, DataT maxVal) { out->key = -1; out->value = maxVal; } }; template <typename LabelT, typename DataT> struct MinReduceOpImpl { typedef typename cub::KeyValuePair<LabelT, DataT> KVP; DI void operator()(LabelT rid, DataT* out, const KVP& other) { if (other.value < *out) { *out = other.value; } } DI void init(DataT* out, DataT maxVal) { *out = maxVal; } }; template <typename DataT, typename OutT, typename IdxT, typename ReduceOpT> __global__ void initKernel(OutT* min, IdxT m, DataT maxVal, ReduceOpT redOp) { auto tid = IdxT(blockIdx.x) * blockDim.x + threadIdx.x; if (tid < m) { redOp.init(min + tid, maxVal); } } template <typename DataT, typename OutT, typename IdxT, typename ReduceOpT> void initialize(OutT* min, IdxT m, DataT maxVal, ReduceOpT redOp, cudaStream_t stream) { auto blks = raft::ceildiv(m, 256); initKernel<DataT, OutT, IdxT><<<blks, 256, 0, stream>>>(min, m, maxVal, redOp); } // TODO: specialize this function for MinAndDistanceReduceOp<int, float> // with atomicCAS of 64 bit which will eliminate mutex and shfls template <typename P, typename OutT, typename IdxT, typename KVPair, typename ReduceOpT> DI void updateReducedVal( int* mutex, OutT* min, KVPair* val, ReduceOpT red_op, IdxT m, IdxT gridStrideY) { const auto lid = threadIdx.x % raft::WarpSize; const auto accrowid = threadIdx.x / P::AccThCols; // for now have first lane from each warp update a unique output row. 
This // will resolve hang issues with pre-Volta architectures #pragma unroll for (int j = 0; j < (raft::WarpSize / P::AccThCols); j++) { if (lid == 0) { #pragma unroll for (int i = 0; i < P::AccRowsPerTh; ++i) { auto rid = gridStrideY + accrowid + j + i * P::AccThRows; if (rid < m) { auto value = val[i]; while (atomicCAS(mutex + rid, 0, 1) == 1) ; __threadfence(); red_op(rid, min + rid, value); __threadfence(); atomicCAS(mutex + rid, 1, 0); } } } if (j < (raft::WarpSize / P::AccThCols) - 1) { #pragma unroll for (int i = 0; i < P::AccRowsPerTh; ++i) { auto tmpkey = raft::shfl(val[i].key, (j + 1) * P::AccThCols); auto tmpvalue = raft::shfl(val[i].value, (j + 1) * P::AccThCols); val[i] = {tmpkey, tmpvalue}; } } } } template <typename DataT, typename OutT, typename IdxT, bool Sqrt, typename P, typename ReduceOpT, typename KVPReduceOpT, typename CoreLambda, typename FinalLambda> __global__ __launch_bounds__(P::Nthreads, 2) void fusedL2NNkernel(OutT* min, const DataT* x, const DataT* y, const DataT* xn, const DataT* yn, IdxT m, IdxT n, IdxT k, DataT maxVal, int* mutex, ReduceOpT redOp, KVPReduceOpT pairRedOp, CoreLambda core_op, FinalLambda fin_op) { extern __shared__ char smem[]; typedef cub::KeyValuePair<IdxT, DataT> KVPair; KVPair val[P::AccRowsPerTh]; #pragma unroll for (int i = 0; i < P::AccRowsPerTh; ++i) { val[i] = {-1, maxVal}; } // epilogue operation lambda for final value calculation auto epilog_lambda = [n, pairRedOp, &val, maxVal] __device__( DataT acc[P::AccRowsPerTh][P::AccColsPerTh], DataT * regxn, DataT * regyn, IdxT gridStrideX, IdxT gridStrideY) { KVPReduceOpT pairRed_op(pairRedOp); #pragma unroll for (int i = 0; i < P::AccRowsPerTh; ++i) { #pragma unroll for (int j = 0; j < P::AccColsPerTh; ++j) { acc[i][j] = regxn[i] + regyn[j] - (DataT)2.0 * acc[i][j]; } } if (Sqrt) { #pragma unroll for (int i = 0; i < P::AccRowsPerTh; ++i) { #pragma unroll for (int j = 0; j < P::AccColsPerTh; ++j) { acc[i][j] = raft::mySqrt(acc[i][j]); } } } // intra thread reduce const auto acccolid = threadIdx.x % P::AccThCols; const auto accrowid = threadIdx.x / P::AccThCols; #pragma unroll for (int i = 0; i < P::AccRowsPerTh; ++i) { #pragma unroll for (int j = 0; j < P::AccColsPerTh; ++j) { auto tmpkey = acccolid + j * P::AccThCols + gridStrideX; KVPair tmp = {tmpkey, acc[i][j]}; if (tmpkey < n) { val[i] = pairRed_op(accrowid + i * P::AccThRows + gridStrideY, tmp, val[i]); } } } }; auto rowEpilog_lambda = [m, mutex, min, pairRedOp, redOp, &val, maxVal] __device__(IdxT gridStrideY) { KVPReduceOpT pairRed_op(pairRedOp); ReduceOpT red_op(redOp); const auto accrowid = threadIdx.x / P::AccThCols; const auto lid = raft::laneId(); // reduce #pragma unroll for (int i = 0; i < P::AccRowsPerTh; ++i) { #pragma unroll for (int j = P::AccThCols / 2; j > 0; j >>= 1) { auto tmpkey = raft::shfl(val[i].key, lid + j); auto tmpvalue = raft::shfl(val[i].value, lid + j); KVPair tmp = {tmpkey, tmpvalue}; val[i] = pairRed_op(accrowid + i * P::AccThRows + gridStrideY, tmp, val[i]); } } updateReducedVal<P, OutT, IdxT, KVPair, ReduceOpT>(mutex, min, val, red_op, m, gridStrideY); // reset the val array. 
#pragma unroll for (int i = 0; i < P::AccRowsPerTh; ++i) { val[i] = {-1, maxVal}; } }; IdxT lda = k, ldb = k, ldd = n; PairwiseDistances<true, DataT, DataT, DataT, IdxT, P, CoreLambda, decltype(epilog_lambda), FinalLambda, decltype(rowEpilog_lambda), true, false> obj(x, y, m, n, k, lda, ldb, ldd, xn, yn, nullptr, smem, core_op, epilog_lambda, fin_op, rowEpilog_lambda); obj.run(); } template <typename DataT, typename OutT, typename IdxT, int VecLen, typename ReduceOpT, typename KVPReduceOpT> void fusedL2NNImpl(OutT* min, const DataT* x, const DataT* y, const DataT* xn, const DataT* yn, IdxT m, IdxT n, IdxT k, int* workspace, ReduceOpT redOp, KVPReduceOpT pairRedOp, bool sqrt, bool initOutBuffer, cudaStream_t stream) { typedef typename linalg::Policy4x4<DataT, VecLen>::Policy P; dim3 blk(P::Nthreads); auto nblks = raft::ceildiv<int>(m, P::Nthreads); constexpr auto maxVal = std::numeric_limits<DataT>::max(); typedef cub::KeyValuePair<IdxT, DataT> KVPair; // Accumulation operation lambda auto core_lambda = [] __device__(DataT & acc, DataT & x, DataT & y) { acc += x * y; }; RAFT_CUDA_TRY(cudaMemsetAsync(workspace, 0, sizeof(int) * m, stream)); if (initOutBuffer) { initKernel<DataT, OutT, IdxT, ReduceOpT> <<<nblks, P::Nthreads, 0, stream>>>(min, m, maxVal, redOp); RAFT_CUDA_TRY(cudaGetLastError()); } auto fin_op = [] __device__(DataT d_val, int g_d_idx) { return d_val; }; constexpr size_t shmemSize = P::SmemSize + ((P::Mblk + P::Nblk) * sizeof(DataT)); if (sqrt) { auto fusedL2NNSqrt = fusedL2NNkernel<DataT, OutT, IdxT, true, P, ReduceOpT, KVPReduceOpT, decltype(core_lambda), decltype(fin_op)>; dim3 grid = launchConfigGenerator<P>(m, n, shmemSize, fusedL2NNSqrt); fusedL2NNSqrt<<<grid, blk, shmemSize, stream>>>( min, x, y, xn, yn, m, n, k, maxVal, workspace, redOp, pairRedOp, core_lambda, fin_op); } else { auto fusedL2NN = fusedL2NNkernel<DataT, OutT, IdxT, false, P, ReduceOpT, KVPReduceOpT, decltype(core_lambda), decltype(fin_op)>; dim3 grid = launchConfigGenerator<P>(m, n, shmemSize, fusedL2NN); fusedL2NN<<<grid, blk, shmemSize, stream>>>( min, x, y, xn, yn, m, n, k, maxVal, workspace, redOp, pairRedOp, core_lambda, fin_op); } RAFT_CUDA_TRY(cudaGetLastError()); } } // namespace detail } // namespace distance } // namespace raft
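// Hypothetical call sketch (an assumption about typical usage, not code from
// the original file): for each of the m rows of x it finds the nearest of the
// n rows of y under squared L2 distance, writing one (index, distance) pair
// per row. xn/yn must already hold the squared row norms, and workspace is an
// m-element int buffer used as the per-row mutex. The helper name
// example_fused_l2_nn and the VecLen choice are illustrative.
void example_fused_l2_nn(cub::KeyValuePair<int, float>* d_min,   // [m] output pairs
                         const float* d_x, const float* d_y,     // [m*k], [n*k] row-major
                         const float* d_xn, const float* d_yn,   // [m], [n] squared norms
                         int m, int n, int k,
                         int* d_workspace,                       // [m] ints (mutexes)
                         cudaStream_t stream) {
  raft::distance::detail::MinAndDistanceReduceOpImpl<int, float> red_op;
  raft::distance::detail::KVPMinReduceImpl<int, float> pair_red_op;

  // VecLen = 4 assumes k is a multiple of 4 floats (16-byte vectorized loads).
  raft::distance::detail::fusedL2NNImpl<float, cub::KeyValuePair<int, float>, int, 4>(
      d_min, d_x, d_y, d_xn, d_yn, m, n, k, d_workspace,
      red_op, pair_red_op, /*sqrt=*/false, /*initOutBuffer=*/true, stream);
}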
#include <NvInfer.h> #include <cassert> #include <cstring> #include <vector> #include <cub/cub.cuh> #include "trt_engine/trt_network_crt/plugins/common/plugin_util.h" #include "trt_engine/trt_network_crt/plugins/layer_norm_plugin/layer_norm_plugin.h" using namespace nvinfer1; FWD_TRT_NAMESPACE_BEGIN template <typename T, typename R, typename P, int TPB> __device__ inline void layerNorm(const kvp<R>& threadData, const int ld, const int offset, const P* beta, const P* gamma, T* output) { // Assuming threadData is already divided by ld using BlockReduce = cub::BlockReduce<kvp<R>, TPB>; __shared__ typename BlockReduce::TempStorage temp_storage; __shared__ R mu; // mean __shared__ R rsigma; // 1 / std.dev. const auto sumKV = BlockReduce(temp_storage).Reduce(threadData, cub::Sum()); if (threadIdx.x == 0) { mu = sumKV.key; rsigma = rsqrt(sumKV.value - mu * mu); } __syncthreads(); for (int i = threadIdx.x; i < ld; i += TPB) { const int idx = offset + i; const R val = output[idx]; const R g(gamma[i]); const R b(beta[i]); output[idx] = g * (val - mu) * rsigma + b; } } template <typename T, typename P, int TPB> __device__ inline void layerNormSmall(const T val, const kvp<T>& threadData, const int ld, const int idx, const P* beta, const P* gamma, T* output) { // Assuming threadData is already divided by ld // Small settings: the block covers the leading dimension TPB >= ld. The input // value is available in a register using BlockReduce = cub::BlockReduce<kvp<T>, TPB>; __shared__ typename BlockReduce::TempStorage temp_storage; __shared__ T mu; // mean __shared__ T rsigma; // 1 / std.dev. const auto sumKV = BlockReduce(temp_storage).Reduce(threadData, cub::Sum()); if (threadIdx.x == 0) { mu = sumKV.key; rsigma = rsqrt(sumKV.value - mu * mu); } __syncthreads(); if (threadIdx.x < ld) { const T g(gamma[threadIdx.x]); const T b(beta[threadIdx.x]); output[idx] = g * (val - mu) * rsigma + b; } } template <int TPB, int VPT, bool hasBias> __global__ void lnDQQ(const int ld, const int8_t* input, int8_t* output, const __half* beta, const __half* gamma, const __half* bias, const float dqScaleIn, const float qScale) { const int idx = ld * blockIdx.x + threadIdx.x * VPT; // 4 * 1024 * 4 * 2 Bytes = 16KB per block int8_t in_local[VPT]; __half in_local_dq[VPT]; // dequantized input + bias __half bias_local[VPT]; // bias and beta __half gamma_local[VPT]; copy<sizeof(int8_t) * VPT>(&input[idx], in_local); copy<sizeof(__half) * VPT>(&bias[threadIdx.x * VPT], bias_local); __half2 loc = __floats2half2_rn(0.f, 0.f); // accumulator const __half rld = __half(1) / __half(ld); #pragma unroll for (int it = 0; it < VPT; it++) { // DQ input const float tmp_in = in_local[it]; in_local_dq[it] = dqScaleIn * tmp_in; if (hasBias) in_local_dq[it] += bias_local[it]; const __half tmp = rld * in_local_dq[it]; const __half2 tmp2 = __halves2half2(tmp, tmp * in_local_dq[it]); loc = loc + tmp2; } // load parameters copy<sizeof(__half) * VPT>(&beta[threadIdx.x * VPT], bias_local); copy<sizeof(__half) * VPT>(&gamma[threadIdx.x * VPT], gamma_local); using BlockReduce = cub::BlockReduce<__half2, TPB>; __shared__ typename BlockReduce::TempStorage temp_storage; __shared__ __half mu; // mean __shared__ __half rsigma; // 1 / std.dev. 
const __half2 sum2 = BlockReduce(temp_storage).Reduce(loc, cub::Sum()); if (threadIdx.x == 0) { mu = __low2half(sum2); rsigma = rsqrt(__high2half(sum2) - mu * mu); } __syncthreads(); #pragma unroll for (int it = 0; it < VPT; it++) { // apply layernorm const float tmp = gamma_local[it] * (in_local_dq[it] - mu) * rsigma + bias_local[it]; // Quantize int tmpq = __float2int_rn(qScale * tmp); tmpq = max(-127, tmpq); tmpq = min(127, tmpq); in_local[it] = tmpq; } copy<sizeof(int8_t) * VPT>(in_local, &output[idx]); } template <typename T, int TPB, int VPT, bool hasBias> __global__ void ln_vec(const int ld, const T* input, T* output, const T* beta, const T* gamma, const T* bias) { const int idx = ld * blockIdx.x + threadIdx.x * VPT; // 4 * 1024 * 4 * 2 Bytes = 16KB per block T in_local[VPT]; T bias_local[VPT]; T gamma_local[VPT]; copy<sizeof(T) * VPT>(&input[idx], in_local); copy<sizeof(T) * VPT>(&bias[threadIdx.x * VPT], bias_local); T local = 0.f; T local2 = 0.f; const T rld = T(1) / T(ld); #pragma unroll for (int it = 0; it < VPT; it++) { if (hasBias) in_local[it] += bias_local[it]; const T tmp = rld * in_local[it]; local += tmp; local2 += tmp * in_local[it]; } copy<sizeof(T) * VPT>(&beta[threadIdx.x * VPT], bias_local); copy<sizeof(T) * VPT>(&gamma[threadIdx.x * VPT], gamma_local); using BlockReduce = cub::BlockReduce<kvp<T>, TPB>; __shared__ typename BlockReduce::TempStorage temp_storage; __shared__ T mu; // mean __shared__ T rsigma; // 1 / std.dev. const auto sumKV = BlockReduce(temp_storage).Reduce(kvp<T>(local, local2), cub::Sum()); if (threadIdx.x == 0) { mu = sumKV.key; rsigma = rsqrt(sumKV.value - mu * mu); } __syncthreads(); ///* #pragma unroll for (int it = 0; it < VPT; it++) { in_local[it] = gamma_local[it] * (in_local[it] - mu) * rsigma + bias_local[it]; } /* */ copy<sizeof(T) * VPT>(in_local, &output[idx]); } template <typename T, unsigned TPB, bool hasBias> __global__ void LayerNormKernelSmall(const int ld, const T* input, const T* beta, const T* gamma, T* output, const T* bias) { const T rld = T(1) / T(ld); const int offset = blockIdx.x * ld; cub::Sum pairSum; // reduce x and x^2 kvp<T> threadData(0, 0); const int idx = offset + threadIdx.x; T val = 0; if (threadIdx.x < ld) { val = input[idx]; if (hasBias) { val += bias[threadIdx.x]; } const T rldval = rld * val; threadData = pairSum(threadData, kvp<T>(rldval, rldval * val)); } layerNormSmall<T, T, TPB>(val, threadData, ld, idx, beta, gamma, output); } template <typename T, unsigned TPB, bool hasBias> __global__ void LayerNormKernel(const int ld, const T* input, const T* beta, const T* gamma, T* output, const T* bias) { const T rld = T(1) / T(ld); const int offset = blockIdx.x * ld; cub::Sum pairSum; // reduce x and x^2 kvp<T> threadData(0, 0); for (int i = threadIdx.x; i < ld; i += TPB) { const int idx = offset + i; T val = T(input[idx]); if (hasBias) { val += T(bias[i]); } const T rldval = rld * val; threadData = pairSum(threadData, kvp<T>(rldval, rldval * val)); output[idx] = val; } layerNorm<T, T, T, TPB>(threadData, ld, offset, beta, gamma, output); } template <bool hasBias> int computeLayerNormDQQ(cudaStream_t stream, const int ld, const int n, const int8_t* input, const __half* beta, const __half* gamma, int8_t* output, const __half* bias, const float dqScaleIn, const float qScale) { // this must be true because n is the total size of the tensor assert(n % ld == 0); const int gridSize = n / ld; // we're limited by the size of the parameters, i.e. 
8-wide instead of 16 constexpr int VPT = 16 / sizeof(__half); if (ld == 768) { constexpr int TPB = 768 / VPT; lnDQQ<TPB, VPT, hasBias> <<<gridSize, TPB, 0, stream>>>(ld, input, output, beta, gamma, bias, dqScaleIn, qScale); } else if (ld == 1024) { constexpr int TPB = 1024 / VPT; lnDQQ<TPB, VPT, hasBias> <<<gridSize, TPB, 0, stream>>>(ld, input, output, beta, gamma, bias, dqScaleIn, qScale); } else { // TODO need to implement this LOG(ERROR) << "SkipLayerNormDQQ - FATAL: unsupported hidden layer size: " << ld << std::endl; exit(0); } CUDA_CHECK(cudaPeekAtLastError()); return 0; } template <typename T, bool hasBias> int computeLayerNorm(cudaStream_t stream, const int ld, const int n, const T* input, const T* beta, const T* gamma, T* output, const T* bias) { // this must be true because n is the total size of the tensor assert(n % ld == 0); const int gridSize = n / ld; constexpr int VPT = 16 / sizeof(T); if (ld <= 32) { constexpr int blockSize = 32; LayerNormKernelSmall<T, blockSize, hasBias> <<<gridSize, blockSize, 0, stream>>>(ld, input, beta, gamma, output, bias); } else if (ld == 768) { constexpr int TPB = 768 / VPT; ln_vec<T, TPB, VPT, hasBias> <<<gridSize, TPB, 0, stream>>>(ld, input, output, beta, gamma, bias); } else if (ld == 1024) { constexpr int TPB = 1024 / VPT; ln_vec<T, TPB, VPT, hasBias> <<<gridSize, TPB, 0, stream>>>(ld, input, output, beta, gamma, bias); } else { constexpr int blockSize = 256; LayerNormKernel<T, blockSize, hasBias> <<<gridSize, blockSize, 0, stream>>>(ld, input, beta, gamma, output, bias); } CUDA_CHECK(cudaPeekAtLastError()); return 0; } template int computeLayerNormDQQ<true>(cudaStream_t stream, const int ld, const int n, const int8_t* input, const __half* beta, const __half* gamma, int8_t* output, const __half* bias, const float dqScaleIn, const float qScale); template int computeLayerNormDQQ<false>(cudaStream_t stream, const int ld, const int n, const int8_t* input, const __half* beta, const __half* gamma, int8_t* output, const __half* bias, const float dqScaleIn, const float qScale); template int computeLayerNorm<float, true>(cudaStream_t, const int, const int, const float*, const float*, const float*, float*, const float*); template int computeLayerNorm<float, false>(cudaStream_t, const int, const int, const float*, const float*, const float*, float*, const float*); template int computeLayerNorm<half, true>(cudaStream_t, const int, const int, const half*, const half*, const half*, half*, const half*); template int computeLayerNorm<half, false>(cudaStream_t, const int, const int, const half*, const half*, const half*, half*, const half*); FWD_TRT_NAMESPACE_END
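// Hypothetical usage sketch (illustration only, not part of the plugin):
// applies layer normalization to a row-major [rows, hidden] float tensor with
// per-channel beta/gamma and a per-channel bias that is added before
// normalization. Namespace qualification is omitted because the enclosing
// FWD_TRT_NAMESPACE macro hides the actual namespace name; the helper name
// example_layer_norm is made up.
void example_layer_norm(cudaStream_t stream,
                        const float* d_input,   // [rows * hidden] on device
                        const float* d_beta,    // [hidden]
                        const float* d_gamma,   // [hidden]
                        const float* d_bias,    // [hidden], added to every row
                        float* d_output,        // [rows * hidden]
                        int rows, int hidden) {
  // ld is the normalized (innermost) dimension; n is the total element count.
  computeLayerNorm<float, true>(stream, hidden, rows * hidden,
                                d_input, d_beta, d_gamma, d_output, d_bias);
}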
// ConvexHull.cu // 凸壳算法实现。 #include "ConvexHull.h" #include <stdio.h> // 宏:CH_DEBUG_KERNEL_PRINT(Kernel 调试打印开关) // 打开该开关则会在 Kernel 运行时打印相关的信息,以参考调试程序;如果注释掉该 // 宏,则 Kernel 不会打印这些信息,但这会有助于程序更快速的运行。 //#define CH_DEBUG_KERNEL_PRINT // 宏:CH_DEBUG_CPU_PRINT(CPU版本 调试打印开关) // 打开该开关则会在 CPU 版本运行时打印相关的信息,以参考调试程序;如果注释掉该 // 宏,则 CPU 不会打印这些信息,但这会有助于程序更快速的运行。 //#define CH_DEBUG_CPU_PRINT // 宏:DEF_BLOCK_X 和 DEF_BLOCK_Y // 定义了默认的 2D Block 尺寸。 #define DEF_BLOCK_X 32 #define DEF_BLOCK_Y 8 // 宏:DEF_BLOCK_1D // 定义了默认的 1D Block 尺寸。 #define DEF_BLOCK_1D 512 // 宏:DEF_WARP_SIZE // 定义了 Warp 尺寸 #define DEF_WARP_SIZE 32 // 宏:CH_LARGE_ENOUGH // 定义了一个足够大的正整数,该整数在使用过程中被认为是无穷大。 #define CH_LARGE_ENOUGH ((1 << 30) - 1) // Kernel 函数:_initiateImgKer(实现将图像初始化为 threshold 算法) // 图像的 threshold 由用户决定,所以在初始化输入图像时 // 将输入图像内各个点的像素值置为 threshold。 static __global__ void _initiateImgKer( ImageCuda inimg, // 输入图像 unsigned char threshold // 阈值 ); // Kernel 函数:_initLabelAryKer(初始化 LABEL 数组) // 在凸壳迭代前初始化 LABEL 数组,初始化后的 LABEL 数组要求除最后一个元素为 1 // 以外,其他的元素皆为 0。 static __global__ void // Kernel 函数无返回值 _initLabelAryKer( int label[], // 待初始化的 LABEL 数组。 int cstcnt // LABEL 数组长度。 ); // Kernel 函数:_swapEdgePointKer(寻找最左最右点) // 寻找一个坐标点集中的最左和最右两侧的点,并将结果存入一个整型数组中。如果存在 // 多个最左点或者多个最右点的情况时,则以 y 值最大者为准。找到最左最右点后将直 // 接修改输入坐标点集,使最左点处于坐标点集的第一个点,输出点处于坐标点集的最末 // 一点。 static __global__ void // Kernel 函数无返回值 _swapEdgePointKer( CoordiSetCuda cst, // 待寻找的坐标点集 int edgecst[4], // 寻找到的最左最右点的坐标。其中下标 0 和 1 表示最 // 左点的下标,下标 2 和 3 表示最右点的下标。该参数 // 可以为 NULL。 int edgeidx[2] // 寻找到的最左点和最右点对应于输入坐标点集的下标。 // 该参数可以为 NULL。 ); // Kernel 函数: _updateDistKer(更新点集的垂距信息) // 根据目前已知的凸壳上的点集和区域的标签值,找出当前每个点所在区域的最左最右 // 点,根据点到直线的垂距公式,计算点集的附带数据:点到当前所在区域的最左最右点 // 构成的直线的垂直距离。并且将垂距为负的点标记一下。 static __global__ void // Kernel 函数无返回值 _updateDistKer( CoordiSetCuda cst, // 输入点集,也是输出点集,更新点集的 // attachData,也就是垂距的信息。 CoordiSetCuda convexcst, // 目前已知凸壳上的点集,即每段的最值点信息。 int label[], // 输入,当前点集的区域标签值数组。 int cstcnt, // 输入,当前点的数量。 int negdistflag[] // 输出,当前点垂距为负的标志数组。如果当前点 // 垂距为负,则对应的标志位为 1。 ); // Kernel 函数: _updateFoundInfoKer(更新新发现凸壳点信息) // 根据分段扫描后得到的点集信息,更新当前区域是否有新发现的凸壳上的点,更新目前 // 已知的凸壳上的点的位置索引。 static __global__ void // Kernel 函数无返回值 _updateFoundInfoKer( int label[], // 输入,当前点集的区域标签值数组。 float dist[], // 输入数组,所有点的垂距,即坐标点集数据结构中的 // attachedData 域。 int maxdistidx[], // 输入,分段扫描后,当前位置记录的本段目前已知的最 // 大垂距点的位置索引数组。 int cstcnt, // 坐标点的数量。 int foundflag[], // 输出数组,如果当前区域内找到新的凸壳上的点,标志 // 位置 1。 int startidx[] // 输出,目前已知的凸壳上的点的位置索引数组,也相当 // 于当前每段上的起始位置的索引数组。 ); // Kernel 函数: _updateConvexCstKer(生成新凸壳点集) // 根据分段扫描后得到的点集信息,和每段上是否发现新凸壳点的信息,生成新的凸壳点 // 集。 static __global__ void // Kernel 函数无返回值 _updateConvexCstKer( CoordiSetCuda cst, // 输入点集 CoordiSetCuda convexcst, // 输入,现有的凸壳上的点集。 int foundflag[], // 输入,当前区域内有新发现点的标志位数组, // 如果当前区域内找到新的凸壳上的点,标志位 // 置 1。 int foundacc[], // 输入,偏移量数组,即当前区域内有新发现点 // 的标志位的累加值。用来计算新添加的 // 凸壳点的存放位置的偏移量。 int startidx[], // 输入,目前已知的凸壳上点的位置索引数组, // 也相当于当前每段上的起始位置的索引数组。 int maxdistidx[], // 输入,分段扫描后,当前位置记录 // 的本段目前已知的最大垂距点的位置索引数组 int convexcnt, // 当前凸壳点的数量。 CoordiSetCuda newconvexcst // 输出,更新后的目前已知凸壳上的点集, // 即每段的最值点信息。 ); // Kernel 函数: _markLeftPointsKer(标记左侧点) // 根据目前每段上是否有新发现凸壳点的标志,标记在新发现点左侧的点,记录到标记数 // 组。 static __global__ void // Kernel 函数无返回值 _markLeftPointsKer( CoordiSetCuda cst, // 输入点集,也是输出点集 CoordiSetCuda newconvexcst, // 输入,更新后的目前已知凸壳上的点集,即 // 每段的最值点信息。 int negdistflag[], // 输入,负垂距标记值。 int label[], // 输入,当前点集的区域标签值数组。 int foundflag[], // 输入,当前区域内有新发现点的标志位数 // 组,如果当前区域内找到新的凸壳上的点, // 标志位置 1 int foundacc[], // 输入,偏移量数组,即当前区域内有新发现 // 点的标志位的累加值。用来计算新添加的凸 // 壳点的存放位置的偏移量。 int cstcnt, // 坐标点的数量。 int 
leftflag[] // 输出,当前点在目前区域中新发现凸壳点的 // 左侧的标志数组,如果在左侧,则置为 1。 ); // Kernel 函数: _updatePropertyKer(计算新下标) // 计算原坐标点集中各个坐标点的新位置和新的 LABEL 值。这些值将在下一步中用来并 // 行生成新的坐标点集。 static __global__ void // Kernel 函数无返回值 _updatePropertyKer( int leftflag[], // 输入,当前点在目前区域中新发现凸壳点的左侧的标志 // 数组,如果在左侧,则置为 1。 int leftacc[], // 输入,偏移量数组,即当前点在目前区域中新发现凸壳 // 点的左侧的标志的累加值。 int negdistflag[], // 输入,垂距为负的标志数组。如果当前点垂距为负,则 // 对应的标志位为 1。 int negdistacc[], // 输入,垂距为正的标志的累加值数组。 int startidx[], // 输入,目前已知的凸壳上的点的位置索引数组,也相当 // 于当前每段上的起始位置的索引数组。 int label[], // 输入,当前点集的区域标签值数组。 int foundacc[], // 输入,偏移量数组,即当前区域内有新发现点的标志位 // 的累加值。用来计算新添加的凸壳点的存放位置的偏移 // 量。 int cstcnt, // 坐标点的数量。 int newidx[], // 输出,每个点的新的索引值数组。 int tmplabel[] // 输出,当前点集更新后的区域标签值数组。该数组可以 // 和 label 数组是同一个数组,即可以进行 In-place // 操作。 ); // Kernel 函数: _arrangeCstKer(形成新坐标点集) // 根据已知信息,形成新的坐标点集和对应的 LABEL 数组,这些数据将在下一轮迭代中 // 作为输入信息。 static __global__ void // Kernel 函数无返回值 _arrangeCstKer( CoordiSetCuda cst, // 输入点集。 int negdistflag[], // 输入,垂距为负的标志数组。如果当前点垂距为 // 负,则对应的标志位为 1。 int newidx[], // 输入,每个点的新的索引值数组。 int tmplabel[], // 输入,当前点集的区域标签值数组。 int cstcnt, // 输入,坐标点的数量。 CoordiSetCuda newcst, // 输出,更新元素位置后的新点集。 int newlabel[] // 输出,当前点集更新后的区域标签值数组。 ); // Kernel 函数:_flipWholeCstKer(整体翻转坐标点集) // 将坐标点集由第一象限翻转到第四象限,原来 (x, y) 坐标反转后为 (-x, -y)。该步 // 骤用来求解上半凸壳,因为翻转后的点集的下半凸壳恰好是源点集的下半凸壳的相反 // 数。 static __global__ void // Kernel 函数无返回值 _flipWholeCstKer( CoordiSetCuda incst, // 输入坐标点集,该坐标点集为只读点集 CoordiSetCuda outcst // 输出坐标点集,该坐标点集可以和输入坐标点集相 // 同,可进行 In-place 操作。 ); // Kernel 函数:_joinConvexKer(合并凸壳点) // 将通过迭代求得的两个凸壳点集(下半凸壳点集和上半凸壳点集)合并成一个完整的凸 // 壳点集。合并过程中两侧若有重复点需要去掉。 static __global__ void // Kernel 函数无返回值 _joinConvexKer( CoordiSetCuda lconvex, // 下半凸壳 CoordiSetCuda uconvex, // 上半凸壳 CoordiSetCuda convex, // 整合后的凸壳 int *convexcnt // 整合后凸壳点集的数量 ); // Kernel 函数:_initLabelAryKer(初始化 LABEL 数组) static __global__ void _initLabelAryKer(int label[], int cstcnt) { // 计算当前 Thread 对应的数组下标。 int idx = blockIdx.x * blockDim.x + threadIdx.x; // 如果当前下标处理的是越界数据,则直接退出。 if (idx >= cstcnt) return; // 在 LABEL 数组中,将最后一个变量写入 1,其余变量写入 0。 if (idx == cstcnt - 1) label[idx] = 1; else label[idx] = 0; } // Host 成员方法:initLabelAry(初始化 LABEL 数组) __host__ int ConvexHull::initLabelAry(int label[], int cstcnt) { // 检查输入的数组是否为 NULL。 if (label == NULL) return NULL_POINTER; // 检查数组长度是否大于等于 2。 if (cstcnt < 2) return INVALID_DATA; // 计算启动 Kernel 函数所需要的 Block 尺寸与数量。 size_t blocksize = DEF_BLOCK_1D; size_t gridsize = (cstcnt + blocksize - 1) / blocksize; // 启动 Kernel 函数,完成计算。 _initLabelAryKer<<<gridsize, blocksize>>>(label, cstcnt); // 检查 Kernel 函数执行是否正确。 if (cudaGetLastError() != cudaSuccess) return CUDA_ERROR; // 处理完毕,返回。 return NO_ERROR; } // Host 成员方法:initLabelAryCpu(初始化 LABEL 数组) __host__ int ConvexHull::initLabelAryCpu(int label[], int cstcnt) { // 检查输入的数组是否为 NULL。 if (label == NULL) return NULL_POINTER; // 检查数组长度是否大于等于 2。 if (cstcnt < 2) return INVALID_DATA; // 在 LABEL 数组中,将最后一个变量写入 1,其余变量写入 0。 for (int i = 0; i < cstcnt - 1; i++) { label[i] = 0; // 打印信息检查 #ifdef CH_DEBUG_CPU_PRINT cout << "[initLabelAryCpu]label " << i << " is " << label[i] << endl; #endif } label[cstcnt - 1] = 1; #ifdef CH_DEBUG_CPU_PRINT cout << "[initLabelAryCpu]label " << cstcnt - 1 << " is " << label[cstcnt - 1] << endl; cout << endl; #endif // 处理完毕,返回。 return NO_ERROR; } // Kernel 函数:_swapEdgePointKer(寻找最左最右点) static __global__ void _swapEdgePointKer( CoordiSetCuda cst, int edgecst[4], int edgeidx[2]) { // 计算当前线程的下标,该 Kernel 必须以单 Block 运行,因此不涉及到 Block 相 // 关的变量。 int idx = threadIdx.x; // 当前 Thread 处理的若干个点中找到的局部最左最右点。 int curleftx = CH_LARGE_ENOUGH, 
curlefty = -CH_LARGE_ENOUGH; int currightx = -CH_LARGE_ENOUGH, currighty = -CH_LARGE_ENOUGH; // 当前 Thread 处理的若干个点中找到的局部最左做有点的下标。 int curleftidx = -1, currightidx = -1; // 处于下标为 idx 处的坐标点坐标值。 int curx, cury; // 迭代处理该线程所要处理的所有坐标点,这些坐标点是间隔 blockDim.x 个的各个 // 坐标点。 while (idx < cst.tplMeta.count) { // 从 Global Memory 中读取坐标值。 curx = cst.tplMeta.tplData[2 * idx]; cury = cst.tplMeta.tplData[2 * idx + 1]; // 判断该坐标值的大小,和已经找到的最左最优值做比较,更新最左最右点。 // 首先对最左点进行更新。 if (curx < curleftx || (curx == curleftx && cury > curlefty)) { curleftx = curx; curlefty = cury; curleftidx = idx; } // 然后对最右点进行更新。 if (curx > currightx || (curx == currightx && cury > currighty)) { currightx = curx; currighty = cury; currightidx = idx; } // 更新 idx,在下一轮迭代时计算下一个点。 idx += blockDim.x; } // 至此,所有 Thread 都得到了自己的局部最左最右点,现在需要将这些点放入 // Shared Memory 中,以便下一步进行归约处理。 // 声明 Shared Memory,并分配各个指针。 extern __shared__ int shdmem[]; int *leftxShd = &shdmem[0]; int *leftyShd = &leftxShd[blockDim.x]; int *leftidxShd = &leftyShd[blockDim.x]; int *rightxShd = &leftidxShd[blockDim.x]; int *rightyShd = &rightxShd[blockDim.x]; int *rightidxShd = &rightyShd[blockDim.x]; // 将局部结果拷贝到 Shared Memory 中。 idx = threadIdx.x; leftxShd[idx] = curleftx; leftyShd[idx] = curlefty; leftidxShd[idx] = curleftidx; rightxShd[idx] = currightx; rightyShd[idx] = currighty; rightidxShd[idx] = currightidx; // 同步所有线程,使初始化Shared Memory 的结果对所有线程可见。 __syncthreads(); // 下面进行折半归约迭代。这里要求 blockDim.x 必须为 2 的整数次幂。 int curstep = blockDim.x / 2; for (/*curstep*/; curstep >= 1; curstep /= 2) { // 每一轮迭代都只有上一轮一半的点在参与。直到最后剩下一个线程。 if (idx < curstep) { // 将两个局部结果归约成一个局部结果。 // 首先处理最左点。 if (leftxShd[idx] > leftxShd[idx + curstep] || (leftxShd[idx] == leftxShd[idx + curstep] && leftyShd[idx] < leftyShd[idx + curstep])) { leftxShd[idx] = leftxShd[idx + curstep]; leftyShd[idx] = leftyShd[idx + curstep]; leftidxShd[idx] = leftidxShd[idx + curstep]; } // 之后处理最右点。 if (rightxShd[idx] < rightxShd[idx + curstep] || (rightxShd[idx] == rightxShd[idx + curstep] && rightyShd[idx] < rightyShd[idx + curstep])) { rightxShd[idx] = rightxShd[idx + curstep]; rightyShd[idx] = rightyShd[idx + curstep]; rightidxShd[idx] = rightidxShd[idx + curstep]; } } // 同步线程,使本轮迭代归约的结果对所有线程可见。 __syncthreads(); } // 下面进行一些零碎的收尾工作。由于访问 Shared Memory 不同部分,造成 Bank // Conflict 的概率很大,因此没有采用并行处理(此时即便是并行代码,硬件上也 // 会串行处理) if (idx == 0) { // 如果 edgecst 不为 NULL,则将找到的最左最右点坐标拷贝其中。 if (edgecst != NULL) { edgecst[0] = leftxShd[0]; edgecst[1] = leftyShd[0]; edgecst[2] = rightxShd[0]; edgecst[3] = rightyShd[0]; } } else if (idx == DEF_WARP_SIZE) { // 如果 edgeidx 不为 NULL,则将找到的最左最右点下标拷贝其中。 if (edgeidx != NULL) { edgeidx[0] = leftidxShd[0]; edgeidx[1] = rightidxShd[0]; } } else if (idx == DEF_WARP_SIZE * 2) { // 将最左点交换到坐标点集的首部。 if (leftidxShd[0] > 0) { curx = cst.tplMeta.tplData[0]; cury = cst.tplMeta.tplData[1]; cst.tplMeta.tplData[0] = leftxShd[0]; cst.tplMeta.tplData[1] = leftyShd[0]; cst.tplMeta.tplData[2 * leftidxShd[0]] = curx; cst.tplMeta.tplData[2 * leftidxShd[0] + 1] = cury; } } else if (idx == DEF_WARP_SIZE * 3) { // 将最右点交换到坐标点集的尾部。 if (rightidxShd[0] < cst.tplMeta.count - 1) { curx = cst.tplMeta.tplData[2 * (cst.tplMeta.count - 1)]; cury = cst.tplMeta.tplData[2 * (cst.tplMeta.count - 1) + 1]; cst.tplMeta.tplData[2 * (cst.tplMeta.count - 1)] = rightxShd[0]; cst.tplMeta.tplData[2 * (cst.tplMeta.count - 1) + 1] = rightyShd[0]; cst.tplMeta.tplData[2 * rightidxShd[0]] = curx; cst.tplMeta.tplData[2 * rightidxShd[0] + 1] = cury; } } } // Host 成员方法:swapEdgePoint(寻找最左最右点) __host__ int ConvexHull::swapEdgePoint(CoordiSet *cst, CoordiSet *convexcst) { 
// 判断函数参数是否为 NULL。 if (cst == NULL || convexcst == NULL) return NULL_POINTER; // 判断参数是否包含了足够多的坐标点。 if (cst->count < 2 || cst->tplData == NULL || convexcst->count < 2 || convexcst->tplData == NULL) return INVALID_DATA; // 局部变量,错误码。 int errcode; errcode = CoordiSetBasicOp::copyToCurrentDevice(cst); if (errcode != NO_ERROR) return errcode; errcode = CoordiSetBasicOp::copyToCurrentDevice(convexcst); if (errcode != NO_ERROR) return errcode; CoordiSetCuda *cstCud = COORDISET_CUDA(cst); //CoordiSetCuda *convexcstCud = COORDISET_CUDA(convexcst); size_t blocksize = DEF_BLOCK_1D; size_t gridsize = 1; size_t sharedsize = 6 * blocksize * sizeof (int); _swapEdgePointKer<<<gridsize, blocksize, sharedsize>>>( *cstCud, convexcst->tplData, NULL); if (cudaGetLastError() != cudaSuccess) return CUDA_ERROR; return NO_ERROR; } // Host 成员方法:swapEdgePointCpu(寻找最左最右点) __host__ int ConvexHull::swapEdgePointCpu(CoordiSet *cst, CoordiSet *convexcst) { // 判断函数参数是否为 NULL。 if (cst == NULL || convexcst == NULL) return NULL_POINTER; // 判断参数是否包含了足够多的坐标点。 if (cst->count < 2 || cst->tplData == NULL || convexcst->count < 2 || convexcst->tplData == NULL) return INVALID_DATA; // 局部变量,错误码。 int errcode; // 拷贝到 Host 端 errcode = CoordiSetBasicOp::copyToHost(cst); if (errcode != NO_ERROR) return errcode; errcode = CoordiSetBasicOp::copyToHost(convexcst); if (errcode != NO_ERROR) return errcode; CoordiSetCuda *cstCud = COORDISET_CUDA(cst); // 记录当前最左最右点,初始化为第一个点。 int curleftx = (*cstCud).tplMeta.tplData[2 * 0]; int curlefty = (*cstCud).tplMeta.tplData[2 * 0 + 1]; int currightx = curleftx; int currighty = curlefty; int curleftidx = 0; int currightidx = 0; int id; int curx, cury; // 寻找最左最右点 for(id = 1; id < (*cstCud).tplMeta.count; id++) { // 当前点 curx = (*cstCud).tplMeta.tplData[2 * id]; cury = (*cstCud).tplMeta.tplData[2 * id + 1]; // 首先对最左点进行更新。 if (curx < curleftx || (curx == curleftx && cury > curlefty)) { curleftx = curx; curlefty = cury; curleftidx = id; } // 然后对最右点进行更新。 if (curx > currightx || (curx == currightx && cury > currighty)) { currightx = curx; currighty = cury; currightidx = id; } } #ifdef CH_DEBUG_CPU_PRINT cout << "[swapEdgePointCpu]the left point is " << curleftidx << " the right point is " << currightidx << endl; #endif // 下面进行一些零碎的收尾工作。 // 将找到的最左最右点坐标拷贝到凸壳点集。 convexcst->tplData[0] = curleftx; convexcst->tplData[1] = curlefty; convexcst->tplData[2] = currightx; convexcst->tplData[3] = currighty; // 将最左点交换到坐标点集的首部。 if (curleftidx > 0) { curx = (*cstCud).tplMeta.tplData[0]; cury = (*cstCud).tplMeta.tplData[1]; (*cstCud).tplMeta.tplData[0] = curleftx; (*cstCud).tplMeta.tplData[1] = curlefty; (*cstCud).tplMeta.tplData[2 * curleftidx] = curx; (*cstCud).tplMeta.tplData[2 * curleftidx + 1] = cury; #ifdef CH_DEBUG_CPU_PRINT cout << "[swapEdgePointCpu]first cst x is " << (*cstCud).tplMeta.tplData[0] << endl; cout << "[swapEdgePointCpu]first cst y is " << (*cstCud).tplMeta.tplData[1] << endl; cout << "[swapEdgePointCpu]former leftest cst x now is " << (*cstCud).tplMeta.tplData[2 * curleftidx] << endl; cout << "[swapEdgePointCpu]former leftest cst y now is " << (*cstCud).tplMeta.tplData[2 * curleftidx + 1] << endl; #endif } // 将最右点交换到坐标点集的尾部。 if (currightidx < cst->count - 1) { curx = (*cstCud).tplMeta.tplData[2 * (cst->count - 1)]; cury = (*cstCud).tplMeta.tplData[2 * (cst->count - 1) + 1]; (*cstCud).tplMeta.tplData[2 * (cst->count - 1)] = currightx; (*cstCud).tplMeta.tplData[2 * (cst->count - 1) + 1] = currighty; (*cstCud).tplMeta.tplData[2 * currightidx] = curx; (*cstCud).tplMeta.tplData[2 * currightidx + 1] = cury; #ifdef 
CH_DEBUG_CPU_PRINT cout << "[swapEdgePointCpu]last cst x is " << (*cstCud).tplMeta.tplData[2 * (cst->count - 1)] << endl; cout << "[swapEdgePointCpu]last cst y is " << (*cstCud).tplMeta.tplData[2 * (cst->count - 1) + 1] << endl; cout << "[swapEdgePointCpu]former rightest cst x now is " << (*cstCud).tplMeta.tplData[2 * currightidx] << endl; cout << "[swapEdgePointCpu]former rightest cst y now is " << (*cstCud).tplMeta.tplData[2 * currightidx + 1] << endl; #endif } return NO_ERROR; } // Kernel 函数: _updateDistKer(更新点集的垂距信息) static __global__ void _updateDistKer( CoordiSetCuda cst, CoordiSetCuda convexcst, int label[], int cstcnt, int negdistflag[]) { // 记录了本 Kernel 所使用到的共享内存中各个下标所存储的数据的含义。其中, // SIDX_BLK_CNT 表示当前 Block 所需要处理的坐标点的数量,由于坐标点的数量不 // 一定能够被 BlockDim 整除,因此最后一个 Block 所处理的坐标点的数量要小于 // BlockDim。 // SIDX_BLK_LABEL_LOW 和 SIDX_BLK_LABEL_UP 用来存当前 Block 中所加载的点集 // 的区域标签值的上下界。根据这个上下界,可以计算出当前点所在区域的最左最右 // 点,从而根据这两点确定的直线计算当前点的垂距。 // 从下标为 SIDX_BLK_CST 开始的其后的所有共享内存空间存储了当前 Block 中的 // 点集坐标。坐标集中第 i 个点对应的数组下标为 2 * i 和 2 * i + 1,其中下标 // 为 2 * i 的数据表示该点的横坐标,下标为 2 * i + 1 的数据表示该点的纵坐 // 标。 #define SIDX_BLK_CNT 0 #define SIDX_BLK_LABEL_LOW 1 #define SIDX_BLK_LABEL_UP 2 #define SIDX_BLK_CONVEX 3 // 共享内存的声明。 extern __shared__ int shdmem[]; // 基准索引。表示当前 Block 的起始位置索引。 int baseidx = blockIdx.x * blockDim.x; // 全局索引。 int idx = baseidx + threadIdx.x; // 当前 Block 的第 0 个线程来处理共享内存中彼此共享的数据的初始化工作。 if (threadIdx.x == 0) { // 计算当前 Block 所要处理的坐标点的数量。默认情况下该值等于 BlockDim, // 但对于最后一个 Block 来说,在坐标点总数量不能被 BlockDim 所整除的时 // 候,需要处理的坐标点数量会小于 BlockDim。 if (baseidx + blockDim.x <= cstcnt) shdmem[SIDX_BLK_CNT] = blockDim.x; else shdmem[SIDX_BLK_CNT] = cstcnt - baseidx; // 计算当前 Block 所处理的坐标点中起始的 LABEL 编号。 shdmem[SIDX_BLK_LABEL_LOW] = label[baseidx]; // 计算当前 Block 索要处理的坐标点中最大的 LABEL 编号。由于考虑到根据两 // 点计算直线方程,因此所谓的最大 LABEL 编号其实是 if (baseidx + shdmem[SIDX_BLK_CNT] <= cstcnt) shdmem[SIDX_BLK_LABEL_UP] = label[baseidx + shdmem[SIDX_BLK_CNT] - 1] + 1; else shdmem[SIDX_BLK_LABEL_UP] = label[cstcnt - 1]; } // Block 内部同步,使得上一步的初始化对 Block 内的所有 Thread 可见。 __syncthreads(); // 将当前 Block 处理的 LABEL 值上下界加载到寄存器,该步骤没有逻辑上的含义, // 只是为了 GPU 处理速度更快。 int labellower = shdmem[SIDX_BLK_LABEL_LOW]; int labelupper = shdmem[SIDX_BLK_LABEL_UP]; // 为了方便代码编写,这里单独提出一个 blockcstShd 指针,指向当前 Block 所对 // 应的点集数据的共享内存空间。 int *convexShd = &shdmem[SIDX_BLK_CONVEX]; // 加载当前 Block 中所用到的 LABEL 所谓应的凸壳点,两个相邻 LABEL 的凸壳点 // 构成的直线可用来衡量各点的垂距并以此推算出下一轮的凸壳点。将所用到的凸壳 // 点加载的 Shared Memory 中也没有逻辑上的目的,仅仅是为了下一步计算时访存 // 时间的缩短。 if (threadIdx.x < labelupper - labellower + 1) { convexShd[2 * threadIdx.x] = convexcst.tplMeta.tplData[2 * (labellower + threadIdx.x)]; convexShd[2 * threadIdx.x + 1] = convexcst.tplMeta.tplData[2 * (labellower + threadIdx.x) + 1]; } // Block 内部同步,使得上面所有的数据加载对 Block 内的所有 Thread 可见。下 // 面的代码就正式的投入计算了。 __syncthreads(); // 如果当前线程的全局下标越界,则直接返回,因为他没有对应的所要处理坐标点。 if (idx >= cstcnt) return; // 对于最后一个点(其实是坐标集中的最右点)是一个特殊的点,它独自处于一个 // LABEL,因此对于它不需要进行计算,直接赋值就行了。 if (idx == cstcnt - 1) { cst.attachedData[idx] = 0.0f; negdistflag[idx] = 0; return; } // 计算当前点的坐标和区域标签值。 int curx = cst.tplMeta.tplData[2 * idx]; int cury = cst.tplMeta.tplData[2 * idx + 1]; int curlabelidx = 2 * (label[idx] - labellower); // 计算当前 LABEL 区域的最左点的坐标。 int leftx = convexShd[curlabelidx++]; int lefty = convexShd[curlabelidx++]; // 计算当前 LABEL 区域的最右点的坐标。 int rightx = convexShd[curlabelidx++]; int righty = convexShd[curlabelidx ]; // 如果当前点就是凸壳点,那么不需要计算直接赋值退出就可以了。 if ((curx == leftx && cury == lefty) || (curx == rightx && cury == righty)) { cst.attachedData[idx] = 0.0f; negdistflag[idx] = 0; return; } // 
计算垂距,该计算通过最左点和最右点形成的直线作为垂距求解的依据,但实际求 // 解过程中并不是真正的垂距,而是垂直于 y 轴的距离。当点在直线之下时,具有 // 正垂距,当点在直线之上时,具有负垂距。 // y ^ right // | + // | / // | / -- // | /| ^ // | / | | dist // | / | v // | / * -- // | + cur // O| left x // --+-----------------> // | float s = (float)(righty - lefty) / (rightx - leftx); float dist = (cury - righty) - (curx - rightx) * s; // 将垂距信息更新到 Global 内存中作为输出。 cst.attachedData[idx] = dist; // 当垂距为负值时,在负数标记数组中标记之,因为这样的点将在下一轮迭代的时候 // 删除,以加快处理速度。 negdistflag[idx] = ((dist < 1.0e-6f) ? 1 : 0); // 调试打印 #ifdef CH_DEBUG_KERNEL_PRINT printf("Kernel[updateDist]: (%3d, %3d) Dist %7.3f, " "Line: (%3d, %3d) - (%3d, %3d) Label %2d\n", curx, cury, dist, leftx, lefty, rightx, righty, label[idx]); #endif // 清除对 Shared Memory 下标含义的定义,因为在其他的函数中不同的下标会有不 // 同的含义。 #undef SIDX_BLK_CNT #undef SIDX_BLK_LABEL_LOW #undef SIDX_BLK_LABEL_UP #undef SIDX_BLK_CONVEX } // 成员方法:updateDist(更新坐标点集垂距) __host__ int ConvexHull::updateDist( CoordiSet *cst, CoordiSet *convexcst, int label[], int cstcnt, int negdistflag[]) { // 检查输入坐标集,输出坐标集是否为空。 if (convexcst == NULL || cst == NULL || label == NULL || negdistflag == NULL) return NULL_POINTER; // 检查当前点的数量,小于等于 0 则无效数据。 if (cstcnt <= 0) return INVALID_DATA; // 局部变量,错误码。 int errcode; // 将输入坐标集拷贝到 device 端。 errcode = CoordiSetBasicOp::copyToCurrentDevice(convexcst); if (errcode != NO_ERROR) return errcode; // 将输入输出坐标集拷贝到 device 端。 errcode = CoordiSetBasicOp::copyToCurrentDevice(cst); if (errcode != NO_ERROR) return errcode; // 坐标集的 CUDA 相关数据 CoordiSetCuda *cstCud = COORDISET_CUDA(cst); CoordiSetCuda *convexcstCud = COORDISET_CUDA(convexcst); // 计算调用 Kernel 函数的线程块的尺寸和线程块的数量,以及所需要的 Shared // Memory 的数量。 size_t blocksize = DEF_BLOCK_1D; size_t gridsize = (cstcnt + blocksize - 1) / blocksize; size_t sharedsize = (3 + 2 * blocksize) * sizeof (int); // 调用更新点集的垂距信息的核函数,计算每个点的垂距,更新负垂距标志数组。 _updateDistKer<<<gridsize, blocksize, sharedsize>>>( *cstCud, *convexcstCud, label, cstcnt, negdistflag); // 判断核函数是否出错。 if (cudaGetLastError() != cudaSuccess) return CUDA_ERROR; // 处理完毕退出。 return NO_ERROR; } // 成员方法:updateDistCpu(更新坐标点集垂距) __host__ int ConvexHull::updateDistCpu( CoordiSet *cst, CoordiSet *convexcst, int label[], int cstcnt, int negdistflag[]) { // 检查输入坐标集,输出坐标集是否为空。 if (convexcst == NULL || cst == NULL || label == NULL || negdistflag == NULL) return NULL_POINTER; // 检查当前点的数量,小于等于 0 则无效数据。 if (cstcnt <= 0) return INVALID_DATA; // 局部变量,错误码。 int errcode; // 将输入坐标集拷贝到 host 端。 errcode = CoordiSetBasicOp::copyToHost(convexcst); if (errcode != NO_ERROR) return errcode; // 将输入输出坐标集拷贝到 host 端。 errcode = CoordiSetBasicOp::copyToHost(cst); if (errcode != NO_ERROR) return errcode; // 坐标集的 CUDA 相关数据 CoordiSetCuda *cstCud = COORDISET_CUDA(cst); CoordiSetCuda *convexcstCud = COORDISET_CUDA(convexcst); // 初始化末位 (*cstCud).attachedData[cstcnt - 1] = 0.0f; negdistflag[cstcnt - 1] = 0; #ifdef CH_DEBUG_CPU_PRINT cout << "[updateDistCpu]id " << cstcnt - 1 << " dist is "<< (*cstCud).attachedData[cstcnt - 1] << endl; cout << "[updateDistCpu]id " << cstcnt - 1 << " negdistflag is " << negdistflag[cstcnt - 1] << endl; #endif // 本地变量 int curx, cury; int leftx, lefty; int rightx, righty; float s, dist; int id; #ifdef CH_DEBUG_CPU_PRINT cout << "[updateDistCpu]update cstcnt is " << cstcnt << endl; #endif // 计算每个点对应的垂距 for (id = 0; id < cstcnt - 1; id++) { #ifdef CH_DEBUG_CPU_PRINT cout << "[updateDistCpu]update dist id " << id << endl; #endif // 读取当前点坐标 curx = (*cstCud).tplMeta.tplData[2 * id]; cury = (*cstCud).tplMeta.tplData[2 * id + 1]; // 记录当前区域的最左点最右点 if (id == 0 || label[id] != label[id - 1]) { 
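        // (Added worked example - hypothetical values, for illustration
        // only.) With region endpoints left = (0, 0), right = (4, 4) and a
        // point cur = (2, 1), the formulas above give:
        //     s    = (4 - 0) / (4 - 0)        = 1.0f
        //     dist = (1 - 4) - (2 - 4) * 1.0f = -1.0f
        // Because dist < 1.0e-6f, negdistflag for this point is set to 1 and
        // the point is dropped from the next iteration; only a point with
        // dist > 1.0e-6f can become the new hull vertex of its LABEL region
        // (see _updateFoundInfoKer below).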
leftx = (*convexcstCud).tplMeta.tplData[2 * label[id]]; lefty = (*convexcstCud).tplMeta.tplData[2 * label[id] + 1]; rightx = (*convexcstCud).tplMeta.tplData[2 * (label[id] + 1)]; righty = (*convexcstCud).tplMeta.tplData[2 * (label[id] + 1) + 1]; #ifdef CH_DEBUG_CPU_PRINT cout << "[updateDistCpu]leftest x is " << leftx << endl; cout << "[updateDistCpu]leftest y is " << lefty << endl; cout << "[updateDistCpu]rightest x is " << rightx << endl; cout << "[updateDistCpu]rightest x is " << righty << endl; #endif } // 如果当前点就是凸壳点,那么不需要计算直接赋值退出就可以了。 if ((curx == leftx && cury == lefty) || (curx == rightx && cury == righty)) { (*cstCud).attachedData[id] = 0.0f; negdistflag[id] = 0; #ifdef CH_DEBUG_CPU_PRINT cout << "[updateDistCpu]id " << id << " dist is "<< (*cstCud).attachedData[id] << endl; cout << "[updateDistCpu]id " << id << " negdistflag is " << negdistflag[id] << endl; #endif // 计算垂距,该计算通过最左点和最右点形成的直线作为垂距求解的依据,但实 // 际求解过程中并不是真正的垂距,而是垂直于 y 轴的距离。当点在直线之下 // 时,具有正垂距,当点在直线之上时,具有负垂距。 // y ^ right // | + // | / // | / -- // | /| ^ // | / | | dist // | / | v // | / * -- // | + cur // O| left x // --+-----------------> // | } else { s = (float)(righty - lefty) / (rightx - leftx); dist = (cury - righty) - (curx - rightx) * s; // 将垂距信息更新到 输出。 (*cstCud).attachedData[id] = dist; // 当垂距为负值时,在负数标记数组中标记之,因为这样的点将在下一轮 // 迭代的时候删除,以加快处理速度。 negdistflag[id] = ((dist < 1.0e-6f) ? 1 : 0); #ifdef CH_DEBUG_CPU_PRINT cout << "[updateDistCpu]id " << id << " dist is "<< (*cstCud).attachedData[id] << endl; cout << "[updateDistCpu]id " << id << " negdistflag is " << negdistflag[id] << endl; #endif } } return NO_ERROR; } // Kernel 函数: _updateFoundInfoKer(更新新发现凸壳点信息) static __global__ void _updateFoundInfoKer( int *label, float *dist, int *maxdistidx, int cstcnt, int *foundflag, int *startidx) { // 共享内存,用来存放当前 Block 处理的 LABEL 值,其长度为 BlockDim + 1,因 // 为需要加载下一 Blcok 的第一个 LABEL 值。 extern __shared__ int labelShd[]; // 基准索引。表示当前 Block 的起始位置索引 int baseidx = blockIdx.x * blockDim.x; // 全局索引。 int idx = baseidx + threadIdx.x; // 初始化 Shared Memory,将当前 Block 所对应的坐标点的 LABEL 值赋值给 // Shared Memroy,为了程序健壮性的考虑,我们将处理越界数据的那些 Thread 所 // 对应的 LABEL 值赋值为最后一个点的 LABEL 值。 if (idx < cstcnt) labelShd[threadIdx.x] = label[idx]; else labelShd[threadIdx.x] = label[cstcnt - 1]; // 使用每个 Block 中第 0 个 Thread 来初始化多出来的那个 LABEL 值,初始化的 // 规则同上面的规则一样,也做了健壮性的考量。 if (threadIdx.x == 0) { if (baseidx + blockDim.x < cstcnt) labelShd[blockDim.x] = label[baseidx + blockDim.x]; else labelShd[blockDim.x] = label[cstcnt - 1]; // 如果是第一块的话,起始索引更新。 if (blockIdx.x == 0) startidx[0] = 0; } // 块内的线程同步 __syncthreads(); // 对于处理越界数据的 Thread 直接进行返回操作,不进行任何处理。 if (idx >= cstcnt) return; // 当前 Thread 处理坐标点的 LABEL 值。 int curlabel = labelShd[threadIdx.x]; // 对于单独处于一个 LABEL 区域的最后一个点,该点不需要做任何查找操作,直接 // 赋值为未找到新的凸壳点。 if (idx == cstcnt - 1) { foundflag[curlabel] = 0; return; } // 本函数只针对处于 LABEL 区域边界的点进行处理,对于不处于区域边界的点则直 // 接返回。 if (curlabel == labelShd[threadIdx.x + 1]) return; // 读取当前 LABEL 区域的最大垂距和最大垂距所对应的下标和该最大垂距的值。 int curmaxdistidx = maxdistidx[idx]; float curmaxdist = dist[curmaxdistidx]; // 如果当前 LABEL 区域的最大垂距点的垂距值大于 0,则说明了在当前的 LABEL 区 // 域内发现了凸壳点。为了健壮性的考虑,这里将 0 写为 1.0e-6。 foundflag[curlabel] = (curmaxdist > 1.0e-6f) ? 
1 : 0; // 更新下一个 LABEL 区域的起始下标。由于当前 Thread 是当前 LABEL 区域的最后 // 一个,因此下一个 LABEL 区域的起始下标为当前 Thread 全局索引加 1。 startidx[curlabel + 1] = idx + 1; // 调试打印 #ifdef CH_DEBUG_KERNEL_PRINT printf("Kernel[FoundInfo]: Label %2d - Found %1d (%7.3f at %3d), " "End Idx %3d\n", curlabel, foundflag[curlabel], curmaxdist, curmaxdistidx, idx); #endif } // 成员方法: updateFoundInfo(更新新发现凸壳点信息) __host__ int ConvexHull::updateFoundInfo( int label[], float dist[], int maxdistidx[], int cstcnt, int foundflag[], int startidx[]) { // 检查所有的输入指针或数组是否为 NULL,如果存在一个为 NULL 则报错退出。 if (label == NULL || dist == NULL || maxdistidx == NULL || foundflag == NULL || startidx == NULL) return NULL_POINTER; // 检查坐标点的数量是否小于等于 0,若是则报错推出。 if (cstcnt <= 0) return INVALID_DATA; // 计算调用 Kernel 函数的线程块的尺寸和线程块的数量,以及所需要的 Shared // Memory 的数量。 size_t blocksize = DEF_BLOCK_1D; size_t gridsize = (cstcnt + blocksize - 1) / blocksize; size_t sharedsize = (blocksize + 1) * sizeof (int); // 调用 Kernel 函数,完成计算。 _updateFoundInfoKer<<<gridsize, blocksize, sharedsize>>>( label, dist, maxdistidx, cstcnt, foundflag, startidx); // 判断核函数是否出错。 if (cudaGetLastError() != cudaSuccess) return CUDA_ERROR; // 处理完毕退出。 return NO_ERROR; } // 成员方法: updateFoundInfoCpu(更新新发现凸壳点信息) __host__ int ConvexHull::updateFoundInfoCpu( int label[], float dist[], int maxdistidx[], int cstcnt, int foundflag[], int startidx[]) { // 检查所有的输入指针或数组是否为 NULL,如果存在一个为 NULL 则报错退出。 if (label == NULL || dist == NULL || maxdistidx == NULL || foundflag == NULL || startidx == NULL) return NULL_POINTER; // 检查坐标点的数量是否小于等于 0,若是则报错推出。 if (cstcnt <= 0) return INVALID_DATA; int id; // 如果是首末位,起始索引更新。 startidx[0] = 0; // 对于单独处于一个 LABEL 区域的最后一个点,该点不需要做任何查找操作,直接 // 赋值为未找到新的凸壳点。 foundflag[label[cstcnt - 1]] = 0; #ifdef CH_DEBUG_CPU_PRINT cout << "[updateFoundInfoCpu]label " << label[cstcnt - 1] << " found " << foundflag[label[cstcnt - 1]] << endl; cout << "[updateFoundInfoCpu]startidx " << label[0] << " is " << startidx[label[0]] << endl; #endif // 循环,更新新发现凸壳点信息,不处理第一个点 for (id = 1; id < cstcnt; id++) { // 处理新区域 if (label[id] != label[id - 1]) { #ifdef CH_DEBUG_CPU_PRINT cout << "[updateFoundInfoCpu]label different " << endl; #endif // 记录新区域的起始位置 startidx[label[id]] = id; // 如果当前 LABEL 区域的最大垂距点的垂距值大于 0,则说明了在当前的 // LABEL 区域内发现了凸壳点。为了健壮性的考虑,这里将 0 写为 1.0e-6 foundflag[label[id - 1]] = (dist[maxdistidx[id - 1]] > 1.0e-6f) ? 
1 : 0; #ifdef CH_DEBUG_CPU_PRINT cout << "[updateFoundInfoCpu]label " << label[id - 1] << " found " << foundflag[label[id - 1]] << endl; cout << "[updateFoundInfoCpu]startidx " << label[id] << " is " << startidx[label[id]] << endl; #endif } } // 处理完毕退出。 return NO_ERROR; } // Kernel 函数: _updateConvexCstKer(生成新凸壳点集) static __global__ void _updateConvexCstKer( CoordiSetCuda cst, CoordiSetCuda convexcst, int foundflag[], int foundacc[], int startidx[], int maxdistidx[], int convexcnt, CoordiSetCuda newconvexcst) { // 计算当前 Thread 的全局索引。本 Kernel 中,每个线程都对应于一个 LABEL 区 // 域,对于发现了新凸壳点的 LABEL 区域,则需要将原来这个 LABEL 区域内的凸壳 // 点和新发现的凸壳点同时拷贝到新的凸壳点集中。 int idx = blockIdx.x * blockDim.x + threadIdx.x; // 如果该 Thread 对应的时越界数据,则直接返回,不进行任何处理。 if (idx >= convexcnt) return; // 计算原来的凸壳点在新凸壳点集中的下标,由于前面的 LABEL 区域共产生了 // foundacc[idx] 个凸壳点,因此,下标应相较于原来的下标(idx)增加了相应的 // 数量。 int newidx = idx + foundacc[idx]; // 将这个凸壳点的坐标从原来的凸壳点集中拷贝到新的凸壳点集中。 newconvexcst.tplMeta.tplData[2 * newidx] = convexcst.tplMeta.tplData[2 * idx]; newconvexcst.tplMeta.tplData[2 * newidx + 1] = convexcst.tplMeta.tplData[2 * idx + 1]; // 调试打印 #ifdef CH_DEBUG_KERNEL_PRINT printf("Kernel[UpdateConvex]: Add Old (%3d, %3d) - " "%3d => %3d, Label %2d\n", convexcst.tplMeta.tplData[2 * idx], convexcst.tplMeta.tplData[2 * idx + 1], idx, newidx, idx); #endif // 如果当前 LABEL 区域中没有发现新的凸壳点,则只需要拷贝原有的凸壳点到新的 // 凸壳点集中就可以了,不需要再进行后面的操作。 if (foundflag[idx] == 0) return; // 计算新发现的凸壳点在凸壳点集中的下标(就把它放在原来凸壳点集的后面)和该 // 凸壳点对应的坐标点集中的下标(就是该 LABEL 区域最大垂距点的下标)。由于 // 最大垂距点下标数组是记录的 Scanning 操作的结果,因此正确的结果存放再该 // LABEL 区域最后一个下标处。 newidx++; int cstidx = maxdistidx[startidx[idx + 1] - 1]; // 将新发现的凸壳点从坐标点集中拷贝到新的凸壳点集中。 newconvexcst.tplMeta.tplData[2 * newidx] = cst.tplMeta.tplData[2 * cstidx]; newconvexcst.tplMeta.tplData[2 * newidx + 1] = cst.tplMeta.tplData[2 * cstidx + 1]; // 调试打印 #ifdef CH_DEBUG_KERNEL_PRINT printf("Kernel[UpdateConvex]: Add New (%3d, %3d) - " "%3d => %3d, Label %2d\n", cst.tplMeta.tplData[2 * cstidx], cst.tplMeta.tplData[2 * cstidx + 1], cstidx, newidx, idx); #endif } // Host 成员方法:updateConvexCst(生成新凸壳点集) __host__ int ConvexHull::updateConvexCst( CoordiSet *cst, CoordiSet *convexcst, int foundflag[], int foundacc[], int startidx[], int maxdistidx[], int convexcnt, CoordiSet *newconvexcst) { // 检查参数中所有的指针和数组是否为空。 if (cst == NULL || convexcst == NULL || foundacc == NULL || foundflag == NULL || startidx == NULL || maxdistidx == NULL || newconvexcst == NULL) return NULL_POINTER; // 检查当前凸壳点的数量,小于等于 0 则无效数据。 if (convexcnt <= 0) return INVALID_DATA; int errcode; // 将输入坐标集拷贝到当前 Device。 errcode = CoordiSetBasicOp::copyToCurrentDevice(cst); if (errcode != NO_ERROR) return errcode; // 将输入凸壳点集拷贝到当前 Device。 errcode = CoordiSetBasicOp::copyToCurrentDevice(convexcst); if (errcode != NO_ERROR) return errcode; // 将输出的新的凸壳集拷贝到当前 Device 端。 errcode = CoordiSetBasicOp::copyToCurrentDevice(newconvexcst); if (errcode != NO_ERROR) return errcode; // 获取各个坐标集的 CUDA 型数据 CoordiSetCuda *cstCud = COORDISET_CUDA(cst); CoordiSetCuda *convexcstCud = COORDISET_CUDA(convexcst); CoordiSetCuda *newconvexcstCud = COORDISET_CUDA(newconvexcst); // 计算调用 Kernel 函数的线程块的尺寸和线程块的数量。 // 矩阵方法分段扫描版本线程块大小。 size_t blocksize = DEF_BLOCK_1D; size_t gridsize = (convexcnt + blocksize - 1) / blocksize; // 调用 Kernel 函数完成计算。 _updateConvexCstKer<<<gridsize, blocksize>>>( *cstCud, *convexcstCud, foundflag, foundacc, startidx, maxdistidx, convexcnt, *newconvexcstCud); // 判断 Kernel 函数是否出错。 if (cudaGetLastError() != cudaSuccess) return CUDA_ERROR; // 处理完毕退出。 return NO_ERROR; } // Host 成员方法:updateConvexCstCpu(生成新凸壳点集) __host__ int 
ConvexHull::updateConvexCstCpu( CoordiSet *cst, CoordiSet *convexcst, int foundflag[], int foundacc[], int startidx[], int maxdistidx[], int convexcnt, CoordiSet *newconvexcst) { // 检查参数中所有的指针和数组是否为空。 if (cst == NULL || convexcst == NULL || foundacc == NULL || foundflag == NULL || startidx == NULL || maxdistidx == NULL || newconvexcst == NULL) return NULL_POINTER; // 检查当前凸壳点的数量,小于等于 0 则无效数据。 if (convexcnt <= 0) return INVALID_DATA; // 错误码 int errcode; // 将输入坐标集拷贝到 Host。 errcode = CoordiSetBasicOp::copyToHost(cst); if (errcode != NO_ERROR) return errcode; // 将输入凸壳点集拷贝到当前 Host。 errcode = CoordiSetBasicOp::copyToHost(convexcst); if (errcode != NO_ERROR) return errcode; // 将输出的新的凸壳集拷贝到当前 Host。 errcode = CoordiSetBasicOp::copyToHost(newconvexcst); if (errcode != NO_ERROR) return errcode; // 获取各个坐标集的 CUDA 型数据 CoordiSetCuda *cstCud = COORDISET_CUDA(cst); CoordiSetCuda *convexcstCud = COORDISET_CUDA(convexcst); CoordiSetCuda *newconvexcstCud = COORDISET_CUDA(newconvexcst); // 本地变量 int newid; int cstidx; // 循环处理凸壳点集 for (int id = 0; id < convexcnt; id++) { // 计算原来的凸壳点在新凸壳点集中的下标,由于前面的 LABEL 区域共产生了 // foundacc[idx] 个凸壳点,因此,下标应相较于原来的下标(idx)增加了相 // 应的数量。 newid = id + foundacc[id]; // 将这个凸壳点的坐标从原来的凸壳点集中拷贝到新的凸壳点集中。 (*newconvexcstCud).tplMeta.tplData[2 * newid] = (*convexcstCud).tplMeta.tplData[2 * id]; (*newconvexcstCud).tplMeta.tplData[2 * newid + 1] = (*convexcstCud).tplMeta.tplData[2 * id + 1]; #ifdef CH_DEBUG_CPU_PRINT printf("[updateConvexCstCpu]: Add Old (%3d, %3d) - " "%3d => %3d\n", (*convexcstCud).tplMeta.tplData[2 * id], (*convexcstCud).tplMeta.tplData[2 * id + 1], id, newid); #endif // 计算新发现的凸壳点在凸壳点集中的下标(就把它放在原来凸壳点集的后面) // 和该凸壳点对应的坐标点集中的下标(就是该 LABEL 区域最大垂距点的下标) // 由于最大垂距点下标数组是记录的 Scanning 操作的结果,因此正确的结果存 // 放再该 LABEL 区域最后一个下标处。 if (foundflag[id]) { newid++; cstidx = maxdistidx[startidx[id + 1] - 1]; // 将新发现的凸壳点从坐标点集中拷贝到新的凸壳点集中。 (*newconvexcstCud).tplMeta.tplData[2 * newid] = (*cstCud).tplMeta.tplData[2 * cstidx]; (*newconvexcstCud).tplMeta.tplData[2 * newid + 1] = (*cstCud).tplMeta.tplData[2 * cstidx + 1]; #ifdef CH_DEBUG_CPU_PRINT printf("[updateConvexCstCpu]: Add New (%3d, %3d) - " "%3d => %3d\n", (*cstCud).tplMeta.tplData[2 * cstidx], (*cstCud).tplMeta.tplData[2 * cstidx + 1], cstidx, newid); #endif } } // 处理完毕退出。 return NO_ERROR; } // Kernel 函数: _markLeftPointsKer(标记左侧点) static __global__ void _markLeftPointsKer( CoordiSetCuda cst, CoordiSetCuda newconvexcst, int negdistflag[], int label[], int foundflag[], int foundacc[], int cstcnt, int leftflag[]) { // 记录了本 Kernel 所使用到的共享内存中各个下标所存储的数据的含义。其中, // SIDX_BLK_LABEL_LOW 和 SIDX_BLK_LABEL_UP 用来存当前 Block 中所加载的点集 // 的区域标签值的上下界。根据这个上下界,可以计算出当前点所在区域的最左最右 // 点,从而根据这两点确定的直线计算当前点的垂距。 // 从下标为 SIDX_BLK_CONVEX_X 开始的其后的所有共享内存空间存储了当前 Block // 所处理的所有的新的凸壳点的 X 坐标。 #define SIDX_BLK_LABEL_LOW 0 #define SIDX_BLK_LABEL_UP 1 #define SIDX_BLK_CONVEX_X 2 // 共享内存的声明。 extern __shared__ int shdmem[]; // 基准下标。表示当前 Block 第一个 Thread 所处理的下标。 int baseidx = blockIdx.x * blockDim.x; // 当前 Thread 的全局下标。 int idx = baseidx + threadIdx.x; // 初始化共享内存中的公共数据,为了防止写入冲突,这里只使用每个 Block 的第 // 一个 Thread 处理初始化工作。 if (threadIdx.x == 0) { // 读取当前 Block 所处理的所有坐标点中最小的 LABEL 值。 shdmem[SIDX_BLK_LABEL_LOW] = label[baseidx]; // 计算当前 Block 所处理的所有坐标点中最大的 LABEL 值。 if (baseidx + blockDim.x <= cstcnt) shdmem[SIDX_BLK_LABEL_UP] = label[baseidx + blockDim.x - 1]; else shdmem[SIDX_BLK_LABEL_UP] = label[cstcnt - 1]; } // 同步 Block 内的所有 Thread,使得上述初始化对所有的 Thread 都可见。 __syncthreads(); // 从 Shared Memory 中读取当前 Block 所处理的 LABEL 值范围。这一步骤没有实 // 际的逻辑含义,将数据从共享内存搬入寄存器仅仅是为了加快处理速度。 int labellower = 
shdmem[SIDX_BLK_LABEL_LOW]; int labelupper = shdmem[SIDX_BLK_LABEL_UP]; // 定义中心点(即新增加的凸壳点)的横坐标的哑值。这是由于并不是所有的 LABEL // 区域都会在该论迭代中发现新的凸壳点。该值要求非常的大,因为没有发现新凸壳 // 点的区域,相当于所有的坐标点放在左侧。 #define LP_DUMMY_CVXX CH_LARGE_ENOUGH // 将新凸壳点的 X 坐标存储 Shared Memory 提取出,用一个指针来表示,这样的写 // 法是为了代码更加易于理解。 int *newcvxxShd = &shdmem[SIDX_BLK_CONVEX_X]; // 在 Shared Memory 中初始化新凸壳点(中心点)的 X 坐标。 if (threadIdx.x < labelupper - labellower + 1) { // 计算新凸壳点在新的凸壳点集中的下标。 int labelidx = threadIdx.x + labellower; int newconvexidx = labelidx + foundacc[labelidx] + 1; // 初始化 Shared Memory 中的数据,对于没有产生新的凸壳点的 LABEL 区域来 // 说,该值直接赋哑值。 if (foundflag[labelidx]) newcvxxShd[threadIdx.x] = newconvexcst.tplMeta.tplData[2 * newconvexidx]; else newcvxxShd[threadIdx.x] = LP_DUMMY_CVXX; } // 同步 Block 内的所有 Thread,是的上述所有初始化计算对所有 Thread 可见。 __syncthreads(); // 如果当前 Thread 处理的是越界范围,则直接返回不进行任何处理。 if (idx >= cstcnt) return; // 读取当前坐标点所对应的 LABEL 值(经过校正的,表示 Shared Memory 中的下 // 标)。 int curlabel = label[idx] - labellower; // 读取当前坐标点的 x 坐标和该点的垂距值。 int curx = cst.tplMeta.tplData[2 * idx]; int curnegflag = negdistflag[idx]; // 对于所有垂距大于等于 0,且 x 坐标小于中心点坐标时认为该点在中心点左侧。 // (因为所有垂距小于 0 的点将在下一轮迭代中被排除,因此,这里没有将垂距小 // 于 0 的点设置左侧标志位) if (curx < newcvxxShd[curlabel] && curnegflag == 0) leftflag[idx] = 1; else leftflag[idx] = 0; // 调试打印 #ifdef CH_DEBUG_KERNEL_PRINT printf("Kernel[LeftPoint]: (%3d, %3d) d=%8.3f, " "Label %2d ( NC.x %3d ) Left %1d\n", cst.tplMeta.tplData[2 * idx], cst.tplMeta.tplData[2 * idx + 1], cst.attachedData[idx], curlabel + labellower, newcvxxShd[curlabel], leftflag[idx]); #endif // 清除函数内部的宏定义,防止同后面的函数造成冲突。 #undef LP_TMPX_DUMMY #undef SIDX_BLK_LABEL_LOW #undef SIDX_BLK_LABEL_UP #undef SIDX_BLK_CONVEX_X } // Host 成员方法:markLeftPoints(标记左侧点) __host__ int ConvexHull::markLeftPoints( CoordiSet *cst, CoordiSet *newconvexcst, int negdistflag[], int label[], int foundflag[], int foundacc[], int cstcnt, int leftflag[]) { // 检查参数中所有的指针和变量是否为空。 if (cst == NULL || newconvexcst == NULL || label == NULL || foundacc == NULL || foundflag == NULL || leftflag == NULL) return NULL_POINTER; // 检查当前点的数量,小于等于 0 则无效数据。 if (cstcnt <= 0) return INVALID_DATA; // 局部变量,错误码。 int errcode; // 将输入坐标点集拷贝到当前 Device。 errcode = CoordiSetBasicOp::copyToCurrentDevice(cst); if (errcode != NO_ERROR) return errcode; // 将新的凸壳点集拷贝到当前 Device。 errcode = CoordiSetBasicOp::copyToCurrentDevice(newconvexcst); if (errcode != NO_ERROR) return errcode; // 获取坐标点集和凸壳点集的 CUDA 相关数据 CoordiSetCuda *cstCud = COORDISET_CUDA(cst); CoordiSetCuda *newconvexcstCud = COORDISET_CUDA(newconvexcst); // 计算 Kernel 函数所需要的 Block 尺寸和数量,以及每个 Block 所使用的 // Shared Memory 的数量。 size_t blocksize = DEF_BLOCK_1D; size_t gridsize = (cstcnt + blocksize - 1) / blocksize; size_t sharedsize = (2 + blocksize) * sizeof (int); // 调用 Kernel 函数,完成计算。 _markLeftPointsKer<<<gridsize, blocksize, sharedsize>>>( *cstCud, *newconvexcstCud, negdistflag, label, foundflag, foundacc, cstcnt, leftflag); // 判断 Kernel 函数运行是否出错。 if (cudaGetLastError() != cudaSuccess) return CUDA_ERROR; // 处理完毕退出。 return NO_ERROR; } // Host 成员方法:markLeftPointsCpu(标记左侧点) __host__ int ConvexHull::markLeftPointsCpu( CoordiSet *cst, CoordiSet *newconvexcst, int negdistflag[], int label[], int foundflag[], int foundacc[], int cstcnt, int leftflag[]) { // 检查参数中所有的指针和变量是否为空。 if (cst == NULL || newconvexcst == NULL || label == NULL || foundacc == NULL || foundflag == NULL || leftflag == NULL) return NULL_POINTER; // 检查当前点的数量,小于等于 0 则无效数据。 if (cstcnt <= 0) return INVALID_DATA; // 局部变量,错误码。 int errcode; // 将输入坐标点集拷贝到 Host。 errcode = CoordiSetBasicOp::copyToHost(cst); if (errcode 
!= NO_ERROR) return errcode; // 将新的凸壳点集拷贝到当前 Host。 errcode = CoordiSetBasicOp::copyToHost(newconvexcst); if (errcode != NO_ERROR) return errcode; // 获取坐标点集和凸壳点集的 CUDA 相关数据 CoordiSetCuda *cstCud = COORDISET_CUDA(cst); CoordiSetCuda *newconvexcstCud = COORDISET_CUDA(newconvexcst); // 本地变量 int newconvexcstx; int newconvexidx; // 定义中心点(即新增加的凸壳点)的横坐标的哑值。这是由于并不是所有的 LABEL // 区域都会在该论迭代中发现新的凸壳点。该值要求非常的大,因为没有发现新凸壳 // 点的区域,相当于所有的坐标点放在左侧。 #define LP_DUMMY_CVXX_CPU CH_LARGE_ENOUGH for (int id = 0; id < cstcnt; id++) { // 计算新凸壳点在新的凸壳点集中的下标。 newconvexidx = label[id] + foundacc[label[id]] + 1; // 初始化新凸壳点 x 坐标,对于没有产生新的凸壳点的 LABEL 区域来 // 说,该值直接赋哑值。 if (foundflag[label[id]]) newconvexcstx = (*newconvexcstCud).tplMeta.tplData[2 * newconvexidx]; else newconvexcstx = LP_DUMMY_CVXX_CPU; // 对于所有垂距大于等于 0,且 x 坐标小于中心点坐标时认为该点在中心点左侧。 // (因为所有垂距小于 0 的点将在下一轮迭代中被排除,因此,这里没有将垂距小 // 于 0 的点设置左侧标志位) if ((*cstCud).tplMeta.tplData[2 * id] < newconvexcstx && negdistflag[id] == 0) leftflag[id] = 1; else leftflag[id] = 0; #ifdef CH_DEBUG_CPU_PRINT printf("[markLeftPointsCpu]: (%3d, %3d) d=%8.3f, " "Label %2d ( NC.x %3d ) Left %1d\n", (*cstCud).tplMeta.tplData[2 * id], (*cstCud).tplMeta.tplData[2 * id + 1], (*cstCud).attachedData[id], label[id], newconvexidx, leftflag[id]); #endif } // 处理完毕退出。 return NO_ERROR; } // Kernel 函数: _updatePropertyKer(计算新下标) static __global__ void _updatePropertyKer( int leftflag[], int leftacc[], int negdistflag[], int negdistacc[], int startidx[], int label[], int foundacc[], int cstcnt, int newidx[], int tmplabel[]) { // 记录了本 Kernel 所使用到的共享内存中各个下标所存储的数据的含义。其中, // SIDX_BLK_LABEL_LOW 和 SIDX_BLK_LABEL_UP 用来存当前 Block 中所加载的点集 // 的区域标签值的上下界。根据这个上下界,可以计算出当前点所在区域的最左最右 // 点,从而根据这两点确定的直线计算当前点的垂距。 // SIDX_BLK_START_IDX 表示当前 Block 所囊括的所有 LABEL 区域对应的起始下 // 标。 // SIDX_BLK_NEG_ACC 表示当前 Block 所囊括的所有 LABEL 区域对应的负垂距累加 // 值,该值用于计算坐标点在下一轮迭代中所对应的新下标值。 // SIDX_BLK_LEFT_ACC 表示当前 Block 所囊括的所有 LABEL 区域对应的左侧点累加 // 值,该值用于计算坐标点在下一轮迭代中所对应的新下标值。 // SIDX_BLK_NEG_ACC 表示当前 Block 所囊括的所有 LABEL 区域对应的新发现凸壳 // 点的累加值。该值用来计算本轮结束后目前所有找到的凸壳点在新凸壳点集中的下 // 标值。 #define SIDX_BLK_LABEL_LOW 0 #define SIDX_BLK_LABEL_UP 1 #define SIDX_BLK_START_IDX (2 + 0 * blockDim.x) #define SIDX_BLK_NEG_ACC (2 + 1 * blockDim.x) #define SIDX_BLK_LEFT_ACC (2 + 2 * blockDim.x) #define SIDX_BLK_FOUND_ACC (2 + 3 * blockDim.x) // 共享内存的声明。 extern __shared__ int shdmem[]; // 基准下标。当前 Block 所有线程的起始全局下标。 int baseidx = blockIdx.x * blockDim.x; // 当前 Thread 的全局下标 int idx = baseidx + threadIdx.x; // 初始化 Shared Memory 公用部分。只需要一个线程来做这件事情即可。 if (threadIdx.x == 0) { // 计算当前 Block 处理的最小的 LABEL 区域。 shdmem[SIDX_BLK_LABEL_LOW] = label[baseidx]; // 计算当前 Block 处理的最大的 LABEL 区域。这里针对最后一个 Block,需要 // 考虑越界读取数据的情况。 if (baseidx + blockDim.x < cstcnt) shdmem[SIDX_BLK_LABEL_UP] = label[baseidx + blockDim.x - 1] + 1; else shdmem[SIDX_BLK_LABEL_UP] = label[cstcnt - 1]; } // 针对上面的初始化进行同步,使其结果对所有 Thread 可见。 __syncthreads(); // 将共享内存的各个数组指针取出。这一步骤没有逻辑上的实际意义,只是为了后续 // 步骤表达方便。 int *startidxShd = &shdmem[SIDX_BLK_START_IDX]; int *negdistaccShd = &shdmem[SIDX_BLK_NEG_ACC]; int *leftaccShd = &shdmem[SIDX_BLK_LEFT_ACC]; int *foundaccShd = &shdmem[SIDX_BLK_FOUND_ACC]; // 将存放于 Shared Memory 中的 LABEL 区域上下界转存到寄存器中。该步骤也没有 // 实际的逻辑意义,目的在于使程序运行更加高效。 int labellower = shdmem[SIDX_BLK_LABEL_LOW]; int labelupper = shdmem[SIDX_BLK_LABEL_UP]; // 初始化 Shared Memory 中的各个数组的值。 if (threadIdx.x < labelupper - labellower + 1) { // 从 Global Memory 中读取各个 LABEL 的起始下标。 startidxShd[threadIdx.x] = startidx[threadIdx.x + labellower]; // 根据起始下标,计算各个 LABEL 区域所对应的负垂距和左侧点累加值。 negdistaccShd[threadIdx.x] = 
negdistacc[startidxShd[threadIdx.x]]; leftaccShd[threadIdx.x] = leftacc[startidxShd[threadIdx.x]]; // 从 Global Memory 中读取新凸壳点的累加值。 foundaccShd[threadIdx.x] = foundacc[threadIdx.x + labellower]; } // 针对上面的初始化进行 Block 内部的同步,是的这些初始化结果对所有的 // Thread 可见。 __syncthreads(); // 若当前 Thread 处理的是越界数据,则直接退出。 if (idx >= cstcnt) return; // 若当前 Thread 处理的坐标点具有负垂距,则直接退出,因为负垂距坐标点再下一 // 轮迭代的过程中则不再使用。 if (negdistflag[idx] == 1) return; // 读取当前坐标点的 LABEL 值,由于后面只使用 Shared Memory 中的数据,因此这 // 里直接将其转换为 Shared Memory 所对应的下标。 int curlabel = label[idx] - labellower; // 宏:CHNI_ENABLE_FAST_CALC(下标值快速计算开关) // 在下面的代码中新下标值的计算可分为快速和普通两种方式。如果开启该定义,则 // 使用快速下标值计算;如果关闭,则是用普通下标值计算。无论是快速计算还是普 // 通计算,两者在计算公式上归根结底是一样的,只是为了减少计算量,某些变量被 // 合并同类项后消掉了。因此快速下标值计算的公式不易理解,普通计算的公式易于 // 理解。快速计算仅仅是普通计算的推导结果。 #define CHNI_ENABLE_FAST_CALC // 针对当前在新发现的凸壳点的左侧还有右侧,需要进行不同的计算公式来确定其在 // 下一轮迭代中的 LABEL 值和在坐标点集中的下标值。 if (leftflag[idx] == 1) { // 对于当前点在新的凸壳点的左侧,计算新的 LABEL 值。这里 foundacc 的物 // 理含义是当前 LABEL 值之前的各个 LABEL 区域中总共找到的新增凸壳点的数 // 量。 tmplabel[idx] = label[idx] + foundaccShd[curlabel]; // 对于当前点在新的凸壳点的左侧,计算坐标点在新一轮迭代中的下标值。这里 // 首先确定当前 LABEL 的新的起始下标值,然后再加上该点在其 LABEL 区域内 // 其前面的左侧点的数量,就得到了其新的下标值。 #ifndef CHNI_ENABLE_FAST_CALC int basenewidx = startidxShd[curlabel] - negdistaccShd[curlabel]; int innernewidx = leftacc[idx] - leftaccShd[curlabel]; newidx[idx] = basenewidx + innernewidx; #else newidx[idx] = startidxShd[curlabel] - negdistaccShd[curlabel] + leftacc[idx] - leftaccShd[curlabel]; #endif } else { // 对于当前点在新的凸壳点的右侧,计算新的 LABEL 值。这里 foundacc 的物 // 理含义是当前 LABEL 值之前的各个 LABEL 区域中总共找到的新增凸壳点的数 // 量。 tmplabel[idx] = label[idx] + foundaccShd[curlabel] + 1; // 对于当前点在新的凸壳点的右侧,计算坐标点在新一轮迭代中的下标值。计算 // 该值,首先计算右侧构成的新的 LABEL 区域的起始位置,这部分只需要在左 // 侧起始下标处加上当前 LABEL 区域总共检出的左侧坐标的数量即可;之后, // 需要计算该坐标点在新的区域内部的偏移量,即内部的原来下标值,减去其前 // 面的负垂距点数量和左侧点数量。 #ifndef CHNI_ENABLE_FAST_CALC int leftcnt = leftaccShd[curlabel + 1] - leftaccShd[curlabel]; int basenewidx = startidxShd[curlabel] - negdistaccShd[curlabel] + leftcnt; int inidx = idx - startidxShd[curlabel]; int innegacc = negdistacc[idx] - negdistaccShd[curlabel]; int inleftacc = leftacc[idx] - leftaccShd[curlabel]; int innernewidx = inidx - innegacc - inleftacc; newidx[idx] = basenewidx + innernewidx; #else newidx[idx] = idx - negdistacc[idx] + leftaccShd[curlabel + 1] - leftacc[idx]; #endif } // 调试打印 #ifdef CH_DEBUG_KERNEL_PRINT printf("Kernel[NewLabel]: Label %2d => %2d, " "Idx %2d => %2d, Left %1d\n", label[idx], tmplabel[idx], idx, newidx[idx], leftflag[idx]); #endif // 消除本 Kernel 函数内部的宏定义,防止后面的函数使用造成冲突。 #ifdef CHNI_ENABLE_FAST_CALC # undef CHNI_ENABLE_FAST_CALC #endif #undef SIDX_BLK_LABEL_LOW #undef SIDX_BLK_LABEL_UP #undef SIDX_BLK_START_IDX #undef SIDX_BLK_NEG_ACC #undef SIDX_BLK_LEFT_ACC #undef SIDX_BLK_FOUND_ACC } // 成员方法:updateProperty(计算新下标) __host__ int ConvexHull::updateProperty( int leftflag[], int leftacc[], int negdistflag[], int negdistacc[], int startidx[], int label[], int foundacc[], int cstcnt, int newidx[], int newlabel[]) { // 检查所有参数中的指针和数组,是否为 NULL。 if (leftflag == NULL || leftacc == NULL || negdistflag == NULL || negdistacc == NULL || startidx == NULL || label == NULL || foundacc == NULL || newidx == NULL || newlabel == NULL) return NULL_POINTER; // 如果坐标点集的数量小于等于 0,则报错退出。 if (cstcnt <= 0) return INVALID_DATA; // 计算调用 Kernel 函数所需要的 Grid 和 Block 尺寸,以及每个 Block 所使用的 // Shared Memory 的字节数量。 size_t blocksize = DEF_BLOCK_1D; size_t gridsize = (cstcnt + blocksize - 1) / blocksize; size_t sharedsize = (2 + blocksize * 4) * sizeof (int); // 调用 Kernel 函数, 完成计算。 _updatePropertyKer<<<gridsize, blocksize, 
sharedsize>>>( leftflag, leftacc, negdistflag, negdistacc, startidx, label, foundacc, cstcnt, newidx, newlabel); // 判断 Kernel 函数的执行是否出错。 if (cudaGetLastError() != cudaSuccess) return CUDA_ERROR; // 处理完毕退出。 return NO_ERROR; } // 成员方法:updatePropertyCpu(计算新下标) __host__ int ConvexHull::updatePropertyCpu( int leftflag[], int leftacc[], int negdistflag[], int negdistacc[], int startidx[], int label[], int foundacc[], int cstcnt, int newidx[], int newlabel[]) { // 检查所有参数中的指针和数组,是否为 NULL。 if (leftflag == NULL || leftacc == NULL || negdistflag == NULL || negdistacc == NULL || startidx == NULL || label == NULL || foundacc == NULL || newidx == NULL || newlabel == NULL) return NULL_POINTER; // 如果坐标点集的数量小于等于 0,则报错退出。 if (cstcnt <= 0) return INVALID_DATA; // 宏:CHNI_ENABLE_FAST_CALC(下标值快速计算开关) // 在下面的代码中新下标值的计算可分为快速和普通两种方式。如果开启该定义,则 // 使用快速下标值计算;如果关闭,则是用普通下标值计算。无论是快速计算还是普 // 通计算,两者在计算公式上归根结底是一样的,只是为了减少计算量,某些变量被 // 合并同类项后消掉了。因此快速下标值计算的公式不易理解,普通计算的公式易于 // 理解。快速计算仅仅是普通计算的推导结果。 #define CHNI_ENABLE_FAST_CALC for (int idx = 0; idx < cstcnt; idx++) { if (negdistflag[idx] == 0) { // 针对当前在新发现的凸壳点的左侧还有右侧,需要进行不同的计算公式来 // 确定其在下一轮迭代中的 LABEL 值和在坐标点集中的下标值。 if (leftflag[idx] == 1) { // 对于当前点在新的凸壳点的左侧,计算新的 LABEL 值。这里 // foundacc 的物理含义是当前 LABEL 值之前的各个 LABEL 区域中总 // 共找到的新增凸壳点的数量。 newlabel[idx] = label[idx] + foundacc[label[idx]]; // 对于当前点在新的凸壳点的左侧,计算坐标点在新一轮迭代中的下标 // 值。这里首先确定当前 LABEL 的新的起始下标值,然后再加上该点在 // 其 LABEL 区域内其前面的左侧点的数量,就得到了其新的下标值。 #ifndef CHNI_ENABLE_FAST_CALC int basenewidx = startidx[label[idx]] - negdistacc[startidx[label[idx]]]; int innernewidx = leftacc[idx] - leftacc[startidx[label[idx]]]; newidx[idx] = basenewidx + innernewidx; #else newidx[idx] = startidx[label[idx]] - negdistacc[startidx[label[idx]]] + leftacc[idx] - leftacc[startidx[label[idx]]]; #endif } else { // 对于当前点在新的凸壳点的右侧,计算新的 LABEL 值。这里 // foundacc 的物理含义是当前 LABEL 值之前的各个 LABEL 区域中总共 // 找到的新增凸壳点的数量。 newlabel[idx] = label[idx] + foundacc[label[idx]] + 1; // 对于当前点在新的凸壳点的右侧,计算坐标点在新一轮迭代中的下标 // 值。计算该值,首先计算右侧构成的新的 LABEL 区域的起始位置,这 // 部分只需要在左侧起始下标处加上当前 LABEL 区域总共检出的左侧坐 // 标的数量即可;之后,需要计算该坐标点在新的区域内部的偏移量, // 即内部的原来下标值,减去其前 面的负垂距点数量和左侧点数量。 #ifndef CHNI_ENABLE_FAST_CALC int leftcnt = leftacc[startidx[label[idx] + 1]] - leftacc[startidx[label[idx]]]; int basenewidx = startidx[label[idx]] - negdistacc[startidx[label[idx]]] + leftcnt; int inidx = idx - startidx[label[idx]]; int innegacc = negdistacc[idx] - negdistacc[startidx[label[idx]]]; int inleftacc = leftacc[idx] - leftacc[startidx[label[idx]]]; int innernewidx = inidx - innegacc - inleftacc; newidx[idx] = basenewidx + innernewidx; #else newidx[idx] = idx - negdistacc[idx] + leftacc[startidx[label[idx] + 1]] - leftacc[idx]; #endif } } } // 消除本 Kernel 函数内部的宏定义,防止后面的函数使用造成冲突。 #ifdef CHNI_ENABLE_FAST_CALC # undef CHNI_ENABLE_FAST_CALC #endif return NO_ERROR; } // Kernel 函数: _arrangeCstKer(生成新坐标点集) static __global__ void _arrangeCstKer( CoordiSetCuda cst, int negdistflag[], int newidx[], int tmplabel[], int cstcnt, CoordiSetCuda newcst, int newlabel[]) { // 计算当前 Thread 的全局索引。 int idx = blockIdx.x * blockDim.x + threadIdx.x; // 若当前 Thread 处理的是越界数据,则直接返回 if (idx >= cstcnt) return; // 如果当前 Thread 对应的坐标点是应该在下一轮计算中被排除的负垂距点,那么 // 该 Thread 直接退出。 if (negdistflag[idx] == 1) return; // 读取当前线程所处理的 int newindex = newidx[idx]; // 将坐标集按照新的位置拷贝到新的坐标集中。由于并行拷贝,无法保证顺序,因此 // 这里使用了两个数组。 newcst.tplMeta.tplData[2 * newindex] = cst.tplMeta.tplData[2 * idx]; newcst.tplMeta.tplData[2 * newindex + 1] = cst.tplMeta.tplData[2 * idx + 1]; // 将新的 LABEL 标记从原来的下标处拷贝到新的下标处,由于并行拷贝,无法保证 // 顺序,因此这里使用了两个数组。 newlabel[newindex] = tmplabel[idx]; } // 
成员方法:arrangeCst(生成新坐标点集) __host__ int ConvexHull::arrangeCst( CoordiSet *cst, int negdistflag[], int newidx[], int tmplabel[], int cstcnt, CoordiSet *newcst, int newlabel[]) { // 检查参数中所有的指针和数组是否为空。 if (cst == NULL || newcst == NULL || negdistflag == NULL || tmplabel == NULL || newidx == NULL || newlabel == NULL) return NULL_POINTER; // 检查坐标点数量,必须要大于 0。 if (cstcnt <= 0) return INVALID_DATA; // 局部变量,错误码。 int errcode; // 将输入坐标集拷贝到当前 Device。 errcode = CoordiSetBasicOp::copyToCurrentDevice(cst); if (errcode != NO_ERROR) return errcode; // 将输出坐标集拷贝到当前 Device。 errcode = CoordiSetBasicOp::copyToCurrentDevice(newcst); if (errcode != NO_ERROR) return errcode; // 坐标集的 CUDA 相关数据 CoordiSetCuda *cstCud = COORDISET_CUDA(cst); CoordiSetCuda *newcstCud = COORDISET_CUDA(newcst); // 计算调用 Kernel 函数 Block 尺寸和 Block 数量。 size_t blocksize = DEF_BLOCK_1D; size_t gridsize = (cstcnt + blocksize - 1) / blocksize; // 调用 Kernel 函数,完成计算。 _arrangeCstKer<<<gridsize, blocksize>>>( *cstCud, negdistflag, newidx, tmplabel, cstcnt, *newcstCud, newlabel); // 判断 Kernel 函数执行是否出错。 if (cudaGetLastError() != cudaSuccess) return CUDA_ERROR; // 处理完毕退出。 return NO_ERROR; } // 成员方法:arrangeCstCpu(生成新坐标点集) __host__ int ConvexHull::arrangeCstCpu( CoordiSet *cst, int negdistflag[], int newidx[], int tmplabel[], int cstcnt, CoordiSet *newcst, int newlabel[]) { // 检查参数中所有的指针和数组是否为空。 if (cst == NULL || newcst == NULL || negdistflag == NULL || tmplabel == NULL || newidx == NULL || newlabel == NULL) return NULL_POINTER; // 检查坐标点数量,必须要大于 0。 if (cstcnt <= 0) return INVALID_DATA; // 局部变量,错误码。 int errcode; // 将输入坐标集拷贝到当前 Host。 errcode = CoordiSetBasicOp::copyToHost(cst); if (errcode != NO_ERROR) return errcode; // 将输出坐标集拷贝到当前 Host。 errcode = CoordiSetBasicOp::copyToHost(newcst); if (errcode != NO_ERROR) return errcode; // 坐标集的 CUDA 相关数据 CoordiSetCuda *cstCud = COORDISET_CUDA(cst); CoordiSetCuda *newcstCud = COORDISET_CUDA(newcst); for (int idx = 0; idx < cstcnt; idx++) { if (negdistflag[idx] == 0) { int newindex = newidx[idx]; // 将坐标集按照新的位置拷贝到新的坐标集中。由于并行拷贝,无法保证顺 // 序,因此这里使用了两个数组。 (*newcstCud).tplMeta.tplData[2 * newindex] = (*cstCud).tplMeta.tplData[2 * idx]; (*newcstCud).tplMeta.tplData[2 * newindex + 1] = (*cstCud).tplMeta.tplData[2 * idx + 1]; // 将新的 LABEL 标记从原来的下标处拷贝到新的下标处,由于并行拷贝, // 无法保证顺序,因此这里使用了两个数组。 newlabel[newindex] = tmplabel[idx]; } } return NO_ERROR; } // Kernel 函数:_flipWholeCstKer(整体翻转坐标点集) static __global__ void _flipWholeCstKer( CoordiSetCuda incst, CoordiSetCuda outcst) { // 计算当前 Thread 的全局下标 int idx = blockIdx.x * blockDim.x + threadIdx.x; // 如果当前 Thread 处理的是越界数据,则直接退出。 if (idx >= incst.tplMeta.count) return; // 将 x 和 y 坐标的相反数赋值给输出坐标点集。 outcst.tplMeta.tplData[2 * idx] = -incst.tplMeta.tplData[2 * idx]; outcst.tplMeta.tplData[2 * idx + 1] = -incst.tplMeta.tplData[2 * idx + 1]; } // Host 成员方法:flipWholeCstCpu(整体翻转坐标点集) __host__ int ConvexHull::flipWholeCstCpu(CoordiSet *incst, CoordiSet *outcst) { // 检查输入坐标点集是否为 NULL。 if (incst == NULL) return NULL_POINTER; // 检查输入坐标点集是否包含有效的坐标点。 if (incst->count <= 0 || incst->tplData == NULL) return INVALID_DATA; // 如果输出点集为 NULL,则函数会进行 In-Place 操作,即将输出点集赋值为输入 // 点集。 if (outcst == NULL) outcst = incst; // 声明局部变量,错误码。 int errcode; // 将输入坐标点集拷贝到当前 Host 中。 errcode = CoordiSetBasicOp::copyToHost(incst); if (errcode != NO_ERROR) return errcode; // 对于 Out-Place 方法还需要对输出坐标点集进行初始化操作。 if (incst != outcst) { // 将输出坐标集拷贝入 Host 内存。 errcode = CoordiSetBasicOp::copyToHost(outcst); if (errcode != NO_ERROR) { // 如果输出坐标集无数据(故上面的拷贝函数会失败),则会创建一个和 // 输入坐标集寸相同的图像。 errcode = CoordiSetBasicOp::makeAtHost( outcst, 
incst->count); // 如果创建坐标集也操作失败,则说明操作彻底失败,报错退出。 if (errcode != NO_ERROR) return errcode; } } // 取出两个坐标点集对应的 CUDA 型变量。 CoordiSetCuda *incstCud = COORDISET_CUDA(incst); CoordiSetCuda *outcstCud = COORDISET_CUDA(outcst); // 为了防止越界访存,这里临时将输入点集的尺寸切换为输入和输出点集中较小的那 // 个。当然,在操作后还需要将点集的数量恢复,因此,通过另一个变量保存原始的 // 坐标点数量。 int incstcntorg = incst->count; if (incst->count > outcst->count) incst->count = outcst->count; for (int idx = 0; idx < (*incstCud).tplMeta.count; idx++) { // 将 x 和 y 坐标的相反数赋值给输出坐标点集。 (*outcstCud).tplMeta.tplData[2 * idx] = -(*incstCud).tplMeta.tplData[2 * idx]; (*outcstCud).tplMeta.tplData[2 * idx + 1] = -(*incstCud).tplMeta.tplData[2 * idx + 1]; } // 回复输入坐标点集中坐标点的数量。 incst->count = incstcntorg; // 处理完毕退出。 return NO_ERROR; } // Host 成员方法:flipWholeCst(整体翻转坐标点集) __host__ int ConvexHull::flipWholeCst(CoordiSet *incst, CoordiSet *outcst) { // 检查输入坐标点集是否为 NULL。 if (incst == NULL) return NULL_POINTER; // 检查输入坐标点集是否包含有效的坐标点。 if (incst->count <= 0 || incst->tplData == NULL) return INVALID_DATA; // 如果输出点集为 NULL,则函数会进行 In-Place 操作,即将输出点集赋值为输入 // 点集。 if (outcst == NULL) outcst = incst; // 声明局部变量,错误码。 int errcode; // 将输入坐标点集拷贝到当前 Device 中。 errcode = CoordiSetBasicOp::copyToCurrentDevice(incst); if (errcode != NO_ERROR) return errcode; // 对于 Out-Place 方法还需要对输出坐标点集进行初始化操作。 if (incst != outcst) { // 将输出坐标集拷贝入 Device 内存。 errcode = CoordiSetBasicOp::copyToCurrentDevice(outcst); if (errcode != NO_ERROR) { // 如果输出坐标集无数据(故上面的拷贝函数会失败),则会创建一个和 // 输入坐标集寸相同的图像。 errcode = CoordiSetBasicOp::makeAtCurrentDevice( outcst, incst->count); // 如果创建坐标集也操作失败,则说明操作彻底失败,报错退出。 if (errcode != NO_ERROR) return errcode; } } // 取出两个坐标点集对应的 CUDA 型变量。 CoordiSetCuda *incstCud = COORDISET_CUDA(incst); CoordiSetCuda *outcstCud = COORDISET_CUDA(outcst); // 为了防止越界访存,这里临时将输入点集的尺寸切换为输入和输出点集中较小的那 // 个。当然,在操作后还需要将点集的数量恢复,因此,通过另一个变量保存原始的 // 坐标点数量。 int incstcntorg = incst->count; if (incst->count > outcst->count) incst->count = outcst->count; // 计算启动 Kernel 函数所需要的 Thread 数量。 size_t blocksize = DEF_BLOCK_1D; size_t gridsize = (incst->count + blocksize - 1) / blocksize; // 启动 Kernel 函数完成计算。 _flipWholeCstKer<<<gridsize, blocksize>>>(*incstCud, *outcstCud); // 回复输入坐标点集中坐标点的数量。 incst->count = incstcntorg; // 检查 Kernel 函数是否执行正确。 if (cudaGetLastError() != cudaSuccess) return CUDA_ERROR; // 处理完毕退出。 return NO_ERROR; } // 宏:FAIL_CONVEXHULL_FREE // 如果出错,就释放之前申请的内存。 #define FAIL_CONVEXHULL_FREE do { \ if (tmpmem != NULL) \ delete tmpmem; \ if (tmpcstin != NULL) \ CoordiSetBasicOp::deleteCoordiSet(tmpcstin); \ if (tmpcstout != NULL) \ CoordiSetBasicOp::deleteCoordiSet(tmpcstout); \ if (tmpconvexin != NULL) \ CoordiSetBasicOp::deleteCoordiSet(tmpconvexin); \ if (tmpconvexout != NULL) \ CoordiSetBasicOp::deleteCoordiSet(tmpconvexout); \ } while (0) // 成员方法:convexHullIter(迭代法求凸壳上的点集) __host__ int ConvexHull::convexHullIterCpu( CoordiSet *inputcst, CoordiSet *convexcst, bool lowerconvex) { // 检查输入坐标集,输出坐标集是否为空。 if (inputcst == NULL || convexcst == NULL) return NULL_POINTER; // 如果输入点集中不含有任何的坐标点,则直接退出。 if (inputcst->count < 1 || inputcst->tplData == NULL) return INVALID_DATA; // 如果输入点集中点的数量少于 2 个点时,则不需要任何求解过程,直接将输入点 // 集拷贝到输出点集即可。虽然当坐标点集中仅包含两个点时也可以直接判定为凸壳 // 点,但考虑到顺序问题,代码还是让仅有两个点的情况走完整个流程。 if (inputcst->count < 2) return CoordiSetBasicOp::copyToHost(inputcst, convexcst); // 局部变量 int errcode; // 定义扫描所用的二元操作符。 add_class<int> add; // 采用 CPU scan this->aryScan.setScanType(CPU_IN_SCAN); int cstcnt = inputcst->count; // 坐标点集中点的数量。这里之所以将其使用另 // 外一个变量保存出来是因为这个值随着迭代会 // 变化,如果直接使用 CoordiSet 中的 count // 域会带来内存管理上的不便。 int convexcnt = 2; // 当前凸壳点的数量,由于迭代开始时,已经实 // 
现找到了点集中的最左和最有两点作为凸壳 // 点,因此这里直接赋值为 2。 int foundcnt; // 当前迭代时找到的新凸壳点的数量,这一数量 // 并不包含往次所找到的凸壳点。 int negdistcnt; // 当前负垂距点的数量。 //int itercnt = 0; // 迭代次数记录器。 int *tmpmem = NULL; // 存放中间变量的内存空间。 CoordiSet *tmpcstin = NULL; // 每次迭代中作为输入坐标点集的临时坐标点 // 集。 CoordiSet *tmpcstout = NULL; // 每次迭代中作为输出坐标点击的临时坐标点 // 集。 CoordiSet *tmpconvexin = NULL; // 每次迭代中作为输入凸壳点集的临时坐标点 // 集。 CoordiSet *tmpconvexout = NULL; // 每次迭代中作为输出凸壳点集(新凸壳点 // 集)的临时坐标点集。 size_t datacnt = 0; // 所需要的数据元素的数量。 size_t datasize = 0; // 书需要的数据元素的字节尺寸。 // 宏:CHI_DATA_DECLARE(中间变量声明器) // 为了消除中间变量声明过程中大量的重复代码,这里提供了一个宏,使代码看起来 // 整洁一些。 #define CHI_DATA_DECLARE(dataname, type, count) \ type *dataname = NULL; \ size_t dataname##cnt = (count); \ datacnt += dataname##cnt; \ datasize += dataname##cnt * sizeof (type) // 声明各个中间变量的 Device 数组。 CHI_DATA_DECLARE(label, int, // 记录当前迭代中每个像素点所在的 inputcst->count); // LABEL 区域。 CHI_DATA_DECLARE(negdistflag, int, // 记录当前迭代中每个像素点是否具有 inputcst->count); // 负垂距。 CHI_DATA_DECLARE(negdistacc, int, // 记录当前迭代中具有负垂距点的累加 inputcst->count + 1); // 和,其物理含义是在当前点之前存在 // 多少个负垂距点,其最后一个元素表 // 示当前迭代共找到了多少个负垂距 // 点。 CHI_DATA_DECLARE(maxdistidx, int, // 记录当前迭代中每个坐标点前面的所 inputcst->count); // 有点中和其在同一个 LABEL 区域的 // 所有点中具有最大垂距的下标。 CHI_DATA_DECLARE(foundflag, int, // 记录当前迭代中各个 LABEL 区域是 inputcst->count); // 否找到了新的凸壳点。 CHI_DATA_DECLARE(foundacc, int, // 记录当前迭代中每个 LABEL 区域其 inputcst->count + 1); // 前面的所有 LABEL 区域共找到的新 // 的凸壳点的数量。该值用于计算各个 // 凸壳点(无论是旧的还是新的)在新 // 的凸壳点集中的新下标。 CHI_DATA_DECLARE(leftflag, int, // 记录当前的坐标点是否处于新发现的 inputcst->count); // 坐标点的左侧 CHI_DATA_DECLARE(leftacc, int, // 记录当前的坐标点之前的左侧点的数 inputcst->count + 1); // 量。该数组用于计算坐标点在下一轮 // 计算中的下标。 CHI_DATA_DECLARE(startidx, int, // 记录每个 LABEL 区域在坐标点集中 inputcst->count); // 的起始下标 CHI_DATA_DECLARE(newidx, int, // 记录当前坐标点集中各个坐标点在下 inputcst->count); // 一轮迭代中的新的下标。 CHI_DATA_DECLARE(tmplabel, int, inputcst->count); CHI_DATA_DECLARE(newlabel, int, // 记录当前坐标点集中各个坐标点在下 inputcst->count); // 一轮迭代中新的 LABEL 区域。 // 消除中间变量声明器这个宏,防止后续步骤的命名冲突。 #undef CHI_DATA_DECLARE // 中间变量申请 Host 内存空间,并将这些空间分配给各个中间变量。 tmpmem = new int[datasize]; // 为各个中间变量分配内存空间,采用这种一次申请一个大空间的做法是为了减少申 // 请内存的开销,同时也减少因内存对齐导致的内存浪费。 label = tmpmem; negdistflag = label + labelcnt; negdistacc = negdistflag + negdistflagcnt; maxdistidx = negdistacc + negdistacccnt; foundflag = maxdistidx + maxdistidxcnt; foundacc = foundflag + foundflagcnt; leftflag = foundacc + foundacccnt; leftacc = leftflag + leftflagcnt; startidx = leftacc + leftacccnt; newidx = startidx + startidxcnt; newlabel = newidx + newidxcnt; tmplabel = newlabel + newlabelcnt; // 宏:CHI_USE_SYS_FUNC // 该开关宏用于指示是否在后续步骤中尽量使用 CUDA 提供的函数,而不是启动由开 // 发方自行编写的 Kernel 函数完成操作。 //#define CHI_USE_SYS_FUNC // 初始化 LABEL 数组。 #ifdef CHI_USE_SYS_FUNC // 首先将 LABEL 数组中所有内存元素全部置零。 memset(label, 0, labelcnt * sizeof (int)); // 将 LABEL 数组中最后一个元素置 1。 label[cstcnt - 1] = 1; #else // 调用 LABEL 初始化函数,完成 LABEL 初始化。初始化后,除最后一个元素为 1 // 外,其余元素皆为 0。 errcode = this->initLabelAryCpu(label, cstcnt); if (errcode != NO_ERROR) { FAIL_CONVEXHULL_FREE; return errcode; } #endif // 初始化迭代过程中使用的坐标点集,这里一共需要使用到两个坐标点集,为了不破 // 坏输入坐标点集,这里在迭代过程中我们使用内部申请的坐标点集。 #ifdef CH_DEBUG_CPU_PRINT cout << "[convexHullIterCpu]init CoordiSet" << endl; #endif // 初始化第一个坐标点集。 errcode = CoordiSetBasicOp::newCoordiSet(&tmpcstin); if (errcode != NO_ERROR) { FAIL_CONVEXHULL_FREE; return errcode; } // 将输入坐标点集中的数据从输入点集中拷贝到第一个坐标点集中。此后所有的操作 // 仅在临时坐标点集中处理,不再碰触输入坐标点集。这里如果是求解上半凸壳,则 // 直接调用翻转坐标点的函数。 if (lowerconvex) errcode = CoordiSetBasicOp::copyToHost(inputcst, tmpcstin); else errcode = this->flipWholeCst(inputcst, tmpcstin); if (errcode != NO_ERROR) { 
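        // (Added note - illustration only.) The flip above exploits the
        // identity that the upper hull of a point set P is the point-wise
        // negation of the lower hull of { (-x, -y) : (x, y) in P }. For
        // example, the upper-hull vertex (1, 2) of
        // P = { (0, 0), (1, 2), (2, 0) } corresponds to the lower-hull
        // vertex (-1, -2) of the negated set. This is why flipWholeCst
        // negates both coordinates before the iteration starts and the
        // result is flipped back once more in the !lowerconvex branch after
        // the loop.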
FAIL_CONVEXHULL_FREE; return errcode; } // 初始化第二个坐标点集。 errcode = CoordiSetBasicOp::newCoordiSet(&tmpcstout); if (errcode != NO_ERROR) { FAIL_CONVEXHULL_FREE; return errcode; } // 在 Device 内存中初始化第二个坐标点集,为其申请足够长度的内存空间。 errcode = CoordiSetBasicOp::makeAtHost(tmpcstout, inputcst->count); if (errcode != NO_ERROR) { FAIL_CONVEXHULL_FREE; return errcode; } // 初始化迭代过程中使用到的凸壳点集,这里一共需要两个凸壳点集。我们不急于更 // 新输出参数 convexcst,是因为避免不必要的麻烦,等到凸壳计算完毕后,再将凸 // 壳内容拷贝到输出参数中。 // 初始化第一个凸壳点集。 errcode = CoordiSetBasicOp::newCoordiSet(&tmpconvexin); if (errcode != NO_ERROR) { FAIL_CONVEXHULL_FREE; return errcode; } // 在 Device 内存中初始化第一个凸壳点集,为其申请足够长度的内存空间。 errcode = CoordiSetBasicOp::makeAtHost(tmpconvexin, inputcst->count); if (errcode != NO_ERROR) { FAIL_CONVEXHULL_FREE; return errcode; } // 初始化第二个凸壳点集。 errcode = CoordiSetBasicOp::newCoordiSet(&tmpconvexout); if (errcode != NO_ERROR) { FAIL_CONVEXHULL_FREE; return errcode; } // 在 Device 内存中初始化第二个凸壳点集,为其申请足够长度的内存空间。 errcode = CoordiSetBasicOp::makeAtHost(tmpconvexout, inputcst->count); if (errcode != NO_ERROR) { FAIL_CONVEXHULL_FREE; return errcode; } #ifdef CH_DEBUG_CPU_PRINT cout << "[convexHullIterCpu]init CoordiSet finish" << endl; #endif // 寻找最左最右点,并利用这两个点初始化输入点集和凸壳点集。初始化后,输入点 // 集的第一个点为最左点,最后一个点为最右点;凸壳点集中仅包含最左最右两个 // 点。 errcode = swapEdgePointCpu(tmpcstin, tmpconvexin); if (errcode != NO_ERROR) { FAIL_CONVEXHULL_FREE; return errcode; } #ifdef CH_DEBUG_CPU_PRINT cout << "[convexHullIterCpu]swap finish" << endl; cout << "[convexHullIterCpu]interation begin" << endl; #endif // 所有的初始化过程至此全部完毕,开始进行迭代。每次迭代都需要重新计算坐标点 // 在其 LABEL 区域内的垂距,然后根据垂距信息判断每个 LABEL 区域内是否存在新 // 的凸壳点(如果有需要确定是哪一个点),之后根据这个新发现的凸壳点,计算所 // 有坐标点在下一轮迭代中的下标。计算后的下标要求属于一个 LABEL 的点都在一 // 起,并且排除所有具有负垂距的点,因为这些点在下一轮迭代中已经毫无意义。迭 // 代的过程知道无法在从当前所有的 LABEL 区域内找到新的凸壳点为止。此处循环 // 的判断条件只是一个防护性措施,若坐标点集的数量同凸壳点相等,那就说明没有 // 任何可能在找到新的凸壳点了。 while (cstcnt >= convexcnt) { #ifdef CH_DEBUG_CPU_PRINT cout << "[convexHullIterCpu]new: convexcnt is " << convexcnt << endl; cout << endl; cout << "[convexHullIterCpu]updatedist begin" << endl; cout << "[convexHullIterCpu]cstcnt is " << cstcnt << endl; #endif // 调用更新垂距函数。更新点集中每个点的垂距值和负垂距标志数组。 errcode = this->updateDistCpu(tmpcstin, tmpconvexin, label, cstcnt, negdistflag); if (errcode != NO_ERROR) { FAIL_CONVEXHULL_FREE; return errcode; } #ifdef CH_DEBUG_CPU_PRINT cout << endl; cout << "[convexHullIterCpu]segscan begin" << endl; #endif // 利用分段扫描得到各个 LABEL 区域的最大垂距,记忆最大垂距坐标点的下标 // 值。 errcode = this->segScan.segmentedScanCpu( ATTACHED_DATA(tmpcstin), label, ATTACHED_DATA(tmpcstout), maxdistidx, cstcnt, false); if (errcode != NO_ERROR) { FAIL_CONVEXHULL_FREE; return errcode; } #ifdef CH_DEBUG_CPU_PRINT cout << "[convexHullIterCpu]segscan end" << endl; cout << "[convexHullIterCpu]updateFoundInfoCpu begin" << endl; #endif // 根据所求出来的垂距信息判断各个 LABEL 区域是否有新的凸壳点存在。 errcode = this->updateFoundInfoCpu( label, ATTACHED_DATA(tmpcstin), maxdistidx, cstcnt, foundflag, startidx); if (errcode != NO_ERROR) { FAIL_CONVEXHULL_FREE; return errcode; } #ifdef CH_DEBUG_CPU_PRINT cout << "[convexHullIterCpu]updateFoundInfoCpu end" << endl; cout << "[convexHullIterCpu]scan foudnd begin" << endl; #endif // 通过扫描,计算出 LABEL 区域新发现凸壳点标记值对应的累加值。 errcode = this->aryScan.scanArrayExclusive(foundflag, foundacc, convexcnt, add, false, true, true); if (errcode != NO_ERROR) { FAIL_CONVEXHULL_FREE; return errcode; } #ifdef CH_DEBUG_CPU_PRINT cout << "[convexHullIterCpu]scan found end" << endl; #endif // 将新凸壳点标记累加值的最后一个拷贝到 Host 内存中,这个累加值的含义是 // 当前迭代下所有新发现的凸壳点的数量。 foundcnt = foundacc[convexcnt]; #ifdef CH_DEBUG_CPU_PRINT cout << 
"[convexHullIterCpu]foundcnt now is " << foundcnt << endl; #endif // 如果新发现的凸壳点的数量小于等于 0,则说明说有的凸壳点都已经被找到, // 没有必要在继续做下去了,因此退出迭代。 if (foundcnt <= 0) break; #ifdef CH_DEBUG_CPU_PRINT cout << "[convexHullIterCpu]updateConvexCstCpu begin" << endl; #endif // 更新凸壳点集,将新发现的凸壳点集更新到凸壳点集中。 errcode = this->updateConvexCstCpu( tmpcstin, tmpconvexin, foundflag, foundacc, startidx, maxdistidx, convexcnt, tmpconvexout); if (errcode != NO_ERROR) { FAIL_CONVEXHULL_FREE; return errcode; } #ifdef CH_DEBUG_CPU_PRINT cout << "[convexHullIterCpu]updateConvexCstCpu end" << endl; #endif // 更新凸壳点集中点的数量。 convexcnt += foundcnt; #ifdef CH_DEBUG_CPU_PRINT cout << "[convexHullIterCpu]convexcnt now is " << convexcnt << endl; cout << "[convexHullIterCpu]markLeftPointsCpu begin" << endl; #endif // 标记左侧点。所谓左侧点是在某 LABEL 区域内处于新发现的凸壳点左侧的 // 点。 errcode = this->markLeftPointsCpu( tmpcstin, tmpconvexout, negdistflag, label, foundflag, foundacc, cstcnt, leftflag); if (errcode != NO_ERROR) { FAIL_CONVEXHULL_FREE; return errcode; } #ifdef CH_DEBUG_CPU_PRINT cout << "[convexHullIterCpu]markLeftPointsCpu end" << endl; cout << "[convexHullIterCpu]scanArrayExclusive neg begin" << endl; #endif // 通过扫描,计算出负垂距点标记数组对应的累加数组。negdistflagDev 实在 // 第一步更新垂距的时候获得的,之所以这么晚才计算其对应的累加数组,是因 // 为在前面检查 foundcnt 退出循环之前不需要这个数据,这样,如果真的在该 // 处退出,则程序进行了多余的计算,为了避免这一多余计算,我们延后计算 // negdistaccDev 至此处。 errcode = this->aryScan.scanArrayExclusive( negdistflag, negdistacc, cstcnt, add, false, true, true); if (errcode != NO_ERROR) { FAIL_CONVEXHULL_FREE; return errcode; } #ifdef CH_DEBUG_CPU_PRINT cout << "[convexHullIterCpu]scanArrayExclusive neg end" << endl; #endif // 将负垂距点累加总和拷贝出来,用来更新下一轮循环的坐标点数量值。 negdistcnt = negdistacc[cstcnt]; #ifdef CH_DEBUG_CPU_PRINT cout << "[convexHullIterCpu]negdistcnt now is " << negdistcnt << endl; cout << "[convexHullIterCpu]scanArrayExclusive left begin" << endl; #endif // 通过扫描计算处左侧点标记数组对应的累加数组。 errcode = this->aryScan.scanArrayExclusive( leftflag, leftacc, cstcnt, add, false, true, true); if (errcode != NO_ERROR) { FAIL_CONVEXHULL_FREE; return errcode; } #ifdef CH_DEBUG_CPU_PRINT cout << "[convexHullIterCpu]scanArrayExclusive left end" << endl; cout << "[convexHullIterCpu]updatePropertyCpu begin" << endl; #endif // 计算各个坐标点在下一轮迭代中的新下标。 errcode = this->updatePropertyCpu( leftflag, leftacc, negdistflag, negdistacc, startidx, label, foundacc, cstcnt, newidx, tmplabel); // Merlin debug cudaDeviceSynchronize(); if (errcode != NO_ERROR) { FAIL_CONVEXHULL_FREE; return errcode; } #ifdef CH_DEBUG_CPU_PRINT cout << "[convexHullIterCpu]updatePropertyCpu end" << endl; cout << "[convexHullIterCpu]arrangeCstCpu begin" << endl; #endif // 根据上一步计算得到的新下标,生成下一轮迭代所需要的坐标点集。 errcode = this->arrangeCstCpu( tmpcstin, negdistflag, newidx, tmplabel, cstcnt, tmpcstout, newlabel); #ifdef CH_DEBUG_CPU_PRINT cout << "[convexHullIterCpu]arrangeCstCpu end" << endl; #endif // 交还部分中间变量,将本轮迭代得到的结果给到下一轮迭代的参数。 int *labelswptmp = label; label = newlabel; newlabel = labelswptmp; CoordiSet *cstswptmp = tmpcstin; tmpcstin = tmpcstout; tmpcstout = cstswptmp; cstswptmp = tmpconvexin; tmpconvexin = tmpconvexout; tmpconvexout = cstswptmp; cstcnt -= negdistcnt; #ifdef CH_DEBUG_CPU_PRINT cout << "[convexHullIterCpu]cstcnt now is " << cstcnt << endl; #endif // 一轮迭代到此结束。 } // 将计算出来的凸壳点拷贝到输出点集中。迭代完成后,tmpconvexin 保存有最后的 // 结果。如果在 while 判断条件处退出迭代,则上一轮求出的凸壳点集是最终结 // 果,此时在上一轮末,由于交换指针,使得原本存放在tmpconvexout 的最终结果 // 变为了存放在 tmpconvexin 中;如果迭代实在判断有否新发现点处退出,则说明 // 当前并未发现新的凸壳点,那么 tmpconvexin 和 tmpconvexout 内容应该是一致 // 的,但本着稳定的原则,应该取更早形成的变量,即 tmpconvexin。 // 首先临时将这个存放结果的点集的点数量修改为凸壳点的数量。 
tmpconvexin->count = convexcnt; // 然后,将计算出来的凸壳点拷贝到输出参数中。如果是求解上半凸壳点,则需要将 // 结果翻转后输出,但是由于翻转函数不能改变输出点集的点的数量,因此,这里还 // 需要先使用拷贝函数,调整输出点的数量(好在,通常凸壳点的数量不错,这一步 // 骤不会造成太能的性能下降,若日后发现有严重的性能下降,还需要额外写一个更 // 加复杂一些的翻转函数。) errcode = CoordiSetBasicOp::copyToHost(tmpconvexin, convexcst); if (errcode != NO_ERROR) { tmpconvexin->count = inputcst->count; FAIL_CONVEXHULL_FREE; return errcode; } // 最后,为了程序稳定性的考虑,回复其凸壳点的数量。 tmpconvexin->count = inputcst->count; // 释放内存 delete tmpmem; // cudaFree(tmpmemDev); CoordiSetBasicOp::deleteCoordiSet(tmpcstin); CoordiSetBasicOp::deleteCoordiSet(tmpcstout); CoordiSetBasicOp::deleteCoordiSet(tmpconvexin); CoordiSetBasicOp::deleteCoordiSet(tmpconvexout); // 最后,如果所求点是上半凸壳,则还需要翻转所有凸壳点。 if (!lowerconvex) { errcode = this->flipWholeCstCpu(convexcst, convexcst); if (errcode != NO_ERROR) return errcode; } // 操作完毕,退出。 return NO_ERROR; } #undef FAIL_CONVEXHULL_FREE // 宏:FAIL_CONVEXHULL_FREE // 如果出错,就释放之前申请的内存。 #define FAIL_CONVEXHULL_FREE do { \ if (tmpmemDev != NULL) \ cudaFree(tmpmemDev); \ if (tmpcstin != NULL) \ CoordiSetBasicOp::deleteCoordiSet(tmpcstin); \ if (tmpcstout != NULL) \ CoordiSetBasicOp::deleteCoordiSet(tmpcstout); \ if (tmpconvexin != NULL) \ CoordiSetBasicOp::deleteCoordiSet(tmpconvexin); \ if (tmpconvexout != NULL) \ CoordiSetBasicOp::deleteCoordiSet(tmpconvexout); \ } while (0) // 成员方法:convexHullIter(迭代法求凸壳上的点集) __host__ int ConvexHull::convexHullIter( CoordiSet *inputcst, CoordiSet *convexcst, bool lowerconvex) { // 检查输入坐标集,输出坐标集是否为空。 if (inputcst == NULL || convexcst == NULL) return NULL_POINTER; // 如果输入点集中不含有任何的坐标点,则直接退出。 if (inputcst->count < 1 || inputcst->tplData == NULL) return INVALID_DATA; // 如果输入点集中点的数量少于 2 个点时,则不需要任何求解过程,直接将输入点 // 集拷贝到输出点集即可。虽然当坐标点集中仅包含两个点时也可以直接判定为凸壳 // 点,但考虑到顺序问题,代码还是让仅有两个点的情况走完整个流程。 if (inputcst->count < 2) return CoordiSetBasicOp::copyToCurrentDevice(inputcst, convexcst); // 局部变量 cudaError_t cuerrcode; // CUDA 函数调用返回的错误码 int errcode; // 调用函数返回的错误码 // 定义扫描所用的二元操作符。 add_class<int> add; int cstcnt = inputcst->count; // 坐标点集中点的数量。这里之所以将其使用另 // 外一个变量保存出来是因为这个值随着迭代会 // 变化,如果直接使用 CoordiSet 中的 count // 域会带来内存管理上的不便。 int convexcnt = 2; // 当前凸壳点的数量,由于迭代开始时,已经实 // 现找到了点集中的最左和最有两点作为凸壳 // 点,因此这里直接赋值为 2。 int foundcnt; // 当前迭代时找到的新凸壳点的数量,这一数量 // 并不包含往次所找到的凸壳点。 int negdistcnt; // 当前负垂距点的数量。 //int itercnt = 0; // 迭代次数记录器。 int *tmpmemDev = NULL; // 存放中间变量的 Device 内存空间。 CoordiSet *tmpcstin = NULL; // 每次迭代中作为输入坐标点集的临时坐标点 // 集。 CoordiSet *tmpcstout = NULL; // 每次迭代中作为输出坐标点击的临时坐标点 // 集。 CoordiSet *tmpconvexin = NULL; // 每次迭代中作为输入凸壳点集的临时坐标点 // 集。 CoordiSet *tmpconvexout = NULL; // 每次迭代中作为输出凸壳点集(新凸壳点 // 集)的临时坐标点集。 size_t datacnt = 0; // 所需要的数据元素的数量。 size_t datasize = 0; // 书需要的数据元素的字节尺寸。 // 宏:CHI_DATA_DECLARE(中间变量声明器) // 为了消除中间变量声明过程中大量的重复代码,这里提供了一个宏,使代码看起来 // 整洁一些。 #define CHI_DATA_DECLARE(dataname, type, count) \ type *dataname##Dev = NULL; \ size_t dataname##cnt = (count); \ datacnt += dataname##cnt; \ datasize += dataname##cnt * sizeof (type) // 声明各个中间变量的 Device 数组。 CHI_DATA_DECLARE(label, int, // 记录当前迭代中每个像素点所在的 inputcst->count); // LABEL 区域。 CHI_DATA_DECLARE(negdistflag, int, // 记录当前迭代中每个像素点是否具有 inputcst->count); // 负垂距。 CHI_DATA_DECLARE(negdistacc, int, // 记录当前迭代中具有负垂距点的累加 inputcst->count + 1); // 和,其物理含义是在当前点之前存在 // 多少个负垂距点,其最后一个元素表 // 示当前迭代共找到了多少个负垂距 // 点。 CHI_DATA_DECLARE(maxdistidx, int, // 记录当前迭代中每个坐标点前面的所 inputcst->count); // 有点中和其在同一个 LABEL 区域的 // 所有点中具有最大垂距的下标。 CHI_DATA_DECLARE(foundflag, int, // 记录当前迭代中各个 LABEL 区域是 inputcst->count); // 否找到了新的凸壳点。 CHI_DATA_DECLARE(foundacc, int, // 记录当前迭代中每个 LABEL 区域其 inputcst->count + 1); // 前面的所有 LABEL 
区域共找到的新 // 的凸壳点的数量。该值用于计算各个 // 凸壳点(无论是旧的还是新的)在新 // 的凸壳点集中的新下标。 CHI_DATA_DECLARE(leftflag, int, // 记录当前的坐标点是否处于新发现的 inputcst->count); // 坐标点的左侧 CHI_DATA_DECLARE(leftacc, int, // 记录当前的坐标点之前的左侧点的数 inputcst->count + 1); // 量。该数组用于计算坐标点在下一轮 // 计算中的下标。 CHI_DATA_DECLARE(startidx, int, // 记录每个 LABEL 区域在坐标点集中 inputcst->count); // 的起始下标 CHI_DATA_DECLARE(newidx, int, // 记录当前坐标点集中各个坐标点在下 inputcst->count); // 一轮迭代中的新的下标。 CHI_DATA_DECLARE(tmplabel, int, inputcst->count); CHI_DATA_DECLARE(newlabel, int, // 记录当前坐标点集中各个坐标点在下 inputcst->count); // 一轮迭代中新的 LABEL 区域。 // 消除中间变量声明器这个宏,防止后续步骤的命名冲突。 #undef CHI_DATA_DECLARE // 中间变量申请 Device 内存空间,并将这些空间分配给各个中间变量。 cuerrcode = cudaMalloc((void **)&tmpmemDev, datasize); if (cuerrcode != cudaSuccess) { FAIL_CONVEXHULL_FREE; return CUDA_ERROR; } // 为各个中间变量分配内存空间,采用这种一次申请一个大空间的做法是为了减少申 // 请内存的开销,同时也减少因内存对齐导致的内存浪费。 labelDev = tmpmemDev; negdistflagDev = labelDev + labelcnt; negdistaccDev = negdistflagDev + negdistflagcnt; maxdistidxDev = negdistaccDev + negdistacccnt; foundflagDev = maxdistidxDev + maxdistidxcnt; foundaccDev = foundflagDev + foundflagcnt; leftflagDev = foundaccDev + foundacccnt; leftaccDev = leftflagDev + leftflagcnt; startidxDev = leftaccDev + leftacccnt; newidxDev = startidxDev + startidxcnt; newlabelDev = newidxDev + newidxcnt; tmplabelDev = newlabelDev + newlabelcnt; // 宏:CHI_USE_SYS_FUNC // 该开关宏用于指示是否在后续步骤中尽量使用 CUDA 提供的函数,而不是启动由开 // 发方自行编写的 Kernel 函数完成操作。 //#define CHI_USE_SYS_FUNC // 初始化 LABEL 数组。 #ifdef CHI_USE_SYS_FUNC // 首先将 LABEL 数组中所有内存元素全部置零。 cuerrcode = cudaMemset(labelDev, 0, labelcnt * sizeof (int)); if (cuerrcode != cudaSuccess) { FAIL_CONVEXHULL_FREE; return CUDA_ERROR; } // 将 LABEL 数组中最后一个元素置 1。 int tmp_one = 1; cuerrcode = cudaMemcpy(&labelDev[cstcnt - 1], &tmp_one, sizeof (int), cudaMemcpyHostToDevice); if (cuerrcode != cudaSuccess) { FAIL_CONVEXHULL_FREE; return CUDA_ERROR; } #else // 调用 LABEL 初始化函数,完成 LABEL 初始化。初始化后,除最后一个元素为 1 // 外,其余元素皆为 0。 errcode = this->initLabelAry(labelDev, cstcnt); if (errcode != NO_ERROR) { FAIL_CONVEXHULL_FREE; return errcode; } #endif // 初始化迭代过程中使用的坐标点集,这里一共需要使用到两个坐标点集,为了不破 // 坏输入坐标点集,这里在迭代过程中我们使用内部申请的坐标点集。 // 初始化第一个坐标点集。 errcode = CoordiSetBasicOp::newCoordiSet(&tmpcstin); if (errcode != NO_ERROR) { FAIL_CONVEXHULL_FREE; return errcode; } // 将输入坐标点集中的数据从输入点集中拷贝到第一个坐标点集中。此后所有的操作 // 仅在临时坐标点集中处理,不再碰触输入坐标点集。这里如果是求解上半凸壳,则 // 直接调用翻转坐标点的函数。 if (lowerconvex) errcode = CoordiSetBasicOp::copyToCurrentDevice(inputcst, tmpcstin); else errcode = this->flipWholeCst(inputcst, tmpcstin); if (errcode != NO_ERROR) { FAIL_CONVEXHULL_FREE; return errcode; } // 初始化第二个坐标点集。 errcode = CoordiSetBasicOp::newCoordiSet(&tmpcstout); if (errcode != NO_ERROR) { FAIL_CONVEXHULL_FREE; return errcode; } // 在 Device 内存中初始化第二个坐标点集,为其申请足够长度的内存空间。 errcode = CoordiSetBasicOp::makeAtCurrentDevice(tmpcstout, inputcst->count); if (errcode != NO_ERROR) { FAIL_CONVEXHULL_FREE; return errcode; } // 初始化迭代过程中使用到的凸壳点集,这里一共需要两个凸壳点集。我们不急于更 // 新输出参数 convexcst,是因为避免不必要的麻烦,等到凸壳计算完毕后,再将凸 // 壳内容拷贝到输出参数中。 // 初始化第一个凸壳点集。 errcode = CoordiSetBasicOp::newCoordiSet(&tmpconvexin); if (errcode != NO_ERROR) { FAIL_CONVEXHULL_FREE; return errcode; } // 在 Device 内存中初始化第一个凸壳点集,为其申请足够长度的内存空间。 errcode = CoordiSetBasicOp::makeAtCurrentDevice(tmpconvexin, inputcst->count); if (errcode != NO_ERROR) { FAIL_CONVEXHULL_FREE; return errcode; } // 初始化第二个凸壳点集。 errcode = CoordiSetBasicOp::newCoordiSet(&tmpconvexout); if (errcode != NO_ERROR) { FAIL_CONVEXHULL_FREE; return errcode; } // 在 Device 内存中初始化第二个凸壳点集,为其申请足够长度的内存空间。 errcode = CoordiSetBasicOp::makeAtCurrentDevice(tmpconvexout, 
inputcst->count);
    if (errcode != NO_ERROR) {
        FAIL_CONVEXHULL_FREE;
        return errcode;
    }

    // Find the leftmost and rightmost points and use them to initialize the
    // input point set and the hull point set. Afterwards the first point of
    // the input set is the leftmost point and the last one is the rightmost
    // point; the hull point set contains only these two points.
    errcode = swapEdgePoint(tmpcstin, tmpconvexin);
    if (errcode != NO_ERROR) {
        FAIL_CONVEXHULL_FREE;
        return errcode;
    }

    // All initialization is done; the iteration starts here. Every iteration
    // recomputes the perpendicular distance of each coordinate point inside
    // its LABEL region, uses the distances to decide whether each LABEL
    // region contains a new hull point (and, if so, which one), and then,
    // based on the newly found hull points, computes the index of every
    // coordinate point for the next iteration. The new indices keep the
    // points of one LABEL region together and drop all points with negative
    // distance, because those points are useless in later iterations. The
    // iteration continues until no LABEL region yields a new hull point. The
    // loop condition itself is only a safety guard: once the number of
    // remaining points equals the number of hull points, no new hull point
    // can possibly be found.
    while (cstcnt >= convexcnt) {
        // Update the distances: refresh the perpendicular distance of every
        // point and the negative-distance flag array.
        errcode = this->updateDist(tmpcstin, tmpconvexin, labelDev, cstcnt,
                                   negdistflagDev);
        if (errcode != NO_ERROR) {
            FAIL_CONVEXHULL_FREE;
            return errcode;
        }

        // Use a segmented scan to obtain the maximum distance of each LABEL
        // region and remember the index of the point with that distance.
        errcode = this->segScan.segmentedScan(
                ATTACHED_DATA(tmpcstin), labelDev, ATTACHED_DATA(tmpcstout),
                maxdistidxDev, cstcnt, false);
        if (errcode != NO_ERROR) {
            FAIL_CONVEXHULL_FREE;
            return errcode;
        }

        // Use the distance information to decide whether each LABEL region
        // contains a new hull point.
        errcode = this->updateFoundInfo(
                labelDev, ATTACHED_DATA(tmpcstin), maxdistidxDev,
                cstcnt, foundflagDev, startidxDev);
        if (errcode != NO_ERROR) {
            FAIL_CONVEXHULL_FREE;
            return errcode;
        }

        // Scan the per-region "new hull point found" flags to obtain their
        // prefix sums.
        errcode = this->aryScan.scanArrayExclusive(foundflagDev, foundaccDev,
                                                   convexcnt, add,
                                                   false, false, false);
        if (errcode != NO_ERROR) {
            FAIL_CONVEXHULL_FREE;
            return errcode;
        }

        // Copy the last element of the prefix-sum array to Host memory; it is
        // the number of hull points newly found in this iteration.
        cuerrcode = cudaMemcpy(&foundcnt, &foundaccDev[convexcnt],
                               sizeof (int), cudaMemcpyDeviceToHost);
        if (cuerrcode != cudaSuccess) {
            FAIL_CONVEXHULL_FREE;
            return CUDA_ERROR;
        }

        // If the number of newly found hull points is less than or equal to
        // 0, all hull points have already been found and there is no need to
        // continue, so the iteration exits here.
        if (foundcnt <= 0)
            break;

        // Update the hull point set: merge the newly found hull points into
        // the current hull point set.
        errcode = this->updateConvexCst(
                tmpcstin, tmpconvexin, foundflagDev, foundaccDev,
                startidxDev, maxdistidxDev, convexcnt, tmpconvexout);
        if (errcode != NO_ERROR) {
            FAIL_CONVEXHULL_FREE;
            return errcode;
        }

        // Update the number of points in the hull point set.
        convexcnt += foundcnt;

        // Mark the left-side points, i.e. the points inside a LABEL region
        // that lie to the left of the newly found hull point.
        errcode = this->markLeftPoints(
                tmpcstin, tmpconvexout, negdistflagDev, labelDev,
                foundflagDev, foundaccDev, cstcnt, leftflagDev);
        if (errcode != NO_ERROR) {
            FAIL_CONVEXHULL_FREE;
            return errcode;
        }

        // Scan the negative-distance flag array to obtain its prefix sums.
        // negdistflagDev was produced in the first step when the distances
        // were updated; the prefix sums are computed this late because they
        // are not needed before the foundcnt exit check above, so computing
        // them earlier would be wasted work whenever the loop exits there.
        errcode = this->aryScan.scanArrayExclusive(
                negdistflagDev, negdistaccDev, cstcnt, add,
                false, false, false);
        if (errcode != NO_ERROR) {
            FAIL_CONVEXHULL_FREE;
            return errcode;
        }

        // Copy the total number of negative-distance points back to the Host;
        // it is used to update the point count for the next iteration.
        cuerrcode = cudaMemcpy(&negdistcnt, &negdistaccDev[cstcnt],
                               sizeof (int), cudaMemcpyDeviceToHost);
        if (cuerrcode != cudaSuccess) {
            FAIL_CONVEXHULL_FREE;
            return CUDA_ERROR;
        }

        // Scan the left-side flag array to obtain its prefix sums.
        errcode = this->aryScan.scanArrayExclusive(
                leftflagDev, leftaccDev, cstcnt, add, false, false, false);
        if (errcode != NO_ERROR) {
            FAIL_CONVEXHULL_FREE;
            return errcode;
        }

        // Compute the new index of every coordinate point for the next
        // iteration.
        errcode = this->updateProperty(
                leftflagDev, leftaccDev, negdistflagDev, negdistaccDev,
                startidxDev, labelDev, foundaccDev, cstcnt,
                newidxDev, tmplabelDev);
        cudaDeviceSynchronize();
        if (errcode != NO_ERROR) {
            FAIL_CONVEXHULL_FREE;
            return errcode;
        }

        // Using the new indices computed in the previous step, build the
        // coordinate point set used by the next iteration.
        errcode = this->arrangeCst(
                tmpcstin, negdistflagDev, newidxDev, tmplabelDev,
                cstcnt, tmpcstout, newlabelDev);

        // Swap the intermediate buffers so that the results of this iteration
        // become the inputs of the next one.
        int
*labelswptmp = labelDev; labelDev = newlabelDev; newlabelDev = labelswptmp; CoordiSet *cstswptmp = tmpcstin; tmpcstin = tmpcstout; tmpcstout = cstswptmp; cstswptmp = tmpconvexin; tmpconvexin = tmpconvexout; tmpconvexout = cstswptmp; cstcnt -= negdistcnt; // 一轮迭代到此结束。 } // 将计算出来的凸壳点拷贝到输出点集中。迭代完成后,tmpconvexin 保存有最后的 // 结果。如果在 while 判断条件处退出迭代,则上一轮求出的凸壳点集是最终结 // 果,此时在上一轮末,由于交换指针,使得原本存放在tmpconvexout 的最终结果 // 变为了存放在 tmpconvexin 中;如果迭代实在判断有否新发现点处退出,则说明 // 当前并未发现新的凸壳点,那么 tmpconvexin 和 tmpconvexout 内容应该是一致 // 的,但本着稳定的原则,应该取更早形成的变量,即 tmpconvexin。 // 首先临时将这个存放结果的点集的点数量修改为凸壳点的数量。 tmpconvexin->count = convexcnt; // 然后,将计算出来的凸壳点拷贝到输出参数中。如果是求解上半凸壳点,则需要将 // 结果翻转后输出,但是由于翻转函数不能改变输出点集的点的数量,因此,这里还 // 需要先使用拷贝函数,调整输出点的数量(好在,通常凸壳点的数量不错,这一步 // 骤不会造成太能的性能下降,若日后发现有严重的性能下降,还需要额外写一个更 // 加复杂一些的翻转函数。) errcode = CoordiSetBasicOp::copyToCurrentDevice(tmpconvexin, convexcst); if (errcode != NO_ERROR) { tmpconvexin->count = inputcst->count; FAIL_CONVEXHULL_FREE; return errcode; } // 最后,为了程序稳定性的考虑,回复其凸壳点的数量。 tmpconvexin->count = inputcst->count; // 释放内存 cudaFree(tmpmemDev); CoordiSetBasicOp::deleteCoordiSet(tmpcstin); CoordiSetBasicOp::deleteCoordiSet(tmpcstout); CoordiSetBasicOp::deleteCoordiSet(tmpconvexin); CoordiSetBasicOp::deleteCoordiSet(tmpconvexout); // 最后,如果所求点是上半凸壳,则还需要翻转所有凸壳点。 if (!lowerconvex) { errcode = this->flipWholeCst(convexcst, convexcst); if (errcode != NO_ERROR) return errcode; } // 操作完毕,退出。 return NO_ERROR; } #undef FAIL_CONVEXHULL_FREE // Kernel 函数:_joinConvexKer(合并凸壳点集) static __global__ void _joinConvexKer( CoordiSetCuda lconvex, CoordiSetCuda uconvex, CoordiSetCuda convex, int *convexcnt) { // 共享内存,用来记录上下凸壳在最左最右点处是否重合,如果重合,应该在整合后 // 的坐标点中排除重合的点。其中,[0] 表示最左点,[1] 表示最右点。用 1 表示 // 有重合的点,用 0 表示没有重合的点。 __shared__ int sameedge[2]; // 为了代码中的简化表示,这里将比较长的变量换成了比较短的变量。该语句在编译 // 中不会带来额外的运行性能下降。 int *ldata = lconvex.tplMeta.tplData; int *udata = uconvex.tplMeta.tplData; int lcnt = lconvex.tplMeta.count; int ucnt = uconvex.tplMeta.count; // 由每个 Block 的第一个 Thread 计算是否存在重合的最左最右点。 if (threadIdx.x == 0) { // 判断最左点是否重合,对于上半凸壳,最左点存放在其首部,对于下半凸壳, // 最左点存放在其尾部。 if (ldata[0] == udata[2 * (ucnt - 1)] && ldata[1] == udata[2 * (ucnt - 1) + 1]) { sameedge[0] = 1; } else { sameedge[0] = 0; } // 判断最右点是否重合,对于上半凸壳,最右点存放在其尾部,对于下半凸壳, // 最右点存放在其首部。 if (ldata[2 * (lcnt - 1)] == udata[0] && ldata[2 * (lcnt - 1) + 1] == udata[1]) { sameedge[1] = 1; } else { sameedge[1] = 0; } // 根据对最左最右点的判断,就可以得到最终凸壳点集的数量,这里用整个 // Kernel 的第一个 Thread 写入最终凸壳点集的数量。 if (blockIdx.x == 0) *convexcnt = lcnt + ucnt - sameedge[0] - sameedge[1]; } // 同步 Block 内部的所有线程,使得求解结果对所有的 Thread 可见。 __syncthreads(); // 计算当前线程的全局下标。该下标对应于输出凸壳点的下标。 int idx = blockIdx.x * blockDim.x + threadIdx.x; // 判断当前线程是对应于下半凸壳和上半凸壳。将上半凸壳放在输出凸壳的前半部 // 分,下半凸壳放在其后半部分。 if (idx >= lcnt) { // 对于处理上半凸壳,首先计算出当前线程对应的上半凸壳下标,这里还需要经 // 过最右点重合的校正。 int inidx = idx - lcnt + sameedge[1]; // 如果对应的下标是越界的,则直接返回,这里仍需要经过最左点重合的校正。 if (inidx >= ucnt - sameedge[0]) return; // 将上半凸壳拷贝到整体的凸壳中。 convex.tplMeta.tplData[2 * idx] = udata[2 * inidx]; convex.tplMeta.tplData[2 * idx + 1] = udata[2 * inidx + 1]; } else { // 将下半凸壳拷贝到整体的凸壳中。由于上半凸壳内部坐标和整体凸壳的坐标是 // 一致的,且越界情况通过上面的 if 语句已经屏蔽,故没有进行下标的计算和 // 判断。 convex.tplMeta.tplData[2 * idx] = ldata[2 * idx]; convex.tplMeta.tplData[2 * idx + 1] = ldata[2 * idx + 1]; } } // 宏:FAIL_JOINCONVEX_FREE // 该宏用于完成下面函数运行出现错误退出前的内存清理工作。 #define FAIL_JOINCONVEX_FREE do { \ if (tmpconvex != NULL && tmpconvex != convex) \ CoordiSetBasicOp::deleteCoordiSet(tmpconvex); \ if (convexcntDev != NULL) \ cudaFree(convexcntDev); \ } while (0) // Host 成员方法:joinConvex(合并凸壳点) __host__ int 
ConvexHull::joinConvex( CoordiSet *lconvex, CoordiSet *uconvex, CoordiSet *convex) { // 检查指针性参数是否为 NULL。 if (lconvex == NULL || uconvex == NULL || convex == NULL) return NULL_POINTER; // 检查输入坐标点是否包含了有效的坐标点数量,如果输入坐标点中点数小于 2,则 // 无法完成相应的计算工作。 if (lconvex->count < 2 || lconvex->tplData == NULL || uconvex->count < 2 || uconvex->tplData == NULL) return INVALID_DATA; // 局部变量,错误码。 int errcode; cudaError_t cuerrcode; // 局部变量,输出凸壳点数量上限,即上下凸壳点数量加和。 int tmptotal = lconvex->count + uconvex->count; // 将下半凸壳点数据拷贝到当前 Device。 errcode = CoordiSetBasicOp::copyToCurrentDevice(lconvex); if (errcode != NO_ERROR) return errcode; // 将上半凸壳点数据拷贝到当前 Device。 errcode = CoordiSetBasicOp::copyToCurrentDevice(uconvex); if (errcode != NO_ERROR) return errcode; // 如果输出凸壳点是一个空点集,则为其开辟适当的内存空间,以用于存放最后的凸 // 壳点。 if (convex->tplData == NULL) { errcode = CoordiSetBasicOp::makeAtCurrentDevice(convex, tmptotal); if (errcode != NO_ERROR) return errcode; } // 局部变量。 CoordiSet *tmpconvex = NULL; // 临时输出凸壳点集。由于参数给定输出用凸壳点 // 集不一定具有合适数量的存储空间,因此,先用 // 一个临时的凸壳点集存放 Kernel 返回的结果, // 然后在归放到参数所对应的凸壳点集中。 int *convexcntDev = NULL; // 用于存放 Kernel 返回的最终凸壳点数量。 // 给临时输出凸壳点集初始化。 if (convex->count < tmptotal || convex->count >= tmptotal * 2) { // 如果给定的输出凸壳点集点的数量不合适,则需要重新申请一个凸壳点集并赋 // 值给临时凸壳点集。 errcode = CoordiSetBasicOp::newCoordiSet(&tmpconvex); if (errcode != NO_ERROR) return errcode; // 申请后,还需要给该凸壳点集开辟合适的内存空间。 errcode = CoordiSetBasicOp::makeAtCurrentDevice(tmpconvex, tmptotal); if (errcode != NO_ERROR) { FAIL_JOINCONVEX_FREE; return errcode; } } else { // 如果输出土克点击中点的数量合适,则直接使用输出凸壳点集承接 Kernel 的 // 输出。 tmpconvex = convex; } // 取出坐标点集对应的 CUDA 型数据。 CoordiSetCuda *lconvexCud = COORDISET_CUDA(lconvex); CoordiSetCuda *uconvexCud = COORDISET_CUDA(uconvex); CoordiSetCuda *tmpconvexCud = COORDISET_CUDA(tmpconvex); // 为 Kernel 输出的凸壳点集数量开辟 Device 内存空间。 cuerrcode = cudaMalloc((void **)&convexcntDev, sizeof (int)); if (cuerrcode != cudaSuccess) { FAIL_JOINCONVEX_FREE; return CUDA_ERROR; } // 计算启动 Kernel 所需要的 Block 尺寸和数量。 size_t blocksize = DEF_BLOCK_1D; size_t gridsize = (tmptotal + blocksize - 1) / blocksize; // 启动 Kernel 完成计算。 _joinConvexKer<<<gridsize, blocksize>>>(*lconvexCud, *uconvexCud, *tmpconvexCud, convexcntDev); // 检查 Kernel 函数是否执行正确。 if (cudaGetLastError() != cudaSuccess) { FAIL_JOINCONVEX_FREE; return CUDA_ERROR; } // 从 Device 内存中读取 Kernel 返回的凸壳点数量,并将其赋值到临时凸壳点集的 // 坐标点数量中。 cuerrcode = cudaMemcpy(&(tmpconvex->count), convexcntDev, sizeof (int), cudaMemcpyDeviceToHost); if (cuerrcode != cudaSuccess) { FAIL_JOINCONVEX_FREE; return CUDA_ERROR; } // 如果使用的是临时凸壳点集,则需要将点集从临时凸壳点集中拷贝到输出凸壳点集 // 中,在拷贝的过程中,输出凸壳点集的坐标点数量会被安全的重新定义。 if (tmpconvex != convex) { errcode = CoordiSetBasicOp::copyToCurrentDevice(tmpconvex, convex); if (errcode != NO_ERROR) { FAIL_JOINCONVEX_FREE; return errcode; } // 至此,临时凸壳点集的使命完成,清除其占用的内存空间。 CoordiSetBasicOp::deleteCoordiSet(tmpconvex); } // 释放 Device 内存空间。 cudaFree(convexcntDev); // 操作结束,返回。 return NO_ERROR; } #undef FAIL_JOINCONVEX_FREE // FAIL_JOINCONVEXCPU_FREE // 该宏用于完成下面函数运行出现错误退出前的内存清理工作。 #define FAIL_JOINCONVEXCPU_FREE do { \ if (tmpconvex != NULL && tmpconvex != convex) \ CoordiSetBasicOp::deleteCoordiSet(tmpconvex); \ } while (0) // Host 成员方法:joinConvexCpu(合并凸壳点) __host__ int ConvexHull::joinConvexCpu( CoordiSet *lconvex, CoordiSet *uconvex, CoordiSet *convex) { // 检查指针性参数是否为 NULL。 if (lconvex == NULL || uconvex == NULL || convex == NULL) return NULL_POINTER; // 检查输入坐标点是否包含了有效的坐标点数量,如果输入坐标点中点数小于 2,则 // 无法完成相应的计算工作。 if (lconvex->count < 2 || lconvex->tplData == NULL || uconvex->count < 2 || uconvex->tplData == NULL) return 
INVALID_DATA; // 局部变量,错误码。 int errcode; // 局部变量,输出凸壳点数量上限,即上下凸壳点数量加和。 int tmptotal = lconvex->count + uconvex->count; // 将下半凸壳点数据拷贝到当前 Host。 errcode = CoordiSetBasicOp::copyToHost(lconvex); if (errcode != NO_ERROR) return errcode; // 将上半凸壳点数据拷贝到 Host。 errcode = CoordiSetBasicOp::copyToHost(uconvex); if (errcode != NO_ERROR) return errcode; // 如果输出凸壳点是一个空点集,则为其开辟适当的内存空间,以用于存放最后的凸 // 壳点。 if (convex->tplData == NULL) { errcode = CoordiSetBasicOp::makeAtHost(convex, tmptotal); if (errcode != NO_ERROR) return errcode; } // 局部变量。 CoordiSet *tmpconvex = NULL; // 临时输出凸壳点集。由于参数给定输出用凸壳点 // 集不一定具有合适数量的存储空间,因此,先用 // 一个临时的凸壳点集存放 Kernel 返回的结果, // 然后在归放到参数所对应的凸壳点集中。 int convexcnt = 0; // 用于存放最终凸壳点数量。 // 给临时输出凸壳点集初始化。 if (convex->count < tmptotal || convex->count >= tmptotal * 2) { // 如果给定的输出凸壳点集点的数量不合适,则需要重新申请一个凸壳点集并赋 // 值给临时凸壳点集。 errcode = CoordiSetBasicOp::newCoordiSet(&tmpconvex); if (errcode != NO_ERROR) return errcode; // 申请后,还需要给该凸壳点集开辟合适的内存空间。 errcode = CoordiSetBasicOp::makeAtHost(tmpconvex, tmptotal); if (errcode != NO_ERROR) { FAIL_JOINCONVEXCPU_FREE; return errcode; } } else { // 如果输出土克点击中点的数量合适,则直接使用输出凸壳点集承接 Kernel 的 // 输出。 tmpconvex = convex; } // 取出坐标点集对应的 CUDA 型数据。 CoordiSetCuda *lconvexCud = COORDISET_CUDA(lconvex); CoordiSetCuda *uconvexCud = COORDISET_CUDA(uconvex); CoordiSetCuda *tmpconvexCud = COORDISET_CUDA(tmpconvex); // 共享内存,用来记录上下凸壳在最左最右点处是否重合,如果重合,应该在整合后 // 的坐标点中排除重合的点。其中,[0] 表示最左点,[1] 表示最右点。用 1 表示 // 有重合的点,用 0 表示没有重合的点。 int sameedge[2]; // 为了代码中的简化表示,这里将比较长的变量换成了比较短的变量。该语句在编译 // 中不会带来额外的运行性能下降。 int *ldata = (*lconvexCud).tplMeta.tplData; int *udata = (*uconvexCud).tplMeta.tplData; int lcnt = (*lconvexCud).tplMeta.count; int ucnt = (*uconvexCud).tplMeta.count; // 判断最左点是否重合,对于上半凸壳,最左点存放在其首部,对于下半凸壳, // 最左点存放在其尾部。 if (ldata[0] == udata[2 * (ucnt - 1)] && ldata[1] == udata[2 * (ucnt - 1) + 1]) { sameedge[0] = 1; } else { sameedge[0] = 0; } // 判断最右点是否重合,对于上半凸壳,最右点存放在其尾部,对于下半凸壳, // 最右点存放在其首部。 if (ldata[2 * (lcnt - 1)] == udata[0] && ldata[2 * (lcnt - 1) + 1] == udata[1]) { sameedge[1] = 1; } else { sameedge[1] = 0; } // 根据对最左最右点的判断,就可以得到最终凸壳点集的数量 convexcnt = lcnt + ucnt - sameedge[0] - sameedge[1]; for (int idx = 0; idx < tmptotal; idx++) { // 判断当前线程是对应于下半凸壳和上半凸壳。将上半凸壳放在输出凸壳的前半 // 部分,下半凸壳放在其后半部分。 if (idx >= lcnt) { // 对于处理上半凸壳,首先计算出当前线程对应的上半凸壳下标,这里还需 // 要经过最右点重合的校正。 int inidx = idx - lcnt + sameedge[1]; // 如果对应的下标是不越界的 if (inidx < ucnt - sameedge[0]) { // 将上半凸壳拷贝到整体的凸壳中。 (*tmpconvexCud).tplMeta.tplData[2 * idx] = udata[2 * inidx]; (*tmpconvexCud).tplMeta.tplData[2 * idx + 1] = udata[2 * inidx + 1]; } } else { // 将下半凸壳拷贝到整体的凸壳中。由于上半凸壳内部坐标和整体凸壳的坐 // 标是一致的,且越界情况通过上面的 if 语句已经屏蔽,故没有进行下标 // 的计算和判断。 (*tmpconvexCud).tplMeta.tplData[2 * idx] = ldata[2 * idx]; (*tmpconvexCud).tplMeta.tplData[2 * idx + 1] = ldata[2 * idx + 1]; } } // 从 Device 内存中读取 Kernel 返回的凸壳点数量,并将其赋值到临时凸壳点集的 // 坐标点数量中。 tmpconvex->count = convexcnt; // 如果使用的是临时凸壳点集,则需要将点集从临时凸壳点集中拷贝到输出凸壳点集 // 中,在拷贝的过程中,输出凸壳点集的坐标点数量会被安全的重新定义。 if (tmpconvex != convex) { errcode = CoordiSetBasicOp::copyToHost(tmpconvex, convex); if (errcode != NO_ERROR) { FAIL_JOINCONVEXCPU_FREE; return errcode; } // 至此,临时凸壳点集的使命完成,清除其占用的内存空间。 CoordiSetBasicOp::deleteCoordiSet(tmpconvex); } // 操作结束,返回。 return NO_ERROR; } #undef FAIL_JOINCONVEXCPU_FREE // 宏:FAIL_CONVEXHULL_FREE // 该宏用于完成下面函数运行出现错误退出前的内存清理工作。 #define FAIL_CONVEXHULL_FREE do { \ if (lconvex != NULL) \ CoordiSetBasicOp::deleteCoordiSet(lconvex); \ if (uconvex != NULL) \ CoordiSetBasicOp::deleteCoordiSet(uconvex); \ } while (0) // Host 成员方法:convexHullCpu(求一个点集对应的凸壳点集) __host__ int 
ConvexHull::convexHullCpu(CoordiSet *inputcst, CoordiSet *convex) { // 检查指针性参数是否为 NULL。 if (inputcst == NULL || convex == NULL) return NULL_POINTER; // 如果输入点集中不包含任何点,则报错退出。 if (inputcst->count < 1 || inputcst->tplData == NULL) return INVALID_DATA; // 如果输入点集中只有一个点,那么该点直接输出,作为凸壳点。 if (inputcst->count == 1) return CoordiSetBasicOp::copyToHost(inputcst, convex); // 如果输入点集中只有两个点,则直接将其下半凸壳输出(显然此时上半凸壳也是这 // 两个点) if (inputcst->count == 2) return this->convexHullIterCpu(inputcst, convex, true); // 局部变量,错误码。 int errcode; // 局部变量,下半凸壳和上半凸壳点集变量。 CoordiSet *lconvex = NULL; CoordiSet *uconvex = NULL; // 申请一个临时点集,用来存放下半凸壳。 errcode = CoordiSetBasicOp::newCoordiSet(&lconvex); if (errcode != NO_ERROR) { FAIL_CONVEXHULL_FREE; return errcode; } // 申请一个临时点集,用来存放上半凸壳。 errcode = CoordiSetBasicOp::newCoordiSet(&uconvex); if (errcode != NO_ERROR) { FAIL_CONVEXHULL_FREE; return errcode; } cout << endl; #ifdef CH_DEBUG_CPU_PRINT cout << "[convexHullCpu]convexHullIterCpu upper begin" << endl; #endif // 调用凸壳迭代,求输入点集的下半凸壳。 errcode = this->convexHullIterCpu(inputcst, lconvex, true); if (errcode != NO_ERROR) { FAIL_CONVEXHULL_FREE; return errcode; } #ifdef CH_DEBUG_CPU_PRINT cout << "[convexHullCpu]convexHullIterCpu upper end" << endl; cout << endl; cout << "[convexHullCpu]convexHullIterCpu lower begin" << endl; #endif // 调用凸壳迭代,求输入点集的上半凸壳。 errcode = this->convexHullIterCpu(inputcst, uconvex, false); if (errcode != NO_ERROR) { FAIL_CONVEXHULL_FREE; return errcode; } #ifdef CH_DEBUG_CPU_PRINT cout << "[convexHullCpu]convexHullIterCpu lower end" << endl; cout << endl; #endif // 调用合并两个凸壳的函数,将下半凸壳和上半凸壳粘在一起。 errcode = this->joinConvexCpu(lconvex, uconvex, convex); if (errcode != NO_ERROR) { FAIL_CONVEXHULL_FREE; return errcode; } // 清除临时申请的两个坐标点集所占用的内容空间。 CoordiSetBasicOp::deleteCoordiSet(lconvex); CoordiSetBasicOp::deleteCoordiSet(uconvex); // 处理完毕,退出 return NO_ERROR; } // Host 成员方法:convexHull(求一个点集对应的凸壳点集) __host__ int ConvexHull::convexHull(CoordiSet *inputcst, CoordiSet *convex) { // 检查指针性参数是否为 NULL。 if (inputcst == NULL || convex == NULL) return NULL_POINTER; // 如果输入点集中不包含任何点,则报错退出。 if (inputcst->count < 1 || inputcst->tplData == NULL) return INVALID_DATA; // 如果输入点集中只有一个点,那么该点直接输出,作为凸壳点。 if (inputcst->count == 1) return CoordiSetBasicOp::copyToCurrentDevice(inputcst, convex); // 如果输入点集中只有两个点,则直接将其下半凸壳输出(显然此时上半凸壳也是这 // 两个点) if (inputcst->count == 2) return this->convexHullIter(inputcst, convex, true); cout << "GPU convex 1" << endl; // 局部变量,错误码。 int errcode; // 局部变量,下半凸壳和上半凸壳点集变量。 CoordiSet *lconvex = NULL; CoordiSet *uconvex = NULL; // 申请一个临时点集,用来存放下半凸壳。 errcode = CoordiSetBasicOp::newCoordiSet(&lconvex); if (errcode != NO_ERROR) { FAIL_CONVEXHULL_FREE; return errcode; } // 申请一个临时点集,用来存放上半凸壳。 errcode = CoordiSetBasicOp::newCoordiSet(&uconvex); if (errcode != NO_ERROR) { FAIL_CONVEXHULL_FREE; return errcode; } cout << "GPU convex lower" << endl; // 调用凸壳迭代,求输入点集的下半凸壳。 errcode = this->convexHullIter(inputcst, lconvex, true); if (errcode != NO_ERROR) { FAIL_CONVEXHULL_FREE; return errcode; } cout << "GPU convex lower cnt is " << lconvex->count << endl; cout << "GPU convex up" << endl; // 调用凸壳迭代,求输入点集的上半凸壳。 errcode = this->convexHullIter(inputcst, uconvex, false); if (errcode != NO_ERROR) { FAIL_CONVEXHULL_FREE; return errcode; } cout << "GPU convex up cnt is " << uconvex->count << endl; cout << "GPU joinConvex" << endl; // 调用合并两个凸壳的函数,将下半凸壳和上半凸壳粘在一起。 errcode = this->joinConvex(lconvex, uconvex, convex); if (errcode != NO_ERROR) { FAIL_CONVEXHULL_FREE; return errcode; } // 清除临时申请的两个坐标点集所占用的内容空间。 
CoordiSetBasicOp::deleteCoordiSet(lconvex);
    CoordiSetBasicOp::deleteCoordiSet(uconvex);

    // Done, exit.
    return NO_ERROR;
}
#undef FAIL_CONVEXHULL_FREE

// Macro: FAIL_CONVEXHULLONIMG_FREE
// Releases the memory acquired in the functions below before an error return.
#define FAIL_CONVEXHULLONIMG_FREE do {                  \
        if (cst != NULL)                                \
            CoordiSetBasicOp::deleteCoordiSet(cst);     \
    } while (0)

// Host member method: convexHullCpu (compute the convex hull point set of the
// thresholded object in an image)
__host__ int ConvexHull::convexHullCpu(Image *inimg, CoordiSet *convex)
{
    // Check whether the input image or the output hull point set is NULL; if
    // so, return an error immediately.
    if (inimg == NULL || convex == NULL)
        return NULL_POINTER;

    // Local variable, error code.
    int errcode;

    // New coordinate set, initialized to NULL so the cleanup macro can test
    // it safely.
    CoordiSet *cst = NULL;

    // Construct the coordinate set.
    errcode = CoordiSetBasicOp::newCoordiSet(&cst);
    if (errcode != NO_ERROR) {
        FAIL_CONVEXHULLONIMG_FREE;
        return errcode;
    }

    // Convert the image into a coordinate set.
    errcode = this->imgCvt.imgConvertToCst(inimg, cst);
    if (errcode != NO_ERROR) {
        FAIL_CONVEXHULLONIMG_FREE;
        return errcode;
    }

    // Compute the convex hull of the coordinate set.
    errcode = convexHullCpu(cst, convex);
    if (errcode != NO_ERROR) {
        FAIL_CONVEXHULLONIMG_FREE;
        return errcode;
    }

    // Release the memory occupied by the coordinate set.
    CoordiSetBasicOp::deleteCoordiSet(cst);

    // Exit.
    return NO_ERROR;
}

// Host member method: convexHull (compute the convex hull point set of the
// thresholded object in an image)
__host__ int ConvexHull::convexHull(Image *inimg, CoordiSet *convex)
{
    // Check whether the input image or the output hull point set is NULL; if
    // so, return an error immediately.
    if (inimg == NULL || convex == NULL)
        return NULL_POINTER;

    // Local variable, error code.
    int errcode;

    // New coordinate set, initialized to NULL so the cleanup macro can test
    // it safely.
    CoordiSet *cst = NULL;

    // Construct the coordinate set.
    errcode = CoordiSetBasicOp::newCoordiSet(&cst);
    if (errcode != NO_ERROR) {
        FAIL_CONVEXHULLONIMG_FREE;
        return errcode;
    }

    // Convert the image into a coordinate set.
    errcode = this->imgCvt.imgConvertToCst(inimg, cst);
    if (errcode != NO_ERROR) {
        FAIL_CONVEXHULLONIMG_FREE;
        return errcode;
    }

    // Compute the convex hull of the coordinate set.
    errcode = convexHull(cst, convex);
    if (errcode != NO_ERROR) {
        FAIL_CONVEXHULLONIMG_FREE;
        return errcode;
    }

    // Release the memory occupied by the coordinate set.
    CoordiSetBasicOp::deleteCoordiSet(cst);

    // Exit.
    return NO_ERROR;
}
#undef FAIL_CONVEXHULLONIMG_FREE
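// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the original class: the join step above
// (_joinConvexKer / joinConvexCpu) drops the leftmost and rightmost points
// when they appear in both the lower and the upper hull. The hypothetical
// helper below mirrors that bookkeeping on plain interleaved (x, y) arrays,
// assuming the same storage order as the kernel's index checks: the lower
// hull runs from the leftmost to the rightmost point, the upper hull from the
// rightmost back to the leftmost point. It is a readability aid only and is
// not called anywhere.
static inline int joinHullsSketch(const int *ldata, int lcnt,
                                  const int *udata, int ucnt, int *out)
{
    // Leftmost point: head of the lower hull, tail of the upper hull.
    int sameleft = (ldata[0] == udata[2 * (ucnt - 1)] &&
                    ldata[1] == udata[2 * (ucnt - 1) + 1]) ? 1 : 0;

    // Rightmost point: tail of the lower hull, head of the upper hull.
    int sameright = (ldata[2 * (lcnt - 1)] == udata[0] &&
                     ldata[2 * (lcnt - 1) + 1] == udata[1]) ? 1 : 0;

    // Copy the whole lower hull first.
    int outcnt = 0;
    for (int i = 0; i < lcnt; i++, outcnt++) {
        out[2 * outcnt] = ldata[2 * i];
        out[2 * outcnt + 1] = ldata[2 * i + 1];
    }

    // Append the upper hull, skipping the shared endpoints.
    for (int i = sameright; i < ucnt - sameleft; i++, outcnt++) {
        out[2 * outcnt] = udata[2 * i];
        out[2 * outcnt + 1] = udata[2 * i + 1];
    }

    // Equals lcnt + ucnt - sameleft - sameright, as in _joinConvexKer.
    return outcnt;
}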
#ifdef USE_CUDA #include "thrust/device_vector.h" #endif #include "caffe/layers/softmax_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template<typename Dtype, typename MItype, typename MOtype> void SoftmaxLayer<Dtype, MItype, MOtype>::GenerateProgram() { this->device_program_ = this->device_->CreateProgram(); stringstream ss; ss << this->device_program_->setup(); ss << this->device_program_->template define_type<Dtype>("Dtype"); ss << this->device_program_->template define_type<MItype>("MItype"); ss << this->device_program_->template define_type<MOtype>("MOtype"); ss << this->device_program_->template helper_functions<Dtype>(); #ifdef USE_HALF if (std::is_same<MItype, half_fp>::value) { ss << "#define DTYPE_MAX HALF_MAX" << std::endl; ss << "#define DTYPE_MIN HALF_MIN" << std::endl; } else if (std::is_same<MItype, float>::value || std::is_same<MItype, double>::value) { #endif ss << "#define DTYPE_MAX FLT_MAX" << std::endl; ss << "#define DTYPE_MIN FLT_MIN" << std::endl; #ifdef USE_HALF } else { ss << "#define DTYPE_MAX " << type_max_val<MItype>() << std::endl; ss << "#define DTYPE_MIN " << 0 << std::endl; } #endif { KernelArgs args; args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "num", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "channels", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "spatial_dim", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<Dtype>( "data", KERNEL_ARG_CONST | KERNEL_ARG_GLOBAL_MEM)); args.push_back(this->device_program_->template create_kernel_arg<Dtype>( "out", KERNEL_ARG_GLOBAL_MEM)); ss << this->device_program_->function("kernel_channel_max", args); ss << this->device_program_->kernel_loop("uint_tp", "index", "num * spatial_dim"); ss << "int_tp n = index / spatial_dim;" << std::endl; ss << "int_tp s = index % spatial_dim;" << std::endl; ss << "Dtype maxval = -DTYPE_MAX;" << std::endl; ss << "for (int_tp c = 0; c < channels; ++c) {" << std::endl; ss << "maxval = max(data[(n * channels + c) * spatial_dim + s], maxval);" << std::endl; ss << "}" << std::endl; ss << "out[index] = maxval;" << std::endl; ss << "}" << std::endl; ss << "}" << std::endl; } { KernelArgs args; args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "count", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "num", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "channels", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "spatial_dim", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<Dtype>( "channel_max", KERNEL_ARG_CONST | KERNEL_ARG_GLOBAL_MEM)); args.push_back(this->device_program_->template create_kernel_arg<Dtype>( "data", KERNEL_ARG_GLOBAL_MEM)); ss << this->device_program_->function("kernel_channel_subtract", args); ss << this->device_program_->kernel_loop("uint_tp", "index", "count"); ss << "int_tp n = index / channels / spatial_dim;" << std::endl; ss << "int_tp s = index % spatial_dim;" << std::endl; ss << "data[index] -= channel_max[n * spatial_dim + s];" << std::endl; ss << "}" << std::endl; ss << "}" << std::endl; } { KernelArgs args; args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "count", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<Dtype>( 
"data", KERNEL_ARG_CONST | KERNEL_ARG_GLOBAL_MEM)); args.push_back(this->device_program_->template create_kernel_arg<Dtype>( "out", KERNEL_ARG_GLOBAL_MEM)); ss << this->device_program_->function("kernel_exp", args); ss << this->device_program_->kernel_loop("uint_tp", "index", "count"); ss << "out[index] = exp(data[index]);" << std::endl; ss << "}" << std::endl; ss << "}" << std::endl; } { KernelArgs args; args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "num", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "channels", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "spatial_dim", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<Dtype>( "data", KERNEL_ARG_CONST | KERNEL_ARG_GLOBAL_MEM)); args.push_back(this->device_program_->template create_kernel_arg<Dtype>( "channel_sum", KERNEL_ARG_GLOBAL_MEM)); ss << this->device_program_->function("kernel_channel_sum", args); ss << this->device_program_->kernel_loop("uint_tp", "index", "num * spatial_dim"); ss << "int_tp n = index / spatial_dim;" << std::endl; ss << "int_tp s = index % spatial_dim;" << std::endl; ss << "Dtype sum = 0;" << std::endl; ss << "for (int_tp c = 0; c < channels; ++c) {" << std::endl; ss << "sum += data[(n * channels + c) * spatial_dim + s];" << std::endl; ss << "}" << std::endl; ss << "channel_sum[index] = sum;" << std::endl; ss << "}" << std::endl; ss << "}" << std::endl; } { KernelArgs args; args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "count", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "num", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "channels", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "spatial_dim", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<Dtype>( "channel_sum", KERNEL_ARG_CONST | KERNEL_ARG_GLOBAL_MEM)); args.push_back(this->device_program_->template create_kernel_arg<Dtype>( "data", KERNEL_ARG_GLOBAL_MEM)); ss << this->device_program_->function("kernel_channel_div", args); ss << this->device_program_->kernel_loop("uint_tp", "index", "count"); ss << "int_tp n = index / channels / spatial_dim;" << std::endl; ss << "int_tp s = index % spatial_dim;" << std::endl; ss << "data[index] /= channel_sum[n * spatial_dim + s];" << std::endl; ss << "}" << std::endl; ss << "}" << std::endl; } { KernelArgs args; args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "num", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "channels", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "spatial_dim", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<Dtype>( "data_1", KERNEL_ARG_CONST | KERNEL_ARG_GLOBAL_MEM)); args.push_back(this->device_program_->template create_kernel_arg<Dtype>( "data_2", KERNEL_ARG_CONST | KERNEL_ARG_GLOBAL_MEM)); args.push_back(this->device_program_->template create_kernel_arg<Dtype>( "channel_dot", KERNEL_ARG_GLOBAL_MEM)); ss << this->device_program_->function("kernel_channel_dot", args); ss << this->device_program_->kernel_loop("uint_tp", "index", "num * spatial_dim"); ss << "int_tp n = index / spatial_dim;" << std::endl; ss << "int_tp s = index % spatial_dim;" << std::endl; ss << "Dtype dot = 0;" << 
std::endl; ss << "for (int_tp c = 0; c < channels; ++c) {" << std::endl; ss << "dot += (data_1[(n * channels + c) * spatial_dim + s]" << " * data_2[(n * channels + c) * spatial_dim + s]);" << std::endl; ss << "}" << std::endl; ss << "channel_dot[index] = dot;" << std::endl; ss << "}" << std::endl; ss << "}" << std::endl; } this->device_program_->set_source(ss.str()); this->device_program_->Compile(true, true); } template<typename Dtype, typename MItype, typename MOtype> void SoftmaxLayer<Dtype, MItype, MOtype>::Forward_gpu( const vector<Blob<MItype>*>& bottom, const vector<Blob<MOtype>*>& top) { vptr<const Dtype> bottom_data = bottom[0]->gpu_data(); vptr<Dtype> top_data = top[0]->mutable_gpu_data(); vptr<Dtype> scale_data = scale_.mutable_gpu_data(); int_tp count = bottom[0]->count(); int_tp channels = top[0]->shape(softmax_axis_); // CUDA backend code this->device_->template copy<Dtype>(count, bottom_data, top_data); // We need to subtract the max to avoid numerical issues, compute the exp, // and then normalize. // compute max { shared_ptr<DeviceKernel> kernel = this->device_program_->GetKernel("kernel_channel_max"); kernel->add_arg(&outer_num_); kernel->add_arg(&channels); kernel->add_arg(&inner_num_); kernel->add_arg(&top_data); kernel->add_arg(&scale_data); vector<size_t> work_size(1, outer_num_ * inner_num_); vector<size_t> group; vector<size_t> local; this->device_->get_threads(&work_size, &group, &local, kernel.get(), true); kernel->Execute(group, local); } // subtract { shared_ptr<DeviceKernel> kernel = this->device_program_->GetKernel("kernel_channel_subtract"); kernel->add_arg(&count); kernel->add_arg(&outer_num_); kernel->add_arg(&channels); kernel->add_arg(&inner_num_); kernel->add_arg(&scale_data); kernel->add_arg(&top_data); vector<size_t> work_size(1, count); vector<size_t> group; vector<size_t> local; this->device_->get_threads(&work_size, &group, &local, kernel.get(), true); kernel->Execute(group, local); } // exponentiate { shared_ptr<DeviceKernel> kernel = this->device_program_->GetKernel("kernel_exp"); kernel->add_arg(&count); kernel->add_arg(&top_data); kernel->add_arg(&top_data); vector<size_t> work_size(1, count); vector<size_t> group; vector<size_t> local; this->device_->get_threads(&work_size, &group, &local, kernel.get(), true); kernel->Execute(group, local); } // sum after exp { shared_ptr<DeviceKernel> kernel = this->device_program_->GetKernel("kernel_channel_sum"); kernel->add_arg(&outer_num_); kernel->add_arg(&channels); kernel->add_arg(&inner_num_); kernel->add_arg(&top_data); kernel->add_arg(&scale_data); vector<size_t> work_size(1, outer_num_ * inner_num_); vector<size_t> group; vector<size_t> local; this->device_->get_threads(&work_size, &group, &local, kernel.get(), true); kernel->Execute(group, local); } // divide { shared_ptr<DeviceKernel> kernel = this->device_program_->GetKernel("kernel_channel_div"); kernel->add_arg(&count); kernel->add_arg(&outer_num_); kernel->add_arg(&channels); kernel->add_arg(&inner_num_); kernel->add_arg(&scale_data); kernel->add_arg(&top_data); vector<size_t> work_size(1, count); vector<size_t> group; vector<size_t> local; this->device_->get_threads(&work_size, &group, &local, kernel.get(), true); kernel->Execute(group, local); } } template<typename Dtype, typename MItype, typename MOtype> void SoftmaxLayer<Dtype, MItype, MOtype>::Backward_gpu( const vector<Blob<MOtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<MItype>*>& bottom) { vptr<const Dtype> top_diff = top[0]->gpu_diff(); vptr<const Dtype> top_data 
= top[0]->gpu_data(); vptr<Dtype> bottom_diff = bottom[0]->mutable_gpu_diff(); vptr<Dtype> scale_data = scale_.mutable_gpu_data(); int_tp count = top[0]->count(); int_tp channels = top[0]->shape(softmax_axis_); this->device_->template copy<Dtype>(top[0]->count(), top_diff, bottom_diff); // Compute inner1d(top_diff, top_data) and // subtract them from the bottom diff. { shared_ptr<DeviceKernel> kernel = this->device_program_->GetKernel("kernel_channel_dot"); kernel->add_arg(&outer_num_); kernel->add_arg(&channels); kernel->add_arg(&inner_num_); kernel->add_arg(&top_diff); kernel->add_arg(&top_data); kernel->add_arg(&scale_data); vector<size_t> work_size(1, outer_num_ * inner_num_); vector<size_t> group; vector<size_t> local; this->device_->get_threads(&work_size, &group, &local, kernel.get(), true); kernel->Execute(group, local); } { shared_ptr<DeviceKernel> kernel = this->device_program_->GetKernel("kernel_channel_subtract"); kernel->add_arg(&count); kernel->add_arg(&outer_num_); kernel->add_arg(&channels); kernel->add_arg(&inner_num_); kernel->add_arg(&scale_data); kernel->add_arg(&bottom_diff); vector<size_t> work_size(1, count); vector<size_t> group; vector<size_t> local; this->device_->get_threads(&work_size, &group, &local, kernel.get(), true); kernel->Execute(group, local); } // Elementwise multiplication this->device_->template mul<Dtype>(top[0]->count(), bottom_diff, top_data, bottom_diff); } INSTANTIATE_CLASST_FUNC_3T_GUARDED(SoftmaxLayer, GenerateProgram, (half_fp), (half_fp), (half_fp)); INSTANTIATE_CLASST_FUNC_3T_GUARDED(SoftmaxLayer, GenerateProgram, (float), (float), (float)); INSTANTIATE_CLASST_FUNC_3T_GUARDED(SoftmaxLayer, GenerateProgram, (double), (double), (double)); INSTANTIATE_CLASST_FUNC_3T_GUARDED(SoftmaxLayer, GenerateProgram, (uint8_t), (uint8_t), (uint8_t)); INSTANTIATE_CLASST_FUNC_3T_GUARDED(SoftmaxLayer, GenerateProgram, (uint16_t), (uint16_t), (uint16_t)); INSTANTIATE_CLASST_FUNC_3T_GUARDED(SoftmaxLayer, GenerateProgram, (uint32_t), (uint32_t), (uint32_t)); INSTANTIATE_CLASST_FUNC_3T_GUARDED(SoftmaxLayer, GenerateProgram, (uint64_t), (uint64_t), (uint64_t)); INSTANTIATE_CLASST_FUNC_3T_GUARDED(SoftmaxLayer, Forward_gpu, (half_fp), (half_fp), (half_fp)); INSTANTIATE_CLASST_FUNC_3T_GUARDED(SoftmaxLayer, Forward_gpu, (float), (float), (float)); INSTANTIATE_CLASST_FUNC_3T_GUARDED(SoftmaxLayer, Forward_gpu, (double), (double), (double)); INSTANTIATE_CLASST_FUNC_3T_GUARDED(SoftmaxLayer, Forward_gpu, (uint8_t), (uint8_t), (uint8_t)); INSTANTIATE_CLASST_FUNC_3T_GUARDED(SoftmaxLayer, Forward_gpu, (uint16_t), (uint16_t), (uint16_t)); INSTANTIATE_CLASST_FUNC_3T_GUARDED(SoftmaxLayer, Forward_gpu, (uint32_t), (uint32_t), (uint32_t)); INSTANTIATE_CLASST_FUNC_3T_GUARDED(SoftmaxLayer, Forward_gpu, (uint64_t), (uint64_t), (uint64_t)); INSTANTIATE_CLASST_FUNC_3T_GUARDED(SoftmaxLayer, Backward_gpu, (half_fp), (half_fp), (half_fp)); INSTANTIATE_CLASST_FUNC_3T_GUARDED(SoftmaxLayer, Backward_gpu, (float), (float), (float)); INSTANTIATE_CLASST_FUNC_3T_GUARDED(SoftmaxLayer, Backward_gpu, (double), (double), (double)); INSTANTIATE_CLASST_FUNC_3T_GUARDED(SoftmaxLayer, Backward_gpu, (uint8_t), (uint8_t), (uint8_t)); INSTANTIATE_CLASST_FUNC_3T_GUARDED(SoftmaxLayer, Backward_gpu, (uint16_t), (uint16_t), (uint16_t)); INSTANTIATE_CLASST_FUNC_3T_GUARDED(SoftmaxLayer, Backward_gpu, (uint32_t), (uint32_t), (uint32_t)); INSTANTIATE_CLASST_FUNC_3T_GUARDED(SoftmaxLayer, Backward_gpu, (uint64_t), (uint64_t), (uint64_t)); } // namespace caffe
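// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the original layer: a plain host-side
// reference of the pipeline generated above, using the same
// (n * channels + c) * spatial_dim + s indexing as the kernels. Subtracting
// the per-channel maximum before exponentiating keeps exp() from overflowing;
// dividing by the per-channel sum then normalizes each channel slice to 1.
// The function name and the use of std::vector are illustrative choices, not
// part of the Caffe API.
#include <algorithm>
#include <cfloat>
#include <cmath>
#include <vector>

static void softmax_reference(const std::vector<float> &in,
                              std::vector<float> &out,
                              int num, int channels, int spatial_dim) {
  out = in;
  for (int n = 0; n < num; ++n) {
    for (int s = 0; s < spatial_dim; ++s) {
      // kernel_channel_max: per-channel maximum.
      float maxval = -FLT_MAX;
      for (int c = 0; c < channels; ++c)
        maxval = std::max(maxval, out[(n * channels + c) * spatial_dim + s]);
      // kernel_channel_subtract + kernel_exp + kernel_channel_sum.
      float sum = 0.0f;
      for (int c = 0; c < channels; ++c) {
        float &v = out[(n * channels + c) * spatial_dim + s];
        v = std::exp(v - maxval);
        sum += v;
      }
      // kernel_channel_div: normalize so each channel slice sums to 1.
      for (int c = 0; c < channels; ++c)
        out[(n * channels + c) * spatial_dim + s] /= sum;
    }
  }
}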
int main() { cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); int inputs = 1000; int size = 1000; ai::TensorCUDA_float weights(size * inputs); ai::TensorCUDA_float deltas(size * inputs); weights.fill(1); ai::TensorCUDA_float bias(size); ai::TensorCUDA_float errors(size); //errors.fill(1); errors.fill(0, 1); ai::TensorCUDA_float outerrors(inputs); ai::TensorCUDA_float in(inputs); ai::Tensor in_host(inputs); for (int i = 0; i < inputs; i++) if (i % 2 == 0) in_host[i] = 0; else in_host[i] = 1; in.copyToDevice(in_host.pointer(), inputs); ai::TensorCUDA_float outputs(size); ai::TensorCUDA_float deviation(size); deviation.fill(0); ai::TensorCUDA_float normalized(size); normalized.fill(0); ai::TensorCUDA_float params(5); params.fill(0); ai::Tensor params_host(5); params_host[0] = 0; params_host[1] = 1; params_host[2] = 0; params_host[3] = 0; params_host[4] = 0; params.copyToDevice(&params_host[0], 5); int _filter_count = 32; int _filter_size = 4; int _input_count = 3; int _input_width = 28; int _input_height = 28; int _stride = 2; int _output_width = (_input_width - _filter_size + 1.0) / _stride; int _output_height = (_input_height - _filter_size + 1.0) / _stride; int _output_size = _output_width * _output_height; int _size = _output_size * _filter_count; ai::TensorCUDA_float _outputs; _outputs.setshape(_output_width, _output_height, _filter_count); _outputs.fill(0); ai::TensorCUDA_float _inputs; _inputs.setshape(_input_width, _input_height, _input_count); _inputs.fill(1); ai::TensorCUDA_float _out_errors; _out_errors.setshape(_input_width, _input_height, _input_count); _out_errors.fill(0); ai::TensorCUDA_float _errors; _errors.setshape(_size); _errors.fill(1); ai::TensorCUDA_float _deltas; _deltas.setshape((_filter_size * _filter_size * _input_count + 1) * _filter_count); //+1 for bias _deltas.fill(1); //Initialize weights ai::TensorCUDA_float _weights; _weights.setshape(_filter_size * _filter_size, _input_count, _filter_count); //_weights.fill(1); _weights.fill(0.0, sqrt(6.0 / ((_filter_size * _filter_size) * _input_count + 1))); ai::TensorCUDA_float _bias; _bias.setshape(_filter_count); //_bias.fill(2); _bias.fill(0.0, sqrt(6.0 / ((_filter_size * _filter_size) * _input_count + 1))); printf("outputs: %d\n", _output_size * _filter_count); int _convmap[_output_width * _output_height * _filter_size * _filter_size]; for (int x = 0; x < _output_width; x++) { for (int y = 0; y < _output_height; y++) { const float input_x = x * _stride; const float input_y = y * _stride; for (int kx = 0; kx < _filter_size; kx++) { for (int ky = 0; ky < _filter_size; ky++) { _convmap[(y * _output_width + x) * _filter_size * _filter_size + ky * _filter_size + kx] = (input_y + ky) * _input_width + input_x + kx; } } } } int _in_out_map[_input_width * _input_height * _filter_size * _filter_size]; for (int i = 0; i < _input_width * _input_height * _filter_size * _filter_size; i++) _in_out_map[i] = -1; int _in_weight_map[_input_width * _input_height * _filter_size * _filter_size]; for (int i = 0; i < _input_width * _input_height * _filter_size * _filter_size; i++) _in_weight_map[i] = -1; for (int x = 0; x < _output_width; x++) { for (int y = 0; y < _output_height; y++) { for (int w = 0; w < _filter_size * _filter_size; w++) { _in_out_map[_convmap[(y * _output_width + x) * _filter_size * _filter_size + w] * _filter_size * _filter_size + w] = y * _output_width + x; _in_weight_map[_convmap[(y * _output_width + x) * _filter_size * _filter_size + w] * _filter_size * _filter_size + w] = w; } } } 
ai::TensorCUDA_int _convmap_gpu(_filter_size * _filter_size, _output_width * _output_height); _convmap_gpu.copyToDevice(&_convmap[0], _convmap_gpu.size()); ai::TensorCUDA_int _in_out_map_gpu(_input_width * _input_height * _filter_size * _filter_size); _in_out_map_gpu.copyToDevice(&_in_out_map[0], _in_out_map_gpu.size()); ai::TensorCUDA_int _in_weight_map_gpu(_input_width * _input_height * _filter_size * _filter_size); _in_weight_map_gpu.copyToDevice(&_in_weight_map[0], _in_weight_map_gpu.size()); cudaEventRecord(start); //start for (int i = 0; i < 100; i++) { //amplify //ai::cuda::conv_foreward(_weights.pointer(), _bias.pointer(), _inputs.pointer(), _outputs.pointer(), // _convmap_gpu.pointer(), _input_width, _input_height, _input_count, _stride, _output_width, _output_height, // _filter_count, _filter_size); //ai::cuda::conv_accumulate_deltas(_deltas.pointer(), _errors.pointer(), _inputs.pointer(), // outputs.pointer(), _convmap_gpu.pointer(), _input_count, _input_width, _input_height, // _output_size, _filter_size, _filter_count); //ai::cuda::conv_backward(_weights.pointer(), _out_errors.pointer(), // _errors.pointer(), _in_weight_map_gpu.pointer(), _in_out_map_gpu.pointer(), _input_count, // _output_size, _input_width, _input_height, _filter_size, _filter_count); //ai::cuda::batchnorm_foreward(in.pointer(), deviation.pointer(), normalized.pointer(), outputs.pointer(), // &params.pointer()[0], &params.pointer()[1], &params.pointer()[2], 0.0001, size); //ai::cuda::batchnorm_backward(errors.pointer(), outerrors.pointer(), deviation.pointer(), // &params.pointer()[0], &params.pointer()[1], &params.pointer()[2], 0.0001, size); //ai::cuda::conv_update_parameters(_weights.pointer(), _bias.pointer(), _deltas.pointer(), _filter_size, _input_count, _filter_count, 0.1); //ai::cuda::linear_foreward(weights.pointer(), bias.pointer(), in.pointer(), outputs.pointer(), inputs, size); //ai::cuda::linear_accumulate_deltas(deltas.pointer(), in.pointer(), errors.pointer(), inputs, size); //ai::cuda::linear_backward(weights.pointer(), outerrors.pointer(), errors.pointer(), inputs, size); //ai::cuda::cost_crossentropy(outputs.pointer(), errors.pointer(), outerrors.pointer(), _size); //ai::cuda::sigmoid_foreward(in.pointer(), outputs.pointer(), size); } cudaEventRecord(stop); //stop cudaEventSynchronize(stop); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); printf("%f ms\n", milliseconds); /* ai::Tensor o(_out_errors.size()); _out_errors.copyToHost(o.pointer(), o.size()); for (int k = 0; k < 3; k++) { for (int i = 0; i < _input_width; i++) printf("%f\n", o[k * _input_width + i]); printf("\n"); } */ ai::Tensor out(size); /* outputs.copyToHost(out.pointer(), size); for (int i = 0; i < 40; i++) { printf("%f\n", out[i]); } */ printf("\n"); outerrors.copyToHost(out.pointer(), size); for (int i = 0; i < 40; i++) { printf("%f\n", out[i]); } return 0; } ////////////////////////////////////////////////// /// TEST /// 2017-01-06 /// questo codice dimostra che knl_conv_update_weights /// funziona correttamente ///////////////////////////////////////////////// /* int main() { cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); int inputs = 1000; int size = 100; ai::TensorCUDA_float weights(size * inputs); ai::TensorCUDA_float deltas(size * inputs); weights.fill(1); ai::TensorCUDA_float bias(size); ai::TensorCUDA_float errors(size); errors.fill(1); ai::TensorCUDA_float outerrors(inputs); ai::TensorCUDA_float in(inputs); in.fill(1); ai::TensorCUDA_float outputs(size); int 
_filter_count = 2; int _filter_size = 3; int _input_count = 2; int _input_width = 32; int _input_height = 32; int _stride = 1; int _output_width = (_input_width - _filter_size + 1.0) / _stride; int _output_height = (_input_height - _filter_size + 1.0) / _stride; int _output_size = _output_width * _output_height; int _size = _output_size * _filter_count; ai::TensorCUDA_float _outputs; _outputs.setshape(_output_width, _output_height, _filter_count); _outputs.fill(0); ai::TensorCUDA_float _inputs; _inputs.setshape(_input_width, _input_height, _input_count); _inputs.fill(1); ai::TensorCUDA_float _errors; _errors.setshape(_size); _errors.fill(1); ai::TensorCUDA_float _deltas; _deltas.setshape((_filter_size * _filter_size * _input_count + 1) * _filter_count); //+1 for bias _deltas.fill(1); //Initialize weights ai::TensorCUDA_float _weights; _weights.setshape(_filter_size * _filter_size, _input_count, _filter_count); _weights.fill(1); //_weights.fill(0.0, sqrt(6.0 / ((_filter_size * _filter_size) * _input_count + 1))); ai::TensorCUDA_float _bias; _bias.setshape(_filter_count); _bias.fill(3); //_bias.fill(0.0, sqrt(6.0 / ((_filter_size * _filter_size) * _input_count + 1))); printf("outputs: %d\n", _output_size * _filter_count); int _convmap[_output_width * _output_height * _filter_size * _filter_size]; for (int x = 0; x < _output_width; x++) { for (int y = 0; y < _output_height; y++) { const float input_x = x * _stride; const float input_y = y * _stride; for (int kx = 0; kx < _filter_size; kx++) { for (int ky = 0; ky < _filter_size; ky++) { _convmap[(y * _output_width + x) * _filter_size * _filter_size + ky * _filter_size + kx] = (input_y + ky) * _input_width + input_x + kx; } } } } ai::TensorCUDA_int _convmap_gpu(_filter_size * _filter_size, _output_width * _output_height); _convmap_gpu.copyToDevice(&_convmap[0], _convmap_gpu.size()); cudaEventRecord(start); //start for (int i = 0; i < 100; i++) { //amplify //ai::cuda::conv_foreward(_weights.pointer(), _bias.pointer(), _inputs.pointer(), _outputs.pointer(), //_convmap_gpu.pointer(), _input_width, _input_height, _input_count, _stride, _output_width, _output_height, //_filter_count, _filter_size); //ai::cuda::conv_accumulate_deltas(_deltas.pointer(), _errors.pointer(), _inputs.pointer(), // outputs.pointer(), _convmap_gpu.pointer(), _input_count, _input_width, _input_height, // _output_size, _filter_size, _filter_count); ai::cuda::conv_update_weights(_weights.pointer(), _bias.pointer(), _deltas.pointer(), _filter_size, _input_count, _filter_count, 0.1); //ai::cuda::linear_foreward(weights.pointer(), bias.pointer(), in.pointer(), outputs.pointer(), inputs, size); //ai::cuda::linear_accumulate_deltas(deltas.pointer(), in.pointer(), errors.pointer(), inputs, size); //ai::cuda::linear_backward(weights.pointer(), outerrors.pointer(), errors.pointer(), inputs, size); //ai::cuda::cost_crossentropy(outputs.pointer(), errors.pointer(), outerrors.pointer(), _size); //ai::cuda::sigmoid_foreward(in.pointer(), outputs.pointer(), size); } cudaEventRecord(stop); //stop cudaEventSynchronize(stop); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); printf("%f ms\n", milliseconds); ai::Tensor o(_weights.size()); ai::Tensor o2(_bias.size()); _weights.copyToHost(o.pointer(), o.size()); _bias.copyToHost(o2.pointer(), o2.size()); for (int i = 0; i < 10; i++) printf("%f\n", o[i]); for (int i = 0; i < o2.size(); i++) printf("%f\n", o2[i]); return 0; } */ ////////////////////////////////////////////////// /// TEST /// 2017-01-06 /// questo codice 
dimostra che knl_conv_accumulate_deltas /// funziona correttamente ///////////////////////////////////////////////// /* void cpu_conv_accumulate_deltas(float* data, float* _errors, float* _deltas, int* _convmap, int _output_size, int _input_count, int _input_width, int _input_height, int _filter_size, int _filter_count) { for (int f = 0; f < _filter_count; f++) { //Each filter //Shortcut for this filter output const float *upcomming_errors = &_errors[f * _output_size]; float *t_filter_deltas = &_deltas[f * _input_count * _filter_size * _filter_size]; //For each output for (int o = 0; o < _output_size; o++) { //Jump computation if (upcomming_errors[o] == 0) continue; //Compute each input group for (int k = 0; k < _input_count; k++) { float *filter_deltas = &t_filter_deltas[k * _filter_size * _filter_size]; //Shortcut for this input group const float *in = &data[_input_width * _input_height * k]; //For each weight for (int w = 0; w < _filter_size * _filter_size; w++) filter_deltas[w] += in[_convmap[o * (_filter_size * _filter_size) + w]] * upcomming_errors[o]; } // for each output //Bias _deltas[_input_count * _filter_size * _filter_size * _filter_count + f] += upcomming_errors[o]; } // for each input group } // for each filter } int main() { cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); int inputs = 1000; int size = 100; ai::TensorCUDA_float weights(size * inputs); ai::TensorCUDA_float deltas(size * inputs); weights.fill(1); ai::TensorCUDA_float bias(size); ai::TensorCUDA_float errors(size); errors.fill(1); ai::TensorCUDA_float outerrors(inputs); ai::TensorCUDA_float in(inputs); in.fill(1); ai::TensorCUDA_float outputs(size); int _filter_count = 2; int _filter_size = 3; int _input_count = 2; int _input_width = 32; int _input_height = 32; int _stride = 1; int _output_width = (_input_width - _filter_size + 1.0) / _stride; int _output_height = (_input_height - _filter_size + 1.0) / _stride; int _output_size = _output_width * _output_height; int _size = _output_size * _filter_count; ai::TensorCUDA_float _outputs; _outputs.setshape(_output_width, _output_height, _filter_count); _outputs.fill(0); ai::TensorCUDA_float _inputs; _inputs.setshape(_input_width, _input_height, _input_count); float* _cpu_input = (float*)malloc(sizeof(float) * _input_width * _input_height * _input_count); for (int i = 0; i < _input_width * _input_height * _input_count; i++) _cpu_input[i] = rand() % 3; _inputs.copyToDevice(_cpu_input, _inputs.size()); ai::TensorCUDA_float _errors; _errors.setshape(_size); _errors.fill(1); ai::TensorCUDA_float _deltas; _deltas.setshape((_filter_size * _filter_size * _input_count + 1) * _filter_count); //+1 for bias _deltas.fill(0); //Initialize weights ai::TensorCUDA_float _weights; _weights.setshape(_filter_size * _filter_size, _input_count, _filter_count); _weights.fill(1); //_weights.fill(0.0, sqrt(6.0 / ((_filter_size * _filter_size) * _input_count + 1))); ai::TensorCUDA_float _bias; _bias.setshape(_filter_count); _bias.fill(3); //_bias.fill(0.0, sqrt(6.0 / ((_filter_size * _filter_size) * _input_count + 1))); printf("outputs: %d\n", _output_size * _filter_count); int _convmap[_output_width * _output_height * _filter_size * _filter_size]; for (int x = 0; x < _output_width; x++) { for (int y = 0; y < _output_height; y++) { const float input_x = x * _stride; const float input_y = y * _stride; for (int kx = 0; kx < _filter_size; kx++) { for (int ky = 0; ky < _filter_size; ky++) { _convmap[(y * _output_width + x) * _filter_size * _filter_size + ky * _filter_size 
+ kx] = (input_y + ky) * _input_width + input_x + kx; } } } } ai::TensorCUDA_int _convmap_gpu(_filter_size * _filter_size, _output_width * _output_height); _convmap_gpu.copyToDevice(&_convmap[0], _convmap_gpu.size()); cudaEventRecord(start); //start for (int i = 0; i < 100; i++) { //amplify //ai::cuda::conv_foreward(_weights.pointer(), _bias.pointer(), _inputs.pointer(), _outputs.pointer(), //_convmap_gpu.pointer(), _input_width, _input_height, _input_count, _stride, _output_width, _output_height, //_filter_count, _filter_size); ai::cuda::conv_accumulate_deltas(_deltas.pointer(), _errors.pointer(), _inputs.pointer(), outputs.pointer(), _convmap_gpu.pointer(), _input_count, _input_width, _input_height, _output_size, _filter_size, _filter_count); //ai::cuda::conv_update_weights(_weights.pointer(), _bias.pointer(), _deltas.pointer(), _filter_size, _input_count, _filter_count, 0.1); //ai::cuda::linear_foreward(weights.pointer(), bias.pointer(), in.pointer(), outputs.pointer(), inputs, size); //ai::cuda::linear_accumulate_deltas(deltas.pointer(), in.pointer(), errors.pointer(), inputs, size); //ai::cuda::linear_backward(weights.pointer(), outerrors.pointer(), errors.pointer(), inputs, size); //ai::cuda::cost_crossentropy(outputs.pointer(), errors.pointer(), outerrors.pointer(), _size); //ai::cuda::sigmoid_foreward(in.pointer(), outputs.pointer(), size); } cudaEventRecord(stop); //stop //cpu convolution accumualte deltas test float* _cpu_errors = (float*)malloc(sizeof(float) * _output_width * _output_height * _filter_count); for (int i = 0; i < _output_width * _output_height * _filter_count; i++) _cpu_errors[i] = 1; float* _cpu_deltas = (float*)malloc(sizeof(float) * (_filter_size * _filter_size * _input_count + 1) * _filter_count); for (int i = 0; i < (_filter_size * _filter_size * _input_count + 1) * _filter_count; i++) _cpu_deltas[i] = 0; for (int i = 0; i < 100; i++) cpu_conv_accumulate_deltas(_cpu_input, _cpu_errors, _cpu_deltas, _convmap, _output_size, _input_count, _input_width, _input_height, _filter_size, _filter_count); for (int i = 0; i < 20; i++) printf("d %f\n", _cpu_deltas[i]); printf("\n"); for (int i = _deltas.size()-1; i > _deltas.size()-10; i--) printf("d %f\n", _cpu_deltas[i]); free(_cpu_input); free(_cpu_errors); free(_cpu_deltas); cudaEventSynchronize(stop); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); printf("%f ms\n", milliseconds); ai::Tensor d(_deltas.size()); _deltas.copyToHost(d.pointer(), d.size()); for (int i = 0; i < 20; i++) printf("%f\n", d[i]); printf("\n"); for (int i = _deltas.size()-1; i > _deltas.size()-10; i--) printf("%f\n", d[i]); return 0; } */
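// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the original test: the _convmap built in
// main() above stores, for every output pixel, the flat input indices its
// filter window reads. The hypothetical helper below builds the same table
// for a single input channel, using the same output-size formula as the test;
// with input_width = 4, filter_size = 3, stride = 1 the window of output
// (0, 0) maps to inputs {0, 1, 2, 4, 5, 6, 8, 9, 10}.
#include <vector>

static std::vector<int> build_convmap(int input_width, int input_height,
                                      int filter_size, int stride)
{
    int output_width = (input_width - filter_size + 1) / stride;
    int output_height = (input_height - filter_size + 1) / stride;
    std::vector<int> convmap(output_width * output_height *
                             filter_size * filter_size);
    for (int x = 0; x < output_width; x++) {
        for (int y = 0; y < output_height; y++) {
            // Top-left corner of the filter window in the input image.
            int input_x = x * stride;
            int input_y = y * stride;
            for (int kx = 0; kx < filter_size; kx++) {
                for (int ky = 0; ky < filter_size; ky++) {
                    convmap[(y * output_width + x) * filter_size * filter_size
                            + ky * filter_size + kx] =
                            (input_y + ky) * input_width + (input_x + kx);
                }
            }
        }
    }
    return convmap;
}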
// FillUp.cu // 实现对输入图像像素的处理 #include <iostream> using namespace std; #include "FillUp.h" #include "ErrorCode.h" // 宏:DEF_BLOCK_X 和 DEF_BLOCK_Y // 定义了默认的线程块尺寸。 #define DEF_BLOCK_X 32 #define DEF_BLOCK_Y 8 // static变量:_defTpl // 当用户未定义有效的模板时,使用此默认模板,默认为 3 x 3。 static Template *_defTpl = NULL; // Kernel 函数:_fillupFirstKer(实现检查像素操作) // 检查一个像素的邻域,若其邻域同时存在 l 像素和 v 像素, // 当 v 像素的个数大于等于某一值时,就将所设标记置为 1, // 否则置为0。 static __global__ void // Kernel 函数无返回值。 _fillupFirstKer( ImageCuda inimg, // 输入图像。 ImageCuda flagimg, // 标记图像。 Template tpl, // 模板。 unsigned char l, // 主要像素。 unsigned char v, // 替换像素。 int percentage // 定值,即 r * w * w。 ); // Kernel 函数:_fillupFinalKer(将修改过的像素输出到图像) // 当 v 的像素的个数大于等于某一值时,将标记置为 1 后,把标记 // 为 1 的像素值 v 输出到图像上。 static __global__ void // Kernel 函数无返回值。 _fillupFinalKer( ImageCuda inimg, // 输入图像。 ImageCuda flagimg, // 标记图像。 ImageCuda outimg, // 输出图像。 Template tpl, // 模板。 unsigned char v // 替换像素。 ); // Kernel 函数:_fillupLastKer(对 l 像素的领域检查,并修改像素) // 对所有 l 像素的 8 个领域进行检查,如果它的 8 个领 // 域当中有 5 个或 5 个以上的 v 的像素值,就将 v 的像素值赋给 l。 static __global__ void // Kernel 函数无返回值。 _fillupLastKer( ImageCuda inimg, // 输入图像。 ImageCuda outimg, // 输出图像。 unsigned char l, // 主要像素。 unsigned char v, // 替换像素。 int lastpercentage // 定值,值为 5。 ); // Host 函数:_initDefTemplate(初始化默认的模板指针) // 函数初始化默认模板指针 _defTpl,如果原来模板不为空,则直接返回,否则初始化 // 为 3 x 3 的默认模板。 static __host__ Template* // 返回值:返回默认模板指针 _defTpl。 _initDefTemplate(); // Host 函数:_preOp(在算法操作前进行预处理) // 在进行处理像素操作前,先进行预处理,包括:(1)对输入和输出图像 // 进行数据准备,包括申请当前Device存储空间;(2)对模板进行处理,包 // 申请当前Device存储空间。 static __host__ int // 返回值:函数是否正确执行,若正确执行,返回 // NO_ERROR 。 _preOp( Image *inimg, // 输入图像。 Image *outimg, // 输出图像。 Template *tp // 模板。 ); // Host 函数:_adjustRoiSize(调整 ROI 子图的大小) // 调整 ROI 子图的大小,使输入和输出的子图大小统一 static __host__ void // 无返回值。 _adjustRoiSize( ImageCuda *inimg, // 输入图像。 ImageCuda *outimg // 输出图像。 ); // Host 函数:_getBlockSize(获取 Block 和 Grid 的尺寸) // 根据默认的 Block 尺寸,使用最普通的线程划分方法获取 Grid 的尺寸 static __host__ int // 返回值:函数是否正确执行,若正确执行,返回 // NO_ERROR 。 _getBlockSize( int width, // 需要处理的宽度。 int height, // 需要处理的高度。 dim3 *gridsize, // 计算获得的 Grid 的尺寸。 dim3 *blocksize // 计算获得的 Block 的尺寸。 ); // 构造函数:FillUp __host__ FillUp::FillUp(Template *tp) { setTemplate(tp); } // 构造函数:FillUp __host__ FillUp::FillUp(Template *tp, unsigned char l, unsigned char v , int maxw, float r) { // 使用默认值为类的各个成员变量赋值,防止用户在构造函数的参数中给了非法的 // 初始值而使系统进入一个未知的状态。 setTemplate(tp); this->l = 255; // l 值默认为 255。 this->v = 0; // v 值默认为 0。 this->maxw = 5; // maxw 值默认为 5。 this->r = 0.2; // r 值默认为 0.2。 // 根据参数列表中的值设定成员变量的初值 setL(l); setV(v); setMaxw(maxw); setR(r); } // 成员方法:getTemplate __host__ Template* FillUp::getTemplate() const { // 如果模板指针和默认模板指针相同,则返回空 if (this->tpl == _defTpl) return NULL; // 否则返回设置的模板指针 return this->tpl; } // 成员方法:setTemplate __host__ int FillUp::setTemplate(Template *tp) { if (tp == NULL) { // 如果 tp 为空,则只用默认的模板指针 this->tpl = _initDefTemplate(); } else { // 否则将 tp 赋值给 tpl this->tpl = tp; } return NO_ERROR; } // Kernel 函数:_fillupFirstKer(实现检查像素操作) static __global__ void _fillupFirstKer( ImageCuda inimg, ImageCuda flagimg, Template tpl, unsigned char l, unsigned char v, int percentage) { // c 和 r 分别表示线程处理的像素点的坐标的 x 和 y 分量 (其中, // c 表示 column, r 表示 row)。由于采用并行度缩减策略 , // 令一个线程处理 4 个输出像素,这四个像素位于统一列的相邻 4 // 行上,因此,对于 r 需要进行乘 4 的计算。 int c = blockIdx.x * blockDim.x + threadIdx.x; int r = (blockIdx.y * blockDim.y + threadIdx.y) * 4; // 检查第一个像素点是否越界,如果越界,则不进行处理,一方面节省计算 // 资源,另一方面防止由于段错误导致程序崩溃。 if (c >= inimg.imgMeta.width || r >= inimg.imgMeta.height) return; // 用来保存临时像素点的坐标的 x 和 y 分量 int dx, dy; // 
    // Pointer used to track the current position in the template.
    int *curtplptr = tpl.tplData;

    // Pointer to the current position in the input image.
    unsigned char *curinptr;

    // Number of v pixels counted for each of the 4 pixels.
    int sum[4] = { 0 };

    // Pixel value of the temporary pixel.
    unsigned char pixel;

    // Pointer to the current position in the output (flag) image.
    unsigned char *outptr;

    // Output flag values for the 4 pixels.
    unsigned char outvalue[4] = { 1, 1, 1, 1 };

    // Scan every input-image pixel covered by the template.
    for (int i = 0; i < tpl.count; i++) {
        // Compute the x and y components of the pixel referenced by the current template
        // entry. The template stores one point as two consecutive array entries, so the
        // template pointer is advanced by one for each component.
        dx = c + *(curtplptr++);
        dy = r + *(curtplptr++);

        // First check whether the x component is out of bounds; if so, skip to the next
        // template point. Otherwise process the 4 adjacent pixels of the current column.
        if (dx < 0 || dx >= inimg.imgMeta.width)
            continue;

        if (dx != c || dy != r) {
            // Read the pixel at the current position and check whether it equals v; if it
            // does, count it. If it equals neither v nor l, clear the flag.
            // First pixel.
            curinptr = inimg.imgMeta.imgData + dx + dy * inimg.pitchBytes;
            if (dy >= 0 && dy < inimg.imgMeta.height) {
                pixel = *curinptr;
                (pixel == v) ? (sum[0]++) : 0;
                (pixel != v && pixel != l) ? (outvalue[0] = 0) : 0;
            }

            // Process the remaining 3 pixels of the current column.
            for (int j = 1; j < 4; j++) {
                // Move to the pixel in the next row of the same column.
                curinptr = curinptr + inimg.pitchBytes;
                // Increment dy to get the y component of the pixel being processed.
                dy++;

                // Check whether dy is out of bounds.
                if (dy >= 0 && dy < inimg.imgMeta.height) {
                    // Check whether the pixel equals v; if it does, count it. If it equals
                    // neither v nor l, clear the flag.
                    pixel = *curinptr;
                    (pixel == v) ? (sum[j]++) : 0;
                    (pixel != v && pixel != l) ? (outvalue[j] = 0) : 0;
                }
            }
        }
    }

    // If the number of v pixels reaches the fixed threshold percentage, set the flag to 1,
    // otherwise set it to 0.
    // Get the position of the first output pixel.
    outptr = flagimg.imgMeta.imgData + r * flagimg.pitchBytes + c;
    *outptr = ((sum[0] >= percentage) && outvalue[0]);

    // Check whether the y component is out of bounds; if so, return immediately.
    if (++r >= flagimg.imgMeta.height)
        return;
    outptr = outptr + flagimg.pitchBytes;
    *outptr = ((sum[1] >= percentage) && outvalue[1]);

    // Check whether the y component is out of bounds; if so, return immediately.
    if (++r >= flagimg.imgMeta.height)
        return;
    outptr = outptr + flagimg.pitchBytes;
    *outptr = ((sum[2] >= percentage) && outvalue[2]);

    // Check whether the y component is out of bounds; if so, return immediately.
    if (++r >= flagimg.imgMeta.height)
        return;
    outptr = outptr + flagimg.pitchBytes;
    *outptr = ((sum[3] >= percentage) && outvalue[3]);
}

// Kernel function: _fillupFinalKer (writes the modified pixels to the output image)
static __global__ void _fillupFinalKer(ImageCuda inimg, ImageCuda flagimg, ImageCuda outimg,
                                       Template tpl, unsigned char v)
{
    // c and r denote the x and y components of the pixel handled by this thread (c stands
    // for column, r for row). Because a parallelism-reduction strategy is used, each thread
    // processes 4 output pixels located in 4 adjacent rows of the same column, so r must be
    // multiplied by 4.
    int c = blockIdx.x * blockDim.x + threadIdx.x;
    int r = (blockIdx.y * blockDim.y + threadIdx.y) * 4;

    // Local variables.
    int j;
    unsigned char *rowinptr[4];
    unsigned char outvalue[4];
    unsigned char curvalue;
    unsigned char *outptr;
    unsigned char *curflagptr;

    // Pointer used to track the current position in the template.
    int *curtplptr = tpl.tplData;

    // Check whether the first pixel is out of bounds; if so, do nothing. This both saves
    // computation and prevents crashes caused by out-of-bounds memory accesses.
    if (c >= inimg.imgMeta.width || r >= inimg.imgMeta.height)
        return;

    // x and y components of the temporary pixel coordinates.
    int dx, dy;

    // Each thread processes four pixels.
    // Read the value of the first pixel, then the remaining three pixels of the same column.
    rowinptr[0] = inimg.imgMeta.imgData + c + r * inimg.pitchBytes;
    outvalue[0] = *rowinptr[0];
    rowinptr[1] = rowinptr[0] + inimg.pitchBytes;
    outvalue[1] = *rowinptr[1];
    rowinptr[2] = rowinptr[1] + inimg.pitchBytes;
    outvalue[2] = *rowinptr[2];
    rowinptr[3] = rowinptr[2] + inimg.pitchBytes;
    outvalue[3] = *rowinptr[3];

    for (j = 0; j < tpl.count; j++) {
        // Compute the x and y components of the pixel referenced by the current template
        // entry. The template stores one point as two consecutive array entries, so the
        // template pointer is advanced by one for each component.
        dx = c + *(curtplptr++);
        dy = r + *(curtplptr++);

        // If a pixel whose flag is set is found, write the value v to the output.
        if (dx >= 0 && dx < flagimg.imgMeta.width) {
            curflagptr = flagimg.imgMeta.imgData + dx + dy * flagimg.pitchBytes;
            if (dy >= 0 && dy < flagimg.imgMeta.height) {
                curvalue = *curflagptr;
                (curvalue != 0) ?
                    (outvalue[0] = v) : 0;
            }

            // Increment dy to get the y component of the pixel being processed.
            dy++;
            // Move to the pixel in the next row of the same column.
            curflagptr = curflagptr + flagimg.pitchBytes;

            // Second pixel.
            if (dy >= 0 && dy < flagimg.imgMeta.height) {
                curvalue = *curflagptr;
                (curvalue != 0) ? (outvalue[1] = v) : 0;
            }

            // Increment dy to get the y component of the pixel being processed.
            dy++;
            // Move to the pixel in the next row of the same column.
            curflagptr = curflagptr + flagimg.pitchBytes;

            // Third pixel.
            if (dy >= 0 && dy < flagimg.imgMeta.height) {
                curvalue = *curflagptr;
                (curvalue != 0) ? (outvalue[2] = v) : 0;
            }

            // Increment dy to get the y component of the pixel being processed.
            dy++;
            // Move to the pixel in the next row of the same column.
            curflagptr = curflagptr + flagimg.pitchBytes;

            // Fourth pixel.
            if (dy >= 0 && dy < flagimg.imgMeta.height) {
                curvalue = *curflagptr;
                (curvalue != 0) ? (outvalue[3] = v) : 0;
            }
        }
    }

    // Write the pixel values to the output image.
    outptr = outimg.imgMeta.imgData + r * outimg.pitchBytes + c;
    *outptr = outvalue[0];

    // Check whether the y component is out of bounds; if so, return immediately.
    if (++r >= outimg.imgMeta.height)
        return;
    outptr = outptr + outimg.pitchBytes;
    *outptr = outvalue[1];

    // Check whether the y component is out of bounds; if so, return immediately.
    if (++r >= outimg.imgMeta.height)
        return;
    outptr = outptr + outimg.pitchBytes;
    *outptr = outvalue[2];

    // Check whether the y component is out of bounds; if so, return immediately.
    if (++r >= outimg.imgMeta.height)
        return;
    outptr = outptr + outimg.pitchBytes;
    *outptr = outvalue[3];
}

// Kernel function: _fillupLastKer (checks the neighborhood of l pixels and modifies them)
static __global__ void _fillupLastKer(ImageCuda inimg, ImageCuda outimg, unsigned char l,
                                      unsigned char v, int lastpercentage)
{
    // Compute the position handled by the current thread.
    int c = blockIdx.x * blockDim.x + threadIdx.x;
    int r = blockIdx.y * blockDim.y + threadIdx.y;

    // Local variables.
    int i, j;
    int sum = 0;
    unsigned char *inptr, *outptr;
    unsigned char orivalue, curvalue;

    // Check whether the pixel is out of bounds; if so, do nothing. This both saves
    // computation and prevents crashes caused by out-of-bounds memory accesses.
    if (r >= inimg.imgMeta.height || c >= inimg.imgMeta.width)
        return;

    // Get the positions of the current pixel in the input and output images.
    inptr = inimg.imgMeta.imgData + c + r * inimg.pitchBytes;
    outptr = outimg.imgMeta.imgData + c + r * outimg.pitchBytes;
    orivalue = *inptr;

    // If the current pixel value is not l, copy it to the output image unchanged.
    if (orivalue != l){
        *outptr = orivalue;
        return;
    }

    // Check the 8-neighborhood of the l pixel; if 5 or more of the 8 neighbors have the
    // pixel value v, the value v will be assigned to the l pixel.
    for (j = r - 1; j <= r + 1; j++) {
        for (i = c - 1; i <= c + 1; i++) {
            // Check whether the neighbor is out of bounds.
            if (j >= 0 && j < inimg.imgMeta.height && i >= 0 && i < inimg.imgMeta.width) {
                // Read the pixel value at the current position.
                curvalue = *(inimg.imgMeta.imgData + i + j * inimg.pitchBytes);
                // If the value equals v, count it.
                sum += ((curvalue == v) ? 1 : 0);
            }
        }
    }

    // If the number of v pixels exceeds the threshold, write v to the output,
    // otherwise write the original value.
    *outptr = (sum > lastpercentage) ?
              v : orivalue;
}

// Host function: _initDefTemplate (initializes the default template pointer)
static __host__ Template* _initDefTemplate()
{
    // If _defTpl is not NULL it has already been initialized, so return it directly.
    if (_defTpl != NULL)
        return _defTpl;

    // If _defTpl is NULL, initialize it as a 3 x 3 template.
    TemplateBasicOp::newTemplate(&_defTpl);
    TemplateBasicOp::makeAtHost(_defTpl, 9);

    // Fill in every point.
    for (int i = 0; i < 9; i++) {
        // Compute the x and y coordinates of each point.
        _defTpl->tplData[2 * i] = i % 3 - 1;
        _defTpl->tplData[2 * i + 1] = i / 3 - 1;
    }
    return _defTpl;
}

// Host function: _preOp (preprocessing before the algorithm runs)
static __host__ int _preOp(Image *inimg, Image *outimg, Template *tp)
{
    // Local variable: error code.
    int errcode;

    // Copy the input image to device memory.
    errcode = ImageBasicOp::copyToCurrentDevice(inimg);
    if (errcode != NO_ERROR)
        return errcode;

    // Copy the output image to device memory.
    errcode = ImageBasicOp::copyToCurrentDevice(outimg);
    if (errcode != NO_ERROR) {
        // Compute the width and height of the ROI sub-image.
        int roiwidth = inimg->roiX2 - inimg->roiX1;
        int roiheight = inimg->roiY2 - inimg->roiY1;

        // If the output image has no data, create an image with the same size as the
        // input ROI sub-image.
        errcode = ImageBasicOp::makeAtCurrentDevice(outimg, roiwidth, roiheight);

        // If creating the image still fails, return the error.
        if (errcode != NO_ERROR)
            return errcode;
    }

    // Copy the template to device memory.
    errcode = TemplateBasicOp::copyToCurrentDevice(tp);
    if (errcode != NO_ERROR)
        return errcode;

    return NO_ERROR;
}

// Host function: _adjustRoiSize (adjusts the ROI sizes of the input and output images)
inline static __host__ void _adjustRoiSize(ImageCuda *inimg, ImageCuda *outimg)
{
    if (inimg->imgMeta.width > outimg->imgMeta.width)
        inimg->imgMeta.width = outimg->imgMeta.width;
    else
        outimg->imgMeta.width = inimg->imgMeta.width;

    if (inimg->imgMeta.height > outimg->imgMeta.height)
        inimg->imgMeta.height = outimg->imgMeta.height;
    else
        outimg->imgMeta.height = inimg->imgMeta.height;
}

// Host function: _getBlockSize (computes the Block and Grid dimensions)
inline static __host__ int _getBlockSize(int width, int height, dim3 *gridsize, dim3 *blocksize)
{
    // Check whether gridsize and blocksize are null pointers.
    if (gridsize == NULL || blocksize == NULL)
        return NULL_POINTER;

    // Use the default block dimensions.
    blocksize->x = DEF_BLOCK_X;
    blocksize->y = DEF_BLOCK_Y;

    // Partition the grid in the usual way (each thread covers 4 rows, hence the factor 4).
    gridsize->x = (width + blocksize->x - 1) / blocksize->x;
    gridsize->y = (height + blocksize->y * 4 - 1) / (blocksize->y * 4);
    return NO_ERROR;
}

// Member method: fillUp
__host__ int FillUp::fillUp(Image *inimg, Image *outimg)
{
    // Local variable: error code.
    int errcode;
    dim3 gridsize;
    dim3 blocksize;
    Image *flagimg;

    // Check whether the input image, output image, or template is NULL.
    if (inimg == NULL || outimg == NULL || tpl == NULL)
        return NULL_POINTER;

    // Create an intermediate flag image.
    errcode = ImageBasicOp::newImage(&flagimg);
    if (errcode != NO_ERROR)
        return errcode;

    // Allocate device memory for the newly created image.
    errcode = ImageBasicOp::makeAtCurrentDevice(flagimg, inimg->width, inimg->height);
    if (errcode != NO_ERROR) {
        // Compute the width and height of the ROI sub-image.
        int roiwidth = inimg->roiX2 - inimg->roiX1;
        int roiheight = inimg->roiY2 - inimg->roiY1;

        // If the allocation failed, create an image with the same size as the input
        // ROI sub-image instead.
        errcode = ImageBasicOp::makeAtCurrentDevice(flagimg, roiwidth, roiheight);

        // If creating the image still fails, return the error.
        if (errcode != NO_ERROR)
            return errcode;
    }

    // Compute percentage from r and maxw.
    int percentage = r * maxw * maxw;

    // Preprocess the input image, the output image, and the template.
    errcode = _preOp(inimg, outimg, tpl);
    if (errcode != NO_ERROR)
        return errcode;

    // Extract the ROI sub-image of the input image.
    ImageCuda insubimgCud;
    errcode = ImageBasicOp::roiSubImage(inimg, &insubimgCud);
    if (errcode != NO_ERROR)
        return errcode;

    // Extract the ROI sub-image of the flag image.
    ImageCuda flagsubimgCud;
    errcode = ImageBasicOp::roiSubImage(flagimg, &flagsubimgCud);
    if (errcode != NO_ERROR)
        return errcode;

    // Extract the ROI sub-image of the output image.
    ImageCuda outsubimgCud;
    errcode = ImageBasicOp::roiSubImage(outimg, &outsubimgCud);
    if (errcode != NO_ERROR)
        return errcode;

    // Adjust the ROI sub-images of the input and output images so that their sizes match.
    _adjustRoiSize(&insubimgCud, &outsubimgCud);
    _adjustRoiSize(&insubimgCud, &flagsubimgCud);

    // Compute the block size and the number of blocks for the kernel launches.
    errcode = _getBlockSize(outsubimgCud.imgMeta.width, outsubimgCud.imgMeta.height,
                            &gridsize, &blocksize);
    if (errcode != NO_ERROR)
        return errcode;

    // First launch the _fillupFirstKer kernel to perform the pixel-checking operation.
    _fillupFirstKer<<<gridsize, blocksize>>>(insubimgCud, flagsubimgCud, *tpl, l, v, percentage);

    // Then launch this kernel to produce the output image.
    _fillupFinalKer<<<gridsize, blocksize>>>(insubimgCud, flagsubimgCud, outsubimgCud, *tpl, v);

    // Copy the output image back to the host.
    errcode = ImageBasicOp::copyToHost(outimg);

    // Release the flag image.
    errcode = ImageBasicOp::deleteImage(flagimg);
    if (errcode != NO_ERROR)
        return errcode;

    return errcode;
}

// Member method: fillUpAdv
__host__ int FillUp::fillUpAdv(Image *inimg, Image *outimg, int *stateflag)
{
    // Local variable: error code.
    int errcode;
    dim3 gridsize;
    dim3 blocksize;
    // Number of iterations performed.
    int step = 0;
    Image *midimg1, *midimg2;
    int curw, nextw;
    Image *curin, *curout, *tempimg;
    Template *tl;

    // Check whether the input image, output image, or template is NULL.
    if (inimg == NULL || outimg == NULL || tpl == NULL)
        return NULL_POINTER;

    // Allocate the intermediate images that will be needed.
    ImageBasicOp::newImage(&midimg1);
    ImageBasicOp::makeAtHost(midimg1, inimg->width, inimg->height);
    ImageBasicOp::newImage(&midimg2);
    ImageBasicOp::makeAtHost(midimg2, inimg->width, inimg->height);
    ImageBasicOp::newImage(&curout);
    ImageBasicOp::makeAtHost(curout, inimg->width, inimg->height);
    ImageBasicOp::newImage(&curin);
    ImageBasicOp::makeAtHost(curin, inimg->width, inimg->height);

    // Ratio value used in the final processing step.
    int lastpercentage = 4;

    // Point the current input at the input image so that it is processed first.
    curin = inimg;
    curout = midimg1;

    // Use the initial template size as the current template size.
    curw = maxw;
    nextw = maxw >> 1;

    // If the initial template size is not larger than 3, no processing is done here.
    if (curw <= 3)
        curout = inimg;

    // If the template size is larger than 3, do the following.
    while (curw > 3) {
        // Allocate a template.
        errcode = TemplateBasicOp::newTemplate(&tl);
        // Initialize the template in host memory.
        errcode = TemplateBasicOp::makeAtHost(tl, curw * curw);
        if (errcode != NO_ERROR)
            return errcode;

        // Fill in the template values.
        for (int i = 0; i < curw * curw; i++) {
            tl->tplData[2 * i] = i % curw - curw / 2;
            tl->tplData[2 * i + 1] = i / curw - curw / 2;
        }

        // Call the setter to assign the member variable.
        setTemplate(tl);

        // Run the FillUp algorithm.
        errcode = fillUp(curin, curout);
        if (errcode != NO_ERROR)
            return errcode;

        // If the next template size is not larger than 3, leave the loop.
        if (nextw <= 3){
            step += 1;
            break;
        }

        // Swap the current input and output images.
        tempimg = curin;
        curin = curout;
        curout = tempimg;

        // If more fillUp passes follow, use midimg2 as curout.
        if (step == 0 && nextw > 3)
            curout = midimg2;

        // Release the template.
        TemplateBasicOp::deleteTemplate(tl);

        // Assign the new template size.
        curw = nextw;
        // Halve the template size for the next pass.
        nextw = nextw / 2;
        // Increment the iteration count by 1.
        step += 1;
    }

    if (maxw == 3) {
        // Copy the input image to device memory.
        errcode = ImageBasicOp::copyToCurrentDevice(inimg);
        if (errcode != NO_ERROR)
            return errcode;
    }

    // Copy the output image to device memory.
    errcode = ImageBasicOp::copyToCurrentDevice(outimg);
    if (errcode != NO_ERROR) {
        // Compute the width and height of the ROI sub-image.
        int roiwidth = inimg->roiX2 - inimg->roiX1;
        int roiheight = inimg->roiY2 - inimg->roiY1;

        // If the output image has no data, create an image with the same size as the
        // input ROI sub-image.
        errcode = ImageBasicOp::makeAtCurrentDevice(outimg, roiwidth, roiheight);

        // If creating the image still fails, return the error.
        if (errcode != NO_ERROR)
            return errcode;
    }

    // Copy curout, the input of the next step, to the current device.
    ImageBasicOp::copyToCurrentDevice(curout);

    // Extract the ROI sub-image of the input image.
    ImageCuda insubimgCud;
    errcode = ImageBasicOp::roiSubImage(curout, &insubimgCud);
    if (errcode != NO_ERROR)
        return errcode;

    // Extract the ROI sub-image of the output image.
    ImageCuda outsubimgCud;
    errcode = ImageBasicOp::roiSubImage(outimg, &outsubimgCud);
    if (errcode != NO_ERROR)
        return errcode;

    // Adjust the ROI sub-images of the input and output images so that their sizes match.
    _adjustRoiSize(&insubimgCud, &outsubimgCud);

    // Compute the block size and the number of blocks for the kernel launch.
    blocksize.x = DEF_BLOCK_X;
    blocksize.y = DEF_BLOCK_Y;
    gridsize.x = (curout->width + blocksize.x - 1) / blocksize.x;
    gridsize.y = (curout->height + blocksize.y - 1) / blocksize.y;
    if (errcode != NO_ERROR)
        return errcode;

    // Launch the kernel.
    _fillupLastKer<<<gridsize, blocksize>>>(insubimgCud, outsubimgCud, l, v, lastpercentage);

    // Copy the output image back to the host.
    errcode = ImageBasicOp::copyToHost(outimg);
    if (errcode != NO_ERROR)
        return errcode;

    // Report the final iteration count.
    if (stateflag != NULL)
        *stateflag = step;

    // Delete the intermediate images that were allocated.
    ImageBasicOp::deleteImage(midimg1);
    ImageBasicOp::deleteImage(midimg2);

    return errcode;
}
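// A minimal host-side usage sketch for the FillUp operator implemented above. The helper
// below is illustrative only: it assumes the caller already owns a valid input Image, and
// the concrete parameter values (l = 255, v = 0, maxw = 5, r = 0.2) simply mirror the class
// defaults; only the FillUp interface itself (constructor, fillUp, fillUpAdv) comes from
// the code above.
#include "FillUp.h"
#include "ErrorCode.h"

int runFillUpExample(Image *inimg)
{
    // Create the output image; fillUp/_preOp will allocate its device storage on demand.
    Image *outimg;
    int errcode = ImageBasicOp::newImage(&outimg);
    if (errcode != NO_ERROR)
        return errcode;

    // A NULL template selects the default 3 x 3 template.
    FillUp fillup(NULL, 255, 0, 5, 0.2f);

    // Single pass with the current template.
    errcode = fillup.fillUp(inimg, outimg);
    if (errcode != NO_ERROR)
        return errcode;

    // Multi-pass version: shrinks the template (maxw, maxw / 2, ...) and reports the
    // number of iterations through stateflag.
    int steps = 0;
    errcode = fillup.fillUpAdv(inimg, outimg, &steps);

    ImageBasicOp::deleteImage(outimg);
    return errcode;
}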
#include <map> struct GPUScene { Primitive* primitives; int numPrimitives; Primitive* lights; int numLights; Sky sky; BVH bvh; }; #define kBsdfSamples 1.0f #define kProbeSamples 1.0f #define kRayEpsilon 0.0001f #define LAUNCH_BOUNDS __launch_bounds__(256, 4) __device__ inline int getGlobalIndex() { int blockId = blockIdx.x + blockIdx.y * gridDim.x; int threadId = blockId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x; return threadId; } // create a texture object from memory and store it in a 64-bit pointer void CreateIntTexture(int** deviceBuffer, const int* hostBuffer, int sizeInBytes) { int* buffer; cudaMalloc(&buffer, sizeInBytes); cudaMemcpy(buffer, hostBuffer, sizeInBytes, cudaMemcpyHostToDevice); #if USE_TEXTURES // create texture object cudaResourceDesc resDesc; memset(&resDesc, 0, sizeof(resDesc)); resDesc.resType = cudaResourceTypeLinear; resDesc.res.linear.devPtr = (void*)buffer; resDesc.res.linear.desc.f = cudaChannelFormatKindSigned; resDesc.res.linear.desc.x = 32; // bits per channel resDesc.res.linear.sizeInBytes = sizeInBytes; cudaTextureDesc texDesc; memset(&texDesc, 0, sizeof(texDesc)); texDesc.readMode = cudaReadModeElementType; cudaTextureObject_t tex; cudaCreateTextureObject(&tex, &resDesc, &texDesc, NULL); // cast to pointer *deviceBuffer = (int*)tex; #else *deviceBuffer = buffer; #endif } // create a texture object from memory and store it in a 64-bit pointer void CreateFloatTexture(float** deviceBuffer, const float* hostBuffer, int sizeInBytes) { float* buffer; cudaMalloc(&buffer, sizeInBytes); cudaMemcpy(buffer, hostBuffer, sizeInBytes, cudaMemcpyHostToDevice); #if USE_TEXTURES // create texture object cudaResourceDesc resDesc; memset(&resDesc, 0, sizeof(resDesc)); resDesc.resType = cudaResourceTypeLinear; resDesc.res.linear.devPtr = (void*)buffer; resDesc.res.linear.desc.f = cudaChannelFormatKindFloat; resDesc.res.linear.desc.x = 32; // bits per channel resDesc.res.linear.sizeInBytes = sizeInBytes; cudaTextureDesc texDesc; memset(&texDesc, 0, sizeof(texDesc)); texDesc.readMode = cudaReadModeElementType; cudaTextureObject_t tex; cudaCreateTextureObject(&tex, &resDesc, &texDesc, NULL); // cast to pointer *deviceBuffer = (float*)tex; #else *deviceBuffer = buffer; #endif } // create a texture object from memory and store it in a 64-bit pointer void CreateVec4Texture(Vec4** deviceBuffer, const Vec4* hostBuffer, int sizeInBytes) { Vec4* buffer; cudaMalloc(&buffer, sizeInBytes); cudaMemcpy(buffer, hostBuffer, sizeInBytes, cudaMemcpyHostToDevice); #if USE_TEXTURES // create texture object cudaResourceDesc resDesc; memset(&resDesc, 0, sizeof(resDesc)); resDesc.resType = cudaResourceTypeLinear; resDesc.res.linear.devPtr = (void*)buffer; resDesc.res.linear.desc.f = cudaChannelFormatKindFloat; resDesc.res.linear.desc.x = 32; // bits per channel resDesc.res.linear.desc.y = 32; // bits per channel resDesc.res.linear.desc.z = 32; // bits per channel resDesc.res.linear.desc.w = 32; // bits per channel resDesc.res.linear.sizeInBytes = sizeInBytes; cudaTextureDesc texDesc; memset(&texDesc, 0, sizeof(texDesc)); texDesc.readMode = cudaReadModeElementType; cudaTextureObject_t tex; cudaCreateTextureObject(&tex, &resDesc, &texDesc, NULL); // cast to pointer *deviceBuffer = (Vec4*)tex; #else *deviceBuffer = buffer; #endif } MeshGeometry CreateGPUMesh(const MeshGeometry& hostMesh) { const int numVertices = hostMesh.numVertices; const int numIndices = hostMesh.numIndices; const int numNodes = hostMesh.numNodes; MeshGeometry gpuMesh; #if USE_TEXTURES // expand 
positions out to vec4 std::vector<Vec4> positions; std::vector<Vec4> normals; for (int i=0; i < numVertices; ++i) { positions.push_back(Vec4(hostMesh.positions[i], 1.0f)); normals.push_back(Vec4(hostMesh.normals[i], 0.0f)); } CreateVec4Texture((Vec4**)&gpuMesh.positions, (Vec4*)&positions[0], sizeof(Vec4)*numVertices); CreateVec4Texture((Vec4**)&gpuMesh.normals, (Vec4*)&normals[0], sizeof(Vec4)*numVertices); #else CreateFloatTexture((float**)&gpuMesh.positions, (float*)&hostMesh.positions[0], sizeof(Vec3)*numVertices); CreateFloatTexture((float**)&gpuMesh.normals, (float*)&hostMesh.normals[0], sizeof(Vec3)*numVertices); #endif CreateIntTexture((int**)&gpuMesh.indices, (int*)&hostMesh.indices[0], sizeof(int)*numIndices); /* cudaMalloc((Vec3**)&gpuMesh.positions, sizeof(Vec3)*numVertices); cudaMemcpy((Vec3*)gpuMesh.positions, &hostMesh.positions[0], sizeof(Vec3)*numVertices, cudaMemcpyHostToDevice); cudaMalloc((Vec3**)&gpuMesh.normals, sizeof(Vec3)*numVertices); cudaMemcpy((Vec3*)gpuMesh.normals, &hostMesh.normals[0], sizeof(Vec3)*numVertices, cudaMemcpyHostToDevice); cudaMalloc((int**)&gpuMesh.indices, sizeof(int)*numIndices); cudaMemcpy((int*)gpuMesh.indices, &hostMesh.indices[0], sizeof(int)*numIndices, cudaMemcpyHostToDevice); */ //cudaMalloc((BVHNode**)&gpuMesh.nodes, sizeof(BVHNode)*numNodes); //cudaMemcpy((BVHNode*)gpuMesh.nodes, &hostMesh.nodes[0], sizeof(BVHNode)*numNodes, cudaMemcpyHostToDevice); CreateVec4Texture((Vec4**)&gpuMesh.nodes, (Vec4*)&hostMesh.nodes[0], sizeof(BVHNode)*numNodes); cudaMalloc((float**)&gpuMesh.cdf, sizeof(float)*numIndices/3); cudaMemcpy((float*)gpuMesh.cdf, &hostMesh.cdf[0], sizeof(float)*numIndices/3, cudaMemcpyHostToDevice); gpuMesh.numIndices = numIndices; gpuMesh.numVertices = numVertices; gpuMesh.numNodes = numNodes; gpuMesh.area = hostMesh.area; return gpuMesh; } void DestroyGPUMesh(const MeshGeometry& m) { } Texture CreateGPUTexture(const Texture& tex) { const int numTexels = tex.width*tex.height*tex.depth; Texture gpuTex = tex; cudaMalloc((void**)&gpuTex.data, sizeof(float)*numTexels); cudaMemcpy(gpuTex.data, tex.data, sizeof(float)*numTexels, cudaMemcpyHostToDevice); return gpuTex; } Sky CreateGPUSky(const Sky& sky) { Sky gpuSky = sky; // copy probe if (sky.probe.valid) { const int numPixels = sky.probe.width*sky.probe.height; // copy pixel data CreateVec4Texture((Vec4**)&gpuSky.probe.data, sky.probe.data, numPixels*sizeof(float)*4); // copy cdf tables CreateFloatTexture((float**)&gpuSky.probe.cdfValuesX, sky.probe.cdfValuesX, numPixels*sizeof(float)); CreateFloatTexture((float**)&gpuSky.probe.pdfValuesX, sky.probe.pdfValuesX, numPixels*sizeof(float)); CreateFloatTexture((float**)&gpuSky.probe.cdfValuesY, sky.probe.cdfValuesY, sky.probe.height*sizeof(float)); CreateFloatTexture((float**)&gpuSky.probe.pdfValuesY, sky.probe.pdfValuesY, sky.probe.height*sizeof(float)); } return gpuSky; } void DestroyGPUSky(const Sky& gpuSky) { if (gpuSky.probe.valid) { // todo } } #if 1 inline __device__ bool Trace(const GPUScene& scene, const Vec3& rayOrigin, const Vec3& rayDir, float rayTime, float& outT, Vec3& outNormal, const Primitive** RESTRICT outPrimitive) { int stack[64]; stack[0] = 0; unsigned int count = 1; Vec3 dir, rcpDir; Vec3 origin; rcpDir.x = 1.0f/rayDir.x; rcpDir.y = 1.0f/rayDir.y; rcpDir.z = 1.0f/rayDir.z; origin = rayOrigin; dir = rayDir; const BVHNode* RESTRICT root = scene.bvh.nodes; MeshGeometry mesh; int primitiveIndex = -1; float closestT = FLT_MAX; //float closestU; float closestV; float closestW; Vec3 closestNormal; int closestPrimitive = 
-1; int closestTri; while(count) { const int nodeIndex = stack[--count]; if (nodeIndex < 0) { // reset to scene bvh dir and address rcpDir.x = 1.0f/rayDir.x; rcpDir.y = 1.0f/rayDir.y; rcpDir.z = 1.0f/rayDir.z; origin = rayOrigin; dir = rayDir; root = scene.bvh.nodes; primitiveIndex = -1; continue; } BVHNode node = fetchNode(root, nodeIndex); int leftIndex = node.leftIndex; int rightIndex = node.rightIndex; if (node.leaf) { if (primitiveIndex < 0) { const Primitive& p = scene.primitives[leftIndex]; Transform transform = InterpolateTransform(p.startTransform, p.endTransform, rayTime); switch (p.type) { case eSphere: { float minT, maxT; Vec3 n; bool hit = IntersectRaySphere(transform.p, p.sphere.radius*transform.s, origin, dir, minT, maxT, &n); if (hit && minT < closestT) { closestT = minT; closestNormal = n; closestPrimitive = leftIndex; } break; } case ePlane: { float t; bool hit = IntersectRayPlane(origin, dir, (const Vec4&)p.plane, t); if (hit && t < closestT) { closestT = t; closestNormal = (const Vec3&)p.plane; closestPrimitive = leftIndex; } break; } case eMesh: { // push a back-tracking marker in the stack stack[count++] = -1; // push root of the mesh bvh stack[count++] = 0; // transform ray to primitive local space origin = InverseTransformPoint(transform, rayOrigin); dir = InverseTransformVector(transform, rayDir); rcpDir.x = 1.0f/dir.x; rcpDir.y = 1.0f/dir.y; rcpDir.z = 1.0f/dir.z; // set bvh and mesh sources root = p.mesh.nodes; mesh = p.mesh; primitiveIndex = leftIndex; break; } }; } else { // mesh mode int i0 = fetchInt(mesh.indices, leftIndex*3+0); int i1 = fetchInt(mesh.indices, leftIndex*3+1); int i2 = fetchInt(mesh.indices, leftIndex*3+2); const Vec3 a = fetchVec3(mesh.positions, i0); const Vec3 b = fetchVec3(mesh.positions, i1); const Vec3 c = fetchVec3(mesh.positions, i2); float t, u, v, w; float sign; Vec3 n; //if (IntersectRayTri(rayOrigin, rayDir, a, b, c, t, u, v, w, &n)) if (IntersectRayTriTwoSided(origin, dir, a, b, c, t, u, v, w, sign, &n)) { if (t > 0.0f && t < closestT) { closestT = t; //closestU = u; closestV = v; closestW = w; closestTri = leftIndex; closestNormal = n*sign; closestPrimitive = primitiveIndex; } } } } else { // check children BVHNode left = fetchNode(root, leftIndex); BVHNode right = fetchNode(root, rightIndex); float tLeft; bool hitLeft = IntersectRayAABBFast(origin, rcpDir, left.bounds.lower, left.bounds.upper, tLeft);// && tLeft < closestT; float tRight; bool hitRight = IntersectRayAABBFast(origin, rcpDir, right.bounds.lower, right.bounds.upper, tRight);// && tRight < closestT; // traverse closest first if (hitLeft && hitRight && (tLeft < tRight)) { //Swap(leftIndex, rightIndex); } if (hitLeft) stack[count++] = leftIndex; if (hitRight) stack[count++] = rightIndex; } } if (closestPrimitive >= 0) { const Primitive& p = scene.primitives[closestPrimitive]; if (p.type == eMesh) { Transform transform = InterpolateTransform(p.startTransform, p.endTransform, rayTime); // interpolate vertex normals int i0 = fetchInt(p.mesh.indices, closestTri*3+0); int i1 = fetchInt(p.mesh.indices, closestTri*3+1); int i2 = fetchInt(p.mesh.indices, closestTri*3+2); const Vec3 n1 = fetchVec3(p.mesh.normals, i0); const Vec3 n2 = fetchVec3(p.mesh.normals, i1); const Vec3 n3 = fetchVec3(p.mesh.normals, i2); Vec3 smoothNormal = (1.0f-closestV-closestW)*n1 + closestV*n2 + closestW*n3; // ensure smooth normal lies on the same side of the geometric normal if (Dot(smoothNormal, closestNormal) < 0.0f) smoothNormal *= -1.0f; closestNormal = SafeNormalize(TransformVector(transform, 
smoothNormal), closestNormal); } outT = closestT; outNormal = FaceForward(closestNormal, -rayDir); if (outPrimitive) *outPrimitive = &p; return true; } else { // no hit return false; } } #else // trace a ray against the scene returning the closest intersection inline __device__ bool Trace(const GPUScene& scene, const Vec3& rayOrigin, const Vec3& rayDir, float rayTime, float& outT, Vec3& outNormal, const Primitive** outPrimitive) { #if 0 struct Callback { float minT; Vec3 closestNormal; const Primitive* closestPrimitive; const Ray& ray; const GPUScene& scene; CUDA_CALLABLE inline Callback(const GPUScene& s, const Ray& r) : minT(REAL_MAX), closestPrimitive(NULL), ray(r), scene(s) { } CUDA_CALLABLE inline void operator()(int index) { float t; Vec3 n, ns; const Primitive& primitive = scene.primitives[index]; if (PrimitiveIntersect(primitive, ray, t, &n)) { if (t < minT && t > 0.0f) { minT = t; closestPrimitive = &primitive; closestNormal = n; } } } }; Callback callback(scene, ray); QueryBVH(callback, scene.bvh.nodes, ray.origin, ray.dir); outT = callback.minT; outNormal = FaceForward(callback.closestNormal, -ray.dir); if (outPrimitive) *outPrimitive = callback.closestPrimitive; return callback.closestPrimitive != NULL; #else float minT = REAL_MAX; const Primitive* closestPrimitive = NULL; Vec3 closestNormal(0.0f); for (int i=0; i < scene.numPrimitives; ++i) { const Primitive& primitive = scene.primitives[i]; float t; Vec3 n; if (PrimitiveIntersect(primitive, Ray(rayOrigin, rayDir, rayTime), t, &n)) { if (t < minT && t > 0.0f) { minT = t; closestPrimitive = &primitive; closestNormal = n; } } } outT = minT; outNormal = FaceForward(closestNormal, -rayDir); if (outPrimitive) *outPrimitive = closestPrimitive; return closestPrimitive != NULL; #endif } #endif __device__ inline float SampleTexture(const Texture& map, int i, int j, int k) { int x = int(Abs(i))%map.width; int y = int(Abs(j))%map.height; int z = int(Abs(k))%map.depth; return map.data[z*map.width*map.height + y*map.width + x]; } __device__ inline float LinearInterp(const Texture& map, const Vec3& pos) { int i = floorf(pos.x*map.width); int j = floorf(pos.y*map.height); int k = floorf(pos.z*map.depth); // trilinear interpolation float tx = pos.x*map.width-i; float ty = pos.y*map.height-j; float tz = pos.z*map.depth-k; float a = Lerp(SampleTexture(map, i, j, k), SampleTexture(map, i, j, k+1), tz); float b = Lerp(SampleTexture(map, i+1, j, k), SampleTexture(map, i+1, j, k+1), tz); float c = Lerp(SampleTexture(map, i, j+1, k), SampleTexture(map, i, j+1, k+1), tz); float d = Lerp(SampleTexture(map, i+1, j+1, k), SampleTexture(map, i+1, j+1, k+1), tz); float e = Lerp(a, b, tx); float f = Lerp(c, d, tx); float g = Lerp(e, f, ty); return g; } __device__ inline Vec3 EvaluateBumpNormal(const Vec3& surfaceNormal, const Vec3& surfacePos, const Texture& bumpMap, const Vec3& bumpTile, float bumpStrength, Random& rand) { Vec3 u, v; BasisFromVector(surfaceNormal, &u, &v); float eps = 0.01f; Vec3 dpdu = u + bumpStrength*surfaceNormal*(LinearInterp(bumpMap, bumpTile*(surfacePos)+u*eps) - LinearInterp(bumpMap, bumpTile*surfacePos))/eps; Vec3 dpdv = v + bumpStrength*surfaceNormal*(LinearInterp(bumpMap, bumpTile*(surfacePos)+v*eps) - LinearInterp(bumpMap, bumpTile*surfacePos))/eps; return SafeNormalize(Cross(dpdu, dpdv), surfaceNormal); } __device__ inline Vec3 SampleLights(const GPUScene& scene, const Primitive& surfacePrimitive, float etaI, float etaO, const Vec3& surfacePos, const Vec3& surfaceNormal, const Vec3& shadingNormal, const Vec3& wo, float time, 
Random& rand) { Vec3 sum(0.0f); if (scene.sky.probe.valid) { for (int i=0; i < kProbeSamples; ++i) { Vec3 skyColor; float skyPdf; Vec3 wi; ProbeSample(scene.sky.probe, wi, skyColor, skyPdf, rand); //wi = UniformSampleSphere(rand); //skyColor = ProbeEval(scene.sky.probe, ProbeDirToUV(wi)); //skyPdf = 0.5f*kInv2Pi; //if (Dot(wi, surfaceNormal) <= 0.0f) // continue; // check if occluded float t; Vec3 n; if (Trace(scene, surfacePos + FaceForward(surfaceNormal, wi)*kRayEpsilon, wi, time, t, n, NULL) == false) { float bsdfPdf = BSDFPdf(surfacePrimitive.material, etaI, etaO, surfacePos, surfaceNormal, wo, wi); Vec3 f = BSDFEval(surfacePrimitive.material, etaI, etaO, surfacePos, surfaceNormal, wo, wi); if (bsdfPdf > 0.0f) { int N = kProbeSamples+kBsdfSamples; float cbsdf = kBsdfSamples/N; float csky = float(kProbeSamples)/N; float weight = csky*skyPdf/(cbsdf*bsdfPdf + csky*skyPdf); Validate(weight); if (weight > 0.0f) sum += weight*skyColor*f*Abs(Dot(wi, surfaceNormal))/skyPdf; } } } if (kProbeSamples > 0) sum /= float(kProbeSamples); } for (int i=0; i < scene.numLights; ++i) { // assume all lights are area lights for now const Primitive& lightPrimitive = scene.lights[i]; Vec3 L(0.0f); int numSamples = lightPrimitive.lightSamples; if (numSamples == 0) continue; for (int s=0; s < numSamples; ++s) { // sample light source Vec3 lightPos; Vec3 lightNormal; PrimitiveSample(lightPrimitive, time, lightPos, lightNormal, rand); Vec3 wi = lightPos-surfacePos; float dSq = LengthSq(wi); wi /= sqrtf(dSq); // light is behind surface //if (Dot(wi, surfaceNormal) <= 0.0f) //continue; // surface is behind light if (Dot(wi, lightNormal) >= 0.0f) continue; // check visibility float t; Vec3 n; if (Trace(scene, surfacePos + FaceForward(surfaceNormal, wi)*kRayEpsilon, wi, time, t, n, NULL)) { float tSq = t*t; // if our next hit was further than distance to light then accept // sample, this works for portal sampling where you have a large light // that you sample through a small window const float kTolerance = 1.e-2f; if (fabsf(t - sqrtf(dSq)) <= kTolerance) { const float nl = Abs(Dot(lightNormal, wi)); // light pdf with respect to area and convert to pdf with respect to solid angle float lightArea = PrimitiveArea(lightPrimitive); float lightPdf = ((1.0f/lightArea)*tSq)/nl; // bsdf pdf for light's direction float bsdfPdf = BSDFPdf(surfacePrimitive.material, etaI, etaO, surfacePos, shadingNormal, wo, wi); Vec3 f = BSDFEval(surfacePrimitive.material, etaI, etaO, surfacePos, shadingNormal, wo, wi); // this branch is only necessary to exclude specular paths from light sampling (always have zero brdf) // todo: make BSDFEval alwasy return zero for pure specular paths and roll specular eval into BSDFSample() if (bsdfPdf > 0.0f) { // calculate relative weighting of the light and bsdf sampling int N = lightPrimitive.lightSamples+kBsdfSamples; float cbsdf = kBsdfSamples/N; float clight = float(lightPrimitive.lightSamples)/N; float weight = clight*lightPdf/(cbsdf*bsdfPdf + clight*lightPdf); L += weight*f*lightPrimitive.material.emission*(Abs(Dot(wi, shadingNormal))/Max(1.e-3f, lightPdf)); } } } } sum += L * (1.0f/numSamples); } return sum; } struct Tile { int x; int y; int width; int height; }; enum PathMode { ePathGenerate, ePathAdvance, ePathProbeSample, ePathLightSample, ePathBsdfSample, ePathTerminate, ePathDisabled, }; struct PathState { Vec3* __restrict__ rayOrigin; Vec3* __restrict__ rayDir; float* __restrict__ rayTime; Vec3* __restrict__ pos; Vec3* __restrict__ normal; int* __restrict__ depth; Vec3* __restrict__ 
pathThroughput; Vec3* __restrict__ absorption; const Primitive** __restrict__ primitive; Vec3* __restrict__ totalRadiance; float* __restrict__ etaI; float* __restrict__ etaO; PathMode* __restrict__ mode; // pdf from last brdf sampling float* __restrict__ bsdfPdf; BSDFType* __restrict__ bsdfType; // sample coordinate float* __restrict__ rasterX; float* __restrict__ rasterY; Random* __restrict__ rand; }; template <typename T> void Alloc(T** ptr, int num) { cudaMalloc(ptr, sizeof(T)*num); cudaMemset(*ptr, 0, sizeof(T)*num); } PathState AllocatePaths(int num) { PathState state; Alloc(&state.rayOrigin, num); Alloc(&state.rayDir, num); Alloc(&state.rayTime, num); Alloc(&state.pos, num); Alloc(&state.normal, num); Alloc(&state.depth, num); Alloc(&state.pathThroughput, num); Alloc(&state.absorption, num); Alloc(&state.primitive, num); Alloc(&state.totalRadiance, num); Alloc(&state.etaI, num); Alloc(&state.etaO, num); Alloc(&state.mode, num); Alloc(&state.bsdfPdf, num); Alloc(&state.bsdfType, num); Alloc(&state.rasterX, num); Alloc(&state.rasterY, num); Alloc(&state.rand, num); return state; } void FreePaths(PathState state) { // todo: } LAUNCH_BOUNDS __global__ void TerminatePaths(Color* output, Options options, PathState paths, int numPaths) { const int i = getGlobalIndex(); { if (paths.mode[i] != ePathDisabled) { float rasterX = paths.rasterX[i]; float rasterY = paths.rasterY[i]; Vec3 sample = paths.totalRadiance[i]; // sample = paths[i].normal*0.5f + 0.5f; int width = options.width; int height = options.height; Filter filter = options.filter; switch (filter.type) { case eFilterBox: { int x = Clamp(int(rasterX), 0, width-1); int y = Clamp(int(rasterY), 0, height-1); output[y*width+x] += Color(sample.x, sample.y, sample.z, 1.0f); break; } case eFilterGaussian: { int startX = Max(0, int(rasterX - filter.width)); int startY = Max(0, int(rasterY - filter.width)); int endX = Min(int(rasterX + filter.width), width-1); int endY = Min(int(rasterY + filter.width), height-1); Vec3 c = ClampLength(sample, options.clamp); for (int x=startX; x <= endX; ++x) { for (int y=startY; y <= endY; ++y) { float w = filter.Eval(x-rasterX, y-rasterY); //output[(height-1-y)*width+x] += Vec3(Min(sample.x, clamp), Min(sample.y, clamp), Min(sample.z, clamp), 1.0f)*w; const int index = y*width+x; atomicAdd(&output[index].x, c.x*w); atomicAdd(&output[index].y, c.y*w); atomicAdd(&output[index].z, c.z*w); atomicAdd(&output[index].w, w); } } break; } }; } paths.mode[i] = ePathGenerate; } } LAUNCH_BOUNDS __global__ void SampleLights(GPUScene scene, PathState paths, int numPaths) { const int i = getGlobalIndex(); { if (paths.mode[i] == ePathLightSample) { // calculate a basis for this hit point const Primitive* hit = paths.primitive[i]; float etaI = paths.etaI[i]; float etaO = paths.etaO[i]; const Vec3 rayDir = paths.rayDir[i]; float rayTime = paths.rayTime[i]; const Vec3 p = paths.pos[i]; const Vec3 n = paths.normal[i]; // integrate direct light over hemisphere paths.totalRadiance[i] += paths.pathThroughput[i]*SampleLights(scene, *hit, etaI, etaO, p, n, n, -rayDir, rayTime, paths.rand[i]); paths.mode[i] = ePathBsdfSample; } } } LAUNCH_BOUNDS __global__ void SampleBsdfs(PathState paths, int numPaths) { const int i = getGlobalIndex(); { if (paths.mode[i] == ePathBsdfSample) { const Vec3 p = paths.pos[i]; const Vec3 n = paths.normal[i]; const Vec3 rayDir = paths.rayDir[i]; const Primitive* hit = paths.primitive[i]; Random& rand = paths.rand[i]; float etaI = paths.etaI[i]; float etaO = paths.etaO[i]; // integrate indirect light by 
sampling BRDF Vec3 u, v; BasisFromVector(n, &u, &v); Vec3 bsdfDir; BSDFType bsdfType; float bsdfPdf; BSDFSample(hit->material, etaI, etaO, p, u, v, n, -rayDir, bsdfDir, bsdfPdf, bsdfType, rand); if (bsdfPdf <= 0.0f) { paths.mode[i] = ePathTerminate; } else { // reflectance Vec3 f = BSDFEval(hit->material, etaI, etaO, p, n, -rayDir, bsdfDir); // update ray medium if we are transmitting through the material if (Dot(bsdfDir, n) <= 0.0f) { paths.etaI[i] = etaO; paths.bsdfType[i] = eTransmitted; if (etaI != 1.0f) { // entering a medium, update the aborption (assume zero in air) paths.absorption[i] = hit->material.absorption; } } else { paths.bsdfType[i] = eReflected; } // update throughput with primitive reflectance paths.pathThroughput[i] *= f * Abs(Dot(n, bsdfDir))/bsdfPdf; paths.bsdfPdf[i] = bsdfPdf; paths.bsdfType[i] = bsdfType; paths.rayDir[i] = bsdfDir; paths.rayOrigin[i] = p + FaceForward(n, bsdfDir)*kRayEpsilon; paths.mode[i] = ePathAdvance; } } } } LAUNCH_BOUNDS __global__ void SampleProbes(PathState paths, int numPaths) { } LAUNCH_BOUNDS __global__ void AdvancePaths(GPUScene scene, PathState paths, int numPaths) { const int i = getGlobalIndex(); { if (paths.mode[i] == ePathAdvance) { Vec3 rayOrigin = paths.rayOrigin[i]; Vec3 rayDir = paths.rayDir[i]; float rayTime = paths.rayTime[i]; float etaI = paths.etaI[i]; Vec3 pathThroughput = paths.pathThroughput[i]; Vec3 n; float t; const Primitive* hit; // find closest hit if (Trace(scene, rayOrigin, rayDir, rayTime, t, n, &hit)) { float etaO; // index of refraction for transmission, 1.0 corresponds to air if (etaI == 1.0f) { etaO = hit->material.GetIndexOfRefraction(); } else { // returning to free space etaO = 1.0f; } pathThroughput *= Exp(-paths.absorption[i]*t); if (paths.depth[i] == 0) { // first trace is our only chance to add contribution from directly visible light sources paths.totalRadiance[i] += hit->material.emission; } else if (kBsdfSamples > 0) { // area pdf that this dir was already included by the light sampling from previous step float lightArea = PrimitiveArea(*hit); if (lightArea > 0.0f) { // convert to pdf with respect to solid angle float lightPdf = ((1.0f/lightArea)*t*t)/Clamp(Dot(-rayDir, n), 1.e-3f, 1.0f); // calculate weight for bsdf sampling int N = hit->lightSamples+kBsdfSamples; float cbsdf = kBsdfSamples/N; float clight = float(hit->lightSamples)/N; float weight = cbsdf*paths.bsdfPdf[i]/(cbsdf*paths.bsdfPdf[i] + clight*lightPdf); // specular paths have zero chance of being included by direct light sampling (zero pdf) if (paths.bsdfType[i] == eSpecular) weight = 1.0f; // pathThroughput already includes the bsdf pdf paths.totalRadiance[i] += weight*pathThroughput*hit->material.emission; } } // terminate ray if we hit a light source if (hit->lightSamples) { paths.mode[i] = ePathTerminate; } else { // update throughput based on absorption through the medium paths.pos[i] = rayOrigin + rayDir*t; paths.normal[i] = n; paths.primitive[i] = hit; paths.etaO[i] = etaO; paths.pathThroughput[i] = pathThroughput; paths.depth[i] += 1; paths.mode[i] = ePathLightSample; } } else { // todo: sky // no hit, terminate path paths.mode[i] = ePathTerminate; } } } } LAUNCH_BOUNDS __global__ void GeneratePaths(Camera camera, CameraSampler sampler, Tile tile, int seed, PathState paths, int numPaths) { const int tx = blockIdx.x*blockDim.x; const int ty = blockIdx.y*blockDim.y; const int x = tx + threadIdx.x + tile.x; const int y = ty + threadIdx.y + tile.y; const int i = getGlobalIndex(); { if (paths.mode[i] == ePathGenerate || paths.mode[i] 
== ePathDisabled || paths.mode[i] == ePathTerminate) { // if we're inside the tile if (threadIdx.x < tile.width && threadIdx.y < tile.height) { Random rand(i + tile.y*tile.width + tile.x + seed); // offset //float x, y, t; //StratifiedSample2D(i, tile.width, tile.height, rand, x, y); float t; StratifiedSample1D(i, 64, rand, t); // shutter time float time = Lerp(camera.shutterStart, camera.shutterEnd, t); //float px = tile.x + x*tile.width; //float py = tile.y + y*tile.height; float px = x + rand.Randf(-0.5f, 0.5f); float py = y + rand.Randf(-0.5f, 0.5f); Vec3 origin, dir; sampler.GenerateRay(px, py, origin, dir); // advance paths paths.depth[i] = 0; paths.rayOrigin[i] = origin; paths.rayDir[i] = dir; paths.rayTime[i] = time; paths.mode[i] = ePathAdvance; paths.rand[i] = rand; paths.totalRadiance[i] = 0.0f; paths.pathThroughput[i] = 1.0f; paths.etaI[i] = 1.0f; paths.bsdfType[i] = eReflected; paths.bsdfPdf[i] = 1.0f; paths.rasterX[i] = px; paths.rasterY[i] = py; } else { paths.mode[i] = ePathDisabled; } } } } //LAUNCH_BOUNDS __global__ void VisualizeNormals(GPUScene scene, PathState paths, int numPaths) { const int i = getGlobalIndex(); if (i < numPaths) { Vec3 rayOrigin = paths.rayOrigin[i]; Vec3 rayDir = paths.rayDir[i]; Vec3 n; float t; // find closest hit if (Trace(scene, rayOrigin, rayDir, 0.0f, t, n, NULL)) { paths.totalRadiance[i] = n; } paths.mode[i] = ePathTerminate; } } struct GpuWaveFrontRenderer : public Renderer { Color* output = NULL; GPUScene sceneGPU; Random rand; int tileWidth; int tileHeight; PathState paths; // map id to geometry struct std::map<int, MeshGeometry> gpuMeshes; GpuWaveFrontRenderer(const Scene* s) { // build GPU primitive and light lists std::vector<Primitive> primitives; std::vector<Primitive> lights; for (int i=0; i < s->primitives.size(); ++i) { Primitive primitive = s->primitives[i]; // if mesh primitive then copy to the GPU if (primitive.type == eMesh) { // see if we have already uploaded the mesh to the GPU if (gpuMeshes.find(primitive.mesh.id) == gpuMeshes.end()) { MeshGeometry geo = CreateGPUMesh(primitive.mesh); gpuMeshes[geo.id] = geo; // replace CPU mesh with GPU copy primitive.mesh = geo; } } if (primitive.material.bump > 0.0f) { primitive.material.bumpMap = CreateGPUTexture(primitive.material.bumpMap); } // create explicit list of light primitives if (primitive.lightSamples) { lights.push_back(primitive); } primitives.push_back(primitive); } // convert scene BVH CreateVec4Texture((Vec4**)&(sceneGPU.bvh.nodes), (Vec4*)s->bvh.nodes, sizeof(BVHNode)*s->bvh.numNodes); sceneGPU.bvh.numNodes = s->bvh.numNodes; // upload to the GPU sceneGPU.numPrimitives = primitives.size(); sceneGPU.numLights = lights.size(); if (sceneGPU.numLights > 0) { cudaMalloc(&sceneGPU.lights, sizeof(Primitive)*lights.size()); cudaMemcpy(sceneGPU.lights, &lights[0], sizeof(Primitive)*lights.size(), cudaMemcpyHostToDevice); } if (sceneGPU.numPrimitives > 0) { cudaMalloc(&sceneGPU.primitives, sizeof(Primitive)*primitives.size()); cudaMemcpy(sceneGPU.primitives, &primitives[0], sizeof(Primitive)*primitives.size(), cudaMemcpyHostToDevice); } // copy sky and probe texture sceneGPU.sky = CreateGPUSky(s->sky); tileWidth = 1024; tileHeight = 1024; const int numPaths = tileWidth*tileHeight; // allocate paths //cudaMalloc(&paths, sizeof(PathState)*numPaths); //cudaMemset(paths, 0, sizeof(PathState)*numPaths); paths = AllocatePaths(numPaths); } virtual ~GpuWaveFrontRenderer() { cudaFree(output); cudaFree(sceneGPU.primitives); cudaFree(sceneGPU.lights); FreePaths(paths); } void Init(int 
width, int height) { cudaFree(output); cudaMalloc(&output, sizeof(Color)*width*height); cudaMemset(output, 0, sizeof(Color)*width*height); } void Render(const Camera& camera, const Options& options, Color* outputHost) { std::vector<Tile> tiles; const int tilesx = (options.width + tileWidth - 1)/tileWidth; const int tilesy = (options.height + tileHeight - 1)/tileHeight; for (int y=0; y < tilesy; ++y) { for (int x=0; x < tilesx; ++x) { Tile tile; tile.x = x*tileWidth; tile.y = y*tileHeight; tile.width = Min(tileWidth, options.width-tile.x); tile.height = Min(tileHeight, options.height-tile.y); tiles.push_back(tile); } } const int numPaths = tileWidth*tileHeight; // create a sampler for the camera CameraSampler sampler( Transform(camera.position, camera.rotation), camera.fov, 0.001f, 1.0f, options.width, options.height); for (int tileIndex=0; tileIndex < tiles.size(); ++tileIndex) { Tile tile = tiles[tileIndex]; // a tile consists of many thread blocks const int blockWidth = 16; const int blockHeight = 16; const int gridWidth = (tile.width + blockWidth - 1)/blockWidth; const int gridHeight = (tile.height + blockHeight - 1)/blockHeight; dim3 blockDim(blockWidth, blockHeight); dim3 gridDim(gridWidth, gridHeight); /* const int kNumThreadsPerBlock = 256; const int kNumBlocks = (numPaths + kNumThreadsPerBlock - 1)/kNumThreadsPerBlock; dim3 gridDim(kNumBlocks); dim3 blockDim(kNumThreadsPerBlock); */ GeneratePaths<<<gridDim, blockDim>>>(camera, sampler, tile, rand.Rand(), paths, numPaths); if (options.mode == eNormals) { VisualizeNormals<<<gridDim, blockDim>>>(sceneGPU, paths, numPaths); } else { for (int i=0; i < options.maxDepth; ++i) { AdvancePaths<<<gridDim, blockDim>>>(sceneGPU, paths, numPaths); SampleLights<<<gridDim, blockDim>>>(sceneGPU, paths, numPaths); //SampleProbes(); SampleBsdfs<<<gridDim, blockDim>>>(paths, numPaths); } } TerminatePaths<<<gridDim, blockDim>>>(output, options, paths, numPaths); } // copy back to output cudaMemcpy(outputHost, output, sizeof(Color)*options.width*options.height, cudaMemcpyDeviceToHost); } }; Renderer* CreateGpuWavefrontRenderer(const Scene* s) { return new GpuWaveFrontRenderer(s); }
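// A small standalone restatement of the multiple-importance-sampling weight used in
// SampleLights() and AdvancePaths() above. The helper below is a sketch, not part of the
// renderer; it only re-expresses the balance heuristic with per-strategy sample counts
// (light/probe samples versus kBsdfSamples), exactly as the inlined expressions do.
__host__ __device__ inline float MisLightWeight(int numLightSamples, float lightPdf,
                                                int numBsdfSamples,  float bsdfPdf)
{
    // Scale each strategy's pdf by the fraction of samples it contributes.
    float total  = float(numLightSamples + numBsdfSamples);
    float cLight = float(numLightSamples) / total;
    float cBsdf  = float(numBsdfSamples)  / total;

    // Weight for a direction generated by light sampling; swapping the roles of the two
    // pdf terms gives the corresponding weight for a direction generated by BSDF sampling,
    // which is what AdvancePaths() computes when it hits an emitter along a BSDF ray.
    return cLight * lightPdf / (cBsdf * bsdfPdf + cLight * lightPdf);
}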
#include <cudf/io/types.hpp> #include <cudf/utilities/span.hpp> #include <io/utilities/trie.cuh> #include "column_type_histogram.hpp" #include <rmm/device_uvector.hpp> #include <thrust/execution_policy.h> #include <thrust/iterator/reverse_iterator.h> #include <optional> using cudf::device_span; namespace cudf { namespace io { /** * @brief Structure for holding various options used when parsing and * converting CSV/json data to cuDF data type values. */ struct parse_options_view { char delimiter; char terminator; char quotechar; char decimal; char thousands; char comment; bool keepquotes; bool doublequote; bool dayfirst; bool skipblanklines; cudf::detail::trie_view trie_true; cudf::detail::trie_view trie_false; cudf::detail::trie_view trie_na; bool multi_delimiter; }; struct parse_options { char delimiter; char terminator; char quotechar; char decimal; char thousands; char comment; bool keepquotes; bool doublequote; bool dayfirst; bool skipblanklines; cudf::detail::optional_trie trie_true; cudf::detail::optional_trie trie_false; cudf::detail::optional_trie trie_na; bool multi_delimiter; [[nodiscard]] parse_options_view view() const { return {delimiter, terminator, quotechar, decimal, thousands, comment, keepquotes, doublequote, dayfirst, skipblanklines, cudf::detail::make_trie_view(trie_true), cudf::detail::make_trie_view(trie_false), cudf::detail::make_trie_view(trie_na), multi_delimiter}; } }; /** * @brief Returns the numeric value of an ASCII/UTF-8 character. Specialization * for integral types. Handles hexadecimal digits, both uppercase and lowercase. * If the character is not a valid numeric digit then `0` is returned and * valid_flag is set to false. * * @param c ASCII or UTF-8 character * @param valid_flag Set to false if input is not valid. Unchanged otherwise. * * @return uint8_t Numeric value of the character, or `0` */ template <typename T, std::enable_if_t<std::is_integral_v<T>>* = nullptr> constexpr uint8_t decode_digit(char c, bool* valid_flag) { if (c >= '0' && c <= '9') return c - '0'; if (c >= 'a' && c <= 'f') return c - 'a' + 10; if (c >= 'A' && c <= 'F') return c - 'A' + 10; *valid_flag = false; return 0; } /** * @brief Returns the numeric value of an ASCII/UTF-8 character. Specialization * for non-integral types. Handles only decimal digits. If the character is not * a valid numeric digit then `0` is returned and valid_flag is set to false. * * @param c ASCII or UTF-8 character * @param valid_flag Set to false if input is not valid. Unchanged otherwise. * * @return uint8_t Numeric value of the character, or `0` */ template <typename T, std::enable_if_t<!std::is_integral_v<T>>* = nullptr> constexpr uint8_t decode_digit(char c, bool* valid_flag) { if (c >= '0' && c <= '9') return c - '0'; *valid_flag = false; return 0; } // Converts character to lowercase. constexpr char to_lower(char const c) { return c >= 'A' && c <= 'Z' ? c + ('a' - 'A') : c; } /** * @brief Checks if string is infinity, case insensitive with/without sign * Valid infinity strings are inf, +inf, -inf, infinity, +infinity, -infinity * String comparison is case insensitive. * * @param begin Pointer to the first element of the string * @param end Pointer to the first element after the string * @return true if string is valid infinity, else false. 
*/ constexpr bool is_infinity(char const* begin, char const* end) { if (*begin == '-' || *begin == '+') begin++; char const* cinf = "infinity"; auto index = begin; while (index < end) { if (*cinf != to_lower(*index)) break; index++; cinf++; } return ((index == begin + 3 || index == begin + 8) && index >= end); } /** * @brief Parses a character string and returns its numeric value. * * @param begin Pointer to the first element of the string * @param end Pointer to the first element after the string * @param opts The global parsing behavior options * @param error_result Value to return on parse error * @tparam base Base (radix) to use for conversion * * @return The parsed and converted value */ template <typename T, int base = 10> constexpr T parse_numeric(const char* begin, const char* end, parse_options_view const& opts, T error_result = std::numeric_limits<T>::quiet_NaN()) { T value{}; bool all_digits_valid = true; // Handle negative values if necessary int32_t sign = (*begin == '-') ? -1 : 1; // Handle infinity if (std::is_floating_point_v<T> && is_infinity(begin, end)) { return sign * std::numeric_limits<T>::infinity(); } if (*begin == '-' || *begin == '+') begin++; // Skip over the "0x" prefix for hex notation if (base == 16 && begin + 2 < end && *begin == '0' && *(begin + 1) == 'x') { begin += 2; } // Handle the whole part of the number // auto index = begin; while (begin < end) { if (*begin == opts.decimal) { ++begin; break; } else if (base == 10 && (*begin == 'e' || *begin == 'E')) { break; } else if (*begin != opts.thousands && *begin != '+') { value = (value * base) + decode_digit<T>(*begin, &all_digits_valid); } ++begin; } if (std::is_floating_point_v<T>) { // Handle fractional part of the number if necessary double divisor = 1; while (begin < end) { if (*begin == 'e' || *begin == 'E') { ++begin; break; } else if (*begin != opts.thousands && *begin != '+') { divisor /= base; value += decode_digit<T>(*begin, &all_digits_valid) * divisor; } ++begin; } // Handle exponential part of the number if necessary if (begin < end) { const int32_t exponent_sign = *begin == '-' ? -1 : 1; if (*begin == '-' || *begin == '+') { ++begin; } int32_t exponent = 0; while (begin < end) { exponent = (exponent * 10) + decode_digit<T>(*(begin++), &all_digits_valid); } if (exponent != 0) { value *= exp10(double(exponent * exponent_sign)); } } } if (!all_digits_valid) { return error_result; } return value * sign; } namespace gpu { /** * @brief CUDA kernel iterates over the data until the end of the current field * * Also iterates over (one or more) delimiter characters after the field. * Function applies to formats with field delimiters and line terminators. * * @param begin Pointer to the first element of the string * @param end Pointer to the first element after the string * @param opts A set of parsing options * @param escape_char A boolean value to signify whether to consider `\` as escape character or * just a character. * * @return Pointer to the last character in the field, including the * delimiter(s) following the field data */ __device__ __inline__ char const* seek_field_end(char const* begin, char const* end, parse_options_view const& opts, bool escape_char = false) { bool quotation = false; auto current = begin; bool escape_next = false; while (current < end) { // Use simple logic to ignore control chars between any quote seq // Handles nominal cases including doublequotes within quotes, but // may not output exact failures as PANDAS for malformed fields. 
// Check for instances such as "a2\"bc" and "\\" if `escape_char` is true. if (*current == opts.quotechar and not escape_next) { quotation = !quotation; } else if (!quotation) { if (*current == opts.delimiter) { while (opts.multi_delimiter && (current + 1 < end) && *(current + 1) == opts.delimiter) { ++current; } break; } else if (*current == opts.terminator) { break; } else if (*current == '\r' && (current + 1 < end && *(current + 1) == '\n')) { --end; break; } } if (escape_char == true) { // If a escape character is encountered, escape next character in next loop. if (escape_next == false and *current == '\\') { escape_next = true; } else { escape_next = false; } } if (current < end) { current++; } } return current; } /** * @brief Lexicographically compare digits in input against string * representing an integer * * @param raw_data The pointer to beginning of character string * @param golden The pointer to beginning of character string representing * the value to be compared against * @return bool True if integer represented by character string is less * than or equal to golden data */ template <int N> __device__ __inline__ bool less_equal_than(const char* data, const char (&golden)[N]) { auto mismatch_pair = thrust::mismatch(thrust::seq, data, data + N - 1, golden); if (mismatch_pair.first != data + N - 1) { return *mismatch_pair.first <= *mismatch_pair.second; } else { // Exact match return true; } } /** * @brief Determine which counter to increment when a sequence of digits * and a parity sign is encountered. * * @param raw_data The pointer to beginning of character string * @param digit_count Total number of digits * @param stats Reference to structure with counters * @return Pointer to appropriate counter that belong to * the interpreted data type */ __device__ __inline__ cudf::size_type* infer_integral_field_counter(char const* data_begin, char const* data_end, bool is_negative, column_type_histogram& stats) { static constexpr char uint64_max_abs[] = "18446744073709551615"; static constexpr char int64_min_abs[] = "9223372036854775808"; static constexpr char int64_max_abs[] = "9223372036854775807"; auto digit_count = data_end - data_begin; // Remove preceding zeros if (digit_count >= (sizeof(int64_max_abs) - 1)) { // Trim zeros at the beginning of raw_data while (*data_begin == '0' && (data_begin < data_end)) { data_begin++; } } digit_count = data_end - data_begin; // After trimming the number of digits could be less than maximum // int64 digit count if (digit_count < (sizeof(int64_max_abs) - 1)) { // CASE 0 : Accept validity // If the length of the string representing the integer is smaller // than string length of Int64Max then count this as an integer // representable by int64 // If digit_count is 0 then ignore - sign, i.e. -000..00 should // be treated as a positive small integer return is_negative && (digit_count != 0) ? 
&stats.negative_small_int_count : &stats.positive_small_int_count; } else if (digit_count > (sizeof(uint64_max_abs) - 1)) { // CASE 1 : Reject validity // If the length of the string representing the integer is greater // than string length of UInt64Max then count this as a string // since it cannot be represented as an int64 or uint64 return &stats.string_count; } else if (digit_count == (sizeof(uint64_max_abs) - 1) && is_negative) { // A negative integer of length UInt64Max digit count cannot be represented // as a 64 bit integer return &stats.string_count; } if (digit_count == (sizeof(int64_max_abs) - 1) && is_negative) { return less_equal_than(data_begin, int64_min_abs) ? &stats.negative_small_int_count : &stats.string_count; } else if (digit_count == (sizeof(int64_max_abs) - 1) && !is_negative) { return less_equal_than(data_begin, int64_max_abs) ? &stats.positive_small_int_count : &stats.big_int_count; } else if (digit_count == (sizeof(uint64_max_abs) - 1)) { return less_equal_than(data_begin, uint64_max_abs) ? &stats.big_int_count : &stats.string_count; } return &stats.string_count; } } // namespace gpu /** * @brief Searches the input character array for each of characters in a set. * Sums up the number of occurrences. If the 'positions' parameter is not void*, * positions of all occurrences are stored in the output device array. * * @param[in] d_data Input character array in device memory * @param[in] keys Vector containing the keys to count in the buffer * @param[in] result_offset Offset to add to the output positions * @param[out] positions Array containing the output positions * @param[in] stream CUDA stream used for device memory operations and kernel launches * * @return cudf::size_type total number of occurrences */ template <class T> cudf::size_type find_all_from_set(device_span<char const> data, std::vector<char> const& keys, uint64_t result_offset, T* positions, rmm::cuda_stream_view stream); /** * @brief Searches the input character array for each of characters in a set. * Sums up the number of occurrences. If the 'positions' parameter is not void*, * positions of all occurrences are stored in the output device array. * * Does not load the entire file into the GPU memory at any time, so it can * be used to parse large files. Output array needs to be preallocated. * * @param[in] h_data Pointer to the input character array * @param[in] h_size Number of bytes in the input array * @param[in] keys Vector containing the keys to count in the buffer * @param[in] result_offset Offset to add to the output positions * @param[out] positions Array containing the output positions * @param[in] stream CUDA stream used for device memory operations and kernel launches * * @return cudf::size_type total number of occurrences */ template <class T> cudf::size_type find_all_from_set(host_span<char const> data, const std::vector<char>& keys, uint64_t result_offset, T* positions, rmm::cuda_stream_view stream); /** * @brief Searches the input character array for each of characters in a set * and sums up the number of occurrences. 
* * @param d_data Input data buffer in device memory * @param keys Vector containing the keys to count in the buffer * @param stream CUDA stream used for device memory operations and kernel launches * * @return cudf::size_type total number of occurrences */ cudf::size_type count_all_from_set(device_span<char const> data, std::vector<char> const& keys, rmm::cuda_stream_view stream); /** * @brief Searches the input character array for each of characters in a set * and sums up the number of occurrences. * * Does not load the entire buffer into the GPU memory at any time, so it can * be used with buffers of any size. * * @param h_data Pointer to the data in host memory * @param h_size Size of the input data, in bytes * @param keys Vector containing the keys to count in the buffer * @param stream CUDA stream used for device memory operations and kernel launches * * @return cudf::size_type total number of occurrences */ cudf::size_type count_all_from_set(host_span<char const> data, const std::vector<char>& keys, rmm::cuda_stream_view stream); /** * @brief Checks whether the given character is a whitespace character. * * @param[in] ch The character to check * * @return True if the input is whitespace, False otherwise */ __inline__ __device__ bool is_whitespace(char ch) { return ch == '\t' || ch == ' '; } /** * @brief Skips past the current character if it matches the given value. */ template <typename It> __inline__ __device__ It skip_character(It const& it, char ch) { return it + (*it == ch); } /** * @brief Adjusts the range to ignore starting/trailing whitespace and quotation characters. * * @param[in] begin Pointer to the first character in the parsing range * @param[in] end pointer to the first character after the parsing range * @param[in] quotechar The character used to denote quotes; '\0' if none * * @return Trimmed range */ __inline__ __device__ std::pair<char const*, char const*> trim_whitespaces_quotes( char const* begin, char const* end, char quotechar = '\0') { auto not_whitespace = [] __device__(auto c) { return !is_whitespace(c); }; auto const trim_begin = thrust::find_if(thrust::seq, begin, end, not_whitespace); auto const trim_end = thrust::find_if(thrust::seq, thrust::make_reverse_iterator(end), thrust::make_reverse_iterator(trim_begin), not_whitespace); return {skip_character(trim_begin, quotechar), skip_character(trim_end, quotechar).base()}; } /** * @brief Excludes the prefix from the input range if the string starts with the prefix. * * @tparam N length on the prefix, plus one * @param[in, out] begin Pointer to the first element of the string * @param end Pointer to the first element after the string * @param prefix String we're searching for at the start of the input range */ template <int N> __inline__ __device__ auto skip_if_starts_with(char const* begin, char const* end, const char (&prefix)[N]) { static constexpr size_t prefix_len = N - 1; if (end - begin < prefix_len) return begin; return thrust::equal(thrust::seq, begin, begin + prefix_len, prefix) ? begin + prefix_len : begin; } /** * @brief Finds the first element after the leading space characters. * * @param begin Pointer to the first element of the string * @param end Pointer to the first element after the string */ __inline__ __device__ auto skip_spaces(char const* begin, char const* end) { return thrust::find_if(thrust::seq, begin, end, [](auto elem) { return elem != ' '; }); } } // namespace io } // namespace cudf
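// A minimal host-side sketch exercising the templated parse_numeric() declared above. The
// options initialization below is an assumption kept to the fields the parser actually
// reads for plain numbers (decimal and thousands separators); the trie members are left
// default-constructed, and the example assumes this header is compiled as C++17 with an
// exp10() implementation available (as in the CUDA/GNU toolchains cuDF targets).
#include <cstdio>

inline void parse_numeric_demo()
{
  cudf::io::parse_options_view opts{};
  opts.decimal   = '.';
  opts.thousands = ',';

  char const input[] = "-1,234.5e2";
  double const value =
    cudf::io::parse_numeric<double>(input, input + sizeof(input) - 1, opts);

  std::printf("parsed value: %f\n", value);  // expected: -123450.000000
}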
#include "Static/TriangleCounting/triangle.cuh" using namespace hornets_nest; namespace hornets_nest { __device__ __forceinline__ void initialize(degree_t diag_id, degree_t u_len, degree_t v_len, vid_t* __restrict__ u_min, vid_t* __restrict__ u_max, vid_t* __restrict__ v_min, vid_t* __restrict__ v_max, int* __restrict__ found) { if (diag_id == 0) { *u_min = *u_max = *v_min = *v_max = 0; *found = 1; } else if (diag_id < u_len) { *u_min = 0; *u_max = diag_id; *v_max = diag_id; *v_min = 0; } else if (diag_id < v_len) { *u_min = 0; *u_max = u_len; *v_max = diag_id; *v_min = diag_id - u_len; } else { *u_min = diag_id - v_len; *u_max = u_len; *v_min = diag_id - u_len; *v_max = v_len; } } __device__ __forceinline__ void workPerThread(degree_t uLength, degree_t vLength, int threadsPerIntersection, int threadId, int* __restrict__ outWorkPerThread, int* __restrict__ outDiagonalId) { int totalWork = uLength + vLength; int remainderWork = totalWork % threadsPerIntersection; int workPerThread = totalWork / threadsPerIntersection; int longDiagonals = threadId > remainderWork ? remainderWork : threadId; int shortDiagonals = threadId > remainderWork ? threadId - remainderWork : 0; *outDiagonalId = (workPerThread + 1) * longDiagonals + workPerThread * shortDiagonals; *outWorkPerThread = workPerThread + (threadId < remainderWork); } __device__ __forceinline__ void bSearch(unsigned found, degree_t diagonalId, const vid_t* __restrict__ uNodes, const vid_t* __restrict__ vNodes, const degree_t* __restrict__ uLength, vid_t* __restrict__ outUMin, vid_t* __restrict__ outUMax, vid_t* __restrict__ outVMin, vid_t* __restrict__ outVMax, vid_t* __restrict__ outUCurr, vid_t* __restrict__ outVCurr) { vid_t length; while (!found){ *outUCurr = (*outUMin + *outUMax) >> 1; *outVCurr = diagonalId - *outUCurr; if (*outVCurr >= *outVMax){ length = *outUMax - *outUMin; if (length == 1){ found = 1; continue; } } unsigned comp1 = uNodes[*outUCurr] > vNodes[*outVCurr - 1]; unsigned comp2 = uNodes[*outUCurr - 1] > vNodes[*outVCurr]; if (comp1 && !comp2) found = 1; else if (comp1){ *outVMin = *outVCurr; *outUMax = *outUCurr; } else{ *outVMax = *outVCurr; *outUMin = *outUCurr; } } if (*outVCurr >= *outVMax && length == 1 && *outVCurr > 0 && *outUCurr > 0 && *outUCurr < *uLength - 1) { unsigned comp1 = uNodes[*outUCurr] > vNodes[*outVCurr - 1]; unsigned comp2 = uNodes[*outUCurr - 1] > vNodes[*outVCurr]; if (!comp1 && !comp2) { (*outUCurr)++; (*outVCurr)--; } } } __device__ __forceinline__ int fixStartPoint(degree_t uLength, degree_t vLength, vid_t* __restrict__ uCurr, vid_t* __restrict__ vCurr, const vid_t* __restrict__ uNodes, const vid_t* __restrict__ vNodes) { unsigned uBigger = (*uCurr > 0) && (*vCurr < vLength) && (uNodes[*uCurr - 1] == vNodes[*vCurr]); unsigned vBigger = (*vCurr > 0) && (*uCurr < uLength) && (vNodes[*vCurr - 1] == uNodes[*uCurr]); *uCurr += vBigger; *vCurr += uBigger; return uBigger + vBigger; } __device__ __forceinline__ void indexBinarySearch(vid_t* data, vid_t arrLen, vid_t key, int& pos) { int low = 0; int high = arrLen - 1; while (high >= low) { int middle = (low + high) / 2; if (data[middle] == key) { pos = middle; return; } if (data[middle] < key) low = middle + 1; if (data[middle] > key) high = middle - 1; } } template<typename HornetDevice> __device__ __forceinline__ void intersectCount(const HornetDevice& hornet, degree_t uLength, degree_t vLength, const vid_t* __restrict__ uNodes, const vid_t* __restrict__ vNodes, vid_t* __restrict__ uCurr, vid_t* __restrict__ vCurr, int* __restrict__ workIndex, const 
int* __restrict__ workPerThread, triangle_t* __restrict__ triangles, int found, triangle_t* __restrict__ outPutTriangles, vid_t src, vid_t dest, vid_t u, vid_t v) { if (*uCurr < uLength && *vCurr < vLength) { int comp; int vmask; int umask; while (*workIndex < *workPerThread) { vmask = umask = 0; comp = uNodes[*uCurr] - vNodes[*vCurr]; *triangles += (comp == 0); *uCurr += (comp <= 0 && !vmask) || umask; *vCurr += (comp >= 0 && !umask) || vmask; *workIndex += (comp == 0 && !umask && !vmask) + 1; if (*vCurr >= vLength || *uCurr >= uLength) break; } *triangles -= ((comp == 0) && (*workIndex > *workPerThread) && found); } } // u_len < v_len template<typename HornetDevice> __device__ __forceinline__ triangle_t count_triangles(const HornetDevice& hornet, vid_t u, const vid_t* __restrict__ u_nodes, degree_t u_len, vid_t v, const vid_t* __restrict__ v_nodes, degree_t v_len, int threads_per_block, volatile triangle_t* __restrict__ firstFound, int tId, triangle_t* __restrict__ outPutTriangles, const vid_t* __restrict__ uMask, const vid_t* __restrict__ vMask, triangle_t multiplier, vid_t src, vid_t dest) { // Partitioning the work to the multiple thread of a single GPU processor. //The threads should get a near equal number of the elements to //Tersect - this number will be off by 1. int work_per_thread, diag_id; workPerThread(u_len, v_len, threads_per_block, tId, &work_per_thread, &diag_id); triangle_t triangles = 0; int work_index = 0; int found = 0; vid_t u_min, u_max, v_min, v_max, u_curr, v_curr; firstFound[tId] = 0; if (work_per_thread > 0) { // For the binary search, we are figuring out the initial poT of search. initialize(diag_id, u_len, v_len, &u_min, &u_max, &v_min, &v_max, &found); u_curr = 0; v_curr = 0; bSearch(found, diag_id, u_nodes, v_nodes, &u_len, &u_min, &u_max, &v_min, &v_max, &u_curr, &v_curr); int sum = fixStartPoint(u_len, v_len, &u_curr, &v_curr, u_nodes, v_nodes); work_index += sum; if (tId > 0) firstFound[tId - 1] = sum; triangles += sum; intersectCount (hornet, u_len, v_len, u_nodes, v_nodes, &u_curr, &v_curr, &work_index, &work_per_thread, &triangles, firstFound[tId], outPutTriangles, src, dest, u, v); } return triangles; } __device__ __forceinline__ void workPerBlock(vid_t numVertices, vid_t* __restrict__ outMpStart, vid_t* __restrict__ outMpEnd, int blockSize) { vid_t verticesPerMp = numVertices / gridDim.x; vid_t remainderBlocks = numVertices % gridDim.x; vid_t extraVertexBlocks = (blockIdx.x > remainderBlocks) ? remainderBlocks : blockIdx.x; vid_t regularVertexBlocks = (blockIdx.x > remainderBlocks) ? blockIdx.x - remainderBlocks : 0; vid_t mpStart = (verticesPerMp + 1) * extraVertexBlocks + verticesPerMp * regularVertexBlocks; *outMpStart = mpStart; *outMpEnd = mpStart + verticesPerMp + (blockIdx.x < remainderBlocks); } template<typename HornetDevice> __global__ void devicecuStaticTriangleCounting(HornetDevice hornet, triangle_t* __restrict__ outPutTriangles, int threads_per_block, int number_blocks, int shifter, int cutoff, HostDeviceVar<TriangleData> hd_data) { TriangleData* __restrict__ devData = hd_data.ptr(); vid_t nv = hornet.nV(); // Partitioning the work to the multiple thread of a single GPU processor. //The threads should get a near equal number of the elements //to intersect - this number will be off by no more than one. 
int tx = threadIdx.x; vid_t this_mp_start, this_mp_stop; const int blockSize = blockDim.x; workPerBlock(nv, &this_mp_start, &this_mp_stop, blockSize); __shared__ vid_t firstFound[1024]; vid_t adj_offset = tx >> shifter; vid_t* firstFoundPos = firstFound + (adj_offset << shifter); for (vid_t src = this_mp_start; src < this_mp_stop; src++) { auto vertex = hornet.vertex(src); vid_t srcLen = vertex.degree(); for(int k = adj_offset; k < srcLen; k += number_blocks) { vid_t dest = vertex.edge(k).dst_id(); degree_t destLen = hornet.vertex(dest).degree(); if (dest < src) //opt continue; //opt bool avoidCalc = (src == dest) || (destLen < 2) || (srcLen < 2); if (avoidCalc) continue; bool sourceSmaller = srcLen < destLen; vid_t small = sourceSmaller ? src : dest; vid_t large = sourceSmaller ? dest : src; degree_t small_len = sourceSmaller ? srcLen : destLen; degree_t large_len = sourceSmaller ? destLen : srcLen; /* if(large_len + small_len > cutoff) continue; */ const vid_t* small_ptr = hornet.vertex(small).neighbor_ptr(); const vid_t* large_ptr = hornet.vertex(large).neighbor_ptr(); triangle_t triFound = count_triangles (hornet, small, small_ptr, small_len, large, large_ptr, large_len, threads_per_block, (triangle_t*)firstFoundPos, tx % threads_per_block, outPutTriangles, nullptr, nullptr, 1, src, dest); atomicAdd(outPutTriangles+src,triFound); atomicAdd(outPutTriangles+dest,triFound); } } } void staticTriangleCounting(HornetGraph& hornet, triangle_t* __restrict__ outPutTriangles, int threads_per_block, int number_blocks, int shifter, int thread_blocks, int blockdim, int cutoff, HostDeviceVar<TriangleData> hd_data) { devicecuStaticTriangleCounting <<< thread_blocks, blockdim >>> (hornet.device_side(), outPutTriangles, threads_per_block, number_blocks, shifter, cutoff, hd_data); hd_data.sync(); } // ----------------------- // ----------------------- // ----------------------- // ----------------------- // The above functions are responsible for doing the actual triangle counting. // The functions below are the StaticAlgorithm functions used for running the algorithm. 
// ----------------------- // ----------------------- // ----------------------- // ----------------------- TriangleCounting::TriangleCounting(HornetGraph& hornet) : StaticAlgorithm(hornet), hd_triangleData(hornet){ } TriangleCounting::~TriangleCounting(){ release(); } struct OPERATOR_InitTriangleCounts { HostDeviceVar<TriangleData> d_triangleData; OPERATOR (Vertex &vertex) { d_triangleData().triPerVertex[vertex.id()] = 0; } }; void TriangleCounting::reset(){ forAllVertices(hornet, OPERATOR_InitTriangleCounts { hd_triangleData }); } void TriangleCounting::run(){ run(0); } void TriangleCounting::run(int cutoff){ staticTriangleCounting(hornet, hd_triangleData().triPerVertex, hd_triangleData().threadsPerIntersection, hd_triangleData().numberInterPerBlock, hd_triangleData().logThreadsPerInter, hd_triangleData().threadBlocks, hd_triangleData().blockSize, cutoff, hd_triangleData); } void TriangleCounting::release(){ if(memReleased) return; memReleased=true; gpu::free(hd_triangleData().triPerVertex); } void TriangleCounting::setInitParameters(int threadBlocks, int blockSize, int threadsPerIntersection){ hd_triangleData().threadBlocks = threadBlocks; hd_triangleData().blockSize = blockSize; if(hd_triangleData().blockSize%32 != 0){ printf("The block size has to be a multiple of 32\n"); printf("The block size has to be a reduced to the closet multiple of 32\n"); hd_triangleData().blockSize = (hd_triangleData().blockSize/32)*32; } if(hd_triangleData().blockSize < 0){ printf("The block size has to be a positive numbe\n"); exit(0); } hd_triangleData().threadsPerIntersection = threadsPerIntersection; if(hd_triangleData().threadsPerIntersection <= 0 || hd_triangleData().threadsPerIntersection >32 ){ printf("Threads per intersection have to be a power of two between 1 and 32\n"); exit(0); } int temp = hd_triangleData().threadsPerIntersection,logtemp=0; while (temp>>=1) ++logtemp; hd_triangleData().logThreadsPerInter = logtemp; hd_triangleData().numberInterPerBlock=hd_triangleData().blockSize/hd_triangleData().threadsPerIntersection; } void TriangleCounting::init(){ memReleased=false; gpu::allocate(hd_triangleData().triPerVertex, hd_triangleData().nv+10); reset(); } triangle_t TriangleCounting::countTriangles(){ hd_triangleData.sync(); triangle_t* outputArray = (triangle_t*)malloc((hd_triangleData().nv+2)*sizeof(triangle_t)); gpu::copyToHost(hd_triangleData().triPerVertex, (hd_triangleData().nv+2), outputArray); triangle_t sum=0; for(int i=0; i<(hd_triangleData().nv); i++){ // printf("%d %ld\n", i,outputArray[i]); sum+=outputArray[i]; } free(outputArray); //triangle_t sum=gpu::reduce(hd_triangleData().triPerVertex, hd_triangleData().nv+1); return sum; } } // namespace hornets_nest
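// ----------------------------------------------------------------------
// Illustrative addition (not part of the hornets_nest sources): a CPU
// reference for the quantity the kernels above accumulate. For every
// undirected edge (u, v) the size of the sorted-adjacency intersection
// (the common neighbors of u and v) is added to both endpoints, matching
// the two atomicAdd calls in devicecuStaticTriangleCounting, so the
// per-vertex sum equals six times the number of triangles. Useful for
// validating countTriangles() on small graphs.
// ----------------------------------------------------------------------
#include <algorithm>
#include <cstdio>
#include <iterator>
#include <vector>

static long long per_vertex_triangle_sum(const std::vector<std::vector<int>>& adj)
{
    const int nv = static_cast<int>(adj.size());
    std::vector<long long> tri_per_vertex(nv, 0);
    for (int u = 0; u < nv; ++u) {
        for (int v : adj[u]) {
            if (v <= u) continue;  // counterpart of the "if (dest < src) continue" filter (self loops dropped too)
            std::vector<int> common;
            std::set_intersection(adj[u].begin(), adj[u].end(),
                                  adj[v].begin(), adj[v].end(),
                                  std::back_inserter(common));
            tri_per_vertex[u] += static_cast<long long>(common.size());
            tri_per_vertex[v] += static_cast<long long>(common.size());
        }
    }
    long long sum = 0;
    for (long long t : tri_per_vertex) sum += t;
    return sum;  // equals 6 * (number of triangles)
}

int main()
{
    // 4-clique: 4 triangles, so the expected per-vertex sum is 24.
    std::vector<std::vector<int>> adj = {{1, 2, 3}, {0, 2, 3}, {0, 1, 3}, {0, 1, 2}};
    std::printf("sum = %lld, triangles = %lld\n",
                per_vertex_triangle_sum(adj), per_vertex_triangle_sum(adj) / 6);
    return 0;
}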
namespace at { namespace native { namespace { template<typename T, template<class> class Op> struct BinaryOpScalarFunctor_ { __device__ void operator() ( int chunk_size, TensorListMetadata<1>& tl, T scalar) { int tensor_loc = tl.block_to_tensor[blockIdx.x]; int chunk_idx = tl.block_to_chunk[blockIdx.x]; int n = tl.sizes[tensor_loc]; T* x = (T*)tl.addresses[0][tensor_loc]; x += chunk_idx * chunk_size; n -= chunk_idx * chunk_size; T r_x[kILP]; // to make things simple, we put aligned case in a different code path if(n % kILP == 0 && chunk_size % kILP == 0 && is_aligned(x)) { for(int i_start = threadIdx.x; i_start * kILP < n && i_start * kILP < chunk_size; i_start += blockDim.x) { // load load_store(r_x, x, 0 , i_start); #pragma unroll for(int ii = 0; ii < kILP; ii++) { r_x[ii] = Op<T>()(static_cast<T>(r_x[ii]), scalar); } // store load_store(x, r_x, i_start, 0); } } else { for(int i_start = 0; i_start < n && i_start < chunk_size; i_start += blockDim.x * kILP) { #pragma unroll for(int ii = 0; ii < kILP; ii++) { r_x[ii] = 0; int i = i_start + threadIdx.x + ii * blockDim.x; if(i < n && i < chunk_size) { r_x[ii] = x[i]; } } #pragma unroll for(int ii = 0; ii < kILP; ii++) { r_x[ii] = Op<T>()(static_cast<T>(r_x[ii]), scalar); } #pragma unroll for(int ii = 0; ii < kILP; ii++) { int i = i_start + threadIdx.x + ii * blockDim.x; if(i < n && i < chunk_size) x[i] = r_x[ii]; } } } } }; template<typename T, template<class> class Op> struct BinaryOpScalarFunctor { __device__ void operator() ( int chunk_size, TensorListMetadata<2>& tl, T scalar) { int tensor_loc = tl.block_to_tensor[blockIdx.x]; int chunk_idx = tl.block_to_chunk[blockIdx.x]; int n = tl.sizes[tensor_loc]; T* x = (T*)tl.addresses[0][tensor_loc]; x += chunk_idx * chunk_size; T* out = (T*)tl.addresses[1][tensor_loc]; out += chunk_idx * chunk_size; n -= chunk_idx * chunk_size; T r_x[kILP]; // to make things simple, we put aligned case in a different code path if(n % kILP == 0 && chunk_size % kILP == 0 && is_aligned(x) && is_aligned(out)) { for(int i_start = threadIdx.x; i_start * kILP < n && i_start * kILP < chunk_size; i_start += blockDim.x) { // load load_store(r_x, x, 0 , i_start); #pragma unroll for(int ii = 0; ii < kILP; ii++) { r_x[ii] = Op<T>()(static_cast<T>(r_x[ii]), scalar); } // store load_store(out, r_x, i_start, 0); } } else { for(int i_start = 0; i_start < n && i_start < chunk_size; i_start += blockDim.x * kILP) { #pragma unroll for(int ii = 0; ii < kILP; ii++) { r_x[ii] = 0; int i = i_start + threadIdx.x + ii * blockDim.x; if(i < n && i < chunk_size) { r_x[ii] = x[i]; } } #pragma unroll for(int ii = 0; ii < kILP; ii++) { r_x[ii] = Op<T>()(static_cast<T>(r_x[ii]), scalar); } #pragma unroll for(int ii = 0; ii < kILP; ii++) { int i = i_start + threadIdx.x + ii * blockDim.x; if(i < n && i < chunk_size) out[i] = r_x[ii]; } } } } }; template<typename T, template<class> class Op> struct BinaryOpScalarListFunctor_ { __device__ void operator() ( int chunk_size, TensorListScalarListMetadata<1>& tl) { int tensor_loc = tl.block_to_tensor[blockIdx.x]; int chunk_idx = tl.block_to_chunk[blockIdx.x]; int n = tl.sizes[tensor_loc]; T* x = (T*)tl.addresses[0][tensor_loc]; x += chunk_idx * chunk_size; double y = tl.scalar_vals[tensor_loc]; n -= chunk_idx * chunk_size; T r_x[kILP]; // to make things simple, we put aligned case in a different code path if(n % kILP == 0 && chunk_size % kILP == 0 && is_aligned(x)) { for(int i_start = threadIdx.x; i_start * kILP < n && i_start * kILP < chunk_size; i_start += blockDim.x) { // load load_store(r_x, x, 0 , 
i_start); #pragma unroll for(int ii = 0; ii < kILP; ii++) { r_x[ii] = Op<T>()(static_cast<T>(r_x[ii]), y); } // store load_store(x, r_x, i_start, 0); } } else { for(int i_start = 0; i_start < n && i_start < chunk_size; i_start += blockDim.x * kILP) { #pragma unroll for(int ii = 0; ii < kILP; ii++) { r_x[ii] = 0; int i = i_start + threadIdx.x + ii * blockDim.x; if(i < n && i < chunk_size) { r_x[ii] = x[i]; } } #pragma unroll for(int ii = 0; ii < kILP; ii++) { r_x[ii] = Op<T>()(static_cast<T>(r_x[ii]), y); } #pragma unroll for(int ii = 0; ii < kILP; ii++) { int i = i_start + threadIdx.x + ii * blockDim.x; if(i < n && i < chunk_size) x[i] = r_x[ii]; } } } } }; template<typename T, template<class> class Op> struct BinaryOpScalarListFunctor { __device__ void operator() ( int chunk_size, TensorListScalarListMetadata<2>& tl) { int tensor_loc = tl.block_to_tensor[blockIdx.x]; int chunk_idx = tl.block_to_chunk[blockIdx.x]; int n = tl.sizes[tensor_loc]; T* x = (T*)tl.addresses[0][tensor_loc]; x += chunk_idx * chunk_size; T* out = (T*)tl.addresses[1][tensor_loc]; out += chunk_idx * chunk_size; double y = tl.scalar_vals[tensor_loc]; n -= chunk_idx * chunk_size; T r_x[kILP]; // to make things simple, we put aligned case in a different code path if(n % kILP == 0 && chunk_size % kILP == 0 && is_aligned(x) && is_aligned(out)) { for(int i_start = threadIdx.x; i_start * kILP < n && i_start * kILP < chunk_size; i_start += blockDim.x) { // load load_store(r_x, x, 0 , i_start); #pragma unroll for(int ii = 0; ii < kILP; ii++) { r_x[ii] = Op<T>()(static_cast<T>(r_x[ii]), y); } // store load_store(out, r_x, i_start, 0); } } else { for(int i_start = 0; i_start < n && i_start < chunk_size; i_start += blockDim.x * kILP) { #pragma unroll for(int ii = 0; ii < kILP; ii++) { r_x[ii] = 0; int i = i_start + threadIdx.x + ii * blockDim.x; if(i < n && i < chunk_size) { r_x[ii] = x[i]; } } #pragma unroll for(int ii = 0; ii < kILP; ii++) { r_x[ii] = Op<T>()(static_cast<T>(r_x[ii]), y); } #pragma unroll for(int ii = 0; ii < kILP; ii++) { int i = i_start + threadIdx.x + ii * blockDim.x; if(i < n && i < chunk_size) out[i] = r_x[ii]; } } } } }; template<typename T, template<class> class Op> struct BinaryOpListAlphaFunctor_ { __device__ void operator() ( int chunk_size, TensorListMetadata<2>& tl, T alpha) { int tensor_loc = tl.block_to_tensor[blockIdx.x]; int chunk_idx = tl.block_to_chunk[blockIdx.x]; int n = tl.sizes[tensor_loc]; T* x = (T*)tl.addresses[0][tensor_loc]; x += chunk_idx * chunk_size; T* y = (T*)tl.addresses[1][tensor_loc]; y += chunk_idx * chunk_size; n -= chunk_idx * chunk_size; T r_x[kILP]; T r_y[kILP]; // to make things simple, we put aligned case in a different code path if(n % kILP == 0 && chunk_size % kILP == 0 && is_aligned(x) && is_aligned(y)) { for(int i_start = threadIdx.x; i_start * kILP < n && i_start * kILP < chunk_size; i_start += blockDim.x) { // load load_store(r_x, x, 0 , i_start); load_store(r_y, y, 0 , i_start); #pragma unroll for(int ii = 0; ii < kILP; ii++) { r_x[ii] = Op<T>()(static_cast<T>(r_x[ii]), alpha * static_cast<T>(r_y[ii])); } // store load_store(x, r_x, i_start , 0); } } else { for(int i_start = 0; i_start < n && i_start < chunk_size; i_start += blockDim.x * kILP) { #pragma unroll for(int ii = 0; ii < kILP; ii++) { r_x[ii] = 0; r_y[ii] = 0; int i = i_start + threadIdx.x + ii * blockDim.x; if(i < n && i < chunk_size) { r_x[ii] = x[i]; r_y[ii] = y[i]; } } #pragma unroll for(int ii = 0; ii < kILP; ii++) { r_x[ii] = Op<T>()(static_cast<T>(r_x[ii]), alpha * static_cast<T>(r_y[ii])); } 
#pragma unroll for(int ii = 0; ii < kILP; ii++) { int i = i_start + threadIdx.x + ii * blockDim.x; if(i < n && i < chunk_size) x[i] = r_x[ii]; } } } } }; template<typename T, template<class> class Op> struct BinaryOpListAlphaFunctor { __device__ void operator() ( int chunk_size, TensorListMetadata<3>& tl, T alpha) { int tensor_loc = tl.block_to_tensor[blockIdx.x]; int chunk_idx = tl.block_to_chunk[blockIdx.x]; int n = tl.sizes[tensor_loc]; T* x = (T*)tl.addresses[0][tensor_loc]; x += chunk_idx * chunk_size; T* y = (T*)tl.addresses[1][tensor_loc]; y += chunk_idx * chunk_size; T* out = (T*)tl.addresses[2][tensor_loc]; out += chunk_idx * chunk_size; n -= chunk_idx * chunk_size; T r_x[kILP]; T r_y[kILP]; // to make things simple, we put aligned case in a different code path if(n % kILP == 0 && chunk_size % kILP == 0 && is_aligned(x) && is_aligned(y) && is_aligned(out)) { for(int i_start = threadIdx.x; i_start * kILP < n && i_start * kILP < chunk_size; i_start += blockDim.x) { // load load_store(r_x, x, 0 , i_start); load_store(r_y, y, 0 , i_start); #pragma unroll for(int ii = 0; ii < kILP; ii++) { r_x[ii] = Op<T>()(static_cast<T>(r_x[ii]), alpha * static_cast<T>(r_y[ii])); } // store load_store(out, r_x, i_start , 0); } } else { for(int i_start = 0; i_start < n && i_start < chunk_size; i_start += blockDim.x * kILP) { #pragma unroll for(int ii = 0; ii < kILP; ii++) { r_x[ii] = 0; r_y[ii] = 0; int i = i_start + threadIdx.x + ii * blockDim.x; if(i < n && i < chunk_size) { r_x[ii] = x[i]; r_y[ii] = y[i]; } } #pragma unroll for(int ii = 0; ii < kILP; ii++) { r_x[ii] = Op<T>()(static_cast<T>(r_x[ii]), alpha * static_cast<T>(r_y[ii])); } #pragma unroll for(int ii = 0; ii < kILP; ii++) { int i = i_start + threadIdx.x + ii * blockDim.x; if(i < n && i < chunk_size) out[i] = r_x[ii]; } } } } }; template<typename T, template<class> class Op> struct UnaryOpFunctor_ { __device__ void operator() ( int chunk_size, TensorListMetadata<1>& tl) { int tensor_loc = tl.block_to_tensor[blockIdx.x]; int chunk_idx = tl.block_to_chunk[blockIdx.x]; int n = tl.sizes[tensor_loc]; T* x = (T*)tl.addresses[0][tensor_loc]; x += chunk_idx * chunk_size; n -= chunk_idx * chunk_size; T r_x[kILP]; // to make things simple, we put aligned case in a different code path if(n % kILP == 0 && chunk_size % kILP == 0 && is_aligned(x)) { for(int i_start = threadIdx.x; i_start * kILP < n && i_start * kILP < chunk_size; i_start += blockDim.x) { // load load_store(r_x, x, 0 , i_start); #pragma unroll for(int ii = 0; ii < kILP; ii++) { r_x[ii] = Op<T>()(static_cast<T>(r_x[ii])); } // store load_store(x, r_x, i_start, 0); } } else { for(int i_start = 0; i_start < n && i_start < chunk_size; i_start += blockDim.x * kILP) { #pragma unroll for(int ii = 0; ii < kILP; ii++) { r_x[ii] = 0; int i = i_start + threadIdx.x + ii * blockDim.x; if(i < n && i < chunk_size) { r_x[ii] = x[i]; } } #pragma unroll for(int ii = 0; ii < kILP; ii++) { r_x[ii] = Op<T>()(static_cast<T>(r_x[ii])); } #pragma unroll for(int ii = 0; ii < kILP; ii++) { int i = i_start + threadIdx.x + ii * blockDim.x; if(i < n && i < chunk_size) x[i] = r_x[ii]; } } } } }; template<typename T, template<class> class Op> struct UnaryOpFunctor { __device__ void operator() ( int chunk_size, TensorListMetadata<2>& tl) { int tensor_loc = tl.block_to_tensor[blockIdx.x]; int chunk_idx = tl.block_to_chunk[blockIdx.x]; int n = tl.sizes[tensor_loc]; T* x = (T*)tl.addresses[0][tensor_loc]; x += chunk_idx * chunk_size; T* out = (T*)tl.addresses[1][tensor_loc]; out += chunk_idx * chunk_size; n -= chunk_idx 
* chunk_size; T r_x[kILP]; // to make things simple, we put aligned case in a different code path if(n % kILP == 0 && chunk_size % kILP == 0 && is_aligned(x) && is_aligned(out)) { for(int i_start = threadIdx.x; i_start * kILP < n && i_start * kILP < chunk_size; i_start += blockDim.x) { // load load_store(r_x, x, 0 , i_start); #pragma unroll for(int ii = 0; ii < kILP; ii++) { r_x[ii] = Op<T>()(static_cast<T>(r_x[ii])); } // store load_store(out, r_x, i_start, 0); } } else { for(int i_start = 0; i_start < n && i_start < chunk_size; i_start += blockDim.x * kILP) { #pragma unroll for(int ii = 0; ii < kILP; ii++) { r_x[ii] = 0; int i = i_start + threadIdx.x + ii * blockDim.x; if(i < n && i < chunk_size) { r_x[ii] = x[i]; } } #pragma unroll for(int ii = 0; ii < kILP; ii++) { r_x[ii] = Op<T>()(static_cast<T>(r_x[ii])); } #pragma unroll for(int ii = 0; ii < kILP; ii++) { int i = i_start + threadIdx.x + ii * blockDim.x; if(i < n && i < chunk_size) out[i] = r_x[ii]; } } } } }; template<typename T, template<class> class Op> struct PointwiseOpFunctor_ { __device__ void operator() ( int chunk_size, TensorListMetadata<3>& tl, T scalar) { int tensor_loc = tl.block_to_tensor[blockIdx.x]; int chunk_idx = tl.block_to_chunk[blockIdx.x]; int n = tl.sizes[tensor_loc]; T* x = (T*)tl.addresses[0][tensor_loc]; x += chunk_idx * chunk_size; T* y = (T*)tl.addresses[1][tensor_loc]; y += chunk_idx * chunk_size; T* z = (T*)tl.addresses[2][tensor_loc]; z += chunk_idx * chunk_size; n -= chunk_idx * chunk_size; T r_x[kILP]; T r_y[kILP]; T r_z[kILP]; // to make things simple, we put aligned case in a different code path if(n % kILP == 0 && chunk_size % kILP == 0 && is_aligned(x) && is_aligned(y) && is_aligned(z)) { for(int i_start = threadIdx.x; i_start * kILP < n && i_start * kILP < chunk_size; i_start += blockDim.x) { // load load_store(r_x, x, 0 , i_start); load_store(r_y, y, 0 , i_start); load_store(r_z, z, 0 , i_start); #pragma unroll for(int ii = 0; ii < kILP; ii++) { r_x[ii] = static_cast<T>(r_x[ii]) + scalar * Op<T>()(static_cast<T>(r_y[ii]), static_cast<T>(r_z[ii])); } // store load_store(x, r_x, i_start, 0); } } else { for(int i_start = 0; i_start < n && i_start < chunk_size; i_start += blockDim.x * kILP) { #pragma unroll for(int ii = 0; ii < kILP; ii++) { r_x[ii] = 0; r_y[ii] = 0; r_z[ii] = 0; int i = i_start + threadIdx.x + ii * blockDim.x; if(i < n && i < chunk_size) { r_x[ii] = x[i]; r_y[ii] = y[i]; r_z[ii] = z[i]; } } #pragma unroll for(int ii = 0; ii < kILP; ii++) { r_x[ii] = static_cast<T>(r_x[ii]) + scalar * Op<T>()(static_cast<T>(r_y[ii]), static_cast<T>(r_z[ii])); } #pragma unroll for(int ii = 0; ii < kILP; ii++) { int i = i_start + threadIdx.x + ii * blockDim.x; if(i < n && i < chunk_size) x[i] = r_x[ii]; } } } } }; template<typename T, template<class> class Op> struct PointwiseOpFunctor { __device__ void operator() ( int chunk_size, TensorListMetadata<4>& tl, T scalar) { int tensor_loc = tl.block_to_tensor[blockIdx.x]; int chunk_idx = tl.block_to_chunk[blockIdx.x]; int n = tl.sizes[tensor_loc]; T* x = (T*)tl.addresses[0][tensor_loc]; x += chunk_idx * chunk_size; T* y = (T*)tl.addresses[1][tensor_loc]; y += chunk_idx * chunk_size; T* z = (T*)tl.addresses[2][tensor_loc]; z += chunk_idx * chunk_size; T* out = (T*)tl.addresses[3][tensor_loc]; out += chunk_idx * chunk_size; n -= chunk_idx * chunk_size; T r_x[kILP]; T r_y[kILP]; T r_z[kILP]; // to make things simple, we put aligned case in a different code path if(n % kILP == 0 && chunk_size % kILP == 0 && is_aligned(x) && is_aligned(y) && is_aligned(z) && 
is_aligned(out)) { for(int i_start = threadIdx.x; i_start * kILP < n && i_start * kILP < chunk_size; i_start += blockDim.x) { // load load_store(r_x, x, 0 , i_start); load_store(r_y, y, 0 , i_start); load_store(r_z, z, 0 , i_start); #pragma unroll for(int ii = 0; ii < kILP; ii++) { r_x[ii] = static_cast<T>(r_x[ii]) + scalar * Op<T>()(static_cast<T>(r_y[ii]), static_cast<T>(r_z[ii])); } // store load_store(out, r_x, i_start, 0); } } else { for(int i_start = 0; i_start < n && i_start < chunk_size; i_start += blockDim.x * kILP) { #pragma unroll for(int ii = 0; ii < kILP; ii++) { r_x[ii] = 0; r_y[ii] = 0; r_z[ii] = 0; int i = i_start + threadIdx.x + ii * blockDim.x; if(i < n && i < chunk_size) { r_x[ii] = x[i]; r_y[ii] = y[i]; r_z[ii] = z[i]; } } #pragma unroll for(int ii = 0; ii < kILP; ii++) { r_x[ii] = static_cast<T>(r_x[ii]) + scalar * Op<T>()(static_cast<T>(r_y[ii]), static_cast<T>(r_z[ii])); } #pragma unroll for(int ii = 0; ii < kILP; ii++) { int i = i_start + threadIdx.x + ii * blockDim.x; if(i < n && i < chunk_size) out[i] = r_x[ii]; } } } } }; } // namespace }} // namespace at::native
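// Illustrative addition (not part of the ATen sources): a standalone CUDA
// kernel showing the access pattern every functor above repeats. Each thread
// handles kILP elements per trip, either through one vectorized load/store
// when the buffer is aligned and the length is a multiple of kILP, or through
// a guarded scalar path otherwise. load_store and is_aligned below are
// simplified float-only stand-ins for the ATen helpers of the same name, and
// the grid-stride loop replaces the chunk bookkeeping done by the real
// multi-tensor-apply machinery.
#include <cuda_runtime.h>
#include <cstdint>
#include <cstdio>

constexpr int kILP = 4;

__device__ inline bool is_aligned(const float* p)
{
  return reinterpret_cast<std::uintptr_t>(p) % (kILP * sizeof(float)) == 0;
}

// Move kILP consecutive floats through a single 16-byte transaction.
__device__ inline void load_store(float* dst, const float* src, int dst_off, int src_off)
{
  reinterpret_cast<float4*>(dst)[dst_off] = reinterpret_cast<const float4*>(src)[src_off];
}

__global__ void scale_kernel(float* x, int n, float scalar)
{
  alignas(sizeof(float4)) float r_x[kILP];
  if (n % kILP == 0 && is_aligned(x)) {
    // Fast path: vectorized loads/stores, kILP elements per iteration.
    for (int i_start = threadIdx.x + blockIdx.x * blockDim.x; i_start * kILP < n;
         i_start += blockDim.x * gridDim.x) {
      load_store(r_x, x, 0, i_start);
#pragma unroll
      for (int ii = 0; ii < kILP; ii++) r_x[ii] *= scalar;
      load_store(x, r_x, i_start, 0);
    }
  } else {
    // Fallback: plain element-wise path with bounds checks.
    for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < n; i += blockDim.x * gridDim.x)
      x[i] *= scalar;
  }
}

int main()
{
  const int n = 1 << 20;
  float* d_x = nullptr;
  cudaMalloc(&d_x, n * sizeof(float));
  cudaMemset(d_x, 0, n * sizeof(float));
  scale_kernel<<<256, 256>>>(d_x, n, 2.0f);
  cudaDeviceSynchronize();
  cudaFree(d_x);
  std::printf("done\n");
  return 0;
}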
#include <thrust/extrema.h> // for thrust::max_element namespace amgx { template<typename IndexType, typename ValueTypeA, typename ValueTypeB, int threads_per_block, int warps_per_block, bool diag> __global__ void getLambdaEstimate(const IndexType *row_offsets, const IndexType *column_indices, const ValueTypeA *values, const IndexType *dia_indices, const int num_rows, ValueTypeB *out) { int row_id = blockDim.x * blockIdx.x + threadIdx.x; ValueTypeB max_sum = (ValueTypeB)0.0; while (row_id < num_rows) { ValueTypeB cur_sum = (ValueTypeB)0.0; for (int j = row_offsets[row_id]; j < row_offsets[row_id + 1]; j++) { cur_sum += abs(values[j]); } if (diag) { cur_sum += abs(values[dia_indices[row_id]]); } max_sum = max(max_sum, cur_sum); row_id += gridDim.x * blockDim.x; } out[blockDim.x * blockIdx.x + threadIdx.x] = max_sum; } // Method to compute the inverse of the diagonal blocks template <class T_Config> void Chebyshev_Solver<T_Config>::compute_eigenmax_estimate(const Matrix<T_Config> &A, ValueTypeB &lambda) { #define LAMBDA_BLOCK_SIZE 256 VVector tsum(A.get_num_rows()); const int threads_per_block = 256; const int blockrows_per_cta = threads_per_block; const int num_blocks = min(AMGX_GRID_MAX_SIZE, (int) (A.get_num_rows() - 1) / blockrows_per_cta + 1); const IndexType *A_row_offsets_ptr = A.row_offsets.raw(); const IndexType *A_column_indices_ptr = A.col_indices.raw(); const IndexType *A_dia_idx_ptr = A.diag.raw(); const ValueTypeA *A_nonzero_values_ptr = A.values.raw(); if (A.hasProps(DIAG)) { cudaFuncSetCacheConfig(getLambdaEstimate < IndexType, ValueTypeA, ValueTypeB, LAMBDA_BLOCK_SIZE, LAMBDA_BLOCK_SIZE / 32, true >, cudaFuncCachePreferL1); getLambdaEstimate < IndexType, ValueTypeA, ValueTypeB, LAMBDA_BLOCK_SIZE, LAMBDA_BLOCK_SIZE / 32, true > <<< num_blocks, threads_per_block>>> (A_row_offsets_ptr, A_column_indices_ptr, A_nonzero_values_ptr, A_dia_idx_ptr, A.get_num_rows(), tsum.raw()); } else { cudaFuncSetCacheConfig(getLambdaEstimate < IndexType, ValueTypeA, ValueTypeB, LAMBDA_BLOCK_SIZE, LAMBDA_BLOCK_SIZE / 32, false >, cudaFuncCachePreferL1); getLambdaEstimate < IndexType, ValueTypeA, ValueTypeB, LAMBDA_BLOCK_SIZE, LAMBDA_BLOCK_SIZE / 32, false > <<< num_blocks, threads_per_block>>> (A_row_offsets_ptr, A_column_indices_ptr, A_nonzero_values_ptr, A_dia_idx_ptr, A.get_num_rows(), tsum.raw()); } lambda = *(thrust::max_element(tsum.begin(), tsum.end())); } // Constructor template< class T_Config> Chebyshev_Solver<T_Config>::Chebyshev_Solver( AMG_Config &cfg, const std::string &cfg_scope) : Solver<T_Config>( cfg, cfg_scope), m_buffer_N(0), m_eigsolver(NULL) { std::string solverName, new_scope, tmp_scope; cfg.getParameter<std::string>( "preconditioner", solverName, cfg_scope, new_scope ); m_lambda_mode = cfg.AMG_Config::getParameter<int>("chebyshev_lambda_estimate_mode", cfg_scope); m_cheby_order = cfg.AMG_Config::getParameter<int>("chebyshev_polynomial_order", cfg_scope); // 0 - use eigensolver to get BOTH estimates // 1 - use eigensolver to get maximum estimate // 2 - use max sum of abs values as a rough estimate for maximum eigenvalue // 3 - use user provided cheby_max_lambda and cheby_min_lambda if (m_lambda_mode == 3) { m_user_max_lambda = cfg.AMG_Config::getParameter<double>("cheby_max_lambda", cfg_scope); m_user_min_lambda = cfg.AMG_Config::getParameter<double>("cheby_min_lambda", cfg_scope); } if (solverName.compare("NOSOLVER") == 0) { no_preconditioner = true; m_preconditioner = NULL; } else { no_preconditioner = false; m_preconditioner = SolverFactory<T_Config>::allocate( cfg, 
cfg_scope, "preconditioner" ); } std::string eig_cfg_string = "algorithm=AGGREGATION,\n" "eig_solver=LANCZOS,\n" "verbosity_level=0,\n" "eig_max_iters=128,\n" "eig_tolerance=1e-4,\n" "eig_which=largest,\n" "eig_eigenvector=0,\n" "eig_eigenvector_solver=default"; /*std::ifstream t("/home/marsaev/work/perforce/marsaev_sw/sw/gpgpu/amgx/amg/eigen_examples/POWER_ITERATION"); std::stringstream buffer; buffer << t.rdbuf(); std::string eig_cfg_string = buffer.str();*/ if (m_lambda_mode < 2) { AMG_Configuration eig_cfg; eig_cfg.parseParameterString(eig_cfg_string.c_str()); m_eigsolver = EigenSolverFactory<T_Config>::allocate(*eig_cfg.getConfigObject(), "default", "eig_solver"); } } template<class T_Config> Chebyshev_Solver<T_Config>::~Chebyshev_Solver() { if (!no_preconditioner) { delete m_preconditioner; } if (!m_eigsolver) { delete m_eigsolver; } } template<class T_Config> void Chebyshev_Solver<T_Config>::solver_setup(bool reuse_matrix_structure) { AMGX_CPU_PROFILER( "Chebyshev_Solver::solver_setup " ); ViewType oldView = this->m_A->currentView(); this->m_A->setViewExterior(); // Setup the preconditionner if (!no_preconditioner) { m_preconditioner->setup(*this->m_A, reuse_matrix_structure); } // The number of elements in temporary vectors. this->m_buffer_N = static_cast<int>( this->m_A->get_num_cols() * this->m_A->get_block_dimy() ); Matrix<T_Config> *mtx_A = dynamic_cast<Matrix<T_Config>*>(this->m_A); VVector eig_solver_t_x; // m_lambda_mode: // 0: use eigensolver to get lmin and lmax estimate // 1: use eigensolver to get lmax estimate, set lmin = lmax/8 // 2: use max row sum as lmax estimate, set lmin = lmax/8 if (m_lambda_mode < 2) { if (!no_preconditioner) { SolverOperator<T_Config> *MA = new SolverOperator<T_Config> (this->m_A, m_preconditioner); m_eigsolver->setup(*MA); m_eigsolver->solve(eig_solver_t_x); delete MA; } else { m_eigsolver->setup(*mtx_A); m_eigsolver->solve(eig_solver_t_x); } const std::vector<ValueTypeB> &lambdas = m_eigsolver->get_eigenvalues(); this->m_lmax = lambdas[0] * 1.05; if (m_lambda_mode == 0) { this->m_lmin = lambdas[lambdas.size() - 1] * 0.95; } else { this->m_lmin = this->m_lmax * 0.125; } } else if (m_lambda_mode == 2) { if (no_preconditioner) { Matrix<T_Config> *pA = dynamic_cast< Matrix<T_Config>* > (this->m_A); compute_eigenmax_estimate(*pA, this->m_lmax); this->m_lmin = this->m_lmax * 0.125; } else { // assuming that this preconditioner would be good enough to reduce spectrum to the largest eigen value = 1.0 this->m_lmax = 0.9; this->m_lmin = this->m_lmax * 0.125; } } else if (m_lambda_mode == 3) { if (no_preconditioner) { Matrix<T_Config> *pA = dynamic_cast< Matrix<T_Config>* > (this->m_A); compute_eigenmax_estimate(*pA, this->m_lmax); this->m_lmin = this->m_lmax * 0.125; } else { // Use user input estimates this->m_lmax = this->m_user_max_lambda; this->m_lmin = this->m_user_min_lambda; } } else { FatalError("Not supported chebyshev_lambda_estimate_mode.", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE); } // Allocate memory needed for iterating. 
m_p.resize( this->m_buffer_N ); m_z.resize( this->m_buffer_N ); m_Ap.resize( this->m_buffer_N ); m_xp.resize( this->m_buffer_N ); m_rp.resize( this->m_buffer_N ); m_p.set_block_dimy(this->m_A->get_block_dimy()); m_p.set_block_dimx(1); m_p.dirtybit = 1; m_p.delayed_send = 1; m_p.tag = this->tag * 100 + 1; m_Ap.set_block_dimy(this->m_A->get_block_dimy()); m_Ap.set_block_dimx(1); m_Ap.dirtybit = 1; m_Ap.delayed_send = 1; m_Ap.tag = this->tag * 100 + 2; m_z.set_block_dimy(this->m_A->get_block_dimy()); m_z.set_block_dimx(1); m_z.dirtybit = 1; m_z.delayed_send = 1; m_z.tag = this->tag * 100 + 3; m_xp.set_block_dimy(this->m_A->get_block_dimy()); m_xp.set_block_dimx(1); m_xp.dirtybit = 1; m_xp.delayed_send = 1; m_xp.tag = this->tag * 100 + 4; this->m_A->setView(oldView); } template<class T_Config> void Chebyshev_Solver<T_Config>::solve_init( VVector &b, VVector &x, bool xIsZero ) { AMGX_CPU_PROFILER( "Chebyshev_Solver::solve_init " ); Operator<T_Config> &A = *this->m_A; ViewType oldView = A.currentView(); A.setViewExterior(); int offset, size; A.getOffsetAndSizeForView(A.getViewExterior(), &offset, &size); // Run one iteration of preconditioner with zero initial guess if (no_preconditioner) { copy(*this->m_r, m_z, offset, size); } else { m_z.delayed_send = 1; this->m_r->delayed_send = 1; m_preconditioner->solve( *this->m_r, m_z, true ); m_z.delayed_send = 1; this->m_r->delayed_send = 1; } // m_p - res after precond copy( m_z, m_p, offset, size ); A.setView(oldView); m_gamma = 0.; m_beta = 0.; first_iter = 0; } template<class T_Config> bool Chebyshev_Solver<T_Config>::solve_iteration( VVector &b, VVector &x, bool xIsZero ) { AMGX_CPU_PROFILER( "Chebyshev_Solver::solve_iteration " ); Operator<T_Config> &A = *this->m_A; ViewType oldView = A.currentView(); A.setViewExterior(); int offset, size; A.getOffsetAndSizeForView(A.getViewExterior(), &offset, &size); ValueTypeB a = (this->m_lmax + this->m_lmin) / 2; ValueTypeB c = (this->m_lmax - this->m_lmin) / 2; for (int i = 0; i < m_cheby_order; i++) { // apply precond if (no_preconditioner) { copy(*this->m_r, m_z, offset, size); } else { m_z.delayed_send = 1; this->m_r->delayed_send = 1; m_preconditioner->solve( *this->m_r, m_z, true ); m_z.delayed_send = 1; this->m_r->delayed_send = 1; } if (first_iter == 0) { m_gamma = 1. / a; first_iter = 1; } else { m_beta = c * c * m_gamma * m_gamma / 4.; if (m_gamma != ValueTypeB(0) && (a - (m_beta / m_gamma)) != ValueTypeB(0)) { m_gamma = 1. / (a - m_beta / m_gamma); } axpby( m_z, m_p, m_p, ValueTypeB( 1 ), m_beta, offset, size); } axpy( m_p, x, m_gamma, offset, size ); this->compute_residual( b, x); } // Do we converge ? if ( this->m_monitor_convergence && this->compute_norm_and_converged() ) { A.setView(oldView); return true; } // No convergence so far. A.setView(oldView); return !this->m_monitor_convergence; } template<class T_Config> void Chebyshev_Solver<T_Config>::solve_finalize( VVector &b, VVector &x ) {} template<class T_Config> void Chebyshev_Solver<T_Config>::printSolverParameters() const { if (!no_preconditioner) { std::cout << "preconditioner: " << this->m_preconditioner->getName() << " with scope name: " << this->m_preconditioner->getScope() << std::endl; } } /**************************************** * Explict instantiations ***************************************/ #define AMGX_CASE_LINE(CASE) template class Chebyshev_Solver<TemplateMode<CASE>::Type>; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE } // namespace amgx
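// Illustrative addition (not part of the AMGX sources): the scalar recurrence
// that solve_iteration applies each sweep, written out on the host so the
// coefficient sequence is easy to inspect. With a = (lmax + lmin)/2 and
// c = (lmax - lmin)/2 the code above uses
//     gamma_1 = 1/a,
//     beta_k  = (c * gamma_{k-1} / 2)^2,
//     gamma_k = 1/(a - beta_k / gamma_{k-1}),
// followed by p <- z + beta*p (axpby) and x <- x + gamma*p (axpy). The lmin
// value below assumes the lmax/8 default used by lambda modes 1 and 2.
#include <cstdio>

int main()
{
    const double lmax = 4.0;           // stand-in spectrum estimate
    const double lmin = lmax * 0.125;  // default lmin = lmax / 8
    const double a = (lmax + lmin) / 2.0;
    const double c = (lmax - lmin) / 2.0;

    double gamma = 0.0, beta = 0.0;
    for (int k = 0; k < 5; ++k) {
        if (k == 0) {
            gamma = 1.0 / a;  // first_iter branch
        } else {
            beta  = c * c * gamma * gamma / 4.0;
            gamma = 1.0 / (a - beta / gamma);
        }
        std::printf("sweep %d: beta = %.6f  gamma = %.6f\n", k, beta, gamma);
    }
    return 0;
}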
#include "include/common.h" #define BLOCK_SIZE_X 32 #define BLOCK_SIZE_Y 32 using Matf31da = Eigen::Matrix<float, 3, 1, Eigen::DontAlign>; namespace kinectfusion { namespace internal { namespace cuda { template<int SIZE> static __device__ __forceinline__ void reduce(volatile double* buffer) { const int thread_id = threadIdx.y * blockDim.x + threadIdx.x; double value = buffer[thread_id]; if (SIZE >= 1024) { if (thread_id < 512) buffer[thread_id] = value = value + buffer[thread_id + 512]; __syncthreads(); } if (SIZE >= 512) { if (thread_id < 256) buffer[thread_id] = value = value + buffer[thread_id + 256]; __syncthreads(); } if (SIZE >= 256) { if (thread_id < 128) buffer[thread_id] = value = value + buffer[thread_id + 128]; __syncthreads(); } if (SIZE >= 128) { if (thread_id < 64) buffer[thread_id] = value = value + buffer[thread_id + 64]; __syncthreads(); } if (thread_id < 32) { if (SIZE >= 64) buffer[thread_id] = value = value + buffer[thread_id + 32]; if (SIZE >= 32) buffer[thread_id] = value = value + buffer[thread_id + 16]; if (SIZE >= 16) buffer[thread_id] = value = value + buffer[thread_id + 8]; if (SIZE >= 8) buffer[thread_id] = value = value + buffer[thread_id + 4]; if (SIZE >= 4) buffer[thread_id] = value = value + buffer[thread_id + 2]; if (SIZE >= 2) buffer[thread_id] = value = value + buffer[thread_id + 1]; } } __global__ void estimate_kernel(const Eigen::Matrix<float, 3, 3, Eigen::DontAlign> rotation_current, const Matf31da translation_current, const PtrStep<float3> vertex_map_current, const PtrStep<float3> normal_map_current, const Eigen::Matrix<float, 3, 3, Eigen::DontAlign> rotation_previous_inv, const Matf31da translation_previous, const CameraParameters cam_params, const PtrStep<float3> vertex_map_previous, const PtrStep<float3> normal_map_previous, const float distance_threshold, const float angle_threshold, const int cols, const int rows, PtrStep<double> global_buffer) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; Matf31da n, d, s; bool correspondence_found = false; if (x < cols && y < rows) { Matf31da normal_current; normal_current.x() = normal_map_current.ptr(y)[x].x; if (!isnan(normal_current.x())) { Matf31da vertex_current; vertex_current.x() = vertex_map_current.ptr(y)[x].x; vertex_current.y() = vertex_map_current.ptr(y)[x].y; vertex_current.z() = vertex_map_current.ptr(y)[x].z; Matf31da vertex_current_global = rotation_current * vertex_current + translation_current; Matf31da vertex_current_camera = rotation_previous_inv * (vertex_current_global - translation_previous); Eigen::Vector2i point; point.x() = __float2int_rd( vertex_current_camera.x() * cam_params.focal_x / vertex_current_camera.z() + cam_params.principal_x + 0.5f); point.y() = __float2int_rd( vertex_current_camera.y() * cam_params.focal_y / vertex_current_camera.z() + cam_params.principal_y + 0.5f); if (point.x() >= 0 && point.y() >= 0 && point.x() < cols && point.y() < rows && vertex_current_camera.z() >= 0) { Matf31da normal_previous_global; normal_previous_global.x() = normal_map_previous.ptr(point.y())[point.x()].x; if (!isnan(normal_previous_global.x())) { Matf31da vertex_previous_global; vertex_previous_global.x() = vertex_map_previous.ptr(point.y())[point.x()].x; vertex_previous_global.y() = vertex_map_previous.ptr(point.y())[point.x()].y; vertex_previous_global.z() = vertex_map_previous.ptr(point.y())[point.x()].z; const float distance = (vertex_previous_global - vertex_current_global).norm(); if (distance <= distance_threshold) { 
normal_current.y() = normal_map_current.ptr(y)[x].y; normal_current.z() = normal_map_current.ptr(y)[x].z; Matf31da normal_current_global = rotation_current * normal_current; normal_previous_global.y() = normal_map_previous.ptr(point.y())[point.x()].y; normal_previous_global.z() = normal_map_previous.ptr(point.y())[point.x()].z; const float sine = normal_current_global.cross(normal_previous_global).norm(); if (sine >= angle_threshold) { n = normal_previous_global; d = vertex_previous_global; s = vertex_current_global; correspondence_found = true; } } } } } } float row[7]; if (correspondence_found) { *(Matf31da*) &row[0] = s.cross(n); *(Matf31da*) &row[3] = n; row[6] = n.dot(d - s); } else row[0] = row[1] = row[2] = row[3] = row[4] = row[5] = row[6] = 0.f; __shared__ double smem[BLOCK_SIZE_X * BLOCK_SIZE_Y]; const int tid = threadIdx.y * blockDim.x + threadIdx.x; int shift = 0; for (int i = 0; i < 6; ++i) { // Rows for (int j = i; j < 7; ++j) { // Columns and B __syncthreads(); smem[tid] = row[i] * row[j]; __syncthreads(); reduce<BLOCK_SIZE_X * BLOCK_SIZE_Y>(smem); if (tid == 0) global_buffer.ptr(shift++)[gridDim.x * blockIdx.y + blockIdx.x] = smem[0]; } } } __global__ void reduction_kernel(PtrStep<double> global_buffer, const int length, PtrStep<double> output) { double sum = 0.0; for (int t = threadIdx.x; t < length; t += 512) sum += *(global_buffer.ptr(blockIdx.x) + t); __shared__ double smem[512]; smem[threadIdx.x] = sum; __syncthreads(); reduce<512>(smem); if (threadIdx.x == 0) output.ptr(blockIdx.x)[0] = smem[0]; }; void estimate_step(const Eigen::Matrix3f& rotation_current, const Matf31da& translation_current, const cv::cuda::GpuMat& vertex_map_current, const cv::cuda::GpuMat& normal_map_current, const Eigen::Matrix3f& rotation_previous_inv, const Matf31da& translation_previous, const CameraParameters& cam_params, const cv::cuda::GpuMat& vertex_map_previous, const cv::cuda::GpuMat& normal_map_previous, float distance_threshold, float angle_threshold, Eigen::Matrix<double, 6, 6, Eigen::RowMajor>& A, Eigen::Matrix<double, 6, 1>& b) { const int cols = vertex_map_current.cols; const int rows = vertex_map_current.rows; dim3 block(BLOCK_SIZE_X, BLOCK_SIZE_Y); dim3 grid(1, 1); grid.x = static_cast<unsigned int>(std::ceil(cols / block.x)); grid.y = static_cast<unsigned int>(std::ceil(rows / block.y)); cv::cuda::GpuMat sum_buffer { cv::cuda::createContinuous(27, 1, CV_64FC1) }; cv::cuda::GpuMat global_buffer { cv::cuda::createContinuous(27, grid.x * grid.y, CV_64FC1) }; estimate_kernel<<<grid, block>>>(rotation_current, translation_current, vertex_map_current, normal_map_current, rotation_previous_inv, translation_previous, cam_params, vertex_map_previous, normal_map_previous, distance_threshold, angle_threshold, cols, rows, global_buffer); reduction_kernel<<<27, 512>>>(global_buffer, grid.x * grid.y, sum_buffer); cv::Mat host_data { 27, 1, CV_64FC1 }; sum_buffer.download(host_data); int shift = 0; for (int i = 0; i < 6; ++i) { // Rows for (int j = i; j < 7; ++j) { // Columns and B double value = host_data.ptr<double>(shift++)[0]; if (j == 6) b.data()[i] = value; else A.data()[j * 6 + i] = A.data()[i * 6 + j] = value; } } } } } }
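// Illustrative addition (not part of the KinectFusion sources): what a caller
// typically does with the A and b assembled by estimate_step. The 6x6 system
// is symmetric by construction (the kernel reduces only the upper triangle
// and the download mirrors it into A), so a Cholesky-style solve is the usual
// choice; splitting the solution into rotation-like and translation parts
// follows the row[0..2] = s x n, row[3..5] = n layout of estimate_kernel.
// The Eigen calls below are a generic sketch, not code from this module.
#include <Eigen/Dense>
#include <iostream>

int main()
{
    // Placeholders; in the real pipeline these are filled by estimate_step().
    Eigen::Matrix<double, 6, 6, Eigen::RowMajor> A =
        Eigen::Matrix<double, 6, 6, Eigen::RowMajor>::Identity();
    Eigen::Matrix<double, 6, 1> b = Eigen::Matrix<double, 6, 1>::Zero();

    Eigen::Matrix<double, 6, 1> xi = A.ldlt().solve(b);

    Eigen::Vector3d rot_params = xi.head<3>();  // small-angle rotation update (s x n block)
    Eigen::Vector3d trans      = xi.tail<3>();  // translation update (n block)
    std::cout << "rotation params: " << rot_params.transpose()
              << "  translation: " << trans.transpose() << std::endl;
    return 0;
}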
namespace faiss { namespace gpu { // Number of warps that the kernel is instantiated with constexpr int kWarps = 8; constexpr int kLanes = kWarpSize; constexpr int kMaxDistance = std::numeric_limits<int>::max(); // Performs a binary matrix multiplication, returning the lowest k results in // `vecs` for each `query` in terms of Hamming distance (a fused kernel) // Each warp calculates distance for a single query template <int NumWarpQ, int NumThreadQ, typename BinaryType> __launch_bounds__(kWarps * kLanes) __global__ void binaryDistanceAnySize(const Tensor<BinaryType, 2, true> vecs, const Tensor<BinaryType, 2, true> query, Tensor<int, 2, true> outK, Tensor<int, 2, true> outV, int k) { // A matrix tile (query, k) __shared__ BinaryType queryTile[kWarps][kLanes + 1]; // avoid bank conflict // B matrix tile (vec, k) __shared__ BinaryType vecTile[kLanes][kLanes + 1]; // avoid bank conflict WarpSelect<int, int, false, Comparator<int>, NumWarpQ, NumThreadQ, kWarps * kLanes> heap(kMaxDistance, -1, k); int warpId = threadIdx.y; int laneId = threadIdx.x; // Each warp handles a single query int warpQuery = blockIdx.x * kWarps + warpId; bool queryInBounds = warpQuery < query.getSize(0); // Each warp loops through the entire chunk of vectors for (int blockVec = 0; blockVec < vecs.getSize(0); blockVec += kLanes) { int threadDistance = 0; // Reduction dimension for (int blockK = 0; blockK < vecs.getSize(1); blockK += kLanes) { int laneK = blockK + laneId; bool kInBounds = laneK < vecs.getSize(1); queryTile[warpId][laneId] = queryInBounds && kInBounds ? query[warpQuery][laneK] : 0; // kWarps warps are responsible for loading 32 vecs #pragma unroll for (int i = 0; i < kLanes / kWarps; ++i) { int warpVec = i * kWarps + warpId; int vec = blockVec + warpVec; bool vecInBounds = vec < vecs.getSize(0); vecTile[warpVec][laneId] = vecInBounds && kInBounds ? vecs[vec][laneK] : 0; } __syncthreads(); // Compare distances #pragma unroll for (int i = 0; i < kLanes; ++i) { threadDistance += __popc(queryTile[warpId][i] ^ vecTile[laneId][i]); } __syncthreads(); } // Lanes within a warp are different vec results against the same query // Only submit distances which represent real (query, vec) pairs bool valInBounds = queryInBounds && (blockVec + laneId < vecs.getSize(0)); threadDistance = valInBounds ? threadDistance : kMaxDistance; int id = valInBounds ? 
blockVec + laneId : -1; heap.add(threadDistance, id); } heap.reduce(); if (warpQuery < query.getSize(0)) { heap.writeOut(outK[warpQuery].data(), outV[warpQuery].data(), k); } } // Version of the kernel that avoids a loop over the reduction dimension, and // thus avoids reloading the query vectors template <int NumWarpQ, int NumThreadQ, typename BinaryType, int ReductionLimit = kLanes> __global__ void __launch_bounds__(kWarps * kLanes) binaryDistanceLimitSize(const Tensor<BinaryType, 2, true> vecs, const Tensor<BinaryType, 2, true> query, Tensor<int, 2, true> outK, Tensor<int, 2, true> outV, int k) { // A matrix tile (query, k) __shared__ BinaryType queryTile[kWarps][kLanes + 1]; // avoid bank conflict // B matrix tile (vec, k) __shared__ BinaryType vecTile[kLanes][kLanes + 1]; // avoid bank conflict WarpSelect<int, int, false, Comparator<int>, NumWarpQ, NumThreadQ, kWarps * kLanes> heap(kMaxDistance, -1, k); int warpId = threadIdx.y; int laneId = threadIdx.x; // Each warp handles a single query int laneK = laneId; int warpQuery = blockIdx.x * kWarps + warpId; bool kInBounds = laneK < vecs.getSize(1); bool queryInBounds = warpQuery < query.getSize(0); queryTile[warpId][laneId] = queryInBounds && kInBounds ? query[warpQuery][laneK] : 0; // Each warp loops through the entire chunk of vectors for (int blockVec = 0; blockVec < vecs.getSize(0); blockVec += kLanes) { int threadDistance = 0; // kWarps warps are responsible for loading 32 vecs #pragma unroll for (int i = 0; i < kLanes / kWarps; ++i) { int warpVec = i * kWarps + warpId; int vec = blockVec + warpVec; bool vecInBounds = vec < vecs.getSize(0); vecTile[warpVec][laneId] = vecInBounds && kInBounds ? vecs[vec][laneK] : 0; } __syncthreads(); // Compare distances #pragma unroll for (int i = 0; i < ReductionLimit; ++i) { threadDistance += __popc(queryTile[warpId][i] ^ vecTile[laneId][i]); } __syncthreads(); // Lanes within a warp are different vec results against the same query // Only submit distances which represent real (query, vec) pairs bool valInBounds = queryInBounds && (blockVec + laneId < vecs.getSize(0)); threadDistance = valInBounds ? threadDistance : kMaxDistance; int id = valInBounds ? 
blockVec + laneId : -1; heap.add(threadDistance, id); } heap.reduce(); if (warpQuery < query.getSize(0)) { heap.writeOut(outK[warpQuery].data(), outV[warpQuery].data(), k); } } template <typename BinaryType> void runBinaryDistanceAnySize(Tensor<BinaryType, 2, true>& vecs, Tensor<BinaryType, 2, true>& query, Tensor<int, 2, true>& outK, Tensor<int, 2, true>& outV, int k, cudaStream_t stream) { dim3 grid(utils::divUp(query.getSize(0), kWarps)); dim3 block(kLanes, kWarps); if (k == 1) { binaryDistanceAnySize<1, 1, BinaryType> <<<grid, block, 0, stream>>>( vecs, query, outK, outV, k); } else if (k <= 32) { binaryDistanceAnySize<32, 2, BinaryType> <<<grid, block, 0, stream>>>( vecs, query, outK, outV, k); } else if (k <= 64) { binaryDistanceAnySize<64, 3, BinaryType> <<<grid, block, 0, stream>>>( vecs, query, outK, outV, k); } else if (k <= 128) { binaryDistanceAnySize<128, 3, BinaryType> <<<grid, block, 0, stream>>>( vecs, query, outK, outV, k); } else if (k <= 256) { binaryDistanceAnySize<256, 4, BinaryType> <<<grid, block, 0, stream>>>( vecs, query, outK, outV, k); } else if (k <= 512) { binaryDistanceAnySize<512, 8, BinaryType> <<<grid, block, 0, stream>>>( vecs, query, outK, outV, k); } else if (k <= 1024) { binaryDistanceAnySize<1024, 8, BinaryType> <<<grid, block, 0, stream>>>( vecs, query, outK, outV, k); } #if GPU_MAX_SELECTION_K >= 2048 else if (k <= 2048) { binaryDistanceAnySize<2048, 8, BinaryType> <<<grid, block, 0, stream>>>( vecs, query, outK, outV, k); } #endif } template <typename BinaryType, int ReductionLimit> void runBinaryDistanceLimitSize(Tensor<BinaryType, 2, true>& vecs, Tensor<BinaryType, 2, true>& query, Tensor<int, 2, true>& outK, Tensor<int, 2, true>& outV, int k, cudaStream_t stream) { dim3 grid(utils::divUp(query.getSize(0), kWarps)); dim3 block(kLanes, kWarps); if (k == 1) { binaryDistanceLimitSize<1, 1, BinaryType, ReductionLimit> <<<grid, block, 0, stream>>>( vecs, query, outK, outV, k); } else if (k <= 32) { binaryDistanceLimitSize<32, 2, BinaryType, ReductionLimit> <<<grid, block, 0, stream>>>( vecs, query, outK, outV, k); } else if (k <= 64) { binaryDistanceLimitSize<64, 3, BinaryType, ReductionLimit> <<<grid, block, 0, stream>>>( vecs, query, outK, outV, k); } else if (k <= 128) { binaryDistanceLimitSize<128, 3, BinaryType, ReductionLimit> <<<grid, block, 0, stream>>>( vecs, query, outK, outV, k); } else if (k <= 256) { binaryDistanceLimitSize<256, 4, BinaryType, ReductionLimit> <<<grid, block, 0, stream>>>( vecs, query, outK, outV, k); } else if (k <= 512) { binaryDistanceLimitSize<512, 8, BinaryType, ReductionLimit> <<<grid, block, 0, stream>>>( vecs, query, outK, outV, k); } else if (k <= 1024) { binaryDistanceLimitSize<1024, 8, BinaryType, ReductionLimit> <<<grid, block, 0, stream>>>( vecs, query, outK, outV, k); } #if GPU_MAX_SELECTION_K >= 2048 else if (k <= 2048) { binaryDistanceLimitSize<2048, 8, BinaryType, ReductionLimit> <<<grid, block, 0, stream>>>( vecs, query, outK, outV, k); } #endif } void runBinaryDistance(Tensor<unsigned char, 2, true>& vecs, Tensor<unsigned char, 2, true>& query, Tensor<int, 2, true>& outK, Tensor<int, 2, true>& outV, int k, cudaStream_t stream) { FAISS_ASSERT(k <= GPU_MAX_SELECTION_K); FAISS_ASSERT(vecs.getSize(1) == query.getSize(1)); FAISS_ASSERT(outK.getSize(1) == k); FAISS_ASSERT(outV.getSize(1) == k); // For the optimized uint32 kernel, we handle 32 * 8 = 256 max dims constexpr int kReductionLimit32 = 8; // For the optimized uint8 kernel, we handle 8 * 16 = 128 max dims constexpr int kReductionLimit8 = 16; // All 
other cases (large or small) go through the general kernel if (vecs.getSize(1) % sizeof(unsigned int) == 0 && (vecs.getSize(1) / sizeof(unsigned int)) <= kReductionLimit32) { auto vecs32 = vecs.castResize<unsigned int>(); auto query32 = query.castResize<unsigned int>(); // Optimize for vectors with dimensions a multiple of 32 that are less than // 32 * kReductionLimit (256) dimensions in size runBinaryDistanceLimitSize<unsigned int, kReductionLimit32>( vecs32, query32, outK, outV, k, stream); } else if (vecs.getSize(1) <= kReductionLimit8) { // Optimize for vectors with dimensions a multiple of 32 that are less than // 32 * kReductionLimit (256) dimensions in size runBinaryDistanceLimitSize<unsigned char, kReductionLimit8>( vecs, query, outK, outV, k, stream); } else { // Arbitrary size kernel runBinaryDistanceAnySize<unsigned char>( vecs, query, outK, outV, k, stream); } } } } // namespace
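// Illustrative addition (not part of the faiss sources): a CPU reference for
// the distance the kernels above accumulate. Each lane sums
// __popc(query_word ^ vec_word) over the code words, i.e. the Hamming
// distance between two binary codes; the loop below computes the same value
// per byte with std::bitset so results of runBinaryDistance can be
// cross-checked on small inputs.
#include <bitset>
#include <cstdio>
#include <vector>

static int hamming_distance(const std::vector<unsigned char>& a,
                            const std::vector<unsigned char>& b)
{
    int dist = 0;
    for (size_t i = 0; i < a.size(); ++i)
        dist += static_cast<int>(std::bitset<8>(a[i] ^ b[i]).count());
    return dist;
}

int main()
{
    std::vector<unsigned char> code_a = {0xFF, 0x00, 0xAA, 0x0F};  // two 32-bit codes
    std::vector<unsigned char> code_b = {0x0F, 0x00, 0x55, 0x0F};
    // 0xFF^0x0F has 4 set bits, 0xAA^0x55 has 8 -> distance 12.
    std::printf("hamming = %d\n", hamming_distance(code_a, code_b));
    return 0;
}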
Implements the Romein convolutional algorithm onto a GPU using CUDA. */ #include <iostream> #include <bifrost/romein.h> #include "romein_kernels.cuh" #include "assert.hpp" #include "trace.hpp" #include "utils.hpp" #include "cuda.hpp" #include "cuda/stream.hpp" #include "Complex.hpp" struct __attribute__((aligned(1))) nibble2 { // Yikes! This is dicey since the packing order is implementation dependent! signed char y:4, x:4; }; struct __attribute__((aligned(1))) blenib2 { // Yikes! This is dicey since the packing order is implementation dependent! signed char x:4, y:4; }; template<typename RealType> __host__ __device__ inline Complex<RealType> Complexfcma(Complex<RealType> x, Complex<RealType> y, Complex<RealType> d) { RealType real_res; RealType imag_res; real_res = (x.x * y.x) + d.x; imag_res = (x.x * y.y) + d.y; real_res = (x.y * y.y) + real_res; imag_res = -(x.y * y.x) + imag_res; return Complex<RealType>(real_res, imag_res); } template<typename InType, typename OutType> __global__ void romein_kernel(int nbaseline, int npol, int maxsupport, int gridsize, int nbatch, const int* __restrict__ x, const int* __restrict__ y, const int* __restrict__ z, const OutType* __restrict__ kernels, const InType* __restrict__ d_in, OutType* d_out) { int batch_no = blockIdx.x; int pol_no = threadIdx.y; int vi_s = batch_no*nbaseline*npol+pol_no; int grid_s = batch_no*npol*gridsize*gridsize + pol_no*gridsize*gridsize; for(int i = threadIdx.x; i < maxsupport * maxsupport; i += blockDim.x) { int myU = i % maxsupport; int myV = i / maxsupport; int grid_point_u = myU; int grid_point_v = myV; OutType sum = OutType(0.0, 0.0); int vi = 0; for(vi = 0; vi < (nbaseline*npol); vi+=npol) { int xl = x[vi+vi_s]; int yl = y[vi+vi_s]; // Determine convolution point. This is basically just an // optimised way to calculate. //int myConvU = myU - u; //int myConvV = myV - v; int myConvU = 0; int myConvV = 0; if( maxsupport > 1 ) { myConvU = (xl - myU) % maxsupport; myConvV = (yl - myV) % maxsupport; if (myConvU < 0) myConvU += maxsupport; if (myConvV < 0) myConvV += maxsupport; } // Determine grid point. Because of the above we know here that // myGridU % max_supp = myU // myGridV % max_supp = myV int myGridU = xl + myConvU; int myGridV = yl + myConvV; // Grid point changed? if (myGridU == grid_point_u && myGridV == grid_point_v) { // Nothin' } else { // Atomically add to grid. This is the bottleneck of this kernel. if( grid_point_u >= 0 && grid_point_u < gridsize && \ grid_point_v >= 0 && grid_point_v < gridsize ) { atomicAdd(&d_out[grid_s + gridsize*grid_point_v + grid_point_u].x, sum.x); atomicAdd(&d_out[grid_s + gridsize*grid_point_v + grid_point_u].y, sum.y); } // Switch to new point sum = OutType(0.0, 0.0); grid_point_u = myGridU; grid_point_v = myGridV; } //TODO: Re-do the w-kernel/gcf for our data. OutType px = kernels[(vi+vi_s)*maxsupport*maxsupport + myConvV * maxsupport + myConvU];// ?? 
// Sum up InType temp = d_in[vi+vi_s]; OutType vi_v = OutType(temp.x, temp.y); sum = Complexfcma(px, vi_v, sum); } if( grid_point_u >= 0 && grid_point_u < gridsize && \ grid_point_v >= 0 && grid_point_v < gridsize ) { atomicAdd(&d_out[grid_s + gridsize*grid_point_v + grid_point_u].x, sum.x); atomicAdd(&d_out[grid_s + gridsize*grid_point_v + grid_point_u].y, sum.y); } } } template<typename InType, typename OutType> __global__ void romein_kernel_sloc(int nbaseline, int npol, int maxsupport, int gridsize, int nbatch, const int* __restrict__ x, const int* __restrict__ y, const int* __restrict__ z, const OutType* __restrict__ kernels, const InType* __restrict__ d_in, OutType* d_out) { int batch_no = blockIdx.x; int pol_no = threadIdx.y; int vi_s = batch_no*nbaseline*npol+pol_no; int grid_s = batch_no*npol*gridsize*gridsize + pol_no*gridsize*gridsize; extern __shared__ int shared[]; int* xdata = shared; int* ydata = xdata + nbaseline * npol; for(int i = threadIdx.x; i < nbaseline; i += blockDim.x){ xdata[i*npol + pol_no] = x[vi_s + npol * i]; ydata[i*npol + pol_no] = y[vi_s + npol * i]; } __syncthreads(); for(int i = threadIdx.x; i < maxsupport * maxsupport; i += blockDim.x) { int myU = i % maxsupport; int myV = i / maxsupport; int grid_point_u = myU; int grid_point_v = myV; OutType sum = OutType(0.0, 0.0); int vi = 0; for(vi = 0; vi < (nbaseline*npol); vi+=npol) { int xl = xdata[vi+pol_no]; int yl = ydata[vi+pol_no]; // Determine convolution point. This is basically just an // optimised way to calculate. //int myConvU = myU - u; //int myConvV = myV - v; int myConvU = 0; int myConvV = 0; if( maxsupport > 1 ) { myConvU = (xl - myU) % maxsupport; myConvV = (yl - myV) % maxsupport; if (myConvU < 0) myConvU += maxsupport; if (myConvV < 0) myConvV += maxsupport; } // Determine grid point. Because of the above we know here that // myGridU % max_supp = myU // myGridV % max_supp = myV int myGridU = xl + myConvU; int myGridV = yl + myConvV; // Grid point changed? if (myGridU == grid_point_u && myGridV == grid_point_v) { // Nothin' } else { // Atomically add to grid. This is the bottleneck of this kernel. if( grid_point_u >= 0 && grid_point_u < gridsize && \ grid_point_v >= 0 && grid_point_v < gridsize ) { atomicAdd(&d_out[grid_s + gridsize*grid_point_v + grid_point_u].x, sum.x); atomicAdd(&d_out[grid_s + gridsize*grid_point_v + grid_point_u].y, sum.y); } // Switch to new point sum = OutType(0.0, 0.0); grid_point_u = myGridU; grid_point_v = myGridV; } //TODO: Re-do the w-kernel/gcf for our data. OutType px = kernels[(vi+vi_s)*maxsupport*maxsupport + myConvV * maxsupport + myConvU];// ?? 
// Sum up InType temp = d_in[vi+vi_s]; OutType vi_v = OutType(temp.x, temp.y); sum = Complexfcma(px, vi_v, sum); } if( grid_point_u >= 0 && grid_point_u < gridsize && \ grid_point_v >= 0 && grid_point_v < gridsize ) { atomicAdd(&d_out[grid_s + gridsize*grid_point_v + grid_point_u].x, sum.x); atomicAdd(&d_out[grid_s + gridsize*grid_point_v + grid_point_u].y, sum.y); } } } template<typename InType, typename OutType> inline void launch_romein_kernel(int nbaseline, int npol, bool polmajor, int maxsupport, int gridsize, int nbatch, int* xpos, int* ypos, int* zpos, OutType* kernels, InType* d_in, OutType* d_out, cudaStream_t stream=0) { //cout << "LAUNCH for " << nelement << endl; dim3 block(8,1); dim3 grid(nbatch*npol,1); if( polmajor ) { npol = 1; } else { block.y = npol; grid.x = nbatch; } /* cout << " Block size is " << block.x << " by " << block.y << endl; cout << " Grid size is " << grid.x << " by " << grid.y << endl; */ void* args[] = {&nbaseline, &npol, &maxsupport, &gridsize, &nbatch, &xpos, &ypos, &zpos, &kernels, &d_in, &d_out}; size_t loc_size = 2 * nbaseline * npol * sizeof(int); if(loc_size <= BF_GPU_SHAREDMEM) { BF_CHECK_CUDA_EXCEPTION(cudaLaunchKernel((void*)romein_kernel_sloc<InType,OutType>, grid, block, &args[0], 2*nbaseline*npol*sizeof(int), stream), BF_STATUS_INTERNAL_ERROR); } else { BF_CHECK_CUDA_EXCEPTION(cudaLaunchKernel((void*)romein_kernel<InType,OutType>, grid, block, &args[0], 0, stream), BF_STATUS_INTERNAL_ERROR); } } class BFromein_impl { typedef int IType; typedef double FType; public: // HACK WAR for what looks like a bug in the CUDA 7.0 compiler typedef float DType; private: IType _nbaseline; IType _npol; bool _polmajor; IType _maxsupport; IType _gridsize; IType _nxyz = 0; int* _x = NULL; int* _y = NULL; int* _z = NULL; IType _nkernels = 0; BFdtype _tkernels = BF_DTYPE_INT_TYPE; void* _kernels = NULL; cudaStream_t _stream; public: BFromein_impl() : _nbaseline(1), _npol(1), _polmajor(true), \ _maxsupport(1), _stream(g_cuda_stream) {} inline IType nbaseline() const { return _nbaseline; } inline IType npol() const { return _npol; } inline bool polmajor() const { return _polmajor; } inline IType maxsupport() const { return _maxsupport; } inline IType gridsize() const { return _gridsize; } inline IType nxyz() const { return _nxyz; } inline IType nkernels() const { return _nkernels; } inline IType tkernels() const { return _tkernels; } void init(IType nbaseline, IType npol, bool polmajor, IType maxsupport, IType gridsize) { BF_TRACE(); _nbaseline = nbaseline; _npol = npol; _polmajor = polmajor; _maxsupport = maxsupport; _gridsize = gridsize; } void set_positions(BFarray const* positions) { BF_TRACE(); BF_TRACE_STREAM(_stream); BF_ASSERT_EXCEPTION(positions->dtype == BF_DTYPE_I32, BF_STATUS_UNSUPPORTED_DTYPE); int npositions = positions->shape[1]; int stride = positions->shape[1]; for(int i=2; i<positions->ndim-2; ++i) { npositions *= positions->shape[i]; stride *= positions->shape[i]; } stride *= positions->shape[positions->ndim-2]; stride *= positions->shape[positions->ndim-1]; _nxyz = npositions; _x = (int *) positions->data; _y = _x + stride; _z = _y + stride; } void set_kernels(BFarray const* kernels) { BF_TRACE(); BF_TRACE_STREAM(_stream); BF_ASSERT_EXCEPTION(kernels->dtype == BF_DTYPE_CF32 \ || BF_DTYPE_CF64, BF_STATUS_UNSUPPORTED_DTYPE); int nkernels = kernels->shape[0]; for(int i=1; i<kernels->ndim-4; ++i) { nkernels *= kernels->shape[i]; } _nkernels = nkernels; _tkernels = kernels->dtype; _kernels = (void*) kernels->data; } void execute(BFarray const* in, 
BFarray const* out) { BF_TRACE(); BF_TRACE_STREAM(_stream); BF_ASSERT_EXCEPTION(_x != NULL, BF_STATUS_INVALID_STATE); BF_ASSERT_EXCEPTION(_y != NULL, BF_STATUS_INVALID_STATE); BF_ASSERT_EXCEPTION(_z != NULL, BF_STATUS_INVALID_STATE); BF_ASSERT_EXCEPTION(_kernels != NULL, BF_STATUS_INVALID_STATE); BF_ASSERT_EXCEPTION(out->dtype == BF_DTYPE_CF32 \ || BF_DTYPE_CF64, BF_STATUS_UNSUPPORTED_DTYPE); BF_CHECK_CUDA_EXCEPTION(cudaGetLastError(), BF_STATUS_INTERNAL_ERROR); int nbatch = in->shape[0]; #define LAUNCH_ROMEIN_KERNEL(IterType,OterType) \ launch_romein_kernel(_nbaseline, _npol, _polmajor, _maxsupport, _gridsize, nbatch, \ _x, _y, _z, (OterType)_kernels, \ (IterType)in->data, (OterType)out->data, \ _stream) switch( in->dtype ) { case BF_DTYPE_CI4: if( in->big_endian ) { switch( out->dtype ) { case BF_DTYPE_CF32: LAUNCH_ROMEIN_KERNEL(nibble2*, Complex32*); break; case BF_DTYPE_CF64: LAUNCH_ROMEIN_KERNEL(nibble2*, Complex64*); break; default: BF_ASSERT_EXCEPTION(false, BF_STATUS_UNSUPPORTED_DTYPE); }; } else { switch( out->dtype ) { case BF_DTYPE_CF32: LAUNCH_ROMEIN_KERNEL(blenib2*, Complex32*); break; case BF_DTYPE_CF64: LAUNCH_ROMEIN_KERNEL(blenib2*, Complex64*); break; default: BF_ASSERT_EXCEPTION(false, BF_STATUS_UNSUPPORTED_DTYPE); }; } break; case BF_DTYPE_CI8: switch( out->dtype ) { case BF_DTYPE_CF32: LAUNCH_ROMEIN_KERNEL(char2*, Complex32*); break; case BF_DTYPE_CF64: LAUNCH_ROMEIN_KERNEL(char2*, Complex64*); break; default: BF_ASSERT_EXCEPTION(false, BF_STATUS_UNSUPPORTED_DTYPE); }; break; case BF_DTYPE_CI16: switch( out->dtype ) { case BF_DTYPE_CF32: LAUNCH_ROMEIN_KERNEL(short2*, Complex32*); break; case BF_DTYPE_CF64: LAUNCH_ROMEIN_KERNEL(short2*, Complex64*); break; default: BF_ASSERT_EXCEPTION(false, BF_STATUS_UNSUPPORTED_DTYPE); } break; case BF_DTYPE_CI32: switch( out->dtype ) { case BF_DTYPE_CF32: LAUNCH_ROMEIN_KERNEL(int2*, Complex32*); break; case BF_DTYPE_CF64: LAUNCH_ROMEIN_KERNEL(int2*, Complex64*); break; default: BF_ASSERT_EXCEPTION(false, BF_STATUS_UNSUPPORTED_DTYPE); } break; case BF_DTYPE_CI64: switch( out->dtype ) { case BF_DTYPE_CF32: LAUNCH_ROMEIN_KERNEL(long2*, Complex32*); break; case BF_DTYPE_CF64: LAUNCH_ROMEIN_KERNEL(long2*, Complex64*); break; default: BF_ASSERT_EXCEPTION(false, BF_STATUS_UNSUPPORTED_DTYPE); } break; case BF_DTYPE_CF32: switch( out->dtype ) { case BF_DTYPE_CF32: LAUNCH_ROMEIN_KERNEL(float2*, Complex32*); break; case BF_DTYPE_CF64: LAUNCH_ROMEIN_KERNEL(float2*, Complex64*); break; default: BF_ASSERT_EXCEPTION(false, BF_STATUS_UNSUPPORTED_DTYPE); } break; case BF_DTYPE_CF64: switch( out->dtype ) { case BF_DTYPE_CF32: LAUNCH_ROMEIN_KERNEL(double2*, Complex32*); break; case BF_DTYPE_CF64: LAUNCH_ROMEIN_KERNEL(double2*, Complex64*); break; default: BF_ASSERT_EXCEPTION(false, BF_STATUS_UNSUPPORTED_DTYPE); } break; default: BF_ASSERT_EXCEPTION(false, BF_STATUS_UNSUPPORTED_DTYPE); } #undef LAUNCH_ROMEIN_KERNEL BF_CHECK_CUDA_EXCEPTION(cudaGetLastError(), BF_STATUS_INTERNAL_ERROR); } void set_stream(cudaStream_t stream) { _stream = stream; } }; BFstatus bfRomeinCreate(BFromein* plan_ptr) { BF_TRACE(); BF_ASSERT(plan_ptr, BF_STATUS_INVALID_POINTER); BF_TRY_RETURN_ELSE(*plan_ptr = new BFromein_impl(), *plan_ptr = 0); } BFstatus bfRomeinInit(BFromein plan, BFarray const* positions, BFarray const* kernels, BFsize gridsize, BFbool polmajor) { BF_TRACE(); BF_ASSERT(plan, BF_STATUS_INVALID_HANDLE); BF_ASSERT(positions, BF_STATUS_INVALID_POINTER); BF_ASSERT(positions->ndim >= 4, BF_STATUS_INVALID_SHAPE); BF_ASSERT(positions->shape[0] == 3, 
BF_STATUS_INVALID_SHAPE); BF_ASSERT(space_accessible_from(positions->space, BF_SPACE_CUDA), BF_STATUS_INVALID_SPACE); BF_ASSERT(kernels, BF_STATUS_INVALID_POINTER); BF_ASSERT(kernels->ndim >= 5, BF_STATUS_INVALID_SHAPE); BF_ASSERT(kernels->shape[kernels->ndim-2] \ == kernels->shape[kernels->ndim-1], BF_STATUS_INVALID_SHAPE); BF_ASSERT(space_accessible_from(kernels->space, BF_SPACE_CUDA), BF_STATUS_INVALID_SPACE); // Discover the dimensions of the positions/kernels. int npositions, nbaseline, npol, nkernels, maxsupport; npositions = positions->shape[1]; for(int i=2; i<positions->ndim-2; ++i) { npositions *= positions->shape[i]; } if( polmajor ) { npol = positions->shape[positions->ndim-2]; nbaseline = positions->shape[positions->ndim-1]; } else { nbaseline = positions->shape[positions->ndim-2]; npol = positions->shape[positions->ndim-1]; } nkernels = kernels->shape[0]; for(int i=1; i<kernels->ndim-4; ++i) { nkernels *= kernels->shape[i]; } maxsupport = kernels->shape[kernels->ndim-1]; // Validate BF_ASSERT(npositions == nkernels, BF_STATUS_INVALID_SHAPE); BF_ASSERT(kernels->shape[kernels->ndim-4] \ == positions->shape[positions->ndim-2], BF_STATUS_INVALID_SHAPE); BF_ASSERT(kernels->shape[kernels->ndim-3] \ == positions->shape[positions->ndim-1], BF_STATUS_INVALID_SHAPE); BF_ASSERT(kernels->shape[kernels->ndim-2] \ == kernels->shape[kernels->ndim-1], BF_STATUS_INVALID_SHAPE); BF_TRY(plan->init(nbaseline, npol, polmajor, maxsupport, gridsize)); BF_TRY(plan->set_positions(positions)); BF_TRY_RETURN(plan->set_kernels(kernels)); } BFstatus bfRomeinSetStream(BFromein plan, void const* stream) { BF_TRACE(); BF_ASSERT(plan, BF_STATUS_INVALID_HANDLE); BF_ASSERT(stream, BF_STATUS_INVALID_POINTER); BF_TRY_RETURN(plan->set_stream(*(cudaStream_t*)stream)); } BFstatus bfRomeinSetPositions(BFromein plan, BFarray const* positions) { BF_ASSERT(plan, BF_STATUS_INVALID_HANDLE); BF_ASSERT(positions, BF_STATUS_INVALID_POINTER); BF_ASSERT(positions->ndim >= 4, BF_STATUS_INVALID_SHAPE ); BF_ASSERT(positions->shape[0] == 3, BF_STATUS_INVALID_SHAPE ); BF_ASSERT(space_accessible_from(positions->space, BF_SPACE_CUDA), BF_STATUS_INVALID_SPACE); if( plan->polmajor() ) { BF_ASSERT(positions->shape[positions->ndim-2] == plan->npol(), BF_STATUS_INVALID_SHAPE ); BF_ASSERT(positions->shape[positions->ndim-1] == plan->nbaseline(), BF_STATUS_INVALID_SHAPE ); } else { BF_ASSERT(positions->shape[positions->ndim-2] == plan->nbaseline(), BF_STATUS_INVALID_SHAPE ); BF_ASSERT(positions->shape[positions->ndim-1] == plan->npol(), BF_STATUS_INVALID_SHAPE ); } BF_TRY_RETURN(plan->set_positions(positions)); } BFstatus bfRomeinSetKernels(BFromein plan, BFarray const* kernels) { BF_ASSERT(plan, BF_STATUS_INVALID_HANDLE); BF_ASSERT(kernels, BF_STATUS_INVALID_POINTER); BF_ASSERT(kernels->ndim >= 5, BF_STATUS_INVALID_SHAPE ); if( plan->polmajor() ) { BF_ASSERT(kernels->shape[kernels->ndim-4] == plan->npol(), BF_STATUS_INVALID_SHAPE ); BF_ASSERT(kernels->shape[kernels->ndim-3] == plan->nbaseline(), BF_STATUS_INVALID_SHAPE ); } else { BF_ASSERT(kernels->shape[kernels->ndim-4] == plan->nbaseline(), BF_STATUS_INVALID_SHAPE ); BF_ASSERT(kernels->shape[kernels->ndim-3] == plan->npol(), BF_STATUS_INVALID_SHAPE ); } BF_ASSERT(kernels->shape[kernels->ndim-2] == plan->maxsupport(), BF_STATUS_INVALID_SHAPE ); BF_ASSERT(kernels->shape[kernels->ndim-1] == plan->maxsupport(), BF_STATUS_INVALID_SHAPE ); BF_ASSERT(space_accessible_from(kernels->space, BF_SPACE_CUDA), BF_STATUS_INVALID_SPACE); BF_TRY_RETURN(plan->set_kernels(kernels)); } BFstatus 
bfRomeinExecute(BFromein plan, BFarray const* in, BFarray const* out) { BF_TRACE(); BF_ASSERT(plan, BF_STATUS_INVALID_HANDLE); BF_ASSERT(in, BF_STATUS_INVALID_POINTER); BF_ASSERT(out, BF_STATUS_INVALID_POINTER); BF_ASSERT( in->ndim >= 3, BF_STATUS_INVALID_SHAPE); BF_ASSERT(out->ndim == in->ndim+1, BF_STATUS_INVALID_SHAPE); BFarray in_flattened; if( in->ndim > 3 ) { // Keep the last two dim but attempt to flatten all others unsigned long keep_dims_mask = padded_dims_mask(in); keep_dims_mask |= 0x1 << (in->ndim-1); keep_dims_mask |= 0x1 << (in->ndim-2); keep_dims_mask |= 0x1 << (in->ndim-3); flatten(in, &in_flattened, keep_dims_mask); in = &in_flattened; BF_ASSERT(in_flattened.ndim == 3, BF_STATUS_UNSUPPORTED_SHAPE); } /* std::cout << "ndim = " << in->ndim << std::endl; std::cout << " 0 = " << in->shape[0] << std::endl; std::cout << " 1 = " << in->shape[1] << std::endl; std::cout << " 2 = " << in->shape[2] << std::endl; */ BF_ASSERT( in->shape[0] == plan->nxyz(), BF_STATUS_INVALID_SHAPE); BF_ASSERT( in->shape[0] == plan->nkernels(), BF_STATUS_INVALID_SHAPE); if( plan->polmajor() ) { BF_ASSERT( in->shape[1] == plan->npol(), BF_STATUS_INVALID_SHAPE); BF_ASSERT( in->shape[2] == plan->nbaseline(), BF_STATUS_INVALID_SHAPE); } else { BF_ASSERT( in->shape[1] == plan->nbaseline(), BF_STATUS_INVALID_SHAPE); BF_ASSERT( in->shape[2] == plan->npol(), BF_STATUS_INVALID_SHAPE); } BFarray out_flattened; if( out->ndim > 4 ) { // Keep the last three dim but attempt to flatten all others unsigned long keep_dims_mask = padded_dims_mask(out); keep_dims_mask |= 0x1 << (out->ndim-1); keep_dims_mask |= 0x1 << (out->ndim-2); keep_dims_mask |= 0x1 << (out->ndim-3); keep_dims_mask |= 0x1 << (out->ndim-4); flatten(out, &out_flattened, keep_dims_mask); out = &out_flattened; BF_ASSERT(out_flattened.ndim == 4, BF_STATUS_UNSUPPORTED_SHAPE); } /* std::cout << "ndim = " << out->ndim << std::endl; std::cout << " 0 = " << out->shape[0] << std::endl; std::cout << " 1 = " << out->shape[1] << std::endl; std::cout << " 2 = " << out->shape[2] << std::endl; std::cout << " 3 = " << out->shape[3] << std::endl; */ BF_ASSERT(out->shape[0] == plan->nxyz(), BF_STATUS_INVALID_SHAPE); BF_ASSERT(out->shape[0] == plan->nkernels(), BF_STATUS_INVALID_SHAPE); BF_ASSERT(out->shape[1] == plan->npol(), BF_STATUS_INVALID_SHAPE); BF_ASSERT(out->shape[2] == plan->gridsize(), BF_STATUS_INVALID_SHAPE); BF_ASSERT(out->shape[3] == plan->gridsize(), BF_STATUS_INVALID_SHAPE); BF_ASSERT(out->dtype == plan->tkernels(), BF_STATUS_UNSUPPORTED_DTYPE); BF_ASSERT(space_accessible_from( in->space, BF_SPACE_CUDA), BF_STATUS_INVALID_SPACE); BF_ASSERT(space_accessible_from(out->space, BF_SPACE_CUDA), BF_STATUS_INVALID_SPACE); BF_TRY_RETURN(plan->execute(in, out)); } BFstatus bfRomeinDestroy(BFromein plan) { BF_TRACE(); BF_ASSERT(plan, BF_STATUS_INVALID_HANDLE); delete plan; return BF_STATUS_SUCCESS; }
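/*
 * Illustrative sketch (not part of the original Bifrost source; the helper
 * name is hypothetical): the per-thread index arithmetic used by
 * romein_kernel above, pulled out as a small host/device function so it can
 * be checked in isolation.  Each thread owns one (myU, myV) cell of the
 * maxsupport x maxsupport convolution footprint; for a visibility whose
 * kernel origin sits at integer grid position (xl, yl) it derives the kernel
 * tap to apply (myConvU, myConvV) and the absolute grid cell to accumulate
 * into (myGridU, myGridV).  As myU and myV range over 0..maxsupport-1, the
 * cells xl..xl+maxsupport-1 by yl..yl+maxsupport-1 are each covered exactly
 * once, which is what lets the kernel keep a running sum per thread and only
 * flush it (atomically) when its grid cell changes.
 */
__host__ __device__ inline void romein_map_point(int myU, int myV,
                                                 int xl, int yl,
                                                 int maxsupport,
                                                 int* myConvU, int* myConvV,
                                                 int* myGridU, int* myGridV) {
    int cu = 0, cv = 0;
    if( maxsupport > 1 ) {
        cu = (xl - myU) % maxsupport;    // same arithmetic as the kernel above
        cv = (yl - myV) % maxsupport;
        if( cu < 0 ) cu += maxsupport;
        if( cv < 0 ) cv += maxsupport;
    }
    *myConvU = cu;                       // kernel tap (offset from xl, yl)
    *myConvV = cv;
    *myGridU = xl + cu;                  // grid cell receiving the running sum
    *myGridV = yl + cv;
}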
the_stack
extern "c" __sync_lock_test_and_set(...); using namespace std; /* TODO: - Allow it to run with max_width > 512 (maximum thread block width) - tanh function that gives bit-for-bit equivalent results as on the host - Remove learning rate from the update (apply it when updating the weights) and use a constant that conditions the numbers to work well within the range of the update - Try using textures for the W arrays (caching could make a big difference) */ typedef ML::FixedPointAccum32 UpdateFloat; //typedef float UpdateFloat; /** Given an activation function and an input, apply that activation function */ __device__ float transform(float input, int activation) { switch (activation) { case ML::ACT_TANH: { return tanh(input); //float exp2i = __expf(input + input); //return __fdividef(exp2i - 1.0f, exp2i + 1.0f); } case ML::ACT_IDENTITY: return input; default: return 0.0; } } /** Given an output and an error, what's the delta (derivative * error)? */ __device__ float delta(float output, float error, int activation) { switch (activation) { case ML::ACT_TANH: return (1.0f - output * output) * error; case ML::ACT_IDENTITY: return output * error; default: return 0.0; } } texture<float, 1, cudaReadModeElementType> weights_tex; texture<float, 1, cudaReadModeElementType> biases_tex;; template<const texture<float, 1, cudaReadModeElementType> & Tex> struct WeightsAccess { const float * base; // if zero, then texture access int offset; __device__ WeightsAccess(const float * base = 0) : base(base), offset(0) { } __device__ void init(const float * base) { this->base = base; offset = 0; } __device__ void operator += (int val) { offset += val; } __device__ void operator -= (int val) { offset -= val; } __device__ float operator [] (int ofs) { if (base) return base[offset + ofs]; else return tex1Dfetch(Tex, offset + ofs); } }; /** Train a fully-connected neural network architecture via backpropagation one a single training example. The work is split over all of the cores within a single multiprocessor. (So, on a Geforce 260 core 216, we have 27 multiprocessors with 8 cores each, and so we could train on 27 different feature vectors in parallel. 
*/ #define N 4 #define train_N_examples train_4_examples #include "backprop_cuda_train_N_examples.cu" #undef N #undef train_N_examples #define N 1 #define train_N_examples train_1_example #include "backprop_cuda_train_N_examples.cu" #undef N #undef train_N_examples #include "backprop_cuda_one_example.cu" __global__ void train_examples_kernel(const float * feature_vectors, // feature vector [ni] int feature_vector_width, const int * labels, const float * example_weights, int num_layers, const float * w, // weights for each layer const float * biases, // for each layer const int * architecture, const int * w_strides, UpdateFloat * const * w_updates, // wt updates for each layer UpdateFloat * const * b_updates, // bias upd for each layer UpdateFloat * const * w_updates2, // wt updates for each layer UpdateFloat * const * b_updates2, // bias upd for each layer int activation, // activation function float inhibit, // target value for inhibited neuron) float fire, // target value for firing neuron float learning_rate, int num_threads_in_block, int num_threads_on_multiprocessor, int total_neurons, float * layer_outputs, // scratch space[total neurons] float * layer_outputs2, // scratch space[total neurons] int examples_per_block, int total_num_examples, int max_width, bool use_textures) { const unsigned block_num = blockIdx.x; /* Where we accumulate our errors, layer by layer. The size is that of the largest dimension. */ extern __shared__ float scratch[]; /* The layer outputs (activation of the neurons). This is where the shared memory goes to. Note that we store only the activated outputs, not the inputs. blockDim.x gives us the number of threads, which is also the size of the errors array, so that our layer outputs have to start at this offset. */ // Get our private scratch memory for this block layer_outputs += block_num * total_neurons; unsigned example_num_base = block_num * examples_per_block; unsigned last_example = min(total_num_examples, example_num_base + examples_per_block); unsigned example_num = example_num_base; WeightsAccess<weights_tex> weights_access; WeightsAccess<biases_tex> biases_access; if (!use_textures) { weights_access.init(w); biases_access.init(biases); } #if 1 for (; example_num < last_example; example_num += 4) { const float * input = feature_vectors + example_num * feature_vector_width; train_4_examples(input, labels + example_num, example_weights + example_num, min(4, last_example - example_num), num_layers, scratch, weights_access, biases_access, architecture, w_strides, w_updates, b_updates, activation, inhibit, fire, learning_rate, num_threads_in_block, num_threads_on_multiprocessor, total_neurons, max_width, layer_outputs); train_4_examples(input, labels + example_num, example_weights + example_num, min(4, last_example - example_num), num_layers, scratch, weights_access, biases_access, architecture, w_strides, w_updates2, b_updates2, activation, inhibit, fire, learning_rate, num_threads_in_block, num_threads_on_multiprocessor, total_neurons, max_width, layer_outputs2); } #elif 0 // Do any others singly for (; example_num < last_example; ++example_num) { const float * input = feature_vectors + example_num * feature_vector_width; train_4_examples(input, labels + example_num, example_weights + example_num, 1 /* num valid examples */, num_layers, scratch, weights_access, biases_access, architecture, w_strides, w_updates, b_updates, activation, inhibit, fire, learning_rate, num_threads_in_block, num_threads_on_multiprocessor, total_neurons, max_width, 
layer_outputs); } #endif // Do any others singly for (; example_num < last_example; ++example_num) { const float * input = feature_vectors + example_num * feature_vector_width; #if 1 train_1_example(input, labels + example_num, example_weights + example_num, 1 /* num valid examples */, num_layers, scratch, weights_access, biases_access, architecture, w_strides, w_updates, b_updates, activation, inhibit, fire, learning_rate, num_threads_in_block, num_threads_on_multiprocessor, total_neurons, max_width, layer_outputs); train_1_example(input, labels + example_num, example_weights + example_num, 1 /* num valid examples */, num_layers, scratch, weights_access, biases_access, architecture, w_strides, w_updates2, b_updates2, activation, inhibit, fire, learning_rate, num_threads_in_block, num_threads_on_multiprocessor, total_neurons, max_width, layer_outputs); #else train_example(input, labels[example_num], example_weights[example_num], num_layers, scratch, weights_access, biases_access, architecture, w_strides, w_updates, b_updates, activation, inhibit, fire, learning_rate, num_threads_in_block, num_threads_on_multiprocessor, total_neurons, layer_outputs); #endif } } namespace ML { namespace CUDA { struct Backprop::Plan { int num_layers; vector<int> architecture; DeviceData<int> d_architecture; DeviceData<float> d_weights; DeviceData<float> d_biases; vector<int> w_strides; DeviceData<int> d_w_strides; Activation activation; float inhibit; float fire; float learning_rate; int max_width; int total_neurons; // We need our grid size to be exactly the maximum width of the output dim3 threads; int shared_mem_stride; size_t shared_mem_size; bool use_textures; Plan(int num_layers, const int * architecture, const float * const * weights, const float * const * biases, const int * w_strides, Activation activation, float inhibit, float fire, float learning_rate, bool on_host, bool use_textures) : num_layers(num_layers), architecture(architecture, architecture + num_layers + 1), w_strides(w_strides, w_strides + num_layers), activation(activation), inhibit(inhibit), fire(fire), learning_rate(learning_rate), use_textures(use_textures) { //cerr << "plan: num_layers = " << num_layers << endl; d_architecture.init(architecture, num_layers + 1); size_t total_weights_size = 0; size_t total_bias_size = 0; for (unsigned l = 0; l < num_layers; ++l) { int ni = architecture[l]; int no = architecture[l + 1]; int w_stride = w_strides[l]; total_weights_size += ni * w_stride; total_bias_size += no; // TODO: align? } d_weights.init(total_weights_size); d_biases.init(total_bias_size); // Now copy them all in size_t weights_start_offset = 0; size_t bias_start_offset = 0; for (unsigned l = 0; l < num_layers; ++l) { int ni = architecture[l]; int no = architecture[l + 1]; int w_stride = w_strides[l]; size_t w_size = ni * w_stride; cudaError_t err = cudaMemcpy(d_weights + weights_start_offset, weights[l], w_size * sizeof(float), cudaMemcpyHostToDevice); if (err != cudaSuccess) throw Exception(cudaGetErrorString(err)); err = cudaMemcpy(d_biases + bias_start_offset, biases[l], no * sizeof(float), cudaMemcpyHostToDevice); if (err != cudaSuccess) throw Exception(cudaGetErrorString(err)); weights_start_offset += ni * w_stride; bias_start_offset += no; // TODO: align? 
} d_w_strides.init(w_strides, num_layers); max_width = 0; total_neurons = 0; for (unsigned l = 0; l <= num_layers; ++l) { max_width = max(max_width, architecture[l]); total_neurons += architecture[l]; } // We need our grid size to be exactly the maximum width of the output threads = dim3(max_width); // Storage for max_width shared_mem_stride = max_width * sizeof(float); // Since we do 4 examples per loop, we need enough memory for all of // the four outputs for a single layer shared_mem_size = shared_mem_stride * 4; if (use_textures) { cudaError_t err; err = cudaBindTexture(0, weights_tex, d_weights); if (err != cudaSuccess) throw Exception(cudaGetErrorString(err)); err = cudaBindTexture(0, biases_tex, d_biases); if (err != cudaSuccess) throw Exception(cudaGetErrorString(err)); } } }; struct Backprop::Context { const Plan & plan; DeviceData<float> d_feature_vectors; DeviceData<float> d_example_weights; DeviceData<int> d_labels; float * const * weight_updates; float * const * bias_updates; vector<DeviceData<UpdateFloat> > d_weight_updates_storage; vector<UpdateFloat *> weight_updates_vec; DeviceData<UpdateFloat *> d_weight_updates; vector<DeviceData<UpdateFloat> > d_bias_updates_storage; vector<UpdateFloat *> bias_updates_vec; DeviceData<UpdateFloat *> d_bias_updates; vector<DeviceData<UpdateFloat> > d_weight_updates2_storage; vector<UpdateFloat *> weight_updates2_vec; DeviceData<UpdateFloat *> d_weight_updates2; vector<DeviceData<UpdateFloat> > d_bias_updates2_storage; vector<UpdateFloat *> bias_updates2_vec; DeviceData<UpdateFloat *> d_bias_updates2; DeviceData<float> d_layer_outputs; DeviceData<float> d_layer_outputs2; dim3 grid; int num_feature_vectors; int feature_vector_width; int num_examples_per_invocation; Context(const Plan & plan, const float * feature_vectors, int num_feature_vectors, const float * example_weights, const int * labels, float * const * weight_updates, float * const * bias_updates, float & correct, float & total, float & rms_error) : plan(plan), weight_updates(weight_updates), bias_updates(bias_updates), num_feature_vectors(num_feature_vectors), feature_vector_width(feature_vector_width) { feature_vector_width = plan.architecture[0]; //cerr << "num_feature_vectors = " << num_feature_vectors << endl; //cerr << "feature_vector_width = " << feature_vector_width // << endl; d_feature_vectors.init(feature_vectors, num_feature_vectors * feature_vector_width); d_example_weights.init(example_weights, num_feature_vectors); d_labels.init(labels, num_feature_vectors); d_weight_updates_storage.resize(plan.num_layers); weight_updates_vec.resize(plan.num_layers); for (unsigned l = 0; l < plan.num_layers; ++l) { int ni = plan.architecture[l]; int w_stride = plan.w_strides[l]; d_weight_updates_storage[l].init_zeroed(ni * w_stride); weight_updates_vec[l] = d_weight_updates_storage[l]; } d_weight_updates.init(&weight_updates_vec[0], plan.num_layers); d_bias_updates_storage.resize(plan.num_layers); bias_updates_vec.resize(plan.num_layers); for (unsigned l = 0; l < plan.num_layers; ++l) { int no = plan.architecture[l + 1]; d_bias_updates_storage[l].init_zeroed(no); bias_updates_vec[l] = d_bias_updates_storage[l]; } d_bias_updates.init(&bias_updates_vec[0], plan.num_layers); d_weight_updates2_storage.resize(plan.num_layers); weight_updates2_vec.resize(plan.num_layers); for (unsigned l = 0; l < plan.num_layers; ++l) { int ni = plan.architecture[l]; int w_stride = plan.w_strides[l]; d_weight_updates2_storage[l].init_zeroed(ni * w_stride); weight_updates2_vec[l] = 
d_weight_updates2_storage[l]; } d_weight_updates2.init(&weight_updates2_vec[0], plan.num_layers); d_bias_updates2_storage.resize(plan.num_layers); bias_updates2_vec.resize(plan.num_layers); for (unsigned l = 0; l < plan.num_layers; ++l) { int no = plan.architecture[l + 1]; d_bias_updates2_storage[l].init_zeroed(no); bias_updates2_vec[l] = d_bias_updates2_storage[l]; } d_bias_updates2.init(&bias_updates2_vec[0], plan.num_layers); num_examples_per_invocation = 4;//16; int grid_size = rudiv(num_feature_vectors, num_examples_per_invocation); // Get the scratch space. This is 4 in flight examples for each // of the concurrent threads. d_layer_outputs.init(plan.total_neurons * grid_size * 4); d_layer_outputs2.init(plan.total_neurons * grid_size * 4); // Our grid size is one per example grid = dim3(grid_size); } void execute() { train_examples_kernel<<<grid, plan.threads, plan.shared_mem_size>>> (d_feature_vectors, feature_vector_width, d_labels, d_example_weights, plan.num_layers, plan.d_weights, plan.d_biases, plan.d_architecture, plan.d_w_strides, d_weight_updates, d_bias_updates, d_weight_updates2, d_bias_updates2, plan.activation, plan.inhibit, plan.fire, plan.learning_rate, grid.x, plan.threads.x, plan.total_neurons, d_layer_outputs, d_layer_outputs2, num_examples_per_invocation, num_feature_vectors /* total num examples */, plan.max_width, plan.use_textures); //cerr << "launched" << endl; } void synchronize() { //cerr << "waiting for execution" << endl; cudaError_t err = cudaThreadSynchronize(); if (err != cudaSuccess) throw Exception(cudaGetErrorString(err)); //cerr << "copying memory back" << endl; /* Copy back the layer outputs */ for (unsigned l = 0; l < plan.num_layers; ++l) { int ni = plan.architecture[l]; int w_stride = plan.w_strides[l]; UpdateFloat sync_to[ni * w_stride]; d_weight_updates_storage[l].sync(sync_to); std::copy(sync_to, sync_to + ni * w_stride, weight_updates[l]); #if 0 cerr << "first 10 weight updates for layer " << l << ": "; for (unsigned i = 0; i < 10; ++i) cerr << sync_to[i] << " "; cerr << endl; #endif } for (unsigned l = 0; l < plan.num_layers; ++l) { int ni = plan.architecture[l]; int w_stride = plan.w_strides[l]; UpdateFloat sync_to[ni * w_stride]; d_weight_updates_storage[l].sync(sync_to); std::copy(sync_to, sync_to + ni * w_stride, weight_updates[l]); #if 0 cerr << "first 10 weight updates for layer " << l << ": "; for (unsigned i = 0; i < 10; ++i) cerr << sync_to[i] << " "; cerr << endl; #endif } for (unsigned l = 0; l < plan.num_layers; ++l) { int ni = plan.architecture[l]; int w_stride = plan.w_strides[l]; UpdateFloat sync_to[ni * w_stride]; d_weight_updates_storage[l].sync(sync_to); std::copy(sync_to, sync_to + ni * w_stride, weight_updates[l]); #if 0 cerr << "first 10 weight updates for layer " << l << ": "; for (unsigned i = 0; i < 10; ++i) cerr << sync_to[i] << " "; cerr << endl; #endif } for (unsigned l = 0; l < plan.num_layers; ++l) { int no = plan.architecture[l + 1]; UpdateFloat sync_to[no]; d_bias_updates_storage[l].sync(sync_to); std::copy(sync_to, sync_to + no, bias_updates[l]); #if 0 cerr << "first 10 bias updates for layer " << l << ": "; for (unsigned i = 0; i < 10; ++i) cerr << sync_to[i] << " "; cerr << endl; #endif } } }; boost::shared_ptr<Backprop::Plan> Backprop:: plan(int num_layers, const int * architecture, const float * const * weights, const float * const * biases, const int * w_strides, Activation activation, float inhibit, float fire, float learning_rate, bool on_host, bool use_textures) const { boost::shared_ptr<Plan> result 
(new Plan(num_layers, architecture, weights, biases, w_strides, activation, inhibit, fire, learning_rate, on_host, use_textures)); return result; } boost::shared_ptr<Backprop::Context> Backprop:: execute(const Plan & plan, const float * feature_vectors, int num_feature_vectors, const float * example_weights, const int * labels, float * const * weight_updates, float * const * bias_updates, float & correct, float & total, float & rms_error) const { boost::shared_ptr<Context> result (new Context(plan, feature_vectors, num_feature_vectors, example_weights, labels, weight_updates, bias_updates, correct, total, rms_error)); result->execute(); return result; } /** Wait for the given context to be finished. */ void Backprop:: synchronize(Context & context) const { context.synchronize(); } } // namespace CUDA } // namespace ML
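/*
 * Illustrative sketch (not part of the original source; the struct and
 * function names are hypothetical): how the launch configuration assembled
 * in Plan and Context above fits together.  The block width equals the
 * widest layer, so one thread handles one neuron of the current layer; each
 * block consumes num_examples_per_invocation (4) training examples; and the
 * dynamic shared memory holds one scratch row of max_width floats for each
 * of the 4 in-flight examples.
 */
struct BackpropLaunchDims {
    int threads_per_block;    // == widest layer in the architecture
    int blocks;               // == ceil(num_examples / examples_per_block)
    size_t shared_mem_bytes;  // == 4 scratch rows of max_width floats
};

inline BackpropLaunchDims
backprop_launch_dims(const int * architecture, int num_layers,
                     int num_examples, int examples_per_block = 4)
{
    // architecture has num_layers + 1 entries (inputs plus each layer's outputs)
    int max_width = 0;
    for (int l = 0; l <= num_layers; ++l)
        if (architecture[l] > max_width) max_width = architecture[l];

    BackpropLaunchDims dims;
    dims.threads_per_block = max_width;
    dims.blocks = (num_examples + examples_per_block - 1) / examples_per_block;
    dims.shared_mem_bytes = 4 * size_t(max_width) * sizeof(float);
    return dims;
}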
the_stack
* \file * The cub::BlockRadixSort class provides [<em>collective</em>](index.html#sec0) methods for radix sorting of items partitioned across a CUDA thread block. */ #pragma once #include "block_exchange.cuh" #include "block_radix_rank.cuh" #include "../util_ptx.cuh" #include "../util_arch.cuh" #include "../util_type.cuh" #include "../util_namespace.cuh" /// Optional outer namespace(s) CUB_NS_PREFIX /// CUB namespace namespace cub { /** * \brief The BlockRadixSort class provides [<em>collective</em>](index.html#sec0) methods for sorting items partitioned across a CUDA thread block using a radix sorting method. ![](sorting_logo.png) * \ingroup BlockModule * * \tparam KeyT KeyT type * \tparam BLOCK_DIM_X The thread block length in threads along the X dimension * \tparam ITEMS_PER_THREAD The number of items per thread * \tparam ValueT <b>[optional]</b> ValueT type (default: cub::NullType, which indicates a keys-only sort) * \tparam RADIX_BITS <b>[optional]</b> The number of radix bits per digit place (default: 4 bits) * \tparam MEMOIZE_OUTER_SCAN <b>[optional]</b> Whether or not to buffer outer raking scan partials to incur fewer shared memory reads at the expense of higher register pressure (default: true for architectures SM35 and newer, false otherwise). * \tparam INNER_SCAN_ALGORITHM <b>[optional]</b> The cub::BlockScanAlgorithm algorithm to use (default: cub::BLOCK_SCAN_WARP_SCANS) * \tparam SMEM_CONFIG <b>[optional]</b> Shared memory bank mode (default: \p cudaSharedMemBankSizeFourByte) * \tparam BLOCK_DIM_Y <b>[optional]</b> The thread block length in threads along the Y dimension (default: 1) * \tparam BLOCK_DIM_Z <b>[optional]</b> The thread block length in threads along the Z dimension (default: 1) * \tparam PTX_ARCH <b>[optional]</b> \ptxversion * * \par Overview * - The [<em>radix sorting method</em>](http://en.wikipedia.org/wiki/Radix_sort) arranges * items into ascending order. It relies upon a positional representation for * keys, i.e., each key is comprised of an ordered sequence of symbols (e.g., digits, * characters, etc.) specified from least-significant to most-significant. For a * given input sequence of keys and a set of rules specifying a total ordering * of the symbolic alphabet, the radix sorting method produces a lexicographic * ordering of those keys. * - BlockRadixSort can sort all of the built-in C++ numeric primitive types * (<tt>unsigned char</tt>, \p int, \p double, etc.) as well as CUDA's \p __half * half-precision floating-point type. Within each key, the implementation treats fixed-length * bit-sequences of \p RADIX_BITS as radix digit places. Although the direct radix sorting * method can only be applied to unsigned integral types, BlockRadixSort * is able to sort signed and floating-point types via simple bit-wise transformations * that ensure lexicographic key ordering. * - \rowmajor * * \par Performance Considerations * - \granularity * * \par A Simple Example * \blockcollective{BlockRadixSort} * \par * The code snippet below illustrates a sort of 512 integer keys that * are partitioned in a [<em>blocked arrangement</em>](index.html#sec5sec3) across 128 threads * where each thread owns 4 consecutive items. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/block/block_radix_sort.cuh> * * __global__ void ExampleKernel(...) 
* { * // Specialize BlockRadixSort for a 1D block of 128 threads owning 4 integer items each * typedef cub::BlockRadixSort<int, 128, 4> BlockRadixSort; * * // Allocate shared memory for BlockRadixSort * __shared__ typename BlockRadixSort::TempStorage temp_storage; * * // Obtain a segment of consecutive items that are blocked across threads * int thread_keys[4]; * ... * * // Collectively sort the keys * BlockRadixSort(temp_storage).Sort(thread_keys); * * ... * \endcode * \par * Suppose the set of input \p thread_keys across the block of threads is * <tt>{ [0,511,1,510], [2,509,3,508], [4,507,5,506], ..., [254,257,255,256] }</tt>. The * corresponding output \p thread_keys in those threads will be * <tt>{ [0,1,2,3], [4,5,6,7], [8,9,10,11], ..., [508,509,510,511] }</tt>. * */ template < typename KeyT, int BLOCK_DIM_X, int ITEMS_PER_THREAD, typename ValueT = NullType, int RADIX_BITS = 4, bool MEMOIZE_OUTER_SCAN = (CUB_PTX_ARCH >= 350) ? true : false, BlockScanAlgorithm INNER_SCAN_ALGORITHM = BLOCK_SCAN_WARP_SCANS, cudaSharedMemConfig SMEM_CONFIG = cudaSharedMemBankSizeFourByte, int BLOCK_DIM_Y = 1, int BLOCK_DIM_Z = 1, int PTX_ARCH = CUB_PTX_ARCH> class BlockRadixSort { private: /****************************************************************************** * Constants and type definitions ******************************************************************************/ enum { // The thread block size in threads BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z, // Whether or not there are values to be trucked along with keys KEYS_ONLY = Equals<ValueT, NullType>::VALUE, }; // KeyT traits and unsigned bits type typedef Traits<KeyT> KeyTraits; typedef typename KeyTraits::UnsignedBits UnsignedBits; /// Ascending BlockRadixRank utility type typedef BlockRadixRank< BLOCK_DIM_X, RADIX_BITS, false, MEMOIZE_OUTER_SCAN, INNER_SCAN_ALGORITHM, SMEM_CONFIG, BLOCK_DIM_Y, BLOCK_DIM_Z, PTX_ARCH> AscendingBlockRadixRank; /// Descending BlockRadixRank utility type typedef BlockRadixRank< BLOCK_DIM_X, RADIX_BITS, true, MEMOIZE_OUTER_SCAN, INNER_SCAN_ALGORITHM, SMEM_CONFIG, BLOCK_DIM_Y, BLOCK_DIM_Z, PTX_ARCH> DescendingBlockRadixRank; /// BlockExchange utility type for keys typedef BlockExchange<KeyT, BLOCK_DIM_X, ITEMS_PER_THREAD, false, BLOCK_DIM_Y, BLOCK_DIM_Z, PTX_ARCH> BlockExchangeKeys; /// BlockExchange utility type for values typedef BlockExchange<ValueT, BLOCK_DIM_X, ITEMS_PER_THREAD, false, BLOCK_DIM_Y, BLOCK_DIM_Z, PTX_ARCH> BlockExchangeValues; /// Shared memory storage layout type union _TempStorage { typename AscendingBlockRadixRank::TempStorage asending_ranking_storage; typename DescendingBlockRadixRank::TempStorage descending_ranking_storage; typename BlockExchangeKeys::TempStorage exchange_keys; typename BlockExchangeValues::TempStorage exchange_values; }; /****************************************************************************** * Thread fields ******************************************************************************/ /// Shared storage reference _TempStorage &temp_storage; /// Linear thread-id unsigned int linear_tid; /****************************************************************************** * Utility methods ******************************************************************************/ /// Internal storage allocator __device__ __forceinline__ _TempStorage& PrivateStorage() { __shared__ _TempStorage private_storage; return private_storage; } /// Rank keys (specialized for ascending sort) __device__ __forceinline__ void RankKeys( UnsignedBits 
(&unsigned_keys)[ITEMS_PER_THREAD], int (&ranks)[ITEMS_PER_THREAD], int begin_bit, int pass_bits, Int2Type<false> /*is_descending*/) { AscendingBlockRadixRank(temp_storage.asending_ranking_storage).RankKeys( unsigned_keys, ranks, begin_bit, pass_bits); } /// Rank keys (specialized for descending sort) __device__ __forceinline__ void RankKeys( UnsignedBits (&unsigned_keys)[ITEMS_PER_THREAD], int (&ranks)[ITEMS_PER_THREAD], int begin_bit, int pass_bits, Int2Type<true> /*is_descending*/) { DescendingBlockRadixRank(temp_storage.descending_ranking_storage).RankKeys( unsigned_keys, ranks, begin_bit, pass_bits); } /// ExchangeValues (specialized for key-value sort, to-blocked arrangement) __device__ __forceinline__ void ExchangeValues( ValueT (&values)[ITEMS_PER_THREAD], int (&ranks)[ITEMS_PER_THREAD], Int2Type<false> /*is_keys_only*/, Int2Type<true> /*is_blocked*/) { CTA_SYNC(); // Exchange values through shared memory in blocked arrangement BlockExchangeValues(temp_storage.exchange_values).ScatterToBlocked(values, ranks); } /// ExchangeValues (specialized for key-value sort, to-striped arrangement) __device__ __forceinline__ void ExchangeValues( ValueT (&values)[ITEMS_PER_THREAD], int (&ranks)[ITEMS_PER_THREAD], Int2Type<false> /*is_keys_only*/, Int2Type<false> /*is_blocked*/) { CTA_SYNC(); // Exchange values through shared memory in blocked arrangement BlockExchangeValues(temp_storage.exchange_values).ScatterToStriped(values, ranks); } /// ExchangeValues (specialized for keys-only sort) template <int IS_BLOCKED> __device__ __forceinline__ void ExchangeValues( ValueT (&/*values*/)[ITEMS_PER_THREAD], int (&/*ranks*/)[ITEMS_PER_THREAD], Int2Type<true> /*is_keys_only*/, Int2Type<IS_BLOCKED> /*is_blocked*/) {} /// Sort blocked arrangement template <int DESCENDING, int KEYS_ONLY> __device__ __forceinline__ void SortBlocked( KeyT (&keys)[ITEMS_PER_THREAD], ///< Keys to sort ValueT (&values)[ITEMS_PER_THREAD], ///< Values to sort int begin_bit, ///< The beginning (least-significant) bit index needed for key comparison int end_bit, ///< The past-the-end (most-significant) bit index needed for key comparison Int2Type<DESCENDING> is_descending, ///< Tag whether is a descending-order sort Int2Type<KEYS_ONLY> is_keys_only) ///< Tag whether is keys-only sort { UnsignedBits (&unsigned_keys)[ITEMS_PER_THREAD] = reinterpret_cast<UnsignedBits (&)[ITEMS_PER_THREAD]>(keys); // Twiddle bits if necessary #pragma unroll for (int KEY = 0; KEY < ITEMS_PER_THREAD; KEY++) { unsigned_keys[KEY] = KeyTraits::TwiddleIn(unsigned_keys[KEY]); } // Radix sorting passes while (true) { int pass_bits = CUB_MIN(RADIX_BITS, end_bit - begin_bit); // Rank the blocked keys int ranks[ITEMS_PER_THREAD]; RankKeys(unsigned_keys, ranks, begin_bit, pass_bits, is_descending); begin_bit += RADIX_BITS; CTA_SYNC(); // Exchange keys through shared memory in blocked arrangement BlockExchangeKeys(temp_storage.exchange_keys).ScatterToBlocked(keys, ranks); // Exchange values through shared memory in blocked arrangement ExchangeValues(values, ranks, is_keys_only, Int2Type<true>()); // Quit if done if (begin_bit >= end_bit) break; CTA_SYNC(); } // Untwiddle bits if necessary #pragma unroll for (int KEY = 0; KEY < ITEMS_PER_THREAD; KEY++) { unsigned_keys[KEY] = KeyTraits::TwiddleOut(unsigned_keys[KEY]); } } public: #ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document /// Sort blocked -> striped arrangement template <int DESCENDING, int KEYS_ONLY> __device__ __forceinline__ void SortBlockedToStriped( KeyT (&keys)[ITEMS_PER_THREAD], ///< Keys to sort 
ValueT (&values)[ITEMS_PER_THREAD], ///< Values to sort int begin_bit, ///< The beginning (least-significant) bit index needed for key comparison int end_bit, ///< The past-the-end (most-significant) bit index needed for key comparison Int2Type<DESCENDING> is_descending, ///< Tag whether is a descending-order sort Int2Type<KEYS_ONLY> is_keys_only) ///< Tag whether is keys-only sort { UnsignedBits (&unsigned_keys)[ITEMS_PER_THREAD] = reinterpret_cast<UnsignedBits (&)[ITEMS_PER_THREAD]>(keys); // Twiddle bits if necessary #pragma unroll for (int KEY = 0; KEY < ITEMS_PER_THREAD; KEY++) { unsigned_keys[KEY] = KeyTraits::TwiddleIn(unsigned_keys[KEY]); } // Radix sorting passes while (true) { int pass_bits = CUB_MIN(RADIX_BITS, end_bit - begin_bit); // Rank the blocked keys int ranks[ITEMS_PER_THREAD]; RankKeys(unsigned_keys, ranks, begin_bit, pass_bits, is_descending); begin_bit += RADIX_BITS; CTA_SYNC(); // Check if this is the last pass if (begin_bit >= end_bit) { // Last pass exchanges keys through shared memory in striped arrangement BlockExchangeKeys(temp_storage.exchange_keys).ScatterToStriped(keys, ranks); // Last pass exchanges through shared memory in striped arrangement ExchangeValues(values, ranks, is_keys_only, Int2Type<false>()); // Quit break; } // Exchange keys through shared memory in blocked arrangement BlockExchangeKeys(temp_storage.exchange_keys).ScatterToBlocked(keys, ranks); // Exchange values through shared memory in blocked arrangement ExchangeValues(values, ranks, is_keys_only, Int2Type<true>()); CTA_SYNC(); } // Untwiddle bits if necessary #pragma unroll for (int KEY = 0; KEY < ITEMS_PER_THREAD; KEY++) { unsigned_keys[KEY] = KeyTraits::TwiddleOut(unsigned_keys[KEY]); } } #endif // DOXYGEN_SHOULD_SKIP_THIS /// \smemstorage{BlockRadixSort} struct TempStorage : Uninitialized<_TempStorage> {}; /******************************************************************//** * \name Collective constructors *********************************************************************/ //@{ /** * \brief Collective constructor using a private static allocation of shared memory as temporary storage. */ __device__ __forceinline__ BlockRadixSort() : temp_storage(PrivateStorage()), linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z)) {} /** * \brief Collective constructor using the specified memory allocation as temporary storage. */ __device__ __forceinline__ BlockRadixSort( TempStorage &temp_storage) ///< [in] Reference to memory allocation having layout type TempStorage : temp_storage(temp_storage.Alias()), linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z)) {} //@} end member group /******************************************************************//** * \name Sorting (blocked arrangements) *********************************************************************/ //@{ /** * \brief Performs an ascending block-wide radix sort over a [<em>blocked arrangement</em>](index.html#sec5sec3) of keys. * * \par * - \granularity * - \smemreuse * * \par Snippet * The code snippet below illustrates a sort of 512 integer keys that * are partitioned in a [<em>blocked arrangement</em>](index.html#sec5sec3) across 128 threads * where each thread owns 4 consecutive keys. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/block/block_radix_sort.cuh> * * __global__ void ExampleKernel(...) 
* { * // Specialize BlockRadixSort for a 1D block of 128 threads owning 4 integer keys each * typedef cub::BlockRadixSort<int, 128, 4> BlockRadixSort; * * // Allocate shared memory for BlockRadixSort * __shared__ typename BlockRadixSort::TempStorage temp_storage; * * // Obtain a segment of consecutive items that are blocked across threads * int thread_keys[4]; * ... * * // Collectively sort the keys * BlockRadixSort(temp_storage).Sort(thread_keys); * * \endcode * \par * Suppose the set of input \p thread_keys across the block of threads is * <tt>{ [0,511,1,510], [2,509,3,508], [4,507,5,506], ..., [254,257,255,256] }</tt>. * The corresponding output \p thread_keys in those threads will be * <tt>{ [0,1,2,3], [4,5,6,7], [8,9,10,11], ..., [508,509,510,511] }</tt>. */ __device__ __forceinline__ void Sort( KeyT (&keys)[ITEMS_PER_THREAD], ///< [in-out] Keys to sort int begin_bit = 0, ///< [in] <b>[optional]</b> The beginning (least-significant) bit index needed for key comparison int end_bit = sizeof(KeyT) * 8) ///< [in] <b>[optional]</b> The past-the-end (most-significant) bit index needed for key comparison { NullType values[ITEMS_PER_THREAD]; SortBlocked(keys, values, begin_bit, end_bit, Int2Type<false>(), Int2Type<KEYS_ONLY>()); } /** * \brief Performs an ascending block-wide radix sort across a [<em>blocked arrangement</em>](index.html#sec5sec3) of keys and values. * * \par * - BlockRadixSort can only accommodate one associated tile of values. To "truck along" * more than one tile of values, simply perform a key-value sort of the keys paired * with a temporary value array that enumerates the key indices. The reordered indices * can then be used as a gather-vector for exchanging other associated tile data through * shared memory. * - \granularity * - \smemreuse * * \par Snippet * The code snippet below illustrates a sort of 512 integer keys and values that * are partitioned in a [<em>blocked arrangement</em>](index.html#sec5sec3) across 128 threads * where each thread owns 4 consecutive pairs. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/block/block_radix_sort.cuh> * * __global__ void ExampleKernel(...) * { * // Specialize BlockRadixSort for a 1D block of 128 threads owning 4 integer keys and values each * typedef cub::BlockRadixSort<int, 128, 4, int> BlockRadixSort; * * // Allocate shared memory for BlockRadixSort * __shared__ typename BlockRadixSort::TempStorage temp_storage; * * // Obtain a segment of consecutive items that are blocked across threads * int thread_keys[4]; * int thread_values[4]; * ... * * // Collectively sort the keys and values among block threads * BlockRadixSort(temp_storage).Sort(thread_keys, thread_values); * * \endcode * \par * Suppose the set of input \p thread_keys across the block of threads is * <tt>{ [0,511,1,510], [2,509,3,508], [4,507,5,506], ..., [254,257,255,256] }</tt>. The * corresponding output \p thread_keys in those threads will be * <tt>{ [0,1,2,3], [4,5,6,7], [8,9,10,11], ..., [508,509,510,511] }</tt>. 
* */ __device__ __forceinline__ void Sort( KeyT (&keys)[ITEMS_PER_THREAD], ///< [in-out] Keys to sort ValueT (&values)[ITEMS_PER_THREAD], ///< [in-out] Values to sort int begin_bit = 0, ///< [in] <b>[optional]</b> The beginning (least-significant) bit index needed for key comparison int end_bit = sizeof(KeyT) * 8) ///< [in] <b>[optional]</b> The past-the-end (most-significant) bit index needed for key comparison { SortBlocked(keys, values, begin_bit, end_bit, Int2Type<false>(), Int2Type<KEYS_ONLY>()); } /** * \brief Performs a descending block-wide radix sort over a [<em>blocked arrangement</em>](index.html#sec5sec3) of keys. * * \par * - \granularity * - \smemreuse * * \par Snippet * The code snippet below illustrates a sort of 512 integer keys that * are partitioned in a [<em>blocked arrangement</em>](index.html#sec5sec3) across 128 threads * where each thread owns 4 consecutive keys. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/block/block_radix_sort.cuh> * * __global__ void ExampleKernel(...) * { * // Specialize BlockRadixSort for a 1D block of 128 threads owning 4 integer keys each * typedef cub::BlockRadixSort<int, 128, 4> BlockRadixSort; * * // Allocate shared memory for BlockRadixSort * __shared__ typename BlockRadixSort::TempStorage temp_storage; * * // Obtain a segment of consecutive items that are blocked across threads * int thread_keys[4]; * ... * * // Collectively sort the keys * BlockRadixSort(temp_storage).Sort(thread_keys); * * \endcode * \par * Suppose the set of input \p thread_keys across the block of threads is * <tt>{ [0,511,1,510], [2,509,3,508], [4,507,5,506], ..., [254,257,255,256] }</tt>. * The corresponding output \p thread_keys in those threads will be * <tt>{ [511,510,509,508], [11,10,9,8], [7,6,5,4], ..., [3,2,1,0] }</tt>. */ __device__ __forceinline__ void SortDescending( KeyT (&keys)[ITEMS_PER_THREAD], ///< [in-out] Keys to sort int begin_bit = 0, ///< [in] <b>[optional]</b> The beginning (least-significant) bit index needed for key comparison int end_bit = sizeof(KeyT) * 8) ///< [in] <b>[optional]</b> The past-the-end (most-significant) bit index needed for key comparison { NullType values[ITEMS_PER_THREAD]; SortBlocked(keys, values, begin_bit, end_bit, Int2Type<true>(), Int2Type<KEYS_ONLY>()); } /** * \brief Performs a descending block-wide radix sort across a [<em>blocked arrangement</em>](index.html#sec5sec3) of keys and values. * * \par * - BlockRadixSort can only accommodate one associated tile of values. To "truck along" * more than one tile of values, simply perform a key-value sort of the keys paired * with a temporary value array that enumerates the key indices. The reordered indices * can then be used as a gather-vector for exchanging other associated tile data through * shared memory. * - \granularity * - \smemreuse * * \par Snippet * The code snippet below illustrates a sort of 512 integer keys and values that * are partitioned in a [<em>blocked arrangement</em>](index.html#sec5sec3) across 128 threads * where each thread owns 4 consecutive pairs. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/block/block_radix_sort.cuh> * * __global__ void ExampleKernel(...) 
* { * // Specialize BlockRadixSort for a 1D block of 128 threads owning 4 integer keys and values each * typedef cub::BlockRadixSort<int, 128, 4, int> BlockRadixSort; * * // Allocate shared memory for BlockRadixSort * __shared__ typename BlockRadixSort::TempStorage temp_storage; * * // Obtain a segment of consecutive items that are blocked across threads * int thread_keys[4]; * int thread_values[4]; * ... * * // Collectively sort the keys and values among block threads * BlockRadixSort(temp_storage).Sort(thread_keys, thread_values); * * \endcode * \par * Suppose the set of input \p thread_keys across the block of threads is * <tt>{ [0,511,1,510], [2,509,3,508], [4,507,5,506], ..., [254,257,255,256] }</tt>. The * corresponding output \p thread_keys in those threads will be * <tt>{ [511,510,509,508], [11,10,9,8], [7,6,5,4], ..., [3,2,1,0] }</tt>. * */ __device__ __forceinline__ void SortDescending( KeyT (&keys)[ITEMS_PER_THREAD], ///< [in-out] Keys to sort ValueT (&values)[ITEMS_PER_THREAD], ///< [in-out] Values to sort int begin_bit = 0, ///< [in] <b>[optional]</b> The beginning (least-significant) bit index needed for key comparison int end_bit = sizeof(KeyT) * 8) ///< [in] <b>[optional]</b> The past-the-end (most-significant) bit index needed for key comparison { SortBlocked(keys, values, begin_bit, end_bit, Int2Type<true>(), Int2Type<KEYS_ONLY>()); } //@} end member group /******************************************************************//** * \name Sorting (blocked arrangement -> striped arrangement) *********************************************************************/ //@{ /** * \brief Performs an ascending radix sort across a [<em>blocked arrangement</em>](index.html#sec5sec3) of keys, leaving them in a [<em>striped arrangement</em>](index.html#sec5sec3). * * \par * - \granularity * - \smemreuse * * \par Snippet * The code snippet below illustrates a sort of 512 integer keys that * are initially partitioned in a [<em>blocked arrangement</em>](index.html#sec5sec3) across 128 threads * where each thread owns 4 consecutive keys. The final partitioning is striped. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/block/block_radix_sort.cuh> * * __global__ void ExampleKernel(...) * { * // Specialize BlockRadixSort for a 1D block of 128 threads owning 4 integer keys each * typedef cub::BlockRadixSort<int, 128, 4> BlockRadixSort; * * // Allocate shared memory for BlockRadixSort * __shared__ typename BlockRadixSort::TempStorage temp_storage; * * // Obtain a segment of consecutive items that are blocked across threads * int thread_keys[4]; * ... * * // Collectively sort the keys * BlockRadixSort(temp_storage).SortBlockedToStriped(thread_keys); * * \endcode * \par * Suppose the set of input \p thread_keys across the block of threads is * <tt>{ [0,511,1,510], [2,509,3,508], [4,507,5,506], ..., [254,257,255,256] }</tt>. The * corresponding output \p thread_keys in those threads will be * <tt>{ [0,128,256,384], [1,129,257,385], [2,130,258,386], ..., [127,255,383,511] }</tt>. 
* */ __device__ __forceinline__ void SortBlockedToStriped( KeyT (&keys)[ITEMS_PER_THREAD], ///< [in-out] Keys to sort int begin_bit = 0, ///< [in] <b>[optional]</b> The beginning (least-significant) bit index needed for key comparison int end_bit = sizeof(KeyT) * 8) ///< [in] <b>[optional]</b> The past-the-end (most-significant) bit index needed for key comparison { NullType values[ITEMS_PER_THREAD]; SortBlockedToStriped(keys, values, begin_bit, end_bit, Int2Type<false>(), Int2Type<KEYS_ONLY>()); } /** * \brief Performs an ascending radix sort across a [<em>blocked arrangement</em>](index.html#sec5sec3) of keys and values, leaving them in a [<em>striped arrangement</em>](index.html#sec5sec3). * * \par * - BlockRadixSort can only accommodate one associated tile of values. To "truck along" * more than one tile of values, simply perform a key-value sort of the keys paired * with a temporary value array that enumerates the key indices. The reordered indices * can then be used as a gather-vector for exchanging other associated tile data through * shared memory. * - \granularity * - \smemreuse * * \par Snippet * The code snippet below illustrates a sort of 512 integer keys and values that * are initially partitioned in a [<em>blocked arrangement</em>](index.html#sec5sec3) across 128 threads * where each thread owns 4 consecutive pairs. The final partitioning is striped. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/block/block_radix_sort.cuh> * * __global__ void ExampleKernel(...) * { * // Specialize BlockRadixSort for a 1D block of 128 threads owning 4 integer keys and values each * typedef cub::BlockRadixSort<int, 128, 4, int> BlockRadixSort; * * // Allocate shared memory for BlockRadixSort * __shared__ typename BlockRadixSort::TempStorage temp_storage; * * // Obtain a segment of consecutive items that are blocked across threads * int thread_keys[4]; * int thread_values[4]; * ... * * // Collectively sort the keys and values among block threads * BlockRadixSort(temp_storage).SortBlockedToStriped(thread_keys, thread_values); * * \endcode * \par * Suppose the set of input \p thread_keys across the block of threads is * <tt>{ [0,511,1,510], [2,509,3,508], [4,507,5,506], ..., [254,257,255,256] }</tt>. The * corresponding output \p thread_keys in those threads will be * <tt>{ [0,128,256,384], [1,129,257,385], [2,130,258,386], ..., [127,255,383,511] }</tt>. * */ __device__ __forceinline__ void SortBlockedToStriped( KeyT (&keys)[ITEMS_PER_THREAD], ///< [in-out] Keys to sort ValueT (&values)[ITEMS_PER_THREAD], ///< [in-out] Values to sort int begin_bit = 0, ///< [in] <b>[optional]</b> The beginning (least-significant) bit index needed for key comparison int end_bit = sizeof(KeyT) * 8) ///< [in] <b>[optional]</b> The past-the-end (most-significant) bit index needed for key comparison { SortBlockedToStriped(keys, values, begin_bit, end_bit, Int2Type<false>(), Int2Type<KEYS_ONLY>()); } /** * \brief Performs a descending radix sort across a [<em>blocked arrangement</em>](index.html#sec5sec3) of keys, leaving them in a [<em>striped arrangement</em>](index.html#sec5sec3). * * \par * - \granularity * - \smemreuse * * \par Snippet * The code snippet below illustrates a sort of 512 integer keys that * are initially partitioned in a [<em>blocked arrangement</em>](index.html#sec5sec3) across 128 threads * where each thread owns 4 consecutive keys. The final partitioning is striped. 
* \par * \code * #include <cub/cub.cuh> // or equivalently <cub/block/block_radix_sort.cuh> * * __global__ void ExampleKernel(...) * { * // Specialize BlockRadixSort for a 1D block of 128 threads owning 4 integer keys each * typedef cub::BlockRadixSort<int, 128, 4> BlockRadixSort; * * // Allocate shared memory for BlockRadixSort * __shared__ typename BlockRadixSort::TempStorage temp_storage; * * // Obtain a segment of consecutive items that are blocked across threads * int thread_keys[4]; * ... * * // Collectively sort the keys * BlockRadixSort(temp_storage).SortBlockedToStriped(thread_keys); * * \endcode * \par * Suppose the set of input \p thread_keys across the block of threads is * <tt>{ [0,511,1,510], [2,509,3,508], [4,507,5,506], ..., [254,257,255,256] }</tt>. The * corresponding output \p thread_keys in those threads will be * <tt>{ [511,383,255,127], [386,258,130,2], [385,257,128,1], ..., [384,256,128,0] }</tt>. * */ __device__ __forceinline__ void SortDescendingBlockedToStriped( KeyT (&keys)[ITEMS_PER_THREAD], ///< [in-out] Keys to sort int begin_bit = 0, ///< [in] <b>[optional]</b> The beginning (least-significant) bit index needed for key comparison int end_bit = sizeof(KeyT) * 8) ///< [in] <b>[optional]</b> The past-the-end (most-significant) bit index needed for key comparison { NullType values[ITEMS_PER_THREAD]; SortBlockedToStriped(keys, values, begin_bit, end_bit, Int2Type<true>(), Int2Type<KEYS_ONLY>()); } /** * \brief Performs a descending radix sort across a [<em>blocked arrangement</em>](index.html#sec5sec3) of keys and values, leaving them in a [<em>striped arrangement</em>](index.html#sec5sec3). * * \par * - BlockRadixSort can only accommodate one associated tile of values. To "truck along" * more than one tile of values, simply perform a key-value sort of the keys paired * with a temporary value array that enumerates the key indices. The reordered indices * can then be used as a gather-vector for exchanging other associated tile data through * shared memory. * - \granularity * - \smemreuse * * \par Snippet * The code snippet below illustrates a sort of 512 integer keys and values that * are initially partitioned in a [<em>blocked arrangement</em>](index.html#sec5sec3) across 128 threads * where each thread owns 4 consecutive pairs. The final partitioning is striped. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/block/block_radix_sort.cuh> * * __global__ void ExampleKernel(...) * { * // Specialize BlockRadixSort for a 1D block of 128 threads owning 4 integer keys and values each * typedef cub::BlockRadixSort<int, 128, 4, int> BlockRadixSort; * * // Allocate shared memory for BlockRadixSort * __shared__ typename BlockRadixSort::TempStorage temp_storage; * * // Obtain a segment of consecutive items that are blocked across threads * int thread_keys[4]; * int thread_values[4]; * ... * * // Collectively sort the keys and values among block threads * BlockRadixSort(temp_storage).SortBlockedToStriped(thread_keys, thread_values); * * \endcode * \par * Suppose the set of input \p thread_keys across the block of threads is * <tt>{ [0,511,1,510], [2,509,3,508], [4,507,5,506], ..., [254,257,255,256] }</tt>. The * corresponding output \p thread_keys in those threads will be * <tt>{ [511,383,255,127], [386,258,130,2], [385,257,128,1], ..., [384,256,128,0] }</tt>. 
* */ __device__ __forceinline__ void SortDescendingBlockedToStriped( KeyT (&keys)[ITEMS_PER_THREAD], ///< [in-out] Keys to sort ValueT (&values)[ITEMS_PER_THREAD], ///< [in-out] Values to sort int begin_bit = 0, ///< [in] <b>[optional]</b> The beginning (least-significant) bit index needed for key comparison int end_bit = sizeof(KeyT) * 8) ///< [in] <b>[optional]</b> The past-the-end (most-significant) bit index needed for key comparison { SortBlockedToStriped(keys, values, begin_bit, end_bit, Int2Type<true>(), Int2Type<KEYS_ONLY>()); } //@} end member group }; /** * \example example_block_radix_sort.cu */ } // CUB namespace CUB_NS_POSTFIX // Optional outer namespace(s)
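/*
 * Usage sketch (not part of the CUB sources above): a minimal kernel showing how the
 * SortDescendingBlockedToStriped overload documented here is typically combined with a
 * blocked load and a striped store. The kernel name (sort_descending_demo) and the
 * per-block tiling are illustrative assumptions; only the BlockRadixSort API itself is
 * taken from the code above.
 */
#include <cub/cub.cuh>

namespace block_radix_sort_demo {

constexpr int BLOCK_THREADS    = 128;
constexpr int ITEMS_PER_THREAD = 4;   // 128 threads * 4 keys = 512 keys per block

__global__ void sort_descending_demo(int *d_keys)
{
    using BlockRadixSort = cub::BlockRadixSort<int, BLOCK_THREADS, ITEMS_PER_THREAD>;
    __shared__ typename BlockRadixSort::TempStorage temp_storage;

    // Each block sorts its own contiguous tile of 512 keys.
    int *block_keys = d_keys + blockIdx.x * BLOCK_THREADS * ITEMS_PER_THREAD;

    // Load a blocked arrangement: thread t owns keys [4t, 4t+3] of the tile.
    int thread_keys[ITEMS_PER_THREAD];
    cub::LoadDirectBlocked(threadIdx.x, block_keys, thread_keys);

    // Collectively sort in descending order; results come back in a striped arrangement.
    BlockRadixSort(temp_storage).SortDescendingBlockedToStriped(thread_keys);

    // Store the striped arrangement: thread t writes keys t, t+128, t+256, t+384.
    cub::StoreDirectStriped<BLOCK_THREADS>(threadIdx.x, block_keys, thread_keys);
}

} // namespace block_radix_sort_demo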
namespace cg = cooperative_groups; // Overload CUDA atomic for other 64bit unsinged/signed integer type __forceinline__ __device__ long atomicAdd(long* address, long val) { return (long)atomicAdd((unsigned long long*)address, (unsigned long long)val); } __forceinline__ __device__ long long atomicAdd(long long* address, long long val) { return (long long)atomicAdd((unsigned long long*)address, (unsigned long long)val); } __forceinline__ __device__ unsigned long atomicAdd(unsigned long* address, unsigned long val) { return (unsigned long)atomicAdd((unsigned long long*)address, (unsigned long long)val); } namespace gpu_cache { #ifdef LIBCUDACXX_VERSION template<int warp_size> __forceinline__ __device__ void warp_tile_copy(const size_t lane_idx, const size_t emb_vec_size_in_float, float* d_dst, const float* d_src){ #pragma unroll for(size_t i = lane_idx; i < emb_vec_size_in_float; i += warp_size){ d_dst[i] = d_src[i]; } } #else template<int warp_size> __forceinline__ __device__ void warp_tile_copy(const size_t lane_idx, const size_t emb_vec_size_in_float, volatile float* d_dst, volatile float* d_src){ #pragma unroll for(size_t i = lane_idx; i < emb_vec_size_in_float; i += warp_size){ d_dst[i] = d_src[i]; } } #endif #ifdef LIBCUDACXX_VERSION // Will be called by multiple thread_block_tile((sub-)warp) on the same mutex // Expect only one thread_block_tile return to execute critical section at any time template<typename mutex, int warp_size> __forceinline__ __device__ void warp_lock_mutex(const cg::thread_block_tile<warp_size>& warp_tile, mutex& set_mutex){ // The first thread of this (sub-)warp to acquire the lock if(warp_tile.thread_rank() == 0){ set_mutex.acquire(); } warp_tile.sync(); // Synchronize the threads in the (sub-)warp. Execution barrier + memory fence } // The (sub-)warp holding the mutex will unlock the mutex after finishing the critical section on a set // Expect any following (sub-)warp that acquire the mutex can see its modification done in the critical section template<typename mutex, int warp_size> __forceinline__ __device__ void warp_unlock_mutex(const cg::thread_block_tile<warp_size>& warp_tile, mutex& set_mutex){ warp_tile.sync(); // Synchronize the threads in the (sub-)warp. Execution barrier + memory fence // The first thread of this (sub-)warp to release the lock if(warp_tile.thread_rank() == 0){ set_mutex.release(); } } #else // Will be called by multiple thread_block_tile((sub-)warp) on the same mutex // Expect only one thread_block_tile return to execute critical section at any time template<int warp_size> __forceinline__ __device__ void warp_lock_mutex(const cg::thread_block_tile<warp_size>& warp_tile, volatile int& set_mutex){ // The first thread of this (sub-)warp to acquire the lock if(warp_tile.thread_rank() == 0){ while (0 == atomicCAS((int*)&set_mutex, 1, 0)) ; } __threadfence(); warp_tile.sync(); // Synchronize the threads in the (sub-)warp. Execution barrier + memory fence } // The (sub-)warp holding the mutex will unlock the mutex after finishing the critical section on a set // Expect any following (sub-)warp that acquire the mutex can see its modification done in the critical section template<int warp_size> __forceinline__ __device__ void warp_unlock_mutex(const cg::thread_block_tile<warp_size>& warp_tile, volatile int& set_mutex){ __threadfence(); warp_tile.sync(); // Synchronize the threads in the (sub-)warp. 
Execution barrier + memory fence // The first thread of this (sub-)warp to release the lock if(warp_tile.thread_rank() == 0){ atomicExch((int*)&set_mutex, 1); } } #endif // The (sub-)warp doing all reduction to find the slot with min slot_counter // The slot with min slot_counter is the LR slot. template<typename ref_counter_type, int warp_size> __forceinline__ __device__ void warp_min_reduction(const cg::thread_block_tile<warp_size>& warp_tile, ref_counter_type& min_slot_counter_val, size_t& slab_distance, size_t& slot_distance){ const size_t lane_idx = warp_tile.thread_rank(); slot_distance = lane_idx; for(size_t i = (warp_tile.size() >> 1); i > 0 ; i = i >> 1){ ref_counter_type input_slot_counter_val = warp_tile.shfl_xor(min_slot_counter_val, (int)i); size_t input_slab_distance = warp_tile.shfl_xor(slab_distance, (int)i); size_t input_slot_distance = warp_tile.shfl_xor(slot_distance, (int)i); if(input_slot_counter_val == min_slot_counter_val){ if(input_slab_distance == slab_distance){ if(input_slot_distance < slot_distance){ slot_distance = input_slot_distance; } } else if(input_slab_distance < slab_distance){ slab_distance = input_slab_distance; slot_distance = input_slot_distance; } } else if(input_slot_counter_val < min_slot_counter_val){ min_slot_counter_val = input_slot_counter_val; slab_distance = input_slab_distance; slot_distance = input_slot_distance; } } } /////////////////////////////////////////////////////////////////////////////////////////////////// #ifdef LIBCUDACXX_VERSION // Kernel to initialize the GPU cache // Init every entry of the cache with <unused_key, value> pair template<typename slabset, typename ref_counter_type, typename atomic_ref_counter_type, typename key_type, typename mutex> __global__ void init_cache(slabset* keys, ref_counter_type* slot_counter, atomic_ref_counter_type* global_counter, const size_t num_slot, const key_type empty_key, mutex* set_mutex, const size_t capacity_in_set) { const size_t idx = blockIdx.x * blockDim.x + threadIdx.x; if ( idx < num_slot ) { // Set the key of this slot to unused key // Flatten the cache key_type * key_slot = (key_type *)keys; key_slot[idx] = empty_key; // Clear the counter for this slot slot_counter[idx] = 0; } // First CUDA thread clear the global counter if( idx == 0 ){ new(global_counter) atomic_ref_counter_type(0); } // First capacity_in_set CUDA thread initialize mutex if( idx < capacity_in_set ){ new(set_mutex + idx) mutex(1); } } template<typename atomic_ref_counter_type, typename mutex> __global__ void destruct_kernel(atomic_ref_counter_type* global_counter, mutex* set_mutex, const size_t capacity_in_set) { const size_t idx = blockIdx.x * blockDim.x + threadIdx.x; // First CUDA thread destruct the global_counter if( idx == 0 ){ global_counter -> ~atomic_ref_counter_type(); } // First capacity_in_set CUDA thread destruct the set mutex if( idx < capacity_in_set ){ (set_mutex + idx) -> ~mutex(); } } #else // Kernel to initialize the GPU cache // Init every entry of the cache with <unused_key, value> pair template<typename slabset, typename ref_counter_type, typename key_type> __global__ void init_cache(slabset* keys, ref_counter_type* slot_counter, ref_counter_type* global_counter, const size_t num_slot, const key_type empty_key, int* set_mutex, const size_t capacity_in_set) { const size_t idx = blockIdx.x * blockDim.x + threadIdx.x; if ( idx < num_slot ) { // Set the key of this slot to unused key // Flatten the cache key_type * key_slot = (key_type *)keys; key_slot[idx] = empty_key; // Clear the counter 
for this slot slot_counter[idx] = 0; } // First CUDA thread clear the global counter if( idx == 0 ){ global_counter[idx] = 0; } // First capacity_in_set CUDA thread initialize mutex if( idx < capacity_in_set ){ set_mutex[idx] = 1; } } #endif // Kernel to update global counter // Resolve distance overflow issue as well #ifdef LIBCUDACXX_VERSION template<typename atomic_ref_counter_type> __global__ void update_kernel_overflow_ignore(atomic_ref_counter_type* global_counter, size_t* d_missing_len){ // Update global counter global_counter -> fetch_add(1, cuda::std::memory_order_relaxed); *d_missing_len = 0; } #else template<typename ref_counter_type> __global__ void update_kernel_overflow_ignore(ref_counter_type* global_counter, size_t* d_missing_len){ // Update global counter atomicAdd(global_counter, 1); *d_missing_len = 0; } #endif #ifdef LIBCUDACXX_VERSION // Kernel to read from cache // Also update locality information for touched slot template<typename key_type, typename ref_counter_type, typename atomic_ref_counter_type, typename slabset, typename set_hasher, typename slab_hasher, typename mutex, key_type empty_key, int set_associativity, int warp_size> __global__ void get_kernel(const key_type* d_keys, const size_t len, float* d_values, const size_t embedding_vec_size, uint64_t* d_missing_index, key_type* d_missing_keys, size_t* d_missing_len, const atomic_ref_counter_type* global_counter, ref_counter_type* slot_counter, const size_t capacity_in_set, const slabset* keys, const float* vals, mutex* set_mutex, const size_t task_per_warp_tile){ // Lane(thread) ID within a warp_tile cg::thread_block_tile<warp_size> warp_tile = cg::tiled_partition<warp_size>(cg::this_thread_block()); const size_t lane_idx = warp_tile.thread_rank(); // Warp tile global ID const size_t warp_tile_global_idx = (blockIdx.x * (blockDim.x / warp_size)) + warp_tile.meta_group_rank(); // The index of key for this thread const size_t key_idx = (warp_tile_global_idx * task_per_warp_tile) + lane_idx; // The assigned key for this lane(thread) key_type key; // The dst slabset and the dst slab inside this set size_t src_set; size_t src_slab; // The variable that contains the missing key key_type missing_key; // The variable that contains the index for the missing key uint64_t missing_index; // The counter for counting the missing key in this warp uint8_t warp_missing_counter = 0; // Active flag: whether current lane(thread) has unfinished task bool active = false; if(lane_idx < task_per_warp_tile){ if(key_idx < len){ active = true; key = d_keys[key_idx]; src_set = set_hasher::hash(key) % capacity_in_set; src_slab = slab_hasher::hash(key) % set_associativity; } } // Lane participate in warp_tile ballot to produce warp-level work queue unsigned active_mask = warp_tile.ballot(active); // The warp-level outer loop: finish all the tasks within the work queue while(active_mask != 0){ // Next task in the work quere, start from lower index lane(thread) int next_lane = __ffs(active_mask) - 1; // Broadcast the task and the global index to all lane in the warp_tile key_type next_key = warp_tile.shfl(key, next_lane); size_t next_idx = warp_tile.shfl(key_idx, next_lane); size_t next_set = warp_tile.shfl(src_set, next_lane); size_t next_slab = warp_tile.shfl(src_slab, next_lane); // Counter to record how many slab have been searched size_t counter = 0; // Working queue before task started const unsigned old_active_mask = active_mask; // Lock the slabset before operating the slabset warp_lock_mutex<mutex, warp_size>(warp_tile, 
set_mutex[next_set]); // The warp-level inner loop: finish a single task in the work queue while(active_mask == old_active_mask){ // When all the slabs inside a slabset have been searched, mark missing task, task is completed if(counter >= set_associativity){ if(lane_idx == warp_missing_counter){ missing_key = next_key; missing_index = next_idx; } if(lane_idx == (size_t)next_lane){ active = false; } warp_missing_counter++; active_mask = warp_tile.ballot(active); break; } // The warp_tile read out the slab key_type read_key = keys[next_set].set_[next_slab].slab_[lane_idx]; // Compare the slab data with the target key int found_lane = __ffs(warp_tile.ballot(read_key == next_key)) - 1; // If found, mark hit task, copy the founded data, the task is completed if(found_lane >= 0){ size_t found_offset = (next_set * set_associativity + next_slab) * warp_size + found_lane; if(lane_idx == (size_t)next_lane){ slot_counter[found_offset] = global_counter -> load(cuda::std::memory_order_relaxed); active = false; } warp_tile_copy<warp_size>(lane_idx, embedding_vec_size, d_values + next_idx * embedding_vec_size, vals + found_offset * embedding_vec_size); active_mask = warp_tile.ballot(active); break; } // Compare the slab data with empty key, if found empty key, mark missing task, task is completed if(warp_tile.ballot(read_key == empty_key) != 0){ if(lane_idx == warp_missing_counter){ missing_key = next_key; missing_index = next_idx; } if(lane_idx == (size_t)next_lane){ active = false; } warp_missing_counter++; active_mask = warp_tile.ballot(active); break; } // Not found in this slab, the task is not completed, goto searching next slab counter++; next_slab = (next_slab + 1) % set_associativity; } // Unlock the slabset after operating the slabset warp_unlock_mutex<mutex, warp_size>(warp_tile, set_mutex[next_set]); } // After warp_tile complete the working queue, save the result for output // First thread of the warp_tile accumulate the missing length to global variable size_t warp_position; if(lane_idx == 0){ warp_position = atomicAdd(d_missing_len, (size_t)warp_missing_counter); } warp_position = warp_tile.shfl(warp_position, 0); if(lane_idx < warp_missing_counter){ d_missing_keys[warp_position + lane_idx] = missing_key; d_missing_index[warp_position + lane_idx] = missing_index; } } #else // Kernel to read from cache // Also update locality information for touched slot template<typename key_type, typename ref_counter_type, typename slabset, typename set_hasher, typename slab_hasher, key_type empty_key, int set_associativity, int warp_size> __global__ void get_kernel(const key_type* d_keys, const size_t len, float* d_values, const size_t embedding_vec_size, uint64_t* d_missing_index, key_type* d_missing_keys, size_t* d_missing_len, ref_counter_type* global_counter, volatile ref_counter_type* slot_counter, const size_t capacity_in_set, volatile slabset* keys, volatile float* vals, volatile int* set_mutex, const size_t task_per_warp_tile){ // Lane(thread) ID within a warp_tile cg::thread_block_tile<warp_size> warp_tile = cg::tiled_partition<warp_size>(cg::this_thread_block()); const size_t lane_idx = warp_tile.thread_rank(); // Warp tile global ID const size_t warp_tile_global_idx = (blockIdx.x * (blockDim.x / warp_size)) + warp_tile.meta_group_rank(); // The index of key for this thread const size_t key_idx = (warp_tile_global_idx * task_per_warp_tile) + lane_idx; // The assigned key for this lane(thread) key_type key; // The dst slabset and the dst slab inside this set size_t src_set; size_t src_slab; // 
The variable that contains the missing key key_type missing_key; // The variable that contains the index for the missing key uint64_t missing_index; // The counter for counting the missing key in this warp uint8_t warp_missing_counter = 0; // Active flag: whether current lane(thread) has unfinished task bool active = false; if(lane_idx < task_per_warp_tile){ if(key_idx < len){ active = true; key = d_keys[key_idx]; src_set = set_hasher::hash(key) % capacity_in_set; src_slab = slab_hasher::hash(key) % set_associativity; } } // Lane participate in warp_tile ballot to produce warp-level work queue unsigned active_mask = warp_tile.ballot(active); // The warp-level outer loop: finish all the tasks within the work queue while(active_mask != 0){ // Next task in the work quere, start from lower index lane(thread) int next_lane = __ffs(active_mask) - 1; // Broadcast the task and the global index to all lane in the warp_tile key_type next_key = warp_tile.shfl(key, next_lane); size_t next_idx = warp_tile.shfl(key_idx, next_lane); size_t next_set = warp_tile.shfl(src_set, next_lane); size_t next_slab = warp_tile.shfl(src_slab, next_lane); // Counter to record how many slab have been searched size_t counter = 0; // Working queue before task started const unsigned old_active_mask = active_mask; // Lock the slabset before operating the slabset warp_lock_mutex<warp_size>(warp_tile, set_mutex[next_set]); // The warp-level inner loop: finish a single task in the work queue while(active_mask == old_active_mask){ // When all the slabs inside a slabset have been searched, mark missing task, task is completed if(counter >= set_associativity){ if(lane_idx == warp_missing_counter){ missing_key = next_key; missing_index = next_idx; } if(lane_idx == (size_t)next_lane){ active = false; } warp_missing_counter++; active_mask = warp_tile.ballot(active); break; } // The warp_tile read out the slab key_type read_key = ((volatile key_type*)(keys[next_set].set_[next_slab].slab_))[lane_idx]; // Compare the slab data with the target key int found_lane = __ffs(warp_tile.ballot(read_key == next_key)) - 1; // If found, mark hit task, copy the founded data, the task is completed if(found_lane >= 0){ size_t found_offset = (next_set * set_associativity + next_slab) * warp_size + found_lane; if(lane_idx == (size_t)next_lane){ slot_counter[found_offset] = atomicAdd(global_counter, 0); active = false; } warp_tile_copy<warp_size>(lane_idx, embedding_vec_size, (volatile float*)(d_values + next_idx * embedding_vec_size), (volatile float*)(vals + found_offset * embedding_vec_size)); active_mask = warp_tile.ballot(active); break; } // Compare the slab data with empty key, if found empty key, mark missing task, task is completed if(warp_tile.ballot(read_key == empty_key) != 0){ if(lane_idx == warp_missing_counter){ missing_key = next_key; missing_index = next_idx; } if(lane_idx == (size_t)next_lane){ active = false; } warp_missing_counter++; active_mask = warp_tile.ballot(active); break; } // Not found in this slab, the task is not completed, goto searching next slab counter++; next_slab = (next_slab + 1) % set_associativity; } // Unlock the slabset after operating the slabset warp_unlock_mutex<warp_size>(warp_tile, set_mutex[next_set]); } // After warp_tile complete the working queue, save the result for output // First thread of the warp_tile accumulate the missing length to global variable size_t warp_position; if(lane_idx == 0){ warp_position = atomicAdd(d_missing_len, (size_t)warp_missing_counter); } warp_position = 
warp_tile.shfl(warp_position, 0); if(lane_idx < warp_missing_counter){ d_missing_keys[warp_position + lane_idx] = missing_key; d_missing_index[warp_position + lane_idx] = missing_index; } } #endif #ifdef LIBCUDACXX_VERSION // Kernel to insert or replace the <k,v> pairs into the cache template<typename key_type, typename slabset, typename ref_counter_type, typename mutex, typename atomic_ref_counter_type, typename set_hasher, typename slab_hasher, key_type empty_key, int set_associativity, int warp_size, ref_counter_type max_ref_counter_type = std::numeric_limits<ref_counter_type>::max(), size_t max_slab_distance = std::numeric_limits<size_t>::max()> __global__ void insert_replace_kernel(const key_type* d_keys, const float* d_values, const size_t embedding_vec_size, const size_t len, slabset* keys, float* vals, ref_counter_type* slot_counter, mutex* set_mutex, const atomic_ref_counter_type* global_counter, const size_t capacity_in_set, const size_t task_per_warp_tile){ // Lane(thread) ID within a warp_tile cg::thread_block_tile<warp_size> warp_tile = cg::tiled_partition<warp_size>(cg::this_thread_block()); const size_t lane_idx = warp_tile.thread_rank(); // Warp tile global ID const size_t warp_tile_global_idx = (blockIdx.x * (blockDim.x / warp_size)) + warp_tile.meta_group_rank(); // The index of key for this thread const size_t key_idx = (warp_tile_global_idx * task_per_warp_tile) + lane_idx; // The assigned key for this lane(thread) key_type key; // The dst slabset and the dst slab inside this set size_t src_set; size_t src_slab; // Active flag: whether current lane(thread) has unfinished task bool active = false; if(lane_idx < task_per_warp_tile){ if(key_idx < len){ active = true; key = d_keys[key_idx]; src_set = set_hasher::hash(key) % capacity_in_set; src_slab = slab_hasher::hash(key) % set_associativity; } } // Lane participate in warp_tile ballot to produce warp-level work queue unsigned active_mask = warp_tile.ballot(active); // The warp-level outer loop: finish all the tasks within the work queue while(active_mask != 0){ // Next task in the work quere, start from lower index lane(thread) int next_lane = __ffs(active_mask) - 1; // Broadcast the task, the global index and the src slabset and slab to all lane in a warp_tile key_type next_key = warp_tile.shfl(key, next_lane); size_t next_idx = warp_tile.shfl(key_idx, next_lane); size_t next_set = warp_tile.shfl(src_set, next_lane); size_t next_slab = warp_tile.shfl(src_slab, next_lane); size_t first_slab = next_slab; // Counter to record how many slab have been searched size_t counter = 0; // Variable to keep the min slot counter during the probing ref_counter_type min_slot_counter_val = max_ref_counter_type; // Variable to keep the slab distance for slot with min counter size_t slab_distance = max_slab_distance; // Variable to keep the slot distance for slot with min counter within the slab size_t slot_distance; // Working queue before task started const unsigned old_active_mask = active_mask; // Lock the slabset before operating the slabset warp_lock_mutex<mutex, warp_size>(warp_tile, set_mutex[next_set]); // The warp-level inner loop: finish a single task in the work queue while(active_mask == old_active_mask){ // When all the slabs inside a slabset have been searched // and no empty slots or target slots are found. 
Replace with LRU if(counter >= set_associativity){ // (sub)Warp all-reduction, the reduction result store in all threads warp_min_reduction<ref_counter_type, warp_size>(warp_tile, min_slot_counter_val, slab_distance, slot_distance); // Calculate the position of LR slot size_t target_slab = (first_slab + slab_distance) % set_associativity; size_t slot_index = (next_set * set_associativity + target_slab) * warp_size + slot_distance; // Replace the LR slot if(lane_idx == (size_t)next_lane){ keys[next_set].set_[target_slab].slab_[slot_distance] = key; slot_counter[slot_index] = global_counter -> load(cuda::std::memory_order_relaxed); } warp_tile_copy<warp_size>(lane_idx, embedding_vec_size, vals + slot_index * embedding_vec_size, d_values + next_idx * embedding_vec_size); // Replace complete, mark this task completed if(lane_idx == (size_t)next_lane){ active = false; } active_mask = warp_tile.ballot(active); break; } // The warp_tile read out the slab key_type read_key = keys[next_set].set_[next_slab].slab_[lane_idx]; // Compare the slab data with the target key int found_lane = __ffs(warp_tile.ballot(read_key == next_key)) - 1; // If found target key, the insertion/replace is no longer needed. // Refresh the slot, the task is completed if(found_lane >= 0){ size_t found_offset = (next_set * set_associativity + next_slab) * warp_size + found_lane; if(lane_idx == (size_t)next_lane){ slot_counter[found_offset] = global_counter -> load(cuda::std::memory_order_relaxed); active = false; } active_mask = warp_tile.ballot(active); break; } // Compare the slab data with empty key. // If found empty key, do insertion,the task is complete found_lane = __ffs(warp_tile.ballot(read_key == empty_key)) - 1; if(found_lane >= 0){ size_t found_offset = (next_set * set_associativity + next_slab) * warp_size + found_lane; if(lane_idx == (size_t)next_lane){ keys[next_set].set_[next_slab].slab_[found_lane] = key; slot_counter[found_offset] = global_counter -> load(cuda::std::memory_order_relaxed); } warp_tile_copy<warp_size>(lane_idx, embedding_vec_size, vals + found_offset * embedding_vec_size, d_values + next_idx * embedding_vec_size); if(lane_idx == (size_t)next_lane){ active = false; } active_mask = warp_tile.ballot(active); break; } // If no target or unused slot found in this slab, // Refresh LR info, continue probing ref_counter_type read_slot_counter = slot_counter[(next_set * set_associativity + next_slab) * warp_size + lane_idx]; if(read_slot_counter < min_slot_counter_val){ min_slot_counter_val = read_slot_counter; slab_distance = counter; } counter++; next_slab = (next_slab + 1) % set_associativity; } // Unlock the slabset after operating the slabset warp_unlock_mutex<mutex, warp_size>(warp_tile, set_mutex[next_set]); } } #else // Kernel to insert or replace the <k,v> pairs into the cache template<typename key_type, typename slabset, typename ref_counter_type, typename set_hasher, typename slab_hasher, key_type empty_key, int set_associativity, int warp_size, ref_counter_type max_ref_counter_type = std::numeric_limits<ref_counter_type>::max(), size_t max_slab_distance = std::numeric_limits<size_t>::max()> __global__ void insert_replace_kernel(const key_type* d_keys, const float* d_values, const size_t embedding_vec_size, const size_t len, volatile slabset* keys, volatile float* vals, volatile ref_counter_type* slot_counter, volatile int* set_mutex, ref_counter_type* global_counter, const size_t capacity_in_set, const size_t task_per_warp_tile){ // Lane(thread) ID within a warp_tile 
cg::thread_block_tile<warp_size> warp_tile = cg::tiled_partition<warp_size>(cg::this_thread_block()); const size_t lane_idx = warp_tile.thread_rank(); // Warp tile global ID const size_t warp_tile_global_idx = (blockIdx.x * (blockDim.x / warp_size)) + warp_tile.meta_group_rank(); // The index of key for this thread const size_t key_idx = (warp_tile_global_idx * task_per_warp_tile) + lane_idx; // The assigned key for this lane(thread) key_type key; // The dst slabset and the dst slab inside this set size_t src_set; size_t src_slab; // Active flag: whether current lane(thread) has unfinished task bool active = false; if(lane_idx < task_per_warp_tile){ if(key_idx < len){ active = true; key = d_keys[key_idx]; src_set = set_hasher::hash(key) % capacity_in_set; src_slab = slab_hasher::hash(key) % set_associativity; } } // Lane participate in warp_tile ballot to produce warp-level work queue unsigned active_mask = warp_tile.ballot(active); // The warp-level outer loop: finish all the tasks within the work queue while(active_mask != 0){ // Next task in the work quere, start from lower index lane(thread) int next_lane = __ffs(active_mask) - 1; // Broadcast the task, the global index and the src slabset and slab to all lane in a warp_tile key_type next_key = warp_tile.shfl(key, next_lane); size_t next_idx = warp_tile.shfl(key_idx, next_lane); size_t next_set = warp_tile.shfl(src_set, next_lane); size_t next_slab = warp_tile.shfl(src_slab, next_lane); size_t first_slab = next_slab; // Counter to record how many slab have been searched size_t counter = 0; // Variable to keep the min slot counter during the probing ref_counter_type min_slot_counter_val = max_ref_counter_type; // Variable to keep the slab distance for slot with min counter size_t slab_distance = max_slab_distance; // Variable to keep the slot distance for slot with min counter within the slab size_t slot_distance; // Working queue before task started const unsigned old_active_mask = active_mask; // Lock the slabset before operating the slabset warp_lock_mutex<warp_size>(warp_tile, set_mutex[next_set]); // The warp-level inner loop: finish a single task in the work queue while(active_mask == old_active_mask){ // When all the slabs inside a slabset have been searched // and no empty slots or target slots are found. 
Replace with LRU if(counter >= set_associativity){ // (sub)Warp all-reduction, the reduction result store in all threads warp_min_reduction<ref_counter_type, warp_size>(warp_tile, min_slot_counter_val, slab_distance, slot_distance); // Calculate the position of LR slot size_t target_slab = (first_slab + slab_distance) % set_associativity; size_t slot_index = (next_set * set_associativity + target_slab) * warp_size + slot_distance; // Replace the LR slot if(lane_idx == (size_t)next_lane){ ((volatile key_type*)(keys[next_set].set_[target_slab].slab_))[slot_distance] = key; slot_counter[slot_index] = atomicAdd(global_counter, 0); } warp_tile_copy<warp_size>(lane_idx, embedding_vec_size, (volatile float*)(vals + slot_index * embedding_vec_size), (volatile float*)(d_values + next_idx * embedding_vec_size)); // Replace complete, mark this task completed if(lane_idx == (size_t)next_lane){ active = false; } active_mask = warp_tile.ballot(active); break; } // The warp_tile read out the slab key_type read_key = ((volatile key_type*)(keys[next_set].set_[next_slab].slab_))[lane_idx]; // Compare the slab data with the target key int found_lane = __ffs(warp_tile.ballot(read_key == next_key)) - 1; // If found target key, the insertion/replace is no longer needed. // Refresh the slot, the task is completed if(found_lane >= 0){ size_t found_offset = (next_set * set_associativity + next_slab) * warp_size + found_lane; if(lane_idx == (size_t)next_lane){ slot_counter[found_offset] = atomicAdd(global_counter, 0); active = false; } active_mask = warp_tile.ballot(active); break; } // Compare the slab data with empty key. // If found empty key, do insertion,the task is complete found_lane = __ffs(warp_tile.ballot(read_key == empty_key)) - 1; if(found_lane >= 0){ size_t found_offset = (next_set * set_associativity + next_slab) * warp_size + found_lane; if(lane_idx == (size_t)next_lane){ ((volatile key_type*)(keys[next_set].set_[next_slab].slab_))[found_lane] = key; slot_counter[found_offset] = atomicAdd(global_counter, 0); } warp_tile_copy<warp_size>(lane_idx, embedding_vec_size, (volatile float*)(vals + found_offset * embedding_vec_size), (volatile float*)(d_values + next_idx * embedding_vec_size)); if(lane_idx == (size_t)next_lane){ active = false; } active_mask = warp_tile.ballot(active); break; } // If no target or unused slot found in this slab, // Refresh LR info, continue probing ref_counter_type read_slot_counter = slot_counter[(next_set * set_associativity + next_slab) * warp_size + lane_idx]; if(read_slot_counter < min_slot_counter_val){ min_slot_counter_val = read_slot_counter; slab_distance = counter; } counter++; next_slab = (next_slab + 1) % set_associativity; } // Unlock the slabset after operating the slabset warp_unlock_mutex<warp_size>(warp_tile, set_mutex[next_set]); } } #endif #ifdef LIBCUDACXX_VERSION // Kernel to update the existing keys in the cache // Will not change the locality information template<typename key_type, typename slabset, typename set_hasher, typename slab_hasher, typename mutex, key_type empty_key, int set_associativity, int warp_size> __global__ void update_kernel(const key_type* d_keys, const size_t len, const float* d_values, const size_t embedding_vec_size, const size_t capacity_in_set, const slabset* keys, float* vals, mutex* set_mutex, const size_t task_per_warp_tile){ // Lane(thread) ID within a warp_tile cg::thread_block_tile<warp_size> warp_tile = cg::tiled_partition<warp_size>(cg::this_thread_block()); const size_t lane_idx = warp_tile.thread_rank(); // Warp tile 
global ID const size_t warp_tile_global_idx = (blockIdx.x * (blockDim.x / warp_size)) + warp_tile.meta_group_rank(); // The index of key for this thread const size_t key_idx = (warp_tile_global_idx * task_per_warp_tile) + lane_idx; // The assigned key for this lane(thread) key_type key; // The dst slabset and the dst slab inside this set size_t src_set; size_t src_slab; // Active flag: whether current lane(thread) has unfinished task bool active = false; if(lane_idx < task_per_warp_tile){ if(key_idx < len){ active = true; key = d_keys[key_idx]; src_set = set_hasher::hash(key) % capacity_in_set; src_slab = slab_hasher::hash(key) % set_associativity; } } // Lane participate in warp_tile ballot to produce warp-level work queue unsigned active_mask = warp_tile.ballot(active); // The warp-level outer loop: finish all the tasks within the work queue while(active_mask != 0){ // Next task in the work quere, start from lower index lane(thread) int next_lane = __ffs(active_mask) - 1; // Broadcast the task and the global index to all lane in the warp_tile key_type next_key = warp_tile.shfl(key, next_lane); size_t next_idx = warp_tile.shfl(key_idx, next_lane); size_t next_set = warp_tile.shfl(src_set, next_lane); size_t next_slab = warp_tile.shfl(src_slab, next_lane); // Counter to record how many slab have been searched size_t counter = 0; // Working queue before task started const unsigned old_active_mask = active_mask; // Lock the slabset before operating the slabset warp_lock_mutex<mutex, warp_size>(warp_tile, set_mutex[next_set]); // The warp-level inner loop: finish a single task in the work queue while(active_mask == old_active_mask){ // When all the slabs inside a slabset have been searched, mark missing task, do nothing, task complete if(counter >= set_associativity){ if(lane_idx == (size_t)next_lane){ active = false; } active_mask = warp_tile.ballot(active); break; } // The warp_tile read out the slab key_type read_key = keys[next_set].set_[next_slab].slab_[lane_idx]; // Compare the slab data with the target key int found_lane = __ffs(warp_tile.ballot(read_key == next_key)) - 1; // If found, mark hit task, update the value, the task is completed if(found_lane >= 0){ size_t found_offset = (next_set * set_associativity + next_slab) * warp_size + found_lane; if(lane_idx == (size_t)next_lane){ active = false; } warp_tile_copy<warp_size>(lane_idx, embedding_vec_size, vals + found_offset * embedding_vec_size, d_values + next_idx * embedding_vec_size); active_mask = warp_tile.ballot(active); break; } // Compare the slab data with empty key, if found empty key, mark missing task, do nothing, task is completed if(warp_tile.ballot(read_key == empty_key) != 0){ if(lane_idx == (size_t)next_lane){ active = false; } active_mask = warp_tile.ballot(active); break; } // Not found in this slab, the task is not completed, goto searching next slab counter++; next_slab = (next_slab + 1) % set_associativity; } // Unlock the slabset after operating the slabset warp_unlock_mutex<mutex, warp_size>(warp_tile, set_mutex[next_set]); } } #else // Kernel to update the existing keys in the cache // Will not change the locality information template<typename key_type, typename slabset, typename set_hasher, typename slab_hasher, key_type empty_key, int set_associativity, int warp_size> __global__ void update_kernel(const key_type* d_keys, const size_t len, const float* d_values, const size_t embedding_vec_size, const size_t capacity_in_set, volatile slabset* keys, volatile float* vals, volatile int* set_mutex, const size_t 
task_per_warp_tile){ // Lane(thread) ID within a warp_tile cg::thread_block_tile<warp_size> warp_tile = cg::tiled_partition<warp_size>(cg::this_thread_block()); const size_t lane_idx = warp_tile.thread_rank(); // Warp tile global ID const size_t warp_tile_global_idx = (blockIdx.x * (blockDim.x / warp_size)) + warp_tile.meta_group_rank(); // The index of key for this thread const size_t key_idx = (warp_tile_global_idx * task_per_warp_tile) + lane_idx; // The assigned key for this lane(thread) key_type key; // The dst slabset and the dst slab inside this set size_t src_set; size_t src_slab; // Active flag: whether current lane(thread) has unfinished task bool active = false; if(lane_idx < task_per_warp_tile){ if(key_idx < len){ active = true; key = d_keys[key_idx]; src_set = set_hasher::hash(key) % capacity_in_set; src_slab = slab_hasher::hash(key) % set_associativity; } } // Lane participate in warp_tile ballot to produce warp-level work queue unsigned active_mask = warp_tile.ballot(active); // The warp-level outer loop: finish all the tasks within the work queue while(active_mask != 0){ // Next task in the work quere, start from lower index lane(thread) int next_lane = __ffs(active_mask) - 1; // Broadcast the task and the global index to all lane in the warp_tile key_type next_key = warp_tile.shfl(key, next_lane); size_t next_idx = warp_tile.shfl(key_idx, next_lane); size_t next_set = warp_tile.shfl(src_set, next_lane); size_t next_slab = warp_tile.shfl(src_slab, next_lane); // Counter to record how many slab have been searched size_t counter = 0; // Working queue before task started const unsigned old_active_mask = active_mask; // Lock the slabset before operating the slabset warp_lock_mutex<warp_size>(warp_tile, set_mutex[next_set]); // The warp-level inner loop: finish a single task in the work queue while(active_mask == old_active_mask){ // When all the slabs inside a slabset have been searched, mark missing task, do nothing, task complete if(counter >= set_associativity){ if(lane_idx == (size_t)next_lane){ active = false; } active_mask = warp_tile.ballot(active); break; } // The warp_tile read out the slab key_type read_key = ((volatile key_type*)(keys[next_set].set_[next_slab].slab_))[lane_idx]; // Compare the slab data with the target key int found_lane = __ffs(warp_tile.ballot(read_key == next_key)) - 1; // If found, mark hit task, update the value, the task is completed if(found_lane >= 0){ size_t found_offset = (next_set * set_associativity + next_slab) * warp_size + found_lane; if(lane_idx == (size_t)next_lane){ active = false; } warp_tile_copy<warp_size>(lane_idx, embedding_vec_size, (volatile float*)(vals + found_offset * embedding_vec_size), (volatile float*)(d_values + next_idx * embedding_vec_size)); active_mask = warp_tile.ballot(active); break; } // Compare the slab data with empty key, if found empty key, mark missing task, do nothing, task is completed if(warp_tile.ballot(read_key == empty_key) != 0){ if(lane_idx == (size_t)next_lane){ active = false; } active_mask = warp_tile.ballot(active); break; } // Not found in this slab, the task is not completed, goto searching next slab counter++; next_slab = (next_slab + 1) % set_associativity; } // Unlock the slabset after operating the slabset warp_unlock_mutex<warp_size>(warp_tile, set_mutex[next_set]); } } #endif #ifdef LIBCUDACXX_VERSION template<typename key_type, typename slabset, typename mutex, key_type empty_key, int set_associativity, int warp_size> __global__ void dump_kernel(key_type* d_keys, size_t* 
d_dump_counter, const slabset* keys, mutex* set_mutex, const size_t start_set_index, const size_t end_set_index){ // Block-level counter used by all warp tiles within a block __shared__ uint32_t block_acc; // Initialize block-level counter if(threadIdx.x == 0){ block_acc = 0; } __syncthreads(); // Lane(thread) ID within a warp tile cg::thread_block_tile<warp_size> warp_tile = cg::tiled_partition<warp_size>(cg::this_thread_block()); const size_t lane_idx = warp_tile.thread_rank(); // Warp tile target slabset id const size_t set_idx = ((blockIdx.x * (blockDim.x / warp_size)) + warp_tile.meta_group_rank()) + start_set_index; // Keys dump from cache key_type read_key[set_associativity]; // Lane(thread) offset for storing each key uint32_t thread_key_offset[set_associativity]; // Warp offset for storing each key uint32_t warp_key_offset; // Block offset for storing each key __shared__ size_t block_key_offset; // Warp tile dump target slabset if(set_idx < end_set_index){ // Lock the slabset before operating the slabset warp_lock_mutex<mutex, warp_size>(warp_tile, set_mutex[set_idx]); // The warp tile read out the slabset for(unsigned slab_id = 0; slab_id < set_associativity; slab_id++){ // The warp tile read out a slab read_key[slab_id] = keys[set_idx].set_[slab_id].slab_[lane_idx]; } // Finish dumping the slabset, unlock the slabset warp_unlock_mutex<mutex, warp_size>(warp_tile, set_mutex[set_idx]); // Each lane(thread) within the warp tile calculate the offset to store its keys uint32_t warp_tile_total_keys = 0; for(unsigned slab_id = 0; slab_id < set_associativity; slab_id++){ unsigned valid_mask = warp_tile.ballot(read_key[slab_id] != empty_key); thread_key_offset[slab_id] = __popc(valid_mask & ((1U << lane_idx) - 1U)) + warp_tile_total_keys; warp_tile_total_keys = warp_tile_total_keys + __popc(valid_mask); } // Each warp tile request a unique place from the block-level counter if(lane_idx == 0){ warp_key_offset = atomicAdd(&block_acc, warp_tile_total_keys); } warp_key_offset = warp_tile.shfl(warp_key_offset, 0); } // Each block request a unique place in global memory output buffer __syncthreads(); if(threadIdx.x == 0){ block_key_offset = atomicAdd(d_dump_counter, (size_t)block_acc); } __syncthreads(); // Warp tile store the (non-empty)keys back to output buffer if(set_idx < end_set_index){ for(unsigned slab_id = 0; slab_id < set_associativity; slab_id++){ if(read_key[slab_id] != empty_key){ d_keys[block_key_offset + warp_key_offset + thread_key_offset[slab_id]] = read_key[slab_id]; } } } } #else template<typename key_type, typename slabset, key_type empty_key, int set_associativity, int warp_size> __global__ void dump_kernel(key_type* d_keys, size_t* d_dump_counter, volatile slabset* keys, volatile int* set_mutex, const size_t start_set_index, const size_t end_set_index){ // Block-level counter used by all warp tiles within a block __shared__ uint32_t block_acc; // Initialize block-level counter if(threadIdx.x == 0){ block_acc = 0; } __syncthreads(); // Lane(thread) ID within a warp tile cg::thread_block_tile<warp_size> warp_tile = cg::tiled_partition<warp_size>(cg::this_thread_block()); const size_t lane_idx = warp_tile.thread_rank(); // Warp tile target slabset id const size_t set_idx = ((blockIdx.x * (blockDim.x / warp_size)) + warp_tile.meta_group_rank()) + start_set_index; // Keys dump from cache key_type read_key[set_associativity]; // Lane(thread) offset for storing each key uint32_t thread_key_offset[set_associativity]; // Warp offset for storing each key uint32_t warp_key_offset; 
// Block offset for storing each key __shared__ size_t block_key_offset; // Warp tile dump target slabset if(set_idx < end_set_index){ // Lock the slabset before operating the slabset warp_lock_mutex<warp_size>(warp_tile, set_mutex[set_idx]); // The warp tile read out the slabset for(unsigned slab_id = 0; slab_id < set_associativity; slab_id++){ // The warp tile read out a slab read_key[slab_id] = ((volatile key_type*)(keys[set_idx].set_[slab_id].slab_))[lane_idx]; } // Finish dumping the slabset, unlock the slabset warp_unlock_mutex<warp_size>(warp_tile, set_mutex[set_idx]); // Each lane(thread) within the warp tile calculate the offset to store its keys uint32_t warp_tile_total_keys = 0; for(unsigned slab_id = 0; slab_id < set_associativity; slab_id++){ unsigned valid_mask = warp_tile.ballot(read_key[slab_id] != empty_key); thread_key_offset[slab_id] = __popc(valid_mask & ((1U << lane_idx) - 1U)) + warp_tile_total_keys; warp_tile_total_keys = warp_tile_total_keys + __popc(valid_mask); } // Each warp tile request a unique place from the block-level counter if(lane_idx == 0){ warp_key_offset = atomicAdd(&block_acc, warp_tile_total_keys); } warp_key_offset = warp_tile.shfl(warp_key_offset, 0); } // Each block request a unique place in global memory output buffer __syncthreads(); if(threadIdx.x == 0){ block_key_offset = atomicAdd(d_dump_counter, (size_t)block_acc); } __syncthreads(); // Warp tile store the (non-empty)keys back to output buffer if(set_idx < end_set_index){ for(unsigned slab_id = 0; slab_id < set_associativity; slab_id++){ if(read_key[slab_id] != empty_key){ d_keys[block_key_offset + warp_key_offset + thread_key_offset[slab_id]] = read_key[slab_id]; } } } } #endif /////////////////////////////////////////////////////////////////////////////////////////////////// #ifdef LIBCUDACXX_VERSION template<typename key_type, typename ref_counter_type, key_type empty_key, int set_associativity, int warp_size, typename set_hasher, typename slab_hasher> gpu_cache<key_type, ref_counter_type, empty_key, set_associativity, warp_size, set_hasher, slab_hasher>::gpu_cache(const size_t capacity_in_set, const size_t embedding_vec_size) :capacity_in_set_(capacity_in_set), embedding_vec_size_(embedding_vec_size){ // Check parameter if(capacity_in_set_ == 0){ printf("Error: Invalid value for capacity_in_set.\n"); return; } if(embedding_vec_size_ == 0){ printf("Error: Invalid value for embedding_vec_size.\n"); return; } if(set_associativity <= 0){ printf("Error: Invalid value for set_associativity.\n"); return; } if(warp_size != 1 && warp_size != 2 && warp_size != 4 && warp_size != 8 && warp_size != 16 && warp_size != 32){ printf("Error: Invalid value for warp_size.\n"); return; } // Get the current CUDA dev CUDA_CHECK(cudaGetDevice( &dev_ )); // Calculate # of slot num_slot_ = capacity_in_set_ * set_associativity * warp_size; // Allocate GPU memory for cache CUDA_CHECK(cudaMalloc((void**)&keys_, sizeof(slabset) * capacity_in_set_)); CUDA_CHECK(cudaMalloc((void**)&vals_, sizeof(float) * embedding_vec_size_ * num_slot_)); CUDA_CHECK(cudaMalloc((void**)&slot_counter_, sizeof(ref_counter_type) * num_slot_)); CUDA_CHECK(cudaMalloc((void**)&global_counter_, sizeof(atomic_ref_counter_type))); // Allocate GPU memory for set mutex CUDA_CHECK(cudaMalloc((void**)&set_mutex_, sizeof(mutex) * capacity_in_set_)); // Initialize the cache, set all entry to unused <K,V> init_cache<<<((num_slot_-1)/BLOCK_SIZE_)+1, BLOCK_SIZE_>>>(keys_, slot_counter_, global_counter_, num_slot_, empty_key, set_mutex_, 
capacity_in_set_); // Wait for initialization to finish CUDA_CHECK(cudaStreamSynchronize(0)); CUDA_CHECK(cudaGetLastError()); } #else template<typename key_type, typename ref_counter_type, key_type empty_key, int set_associativity, int warp_size, typename set_hasher, typename slab_hasher> gpu_cache<key_type, ref_counter_type, empty_key, set_associativity, warp_size, set_hasher, slab_hasher>::gpu_cache(const size_t capacity_in_set, const size_t embedding_vec_size) :capacity_in_set_(capacity_in_set), embedding_vec_size_(embedding_vec_size){ // Check parameter if(capacity_in_set_ == 0){ printf("Error: Invalid value for capacity_in_set.\n"); return; } if(embedding_vec_size_ == 0){ printf("Error: Invalid value for embedding_vec_size.\n"); return; } if(set_associativity <= 0){ printf("Error: Invalid value for set_associativity.\n"); return; } if(warp_size != 1 && warp_size != 2 && warp_size != 4 && warp_size != 8 && warp_size != 16 && warp_size != 32){ printf("Error: Invalid value for warp_size.\n"); return; } // Get the current CUDA dev CUDA_CHECK(cudaGetDevice( &dev_ )); // Calculate # of slot num_slot_ = capacity_in_set_ * set_associativity * warp_size; // Allocate GPU memory for cache CUDA_CHECK(cudaMalloc((void**)&keys_, sizeof(slabset) * capacity_in_set_)); CUDA_CHECK(cudaMalloc((void**)&vals_, sizeof(float) * embedding_vec_size_ * num_slot_)); CUDA_CHECK(cudaMalloc((void**)&slot_counter_, sizeof(ref_counter_type) * num_slot_)); CUDA_CHECK(cudaMalloc((void**)&global_counter_, sizeof(ref_counter_type))); // Allocate GPU memory for set mutex CUDA_CHECK(cudaMalloc((void**)&set_mutex_, sizeof(int) * capacity_in_set_)); // Initialize the cache, set all entry to unused <K,V> init_cache<<<((num_slot_-1)/BLOCK_SIZE_)+1, BLOCK_SIZE_>>>(keys_, slot_counter_, global_counter_, num_slot_, empty_key, set_mutex_, capacity_in_set_); // Wait for initialization to finish CUDA_CHECK(cudaStreamSynchronize(0)); CUDA_CHECK(cudaGetLastError()); } #endif #ifdef LIBCUDACXX_VERSION template<typename key_type, typename ref_counter_type, key_type empty_key, int set_associativity, int warp_size, typename set_hasher, typename slab_hasher> gpu_cache<key_type, ref_counter_type, empty_key, set_associativity, warp_size, set_hasher, slab_hasher>::~gpu_cache(){ // Device Restorer nv::CudaDeviceRestorer dev_restorer; // Set device CUDA_CHECK(cudaSetDevice(dev_)); // Destruct CUDA std object destruct_kernel<<<((capacity_in_set_-1)/BLOCK_SIZE_)+1, BLOCK_SIZE_>>>(global_counter_, set_mutex_, capacity_in_set_); // Wait for destruction to finish CUDA_CHECK(cudaStreamSynchronize(0)); // Free GPU memory for cache CUDA_CHECK(cudaFree( keys_ )); CUDA_CHECK(cudaFree( vals_ )); CUDA_CHECK(cudaFree( slot_counter_ )); CUDA_CHECK(cudaFree( global_counter_ )); // Free GPU memory for set mutex CUDA_CHECK(cudaFree( set_mutex_ )); } #else template<typename key_type, typename ref_counter_type, key_type empty_key, int set_associativity, int warp_size, typename set_hasher, typename slab_hasher> gpu_cache<key_type, ref_counter_type, empty_key, set_associativity, warp_size, set_hasher, slab_hasher>::~gpu_cache() noexcept(false) { // Device Restorer nv::CudaDeviceRestorer dev_restorer; // Set device CUDA_CHECK(cudaSetDevice(dev_)); // Free GPU memory for cache CUDA_CHECK(cudaFree( keys_ )); CUDA_CHECK(cudaFree( vals_ )); CUDA_CHECK(cudaFree( slot_counter_ )); CUDA_CHECK(cudaFree( global_counter_ )); // Free GPU memory for set mutex CUDA_CHECK(cudaFree( set_mutex_ )); } #endif #ifdef LIBCUDACXX_VERSION template<typename key_type, typename 
ref_counter_type, key_type empty_key, int set_associativity, int warp_size, typename set_hasher, typename slab_hasher> void gpu_cache<key_type, ref_counter_type, empty_key, set_associativity, warp_size, set_hasher, slab_hasher>:: Query(const key_type* d_keys, const size_t len, float* d_values, uint64_t* d_missing_index, key_type* d_missing_keys, size_t* d_missing_len, cudaStream_t stream, const size_t task_per_warp_tile){ // Device Restorer nv::CudaDeviceRestorer dev_restorer; // Set to the device of this cache CUDA_CHECK(cudaSetDevice(dev_)); // Check if it is a valid query if(len == 0){ // Set the d_missing_len to 0 before return CUDA_CHECK(cudaMemsetAsync(d_missing_len, 0, sizeof(size_t), stream)); return; } // Update the global counter as user perform a new(most recent) read operation to the cache // Resolve distance overflow issue as well. update_kernel_overflow_ignore<atomic_ref_counter_type><<<1, 1, 0, stream>>>(global_counter_, d_missing_len); // Read from the cache // Touch and refresh the hitting slot const size_t keys_per_block = (BLOCK_SIZE_ / warp_size) * task_per_warp_tile; const size_t grid_size = ((len - 1) / keys_per_block) + 1; get_kernel<key_type, ref_counter_type, atomic_ref_counter_type, slabset, set_hasher, slab_hasher, mutex, empty_key, set_associativity, warp_size> <<<grid_size, BLOCK_SIZE_, 0, stream>>>(d_keys, len, d_values, embedding_vec_size_, d_missing_index, d_missing_keys, d_missing_len, global_counter_, slot_counter_, capacity_in_set_, keys_, vals_, set_mutex_, task_per_warp_tile); // Check for GPU error before return CUDA_CHECK(cudaGetLastError()); } #else template<typename key_type, typename ref_counter_type, key_type empty_key, int set_associativity, int warp_size, typename set_hasher, typename slab_hasher> void gpu_cache<key_type, ref_counter_type, empty_key, set_associativity, warp_size, set_hasher, slab_hasher>:: Query(const key_type* d_keys, const size_t len, float* d_values, uint64_t* d_missing_index, key_type* d_missing_keys, size_t* d_missing_len, cudaStream_t stream, const size_t task_per_warp_tile){ // Device Restorer nv::CudaDeviceRestorer dev_restorer; // Set to the device of this cache CUDA_CHECK(cudaSetDevice(dev_)); // Check if it is a valid query if(len == 0){ // Set the d_missing_len to 0 before return CUDA_CHECK(cudaMemsetAsync(d_missing_len, 0, sizeof(size_t), stream)); return; } // Update the global counter as user perform a new(most recent) read operation to the cache // Resolve distance overflow issue as well. 
update_kernel_overflow_ignore<ref_counter_type><<<1, 1, 0, stream>>>(global_counter_, d_missing_len); // Read from the cache // Touch and refresh the hitting slot const size_t keys_per_block = (BLOCK_SIZE_ / warp_size) * task_per_warp_tile; const size_t grid_size = ((len - 1) / keys_per_block) + 1; get_kernel<key_type, ref_counter_type, slabset, set_hasher, slab_hasher, empty_key, set_associativity, warp_size> <<<grid_size, BLOCK_SIZE_, 0, stream>>>(d_keys, len, d_values, embedding_vec_size_, d_missing_index, d_missing_keys, d_missing_len, global_counter_, slot_counter_, capacity_in_set_, keys_, vals_, set_mutex_, task_per_warp_tile); // Check for GPU error before return CUDA_CHECK(cudaGetLastError()); } #endif #ifdef LIBCUDACXX_VERSION template<typename key_type, typename ref_counter_type, key_type empty_key, int set_associativity, int warp_size, typename set_hasher, typename slab_hasher> void gpu_cache<key_type, ref_counter_type, empty_key, set_associativity, warp_size, set_hasher, slab_hasher>:: Replace(const key_type* d_keys, const size_t len, const float* d_values, cudaStream_t stream, const size_t task_per_warp_tile){ // Check if it is a valid replacement if(len == 0){ return; } // Device Restorer nv::CudaDeviceRestorer dev_restorer; // Set to the device of this cache CUDA_CHECK(cudaSetDevice(dev_)); // Try to insert the <k,v> paris into the cache as long as there are unused slot // Then replace the <k,v> pairs into the cache const size_t keys_per_block = (BLOCK_SIZE_ / warp_size) * task_per_warp_tile; const size_t grid_size = ((len - 1) / keys_per_block) + 1; insert_replace_kernel<key_type, slabset, ref_counter_type, mutex, atomic_ref_counter_type, set_hasher, slab_hasher, empty_key, set_associativity, warp_size> <<<grid_size, BLOCK_SIZE_, 0, stream>>> (d_keys, d_values, embedding_vec_size_, len, keys_, vals_, slot_counter_, set_mutex_, global_counter_, capacity_in_set_, task_per_warp_tile); // Check for GPU error before return CUDA_CHECK(cudaGetLastError()); } #else template<typename key_type, typename ref_counter_type, key_type empty_key, int set_associativity, int warp_size, typename set_hasher, typename slab_hasher> void gpu_cache<key_type, ref_counter_type, empty_key, set_associativity, warp_size, set_hasher, slab_hasher>:: Replace(const key_type* d_keys, const size_t len, const float* d_values, cudaStream_t stream, const size_t task_per_warp_tile){ // Check if it is a valid replacement if(len == 0){ return; } // Device Restorer nv::CudaDeviceRestorer dev_restorer; // Set to the device of this cache CUDA_CHECK(cudaSetDevice(dev_)); // Try to insert the <k,v> paris into the cache as long as there are unused slot // Then replace the <k,v> pairs into the cache const size_t keys_per_block = (BLOCK_SIZE_ / warp_size) * task_per_warp_tile; const size_t grid_size = ((len - 1) / keys_per_block) + 1; insert_replace_kernel<key_type, slabset, ref_counter_type, set_hasher, slab_hasher, empty_key, set_associativity, warp_size> <<<grid_size, BLOCK_SIZE_, 0, stream>>> (d_keys, d_values, embedding_vec_size_, len, keys_, vals_, slot_counter_, set_mutex_, global_counter_, capacity_in_set_, task_per_warp_tile); // Check for GPU error before return CUDA_CHECK(cudaGetLastError()); } #endif #ifdef LIBCUDACXX_VERSION template<typename key_type, typename ref_counter_type, key_type empty_key, int set_associativity, int warp_size, typename set_hasher, typename slab_hasher> void gpu_cache<key_type, ref_counter_type, empty_key, set_associativity, warp_size, set_hasher, slab_hasher>:: Update(const 
key_type* d_keys, const size_t len, const float* d_values, cudaStream_t stream, const size_t task_per_warp_tile){ // Check if it is a valid update request if(len == 0){ return; } // Device Restorer nv::CudaDeviceRestorer dev_restorer; // Set to the device of this cache CUDA_CHECK(cudaSetDevice(dev_)); // Update the value of input keys that are existed in the cache const size_t keys_per_block = (BLOCK_SIZE_ / warp_size) * task_per_warp_tile; const size_t grid_size = ((len - 1) / keys_per_block) + 1; update_kernel<key_type, slabset, set_hasher, slab_hasher, mutex, empty_key, set_associativity, warp_size> <<<grid_size, BLOCK_SIZE_, 0, stream>>> (d_keys, len, d_values, embedding_vec_size_, capacity_in_set_, keys_, vals_, set_mutex_, task_per_warp_tile); // Check for GPU error before return CUDA_CHECK(cudaGetLastError()); } #else template<typename key_type, typename ref_counter_type, key_type empty_key, int set_associativity, int warp_size, typename set_hasher, typename slab_hasher> void gpu_cache<key_type, ref_counter_type, empty_key, set_associativity, warp_size, set_hasher, slab_hasher>:: Update(const key_type* d_keys, const size_t len, const float* d_values, cudaStream_t stream, const size_t task_per_warp_tile){ // Check if it is a valid update request if(len == 0){ return; } // Device Restorer nv::CudaDeviceRestorer dev_restorer; // Set to the device of this cache CUDA_CHECK(cudaSetDevice(dev_)); // Update the value of input keys that are existed in the cache const size_t keys_per_block = (BLOCK_SIZE_ / warp_size) * task_per_warp_tile; const size_t grid_size = ((len - 1) / keys_per_block) + 1; update_kernel<key_type, slabset, set_hasher, slab_hasher, empty_key, set_associativity, warp_size> <<<grid_size, BLOCK_SIZE_, 0, stream>>> (d_keys, len, d_values, embedding_vec_size_, capacity_in_set_, keys_, vals_, set_mutex_, task_per_warp_tile); // Check for GPU error before return CUDA_CHECK(cudaGetLastError()); } #endif #ifdef LIBCUDACXX_VERSION template<typename key_type, typename ref_counter_type, key_type empty_key, int set_associativity, int warp_size, typename set_hasher, typename slab_hasher> void gpu_cache<key_type, ref_counter_type, empty_key, set_associativity, warp_size, set_hasher, slab_hasher>:: Dump(key_type* d_keys, size_t* d_dump_counter, const size_t start_set_index, const size_t end_set_index, cudaStream_t stream){ // Check if it is a valid dump request if(start_set_index >= capacity_in_set_){ printf("Error: Invalid value for start_set_index. Nothing dumped.\n"); return; } if(end_set_index <= start_set_index || end_set_index > capacity_in_set_){ printf("Error: Invalid value for end_set_index. 
Nothing dumped.\n"); return; } // Device Restorer nv::CudaDeviceRestorer dev_restorer; // Set to the device of this cache CUDA_CHECK(cudaSetDevice(dev_)); // Set the global counter to 0 first CUDA_CHECK(cudaMemsetAsync(d_dump_counter, 0, sizeof(size_t), stream)); // Dump keys from the cache const size_t grid_size = (((end_set_index - start_set_index) - 1) / (BLOCK_SIZE_ / warp_size)) + 1; dump_kernel<key_type, slabset, mutex, empty_key, set_associativity, warp_size> <<<grid_size, BLOCK_SIZE_, 0, stream>>> (d_keys, d_dump_counter, keys_, set_mutex_, start_set_index, end_set_index); // Check for GPU error before return CUDA_CHECK(cudaGetLastError()); } #else template<typename key_type, typename ref_counter_type, key_type empty_key, int set_associativity, int warp_size, typename set_hasher, typename slab_hasher> void gpu_cache<key_type, ref_counter_type, empty_key, set_associativity, warp_size, set_hasher, slab_hasher>:: Dump(key_type* d_keys, size_t* d_dump_counter, const size_t start_set_index, const size_t end_set_index, cudaStream_t stream){ // Check if it is a valid dump request if(start_set_index >= capacity_in_set_){ printf("Error: Invalid value for start_set_index. Nothing dumped.\n"); return; } if(end_set_index <= start_set_index || end_set_index > capacity_in_set_){ printf("Error: Invalid value for end_set_index. Nothing dumped.\n"); return; } // Device Restorer nv::CudaDeviceRestorer dev_restorer; // Set to the device of this cache CUDA_CHECK(cudaSetDevice(dev_)); // Set the global counter to 0 first CUDA_CHECK(cudaMemsetAsync(d_dump_counter, 0, sizeof(size_t), stream)); // Dump keys from the cache const size_t grid_size = (((end_set_index - start_set_index) - 1) / (BLOCK_SIZE_ / warp_size)) + 1; dump_kernel<key_type, slabset, empty_key, set_associativity, warp_size> <<<grid_size, BLOCK_SIZE_, 0, stream>>> (d_keys, d_dump_counter, keys_, set_mutex_, start_set_index, end_set_index); // Check for GPU error before return CUDA_CHECK(cudaGetLastError()); } #endif template class gpu_cache<unsigned int, uint64_t, std::numeric_limits<unsigned int>::max(), SET_ASSOCIATIVITY, SLAB_SIZE>; template class gpu_cache<long long, uint64_t, std::numeric_limits<long long>::max(), SET_ASSOCIATIVITY, SLAB_SIZE>; } // namespace gpu_cache
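// Note: the Query/Replace/Update methods above all size their launch grids the same way:
// each warp-wide tile handles task_per_warp_tile keys, so a block of BLOCK_SIZE_ threads
// covers (BLOCK_SIZE_ / warp_size) * task_per_warp_tile keys, and the grid is the ceiling
// of len over that. A minimal host-side sketch of that arithmetic only (the numeric values
// below are hypothetical stand-ins for the class constants, not taken from the cache code):
#include <cstdio>
static size_t cache_grid_size(size_t len, size_t block_size, size_t warp_size,
                              size_t task_per_warp_tile) {
  // Keys handled per block = warp tiles per block * keys per warp tile.
  const size_t keys_per_block = (block_size / warp_size) * task_per_warp_tile;
  // Ceiling division, written exactly as in the kernels above (assumes len > 0).
  return ((len - 1) / keys_per_block) + 1;
}
int main() {
  // Example: block size 64, warp size 32, one key per warp tile (assumed values).
  printf("%zu\n", cache_grid_size(1000, 64, 32, 1)); // prints 500
  return 0;
}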
namespace kernels { template <typename I> __global__ void try_out_integral_math_functions(I* results, I* __restrict expected) { size_t i { 0 }; bool print_first_indices_for_each_function { false }; auto maybe_print = [&](const char* section_title) { if (print_first_indices_for_each_function) { printf("%-30s tests start at index %3d\n", section_title, i); } }; results[i] = kat::strictly_between<I>( I{ 0 }, I{ 5 }, I{ 10 } ); expected[i++] = false; results[i] = kat::strictly_between<I>( I{ 1 }, I{ 5 }, I{ 10 } ); expected[i++] = false; results[i] = kat::strictly_between<I>( I{ 4 }, I{ 5 }, I{ 10 } ); expected[i++] = false; results[i] = kat::strictly_between<I>( I{ 5 }, I{ 5 }, I{ 10 } ); expected[i++] = false; results[i] = kat::strictly_between<I>( I{ 6 }, I{ 5 }, I{ 10 } ); expected[i++] = true; results[i] = kat::strictly_between<I>( I{ 8 }, I{ 5 }, I{ 10 } ); expected[i++] = true; results[i] = kat::strictly_between<I>( I{ 9 }, I{ 5 }, I{ 10 } ); expected[i++] = true; results[i] = kat::strictly_between<I>( I{ 10 }, I{ 5 }, I{ 10 } ); expected[i++] = false; results[i] = kat::strictly_between<I>( I{ 11 }, I{ 5 }, I{ 10 } ); expected[i++] = false; results[i] = kat::strictly_between<I>( I{ 123 }, I{ 5 }, I{ 10 } ); expected[i++] = false; maybe_print("between_or_equal"); results[i] = kat::between_or_equal<I>( I{ 1 }, I{ 5 }, I{ 10 } ); expected[i++] = false; results[i] = kat::between_or_equal<I>( I{ 4 }, I{ 5 }, I{ 10 } ); expected[i++] = false; results[i] = kat::between_or_equal<I>( I{ 5 }, I{ 5 }, I{ 10 } ); expected[i++] = true; results[i] = kat::between_or_equal<I>( I{ 6 }, I{ 5 }, I{ 10 } ); expected[i++] = true; results[i] = kat::between_or_equal<I>( I{ 8 }, I{ 5 }, I{ 10 } ); expected[i++] = true; results[i] = kat::between_or_equal<I>( I{ 9 }, I{ 5 }, I{ 10 } ); expected[i++] = true; results[i] = kat::between_or_equal<I>( I{ 10 }, I{ 5 }, I{ 10 } ); expected[i++] = true; results[i] = kat::between_or_equal<I>( I{ 11 }, I{ 5 }, I{ 10 } ); expected[i++] = false; results[i] = kat::between_or_equal<I>( I{ 123 }, I{ 5 }, I{ 10 } ); expected[i++] = false; maybe_print("is_power_of_2"); results[i] = kat::is_power_of_2<I>(I{ 1}); expected[i++] = true; results[i] = kat::is_power_of_2<I>(I{ 2}); expected[i++] = true; results[i] = kat::is_power_of_2<I>(I{ 4}); expected[i++] = true; results[i] = kat::is_power_of_2<I>(I{ 7}); expected[i++] = false; results[i] = kat::is_power_of_2<I>(I{32}); expected[i++] = true; results[i] = kat::is_power_of_2<I>(I{33}); expected[i++] = false; maybe_print("modular_increment"); results[i] = kat::modular_increment<I>(I{ 0}, I{ 1}); expected[i++] = I{ 0 }; results[i] = kat::modular_increment<I>(I{ 1}, I{ 1}); expected[i++] = I{ 0 }; results[i] = kat::modular_increment<I>(I{ 0}, I{ 3}); expected[i++] = I{ 1 }; results[i] = kat::modular_increment<I>(I{ 1}, I{ 3}); expected[i++] = I{ 2 }; results[i] = kat::modular_increment<I>(I{ 2}, I{ 3}); expected[i++] = I{ 0 }; results[i] = kat::modular_increment<I>(I{ 3}, I{ 3}); expected[i++] = I{ 1 }; results[i] = kat::modular_increment<I>(I{ 4}, I{ 3}); expected[i++] = I{ 2 }; maybe_print("modular_decrement"); results[i] = kat::modular_decrement<I>(I{ 0}, I{ 1}); expected[i++] = I{ 0 }; results[i] = kat::modular_decrement<I>(I{ 1}, I{ 1}); expected[i++] = I{ 0 }; results[i] = kat::modular_decrement<I>(I{ 0}, I{ 3}); expected[i++] = I{ 2 }; results[i] = kat::modular_decrement<I>(I{ 1}, I{ 3}); expected[i++] = I{ 0 }; results[i] = kat::modular_decrement<I>(I{ 2}, I{ 3}); expected[i++] = I{ 1 }; results[i] = kat::modular_decrement<I>(I{ 
3}, I{ 3}); expected[i++] = I{ 2 }; results[i] = kat::modular_decrement<I>(I{ 4}, I{ 3}); expected[i++] = I{ 0 }; maybe_print("ipow"); results[i] = kat::ipow<I>(I{ 0 }, 1 ); expected[i++] = I{ 0 }; results[i] = kat::ipow<I>(I{ 0 }, 2 ); expected[i++] = I{ 0 }; results[i] = kat::ipow<I>(I{ 0 }, 100 ); expected[i++] = I{ 0 }; results[i] = kat::ipow<I>(I{ 1 }, 0 ); expected[i++] = I{ 1 }; results[i] = kat::ipow<I>(I{ 1 }, 1 ); expected[i++] = I{ 1 }; results[i] = kat::ipow<I>(I{ 1 }, 2 ); expected[i++] = I{ 1 }; results[i] = kat::ipow<I>(I{ 1 }, 100 ); expected[i++] = I{ 1 }; results[i] = kat::ipow<I>(I{ 3 }, 0 ); expected[i++] = I{ 1 }; results[i] = kat::ipow<I>(I{ 3 }, 1 ); expected[i++] = I{ 3 }; results[i] = kat::ipow<I>(I{ 3 }, 2 ); expected[i++] = I{ 9 }; results[i] = kat::ipow<I>(I{ 3 }, 4 ); expected[i++] = I{ 81 }; maybe_print("unsafe div_rounding_up"); results[i] = kat::unsafe::div_rounding_up<I>( I{ 0 }, I{ 1 } ); expected[i++] = I{ 0 }; results[i] = kat::unsafe::div_rounding_up<I>( I{ 0 }, I{ 2 } ); expected[i++] = I{ 0 }; results[i] = kat::unsafe::div_rounding_up<I>( I{ 0 }, I{ 123 } ); expected[i++] = I{ 0 }; results[i] = kat::unsafe::div_rounding_up<I>( I{ 1 }, I{ 1 } ); expected[i++] = I{ 1 }; results[i] = kat::unsafe::div_rounding_up<I>( I{ 1 }, I{ 2 } ); expected[i++] = I{ 1 }; results[i] = kat::unsafe::div_rounding_up<I>( I{ 122 }, I{ 123 } ); expected[i++] = I{ 1 }; results[i] = kat::unsafe::div_rounding_up<I>( I{ 123 }, I{ 123 } ); expected[i++] = I{ 1 }; results[i] = kat::unsafe::div_rounding_up<I>( I{ 124 }, I{ 123 } ); expected[i++] = I{ 2 }; maybe_print("div_rounding_up"); results[i] = kat::div_rounding_up<I>( I{ 0 }, I{ 1 } ); expected[i++] = I{ 0 }; results[i] = kat::div_rounding_up<I>( I{ 0 }, I{ 2 } ); expected[i++] = I{ 0 }; results[i] = kat::div_rounding_up<I>( I{ 0 }, I{ 123 } ); expected[i++] = I{ 0 }; results[i] = kat::div_rounding_up<I>( I{ 1 }, I{ 1 } ); expected[i++] = I{ 1 }; results[i] = kat::div_rounding_up<I>( I{ 1 }, I{ 2 } ); expected[i++] = I{ 1 }; results[i] = kat::div_rounding_up<I>( I{ 122 }, I{ 123 } ); expected[i++] = I{ 1 }; results[i] = kat::div_rounding_up<I>( I{ 123 }, I{ 123 } ); expected[i++] = I{ 1 }; results[i] = kat::div_rounding_up<I>( I{ 124 }, I{ 123 } ); expected[i++] = I{ 2 }; results[i] = kat::div_rounding_up<I>( I{ 124 }, I{ 123 } ); expected[i++] = I{ 2 }; results[i] = kat::div_rounding_up<I>( std::numeric_limits<I>::max() , std::numeric_limits<I>::max() - 1 ); expected[i++] = I{ 2 }; results[i] = kat::div_rounding_up<I>( std::numeric_limits<I>::max() - 1, std::numeric_limits<I>::max() ); expected[i++] = I{ 1 }; maybe_print("round_down"); results[i] = kat::round_down<I>( I{ 0 }, I{ 2 } ); expected[i++] = I{ 0 }; results[i] = kat::round_down<I>( I{ 0 }, I{ 123 } ); expected[i++] = I{ 0 }; results[i] = kat::round_down<I>( I{ 1 }, I{ 2 } ); expected[i++] = I{ 0 }; results[i] = kat::round_down<I>( I{ 122 }, I{ 123 } ); expected[i++] = I{ 0 }; results[i] = kat::round_down<I>( I{ 123 }, I{ 123 } ); expected[i++] = I{ 123 }; results[i] = kat::round_down<I>( I{ 124 }, I{ 123 } ); expected[i++] = I{ 123 }; maybe_print("round_down_to_full_warps"); results[i] = kat::round_down_to_full_warps<I>( I{ 0 } ); expected[i++] = I{ 0 }; results[i] = kat::round_down_to_full_warps<I>( I{ 1 } ); expected[i++] = I{ 0 }; results[i] = kat::round_down_to_full_warps<I>( I{ 8 } ); expected[i++] = I{ 0 }; results[i] = kat::round_down_to_full_warps<I>( I{ 16 } ); expected[i++] = I{ 0 }; results[i] = kat::round_down_to_full_warps<I>( I{ 31 } ); expected[i++] 
= I{ 0 }; results[i] = kat::round_down_to_full_warps<I>( I{ 32 } ); expected[i++] = I{ 32 }; results[i] = kat::round_down_to_full_warps<I>( I{ 33 } ); expected[i++] = I{ 32 }; results[i] = kat::round_down_to_full_warps<I>( I{ 125 } ); expected[i++] = I{ 96 }; // TODO: Consider testing rounding-up with negative dividends maybe_print("unsafe round_up"); results[i] = kat::unsafe::round_up<I>( I{ 0 }, I{ 1 } ); expected[i++] = I{ 0 }; results[i] = kat::unsafe::round_up<I>( I{ 0 }, I{ 2 } ); expected[i++] = I{ 0 }; results[i] = kat::unsafe::round_up<I>( I{ 0 }, I{ 123 } ); expected[i++] = I{ 0 }; results[i] = kat::unsafe::round_up<I>( I{ 1 }, I{ 1 } ); expected[i++] = I{ 1 }; results[i] = kat::unsafe::round_up<I>( I{ 1 }, I{ 2 } ); expected[i++] = I{ 2 }; results[i] = kat::unsafe::round_up<I>( I{ 63 }, I{ 64 } ); expected[i++] = I{ 64 }; results[i] = kat::unsafe::round_up<I>( I{ 64 }, I{ 64 } ); expected[i++] = I{ 64 }; results[i] = kat::unsafe::round_up<I>( I{ 65 }, I{ 32 } ); expected[i++] = I{ 96 }; maybe_print("round_up"); results[i] = kat::round_up<I>( I{ 0 }, I{ 1 } ); expected[i++] = I{ 0 }; results[i] = kat::round_up<I>( I{ 0 }, I{ 2 } ); expected[i++] = I{ 0 }; results[i] = kat::round_up<I>( I{ 0 }, I{ 123 } ); expected[i++] = I{ 0 }; results[i] = kat::round_up<I>( I{ 1 }, I{ 1 } ); expected[i++] = I{ 1 }; results[i] = kat::round_up<I>( I{ 1 }, I{ 2 } ); expected[i++] = I{ 2 }; results[i] = kat::round_up<I>( I{ 63 }, I{ 64 } ); expected[i++] = I{ 64 }; results[i] = kat::round_up<I>( I{ 64 }, I{ 64 } ); expected[i++] = I{ 64 }; results[i] = kat::round_up<I>( I{ 65 }, I{ 32 } ); expected[i++] = I{ 96 }; results[i] = kat::round_up<I>( std::numeric_limits<I>::max() - 1, std::numeric_limits<I>::max() ); expected[i++] = I{ std::numeric_limits<I>::max() }; maybe_print("round_down_to_power_of_2"); results[i] = kat::round_down_to_power_of_2<I>( I{ 1 }, I{ 1 } ); expected[i++] = I{ 1 }; results[i] = kat::round_down_to_power_of_2<I>( I{ 2 }, I{ 1 } ); expected[i++] = I{ 2 }; results[i] = kat::round_down_to_power_of_2<I>( I{ 3 }, I{ 1 } ); expected[i++] = I{ 3 }; results[i] = kat::round_down_to_power_of_2<I>( I{ 4 }, I{ 1 } ); expected[i++] = I{ 4 }; results[i] = kat::round_down_to_power_of_2<I>( I{ 123 }, I{ 1 } ); expected[i++] = I{ 123 }; results[i] = kat::round_down_to_power_of_2<I>( I{ 1 }, I{ 2 } ); expected[i++] = I{ 0 }; results[i] = kat::round_down_to_power_of_2<I>( I{ 2 }, I{ 2 } ); expected[i++] = I{ 2 }; results[i] = kat::round_down_to_power_of_2<I>( I{ 3 }, I{ 2 } ); expected[i++] = I{ 2 }; results[i] = kat::round_down_to_power_of_2<I>( I{ 4 }, I{ 2 } ); expected[i++] = I{ 4 }; results[i] = kat::round_down_to_power_of_2<I>( I{ 123 }, I{ 2 } ); expected[i++] = I{ 122 }; maybe_print("round_up_to_power_of_2"); results[i] = kat::round_up_to_power_of_2<I>( I{ 1 }, I{ 1 } ); expected[i++] = I{ 1 }; results[i] = kat::round_up_to_power_of_2<I>( I{ 2 }, I{ 1 } ); expected[i++] = I{ 2 }; results[i] = kat::round_up_to_power_of_2<I>( I{ 3 }, I{ 1 } ); expected[i++] = I{ 3 }; results[i] = kat::round_up_to_power_of_2<I>( I{ 4 }, I{ 1 } ); expected[i++] = I{ 4 }; results[i] = kat::round_up_to_power_of_2<I>( I{ 23 }, I{ 1 } ); expected[i++] = I{ 23 }; results[i] = kat::round_up_to_power_of_2<I>( I{ 1 }, I{ 2 } ); expected[i++] = I{ 2 }; results[i] = kat::round_up_to_power_of_2<I>( I{ 2 }, I{ 2 } ); expected[i++] = I{ 2 }; results[i] = kat::round_up_to_power_of_2<I>( I{ 3 }, I{ 2 } ); expected[i++] = I{ 4 }; results[i] = kat::round_up_to_power_of_2<I>( I{ 4 }, I{ 2 } ); expected[i++] = I{ 4 }; 
results[i] = kat::round_up_to_power_of_2<I>( I{ 63 }, I{ 2 } ); expected[i++] = I{ 64 }; maybe_print("unsafe round_up_to_power_of_2"); results[i] = kat::unsafe::round_up_to_power_of_2<I>( I{ 1 }, I{ 1 } ); expected[i++] = I{ 1 }; results[i] = kat::unsafe::round_up_to_power_of_2<I>( I{ 2 }, I{ 1 } ); expected[i++] = I{ 2 }; results[i] = kat::unsafe::round_up_to_power_of_2<I>( I{ 3 }, I{ 1 } ); expected[i++] = I{ 3 }; results[i] = kat::unsafe::round_up_to_power_of_2<I>( I{ 4 }, I{ 1 } ); expected[i++] = I{ 4 }; results[i] = kat::unsafe::round_up_to_power_of_2<I>( I{ 23 }, I{ 1 } ); expected[i++] = I{ 23 }; results[i] = kat::unsafe::round_up_to_power_of_2<I>( I{ 1 }, I{ 2 } ); expected[i++] = I{ 2 }; results[i] = kat::unsafe::round_up_to_power_of_2<I>( I{ 2 }, I{ 2 } ); expected[i++] = I{ 2 }; results[i] = kat::unsafe::round_up_to_power_of_2<I>( I{ 3 }, I{ 2 } ); expected[i++] = I{ 4 }; results[i] = kat::unsafe::round_up_to_power_of_2<I>( I{ 4 }, I{ 2 } ); expected[i++] = I{ 4 }; results[i] = kat::unsafe::round_up_to_power_of_2<I>( I{ 63 }, I{ 2 } ); expected[i++] = I{ 64 }; maybe_print("round_up_to_full_warps"); results[i] = kat::round_up_to_full_warps<I>( I{ 0 } ); expected[i++] = I{ 0 }; results[i] = kat::round_up_to_full_warps<I>( I{ 1 } ); expected[i++] = I{ 32 }; results[i] = kat::round_up_to_full_warps<I>( I{ 8 } ); expected[i++] = I{ 32 }; results[i] = kat::round_up_to_full_warps<I>( I{ 16 } ); expected[i++] = I{ 32 }; results[i] = kat::round_up_to_full_warps<I>( I{ 31 } ); expected[i++] = I{ 32 }; results[i] = kat::round_up_to_full_warps<I>( I{ 32 } ); expected[i++] = I{ 32 }; results[i] = kat::round_up_to_full_warps<I>( I{ 33 } ); expected[i++] = I{ 64 }; results[i] = kat::round_up_to_full_warps<I>( I{ 63 } ); expected[i++] = I{ 64 }; maybe_print("gcd"); results[i] = kat::gcd<I>( I{ 1 }, I{ 1 } ); expected[i++] = I{ 1 }; results[i] = kat::gcd<I>( I{ 2 }, I{ 1 } ); expected[i++] = I{ 1 }; results[i] = kat::gcd<I>( I{ 1 }, I{ 2 } ); expected[i++] = I{ 1 }; results[i] = kat::gcd<I>( I{ 2 }, I{ 2 } ); expected[i++] = I{ 2 }; results[i] = kat::gcd<I>( I{ 8 }, I{ 4 } ); expected[i++] = I{ 4 }; results[i] = kat::gcd<I>( I{ 4 }, I{ 8 } ); expected[i++] = I{ 4 }; results[i] = kat::gcd<I>( I{ 10 }, I{ 6 } ); expected[i++] = I{ 2 }; results[i] = kat::gcd<I>( I{ 120 }, I{ 70 } ); expected[i++] = I{ 10 }; results[i] = kat::gcd<I>( I{ 70 }, I{ 120 } ); expected[i++] = I{ 10 }; results[i] = kat::gcd<I>( I{ 97 }, I{ 120 } ); expected[i++] = I{ 1 }; maybe_print("lcm"); results[i] = kat::lcm<I>( I{ 1 }, I{ 1 } ); expected[i++] = I{ 1 }; results[i] = kat::lcm<I>( I{ 2 }, I{ 1 } ); expected[i++] = I{ 2 }; results[i] = kat::lcm<I>( I{ 1 }, I{ 2 } ); expected[i++] = I{ 2 }; results[i] = kat::lcm<I>( I{ 2 }, I{ 2 } ); expected[i++] = I{ 2 }; results[i] = kat::lcm<I>( I{ 5 }, I{ 3 } ); expected[i++] = I{ 15 }; results[i] = kat::lcm<I>( I{ 8 }, I{ 4 } ); expected[i++] = I{ 8 }; results[i] = kat::lcm<I>( I{ 4 }, I{ 8 } ); expected[i++] = I{ 8 }; results[i] = kat::lcm<I>( I{ 10 }, I{ 6 } ); expected[i++] = I{ 30 }; maybe_print("is_even"); results[i] = kat::is_even<I>( I{ 0 } ); expected[i++] = true; results[i] = kat::is_even<I>( I{ 1 } ); expected[i++] = false; results[i] = kat::is_even<I>( I{ 2 } ); expected[i++] = true; results[i] = kat::is_even<I>( I{ 3 } ); expected[i++] = false; results[i] = kat::is_even<I>( I{ 123 } ); expected[i++] = false; results[i] = kat::is_even<I>( I{ 124 } ); expected[i++] = true; maybe_print("is_odd"); results[i] = kat::is_odd<I>( I{ 0 } ); expected[i++] = false; results[i] = 
kat::is_odd<I>( I{ 1 } ); expected[i++] = true; results[i] = kat::is_odd<I>( I{ 2 } ); expected[i++] = false; results[i] = kat::is_odd<I>( I{ 3 } ); expected[i++] = true; results[i] = kat::is_odd<I>( I{ 123 } ); expected[i++] = true; results[i] = kat::is_odd<I>( I{ 124 } ); expected[i++] = false; maybe_print("log2"); results[i] = kat::log2<I>( I{ 1 } ); expected[i++] = 0; results[i] = kat::log2<I>( I{ 2 } ); expected[i++] = 1; results[i] = kat::log2<I>( I{ 3 } ); expected[i++] = 1; results[i] = kat::log2<I>( I{ 4 } ); expected[i++] = 2; results[i] = kat::log2<I>( I{ 6 } ); expected[i++] = 2; results[i] = kat::log2<I>( I{ 7 } ); expected[i++] = 2; results[i] = kat::log2<I>( I{ 8 } ); expected[i++] = 3; results[i] = kat::log2<I>( I{ 127 } ); expected[i++] = 6; // We don't have a goot integer sqrt() implementation to offer here. Perhaps // we could offer something based on casting to float? // // results[i] = kat::sqrt<I>( I{ 0 } ); expected[i++] = 0; // results[i] = kat::sqrt<I>( I{ 1 } ); expected[i++] = 1; // results[i] = kat::sqrt<I>( I{ 2 } ); expected[i++] = 1; // results[i] = kat::sqrt<I>( I{ 3 } ); expected[i++] = 1; // results[i] = kat::sqrt<I>( I{ 4 } ); expected[i++] = 2; // results[i] = kat::sqrt<I>( I{ 5 } ); expected[i++] = 2; // results[i] = kat::sqrt<I>( I{ 9 } ); expected[i++] = 3; // results[i] = kat::sqrt<I>( I{ 10 } ); expected[i++] = 3; // results[i] = kat::sqrt<I>( I{ 127 } ); expected[i++] = 11; maybe_print("div_by_power_of_2"); results[i] = kat::div_by_power_of_2<I>( I{ 0 }, I { 1 }); expected[i++] = I{ 0 }; results[i] = kat::div_by_power_of_2<I>( I{ 1 }, I { 1 }); expected[i++] = I{ 1 }; results[i] = kat::div_by_power_of_2<I>( I{ 111 }, I { 1 }); expected[i++] = I{ 111 }; results[i] = kat::div_by_power_of_2<I>( I{ 0 }, I { 2 }); expected[i++] = I{ 0 }; results[i] = kat::div_by_power_of_2<I>( I{ 1 }, I { 2 }); expected[i++] = I{ 0 }; results[i] = kat::div_by_power_of_2<I>( I{ 2 }, I { 2 }); expected[i++] = I{ 1 }; results[i] = kat::div_by_power_of_2<I>( I{ 3 }, I { 2 }); expected[i++] = I{ 1 }; results[i] = kat::div_by_power_of_2<I>( I{ 4 }, I { 2 }); expected[i++] = I{ 2 }; results[i] = kat::div_by_power_of_2<I>( I{ 111 }, I { 2 }); expected[i++] = I{ 55 }; results[i] = kat::div_by_power_of_2<I>( I{ 0 }, I { 16 }); expected[i++] = I{ 0 }; results[i] = kat::div_by_power_of_2<I>( I{ 1 }, I { 16 }); expected[i++] = I{ 0 }; results[i] = kat::div_by_power_of_2<I>( I{ 15 }, I { 16 }); expected[i++] = I{ 0 }; results[i] = kat::div_by_power_of_2<I>( I{ 16 }, I { 16 }); expected[i++] = I{ 1 }; results[i] = kat::div_by_power_of_2<I>( I{ 17 }, I { 16 }); expected[i++] = I{ 1 }; results[i] = kat::div_by_power_of_2<I>( I{ 32 }, I { 16 }); expected[i++] = I{ 2 }; results[i] = kat::div_by_power_of_2<I>( I{ 111 }, I { 16 }); expected[i++] = I{ 6 }; maybe_print("divides"); results[i] = kat::divides<I>( I{ 1 }, I{ 0 } ); expected[i++] = true; results[i] = kat::divides<I>( I{ 2 }, I{ 0 } ); expected[i++] = true; results[i] = kat::divides<I>( I{ 3 }, I{ 0 } ); expected[i++] = true; results[i] = kat::divides<I>( I{ 1 }, I{ 1 } ); expected[i++] = true; results[i] = kat::divides<I>( I{ 2 }, I{ 1 } ); expected[i++] = false; results[i] = kat::divides<I>( I{ 3 }, I{ 1 } ); expected[i++] = false; results[i] = kat::divides<I>( I{ 1 }, I{ 2 } ); expected[i++] = true; results[i] = kat::divides<I>( I{ 2 }, I{ 2 } ); expected[i++] = true; results[i] = kat::divides<I>( I{ 3 }, I{ 2 } ); expected[i++] = false; results[i] = kat::divides<I>( I{ 4 }, I{ 2 } ); expected[i++] = false; results[i] = 
kat::divides<I>( I{ 6 }, I{ 9 } ); expected[i++] = false; results[i] = kat::divides<I>( I{ 9 }, I{ 6 } ); expected[i++] = false; results[i] = kat::divides<I>( I{ 4 }, I{ 24 } ); expected[i++] = true; results[i] = kat::divides<I>( I{ 24 }, I{ 4 } ); expected[i++] = false; maybe_print("is_divisible_by"); results[i] = kat::is_divisible_by<I>( I{ 0 }, I{ 1 } ); expected[i++] = true; results[i] = kat::is_divisible_by<I>( I{ 0 }, I{ 2 } ); expected[i++] = true; results[i] = kat::is_divisible_by<I>( I{ 0 }, I{ 3 } ); expected[i++] = true; results[i] = kat::is_divisible_by<I>( I{ 1 }, I{ 1 } ); expected[i++] = true; results[i] = kat::is_divisible_by<I>( I{ 1 }, I{ 2 } ); expected[i++] = false; results[i] = kat::is_divisible_by<I>( I{ 1 }, I{ 3 } ); expected[i++] = false; results[i] = kat::is_divisible_by<I>( I{ 2 }, I{ 1 } ); expected[i++] = true; results[i] = kat::is_divisible_by<I>( I{ 2 }, I{ 2 } ); expected[i++] = true; results[i] = kat::is_divisible_by<I>( I{ 2 }, I{ 3 } ); expected[i++] = false; results[i] = kat::is_divisible_by<I>( I{ 2 }, I{ 4 } ); expected[i++] = false; results[i] = kat::is_divisible_by<I>( I{ 9 }, I{ 6 } ); expected[i++] = false; results[i] = kat::is_divisible_by<I>( I{ 6 }, I{ 9 } ); expected[i++] = false; results[i] = kat::is_divisible_by<I>( I{ 24 }, I{ 4 } ); expected[i++] = true; results[i] = kat::is_divisible_by<I>( I{ 4 }, I{ 24 } ); expected[i++] = false; maybe_print("is_divisible_by_power_of_2"); results[i] = kat::is_divisible_by_power_of_2<I>( I{ 0 }, I{ 1 } ); expected[i++] = true; results[i] = kat::is_divisible_by_power_of_2<I>( I{ 0 }, I{ 2 } ); expected[i++] = true; results[i] = kat::is_divisible_by_power_of_2<I>( I{ 1 }, I{ 1 } ); expected[i++] = true; results[i] = kat::is_divisible_by_power_of_2<I>( I{ 1 }, I{ 2 } ); expected[i++] = false; results[i] = kat::is_divisible_by_power_of_2<I>( I{ 2 }, I{ 1 } ); expected[i++] = true; results[i] = kat::is_divisible_by_power_of_2<I>( I{ 2 }, I{ 2 } ); expected[i++] = true; results[i] = kat::is_divisible_by_power_of_2<I>( I{ 2 }, I{ 4 } ); expected[i++] = false; results[i] = kat::is_divisible_by_power_of_2<I>( I{ 24 }, I{ 4 } ); expected[i++] = true; results[i] = kat::is_divisible_by_power_of_2<I>( I{ 72 }, I{ 16 } ); expected[i++] = false; results[i] = kat::is_divisible_by_power_of_2<I>( I{ 64 }, I{ 16 } ); expected[i++] = true; maybe_print("power_of_2_divides"); results[i] = kat::power_of_2_divides<I>( I{ 1 }, I{ 0 } ); expected[i++] = true; results[i] = kat::power_of_2_divides<I>( I{ 2 }, I{ 0 } ); expected[i++] = true; results[i] = kat::power_of_2_divides<I>( I{ 1 }, I{ 1 } ); expected[i++] = true; results[i] = kat::power_of_2_divides<I>( I{ 2 }, I{ 1 } ); expected[i++] = false; results[i] = kat::power_of_2_divides<I>( I{ 1 }, I{ 2 } ); expected[i++] = true; results[i] = kat::power_of_2_divides<I>( I{ 2 }, I{ 2 } ); expected[i++] = true; results[i] = kat::power_of_2_divides<I>( I{ 4 }, I{ 2 } ); expected[i++] = false; results[i] = kat::power_of_2_divides<I>( I{ 4 }, I{ 24 } ); expected[i++] = true; results[i] = kat::power_of_2_divides<I>( I{ 16 }, I{ 72 } ); expected[i++] = false; results[i] = kat::power_of_2_divides<I>( I{ 16 }, I{ 64 } ); expected[i++] = true; maybe_print("log2_of_power_of_2"); results[i] = kat::log2_of_power_of_2<I>( I{ 1 } ); expected[i++] = I{ 0 }; results[i] = kat::log2_of_power_of_2<I>( I{ 2 } ); expected[i++] = I{ 1 }; results[i] = kat::log2_of_power_of_2<I>( I{ 4 } ); expected[i++] = I{ 2 }; results[i] = kat::log2_of_power_of_2<I>( I{ 8 } ); expected[i++] = I{ 3 }; results[i] = 
kat::log2_of_power_of_2<I>( I{ 16 } ); expected[i++] = I{ 4 }; results[i] = kat::log2_of_power_of_2<I>( I{ 32 } ); expected[i++] = I{ 5 }; results[i] = kat::log2_of_power_of_2<I>( I{ 64 } ); expected[i++] = I{ 6 }; maybe_print("modulo_power_of_2"); results[i] = kat::modulo_power_of_2<I>( I{ 0 }, I{ 1 } ); expected[i++] = I{ 0 }; results[i] = kat::modulo_power_of_2<I>( I{ 1 }, I{ 1 } ); expected[i++] = I{ 0 }; results[i] = kat::modulo_power_of_2<I>( I{ 2 }, I{ 1 } ); expected[i++] = I{ 0 }; results[i] = kat::modulo_power_of_2<I>( I{ 3 }, I{ 1 } ); expected[i++] = I{ 0 }; results[i] = kat::modulo_power_of_2<I>( I{ 4 }, I{ 1 } ); expected[i++] = I{ 0 }; results[i] = kat::modulo_power_of_2<I>( I{ 5 }, I{ 1 } ); expected[i++] = I{ 0 }; results[i] = kat::modulo_power_of_2<I>( I{ 63 }, I{ 1 } ); expected[i++] = I{ 0 }; results[i] = kat::modulo_power_of_2<I>( I{ 0 }, I{ 2 } ); expected[i++] = I{ 0 }; results[i] = kat::modulo_power_of_2<I>( I{ 1 }, I{ 2 } ); expected[i++] = I{ 1 }; results[i] = kat::modulo_power_of_2<I>( I{ 2 }, I{ 2 } ); expected[i++] = I{ 0 }; results[i] = kat::modulo_power_of_2<I>( I{ 3 }, I{ 2 } ); expected[i++] = I{ 1 }; results[i] = kat::modulo_power_of_2<I>( I{ 4 }, I{ 2 } ); expected[i++] = I{ 0 }; results[i] = kat::modulo_power_of_2<I>( I{ 5 }, I{ 2 } ); expected[i++] = I{ 1 }; results[i] = kat::modulo_power_of_2<I>( I{ 63 }, I{ 2 } ); expected[i++] = I{ 1 }; results[i] = kat::modulo_power_of_2<I>( I{ 0 }, I{ 4 } ); expected[i++] = I{ 0 }; results[i] = kat::modulo_power_of_2<I>( I{ 1 }, I{ 4 } ); expected[i++] = I{ 1 }; results[i] = kat::modulo_power_of_2<I>( I{ 2 }, I{ 4 } ); expected[i++] = I{ 2 }; results[i] = kat::modulo_power_of_2<I>( I{ 3 }, I{ 4 } ); expected[i++] = I{ 3 }; results[i] = kat::modulo_power_of_2<I>( I{ 4 }, I{ 4 } ); expected[i++] = I{ 0 }; results[i] = kat::modulo_power_of_2<I>( I{ 5 }, I{ 4 } ); expected[i++] = I{ 1 }; results[i] = kat::modulo_power_of_2<I>( I{ 63 }, I{ 4 } ); expected[i++] = I{ 3 }; #define NUM_TEST_LINES 268 } } // namespace kernels TEST_SUITE("math") { TEST_CASE_TEMPLATE("run-time on-device", I, INTEGER_TYPES) { cuda::device_t device { cuda::device::current::get() }; auto block_size { 1 }; auto num_grid_blocks { 1 }; auto launch_config { cuda::make_launch_config(block_size, num_grid_blocks) }; auto device_side_results { cuda::memory::device::make_unique<I[]>(device, NUM_TEST_LINES) }; auto device_side_expected_results { cuda::memory::device::make_unique<I[]>(device, NUM_TEST_LINES) }; auto host_side_results { std::unique_ptr<I[]>(new I[NUM_TEST_LINES]) }; auto host_side_expected_results { std::unique_ptr<I[]>(new I[NUM_TEST_LINES]) }; cuda::launch( kernels::try_out_integral_math_functions<I>, launch_config, device_side_results.get(), device_side_expected_results.get()); cuda::memory::copy(host_side_results.get(), device_side_results.get(), sizeof(I) * NUM_TEST_LINES); cuda::memory::copy(host_side_expected_results.get(), device_side_expected_results.get(), sizeof(I) * NUM_TEST_LINES); for(auto i { 0 }; i < NUM_TEST_LINES; i++) { CHECK(host_side_results.get()[i] == host_side_expected_results.get()[i]); if (host_side_results.get()[i] != host_side_expected_results.get()[i]) { MESSAGE("index of failure was: " << i); } } } } // TEST_SUITE("math")
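// The expected[] values hard-coded in the kernel above follow from simple closed forms.
// Below is a host-side analogue of a few of the tested functions -- an illustrative sketch,
// not the kat:: device implementations -- checked against the same entries as the test table.
#include <cassert>
template <typename I>
constexpr I div_rounding_up(I dividend, I divisor) {
  // Naive form; can overflow near numeric_limits<I>::max(), the same caveat kat::unsafe:: carries.
  return (dividend + divisor - I{1}) / divisor;
}
template <typename I>
constexpr I round_up(I x, I multiple) {
  return div_rounding_up(x, multiple) * multiple;
}
template <typename I>
constexpr I modulo_power_of_2(I x, I pow2) {
  return x & (pow2 - I{1}); // valid only when pow2 is a power of 2
}
int main() {
  assert(div_rounding_up(124, 123) == 2);  // matches the expected[] entry above
  assert(round_up(33, 32) == 64);          // round_up_to_full_warps(33) with a warp size of 32
  assert(modulo_power_of_2(63, 4) == 3);   // matches the last expected[] entry above
  return 0;
}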
__device__ ulonglong8to16 *state2; uint32_t *d_YNonce[MAX_GPUS]; __constant__ uint32_t pTarget[8]; __constant__ uint32_t c_data[32]; __constant__ uint16 shapad; static uint32_t *d_hash[MAX_GPUS]; static uint8* d_hash2[MAX_GPUS]; static uint32* d_hash3[MAX_GPUS]; static uint32* d_hash4[MAX_GPUS]; #define xor3b(a,b,c) (a^b^c) #define andor32(x, y, z) ((x & (y | z)) | (y & z)) #define xandx(a, b, c) (((b^c) & a) ^ c) #define Maj(x, y, z) ((x & (y | z)) | (y & z)) #define Ch(a, b, c) (((b^c) & a) ^ c) static __device__ __forceinline__ void madd4long2(ulonglong2 &a, ulonglong2 b){ asm ("{\n\t" ".reg .u32 a0,a1,a2,a3,b0,b1,b2,b3;\n\t" "mov.b64 {a0,a1}, %0;\n\t" "mov.b64 {a2,a3}, %1;\n\t" "mov.b64 {b0,b1}, %2;\n\t" "mov.b64 {b2,b3}, %3;\n\t" "mad.lo.cc.u32 b0,a0,a1,b0; \n\t" "madc.hi.u32 b1,a0,a1,b1; \n\t" "mad.lo.cc.u32 b2,a2,a3,b2; \n\t" "madc.hi.u32 b3,a2,a3,b3; \n\t" "mov.b64 %0, {b0,b1};\n\t" "mov.b64 %1, {b2,b3};\n\t" "}\n\t" : "+l"(a.x), "+l"(a.y) : "l"(b.x), "l"(b.y)); } static __device__ __forceinline__ ulonglong2 __ldg2(const ulonglong2 *ptr){ ulonglong2 ret; asm("ld.global.nc.v2.u64 {%0,%1}, [%2];" : "=l"(ret.x), "=l"(ret.y) : __LDG_PTR(ptr)); return ret; } static __device__ __forceinline__ uint32 __ldg32b(const uint32 *ptr) { uint32 ret; asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4];" : "=r"(ret.lo.s0), "=r"(ret.lo.s1), "=r"(ret.lo.s2), "=r"(ret.lo.s3) : __LDG_PTR(ptr)); asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4+16];" : "=r"(ret.lo.s4), "=r"(ret.lo.s5), "=r"(ret.lo.s6), "=r"(ret.lo.s7) : __LDG_PTR(ptr)); asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4+32];" : "=r"(ret.lo.s8), "=r"(ret.lo.s9), "=r"(ret.lo.sa), "=r"(ret.lo.sb) : __LDG_PTR(ptr)); asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4+48];" : "=r"(ret.lo.sc), "=r"(ret.lo.sd), "=r"(ret.lo.se), "=r"(ret.lo.sf) : __LDG_PTR(ptr)); asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4+64];" : "=r"(ret.hi.s0), "=r"(ret.hi.s1), "=r"(ret.hi.s2), "=r"(ret.hi.s3) : __LDG_PTR(ptr)); asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4+80];" : "=r"(ret.hi.s4), "=r"(ret.hi.s5), "=r"(ret.hi.s6), "=r"(ret.hi.s7) : __LDG_PTR(ptr)); asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4+96];" : "=r"(ret.hi.s8), "=r"(ret.hi.s9), "=r"(ret.hi.sa), "=r"(ret.hi.sb) : __LDG_PTR(ptr)); asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4+112];" : "=r"(ret.hi.sc), "=r"(ret.hi.sd), "=r"(ret.hi.se), "=r"(ret.hi.sf) : __LDG_PTR(ptr)); return ret; } static __device__ __forceinline__ uint16 __ldg16b(const uint16 *ptr) { uint16 ret; asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4];" : "=r"(ret.s0), "=r"(ret.s1), "=r"(ret.s2), "=r"(ret.s3) : __LDG_PTR(ptr)); asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4+16];" : "=r"(ret.s4), "=r"(ret.s5), "=r"(ret.s6), "=r"(ret.s7) : __LDG_PTR(ptr)); asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4+32];" : "=r"(ret.s8), "=r"(ret.s9), "=r"(ret.sa), "=r"(ret.sb) : __LDG_PTR(ptr)); asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4+48];" : "=r"(ret.sc), "=r"(ret.sd), "=r"(ret.se), "=r"(ret.sf) : __LDG_PTR(ptr)); return ret; } /////////////////////////////////////////////////////////////////////////////////// /////////////////////////////// sha256 Transform function ///////////////////////// static __constant__ const uint16 pad1 = { 0x36363636, 0x36363636, 0x36363636, 0x36363636, 0x36363636, 0x36363636, 0x36363636, 0x36363636, 0x36363636, 0x36363636, 0x36363636, 0x36363636, 0x36363636, 0x36363636, 0x36363636, 0x36363636 }; static __constant__ const uint16 pad2 = { 0x5c5c5c5c, 0x5c5c5c5c, 0x5c5c5c5c, 0x5c5c5c5c, 0x5c5c5c5c, 0x5c5c5c5c, 0x5c5c5c5c, 0x5c5c5c5c, 0x5c5c5c5c, 0x5c5c5c5c, 0x5c5c5c5c, 
0x5c5c5c5c, 0x5c5c5c5c, 0x5c5c5c5c, 0x5c5c5c5c, 0x5c5c5c5c }; static __constant__ const uint16 pad5 = { 0x00000001, 0x80000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00002220 }; static __constant__ const uint16 padsha80 = { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x80000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000280 }; static __constant__ const uint8 pad4 = { 0x80000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000300 }; static __constant__ const uint8 H256 = { 0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A, 0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19 }; __constant__ static uint32_t _ALIGN(16) Ksha[64] = { 0x428A2F98, 0x71374491, 0xB5C0FBCF, 0xE9B5DBA5, 0x3956C25B, 0x59F111F1, 0x923F82A4, 0xAB1C5ED5, 0xD807AA98, 0x12835B01, 0x243185BE, 0x550C7DC3, 0x72BE5D74, 0x80DEB1FE, 0x9BDC06A7, 0xC19BF174, 0xE49B69C1, 0xEFBE4786, 0x0FC19DC6, 0x240CA1CC, 0x2DE92C6F, 0x4A7484AA, 0x5CB0A9DC, 0x76F988DA, 0x983E5152, 0xA831C66D, 0xB00327C8, 0xBF597FC7, 0xC6E00BF3, 0xD5A79147, 0x06CA6351, 0x14292967, 0x27B70A85, 0x2E1B2138, 0x4D2C6DFC, 0x53380D13, 0x650A7354, 0x766A0ABB, 0x81C2C92E, 0x92722C85, 0xA2BFE8A1, 0xA81A664B, 0xC24B8B70, 0xC76C51A3, 0xD192E819, 0xD6990624, 0xF40E3585, 0x106AA070, 0x19A4C116, 0x1E376C08, 0x2748774C, 0x34B0BCB5, 0x391C0CB3, 0x4ED8AA4A, 0x5B9CCA4F, 0x682E6FF3, 0x748F82EE, 0x78A5636F, 0x84C87814, 0x8CC70208, 0x90BEFFFA, 0xA4506CEB, 0xBEF9A3F7, 0xC67178F2 }; __device__ __forceinline__ static uint32_t bsg2_0(const uint32_t x){ return xor3b(ROTR32(x, 2), ROTR32(x, 13), ROTR32(x, 22)); } __device__ __forceinline__ static uint32_t bsg2_1(const uint32_t x){ return xor3b(ROTR32(x, 6), ROTR32(x, 11), ROTR32(x, 25)); } __device__ __forceinline__ static uint32_t ssg2_0(const uint32_t x){ return xor3b(ROTR32(x, 7), ROTR32(x, 18), shr_u32(x, 3)); } __device__ __forceinline__ static uint32_t ssg2_1(const uint32_t x){ return xor3b(ROTR32(x, 17), ROTR32(x, 19), shr_u32(x, 10)); } __device__ __forceinline__ static void sha2_step1(const uint32_t a, const uint32_t b, const uint32_t c, uint32_t &d, const uint32_t e, const uint32_t f, const uint32_t g, uint32_t &h, const uint32_t in, const uint32_t Kshared){ const uint32_t t1 = h + bsg2_1(e) + Ch(e, f, g) + Kshared + in; h = t1 + bsg2_0(a) + Maj(a, b, c); d+= t1; } __device__ __forceinline__ static void sha2_step2(const uint32_t a, const uint32_t b, const uint32_t c, uint32_t &d, const uint32_t e,const uint32_t f, const uint32_t g, uint32_t &h, uint32_t* in, const uint32_t pc, const uint32_t Kshared){ uint32_t t1, t2; int pcidx1 = (pc - 2) & 0xF; int pcidx2 = (pc - 7) & 0xF; int pcidx3 = (pc - 15) & 0xF; uint32_t inx0 = in[pc]; uint32_t inx1 = in[pcidx1]; uint32_t inx2 = in[pcidx2]; uint32_t inx3 = in[pcidx3]; uint32_t ssg21 = ssg2_1(inx1); uint32_t ssg20 = ssg2_0(inx3); uint32_t vxandx = xandx(e, f, g); uint32_t bsg21 = bsg2_1(e); uint32_t bsg20 = bsg2_0(a); uint32_t andorv = andor32(a, b, c); in[pc] = ssg21 + inx2 + ssg20 + inx0; t1 = h + bsg21 + vxandx + Kshared + in[pc]; t2 = bsg20 + andorv; d = d + t1; h = t1 + t2; } #define SALSA(a,b,c,d) { \ b^=ROTL32(a+d, 7); \ c^=ROTL32(a+b, 9); \ d^=ROTL32(b+c, 13); \ a^=ROTL32(d+c, 18); \ } #define SALSA_CORE(state) { \ \ SALSA(state.s0,state.s4,state.s8,state.sc); \ SALSA(state.s5,state.s9,state.sd,state.s1); \ SALSA(state.sa,state.se,state.s2,state.s6); \ 
SALSA(state.sf,state.s3,state.s7,state.sb); \ SALSA(state.s0,state.s1,state.s2,state.s3); \ SALSA(state.s5,state.s6,state.s7,state.s4); \ SALSA(state.sa,state.sb,state.s8,state.s9); \ SALSA(state.sf,state.sc,state.sd,state.se); \ } #define uSALSA_CORE(state) { \ \ SALSA(state.s0,state.s4,state.s8,state.sc); \ SALSA(state.s1,state.s5,state.s9,state.sd); \ SALSA(state.s2,state.s6,state.sa,state.se); \ SALSA(state.s3,state.s7,state.sb,state.sf); \ SALSA(state.s0,state.sd,state.sa,state.s7); \ SALSA(state.s1,state.se,state.sb,state.s4); \ SALSA(state.s2,state.sf,state.s8,state.s5); \ SALSA(state.s3,state.sc,state.s9,state.s6); \ } #define shuffle(stat,state) { \ stat.s0 = state.s0; \ stat.s1 = state.s5; \ stat.s2 = state.sa; \ stat.s3 = state.sf; \ stat.s4 = state.s4; \ stat.s5 = state.s9; \ stat.s6 = state.se; \ stat.s7 = state.s3; \ stat.s8 = state.s8; \ stat.s9 = state.sd; \ stat.sa = state.s2; \ stat.sb = state.s7; \ stat.sc = state.sc; \ stat.sd = state.s1; \ stat.se = state.s6; \ stat.sf = state.sb; \ } #define unshuffle(state,X) { \ state.s0 = X.s0; \ state.s1 = X.sd; \ state.s2 = X.sa; \ state.s3 = X.s7; \ state.s4 = X.s4; \ state.s5 = X.s1; \ state.s6 = X.se; \ state.s7 = X.sb; \ state.s8 = X.s8; \ state.s9 = X.s5; \ state.sa = X.s2; \ state.sb = X.sf; \ state.sc = X.sc; \ state.sd = X.s9; \ state.se = X.s6; \ state.sf = X.s3; \ } __device__ __forceinline__ static void sha256_Transform(uint32_t* in, uint32_t *state){ // also known as sha2_round_body uint32_t a = state[0]; uint32_t b = state[1]; uint32_t c = state[2]; uint32_t d = state[3]; uint32_t e = state[4]; uint32_t f = state[5]; uint32_t g = state[6]; uint32_t h = state[7]; sha2_step1(a, b, c, d, e, f, g, h, in[ 0], Ksha[0]); sha2_step1(h, a, b, c, d, e, f, g, in[ 1], Ksha[1]); sha2_step1(g, h, a, b, c, d, e, f, in[ 2], Ksha[2]); sha2_step1(f, g, h, a, b, c, d, e, in[ 3], Ksha[3]); sha2_step1(e, f, g, h, a, b, c, d, in[ 4], Ksha[4]); sha2_step1(d, e, f, g, h, a, b, c, in[ 5], Ksha[5]); sha2_step1(c, d, e, f, g, h, a, b, in[ 6], Ksha[6]); sha2_step1(b, c, d, e, f, g, h, a, in[ 7], Ksha[7]); sha2_step1(a, b, c, d, e, f, g, h, in[ 8], Ksha[8]); sha2_step1(h, a, b, c, d, e, f, g, in[ 9], Ksha[9]); sha2_step1(g, h, a, b, c, d, e, f, in[10], Ksha[10]); sha2_step1(f, g, h, a, b, c, d, e, in[11], Ksha[11]); sha2_step1(e, f, g, h, a, b, c, d, in[12], Ksha[12]); sha2_step1(d, e, f, g, h, a, b, c, in[13], Ksha[13]); sha2_step1(c, d, e, f, g, h, a, b, in[14], Ksha[14]); sha2_step1(b, c, d, e, f, g, h, a, in[15], Ksha[15]); #pragma unroll 3 for (uint32_t i = 0; i<3; i++) { sha2_step2(a, b, c, d, e, f, g, h, in, 0, Ksha[16 + 16 * i]); sha2_step2(h, a, b, c, d, e, f, g, in, 1, Ksha[17 + 16 * i]); sha2_step2(g, h, a, b, c, d, e, f, in, 2, Ksha[18 + 16 * i]); sha2_step2(f, g, h, a, b, c, d, e, in, 3, Ksha[19 + 16 * i]); sha2_step2(e, f, g, h, a, b, c, d, in, 4, Ksha[20 + 16 * i]); sha2_step2(d, e, f, g, h, a, b, c, in, 5, Ksha[21 + 16 * i]); sha2_step2(c, d, e, f, g, h, a, b, in, 6, Ksha[22 + 16 * i]); sha2_step2(b, c, d, e, f, g, h, a, in, 7, Ksha[23 + 16 * i]); sha2_step2(a, b, c, d, e, f, g, h, in, 8, Ksha[24 + 16 * i]); sha2_step2(h, a, b, c, d, e, f, g, in, 9, Ksha[25 + 16 * i]); sha2_step2(g, h, a, b, c, d, e, f, in, 10, Ksha[26 + 16 * i]); sha2_step2(f, g, h, a, b, c, d, e, in, 11, Ksha[27 + 16 * i]); sha2_step2(e, f, g, h, a, b, c, d, in, 12, Ksha[28 + 16 * i]); sha2_step2(d, e, f, g, h, a, b, c, in, 13, Ksha[29 + 16 * i]); sha2_step2(c, d, e, f, g, h, a, b, in, 14, Ksha[30 + 16 * i]); sha2_step2(b, c, d, e, f, g, h, a, in, 15, Ksha[31 + 
16 * i]); } state[0] += a; state[1] += b; state[2] += c; state[3] += d; state[4] += e; state[5] += f; state[6] += g; state[7] += h; } __device__ __forceinline__ static uint8 sha256_Transform2(uint16 in[1], const uint8 &r){ // also known as sha2_round_body uint8 tmp = r; #define a tmp.s0 #define b tmp.s1 #define c tmp.s2 #define d tmp.s3 #define e tmp.s4 #define f tmp.s5 #define g tmp.s6 #define h tmp.s7 sha2_step1(a, b, c, d, e, f, g, h, in[0].s0, Ksha[0]); sha2_step1(h, a, b, c, d, e, f, g, in[0].s1, Ksha[1]); sha2_step1(g, h, a, b, c, d, e, f, in[0].s2, Ksha[2]); sha2_step1(f, g, h, a, b, c, d, e, in[0].s3, Ksha[3]); sha2_step1(e, f, g, h, a, b, c, d, in[0].s4, Ksha[4]); sha2_step1(d, e, f, g, h, a, b, c, in[0].s5, Ksha[5]); sha2_step1(c, d, e, f, g, h, a, b, in[0].s6, Ksha[6]); sha2_step1(b, c, d, e, f, g, h, a, in[0].s7, Ksha[7]); sha2_step1(a, b, c, d, e, f, g, h, in[0].s8, Ksha[8]); sha2_step1(h, a, b, c, d, e, f, g, in[0].s9, Ksha[9]); sha2_step1(g, h, a, b, c, d, e, f, in[0].sa, Ksha[10]); sha2_step1(f, g, h, a, b, c, d, e, in[0].sb, Ksha[11]); sha2_step1(e, f, g, h, a, b, c, d, in[0].sc, Ksha[12]); sha2_step1(d, e, f, g, h, a, b, c, in[0].sd, Ksha[13]); sha2_step1(c, d, e, f, g, h, a, b, in[0].se, Ksha[14]); sha2_step1(b, c, d, e, f, g, h, a, in[0].sf, Ksha[15]); #pragma unroll 3 for (uint32_t i = 0; i<3; i++) { sha2_step2(a, b, c, d, e, f, g, h, (uint32_t*)in, 0, Ksha[16 + 16 * i]); sha2_step2(h, a, b, c, d, e, f, g, (uint32_t*)in, 1, Ksha[17 + 16 * i]); sha2_step2(g, h, a, b, c, d, e, f, (uint32_t*)in, 2, Ksha[18 + 16 * i]); sha2_step2(f, g, h, a, b, c, d, e, (uint32_t*)in, 3, Ksha[19 + 16 * i]); sha2_step2(e, f, g, h, a, b, c, d, (uint32_t*)in, 4, Ksha[20 + 16 * i]); sha2_step2(d, e, f, g, h, a, b, c, (uint32_t*)in, 5, Ksha[21 + 16 * i]); sha2_step2(c, d, e, f, g, h, a, b, (uint32_t*)in, 6, Ksha[22 + 16 * i]); sha2_step2(b, c, d, e, f, g, h, a, (uint32_t*)in, 7, Ksha[23 + 16 * i]); sha2_step2(a, b, c, d, e, f, g, h, (uint32_t*)in, 8, Ksha[24 + 16 * i]); sha2_step2(h, a, b, c, d, e, f, g, (uint32_t*)in, 9, Ksha[25 + 16 * i]); sha2_step2(g, h, a, b, c, d, e, f, (uint32_t*)in, 10, Ksha[26 + 16 * i]); sha2_step2(f, g, h, a, b, c, d, e, (uint32_t*)in, 11, Ksha[27 + 16 * i]); sha2_step2(e, f, g, h, a, b, c, d, (uint32_t*)in, 12, Ksha[28 + 16 * i]); sha2_step2(d, e, f, g, h, a, b, c, (uint32_t*)in, 13, Ksha[29 + 16 * i]); sha2_step2(c, d, e, f, g, h, a, b, (uint32_t*)in, 14, Ksha[30 + 16 * i]); sha2_step2(b, c, d, e, f, g, h, a, (uint32_t*)in, 15, Ksha[31 + 16 * i]); } #undef a #undef b #undef c #undef d #undef e #undef f return (r + tmp); } __device__ __forceinline__ static uint8 sha256_Transform3(uint32_t nonce,uint32_t next, const uint8 &r){ // also known as sha2_round_body uint8 tmp = r; uint16 in[1]={shapad}; in[0].s3=nonce; in[0].s4=next; #define a tmp.s0 #define b tmp.s1 #define c tmp.s2 #define d tmp.s3 #define e tmp.s4 #define f tmp.s5 #define g tmp.s6 #define h tmp.s7 sha2_step1(a, b, c, d, e, f, g, h, in[0].s0, Ksha[0]); sha2_step1(h, a, b, c, d, e, f, g, in[0].s1, Ksha[1]); sha2_step1(g, h, a, b, c, d, e, f, in[0].s2, Ksha[2]); sha2_step1(f, g, h, a, b, c, d, e, in[0].s3, Ksha[3]); sha2_step1(e, f, g, h, a, b, c, d, in[0].s4, Ksha[4]); sha2_step1(d, e, f, g, h, a, b, c, in[0].s5, Ksha[5]); sha2_step1(c, d, e, f, g, h, a, b, in[0].s6, Ksha[6]); sha2_step1(b, c, d, e, f, g, h, a, in[0].s7, Ksha[7]); sha2_step1(a, b, c, d, e, f, g, h, in[0].s8, Ksha[8]); sha2_step1(h, a, b, c, d, e, f, g, in[0].s9, Ksha[9]); sha2_step1(g, h, a, b, c, d, e, f, in[0].sa, Ksha[10]); 
sha2_step1(f, g, h, a, b, c, d, e, in[0].sb, Ksha[11]); sha2_step1(e, f, g, h, a, b, c, d, in[0].sc, Ksha[12]); sha2_step1(d, e, f, g, h, a, b, c, in[0].sd, Ksha[13]); sha2_step1(c, d, e, f, g, h, a, b, in[0].se, Ksha[14]); sha2_step1(b, c, d, e, f, g, h, a, in[0].sf, Ksha[15]); #pragma unroll 3 for (uint32_t i = 0; i<3; i++) { sha2_step2(a, b, c, d, e, f, g, h, (uint32_t*)in, 0, Ksha[16 + 16 * i]); sha2_step2(h, a, b, c, d, e, f, g, (uint32_t*)in, 1, Ksha[17 + 16 * i]); sha2_step2(g, h, a, b, c, d, e, f, (uint32_t*)in, 2, Ksha[18 + 16 * i]); sha2_step2(f, g, h, a, b, c, d, e, (uint32_t*)in, 3, Ksha[19 + 16 * i]); sha2_step2(e, f, g, h, a, b, c, d, (uint32_t*)in, 4, Ksha[20 + 16 * i]); sha2_step2(d, e, f, g, h, a, b, c, (uint32_t*)in, 5, Ksha[21 + 16 * i]); sha2_step2(c, d, e, f, g, h, a, b, (uint32_t*)in, 6, Ksha[22 + 16 * i]); sha2_step2(b, c, d, e, f, g, h, a, (uint32_t*)in, 7, Ksha[23 + 16 * i]); sha2_step2(a, b, c, d, e, f, g, h, (uint32_t*)in, 8, Ksha[24 + 16 * i]); sha2_step2(h, a, b, c, d, e, f, g, (uint32_t*)in, 9, Ksha[25 + 16 * i]); sha2_step2(g, h, a, b, c, d, e, f, (uint32_t*)in, 10, Ksha[26 + 16 * i]); sha2_step2(f, g, h, a, b, c, d, e, (uint32_t*)in, 11, Ksha[27 + 16 * i]); sha2_step2(e, f, g, h, a, b, c, d, (uint32_t*)in, 12, Ksha[28 + 16 * i]); sha2_step2(d, e, f, g, h, a, b, c, (uint32_t*)in, 13, Ksha[29 + 16 * i]); sha2_step2(c, d, e, f, g, h, a, b, (uint32_t*)in, 14, Ksha[30 + 16 * i]); sha2_step2(b, c, d, e, f, g, h, a, (uint32_t*)in, 15, Ksha[31 + 16 * i]); } #undef a #undef b #undef c #undef d #undef e #undef f return (r + tmp); } //////////////////////////////// end sha transform mechanism //////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////////////////// __device__ __forceinline__ static uint16 salsa20_8(const uint16 &X){ uint16 state=X; #pragma unroll 4 for (uint32_t i = 0; i < 4; ++i) uSALSA_CORE(state); return(X + state); } __device__ __forceinline__ static void block_pwxform_long(int thread, ulonglong2to8 *const __restrict__ Bout,uint32 *const __restrict__ prevstate){ ulonglong2 vec = Bout->l0; #pragma unroll 6 for (uint32_t i = 0; i < 6; i++) { ulonglong2 p0, p1; uint2 x = vectorize((vec.x >> 4) & 0x000000FF000000FF); p0 = __ldg2(&((ulonglong2*)(prevstate + 64 * thread))[x.x]); madd4long2(vec, p0); p1 = __ldg2(&((ulonglong2*)(prevstate + 64 * thread + 32))[x.y]); vec ^= p1; } Bout->l0 = vec; vec = Bout->l1; #pragma unroll 6 for (uint32_t i = 0; i < 6; i++){ ulonglong2 p0, p1; uint2 x = vectorize((vec.x >> 4) & 0x000000FF000000FF); p0 = __ldg2(&((ulonglong2*)(prevstate + 64 * thread))[x.x]); madd4long2(vec, p0); p1 = __ldg2(&((ulonglong2*)(prevstate + 64 * thread + 32))[x.y]); vec ^= p1; } Bout->l1 = vec; vec = Bout->l2; #pragma unroll 6 for (uint32_t i = 0; i < 6; i++){ ulonglong2 p0, p1; uint2 x = vectorize((vec.x >> 4) & 0x000000FF000000FF); p0 = __ldg2(&((ulonglong2*)(prevstate + 64 * thread))[x.x]); madd4long2(vec, p0); p1 = __ldg2(&((ulonglong2*)(prevstate + 64 * thread + 32))[x.y]); vec ^= p1; } Bout->l2 = vec; vec = Bout->l3; #pragma unroll 6 for (uint32_t i = 0; i < 6; i++){ ulonglong2 p0, p1; uint2 x = vectorize((vec.x >> 4) & 0x000000FF000000FF); p0 = __ldg2(&((ulonglong2*)(prevstate + 64 * thread))[x.x]); madd4long2(vec, p0); p1 = __ldg2(&((ulonglong2*)(prevstate + 64 * thread + 32))[x.y]); vec ^= p1; } Bout->l3 = vec; } __device__ __forceinline__ static void blockmix_salsa8_small2(uint32 &Bin) { uint16 X = Bin.hi; X ^= Bin.lo; X = salsa20_8(X); Bin.lo = X; X ^= Bin.hi; X = 
salsa20_8(X); Bin.hi = X; } __device__ __forceinline__ static void blockmix_pwxform3(int thread, ulonglong2to8 *const __restrict__ Bin,uint32 *const __restrict__ prevstate){ Bin[0] ^= Bin[15]; block_pwxform_long(thread, &Bin[0],prevstate); for (uint32_t i = 1; i < 16; i++){ Bin[i] ^= Bin[i - 1]; block_pwxform_long(thread, &Bin[i],prevstate); } ((uint16*)Bin)[15] = salsa20_8(((uint16*)Bin)[15]); // Bin[15] = salsa20_8_long(Bin[15]); } __global__ __launch_bounds__(256, 1) void yescrypt_gpu_hash_k0(const uint32_t threads,const uint32_t startNonce, uint8* sha256test, uint32* B){ const uint32_t thread = (blockDim.x * blockIdx.x + threadIdx.x); if (thread < threads){ const uint32_t nonce = startNonce + thread; uint16 in[1]; uint8 state1, state2; uint8 passwd;// = sha256_80(nonce); uint32_t in1[16] = { 0 }; // uint32_t buf[ 8]; ((uint16*)in1)[0] = ((uint16*)c_data)[0]; passwd = H256; sha256_Transform(in1, ((uint32_t*)&passwd)); ((uint16*)in1)[0] = padsha80; in1[0] = c_data[16]; in1[1] = c_data[17]; in1[2] = c_data[18]; in1[3] = nonce; sha256_Transform(in1, ((uint32_t*)&passwd)); in[0].lo = pad1.lo ^ passwd; in[0].hi = pad1.hi; state1 = sha256_Transform2(in, H256); in[0].lo = pad2.lo ^ passwd; in[0].hi = pad2.hi; state2 = sha256_Transform2(in, H256); in[0] = ((uint16*)c_data)[0]; ///HMAC_SHA256_update(salt) state1 = sha256_Transform2(in, state1); #pragma unroll for (uint32_t i = 0; i<8; i++) { uint32 result; in[0].lo = sha256_Transform3(nonce,4*i+1, state1); in[0].hi = pad4; result.lo.lo = swapvec(sha256_Transform2(in, state2)); if (i == 0) (sha256test + thread)[0] = result.lo.lo; in[0].lo = sha256_Transform3(nonce,4*i+2, state1); in[0].hi = pad4; result.lo.hi = swapvec(sha256_Transform2(in, state2)); in[0].lo = sha256_Transform3(nonce,4*i+3, state1); in[0].hi = pad4; result.hi.lo = swapvec(sha256_Transform2(in, state2)); in[0].lo = sha256_Transform3(nonce,4*i+4, state1); in[0].hi = pad4; result.hi.hi = swapvec(sha256_Transform2(in, state2)); shuffle((B + 8 * thread)[i].lo, result.lo); shuffle((B + 8 * thread)[i].hi, result.hi); } } } __global__ __launch_bounds__(32, 1) void yescrypt_gpu_hash_k1(const uint32_t threads, uint32_t startNonce,uint32* prevstate,uint32* B){ const uint32_t thread = (blockDim.x * blockIdx.x + threadIdx.x); if (thread < threads){ // smix1_first(thread); uint32 X; #define Bdev(x) (B+8*thread)[x] #define state(x) (prevstate+64*thread)[x] X = Bdev(0); state(0) = X; blockmix_salsa8_small2(X); state(1) = X; blockmix_salsa8_small2(X); uint32_t n = 1; #pragma unroll for (uint32_t i = 2; i < 64; i ++) { state(i) = X; if ((i&(i - 1)) == 0) n = n << 1; uint32_t j = X.hi.s0 & (n - 1); j += i - n; X ^= __ldg32b(&state(j)); blockmix_salsa8_small2(X); } Bdev(0) = X; #undef Bdev #undef state } } __global__ #if __CUDA_ARCH__ > 500 __launch_bounds__(128, 1) #else __launch_bounds__(16, 1) #endif void yescrypt_gpu_hash_k2c(int threads,uint32* prevstate,uint32* B){ const uint32_t thread = (blockDim.x * blockIdx.x + threadIdx.x); if (thread < threads){ // smix1_second(thread); ulonglong8to16 X[8]; //,Z; const uint32_t length = 8; const uint32_t shift = 8 * 2048 * thread; #define Bdev(x) (B+8*thread)[x] #pragma unroll 8 for (uint32_t i = 0; i<8; i++) { ((uint32*)X)[i] = __ldg32b(&Bdev(i)); } #pragma unroll 8 for (uint32_t i = 0; i<length; i++) (state2+shift)[i] = X[i]; blockmix_pwxform3(thread, (ulonglong2to8*)X, prevstate); #pragma unroll 8 for (uint32_t i = 0; i<length; i++) (state2 + shift+length)[i] = X[i]; blockmix_pwxform3(thread, (ulonglong2to8*)X, prevstate); uint32_t n = 1; for 
(uint32_t i = 2; i < 2048; i++){ #pragma unroll 8 for (uint32_t k = 0; k<length; k++) (state2 + shift + length * i)[k] = X[k]; if ((i&(i - 1)) == 0) n = n << 1; const uint32_t j = (((uint32*)X)[7].hi.s0 & (n - 1)) + i - n; #pragma unroll 64 for (uint32_t k = 0; k < 64; k++) ((ulonglong2*)X)[k] ^= __ldg2(&((ulonglong2*)(state2 + shift + length * j))[k]); blockmix_pwxform3(thread, (ulonglong2to8*)X, prevstate); } #pragma unroll 8 for (uint32_t i = 0; i<8; i++) { (B + 8 * thread)[i] = ((uint32*)X)[i]; } ///////////////////////////////////////////////// } } __global__ #if __CUDA_ARCH__ > 500 __launch_bounds__(128, 1) #else __launch_bounds__(16, 1) #endif void yescrypt_gpu_hash_k2c1(int threads,uint32* prevstate,uint32* B){ const uint32_t thread = (blockDim.x * blockIdx.x + threadIdx.x); if (thread < threads){ ulonglong8to16 X[8]; //,Z; const uint32_t length = 8; const uint32_t shift = 8 * 2048 * thread; #define Bdev(x) (B+8*thread)[x] #define BigStore(s,i) (state2 + shift + s)[i] #pragma unroll 8 for (uint32_t i = 0; i<8; i++) { ((uint32*)X)[i] = __ldg32b(&Bdev(i)); } for (uint32_t z = 0; z < 682; z++){ uint32_t j = ((uint32*)X)[7].hi.s0 & 2047; #pragma unroll 64 for (uint32_t k = 0; k < 64; k++) ((ulonglong2*)X)[k] ^= __ldg2(&((ulonglong2*)(state2 + shift + length * j))[k]); #pragma unroll 8 for (uint32_t k = 0; k<length; k++) BigStore(length * j, k) = X[k]; blockmix_pwxform3(thread, (ulonglong2to8*)X, prevstate); } for (uint32_t z = 682; z < 684; z++){ uint32_t j = ((uint32*)X)[7].hi.s0 & 2047; #pragma unroll 64 for (uint32_t k = 0; k < 64; k++) ((ulonglong2*)X)[k] ^= __ldg2(&((ulonglong2*)(state2 + shift + length * j))[k]); blockmix_pwxform3(thread, (ulonglong2to8*)X, prevstate); } #pragma unroll 8 for (uint32_t i = 0; i<8; i++) { // ((uint32*)X)[i] = Bdev(i); unshuffle(Bdev(i).lo, ((uint32*)X)[i].lo); unshuffle(Bdev(i).hi, ((uint32*)X)[i].hi); } } } __global__ __launch_bounds__(16, 1) void yescrypt_gpu_hash_k5(int threads, uint32_t startNonce, uint32_t *resNonces, uint8* sha256test, uint32* B){ int thread = (blockDim.x * blockIdx.x + threadIdx.x); const uint32_t nonce = startNonce + thread; uint16 in[1]; uint8 state1, state2; uint8 swpass = (sha256test + thread)[0]; #define Bdev(x) (B+8*thread)[x] swpass = swapvec(swpass); in[0].lo = pad1.lo ^ swpass; in[0].hi = pad1.hi; state1 = sha256_Transform2(in, H256); in[0].lo = pad2.lo ^ swpass; in[0].hi = pad2.hi; state2 = sha256_Transform2(in, H256); for (uint32_t i = 0; i<8; i++) { in[0] = __ldg16b(&Bdev(i).lo); in[0] = swapvec(in[0]); state1 = sha256_Transform2(in, state1); in[0] = __ldg16b(&Bdev(i).hi); in[0] = swapvec(in[0]); state1 = sha256_Transform2(in, state1); } in[0] = pad5; state1 = sha256_Transform2(in, state1); in[0].lo = state1; in[0].hi = pad4; uint8 res = sha256_Transform2(in, state2); //hmac and final sha state1 = state2 = H256; in[0].lo = pad1.lo ^ res; in[0].hi = pad1.hi; state1 = sha256_Transform2(in, state1); in[0].lo = pad2.lo ^ res; in[0].hi = pad2.hi; state2 = sha256_Transform2(in, state2); in[0] = ((uint16*)c_data)[0]; state1 = sha256_Transform2(in, state1); in[0] = padsha80; in[0].s0 = c_data[16]; in[0].s1 = c_data[17]; in[0].s2 = c_data[18]; in[0].s3 = nonce; in[0].sf = 0x480; state1 = sha256_Transform2(in, state1); in[0].lo = state1; in[0].hi = pad4; state1 = sha256_Transform2(in, state2); // state2 = H256; in[0].lo = state1; in[0].hi = pad4; in[0].sf = 0x100; res = sha256_Transform2(in, H256); // return(swapvec(res)); // uint8 res = pbkdf_sha256_second2(thread, nonce); if (cuda_swab32(res.s7) <= pTarget[7]) { uint32_t 
tmp = atomicExch(&resNonces[0], nonce); if(tmp != UINT32_MAX) resNonces[1] = tmp; } } __host__ void yescrypt_cpu_init(int thr_id, int threads){ //PREPEI NA MPEI KAI FREE! CUDA_SAFE_CALL(cudaMalloc(&d_hash[thr_id], 2048 * 128 * sizeof(uint64_t) * threads)); CUDA_SAFE_CALL(cudaMalloc(&d_hash2[thr_id], 8 * sizeof(uint32_t) * threads)); CUDA_SAFE_CALL(cudaMalloc(&d_hash3[thr_id], 32*64 * sizeof(uint32_t) * threads)); CUDA_SAFE_CALL(cudaMalloc(&d_hash4[thr_id], 32*8 * sizeof(uint32_t) * threads)); cudaMemcpyToSymbol(state2, &d_hash[thr_id], sizeof(d_hash[thr_id]), 0, cudaMemcpyHostToDevice); // cudaMemcpyToSymbol(sha256test, &hash2, sizeof(hash2), 0, cudaMemcpyHostToDevice); // cudaMemcpyToSymbol(prevstate, &d_hash3[thr_id], sizeof(d_hash3[thr_id]), 0, cudaMemcpyHostToDevice); // cudaMemcpyToSymbol(B, &d_hash4[thr_id], sizeof(d_hash4[thr_id]), 0, cudaMemcpyHostToDevice); } __host__ void yescrypt_free(int thr_id){ cudaFree(d_hash[thr_id]); cudaFree(d_hash2[thr_id]); cudaFree(d_hash3[thr_id]); cudaFree(d_hash4[thr_id]); } __host__ void yescrypt_cpu_hash_k4(int thr_id, int threads, uint32_t startNounce, uint32_t* resNonce){ int dev_id = device_map[thr_id]; const uint32_t threadsperblock = 16; const uint32_t threadsperblock2 = 64; const uint32_t threadsperblock3 = 64; const uint32_t threadsperblock4 = 32; const uint32_t tpbk2c = device_sm[dev_id]<=500 ? 16 : 128; dim3 gridk2c((threads + tpbk2c - 1) / tpbk2c); dim3 blockk2c(tpbk2c); dim3 grid4((threads + threadsperblock4 - 1) / threadsperblock4); dim3 block4(threadsperblock4); dim3 grid3((threads + threadsperblock3 - 1) / threadsperblock3); dim3 block3(threadsperblock3); dim3 grid2((threads + threadsperblock2 - 1) / threadsperblock2); dim3 block2(threadsperblock2); dim3 grid((threads + threadsperblock - 1) / threadsperblock); dim3 block(threadsperblock); yescrypt_gpu_hash_k0 << <grid3, block3>> >(threads, startNounce,d_hash2[thr_id],d_hash4[thr_id]); yescrypt_gpu_hash_k1 << <grid4, block4 >> >(threads, startNounce,d_hash3[thr_id],d_hash4[thr_id]); yescrypt_gpu_hash_k2c << <gridk2c, blockk2c >> >(threads, d_hash3[thr_id],d_hash4[thr_id]); yescrypt_gpu_hash_k2c1 << <gridk2c, blockk2c >> >(threads, d_hash3[thr_id],d_hash4[thr_id]); yescrypt_gpu_hash_k5 << <grid, block >> >(threads, startNounce, resNonce,d_hash2[thr_id],d_hash4[thr_id]); } __host__ void yescrypt_setBlockTarget(uint32_t* pdata, const void *target){ unsigned char PaddedMessage[128]; //bring balance to the force memcpy(PaddedMessage, pdata, 80); // memcpy(PaddedMessage+80, 0, 48); uint32_t pad3[16] = { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x80000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x000004a0 }; pad3[0] = pdata[16]; pad3[1] = pdata[17]; pad3[2] = pdata[18]; cudaMemcpyToSymbol(shapad, pad3, 16 * sizeof(uint32_t), 0, cudaMemcpyHostToDevice); CUDA_SAFE_CALL(cudaMemcpyToSymbol(pTarget, target, 8 * sizeof(uint32_t), 0, cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_data, PaddedMessage, 32 * sizeof(uint32_t), 0, cudaMemcpyHostToDevice)); }
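// yescrypt_gpu_hash_k5 reports winning nonces with a two-slot scheme: atomicExch places the
// newest hit in resNonces[0] and, if another thread had already written one, the displaced
// value is kept in resNonces[1]. A self-contained sketch of just that pattern; the kernel,
// array sizes and "hit" flags below are hypothetical and not part of the miner code above.
#include <cstdint>
#include <cstdio>
#include <cuda_runtime.h>
__global__ void report_hits(const uint32_t* hits, int n, uint32_t* resNonces) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n && hits[i]) {                        // pretend hits[i] != 0 means "share found"
    uint32_t prev = atomicExch(&resNonces[0], (uint32_t)i);
    if (prev != UINT32_MAX) resNonces[1] = prev; // keep one earlier hit as a second result
  }
}
int main() {
  uint32_t h_hits[8] = {0, 0, 1, 0, 0, 1, 0, 0};
  uint32_t *d_hits, *d_res, h_res[2];
  cudaMalloc(&d_hits, sizeof h_hits);
  cudaMalloc(&d_res, sizeof h_res);
  cudaMemcpy(d_hits, h_hits, sizeof h_hits, cudaMemcpyHostToDevice);
  cudaMemset(d_res, 0xFF, sizeof h_res);         // both slots start at UINT32_MAX, as in the miner
  report_hits<<<1, 8>>>(d_hits, 8, d_res);
  cudaMemcpy(h_res, d_res, sizeof h_res, cudaMemcpyDeviceToHost);
  printf("nonces: %u %u\n", h_res[0], h_res[1]); // which hit lands in which slot is not deterministic
  cudaFree(d_hits); cudaFree(d_res);
  return 0;
}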
#include "common.h" #include "bn.h" /* * Device functions and data structures */ struct Float2 { float v1, v2; __device__ Float2() {} __device__ Float2(float _v1, float _v2) : v1(_v1), v2(_v2) {} __device__ Float2(float v) : v1(v), v2(v) {} __device__ Float2(int v) : v1(v), v2(v) {} __device__ Float2 &operator+=(const Float2 &a) { v1 += a.v1; v2 += a.v2; return *this; } }; struct SumOp { __device__ SumOp(const float *t, int c, int s) : tensor(t), C(c), S(s) {} __device__ __forceinline__ float operator()(int batch, int plane, int n) { return tensor[(batch * C + plane) * S + n]; } const float *tensor; const int C; const int S; }; struct VarOp { __device__ VarOp(float m, const float *t, int c, int s) : mean(m), tensor(t), C(c), S(s) {} __device__ __forceinline__ float operator()(int batch, int plane, int n) { float val = tensor[(batch * C + plane) * S + n]; return (val - mean) * (val - mean); } const float mean; const float *tensor; const int C; const int S; }; struct GradOp { __device__ GradOp(float _gamma, float _beta, const float *_z, const float *_dz, int c, int s) : gamma(_gamma), beta(_beta), z(_z), dz(_dz), C(c), S(s) {} __device__ __forceinline__ Float2 operator()(int batch, int plane, int n) { float _y = (z[(batch * C + plane) * S + n] - beta) / gamma; float _dz = dz[(batch * C + plane) * S + n]; return Float2(_dz, _y * _dz); } const float gamma; const float beta; const float *z; const float *dz; const int C; const int S; }; static __device__ __forceinline__ float warpSum(float val) { #if __CUDA_ARCH__ >= 300 for (int i = 0; i < getMSB(WARP_SIZE); ++i) { val += WARP_SHFL_XOR(val, 1 << i, WARP_SIZE); } #else __shared__ float values[MAX_BLOCK_SIZE]; values[threadIdx.x] = val; __threadfence_block(); const int base = (threadIdx.x / WARP_SIZE) * WARP_SIZE; for (int i = 1; i < WARP_SIZE; i++) { val += values[base + ((i + threadIdx.x) % WARP_SIZE)]; } #endif return val; } static __device__ __forceinline__ Float2 warpSum(Float2 value) { value.v1 = warpSum(value.v1); value.v2 = warpSum(value.v2); return value; } template <typename T, typename Op> __device__ T reduce(Op op, int plane, int N, int C, int S) { T sum = (T)0; for (int batch = 0; batch < N; ++batch) { for (int x = threadIdx.x; x < S; x += blockDim.x) { sum += op(batch, plane, x); } } // sum over NumThreads within a warp sum = warpSum(sum); // 'transpose', and reduce within warp again __shared__ T shared[32]; __syncthreads(); if (threadIdx.x % WARP_SIZE == 0) { shared[threadIdx.x / WARP_SIZE] = sum; } if (threadIdx.x >= blockDim.x / WARP_SIZE && threadIdx.x < WARP_SIZE) { // zero out the other entries in shared shared[threadIdx.x] = (T)0; } __syncthreads(); if (threadIdx.x / WARP_SIZE == 0) { sum = warpSum(shared[threadIdx.x]); if (threadIdx.x == 0) { shared[0] = sum; } } __syncthreads(); // Everyone picks it up, should be broadcast into the whole gradInput return shared[0]; } /* * Kernels */ __global__ void mean_var_kernel(const float *x, float *mean, float *var, int N, int C, int S) { int plane = blockIdx.x; float norm = 1.f / (N * S); float _mean = reduce<float, SumOp>(SumOp(x, C, S), plane, N, C, S) * norm; __syncthreads(); float _var = reduce<float, VarOp>(VarOp(_mean, x, C, S), plane, N, C, S) * norm; if (threadIdx.x == 0) { mean[plane] = _mean; var[plane] = _var; } } __global__ void forward_kernel(const float *x, const float *mean, const float *var, const float *weight, const float *bias, float *y, float *z, float eps, int N, int C, int S) { int plane = blockIdx.x; float _mean = mean[plane]; float _var = var[plane]; float 
invStd = 0; if (_var != 0.f || eps != 0.f) { invStd = 1 / sqrt(_var + eps); } float gamma = weight != 0 ? weight[plane] : 1.f; float beta = bias != 0 ? bias[plane] : 0.f; for (int batch = 0; batch < N; ++batch) { for (int n = threadIdx.x; n < S; n += blockDim.x) { float _x = x[(batch * C + plane) * S + n]; float _y = (_x - _mean) * invStd; float _z = _y * gamma + beta; y[(batch * C + plane) * S + n] = _y; z[(batch * C + plane) * S + n] = _z; } } } __global__ void edz_eydz_kernel(const float *z, const float *dz, const float *weight, const float *bias, float *edz, float *eydz, int N, int C, int S) { int plane = blockIdx.x; float norm = 1.f / (N * S); float gamma = weight != 0 ? weight[plane] : 1.f; float beta = bias != 0 ? bias[plane] : 0.f; Float2 res = reduce<Float2, GradOp>(GradOp(gamma, beta, z, dz, C, S), plane, N, C, S); float _edz = res.v1 * norm; float _eydz = res.v2 * norm; __syncthreads(); if (threadIdx.x == 0) { edz[plane] = _edz; eydz[plane] = _eydz; } } __global__ void backward_kernel(const float *dz, const float *z, const float *var, const float *weight, const float *bias, const float *edz, const float *eydz, float *dx, float *dweight, float *dbias, float eps, int N, int C, int S) { int plane = blockIdx.x; float _edz = edz[plane]; float _eydz = eydz[plane]; float gamma = weight != 0 ? weight[plane] : 1.f; float beta = bias != 0 ? bias[plane] : 0.f; if (dx != 0) { float _var = var[plane]; float invStd = 0; if (_var != 0.f || eps != 0.f) { invStd = 1 / sqrt(_var + eps); } float mul = gamma * invStd; for (int batch = 0; batch < N; ++batch) { for (int n = threadIdx.x; n < S; n += blockDim.x) { float _dz = dz[(batch * C + plane) * S + n]; float _y = (z[(batch * C + plane) * S + n] - beta) / gamma; dx[(batch * C + plane) * S + n] = (_dz - _edz - _y * _eydz) * mul; } } } if (dweight != 0 || dbias != 0) { float norm = N * S; if (dweight != 0) { if (threadIdx.x == 0) { dweight[plane] += _eydz * norm; } } if (dbias != 0) { if (threadIdx.x == 0) { dbias[plane] += _edz * norm; } } } } /* * Implementations */ extern "C" int _bn_mean_var_cuda(int N, int C, int S, const float *x, float *mean, float *var, cudaStream_t stream) { // Run kernel dim3 blocks(C); dim3 threads(getNumThreads(S)); mean_var_kernel<<<blocks, threads, 0, stream>>>(x, mean, var, N, C, S); // Check for errors cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) return 0; else return 1; } extern "C" int _bn_forward_cuda(int N, int C, int S, const float *x, const float *mean, const float *var, const float *weight, const float *bias, float *y, float *z, float eps, cudaStream_t stream) { // Run kernel dim3 blocks(C); dim3 threads(getNumThreads(S)); forward_kernel<<<blocks, threads, 0, stream>>>(x, mean, var, weight, bias, y, z, eps, N, C, S); // Check for errors cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) return 0; else return 1; } extern "C" int _bn_edz_eydz_cuda(int N, int C, int S, const float *z, const float *dz, const float *weight, const float *bias, float *edz, float *eydz, cudaStream_t stream) { // Run kernel dim3 blocks(C); dim3 threads(getNumThreads(S)); edz_eydz_kernel<<<blocks, threads, 0, stream>>>(z, dz, weight, bias, edz, eydz, N, C, S); // Check for errors cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) return 0; else return 1; } extern "C" int _bn_backward_cuda(int N, int C, int S, const float *dz, const float *z, const float *var, const float *weight, const float *bias, const float *edz, const float *eydz, float *dx, float *dweight, float *dbias, float eps, cudaStream_t 
stream) { // Run kernel dim3 blocks(C); dim3 threads(getNumThreads(S)); backward_kernel<<<blocks, threads, 0, stream>>>(dz, z, var, weight, bias, edz, eydz, dx, dweight, dbias, eps, N, C, S); // Check for errors cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) return 0; else return 1; } extern "C" int _leaky_relu_cuda(int N, float *x, float slope, cudaStream_t stream) { // Run using thrust thrust::device_ptr<float> th_x = thrust::device_pointer_cast(x); thrust::transform_if(thrust::cuda::par.on(stream), th_x, th_x + N, th_x, [slope] __device__ (const float& x) { return x * slope; }, [] __device__ (const float& x) { return x < 0; }); // Check for errors cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) return 0; else return 1; } extern "C" int _leaky_relu_backward_cuda(int N, const float *x, float *dx, float slope, cudaStream_t stream) { // Run using thrust thrust::device_ptr<const float> th_x = thrust::device_pointer_cast(x); thrust::device_ptr<float> th_dx = thrust::device_pointer_cast(dx); thrust::transform_if(thrust::cuda::par.on(stream), th_dx, th_dx + N, th_x, th_dx, [slope] __device__ (const float& dx) { return dx * slope; }, [] __device__ (const float& x) { return x < 0; }); // Check for errors cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) return 0; else return 1; } extern "C" int _elu_cuda(int N, float *x, cudaStream_t stream) { // Run using thrust thrust::device_ptr<float> th_x = thrust::device_pointer_cast(x); thrust::transform_if(thrust::cuda::par.on(stream), th_x, th_x + N, th_x, [] __device__ (const float& x) { return exp(x) - 1.f; }, [] __device__ (const float& x) { return x < 0; }); // Check for errors cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) return 0; else return 1; } extern "C" int _elu_backward_cuda(int N, const float *x, float *dx, cudaStream_t stream) { // Run using thrust thrust::device_ptr<const float> th_x = thrust::device_pointer_cast(x); thrust::device_ptr<float> th_dx = thrust::device_pointer_cast(dx); thrust::transform_if(thrust::cuda::par.on(stream), th_dx, th_dx + N, th_x, th_x, th_dx, [] __device__ (const float& dx, const float& x) { return dx * (x + 1.f); }, [] __device__ (const float& x) { return x < 0; }); // Check for errors cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) return 0; else return 1; } extern "C" int _elu_inv_cuda(int N, float *x, cudaStream_t stream) { // Run using thrust thrust::device_ptr<float> th_x = thrust::device_pointer_cast(x); thrust::transform_if(thrust::cuda::par.on(stream), th_x, th_x + N, th_x, [] __device__ (const float& x) { return log1p(x); }, [] __device__ (const float& x) { return x < 0; }); // Check for errors cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) return 0; else return 1; }
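/*
 * Usage sketch (illustrative only): a minimal host-side driver showing how the extern "C"
 * entry points above could be called for a single N x C x S tensor. The tensor sizes, the
 * eps value, and the standalone prototypes below are assumptions made for this sketch; in
 * the original project these functions are driven through the wrappers declared in "bn.h".
 */
#include <cuda_runtime.h>
#include <cstdio>

extern "C" int _bn_mean_var_cuda(int N, int C, int S, const float *x, float *mean, float *var,
                                 cudaStream_t stream);
extern "C" int _bn_forward_cuda(int N, int C, int S, const float *x, const float *mean, const float *var,
                                const float *weight, const float *bias, float *y, float *z, float eps,
                                cudaStream_t stream);

static int bn_forward_example()
{
   const int    N     = 2, C = 16, S = 32*32;   // batch size, channels, spatial elements (assumed)
   const float  eps   = 1.e-5f;
   const size_t numel = (size_t)N*C*S;

   float *d_x, *d_y, *d_z, *d_mean, *d_var, *d_weight, *d_bias;
   cudaMalloc( (void**)&d_x,      numel*sizeof(float) );
   cudaMalloc( (void**)&d_y,      numel*sizeof(float) );
   cudaMalloc( (void**)&d_z,      numel*sizeof(float) );
   cudaMalloc( (void**)&d_mean,   C*sizeof(float) );
   cudaMalloc( (void**)&d_var,    C*sizeof(float) );
   cudaMalloc( (void**)&d_weight, C*sizeof(float) );
   cudaMalloc( (void**)&d_bias,   C*sizeof(float) );

   // ... upload x, weight, and bias from the host here (omitted) ...

   cudaStream_t stream;
   cudaStreamCreate( &stream );

   // per-channel statistics first (one block per channel), then the fused normalize + affine transform
   int ok = _bn_mean_var_cuda( N, C, S, d_x, d_mean, d_var, stream );
   if ( ok )  ok = _bn_forward_cuda( N, C, S, d_x, d_mean, d_var, d_weight, d_bias, d_y, d_z, eps, stream );
   cudaStreamSynchronize( stream );

   if ( !ok )  fprintf( stderr, "batch-norm kernels reported a CUDA error\n" );

   cudaStreamDestroy( stream );
   cudaFree( d_x );  cudaFree( d_y );  cudaFree( d_z );
   cudaFree( d_mean );  cudaFree( d_var );  cudaFree( d_weight );  cudaFree( d_bias );

   return ok;
}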
#include "CUFLU.h" #if ( MODEL == HYDRO && ( FLU_SCHEME == MHM || FLU_SCHEME == MHM_RP || FLU_SCHEME == CTU ) ) // external functions #ifdef __CUDACC__ #include "CUFLU_Shared_FluUtility.cu" #else void Hydro_Rotate3D( real InOut[], const int XYZ, const bool Forward, const int Mag_Offset ); void Hydro_Con2Pri( const real In[], real Out[], const real MinPres, const bool FracPassive, const int NFrac, const int FracIdx[], const bool JeansMinPres, const real JeansMinPres_Coeff, const EoS_DE2P_t EoS_DensEint2Pres, const EoS_DP2E_t EoS_DensPres2Eint, const double EoS_AuxArray_Flt[], const int EoS_AuxArray_Int[], const real *const EoS_Table[EOS_NTABLE_MAX], real* const EintOut ); void Hydro_Pri2Con( const real In[], real Out[], const bool FracPassive, const int NFrac, const int FracIdx[], const EoS_DP2E_t EoS_DensPres2Eint, const double EoS_AuxArray_Flt[], const int EoS_AuxArray_Int[], const real *const EoS_Table[EOS_NTABLE_MAX], const real* const EintIn ); #if ( FLU_SCHEME == MHM ) void Hydro_Con2Flux( const int XYZ, real Flux[], const real In[], const real MinPres, const EoS_DE2P_t EoS_DensEint2Pres, const double EoS_AuxArray_Flt[], const int EoS_AuxArray_Int[], const real *const EoS_Table[EOS_NTABLE_MAX], const real* const PresIn ); #endif #endif // #ifdef __CUDACC__ ... else ... // internal functions (GPU_DEVICE is defined in CUFLU.h) GPU_DEVICE static void Hydro_LimitSlope( const real L[], const real C[], const real R[], const LR_Limiter_t LR_Limiter, const real MinMod_Coeff, const int XYZ, const real LEigenVec[][NWAVE], const real REigenVec[][NWAVE], real Slope_Limiter[], const EoS_t *EoS ); #if ( FLU_SCHEME == CTU || ( defined MHD && defined CHAR_RECONSTRUCTION ) ) #ifdef MHD GPU_DEVICE static void MHD_GetEigenSystem( const real CC_Var[], real EigenVal[], real LEigenVec[][NWAVE], real REigenVec[][NWAVE], const EoS_t *EoS, const int XYZ ); #else GPU_DEVICE static void Hydro_GetEigenSystem( const real CC_Var[], real EigenVal[][NWAVE], real LEigenVec[][NWAVE], real REigenVec[][NWAVE], const EoS_t *EoS ); #endif #endif #if ( FLU_SCHEME == MHM ) GPU_DEVICE static void Hydro_HancockPredict( real fc[][NCOMP_LR], const real dt, const real dh, const real g_cc_array[][ CUBE(FLU_NXT) ], const int cc_idx, const real MinDens, const real MinPres, const real MinEint, const EoS_t *EoS ); #endif #ifdef CHAR_RECONSTRUCTION GPU_DEVICE static void Hydro_Pri2Char( real InOut[], const real Dens, const real Pres, const real LEigenVec[][NWAVE], const int XYZ, const EoS_t *EoS ); GPU_DEVICE static void Hydro_Char2Pri( real InOut[], const real Dens, const real Pres, const real REigenVec[][NWAVE], const int XYZ, const EoS_t *EoS ); #endif // macro for adding MHD source terms in CTU #if ( defined MHD && FLU_SCHEME == CTU ) # define MINMOD( a , b ) ( ( (a)*(b)>(real)0.0 ) ? ( SIGN(a)*FMIN(FABS(a),FABS(b)) ) : (real)0.0 ) #endif #if ( LR_SCHEME == PLM ) //------------------------------------------------------------------------------------------------------- // Function : Hydro_DataReconstruction // Description : Reconstruct the face-centered variables by the piecewise-linear method (PLM) // // Note : 1. Use the parameter "LR_Limiter" to choose different slope limiters // 2. 
Input data can be either conserved or primitive variables // --> If the input data are conserved variables, one must provide g_ConVar[] and enable "Con2Pri" // --> Primitive variables will be calculated by this function and stored in g_PriVar[] // --> g_PriVar[] must be allocated in advance but it doesn't need to be initialized // --> Adopted by MHM and CTU // --> If the input data are primitive variables, one must provide g_PriVar[] and disable "Con2Pri" // --> g_ConVar[] is useless here // --> Adopted by MHM_RP, where Hydro_RiemannPredict() already returns primitive variables // 3. Output data are always conserved variables // --> Because Hydro_HancockPredict() only works with conserved variables // 4. PLM and PPM data reconstruction functions share the same function name // 5. Face-centered variables will be advanced by half time-step for the MHM and CTU schemes // 6. Data reconstruction can be applied to characteristic variables by // defining "CHAR_RECONSTRUCTION" in the header CUFLU.h // 7. This function is shared by MHM, MHM_RP, and CTU schemes // 8. g_FC_B[] has the size of SQR(FLU_NXT)*FLU_NXT_P1 but is accessed with the strides // NIn/NIn+1 along the transverse/longitudinal directions // 9. Support applying data reconstruction to internal energy and using that instead of pressure // for converting primitive variables to conserved variables // --> Controlled by the option "LR_EINT" in CUFLU.h; see the description thereof for details // // Parameter : g_ConVar : Array storing the input cell-centered conserved variables // --> Should contain NCOMP_TOTAL variables // g_FC_B : Array storing the input face-centered magnetic field (for MHD only) // --> Should contain NCOMP_MAG variables // g_PriVar : Array storing/to store the cell-centered primitive variables // --> Should contain NCOMP_LR variables // --> Store internal energy as the last variable when LR_EINT is on // --> For MHD, this array currently stores the normal B field as well // --> For MHM, g_ConVar[] and g_PriVar[] must point to different arrays since // Hydro_HancockPredict() requires the original g_ConVar[] // g_FC_Var : Array to store the output face-centered conserved variables // --> Should contain NCOMP_TOTAL_PLUS_MAG variables // g_Slope_PPM : Array to store the x/y/z slopes for the PPM reconstruction // --> Should contain NCOMP_LR variables // --> Store internal energy as the last variable when LR_EINT is on // --> Useless for PLM // Con2Pri : Convert conserved variables in g_ConVar[] to primitive variables and // store the results in g_PriVar[] // NIn : Size of g_PriVar[] along each direction // --> Can be smaller than FLU_NXT // NGhost : Number of ghost zones // --> "NIn-2*NGhost" cells will be computed along each direction // --> Size of g_FC_Var[] is assumed to be "(NIn-2*NGhost)^3" // --> The reconstructed data at cell (i,j,k) will be stored in g_FC_Var[] // with the index "(i-NGhost,j-NGhost,k-NGhost)" // LR_Limiter : Slope limiter for the data reconstruction in the MHM/MHM_RP/CTU schemes // (0/1/2/3) = (vanLeer/generalized MinMod/vanAlbada/vanLeer+generalized MinMod) limiter // MinMod_Coeff : Coefficient of the generalized MinMod limiter // dt : Time interval to advance solution (for the CTU scheme) // dh : Cell size // MinDens/Pres/Eint : Density, pressure, and internal energy floors // FracPassive : true --> convert passive scalars to mass fraction during data reconstruction // NFrac : Number of passive scalars for the option "FracPassive" // FracIdx : Target variable indices for the option "FracPassive" 
// JeansMinPres : Apply minimum pressure estimated from the Jeans length // JeansMinPres_Coeff : Coefficient used by JeansMinPres = G*(Jeans_NCell*Jeans_dh)^2/(Gamma*pi); // EoS : EoS object //------------------------------------------------------------------------------------------------------ GPU_DEVICE void Hydro_DataReconstruction( const real g_ConVar [][ CUBE(FLU_NXT) ], const real g_FC_B [][ SQR(FLU_NXT)*FLU_NXT_P1 ], real g_PriVar [][ CUBE(FLU_NXT) ], real g_FC_Var [][NCOMP_TOTAL_PLUS_MAG][ CUBE(N_FC_VAR) ], real g_Slope_PPM[][NCOMP_LR ][ CUBE(N_SLOPE_PPM) ], const bool Con2Pri, const LR_Limiter_t LR_Limiter, const real MinMod_Coeff, const real dt, const real dh, const real MinDens, const real MinPres, const real MinEint, const bool FracPassive, const int NFrac, const int FracIdx[], const bool JeansMinPres, const real JeansMinPres_Coeff, const EoS_t *EoS ) { //### NOTE: temporary solution to the bug in cuda 10.1 and 10.2 that incorrectly overwrites didx_cc[] # if ( FLU_SCHEME == MHM ) const int NIn = FLU_NXT; # elif ( FLU_SCHEME == MHM_RP ) const int NIn = N_HF_VAR; # elif ( FLU_SCHEME == CTU ) const int NIn = FLU_NXT; # else # error : ERROR : unsupported FLU_SCHEME !! # endif const int NGhost = LR_GHOST_SIZE; // check # ifdef GAMER_DEBUG if ( NIn - 2*NGhost != N_FC_VAR ) printf( "ERROR : NIn - 2*NGhost != N_FC_VAR (NIn %d, NGhost %d, N_FC_VAR %d) !!\n", NIn, NGhost, N_FC_VAR ); # if ( defined LR_EINT && FLU_SCHEME == CTU ) # error : CTU does NOT support LR_EINT !! # endif # endif // GAMER_DEBUG const int didx_cc[3] = { 1, NIn, SQR(NIn) }; # if ( FLU_SCHEME == CTU ) const real dt_dh2 = (real)0.5*dt/dh; // index mapping between arrays with size NWAVE and NCOMP_TOTAL_PLUS_MAG/NCOMP_LR # ifdef MHD const int idx_wave[NWAVE] = { 0, 1, 2, 3, 4, MAG_OFFSET+1, MAG_OFFSET+2 }; # else const int idx_wave[NWAVE] = { 0, 1, 2, 3, 4 }; # endif # endif // #if ( FLU_SCHEME == CTU ) // eigenvalues and eigenvectors // --> constant components of the left and right eigenvector matrices must be initialized # if ( FLU_SCHEME == CTU ) real EigenVal[3][NWAVE]; # ifdef MHD real REigenVec[NWAVE][NWAVE] = { { NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL }, { 0.0, 0.0, NULL_REAL, NULL_REAL, 0.0, NULL_REAL, NULL_REAL }, { NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL }, { 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 }, { NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL }, { 0.0, 0.0, NULL_REAL, NULL_REAL, 0.0, NULL_REAL, NULL_REAL }, { NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL } }; real LEigenVec[NWAVE][NWAVE] = { { 0.0, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL }, { 0.0, 0.0, NULL_REAL, NULL_REAL, 0.0, NULL_REAL, NULL_REAL }, { 0.0, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL }, { 1.0, 0.0, 0.0, 0.0, NULL_REAL, 0.0, 0.0 }, { 0.0, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL }, { 0.0, 0.0, NULL_REAL, NULL_REAL, 0.0, NULL_REAL, NULL_REAL }, { 0.0, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL } }; # else real LEigenVec[NWAVE][NWAVE] = { { 0.0, NULL_REAL, 0.0, 0.0, NULL_REAL }, { 1.0, 0.0, 0.0, 0.0, NULL_REAL }, { 0.0, 0.0, 1.0, 0.0, 0.0 }, { 0.0, 0.0, 0.0, 1.0, 0.0 }, { 0.0, NULL_REAL, 0.0, 0.0, NULL_REAL } }; real REigenVec[NWAVE][NWAVE] = { { 1.0, NULL_REAL, 0.0, 0.0, NULL_REAL }, { 1.0, 0.0, 0.0, 0.0, 0.0 }, { 0.0, 0.0, 1.0, 0.0, 0.0 }, { 0.0, 0.0, 0.0, 1.0, 0.0 }, { 1.0, NULL_REAL, 0.0, 0.0, NULL_REAL } }; # endif // 
#ifdef MHD ... else ... # elif ( defined MHD && defined CHAR_RECONSTRUCTION ) // #if ( FLU_SCHEME == CTU ) real EigenVal[3][NWAVE]; real REigenVec[NWAVE][NWAVE] = { { NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL }, { 0.0, 0.0, NULL_REAL, NULL_REAL, 0.0, NULL_REAL, NULL_REAL }, { NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL }, { 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 }, { NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL }, { 0.0, 0.0, NULL_REAL, NULL_REAL, 0.0, NULL_REAL, NULL_REAL }, { NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL } }; real LEigenVec[NWAVE][NWAVE] = { { 0.0, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL }, { 0.0, 0.0, NULL_REAL, NULL_REAL, 0.0, NULL_REAL, NULL_REAL }, { 0.0, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL }, { 1.0, 0.0, 0.0, 0.0, NULL_REAL, 0.0, 0.0 }, { 0.0, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL }, { 0.0, 0.0, NULL_REAL, NULL_REAL, 0.0, NULL_REAL, NULL_REAL }, { 0.0, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL } }; # else // #if ( FLU_SCHEME == CTU ) ... elif ... real (*const REigenVec)[NWAVE] = NULL; real (*const LEigenVec)[NWAVE] = NULL; # endif // #if ( FLU_SCHEME == CTU ) ... elif ... else ... // 0. conserved --> primitive variables if ( Con2Pri ) { real ConVar_1Cell[NCOMP_TOTAL_PLUS_MAG], PriVar_1Cell[NCOMP_TOTAL_PLUS_MAG]; # ifdef LR_EINT real Eint; real* const EintPtr = &Eint; # else real* const EintPtr = NULL; # endif CGPU_LOOP( idx, CUBE(NIn) ) { for (int v=0; v<NCOMP_TOTAL; v++) ConVar_1Cell[v] = g_ConVar[v][idx]; # ifdef MHD // assuming that g_FC_B[] is accessed with the strides NIn/NIn+1 along the transverse/longitudinal directions const int size_ij = SQR( NIn ); const int i = idx % NIn; const int j = idx % size_ij / NIn; const int k = idx / size_ij; MHD_GetCellCenteredBField( ConVar_1Cell+NCOMP_TOTAL, g_FC_B[0], g_FC_B[1], g_FC_B[2], NIn, NIn, NIn, i, j, k ); # endif Hydro_Con2Pri( ConVar_1Cell, PriVar_1Cell, MinPres, FracPassive, NFrac, FracIdx, JeansMinPres, JeansMinPres_Coeff, EoS->DensEint2Pres_FuncPtr, EoS->DensPres2Eint_FuncPtr, EoS->AuxArrayDevPtr_Flt, EoS->AuxArrayDevPtr_Int, EoS->Table, EintPtr ); for (int v=0; v<NCOMP_TOTAL_PLUS_MAG; v++) g_PriVar[v][idx] = PriVar_1Cell[v]; # ifdef LR_EINT g_PriVar[NCOMP_TOTAL_PLUS_MAG][idx] = Hydro_CheckMinEint( Eint, MinEint ); // store Eint in the last variable # endif } // CGPU_LOOP( idx, CUBE(NIn) ) # ifdef __CUDACC__ __syncthreads(); # endif } // if ( Con2Pri ) // data reconstruction const int N_FC_VAR2 = SQR( N_FC_VAR ); # ifdef MHD const int NIn_p1 = NIn + 1; int idx_B[NCOMP_MAG]; # endif CGPU_LOOP( idx_fc, CUBE(N_FC_VAR) ) { const int i_cc = NGhost + idx_fc%N_FC_VAR; const int j_cc = NGhost + idx_fc%N_FC_VAR2/N_FC_VAR; const int k_cc = NGhost + idx_fc/N_FC_VAR2; const int idx_cc = IDX321( i_cc, j_cc, k_cc, NIn, NIn ); # ifdef MHD // assuming that g_FC_B[] is accessed with the strides NIn/NIn+1 along the transverse/longitudinal directions idx_B[0] = IDX321( i_cc, j_cc, k_cc, NIn_p1, NIn ); idx_B[1] = IDX321( i_cc, j_cc, k_cc, NIn, NIn_p1 ); idx_B[2] = IDX321( i_cc, j_cc, k_cc, NIn, NIn ); # endif // cc_C/L/R: cell-centered variables of the Central/Left/Right cells // fc: face-centered variables of the central cell real cc_C[NCOMP_LR], cc_L[NCOMP_LR], cc_R[NCOMP_LR]; real fc[6][NCOMP_LR], Slope_Limiter[NCOMP_LR]; for (int v=0; v<NCOMP_LR; v++) cc_C[v] = g_PriVar[v][idx_cc]; // 1-a. 
evaluate the eigenvalues and eigenvectors along all three directions for the pure-hydro CTU integrator # if ( !defined MHD && FLU_SCHEME == CTU ) Hydro_GetEigenSystem( cc_C, EigenVal, LEigenVec, REigenVec, EoS ); # endif // loop over different spatial directions for (int d=0; d<3; d++) { // 1-b. evaluate the eigenvalues and eigenvectors along the target direction for the MHD CTU integrator # if ( defined MHD && ( FLU_SCHEME == CTU || defined CHAR_RECONSTRUCTION ) ) MHD_GetEigenSystem( cc_C, EigenVal[d], LEigenVec, REigenVec, EoS, d ); # endif // 2. evaluate the monotonic slope const int faceL = 2*d; // left and right face indices const int faceR = faceL+1; const int idx_ccL = idx_cc - didx_cc[d]; const int idx_ccR = idx_cc + didx_cc[d]; for (int v=0; v<NCOMP_LR; v++) { cc_L[v] = g_PriVar[v][idx_ccL]; cc_R[v] = g_PriVar[v][idx_ccR]; } Hydro_LimitSlope( cc_L, cc_C, cc_R, LR_Limiter, MinMod_Coeff, d, LEigenVec, REigenVec, Slope_Limiter, EoS ); // 3. get the face-centered primitive variables for (int v=0; v<NCOMP_LR; v++) { fc[faceL][v] = cc_C[v] - (real)0.5*Slope_Limiter[v]; fc[faceR][v] = cc_C[v] + (real)0.5*Slope_Limiter[v]; } // ensure the face-centered variables lie between neighboring cell-centered values for (int v=0; v<NCOMP_LR; v++) { real Min, Max; Min = ( cc_C[v] < cc_L[v] ) ? cc_C[v] : cc_L[v]; Max = ( cc_C[v] > cc_L[v] ) ? cc_C[v] : cc_L[v]; fc[faceL][v] = ( fc[faceL][v] > Min ) ? fc[faceL][v] : Min; fc[faceL][v] = ( fc[faceL][v] < Max ) ? fc[faceL][v] : Max; fc[faceR][v] = (real)2.0*cc_C[v] - fc[faceL][v]; Min = ( cc_C[v] < cc_R[v] ) ? cc_C[v] : cc_R[v]; Max = ( cc_C[v] > cc_R[v] ) ? cc_C[v] : cc_R[v]; fc[faceR][v] = ( fc[faceR][v] > Min ) ? fc[faceR][v] : Min; fc[faceR][v] = ( fc[faceR][v] < Max ) ? fc[faceR][v] : Max; fc[faceL][v] = (real)2.0*cc_C[v] - fc[faceR][v]; } // 4. advance the face-centered variables by half time-step for the CTU integrator # if ( FLU_SCHEME == CTU ) # ifdef LR_EINT # error : CTU does NOT support LR_EINT !! # endif real Coeff_L, Coeff_R; real Correct_L[NCOMP_LR], Correct_R[NCOMP_LR], dfc[NCOMP_LR]; // 4-1. evaluate the slope (for passive scalars as well) for (int v=0; v<NCOMP_LR; v++) dfc[v] = fc[faceR][v] - fc[faceL][v]; // 4-2. re-order variables for the y/z directions Hydro_Rotate3D( dfc, d, true, MAG_OFFSET ); // ===================================================================================== // a. for the HLL solvers (HLLE/HLLC/HLLD) // ===================================================================================== # if ( ( RSOLVER == HLLE || RSOLVER == HLLC || RSOLVER == HLLD ) && defined HLL_NO_REF_STATE ) // 4-2-a1. 
evaluate the corrections to the left and right face-centered variables for (int v=0; v<NWAVE; v++) { Correct_L[ idx_wave[v] ] = (real)0.0; Correct_R[ idx_wave[v] ] = (real)0.0; } # ifdef HLL_INCLUDE_ALL_WAVES for (int Mode=0; Mode<NWAVE; Mode++) { Coeff_L = (real)0.0; for (int v=0; v<NWAVE; v++) Coeff_L += LEigenVec[Mode][v]*dfc[ idx_wave[v] ]; Coeff_L *= -dt_dh2*EigenVal[d][Mode]; for (int v=0; v<NWAVE; v++) Correct_L[ idx_wave[v] ] += Coeff_L*REigenVec[Mode][v]; } // for (int Mode=0; Mode<NWAVE; Mode++) for (int v=0; v<NWAVE; v++) Correct_R[ idx_wave[v] ] = Correct_L[ idx_wave[v] ]; # else // #ifdef HLL_INCLUDE_ALL_WAVES for (int Mode=0; Mode<NWAVE; Mode++) { Coeff_L = (real)0.0; Coeff_R = (real)0.0; if ( EigenVal[d][Mode] <= (real)0.0 ) { for (int v=0; v<NWAVE; v++) Coeff_L += LEigenVec[Mode][v]*dfc[ idx_wave[v] ]; Coeff_L *= -dt_dh2*EigenVal[d][Mode]; for (int v=0; v<NWAVE; v++) Correct_L[ idx_wave[v] ] += Coeff_L*REigenVec[Mode][v]; } if ( EigenVal[d][Mode] >= (real)0.0 ) { for (int v=0; v<NWAVE; v++) Coeff_R += LEigenVec[Mode][v]*dfc[ idx_wave[v] ]; Coeff_R *= -dt_dh2*EigenVal[d][Mode]; for (int v=0; v<NWAVE; v++) Correct_R[ idx_wave[v] ] += Coeff_R*REigenVec[Mode][v]; } } // for (int Mode=0; Mode<NWAVE; Mode++) # endif // #ifdef HLL_INCLUDE_ALL_WAVES ... else ... // ===================================================================================== // b. for the Roe's and exact solvers // ===================================================================================== # else // ( RSOLVER == ROE/EXACT || ifndef HLL_NO_REF_STATE ) // 4-2-b1. evaluate the reference states Coeff_L = -dt_dh2*FMIN( EigenVal[d][ 0 ], (real)0.0 ); Coeff_R = -dt_dh2*FMAX( EigenVal[d][ NWAVE-1 ], (real)0.0 ); for (int v=0; v<NWAVE; v++) { Correct_L[ idx_wave[v] ] = Coeff_L*dfc[ idx_wave[v] ]; Correct_R[ idx_wave[v] ] = Coeff_R*dfc[ idx_wave[v] ]; } // 4-2-b2. evaluate the corrections to the left and right face-centered variables for (int Mode=0; Mode<NWAVE; Mode++) { Coeff_L = (real)0.0; Coeff_R = (real)0.0; if ( EigenVal[d][Mode] <= (real)0.0 ) { for (int v=0; v<NWAVE; v++) Coeff_L += LEigenVec[Mode][v]*dfc[ idx_wave[v] ]; Coeff_L *= dt_dh2*( EigenVal[d][0] - EigenVal[d][Mode] ); for (int v=0; v<NWAVE; v++) Correct_L[ idx_wave[v] ] += Coeff_L*REigenVec[Mode][v]; } if ( EigenVal[d][Mode] >= (real)0.0 ) { for (int v=0; v<NWAVE; v++) Coeff_R += LEigenVec[Mode][v]*dfc[ idx_wave[v] ]; Coeff_R *= dt_dh2*( EigenVal[d][ NWAVE-1 ] - EigenVal[d][Mode] ); for (int v=0; v<NWAVE; v++) Correct_R[ idx_wave[v] ] += Coeff_R*REigenVec[Mode][v]; } } // for (int Mode=0; Mode<NWAVE; Mode++) # endif // if ( ( RSOLVER == HLLE || RSOLVER == HLLC || RSOLVER == HLLD ) && defined HLL_NO_REF_STATE ) ... else ... // 4-3. evaluate the corrections to the left and right face-centered passive scalars // --> passive scalars travel with fluid velocity (i.e., entropy mode) # if ( NCOMP_PASSIVE > 0 ) Coeff_L = -dt_dh2*FMIN( EigenVal[d][1], (real)0.0 ); Coeff_R = -dt_dh2*FMAX( EigenVal[d][1], (real)0.0 ); for (int v=NCOMP_FLUID; v<NCOMP_TOTAL; v++) { Correct_L[v] = Coeff_L*dfc[v]; Correct_R[v] = Coeff_R*dfc[v]; } # endif // 4-4. 
add the MHD source terms # ifdef MHD const int t1 = (d+1)%3; // transverse direction 1 const int t2 = (d+2)%3; // transverse direction 2 real B_nL, B_nR, B_t1L, B_t1R, B_t2L, B_t2R; real dB_n, dB_t1, dB_t2, v_t1, v_t2, src_t1, src_t2; B_nL = g_FC_B[d ][ idx_B[d ] ]; B_t1L = g_FC_B[t1][ idx_B[t1] ]; B_t2L = g_FC_B[t2][ idx_B[t2] ]; B_nR = g_FC_B[d ][ idx_B[d ] + didx_cc[d ] ]; B_t1R = g_FC_B[t1][ idx_B[t1] + didx_cc[t1] ]; B_t2R = g_FC_B[t2][ idx_B[t2] + didx_cc[t2] ]; dB_n = B_nR - B_nL; dB_t1 = B_t1R - B_t1L; dB_t2 = B_t2R - B_t2L; v_t1 = cc_C[ 1 + t1 ]; v_t2 = cc_C[ 1 + t2 ]; src_t1 = dt_dh2*v_t1*MINMOD( dB_n, -dB_t1 ); src_t2 = dt_dh2*v_t2*MINMOD( dB_n, -dB_t2 ); Correct_L[ MAG_OFFSET + 1 ] += src_t1; Correct_R[ MAG_OFFSET + 1 ] += src_t1; Correct_L[ MAG_OFFSET + 2 ] += src_t2; Correct_R[ MAG_OFFSET + 2 ] += src_t2; # endif // #ifdef MHD // 4-5. evaluate the face-centered variables at the half time-step Hydro_Rotate3D( Correct_L, d, false, MAG_OFFSET ); Hydro_Rotate3D( Correct_R, d, false, MAG_OFFSET ); for (int v=0; v<NCOMP_LR; v++) { fc[faceL][v] += Correct_L[v]; fc[faceR][v] += Correct_R[v]; } // 4-6. apply density and pressure floors fc[faceL][0] = FMAX( fc[faceL][0], MinDens ); fc[faceR][0] = FMAX( fc[faceR][0], MinDens ); fc[faceL][4] = Hydro_CheckMinPres( fc[faceL][4], MinPres ); fc[faceR][4] = Hydro_CheckMinPres( fc[faceR][4], MinPres ); # if ( NCOMP_PASSIVE > 0 ) for (int v=NCOMP_FLUID; v<NCOMP_TOTAL; v++) { fc[faceL][v] = FMAX( fc[faceL][v], TINY_NUMBER ); fc[faceR][v] = FMAX( fc[faceR][v], TINY_NUMBER ); } # endif # endif // #if ( FLU_SCHEME == CTU ) // 5. reset the longitudinal B field to the input face-centered values // --> actually no data reconstruction is required for that //###OPTIMIZARION: do not perform data reconstruction for the longitudinal B field # ifdef MHD # if ( FLU_SCHEME != CTU ) const real B_nL = g_FC_B[d][ idx_B[d] ]; const real B_nR = g_FC_B[d][ idx_B[d] + didx_cc[d] ]; # endif fc[faceL][ MAG_OFFSET + d ] = B_nL; fc[faceR][ MAG_OFFSET + d ] = B_nR; # endif // #ifdef MHD // 6. primitive variables --> conserved variables // --> When LR_EINT is on, use the reconstructed internal energy instead of pressure in Hydro_Pri2Con() // to skip expensive EoS conversion real tmp[NCOMP_LR]; // input and output arrays must not overlap for Pri2Con() # ifdef LR_EINT real* const EintPtr = tmp + NCOMP_TOTAL_PLUS_MAG; # else real* const EintPtr = NULL; # endif for (int v=0; v<NCOMP_LR; v++) tmp[v] = fc[faceL][v]; Hydro_Pri2Con( tmp, fc[faceL], FracPassive, NFrac, FracIdx, EoS->DensPres2Eint_FuncPtr, EoS->AuxArrayDevPtr_Flt, EoS->AuxArrayDevPtr_Int, EoS->Table, EintPtr ); for (int v=0; v<NCOMP_LR; v++) tmp[v] = fc[faceR][v]; Hydro_Pri2Con( tmp, fc[faceR], FracPassive, NFrac, FracIdx, EoS->DensPres2Eint_FuncPtr, EoS->AuxArrayDevPtr_Flt, EoS->AuxArrayDevPtr_Int, EoS->Table, EintPtr ); } // for (int d=0; d<3; d++) # if ( FLU_SCHEME == MHM ) // 7. advance the face-centered variables by half time-step for the MHM integrator Hydro_HancockPredict( fc, dt, dh, g_ConVar, idx_cc, MinDens, MinPres, MinEint, EoS ); # endif // 8. 
store the face-centered values to the output array // --> use NCOMP_TOTAL_PLUS_MAG instead of LR_EINT since we don't need to store internal energy in g_FC_Var[] for (int f=0; f<6; f++) for (int v=0; v<NCOMP_TOTAL_PLUS_MAG; v++) g_FC_Var[f][v][idx_fc] = fc[f][v]; } // CGPU_LOOP( idx_fc, CUBE(N_FC_VAR) ) # ifdef __CUDACC__ __syncthreads(); # endif } // FUNCTION : Hydro_DataReconstruction (PLM) #endif // #if ( LR_SCHEME == PLM ) #if ( LR_SCHEME == PPM ) //------------------------------------------------------------------------------------------------------- // Function : Hydro_DataReconstruction // Description : Reconstruct the face-centered variables by the piecewise-parabolic method (PPM) // // Note : See the PLM routine // // Parameter : See the PLM routine //------------------------------------------------------------------------------------------------------ GPU_DEVICE void Hydro_DataReconstruction( const real g_ConVar [][ CUBE(FLU_NXT) ], const real g_FC_B [][ SQR(FLU_NXT)*FLU_NXT_P1 ], real g_PriVar [][ CUBE(FLU_NXT) ], real g_FC_Var [][NCOMP_TOTAL_PLUS_MAG][ CUBE(N_FC_VAR) ], real g_Slope_PPM[][NCOMP_LR ][ CUBE(N_SLOPE_PPM) ], const bool Con2Pri, const LR_Limiter_t LR_Limiter, const real MinMod_Coeff, const real dt, const real dh, const real MinDens, const real MinPres, const real MinEint, const bool FracPassive, const int NFrac, const int FracIdx[], const bool JeansMinPres, const real JeansMinPres_Coeff, const EoS_t *EoS ) { //### NOTE: temporary solution to the bug in cuda 10.1 and 10.2 that incorrectly overwrites didx_cc[] # if ( FLU_SCHEME == MHM ) const int NIn = FLU_NXT; # elif ( FLU_SCHEME == MHM_RP ) const int NIn = N_HF_VAR; # elif ( FLU_SCHEME == CTU ) const int NIn = FLU_NXT; # else # error : ERROR : unsupported FLU_SCHEME !! # endif const int NGhost = LR_GHOST_SIZE; // check # ifdef GAMER_DEBUG if ( NIn - 2*NGhost != N_FC_VAR ) printf( "ERROR : NIn - 2*NGhost != N_FC_VAR (NIn %d, NGhost %d, N_FC_VAR %d) !!\n", NIn, NGhost, N_FC_VAR ); # if ( N_SLOPE_PPM != N_FC_VAR + 2 ) # error : ERROR : N_SLOPE_PPM != N_FC_VAR + 2 !! # endif # if ( defined LR_EINT && FLU_SCHEME == CTU ) # error : CTU does NOT support LR_EINT !! 
# endif # endif // GAMER_DEBUG const int didx_cc [3] = { 1, NIn, SQR(NIn) }; const int didx_slope[3] = { 1, N_SLOPE_PPM, SQR(N_SLOPE_PPM) }; # if ( FLU_SCHEME == CTU ) const real dt_dh2 = (real)0.5*dt/dh; // index mapping between arrays with size NWAVE and NCOMP_TOTAL_PLUS_MAG/NCOMP_LR # ifdef MHD const int idx_wave[NWAVE] = { 0, 1, 2, 3, 4, MAG_OFFSET+1, MAG_OFFSET+2 }; # else const int idx_wave[NWAVE] = { 0, 1, 2, 3, 4 }; # endif // include waves both from left and right directions during the data reconstruction, as suggested in ATHENA # if ( ( RSOLVER == HLLE || RSOLVER == HLLC || RSOLVER == HLLD ) && defined HLL_NO_REF_STATE ) # ifdef HLL_INCLUDE_ALL_WAVES const bool HLL_Include_All_Waves = true; # else const bool HLL_Include_All_Waves = false; # endif # endif // if ( ( RSOLVER == HLLE || RSOLVER == HLLC || RSOLVER == HLLD ) && defined HLL_NO_REF_STATE ) # endif // #if ( FLU_SCHEME == CTU ) // eigenvalues and eigenvectors // --> constant components of the left and right eigenvector matrices must be initialized # if ( FLU_SCHEME == CTU ) real EigenVal[3][NWAVE]; # ifdef MHD real REigenVec[NWAVE][NWAVE] = { { NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL }, { 0.0, 0.0, NULL_REAL, NULL_REAL, 0.0, NULL_REAL, NULL_REAL }, { NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL }, { 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 }, { NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL }, { 0.0, 0.0, NULL_REAL, NULL_REAL, 0.0, NULL_REAL, NULL_REAL }, { NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL } }; real LEigenVec[NWAVE][NWAVE] = { { 0.0, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL }, { 0.0, 0.0, NULL_REAL, NULL_REAL, 0.0, NULL_REAL, NULL_REAL }, { 0.0, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL }, { 1.0, 0.0, 0.0, 0.0, NULL_REAL, 0.0, 0.0 }, { 0.0, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL }, { 0.0, 0.0, NULL_REAL, NULL_REAL, 0.0, NULL_REAL, NULL_REAL }, { 0.0, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL } }; # else real LEigenVec[NWAVE][NWAVE] = { { 0.0, NULL_REAL, 0.0, 0.0, NULL_REAL }, { 1.0, 0.0, 0.0, 0.0, NULL_REAL }, { 0.0, 0.0, 1.0, 0.0, 0.0 }, { 0.0, 0.0, 0.0, 1.0, 0.0 }, { 0.0, NULL_REAL, 0.0, 0.0, NULL_REAL } }; real REigenVec[NWAVE][NWAVE] = { { 1.0, NULL_REAL, 0.0, 0.0, NULL_REAL }, { 1.0, 0.0, 0.0, 0.0, 0.0 }, { 0.0, 0.0, 1.0, 0.0, 0.0 }, { 0.0, 0.0, 0.0, 1.0, 0.0 }, { 1.0, NULL_REAL, 0.0, 0.0, NULL_REAL } }; # endif // #ifdef MHD ... else ... 
# elif ( defined MHD && defined CHAR_RECONSTRUCTION ) // #if ( FLU_SCHEME == CTU ) real EigenVal[3][NWAVE]; real REigenVec[NWAVE][NWAVE] = { { NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL }, { 0.0, 0.0, NULL_REAL, NULL_REAL, 0.0, NULL_REAL, NULL_REAL }, { NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL }, { 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 }, { NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL }, { 0.0, 0.0, NULL_REAL, NULL_REAL, 0.0, NULL_REAL, NULL_REAL }, { NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL } }; real LEigenVec[NWAVE][NWAVE] = { { 0.0, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL }, { 0.0, 0.0, NULL_REAL, NULL_REAL, 0.0, NULL_REAL, NULL_REAL }, { 0.0, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL }, { 1.0, 0.0, 0.0, 0.0, NULL_REAL, 0.0, 0.0 }, { 0.0, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL }, { 0.0, 0.0, NULL_REAL, NULL_REAL, 0.0, NULL_REAL, NULL_REAL }, { 0.0, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL, NULL_REAL } }; # else // #if ( FLU_SCHEME == CTU ) ... elif ... real (*const REigenVec)[NWAVE] = NULL; real (*const LEigenVec)[NWAVE] = NULL; # endif // #if ( FLU_SCHEME == CTU ) ... elif ... else ... // 0. conserved --> primitive variables if ( Con2Pri ) { real ConVar_1Cell[NCOMP_TOTAL_PLUS_MAG], PriVar_1Cell[NCOMP_TOTAL_PLUS_MAG]; # ifdef LR_EINT real Eint; real* const EintPtr = &Eint; # else real* const EintPtr = NULL; # endif CGPU_LOOP( idx, CUBE(NIn) ) { for (int v=0; v<NCOMP_TOTAL; v++) ConVar_1Cell[v] = g_ConVar[v][idx]; # ifdef MHD // assuming that g_FC_B[] is accessed with the strides NIn/NIn+1 along the transverse/longitudinal directions const int size_ij = SQR( NIn ); const int i = idx % NIn; const int j = idx % size_ij / NIn; const int k = idx / size_ij; MHD_GetCellCenteredBField( ConVar_1Cell+NCOMP_TOTAL, g_FC_B[0], g_FC_B[1], g_FC_B[2], NIn, NIn, NIn, i, j, k ); # endif Hydro_Con2Pri( ConVar_1Cell, PriVar_1Cell, MinPres, FracPassive, NFrac, FracIdx, JeansMinPres, JeansMinPres_Coeff, EoS->DensEint2Pres_FuncPtr, EoS->DensPres2Eint_FuncPtr, EoS->AuxArrayDevPtr_Flt, EoS->AuxArrayDevPtr_Int, EoS->Table, EintPtr ); for (int v=0; v<NCOMP_TOTAL_PLUS_MAG; v++) g_PriVar[v][idx] = PriVar_1Cell[v]; # ifdef LR_EINT g_PriVar[NCOMP_TOTAL_PLUS_MAG][idx] = Hydro_CheckMinEint( Eint, MinEint ); // store Eint in the last variable # endif } // CGPU_LOOP( idx, CUBE(NIn) ) # ifdef __CUDACC__ __syncthreads(); # endif } // if ( Con2Pri ) // 1. 
evaluate the monotonic slope of all cells const int N_SLOPE_PPM2 = SQR( N_SLOPE_PPM ); CGPU_LOOP( idx_slope, CUBE(N_SLOPE_PPM) ) { const int i_cc = NGhost - 1 + idx_slope%N_SLOPE_PPM; const int j_cc = NGhost - 1 + idx_slope%N_SLOPE_PPM2/N_SLOPE_PPM; const int k_cc = NGhost - 1 + idx_slope/N_SLOPE_PPM2; const int idx_cc = IDX321( i_cc, j_cc, k_cc, NIn, NIn ); // cc_C/L/R: cell-centered variables of the Central/Left/Right cells real cc_C[NCOMP_LR], cc_L[NCOMP_LR], cc_R[NCOMP_LR], Slope_Limiter[NCOMP_LR]; for (int v=0; v<NCOMP_LR; v++) cc_C[v] = g_PriVar[v][idx_cc]; // loop over different spatial directions for (int d=0; d<3; d++) { const int idx_ccL = idx_cc - didx_cc[d]; const int idx_ccR = idx_cc + didx_cc[d]; # if ( defined MHD && defined CHAR_RECONSTRUCTION ) MHD_GetEigenSystem( cc_C, EigenVal[d], LEigenVec, REigenVec, EoS, d ); # endif for (int v=0; v<NCOMP_LR; v++) { cc_L[v] = g_PriVar[v][idx_ccL]; cc_R[v] = g_PriVar[v][idx_ccR]; } Hydro_LimitSlope( cc_L, cc_C, cc_R, LR_Limiter, MinMod_Coeff, d, LEigenVec, REigenVec, Slope_Limiter, EoS ); // store the results to g_Slope_PPM[] for (int v=0; v<NCOMP_LR; v++) g_Slope_PPM[d][v][idx_slope] = Slope_Limiter[v]; } // for (int d=0; d<3; d++) } // CGPU_LOOP( idx_slope, CUBE(N_SLOPE_PPM) ) # ifdef __CUDACC__ __syncthreads(); # endif // data reconstruction const int N_FC_VAR2 = SQR( N_FC_VAR ); # ifdef MHD const int NIn_p1 = NIn + 1; int idx_B[NCOMP_MAG]; # endif CGPU_LOOP( idx_fc, CUBE(N_FC_VAR) ) { const int i_fc = idx_fc%N_FC_VAR; const int j_fc = idx_fc%N_FC_VAR2/N_FC_VAR; const int k_fc = idx_fc/N_FC_VAR2; const int i_cc = i_fc + NGhost; const int j_cc = j_fc + NGhost; const int k_cc = k_fc + NGhost; const int idx_cc = IDX321( i_cc, j_cc, k_cc, NIn, NIn ); const int i_slope = i_fc + 1; // because N_SLOPE_PPM = N_FC_VAR + 2 const int j_slope = j_fc + 1; const int k_slope = k_fc + 1; const int idx_slope = IDX321( i_slope, j_slope, k_slope, N_SLOPE_PPM, N_SLOPE_PPM ); # ifdef MHD // assuming that g_FC_B[] is accessed with the strides NIn/NIn+1 along the transverse/longitudinal directions idx_B[0] = IDX321( i_cc, j_cc, k_cc, NIn_p1, NIn ); idx_B[1] = IDX321( i_cc, j_cc, k_cc, NIn, NIn_p1 ); idx_B[2] = IDX321( i_cc, j_cc, k_cc, NIn, NIn ); # endif // cc/fc: cell/face-centered variables; _C_ncomp: central cell with all NCOMP_LR variables real cc_C_ncomp[NCOMP_LR], fc[6][NCOMP_LR], dfc[NCOMP_LR], dfc6[NCOMP_LR]; for (int v=0; v<NCOMP_LR; v++) cc_C_ncomp[v] = g_PriVar[v][idx_cc]; // 2-a. evaluate the eigenvalues and eigenvectors along all three directions for the pure-hydro CTU integrator # if ( !defined MHD && FLU_SCHEME == CTU ) Hydro_GetEigenSystem( cc_C_ncomp, EigenVal, LEigenVec, REigenVec, EoS ); # endif // loop over different spatial directions for (int d=0; d<3; d++) { // 2-b. evaluate the eigenvalues and eigenvectors along the target direction for the MHD CTU integrator # if ( defined MHD && ( FLU_SCHEME == CTU || defined CHAR_RECONSTRUCTION ) ) MHD_GetEigenSystem( cc_C_ncomp, EigenVal[d], LEigenVec, REigenVec, EoS, d ); # endif // 3. get the face-centered primitive variables const int faceL = 2*d; // left and right face indices const int faceR = faceL+1; const int idx_ccL = idx_cc - didx_cc[d]; const int idx_ccR = idx_cc + didx_cc[d]; const int idx_slopeL = idx_slope - didx_slope[d]; const int idx_slopeR = idx_slope + didx_slope[d]; for (int v=0; v<NCOMP_LR; v++) { // cc/fc: cell/face-centered variables; _C/L/R: Central/Left/Right cells real cc_C, cc_L, cc_R, dcc_L, dcc_R, dcc_C, fc_L, fc_R, Max, Min; // 3-1. 
parabolic interpolation cc_L = g_PriVar[v][idx_ccL]; cc_R = g_PriVar[v][idx_ccR]; cc_C = cc_C_ncomp[v]; dcc_L = g_Slope_PPM[d][v][idx_slopeL]; dcc_R = g_Slope_PPM[d][v][idx_slopeR]; dcc_C = g_Slope_PPM[d][v][idx_slope ]; fc_L = (real)0.5*( cc_C + cc_L ) - (real)1.0/(real)6.0*( dcc_C - dcc_L ); fc_R = (real)0.5*( cc_C + cc_R ) - (real)1.0/(real)6.0*( dcc_R - dcc_C ); // 3-2. monotonicity constraint // extra monotonicity check for the CENTRAL limiter since it's not TVD if ( LR_Limiter == LR_LIMITER_CENTRAL ) { if ( (cc_C-fc_L)*(fc_L-cc_L) < (real)0.0 ) fc_L = (real)0.5*( cc_C + cc_L ); if ( (cc_R-fc_R)*(fc_R-cc_C) < (real)0.0 ) fc_R = (real)0.5*( cc_C + cc_R ); } dfc [v] = fc_R - fc_L; dfc6[v] = (real)6.0*( cc_C - (real)0.5*( fc_L + fc_R ) ); if ( ( fc_R - cc_C )*( cc_C - fc_L ) <= (real)0.0 ) { fc_L = cc_C; fc_R = cc_C; } else if ( dfc[v]*dfc6[v] > +dfc[v]*dfc[v] ) fc_L = (real)3.0*cc_C - (real)2.0*fc_R; else if ( dfc[v]*dfc6[v] < -dfc[v]*dfc[v] ) fc_R = (real)3.0*cc_C - (real)2.0*fc_L; // 3-3. ensure the face-centered variables lie between neighboring cell-centered values Min = ( cc_C < cc_L ) ? cc_C : cc_L; Max = ( cc_C > cc_L ) ? cc_C : cc_L; fc_L = ( fc_L > Min ) ? fc_L : Min; fc_L = ( fc_L < Max ) ? fc_L : Max; Min = ( cc_C < cc_R ) ? cc_C : cc_R; Max = ( cc_C > cc_R ) ? cc_C : cc_R; fc_R = ( fc_R > Min ) ? fc_R : Min; fc_R = ( fc_R < Max ) ? fc_R : Max; fc[faceL][v] = fc_L; fc[faceR][v] = fc_R; } // for (int v=0; v<NCOMP_LR; v++) // 4. advance the face-centered variables by half time-step for the CTU integrator # if ( FLU_SCHEME == CTU ) # ifdef LR_EINT # error : CTU does NOT support LR_EINT !! # endif real Coeff_L, Coeff_R; real Correct_L[NCOMP_LR], Correct_R[NCOMP_LR]; // 4-1. compute the PPM coefficient (for the passive scalars as well) for (int v=0; v<NCOMP_LR; v++) { dfc [v] = fc[faceR][v] - fc[faceL][v]; dfc6[v] = (real)6.0*( cc_C_ncomp[v] - (real)0.5*( fc[faceL][v] + fc[faceR][v] ) ); } // 4-2. re-order variables for the y/z directions Hydro_Rotate3D( dfc, d, true, MAG_OFFSET ); Hydro_Rotate3D( dfc6, d, true, MAG_OFFSET ); // ===================================================================================== // a. for the HLL solvers (HLLE/HLLC/HLLD) // ===================================================================================== # if ( ( RSOLVER == HLLE || RSOLVER == HLLC || RSOLVER == HLLD ) && defined HLL_NO_REF_STATE ) // 4-2-a1. 
evaluate the corrections to the left and right face-centered variables for (int v=0; v<NWAVE; v++) { Correct_L[ idx_wave[v] ] = (real)0.0; Correct_R[ idx_wave[v] ] = (real)0.0; } for (int Mode=0; Mode<NWAVE; Mode++) { Coeff_L = (real)0.0; Coeff_R = (real)0.0; if ( HLL_Include_All_Waves || EigenVal[d][Mode] <= (real)0.0 ) { const real Coeff_C = -dt_dh2*EigenVal[d][Mode]; const real Coeff_D = real(-4.0/3.0)*SQR(Coeff_C); for (int v=0; v<NWAVE; v++) Coeff_L += LEigenVec[Mode][v]*( Coeff_C*( dfc[ idx_wave[v] ] + dfc6[ idx_wave[v] ] ) + Coeff_D*( dfc6[ idx_wave[v] ] ) ); for (int v=0; v<NWAVE; v++) Correct_L[ idx_wave[v] ] += Coeff_L*REigenVec[Mode][v]; } if ( HLL_Include_All_Waves || EigenVal[d][Mode] >= (real)0.0 ) { const real Coeff_A = -dt_dh2*EigenVal[d][Mode]; const real Coeff_B = real(-4.0/3.0)*SQR(Coeff_A); for (int v=0; v<NWAVE; v++) Coeff_R += LEigenVec[Mode][v]*( Coeff_A*( dfc[ idx_wave[v] ] - dfc6[ idx_wave[v] ] ) + Coeff_B*( dfc6[ idx_wave[v] ] ) ); for (int v=0; v<NWAVE; v++) Correct_R[ idx_wave[v] ] += Coeff_R*REigenVec[Mode][v]; } } // for (int Mode=0; Mode<NWAVE; Mode++) // ===================================================================================== // b. for the Roe's and exact solvers // ===================================================================================== # else // ( RSOLVER == ROE/EXACT || ifndef HLL_NO_REF_STATE ) // 4-2-b1. evaluate the reference states Coeff_L = -dt_dh2*FMIN( EigenVal[d][ 0 ], (real)0.0 ); Coeff_R = -dt_dh2*FMAX( EigenVal[d][ NWAVE-1 ], (real)0.0 ); for (int v=0; v<NWAVE; v++) { Correct_L[ idx_wave[v] ] = Coeff_L*( dfc[ idx_wave[v] ] + ( (real)1.0 - real(4.0/3.0)*Coeff_L )*dfc6[ idx_wave[v] ] ); Correct_R[ idx_wave[v] ] = Coeff_R*( dfc[ idx_wave[v] ] - ( (real)1.0 + real(4.0/3.0)*Coeff_R )*dfc6[ idx_wave[v] ] ); } // 4-2-b2. evaluate the corrections to the left and right face-centered variables for (int Mode=0; Mode<NWAVE; Mode++) { Coeff_L = (real)0.0; Coeff_R = (real)0.0; if ( EigenVal[d][Mode] <= (real)0.0 ) { const real Coeff_C = dt_dh2*( EigenVal[d][0] - EigenVal[d][Mode] ); // write as (a-b)*(a+b) instead of a^2-b^2 to ensure that Coeff_D=0 when Coeff_C=0 // Coeff_D = real(4.0/3.0)*dt_dh2*dt_dh2* ( EigenVal[d][ 0]*EigenVal[d][ 0] - // EigenVal[d][Mode]*EigenVal[d][Mode] ); const real Coeff_D = real(4.0/3.0)*dt_dh2*Coeff_C*( EigenVal[d][0] + EigenVal[d][Mode] ); for (int v=0; v<NWAVE; v++) Coeff_L += LEigenVec[Mode][v]*( Coeff_C*( dfc[ idx_wave[v] ] + dfc6[ idx_wave[v] ] ) + Coeff_D*( dfc6[ idx_wave[v] ] ) ); for (int v=0; v<NWAVE; v++) Correct_L[ idx_wave[v] ] += Coeff_L*REigenVec[Mode][v]; } if ( EigenVal[d][Mode] >= (real)0.0 ) { const real Coeff_A = dt_dh2*( EigenVal[d][ NWAVE-1 ] - EigenVal[d][Mode] ); // write as (a-b)*(a+b) instead of a^2-b^2 to ensure that Coeff_B=0 when Coeff_A=0 // Coeff_B = real(4.0/3.0)*dt_dh2*dt_dh2* ( EigenVal[d][NWAVE-1]*EigenVal[d][NWAVE-1] - // EigenVal[d][Mode ]*EigenVal[d][Mode ] ); const real Coeff_B = real(4.0/3.0)*dt_dh2*Coeff_A*( EigenVal[d][ NWAVE-1 ] + EigenVal[d][Mode] ); for (int v=0; v<NWAVE; v++) Coeff_R += LEigenVec[Mode][v]*( Coeff_A*( dfc[ idx_wave[v] ] - dfc6[ idx_wave[v] ] ) + Coeff_B*( dfc6[ idx_wave[v] ] ) ); for (int v=0; v<NWAVE; v++) Correct_R[ idx_wave[v] ] += Coeff_R*REigenVec[Mode][v]; } } // for (int Mode=0; Mode<NWAVE; Mode++) # endif // if ( ( RSOLVER == HLLE || RSOLVER == HLLC || RSOLVER == HLLD ) && defined HLL_NO_REF_STATE ) ... else ... // 4-3. 
evaluate the corrections to the left and right face-centered passive scalars // --> passive scalars travel with fluid velocity (i.e., entropy mode) # if ( NCOMP_PASSIVE > 0 ) Coeff_L = -dt_dh2*FMIN( EigenVal[d][1], (real)0.0 ); Coeff_R = -dt_dh2*FMAX( EigenVal[d][1], (real)0.0 ); for (int v=NCOMP_FLUID; v<NCOMP_TOTAL; v++) { Correct_L[v] = Coeff_L*( dfc[v] + ( (real)1.0 - real(4.0/3.0)*Coeff_L )*dfc6[v] ); Correct_R[v] = Coeff_R*( dfc[v] - ( (real)1.0 + real(4.0/3.0)*Coeff_R )*dfc6[v] ); } # endif // 4-4. add the MHD source terms # ifdef MHD const int t1 = (d+1)%3; // transverse direction 1 const int t2 = (d+2)%3; // transverse direction 2 real B_nL, B_nR, B_t1L, B_t1R, B_t2L, B_t2R; real dB_n, dB_t1, dB_t2, v_t1, v_t2, src_t1, src_t2; B_nL = g_FC_B[d ][ idx_B[d ] ]; B_t1L = g_FC_B[t1][ idx_B[t1] ]; B_t2L = g_FC_B[t2][ idx_B[t2] ]; B_nR = g_FC_B[d ][ idx_B[d ] + didx_cc[d ] ]; B_t1R = g_FC_B[t1][ idx_B[t1] + didx_cc[t1] ]; B_t2R = g_FC_B[t2][ idx_B[t2] + didx_cc[t2] ]; dB_n = B_nR - B_nL; dB_t1 = B_t1R - B_t1L; dB_t2 = B_t2R - B_t2L; v_t1 = cc_C_ncomp[ 1 + t1 ]; v_t2 = cc_C_ncomp[ 1 + t2 ]; src_t1 = dt_dh2*v_t1*MINMOD( dB_n, -dB_t1 ); src_t2 = dt_dh2*v_t2*MINMOD( dB_n, -dB_t2 ); Correct_L[ MAG_OFFSET + 1 ] += src_t1; Correct_R[ MAG_OFFSET + 1 ] += src_t1; Correct_L[ MAG_OFFSET + 2 ] += src_t2; Correct_R[ MAG_OFFSET + 2 ] += src_t2; # endif // #ifdef MHD // 4-5. evaluate the face-centered variables at the half time-step Hydro_Rotate3D( Correct_L, d, false, MAG_OFFSET ); Hydro_Rotate3D( Correct_R, d, false, MAG_OFFSET ); for (int v=0; v<NCOMP_LR; v++) { fc[faceL][v] += Correct_L[v]; fc[faceR][v] += Correct_R[v]; } // 4-6. apply density and pressure floors fc[faceL][0] = FMAX( fc[faceL][0], MinDens ); fc[faceR][0] = FMAX( fc[faceR][0], MinDens ); fc[faceL][4] = Hydro_CheckMinPres( fc[faceL][4], MinPres ); fc[faceR][4] = Hydro_CheckMinPres( fc[faceR][4], MinPres ); # if ( NCOMP_PASSIVE > 0 ) for (int v=NCOMP_FLUID; v<NCOMP_TOTAL; v++) { fc[faceL][v] = FMAX( fc[faceL][v], TINY_NUMBER ); fc[faceR][v] = FMAX( fc[faceR][v], TINY_NUMBER ); } # endif # endif // #if ( FLU_SCHEME == CTU ) // 5. reset the longitudinal B field to the input face-centered values // --> actually no data reconstruction is required for that //###OPTIMIZARION: do not perform data reconstruction for the longitudinal B field # ifdef MHD # if ( FLU_SCHEME != CTU ) const real B_nL = g_FC_B[d][ idx_B[d] ]; const real B_nR = g_FC_B[d][ idx_B[d] + didx_cc[d] ]; # endif fc[faceL][ MAG_OFFSET + d ] = B_nL; fc[faceR][ MAG_OFFSET + d ] = B_nR; # endif // #ifdef MHD // 6. primitive variables --> conserved variables // --> When LR_EINT is on, use the reconstructed internal energy instead of pressure in Hydro_Pri2Con() // to skip expensive EoS conversion real tmp[NCOMP_LR]; // input and output arrays must not overlap for Pri2Con() # ifdef LR_EINT real* const EintPtr = tmp + NCOMP_TOTAL_PLUS_MAG; # else real* const EintPtr = NULL; # endif for (int v=0; v<NCOMP_LR; v++) tmp[v] = fc[faceL][v]; Hydro_Pri2Con( tmp, fc[faceL], FracPassive, NFrac, FracIdx, EoS->DensPres2Eint_FuncPtr, EoS->AuxArrayDevPtr_Flt, EoS->AuxArrayDevPtr_Int, EoS->Table, EintPtr ); for (int v=0; v<NCOMP_LR; v++) tmp[v] = fc[faceR][v]; Hydro_Pri2Con( tmp, fc[faceR], FracPassive, NFrac, FracIdx, EoS->DensPres2Eint_FuncPtr, EoS->AuxArrayDevPtr_Flt, EoS->AuxArrayDevPtr_Int, EoS->Table, EintPtr ); } // for (int d=0; d<3; d++) # if ( FLU_SCHEME == MHM ) // 7. 
advance the face-centered variables by half time-step for the MHM integrator Hydro_HancockPredict( fc, dt, dh, g_ConVar, idx_cc, MinDens, MinPres, MinEint, EoS ); # endif // 8. store the face-centered values to the output array // --> use NCOMP_TOTAL_PLUS_MAG instead of LR_EINT since we don't need to store internal energy in g_FC_Var[] for (int f=0; f<6; f++) for (int v=0; v<NCOMP_TOTAL_PLUS_MAG; v++) g_FC_Var[f][v][idx_fc] = fc[f][v]; } // CGPU_LOOP( idx_fc, CUBE(N_FC_VAR) ) # ifdef __CUDACC__ __syncthreads(); # endif } // FUNCTION : Hydro_DataReconstruction (PPM) #endif // #if ( LR_SCHEME == PPM ) #ifdef CHAR_RECONSTRUCTION //------------------------------------------------------------------------------------------------------- // Function : Hydro_Pri2Char // Description : Primitive variables --> characteristic variables // // Note 1. Passive scalars require no conversion // --> Their eigenmatrices are just identity matrix // 2. Input and output share the same array // 3. InOut[] should have the size of NCOMP_TOTAL_PLUS_MAG or NCOMP_EINT // --> For LR_EINT, where NCOMP_EINT=NCOMP_TOTAL_PLUS_MAG+1, this function assumes that the // internal energy is stored as the last element and does not touch it at all // 4. Does NOT support general EoS // // Parameter : InOut : Array storing both the input primitive variables and output characteristic variables // Dens : Density // Pres : Pressure // LEigenVec : Left eigenvector (for MHD only) // XYZ : Target spatial direction : (0/1/2) --> (x/y/z) // EoS : EoS object //------------------------------------------------------------------------------------------------------- GPU_DEVICE void Hydro_Pri2Char( real InOut[], const real Dens, const real Pres, const real LEigenVec[][NWAVE], const int XYZ, const EoS_t *EoS ) { // check # if ( EOS == EOS_GAMMA ) # ifndef MHD const real *Passive = NULL; // EOS_GAMMA does not involve passive scalars # endif # else # error : Hydro_Pri2Char() only supports EOS_GAMMA !! # endif # if ( defined CHECK_NEGATIVE_IN_FLUID && !defined MHD ) if ( Hydro_CheckNegative(Pres) ) printf( "ERROR : invalid pressure (%14.7e) at file <%s>, line <%d>, function <%s>\n", Pres, __FILE__, __LINE__, __FUNCTION__ ); if ( Hydro_CheckNegative(Dens) ) printf( "ERROR : invalid density (%14.7e) at file <%s>, line <%d>, function <%s>\n", Dens, __FILE__, __LINE__, __FUNCTION__ ); # endif // back-up the input array and rotate it according to the target direction // --> it's unnecessary to copy the passive scalars since they will not be modified real Temp[ NCOMP_FLUID + NCOMP_MAG ]; for (int v=0; v<NCOMP_FLUID; v++) Temp[ v ] = InOut[ v ]; # ifdef MHD for (int v=0; v<NCOMP_MAG; v++) Temp[ v + NCOMP_FLUID ] = InOut[ v + MAG_OFFSET ]; # endif Hydro_Rotate3D( Temp, XYZ, true, NCOMP_FLUID ); // remove the normal B field to be consistent with the eigenvector matrix # ifdef MHD Temp[ NCOMP_FLUID + 0 ] = Temp[ NCOMP_FLUID + 1 ]; Temp[ NCOMP_FLUID + 1 ] = Temp[ NCOMP_FLUID + 2 ]; # endif // primitive --> characteristic // a. 
MHD # ifdef MHD const real tmp_f1 = LEigenVec[0][1]*Temp[1] + LEigenVec[0][2]*Temp[2] + LEigenVec[0][3]*Temp[3]; const real tmp_b1 = LEigenVec[0][4]*Temp[4] + LEigenVec[0][5]*Temp[5] + LEigenVec[0][6]*Temp[6]; const real tmp_f2 = LEigenVec[2][1]*Temp[1] + LEigenVec[2][2]*Temp[2] + LEigenVec[2][3]*Temp[3]; const real tmp_b2 = LEigenVec[2][4]*Temp[4] + LEigenVec[2][5]*Temp[5] + LEigenVec[2][6]*Temp[6]; InOut[MAG_OFFSET+0] = (real)0.0; InOut[ 3] = Temp[0] + LEigenVec[3][4]*Temp[4]; InOut[ 1] = LEigenVec[1][2]*Temp[2] + LEigenVec[1][3]*Temp[3] + LEigenVec[1][5]*Temp[5] + LEigenVec[1][6]*Temp[6]; InOut[MAG_OFFSET+1] = LEigenVec[5][2]*Temp[2] + LEigenVec[5][3]*Temp[3] + LEigenVec[5][5]*Temp[5] + LEigenVec[5][6]*Temp[6]; InOut[ 0] = tmp_f1 + tmp_b1; InOut[ 2] = tmp_f2 + tmp_b2; InOut[ 4] = -tmp_f2 + tmp_b2; InOut[MAG_OFFSET+2] = -tmp_f1 + tmp_b1; // b. pure hydro # else // #ifdef MHD const real a2 = EoS->DensPres2CSqr_FuncPtr( Dens, Pres, Passive, EoS->AuxArrayDevPtr_Flt, EoS->AuxArrayDevPtr_Int, EoS->Table ); const real _a2 = (real)1.0 / a2; const real _a = SQRT( _a2 ); InOut[0] = -(real)0.5*Dens*_a*Temp[1] + (real)0.5*_a2*Temp[4]; InOut[1] = Temp[0] - _a2*Temp[4]; InOut[2] = Temp[2]; InOut[3] = Temp[3]; InOut[4] = +(real)0.5*Dens*_a*Temp[1] + (real)0.5*_a2*Temp[4]; # endif // #ifdef MHD ... else ... } // FUNCTION : Hydro_Pri2Char //------------------------------------------------------------------------------------------------------- // Function : Hydro_Char2Pri // Description : Characteristic variables --> primitive variables // // Note 1. Passive scalars require no conversion // --> Their eigenmatrices are just identity matrix // 2. Input and output share the same array // 3. InOut[] should have the size of NCOMP_TOTAL_PLUS_MAG or NCOMP_EINT // --> For LR_EINT, where NCOMP_EINT=NCOMP_TOTAL_PLUS_MAG+1, this function assumes that the // internal energy is stored as the last element and does not touch it at all // 4. Does NOT support general EoS // // Parameter : InOut : Array storing both the input characteristic variables and output primitive variables // Dens : Density // Pres : Pressure // REigenVec : Right eigenvector (for MHD only) // XYZ : Target spatial direction : (0/1/2) --> (x/y/z) // EoS : EoS object //------------------------------------------------------------------------------------------------------- GPU_DEVICE void Hydro_Char2Pri( real InOut[], const real Dens, const real Pres, const real REigenVec[][NWAVE], const int XYZ, const EoS_t *EoS ) { // check # if ( EOS == EOS_GAMMA ) const real *Passive = NULL; // EOS_GAMMA does not involve passive scalars # else # error : Hydro_Char2Pri() only supports EOS_GAMMA !! 
# endif # if ( defined CHECK_NEGATIVE_IN_FLUID && !defined MHD ) if ( Hydro_CheckNegative(Pres) ) printf( "ERROR : invalid pressure (%14.7e) at file <%s>, line <%d>, function <%s>\n", Pres, __FILE__, __LINE__, __FUNCTION__ ); if ( Hydro_CheckNegative(Dens) ) printf( "ERROR : invalid density (%14.7e) at file <%s>, line <%d>, function <%s>\n", Dens, __FILE__, __LINE__, __FUNCTION__ ); # endif // back-up the input array and rotate it according to the target direction // --> it's unnecessary to copy the passive scalars since they will not be modified // --> it's also unnecessary to copy the normal B field (just to be consistent with the eigenvector matrix) real Temp[NWAVE]; for (int v=0; v<NCOMP_FLUID; v++) Temp[v] = InOut[v]; # ifdef MHD for (int v=NCOMP_FLUID; v<NWAVE; v++) Temp[v] = InOut[ v - NCOMP_FLUID + MAG_OFFSET + 1 ]; # endif // primitive --> characteristic const real a2 = EoS->DensPres2CSqr_FuncPtr( Dens, Pres, Passive, EoS->AuxArrayDevPtr_Flt, EoS->AuxArrayDevPtr_Int, EoS->Table ); // a. MHD # ifdef MHD InOut[ 0] = REigenVec[0][0]*Temp[0] + REigenVec[2][0]*Temp[2] + Temp[3] + REigenVec[4][0]*Temp[4] + REigenVec[6][0]*Temp[6]; InOut[ 1] = REigenVec[0][1]*Temp[0] + REigenVec[2][1]*Temp[2] + REigenVec[4][1]*Temp[4] + REigenVec[6][1]*Temp[6]; InOut[ 2] = REigenVec[0][2]*Temp[0] + REigenVec[1][2]*Temp[1] + REigenVec[2][2]*Temp[2] + REigenVec[4][2]*Temp[4] + REigenVec[5][2]*Temp[5] + REigenVec[6][2]*Temp[6]; InOut[ 3] = REigenVec[0][3]*Temp[0] + REigenVec[1][3]*Temp[1] + REigenVec[2][3]*Temp[2] + REigenVec[4][3]*Temp[4] + REigenVec[5][3]*Temp[5] + REigenVec[6][3]*Temp[6]; InOut[ 4] = ( InOut[0] - Temp[3] )*a2; InOut[MAG_OFFSET+0] = (real)0.0; InOut[MAG_OFFSET+1] = REigenVec[0][5]*Temp[0] + REigenVec[1][5]*Temp[1] + REigenVec[2][5]*Temp[2] + REigenVec[4][5]*Temp[4] + REigenVec[5][5]*Temp[5] + REigenVec[6][5]*Temp[6]; InOut[MAG_OFFSET+2] = REigenVec[0][6]*Temp[0] + REigenVec[1][6]*Temp[1] + REigenVec[2][6]*Temp[2] + REigenVec[4][6]*Temp[4] + REigenVec[5][6]*Temp[5] + REigenVec[6][6]*Temp[6]; // b. pure hydro # else // #ifdef MHD const real a = SQRT( a2 ); InOut[0] = Temp[0] + Temp[1] + Temp[4]; InOut[1] = a/Dens*( -Temp[0] + Temp[4] ); InOut[2] = Temp[2]; InOut[3] = Temp[3]; InOut[4] = a2*( Temp[0] + Temp[4] ); # endif // #ifdef MHD ... else ... Hydro_Rotate3D( InOut, XYZ, false, MAG_OFFSET ); } // FUNCTION : Hydro_Char2Pri #endif #if ( FLU_SCHEME == CTU || ( defined MHD && defined CHAR_RECONSTRUCTION ) ) //------------------------------------------------------------------------------------------------------- // Function : Hydro_GetEigenSystem // Description : Evaluate the eigenvalues and left/right eigenvectors // // Note : 1. Input data must be primitive variables // 2. Constant components of eigenvectors must be set in advance // 3. Work for the CTU scheme and the characteristic data reconstruction in MHD // 4. Do not need to consider passive scalars // --> Their eigenmatrices are just identity matrix // 5. For pure hydro, this function computes the eigenvalues and eigenvectors // along all three spatial directions at once // --> Because eigenvectors along different directions are the same for pure hydro // But for MHD, this function only computes the eigenvalues and eigenvectors // along the spatial direction specified by XYZ // --> Because eigenvectors along different directions are different for MHD // 6. 
Does NOT support general EoS // // Parameter : CC_Var : Array storing the input cell-centered primitive variables // EigenVal : Array to store the output eigenvalues // --> Hydro: along all three spatial directions // MHD : only along the target spatial direction // L/REigenVec : Array to store the output left/right eigenvectors // EoS : EoS object // XYZ : Target spatial direction (for MHD only) // // Return : EigenVal[], L/REigenVec[] //------------------------------------------------------------------------------------------------------- GPU_DEVICE #ifdef MHD void MHD_GetEigenSystem( const real CC_Var[], real EigenVal[], real LEigenVec[][NWAVE], real REigenVec[][NWAVE], const EoS_t *EoS, const int XYZ ) #else void Hydro_GetEigenSystem( const real CC_Var[], real EigenVal[][NWAVE], real LEigenVec[][NWAVE], real REigenVec[][NWAVE], const EoS_t *EoS ) #endif { # if ( EOS == EOS_GAMMA ) const real *Passive = NULL; // EOS_GAMMA does not involve passive scalars # else # error : Hydro/MHD_GetEigenSystem() only supports EOS_GAMMA !! # endif # ifdef CHECK_NEGATIVE_IN_FLUID if ( Hydro_CheckNegative(CC_Var[4]) ) printf( "ERROR : invalid pressure (%14.7e) at file <%s>, line <%d>, function <%s>\n", CC_Var[4], __FILE__, __LINE__, __FUNCTION__ ); if ( Hydro_CheckNegative(CC_Var[0]) ) printf( "ERROR : invalid density (%14.7e) at file <%s>, line <%d>, function <%s>\n", CC_Var[0], __FILE__, __LINE__, __FUNCTION__ ); # endif const real Rho = CC_Var[0]; const real _Rho = (real)1.0/Rho; const real a2 = EoS->DensPres2CSqr_FuncPtr( Rho, CC_Var[4], Passive, EoS->AuxArrayDevPtr_Flt, EoS->AuxArrayDevPtr_Int, EoS->Table ); const real a = SQRT( a2 ); const real _a = (real)1.0/a; const real _a2 = _a*_a; // a. MHD # ifdef MHD real Cf2, Cs2, Cf, Cs; real PriVar[NCOMP_TOTAL_PLUS_MAG]; for (int v=0; v<NCOMP_TOTAL_PLUS_MAG; v++) PriVar[v] = CC_Var[v]; Hydro_Rotate3D( PriVar, XYZ, true, MAG_OFFSET ); const real Bx = PriVar[ MAG_OFFSET + 0 ]; const real By = PriVar[ MAG_OFFSET + 1 ]; const real Bz = PriVar[ MAG_OFFSET + 2 ]; const real Bn2 = SQR( By ) + SQR( Bz ); const real Bn = SQRT( Bn2 ); const real Cax2 = SQR( Bx )*_Rho; const real Cax = SQRT( Cax2 ); const real Cat2 = Bn2*_Rho; const real tsum = Cax2 + Cat2 + a2; // Ca^2 + a^2 const real tdif = Cax2 + Cat2 - a2; // Ca^2 - a^2 const real Cf2_min_Cs2 = SQRT( SQR(tdif) + (real)4.0*a2*Cat2 ); // Cf^2 - Cs^2 // evaluate the fast/slow wave speed (Cf/Cs) if ( Cat2 == (real)0.0 ) { if ( Cax2 == a2 ) { Cf2 = a2; Cs2 = a2; } else if ( Cax2 > a2 ) { Cf2 = Cax2; Cs2 = a2; } else { Cf2 = a2; Cs2 = Cax2; } } else { if ( Cax2 == (real)0.0 ) { Cf2 = a2 + Cat2; Cs2 = (real)0.0; } else { Cf2 = (real)0.5*( tsum + Cf2_min_Cs2 ); Cs2 = a2*Cax2/Cf2; // do not use "Cf2 - Cf2_min_Cs2" to avoid negative values caused by round-off errors // Cs2 = Cf2 - Cf2_min_Cs2; } } // if ( Cat2 == (real)0.0 ) ... else ... 
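// The branches above implement the standard fast/slow magnetosonic speeds,
//    Cf,s^2 = 0.5*[ (a^2 + Ca^2) +/- sqrt( (a^2 + Ca^2)^2 - 4*a^2*Cax^2 ) ],
// with Ca^2 = Cax^2 + Cat^2 = |B|^2/rho; note that tdif^2 + 4*a^2*Cat2 expands
// to exactly the discriminant above.  Because Cf^2*Cs^2 = a^2*Cax^2, the slow
// speed is recovered as Cs^2 = a^2*Cax^2/Cf^2, which avoids the round-off-induced
// negative values mentioned in the comment above, and the degenerate Cat2 == 0
// and Cax2 == 0 cases are handled explicitly.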
Cf = SQRT( Cf2 ); Cs = SQRT( Cs2 ); // eigenvalues along the target spatial direction EigenVal[0] = PriVar[1] - Cf; EigenVal[1] = PriVar[1] - Cax; EigenVal[2] = PriVar[1] - Cs; EigenVal[3] = PriVar[1]; EigenVal[4] = PriVar[1] + Cs; EigenVal[5] = PriVar[1] + Cax; EigenVal[6] = PriVar[1] + Cf; // right eigenvectors (rows instead of columns of the matrix REigenVec for better performance) const real S = SIGN( Bx ); const real sqrt_Rho = SQRT( Rho ); const real _sqrt_Rho = (real)1.0 / sqrt_Rho; const real a2_min_Cs2 = a2 - Cs2; const real Cf2_min_a2 = Cf2 - a2; real beta_y, beta_z, alpha_f, alpha_s; if ( Bn == (real)0.0 ) { beta_y = (real)1.0; beta_z = (real)0.0; } else { const real _Bn = (real)1.0 / Bn; beta_y = By * _Bn; beta_z = Bz * _Bn; } if ( Cf2_min_Cs2 == (real)0.0 ) { alpha_f = (real)1.0; alpha_s = (real)0.0; } else if ( a2_min_Cs2 <= (real)0.0 ) { alpha_f = (real)0.0; alpha_s = (real)1.0; } else if ( Cf2_min_a2 <= (real)0.0 ) { alpha_f = (real)1.0; alpha_s = (real)0.0; } else { # ifdef CHECK_NEGATIVE_IN_FLUID if ( Hydro_CheckNegative(a2_min_Cs2) ) printf( "ERROR : invalid a2_min_Cs2 (%14.7e) at file <%s>, line <%d>, function <%s>\n", a2_min_Cs2, __FILE__, __LINE__, __FUNCTION__ ); if ( Hydro_CheckNegative(Cf2_min_a2) ) printf( "ERROR : invalid Cf2_min_a2 (%14.7e) at file <%s>, line <%d>, function <%s>\n", Cf2_min_a2, __FILE__, __LINE__, __FUNCTION__ ); # endif const real _Cf2_min_Cs2 = (real)1.0 / Cf2_min_Cs2; alpha_f = SQRT( a2_min_Cs2*_Cf2_min_Cs2 ); alpha_s = SQRT( Cf2_min_a2*_Cf2_min_Cs2 ); } const real Af = a * alpha_f * sqrt_Rho; const real As = a * alpha_s * sqrt_Rho; const real Cff = Cf * alpha_f; const real Css = Cs * alpha_s; const real Qf = Cff * S; const real Qs = Css * S; const real S_sqrt_Rho = S * sqrt_Rho; REigenVec[0][0] = Rho * alpha_f; REigenVec[0][1] = -Cff; REigenVec[0][2] = Qs * beta_y; REigenVec[0][3] = Qs * beta_z; REigenVec[0][4] = REigenVec[0][0] * a2; REigenVec[0][5] = As * beta_y; REigenVec[0][6] = As * beta_z; REigenVec[1][2] = -beta_z; REigenVec[1][3] = beta_y; REigenVec[1][5] = -S_sqrt_Rho * beta_z; REigenVec[1][6] = S_sqrt_Rho * beta_y; REigenVec[2][0] = Rho * alpha_s; REigenVec[2][1] = -Css; REigenVec[2][2] = -Qf * beta_y; REigenVec[2][3] = -Qf * beta_z; REigenVec[2][4] = REigenVec[2][0] * a2; REigenVec[2][5] = -Af * beta_y; REigenVec[2][6] = -Af * beta_z; REigenVec[4][0] = REigenVec[2][0]; REigenVec[4][1] = -REigenVec[2][1]; REigenVec[4][2] = -REigenVec[2][2]; REigenVec[4][3] = -REigenVec[2][3]; REigenVec[4][4] = REigenVec[2][4]; REigenVec[4][5] = REigenVec[2][5]; REigenVec[4][6] = REigenVec[2][6]; REigenVec[5][2] = -REigenVec[1][2]; REigenVec[5][3] = -REigenVec[1][3]; REigenVec[5][5] = REigenVec[1][5]; REigenVec[5][6] = REigenVec[1][6]; REigenVec[6][0] = REigenVec[0][0]; REigenVec[6][1] = -REigenVec[0][1]; REigenVec[6][2] = -REigenVec[0][2]; REigenVec[6][3] = -REigenVec[0][3]; REigenVec[6][4] = REigenVec[0][4]; REigenVec[6][5] = REigenVec[0][5]; REigenVec[6][6] = REigenVec[0][6]; // left eigenvectors (rows of the matrix LEigenVec) const real N = (real)0.5 * _a2; const real N_By = N * beta_y; const real N_Bz = N * beta_z; const real As_Rho = As * _Rho; const real Af_Rho = Af * _Rho; const real S_inv_Rho = S * _sqrt_Rho; LEigenVec[0][1] = -N * Cff; LEigenVec[0][2] = N_By * Qs; LEigenVec[0][3] = N_Bz * Qs; LEigenVec[0][4] = N * alpha_f * _Rho; LEigenVec[0][5] = N_By * As_Rho; LEigenVec[0][6] = N_Bz * As_Rho; LEigenVec[1][2] = -(real)0.5 * beta_z; LEigenVec[1][3] = (real)0.5 * beta_y; LEigenVec[1][5] = LEigenVec[1][2] * S_inv_Rho; LEigenVec[1][6] = 
LEigenVec[1][3] * S_inv_Rho; LEigenVec[2][1] = -N * Css; LEigenVec[2][2] = -N_By * Qf; LEigenVec[2][3] = -N_Bz * Qf; LEigenVec[2][4] = N * alpha_s * _Rho; LEigenVec[2][5] = -N_By * Af_Rho; LEigenVec[2][6] = -N_Bz * Af_Rho; LEigenVec[3][4] = -_a2; LEigenVec[4][1] = -LEigenVec[2][1]; LEigenVec[4][2] = -LEigenVec[2][2]; LEigenVec[4][3] = -LEigenVec[2][3]; LEigenVec[4][4] = LEigenVec[2][4]; LEigenVec[4][5] = LEigenVec[2][5]; LEigenVec[4][6] = LEigenVec[2][6]; LEigenVec[5][2] = -LEigenVec[1][2]; LEigenVec[5][3] = -LEigenVec[1][3]; LEigenVec[5][5] = LEigenVec[1][5]; LEigenVec[5][6] = LEigenVec[1][6]; LEigenVec[6][1] = -LEigenVec[0][1]; LEigenVec[6][2] = -LEigenVec[0][2]; LEigenVec[6][3] = -LEigenVec[0][3]; LEigenVec[6][4] = LEigenVec[0][4]; LEigenVec[6][5] = LEigenVec[0][5]; LEigenVec[6][6] = LEigenVec[0][6]; // b. pure hydro # else // #ifdef MHD const real vx = CC_Var[1]; const real vy = CC_Var[2]; const real vz = CC_Var[3]; // eigenvalues along all three spatial directions EigenVal[0][0] = vx - a; EigenVal[0][1] = vx; EigenVal[0][2] = vx; EigenVal[0][3] = vx; EigenVal[0][4] = vx + a; EigenVal[1][0] = vy - a; EigenVal[1][1] = vy; EigenVal[1][2] = vy; EigenVal[1][3] = vy; EigenVal[1][4] = vy + a; EigenVal[2][0] = vz - a; EigenVal[2][1] = vz; EigenVal[2][2] = vz; EigenVal[2][3] = vz; EigenVal[2][4] = vz + a; // NOTE : the left and right eigenvectors have the same form along different spatial directions for hydro // left eigenvectors (rows of the matrix LEigenVec) LEigenVec[0][1] = -(real)0.5*Rho*_a; LEigenVec[0][4] = (real)0.5*_a2; LEigenVec[1][4] = -_a2; LEigenVec[4][1] = -LEigenVec[0][1]; LEigenVec[4][4] = +LEigenVec[0][4]; // right eigenvectors (rows instead of columns of the matrix REigenVec for better performance) REigenVec[0][1] = -a*_Rho; REigenVec[0][4] = a2; REigenVec[4][1] = -REigenVec[0][1]; REigenVec[4][4] = a2; # endif // #ifdef MHD ... else ... } // FUNCTION : Hydro/MHD_GetEigenSystem #endif // #if ( FLU_SCHEME == CTU || ( defined MHD && defined CHAR_RECONSTRUCTION ) ) //------------------------------------------------------------------------------------------------------- // Function : Hydro_LimitSlope // Description : Evaluate the monotonic slope by slope limiters // // Note : 1. Input data must be primitive variables // 2. 
Size of each input array should be NCOMP_LR // // Parameter : L : Element x-1 // C : Element x // R : Element x+1 // LR_Limiter : Slope limiter for the data reconstruction in the MHM/MHM_RP/CTU schemes // (0/1/2/3) = (vanLeer/generalized MinMod/vanAlbada/vanLeer+generalized MinMod) limiter // MinMod_Coeff : Coefficient of the generalized MinMod limiter // XYZ : Target spatial direction : (0/1/2) --> (x/y/z) // --> For CHAR_RECONSTRUCTION only // L/REigenVec : Array storing the left/right eigenvectors // --> For MHD + CHAR_RECONSTRUCTION only // Slope_Limiter : Array to store the output monotonic slope // EoS : EoS object //------------------------------------------------------------------------------------------------------- GPU_DEVICE void Hydro_LimitSlope( const real L[], const real C[], const real R[], const LR_Limiter_t LR_Limiter, const real MinMod_Coeff, const int XYZ, const real LEigenVec[][NWAVE], const real REigenVec[][NWAVE], real Slope_Limiter[], const EoS_t *EoS ) { // check # ifdef GAMER_DEBUG # if ( defined MHD && defined CHAR_RECONSTRUCTION ) if ( LEigenVec == NULL ) printf( "ERROR : LEigenVec == NULL !!\n" ); if ( REigenVec == NULL ) printf( "ERROR : REigenVec == NULL !!\n" ); # endif # endif real Slope_L[NCOMP_LR], Slope_R[NCOMP_LR], Slope_C[NCOMP_LR]; real Slope_A[NCOMP_LR], Slope_LR; // evaluate different slopes for (int v=0; v<NCOMP_LR; v++) { Slope_L[v] = C[v] - L[v]; Slope_R[v] = R[v] - C[v]; Slope_C[v] = (real)0.5*( Slope_L[v] + Slope_R[v] ); } if ( LR_Limiter == LR_LIMITER_VL_GMINMOD ) { for (int v=0; v<NCOMP_LR; v++) { if ( Slope_L[v]*Slope_R[v] > (real)0.0 ) Slope_A[v] = (real)2.0*Slope_L[v]*Slope_R[v]/( Slope_L[v] + Slope_R[v] ); else Slope_A[v] = (real)0.0; } } // primitive variables --> characteristic variables # ifdef CHAR_RECONSTRUCTION const real Dens = C[0]; const real Pres = C[4]; Hydro_Pri2Char( Slope_L, Dens, Pres, LEigenVec, XYZ, EoS ); Hydro_Pri2Char( Slope_R, Dens, Pres, LEigenVec, XYZ, EoS ); Hydro_Pri2Char( Slope_C, Dens, Pres, LEigenVec, XYZ, EoS ); if ( LR_Limiter == LR_LIMITER_VL_GMINMOD ) Hydro_Pri2Char( Slope_A, Dens, Pres, LEigenVec, XYZ, EoS ); # endif // apply the slope limiter for (int v=0; v<NCOMP_LR; v++) { Slope_LR = Slope_L[v]*Slope_R[v]; if ( Slope_LR > (real)0.0 ) { switch ( LR_Limiter ) { // notes for LR_LIMITER_CENTRAL: // (1) not TVD --> extra monotonicity check outside this function is required // (2) mainly for MHM_RP+PPM to achieve 2nd-order accuracy in linear wave tests case LR_LIMITER_CENTRAL: // central Slope_Limiter[v] = Slope_C[v]; break; case LR_LIMITER_VANLEER: // van-Leer Slope_Limiter[v] = (real)2.0*Slope_LR/( Slope_L[v] + Slope_R[v] ); break; case LR_LIMITER_GMINMOD: // generalized MinMod Slope_L[v] *= MinMod_Coeff; Slope_R[v] *= MinMod_Coeff; Slope_Limiter[v] = FMIN( FABS( Slope_L[v] ), FABS( Slope_R[v] ) ); Slope_Limiter[v] = FMIN( FABS( Slope_C[v] ), Slope_Limiter[v] ); Slope_Limiter[v] *= SIGN( Slope_C[v] ); break; case LR_LIMITER_ALBADA: // van-Albada Slope_Limiter[v] = Slope_LR*( Slope_L[v] + Slope_R[v] ) / ( Slope_L[v]*Slope_L[v] + Slope_R[v]*Slope_R[v] ); break; case LR_LIMITER_VL_GMINMOD: // van-Leer + generalized MinMod Slope_L[v] *= MinMod_Coeff; Slope_R[v] *= MinMod_Coeff; Slope_Limiter[v] = FMIN( FABS( Slope_L[v] ), FABS( Slope_R[v] ) ); Slope_Limiter[v] = FMIN( FABS( Slope_C[v] ), Slope_Limiter[v] ); Slope_Limiter[v] = FMIN( FABS( Slope_A[v] ), Slope_Limiter[v] ); Slope_Limiter[v] *= SIGN( Slope_C[v] ); break; default : # ifdef GAMER_DEBUG printf( "ERROR : incorrect parameter %s = %d !!\n", "LR_Limiter", 
LR_Limiter ); # endif return; } } // if ( Slope_LR > (real)0.0 ) else { Slope_Limiter[v] = (real)0.0; } // if ( Slope_LR > (real)0.0 ) ... else ... } // for (int v=0; v<NCOMP_LR; v++) // characteristic variables --> primitive variables # ifdef CHAR_RECONSTRUCTION Hydro_Char2Pri( Slope_Limiter, Dens, Pres, REigenVec, XYZ, EoS ); # endif } // FUNCTION : Hydro_LimitSlope #if ( FLU_SCHEME == MHM ) //------------------------------------------------------------------------------------------------------- // Function : Hydro_HancockPredict // Description : Evolve the face-centered variables by half time-step by calculating the face-centered fluxes // (no Riemann solver is required) // // Note : 1. Work for the MHM scheme // 2. Do NOT require data in the neighboring cells // 3. Input variables must be conserved variables // // Parameter : fc : Face-centered conserved variables to be updated // dt : Time interval to advance solution // dh : Cell size // g_cc_array : Array storing the cell-centered conserved variables for checking // negative density and pressure // --> It is just the input array Flu_Array_In[] // cc_idx : Index for accessing g_cc_array[] // MinDens/Pres/Eint : Density, pressure, and internal energy floors // EoS : EoS object //------------------------------------------------------------------------------------------------------- GPU_DEVICE void Hydro_HancockPredict( real fc[][NCOMP_LR], const real dt, const real dh, const real g_cc_array[][ CUBE(FLU_NXT) ], const int cc_idx, const real MinDens, const real MinPres, const real MinEint, const EoS_t *EoS ) { const real dt_dh2 = (real)0.5*dt/dh; real Flux[6][NCOMP_TOTAL], dFlux; // calculate flux for (int f=0; f<6; f++) Hydro_Con2Flux( f/2, Flux[f], fc[f], MinPres, EoS->DensEint2Pres_FuncPtr, EoS->AuxArrayDevPtr_Flt, EoS->AuxArrayDevPtr_Int, EoS->Table, NULL ); // update the face-centered variables for (int v=0; v<NCOMP_TOTAL; v++) { dFlux = dt_dh2*( Flux[1][v] - Flux[0][v] + Flux[3][v] - Flux[2][v] + Flux[5][v] - Flux[4][v] ); for (int f=0; f<6; f++) fc[f][v] -= dFlux; } // check the negative density and energy for (int f=0; f<6; f++) { # ifdef BAROTROPIC_EOS if ( fc[f][0] <= (real)0.0 ) # else if ( fc[f][0] <= (real)0.0 || fc[f][4] <= (real)0.0 ) # endif { // set to the cell-centered values before update for (int f=0; f<6; f++) for (int v=0; v<NCOMP_TOTAL; v++) fc[f][v] = g_cc_array[v][cc_idx]; break; } } // apply density and internal energy floors for (int f=0; f<6; f++) { fc[f][0] = FMAX( fc[f][0], MinDens ); # ifndef BAROTROPIC_EOS # ifdef MHD # error : ERROR : MHD is not supported here !!! const real Emag = NULL_REAL; # else const real Emag = NULL_REAL; # endif fc[f][4] = Hydro_CheckMinEintInEngy( fc[f][0], fc[f][1], fc[f][2], fc[f][3], fc[f][4], MinEint, Emag ); # endif // #ifndef BAROTROPIC_EOS # if ( NCOMP_PASSIVE > 0 ) for (int v=NCOMP_FLUID; v<NCOMP_TOTAL; v++) fc[f][v] = FMAX( fc[f][v], TINY_NUMBER ); # endif } } // FUNCTION : Hydro_HancockPredict #endif // #if ( FLU_SCHEME == MHM ) // MINMOD macro is only used in this function #ifdef MINMOD # undef MINMOD #endif #endif // #if ( MODEL == HYDRO && ( FLU_SCHEME == MHM || FLU_SCHEME == MHM_RP || FLU_SCHEME == CTU ) ) #endif // #ifndef __CUFLU_DATARECONSTRUCTION__
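// ---------------------------------------------------------------------------
// Editorial sketch (not part of GAMER): a host-side reference of the
// generalized MinMod branch (LR_LIMITER_GMINMOD) in Hydro_LimitSlope(),
// written with plain double and <cmath>/<algorithm> so the limiter logic can
// be checked in isolation.  The guard macro and the helper name MinModSlope()
// are hypothetical, not GAMER APIs.
// ---------------------------------------------------------------------------
#ifndef __CUFLU_DATARECONSTRUCTION_MINMOD_SKETCH__
#define __CUFLU_DATARECONSTRUCTION_MINMOD_SKETCH__

#include <cmath>
#include <algorithm>

// Limited slope for one variable from its left/center/right cell values and the
// MinMod coefficient (typically 1 <= MinMod_Coeff <= 2); returns zero at local
// extrema so the reconstruction stays monotonic
static double MinModSlope( const double L, const double C, const double R,
                           const double MinMod_Coeff )
{
   const double Slope_L = C - L;
   const double Slope_R = R - C;
   const double Slope_C = 0.5*( Slope_L + Slope_R );

   if ( Slope_L*Slope_R <= 0.0 )    return 0.0;   // opposite signs --> local extremum

   double Slope = std::min( std::fabs(MinMod_Coeff*Slope_L),
                            std::fabs(MinMod_Coeff*Slope_R) );
   Slope = std::min( std::fabs(Slope_C), Slope );

   return ( Slope_C > 0.0 ) ? Slope : -Slope;
}

#endif // #ifndef __CUFLU_DATARECONSTRUCTION_MINMOD_SKETCH__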
* \file * Random-access iterator types */ #pragma once #include "thread/thread_load.cuh" #include "util_device.cuh" #include "util_debug.cuh" #include "util_namespace.cuh" /// Optional outer namespace(s) CUB_NS_PREFIX /// CUB namespace namespace cub { /****************************************************************************** * Texture references *****************************************************************************/ #ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document // Anonymous namespace namespace { /// Templated texture reference type template <typename T> struct TexIteratorRef { // Texture reference type typedef texture<T, cudaTextureType1D, cudaReadModeElementType> TexRef; static TexRef ref; /** * Bind texture */ static cudaError_t BindTexture(void *d_in) { cudaChannelFormatDesc tex_desc = cudaCreateChannelDesc<T>(); if (d_in) return (CubDebug(cudaBindTexture(NULL, ref, d_in, tex_desc))); return cudaSuccess; } /** * Unbind textures */ static cudaError_t UnbindTexture() { return CubDebug(cudaUnbindTexture(ref)); } }; // Texture reference definitions template <typename Value> typename TexIteratorRef<Value>::TexRef TexIteratorRef<Value>::ref = 0; } // Anonymous namespace #endif // DOXYGEN_SHOULD_SKIP_THIS /** * \addtogroup UtilModule * @{ */ /****************************************************************************** * Iterators *****************************************************************************/ /** * \brief A simple random-access iterator pointing to a range of constant values * * \par Overview * ConstantIteratorRA is a random-access iterator that when dereferenced, always * returns the supplied constant of type \p OutputType. * * \tparam OutputType The value type of this iterator */ template <typename OutputType> class ConstantIteratorRA { public: #ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document typedef ConstantIteratorRA self_type; typedef OutputType value_type; typedef OutputType reference; typedef OutputType* pointer; typedef std::random_access_iterator_tag iterator_category; typedef int difference_type; #endif // DOXYGEN_SHOULD_SKIP_THIS private: OutputType val; public: /// Constructor __host__ __device__ __forceinline__ ConstantIteratorRA( const OutputType &val) ///< Constant value for the iterator instance to report : val(val) {} #ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document __host__ __device__ __forceinline__ self_type operator++() { self_type i = *this; return i; } __host__ __device__ __forceinline__ self_type operator++(int junk) { return *this; } __host__ __device__ __forceinline__ reference operator*() { return val; } template <typename SizeT> __host__ __device__ __forceinline__ self_type operator+(SizeT n) { return ConstantIteratorRA(val); } template <typename SizeT> __host__ __device__ __forceinline__ self_type operator-(SizeT n) { return ConstantIteratorRA(val); } template <typename SizeT> __host__ __device__ __forceinline__ reference operator[](SizeT n) { return ConstantIteratorRA(val); } __host__ __device__ __forceinline__ pointer operator->() { return &val; } __host__ __device__ __forceinline__ bool operator==(const self_type& rhs) { return (val == rhs.val); } __host__ __device__ __forceinline__ bool operator!=(const self_type& rhs) { return (val != rhs.val); } #endif // DOXYGEN_SHOULD_SKIP_THIS }; /** * \brief A simple random-access transform iterator for applying a transformation operator. 
* * \par Overview * TransformIteratorRA is a random-access iterator that wraps both a native * device pointer of type <tt>InputType*</tt> and a unary conversion functor of * type \p ConversionOp. \p OutputType references are made by pulling \p InputType * values through the \p ConversionOp instance. * * \tparam InputType The value type of the pointer being wrapped * \tparam ConversionOp Unary functor type for mapping objects of type \p InputType to type \p OutputType. Must have member <tt>OutputType operator()(const InputType &datum)</tt>. * \tparam OutputType The value type of this iterator */ template <typename OutputType, typename ConversionOp, typename InputType> class TransformIteratorRA { public: #ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document typedef TransformIteratorRA self_type; typedef OutputType value_type; typedef OutputType reference; typedef OutputType* pointer; typedef std::random_access_iterator_tag iterator_category; typedef int difference_type; #endif // DOXYGEN_SHOULD_SKIP_THIS private: ConversionOp conversion_op; InputType* ptr; public: /** * \brief Constructor * @param ptr Native pointer to wrap * @param conversion_op Binary transformation functor */ __host__ __device__ __forceinline__ TransformIteratorRA(InputType* ptr, ConversionOp conversion_op) : conversion_op(conversion_op), ptr(ptr) {} #ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document __host__ __device__ __forceinline__ self_type operator++() { self_type i = *this; ptr++; return i; } __host__ __device__ __forceinline__ self_type operator++(int junk) { ptr++; return *this; } __host__ __device__ __forceinline__ reference operator*() { return conversion_op(*ptr); } template <typename SizeT> __host__ __device__ __forceinline__ self_type operator+(SizeT n) { TransformIteratorRA retval(ptr + n, conversion_op); return retval; } template <typename SizeT> __host__ __device__ __forceinline__ self_type operator-(SizeT n) { TransformIteratorRA retval(ptr - n, conversion_op); return retval; } template <typename SizeT> __host__ __device__ __forceinline__ reference operator[](SizeT n) { return conversion_op(ptr[n]); } __host__ __device__ __forceinline__ pointer operator->() { return &conversion_op(*ptr); } __host__ __device__ __forceinline__ bool operator==(const self_type& rhs) { return (ptr == rhs.ptr); } __host__ __device__ __forceinline__ bool operator!=(const self_type& rhs) { return (ptr != rhs.ptr); } #endif // DOXYGEN_SHOULD_SKIP_THIS }; /** * \brief A simple random-access iterator for loading primitive values through texture cache. * * \par Overview * TexIteratorRA is a random-access iterator that wraps a native * device pointer of type <tt>T*</tt>. References made through TexIteratorRA * causes values to be pulled through texture cache. * * \par Usage Considerations * - Can only be used with primitive types (e.g., \p char, \p int, \p float), with the exception of \p double * - Only one TexIteratorRA or TexIteratorRA of a certain \p InputType can be bound at any given time (per host thread) * * \tparam InputType The value type of the pointer being wrapped * \tparam ConversionOp Unary functor type for mapping objects of type \p InputType to type \p OutputType. Must have member <tt>OutputType operator()(const InputType &datum)</tt>. 
* \tparam OutputType The value type of this iterator */ template <typename T> class TexIteratorRA { public: #ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document typedef TexIteratorRA self_type; typedef T value_type; typedef T reference; typedef T* pointer; typedef std::random_access_iterator_tag iterator_category; typedef int difference_type; #endif // DOXYGEN_SHOULD_SKIP_THIS /// Tag identifying iterator type as being texture-bindable typedef void TexBindingTag; private: T* ptr; size_t tex_align_offset; cudaTextureObject_t tex_obj; public: /** * \brief Constructor */ __host__ __device__ __forceinline__ TexIteratorRA() : ptr(NULL), tex_align_offset(0), tex_obj(0) {} /// \brief Bind iterator to texture reference cudaError_t BindTexture( T *ptr, ///< Native pointer to wrap that is aligned to cudaDeviceProp::textureAlignment size_t bytes, ///< Number of items size_t tex_align_offset = 0) ///< Offset (in items) from ptr denoting the position of the iterator { this->ptr = ptr; this->tex_align_offset = tex_align_offset; int ptx_version; cudaError_t error = cudaSuccess; if (CubDebug(error = PtxVersion(ptx_version))) return error; if (ptx_version >= 300) { // Use texture object cudaChannelFormatDesc channel_desc = cudaCreateChannelDesc<T>(); cudaResourceDesc res_desc; cudaTextureDesc tex_desc; memset(&res_desc, 0, sizeof(cudaResourceDesc)); memset(&tex_desc, 0, sizeof(cudaTextureDesc)); res_desc.resType = cudaResourceTypeLinear; res_desc.res.linear.devPtr = ptr; res_desc.res.linear.desc = channel_desc; res_desc.res.linear.sizeInBytes = bytes; tex_desc.readMode = cudaReadModeElementType; return cudaCreateTextureObject(&tex_obj, &res_desc, &tex_desc, NULL); } else { // Use texture reference return TexIteratorRef<T>::BindTexture(ptr); } } /// \brief Unbind iterator to texture reference cudaError_t UnbindTexture() { int ptx_version; cudaError_t error = cudaSuccess; if (CubDebug(error = PtxVersion(ptx_version))) return error; if (ptx_version < 300) { // Use texture reference return TexIteratorRef<T>::UnbindTexture(); } else { // Use texture object return cudaDestroyTextureObject(tex_obj); } } #ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document __host__ __device__ __forceinline__ self_type operator++() { self_type i = *this; ptr++; tex_align_offset++; return i; } __host__ __device__ __forceinline__ self_type operator++(int junk) { ptr++; tex_align_offset++; return *this; } __host__ __device__ __forceinline__ reference operator*() { #if (CUB_PTX_ARCH == 0) // Simply dereference the pointer on the host return *ptr; #elif (CUB_PTX_ARCH < 300) // Use the texture reference return tex1Dfetch(TexIteratorRef<T>::ref, tex_align_offset); #else // Use the texture object return conversion_op(tex1Dfetch<InputType>(tex_obj, tex_align_offset)); #endif } template <typename SizeT> __host__ __device__ __forceinline__ self_type operator+(SizeT n) { TexIteratorRA retval; retval.ptr = ptr + n; retval.tex_align_offset = tex_align_offset + n; return retval; } template <typename SizeT> __host__ __device__ __forceinline__ self_type operator-(SizeT n) { TexIteratorRA retval; retval.ptr = ptr - n; retval.tex_align_offset = tex_align_offset - n; return retval; } template <typename SizeT> __host__ __device__ __forceinline__ reference operator[](SizeT n) { #if (CUB_PTX_ARCH == 0) // Simply dereference the pointer on the host return ptr[n]; #elif (CUB_PTX_ARCH < 300) // Use the texture reference return tex1Dfetch(TexIteratorRef<T>::ref, tex_align_offset + n); #else // Use the texture object return 
conversion_op(tex1Dfetch<InputType>(tex_obj, tex_align_offset + n)); #endif } __host__ __device__ __forceinline__ pointer operator->() { #if (CUB_PTX_ARCH == 0) // Simply dereference the pointer on the host return &(*ptr); #elif (CUB_PTX_ARCH < 300) // Use the texture reference return &(tex1Dfetch(TexIteratorRef<T>::ref, tex_align_offset)); #else // Use the texture object return conversion_op(tex1Dfetch<InputType>(tex_obj, tex_align_offset)); #endif } __host__ __device__ __forceinline__ bool operator==(const self_type& rhs) { return (ptr == rhs.ptr); } __host__ __device__ __forceinline__ bool operator!=(const self_type& rhs) { return (ptr != rhs.ptr); } #endif // DOXYGEN_SHOULD_SKIP_THIS }; /** * \brief A simple random-access transform iterator for loading primitive values through texture cache and and subsequently applying a transformation operator. * * \par Overview * TexTransformIteratorRA is a random-access iterator that wraps both a native * device pointer of type <tt>InputType*</tt> and a unary conversion functor of * type \p ConversionOp. \p OutputType references are made by pulling \p InputType * values through the texture cache and then transformed them using the * \p ConversionOp instance. * * \par Usage Considerations * - Can only be used with primitive types (e.g., \p char, \p int, \p float), with the exception of \p double * - Only one TexIteratorRA or TexTransformIteratorRA of a certain \p InputType can be bound at any given time (per host thread) * * \tparam InputType The value type of the pointer being wrapped * \tparam ConversionOp Unary functor type for mapping objects of type \p InputType to type \p OutputType. Must have member <tt>OutputType operator()(const InputType &datum)</tt>. * \tparam OutputType The value type of this iterator */ template <typename OutputType, typename ConversionOp, typename InputType> class TexTransformIteratorRA { public: #ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document typedef TexTransformIteratorRA self_type; typedef OutputType value_type; typedef OutputType reference; typedef OutputType* pointer; typedef std::random_access_iterator_tag iterator_category; typedef int difference_type; #endif // DOXYGEN_SHOULD_SKIP_THIS /// Tag identifying iterator type as being texture-bindable typedef void TexBindingTag; private: ConversionOp conversion_op; InputType* ptr; size_t tex_align_offset; cudaTextureObject_t tex_obj; public: /** * \brief Constructor */ TexTransformIteratorRA( ConversionOp conversion_op) ///< Binary transformation functor : conversion_op(conversion_op), ptr(NULL), tex_align_offset(0), tex_obj(0) {} /// \brief Bind iterator to texture reference cudaError_t BindTexture( InputType* ptr, ///< Native pointer to wrap that is aligned to cudaDeviceProp::textureAlignment size_t bytes, ///< Number of items size_t tex_align_offset = 0) ///< Offset (in items) from ptr denoting the position of the iterator { this->ptr = ptr; this->tex_align_offset = tex_align_offset; int ptx_version; cudaError_t error = cudaSuccess; if (CubDebug(error = PtxVersion(ptx_version))) return error; if (ptx_version >= 300) { // Use texture object cudaChannelFormatDesc channel_desc = cudaCreateChannelDesc<InputType>(); cudaResourceDesc res_desc; cudaTextureDesc tex_desc; memset(&res_desc, 0, sizeof(cudaResourceDesc)); memset(&tex_desc, 0, sizeof(cudaTextureDesc)); res_desc.resType = cudaResourceTypeLinear; res_desc.res.linear.devPtr = ptr; res_desc.res.linear.desc = channel_desc; res_desc.res.linear.sizeInBytes = bytes; tex_desc.readMode = cudaReadModeElementType; 
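// With the resource and texture descriptors filled in above, create a texture
// object over the linear device buffer; subsequent tex1Dfetch() reads then go
// through the texture cache with element-type (non-normalized) loads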
return cudaCreateTextureObject(&tex_obj, &res_desc, &tex_desc, NULL); } else { // Use texture reference return TexIteratorRef<InputType>::BindTexture(ptr); } } /// \brief Unbind iterator to texture reference cudaError_t UnbindTexture() { int ptx_version; cudaError_t error = cudaSuccess; if (CubDebug(error = PtxVersion(ptx_version))) return error; if (ptx_version >= 300) { // Use texture object return cudaDestroyTextureObject(tex_obj); } else { // Use texture reference return TexIteratorRef<InputType>::UnbindTexture(); } } #ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document __host__ __device__ __forceinline__ self_type operator++() { self_type i = *this; ptr++; tex_align_offset++; return i; } __host__ __device__ __forceinline__ self_type operator++(int junk) { ptr++; tex_align_offset++; return *this; } __host__ __device__ __forceinline__ reference operator*() { #if (CUB_PTX_ARCH == 0) // Simply dereference the pointer on the host return conversion_op(*ptr); #elif (CUB_PTX_ARCH < 300) // Use the texture reference return conversion_op(tex1Dfetch(TexIteratorRef<InputType>::ref, tex_align_offset)); #else // Use the texture object return conversion_op(tex1Dfetch<InputType>(tex_obj, tex_align_offset)); #endif } template <typename SizeT> __host__ __device__ __forceinline__ self_type operator+(SizeT n) { TexTransformIteratorRA retval(conversion_op); retval.ptr = ptr + n; retval.tex_align_offset = tex_align_offset + n; return retval; } template <typename SizeT> __host__ __device__ __forceinline__ self_type operator-(SizeT n) { TexTransformIteratorRA retval(conversion_op); retval.ptr = ptr - n; retval.tex_align_offset = tex_align_offset - n; return retval; } template <typename SizeT> __host__ __device__ __forceinline__ reference operator[](SizeT n) { #if (CUB_PTX_ARCH == 0) // Simply dereference the pointer on the host return conversion_op(ptr[n]); #elif (CUB_PTX_ARCH < 300) // Use the texture reference return conversion_op(tex1Dfetch(TexIteratorRef<InputType>::ref, tex_align_offset + n)); #else // Use the texture object return conversion_op(tex1Dfetch<InputType>(tex_obj, tex_align_offset + n)); #endif } __host__ __device__ __forceinline__ pointer operator->() { #if (CUB_PTX_ARCH == 0) // Simply dereference the pointer on the host return &conversion_op(*ptr); #elif (CUB_PTX_ARCH < 300) // Use the texture reference return &conversion_op(tex1Dfetch(TexIteratorRef<InputType>::ref, tex_align_offset)); #else // Use the texture object return &conversion_op(tex1Dfetch<InputType>(tex_obj, tex_align_offset)); #endif } __host__ __device__ __forceinline__ bool operator==(const self_type& rhs) { return (ptr == rhs.ptr); } __host__ __device__ __forceinline__ bool operator!=(const self_type& rhs) { return (ptr != rhs.ptr); } #endif // DOXYGEN_SHOULD_SKIP_THIS }; /** @} */ // end group UtilModule } // CUB namespace CUB_NS_POSTFIX // Optional outer namespace(s)
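/**
 * \par Example (editorial sketch)
 * The snippet below illustrates wrapping a device pointer with TransformIteratorRA
 * so that every dereference applies a unary functor.  The \p Square functor and
 * \p SquareSumKernel are hypothetical illustrations, not part of CUB.
 * \code
 * struct Square
 * {
 *     __host__ __device__ __forceinline__ float operator()(const float &x) const
 *     {
 *         return x * x;
 *     }
 * };
 *
 * __global__ void SquareSumKernel(
 *     cub::TransformIteratorRA<float, Square, float> in,
 *     int                                            num_items,
 *     float                                          *d_out)
 * {
 *     float sum = 0.0f;
 *     for (int i = 0; i < num_items; ++i)
 *         sum += in[i];                          // each read returns x[i] * x[i]
 *     *d_out = sum;
 * }
 *
 * // Host side (d_in and d_out are device allocations):
 * //   cub::TransformIteratorRA<float, Square, float> itr(d_in, Square());
 * //   SquareSumKernel<<<1, 1>>>(itr, num_items, d_out);
 * \endcode
 */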
#include "caffe/layers/lrn_layer.hpp" #include "caffe/util/benchmark.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { #ifdef TODO_REFACTOR #ifdef USE_CUDA template<typename Dtype, typename MItype, typename MOtype> __global__ void LRNFillScale(const int_tp nthreads, const Dtype* const in, const int_tp num, const int_tp channels, const int_tp height, const int_tp width, const int_tp size, const Dtype alpha_over_size, const Dtype k, Dtype* const scale) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local offset const int_tp w = index % width; const int_tp h = (index / width) % height; const int_tp n = index / width / height; const int_tp offset = (n * channels * height + h) * width + w; const int_tp step = height * width; const Dtype* const in_off = in + offset; Dtype* const scale_off = scale + offset; int_tp head = 0; const int_tp pre_pad = (size - 1) / 2; const int_tp post_pad = size - pre_pad - 1; Dtype accum_scale = 0; // fill the scale at [n, :, h, w] // accumulate values while (head < post_pad && head < channels) { accum_scale += in_off[head * step] * in_off[head * step]; ++head; } // both add and subtract while (head < channels) { accum_scale += in_off[head * step] * in_off[head * step]; if (head - size >= 0) { accum_scale -= in_off[(head - size) * step] * in_off[(head - size) * step]; } scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size; ++head; } // subtract only while (head < channels + post_pad) { if (head - size >= 0) { accum_scale -= in_off[(head - size) * step] * in_off[(head - size) * step]; } scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size; ++head; } } } #endif // USE_CUDA template<typename Dtype, typename MItype, typename MOtype> void LRNLayer<Dtype, MItype, MOtype>::Forward_gpu(const vector<Blob<MItype>*>& bottom, const vector<Blob<MOtype>*>& top) { switch (this->layer_param_.lrn_param().norm_region()) { case LRNParameter_NormRegion_ACROSS_CHANNELS: CrossChannelForward_gpu(bottom, top); break; case LRNParameter_NormRegion_WITHIN_CHANNEL: WithinChannelForward(bottom, top); break; default: LOG(FATAL)<< "Unknown normalization region."; } } // TODO: check if it would be faster to just put it into the previous kernel. 
#ifdef USE_CUDA template<typename Dtype, typename MItype, typename MOtype> __global__ void LRNComputeOutput(const int_tp nthreads, const Dtype* const in, const Dtype* const scale, const Dtype negative_beta, Dtype* const out) { CUDA_KERNEL_LOOP(index, nthreads) { out[index] = in[index] * pow(scale[index], negative_beta); } } #endif // USE_CUDA template<typename Dtype, typename MItype, typename MOtype> void LRNLayer<Dtype, MItype, MOtype>::CrossChannelForward_fuse_pooling_gpu( const vector<Blob<MItype>*>& bottom, const vector<Blob<MOtype>*>& top, bool use_fuse) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); CHECK(IsFusedWithPoolMax() && this->device_->backend() == BACKEND_OpenCL); viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_->id()); viennacl::ocl::program &program = this->device_->program(); if (use_fuse) { viennacl::ocl::kernel &oclk_lrn_fill = program.get_kernel( CL_KERNEL_SELECT("lrn_fuse_pool_max")); #define TILE_W 16 #define TILE_H 8 size_t simd_size = TILE_W; cl_uint argIdx = 0; const int_tp tile_pooled_block_h = (TILE_H - pool_h_) / pool_stride_h_ + 1; const int_tp tile_pooled_block_w = (TILE_W - pool_w_) / pool_stride_w_ + 1; const int tiled_width = (width_ + tile_pooled_block_w * pool_stride_w_ - 1) / (tile_pooled_block_w * pool_stride_w_); const int tiled_height = (height_ + tile_pooled_block_h * pool_stride_h_ - 1) / (tile_pooled_block_h * pool_stride_h_); int_tp n_threads = num_ * tiled_width * tiled_height; size_t global_work_size_[2] = {(size_t)n_threads, simd_size}; size_t local_work_size[2] = {1, simd_size}; oclk_lrn_fill.arg(argIdx++, WrapHandle((cl_mem) bottom_data, &ctx)); oclk_lrn_fill.arg(argIdx++, channels_); oclk_lrn_fill.arg(argIdx++, height_); oclk_lrn_fill.arg(argIdx++, width_); oclk_lrn_fill.arg(argIdx++, tiled_height); oclk_lrn_fill.arg(argIdx++, tiled_width); oclk_lrn_fill.arg(argIdx++, size_); oclk_lrn_fill.arg(argIdx++, alpha_ / size_); oclk_lrn_fill.arg(argIdx++, fixup_arg_type(k_)); oclk_lrn_fill.arg(argIdx++, WrapHandle((cl_mem) top_data, &ctx)); oclk_lrn_fill.arg(argIdx++, fixup_arg_type(-beta_)); oclk_lrn_fill.arg(argIdx++, pool_h_); oclk_lrn_fill.arg(argIdx++, pool_w_); oclk_lrn_fill.arg(argIdx++, pool_stride_h_); oclk_lrn_fill.arg(argIdx++, pool_stride_w_); oclk_lrn_fill.arg(argIdx++, pooled_height_); oclk_lrn_fill.arg(argIdx++, pooled_width_); oclk_lrn_fill.arg(argIdx++, tile_pooled_block_h); oclk_lrn_fill.arg(argIdx++, tile_pooled_block_w); OCL_CHECK(clEnqueueNDRangeKernel(ctx.get_queue().handle().get(), oclk_lrn_fill.handle().get(), 2, NULL, global_work_size_, local_work_size, 0, NULL, NULL)); } else { Dtype* top_lrn_data = lrn_top_blob_.mutable_gpu_data(); // Do LRN firstly. 
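// Non-fused fallback: stage the LRN result in lrn_top_blob_ via the
// lrn_full_no_scale kernel, then run max_pool_forward_no_mask on that
// intermediate buffer to produce top_data.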
cl_uint argIdx = 0; int_tp n_threads = num_ * height_ * width_; size_t global_work_size_[1] = {(size_t)n_threads}; viennacl::ocl::kernel &oclk_lrn_fill = program.get_kernel( CL_KERNEL_SELECT("lrn_full_no_scale")); oclk_lrn_fill.arg(argIdx++, n_threads); oclk_lrn_fill.arg(argIdx++, WrapHandle((cl_mem) bottom_data, &ctx)); oclk_lrn_fill.arg(argIdx++, num_); oclk_lrn_fill.arg(argIdx++, channels_); oclk_lrn_fill.arg(argIdx++, height_); oclk_lrn_fill.arg(argIdx++, width_); oclk_lrn_fill.arg(argIdx++, size_); oclk_lrn_fill.arg(argIdx++, alpha_ / size_); oclk_lrn_fill.arg(argIdx++, fixup_arg_type(k_)); oclk_lrn_fill.arg(argIdx++, WrapHandle((cl_mem) top_lrn_data, &ctx)); oclk_lrn_fill.arg(argIdx++, fixup_arg_type(-beta_)); OCL_CHECK(clEnqueueNDRangeKernel(ctx.get_queue().handle().get(), oclk_lrn_fill.handle().get(), 1, NULL, global_work_size_, NULL, 0, NULL, NULL)); // Do pooling. viennacl::ocl::kernel &oclk_max_pool_forward = program.get_kernel( CL_KERNEL_SELECT("max_pool_forward_no_mask")); int_tp count = pooled_width_ * pooled_height_ * channels_ * num_; viennacl::ocl::enqueue( oclk_max_pool_forward(count, WrapHandle((cl_mem) top_lrn_data, &ctx), num_, channels_, height_, width_, pooled_height_, pooled_width_, pool_h_, pool_w_, pool_stride_h_, pool_stride_w_, 0, 0, WrapHandle((cl_mem) top_data, &ctx)), ctx.get_queue()); } } template<typename Dtype, typename MItype, typename MOtype> void LRNLayer<Dtype, MItype, MOtype>::CrossChannelForward_gpu( const vector<Blob<MOtype>*>& top) { // First, compute scale const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); Dtype* scale_data = scale_.mutable_gpu_data(); if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA // We will launch one kernel for each pixel location, and have the kernel // go through all the channels. 
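// i.e. num_ * height_ * width_ threads in total; each thread slides a running
// sum of squares along the channel axis (LRNFillScale), and a second kernel
// then applies the -beta power element-wise (LRNComputeOutput).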
int_tp n_threads = num_ * height_ * width_; // NOLINT_NEXT_LINE(whitespace/operators) LRNFillScale CUDA_KERNEL(CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS)( n_threads, bottom_data, num_, channels_, height_, width_, size_, alpha_ / size_, k_, scale_data); CUDA_POST_KERNEL_CHECK; n_threads = bottom[0]->count(); // NOLINT_NEXT_LINE(whitespace/operators) LRNComputeOutput CUDA_KERNEL(CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS)( n_threads, bottom_data, scale_data, -beta_, top_data); CUDA_POST_KERNEL_CHECK; #endif // USE_CUDA } else { #ifdef USE_OPENCL viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_->id()); viennacl::ocl::program &program = this->device_->program(); if (this->phase_ == caffe::TRAIN) { cl_uint argIdx = 0; int_tp n_threads = num_ * height_ * width_; size_t global_work_size_[1] = {(size_t)n_threads}; viennacl::ocl::kernel &oclk_lrn_fill = program.get_kernel( CL_KERNEL_SELECT("lrn_full")); oclk_lrn_fill.arg(argIdx++, n_threads); oclk_lrn_fill.arg(argIdx++, WrapHandle((cl_mem) bottom_data, &ctx)); oclk_lrn_fill.arg(argIdx++, num_); oclk_lrn_fill.arg(argIdx++, channels_); oclk_lrn_fill.arg(argIdx++, height_); oclk_lrn_fill.arg(argIdx++, width_); oclk_lrn_fill.arg(argIdx++, size_); oclk_lrn_fill.arg(argIdx++, alpha_ / size_); oclk_lrn_fill.arg(argIdx++, fixup_arg_type(k_)); oclk_lrn_fill.arg(argIdx++, WrapHandle((cl_mem) scale_data, &ctx)); oclk_lrn_fill.arg(argIdx++, WrapHandle((cl_mem) top_data, &ctx)); oclk_lrn_fill.arg(argIdx++, fixup_arg_type(-beta_)); OCL_CHECK(clEnqueueNDRangeKernel(ctx.get_queue().handle().get(), oclk_lrn_fill.handle().get(), 1, NULL, global_work_size_, NULL, 0, NULL, NULL)); } else { if (!IsFused()) { cl_uint argIdx = 0; int_tp n_threads = num_ * height_ * width_; size_t global_work_size_[1] = {(size_t)n_threads}; viennacl::ocl::kernel &oclk_lrn_fill = program.get_kernel( CL_KERNEL_SELECT("lrn_full_no_scale")); oclk_lrn_fill.arg(argIdx++, n_threads); oclk_lrn_fill.arg(argIdx++, WrapHandle((cl_mem) bottom_data, &ctx)); oclk_lrn_fill.arg(argIdx++, num_); oclk_lrn_fill.arg(argIdx++, channels_); oclk_lrn_fill.arg(argIdx++, height_); oclk_lrn_fill.arg(argIdx++, width_); oclk_lrn_fill.arg(argIdx++, size_); oclk_lrn_fill.arg(argIdx++, alpha_ / size_); oclk_lrn_fill.arg(argIdx++, fixup_arg_type(k_)); oclk_lrn_fill.arg(argIdx++, WrapHandle((cl_mem) top_data, &ctx)); oclk_lrn_fill.arg(argIdx++, fixup_arg_type(-beta_)); OCL_CHECK(clEnqueueNDRangeKernel(ctx.get_queue().handle().get(), oclk_lrn_fill.handle().get(), 1, NULL, global_work_size_, NULL, 0, NULL, NULL)); } else if (IsFusedWithPoolMax()) { // We can't make sure the fused kernel be the faster for all platforms. // have to apply a simple tuning here. if (this->device_->CheckCapability("cl_intel_subgroups")) { if (fuse_tuned_) { CrossChannelForward_fuse_pooling_gpu(bottom, top, tuned_use_fuse_); } else { float elapsedTime[2]; bool use_fuse[2] = {true, false}; // warm up. 
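// (Both variants are run once below to warm up kernel compilation and caches;
// the timed loop then averages loop_cnt launches per variant, and the faster
// choice is cached in tuned_use_fuse_ for subsequent forward passes.)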
CrossChannelForward_fuse_pooling_gpu(bottom, top, true); CrossChannelForward_fuse_pooling_gpu(bottom, top, false); for (int i = 0; i < 2; i++) { Timer timer; timer.initted(); timer.Start(); int loop_cnt = 2; for (int j = 0; j < loop_cnt; j++) { CrossChannelForward_fuse_pooling_gpu(bottom, top, use_fuse[i]); } timer.Stop(); elapsedTime[i] = timer.MilliSeconds() / loop_cnt; } tuned_use_fuse_ = elapsedTime[0] < elapsedTime[1]; fuse_tuned_ = true; } } else { CrossChannelForward_fuse_pooling_gpu(bottom, top, false); } } } #endif // USE_OPENCL } } #ifdef HAS_HALF_SUPPORT template void LRNLayer<half>::CrossChannelForward_gpu( const vector<Blob<half>*>& bottom, const vector<Blob<half>*>& top); template void LRNLayer<half>::CrossChannelForward_fuse_pooling_gpu( const vector<Blob<half>*>& bottom, const vector<Blob<half>*>& top, bool); #endif template void LRNLayer<float>::CrossChannelForward_gpu( const vector<Blob<float>*>& bottom, const vector<Blob<float>*>& top); template void LRNLayer<double>::CrossChannelForward_gpu( const vector<Blob<double>*>& bottom, const vector<Blob<double>*>& top); template void LRNLayer<float>::CrossChannelForward_fuse_pooling_gpu( const vector<Blob<float>*>& bottom, const vector<Blob<float>*>& top, bool); template void LRNLayer<double>::CrossChannelForward_fuse_pooling_gpu( const vector<Blob<double>*>& bottom, const vector<Blob<double>*>& top, bool); template<typename Dtype, typename MItype, typename MOtype> void LRNLayer<Dtype, MItype, MOtype>::Backward_gpu(const vector<Blob<MOtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<MItype>*>& bottom) { switch (this->layer_param_.lrn_param().norm_region()) { case LRNParameter_NormRegion_ACROSS_CHANNELS: CrossChannelBackward_gpu(top, propagate_down, bottom); break; case LRNParameter_NormRegion_WITHIN_CHANNEL: WithinChannelBackward(top, propagate_down, bottom); break; default: LOG(FATAL)<< "Unknown normalization region."; } } #ifdef USE_CUDA template<typename Dtype, typename MItype, typename MOtype> __global__ void LRNComputeDiff(const int_tp nthreads, const Dtype* const bottom_data, const Dtype* const top_data, const Dtype* const scale, const Dtype* const top_diff, const int_tp num, const int_tp channels, const int_tp height, const int_tp width, const int_tp size, const Dtype negative_beta, const Dtype cache_ratio, Dtype* const bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local offset const int_tp w = index % width; const int_tp h = (index / width) % height; const int_tp n = index / width / height; const int_tp offset = (n * channels * height + h) * width + w; const int_tp step = height * width; const Dtype* const bottom_off = bottom_data + offset; const Dtype* const top_off = top_data + offset; const Dtype* const scale_off = scale + offset; const Dtype* const top_diff_off = top_diff + offset; Dtype* const bottom_diff_off = bottom_diff + offset; int_tp head = 0; const int_tp pre_pad = size - (size + 1) / 2; const int_tp post_pad = size - pre_pad - 1; Dtype accum_ratio = 0; // accumulate values while (head < post_pad && head < channels) { accum_ratio += top_diff_off[head * step] * top_off[head * step] / scale_off[head * step]; ++head; } // both add and subtract while (head < channels) { accum_ratio += top_diff_off[head * step] * top_off[head * step] / scale_off[head * step]; if (head - size >= 0) { accum_ratio -= top_diff_off[(head - size) * step] * top_off[(head - size) * step] / scale_off[(head - size) * step]; } bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad) 
* step] * pow(scale_off[(head - post_pad) * step], negative_beta) - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio; ++head; } // subtract only while (head < channels + post_pad) { if (head - size >= 0) { accum_ratio -= top_diff_off[(head - size) * step] * top_off[(head - size) * step] / scale_off[(head - size) * step]; } bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad) * step] * pow(scale_off[(head - post_pad) * step], negative_beta) - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio; ++head; } } } #endif // USE_CUDA template<typename Dtype, typename MItype, typename MOtype> void LRNLayer<Dtype, MItype, MOtype>::CrossChannelBackward_gpu( const vector<Blob<MOtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<MItype>*>& bottom) { int_tp n_threads = num_ * height_ * width_; if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA // NOLINT_NEXT_LINE(whitespace/operators) LRNComputeDiff CUDA_KERNEL(CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS)( n_threads, bottom[0]->gpu_data(), top[0]->gpu_data(), scale_.gpu_data(), top[0]->gpu_diff(), num_, channels_, height_, width_, size_, -beta_, Dtype(2. * alpha_ * beta_ / size_), bottom[0]->mutable_gpu_diff()); #endif // USE_CUDA } else { #ifdef USE_OPENCL viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_->id()); viennacl::ocl::program &program = this->device_->program(); viennacl::ocl::kernel &oclk_lrn = program.get_kernel( CL_KERNEL_SELECT("lrn_compute_diff")); viennacl::ocl::enqueue( oclk_lrn(n_threads, WrapHandle((cl_mem) (bottom[0]->gpu_data()), &ctx), WrapHandle((cl_mem) (top[0]->gpu_data()), &ctx), WrapHandle((cl_mem) (scale_.gpu_data()), &ctx), WrapHandle((cl_mem) (top[0]->gpu_diff()), &ctx), num_, channels_, height_, width_, size_, fixup_arg_type(-beta_), fixup_arg_type(Dtype(2. * alpha_ * beta_ / size_)), WrapHandle((cl_mem) (bottom[0]->mutable_gpu_diff()), &ctx)), ctx.get_queue()); #endif // USE_OPENCL } } #ifdef HAS_HALF_SUPPORT template void LRNLayer<half>::CrossChannelBackward_gpu( const vector<Blob<half>*>& top, const vector<bool>& propagate_down, const vector<Blob<half>*>& bottom); #endif template void LRNLayer<float>::CrossChannelBackward_gpu( const vector<Blob<float>*>& top, const vector<bool>& propagate_down, const vector<Blob<float>*>& bottom); template void LRNLayer<double>::CrossChannelBackward_gpu( const vector<Blob<double>*>& top, const vector<bool>& propagate_down, const vector<Blob<double>*>& bottom); INSTANTIATE_CLASST_FUNC_3T_GUARDED(LRNLayer); #endif // TODO_REFACTOR } // namespace caffe
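// ---------------------------------------------------------------------------
// Editorial sketch (not part of Caffe): a minimal CPU reference of the
// across-channel LRN forward pass implemented by LRNFillScale/LRNComputeOutput
// above, handy for checking the GPU kernels on small NCHW tensors.  The
// function name lrn_across_channels_reference() is hypothetical.
// ---------------------------------------------------------------------------
#include <algorithm>
#include <cmath>
#include <vector>

static void lrn_across_channels_reference(const std::vector<float>& in,
                                          std::vector<float>& out,
                                          int num, int channels, int height, int width,
                                          int size, float alpha, float beta, float k) {
  out.resize(in.size());
  const int pre_pad = (size - 1) / 2;
  for (int n = 0; n < num; ++n)
  for (int c = 0; c < channels; ++c)
  for (int h = 0; h < height; ++h)
  for (int w = 0; w < width; ++w) {
    // scale = k + (alpha/size) * sum of squares over the local channel window
    float accum = 0.f;
    const int c_start = std::max(0, c - pre_pad);
    const int c_end   = std::min(channels - 1, c - pre_pad + size - 1);
    for (int cc = c_start; cc <= c_end; ++cc) {
      const float v = in[((n * channels + cc) * height + h) * width + w];
      accum += v * v;
    }
    const float scale = k + accum * alpha / size;
    const int idx = ((n * channels + c) * height + h) * width + w;
    out[idx] = in[idx] * std::pow(scale, -beta);
  }
}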
#include <cooperative_groups.h> #if ( __CUDACC_VER_MAJOR__ > 10 ) #include <cooperative_groups/reduce.h> #endif namespace cg = cooperative_groups; // Check if C++17 is being used #if __cplusplus >= 201703L #include <thrust/complex.h> /////////////////////////////////////////////////////////////////////////////// // CHANNELIZER 8x8 // /////////////////////////////////////////////////////////////////////////////// // T is input type // U is output type template<typename T, typename U, int M = 8, int WARPSIZE = 32> __device__ void _cupy_channelizer_8x8( const int n_chans, const int n_taps, const int n_pts, const T *__restrict__ x, const T *__restrict__ h, U *__restrict__ y, T s_mem[M][M] ) { const auto block { cg::this_thread_block( ) }; const auto tile_32 { cg::tiled_partition<WARPSIZE>( block ) }; const auto tile { cg::tiled_partition<M>( tile_32 ) }; const auto btx { blockIdx.x * blockDim.x + threadIdx.x }; const auto tx { threadIdx.x }; const auto ty { threadIdx.y }; // Initialize shared memory // Evaluate type at compile-time if ( btx < n_chans && ty < n_taps ) { if constexpr ( std::is_same_v<T, thrust::complex<float>> || std::is_same_v<T, thrust::complex<double>> ) { s_mem[tx][ty] = thrust::conj( h[ty * n_chans + btx] ); } else { s_mem[tx][ty] = h[ty * n_chans + btx]; } } else { if constexpr ( std::is_same_v<T, thrust::complex<float>> || std::is_same_v<T, thrust::complex<double>> ) { s_mem[tx][ty] = T( 0.0, 0.0 ); } else { s_mem[tx][ty] = 0.0; } } block.sync( ); T local_h { s_mem[ty][tx] }; for ( auto bid = blockIdx.y; bid < n_pts; bid += gridDim.y ) { block.sync( ); // Load data if ( bid >= n_taps ) { if ( btx < n_chans && ty < n_taps ) { if constexpr ( std::is_same_v<T, thrust::complex<float>> || std::is_same_v<T, thrust::complex<double>> ) { s_mem[tx][( n_taps - 1 ) - ty] = thrust::conj( x[( ( bid - n_taps + 1 ) + ty ) * n_chans + ( n_chans - 1 - btx )] ); } else { s_mem[tx][( n_taps - 1 ) - ty] = x[( ( bid - n_taps + 1 ) + ty ) * n_chans + ( n_chans - 1 - btx )]; } } } else { if ( btx < n_chans && ty <= bid ) { if constexpr ( std::is_same_v<T, thrust::complex<float>> || std::is_same_v<T, thrust::complex<double>> ) { s_mem[tx][bid - ty] = thrust::conj( x[ty * n_chans + ( n_chans - 1 - btx )] ); } else { s_mem[tx][bid - ty] = x[ty * n_chans + ( n_chans - 1 - btx )]; } } else { if constexpr ( std::is_same_v<T, thrust::complex<float>> || std::is_same_v<T, thrust::complex<double>> ) { s_mem[tx][ty] = T( 0.0, 0.0 ); } else { s_mem[tx][ty] = 0.0; } } } block.sync( ); T local_reg { s_mem[ty][tx] }; T temp {}; U vv {}; // Perform compute if ( ( blockIdx.x * M + ty ) < n_chans ) { temp = local_h * local_reg; if constexpr ( std::is_same_v<T, thrust::complex<float>> || std::is_same_v<T, thrust::complex<double>> ) { vv.real( cg::reduce( tile, temp.real( ), cg::plus<typename U::value_type>( ) ) ); vv.imag( cg::reduce( tile, temp.imag( ), cg::plus<typename U::value_type>( ) ) ); } else { vv.real( cg::reduce( tile, temp, cg::plus<typename U::value_type>( ) ) ); } } // Store output if ( tx == 0 && ( blockIdx.x * M + ty ) < n_chans ) { y[bid * n_chans + ( blockIdx.x * M + ty )] = vv; } } } extern "C" __global__ void __launch_bounds__( 64 ) _cupy_channelizer_8x8_float32_complex64( const int n_chans, const int n_taps, const int n_pts, const float *__restrict__ x, const float *__restrict__ h, thrust::complex<float> *__restrict__ y ) { __shared__ float s_mem[8][8]; _cupy_channelizer_8x8<float, thrust::complex<float>>( n_chans, n_taps, n_pts, x, h, y, s_mem ); } extern "C" __global__ void 
__launch_bounds__( 64 ) _cupy_channelizer_8x8_complex64_complex64( const int n_chans, const int n_taps, const int n_pts, const thrust::complex<float> *__restrict__ x, const thrust::complex<float> *__restrict__ h, thrust::complex<float> *__restrict__ y ) { __shared__ thrust::complex<float> s_mem[8][8]; _cupy_channelizer_8x8<thrust::complex<float>, thrust::complex<float>>( n_chans, n_taps, n_pts, x, h, y, s_mem ); } extern "C" __global__ void __launch_bounds__( 64 ) _cupy_channelizer_8x8_float64_complex128( const int n_chans, const int n_taps, const int n_pts, const double *__restrict__ x, const double *__restrict__ h, thrust::complex<double> *__restrict__ y ) { __shared__ double s_mem[8][8]; _cupy_channelizer_8x8<double, thrust::complex<double>>( n_chans, n_taps, n_pts, x, h, y, s_mem ); } extern "C" __global__ void __launch_bounds__( 64 ) _cupy_channelizer_8x8_complex128_complex128( const int n_chans, const int n_taps, const int n_pts, const thrust::complex<double> *__restrict__ x, const thrust::complex<double> *__restrict__ h, thrust::complex<double> *__restrict__ y ) { __shared__ thrust::complex<double> s_mem[8][8]; _cupy_channelizer_8x8<thrust::complex<double>, thrust::complex<double>>( n_chans, n_taps, n_pts, x, h, y, s_mem ); } /////////////////////////////////////////////////////////////////////////////// // CHANNELIZER 16x16 // /////////////////////////////////////////////////////////////////////////////// // T is input type // U is output type template<typename T, typename U, int M = 16, int WARPSIZE = 32> __device__ void _cupy_channelizer_16x16( const int n_chans, const int n_taps, const int n_pts, const T *__restrict__ x, const T *__restrict__ h, U *__restrict__ y, T s_mem[M][M] ) { const auto block { cg::this_thread_block( ) }; const auto tile_32 { cg::tiled_partition<WARPSIZE>( block ) }; const auto tile { cg::tiled_partition<M>( tile_32 ) }; const auto btx { blockIdx.x * blockDim.x + threadIdx.x }; const auto tx { threadIdx.x }; const auto ty { threadIdx.y }; // Initialize shared memory // Evaluate type at compile-time if ( btx < n_chans && ty < n_taps ) { if constexpr ( std::is_same_v<T, thrust::complex<float>> || std::is_same_v<T, thrust::complex<double>> ) { s_mem[tx][ty] = thrust::conj( h[ty * n_chans + btx] ); } else { s_mem[tx][ty] = h[ty * n_chans + btx]; } } else { if constexpr ( std::is_same_v<T, thrust::complex<float>> || std::is_same_v<T, thrust::complex<double>> ) { s_mem[tx][ty] = T( 0.0, 0.0 ); } else { s_mem[tx][ty] = 0.0; } } block.sync( ); T local_h { s_mem[ty][tx] }; for ( auto bid = blockIdx.y; bid < n_pts; bid += gridDim.y ) { block.sync( ); // Load data if ( bid >= n_taps ) { if ( btx < n_chans && ty < n_taps ) { if constexpr ( std::is_same_v<T, thrust::complex<float>> || std::is_same_v<T, thrust::complex<double>> ) { s_mem[tx][( n_taps - 1 ) - ty] = thrust::conj( x[( ( bid - n_taps + 1 ) + ty ) * n_chans + ( n_chans - 1 - btx )] ); } else { s_mem[tx][( n_taps - 1 ) - ty] = x[( ( bid - n_taps + 1 ) + ty ) * n_chans + ( n_chans - 1 - btx )]; } } } else { if ( btx < n_chans && ty <= bid ) { if constexpr ( std::is_same_v<T, thrust::complex<float>> || std::is_same_v<T, thrust::complex<double>> ) { s_mem[tx][bid - ty] = thrust::conj( x[ty * n_chans + ( n_chans - 1 - btx )] ); } else { s_mem[tx][bid - ty] = x[ty * n_chans + ( n_chans - 1 - btx )]; } } else { if constexpr ( std::is_same_v<T, thrust::complex<float>> || std::is_same_v<T, thrust::complex<double>> ) { s_mem[tx][ty] = T( 0.0, 0.0 ); } else { s_mem[tx][ty] = 0.0; } } } block.sync( ); T local_reg { 
s_mem[ty][tx] }; T temp {}; U vv {}; // Perform compute if ( ( blockIdx.x * M + ty ) < n_chans ) { temp = local_h * local_reg; if constexpr ( std::is_same_v<T, thrust::complex<float>> || std::is_same_v<T, thrust::complex<double>> ) { vv.real( cg::reduce( tile, temp.real( ), cg::plus<typename U::value_type>( ) ) ); vv.imag( cg::reduce( tile, temp.imag( ), cg::plus<typename U::value_type>( ) ) ); } else { vv.real( cg::reduce( tile, temp, cg::plus<typename U::value_type>( ) ) ); } } // Store output if ( tx == 0 && ( blockIdx.x * M + ty ) < n_chans ) { y[bid * n_chans + ( blockIdx.x * M + ty )] = vv; } } } extern "C" __global__ void __launch_bounds__( 256 ) _cupy_channelizer_16x16_float32_complex64( const int n_chans, const int n_taps, const int n_pts, const float *__restrict__ x, const float *__restrict__ h, thrust::complex<float> *__restrict__ y ) { __shared__ float s_mem[16][16]; _cupy_channelizer_16x16<float, thrust::complex<float>>( n_chans, n_taps, n_pts, x, h, y, s_mem ); } extern "C" __global__ void __launch_bounds__( 256 ) _cupy_channelizer_16x16_complex64_complex64( const int n_chans, const int n_taps, const int n_pts, const thrust::complex<float> *__restrict__ x, const thrust::complex<float> *__restrict__ h, thrust::complex<float> *__restrict__ y ) { __shared__ thrust::complex<float> s_mem[16][16]; _cupy_channelizer_16x16<thrust::complex<float>, thrust::complex<float>>( n_chans, n_taps, n_pts, x, h, y, s_mem ); } extern "C" __global__ void __launch_bounds__( 256 ) _cupy_channelizer_16x16_float64_complex128( const int n_chans, const int n_taps, const int n_pts, const double *__restrict__ x, const double *__restrict__ h, thrust::complex<double> *__restrict__ y ) { __shared__ double s_mem[16][16]; _cupy_channelizer_16x16<double, thrust::complex<double>>( n_chans, n_taps, n_pts, x, h, y, s_mem ); } extern "C" __global__ void __launch_bounds__( 256 ) _cupy_channelizer_16x16_complex128_complex128( const int n_chans, const int n_taps, const int n_pts, const thrust::complex<double> *__restrict__ x, const thrust::complex<double> *__restrict__ h, thrust::complex<double> *__restrict__ y ) { __shared__ thrust::complex<double> s_mem[16][16]; _cupy_channelizer_16x16<thrust::complex<double>, thrust::complex<double>>( n_chans, n_taps, n_pts, x, h, y, s_mem ); } /////////////////////////////////////////////////////////////////////////////// // CHANNELIZER 32x32 // /////////////////////////////////////////////////////////////////////////////// // T is input type // U is output type template<typename T, typename U, int M = 32, int WARPSIZE = 32> __device__ void _cupy_channelizer_32x32( const int n_chans, const int n_taps, const int n_pts, const T *__restrict__ x, const T *__restrict__ h, U *__restrict__ y, T s_mem[M][M] ) { const auto block { cg::this_thread_block( ) }; const auto tile { cg::tiled_partition<WARPSIZE>( block ) }; const auto btx { blockIdx.x * blockDim.x + threadIdx.x }; const auto tx { threadIdx.x }; const auto ty { threadIdx.y }; // Initialize shared memory // Evaluate type at compile-time if ( btx < n_chans && ty < n_taps ) { if constexpr ( std::is_same_v<T, thrust::complex<float>> || std::is_same_v<T, thrust::complex<double>> ) { s_mem[tx][ty] = thrust::conj( h[ty * n_chans + btx] ); } else { s_mem[tx][ty] = h[ty * n_chans + btx]; } } else { if constexpr ( std::is_same_v<T, thrust::complex<float>> || std::is_same_v<T, thrust::complex<double>> ) { s_mem[tx][ty] = T( 0.0, 0.0 ); } else { s_mem[tx][ty] = 0.0; } } block.sync( ); T local_h { s_mem[ty][tx] }; for ( auto bid = blockIdx.y; 
bid < n_pts; bid += gridDim.y ) { block.sync( ); // Load data if ( bid >= n_taps ) { if ( btx < n_chans && ty < n_taps ) { if constexpr ( std::is_same_v<T, thrust::complex<float>> || std::is_same_v<T, thrust::complex<double>> ) { s_mem[tx][( n_taps - 1 ) - ty] = thrust::conj( x[( ( bid - n_taps + 1 ) + ty ) * n_chans + ( n_chans - 1 - btx )] ); } else { s_mem[tx][( n_taps - 1 ) - ty] = x[( ( bid - n_taps + 1 ) + ty ) * n_chans + ( n_chans - 1 - btx )]; } } } else { if ( btx < n_chans && ty <= bid ) { if constexpr ( std::is_same_v<T, thrust::complex<float>> || std::is_same_v<T, thrust::complex<double>> ) { s_mem[tx][bid - ty] = thrust::conj( x[ty * n_chans + ( n_chans - 1 - btx )] ); } else { s_mem[tx][bid - ty] = x[ty * n_chans + ( n_chans - 1 - btx )]; } } else { if constexpr ( std::is_same_v<T, thrust::complex<float>> || std::is_same_v<T, thrust::complex<double>> ) { s_mem[tx][ty] = T( 0.0, 0.0 ); } else { s_mem[tx][ty] = 0.0; } } } block.sync( ); T local_reg { s_mem[ty][tx] }; T temp {}; U vv {}; // Perform compute if ( ( blockIdx.x * M + ty ) < n_chans ) { temp = local_h * local_reg; if constexpr ( std::is_same_v<T, thrust::complex<float>> || std::is_same_v<T, thrust::complex<double>> ) { vv.real( cg::reduce( tile, temp.real( ), cg::plus<typename U::value_type>( ) ) ); vv.imag( cg::reduce( tile, temp.imag( ), cg::plus<typename U::value_type>( ) ) ); } else { vv.real( cg::reduce( tile, temp, cg::plus<typename U::value_type>( ) ) ); } } // Store output if ( tx == 0 && ( blockIdx.x * M + ty ) < n_chans ) { y[bid * n_chans + ( blockIdx.x * M + ty )] = vv; } } } extern "C" __global__ void __launch_bounds__( 1024 ) _cupy_channelizer_32x32_float32_complex64( const int n_chans, const int n_taps, const int n_pts, const float *__restrict__ x, const float *__restrict__ h, thrust::complex<float> *__restrict__ y ) { __shared__ float s_mem[32][32]; _cupy_channelizer_32x32<float, thrust::complex<float>>( n_chans, n_taps, n_pts, x, h, y, s_mem ); } extern "C" __global__ void __launch_bounds__( 1024 ) _cupy_channelizer_32x32_complex64_complex64( const int n_chans, const int n_taps, const int n_pts, const thrust::complex<float> *__restrict__ x, const thrust::complex<float> *__restrict__ h, thrust::complex<float> *__restrict__ y ) { __shared__ thrust::complex<float> s_mem[32][32]; _cupy_channelizer_32x32<thrust::complex<float>, thrust::complex<float>>( n_chans, n_taps, n_pts, x, h, y, s_mem ); } extern "C" __global__ void __launch_bounds__( 1024 ) _cupy_channelizer_32x32_float64_complex128( const int n_chans, const int n_taps, const int n_pts, const double *__restrict__ x, const double *__restrict__ h, thrust::complex<double> *__restrict__ y ) { __shared__ double s_mem[32][32]; _cupy_channelizer_32x32<double, thrust::complex<double>>( n_chans, n_taps, n_pts, x, h, y, s_mem ); } extern "C" __global__ void __launch_bounds__( 1024 ) _cupy_channelizer_32x32_complex128_complex128( const int n_chans, const int n_taps, const int n_pts, const thrust::complex<double> *__restrict__ x, const thrust::complex<double> *__restrict__ h, thrust::complex<double> *__restrict__ y ) { __shared__ thrust::complex<double> s_mem[32][32]; _cupy_channelizer_32x32<thrust::complex<double>, thrust::complex<double>>( n_chans, n_taps, n_pts, x, h, y, s_mem ); } #else // C++11 being used /////////////////////////////////////////////////////////////////////////////// // CUDA 10.1/10.2 // /////////////////////////////////////////////////////////////////////////////// #include <cuComplex.h> template<typename T, int M> __device__ T 
reduce_sum_tile_shfl( cg::thread_block_tile<M> g, T val ) { // Each iteration halves the number of active threads // Each thread adds its partial sum[i] to sum[lane+i] for ( int i = g.size( ) / 2; i > 0; i /= 2 ) { val += g.shfl_down( val, i ); } return val; // note: only thread 0 will return full sum } /////////////////////////////////////////////////////////////////////////////// // CHANNELIZER F/CF // /////////////////////////////////////////////////////////////////////////////// template<int M> __device__ void _cupy_channelizer_float32_complex64( const int n_chans, const int n_taps, const int n_pts, const float *__restrict__ x, const float *__restrict__ h, cuFloatComplex *__restrict__ y, float s_mem[M][M] ) { const auto block = cg::this_thread_block( ); const auto tile = cg::tiled_partition<M>( block ); const unsigned int btx { blockIdx.x * blockDim.x + threadIdx.x }; const unsigned int tx { threadIdx.x }; const unsigned int ty { threadIdx.y }; // Initialize shared memory // Evaluate type at compile-time if ( btx < n_chans && ty < n_taps ) { s_mem[tx][ty] = h[ty * n_chans + btx]; } else { s_mem[tx][ty] = 0.0f; } block.sync( ); float local_h { s_mem[ty][tx] }; for ( auto bid = blockIdx.y; bid < n_pts; bid += gridDim.y ) { block.sync( ); // Load data if ( bid >= n_taps ) { if ( btx < n_chans && ty < n_taps ) { s_mem[tx][( n_taps - 1 ) - ty] = x[( ( bid - n_taps + 1 ) + ty ) * n_chans + ( n_chans - 1 - btx )]; } } else { if ( btx < n_chans && ty <= bid ) { s_mem[tx][bid - ty] = x[ty * n_chans + ( n_chans - 1 - btx )]; } else { s_mem[tx][ty] = 0.0f; } } block.sync( ); float local_reg { s_mem[ty][tx] }; float temp {}; cuFloatComplex vv {}; // Perform compute if ( ( blockIdx.x * M + ty ) < n_chans ) { temp = local_h * local_reg; vv.x = reduce_sum_tile_shfl<float, M>( tile, temp ); } // Store output if ( tx == 0 && ( blockIdx.x * M + ty ) < n_chans ) { y[bid * n_chans + ( blockIdx.x * M + ty )] = vv; } } } extern "C" __global__ void __launch_bounds__( 64 ) _cupy_channelizer_8x8_float32_complex64( const int n_chans, const int n_taps, const int n_pts, const float *__restrict__ x, const float *__restrict__ h, cuFloatComplex *__restrict__ y ) { __shared__ float s_mem[8][8]; _cupy_channelizer_float32_complex64<8>( n_chans, n_taps, n_pts, x, h, y, s_mem ); } extern "C" __global__ void __launch_bounds__( 256 ) _cupy_channelizer_16x16_float32_complex64( const int n_chans, const int n_taps, const int n_pts, const float *__restrict__ x, const float *__restrict__ h, cuFloatComplex *__restrict__ y ) { __shared__ float s_mem[16][16]; _cupy_channelizer_float32_complex64<16>( n_chans, n_taps, n_pts, x, h, y, s_mem ); } extern "C" __global__ void __launch_bounds__( 1024 ) _cupy_channelizer_32x32_float32_complex64( const int n_chans, const int n_taps, const int n_pts, const float *__restrict__ x, const float *__restrict__ h, cuFloatComplex *__restrict__ y ) { __shared__ float s_mem[32][32]; _cupy_channelizer_float32_complex64<32>( n_chans, n_taps, n_pts, x, h, y, s_mem ); } /////////////////////////////////////////////////////////////////////////////// // CHANNELIZER CF/CF // /////////////////////////////////////////////////////////////////////////////// template<int M> __device__ void _cupy_channelizer_complex64_complex64( const int n_chans, const int n_taps, const int n_pts, const cuFloatComplex *__restrict__ x, const cuFloatComplex *__restrict__ h, cuFloatComplex *__restrict__ y, cuFloatComplex s_mem[M][M] ) { const auto block = cg::this_thread_block( ); const auto tile = cg::tiled_partition<M>( block ); 
const unsigned int btx { blockIdx.x * blockDim.x + threadIdx.x }; const unsigned int tx { threadIdx.x }; const unsigned int ty { threadIdx.y }; // Initialize shared memory // Evaluate type at compile-time if ( btx < n_chans && ty < n_taps ) { s_mem[tx][ty] = cuConjf( h[ty * n_chans + btx] ); } else { s_mem[tx][ty] = make_cuFloatComplex( 0.0f, 0.0f ); } block.sync( ); cuFloatComplex local_h { s_mem[ty][tx] }; for ( auto bid = blockIdx.y; bid < n_pts; bid += gridDim.y ) { block.sync( ); // Load data if ( bid >= n_taps ) { if ( btx < n_chans && ty < n_taps ) { s_mem[tx][( n_taps - 1 ) - ty] = cuConjf( x[( ( bid - n_taps + 1 ) + ty ) * n_chans + ( n_chans - 1 - btx )] ); } } else { if ( btx < n_chans && ty <= bid ) { s_mem[tx][bid - ty] = cuConjf( x[ty * n_chans + ( n_chans - 1 - btx )] ); } else { s_mem[tx][ty] = make_cuFloatComplex( 0.0f, 0.0f ); } } block.sync( ); cuFloatComplex local_reg { s_mem[ty][tx] }; cuFloatComplex temp {}; cuFloatComplex vv {}; // Perform compute if ( ( blockIdx.x * M + ty ) < n_chans ) { temp = cuCmulf( local_h, local_reg ); vv.x = reduce_sum_tile_shfl<float, M>( tile, temp.x ); vv.y = reduce_sum_tile_shfl<float, M>( tile, temp.y ); } // Store output if ( tx == 0 && ( blockIdx.x * M + ty ) < n_chans ) { y[bid * n_chans + ( blockIdx.x * M + ty )] = vv; } } } extern "C" __global__ void __launch_bounds__( 64 ) _cupy_channelizer_8x8_complex64_complex64( const int n_chans, const int n_taps, const int n_pts, const cuFloatComplex *__restrict__ x, const cuFloatComplex *__restrict__ h, cuFloatComplex *__restrict__ y ) { __shared__ cuFloatComplex s_mem[8][8]; _cupy_channelizer_complex64_complex64<8>( n_chans, n_taps, n_pts, x, h, y, s_mem ); } extern "C" __global__ void __launch_bounds__( 256 ) _cupy_channelizer_16x16_complex64_complex64( const int n_chans, const int n_taps, const int n_pts, const cuFloatComplex *__restrict__ x, const cuFloatComplex *__restrict__ h, cuFloatComplex *__restrict__ y ) { __shared__ cuFloatComplex s_mem[16][16]; _cupy_channelizer_complex64_complex64<16>( n_chans, n_taps, n_pts, x, h, y, s_mem ); } extern "C" __global__ void __launch_bounds__( 1024 ) _cupy_channelizer_32x32_complex64_complex64( const int n_chans, const int n_taps, const int n_pts, const cuFloatComplex *__restrict__ x, const cuFloatComplex *__restrict__ h, cuFloatComplex *__restrict__ y ) { __shared__ cuFloatComplex s_mem[32][32]; _cupy_channelizer_complex64_complex64<32>( n_chans, n_taps, n_pts, x, h, y, s_mem ); } /////////////////////////////////////////////////////////////////////////////// // CHANNELIZER D/CD // /////////////////////////////////////////////////////////////////////////////// template<int M> __device__ void _cupy_channelizer_float64_complex128( const int n_chans, const int n_taps, const int n_pts, const double *__restrict__ x, const double *__restrict__ h, cuDoubleComplex *__restrict__ y, double s_mem[M][M] ) { const auto block = cg::this_thread_block( ); const auto tile = cg::tiled_partition<M>( block ); const unsigned int btx { blockIdx.x * blockDim.x + threadIdx.x }; const unsigned int tx { threadIdx.x }; const unsigned int ty { threadIdx.y }; // Initialize shared memory // Evaluate type at compile-time if ( btx < n_chans && ty < n_taps ) { s_mem[tx][ty] = h[ty * n_chans + btx]; } else { s_mem[tx][ty] = 0.0; } block.sync( ); double local_h { s_mem[ty][tx] }; for ( auto bid = blockIdx.y; bid < n_pts; bid += gridDim.y ) { block.sync( ); // Load data if ( bid >= n_taps ) { if ( btx < n_chans && ty < n_taps ) { s_mem[tx][( n_taps - 1 ) - ty] = x[( ( bid - n_taps 
+ 1 ) + ty ) * n_chans + ( n_chans - 1 - btx )]; } } else { if ( btx < n_chans && ty <= bid ) { s_mem[tx][bid - ty] = x[ty * n_chans + ( n_chans - 1 - btx )]; } else { s_mem[tx][ty] = 0.0; } } block.sync( ); double local_reg { s_mem[ty][tx] }; double temp {}; cuDoubleComplex vv {}; // Perform compute if ( ( blockIdx.x * M + ty ) < n_chans ) { temp = local_h * local_reg; vv.x = reduce_sum_tile_shfl<double, M>( tile, temp ); } // Store output if ( tx == 0 && ( blockIdx.x * M + ty ) < n_chans ) { y[bid * n_chans + ( blockIdx.x * M + ty )] = vv; } } } extern "C" __global__ void __launch_bounds__( 64 ) _cupy_channelizer_8x8_float64_complex128( const int n_chans, const int n_taps, const int n_pts, const double *__restrict__ x, const double *__restrict__ h, cuDoubleComplex *__restrict__ y ) { __shared__ double s_mem[8][8]; _cupy_channelizer_float64_complex128<8>( n_chans, n_taps, n_pts, x, h, y, s_mem ); } extern "C" __global__ void __launch_bounds__( 256 ) _cupy_channelizer_16x16_float64_complex128( const int n_chans, const int n_taps, const int n_pts, const double *__restrict__ x, const double *__restrict__ h, cuDoubleComplex *__restrict__ y ) { __shared__ double s_mem[16][16]; _cupy_channelizer_float64_complex128<16>( n_chans, n_taps, n_pts, x, h, y, s_mem ); } extern "C" __global__ void __launch_bounds__( 1024 ) _cupy_channelizer_32x32_float64_complex128( const int n_chans, const int n_taps, const int n_pts, const double *__restrict__ x, const double *__restrict__ h, cuDoubleComplex *__restrict__ y ) { __shared__ double s_mem[32][32]; _cupy_channelizer_float64_complex128<32>( n_chans, n_taps, n_pts, x, h, y, s_mem ); } /////////////////////////////////////////////////////////////////////////////// // CHANNELIZER CD/CD // /////////////////////////////////////////////////////////////////////////////// template<int M> __device__ void _cupy_channelizer_complex128_complex128( const int n_chans, const int n_taps, const int n_pts, const cuDoubleComplex *__restrict__ x, const cuDoubleComplex *__restrict__ h, cuDoubleComplex *__restrict__ y, cuDoubleComplex s_mem[M][M] ) { const auto block = cg::this_thread_block( ); const auto tile = cg::tiled_partition<M>( block ); const unsigned int btx { blockIdx.x * blockDim.x + threadIdx.x }; const unsigned int tx { threadIdx.x }; const unsigned int ty { threadIdx.y }; // Initialize shared memory // Evaluate type at compile-time if ( btx < n_chans && ty < n_taps ) { s_mem[tx][ty] = cuConj( h[ty * n_chans + btx] ); } else { s_mem[tx][ty] = make_cuDoubleComplex( 0.0, 0.0 ); } block.sync( ); cuDoubleComplex local_h { s_mem[ty][tx] }; for ( auto bid = blockIdx.y; bid < n_pts; bid += gridDim.y ) { block.sync( ); // Load data if ( bid >= n_taps ) { if ( btx < n_chans && ty < n_taps ) { s_mem[tx][( n_taps - 1 ) - ty] = cuConj( x[( ( bid - n_taps + 1 ) + ty ) * n_chans + ( n_chans - 1 - btx )] ); } } else { if ( btx < n_chans && ty <= bid ) { s_mem[tx][bid - ty] = cuConj( x[ty * n_chans + ( n_chans - 1 - btx )] ); } else { s_mem[tx][ty] = make_cuDoubleComplex( 0.0, 0.0 ); } } block.sync( ); cuDoubleComplex local_reg { s_mem[ty][tx] }; cuDoubleComplex temp {}; cuDoubleComplex vv {}; // Perform compute if ( ( blockIdx.x * M + ty ) < n_chans ) { temp = cuCmul( local_h, local_reg ); vv.x = reduce_sum_tile_shfl<double, M>( tile, temp.x ); vv.y = reduce_sum_tile_shfl<double, M>( tile, temp.y ); } // Store output if ( tx == 0 && ( blockIdx.x * M + ty ) < n_chans ) { y[bid * n_chans + ( blockIdx.x * M + ty )] = vv; } } } extern "C" __global__ void __launch_bounds__( 64 ) 
_cupy_channelizer_8x8_complex128_complex128( const int n_chans, const int n_taps, const int n_pts, const cuDoubleComplex *__restrict__ x, const cuDoubleComplex *__restrict__ h, cuDoubleComplex *__restrict__ y ) { __shared__ cuDoubleComplex s_mem[8][8]; _cupy_channelizer_complex128_complex128<8>( n_chans, n_taps, n_pts, x, h, y, s_mem ); } extern "C" __global__ void __launch_bounds__( 256 ) _cupy_channelizer_16x16_complex128_complex128( const int n_chans, const int n_taps, const int n_pts, const cuDoubleComplex *__restrict__ x, const cuDoubleComplex *__restrict__ h, cuDoubleComplex *__restrict__ y ) { __shared__ cuDoubleComplex s_mem[16][16]; _cupy_channelizer_complex128_complex128<16>( n_chans, n_taps, n_pts, x, h, y, s_mem ); } extern "C" __global__ void __launch_bounds__( 1024 ) _cupy_channelizer_32x32_complex128_complex128( const int n_chans, const int n_taps, const int n_pts, const cuDoubleComplex *__restrict__ x, const cuDoubleComplex *__restrict__ h, cuDoubleComplex *__restrict__ y ) { __shared__ cuDoubleComplex s_mem[32][32]; _cupy_channelizer_complex128_complex128<32>( n_chans, n_taps, n_pts, x, h, y, s_mem ); } #endif
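///////////////////////////////////////////////////////////////////////////////
// Illustrative host-side launch (added for clarity; not part of the original
// file).  It assumes the C++17 / thrust::complex build path above is the one
// being compiled, that n_taps <= 32 (one tap row per threadIdx.y, as the
// shared-memory layout requires), and that n_chans >= 1 and n_pts >= 1.
// Buffer layouts follow the kernel's indexing: x and y are n_pts x n_chans,
// h is n_taps x n_chans, all row-major.  Names here are hypothetical.
///////////////////////////////////////////////////////////////////////////////
void launch_channelizer_32x32_f32( const int n_chans,
                                   const int n_taps,
                                   const int n_pts,
                                   const float *d_x,
                                   const float *d_h,
                                   thrust::complex<float> *d_y,
                                   cudaStream_t stream ) {
    const dim3 block( 32, 32 );                          // 1024 threads, matches __launch_bounds__( 1024 )
    const dim3 grid( ( n_chans + 31 ) / 32,              // blockIdx.x tiles the channels
                     n_pts < 65535 ? n_pts : 65535 );    // blockIdx.y grid-strides over output points
    _cupy_channelizer_32x32_float32_complex64<<<grid, block, 0, stream>>>(
        n_chans, n_taps, n_pts, d_x, d_h, d_y );
}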
extern "C" { #include <errno.h> #include <stdio.h> #include <unistd.h> } #include <miner.h> #include <cuda_helper.h> #include <cuda_vector_uint2x4.h> // todo #include "wildkeccak.h" extern char *device_config[MAX_GPUS]; // -l extern uint64_t* pscratchpad_buff; static uint64_t* d_input[MAX_GPUS]; static uint32_t* d_retnonce[MAX_GPUS]; static ulonglong4* d_scratchpad[MAX_GPUS]; static uint64_t* h_scratchpad[MAX_GPUS] = { 0 }; static cudaStream_t bufpad_stream[MAX_GPUS] = { 0 }; static cudaStream_t kernel_stream[MAX_GPUS] = { 0 }; uint64_t scratchpad_size = 0; uint32_t WK_CUDABlocks = 64; uint32_t WK_CUDAThreads = 256; #define st0 vst0.x #define st1 vst0.y #define st2 vst0.z #define st3 vst0.w #define st4 vst4.x #define st5 vst4.y #define st6 vst4.z #define st7 vst4.w #define st8 vst8.x #define st9 vst8.y #define st10 vst8.z #define st11 vst8.w #define st12 vst12.x #define st13 vst12.y #define st14 vst12.z #define st15 vst12.w #define st16 vst16.x #define st17 vst16.y #define st18 vst16.z #define st19 vst16.w #define st20 vst20.x #define st21 vst20.y #define st22 vst20.z #define st23 vst20.w #if __CUDA_ARCH__ >= 320 __device__ __forceinline__ uint64_t cuda_rotl641(const uint64_t value) { uint2 result; asm("shf.l.wrap.b32 %0, %1, %2, 1U;" : "=r"(result.x) : "r"(__double2hiint(__longlong_as_double(value))), "r"(__double2loint(__longlong_as_double(value)))); asm("shf.l.wrap.b32 %0, %1, %2, 1U;" : "=r"(result.y) : "r"(__double2loint(__longlong_as_double(value))), "r"(__double2hiint(__longlong_as_double(value)))); return __double_as_longlong(__hiloint2double(result.y, result.x)); } #else __noinline__ __device__ uint64_t cuda_rotl641(const uint64_t x) { return((x << 1) | (x >> 63)); } #endif __noinline__ __device__ uint64_t bitselect(const uint64_t a, const uint64_t b, const uint64_t c) { return(a ^ (c & (b ^ a))); } #define ROTL641(x) (cuda_rotl641(x)) #define RND() \ bc[0] = st0 ^ st5 ^ st10 * st15 * st20 ^ ROTL641(st2 ^ st7 ^ st12 * st17 * st22); \ bc[1] = st1 ^ st6 ^ st11 * st16 * st21 ^ ROTL641(st3 ^ st8 ^ st13 * st18 * st23); \ bc[2] = st2 ^ st7 ^ st12 * st17 * st22 ^ ROTL641(st4 ^ st9 ^ st14 * st19 * st24); \ bc[3] = st3 ^ st8 ^ st13 * st18 * st23 ^ ROTL641(st0 ^ st5 ^ st10 * st15 * st20); \ bc[4] = st4 ^ st9 ^ st14 * st19 * st24 ^ ROTL641(st1 ^ st6 ^ st11 * st16 * st21); \ tmp1 = st1 ^ bc[0]; \ \ st0 ^= bc[4]; \ st1 = ROTL64(st6 ^ bc[0], 44); \ st6 = ROTL64(st9 ^ bc[3], 20); \ st9 = ROTL64(st22 ^ bc[1], 61); \ st22 = ROTL64(st14 ^ bc[3], 39); \ st14 = ROTL64(st20 ^ bc[4], 18); \ st20 = ROTL64(st2 ^ bc[1], 62); \ st2 = ROTL64(st12 ^ bc[1], 43); \ st12 = ROTL64(st13 ^ bc[2], 25); \ st13 = ROTL64(st19 ^ bc[3], 8); \ st19 = ROTL64(st23 ^ bc[2], 56); \ st23 = ROTL64(st15 ^ bc[4], 41); \ st15 = ROTL64(st4 ^ bc[3], 27); \ st4 = ROTL64(st24 ^ bc[3], 14); \ st24 = ROTL64(st21 ^ bc[0], 2); \ st21 = ROTL64(st8 ^ bc[2], 55); \ st8 = ROTL64(st16 ^ bc[0], 45); \ st16 = ROTL64(st5 ^ bc[4], 36); \ st5 = ROTL64(st3 ^ bc[2], 28); \ st3 = ROTL64(st18 ^ bc[2], 21); \ st18 = ROTL64(st17 ^ bc[1], 15); \ st17 = ROTL64(st11 ^ bc[0], 10); \ st11 = ROTL64(st7 ^ bc[1], 6); \ st7 = ROTL64(st10 ^ bc[4], 3); \ st10 = ROTL641(tmp1); \ \ tmp1 = st0; tmp2 = st1; st0 = bitselect(st0 ^ st2, st0, st1); st1 = bitselect(st1 ^ st3, st1, st2); \ st2 = bitselect(st2 ^ st4, st2, st3); st3 = bitselect(st3 ^ tmp1, st3, st4); st4 = bitselect(st4 ^ tmp2, st4, tmp1); \ tmp1 = st5; tmp2 = st6; st5 = bitselect(st5 ^ st7, st5, st6); st6 = bitselect(st6 ^ st8, st6, st7); \ st7 = bitselect(st7 ^ st9, st7, st8); st8 = bitselect(st8 ^ tmp1, 
st8, st9); st9 = bitselect(st9 ^ tmp2, st9, tmp1); \ tmp1 = st10; tmp2 = st11; st10 = bitselect(st10 ^ st12, st10, st11); st11 = bitselect(st11 ^ st13, st11, st12); \ st12 = bitselect(st12 ^ st14, st12, st13); st13 = bitselect(st13 ^ tmp1, st13, st14); st14 = bitselect(st14 ^ tmp2, st14, tmp1); \ tmp1 = st15; tmp2 = st16; st15 = bitselect(st15 ^ st17, st15, st16); st16 = bitselect(st16 ^ st18, st16, st17); \ st17 = bitselect(st17 ^ st19, st17, st18); st18 = bitselect(st18 ^ tmp1, st18, st19); st19 = bitselect(st19 ^ tmp2, st19, tmp1); \ tmp1 = st20; tmp2 = st21; st20 = bitselect(st20 ^ st22, st20, st21); st21 = bitselect(st21 ^ st23, st21, st22); \ st22 = bitselect(st22 ^ st24, st22, st23); st23 = bitselect(st23 ^ tmp1, st23, st24); st24 = bitselect(st24 ^ tmp2, st24, tmp1); \ st0 ^= 1; #define LASTRND1() \ bc[0] = st0 ^ st5 ^ st10 * st15 * st20 ^ ROTL64(st2 ^ st7 ^ st12 * st17 * st22, 1); \ bc[1] = st1 ^ st6 ^ st11 * st16 * st21 ^ ROTL64(st3 ^ st8 ^ st13 * st18 * st23, 1); \ bc[2] = st2 ^ st7 ^ st12 * st17 * st22 ^ ROTL64(st4 ^ st9 ^ st14 * st19 * st24, 1); \ bc[3] = st3 ^ st8 ^ st13 * st18 * st23 ^ ROTL64(st0 ^ st5 ^ st10 * st15 * st20, 1); \ bc[4] = st4 ^ st9 ^ st14 * st19 * st24 ^ ROTL64(st1 ^ st6 ^ st11 * st16 * st21, 1); \ \ st0 ^= bc[4]; \ st1 = ROTL64(st6 ^ bc[0], 44); \ st2 = ROTL64(st12 ^ bc[1], 43); \ st4 = ROTL64(st24 ^ bc[3], 14); \ st3 = ROTL64(st18 ^ bc[2], 21); \ \ tmp1 = st0; st0 = bitselect(st0 ^ st2, st0, st1); st1 = bitselect(st1 ^ st3, st1, st2); st2 = bitselect(st2 ^ st4, st2, st3); st3 = bitselect(st3 ^ tmp1, st3, st4); \ st0 ^= 1; #define LASTRND2() \ bc[2] = st2 ^ st7 ^ st12 * st17 * st22 ^ ROTL64(st4 ^ st9 ^ st14 * st19 * st24, 1); \ bc[3] = st3 ^ st8 ^ st13 * st18 * st23 ^ ROTL64(st0 ^ st5 ^ st10 * st15 * st20, 1); \ bc[4] = st4 ^ st9 ^ st14 * st19 * st24 ^ ROTL64(st1 ^ st6 ^ st11 * st16 * st21, 1); \ \ st0 ^= bc[4]; \ st4 = ROTL64(st24 ^ bc[3], 14); \ st3 = ROTL64(st18 ^ bc[2], 21); \ st3 = bitselect(st3 ^ st0, st3, st4); __device__ ulonglong4 operator^(const ulonglong4 &a, const ulonglong4 &b) { return(make_ulonglong4(a.x ^ b.x, a.y ^ b.y, a.z ^ b.z, a.w ^ b.w)); } #define MIX(vst) vst = vst ^ scratchpad[vst.x % scr_size] ^ scratchpad[vst.y % scr_size] ^ scratchpad[vst.z % scr_size] ^ scratchpad[vst.w % scr_size]; #define MIX_ALL MIX(vst0); MIX(vst4); MIX(vst8); MIX(vst12); MIX(vst16); MIX(vst20); __global__ void wk(uint32_t* __restrict__ retnonce, const uint64_t* __restrict__ input, const ulonglong4* __restrict__ scratchpad, const uint32_t scr_size, const uint32_t target, uint64_t startNonce) { ulonglong4 vst0, vst4, vst8, vst12, vst16, vst20; uint64_t bc[5]; uint64_t st24, tmp1, tmp2; const uint64_t nonce = startNonce + (blockDim.x * blockIdx.x) + threadIdx.x; vst0 = make_ulonglong4((nonce << 8) + (input[0] & 0xFF), input[1] & 0xFFFFFFFFFFFFFF00ULL, input[2], input[3]); vst4 = make_ulonglong4(input[4], input[5], input[6], input[7]); vst8 = make_ulonglong4(input[8], input[9], (input[10] & 0xFF) | 0x100, 0); vst12 = make_ulonglong4(0, 0, 0, 0); vst16 = make_ulonglong4(0x8000000000000000ULL, 0, 0, 0); vst20 = make_ulonglong4(0, 0, 0, 0); st24 = 0; RND(); MIX_ALL; for(int i = 0; i < 22; i++) { RND(); MIX_ALL; } LASTRND1(); vst4 = make_ulonglong4(1, 0, 0, 0); vst8 = make_ulonglong4(0, 0, 0, 0); vst12 = make_ulonglong4(0, 0, 0, 0); vst16 = make_ulonglong4(0x8000000000000000ULL, 0, 0, 0); vst20 = make_ulonglong4(0, 0, 0, 0); st24 = 0; RND(); MIX_ALL; #pragma unroll for(int i = 0; i < 22; i++) { RND(); MIX_ALL; } LASTRND2(); if((st3 >> 32) <= target) { retnonce[0] = 
(uint32_t) nonce; retnonce[1] = retnonce[0]; } } __host__ void wildkeccak_kernel(const int thr_id, const uint32_t threads, const uint32_t startNounce, const uint2 target, uint32_t *resNonces) { CUDA_SAFE_CALL(cudaMemsetAsync(d_retnonce[thr_id], 0xff, 2 * sizeof(uint32_t), kernel_stream[thr_id])); const uint32_t threadsperblock = WK_CUDAThreads; dim3 grid((threads + threadsperblock - 1) / threadsperblock); dim3 block(threadsperblock); wk <<<grid, block, 0, kernel_stream[thr_id]>>> (d_retnonce[thr_id], d_input[thr_id], d_scratchpad[thr_id], (uint32_t)(scratchpad_size >> 2), target.y, startNounce); cudaMemcpyAsync(resNonces, d_retnonce[thr_id], 2*sizeof(uint32_t), cudaMemcpyDeviceToHost, kernel_stream[thr_id]); } static bool init[MAX_GPUS] = { 0 }; extern "C" int scanhash_wildkeccak(int thr_id, struct work* work, uint32_t max_nonce, unsigned long *hashes_done) { uint32_t *ptarget = work->target; uint32_t throughput = 0; uint64_t n, nonce, first; uint8_t *pdata = (uint8_t*) work->data; memcpy(&first, &pdata[1], 8); n = nonce = first; if (!scratchpad_size || !h_scratchpad[thr_id]) { if (h_scratchpad[thr_id]) applog(LOG_ERR, "Scratchpad size is not set!"); work->data[0] = 0; // invalidate sleep(1); return -EBUSY; } if (!init[thr_id]) { if (device_config[thr_id]) { sscanf(device_config[thr_id], "%ux%u", &WK_CUDABlocks, &WK_CUDAThreads); gpulog(LOG_INFO, thr_id, "Using %u x %u kernel launch config, %u threads", WK_CUDABlocks, WK_CUDAThreads, throughput); } else { throughput = cuda_default_throughput(thr_id, WK_CUDABlocks*WK_CUDAThreads); gpulog(LOG_INFO, thr_id, "Intensity set to %g, %u cuda threads", throughput2intensity(throughput), throughput); } cudaSetDevice(device_map[thr_id]); if (opt_cudaschedule == -1 && gpu_threads == 1) { cudaDeviceReset(); // reduce cpu usage (linux) cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync); cudaDeviceSetCacheConfig(cudaFuncCachePreferL1); CUDA_LOG_ERROR(); } CUDA_SAFE_CALL(cudaMalloc(&d_input[thr_id], 88)); CUDA_SAFE_CALL(cudaMalloc(&d_retnonce[thr_id], 2*sizeof(uint32_t))); int status = (int) cudaMalloc(&d_scratchpad[thr_id], WILD_KECCAK_SCRATCHPAD_BUFFSIZE); if (status != cudaSuccess) { gpulog(LOG_ERR, thr_id, "Unable to allocate device memory, %u MB, err %d", (uint32_t) (WILD_KECCAK_SCRATCHPAD_BUFFSIZE/(1024*1024)), status); exit(-ENOMEM); } cudaStreamCreate(&bufpad_stream[thr_id]); cudaStreamCreate(&kernel_stream[thr_id]); CUDA_SAFE_CALL(cudaMemcpyAsync(d_scratchpad[thr_id], h_scratchpad[thr_id], scratchpad_size << 3, cudaMemcpyHostToDevice, bufpad_stream[thr_id])); init[thr_id] = true; } throughput = WK_CUDABlocks * WK_CUDAThreads; cudaMemcpy(d_input[thr_id], pdata, 88, cudaMemcpyHostToDevice); // cudaMemset(d_retnonce[thr_id], 0xFF, 2*sizeof(uint32_t)); if (h_scratchpad[thr_id]) { cudaStreamSynchronize(bufpad_stream[thr_id]); } do { // const uint32_t blocks = WK_CUDABlocks, threads = WK_CUDAThreads; // const dim3 block(blocks); // const dim3 thread(threads); uint32_t h_retnonce[2] = { UINT32_MAX, UINT32_MAX }; uint2 target = make_uint2(ptarget[6], ptarget[7]); wildkeccak_kernel(thr_id, throughput, (uint32_t) nonce, target, h_retnonce); /* wk <<<block, thread, 0, kernel_stream[thr_id]>>> (d_retnonce[thr_id], d_input[thr_id], d_scratchpad[thr_id], (uint32_t)(scratchpad_size >> 2), nonce, ptarget[7]); */ *hashes_done = (unsigned long) (n - first + throughput); cudaStreamSynchronize(kernel_stream[thr_id]); if(h_retnonce[0] != UINT32_MAX) { uint8_t _ALIGN(64) cpuhash[32]; uint32_t* vhash = (uint32_t*) cpuhash; uint64_t nonce64; memcpy(&pdata[1], 
&h_retnonce[0], sizeof(uint32_t)); memcpy(&nonce64, &pdata[1], 8); wildkeccak_hash(cpuhash, pdata, pscratchpad_buff, scratchpad_size); if (!cpuhash[31] && vhash[7] <= ptarget[7] && fulltest(vhash, ptarget)) { work_set_target_ratio(work, vhash); //applog_hex(pdata, 84); //applog_hex(cpuhash, 32); //applog_hex(ptarget, 32); memcpy(work->nonces, &nonce64, 8); if (n + throughput > max_nonce) { *hashes_done = (unsigned long) (max_nonce - first); } work->valid_nonces = 1; return 1; } else if (vhash[7] > ptarget[7]) { gpu_increment_reject(thr_id); if (!opt_quiet) gpulog(LOG_WARNING, thr_id, "result for nonce %08x does not validate on CPU!", h_retnonce[0]); } } if (n + throughput >= max_nonce) { n = max_nonce; break; } n += throughput; nonce += throughput; } while(!work_restart[thr_id].restart); *hashes_done = (unsigned long) (n - first + 1); return 0; } void wildkeccak_scratchpad_need_update(uint64_t* pscratchpad_buff) { for(int i = 0; i < opt_n_threads; i++) { h_scratchpad[i] = pscratchpad_buff; if (init[i]) { gpulog(LOG_DEBUG, i, "Starting scratchpad update..."); cudaMemcpyAsync(d_scratchpad[i], h_scratchpad[i], scratchpad_size << 3, cudaMemcpyHostToDevice, bufpad_stream[i]); work_restart[i].restart = true; } } } void free_wildkeccak(int thr_id) { if (!init[thr_id]) return; cudaThreadSynchronize(); cudaFree(d_scratchpad[thr_id]); cudaFree(d_input[thr_id]); cudaFree(d_retnonce[thr_id]); cudaStreamDestroy(bufpad_stream[thr_id]); cudaStreamDestroy(kernel_stream[thr_id]); cudaDeviceSynchronize(); init[thr_id] = false; }
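// Note (added for clarity; not part of the original miner code):
// scratchpad_size above is kept in 64-bit words.  The host->device copies use
// scratchpad_size << 3 (words -> bytes), while the kernel receives
// scratchpad_size >> 2 because d_scratchpad is addressed as ulonglong4
// (four 64-bit words per entry).  Hypothetical helpers making those unit
// conversions explicit:
static inline uint64_t wk_scratchpad_bytes(const uint64_t size_in_words)
{
	return size_in_words * sizeof(uint64_t);   // same as size_in_words << 3
}

static inline uint32_t wk_scratchpad_vec4_entries(const uint64_t size_in_words)
{
	return (uint32_t)(size_in_words / 4);      // same as size_in_words >> 2
}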
__device__ inline size_t pre_fftshift(size_t offset, CallbackData* cb) { // For inverse transforms with apply_fftshift=true, we cyclically shift // the input data here by modifying the read offset. if( cb->do_fftshift && cb->inverse ) { for( int d=0; d<cb->ndim; ++d ) { // Compute the index of this element along dimension d // **TODO: 64-bit indexing support int size = cb->shape[d]; auto stride = cb->istrides[d]; auto inembed = cb->inembed[d]; int i = (int)offset / stride % inembed; int shift = (i < (size-size/2)) ? size/2 : -(size-size/2); offset += shift*stride; } } return offset; } template<typename Complex> __device__ inline Complex post_fftshift(size_t offset, Complex value, CallbackData* cb) { // For forward transforms with apply_fftshift=true, we cyclically shift // the output data by phase-rotating the input data here. if( cb->do_fftshift && !cb->inverse ) { for( int d=0; d<cb->ndim; ++d ) { // Compute the index of this element along dimension d // **TODO: 64-bit indexing support int size = cb->shape[d]; auto stride = cb->istrides[d]; auto inembed = cb->inembed[d]; int i = (int)offset / stride % inembed; // We achieve a cyclic shift of the FFT output (aka fftshift) // by multiplying the input by a phase shift. // Note that this only works for complex input if( size % 2 == 0 ) { // For even sizes, the phase multiplication reduces to a // simple form: {even i: +1, odd i: -1}. if( i%2 ) { value.x = -value.x; value.y = -value.y; } } else { // For odd sizes we must do the math in full // TODO: Confirm that float and __sincosf provide enough // precision for all practical FFT sizes. const float pi = 3.1415926535897932; float phase = 2*pi*i/size*(size/2); float sin_phase, cos_phase; __sincosf(phase, &sin_phase, &cos_phase); Complex tmp; tmp.x = value.x*cos_phase; tmp.x -= value.y*sin_phase; tmp.y = value.x*sin_phase; tmp.y += value.y*cos_phase; value = tmp; } } } return value; } __device__ cufftComplex callback_load_ci4(void* dataIn, size_t offset, void* callerInfo, void* sharedPointer) { // WAR for CUFFT insisting on pointers aligned to sizeof(cufftComplex) CallbackData* callback_data = (CallbackData*)callerInfo; *(char**)&dataIn += callback_data->ptr_offset; offset = pre_fftshift(offset, callback_data); int8_t packed = ((int8_t*)dataIn)[offset]; int8_t real = packed & 0xF0; int8_t imag = packed << 4; cufftComplex result = make_float2(real * (1.f/128), imag * (1.f/128)); result = post_fftshift(offset, result, callback_data); return result; } __device__ cufftComplex callback_load_ci8(void* dataIn, size_t offset, void* callerInfo, void* sharedPointer) { // WAR for CUFFT insisting on pointers aligned to sizeof(cufftComplex) CallbackData* callback_data = (CallbackData*)callerInfo; *(char**)&dataIn += callback_data->ptr_offset; offset = pre_fftshift(offset, callback_data); char2 val = ((char2*)dataIn)[offset]; cufftComplex result = make_float2(val.x * (1.f/128), val.y * (1.f/128)); result = post_fftshift(offset, result, callback_data); return result; } __device__ cufftComplex callback_load_ci16(void* dataIn, size_t offset, void* callerInfo, void* sharedPointer) { // WAR for CUFFT insisting on pointers aligned to sizeof(cufftComplex) CallbackData* callback_data = (CallbackData*)callerInfo; *(char**)&dataIn += callback_data->ptr_offset; offset = pre_fftshift(offset, callback_data); short2 val = ((short2*)dataIn)[offset]; cufftComplex result = make_float2(val.x * (1.f/32768), val.y * (1.f/32768)); result = post_fftshift(offset, result, callback_data); return result; } __device__ cufftComplex 
callback_load_cf32(void* dataIn, size_t offset, void* callerInfo, void* sharedPointer) { CallbackData* callback_data = (CallbackData*)callerInfo; // Note: cufftComplex loads must be aligned offset = pre_fftshift(offset, callback_data); cufftComplex result = ((cufftComplex*)dataIn)[offset]; result = post_fftshift(offset, result, callback_data); return result; } __device__ cufftDoubleComplex callback_load_cf64(void* dataIn, size_t offset, void* callerInfo, void* sharedPointer) { CallbackData* callback_data = (CallbackData*)callerInfo; // Note: cufftDoubleComplex loads must be aligned offset = pre_fftshift(offset, callback_data); cufftDoubleComplex result = ((cufftDoubleComplex*)dataIn)[offset]; result = post_fftshift(offset, result, callback_data); return result; } static __device__ cufftCallbackLoadC callback_load_ci4_dptr = callback_load_ci4; static __device__ cufftCallbackLoadC callback_load_ci8_dptr = callback_load_ci8; static __device__ cufftCallbackLoadC callback_load_ci16_dptr = callback_load_ci16; static __device__ cufftCallbackLoadC callback_load_cf32_dptr = callback_load_cf32; static __device__ cufftCallbackLoadZ callback_load_cf64_dptr = callback_load_cf64; template<typename T> struct is_signed { enum { value = (((T)(-1)) < 0) }; }; template<typename T> __host__ __device__ inline T maxval(T x=T()) { return (1<<(sizeof(T)*8-is_signed<T>::value)) - 1; } template<typename T> __device__ cufftReal callback_load_real(void* dataIn, size_t offset, void* callerInfo, void* sharedPointer) { // WAR for CUFFT insisting on pointers aligned to sizeof(cufftComplex) CallbackData* callback_data = (CallbackData*)callerInfo; *(char**)&dataIn += callback_data->ptr_offset; T val = ((T*)dataIn)[offset]; cufftReal result = val * (1.f/(maxval<T>()+1)); return result; } static __device__ cufftCallbackLoadR callback_load_i8_dptr = callback_load_real<int8_t>; static __device__ cufftCallbackLoadR callback_load_i16_dptr = callback_load_real<int16_t>; static __device__ cufftCallbackLoadR callback_load_u8_dptr = callback_load_real<uint8_t>; static __device__ cufftCallbackLoadR callback_load_u16_dptr = callback_load_real<uint16_t>; BFstatus set_fft_load_callback(BFdtype dtype, int nbit, cufftHandle handle, bool do_fftshift, CallbackData* callerInfo, bool* using_callback) { cufftCallbackLoadC callback_load_c_hptr; cufftCallbackLoadR callback_load_r_hptr; cufftCallbackLoadZ callback_load_z_hptr; *using_callback = true; // TODO: Try to reduce repetition here switch( dtype ) { case BF_DTYPE_CI4: { BF_CHECK_CUDA( cudaMemcpyFromSymbol(&callback_load_c_hptr, callback_load_ci4_dptr, sizeof(cufftCallbackLoadC)), BF_STATUS_DEVICE_ERROR ); BF_CHECK_CUFFT( cufftXtSetCallback(handle, (void**)&callback_load_c_hptr, CUFFT_CB_LD_COMPLEX, (void**)&callerInfo) ); break; } case BF_DTYPE_CI8: { BF_CHECK_CUDA( cudaMemcpyFromSymbol(&callback_load_c_hptr, callback_load_ci8_dptr, sizeof(cufftCallbackLoadC)), BF_STATUS_DEVICE_ERROR ); BF_CHECK_CUFFT( cufftXtSetCallback(handle, (void**)&callback_load_c_hptr, CUFFT_CB_LD_COMPLEX, (void**)&callerInfo) ); break; } case BF_DTYPE_CI16: { BF_CHECK_CUDA( cudaMemcpyFromSymbol(&callback_load_c_hptr, callback_load_ci16_dptr, sizeof(cufftCallbackLoadC)), BF_STATUS_DEVICE_ERROR ); BF_CHECK_CUFFT( cufftXtSetCallback(handle, (void**)&callback_load_c_hptr, CUFFT_CB_LD_COMPLEX, (void**)&callerInfo) ); break; } case BF_DTYPE_I8: { BF_ASSERT(!do_fftshift, BF_STATUS_UNSUPPORTED); BF_CHECK_CUDA( cudaMemcpyFromSymbol(&callback_load_r_hptr, callback_load_i8_dptr, sizeof(cufftCallbackLoadR)), 
BF_STATUS_DEVICE_ERROR ); BF_CHECK_CUFFT( cufftXtSetCallback(handle, (void**)&callback_load_r_hptr, CUFFT_CB_LD_REAL, (void**)&callerInfo) ); break; } case BF_DTYPE_I16: { BF_ASSERT(!do_fftshift, BF_STATUS_UNSUPPORTED); BF_CHECK_CUDA( cudaMemcpyFromSymbol(&callback_load_r_hptr, callback_load_i16_dptr, sizeof(cufftCallbackLoadR)), BF_STATUS_DEVICE_ERROR ); BF_CHECK_CUFFT( cufftXtSetCallback(handle, (void**)&callback_load_r_hptr, CUFFT_CB_LD_REAL, (void**)&callerInfo) ); break; } case BF_DTYPE_U8: { BF_ASSERT(!do_fftshift, BF_STATUS_UNSUPPORTED); BF_CHECK_CUDA( cudaMemcpyFromSymbol(&callback_load_r_hptr, callback_load_u8_dptr, sizeof(cufftCallbackLoadR)), BF_STATUS_DEVICE_ERROR ); BF_CHECK_CUFFT( cufftXtSetCallback(handle, (void**)&callback_load_r_hptr, CUFFT_CB_LD_REAL, (void**)&callerInfo) ); break; } case BF_DTYPE_U16: { BF_ASSERT(!do_fftshift, BF_STATUS_UNSUPPORTED); BF_CHECK_CUDA( cudaMemcpyFromSymbol(&callback_load_r_hptr, callback_load_u16_dptr, sizeof(cufftCallbackLoadR)), BF_STATUS_DEVICE_ERROR ); BF_CHECK_CUFFT( cufftXtSetCallback(handle, (void**)&callback_load_r_hptr, CUFFT_CB_LD_REAL, (void**)&callerInfo) ); break; } case BF_DTYPE_CF32: { if( do_fftshift ) { BF_CHECK_CUDA( cudaMemcpyFromSymbol(&callback_load_c_hptr, callback_load_cf32_dptr, sizeof(cufftCallbackLoadC)), BF_STATUS_DEVICE_ERROR ); BF_CHECK_CUFFT( cufftXtSetCallback(handle, (void**)&callback_load_c_hptr, CUFFT_CB_LD_COMPLEX, (void**)&callerInfo) ); break; } else { // Fall-through } } case BF_DTYPE_F32: { BF_ASSERT(nbit == 32, BF_STATUS_INVALID_DTYPE); BF_ASSERT(!do_fftshift, BF_STATUS_UNSUPPORTED); // No callback needed *using_callback = false; break; } case BF_DTYPE_CF64: { if( do_fftshift ) { BF_CHECK_CUDA( cudaMemcpyFromSymbol(&callback_load_z_hptr, callback_load_cf64_dptr, sizeof(cufftCallbackLoadZ)), BF_STATUS_DEVICE_ERROR ); BF_CHECK_CUFFT( cufftXtSetCallback(handle, (void**)&callback_load_z_hptr, CUFFT_CB_LD_COMPLEX_DOUBLE, (void**)&callerInfo) ); break; } else { // Fall-through } } case BF_DTYPE_F64: { BF_ASSERT(nbit == 64, BF_STATUS_INVALID_DTYPE); BF_ASSERT(!do_fftshift, BF_STATUS_UNSUPPORTED); // No callback needed *using_callback = false; break; } default: { BF_FAIL("Supported input data type", BF_STATUS_INVALID_DTYPE); } } return BF_STATUS_SUCCESS; }
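// Illustrative host-side reference for the 4+4-bit complex unpacking done in
// callback_load_ci4 above (added for clarity; not part of the original file).
// The high nibble of the packed byte is the real part and the low nibble the
// imaginary part; both are kept as signed values in the top four bits of an
// int8_t and scaled by 1/128, so each component lands in [-1, +7/8].
static inline float2 unpack_ci4_reference(int8_t packed) {
    int8_t real = packed & 0xF0;   // high nibble stays in bits 7..4
    int8_t imag = packed << 4;     // low nibble shifted up into bits 7..4
    return make_float2(real * (1.f/128), imag * (1.f/128));
}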
#include <vector> #include "caffe/layers/detectnet_transform_layer.hpp" #include "caffe/util/detectnet_coverage.hpp" #include "caffe/util/gpu_memory.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { // Calculate the location in the image from the loop index __device__ void get_pixel_indices(const int loop_index, const uint4 shape, int* x, int* y, int* n ) { int idx = loop_index; *n = idx / (shape.y * shape.x); idx -= *n * shape.y * shape.x; *y = idx / shape.x; idx -= *y * shape.x; *x = idx; } // https://www.cs.rit.edu/~ncs/color/t_convert.html template <typename Dtype> __device__ void convert_rgb_to_hsv( Dtype r, Dtype g, Dtype b, Dtype* h, Dtype* s, Dtype* v ) { Dtype min_v = min(min(r, g), b); Dtype max_v = max(max(r, g), b); // NOLINT(build/include_what_you_use) Dtype delta = max_v - min_v; if (max_v == 0 || delta == 0) { *h = 0; *s = 0; *v = max_v; return; } if (r == max_v) { *h = (g - b) / delta; } else if (g == max_v) { *h = 2 + (b - r) / delta; } else { *h = 4 + (r - g) / delta; } *h *= 60; if (h < 0) { *h += 360; } *s = delta / max_v; *v = max_v; } // https://www.cs.rit.edu/~ncs/color/t_convert.html template <typename Dtype> __device__ void convert_hsv_to_rgb( Dtype h, Dtype s, Dtype v, Dtype* r, Dtype* g, Dtype* b ) { int i; Dtype f, p, q, t; if (s == 0) { *r = v; *g = v; *b = v; return; } h /= 60; // sector 0 to 5 i = floor(h); f = h - i; // factorial part of h p = v * (1 - s); q = v * (1 - s * f); t = v * (1 - s * (1 - f)); switch (i) { case 0: *r = v; *g = t; *b = p; break; case 1: *r = q; *g = v; *b = p; break; case 2: *r = p; *g = v; *b = t; break; case 3: *r = p; *g = q; *b = v; break; case 4: *r = t; *g = p; *b = v; break; default: // case 5: *r = v; *g = p; *b = q; break; } } template <typename Dtype> __global__ void color_transformations( const Dtype* src_data, Dtype* dst_data, const uint4 shape, const AugmentSelection* aug_data ) { CUDA_KERNEL_LOOP(loop_index, shape.x * shape.y * shape.w) { int x, y, n; get_pixel_indices(loop_index, shape, &x, &y, &n); // check what needs doing const AugmentSelection& as = aug_data[n]; const bool doHueRotation = (abs(as.hue_rotation) > FLT_EPSILON); const bool doDesaturation = (as.saturation < (1.0 - 1.0/UINT8_MAX)); // N*cs*hs*ws + H*ws + W int index = n * shape.z * shape.y * shape.x + y * shape.x + x; // hs*ws const int channel_stride = shape.y * shape.x; // read Dtype r = src_data[index + 0 * channel_stride]; Dtype g = src_data[index + 1 * channel_stride]; Dtype b = src_data[index + 2 * channel_stride]; if (doHueRotation || doDesaturation) { // transform Dtype h, s, v; convert_rgb_to_hsv(r, g, b, &h, &s, &v); if (doHueRotation) { h -= aug_data[n].hue_rotation; } if (doDesaturation) { s *= aug_data[n].saturation; } convert_hsv_to_rgb(h, s, v, &r, &g, &b); } // write dst_data[index + 0 * channel_stride] = r; dst_data[index + 1 * channel_stride] = g; dst_data[index + 2 * channel_stride] = b; } } // Mean is WxHxC // For each pixel in the current image, subtract the corresponding pixel // from the mean image template <typename Dtype> __global__ void pixel_mean_subtraction( Dtype* data, const Dtype* mean_data, const uint4 shape ) { CUDA_KERNEL_LOOP(loop_index, shape.x * shape.y * shape.w) { int x, y, n; get_pixel_indices(loop_index, shape, &x, &y, &n); for (int c = 0; c < shape.z; c++) { // N*cs*hs*ws + C*hs*ws + H*ws + W const int data_idx = (n * shape.z * shape.y * shape.x) + (c * shape.y * shape.x) + (y * shape.x) + x; // C*hs*ws + H*ws + W const int mean_idx = (c * shape.y * shape.x) + (y * shape.x) + x; data[data_idx] -= 
mean_data[mean_idx]; } } } // Mean is 1x1xC // For each pixel in the current image, subtract the mean pixel template <typename Dtype> __global__ void channel_mean_subtraction( Dtype* data, const uint4 shape, const Dtype mean_value1, const Dtype mean_value2, const Dtype mean_value3 ) { CUDA_KERNEL_LOOP(loop_index, shape.x * shape.y * shape.w) { int x, y, n; get_pixel_indices(loop_index, shape, &x, &y, &n); // N*cs*hs*ws + C*hs*ws + H*ws + W const int data_idx = (n * shape.z * shape.y * shape.x) +(y * shape.x) + x; // hs*ws const int channel_stride = shape.y * shape.x; data[data_idx + 0 * channel_stride] -= mean_value1; data[data_idx + 1 * channel_stride] -= mean_value2; data[data_idx + 2 * channel_stride] -= mean_value3; } } template <typename Dtype> __device__ void rotate_point( const Dtype ax, const Dtype ay, // original point const Dtype cx, const Dtype cy, // center point float angle, Dtype* bx, Dtype* by // destination point ) { const Dtype s = sin(angle); const Dtype c = cos(angle); // translate to origin const Dtype tx = ax - cx; const Dtype ty = ay - cy; *bx = (tx * c) - (ty * s) + cx; *by = (tx * s) + (ty * c) + cy; } template <typename Dtype> __device__ Dtype get_value( const Dtype* data, const uint4& shape, const unsigned int n, const unsigned int c, int y, int x ) { // Replicate border for 1 pixel if (x == -1) x = 0; if (x == shape.x) x = shape.x - 1; if (y == -1) y = 0; if (y == shape.y) y = shape.y - 1; if (x >= 0 && x < shape.x && y >= 0 && y < shape.y) { // N*cs*hs*ws + C*hs*ws + H*ws + W return data[(n * shape.z * shape.y * shape.x) + (c * shape.y * shape.x) + (y * shape.x) + x]; } else { return 0; } } template <typename Dtype> __device__ Dtype cubic_interpolation(const Dtype& d, const Dtype& v1, const Dtype& v2, const Dtype& v3, const Dtype& v4 ) { // d is [0,1], marking the distance from v2 towards v3 return v2 + d * ( -2.0 * v1 - 3.0 * v2 + 6.0 * v3 - 1.0 * v4 + d * ( 3.0 * v1 - 6.0 * v2 + 3.0 * v3 + 0.0 * v4 + d * ( -1.0 * v1 + 3.0 * v2 - 3.0 * v3 + 1.0 * v4))) / 6.0; } // Interpolate in 1D space template <typename Dtype> __device__ Dtype interpolate_x( const Dtype* data, const uint4& shape, const unsigned int n, const unsigned int c, const int y, const Dtype x ) { Dtype dx = x - floor(x); return cubic_interpolation(dx, get_value(data, shape, n, c, y, floor(x) - 1), get_value(data, shape, n, c, y, floor(x)), get_value(data, shape, n, c, y, ceil(x)), get_value(data, shape, n, c, y, ceil(x) + 1)); } // Interpolate in 2D space template <typename Dtype> __device__ Dtype interpolate_xy( const Dtype* data, const uint4& shape, const unsigned int n, const unsigned int c, const Dtype y, const Dtype x ) { Dtype dy = y - floor(y); return cubic_interpolation(dy, interpolate_x(data, shape, n, c, floor(y) - 1, x), interpolate_x(data, shape, n, c, floor(y), x), interpolate_x(data, shape, n, c, ceil(y), x), interpolate_x(data, shape, n, c, ceil(y) + 1, x)); } template <typename Dtype> __global__ void spatial_transformations( const Dtype* src_data, const uint4 src_shape, const AugmentSelection* aug_data, Dtype* dst_data, const uint4 dst_shape ) { CUDA_KERNEL_LOOP(loop_index, dst_shape.x * dst_shape.y * dst_shape.w) { int dst_x, dst_y, n; get_pixel_indices(loop_index, dst_shape, &dst_x, &dst_y, &n); const AugmentSelection& as = aug_data[n]; // calculate src pixel indices for this thread Dtype x = dst_x; Dtype y = dst_y; // crop x += as.crop_offset.x; y += as.crop_offset.y; // rotate if (abs(as.rotation) > FLT_EPSILON) { const Dtype w_before = as.scale.width - 1; const Dtype h_before = 
as.scale.height - 1; const float angle = as.rotation * CUDART_PI_F / 180.0f; const Dtype w_after = abs(w_before * cos(angle)) + abs(h_before * sin(angle)); const Dtype h_after = abs(w_before * sin(angle)) + abs(h_before * cos(angle)); rotate_point(x, y, w_after / 2.0f, h_after / 2.0f, -angle, &x, &y); x -= (w_after - w_before) / 2.0f; y -= (h_after - h_before) / 2.0f; } // scale if (src_shape.x != as.scale.width) { x *= Dtype(src_shape.x - 1) / (as.scale.width - 1); } if (src_shape.y != as.scale.height) { y *= Dtype(src_shape.y - 1) / (as.scale.height - 1); } // flip if (as.flip) { x = (src_shape.x - x - 1.0); } for (int c = 0; c < dst_shape.z; c++) { // N*cs*hs*ws + C*hs*ws + H*ws + W const int dst_idx = (n * dst_shape.z * dst_shape.y * dst_shape.x) + (c * dst_shape.y * dst_shape.x) + (dst_y * dst_shape.x) + dst_x; dst_data[dst_idx] = interpolate_xy(src_data, src_shape, n, c, y, x); } } } template <typename Dtype> void DetectNetTransformationLayer<Dtype>::Forward_gpu( const vector<Blob*>& bottom, const vector<Blob*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data<Dtype>(); Dtype* top_data = top[0]->mutable_gpu_data<Dtype>(); AugmentSelection* aug_data = reinterpret_cast<AugmentSelection*>( gpu_workspace_augmentations_.data()); Dtype* tmp_data = reinterpret_cast<Dtype*>( gpu_workspace_tmpdata_.data()); const uint4 bottom_shape = make_uint4( bottom[0]->shape(3), // x = W bottom[0]->shape(2), // y = H bottom[0]->shape(1), // z = C bottom[0]->shape(0)); // w = N const int bottom_count = bottom[0]->count(); const int bottom_pixels = bottom_shape.x * bottom_shape.y * bottom_shape.w; const uint4 top_shape = make_uint4( top[0]->shape(3), // x = W top[0]->shape(2), // y = H top[0]->shape(1), // z = C top[0]->shape(0)); // w = N const int top_count = top[0]->count(); const int top_pixels = top_shape.x * top_shape.y * top_shape.w; // Get current stream cudaStream_t stream = Caffe::thread_stream(); // Make augmentation selections for each image vector<AugmentSelection> augmentations; for (int i = 0; i < bottom_shape.w; i++) { augmentations.push_back(get_augmentations( cv::Point(bottom_shape.x, bottom_shape.y))); } // Copy augmentation selections to GPU size_t aug_data_sz = sizeof(AugmentSelection) * augmentations.size(); caffe_gpu_memcpy(aug_data_sz, &augmentations[0], aug_data); // Color transformations // NOLINT_NEXT_LINE(whitespace/operators) color_transformations<<<CAFFE_GET_BLOCKS(bottom_pixels), CAFFE_CUDA_NUM_THREADS, 0, stream>>>(bottom_data, tmp_data, bottom_shape, aug_data); CUDA_CHECK(cudaStreamSynchronize(stream)); // Mean subtraction if (t_param_.has_mean_file()) { // NOLINT_NEXT_LINE(whitespace/operators) pixel_mean_subtraction<<<CAFFE_GET_BLOCKS(bottom_pixels), CAFFE_CUDA_NUM_THREADS, 0, stream>>>(tmp_data, mean_blob_.gpu_data(), bottom_shape); } else if (t_param_.mean_value_size() != 0) { CHECK_EQ(bottom_shape.z, 3) << "Data must have 3 channels when " "using transform_param.mean_value."; // NOLINT_NEXT_LINE(whitespace/operators) channel_mean_subtraction<<<CAFFE_GET_BLOCKS(bottom_pixels), CAFFE_CUDA_NUM_THREADS, 0, stream>>>(tmp_data, bottom_shape, mean_values_[0] * UINT8_MAX, mean_values_[1] * UINT8_MAX, mean_values_[2] * UINT8_MAX); } // Spatial transformations // NOLINT_NEXT_LINE(whitespace/operators) spatial_transformations<<<CAFFE_GET_BLOCKS(top_pixels), CAFFE_CUDA_NUM_THREADS, 0, stream>>>(tmp_data, bottom_shape, aug_data, top_data, top_shape); CUDA_CHECK(cudaStreamSynchronize(stream)); // Use CPU to transform labels const vector<vector<BboxLabel> > list_list_bboxes = 
blobToLabels(*bottom[1]); for (size_t i = 0; i < bottom[1]->num(); i++) { const vector<BboxLabel>& list_bboxes = list_list_bboxes[i]; Dtype* output_label = &top[1]->mutable_cpu_data<Dtype>()[ top[1]->offset(i, 0, 0, 0) ]; transform_label_cpu(list_bboxes, output_label, augmentations[i], cv::Size(bottom_shape.x, bottom_shape.y)); } } INSTANTIATE_LAYER_GPU_FORWARD(DetectNetTransformationLayer); } // namespace caffe
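// Illustrative host-side reference (added for clarity; not part of the
// original layer).  The kernels above encode blob shapes in a uint4 as
// x = W, y = H, z = C, w = N, loop over the N*H*W pixel slots, and address
// NCHW blobs with ((n*C + c)*H + y)*W + x.  The helpers below are
// hypothetical and simply restate that index math on the host.
namespace {

inline void pixel_indices_reference(int loop_index, const int W, const int H,
    int* x, int* y, int* n) {
  *n = loop_index / (H * W);
  loop_index -= *n * H * W;
  *y = loop_index / W;
  *x = loop_index - *y * W;
}

inline int nchw_offset(const int n, const int c, const int y, const int x,
    const int C, const int H, const int W) {
  return ((n * C + c) * H + y) * W + x;
}

}  // namespace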
#include "test.hpp" class ActiveElement { public: __host__ __device__ ActiveElement() { val += 100000; } __host__ __device__ ~ActiveElement() { val += 1000000; } inline bool operator==(ActiveElement other) const { return val == other.val; } int val; }; class PassiveElement { public: inline bool operator==(PassiveElement other) const { return val == other.val; } int val; }; class ConstructorDestructorTestCellActive { public: inline explicit ConstructorDestructorTestCellActive(double temperature=0.0, bool alive=false) : temperature(temperature), alive(alive) {} inline bool operator==(const ConstructorDestructorTestCellActive& other) const { return (temperature == other.temperature) && (alive == other.alive) && (element == other.element); } inline bool operator!=(const ConstructorDestructorTestCellActive& other) const { return !(*this == other); } double temperature; bool alive; ActiveElement element; }; class ConstructorDestructorTestCellPassive { public: inline explicit ConstructorDestructorTestCellPassive(double temperature=0.0, bool alive=false) : temperature(temperature), alive(alive) {} inline bool operator==(const ConstructorDestructorTestCellPassive& other) const { return (temperature == other.temperature) && (alive == other.alive) && (element == other.element); } inline bool operator!=(const ConstructorDestructorTestCellPassive& other) const { return !(*this == other); } double temperature; bool alive; PassiveElement element; }; class CellWithArrayMember { public: __host__ __device__ inline explicit CellWithArrayMember(int j = 0) : j(j) { i[0] = j + 1; i[1] = j + 2; i[2] = j + 3; x[0] = j + 0.4; x[1] = j + 0.5; } __host__ __device__ inline CellWithArrayMember(int newI[3], double newX[2], int j) : j(j) { i[0] = newI[0]; i[1] = newI[1]; i[1] = newI[2]; x[0] = newX[0]; x[1] = newX[1]; } int i[3]; int j; double x[2]; }; class CellWithActiveArrayMember { public: __host__ __device__ inline explicit CellWithActiveArrayMember(int j = 0) : j(j) { i[0] = j + 1; i[1] = j + 2; i[2] = j + 3; } int i[3]; int j; ActiveElement elements[2]; }; class CellWithPassiveArrayMember { public: __host__ __device__ inline explicit CellWithPassiveArrayMember(int j = 0) : j(j) { i[0] = j + 1; i[1] = j + 2; i[2] = j + 3; } int i[3]; int j; PassiveElement elements[2]; }; LIBFLATARRAY_REGISTER_SOA(ConstructorDestructorTestCellActive, ((double)(temperature)) ((ActiveElement)(element)) ((bool)(alive)) ) LIBFLATARRAY_REGISTER_SOA(ConstructorDestructorTestCellPassive, ((double)(temperature)) ((PassiveElement)(element)) ((bool)(alive)) ) LIBFLATARRAY_REGISTER_SOA(CellWithArrayMember, ((int)(i)(3)) ((int)(j)) ((double)(x)(2)) ) LIBFLATARRAY_REGISTER_SOA(CellWithActiveArrayMember, ((int)(i)(3)) ((int)(j)) ((ActiveElement)(elements)(2)) ) LIBFLATARRAY_REGISTER_SOA(CellWithPassiveArrayMember, ((int)(i)(3)) ((int)(j)) ((PassiveElement)(elements)(2)) ) namespace LibFlatArray { std::map<std::size_t, char*> allocation_cache; /** * We fake allocation here to make sure our grids in the tests below * get the same pointers. We need this to be sure that we're working * on the same memory region with each. 
*/ template<class T> class fake_cuda_allocator { public: typedef ptrdiff_t difference_type; typedef T* pointer; typedef const T* const_pointer; typedef T& reference; typedef const T& const_reference; typedef T value_type; pointer allocate(std::size_t n, const void* = 0) { if (allocation_cache[n] != 0) { return allocation_cache[n]; } pointer ret = 0; cudaMalloc(&ret, n * sizeof(T)); allocation_cache[n] = ret; return ret; } void deallocate(pointer p, std::size_t) { // intentionally left blank } void deallocate_all() { for (typename std::map<std::size_t, pointer>::iterator i = allocation_cache.begin(); i != allocation_cache.end(); ++i) { cudaFree(i->second); i->second = 0; } } }; ADD_TEST(TestCUDAConstructionDestruction) { char *data = 0; { // prep device memory with consecutive numbers: soa_grid<ConstructorDestructorTestCellPassive, fake_cuda_allocator<char>, true> device_grid(20, 10, 5); data = device_grid.data(); soa_grid<ConstructorDestructorTestCellPassive> host_grid(20, 10, 5); for (int z = 0; z < 5; ++z) { for (int y = 0; y < 10; ++y) { for (int x = 0; x < 20; ++x) { ConstructorDestructorTestCellPassive cell((x + 1) * (y + 1), true); cell.element.val = x + y * 20 + z * 20 * 10; host_grid.set(x, y, z, cell); cell = host_grid.get(x, y, z); } } } cudaMemcpy(device_grid.data(), host_grid.data(), device_grid.byte_size(), cudaMemcpyHostToDevice); } { // ensure c-tor was run by checking increment on all elements: soa_grid<ConstructorDestructorTestCellActive, fake_cuda_allocator<char>, true> device_grid(20, 10, 5); BOOST_TEST(data == device_grid.data()); soa_grid<ConstructorDestructorTestCellPassive> host_grid(20, 10, 5); cudaMemcpy(host_grid.data(), device_grid.data(), device_grid.byte_size(), cudaMemcpyDeviceToHost); for (int z = 0; z < 5; ++z) { for (int y = 0; y < 10; ++y) { for (int x = 0; x < 20; ++x) { ConstructorDestructorTestCellPassive cell = host_grid.get(x, y, z); int expected = x + y * 20 + z * 20 * 10 + 100000; BOOST_TEST(cell.element.val == expected); BOOST_TEST(cell.temperature == 0); BOOST_TEST(cell.alive == false); } } } } { // ensure d-tor was run by checking increment on all elements: soa_grid<ConstructorDestructorTestCellPassive> host_grid(20, 10, 5); cudaMemcpy(host_grid.data(), data, host_grid.byte_size(), cudaMemcpyDeviceToHost); for (int z = 0; z < 5; ++z) { for (int y = 0; y < 10; ++y) { for (int x = 0; x < 20; ++x) { ConstructorDestructorTestCellPassive cell = host_grid.get(x, y, z); int expected = x + y * 20 + z * 20 * 10 + 1100000; BOOST_TEST(cell.element.val == expected); BOOST_TEST(cell.temperature == 0); BOOST_TEST(cell.alive == false); } } } } fake_cuda_allocator<char>().deallocate_all(); } ADD_TEST(TestCUDAGetSetSingleElements) { soa_grid<ConstructorDestructorTestCellPassive, cuda_allocator<char>, true> device_grid(40, 13, 8); for (int z = 0; z < 8; ++z) { for (int y = 0; y < 13; ++y) { for (int x = 0; x < 40; ++x) { ConstructorDestructorTestCellPassive cell((x + 2) * (y + 2), true); cell.element.val = 10000 + x + y * 40 + z * 40 * 13; device_grid.set(x, y, z, cell); } } } for (int z = 0; z < 8; ++z) { for (int y = 0; y < 13; ++y) { for (int x = 0; x < 40; ++x) { ConstructorDestructorTestCellPassive cell = device_grid.get(x, y, z); int expected = 10000 + x + y * 40 + z * 40 * 13; BOOST_TEST(cell.element.val == expected); BOOST_TEST(cell.temperature == ((x + 2) * (y + 2))); BOOST_TEST(cell.alive == true); } } } } ADD_TEST(TestCUDAGetSetMultipleElements) { soa_grid<ConstructorDestructorTestCellPassive, cuda_allocator<char>, true> device_grid(35, 25, 15); for 
(int z = 0; z < 15; ++z) {
        for (int y = 0; y < 25; ++y) {
            std::vector<ConstructorDestructorTestCellPassive> cells(35);
            for (int x = 0; x < 35; ++x) {
                cells[x].alive = x % 2;
                cells[x].temperature = x * y * z;
                cells[x].element.val = 20000 + x + y * 35 + z * 35 * 25;
            }
            device_grid.set(0, y, z, cells.data(), 35);
        }
    }

    for (int z = 0; z < 15; ++z) {
        for (int y = 0; y < 25; ++y) {
            std::vector<ConstructorDestructorTestCellPassive> cells(35);
            device_grid.get(0, y, z, cells.data(), 35);

            for (int x = 0; x < 35; ++x) {
                int expected = 20000 + x + y * 35 + z * 35 * 25;
                BOOST_TEST(cells[x].element.val == expected);
                BOOST_TEST(cells[x].alive == (x % 2));
                BOOST_TEST(cells[x].temperature == (x * y * z));
            }
        }
    }
}

ADD_TEST(TestCUDALoadSaveElements)
{
    soa_grid<ConstructorDestructorTestCellPassive> host_grid(21, 10, 9);
    for (int z = 0; z < 9; ++z) {
        for (int y = 0; y < 10; ++y) {
            for (int x = 0; x < 21; ++x) {
                ConstructorDestructorTestCellPassive cell;
                cell.alive = ((x % 3) == 0);
                cell.temperature = x * y * z * -1;
                cell.element.val = 30000 + x + y * 21 + z * 21 * 10;
                host_grid.set(x, y, z, cell);
            }
        }
    }

    std::vector<char> buffer(10 * aggregated_member_size<ConstructorDestructorTestCellPassive>::VALUE);
    host_grid.save(11, 9, 8, buffer.data(), 10);

    soa_grid<ConstructorDestructorTestCellPassive, cuda_allocator<char>, true> device_grid(31, 20, 19);
    device_grid.load(21, 19, 18, buffer.data(), 10);

    for (int i = 0; i < 20; ++i) {
        ConstructorDestructorTestCellPassive cell;
        cell.alive = i % 4;
        cell.temperature = 4711 + i;
        cell.element.val = 100 * i;
        device_grid.set(i + 1, 5, 6, cell);
    }

    buffer.resize(20 * aggregated_member_size<ConstructorDestructorTestCellPassive>::VALUE);
    device_grid.save(1, 5, 6, buffer.data(), 20);

    // verify load:
    soa_grid<ConstructorDestructorTestCellPassive> host_grid2(31, 20, 19);
    cudaMemcpy(host_grid2.data(), device_grid.data(), device_grid.byte_size(), cudaMemcpyDeviceToHost);

    for (int i = 0; i < 10; ++i) {
        ConstructorDestructorTestCellPassive cell = host_grid2.get(21 + i, 19, 18);
        bool expectedAlive = (((i + 11) % 3) == 0);
        double expectedTemperature = (11 + i) * 9 * 8 * -1;
        int expectedVal = 30000 + (11 + i) + 9 * 21 + 8 * 21 * 10;
        BOOST_TEST(cell.alive == expectedAlive);
        BOOST_TEST(cell.temperature == expectedTemperature);
        BOOST_TEST(cell.element.val == expectedVal);
    }

    // verify save (the hard-coded offsets follow the struct-of-arrays layout
    // sketched in the standalone example after this file):
    double *temperature = (double*)(buffer.data() +  0 * 20);
    int    *val         = (int*)   (buffer.data() +  8 * 20);
    bool   *alive       = (bool*)  (buffer.data() + 12 * 20);

    for (int i = 0; i < 20; ++i) {
        bool expectedAlive = i % 4;
        double expectedTemperature = 4711 + i;
        int expectedVal = i * 100;
        BOOST_TEST(expectedAlive == alive[i]);
        BOOST_TEST(expectedTemperature == temperature[i]);
        BOOST_TEST(expectedVal == val[i]);
    }

    // sanity check:
    cudaDeviceSynchronize();
    cudaError_t error = cudaGetLastError();
    if (error != cudaSuccess) {
        std::cerr << "ERROR: " << cudaGetErrorString(error) << "\n";
        throw std::runtime_error("CUDA error");
    }
}

ADD_TEST(TestCUDAArrayMembersGetSet)
{
    // test set/get of single elements:
    soa_grid<CellWithArrayMember, cuda_allocator<char>, true> device_grid(12, 23, 34);
    for (int z = 0; z < 34; ++z) {
        for (int y = 0; y < 23; ++y) {
            for (int x = 0; x < 12; ++x) {
                CellWithArrayMember cell;
                cell.i[0] = x;
                cell.i[1] = y;
                cell.i[2] = z;
                cell.j = x * y * z;
                cell.x[0] = x + y + 0.1;
                cell.x[1] = y + z + 0.2;
                device_grid.set(x, y, z, cell);
            }
        }
    }

    for (int z = 0; z < 34; ++z) {
        for (int y = 0; y < 23; ++y) {
            for (int x = 0; x < 12; ++x) {
                int expectedCellI0 = x;
                int expectedCellI1 = y;
                int expectedCellI2 = z;
                int expectedCellJ = x * y * z;
                double expectedCellX0 = x + y + 0.1;
                double expectedCellX1 = y + z + 0.2;

                CellWithArrayMember cell = device_grid.get(x, y, z);

                BOOST_TEST(expectedCellI0 == cell.i[0]);
                BOOST_TEST(expectedCellI1 == cell.i[1]);
                BOOST_TEST(expectedCellI2 == cell.i[2]);
                BOOST_TEST(expectedCellJ == cell.j);
                BOOST_TEST(expectedCellX0 == cell.x[0]);
                BOOST_TEST(expectedCellX1 == cell.x[1]);
            }
        }
    }
}

ADD_TEST(TestCUDAArrayMembersGetSetMultiple)
{
    // test set/get of multiple elements at once:
    soa_grid<CellWithArrayMember, cuda_allocator<char>, true> device_grid(40, 23, 34);
    for (int z = 0; z < 34; ++z) {
        for (int y = 0; y < 23; ++y) {
            CellWithArrayMember cells[40];
            for (int x = 0; x < 40; ++x) {
                cells[x].i[0] = x;
                cells[x].i[1] = y;
                cells[x].i[2] = z;
                cells[x].j = x * y * z;
                cells[x].x[0] = x + y + 0.1;
                cells[x].x[1] = y + z + 0.2;
            }
            device_grid.set(0, y, z, cells, 40);
        }
    }

    for (int z = 0; z < 34; ++z) {
        for (int y = 0; y < 23; ++y) {
            CellWithArrayMember cells[40];
            device_grid.get(0, y, z, cells, 40);

            for (int x = 0; x < 40; ++x) {
                int expectedCellI0 = x;
                int expectedCellI1 = y;
                int expectedCellI2 = z;
                int expectedCellJ = x * y * z;
                double expectedCellX0 = x + y + 0.1;
                double expectedCellX1 = y + z + 0.2;

                BOOST_TEST(expectedCellI0 == cells[x].i[0]);
                BOOST_TEST(expectedCellI1 == cells[x].i[1]);
                BOOST_TEST(expectedCellI2 == cells[x].i[2]);
                BOOST_TEST(expectedCellJ == cells[x].j);
                BOOST_TEST(expectedCellX0 == cells[x].x[0]);
                BOOST_TEST(expectedCellX1 == cells[x].x[1]);
            }
        }
    }
}

ADD_TEST(TestCUDAArrayMembersConstructDestruct)
{
    char *data = 0;

    {
        // prep device memory with consecutive numbers:
        soa_grid<CellWithPassiveArrayMember, fake_cuda_allocator<char>, true> device_grid(8, 9, 13);
        data = device_grid.data();

        soa_grid<CellWithPassiveArrayMember> host_grid(8, 9, 13);
        for (int z = 0; z < 13; ++z) {
            for (int y = 0; y < 9; ++y) {
                for (int x = 0; x < 8; ++x) {
                    CellWithPassiveArrayMember cell((x + 1) * (y + 1));
                    cell.elements[0].val = 40000 + x + y * 8 + z * 8 * 9;
                    cell.elements[1].val = 50000 + x + y * 8 + z * 8 * 9;
                    host_grid.set(x, y, z, cell);
                    cell = host_grid.get(x, y, z);
                }
            }
        }

        cudaMemcpy(device_grid.data(), host_grid.data(), device_grid.byte_size(), cudaMemcpyHostToDevice);
    }

    {
        // ensure c-tor was run by checking increment on all elements:
        soa_grid<CellWithActiveArrayMember, fake_cuda_allocator<char>, true> device_grid(8, 9, 13);
        BOOST_TEST(data == device_grid.data());

        soa_grid<CellWithPassiveArrayMember> host_grid(8, 9, 13);
        cudaMemcpy(host_grid.data(), device_grid.data(), device_grid.byte_size(), cudaMemcpyDeviceToHost);

        for (int z = 0; z < 13; ++z) {
            for (int y = 0; y < 9; ++y) {
                for (int x = 0; x < 8; ++x) {
                    CellWithPassiveArrayMember cell = host_grid.get(x, y, z);
                    int expected0 = 40000 + x + y * 8 + z * 8 * 9 + 100000;
                    int expected1 = 50000 + x + y * 8 + z * 8 * 9 + 100000;

                    BOOST_TEST(cell.elements[0].val == expected0);
                    BOOST_TEST(cell.elements[1].val == expected1);
                    BOOST_TEST(cell.i[0] == 0);
                    BOOST_TEST(cell.i[1] == 0);
                    BOOST_TEST(cell.i[2] == 0);
                }
            }
        }
    }

    {
        // ensure d-tor was run by checking increment on all elements:
        soa_grid<CellWithPassiveArrayMember> host_grid(8, 9, 13);
        cudaMemcpy(host_grid.data(), data, host_grid.byte_size(), cudaMemcpyDeviceToHost);

        for (int z = 0; z < 13; ++z) {
            for (int y = 0; y < 9; ++y) {
                for (int x = 0; x < 8; ++x) {
                    CellWithPassiveArrayMember cell = host_grid.get(x, y, z);
                    int expected0 = 40000 + x + y * 8 + z * 8 * 9 + 1100000;
                    int expected1 = 50000 + x + y * 8 + z * 8 * 9 + 1100000;

                    BOOST_TEST(cell.elements[0].val == expected0);
                    BOOST_TEST(cell.elements[1].val == expected1);
                    BOOST_TEST(cell.i[0] == 0);
                    BOOST_TEST(cell.i[1] == 0);
                    BOOST_TEST(cell.i[2] == 0);
                }
            }
        }
    }

    fake_cuda_allocator<char>().deallocate_all();
}

ADD_TEST(TestCUDAArrayMembersLoadSave)
{
    soa_grid<CellWithPassiveArrayMember, cuda_allocator<char>, true> device_grid(45, 35, 25);
    for (int z = 0; z < 25; ++z) {
        for (int y = 0; y < 35; ++y) {
            for (int x = 0; x < 45; ++x) {
                CellWithPassiveArrayMember cell;
                cell.i[0] = x;
                cell.i[1] = y;
                cell.i[2] = z;
                cell.j = x * y * z;
                cell.elements[0].val = 4711 + x * y;
                cell.elements[1].val = 666 + y * z;
                device_grid.set(x, y, z, cell);
            }
        }
    }

    std::vector<char> buffer(aggregated_member_size<CellWithPassiveArrayMember>::VALUE * 33);
    device_grid.save(12, 34, 24, buffer.data(), 33);

    soa_grid<CellWithPassiveArrayMember, cuda_allocator<char>, true> device_grid2(35, 20, 5);
    device_grid2.load(2, 19, 4, buffer.data(), 33);

    for (int x = 0; x < 33; ++x) {
        CellWithPassiveArrayMember cell = device_grid2.get(x + 2, 19, 4);

        int expectedI0 = x + 12;
        int expectedI1 = 34;
        int expectedI2 = 24;
        int expectedJ = (x + 12) * 34 * 24;
        int expectedElements0 = 4711 + (x + 12) * 34;
        int expectedElements1 = 666 + 34 * 24;

        BOOST_TEST(cell.i[0] == expectedI0);
        BOOST_TEST(cell.i[1] == expectedI1);
        BOOST_TEST(cell.i[2] == expectedI2);
        BOOST_TEST(cell.j == expectedJ);
        BOOST_TEST(cell.elements[0].val == expectedElements0);
        BOOST_TEST(cell.elements[1].val == expectedElements1);
    }
}

}

int main(int argc, char **argv)
{
    return 0;
}
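// The following is a small standalone sketch, separate from the test file above. It
// spells out the offset arithmetic behind the "verify save" block of
// TestCUDALoadSaveElements: the buffer written by save() appears to hold a
// struct-of-arrays run per member (all 20 temperature doubles, then 20 val ints,
// then 20 alive bools), which is where the hard-coded 0 * 20, 8 * 20 and 12 * 20
// byte offsets come from. The member ordering and packing are inferred from those
// offsets, not taken from the library's documentation.
#include <cstddef>
#include <cstdio>

int main()
{
    const std::size_t count = 20;  // elements written by device_grid.save() in the test above
    const std::size_t temperatureOffset = 0;                                        //  0 * 20
    const std::size_t valOffset   = temperatureOffset + sizeof(double) * count;     //  8 * 20
    const std::size_t aliveOffset = valOffset + sizeof(int) * count;                // 12 * 20
    std::printf("temperature @ %zu, val @ %zu, alive @ %zu\n",
                temperatureOffset, valOffset, aliveOffset);
    return 0;
}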
#include <stdio.h>
#include <assert.h>
#include <algorithm>
#include <cuda_runtime_api.h>
#include <device_atomic_functions.h>
#include <device_launch_parameters.h>

#define W 32
#define G 1024
#define B 256

__forceinline__ __device__ static int idx2(int n, int u, int U1) {
    return n * U1 + u;
}

__forceinline__ __device__ static int idx3(int n, int t, int u, int T, int U) {
    return n * (T * U) + t * U + u;
}

__forceinline__ __device__ static int idx4(int n, int t, int u, int v, int T, int U, int V) {
    return n * (T * U * V) + t * (U * V) + u * V + v;
}

__forceinline__ __device__ static float log_sum_exp(float a, float b) {
    float maximum, diff;
    if (a > b) {
        maximum = a;
        diff = b-a;
    } else {
        maximum = b;
        diff = a-b;
    }
    //if (diff > -42) {
        maximum += log1pf(expf(diff));
    //}
    return maximum;
}

__device__
void kernel_warp_alphas(unsigned int *counts, volatile float *alphas, const int *labels,
                        const float *log_probs, const int *xn, const int *yn,
                        int T, int U, int V, int blank) {

    unsigned int d = threadIdx.x;
    unsigned int g = blockIdx.x;
    unsigned int u = blockIdx.y + 1;
    unsigned int n = blockIdx.z;
    unsigned int p = g * W;
    unsigned int t = p + d + 1;

    assert (d < W);
    assert (u <= U);
    assert (gridDim.y == U);
    assert (blockDim.x == W);

    int actual_t = xn[n];
    int actual_u = yn[n] + 1;

    if (t > actual_t || u > actual_u)
        return;

    unsigned int *lock = counts + n * U * 2 + blockIdx.y;

    if (blockIdx.x == 0 && blockIdx.y == 0) {
        alphas[idx3(n, 0, 0, T, U)] = 0;
    }

    if (blockIdx.x > 0) {
        // Wait previous row
        do {} while (atomicAdd(lock, 0) < g);
    }

    if (blockIdx.y > 0) {
        // Wait previous column
        do {} while (atomicAdd(lock-1, 0) <= g);
    }

    if (blockIdx.x == 0 && u < actual_u) {
        // Compute initial row value
        unsigned int l = labels[idx2(n, u-1, U-1)];
        float a = alphas[idx3(n, 0, u-1, T, U)];
        float b = log_probs[idx4(n, 0, u-1, l, T, U, V)];
        alphas[idx3(n, 0, u, T, U)] = a + b;
    }

    if (blockIdx.y == 0 && t < actual_t) {
        // Compute initial column with local scan algorithm
        float a;
        float b = log_probs[idx4(n, t-1, 0, blank, T, U, V)];

#pragma unroll
        for(unsigned int i = 1; i < W; i *= 2) {
            a = __shfl_up_sync(0xffffffff, b, i);
            if (i <= d) {
                b += a;
            }
        }

        a = alphas[idx3(n, p, 0, T, U)];
        alphas[idx3(n, t, 0, T, U)] = a + b;
    }

    if (t < actual_t && u < actual_u) {
        // Ready to compute alphas[t, u]
        unsigned int l = labels[idx2(n, u-1, U-1)];
        float bias = log_probs[idx4(n, t-1, u, blank, T, U, V)];
        float skip = alphas[idx3(n, p, u, T, U)] + bias;
        float emit = alphas[idx3(n, t, u-1, T, U)] + log_probs[idx4(n, t, u-1, l, T, U, V)];

        float r = log_sum_exp(skip, emit);
        float output = r;

        for(unsigned int i = 1; i < W; i++) {
            r = __shfl_up_sync(0xffffffff, r, 1);
            if (i == d) {
                r = log_sum_exp(r + bias, emit);
                output = r;
            }
        }

        alphas[idx3(n, t, u, T, U)] = output;
    }

    if (d == 0) {
        // https://stackoverflow.com/a/5233737
        __threadfence();
        atomicAdd(lock, 1);
    }
}

__device__
void kernel_warp_betas(unsigned int *counts, volatile float *betas, const int *labels,
                       const float *log_probs, const int *xn, const int *yn,
                       int T, int U, int V, int blank) {

    unsigned int d = threadIdx.x;
    unsigned int g = blockIdx.x;
    unsigned int u = blockIdx.y + 1;
    unsigned int n = blockIdx.z;
    unsigned int p = g * W;
    unsigned int t = p + d + 1;

    assert (d < W);
    assert (u <= U);
    assert (gridDim.y == U);
    assert (blockDim.x == W);

    int actual_t = xn[n];
    int actual_u = yn[n] + 1;

    if (t > actual_t || u > actual_u)
        return;

    int T1 = actual_t - 1;
    int U1 = actual_u - 1;

    unsigned int *lock = counts + n * U * 2 + U + blockIdx.y;

    if (blockIdx.x == 0 && blockIdx.y == 0) {
        betas[idx3(n, T1, U1, T, U)] = log_probs[idx4(n, T1, U1, blank, T, U, V)];
    }

    if (blockIdx.x > 0) {
        // Wait previous row
        do {} while (atomicAdd(lock, 0) < g);
    }

    if (blockIdx.y > 0) {
        // Wait previous column
        do {} while (atomicAdd(lock-1, 0) <= g);
    }

    if (blockIdx.x == 0 && u < actual_u) {
        // Compute last row value
        unsigned int l = labels[idx2(n, U1-u, U-1)];
        float a = betas[idx3(n, T1, U1-u+1, T, U)];
        float b = log_probs[idx4(n, T1, U1-u, l, T, U, V)];
        betas[idx3(n, T1, U1-u, T, U)] = a + b;
    }

    if (blockIdx.y == 0 && t < actual_t) {
        // Compute last column with local scan algorithm
        float a;
        float b = log_probs[idx4(n, T1-t, U1, blank, T, U, V)];

#pragma unroll
        for(unsigned int i = 1; i < W; i *= 2) {
            a = __shfl_up_sync(0xffffffff, b, i);
            if (i <= d) {
                b += a;
            }
        }

        a = betas[idx3(n, T1-p, U1, T, U)];
        betas[idx3(n, T1-t, U1, T, U)] = a + b;
    }

    if (t < actual_t && u < actual_u) {
        // Ready to compute betas[T1-t, U1-u]
        unsigned int l = labels[idx2(n, U1-u, U-1)];
        float bias = log_probs[idx4(n, T1-t, U1-u, blank, T, U, V)];
        float skip = betas[idx3(n, T1-p, U1-u, T, U)] + bias;
        float emit = betas[idx3(n, T1-t, U1-u+1, T, U)] + log_probs[idx4(n, T1-t, U1-u, l, T, U, V)];

        float r = log_sum_exp(skip, emit);
        float output = r;

        for(unsigned int i = 1; i < W; i++) {
            r = __shfl_up_sync(0xffffffff, r, 1);
            if (i == d) {
                r = log_sum_exp(r + bias, emit);
                output = r;
            }
        }

        betas[idx3(n, T1-t, U1-u, T, U)] = output;
    }

    if (d == 0) {
        // https://stackoverflow.com/a/5233737
        __threadfence();
        atomicAdd(lock, 1);
    }
}

__global__
void kernel_warp(unsigned int *counts, volatile float *alphas, volatile float *betas,
                 const int *labels, const float *log_probs, const int *xn, const int *yn,
                 int T, int U, int V, int blank) {
    if (threadIdx.y == 0) {
        kernel_warp_alphas(counts, alphas, labels, log_probs, xn, yn, T, U, V, blank);
    }
    else if (threadIdx.y == 1) {
        kernel_warp_betas(counts, betas, labels, log_probs, xn, yn, T, U, V, blank);
    }
}

__global__
void kernel_grads_blank(float *grads, const float *alphas, const float *betas, const float *log_probs,
                        const int *xn, const int *yn, int T, int U, int V, int blank) {

    unsigned int d = threadIdx.x;
    unsigned int g = blockIdx.x;
    unsigned int u = blockIdx.y;
    unsigned int n = blockIdx.z;
    unsigned int t = g * G + d;

    assert (u < U);
    assert (d < G);
    assert (blockDim.x == G);
    assert (gridDim.y == U);

    int actual_t = xn[n];
    int actual_u = yn[n] + 1;

    if (t >= actual_t || u >= actual_u)
        return;

    if (t == actual_t-1 && u < actual_u-1)
        return;

    float a = alphas[idx3(n, t, u, T, U)];

    if (t < actual_t-1) {
        a += betas[idx3(n, t+1, u, T, U)];
    }

    unsigned int index = idx4(n, t, u, blank, T, U, V);

    a = expf(a + log_probs[index] - betas[idx3(n, 0, 0, T, U)]);

    grads[index] = -a;
}

__global__
void kernel_grads_label(float *grads, const float *alphas, const float *betas,
                        const int *labels, const float *log_probs,
                        const int *xn, const int *yn, int T, int U, int V,
                        float fastemit_lambda) {

    unsigned int d = threadIdx.x;
    unsigned int g = blockIdx.x;
    unsigned int u = blockIdx.y;
    unsigned int n = blockIdx.z;
    unsigned int t = g * G + d;

    assert (u < U - 1);
    assert (d < G);
    assert (blockDim.x == G);
    assert (gridDim.y == U - 1);

    int actual_t = xn[n];
    int actual_u = yn[n];

    if (t >= actual_t || u >= actual_u)
        return;

    unsigned int l = labels[idx2(n, u, U-1)];

    float a = alphas[idx3(n, t, u, T, U)] + betas[idx3(n, t, u+1, T, U)];

    unsigned int index = idx4(n, t, u, l, T, U, V);

    a = expf(a + log_probs[index] - betas[idx3(n, 0, 0, T, U)]);

    // apply FastEmit regularization
    // https://arxiv.org/abs/2010.11148
    a = (1. + fastemit_lambda) * a;

    grads[index] = -a;
}

__global__
void kernel_fill_costs(float *costs, float *grads, const float *alphas, const float *betas,
                       const float *log_probs, const int *xn, const int *yn,
                       int N, int T, int U, int V, int blank) {

    unsigned int n = blockIdx.x * blockDim.x + threadIdx.x;

    if (n >= N)
        return;

    int t = xn[n] - 1;
    int u = yn[n];

    float a = alphas[idx3(n, t, u, T, U)] + log_probs[idx4(n, t, u, blank, T, U, V)];
    float b = betas[idx3(n, 0, 0, T, U)];

    float ratio = fabsf(a - b) / fabsf(fmaxf(a, b));

    if (ratio > 0.001) {

        printf("\nWARNING: sample %d [%d, %d] has a forward/backward mismatch %f / %f\n",
               n, t + 1, u, a, b);

        float *g = grads + idx4(n, 0, 0, 0, T, U, V);

        for (int i = 0; i < T; ++i) {
            for (int j = 0; j < U; ++j) {
                for (int v = 0; v < V; ++v, ++g) {
                    *g = 0;
                }
            }
        }

        b = (a + b) / 2.0f;
    }

    costs[n] = -b;
}

rnntStatus_t run_warp_rnnt(cudaStream_t stream, unsigned int *counts, float *alphas, float *betas,
                           const int *labels, const float *log_probs, float *grads, float *costs,
                           const int *xn, const int *yn, int N, int T, int U, int V, int blank,
                           float fastemit_lambda) {

    dim3 threads1(W, 2);
    dim3 blocks1((T + W - 1) / W, U, N);
    kernel_warp <<<blocks1, threads1, 0, stream>>> (counts, alphas, betas, labels, log_probs, xn, yn, T, U, V, blank);
    if (cudaGetLastError() != cudaSuccess)
        return RNNT_STATUS_WARP_FAILED;

    dim3 blocks2((T + G - 1) / G, U, N);
    kernel_grads_blank <<<blocks2, G, 0, stream>>> (grads, alphas, betas, log_probs, xn, yn, T, U, V, blank);
    if (cudaGetLastError() != cudaSuccess)
        return RNNT_STATUS_GRADS_BLANK_FAILED;

    if (U > 1) {
        dim3 blocks3((T + G - 1) / G, U - 1, N);
        kernel_grads_label <<<blocks3, G, 0, stream>>> (grads, alphas, betas, labels, log_probs, xn, yn, T, U, V, fastemit_lambda);
        if (cudaGetLastError() != cudaSuccess)
            return RNNT_STATUS_GRADS_LABEL_FAILED;
    }

    dim3 blocks4((N + B - 1) / B, 1, 1);
    kernel_fill_costs <<<blocks4, B, 0, stream>>> (costs, grads, alphas, betas, log_probs, xn, yn, N, T, U, V, blank);
    if (cudaGetLastError() != cudaSuccess)
        return RNNT_STATUS_COSTS_FAILED;

    return RNNT_STATUS_SUCCESS;
}
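// A minimal host-side sketch of how run_warp_rnnt() might be driven; it is not part
// of the original file. Buffer shapes are inferred from the idx2/idx3/idx4 helpers
// above: log_probs and grads are N*T*U*V floats, alphas and betas are N*T*U, labels
// is N*(U-1) ints, and counts is N*U*2 unsigned ints which must start zeroed because
// the warp kernels use it as spin-lock counters. rnntStatus_t and the RNNT_STATUS_*
// codes are assumed to be declared in an accompanying header that is not shown here.
// Error checking on the CUDA calls is omitted for brevity.
#include <vector>

int run_warp_rnnt_example(const float *h_log_probs, const int *h_labels,
                          const int *h_xn, const int *h_yn,
                          int N, int T, int U, int V, int blank,
                          std::vector<float> *h_costs_out)
{
    cudaStream_t stream = 0;  // default stream

    unsigned int *counts = 0;
    float *alphas = 0, *betas = 0, *log_probs = 0, *grads = 0, *costs = 0;
    int *labels = 0, *xn = 0, *yn = 0;

    cudaMalloc((void**)&counts, sizeof(unsigned int) * N * U * 2);
    cudaMemset(counts, 0, sizeof(unsigned int) * N * U * 2);   // locks must start at 0
    cudaMalloc((void**)&alphas,    sizeof(float) * N * T * U);
    cudaMalloc((void**)&betas,     sizeof(float) * N * T * U);
    cudaMalloc((void**)&log_probs, sizeof(float) * N * T * U * V);
    cudaMalloc((void**)&grads,     sizeof(float) * N * T * U * V);
    cudaMalloc((void**)&costs,     sizeof(float) * N);
    cudaMalloc((void**)&labels,    sizeof(int) * N * (U - 1));
    cudaMalloc((void**)&xn,        sizeof(int) * N);
    cudaMalloc((void**)&yn,        sizeof(int) * N);

    cudaMemcpy(log_probs, h_log_probs, sizeof(float) * N * T * U * V, cudaMemcpyHostToDevice);
    cudaMemcpy(labels, h_labels, sizeof(int) * N * (U - 1), cudaMemcpyHostToDevice);
    cudaMemcpy(xn, h_xn, sizeof(int) * N, cudaMemcpyHostToDevice);
    cudaMemcpy(yn, h_yn, sizeof(int) * N, cudaMemcpyHostToDevice);

    rnntStatus_t status = run_warp_rnnt(stream, counts, alphas, betas, labels, log_probs,
                                        grads, costs, xn, yn, N, T, U, V, blank,
                                        /*fastemit_lambda=*/0.0f);
    cudaStreamSynchronize(stream);

    if (status == RNNT_STATUS_SUCCESS && h_costs_out) {
        h_costs_out->resize(N);
        cudaMemcpy(h_costs_out->data(), costs, sizeof(float) * N, cudaMemcpyDeviceToHost);
    }

    cudaFree(counts); cudaFree(alphas); cudaFree(betas);
    cudaFree(log_probs); cudaFree(grads); cudaFree(costs);
    cudaFree(labels); cudaFree(xn); cudaFree(yn);

    return status == RNNT_STATUS_SUCCESS ? 0 : 1;
}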