Dataset columns (all values are strings; lengths shown as minimum to maximum):
  hip_filename:   5 to 84 characters
  hip_content:    79 to 9.69M characters
  cuda_filename:  4 to 83 characters
  cuda_content:   19 to 9.69M characters
bdd3dd9b611a4b2561c6f6b16e8ced6d4d48d284.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "BNSL_GPU.cuh" int *valuesRange, *samplesValues; int nodesNum, samplesNum; int allParentSetNumPerNode; double * dev_lsTable; int* globalBestGraph, *globalBestOrder; double globalBestScore; int initTime, calcLocalScoreTime, searchTime; void BNSL_init() { startWatch(); readNodeInfo(&nodesNum, &valuesRange); readSamples(&samplesValues, &samplesNum, nodesNum); initTime = stopWatch(); } void BNSL_calcLocalScore() { startWatch(); allParentSetNumPerNode = 0; for (int i = 0; i <= CONSTRAINTS; i++) { allParentSetNumPerNode = allParentSetNumPerNode + C(i, nodesNum - 1); } int * dev_valuesRange; int * dev_samplesValues; int * dev_N; // calculate max different values number for all pair of child and parent set int valuesMaxNum = calcValuesMaxNum(valuesRange, nodesNum); CUDA_CHECK_RETURN( hipMalloc(&dev_lsTable, nodesNum * allParentSetNumPerNode * sizeof(double)), "hipMalloc failed: dev_lsTable."); CUDA_CHECK_RETURN(hipMalloc(&dev_valuesRange, nodesNum * sizeof(int)), "hipMalloc failed: dev_valuesRange."); CUDA_CHECK_RETURN( hipMalloc(&dev_samplesValues, samplesNum * nodesNum * sizeof(int)), "hipMalloc failed: dev_samplesValues."); CUDA_CHECK_RETURN( hipMalloc(&dev_N, valuesMaxNum * allParentSetNumPerNode * sizeof(int)), "dev_N hipMalloc failed."); CUDA_CHECK_RETURN( hipMemcpy(dev_valuesRange, valuesRange, nodesNum * sizeof(int), hipMemcpyHostToDevice), "hipMemcpy failed: valuesRange -> dev_valuesRange"); CUDA_CHECK_RETURN( hipMemcpy(dev_samplesValues, samplesValues, samplesNum * nodesNum * sizeof(int), hipMemcpyHostToDevice), "hipMemcpy failed: samplesValues -> dev_samplesValues"); int threadNum = 256; int blockNum = (allParentSetNumPerNode - 1) / threadNum + 1; // calcAllLocalScore_kernel<<<blockNum, threadNum, nodesNum * sizeof(int)>>>( // dev_valuesRange, dev_samplesValues, dev_lsTable, dev_N, samplesNum, // nodesNum, allParentSetNumPerNode, valuesMaxNum); CUDA_CHECK_RETURN(hipFree(dev_valuesRange), "hipFree failed: dev_valuesRange."); CUDA_CHECK_RETURN(hipFree(dev_samplesValues), "hipFree failed: dev_samplesValues."); CUDA_CHECK_RETURN(hipFree(dev_N), "hipFree failed: dev_N."); free(valuesRange); free(samplesValues); calcLocalScoreTime = stopWatch(); } void BNSL_search() { startWatch(); int i, j, iter; int parentSetNumInOrder = 0; for (i = 0; i < nodesNum; i++) { for (j = 0; j <= CONSTRAINTS && j < i + 1; j++) { parentSetNumInOrder += C(j, i); } } int ordersNum = 128; int iterNum = ITER; srand((unsigned int) time(NULL)); int seed = 1234; int * dev_newOrders; CUDA_CHECK_RETURN( hipMalloc(&dev_newOrders, ordersNum * nodesNum * sizeof(int)), "hipMalloc failed: dev_newOrders."); int * newOrder = (int *) malloc(nodesNum * sizeof(int)); CUDA_CHECK_RETURN(hipHostMalloc(&newOrder, nodesNum * sizeof(int)), "hipHostMalloc failed: newOrder."); randInitOrder(newOrder, nodesNum); double * dev_parentSetScore; CUDA_CHECK_RETURN( hipMalloc(&dev_parentSetScore, ordersNum * parentSetNumInOrder * sizeof(double)), "hipMalloc failed: dev_parentSetScore."); double * dev_maxLocalScore; CUDA_CHECK_RETURN( hipMalloc(&dev_maxLocalScore, ordersNum * nodesNum * sizeof(double)), "hipMalloc failed: dev_maxLocalScore."); double * dev_ordersScore, *ordersScore; CUDA_CHECK_RETURN(hipMalloc(&dev_ordersScore, ordersNum * sizeof(double)), "hipMalloc failed: dev_ordersScore."); CUDA_CHECK_RETURN(hipHostMalloc(&ordersScore, ordersNum * sizeof(double)), "hipHostMalloc failed: ordersScore."); double *dev_prob, *prob; 
CUDA_CHECK_RETURN(hipMalloc(&dev_prob, ordersNum * sizeof(double)), "hipMalloc failed: dev_prob."); CUDA_CHECK_RETURN(hipHostMalloc(&prob, ordersNum * sizeof(double)), "hipHostMalloc failed: prob."); int *dev_samples, *samples; CUDA_CHECK_RETURN(hipMalloc(&dev_samples, ordersNum * sizeof(int)), "hipMalloc failed: dev_samples."); CUDA_CHECK_RETURN(hipHostMalloc(&samples, ordersNum * sizeof(int)), "hipHostMalloc failed: samples."); globalBestOrder = (int *) malloc(nodesNum * sizeof(int)); globalBestScore = -FLT_MAX; hiprandState_t *dev_curandState; CUDA_CHECK_RETURN( hipMalloc(&dev_curandState, ordersNum * sizeof(hiprandState_t)), "hipMalloc failed: dev_curandState."); hipLaunchKernelGGL(( curandSetup_kernel), dim3(1), dim3(ordersNum), 0, 0, dev_curandState, seed); CUDA_CHECK_RETURN(hipGetLastError(), "curandSetup_kernel launch failed."); calcCDFInit(ordersNum); for (iter = 1; iter <= iterNum; iter++) { printf("iter = %d:\n", iter); //calcGPUTimeStart("generateOrders_kernel: "); CUDA_CHECK_RETURN( hipMemcpy(dev_newOrders, newOrder, nodesNum * sizeof(int), hipMemcpyHostToDevice), "hipMemcpy failed: newOrder -> dev_newOrders."); hipLaunchKernelGGL(( generateOrders_kernel), dim3(1), dim3(128), nodesNum * sizeof(int), 0, dev_newOrders, dev_curandState, nodesNum, ordersNum); CUDA_CHECK_RETURN(hipGetLastError(), "generateOrders_kernel launch failed."); //calcGPUTimeEnd(); //calcGPUTimeStart("calcOnePairPerThread_kernel: "); int threadNum = 128; int blockNum = (parentSetNumInOrder - 1) / threadNum + 1; dim3 gridDim(blockNum, ordersNum); hipLaunchKernelGGL(( calcOnePairPerThread_kernel), dim3(gridDim), dim3(threadNum), 0, 0, dev_lsTable, dev_newOrders, dev_parentSetScore, nodesNum, allParentSetNumPerNode, parentSetNumInOrder); CUDA_CHECK_RETURN(hipGetLastError(), "calcOnePairPerThread_kernel launch failed."); //calcGPUTimeEnd(); //calcGPUTimeStart("calcMaxParentSetScoreForEachNode_kernel: "); hipLaunchKernelGGL(( calcMaxParentSetScoreForEachNode_kernel), dim3(nodesNum), dim3(ordersNum), 0, 0, dev_parentSetScore, dev_maxLocalScore, parentSetNumInOrder, nodesNum); CUDA_CHECK_RETURN(hipGetLastError(), "calcMaxLocalScoreForEachNode_kernel launch failed."); //calcGPUTimeEnd(); hipLaunchKernelGGL(( calcAllOrdersScore_kernel), dim3(1), dim3(ordersNum), 0, 0, dev_maxLocalScore, dev_ordersScore, nodesNum); CUDA_CHECK_RETURN(hipGetLastError(), "calcAllOrdersScore_kernel launch failed."); CUDA_CHECK_RETURN( hipMemcpy(ordersScore, dev_ordersScore, ordersNum * sizeof(double), hipMemcpyDeviceToHost), "hipMemcpy failed: dev_ordersScore -> ordersScore."); int maxId = calcCDF(ordersScore, prob); if (ordersScore[maxId] > globalBestScore) { CUDA_CHECK_RETURN( hipMemcpy(globalBestOrder, dev_newOrders + maxId * nodesNum, nodesNum * sizeof(int), hipMemcpyDeviceToHost), "hipMemcpy failed: dev_newOrders -> globalBestOrder"); globalBestScore = ordersScore[maxId]; } CUDA_CHECK_RETURN( hipMemcpy(dev_prob, prob, ordersNum * sizeof(double), hipMemcpyHostToDevice), "hipMemcpy failed: prob -> dev_prob."); hipLaunchKernelGGL(( sample_kernel), dim3(1), dim3(ordersNum), ordersNum * 8, 0, dev_prob, dev_samples, dev_curandState, ordersNum); CUDA_CHECK_RETURN(hipGetLastError(), "sample_kernel launch failed."); CUDA_CHECK_RETURN( hipMemcpy(samples, dev_samples, ordersNum * sizeof(int), hipMemcpyDeviceToHost), "hipMemcpy failed: dev_samples -> samples."); int r = rand() % ordersNum; CUDA_CHECK_RETURN( hipMemcpy(newOrder, dev_newOrders + samples[r] * nodesNum, nodesNum * sizeof(int), hipMemcpyDeviceToHost), "hipMemcpy failed: 
dev_newOrders -> newOrder"); } CUDA_CHECK_RETURN(hipFree(dev_newOrders), "hipFree failed: dev_newOrders."); CUDA_CHECK_RETURN(hipFree(dev_parentSetScore), "hipFree failed: dev_parentSetScore."); CUDA_CHECK_RETURN(hipFree(dev_maxLocalScore), "hipFree failed: dev_maxLocalScore."); CUDA_CHECK_RETURN(hipFree(dev_ordersScore), "hipFree failed: dev_ordersScore."); CUDA_CHECK_RETURN(hipFree(dev_prob), "hipFree failed: dev_prob."); CUDA_CHECK_RETURN(hipFree(dev_samples), "hipFree failed: dev_samples."); CUDA_CHECK_RETURN(hipFree(dev_curandState), "hipFree failed: dev_curandState."); CUDA_CHECK_RETURN(hipHostFree(newOrder), "hipHostFree failed: newOrder."); CUDA_CHECK_RETURN(hipHostFree(ordersScore), "hipHostFree failed: ordersScore."); CUDA_CHECK_RETURN(hipHostFree(prob), "hipHostFree failed: prob."); CUDA_CHECK_RETURN(hipHostFree(samples), "hipHostFree failed: samples."); calcCDFFinish(); searchTime = stopWatch(); } void BNSL_printResult() { printf("Best Score: %f \n", globalBestScore); printf("Best Topology: "); for (int i = 0; i < nodesNum; i++) { printf("%d ", globalBestOrder[i]); } printf("\n"); printf("BNSL_init elapsed time is %dms.\n", initTime); printf("BNSL_calcLocalScore time is %dms. \n", calcLocalScoreTime); printf("BNSL_search time is %dms. \n", searchTime); } void BNSL_finish() { CUDA_CHECK_RETURN(hipFree(dev_lsTable), "hipFree failed: dev_lsTable."); free(globalBestOrder); free(globalBestGraph); }
bdd3dd9b611a4b2561c6f6b16e8ced6d4d48d284.cu
#include "BNSL_GPU.cuh" int *valuesRange, *samplesValues; int nodesNum, samplesNum; int allParentSetNumPerNode; double * dev_lsTable; int* globalBestGraph, *globalBestOrder; double globalBestScore; int initTime, calcLocalScoreTime, searchTime; void BNSL_init() { startWatch(); readNodeInfo(&nodesNum, &valuesRange); readSamples(&samplesValues, &samplesNum, nodesNum); initTime = stopWatch(); } void BNSL_calcLocalScore() { startWatch(); allParentSetNumPerNode = 0; for (int i = 0; i <= CONSTRAINTS; i++) { allParentSetNumPerNode = allParentSetNumPerNode + C(i, nodesNum - 1); } int * dev_valuesRange; int * dev_samplesValues; int * dev_N; // calculate max different values number for all pair of child and parent set int valuesMaxNum = calcValuesMaxNum(valuesRange, nodesNum); CUDA_CHECK_RETURN( cudaMalloc(&dev_lsTable, nodesNum * allParentSetNumPerNode * sizeof(double)), "cudaMalloc failed: dev_lsTable."); CUDA_CHECK_RETURN(cudaMalloc(&dev_valuesRange, nodesNum * sizeof(int)), "cudaMalloc failed: dev_valuesRange."); CUDA_CHECK_RETURN( cudaMalloc(&dev_samplesValues, samplesNum * nodesNum * sizeof(int)), "cudaMalloc failed: dev_samplesValues."); CUDA_CHECK_RETURN( cudaMalloc(&dev_N, valuesMaxNum * allParentSetNumPerNode * sizeof(int)), "dev_N cudaMalloc failed."); CUDA_CHECK_RETURN( cudaMemcpy(dev_valuesRange, valuesRange, nodesNum * sizeof(int), cudaMemcpyHostToDevice), "cudaMemcpy failed: valuesRange -> dev_valuesRange"); CUDA_CHECK_RETURN( cudaMemcpy(dev_samplesValues, samplesValues, samplesNum * nodesNum * sizeof(int), cudaMemcpyHostToDevice), "cudaMemcpy failed: samplesValues -> dev_samplesValues"); int threadNum = 256; int blockNum = (allParentSetNumPerNode - 1) / threadNum + 1; // calcAllLocalScore_kernel<<<blockNum, threadNum, nodesNum * sizeof(int)>>>( // dev_valuesRange, dev_samplesValues, dev_lsTable, dev_N, samplesNum, // nodesNum, allParentSetNumPerNode, valuesMaxNum); CUDA_CHECK_RETURN(cudaFree(dev_valuesRange), "cudaFree failed: dev_valuesRange."); CUDA_CHECK_RETURN(cudaFree(dev_samplesValues), "cudaFree failed: dev_samplesValues."); CUDA_CHECK_RETURN(cudaFree(dev_N), "cudaFree failed: dev_N."); free(valuesRange); free(samplesValues); calcLocalScoreTime = stopWatch(); } void BNSL_search() { startWatch(); int i, j, iter; int parentSetNumInOrder = 0; for (i = 0; i < nodesNum; i++) { for (j = 0; j <= CONSTRAINTS && j < i + 1; j++) { parentSetNumInOrder += C(j, i); } } int ordersNum = 128; int iterNum = ITER; srand((unsigned int) time(NULL)); int seed = 1234; int * dev_newOrders; CUDA_CHECK_RETURN( cudaMalloc(&dev_newOrders, ordersNum * nodesNum * sizeof(int)), "cudaMalloc failed: dev_newOrders."); int * newOrder = (int *) malloc(nodesNum * sizeof(int)); CUDA_CHECK_RETURN(cudaMallocHost(&newOrder, nodesNum * sizeof(int)), "cudaMallocHost failed: newOrder."); randInitOrder(newOrder, nodesNum); double * dev_parentSetScore; CUDA_CHECK_RETURN( cudaMalloc(&dev_parentSetScore, ordersNum * parentSetNumInOrder * sizeof(double)), "cudaMalloc failed: dev_parentSetScore."); double * dev_maxLocalScore; CUDA_CHECK_RETURN( cudaMalloc(&dev_maxLocalScore, ordersNum * nodesNum * sizeof(double)), "cudaMalloc failed: dev_maxLocalScore."); double * dev_ordersScore, *ordersScore; CUDA_CHECK_RETURN(cudaMalloc(&dev_ordersScore, ordersNum * sizeof(double)), "cudaMalloc failed: dev_ordersScore."); CUDA_CHECK_RETURN(cudaMallocHost(&ordersScore, ordersNum * sizeof(double)), "cudaMallocHost failed: ordersScore."); double *dev_prob, *prob; CUDA_CHECK_RETURN(cudaMalloc(&dev_prob, ordersNum * sizeof(double)), "cudaMalloc 
failed: dev_prob."); CUDA_CHECK_RETURN(cudaMallocHost(&prob, ordersNum * sizeof(double)), "cudaMallocHost failed: prob."); int *dev_samples, *samples; CUDA_CHECK_RETURN(cudaMalloc(&dev_samples, ordersNum * sizeof(int)), "cudaMalloc failed: dev_samples."); CUDA_CHECK_RETURN(cudaMallocHost(&samples, ordersNum * sizeof(int)), "cudaMallocHost failed: samples."); globalBestOrder = (int *) malloc(nodesNum * sizeof(int)); globalBestScore = -FLT_MAX; curandState *dev_curandState; CUDA_CHECK_RETURN( cudaMalloc(&dev_curandState, ordersNum * sizeof(curandState)), "cudaMalloc failed: dev_curandState."); curandSetup_kernel<<<1, ordersNum>>>(dev_curandState, seed); CUDA_CHECK_RETURN(cudaGetLastError(), "curandSetup_kernel launch failed."); calcCDFInit(ordersNum); for (iter = 1; iter <= iterNum; iter++) { printf("iter = %d:\n", iter); //calcGPUTimeStart("generateOrders_kernel: "); CUDA_CHECK_RETURN( cudaMemcpy(dev_newOrders, newOrder, nodesNum * sizeof(int), cudaMemcpyHostToDevice), "cudaMemcpy failed: newOrder -> dev_newOrders."); generateOrders_kernel<<<1, 128, nodesNum * sizeof(int)>>>(dev_newOrders, dev_curandState, nodesNum, ordersNum); CUDA_CHECK_RETURN(cudaGetLastError(), "generateOrders_kernel launch failed."); //calcGPUTimeEnd(); //calcGPUTimeStart("calcOnePairPerThread_kernel: "); int threadNum = 128; int blockNum = (parentSetNumInOrder - 1) / threadNum + 1; dim3 gridDim(blockNum, ordersNum); calcOnePairPerThread_kernel<<<gridDim, threadNum>>>(dev_lsTable, dev_newOrders, dev_parentSetScore, nodesNum, allParentSetNumPerNode, parentSetNumInOrder); CUDA_CHECK_RETURN(cudaGetLastError(), "calcOnePairPerThread_kernel launch failed."); //calcGPUTimeEnd(); //calcGPUTimeStart("calcMaxParentSetScoreForEachNode_kernel: "); calcMaxParentSetScoreForEachNode_kernel<<<nodesNum, ordersNum>>>( dev_parentSetScore, dev_maxLocalScore, parentSetNumInOrder, nodesNum); CUDA_CHECK_RETURN(cudaGetLastError(), "calcMaxLocalScoreForEachNode_kernel launch failed."); //calcGPUTimeEnd(); calcAllOrdersScore_kernel<<<1, ordersNum>>>(dev_maxLocalScore, dev_ordersScore, nodesNum); CUDA_CHECK_RETURN(cudaGetLastError(), "calcAllOrdersScore_kernel launch failed."); CUDA_CHECK_RETURN( cudaMemcpy(ordersScore, dev_ordersScore, ordersNum * sizeof(double), cudaMemcpyDeviceToHost), "cudaMemcpy failed: dev_ordersScore -> ordersScore."); int maxId = calcCDF(ordersScore, prob); if (ordersScore[maxId] > globalBestScore) { CUDA_CHECK_RETURN( cudaMemcpy(globalBestOrder, dev_newOrders + maxId * nodesNum, nodesNum * sizeof(int), cudaMemcpyDeviceToHost), "cudaMemcpy failed: dev_newOrders -> globalBestOrder"); globalBestScore = ordersScore[maxId]; } CUDA_CHECK_RETURN( cudaMemcpy(dev_prob, prob, ordersNum * sizeof(double), cudaMemcpyHostToDevice), "cudaMemcpy failed: prob -> dev_prob."); sample_kernel<<<1, ordersNum, ordersNum * 8>>>(dev_prob, dev_samples, dev_curandState, ordersNum); CUDA_CHECK_RETURN(cudaGetLastError(), "sample_kernel launch failed."); CUDA_CHECK_RETURN( cudaMemcpy(samples, dev_samples, ordersNum * sizeof(int), cudaMemcpyDeviceToHost), "cudaMemcpy failed: dev_samples -> samples."); int r = rand() % ordersNum; CUDA_CHECK_RETURN( cudaMemcpy(newOrder, dev_newOrders + samples[r] * nodesNum, nodesNum * sizeof(int), cudaMemcpyDeviceToHost), "cudaMemcpy failed: dev_newOrders -> newOrder"); } CUDA_CHECK_RETURN(cudaFree(dev_newOrders), "cudaFree failed: dev_newOrders."); CUDA_CHECK_RETURN(cudaFree(dev_parentSetScore), "cudaFree failed: dev_parentSetScore."); CUDA_CHECK_RETURN(cudaFree(dev_maxLocalScore), "cudaFree failed: 
dev_maxLocalScore."); CUDA_CHECK_RETURN(cudaFree(dev_ordersScore), "cudaFree failed: dev_ordersScore."); CUDA_CHECK_RETURN(cudaFree(dev_prob), "cudaFree failed: dev_prob."); CUDA_CHECK_RETURN(cudaFree(dev_samples), "cudaFree failed: dev_samples."); CUDA_CHECK_RETURN(cudaFree(dev_curandState), "cudaFree failed: dev_curandState."); CUDA_CHECK_RETURN(cudaFreeHost(newOrder), "cudaFreeHost failed: newOrder."); CUDA_CHECK_RETURN(cudaFreeHost(ordersScore), "cudaFreeHost failed: ordersScore."); CUDA_CHECK_RETURN(cudaFreeHost(prob), "cudaFreeHost failed: prob."); CUDA_CHECK_RETURN(cudaFreeHost(samples), "cudaFreeHost failed: samples."); calcCDFFinish(); searchTime = stopWatch(); } void BNSL_printResult() { printf("Best Score: %f \n", globalBestScore); printf("Best Topology: "); for (int i = 0; i < nodesNum; i++) { printf("%d ", globalBestOrder[i]); } printf("\n"); printf("BNSL_init elapsed time is %dms.\n", initTime); printf("BNSL_calcLocalScore time is %dms. \n", calcLocalScoreTime); printf("BNSL_search time is %dms. \n", searchTime); } void BNSL_finish() { CUDA_CHECK_RETURN(cudaFree(dev_lsTable), "cudaFree failed: dev_lsTable."); free(globalBestOrder); free(globalBestGraph); }
37eb9cbdfd80566e43b68b6fd8ac6206a1591a1a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * * Code edits and additions * Copyright 2018 Rommel Quintanilla <[email protected]> */ #include <cmath> #include <algorithm> #include <thrust/copy.h> #include <thrust/execution_policy.h> #include "cudf.h" #include "utilities/cudf_utils.h" #include "utilities/error_utils.h" #include "rmm/thrust_rmm_allocator.h" template<typename T, typename Tout, typename F> __global__ void gpu_unary_op(const T *data, const gdf_valid_type *valid, gdf_size_type size, Tout *results, F functor) { int tid = threadIdx.x; int blkid = blockIdx.x; int blksz = blockDim.x; int gridsz = gridDim.x; int start = tid + blkid * blksz; int step = blksz * gridsz; if ( valid ) { // has valid mask for (int i=start; i<size; i+=step) { if ( gdf_is_valid(valid, i) ) results[i] = functor.apply(data[i]); } } else { // no valid mask for (int i=start; i<size; i+=step) { results[i] = functor.apply(data[i]); } } } template<typename T, typename Tout, typename F> struct UnaryOp { static gdf_error launch(gdf_column *input, gdf_column *output) { // Return immediately for empty inputs if((0==input->size)) { return GDF_SUCCESS; } /* check for size of the columns */ if (input->size != output->size) { return GDF_COLUMN_SIZE_MISMATCH; } // find optimal blocksize int mingridsize, blocksize; CUDA_TRY( hipOccupancyMaxPotentialBlockSize(&mingridsize, &blocksize, gpu_unary_op<T, Tout, F>) ); // find needed gridsize int neededgridsize = (input->size + blocksize - 1) / blocksize; int gridsize = ::min(neededgridsize, mingridsize); F functor; hipLaunchKernelGGL(( gpu_unary_op), dim3(gridsize), dim3(blocksize), 0, 0, // input (const T*)input->data, input->valid, input->size, // output (Tout*)output->data, // action functor ); CUDA_CHECK_LAST(); return GDF_SUCCESS; } }; template<typename T, typename F> struct MathOp { static gdf_error launch(gdf_column *input, gdf_column *output) { return UnaryOp<T, T, F>::launch(input, output); } }; #define DEF_UNARY_OP_REAL(F) \ gdf_error F##_generic(gdf_column *input, gdf_column *output) { \ switch ( input->dtype ) { \ case GDF_FLOAT32: return F##_f32(input, output); \ case GDF_FLOAT64: return F##_f64(input, output); \ default: return GDF_UNSUPPORTED_DTYPE; \ } \ } #define DEF_CAST_OP(TO) \ gdf_error gdf_cast_generic_to_##TO(gdf_column *input, gdf_column *output) { \ switch ( input->dtype ) { \ case GDF_INT8: return gdf_cast_i8_to_##TO(input, output); \ case GDF_INT32: return gdf_cast_i32_to_##TO(input, output); \ case GDF_INT64: return gdf_cast_i64_to_##TO(input, output); \ case GDF_FLOAT32: return gdf_cast_f32_to_##TO(input, output); \ case GDF_FLOAT64: return gdf_cast_f64_to_##TO(input, output); \ case GDF_DATE32: return gdf_cast_date32_to_##TO(input, output); \ case GDF_DATE64: return gdf_cast_date64_to_##TO(input, output); \ case GDF_TIMESTAMP: return gdf_cast_timestamp_to_##TO(input, output); \ default: return GDF_UNSUPPORTED_DTYPE; \ } \ } #define DEF_CAST_OP_TS(TO) \ gdf_error gdf_cast_generic_to_##TO(gdf_column *input, gdf_column *output, gdf_time_unit time_unit) {\ switch ( input->dtype ) { \ case GDF_INT8: return gdf_cast_i8_to_##TO(input, output, time_unit); \ case GDF_INT32: return gdf_cast_i32_to_##TO(input, output, time_unit); \ case GDF_INT64: return gdf_cast_i64_to_##TO(input, output, time_unit); \ case GDF_FLOAT32: return gdf_cast_f32_to_##TO(input, output, time_unit); \ case GDF_FLOAT64: return gdf_cast_f64_to_##TO(input, output, time_unit); \ case GDF_DATE32: return gdf_cast_date32_to_##TO(input, output, 
time_unit); \ case GDF_DATE64: return gdf_cast_date64_to_##TO(input, output, time_unit); \ case GDF_TIMESTAMP: return gdf_cast_timestamp_to_##TO(input, output, time_unit); \ default: return GDF_UNSUPPORTED_DTYPE; \ } \ } // trig functions template<typename T> struct DeviceSin { __device__ T apply(T data) { return std::sin(data); } }; template<typename T> struct DeviceCos { __device__ T apply(T data) { return std::cos(data); } }; template<typename T> struct DeviceTan { __device__ T apply(T data) { return std::tan(data); } }; template<typename T> struct DeviceArcSin { __device__ T apply(T data) { return std::asin(data); } }; template<typename T> struct DeviceArcCos { __device__ T apply(T data) { return std::acos(data); } }; template<typename T> struct DeviceArcTan { __device__ T apply(T data) { return std::atan(data); } }; DEF_UNARY_OP_REAL(gdf_sin) gdf_error gdf_sin_f32(gdf_column *input, gdf_column *output) { return MathOp<float, DeviceSin<float> >::launch(input, output); } gdf_error gdf_sin_f64(gdf_column *input, gdf_column *output) { return MathOp<double, DeviceSin<double> >::launch(input, output); } DEF_UNARY_OP_REAL(gdf_cos) gdf_error gdf_cos_f32(gdf_column *input, gdf_column *output) { return MathOp<float, DeviceCos<float> >::launch(input, output); } gdf_error gdf_cos_f64(gdf_column *input, gdf_column *output) { return MathOp<double, DeviceCos<double> >::launch(input, output); } DEF_UNARY_OP_REAL(gdf_tan) gdf_error gdf_tan_f32(gdf_column *input, gdf_column *output) { return MathOp<float, DeviceTan<float> >::launch(input, output); } gdf_error gdf_tan_f64(gdf_column *input, gdf_column *output) { return MathOp<double, DeviceTan<double> >::launch(input, output); } DEF_UNARY_OP_REAL(gdf_asin) gdf_error gdf_asin_f32(gdf_column *input, gdf_column *output) { return MathOp<float, DeviceArcSin<float> >::launch(input, output); } gdf_error gdf_asin_f64(gdf_column *input, gdf_column *output) { return MathOp<double, DeviceArcSin<double> >::launch(input, output); } DEF_UNARY_OP_REAL(gdf_acos) gdf_error gdf_acos_f32(gdf_column *input, gdf_column *output) { return MathOp<float, DeviceArcCos<float> >::launch(input, output); } gdf_error gdf_acos_f64(gdf_column *input, gdf_column *output) { return MathOp<double, DeviceArcCos<double> >::launch(input, output); } DEF_UNARY_OP_REAL(gdf_atan) gdf_error gdf_atan_f32(gdf_column *input, gdf_column *output) { return MathOp<float, DeviceArcTan<float> >::launch(input, output); } gdf_error gdf_atan_f64(gdf_column *input, gdf_column *output) { return MathOp<double, DeviceArcTan<double> >::launch(input, output); } // exponential functions template<typename T> struct DeviceExp { __device__ T apply(T data) { return ::exp(data); } }; template<typename T> struct DeviceLog { __device__ T apply(T data) { return ::log(data); } }; DEF_UNARY_OP_REAL(gdf_exp) gdf_error gdf_exp_f32(gdf_column *input, gdf_column *output) { return MathOp<float, DeviceExp<float> >::launch(input, output); } gdf_error gdf_exp_f64(gdf_column *input, gdf_column *output) { return MathOp<double, DeviceExp<double> >::launch(input, output); } DEF_UNARY_OP_REAL(gdf_log) gdf_error gdf_log_f32(gdf_column *input, gdf_column *output) { return MathOp<float, DeviceLog<float> >::launch(input, output); } gdf_error gdf_log_f64(gdf_column *input, gdf_column *output) { return MathOp<double, DeviceLog<double> >::launch(input, output); } // exponential functions template<typename T> struct DeviceSqrt { __device__ T apply(T data) { return std::sqrt(data); } }; DEF_UNARY_OP_REAL(gdf_sqrt) gdf_error gdf_sqrt_f32(gdf_column 
*input, gdf_column *output) { return MathOp<float, DeviceSqrt<float> >::launch(input, output); } gdf_error gdf_sqrt_f64(gdf_column *input, gdf_column *output) { return MathOp<double, DeviceSqrt<double> >::launch(input, output); } // rounding functions template<typename T> struct DeviceCeil { __device__ T apply(T data) { return ::ceil(data); } }; template<typename T> struct DeviceFloor { __device__ T apply(T data) { return ::floor(data); } }; DEF_UNARY_OP_REAL(gdf_ceil) gdf_error gdf_ceil_f32(gdf_column *input, gdf_column *output) { return MathOp<float, DeviceCeil<float> >::launch(input, output); } gdf_error gdf_ceil_f64(gdf_column *input, gdf_column *output) { return MathOp<double, DeviceCeil<double> >::launch(input, output); } DEF_UNARY_OP_REAL(gdf_floor) gdf_error gdf_floor_f32(gdf_column *input, gdf_column *output) { return MathOp<float, DeviceFloor<float> >::launch(input, output); } gdf_error gdf_floor_f64(gdf_column *input, gdf_column *output) { return MathOp<double, DeviceFloor<double> >::launch(input, output); } // casting template<typename From, typename To> struct DeviceCast { __device__ To apply(From data) { return (To)data; } }; template<typename From, typename To, int64_t units_factor> struct UpCasting { __device__ To apply(From data) { return (To)(data*units_factor); } }; template<typename From, typename To, int64_t units_factor> struct DownCasting { __device__ To apply(From data) { return (To)((data-(units_factor-1)*(data<0))/units_factor); //ceiling only when data is negative } }; // Castings are differentiate between physical and logical ones. // In physical casting only change the physical representation, for example from GDF_FLOAT32 (float) to GDF_FLOAT64 (double) // on the other hand, casting between date timestamps needs also perform some calculations according to the time unit: // - when the source or destination datatype is GDF_DATE32, the value is multiplied or divided by the amount of timeunits by day // - when datatypes are timestamps, the value is multiplied or divided according to the S.I. 
nano 10^-9, micro 10^-6, milli 10^-3 // No calculation is necessary when casting between GDF_DATE64 and GDF_TIMESTAMP (with ms as time unit), because are logically and physically the same thing #define DEF_CAST_IMPL(VFROM, VTO, TFROM, TTO, LTFROM, LTO) \ gdf_error gdf_cast_##VFROM##_to_##VTO(gdf_column *input, gdf_column *output) { \ GDF_REQUIRE(input->dtype == LTFROM, GDF_UNSUPPORTED_DTYPE); \ \ hipStream_t stream = 0; \ rmm_temp_allocator allocator(stream); \ \ output->dtype = LTO; \ if (input->valid && output->valid) { \ gdf_size_type num_chars_bitmask = gdf_get_num_chars_bitmask(input->size); \ thrust::copy(thrust::hip::par(allocator).on(stream), input->valid, input->valid + num_chars_bitmask, output->valid); \ } \ \ /* Handling datetime logical castings */ \ if( LTFROM == GDF_DATE64 && LTO == GDF_DATE32 ) \ return UnaryOp<TFROM, TTO, DownCasting<TFROM, TTO, 86400000> >::launch(input, output); \ else if( LTFROM == GDF_DATE32 && LTO == GDF_DATE64 ) \ return UnaryOp<TFROM, TTO, UpCasting<TFROM, TTO, 86400000> >::launch(input, output); \ else if( ( LTFROM == GDF_TIMESTAMP && input->dtype_info.time_unit == TIME_UNIT_s ) && LTO == GDF_DATE32 ) \ return UnaryOp<TFROM, TTO, DownCasting<TFROM, TTO, 86400> >::launch(input, output); \ else if( ( LTFROM == GDF_TIMESTAMP && input->dtype_info.time_unit == TIME_UNIT_ms ) && LTO == GDF_DATE32 ) \ return UnaryOp<TFROM, TTO, DownCasting<TFROM, TTO, 86400000> >::launch(input, output); \ else if( ( LTFROM == GDF_TIMESTAMP && input->dtype_info.time_unit == TIME_UNIT_us ) && LTO == GDF_DATE32 ) \ return UnaryOp<TFROM, TTO, DownCasting<TFROM, TTO, 86400000000> >::launch(input, output); \ else if( ( LTFROM == GDF_TIMESTAMP && input->dtype_info.time_unit == TIME_UNIT_ns ) && LTO == GDF_DATE32 ) \ return UnaryOp<TFROM, TTO, DownCasting<TFROM, TTO, 86400000000000> >::launch(input, output); \ else if( ( LTFROM == GDF_TIMESTAMP && input->dtype_info.time_unit == TIME_UNIT_s ) && LTO == GDF_DATE64 ) \ return UnaryOp<TFROM, TTO, UpCasting<TFROM, TTO, 1000> >::launch(input, output); \ else if( ( LTFROM == GDF_TIMESTAMP && input->dtype_info.time_unit == TIME_UNIT_us ) && LTO == GDF_DATE64 ) \ return UnaryOp<TFROM, TTO, DownCasting<TFROM, TTO, 1000> >::launch(input, output); \ else if( ( LTFROM == GDF_TIMESTAMP && input->dtype_info.time_unit == TIME_UNIT_ns ) && LTO == GDF_DATE64 ) \ return UnaryOp<TFROM, TTO, DownCasting<TFROM, TTO, 1000000> >::launch(input, output); \ /* Handling only physical castings */ \ return UnaryOp<TFROM, TTO, DeviceCast<TFROM, TTO> >::launch(input, output); \ } // Castings functions where Timestamp is the destination type #define DEF_CAST_IMPL_TS(VFROM, VTO, TFROM, TTO, LTFROM, LTO) \ gdf_error gdf_cast_##VFROM##_to_##VTO(gdf_column *input, gdf_column *output, gdf_time_unit time_unit) { \ GDF_REQUIRE(input->dtype == LTFROM, GDF_UNSUPPORTED_DTYPE); \ \ hipStream_t stream = 0; \ rmm_temp_allocator allocator(stream); \ \ output->dtype = LTO; \ output->dtype_info.time_unit = time_unit; \ if (input->valid && output->valid) { \ gdf_size_type num_chars_bitmask = gdf_get_num_chars_bitmask(input->size); \ thrust::copy(thrust::hip::par(allocator).on(stream), input->valid, input->valid + num_chars_bitmask, output->valid); \ } \ \ /* Handling datetime logical castings */ \ if( LTFROM == GDF_DATE32 && ( LTO == GDF_TIMESTAMP && time_unit == TIME_UNIT_s ) ) \ return UnaryOp<TFROM, TTO, UpCasting<TFROM, TTO, 86400> >::launch(input, output); \ else if( LTFROM == GDF_DATE32 && ( LTO == GDF_TIMESTAMP && time_unit == TIME_UNIT_ms ) ) \ return UnaryOp<TFROM, TTO, 
UpCasting<TFROM, TTO, 86400000> >::launch(input, output); \ else if( LTFROM == GDF_DATE32 && ( LTO == GDF_TIMESTAMP && time_unit == TIME_UNIT_us ) ) \ return UnaryOp<TFROM, TTO, UpCasting<TFROM, TTO, 86400000000> >::launch(input, output); \ else if( LTFROM == GDF_DATE32 && ( LTO == GDF_TIMESTAMP && time_unit == TIME_UNIT_ns ) ) \ return UnaryOp<TFROM, TTO, UpCasting<TFROM, TTO, 86400000000000> >::launch(input, output); \ else if( LTFROM == GDF_DATE64 && LTO == GDF_TIMESTAMP && time_unit == TIME_UNIT_us) \ return UnaryOp<TFROM, TTO, UpCasting<TFROM, TTO, 1000> >::launch(input, output); \ else if( LTFROM == GDF_DATE64 && LTO == GDF_TIMESTAMP && time_unit == TIME_UNIT_s) \ return UnaryOp<TFROM, TTO, DownCasting<TFROM, TTO, 1000> >::launch(input, output); \ else if( LTFROM == GDF_DATE64 && LTO == GDF_TIMESTAMP && time_unit == TIME_UNIT_ns) \ return UnaryOp<TFROM, TTO, UpCasting<TFROM, TTO, 1000000> >::launch(input, output); \ else if( LTFROM == GDF_TIMESTAMP && LTO == GDF_TIMESTAMP ) \ { \ if( input->dtype_info.time_unit == TIME_UNIT_s && time_unit == TIME_UNIT_ms ) \ return UnaryOp<TFROM, TTO, UpCasting<TFROM, TTO, 1000> >::launch(input, output); \ else if( input->dtype_info.time_unit == TIME_UNIT_ms && time_unit == TIME_UNIT_s ) \ return UnaryOp<TFROM, TTO, DownCasting<TFROM, TTO, 1000> >::launch(input, output); \ else if( input->dtype_info.time_unit == TIME_UNIT_s && time_unit == TIME_UNIT_us ) \ return UnaryOp<TFROM, TTO, UpCasting<TFROM, TTO, 1000000> >::launch(input, output); \ else if( input->dtype_info.time_unit == TIME_UNIT_us && time_unit == TIME_UNIT_s ) \ return UnaryOp<TFROM, TTO, DownCasting<TFROM, TTO, 1000000> >::launch(input, output); \ else if( input->dtype_info.time_unit == TIME_UNIT_s && time_unit == TIME_UNIT_ns ) \ return UnaryOp<TFROM, TTO, UpCasting<TFROM, TTO, 1000000000> >::launch(input, output); \ else if( input->dtype_info.time_unit == TIME_UNIT_ns && time_unit == TIME_UNIT_s ) \ return UnaryOp<TFROM, TTO, DownCasting<TFROM, TTO, 1000000000> >::launch(input, output); \ else if( input->dtype_info.time_unit == TIME_UNIT_us && time_unit == TIME_UNIT_ns ) \ return UnaryOp<TFROM, TTO, UpCasting<TFROM, TTO, 1000> >::launch(input, output); \ else if( input->dtype_info.time_unit == TIME_UNIT_ns && time_unit == TIME_UNIT_us ) \ return UnaryOp<TFROM, TTO, DownCasting<TFROM, TTO, 1000> >::launch(input, output); \ else if( input->dtype_info.time_unit == TIME_UNIT_ms && time_unit == TIME_UNIT_ns ) \ return UnaryOp<TFROM, TTO, UpCasting<TFROM, TTO, 1000000> >::launch(input, output); \ else if( input->dtype_info.time_unit == TIME_UNIT_ns && time_unit == TIME_UNIT_ms ) \ return UnaryOp<TFROM, TTO, DownCasting<TFROM, TTO, 1000000> >::launch(input, output); \ else if( input->dtype_info.time_unit == TIME_UNIT_us && time_unit == TIME_UNIT_ms ) \ return UnaryOp<TFROM, TTO, DownCasting<TFROM, TTO, 1000> >::launch(input, output); \ else if( input->dtype_info.time_unit == TIME_UNIT_ms && time_unit == TIME_UNIT_us ) \ return UnaryOp<TFROM, TTO, UpCasting<TFROM, TTO, 1000> >::launch(input, output); \ } \ /* Handling only physical castings */ \ return UnaryOp<TFROM, TTO, DeviceCast<TFROM, TTO> >::launch(input, output); \ } #define DEF_CAST_IMPL_TEMPLATE(ABREV, PHYSICAL_TYPE, LOGICAL_TYPE) \ DEF_CAST_OP(ABREV) \ DEF_CAST_IMPL(i8, ABREV, int8_t, PHYSICAL_TYPE, GDF_INT8, LOGICAL_TYPE) \ DEF_CAST_IMPL(i32, ABREV, int32_t, PHYSICAL_TYPE, GDF_INT32, LOGICAL_TYPE) \ DEF_CAST_IMPL(i64, ABREV, int64_t, PHYSICAL_TYPE, GDF_INT64, LOGICAL_TYPE) \ DEF_CAST_IMPL(f32, ABREV, float, PHYSICAL_TYPE, 
GDF_FLOAT32, LOGICAL_TYPE) \ DEF_CAST_IMPL(f64, ABREV, double, PHYSICAL_TYPE, GDF_FLOAT64, LOGICAL_TYPE) \ DEF_CAST_IMPL(date32, ABREV, int32_t, PHYSICAL_TYPE, GDF_DATE32, LOGICAL_TYPE) \ DEF_CAST_IMPL(date64, ABREV, int64_t, PHYSICAL_TYPE, GDF_DATE64, LOGICAL_TYPE) \ DEF_CAST_IMPL(timestamp, ABREV, int64_t, PHYSICAL_TYPE, GDF_TIMESTAMP, LOGICAL_TYPE) #define DEF_CAST_IMPL_TEMPLATE_TS(ABREV, PHYSICAL_TYPE, LOGICAL_TYPE) \ DEF_CAST_OP_TS(ABREV) \ DEF_CAST_IMPL_TS(i8, ABREV, int8_t, PHYSICAL_TYPE, GDF_INT8, LOGICAL_TYPE) \ DEF_CAST_IMPL_TS(i32, ABREV, int32_t, PHYSICAL_TYPE, GDF_INT32, LOGICAL_TYPE) \ DEF_CAST_IMPL_TS(i64, ABREV, int64_t, PHYSICAL_TYPE, GDF_INT64, LOGICAL_TYPE) \ DEF_CAST_IMPL_TS(f32, ABREV, float, PHYSICAL_TYPE, GDF_FLOAT32, LOGICAL_TYPE) \ DEF_CAST_IMPL_TS(f64, ABREV, double, PHYSICAL_TYPE, GDF_FLOAT64, LOGICAL_TYPE) \ DEF_CAST_IMPL_TS(date32, ABREV, int32_t, PHYSICAL_TYPE, GDF_DATE32, LOGICAL_TYPE) \ DEF_CAST_IMPL_TS(date64, ABREV, int64_t, PHYSICAL_TYPE, GDF_DATE64, LOGICAL_TYPE) \ DEF_CAST_IMPL_TS(timestamp, ABREV, int64_t, PHYSICAL_TYPE, GDF_TIMESTAMP, LOGICAL_TYPE) DEF_CAST_IMPL_TEMPLATE(f32, float, GDF_FLOAT32) DEF_CAST_IMPL_TEMPLATE(f64, double, GDF_FLOAT64) DEF_CAST_IMPL_TEMPLATE(i8, int8_t, GDF_INT8) DEF_CAST_IMPL_TEMPLATE(i32, int32_t, GDF_INT32) DEF_CAST_IMPL_TEMPLATE(i64, int64_t, GDF_INT64) DEF_CAST_IMPL_TEMPLATE(date32, int32_t, GDF_DATE32) DEF_CAST_IMPL_TEMPLATE(date64, int64_t, GDF_DATE64) DEF_CAST_IMPL_TEMPLATE_TS(timestamp, int64_t, GDF_TIMESTAMP)
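The DownCasting functor in this file divides by units_factor with an adjustment applied only for negative inputs, so the result rounds toward negative infinity rather than toward zero (relevant, for example, when converting pre-1970 GDF_DATE64 milliseconds to GDF_DATE32 days). A minimal host-side sketch of that arithmetic, with sample values chosen purely for illustration:

#include <cstdint>
#include <cstdio>

// Same expression as the DownCasting functor above, evaluated on the host.
static int64_t down_cast(int64_t data, int64_t units_factor) {
    return (data - (units_factor - 1) * (data < 0)) / units_factor;
}

int main() {
    const int64_t ms_per_day = 86400000;
    // 1970-01-02T00:00:00 in ms -> day 1; plain truncation gives the same result.
    printf("%lld\n", (long long)down_cast(86400000, ms_per_day));  // prints 1
    // 1969-12-31T23:59:59.999 in ms -> day -1; truncation alone would give 0.
    printf("%lld\n", (long long)down_cast(-1, ms_per_day));        // prints -1
    return 0;
}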
37eb9cbdfd80566e43b68b6fd8ac6206a1591a1a.cu
/* * * Code edits and additions * Copyright 2018 Rommel Quintanilla <[email protected]> */ #include <cmath> #include <algorithm> #include <thrust/copy.h> #include <thrust/execution_policy.h> #include "cudf.h" #include "utilities/cudf_utils.h" #include "utilities/error_utils.h" #include "rmm/thrust_rmm_allocator.h" template<typename T, typename Tout, typename F> __global__ void gpu_unary_op(const T *data, const gdf_valid_type *valid, gdf_size_type size, Tout *results, F functor) { int tid = threadIdx.x; int blkid = blockIdx.x; int blksz = blockDim.x; int gridsz = gridDim.x; int start = tid + blkid * blksz; int step = blksz * gridsz; if ( valid ) { // has valid mask for (int i=start; i<size; i+=step) { if ( gdf_is_valid(valid, i) ) results[i] = functor.apply(data[i]); } } else { // no valid mask for (int i=start; i<size; i+=step) { results[i] = functor.apply(data[i]); } } } template<typename T, typename Tout, typename F> struct UnaryOp { static gdf_error launch(gdf_column *input, gdf_column *output) { // Return immediately for empty inputs if((0==input->size)) { return GDF_SUCCESS; } /* check for size of the columns */ if (input->size != output->size) { return GDF_COLUMN_SIZE_MISMATCH; } // find optimal blocksize int mingridsize, blocksize; CUDA_TRY( cudaOccupancyMaxPotentialBlockSize(&mingridsize, &blocksize, gpu_unary_op<T, Tout, F>) ); // find needed gridsize int neededgridsize = (input->size + blocksize - 1) / blocksize; int gridsize = std::min(neededgridsize, mingridsize); F functor; gpu_unary_op<<<gridsize, blocksize>>>( // input (const T*)input->data, input->valid, input->size, // output (Tout*)output->data, // action functor ); CUDA_CHECK_LAST(); return GDF_SUCCESS; } }; template<typename T, typename F> struct MathOp { static gdf_error launch(gdf_column *input, gdf_column *output) { return UnaryOp<T, T, F>::launch(input, output); } }; #define DEF_UNARY_OP_REAL(F) \ gdf_error F##_generic(gdf_column *input, gdf_column *output) { \ switch ( input->dtype ) { \ case GDF_FLOAT32: return F##_f32(input, output); \ case GDF_FLOAT64: return F##_f64(input, output); \ default: return GDF_UNSUPPORTED_DTYPE; \ } \ } #define DEF_CAST_OP(TO) \ gdf_error gdf_cast_generic_to_##TO(gdf_column *input, gdf_column *output) { \ switch ( input->dtype ) { \ case GDF_INT8: return gdf_cast_i8_to_##TO(input, output); \ case GDF_INT32: return gdf_cast_i32_to_##TO(input, output); \ case GDF_INT64: return gdf_cast_i64_to_##TO(input, output); \ case GDF_FLOAT32: return gdf_cast_f32_to_##TO(input, output); \ case GDF_FLOAT64: return gdf_cast_f64_to_##TO(input, output); \ case GDF_DATE32: return gdf_cast_date32_to_##TO(input, output); \ case GDF_DATE64: return gdf_cast_date64_to_##TO(input, output); \ case GDF_TIMESTAMP: return gdf_cast_timestamp_to_##TO(input, output); \ default: return GDF_UNSUPPORTED_DTYPE; \ } \ } #define DEF_CAST_OP_TS(TO) \ gdf_error gdf_cast_generic_to_##TO(gdf_column *input, gdf_column *output, gdf_time_unit time_unit) {\ switch ( input->dtype ) { \ case GDF_INT8: return gdf_cast_i8_to_##TO(input, output, time_unit); \ case GDF_INT32: return gdf_cast_i32_to_##TO(input, output, time_unit); \ case GDF_INT64: return gdf_cast_i64_to_##TO(input, output, time_unit); \ case GDF_FLOAT32: return gdf_cast_f32_to_##TO(input, output, time_unit); \ case GDF_FLOAT64: return gdf_cast_f64_to_##TO(input, output, time_unit); \ case GDF_DATE32: return gdf_cast_date32_to_##TO(input, output, time_unit); \ case GDF_DATE64: return gdf_cast_date64_to_##TO(input, output, time_unit); \ case GDF_TIMESTAMP: return 
gdf_cast_timestamp_to_##TO(input, output, time_unit); \ default: return GDF_UNSUPPORTED_DTYPE; \ } \ } // trig functions template<typename T> struct DeviceSin { __device__ T apply(T data) { return std::sin(data); } }; template<typename T> struct DeviceCos { __device__ T apply(T data) { return std::cos(data); } }; template<typename T> struct DeviceTan { __device__ T apply(T data) { return std::tan(data); } }; template<typename T> struct DeviceArcSin { __device__ T apply(T data) { return std::asin(data); } }; template<typename T> struct DeviceArcCos { __device__ T apply(T data) { return std::acos(data); } }; template<typename T> struct DeviceArcTan { __device__ T apply(T data) { return std::atan(data); } }; DEF_UNARY_OP_REAL(gdf_sin) gdf_error gdf_sin_f32(gdf_column *input, gdf_column *output) { return MathOp<float, DeviceSin<float> >::launch(input, output); } gdf_error gdf_sin_f64(gdf_column *input, gdf_column *output) { return MathOp<double, DeviceSin<double> >::launch(input, output); } DEF_UNARY_OP_REAL(gdf_cos) gdf_error gdf_cos_f32(gdf_column *input, gdf_column *output) { return MathOp<float, DeviceCos<float> >::launch(input, output); } gdf_error gdf_cos_f64(gdf_column *input, gdf_column *output) { return MathOp<double, DeviceCos<double> >::launch(input, output); } DEF_UNARY_OP_REAL(gdf_tan) gdf_error gdf_tan_f32(gdf_column *input, gdf_column *output) { return MathOp<float, DeviceTan<float> >::launch(input, output); } gdf_error gdf_tan_f64(gdf_column *input, gdf_column *output) { return MathOp<double, DeviceTan<double> >::launch(input, output); } DEF_UNARY_OP_REAL(gdf_asin) gdf_error gdf_asin_f32(gdf_column *input, gdf_column *output) { return MathOp<float, DeviceArcSin<float> >::launch(input, output); } gdf_error gdf_asin_f64(gdf_column *input, gdf_column *output) { return MathOp<double, DeviceArcSin<double> >::launch(input, output); } DEF_UNARY_OP_REAL(gdf_acos) gdf_error gdf_acos_f32(gdf_column *input, gdf_column *output) { return MathOp<float, DeviceArcCos<float> >::launch(input, output); } gdf_error gdf_acos_f64(gdf_column *input, gdf_column *output) { return MathOp<double, DeviceArcCos<double> >::launch(input, output); } DEF_UNARY_OP_REAL(gdf_atan) gdf_error gdf_atan_f32(gdf_column *input, gdf_column *output) { return MathOp<float, DeviceArcTan<float> >::launch(input, output); } gdf_error gdf_atan_f64(gdf_column *input, gdf_column *output) { return MathOp<double, DeviceArcTan<double> >::launch(input, output); } // exponential functions template<typename T> struct DeviceExp { __device__ T apply(T data) { return std::exp(data); } }; template<typename T> struct DeviceLog { __device__ T apply(T data) { return std::log(data); } }; DEF_UNARY_OP_REAL(gdf_exp) gdf_error gdf_exp_f32(gdf_column *input, gdf_column *output) { return MathOp<float, DeviceExp<float> >::launch(input, output); } gdf_error gdf_exp_f64(gdf_column *input, gdf_column *output) { return MathOp<double, DeviceExp<double> >::launch(input, output); } DEF_UNARY_OP_REAL(gdf_log) gdf_error gdf_log_f32(gdf_column *input, gdf_column *output) { return MathOp<float, DeviceLog<float> >::launch(input, output); } gdf_error gdf_log_f64(gdf_column *input, gdf_column *output) { return MathOp<double, DeviceLog<double> >::launch(input, output); } // exponential functions template<typename T> struct DeviceSqrt { __device__ T apply(T data) { return std::sqrt(data); } }; DEF_UNARY_OP_REAL(gdf_sqrt) gdf_error gdf_sqrt_f32(gdf_column *input, gdf_column *output) { return MathOp<float, DeviceSqrt<float> >::launch(input, output); } gdf_error 
gdf_sqrt_f64(gdf_column *input, gdf_column *output) { return MathOp<double, DeviceSqrt<double> >::launch(input, output); } // rounding functions template<typename T> struct DeviceCeil { __device__ T apply(T data) { return std::ceil(data); } }; template<typename T> struct DeviceFloor { __device__ T apply(T data) { return std::floor(data); } }; DEF_UNARY_OP_REAL(gdf_ceil) gdf_error gdf_ceil_f32(gdf_column *input, gdf_column *output) { return MathOp<float, DeviceCeil<float> >::launch(input, output); } gdf_error gdf_ceil_f64(gdf_column *input, gdf_column *output) { return MathOp<double, DeviceCeil<double> >::launch(input, output); } DEF_UNARY_OP_REAL(gdf_floor) gdf_error gdf_floor_f32(gdf_column *input, gdf_column *output) { return MathOp<float, DeviceFloor<float> >::launch(input, output); } gdf_error gdf_floor_f64(gdf_column *input, gdf_column *output) { return MathOp<double, DeviceFloor<double> >::launch(input, output); } // casting template<typename From, typename To> struct DeviceCast { __device__ To apply(From data) { return (To)data; } }; template<typename From, typename To, int64_t units_factor> struct UpCasting { __device__ To apply(From data) { return (To)(data*units_factor); } }; template<typename From, typename To, int64_t units_factor> struct DownCasting { __device__ To apply(From data) { return (To)((data-(units_factor-1)*(data<0))/units_factor); //ceiling only when data is negative } }; // Castings are differentiate between physical and logical ones. // In physical casting only change the physical representation, for example from GDF_FLOAT32 (float) to GDF_FLOAT64 (double) // on the other hand, casting between date timestamps needs also perform some calculations according to the time unit: // - when the source or destination datatype is GDF_DATE32, the value is multiplied or divided by the amount of timeunits by day // - when datatypes are timestamps, the value is multiplied or divided according to the S.I. 
nano 10^-9, micro 10^-6, milli 10^-3 // No calculation is necessary when casting between GDF_DATE64 and GDF_TIMESTAMP (with ms as time unit), because are logically and physically the same thing #define DEF_CAST_IMPL(VFROM, VTO, TFROM, TTO, LTFROM, LTO) \ gdf_error gdf_cast_##VFROM##_to_##VTO(gdf_column *input, gdf_column *output) { \ GDF_REQUIRE(input->dtype == LTFROM, GDF_UNSUPPORTED_DTYPE); \ \ cudaStream_t stream = 0; \ rmm_temp_allocator allocator(stream); \ \ output->dtype = LTO; \ if (input->valid && output->valid) { \ gdf_size_type num_chars_bitmask = gdf_get_num_chars_bitmask(input->size); \ thrust::copy(thrust::cuda::par(allocator).on(stream), input->valid, input->valid + num_chars_bitmask, output->valid); \ } \ \ /* Handling datetime logical castings */ \ if( LTFROM == GDF_DATE64 && LTO == GDF_DATE32 ) \ return UnaryOp<TFROM, TTO, DownCasting<TFROM, TTO, 86400000> >::launch(input, output); \ else if( LTFROM == GDF_DATE32 && LTO == GDF_DATE64 ) \ return UnaryOp<TFROM, TTO, UpCasting<TFROM, TTO, 86400000> >::launch(input, output); \ else if( ( LTFROM == GDF_TIMESTAMP && input->dtype_info.time_unit == TIME_UNIT_s ) && LTO == GDF_DATE32 ) \ return UnaryOp<TFROM, TTO, DownCasting<TFROM, TTO, 86400> >::launch(input, output); \ else if( ( LTFROM == GDF_TIMESTAMP && input->dtype_info.time_unit == TIME_UNIT_ms ) && LTO == GDF_DATE32 ) \ return UnaryOp<TFROM, TTO, DownCasting<TFROM, TTO, 86400000> >::launch(input, output); \ else if( ( LTFROM == GDF_TIMESTAMP && input->dtype_info.time_unit == TIME_UNIT_us ) && LTO == GDF_DATE32 ) \ return UnaryOp<TFROM, TTO, DownCasting<TFROM, TTO, 86400000000> >::launch(input, output); \ else if( ( LTFROM == GDF_TIMESTAMP && input->dtype_info.time_unit == TIME_UNIT_ns ) && LTO == GDF_DATE32 ) \ return UnaryOp<TFROM, TTO, DownCasting<TFROM, TTO, 86400000000000> >::launch(input, output); \ else if( ( LTFROM == GDF_TIMESTAMP && input->dtype_info.time_unit == TIME_UNIT_s ) && LTO == GDF_DATE64 ) \ return UnaryOp<TFROM, TTO, UpCasting<TFROM, TTO, 1000> >::launch(input, output); \ else if( ( LTFROM == GDF_TIMESTAMP && input->dtype_info.time_unit == TIME_UNIT_us ) && LTO == GDF_DATE64 ) \ return UnaryOp<TFROM, TTO, DownCasting<TFROM, TTO, 1000> >::launch(input, output); \ else if( ( LTFROM == GDF_TIMESTAMP && input->dtype_info.time_unit == TIME_UNIT_ns ) && LTO == GDF_DATE64 ) \ return UnaryOp<TFROM, TTO, DownCasting<TFROM, TTO, 1000000> >::launch(input, output); \ /* Handling only physical castings */ \ return UnaryOp<TFROM, TTO, DeviceCast<TFROM, TTO> >::launch(input, output); \ } // Castings functions where Timestamp is the destination type #define DEF_CAST_IMPL_TS(VFROM, VTO, TFROM, TTO, LTFROM, LTO) \ gdf_error gdf_cast_##VFROM##_to_##VTO(gdf_column *input, gdf_column *output, gdf_time_unit time_unit) { \ GDF_REQUIRE(input->dtype == LTFROM, GDF_UNSUPPORTED_DTYPE); \ \ cudaStream_t stream = 0; \ rmm_temp_allocator allocator(stream); \ \ output->dtype = LTO; \ output->dtype_info.time_unit = time_unit; \ if (input->valid && output->valid) { \ gdf_size_type num_chars_bitmask = gdf_get_num_chars_bitmask(input->size); \ thrust::copy(thrust::cuda::par(allocator).on(stream), input->valid, input->valid + num_chars_bitmask, output->valid); \ } \ \ /* Handling datetime logical castings */ \ if( LTFROM == GDF_DATE32 && ( LTO == GDF_TIMESTAMP && time_unit == TIME_UNIT_s ) ) \ return UnaryOp<TFROM, TTO, UpCasting<TFROM, TTO, 86400> >::launch(input, output); \ else if( LTFROM == GDF_DATE32 && ( LTO == GDF_TIMESTAMP && time_unit == TIME_UNIT_ms ) ) \ return UnaryOp<TFROM, 
TTO, UpCasting<TFROM, TTO, 86400000> >::launch(input, output); \ else if( LTFROM == GDF_DATE32 && ( LTO == GDF_TIMESTAMP && time_unit == TIME_UNIT_us ) ) \ return UnaryOp<TFROM, TTO, UpCasting<TFROM, TTO, 86400000000> >::launch(input, output); \ else if( LTFROM == GDF_DATE32 && ( LTO == GDF_TIMESTAMP && time_unit == TIME_UNIT_ns ) ) \ return UnaryOp<TFROM, TTO, UpCasting<TFROM, TTO, 86400000000000> >::launch(input, output); \ else if( LTFROM == GDF_DATE64 && LTO == GDF_TIMESTAMP && time_unit == TIME_UNIT_us) \ return UnaryOp<TFROM, TTO, UpCasting<TFROM, TTO, 1000> >::launch(input, output); \ else if( LTFROM == GDF_DATE64 && LTO == GDF_TIMESTAMP && time_unit == TIME_UNIT_s) \ return UnaryOp<TFROM, TTO, DownCasting<TFROM, TTO, 1000> >::launch(input, output); \ else if( LTFROM == GDF_DATE64 && LTO == GDF_TIMESTAMP && time_unit == TIME_UNIT_ns) \ return UnaryOp<TFROM, TTO, UpCasting<TFROM, TTO, 1000000> >::launch(input, output); \ else if( LTFROM == GDF_TIMESTAMP && LTO == GDF_TIMESTAMP ) \ { \ if( input->dtype_info.time_unit == TIME_UNIT_s && time_unit == TIME_UNIT_ms ) \ return UnaryOp<TFROM, TTO, UpCasting<TFROM, TTO, 1000> >::launch(input, output); \ else if( input->dtype_info.time_unit == TIME_UNIT_ms && time_unit == TIME_UNIT_s ) \ return UnaryOp<TFROM, TTO, DownCasting<TFROM, TTO, 1000> >::launch(input, output); \ else if( input->dtype_info.time_unit == TIME_UNIT_s && time_unit == TIME_UNIT_us ) \ return UnaryOp<TFROM, TTO, UpCasting<TFROM, TTO, 1000000> >::launch(input, output); \ else if( input->dtype_info.time_unit == TIME_UNIT_us && time_unit == TIME_UNIT_s ) \ return UnaryOp<TFROM, TTO, DownCasting<TFROM, TTO, 1000000> >::launch(input, output); \ else if( input->dtype_info.time_unit == TIME_UNIT_s && time_unit == TIME_UNIT_ns ) \ return UnaryOp<TFROM, TTO, UpCasting<TFROM, TTO, 1000000000> >::launch(input, output); \ else if( input->dtype_info.time_unit == TIME_UNIT_ns && time_unit == TIME_UNIT_s ) \ return UnaryOp<TFROM, TTO, DownCasting<TFROM, TTO, 1000000000> >::launch(input, output); \ else if( input->dtype_info.time_unit == TIME_UNIT_us && time_unit == TIME_UNIT_ns ) \ return UnaryOp<TFROM, TTO, UpCasting<TFROM, TTO, 1000> >::launch(input, output); \ else if( input->dtype_info.time_unit == TIME_UNIT_ns && time_unit == TIME_UNIT_us ) \ return UnaryOp<TFROM, TTO, DownCasting<TFROM, TTO, 1000> >::launch(input, output); \ else if( input->dtype_info.time_unit == TIME_UNIT_ms && time_unit == TIME_UNIT_ns ) \ return UnaryOp<TFROM, TTO, UpCasting<TFROM, TTO, 1000000> >::launch(input, output); \ else if( input->dtype_info.time_unit == TIME_UNIT_ns && time_unit == TIME_UNIT_ms ) \ return UnaryOp<TFROM, TTO, DownCasting<TFROM, TTO, 1000000> >::launch(input, output); \ else if( input->dtype_info.time_unit == TIME_UNIT_us && time_unit == TIME_UNIT_ms ) \ return UnaryOp<TFROM, TTO, DownCasting<TFROM, TTO, 1000> >::launch(input, output); \ else if( input->dtype_info.time_unit == TIME_UNIT_ms && time_unit == TIME_UNIT_us ) \ return UnaryOp<TFROM, TTO, UpCasting<TFROM, TTO, 1000> >::launch(input, output); \ } \ /* Handling only physical castings */ \ return UnaryOp<TFROM, TTO, DeviceCast<TFROM, TTO> >::launch(input, output); \ } #define DEF_CAST_IMPL_TEMPLATE(ABREV, PHYSICAL_TYPE, LOGICAL_TYPE) \ DEF_CAST_OP(ABREV) \ DEF_CAST_IMPL(i8, ABREV, int8_t, PHYSICAL_TYPE, GDF_INT8, LOGICAL_TYPE) \ DEF_CAST_IMPL(i32, ABREV, int32_t, PHYSICAL_TYPE, GDF_INT32, LOGICAL_TYPE) \ DEF_CAST_IMPL(i64, ABREV, int64_t, PHYSICAL_TYPE, GDF_INT64, LOGICAL_TYPE) \ DEF_CAST_IMPL(f32, ABREV, float, PHYSICAL_TYPE, 
GDF_FLOAT32, LOGICAL_TYPE) \ DEF_CAST_IMPL(f64, ABREV, double, PHYSICAL_TYPE, GDF_FLOAT64, LOGICAL_TYPE) \ DEF_CAST_IMPL(date32, ABREV, int32_t, PHYSICAL_TYPE, GDF_DATE32, LOGICAL_TYPE) \ DEF_CAST_IMPL(date64, ABREV, int64_t, PHYSICAL_TYPE, GDF_DATE64, LOGICAL_TYPE) \ DEF_CAST_IMPL(timestamp, ABREV, int64_t, PHYSICAL_TYPE, GDF_TIMESTAMP, LOGICAL_TYPE) #define DEF_CAST_IMPL_TEMPLATE_TS(ABREV, PHYSICAL_TYPE, LOGICAL_TYPE) \ DEF_CAST_OP_TS(ABREV) \ DEF_CAST_IMPL_TS(i8, ABREV, int8_t, PHYSICAL_TYPE, GDF_INT8, LOGICAL_TYPE) \ DEF_CAST_IMPL_TS(i32, ABREV, int32_t, PHYSICAL_TYPE, GDF_INT32, LOGICAL_TYPE) \ DEF_CAST_IMPL_TS(i64, ABREV, int64_t, PHYSICAL_TYPE, GDF_INT64, LOGICAL_TYPE) \ DEF_CAST_IMPL_TS(f32, ABREV, float, PHYSICAL_TYPE, GDF_FLOAT32, LOGICAL_TYPE) \ DEF_CAST_IMPL_TS(f64, ABREV, double, PHYSICAL_TYPE, GDF_FLOAT64, LOGICAL_TYPE) \ DEF_CAST_IMPL_TS(date32, ABREV, int32_t, PHYSICAL_TYPE, GDF_DATE32, LOGICAL_TYPE) \ DEF_CAST_IMPL_TS(date64, ABREV, int64_t, PHYSICAL_TYPE, GDF_DATE64, LOGICAL_TYPE) \ DEF_CAST_IMPL_TS(timestamp, ABREV, int64_t, PHYSICAL_TYPE, GDF_TIMESTAMP, LOGICAL_TYPE) DEF_CAST_IMPL_TEMPLATE(f32, float, GDF_FLOAT32) DEF_CAST_IMPL_TEMPLATE(f64, double, GDF_FLOAT64) DEF_CAST_IMPL_TEMPLATE(i8, int8_t, GDF_INT8) DEF_CAST_IMPL_TEMPLATE(i32, int32_t, GDF_INT32) DEF_CAST_IMPL_TEMPLATE(i64, int64_t, GDF_INT64) DEF_CAST_IMPL_TEMPLATE(date32, int32_t, GDF_DATE32) DEF_CAST_IMPL_TEMPLATE(date64, int64_t, GDF_DATE64) DEF_CAST_IMPL_TEMPLATE_TS(timestamp, int64_t, GDF_TIMESTAMP)
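Both versions of UnaryOp::launch size the grid the same way: they query an occupancy-derived block size, compute the grid needed to cover the column, and cap it at the suggested minimum grid so the grid-stride loop in gpu_unary_op covers any remainder. A minimal CUDA sketch of that pattern with a hypothetical kernel standing in for gpu_unary_op:

#include <cuda_runtime.h>
#include <algorithm>

// Hypothetical grid-stride kernel standing in for gpu_unary_op.
__global__ void square_kernel(const float *in, float *out, int n) {
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
         i += blockDim.x * gridDim.x)
        out[i] = in[i] * in[i];
}

cudaError_t launch_square(const float *in, float *out, int n) {
    int minGridSize = 0, blockSize = 0;
    // Ask the runtime for a block size that maximizes occupancy for this kernel.
    cudaError_t err =
        cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, square_kernel);
    if (err != cudaSuccess) return err;
    int neededGrid = (n + blockSize - 1) / blockSize;
    int gridSize = std::min(neededGrid, minGridSize);  // grid-stride loop covers the rest
    square_kernel<<<gridSize, blockSize>>>(in, out, n);
    return cudaGetLastError();
}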
febad2e8ce3ddc853efa43cb821d192b6fb2a593.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" const int Nthreads = 1024, maxFR = 5000, NrankMax = 6; ////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////// __global__ void bestFilter(const double *Params, const float *data, float *err, int *ftype, int *kkmax, int *kall){ int tid, tid0, i, bid, NT, Nchan, ibest = 0, kbest; float Cf, Cbest = 0.0f; tid = threadIdx.x; bid = blockIdx.x; NT = (int) Params[0]; Nchan = (int) Params[9]; tid0 = tid + bid * blockDim.x; while (tid0<NT){ kbest = 0; for (i=0; i<Nchan;i++){ Cf = data[tid0 + NT*i]; if (Cf > Cbest + 1e-6){ Cbest = Cf; ibest = i; kbest = kkmax[tid0 + NT*i]; } } err[tid0] = Cbest; ftype[tid0] = ibest; kall[tid0] = kbest; tid0 += blockDim.x * gridDim.x; } }
febad2e8ce3ddc853efa43cb821d192b6fb2a593.cu
#include "includes.h" const int Nthreads = 1024, maxFR = 5000, NrankMax = 6; ////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////// __global__ void bestFilter(const double *Params, const float *data, float *err, int *ftype, int *kkmax, int *kall){ int tid, tid0, i, bid, NT, Nchan, ibest = 0, kbest; float Cf, Cbest = 0.0f; tid = threadIdx.x; bid = blockIdx.x; NT = (int) Params[0]; Nchan = (int) Params[9]; tid0 = tid + bid * blockDim.x; while (tid0<NT){ kbest = 0; for (i=0; i<Nchan;i++){ Cf = data[tid0 + NT*i]; if (Cf > Cbest + 1e-6){ Cbest = Cf; ibest = i; kbest = kkmax[tid0 + NT*i]; } } err[tid0] = Cbest; ftype[tid0] = ibest; kall[tid0] = kbest; tid0 += blockDim.x * gridDim.x; } }
021857d473600f774c66818b32aca9dba36be771.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <math.h> #include <string.h> #include <iostream> #include "timer.h" const int KEY = 1; using namespace std; /* Utility function, use to do error checking. Use this function like this: checkCudaCall(hipMalloc((void **) &deviceRGB, imgS * sizeof(color_t))); And to check the result of a kernel invocation: checkCudaCall(hipGetLastError()); */ static void checkCudaCall(hipError_t result) { if (result != hipSuccess) { cerr << "cuda error: " << hipGetErrorString(result) << endl; exit(1); } } __global__ void encryptKernel(char* deviceDataIn, char* deviceDataOut) { // insert operations unsigned index = blockIdx.x * blockDim.x + threadIdx.x; deviceDataOut[index] = (deviceDataIn[index] + KEY) % 128; } __global__ void decryptKernel(char* deviceDataIn, char* deviceDataOut) { // insert operations unsigned index = blockIdx.x * blockDim.x + threadIdx.x; deviceDataOut[index] = (deviceDataIn[index] - KEY) % 128; } int fileSize() { int size; ifstream file ("original.data", ios::in|ios::binary|ios::ate); if (file.is_open()) { size = file.tellg(); file.close(); } else { cout << "Unable to open file"; size = -1; } return size; } int readData(char *fileName, char *data) { streampos size; ifstream file (fileName, ios::in|ios::binary|ios::ate); if (file.is_open()) { size = file.tellg(); file.seekg (0, ios::beg); file.read (data, size); file.close(); cout << "The entire file content is in memory." << endl; } else cout << "Unable to open file" << endl; return 0; } int writeData(int size, char *fileName, char *data) { ofstream file (fileName, ios::out|ios::binary|ios::trunc); if (file.is_open()) { file.write (data, size); file.close(); cout << "The entire file content was written to file." << endl; return 0; } else cout << "Unable to open file"; return -1; } int EncryptSeq (int n, char* data_in, char* data_out) { int i; timer sequentialTime = timer("Sequential encryption"); sequentialTime.start(); for (i=0; i<n; i++) { data_out[i]= (data_in[i] + KEY) % 128; } sequentialTime.stop(); cout << fixed << setprecision(6); cout << "Encryption (sequential): \t\t" << sequentialTime.getElapsed() << " seconds." << endl; return 0; } int DecryptSeq (int n, char* data_in, char* data_out) { int i; timer sequentialTime = timer("Sequential decryption"); sequentialTime.start(); for (i=0; i<n; i++) { data_out[i]= (data_in[i] - KEY) % 128; } sequentialTime.stop(); cout << fixed << setprecision(6); cout << "Decryption (sequential): \t\t" << sequentialTime.getElapsed() << " seconds." << endl; return 0; } int EncryptCuda (int n, char* data_in, char* data_out) { int threadBlockSize = 512; // allocate the vectors on the GPU char* deviceDataIn = NULL; checkCudaCall(hipMalloc((void **) &deviceDataIn, n * sizeof(char))); if (deviceDataIn == NULL) { cout << "could not allocate memory!" << endl; return -1; } char* deviceDataOut = NULL; checkCudaCall(hipMalloc((void **) &deviceDataOut, n * sizeof(char))); if (deviceDataOut == NULL) { checkCudaCall(hipFree(deviceDataIn)); cout << "could not allocate memory!" 
<< endl; return -1; } timer kernelTime1 = timer("kernelTime"); timer memoryTime = timer("memoryTime"); // copy the original vectors to the GPU memoryTime.start(); checkCudaCall(hipMemcpy(deviceDataIn, data_in, n*sizeof(char), hipMemcpyHostToDevice)); memoryTime.stop(); // execute kernel kernelTime1.start(); hipLaunchKernelGGL(( encryptKernel), dim3(n/threadBlockSize), dim3(threadBlockSize), 0, 0, deviceDataIn, deviceDataOut); hipDeviceSynchronize(); kernelTime1.stop(); // check whether the kernel invocation was successful checkCudaCall(hipGetLastError()); // copy result back memoryTime.start(); checkCudaCall(hipMemcpy(data_out, deviceDataOut, n * sizeof(char), hipMemcpyDeviceToHost)); memoryTime.stop(); checkCudaCall(hipFree(deviceDataIn)); checkCudaCall(hipFree(deviceDataOut)); cout << fixed << setprecision(6); cout << "Encrypt (kernel): \t\t" << kernelTime1.getElapsed() << " seconds." << endl; cout << "Encrypt (memory): \t\t" << memoryTime.getElapsed() << " seconds." << endl; return 0; } int DecryptCuda (int n, char* data_in, char* data_out) { int threadBlockSize = 512; // allocate the vectors on the GPU char* deviceDataIn = NULL; checkCudaCall(hipMalloc((void **) &deviceDataIn, n * sizeof(char))); if (deviceDataIn == NULL) { cout << "could not allocate memory!" << endl; return -1; } char* deviceDataOut = NULL; checkCudaCall(hipMalloc((void **) &deviceDataOut, n * sizeof(char))); if (deviceDataOut == NULL) { checkCudaCall(hipFree(deviceDataIn)); cout << "could not allocate memory!" << endl; return -1; } timer kernelTime1 = timer("kernelTime"); timer memoryTime = timer("memoryTime"); // copy the original vectors to the GPU memoryTime.start(); checkCudaCall(hipMemcpy(deviceDataIn, data_in, n*sizeof(char), hipMemcpyHostToDevice)); memoryTime.stop(); // execute kernel kernelTime1.start(); hipLaunchKernelGGL(( decryptKernel), dim3(n/threadBlockSize), dim3(threadBlockSize), 0, 0, deviceDataIn, deviceDataOut); hipDeviceSynchronize(); kernelTime1.stop(); // check whether the kernel invocation was successful checkCudaCall(hipGetLastError()); // copy result back memoryTime.start(); checkCudaCall(hipMemcpy(data_out, deviceDataOut, n * sizeof(char), hipMemcpyDeviceToHost)); memoryTime.stop(); checkCudaCall(hipFree(deviceDataIn)); checkCudaCall(hipFree(deviceDataOut)); cout << fixed << setprecision(6); cout << "Decrypt (kernel): \t\t" << kernelTime1.getElapsed() << " seconds." << endl; cout << "Decrypt (memory): \t\t" << memoryTime.getElapsed() << " seconds." << endl; return 0; } int main(int argc, char* argv[]) { int n; n = fileSize(); if (n == -1) { cout << "File not found! Exiting ... " << endl; exit(0); } char* data_in = new char[n]; char* data_out = new char[n]; readData("original.data", data_in); cout << "Encrypting a file of " << n << " characters." << endl; EncryptSeq(n, data_in, data_out); writeData(n, "sequential.data", data_out); EncryptCuda(n, data_in, data_out); writeData(n, "cuda.data", data_out); readData("cuda.data", data_in); cout << "Decrypting a file of " << n << " characters" << endl; DecryptSeq(n, data_in, data_out); writeData(n, "sequential_decrypted.data", data_out); DecryptCuda(n, data_in, data_out); writeData(n, "recovered.data", data_out); delete[] data_in; delete[] data_out; return 0; }
021857d473600f774c66818b32aca9dba36be771.cu
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <string.h> #include <iostream> #include "timer.h" const int KEY = 1; using namespace std; /* Utility function, use to do error checking. Use this function like this: checkCudaCall(cudaMalloc((void **) &deviceRGB, imgS * sizeof(color_t))); And to check the result of a kernel invocation: checkCudaCall(cudaGetLastError()); */ static void checkCudaCall(cudaError_t result) { if (result != cudaSuccess) { cerr << "cuda error: " << cudaGetErrorString(result) << endl; exit(1); } } __global__ void encryptKernel(char* deviceDataIn, char* deviceDataOut) { // insert operations unsigned index = blockIdx.x * blockDim.x + threadIdx.x; deviceDataOut[index] = (deviceDataIn[index] + KEY) % 128; } __global__ void decryptKernel(char* deviceDataIn, char* deviceDataOut) { // insert operations unsigned index = blockIdx.x * blockDim.x + threadIdx.x; deviceDataOut[index] = (deviceDataIn[index] - KEY) % 128; } int fileSize() { int size; ifstream file ("original.data", ios::in|ios::binary|ios::ate); if (file.is_open()) { size = file.tellg(); file.close(); } else { cout << "Unable to open file"; size = -1; } return size; } int readData(char *fileName, char *data) { streampos size; ifstream file (fileName, ios::in|ios::binary|ios::ate); if (file.is_open()) { size = file.tellg(); file.seekg (0, ios::beg); file.read (data, size); file.close(); cout << "The entire file content is in memory." << endl; } else cout << "Unable to open file" << endl; return 0; } int writeData(int size, char *fileName, char *data) { ofstream file (fileName, ios::out|ios::binary|ios::trunc); if (file.is_open()) { file.write (data, size); file.close(); cout << "The entire file content was written to file." << endl; return 0; } else cout << "Unable to open file"; return -1; } int EncryptSeq (int n, char* data_in, char* data_out) { int i; timer sequentialTime = timer("Sequential encryption"); sequentialTime.start(); for (i=0; i<n; i++) { data_out[i]= (data_in[i] + KEY) % 128; } sequentialTime.stop(); cout << fixed << setprecision(6); cout << "Encryption (sequential): \t\t" << sequentialTime.getElapsed() << " seconds." << endl; return 0; } int DecryptSeq (int n, char* data_in, char* data_out) { int i; timer sequentialTime = timer("Sequential decryption"); sequentialTime.start(); for (i=0; i<n; i++) { data_out[i]= (data_in[i] - KEY) % 128; } sequentialTime.stop(); cout << fixed << setprecision(6); cout << "Decryption (sequential): \t\t" << sequentialTime.getElapsed() << " seconds." << endl; return 0; } int EncryptCuda (int n, char* data_in, char* data_out) { int threadBlockSize = 512; // allocate the vectors on the GPU char* deviceDataIn = NULL; checkCudaCall(cudaMalloc((void **) &deviceDataIn, n * sizeof(char))); if (deviceDataIn == NULL) { cout << "could not allocate memory!" << endl; return -1; } char* deviceDataOut = NULL; checkCudaCall(cudaMalloc((void **) &deviceDataOut, n * sizeof(char))); if (deviceDataOut == NULL) { checkCudaCall(cudaFree(deviceDataIn)); cout << "could not allocate memory!" 
<< endl; return -1; } timer kernelTime1 = timer("kernelTime"); timer memoryTime = timer("memoryTime"); // copy the original vectors to the GPU memoryTime.start(); checkCudaCall(cudaMemcpy(deviceDataIn, data_in, n*sizeof(char), cudaMemcpyHostToDevice)); memoryTime.stop(); // execute kernel kernelTime1.start(); encryptKernel<<<n/threadBlockSize, threadBlockSize>>>(deviceDataIn, deviceDataOut); cudaDeviceSynchronize(); kernelTime1.stop(); // check whether the kernel invocation was successful checkCudaCall(cudaGetLastError()); // copy result back memoryTime.start(); checkCudaCall(cudaMemcpy(data_out, deviceDataOut, n * sizeof(char), cudaMemcpyDeviceToHost)); memoryTime.stop(); checkCudaCall(cudaFree(deviceDataIn)); checkCudaCall(cudaFree(deviceDataOut)); cout << fixed << setprecision(6); cout << "Encrypt (kernel): \t\t" << kernelTime1.getElapsed() << " seconds." << endl; cout << "Encrypt (memory): \t\t" << memoryTime.getElapsed() << " seconds." << endl; return 0; } int DecryptCuda (int n, char* data_in, char* data_out) { int threadBlockSize = 512; // allocate the vectors on the GPU char* deviceDataIn = NULL; checkCudaCall(cudaMalloc((void **) &deviceDataIn, n * sizeof(char))); if (deviceDataIn == NULL) { cout << "could not allocate memory!" << endl; return -1; } char* deviceDataOut = NULL; checkCudaCall(cudaMalloc((void **) &deviceDataOut, n * sizeof(char))); if (deviceDataOut == NULL) { checkCudaCall(cudaFree(deviceDataIn)); cout << "could not allocate memory!" << endl; return -1; } timer kernelTime1 = timer("kernelTime"); timer memoryTime = timer("memoryTime"); // copy the original vectors to the GPU memoryTime.start(); checkCudaCall(cudaMemcpy(deviceDataIn, data_in, n*sizeof(char), cudaMemcpyHostToDevice)); memoryTime.stop(); // execute kernel kernelTime1.start(); decryptKernel<<<n/threadBlockSize, threadBlockSize>>>(deviceDataIn, deviceDataOut); cudaDeviceSynchronize(); kernelTime1.stop(); // check whether the kernel invocation was successful checkCudaCall(cudaGetLastError()); // copy result back memoryTime.start(); checkCudaCall(cudaMemcpy(data_out, deviceDataOut, n * sizeof(char), cudaMemcpyDeviceToHost)); memoryTime.stop(); checkCudaCall(cudaFree(deviceDataIn)); checkCudaCall(cudaFree(deviceDataOut)); cout << fixed << setprecision(6); cout << "Decrypt (kernel): \t\t" << kernelTime1.getElapsed() << " seconds." << endl; cout << "Decrypt (memory): \t\t" << memoryTime.getElapsed() << " seconds." << endl; return 0; } int main(int argc, char* argv[]) { int n; n = fileSize(); if (n == -1) { cout << "File not found! Exiting ... " << endl; exit(0); } char* data_in = new char[n]; char* data_out = new char[n]; readData("original.data", data_in); cout << "Encrypting a file of " << n << " characters." << endl; EncryptSeq(n, data_in, data_out); writeData(n, "sequential.data", data_out); EncryptCuda(n, data_in, data_out); writeData(n, "cuda.data", data_out); readData("cuda.data", data_in); cout << "Decrypting a file of " << n << " characters" << endl; DecryptSeq(n, data_in, data_out); writeData(n, "sequential_decrypted.data", data_out); DecryptCuda(n, data_in, data_out); writeData(n, "recovered.data", data_out); delete[] data_in; delete[] data_out; return 0; }
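// --- editor's sketch (not part of the files above) ----------------------------------------
// Both the HIP and CUDA versions of EncryptCuda/DecryptCuda above launch with
// n/threadBlockSize blocks and no index guard inside the kernels, so whenever the file
// size is not a multiple of 512 the last n % 512 bytes are silently left unprocessed.
// The usual fix is sketched below: round the grid up and bounds-check the index.
// encryptGuarded and the tiny test vector are illustrative, not code from the files above.
#include <cstdio>
#include <cuda_runtime.h>

// Same byte-wise Caesar shift as above, but guarded so a rounded-up grid is safe.
__global__ void encryptGuarded(const char *in, char *out, int n, int key){
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)                              // tail threads of the last block do nothing
        out[i] = (in[i] + key) % 128;
}

int main(){
    const int n = 1000, key = 1, block = 512;   // n deliberately not a multiple of block
    char h[n];
    for (int i = 0; i < n; i++) h[i] = 'a' + i % 26;
    char *dIn, *dOut;
    cudaMalloc(&dIn, n); cudaMalloc(&dOut, n);
    cudaMemcpy(dIn, h, n, cudaMemcpyHostToDevice);
    // round the grid up so the final partial block is covered
    encryptGuarded<<<(n + block - 1) / block, block>>>(dIn, dOut, n, key);
    cudaMemcpy(h, dOut, n, cudaMemcpyDeviceToHost);
    printf("first encrypted byte: %d\n", h[0]);
    cudaFree(dIn); cudaFree(dOut);
    return 0;
}
// ------------------------------------------------------------------------------------------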
cc3c30278e2b80584cd2e3c0635be6de576397ed.hip
// !!! This is a file automatically generated by hipify!!! /* * * pageableMemcpyHtoD16.cu * * Microdemo that illustrates how to implement a fast pageable memcpy * using standard CUDA functionality. * * A pair of pinned staging buffers are allocated, and after the first * staging buffer has been filled, the GPU pulls from one while the * CPU fills the other. CUDA events are used for synchronization. * * This implementation uses the SSE-optimized memcpy of memcpy16.cpp, * so for simplicity, it requires host pointers to be 16-byte aligned. * * Build with: nvcc -I ../chLib <options> pageableMemcpyHtoD16.cu memcpy16.cpp * Requires: No minimum SM requirement. * * Copyright (c) 2011-2012, Archaea Software, LLC. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * */ #include <stdio.h> #include <assert.h> #include "chError.h" #include "chTimer.h" #define STAGING_BUFFER_SIZE 1048576 void *g_hostBuffers[2]; hipEvent_t g_events[2]; // these are already defined on some platforms - make our // own definitions that will work. 
#undef min #undef max #define min(a,b) ((a)<(b)?(a):(b)) #define max(a,b) ((b)<(a)?(a):(b)) extern bool memcpy16( void *_dst, const void *_src, size_t N ); void chMemcpyHtoD( void *device, const void *host, size_t N ) { hipError_t status; char *dst = (char *) device; const char *src = (const char *) host; int stagingIndex = 0; while ( N ) { size_t thisCopySize = min( N, STAGING_BUFFER_SIZE ); cuda(EventSynchronize( g_events[stagingIndex] ) ); memcpy16( g_hostBuffers[stagingIndex], src, thisCopySize ); cuda(MemcpyAsync( dst, g_hostBuffers[stagingIndex], thisCopySize, hipMemcpyHostToDevice, NULL ) ); cuda(EventRecord( g_events[1-stagingIndex], NULL ) ); dst += thisCopySize; src += thisCopySize; N -= thisCopySize; stagingIndex = 1 - stagingIndex; } Error: return; } bool TestMemcpy( int *dstDevice, int *srcHost, const int *srcOriginal, size_t dstOffset, size_t srcOffset, size_t numInts ) { chMemcpyHtoD( dstDevice+dstOffset, srcOriginal+srcOffset, numInts*sizeof(int) ); hipMemcpy( srcHost, dstDevice+dstOffset, numInts*sizeof(int), hipMemcpyDeviceToHost ); for ( size_t i = 0; i < numInts; i++ ) { if ( srcHost[i] != srcOriginal[srcOffset+i] ) { return false; } } return true; } int main( int argc, char *argv[] ) { hipError_t status; int *deviceInt = 0; int *hostInt = 0; const size_t numInts = 32*1048576; const int cIterations = 10; int *testVector = 0; printf( "Pageable memcpy (16-byte aligned)... " ); fflush( stdout ); chTimerTimestamp start, stop; cuda(HostAlloc( &g_hostBuffers[0], STAGING_BUFFER_SIZE, hipHostMallocDefault ) ); cuda(HostAlloc( &g_hostBuffers[1], STAGING_BUFFER_SIZE, hipHostMallocDefault ) ); cuda(EventCreate( &g_events[0] ) ); cuda(EventRecord( g_events[0], 0 ) ); // so it is signaled on first synchronize cuda(EventCreate( &g_events[1] ) ); cuda(EventRecord( g_events[1], 0 ) ); // so it is signaled on first synchronize cuda(Malloc( &deviceInt, numInts*sizeof(int) ) ); cuda(HostAlloc( &hostInt, numInts*sizeof(int), 0 ) ); testVector = (int *) malloc( numInts*sizeof(int) ); if ( ! testVector ) { printf( "malloc() failed\n" ); return 1; } for ( size_t i = 0; i < numInts; i++ ) { testVector[i] = rand(); } if ( ! TestMemcpy( deviceInt, hostInt, testVector, 0, 0, numInts ) ) { goto Error; } for ( int i = 0; i < cIterations; i++ ) { size_t numInts4 = numInts / 4; size_t dstOffset = rand() % (numInts4-1); size_t srcOffset = rand() % (numInts4-1); size_t intsThisIteration = 1 + rand() % (numInts4-max(dstOffset,srcOffset)-1); dstOffset *= 4; srcOffset *= 4; intsThisIteration *= 4; assert( intsThisIteration <= numInts ); if ( ! TestMemcpy( deviceInt, hostInt, testVector, dstOffset, srcOffset, intsThisIteration ) ) { TestMemcpy( deviceInt, hostInt, testVector, dstOffset, srcOffset, intsThisIteration ); goto Error; } } chTimerGetTime( &start ); for ( int i = 0; i < cIterations; i++ ) { chMemcpyHtoD( deviceInt, testVector, numInts*sizeof(int) ) ; } cuda(ThreadSynchronize() ); chTimerGetTime( &stop ); { double MBytes = cIterations*numInts*sizeof(int) / 1048576.0; double MBpers = MBytes / chTimerElapsedTime( &start, &stop ); printf( "%.2f MB/s\n", MBpers ); } hipFree( deviceInt ); hipHostFree( hostInt ); return 0; Error: printf( "Error\n" ); return 1; }
cc3c30278e2b80584cd2e3c0635be6de576397ed.cu
/* * * pageableMemcpyHtoD16.cu * * Microdemo that illustrates how to implement a fast pageable memcpy * using standard CUDA functionality. * * A pair of pinned staging buffers are allocated, and after the first * staging buffer has been filled, the GPU pulls from one while the * CPU fills the other. CUDA events are used for synchronization. * * This implementation uses the SSE-optimized memcpy of memcpy16.cpp, * so for simplicity, it requires host pointers to be 16-byte aligned. * * Build with: nvcc -I ../chLib <options> pageableMemcpyHtoD16.cu memcpy16.cpp * Requires: No minimum SM requirement. * * Copyright (c) 2011-2012, Archaea Software, LLC. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * */ #include <stdio.h> #include <assert.h> #include "chError.h" #include "chTimer.h" #define STAGING_BUFFER_SIZE 1048576 void *g_hostBuffers[2]; cudaEvent_t g_events[2]; // these are already defined on some platforms - make our // own definitions that will work. 
#undef min #undef max #define min(a,b) ((a)<(b)?(a):(b)) #define max(a,b) ((b)<(a)?(a):(b)) extern bool memcpy16( void *_dst, const void *_src, size_t N ); void chMemcpyHtoD( void *device, const void *host, size_t N ) { cudaError_t status; char *dst = (char *) device; const char *src = (const char *) host; int stagingIndex = 0; while ( N ) { size_t thisCopySize = min( N, STAGING_BUFFER_SIZE ); cuda(EventSynchronize( g_events[stagingIndex] ) ); memcpy16( g_hostBuffers[stagingIndex], src, thisCopySize ); cuda(MemcpyAsync( dst, g_hostBuffers[stagingIndex], thisCopySize, cudaMemcpyHostToDevice, NULL ) ); cuda(EventRecord( g_events[1-stagingIndex], NULL ) ); dst += thisCopySize; src += thisCopySize; N -= thisCopySize; stagingIndex = 1 - stagingIndex; } Error: return; } bool TestMemcpy( int *dstDevice, int *srcHost, const int *srcOriginal, size_t dstOffset, size_t srcOffset, size_t numInts ) { chMemcpyHtoD( dstDevice+dstOffset, srcOriginal+srcOffset, numInts*sizeof(int) ); cudaMemcpy( srcHost, dstDevice+dstOffset, numInts*sizeof(int), cudaMemcpyDeviceToHost ); for ( size_t i = 0; i < numInts; i++ ) { if ( srcHost[i] != srcOriginal[srcOffset+i] ) { return false; } } return true; } int main( int argc, char *argv[] ) { cudaError_t status; int *deviceInt = 0; int *hostInt = 0; const size_t numInts = 32*1048576; const int cIterations = 10; int *testVector = 0; printf( "Pageable memcpy (16-byte aligned)... " ); fflush( stdout ); chTimerTimestamp start, stop; cuda(HostAlloc( &g_hostBuffers[0], STAGING_BUFFER_SIZE, cudaHostAllocDefault ) ); cuda(HostAlloc( &g_hostBuffers[1], STAGING_BUFFER_SIZE, cudaHostAllocDefault ) ); cuda(EventCreate( &g_events[0] ) ); cuda(EventRecord( g_events[0], 0 ) ); // so it is signaled on first synchronize cuda(EventCreate( &g_events[1] ) ); cuda(EventRecord( g_events[1], 0 ) ); // so it is signaled on first synchronize cuda(Malloc( &deviceInt, numInts*sizeof(int) ) ); cuda(HostAlloc( &hostInt, numInts*sizeof(int), 0 ) ); testVector = (int *) malloc( numInts*sizeof(int) ); if ( ! testVector ) { printf( "malloc() failed\n" ); return 1; } for ( size_t i = 0; i < numInts; i++ ) { testVector[i] = rand(); } if ( ! TestMemcpy( deviceInt, hostInt, testVector, 0, 0, numInts ) ) { goto Error; } for ( int i = 0; i < cIterations; i++ ) { size_t numInts4 = numInts / 4; size_t dstOffset = rand() % (numInts4-1); size_t srcOffset = rand() % (numInts4-1); size_t intsThisIteration = 1 + rand() % (numInts4-max(dstOffset,srcOffset)-1); dstOffset *= 4; srcOffset *= 4; intsThisIteration *= 4; assert( intsThisIteration <= numInts ); if ( ! TestMemcpy( deviceInt, hostInt, testVector, dstOffset, srcOffset, intsThisIteration ) ) { TestMemcpy( deviceInt, hostInt, testVector, dstOffset, srcOffset, intsThisIteration ); goto Error; } } chTimerGetTime( &start ); for ( int i = 0; i < cIterations; i++ ) { chMemcpyHtoD( deviceInt, testVector, numInts*sizeof(int) ) ; } cuda(ThreadSynchronize() ); chTimerGetTime( &stop ); { double MBytes = cIterations*numInts*sizeof(int) / 1048576.0; double MBpers = MBytes / chTimerElapsedTime( &start, &stop ); printf( "%.2f MB/s\n", MBpers ); } cudaFree( deviceInt ); cudaFreeHost( hostInt ); return 0; Error: printf( "Error\n" ); return 1; }
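// --- editor's sketch (not part of the files above) ----------------------------------------
// chMemcpyHtoD above ping-pongs between two pinned staging buffers so the CPU fill of one
// buffer can overlap the asynchronous DMA from the other, with one CUDA event per buffer
// gating when that buffer may be refilled. Below is a condensed, self-contained sketch of
// that pattern: plain memcpy instead of the SSE memcpy16, no cuda()/goto error handling,
// and the event of the buffer just submitted is recorded directly. stagedHtoD and
// STAGE_BYTES are illustrative names, not from the file above.
#include <cstdio>
#include <cstring>
#include <cstdlib>
#include <cuda_runtime.h>

#define STAGE_BYTES (1 << 20)

static void stagedHtoD(void *device, const void *host, size_t n,
                       void *stage[2], cudaEvent_t ev[2]){
    char *dst = (char *) device;
    const char *src = (const char *) host;
    int idx = 0;
    while (n) {
        size_t chunk = n < STAGE_BYTES ? n : STAGE_BYTES;
        cudaEventSynchronize(ev[idx]);        // previous copy out of this buffer is done
        memcpy(stage[idx], src, chunk);       // CPU fills one buffer...
        cudaMemcpyAsync(dst, stage[idx], chunk, cudaMemcpyHostToDevice, 0);
        cudaEventRecord(ev[idx], 0);          // ...and marks when it becomes reusable
        dst += chunk; src += chunk; n -= chunk;
        idx = 1 - idx;                        // the GPU drains this buffer while the CPU
    }                                         // fills the other one
}

int main(){
    const size_t n = 8u << 20;
    void *stage[2]; cudaEvent_t ev[2];
    for (int i = 0; i < 2; i++) {
        cudaHostAlloc(&stage[i], STAGE_BYTES, cudaHostAllocDefault);
        cudaEventCreate(&ev[i]);
        cudaEventRecord(ev[i], 0);            // pre-signal, as in the original
    }
    char *src = (char *) malloc(n), *ddev;
    cudaMalloc(&ddev, n);
    memset(src, 7, n);
    stagedHtoD(ddev, src, n, stage, ev);
    cudaDeviceSynchronize();
    printf("staged copy of %zu bytes done\n", n);
    for (int i = 0; i < 2; i++) { cudaFreeHost(stage[i]); cudaEventDestroy(ev[i]); }
    cudaFree(ddev); free(src);
    return 0;
}
// ------------------------------------------------------------------------------------------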
ba9bcef0d808037a2cd5924f502b67f9c003bfb6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" __global__ void cuda_set_sg(int *sxz, int sxbeg, int szbeg, int jsx, int jsz, int ns, int nz) /*< set the positions of sources/geophones >*/ { int id=threadIdx.x+blockDim.x*blockIdx.x; if (id<ns) sxz[id]=(szbeg+id*jsz)+nz*(sxbeg+id*jsx); } __global__ void cuda_ricker_wavelet(float *wlt, float amp, float fm, float dt, int nt) /*< generate ricker wavelet with time deley >*/ { int it=threadIdx.x+blockDim.x*blockIdx.x; if (it<nt) { float tmp = PI*fm*(it*dt-1.0/fm); tmp *=tmp; wlt[it]=amp*(1.0-2.0*tmp)*expf(-tmp); } } __global__ void cuda_add_source(float *p, float *source, int *sxz, int ns, bool add) /*< add==true, add (inject) the source; add==false, subtract the source >*/ { int id=threadIdx.x+blockDim.x*blockIdx.x; if (id<ns) { if (add) p[sxz[id]]+=source[id]; else p[sxz[id]]-=source[id]; } } __global__ void cuda_record(float*p, float *seis, int *gxz, int ng) /*< record the seismogram at time it >*/ { int id=threadIdx.x+blockDim.x*blockIdx.x; if (id<ng) seis[id]=p[gxz[id]]; } __global__ void cuda_step_forward(float *p0, float *p1, float *vv, float dtz, float dtx, int nz, int nx) /*< step forward: dtz=dt/dx; dtx=dt/dz; >*/ { int i1=threadIdx.x+blockIdx.x*blockDim.x; int i2=threadIdx.y+blockIdx.y*blockDim.y; int id=i1+i2*nz; __shared__ float s_p0[Block_Size2+2][Block_Size1+2]; __shared__ float s_p1[Block_Size2+2][Block_Size1+2]; if(threadIdx.x<1) { s_p0[threadIdx.y+1][threadIdx.x]=(blockIdx.x>0)?p0[id-1]:0.0; s_p1[threadIdx.y+1][threadIdx.x]=(blockIdx.x>0)?p1[id-1]:0.0; } if(threadIdx.x>=blockDim.x-1) { s_p0[threadIdx.y+1][threadIdx.x+2]=(blockIdx.x<gridDim.x-1)?p0[id+1]:0.0; s_p1[threadIdx.y+1][threadIdx.x+2]=(blockIdx.x<gridDim.x-1)?p1[id+1]:0.0; } if(threadIdx.y<1) { s_p0[threadIdx.y][threadIdx.x+1]=(blockIdx.y>0)?p1[id-nz]:0.0; s_p1[threadIdx.y][threadIdx.x+1]=(blockIdx.y>0)?p1[id-nz]:0.0; } if(threadIdx.y>=blockDim.y-1) { s_p0[threadIdx.y+2][threadIdx.x+1]=(blockIdx.y<gridDim.y-1)?p1[id+nz]:0.0; s_p1[threadIdx.y+2][threadIdx.x+1]=(blockIdx.y<gridDim.y-1)?p1[id+nz]:0.0; } s_p0[threadIdx.y+1][threadIdx.x+1]=p0[id]; s_p1[threadIdx.y+1][threadIdx.x+1]=p1[id]; __syncthreads(); if (i1<nz && i2<nx) { float v1=vv[id]*dtz; float v2=vv[id]*dtx; float c1=v1*v1*(s_p1[threadIdx.y+1][threadIdx.x+2]-2.0*s_p1[threadIdx.y+1][threadIdx.x+1]+s_p1[threadIdx.y+1][threadIdx.x]); float c2=v2*v2*(s_p1[threadIdx.y+2][threadIdx.x+1]-2.0*s_p1[threadIdx.y+1][threadIdx.x+1]+s_p1[threadIdx.y][threadIdx.x+1]); /* if(i1==0)// top boundary is free surface boundary condition, commentted!! 
{ c1=v1*(-s_p1[threadIdx.y+1][threadIdx.x+1]+s_p1[threadIdx.y+1][threadIdx.x+2] +s_p0[threadIdx.y+1][threadIdx.x+1]-s_p0[threadIdx.y+1][threadIdx.x+2]); if(i2>0 && i2<nx-1) c2=0.5*c2; } */ if(i1==nz-1) /* bottom boundary */ { c1=v1*(s_p1[threadIdx.y+1][threadIdx.x]-s_p1[threadIdx.y+1][threadIdx.x+1] -s_p0[threadIdx.y+1][threadIdx.x]+s_p0[threadIdx.y+1][threadIdx.x+1]); if(i2>0 && i2<nx-1) c2=0.5*c2; } if(i2==0)/* left boundary */ { if(i1>0 && i1<nz-1) c1=0.5*c1; c2=v2*(-s_p1[threadIdx.y+1][threadIdx.x+1]+s_p1[threadIdx.y+2][threadIdx.x+1] +s_p0[threadIdx.y+1][threadIdx.x+1]-s_p0[threadIdx.y+2][threadIdx.x+1]); } if(i2==nx-1) /* right boundary */ { if(i1>0 && i1<nz-1) c1=0.5*c1; c2=v2*(s_p1[threadIdx.y][threadIdx.x+1]-s_p1[threadIdx.y+1][threadIdx.x+1] -s_p0[threadIdx.y][threadIdx.x+1]+s_p0[threadIdx.y+1][threadIdx.x+1]); } p0[id]=2.0*s_p1[threadIdx.y+1][threadIdx.x+1]-s_p0[threadIdx.y+1][threadIdx.x+1]+c1+c2; } } __global__ void cuda_rw_bndr(float *bndr, float *p1, int nz, int nx, bool write) /*< write boundaries out or read them into wavefield variables p>*/ { int id=threadIdx.x+blockIdx.x*blockDim.x; if(write){ if(id<nz) bndr[id]=p1[id];/* left boundary */ else if (id<2*nz) bndr[id]=p1[(id-nz)+nz*(nx-1)];/*right boundary */ else if (id<2*nz+nx) bndr[id]=p1[nz-1+nz*(id-2*nz)];/* bottom boundary */ }else{ if(id<nz) p1[id]=bndr[id];/*left boundary */ else if (id<2*nz) p1[(id-nz)+nz*(nx-1)]=bndr[id];/*right boundary*/ else if (id<2*nz+nx) p1[nz-1+nz*(id-2*nz)]=bndr[id];/*bottom boundary */ } } __global__ void cuda_step_backward(float *illum, float *lap, float *p0, float *p1, float *vv, float dtz, float dtx, int nz, int nx) /*< step backward >*/ { int i1=threadIdx.x+blockIdx.x*blockDim.x; int i2=threadIdx.y+blockIdx.y*blockDim.y; int id=i1+i2*nz; __shared__ float s_p1[Block_Size2+2][Block_Size1+2]; s_p1[threadIdx.y+1][threadIdx.x+1]=p1[id]; if(threadIdx.x<1) { s_p1[threadIdx.y+1][threadIdx.x]=(blockIdx.x>0)?p1[id-1]:0.0; } if(threadIdx.x>=blockDim.x-1) { s_p1[threadIdx.y+1][threadIdx.x+2]=(blockIdx.x<gridDim.x-1)?p1[id+1]:0.0; } if(threadIdx.y<1) { s_p1[threadIdx.y][threadIdx.x+1]=(blockIdx.y>0)?p1[id-nz]:0.0; } if(threadIdx.y>=blockDim.y-1) { s_p1[threadIdx.y+2][threadIdx.x+1]=(blockIdx.y<gridDim.y-1)?p1[id+nz]:0.0; } __syncthreads(); if (i1<nz && i2<nx) { float c1=(s_p1[threadIdx.y+1][threadIdx.x+2]-2.0*s_p1[threadIdx.y+1][threadIdx.x+1]+s_p1[threadIdx.y+1][threadIdx.x]); float c2=(s_p1[threadIdx.y+2][threadIdx.x+1]-2.0*s_p1[threadIdx.y+1][threadIdx.x+1]+s_p1[threadIdx.y][threadIdx.x+1]); lap[id]=c1+c2; float v1=vv[id]*dtz; float v2=vv[id]*dtx; c1*=v1*v1; c2*=v2*v2; p0[id]=2.0*s_p1[threadIdx.y+1][threadIdx.x+1]-p0[id]+c1+c2; illum[id]+=s_p1[threadIdx.y+1][threadIdx.x+1]*s_p1[threadIdx.y+1][threadIdx.x+1]; } } __global__ void cuda_cal_residuals(float *dcal, float *dobs, float *derr, int ng) /*< calculate residual wavefield at the receiver positions dcal: d_{cal} dobs: d_{obs} derr: d_{err}=d_{cal}-d_{obs} >*/ { int id=blockIdx.x*blockDim.x+threadIdx.x; if (id<ng) derr[id]=dcal[id]-dobs[id]; } __global__ void cuda_cal_objective(float *obj, float *err, int ng) /*< calculate the value of objective function: obj >*/ { __shared__ float sdata[Block_Size]; int tid=threadIdx.x; sdata[tid]=0.0f; for(int s=0; s<(ng+Block_Size-1)/Block_Size; s++) { int id=s*blockDim.x+threadIdx.x; float a=(id<ng)?err[id]:0.0f; sdata[tid] += a*a; } __syncthreads(); /* do reduction in shared mem */ for(int s=blockDim.x/2; s>32; s>>=1) { if (threadIdx.x < s) sdata[tid] += sdata[tid + s]; __syncthreads(); } if (tid < 32) { 
if (blockDim.x >= 64) { sdata[tid] += sdata[tid + 32]; } if (blockDim.x >= 32) { sdata[tid] += sdata[tid + 16]; } if (blockDim.x >= 16) { sdata[tid] += sdata[tid + 8]; } if (blockDim.x >= 8) { sdata[tid] += sdata[tid + 4]; } if (blockDim.x >= 4) { sdata[tid] += sdata[tid + 2]; } if (blockDim.x >= 2) { sdata[tid] += sdata[tid + 1]; } } if (tid == 0) { *obj=sdata[0]; } } __global__ void cuda_cal_gradient(float *g1, float *lap, float *gp, int nz, int nx) /*< calculate gradient >*/ { int i1=threadIdx.x+blockIdx.x*blockDim.x; int i2=threadIdx.y+blockIdx.y*blockDim.y; int id=i1+nz*i2; /* Here, the second derivative of sp has been replaced with laplace according to wave equation: second_derivative{p}=v^2 lap{p} */ if (i1<nz && i2<nx) g1[id]+=lap[id]*gp[id]; } __global__ void cuda_scale_gradient(float *g1, float *vv, float *illum, int nz, int nx, bool precon) /*< scale gradient >*/ { int i1=threadIdx.x+blockIdx.x*blockDim.x; int i2=threadIdx.y+blockIdx.y*blockDim.y; int id=i1+nz*i2; if (i1<nz && i2<nx) { float a=vv[id]; if (precon) a*=sqrtf(illum[id]+EPS);/*precondition with residual wavefield illumination*/ g1[id]*=2.0/a; } } __global__ void cuda_cal_beta(float *beta, float *g0, float *g1, float *cg, int N) /*< calculate beta for nonlinear conjugate gradient algorithm configuration requirement: <<<1,Block_Size>>> >*/ { __shared__ float sdata[Block_Size]; __shared__ float tdata[Block_Size]; __shared__ float rdata[Block_Size]; int tid = threadIdx.x; sdata[tid] = 0.0f; tdata[tid] = 0.0f; rdata[tid] = 0.0f; for(int s=0; s<(N+Block_Size-1)/Block_Size; s++) { int id=s*blockDim.x+threadIdx.x; float a=(id<N)?g0[id]:0.0f; float b=(id<N)?g1[id]:0.0f; float c=(id<N)?cg[id]:0.0f; /* HS: Hestenses-Stiefel NLCG algorithm */ sdata[tid] += b*(b-a); // numerator of HS tdata[tid] += c*(b-a); // denominator of HS,DY rdata[tid] += b*b; // numerator of DY /* // PRP: Polark-Ribiere-Polyar NLCG algorithm sdata[tid] += b*(b-a); // numerator tdata[tid] += a*a; // denominator // HS: Hestenses-Stiefel NLCG algorithm sdata[tid] += b*(b-a); // numerator tdata[tid] += c*(b-a); // denominator // FR: Fletcher-Reeves NLCG algorithm sdata[tid] += b*b; // numerator tdata[tid] += a*a; // denominator // PRP: Polark-Ribiere-Polyar NLCG algorithm sdata[tid] += b*(b-a); // numerator tdata[tid] += a*a; // denominator // CD: Fletcher NLCG algorithm sdata[tid] += b*b; // numerator tdata[tid] -= c*a; // denominator // DY: Dai-Yuan NLCG algorithm sdata[tid] += b*b; // numerator tdata[tid] += c*(b-a); // denominator */ } __syncthreads(); /* do reduction in shared mem */ for(int s=blockDim.x/2; s>32; s>>=1) { if (threadIdx.x < s) { sdata[tid]+=sdata[tid+s]; tdata[tid]+=tdata[tid+s]; rdata[tid]+=rdata[tid+s];} __syncthreads(); } if (tid < 32) { if (blockDim.x >=64) { sdata[tid]+=sdata[tid+32]; tdata[tid]+=tdata[tid+32]; rdata[tid]+=rdata[tid+32];} if (blockDim.x >=32) { sdata[tid]+=sdata[tid+16]; tdata[tid]+=tdata[tid+16]; rdata[tid]+=rdata[tid+16];} if (blockDim.x >=16) { sdata[tid]+=sdata[tid+ 8]; tdata[tid]+=tdata[tid+ 8]; rdata[tid]+=rdata[tid+ 8];} if (blockDim.x >= 8) { sdata[tid]+=sdata[tid+ 4]; tdata[tid]+=tdata[tid+ 4]; rdata[tid]+=rdata[tid+ 4];} if (blockDim.x >= 4) { sdata[tid]+=sdata[tid+ 2]; tdata[tid]+=tdata[tid+ 2]; rdata[tid]+=rdata[tid+ 2];} if (blockDim.x >= 2) { sdata[tid]+=sdata[tid+ 1]; tdata[tid]+=tdata[tid+ 1]; rdata[tid]+=rdata[tid+ 1];} } if (tid == 0) { float beta_HS=0.0; float beta_DY=0.0; if(fabsf(tdata[0])>EPS) { beta_HS=sdata[0]/tdata[0]; beta_DY=rdata[0]/tdata[0]; } *beta=max(0.0, min(beta_HS, beta_DY));/* 
Hybrid HS-DY method combined with iteration restart */ } } __global__ void cuda_cal_conjgrad(float *g1, float *cg, float beta, int nz, int nx) /*< calculate nonlinear conjugate gradient >*/ { int i1=blockIdx.x*blockDim.x+threadIdx.x; int i2=blockIdx.y*blockDim.y+threadIdx.y; int id=i1+i2*nz; if (i1<nz && i2<nx) cg[id]=-g1[id]+beta*cg[id]; } __global__ void cuda_cal_epsilon(float *vv, float *cg, float *epsil, int N) /*< calculate estimated stepsize (epsil) according to Taratola's method configuration requirement: <<<1, Block_Size>>> >*/ { __shared__ float sdata[Block_Size];/* find max(|vv(:)|) */ __shared__ float tdata[Block_Size];/* find max(|cg(:)|) */ int tid = threadIdx.x; sdata[tid] = 0.0f; tdata[tid] = 0.0f; for(int s=0; s<(N+Block_Size-1)/Block_Size; s++) { int id=s*blockDim.x+threadIdx.x; float a=(id<N)?fabsf(vv[id]):0.0f; float b=(id<N)?fabsf(cg[id]):0.0f; sdata[tid]= max(sdata[tid], a); tdata[tid]= max(tdata[tid], b); } __syncthreads(); /* do reduction in shared mem */ for(int s=blockDim.x/2; s>32; s>>=1) { if (threadIdx.x < s) {sdata[tid]=max(sdata[tid], sdata[tid+s]);tdata[tid]=max(tdata[tid], tdata[tid+s]);} __syncthreads(); } if (tid < 32) { if (blockDim.x >= 64) { sdata[tid] =max(sdata[tid],sdata[tid + 32]);tdata[tid]=max(tdata[tid], tdata[tid+32]);} if (blockDim.x >= 32) { sdata[tid] =max(sdata[tid],sdata[tid + 16]);tdata[tid]=max(tdata[tid], tdata[tid+16]);} if (blockDim.x >= 16) { sdata[tid] =max(sdata[tid],sdata[tid + 8]);tdata[tid]=max(tdata[tid], tdata[tid+8]);} if (blockDim.x >= 8) { sdata[tid] =max(sdata[tid],sdata[tid + 4]);tdata[tid]=max(tdata[tid], tdata[tid+4]);} if (blockDim.x >= 4) { sdata[tid] =max(sdata[tid],sdata[tid + 2]);tdata[tid]=max(tdata[tid], tdata[tid+2]);} if (blockDim.x >= 2) { sdata[tid] =max(sdata[tid],sdata[tid + 1]);tdata[tid]=max(tdata[tid], tdata[tid+1]);} } if (tid == 0) { if(tdata[0]>EPS) *epsil=0.01*sdata[0]/tdata[0]; else *epsil=0.0;} } __global__ void cuda_cal_vtmp(float *vtmp, float *vv, float *cg, float epsil, int nz, int nx) /*< calculate temporary velocity >*/ { int i1=threadIdx.x+blockIdx.x*blockDim.x; int i2=threadIdx.y+blockIdx.y*blockDim.x; int id=i1+i2*nz; if (i1<nz && i2<nx) vtmp[id]=vv[id]+epsil*cg[id]; } __global__ void cuda_sum_alpha12(float *alpha1, float *alpha2, float *dcaltmp, float *dobs, float *derr, int ng) /*< calculate the numerator and denominator of alpha alpha1: numerator; length=ng alpha2: denominator; length=ng >*/ { int id=threadIdx.x+blockDim.x*blockIdx.x; if(id<ng) { float c=derr[id]; float a=dobs[id]+c;/* since f(mk)-dobs[id]=derr[id], thus f(mk)=b+c; */ float b=dcaltmp[id]-a;/* f(mk+epsil*cg)-f(mk) */ alpha1[id]-=b*c; alpha2[id]+=b*b; } } __global__ void cuda_cal_alpha(float *alpha, float *alpha1, float *alpha2, float epsil, int ng) /*< calculate searched stepsize (alpha) according to Taratola's method configuration requirement: <<<1, Block_Size>>> >*/ { __shared__ float sdata[Block_Size]; __shared__ float tdata[Block_Size]; int tid=threadIdx.x; sdata[tid]=0.0f; tdata[tid]=0.0f; for(int s=0; s<(ng+Block_Size-1)/Block_Size; s++) { int id=s*blockDim.x+threadIdx.x; float a=(id<ng)?alpha1[id]:0.0f; float b=(id<ng)?alpha2[id]:0.0f; sdata[tid] +=a; tdata[tid] +=b; } __syncthreads(); /* do reduction in shared mem */ for(int s=blockDim.x/2; s>32; s>>=1) { if (threadIdx.x < s) { sdata[tid] += sdata[tid + s];tdata[tid] += tdata[tid + s]; } __syncthreads(); } if (tid < 32) { if (blockDim.x >= 64) { sdata[tid] += sdata[tid + 32]; tdata[tid] += tdata[tid + 32];} if (blockDim.x >= 32) { sdata[tid] += sdata[tid + 16]; 
tdata[tid] += tdata[tid + 16];} if (blockDim.x >= 16) { sdata[tid] += sdata[tid + 8]; tdata[tid] += tdata[tid + 8];} if (blockDim.x >= 8) { sdata[tid] += sdata[tid + 4]; tdata[tid] += tdata[tid + 4];} if (blockDim.x >= 4) { sdata[tid] += sdata[tid + 2]; tdata[tid] += tdata[tid + 2];} if (blockDim.x >= 2) { sdata[tid] += sdata[tid + 1]; tdata[tid] += tdata[tid + 1];} } if (tid == 0) { if(tdata[0]>EPS) *alpha=epsil*sdata[0]/(tdata[0]+EPS); else *alpha=0.0;} } __global__ void cuda_update_vel(float *vv, float *cg, float alpha, int nz, int nx) /*< update velocity model with obtained stepsize (alpha) >*/ { int i1=threadIdx.x+blockIdx.x*blockDim.x; int i2=threadIdx.y+blockIdx.y*blockDim.x; int id=i1+i2*nz; if (i1<nz && i2<nx) vv[id]=vv[id]+alpha*cg[id]; } __global__ void cuda_bell_smoothz(float *g, float *smg, int rbell, int nz, int nx) /*< smoothing with gaussian function >*/ { int i; int i1=threadIdx.x+blockIdx.x*blockDim.x; int i2=threadIdx.y+blockIdx.y*blockDim.x; int id=i1+i2*nz; if(i1<nz && i2<nx) { float s=0; for(i=-rbell; i<=rbell; i++) if(i1+i>=0 && i1+i<nz) s+=expf(-(2.0*i*i)/rbell)*g[id+i]; smg[id]=s; } } __global__ void cuda_bell_smoothx(float *g, float *smg, int rbell, int nz, int nx) /*< smoothing with gaussian function >*/ { int i; int i1=threadIdx.x+blockIdx.x*blockDim.x; int i2=threadIdx.y+blockIdx.y*blockDim.x; int id=i1+i2*nz; if(i1<nz && i2<nx) { float s=0; for(i=-rbell; i<=rbell; i++) if(i2+i>=0 && i2+i<nx) s+=expf(-(2.0*i*i)/rbell)*g[id+nz*i]; smg[id]=s; } }
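// --- editor's sketch (not part of the files above or below) --------------------------------
// The 2-D kernels above map i1 (depth, fastest-varying) to threadIdx.x/blockIdx.x and
// i2 (lateral) to threadIdx.y/blockIdx.y over an nz-by-nx grid stored with id = i1 + i2*nz.
// Several of them (cuda_cal_vtmp, cuda_update_vel, the bell smoothers) compute i2 as
// threadIdx.y + blockIdx.y*blockDim.x, which is only correct when the block is square.
// The self-contained sketch below shows the same kind of update with blockDim.y and a
// rounded-up 2-D launch; updateVelSketch and the 16x16 block size are illustrative choices,
// not code or constants from the file above.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void updateVelSketch(float *vv, const float *cg, float alpha, int nz, int nx){
    int i1 = threadIdx.x + blockIdx.x * blockDim.x;   // depth index (fastest dimension)
    int i2 = threadIdx.y + blockIdx.y * blockDim.y;   // lateral index
    if (i1 < nz && i2 < nx) vv[i1 + i2 * nz] += alpha * cg[i1 + i2 * nz];
}

int main(){
    const int nz = 100, nx = 60;                      // deliberately not multiples of 16
    float *vv, *cg;
    cudaMalloc(&vv, nz * nx * sizeof(float));
    cudaMalloc(&cg, nz * nx * sizeof(float));
    cudaMemset(vv, 0, nz * nx * sizeof(float));
    cudaMemset(cg, 0, nz * nx * sizeof(float));
    dim3 block(16, 16);
    dim3 grid((nz + block.x - 1) / block.x, (nx + block.y - 1) / block.y);
    updateVelSketch<<<grid, block>>>(vv, cg, 0.1f, nz, nx);
    cudaDeviceSynchronize();
    printf("2-D update launch: %s\n", cudaGetErrorString(cudaGetLastError()));
    cudaFree(vv); cudaFree(cg);
    return 0;
}
// ------------------------------------------------------------------------------------------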
ba9bcef0d808037a2cd5924f502b67f9c003bfb6.cu
__global__ void cuda_set_sg(int *sxz, int sxbeg, int szbeg, int jsx, int jsz, int ns, int nz) /*< set the positions of sources/geophones >*/ { int id=threadIdx.x+blockDim.x*blockIdx.x; if (id<ns) sxz[id]=(szbeg+id*jsz)+nz*(sxbeg+id*jsx); } __global__ void cuda_ricker_wavelet(float *wlt, float amp, float fm, float dt, int nt) /*< generate ricker wavelet with time deley >*/ { int it=threadIdx.x+blockDim.x*blockIdx.x; if (it<nt) { float tmp = PI*fm*(it*dt-1.0/fm); tmp *=tmp; wlt[it]=amp*(1.0-2.0*tmp)*expf(-tmp); } } __global__ void cuda_add_source(float *p, float *source, int *sxz, int ns, bool add) /*< add==true, add (inject) the source; add==false, subtract the source >*/ { int id=threadIdx.x+blockDim.x*blockIdx.x; if (id<ns) { if (add) p[sxz[id]]+=source[id]; else p[sxz[id]]-=source[id]; } } __global__ void cuda_record(float*p, float *seis, int *gxz, int ng) /*< record the seismogram at time it >*/ { int id=threadIdx.x+blockDim.x*blockIdx.x; if (id<ng) seis[id]=p[gxz[id]]; } __global__ void cuda_step_forward(float *p0, float *p1, float *vv, float dtz, float dtx, int nz, int nx) /*< step forward: dtz=dt/dx; dtx=dt/dz; >*/ { int i1=threadIdx.x+blockIdx.x*blockDim.x; int i2=threadIdx.y+blockIdx.y*blockDim.y; int id=i1+i2*nz; __shared__ float s_p0[Block_Size2+2][Block_Size1+2]; __shared__ float s_p1[Block_Size2+2][Block_Size1+2]; if(threadIdx.x<1) { s_p0[threadIdx.y+1][threadIdx.x]=(blockIdx.x>0)?p0[id-1]:0.0; s_p1[threadIdx.y+1][threadIdx.x]=(blockIdx.x>0)?p1[id-1]:0.0; } if(threadIdx.x>=blockDim.x-1) { s_p0[threadIdx.y+1][threadIdx.x+2]=(blockIdx.x<gridDim.x-1)?p0[id+1]:0.0; s_p1[threadIdx.y+1][threadIdx.x+2]=(blockIdx.x<gridDim.x-1)?p1[id+1]:0.0; } if(threadIdx.y<1) { s_p0[threadIdx.y][threadIdx.x+1]=(blockIdx.y>0)?p1[id-nz]:0.0; s_p1[threadIdx.y][threadIdx.x+1]=(blockIdx.y>0)?p1[id-nz]:0.0; } if(threadIdx.y>=blockDim.y-1) { s_p0[threadIdx.y+2][threadIdx.x+1]=(blockIdx.y<gridDim.y-1)?p1[id+nz]:0.0; s_p1[threadIdx.y+2][threadIdx.x+1]=(blockIdx.y<gridDim.y-1)?p1[id+nz]:0.0; } s_p0[threadIdx.y+1][threadIdx.x+1]=p0[id]; s_p1[threadIdx.y+1][threadIdx.x+1]=p1[id]; __syncthreads(); if (i1<nz && i2<nx) { float v1=vv[id]*dtz; float v2=vv[id]*dtx; float c1=v1*v1*(s_p1[threadIdx.y+1][threadIdx.x+2]-2.0*s_p1[threadIdx.y+1][threadIdx.x+1]+s_p1[threadIdx.y+1][threadIdx.x]); float c2=v2*v2*(s_p1[threadIdx.y+2][threadIdx.x+1]-2.0*s_p1[threadIdx.y+1][threadIdx.x+1]+s_p1[threadIdx.y][threadIdx.x+1]); /* if(i1==0)// top boundary is free surface boundary condition, commentted!! 
{ c1=v1*(-s_p1[threadIdx.y+1][threadIdx.x+1]+s_p1[threadIdx.y+1][threadIdx.x+2] +s_p0[threadIdx.y+1][threadIdx.x+1]-s_p0[threadIdx.y+1][threadIdx.x+2]); if(i2>0 && i2<nx-1) c2=0.5*c2; } */ if(i1==nz-1) /* bottom boundary */ { c1=v1*(s_p1[threadIdx.y+1][threadIdx.x]-s_p1[threadIdx.y+1][threadIdx.x+1] -s_p0[threadIdx.y+1][threadIdx.x]+s_p0[threadIdx.y+1][threadIdx.x+1]); if(i2>0 && i2<nx-1) c2=0.5*c2; } if(i2==0)/* left boundary */ { if(i1>0 && i1<nz-1) c1=0.5*c1; c2=v2*(-s_p1[threadIdx.y+1][threadIdx.x+1]+s_p1[threadIdx.y+2][threadIdx.x+1] +s_p0[threadIdx.y+1][threadIdx.x+1]-s_p0[threadIdx.y+2][threadIdx.x+1]); } if(i2==nx-1) /* right boundary */ { if(i1>0 && i1<nz-1) c1=0.5*c1; c2=v2*(s_p1[threadIdx.y][threadIdx.x+1]-s_p1[threadIdx.y+1][threadIdx.x+1] -s_p0[threadIdx.y][threadIdx.x+1]+s_p0[threadIdx.y+1][threadIdx.x+1]); } p0[id]=2.0*s_p1[threadIdx.y+1][threadIdx.x+1]-s_p0[threadIdx.y+1][threadIdx.x+1]+c1+c2; } } __global__ void cuda_rw_bndr(float *bndr, float *p1, int nz, int nx, bool write) /*< write boundaries out or read them into wavefield variables p>*/ { int id=threadIdx.x+blockIdx.x*blockDim.x; if(write){ if(id<nz) bndr[id]=p1[id];/* left boundary */ else if (id<2*nz) bndr[id]=p1[(id-nz)+nz*(nx-1)];/*right boundary */ else if (id<2*nz+nx) bndr[id]=p1[nz-1+nz*(id-2*nz)];/* bottom boundary */ }else{ if(id<nz) p1[id]=bndr[id];/*left boundary */ else if (id<2*nz) p1[(id-nz)+nz*(nx-1)]=bndr[id];/*right boundary*/ else if (id<2*nz+nx) p1[nz-1+nz*(id-2*nz)]=bndr[id];/*bottom boundary */ } } __global__ void cuda_step_backward(float *illum, float *lap, float *p0, float *p1, float *vv, float dtz, float dtx, int nz, int nx) /*< step backward >*/ { int i1=threadIdx.x+blockIdx.x*blockDim.x; int i2=threadIdx.y+blockIdx.y*blockDim.y; int id=i1+i2*nz; __shared__ float s_p1[Block_Size2+2][Block_Size1+2]; s_p1[threadIdx.y+1][threadIdx.x+1]=p1[id]; if(threadIdx.x<1) { s_p1[threadIdx.y+1][threadIdx.x]=(blockIdx.x>0)?p1[id-1]:0.0; } if(threadIdx.x>=blockDim.x-1) { s_p1[threadIdx.y+1][threadIdx.x+2]=(blockIdx.x<gridDim.x-1)?p1[id+1]:0.0; } if(threadIdx.y<1) { s_p1[threadIdx.y][threadIdx.x+1]=(blockIdx.y>0)?p1[id-nz]:0.0; } if(threadIdx.y>=blockDim.y-1) { s_p1[threadIdx.y+2][threadIdx.x+1]=(blockIdx.y<gridDim.y-1)?p1[id+nz]:0.0; } __syncthreads(); if (i1<nz && i2<nx) { float c1=(s_p1[threadIdx.y+1][threadIdx.x+2]-2.0*s_p1[threadIdx.y+1][threadIdx.x+1]+s_p1[threadIdx.y+1][threadIdx.x]); float c2=(s_p1[threadIdx.y+2][threadIdx.x+1]-2.0*s_p1[threadIdx.y+1][threadIdx.x+1]+s_p1[threadIdx.y][threadIdx.x+1]); lap[id]=c1+c2; float v1=vv[id]*dtz; float v2=vv[id]*dtx; c1*=v1*v1; c2*=v2*v2; p0[id]=2.0*s_p1[threadIdx.y+1][threadIdx.x+1]-p0[id]+c1+c2; illum[id]+=s_p1[threadIdx.y+1][threadIdx.x+1]*s_p1[threadIdx.y+1][threadIdx.x+1]; } } __global__ void cuda_cal_residuals(float *dcal, float *dobs, float *derr, int ng) /*< calculate residual wavefield at the receiver positions dcal: d_{cal} dobs: d_{obs} derr: d_{err}=d_{cal}-d_{obs} >*/ { int id=blockIdx.x*blockDim.x+threadIdx.x; if (id<ng) derr[id]=dcal[id]-dobs[id]; } __global__ void cuda_cal_objective(float *obj, float *err, int ng) /*< calculate the value of objective function: obj >*/ { __shared__ float sdata[Block_Size]; int tid=threadIdx.x; sdata[tid]=0.0f; for(int s=0; s<(ng+Block_Size-1)/Block_Size; s++) { int id=s*blockDim.x+threadIdx.x; float a=(id<ng)?err[id]:0.0f; sdata[tid] += a*a; } __syncthreads(); /* do reduction in shared mem */ for(int s=blockDim.x/2; s>32; s>>=1) { if (threadIdx.x < s) sdata[tid] += sdata[tid + s]; __syncthreads(); } if (tid < 32) { 
if (blockDim.x >= 64) { sdata[tid] += sdata[tid + 32]; } if (blockDim.x >= 32) { sdata[tid] += sdata[tid + 16]; } if (blockDim.x >= 16) { sdata[tid] += sdata[tid + 8]; } if (blockDim.x >= 8) { sdata[tid] += sdata[tid + 4]; } if (blockDim.x >= 4) { sdata[tid] += sdata[tid + 2]; } if (blockDim.x >= 2) { sdata[tid] += sdata[tid + 1]; } } if (tid == 0) { *obj=sdata[0]; } } __global__ void cuda_cal_gradient(float *g1, float *lap, float *gp, int nz, int nx) /*< calculate gradient >*/ { int i1=threadIdx.x+blockIdx.x*blockDim.x; int i2=threadIdx.y+blockIdx.y*blockDim.y; int id=i1+nz*i2; /* Here, the second derivative of sp has been replaced with laplace according to wave equation: second_derivative{p}=v^2 lap{p} */ if (i1<nz && i2<nx) g1[id]+=lap[id]*gp[id]; } __global__ void cuda_scale_gradient(float *g1, float *vv, float *illum, int nz, int nx, bool precon) /*< scale gradient >*/ { int i1=threadIdx.x+blockIdx.x*blockDim.x; int i2=threadIdx.y+blockIdx.y*blockDim.y; int id=i1+nz*i2; if (i1<nz && i2<nx) { float a=vv[id]; if (precon) a*=sqrtf(illum[id]+EPS);/*precondition with residual wavefield illumination*/ g1[id]*=2.0/a; } } __global__ void cuda_cal_beta(float *beta, float *g0, float *g1, float *cg, int N) /*< calculate beta for nonlinear conjugate gradient algorithm configuration requirement: <<<1,Block_Size>>> >*/ { __shared__ float sdata[Block_Size]; __shared__ float tdata[Block_Size]; __shared__ float rdata[Block_Size]; int tid = threadIdx.x; sdata[tid] = 0.0f; tdata[tid] = 0.0f; rdata[tid] = 0.0f; for(int s=0; s<(N+Block_Size-1)/Block_Size; s++) { int id=s*blockDim.x+threadIdx.x; float a=(id<N)?g0[id]:0.0f; float b=(id<N)?g1[id]:0.0f; float c=(id<N)?cg[id]:0.0f; /* HS: Hestenses-Stiefel NLCG algorithm */ sdata[tid] += b*(b-a); // numerator of HS tdata[tid] += c*(b-a); // denominator of HS,DY rdata[tid] += b*b; // numerator of DY /* // PRP: Polark-Ribiere-Polyar NLCG algorithm sdata[tid] += b*(b-a); // numerator tdata[tid] += a*a; // denominator // HS: Hestenses-Stiefel NLCG algorithm sdata[tid] += b*(b-a); // numerator tdata[tid] += c*(b-a); // denominator // FR: Fletcher-Reeves NLCG algorithm sdata[tid] += b*b; // numerator tdata[tid] += a*a; // denominator // PRP: Polark-Ribiere-Polyar NLCG algorithm sdata[tid] += b*(b-a); // numerator tdata[tid] += a*a; // denominator // CD: Fletcher NLCG algorithm sdata[tid] += b*b; // numerator tdata[tid] -= c*a; // denominator // DY: Dai-Yuan NLCG algorithm sdata[tid] += b*b; // numerator tdata[tid] += c*(b-a); // denominator */ } __syncthreads(); /* do reduction in shared mem */ for(int s=blockDim.x/2; s>32; s>>=1) { if (threadIdx.x < s) { sdata[tid]+=sdata[tid+s]; tdata[tid]+=tdata[tid+s]; rdata[tid]+=rdata[tid+s];} __syncthreads(); } if (tid < 32) { if (blockDim.x >=64) { sdata[tid]+=sdata[tid+32]; tdata[tid]+=tdata[tid+32]; rdata[tid]+=rdata[tid+32];} if (blockDim.x >=32) { sdata[tid]+=sdata[tid+16]; tdata[tid]+=tdata[tid+16]; rdata[tid]+=rdata[tid+16];} if (blockDim.x >=16) { sdata[tid]+=sdata[tid+ 8]; tdata[tid]+=tdata[tid+ 8]; rdata[tid]+=rdata[tid+ 8];} if (blockDim.x >= 8) { sdata[tid]+=sdata[tid+ 4]; tdata[tid]+=tdata[tid+ 4]; rdata[tid]+=rdata[tid+ 4];} if (blockDim.x >= 4) { sdata[tid]+=sdata[tid+ 2]; tdata[tid]+=tdata[tid+ 2]; rdata[tid]+=rdata[tid+ 2];} if (blockDim.x >= 2) { sdata[tid]+=sdata[tid+ 1]; tdata[tid]+=tdata[tid+ 1]; rdata[tid]+=rdata[tid+ 1];} } if (tid == 0) { float beta_HS=0.0; float beta_DY=0.0; if(fabsf(tdata[0])>EPS) { beta_HS=sdata[0]/tdata[0]; beta_DY=rdata[0]/tdata[0]; } *beta=max(0.0, min(beta_HS, beta_DY));/* 
Hybrid HS-DY method combined with iteration restart */ } } __global__ void cuda_cal_conjgrad(float *g1, float *cg, float beta, int nz, int nx) /*< calculate nonlinear conjugate gradient >*/ { int i1=blockIdx.x*blockDim.x+threadIdx.x; int i2=blockIdx.y*blockDim.y+threadIdx.y; int id=i1+i2*nz; if (i1<nz && i2<nx) cg[id]=-g1[id]+beta*cg[id]; } __global__ void cuda_cal_epsilon(float *vv, float *cg, float *epsil, int N) /*< calculate estimated stepsize (epsil) according to Taratola's method configuration requirement: <<<1, Block_Size>>> >*/ { __shared__ float sdata[Block_Size];/* find max(|vv(:)|) */ __shared__ float tdata[Block_Size];/* find max(|cg(:)|) */ int tid = threadIdx.x; sdata[tid] = 0.0f; tdata[tid] = 0.0f; for(int s=0; s<(N+Block_Size-1)/Block_Size; s++) { int id=s*blockDim.x+threadIdx.x; float a=(id<N)?fabsf(vv[id]):0.0f; float b=(id<N)?fabsf(cg[id]):0.0f; sdata[tid]= max(sdata[tid], a); tdata[tid]= max(tdata[tid], b); } __syncthreads(); /* do reduction in shared mem */ for(int s=blockDim.x/2; s>32; s>>=1) { if (threadIdx.x < s) {sdata[tid]=max(sdata[tid], sdata[tid+s]);tdata[tid]=max(tdata[tid], tdata[tid+s]);} __syncthreads(); } if (tid < 32) { if (blockDim.x >= 64) { sdata[tid] =max(sdata[tid],sdata[tid + 32]);tdata[tid]=max(tdata[tid], tdata[tid+32]);} if (blockDim.x >= 32) { sdata[tid] =max(sdata[tid],sdata[tid + 16]);tdata[tid]=max(tdata[tid], tdata[tid+16]);} if (blockDim.x >= 16) { sdata[tid] =max(sdata[tid],sdata[tid + 8]);tdata[tid]=max(tdata[tid], tdata[tid+8]);} if (blockDim.x >= 8) { sdata[tid] =max(sdata[tid],sdata[tid + 4]);tdata[tid]=max(tdata[tid], tdata[tid+4]);} if (blockDim.x >= 4) { sdata[tid] =max(sdata[tid],sdata[tid + 2]);tdata[tid]=max(tdata[tid], tdata[tid+2]);} if (blockDim.x >= 2) { sdata[tid] =max(sdata[tid],sdata[tid + 1]);tdata[tid]=max(tdata[tid], tdata[tid+1]);} } if (tid == 0) { if(tdata[0]>EPS) *epsil=0.01*sdata[0]/tdata[0]; else *epsil=0.0;} } __global__ void cuda_cal_vtmp(float *vtmp, float *vv, float *cg, float epsil, int nz, int nx) /*< calculate temporary velocity >*/ { int i1=threadIdx.x+blockIdx.x*blockDim.x; int i2=threadIdx.y+blockIdx.y*blockDim.x; int id=i1+i2*nz; if (i1<nz && i2<nx) vtmp[id]=vv[id]+epsil*cg[id]; } __global__ void cuda_sum_alpha12(float *alpha1, float *alpha2, float *dcaltmp, float *dobs, float *derr, int ng) /*< calculate the numerator and denominator of alpha alpha1: numerator; length=ng alpha2: denominator; length=ng >*/ { int id=threadIdx.x+blockDim.x*blockIdx.x; if(id<ng) { float c=derr[id]; float a=dobs[id]+c;/* since f(mk)-dobs[id]=derr[id], thus f(mk)=b+c; */ float b=dcaltmp[id]-a;/* f(mk+epsil*cg)-f(mk) */ alpha1[id]-=b*c; alpha2[id]+=b*b; } } __global__ void cuda_cal_alpha(float *alpha, float *alpha1, float *alpha2, float epsil, int ng) /*< calculate searched stepsize (alpha) according to Taratola's method configuration requirement: <<<1, Block_Size>>> >*/ { __shared__ float sdata[Block_Size]; __shared__ float tdata[Block_Size]; int tid=threadIdx.x; sdata[tid]=0.0f; tdata[tid]=0.0f; for(int s=0; s<(ng+Block_Size-1)/Block_Size; s++) { int id=s*blockDim.x+threadIdx.x; float a=(id<ng)?alpha1[id]:0.0f; float b=(id<ng)?alpha2[id]:0.0f; sdata[tid] +=a; tdata[tid] +=b; } __syncthreads(); /* do reduction in shared mem */ for(int s=blockDim.x/2; s>32; s>>=1) { if (threadIdx.x < s) { sdata[tid] += sdata[tid + s];tdata[tid] += tdata[tid + s]; } __syncthreads(); } if (tid < 32) { if (blockDim.x >= 64) { sdata[tid] += sdata[tid + 32]; tdata[tid] += tdata[tid + 32];} if (blockDim.x >= 32) { sdata[tid] += sdata[tid + 16]; 
tdata[tid] += tdata[tid + 16];} if (blockDim.x >= 16) { sdata[tid] += sdata[tid + 8]; tdata[tid] += tdata[tid + 8];} if (blockDim.x >= 8) { sdata[tid] += sdata[tid + 4]; tdata[tid] += tdata[tid + 4];} if (blockDim.x >= 4) { sdata[tid] += sdata[tid + 2]; tdata[tid] += tdata[tid + 2];} if (blockDim.x >= 2) { sdata[tid] += sdata[tid + 1]; tdata[tid] += tdata[tid + 1];} } if (tid == 0) { if(tdata[0]>EPS) *alpha=epsil*sdata[0]/(tdata[0]+EPS); else *alpha=0.0;} } __global__ void cuda_update_vel(float *vv, float *cg, float alpha, int nz, int nx) /*< update velocity model with obtained stepsize (alpha) >*/ { int i1=threadIdx.x+blockIdx.x*blockDim.x; int i2=threadIdx.y+blockIdx.y*blockDim.x; int id=i1+i2*nz; if (i1<nz && i2<nx) vv[id]=vv[id]+alpha*cg[id]; } __global__ void cuda_bell_smoothz(float *g, float *smg, int rbell, int nz, int nx) /*< smoothing with gaussian function >*/ { int i; int i1=threadIdx.x+blockIdx.x*blockDim.x; int i2=threadIdx.y+blockIdx.y*blockDim.x; int id=i1+i2*nz; if(i1<nz && i2<nx) { float s=0; for(i=-rbell; i<=rbell; i++) if(i1+i>=0 && i1+i<nz) s+=expf(-(2.0*i*i)/rbell)*g[id+i]; smg[id]=s; } } __global__ void cuda_bell_smoothx(float *g, float *smg, int rbell, int nz, int nx) /*< smoothing with gaussian function >*/ { int i; int i1=threadIdx.x+blockIdx.x*blockDim.x; int i2=threadIdx.y+blockIdx.y*blockDim.x; int id=i1+i2*nz; if(i1<nz && i2<nx) { float s=0; for(i=-rbell; i<=rbell; i++) if(i2+i>=0 && i2+i<nx) s+=expf(-(2.0*i*i)/rbell)*g[id+nz*i]; smg[id]=s; } }
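// --- editor's sketch (not part of the files above) ----------------------------------------
// cuda_cal_objective and the other <<<1, Block_Size>>> reductions above let each thread
// accumulate a strided partial result in shared memory and then fold the block with a tree
// reduction whose last warp-sized steps are unrolled without __syncthreads; that relies on
// implicit warp-synchronous execution and, on Volta-and-later GPUs, needs volatile shared
// accesses or __syncwarp(). Below is a self-contained sketch of the same pattern with
// explicit synchronization throughout; sumKernel and BLOCK are illustrative names.
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

#define BLOCK 512   // plays the role of Block_Size above

__global__ void sumKernel(const float *x, float *out, int n){
    __shared__ float sdata[BLOCK];
    int tid = threadIdx.x;
    float acc = 0.0f;
    for (int i = tid; i < n; i += BLOCK) acc += x[i];   // per-thread strided partial sum
    sdata[tid] = acc;
    __syncthreads();
    for (int s = BLOCK / 2; s > 0; s >>= 1) {           // fold shared memory in half
        if (tid < s) sdata[tid] += sdata[tid + s];
        __syncthreads();                                // kept for every step
    }
    if (tid == 0) *out = sdata[0];
}

int main(){
    const int n = 100000;
    float *h = (float *) malloc(n * sizeof(float));
    for (int i = 0; i < n; i++) h[i] = 1.0f;
    float *dx, *dout;
    cudaMalloc(&dx, n * sizeof(float));
    cudaMalloc(&dout, sizeof(float));
    cudaMemcpy(dx, h, n * sizeof(float), cudaMemcpyHostToDevice);
    sumKernel<<<1, BLOCK>>>(dx, dout, n);   // same <<<1, Block_Size>>> convention as above
    float result;
    cudaMemcpy(&result, dout, sizeof(float), cudaMemcpyDeviceToHost);
    printf("sum = %f (expected %d)\n", result, n);
    cudaFree(dx); cudaFree(dout); free(h);
    return 0;
}
// ------------------------------------------------------------------------------------------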
eb190bd64d0289afe2a2ed230316e2e2b84895cb.hip
// !!! This is a file automatically generated by hipify!!! /* MD5 Ported to CUDA by Jonathan Ohlsson ([email protected]) for md5crack (https://github.com/irvin93d/md5crack) converted to C++ class by Frank Thilo ([email protected]) for bzflag (http://www.bzflag.org) based on: md5.h and md5.c reference implemantion of RFC 1321 Copyright (C) 1991-2, RSA Data Security, Inc. Created 1991. All rights reserved. License to copy and use this software is granted provided that it is identified as the "RSA Data Security, Inc. MD5 Message-Digest Algorithm" in all material mentioning or referencing this software or this function. License is also granted to make and use derivative works provided that such works are identified as "derived from the RSA Data Security, Inc. MD5 Message-Digest Algorithm" in all material mentioning or referencing the derived work. RSA Data Security, Inc. makes no representations concerning either the merchantability of this software or the suitability of this software for any particular purpose. It is provided "as is" without express or implied warranty of any kind. These notices must be retained in any copies of any part of this documentation and/or software. */ /* interface header */ #include "md5.hpp" /* system implementation headers */ #include <cstdio> #include "hip/hip_runtime.h" #include "hip/hip_runtime.h" #include "device_launch_parameters.h" // Constants for MD5Transform routine. #define S11 7 #define S12 12 #define S13 17 #define S14 22 #define S21 5 #define S22 9 #define S23 14 #define S24 20 #define S31 4 #define S32 11 #define S33 16 #define S34 23 #define S41 6 #define S42 10 #define S43 15 #define S44 21 /////////////////////////////////////////////// // F, G, H and I are basic MD5 functions. __host__ __device__ inline MD5::uint4 MD5::F(uint4 x, uint4 y, uint4 z) { return x&y | ~x&z; } __host__ __device__ inline MD5::uint4 MD5::G(uint4 x, uint4 y, uint4 z) { return x&z | y&~z; } __host__ __device__ inline MD5::uint4 MD5::H(uint4 x, uint4 y, uint4 z) { return x^y^z; } __host__ __device__ inline MD5::uint4 MD5::I(uint4 x, uint4 y, uint4 z) { return y ^ (x | ~z); } // rotate_left rotates x left n bits. __host__ __device__ inline MD5::uint4 MD5::rotate_left(uint4 x, int n) { return (x << n) | (x >> (32-n)); } // FF, GG, HH, and II transformations for rounds 1, 2, 3, and 4. // Rotation is separate from addition to prevent recomputation. __host__ __device__ inline void MD5::FF(uint4 &a, uint4 b, uint4 c, uint4 d, uint4 x, uint4 s, uint4 ac) { a = rotate_left(a+ F(b,c,d) + x + ac, s) + b; } __host__ __device__ inline void MD5::GG(uint4 &a, uint4 b, uint4 c, uint4 d, uint4 x, uint4 s, uint4 ac) { a = rotate_left(a + G(b,c,d) + x + ac, s) + b; } __host__ __device__ inline void MD5::HH(uint4 &a, uint4 b, uint4 c, uint4 d, uint4 x, uint4 s, uint4 ac) { a = rotate_left(a + H(b,c,d) + x + ac, s) + b; } __host__ __device__ inline void MD5::II(uint4 &a, uint4 b, uint4 c, uint4 d, uint4 x, uint4 s, uint4 ac) { a = rotate_left(a + I(b,c,d) + x + ac, s) + b; } ////////////////////////////////////////////// // default ctor, just initailize __host__ __device__ MD5::MD5() { init(); } ////////////////////////////////////////////// // nifty shortcut ctor, compute MD5 for string and finalize it right away __host__ __device__ MD5::MD5(const char * text, size_t len) { init(); update(text, len); finalize(); } ////////////////////////////// __host__ __device__ void MD5::init() { finalized=false; count[0] = 0; count[1] = 0; // load magic initialization constants. 
state[0] = 0x67452301; state[1] = 0xefcdab89; state[2] = 0x98badcfe; state[3] = 0x10325476; } ////////////////////////////// // decodes input (unsigned char) into output (uint4). Assumes len is a multiple of 4. __host__ __device__ void MD5::decode(uint4 output[], const uint1 input[], size_type len) { for (unsigned int i = 0, j = 0; j < len; i++, j += 4) output[i] = ((uint4)input[j]) | (((uint4)input[j+1]) << 8) | (((uint4)input[j+2]) << 16) | (((uint4)input[j+3]) << 24); } ////////////////////////////// // encodes input (uint4) into output (unsigned char). Assumes len is // a multiple of 4. __host__ __device__ void MD5::encode(uint1 output[], const uint4 input[], size_type len) { for (size_type i = 0, j = 0; j < len; i++, j += 4) { output[j] = input[i] & 0xff; output[j+1] = (input[i] >> 8) & 0xff; output[j+2] = (input[i] >> 16) & 0xff; output[j+3] = (input[i] >> 24) & 0xff; } } ////////////////////////////// // apply MD5 algo on a block __host__ __device__ void MD5::transform(const uint1 block[blocksize]) { uint4 a = state[0], b = state[1], c = state[2], d = state[3], x[16]; decode (x, block, blocksize); /* Round 1 */ FF (a, b, c, d, x[ 0], S11, 0xd76aa478); /* 1 */ FF (d, a, b, c, x[ 1], S12, 0xe8c7b756); /* 2 */ FF (c, d, a, b, x[ 2], S13, 0x242070db); /* 3 */ FF (b, c, d, a, x[ 3], S14, 0xc1bdceee); /* 4 */ FF (a, b, c, d, x[ 4], S11, 0xf57c0faf); /* 5 */ FF (d, a, b, c, x[ 5], S12, 0x4787c62a); /* 6 */ FF (c, d, a, b, x[ 6], S13, 0xa8304613); /* 7 */ FF (b, c, d, a, x[ 7], S14, 0xfd469501); /* 8 */ FF (a, b, c, d, x[ 8], S11, 0x698098d8); /* 9 */ FF (d, a, b, c, x[ 9], S12, 0x8b44f7af); /* 10 */ FF (c, d, a, b, x[10], S13, 0xffff5bb1); /* 11 */ FF (b, c, d, a, x[11], S14, 0x895cd7be); /* 12 */ FF (a, b, c, d, x[12], S11, 0x6b901122); /* 13 */ FF (d, a, b, c, x[13], S12, 0xfd987193); /* 14 */ FF (c, d, a, b, x[14], S13, 0xa679438e); /* 15 */ FF (b, c, d, a, x[15], S14, 0x49b40821); /* 16 */ /* Round 2 */ GG (a, b, c, d, x[ 1], S21, 0xf61e2562); /* 17 */ GG (d, a, b, c, x[ 6], S22, 0xc040b340); /* 18 */ GG (c, d, a, b, x[11], S23, 0x265e5a51); /* 19 */ GG (b, c, d, a, x[ 0], S24, 0xe9b6c7aa); /* 20 */ GG (a, b, c, d, x[ 5], S21, 0xd62f105d); /* 21 */ GG (d, a, b, c, x[10], S22, 0x2441453); /* 22 */ GG (c, d, a, b, x[15], S23, 0xd8a1e681); /* 23 */ GG (b, c, d, a, x[ 4], S24, 0xe7d3fbc8); /* 24 */ GG (a, b, c, d, x[ 9], S21, 0x21e1cde6); /* 25 */ GG (d, a, b, c, x[14], S22, 0xc33707d6); /* 26 */ GG (c, d, a, b, x[ 3], S23, 0xf4d50d87); /* 27 */ GG (b, c, d, a, x[ 8], S24, 0x455a14ed); /* 28 */ GG (a, b, c, d, x[13], S21, 0xa9e3e905); /* 29 */ GG (d, a, b, c, x[ 2], S22, 0xfcefa3f8); /* 30 */ GG (c, d, a, b, x[ 7], S23, 0x676f02d9); /* 31 */ GG (b, c, d, a, x[12], S24, 0x8d2a4c8a); /* 32 */ /* Round 3 */ HH (a, b, c, d, x[ 5], S31, 0xfffa3942); /* 33 */ HH (d, a, b, c, x[ 8], S32, 0x8771f681); /* 34 */ HH (c, d, a, b, x[11], S33, 0x6d9d6122); /* 35 */ HH (b, c, d, a, x[14], S34, 0xfde5380c); /* 36 */ HH (a, b, c, d, x[ 1], S31, 0xa4beea44); /* 37 */ HH (d, a, b, c, x[ 4], S32, 0x4bdecfa9); /* 38 */ HH (c, d, a, b, x[ 7], S33, 0xf6bb4b60); /* 39 */ HH (b, c, d, a, x[10], S34, 0xbebfbc70); /* 40 */ HH (a, b, c, d, x[13], S31, 0x289b7ec6); /* 41 */ HH (d, a, b, c, x[ 0], S32, 0xeaa127fa); /* 42 */ HH (c, d, a, b, x[ 3], S33, 0xd4ef3085); /* 43 */ HH (b, c, d, a, x[ 6], S34, 0x4881d05); /* 44 */ HH (a, b, c, d, x[ 9], S31, 0xd9d4d039); /* 45 */ HH (d, a, b, c, x[12], S32, 0xe6db99e5); /* 46 */ HH (c, d, a, b, x[15], S33, 0x1fa27cf8); /* 47 */ HH (b, c, d, a, x[ 2], S34, 0xc4ac5665); /* 48 */ 
/* Round 4 */ II (a, b, c, d, x[ 0], S41, 0xf4292244); /* 49 */ II (d, a, b, c, x[ 7], S42, 0x432aff97); /* 50 */ II (c, d, a, b, x[14], S43, 0xab9423a7); /* 51 */ II (b, c, d, a, x[ 5], S44, 0xfc93a039); /* 52 */ II (a, b, c, d, x[12], S41, 0x655b59c3); /* 53 */ II (d, a, b, c, x[ 3], S42, 0x8f0ccc92); /* 54 */ II (c, d, a, b, x[10], S43, 0xffeff47d); /* 55 */ II (b, c, d, a, x[ 1], S44, 0x85845dd1); /* 56 */ II (a, b, c, d, x[ 8], S41, 0x6fa87e4f); /* 57 */ II (d, a, b, c, x[15], S42, 0xfe2ce6e0); /* 58 */ II (c, d, a, b, x[ 6], S43, 0xa3014314); /* 59 */ II (b, c, d, a, x[13], S44, 0x4e0811a1); /* 60 */ II (a, b, c, d, x[ 4], S41, 0xf7537e82); /* 61 */ II (d, a, b, c, x[11], S42, 0xbd3af235); /* 62 */ II (c, d, a, b, x[ 2], S43, 0x2ad7d2bb); /* 63 */ II (b, c, d, a, x[ 9], S44, 0xeb86d391); /* 64 */ state[0] += a; state[1] += b; state[2] += c; state[3] += d; // Zeroize sensitive information. memset(x, 0, sizeof x); } ////////////////////////////// // MD5 block update operation. Continues an MD5 message-digest // operation, processing another message block __host__ __device__ void MD5::update(const unsigned char input[], size_type length) { // compute number of bytes mod 64 size_type index = count[0] / 8 % blocksize; // Update number of bits if ((count[0] += (length << 3)) < (length << 3)) count[1]++; count[1] += (length >> 29); // number of bytes we need to fill in buffer size_type firstpart = 64 - index; size_type i; // transform as many times as possible. if (length >= firstpart) { // fill buffer first, transform memcpy(&buffer[index], input, firstpart); transform(buffer); // transform chunks of blocksize (64 bytes) for (i = firstpart; i + blocksize <= length; i += blocksize) transform(&input[i]); index = 0; } else i = 0; // buffer remaining input memcpy(&buffer[index], &input[i], length-i); } ////////////////////////////// // for convenience provide a verson with signed char __host__ __device__ void MD5::update(const char input[], size_type length) { update((const unsigned char*)input, length); } ////////////////////////////// // MD5 finalization. Ends an MD5 message-digest operation, writing the // the message digest and zeroizing the context. __host__ __device__ MD5& MD5::finalize() { static unsigned char padding[64] = { 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; if (!finalized) { // Save number of bits unsigned char bits[8]; encode(bits, count, 8); // pad out to 56 mod 64. size_type index = count[0] / 8 % 64; size_type padLen = (index < 56) ? (56 - index) : (120 - index); update(padding, padLen); // Append length (before padding) update(bits, 8); // Store state in digest encode(digest, state, 16); // Zeroize sensitive information. 
memset(buffer, 0, sizeof buffer); memset(count, 0, sizeof count); finalized=true; } return *this; } ////////////////////////////// __host__ __device__ void MD5::get_digest(unsigned char* dst) { memcpy(dst, this->digest, 16); } ////////////////////////////// /* std::ostream& operator<<(std::ostream& out, MD5 md5) */ /* { */ /* char buf[33]; */ /* md5.hexdigest(buf); */ /* return out << std::string(buf); */ /* } */ ////////////////////////////// std::string md5(const std::string str) { MD5 md5 = MD5(str.c_str(), str.length()); unsigned char result[16]; // 128 bit md5.get_digest(result); // load the result return hexdigest(result); } // return hex representation of digest as string // REMEMBER, buf has len 33!!! std::string hexdigest(unsigned char * digest) { char buf[33]; for (int i=0; i<16; i++) sprintf(buf+i*2, "%02x", digest[i]); buf[32]=0; return std::string(buf); } // encode hex representation to binary void hexencode(char const * encode, unsigned char *buf) { for(int i=0 ; i<16 ; i++){ sscanf(encode + 2*i, "%02x", buf + i); } buf[16] = 0; }
eb190bd64d0289afe2a2ed230316e2e2b84895cb.cu
/* MD5 Ported to CUDA by Jonathan Ohlsson ([email protected]) for md5crack (https://github.com/irvin93d/md5crack) converted to C++ class by Frank Thilo ([email protected]) for bzflag (http://www.bzflag.org) based on: md5.h and md5.c reference implemantion of RFC 1321 Copyright (C) 1991-2, RSA Data Security, Inc. Created 1991. All rights reserved. License to copy and use this software is granted provided that it is identified as the "RSA Data Security, Inc. MD5 Message-Digest Algorithm" in all material mentioning or referencing this software or this function. License is also granted to make and use derivative works provided that such works are identified as "derived from the RSA Data Security, Inc. MD5 Message-Digest Algorithm" in all material mentioning or referencing the derived work. RSA Data Security, Inc. makes no representations concerning either the merchantability of this software or the suitability of this software for any particular purpose. It is provided "as is" without express or implied warranty of any kind. These notices must be retained in any copies of any part of this documentation and/or software. */ /* interface header */ #include "md5.hpp" /* system implementation headers */ #include <cstdio> #include "cuda.h" #include "cuda_runtime.h" #include "device_launch_parameters.h" // Constants for MD5Transform routine. #define S11 7 #define S12 12 #define S13 17 #define S14 22 #define S21 5 #define S22 9 #define S23 14 #define S24 20 #define S31 4 #define S32 11 #define S33 16 #define S34 23 #define S41 6 #define S42 10 #define S43 15 #define S44 21 /////////////////////////////////////////////// // F, G, H and I are basic MD5 functions. __host__ __device__ inline MD5::uint4 MD5::F(uint4 x, uint4 y, uint4 z) { return x&y | ~x&z; } __host__ __device__ inline MD5::uint4 MD5::G(uint4 x, uint4 y, uint4 z) { return x&z | y&~z; } __host__ __device__ inline MD5::uint4 MD5::H(uint4 x, uint4 y, uint4 z) { return x^y^z; } __host__ __device__ inline MD5::uint4 MD5::I(uint4 x, uint4 y, uint4 z) { return y ^ (x | ~z); } // rotate_left rotates x left n bits. __host__ __device__ inline MD5::uint4 MD5::rotate_left(uint4 x, int n) { return (x << n) | (x >> (32-n)); } // FF, GG, HH, and II transformations for rounds 1, 2, 3, and 4. // Rotation is separate from addition to prevent recomputation. __host__ __device__ inline void MD5::FF(uint4 &a, uint4 b, uint4 c, uint4 d, uint4 x, uint4 s, uint4 ac) { a = rotate_left(a+ F(b,c,d) + x + ac, s) + b; } __host__ __device__ inline void MD5::GG(uint4 &a, uint4 b, uint4 c, uint4 d, uint4 x, uint4 s, uint4 ac) { a = rotate_left(a + G(b,c,d) + x + ac, s) + b; } __host__ __device__ inline void MD5::HH(uint4 &a, uint4 b, uint4 c, uint4 d, uint4 x, uint4 s, uint4 ac) { a = rotate_left(a + H(b,c,d) + x + ac, s) + b; } __host__ __device__ inline void MD5::II(uint4 &a, uint4 b, uint4 c, uint4 d, uint4 x, uint4 s, uint4 ac) { a = rotate_left(a + I(b,c,d) + x + ac, s) + b; } ////////////////////////////////////////////// // default ctor, just initailize __host__ __device__ MD5::MD5() { init(); } ////////////////////////////////////////////// // nifty shortcut ctor, compute MD5 for string and finalize it right away __host__ __device__ MD5::MD5(const char * text, size_t len) { init(); update(text, len); finalize(); } ////////////////////////////// __host__ __device__ void MD5::init() { finalized=false; count[0] = 0; count[1] = 0; // load magic initialization constants. 
state[0] = 0x67452301; state[1] = 0xefcdab89; state[2] = 0x98badcfe; state[3] = 0x10325476; } ////////////////////////////// // decodes input (unsigned char) into output (uint4). Assumes len is a multiple of 4. __host__ __device__ void MD5::decode(uint4 output[], const uint1 input[], size_type len) { for (unsigned int i = 0, j = 0; j < len; i++, j += 4) output[i] = ((uint4)input[j]) | (((uint4)input[j+1]) << 8) | (((uint4)input[j+2]) << 16) | (((uint4)input[j+3]) << 24); } ////////////////////////////// // encodes input (uint4) into output (unsigned char). Assumes len is // a multiple of 4. __host__ __device__ void MD5::encode(uint1 output[], const uint4 input[], size_type len) { for (size_type i = 0, j = 0; j < len; i++, j += 4) { output[j] = input[i] & 0xff; output[j+1] = (input[i] >> 8) & 0xff; output[j+2] = (input[i] >> 16) & 0xff; output[j+3] = (input[i] >> 24) & 0xff; } } ////////////////////////////// // apply MD5 algo on a block __host__ __device__ void MD5::transform(const uint1 block[blocksize]) { uint4 a = state[0], b = state[1], c = state[2], d = state[3], x[16]; decode (x, block, blocksize); /* Round 1 */ FF (a, b, c, d, x[ 0], S11, 0xd76aa478); /* 1 */ FF (d, a, b, c, x[ 1], S12, 0xe8c7b756); /* 2 */ FF (c, d, a, b, x[ 2], S13, 0x242070db); /* 3 */ FF (b, c, d, a, x[ 3], S14, 0xc1bdceee); /* 4 */ FF (a, b, c, d, x[ 4], S11, 0xf57c0faf); /* 5 */ FF (d, a, b, c, x[ 5], S12, 0x4787c62a); /* 6 */ FF (c, d, a, b, x[ 6], S13, 0xa8304613); /* 7 */ FF (b, c, d, a, x[ 7], S14, 0xfd469501); /* 8 */ FF (a, b, c, d, x[ 8], S11, 0x698098d8); /* 9 */ FF (d, a, b, c, x[ 9], S12, 0x8b44f7af); /* 10 */ FF (c, d, a, b, x[10], S13, 0xffff5bb1); /* 11 */ FF (b, c, d, a, x[11], S14, 0x895cd7be); /* 12 */ FF (a, b, c, d, x[12], S11, 0x6b901122); /* 13 */ FF (d, a, b, c, x[13], S12, 0xfd987193); /* 14 */ FF (c, d, a, b, x[14], S13, 0xa679438e); /* 15 */ FF (b, c, d, a, x[15], S14, 0x49b40821); /* 16 */ /* Round 2 */ GG (a, b, c, d, x[ 1], S21, 0xf61e2562); /* 17 */ GG (d, a, b, c, x[ 6], S22, 0xc040b340); /* 18 */ GG (c, d, a, b, x[11], S23, 0x265e5a51); /* 19 */ GG (b, c, d, a, x[ 0], S24, 0xe9b6c7aa); /* 20 */ GG (a, b, c, d, x[ 5], S21, 0xd62f105d); /* 21 */ GG (d, a, b, c, x[10], S22, 0x2441453); /* 22 */ GG (c, d, a, b, x[15], S23, 0xd8a1e681); /* 23 */ GG (b, c, d, a, x[ 4], S24, 0xe7d3fbc8); /* 24 */ GG (a, b, c, d, x[ 9], S21, 0x21e1cde6); /* 25 */ GG (d, a, b, c, x[14], S22, 0xc33707d6); /* 26 */ GG (c, d, a, b, x[ 3], S23, 0xf4d50d87); /* 27 */ GG (b, c, d, a, x[ 8], S24, 0x455a14ed); /* 28 */ GG (a, b, c, d, x[13], S21, 0xa9e3e905); /* 29 */ GG (d, a, b, c, x[ 2], S22, 0xfcefa3f8); /* 30 */ GG (c, d, a, b, x[ 7], S23, 0x676f02d9); /* 31 */ GG (b, c, d, a, x[12], S24, 0x8d2a4c8a); /* 32 */ /* Round 3 */ HH (a, b, c, d, x[ 5], S31, 0xfffa3942); /* 33 */ HH (d, a, b, c, x[ 8], S32, 0x8771f681); /* 34 */ HH (c, d, a, b, x[11], S33, 0x6d9d6122); /* 35 */ HH (b, c, d, a, x[14], S34, 0xfde5380c); /* 36 */ HH (a, b, c, d, x[ 1], S31, 0xa4beea44); /* 37 */ HH (d, a, b, c, x[ 4], S32, 0x4bdecfa9); /* 38 */ HH (c, d, a, b, x[ 7], S33, 0xf6bb4b60); /* 39 */ HH (b, c, d, a, x[10], S34, 0xbebfbc70); /* 40 */ HH (a, b, c, d, x[13], S31, 0x289b7ec6); /* 41 */ HH (d, a, b, c, x[ 0], S32, 0xeaa127fa); /* 42 */ HH (c, d, a, b, x[ 3], S33, 0xd4ef3085); /* 43 */ HH (b, c, d, a, x[ 6], S34, 0x4881d05); /* 44 */ HH (a, b, c, d, x[ 9], S31, 0xd9d4d039); /* 45 */ HH (d, a, b, c, x[12], S32, 0xe6db99e5); /* 46 */ HH (c, d, a, b, x[15], S33, 0x1fa27cf8); /* 47 */ HH (b, c, d, a, x[ 2], S34, 0xc4ac5665); /* 48 */ 
/* Round 4 */ II (a, b, c, d, x[ 0], S41, 0xf4292244); /* 49 */ II (d, a, b, c, x[ 7], S42, 0x432aff97); /* 50 */ II (c, d, a, b, x[14], S43, 0xab9423a7); /* 51 */ II (b, c, d, a, x[ 5], S44, 0xfc93a039); /* 52 */ II (a, b, c, d, x[12], S41, 0x655b59c3); /* 53 */ II (d, a, b, c, x[ 3], S42, 0x8f0ccc92); /* 54 */ II (c, d, a, b, x[10], S43, 0xffeff47d); /* 55 */ II (b, c, d, a, x[ 1], S44, 0x85845dd1); /* 56 */ II (a, b, c, d, x[ 8], S41, 0x6fa87e4f); /* 57 */ II (d, a, b, c, x[15], S42, 0xfe2ce6e0); /* 58 */ II (c, d, a, b, x[ 6], S43, 0xa3014314); /* 59 */ II (b, c, d, a, x[13], S44, 0x4e0811a1); /* 60 */ II (a, b, c, d, x[ 4], S41, 0xf7537e82); /* 61 */ II (d, a, b, c, x[11], S42, 0xbd3af235); /* 62 */ II (c, d, a, b, x[ 2], S43, 0x2ad7d2bb); /* 63 */ II (b, c, d, a, x[ 9], S44, 0xeb86d391); /* 64 */ state[0] += a; state[1] += b; state[2] += c; state[3] += d; // Zeroize sensitive information. memset(x, 0, sizeof x); } ////////////////////////////// // MD5 block update operation. Continues an MD5 message-digest // operation, processing another message block __host__ __device__ void MD5::update(const unsigned char input[], size_type length) { // compute number of bytes mod 64 size_type index = count[0] / 8 % blocksize; // Update number of bits if ((count[0] += (length << 3)) < (length << 3)) count[1]++; count[1] += (length >> 29); // number of bytes we need to fill in buffer size_type firstpart = 64 - index; size_type i; // transform as many times as possible. if (length >= firstpart) { // fill buffer first, transform memcpy(&buffer[index], input, firstpart); transform(buffer); // transform chunks of blocksize (64 bytes) for (i = firstpart; i + blocksize <= length; i += blocksize) transform(&input[i]); index = 0; } else i = 0; // buffer remaining input memcpy(&buffer[index], &input[i], length-i); } ////////////////////////////// // for convenience provide a verson with signed char __host__ __device__ void MD5::update(const char input[], size_type length) { update((const unsigned char*)input, length); } ////////////////////////////// // MD5 finalization. Ends an MD5 message-digest operation, writing the // the message digest and zeroizing the context. __host__ __device__ MD5& MD5::finalize() { static unsigned char padding[64] = { 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; if (!finalized) { // Save number of bits unsigned char bits[8]; encode(bits, count, 8); // pad out to 56 mod 64. size_type index = count[0] / 8 % 64; size_type padLen = (index < 56) ? (56 - index) : (120 - index); update(padding, padLen); // Append length (before padding) update(bits, 8); // Store state in digest encode(digest, state, 16); // Zeroize sensitive information. 
memset(buffer, 0, sizeof buffer); memset(count, 0, sizeof count); finalized=true; } return *this; } ////////////////////////////// __host__ __device__ void MD5::get_digest(unsigned char* dst) { memcpy(dst, this->digest, 16); } ////////////////////////////// /* std::ostream& operator<<(std::ostream& out, MD5 md5) */ /* { */ /* char buf[33]; */ /* md5.hexdigest(buf); */ /* return out << std::string(buf); */ /* } */ ////////////////////////////// std::string md5(const std::string str) { MD5 md5 = MD5(str.c_str(), str.length()); unsigned char result[16]; // 128 bit md5.get_digest(result); // load the result return hexdigest(result); } // return hex representation of digest as string // REMEMBER, buf has len 33!!! std::string hexdigest(unsigned char * digest) { char buf[33]; for (int i=0; i<16; i++) sprintf(buf+i*2, "%02x", digest[i]); buf[32]=0; return std::string(buf); } // encode hex representation to binary void hexencode(char const * encode, unsigned char *buf) { for(int i=0 ; i<16 ; i++){ sscanf(encode + 2*i, "%02x", buf + i); } buf[16] = 0; }
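Since the MD5 class above is compiled for both host and device, candidate strings can be hashed directly inside a kernel. The sketch below is a minimal, hypothetical illustration of that use (the candidate buffer layout, names, and the winner-takes-last match reporting are assumptions, not part of md5crack itself):

#include "md5.hpp"

// Each thread hashes one fixed-length candidate and compares the digest
// against a 16-byte target digest; on a match it records its own index.
__global__ void md5_search(const char *candidates, size_t candidateLen,
                           int numCandidates, const unsigned char *target,
                           int *foundIndex)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= numCandidates)
        return;

    // shortcut ctor: init + update + finalize in one step
    MD5 md5(candidates + (size_t)idx * candidateLen, candidateLen);
    unsigned char digest[16];
    md5.get_digest(digest);

    bool match = true;
    for (int i = 0; i < 16; i++)
        if (digest[i] != target[i]) { match = false; break; }

    if (match)
        *foundIndex = idx;   // benign race: any matching thread may win
}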
01ae3f6b80f893c4878018ffc417c99935363064.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

// Grid-stride matrix multiply: each thread walks the num x num output matrix
// in steps of the full grid and computes dc = da * db.
__global__ void multMatriz(float *da, float *db, float *dc, int num){
    int j = threadIdx.x + blockIdx.x * blockDim.x;
    int i = threadIdx.y + blockIdx.y * blockDim.y;
    while (j < num) {
        while (i < num) {
            float sum = 0.0f;  // reset the accumulator for every output element
            for (unsigned int k = 0; k < num; k++)
                sum += da[i * num + k] * db[k * num + j];
            dc[i * num + j] = sum;
            i += gridDim.y * blockDim.y;
        }
        j += gridDim.x * blockDim.x;
        i = threadIdx.y + blockIdx.y * blockDim.y;
    }
}
01ae3f6b80f893c4878018ffc417c99935363064.cu
#include "includes.h" __global__ void multMatriz(float *da, float *db, float *dc, int num){ float sum=0; int j = threadIdx.x + blockIdx.x * blockDim.x; int i = threadIdx.y + blockIdx.y * blockDim.y; while(j<num){ while(i<num){ for (unsigned int k = 0; k<num; k++) sum += da[i * num + k] * db[k * num + j]; dc[i*num + j] = (float) sum; i += gridDim.y * blockDim.y; } j+=gridDim.x * blockDim.x; i = threadIdx.y + blockIdx.y * blockDim.y; } }
39cea6c5599c99246a2cf00b98e0ddd3a11c9308.hip
// !!! This is a file automatically generated by hipify!!! #define FW_ENABLE_ASSERT #include "bvh/BVHNode.hpp" #include "bvh/BatchSplitBVHBuilder.hpp" #include "base/Array.hpp" #include "base/Timer.hpp" #include <thrust/execution_policy.h> #include <thrust/iterator/constant_iterator.h> #include <thrust/iterator/discard_iterator.h> #include <thrust/iterator/zip_iterator.h> #include <thrust/partition.h> #include <thrust/reduce.h> #include <thrust/scan.h> #include <thrust/sort.h> #include <thrust/transform.h> #include <thrust/transform_reduce.h> #include <thrust/transform_scan.h> #define BHD __device__ // Copied from Platform.hpp since I don't want to pull that into the device. // batch processing (how many ops at the price of one) #define n_triBatchSize 1 #define n_nodeBatchSize 1 #define n_SAHTriangleCost 1.0f #define n_SAHNodeCost 1.0f using FW::S32; using FW::F32; using FW::AABB; S32 BHD roundToTriangleBatchSize(S32 n) { return ((n + n_triBatchSize - 1) / n_triBatchSize)*n_triBatchSize; } S32 BHD roundToNodeBatchSize(S32 n) { return ((n + n_nodeBatchSize - 1) / n_nodeBatchSize)*n_nodeBatchSize; } float BHD getTriangleCost(S32 n) { return roundToTriangleBatchSize(n) * n_SAHTriangleCost; } float BHD getNodeCost(S32 n) { return roundToNodeBatchSize(n) * n_SAHNodeCost; } float BHD getCost(int numChildNodes, int numTris) { return getNodeCost(numChildNodes) + getTriangleCost(numTris); } typedef thrust::tuple<AABB, S32, AABB, S32> BIBITuple; typedef thrust::tuple<AABB*, S32*, AABB*, S32*> BIBIItTuple; typedef thrust::zip_iterator<BIBIItTuple> BIBIZipIt; typedef thrust::tuple<float, S32, S32> FIITuple; struct BoundsToCost { BHD FIITuple operator()(BIBITuple x) // rightBounds, rightIdx, leftBounds, leftIdx { float rightA = thrust::get<0>(x).area(); float leftA = thrust::get<2>(x).area(); S32 rightN = thrust::get<1>(x); S32 leftN = thrust::get<3>(x); // This is just SAH of the two children. Need to add nodeSAH for it to be a full SAH. Also it's not scaled by root bounds. 
F32 childSAH = leftA * getTriangleCost(leftN) + rightA * getTriangleCost(rightN); // Add nodeSAH - OPT: Instead, subtract nodeSAH from leafSAH to avoid computing nodeBounds here AABB nodeBounds = thrust::get<0>(x) + thrust::get<2>(x); F32 nodeSAH = nodeBounds.area() * getNodeCost(2); F32 sah = childSAH + nodeSAH; // F32 sah = childSAH; return thrust::make_tuple(sah, leftN, rightN); } }; void FW::BatchSplitBVHBuilder::initBBArrays(S32 maxN, FW::Scene* scene, FW::BVH& bvh) { m_intArray.setManaged(true); m_intArray.resize(maxN * 10); m_boundsArray.setManaged(true); m_boundsArray.resize(maxN * 3); m_keysArray.setManaged(true); m_keysArray.resize(maxN * 2); hipMemset(m_intArray.getPtr(), 0, sizeof(S32) * m_intArray.getSize()); hipMemset(m_boundsArray.getPtr(), 0, sizeof(AABB) * m_boundsArray.getSize()); hipMemset(m_keysArray.getPtr(), 0, sizeof(U64) * m_keysArray.getSize()); m_rightIdx = m_intArray.getPtr(); m_leftIdx = m_rightIdx + maxN; m_gamma = m_leftIdx + maxN; m_segIdx = m_gamma + maxN; m_outIdxBest = m_segIdx + maxN; m_outIdxNew = m_outIdxBest + maxN; m_outCostBest = (F32*)(m_outIdxNew + maxN); m_outCostNew = m_outCostBest + maxN; m_outStratCount = (S32*)(m_outCostNew + maxN); m_refBounds = m_boundsArray.getPtr(); m_rightBounds = m_refBounds + maxN; m_leftBounds = m_rightBounds + maxN; m_keys = m_keysArray.getPtr(); m_outKeys = m_keys + maxN; bvh.getTriIndices().setManaged(true); bvh.getTriIndices().resize(maxN); m_refTriIdx = bvh.getTriIndices().getPtr(); m_tris = (const Vec3i*)scene->getTriVtxIndexBuffer().getCudaPtr(); m_verts = (const Vec3f*)scene->getVtxPosBuffer().getCudaPtr(); cuMemsetD32((hipDeviceptr_t)m_gamma, FW_S32_MIN, maxN); // Set gamma to FW_S32_MIN } void FW::BatchSplitBVHBuilder::freeArrays() { // printf("freeBBArrays()\n"); m_intArray.reset(0); m_boundsArray.reset(0); m_keysArray.reset(0); } void FW::BatchSplitBVHBuilder::doGeneration(S32& N, S32& nSegments, S32 level) { S32* refTriIdx = m_refTriIdx; S32* rightIdx = m_rightIdx; S32* leftIdx = m_leftIdx; S32* gamma = m_gamma; S32* segIdx = m_segIdx; S32* outIdxBest = m_outIdxBest; S32* outIdxNew = m_outIdxNew; F32* outCostBest = m_outCostBest; F32* outCostNew = m_outCostNew; S32* outStratCount = m_outStratCount; AABB* refBounds = m_refBounds; AABB* rightBounds = m_rightBounds; AABB* leftBounds = m_leftBounds; U64* keys = m_keys; U64* outKeys = m_outKeys; typedef thrust::tuple<S32, AABB, U64> TBKTuple; typedef thrust::tuple<S32*, AABB*, U64*> TBKItTuple; typedef thrust::zip_iterator<TBKItTuple> TBKZipIt; TBKZipIt refsTBK(thrust::make_tuple(refTriIdx, refBounds, keys)); auto OneIt = thrust::make_constant_iterator((S32)1); // Remove degenerates. // OPT: For Sweep builder move this out of the loop. If so, for speed, change it to not be a stable_partition. Split builder makes new refs. 
auto mid = thrust::stable_partition(thrust::device, refsTBK, refsTBK + N, [] BHD(const TBKTuple r) { Vec3f size = thrust::get<1>(r).max() - thrust::get<1>(r).min(); return !(min(size) < 0.0f || sum(size) == max(size)); }); S32 newN = thrust::get<0>(mid.get_iterator_tuple()) - refTriIdx; if (newN != N) printf("%d => %d\n", N, newN); N = newN; // Try object split in each dimension for (int dim = 0; dim < 3; dim++) { // Sort in given dimension thrust::sort(thrust::device, refsTBK, refsTBK + N, [dim] BHD(TBKTuple a, TBKTuple b) { F32 ca = thrust::get<1>(a).min()[dim] + thrust::get<1>(a).max()[dim]; F32 cb = thrust::get<1>(b).min()[dim] + thrust::get<1>(b).max()[dim]; U64 ka = thrust::get<2>(a); U64 kb = thrust::get<2>(b); return (ka < kb) || (ka == kb && (ca < cb || (ca == cb && thrust::get<0>(a) < thrust::get<0>(b)))); }); // Sweep right to left and determine bounds; rightIdx is offset from right edge of segment // leftBounds[i] and rightBounds[i] contain the two AABBs for splitting at i. typedef thrust::tuple<AABB, S32> BITuple; typedef thrust::tuple<AABB*, S32*> BIItTuple; typedef thrust::zip_iterator<BIItTuple> BIZipIt; auto BIRevIt(thrust::make_zip_iterator(thrust::make_tuple(thrust::make_reverse_iterator(refBounds + N), OneIt))); auto OutBIRevIt(thrust::make_zip_iterator(thrust::make_tuple(thrust::make_reverse_iterator(rightBounds + N), thrust::make_reverse_iterator(rightIdx + N)))); thrust::inclusive_scan_by_key(thrust::device, thrust::make_reverse_iterator(keys + N), thrust::make_reverse_iterator(keys), BIRevIt, OutBIRevIt, [] BHD(U64 ka, U64 kb) { return ka == kb; }, [] BHD(BITuple a, BITuple b) { return thrust::make_tuple(thrust::get<0>(a) + thrust::get<0>(b), thrust::get<1>(a) + thrust::get<1>(b)); }); // Sweep left to right and determine bounds; leftIdx is offset from left edge of segment BIZipIt OutBIIt(thrust::make_tuple(leftBounds, leftIdx)); // OPT: Don't need to write leftIdx and rightIdx all three times. thrust::exclusive_scan_by_key(thrust::device, keys, keys + N, thrust::make_zip_iterator(thrust::make_tuple(refBounds, OneIt)), OutBIIt, thrust::make_tuple(AABB(), (S32)0), [] BHD(U64 ka, U64 kb) { return ka == kb; }, [] BHD(BITuple a, BITuple b) { return thrust::make_tuple(thrust::get<0>(a) + thrust::get<0>(b), thrust::get<1>(a) + thrust::get<1>(b)); }); // OPT: Store a segment's full AABB into its final BVHNode, since we know its location now // Select lowest SAH. BIBIZipIt bounds(thrust::make_tuple(rightBounds, rightIdx, leftBounds, leftIdx)); typedef thrust::discard_iterator<S32> IDisIt; IDisIt Dis; // OPT: Only need to write the keys out once. Could use discard_iterator on the other two dimensions. // OPT: segIdx is unneeded; should use a discard_iterator to get rid of rightIdx output, but was getting errors. auto outValues = thrust::make_zip_iterator(thrust::make_tuple(dim == 0 ? outCostBest : outCostNew, dim == 0 ? outIdxBest : outIdxNew, segIdx)); auto outEnd = thrust::reduce_by_key(thrust::device, keys, keys + N, thrust::make_transform_iterator(bounds, BoundsToCost()), outKeys, outValues, [] BHD(U64 ka, U64 kb) { return ka == kb; }, [] BHD(FIITuple a, FIITuple b) { return thrust::get<0>(a) < thrust::get<0>(b) ? a : (thrust::get<0>(a) > thrust::get<0>(b) ? b : (abs(thrust::get<1>(a) - thrust::get<2>(a)) < abs(thrust::get<1>(b) - thrust::get<2>(b)) ? 
a : b)); }); nSegments = outEnd.first - outKeys; U64 demoK = outKeys[0]; S32 thisStrategy = stratObjectSplit | (dim << stratBitOffset); if (dim == 0) { // Compute the count auto IIZipIt = thrust::make_zip_iterator(thrust::make_tuple(outIdxBest, segIdx)); // These currently contain left and right counts per segment thrust::transform_exclusive_scan(thrust::device, IIZipIt, IIZipIt + nSegments, //thrust::make_zip_iterator(thrust::make_tuple(outIdxBest + nSegments, segIdx + nSegments)), outStratCount, [] BHD(auto v) { return thrust::get<0>(v) + thrust::get<1>(v); }, (S32)thisStrategy, [thisStrategy] BHD(S32 a, S32 b) { return thisStrategy | (stratNumMask & (a + b)); }); // Compute the SAH of making each segment a leaf S32 maxLeafSize = m_platform.getMaxLeafSize(), minLeafSize = m_platform.getMinLeafSize(); thrust::for_each_n(thrust::device, thrust::counting_iterator<S32>((S32)0), nSegments, [=] BHD(S32 i) { S32 ind = stratNumMask & outStratCount[i]; S32 leafN = outIdxBest[i] + segIdx[i]; F32 leafSAH = FW_F32_MAX; if (leafN <= minLeafSize) leafSAH = FW_F32_MIN; else if (leafN <= maxLeafSize) { AABB bounds = rightBounds[ind]; leafSAH = bounds.area() * getTriangleCost(leafN); } if (leafSAH < outCostBest[i]) { outCostBest[i] = leafSAH; outStratCount[i] = stratLeaf | ind; } }); } else { // OPT: Would rather do this as a conditional_iterator as part of reduce_by_key. thrust::for_each_n(thrust::device, thrust::counting_iterator<S32>((S32)0), nSegments, [=] BHD(S32 i) { if (outCostNew[i] < outCostBest[i]) { outCostBest[i] = outCostNew[i]; outIdxBest[i] = outIdxNew[i]; outStratCount[i] = thisStrategy | (stratNumMask & outStratCount[i]); } }); } printf("dim=%d nSegments=%d keys=%016llx\n", dim, nSegments, demoK); if (level == 62) { hipDeviceSynchronize(); // XXX for (int i = 0; i < nSegments; i++) { printf("%d 0x%x %d %f\n", i, (U32)outStratCount[i] >> stratBitOffset, stratNumMask & outStratCount[i], outCostBest[i]); } } } // Count how many refs want each kind of strategy to give me indices to them after they're sorted // thrust::inclusive_scan with an output tuple with a value per strategy. Could fold it into the for_each_n and use atomic counters? // Make segIdx be the per-reference index into out* thrust::transform_inclusive_scan(thrust::device, thrust::make_counting_iterator((S32)0), thrust::make_counting_iterator((S32)N), segIdx, [keys] BHD(S32 i) { return (i == 0 || keys[i] == keys[i - 1]) ? 
0 : 1; }, [] BHD(S32 a, S32 b) { return a + b; }); // Sort each segment by its best dimension typedef thrust::tuple<S32, AABB, S32> TBITuple; typedef thrust::tuple<S32*, AABB*, S32*> TBIItTuple; typedef thrust::zip_iterator<TBIItTuple> TBIZipIt; TBIZipIt refsTBI(thrust::make_tuple(refTriIdx, refBounds, segIdx)); // OPT: Think about setting up strats so I can sort by it; maybe this would give good spans for doing separate algorithms in next pass; maybe lets us keep a sorted array per dim thrust::sort(thrust::device, refsTBI, refsTBI + N, [outStratCount] BHD(TBITuple a, TBITuple b) { S32 sa = thrust::get<2>(a); // Segment index in output arrays S32 sb = thrust::get<2>(b); int dim = (stratDimMask & outStratCount[sa]) >> stratBitOffset; S32 la = outStratCount[sa]; // sort by strategy and segment index in reference arrays (only strategy is relevant so far) S32 lb = outStratCount[sb]; F32 ca = thrust::get<1>(a).min()[dim] + thrust::get<1>(a).max()[dim]; // centroid in dim F32 cb = thrust::get<1>(b).min()[dim] + thrust::get<1>(b).max()[dim]; // return (la < lb) || (la == lb && ((sa < sb) || (sa == sb && (ca < cb || (ca == cb && thrust::get<0>(a) < thrust::get<0>(b)))))); return (la < lb) || (la == lb && (ca < cb || (ca == cb && thrust::get<0>(a) < thrust::get<0>(b)))); }); // Update Nactive here so only the active ones get their keys updated // Try to get rid of keys and just use segIdx. Have to be able to put them back in order to make gamma work. // XXX Will splits screw up gamma by inserting nodes between index and what it points to? // Update keys to partition each segment at the best location thrust::for_each(thrust::device, thrust::make_counting_iterator((S32)0), thrust::make_counting_iterator((S32)N), [=] BHD(S32 i) { S32 s = segIdx[i]; // Segment index in output arrays if (leftIdx[i] >= outIdxBest[s]) // My offset within segment is to the right of the split index keys[i] = keys[i] | 1ull << (U64)(63 - level); // If I'm at the start or end of the range and the gamma slot hasn't been claimed yet I record the relative split location. if (leftIdx[i] == 0 && gamma[i] == FW_S32_MIN) gamma[i] = outIdxBest[s]; if (rightIdx[i] == 1 && gamma[i] == FW_S32_MIN) gamma[i] = outIdxBest[s] - leftIdx[i]; // The (negative) offset from i to the relative split location. }); // printf("Done with generation %d.\n", level); } FW::BVHNode* FW::BatchSplitBVHBuilder::makeNodes(S32 N) { printf("makeNodes\n"); S32* gamma = m_gamma; AABB* refBounds = m_refBounds; // In parallel, make all the leaves hipDeviceSynchronize(); // Needed to allocate managed with ArrayAllocator. LeafNode* leaves = new LeafNode[N]; InnerNode* inner = new InnerNode[N]; // OPT: Store refBounds directly in BVHNodes. Or replace BVHNodes with SOA. thrust::for_each(thrust::device, thrust::make_counting_iterator((S32)0), thrust::make_counting_iterator((S32)N), [=] BHD(S32 i) { // Fill leaf node i leaves[i].m_bounds = refBounds[i]; leaves[i].m_lo = i; leaves[i].m_hi = i + 1; }); // Object splits: // Split location [i] means there are i refs to the left and N - i refs to the right. (=> [0] is unused.) // It means a split between [i-1] and [i]. (Different than Karras 2012.) // gamma[i] is the offset from i to the split of the segment that either starts or ends at i. // If that index is a segment of length 1 then the child is in Leaves; otherwise it's in Inner. // gamma[i] > 0 if i's segment is to the right; <= 0 if to the left. 
thrust::for_each(thrust::device, thrust::make_counting_iterator((S32)0), thrust::make_counting_iterator((S32)N - 1), [=] BHD(S32 i) { // Fill inner node i bool leftIsLeaf, rightIsLeaf; S32 dj = gamma[i]; S32 j = i + dj; if (dj <= 0) { // am a left child leftIsLeaf = gamma[j - 1] > 0; // true if the span starting at j-1 is a child of i's span, not an ancestor rightIsLeaf = dj == 0; } else { // am a right child leftIsLeaf = dj == 1; rightIsLeaf = gamma[j] <= 0; } BVHNode* left = leftIsLeaf ? (BVHNode*)(&leaves[j - 1]) : (BVHNode*)(&inner[j - 1]); BVHNode* right = rightIsLeaf ? (BVHNode*)(&leaves[j]) : (BVHNode*)(&inner[j]); left->m_parent = inner + i; right->m_parent = inner + i; inner[i].m_children[0] = left; inner[i].m_children[1] = right; }); hipError_t err = hipGetLastError(); FW_ASSERT(err == hipSuccess); return inner; // inner[0] is the root node. } FW::BVHNode* FW::BatchSplitBVHBuilder::batchRun(BatchSplitBVHBuilder& BS, AABB& rootBounds) { S32 N = BS.m_bvh.getScene()->getNumTriangles(); S32 maxN = (S32)(BS.m_params.maxDuplication * (float)N); FW_ASSERT(BS.m_platform.getTriangleBatchSize() == n_triBatchSize); FW_ASSERT(BS.m_platform.getNodeBatchSize() == n_nodeBatchSize); FW_ASSERT(BS.m_platform.getSAHTriangleCost() == n_SAHTriangleCost); FW_ASSERT(BS.m_platform.getSAHNodeCost() == n_SAHNodeCost); FW::Scene* scene = BS.m_bvh.getScene(); initBBArrays(maxN, scene, BS.m_bvh); // Do this in every function that uses these so they can be used in device lambdas S32* refTriIdx = m_refTriIdx; AABB* refBounds = m_refBounds; const Vec3i* tris = m_tris; const Vec3f* verts = m_verts; // Determine triangle and root bounds rootBounds = thrust::transform_reduce(thrust::device, thrust::make_counting_iterator(0), thrust::make_counting_iterator(N), [refTriIdx, refBounds, tris, verts] BHD(S32 i) { refTriIdx[i] = i; refBounds[i] = AABB(); for (int j = 0; j < 3; j++) refBounds[i].grow(verts[tris[i][j]]); return refBounds[i]; }, AABB(), [] BHD(AABB a, AABB b) { return a + b; }); // Initialize rest of the members m_minOverlap = rootBounds.area() * BS.m_params.splitAlpha; //printf("rootBounds: "); //rootBounds.print(); // Build by generation S32 nSegments = -1; // Number of nodes (segments), which is the number of unique keys for (S32 level = 0; level < 64; level++) { doGeneration(N, nSegments, level); // Modifies N if (nSegments == N) break; } BVHNode* root = makeNodes(N); // OPT: BS.m_bvh.getTriIndices().compact(); Can't do this yet because we can't realloc managed. freeArrays(); return root; } FW::BatchSplitBVHBuilder::BatchSplitBVHBuilder(FW::BVH& bvh, const FW::BVH::BuildParams& params) : m_bvh(bvh), m_platform(bvh.getPlatform()), m_params(params) { } FW::BatchSplitBVHBuilder::~BatchSplitBVHBuilder(void) { } FW::BVHNode* FW::BatchSplitBVHBuilder::run(void) { printf("BatchSBVH alpha=%g minLeafSize=%d maxLeafSize=%d\n", m_params.splitAlpha, m_platform.getMinLeafSize(), m_platform.getMaxLeafSize()); Timer progressTimer; progressTimer.start(); AABB rootBounds; BVHNode* root = batchRun(*this, rootBounds); printf("BatchSplitBVHBuilder: t=%f duplicates %.0f%%\n", progressTimer.end(), 100.0f, (F32)m_numDuplicates / (F32)m_bvh.getScene()->getNumTriangles() * 100.0f); // Fix everything up on CPU for now. hipDeviceSynchronize(); root->computeSubtreeValues(m_platform, rootBounds.area(), true, true); return root; }
39cea6c5599c99246a2cf00b98e0ddd3a11c9308.cu
#define FW_ENABLE_ASSERT #include "bvh/BVHNode.hpp" #include "bvh/BatchSplitBVHBuilder.hpp" #include "base/Array.hpp" #include "base/Timer.hpp" #include <thrust/execution_policy.h> #include <thrust/iterator/constant_iterator.h> #include <thrust/iterator/discard_iterator.h> #include <thrust/iterator/zip_iterator.h> #include <thrust/partition.h> #include <thrust/reduce.h> #include <thrust/scan.h> #include <thrust/sort.h> #include <thrust/transform.h> #include <thrust/transform_reduce.h> #include <thrust/transform_scan.h> #define BHD __device__ // Copied from Platform.hpp since I don't want to pull that into the device. // batch processing (how many ops at the price of one) #define n_triBatchSize 1 #define n_nodeBatchSize 1 #define n_SAHTriangleCost 1.0f #define n_SAHNodeCost 1.0f using FW::S32; using FW::F32; using FW::AABB; S32 BHD roundToTriangleBatchSize(S32 n) { return ((n + n_triBatchSize - 1) / n_triBatchSize)*n_triBatchSize; } S32 BHD roundToNodeBatchSize(S32 n) { return ((n + n_nodeBatchSize - 1) / n_nodeBatchSize)*n_nodeBatchSize; } float BHD getTriangleCost(S32 n) { return roundToTriangleBatchSize(n) * n_SAHTriangleCost; } float BHD getNodeCost(S32 n) { return roundToNodeBatchSize(n) * n_SAHNodeCost; } float BHD getCost(int numChildNodes, int numTris) { return getNodeCost(numChildNodes) + getTriangleCost(numTris); } typedef thrust::tuple<AABB, S32, AABB, S32> BIBITuple; typedef thrust::tuple<AABB*, S32*, AABB*, S32*> BIBIItTuple; typedef thrust::zip_iterator<BIBIItTuple> BIBIZipIt; typedef thrust::tuple<float, S32, S32> FIITuple; struct BoundsToCost { BHD FIITuple operator()(BIBITuple x) // rightBounds, rightIdx, leftBounds, leftIdx { float rightA = thrust::get<0>(x).area(); float leftA = thrust::get<2>(x).area(); S32 rightN = thrust::get<1>(x); S32 leftN = thrust::get<3>(x); // This is just SAH of the two children. Need to add nodeSAH for it to be a full SAH. Also it's not scaled by root bounds. 
F32 childSAH = leftA * getTriangleCost(leftN) + rightA * getTriangleCost(rightN); // Add nodeSAH - OPT: Instead, subtract nodeSAH from leafSAH to avoid computing nodeBounds here AABB nodeBounds = thrust::get<0>(x) + thrust::get<2>(x); F32 nodeSAH = nodeBounds.area() * getNodeCost(2); F32 sah = childSAH + nodeSAH; // F32 sah = childSAH; return thrust::make_tuple(sah, leftN, rightN); } }; void FW::BatchSplitBVHBuilder::initBBArrays(S32 maxN, FW::Scene* scene, FW::BVH& bvh) { m_intArray.setManaged(true); m_intArray.resize(maxN * 10); m_boundsArray.setManaged(true); m_boundsArray.resize(maxN * 3); m_keysArray.setManaged(true); m_keysArray.resize(maxN * 2); cudaMemset(m_intArray.getPtr(), 0, sizeof(S32) * m_intArray.getSize()); cudaMemset(m_boundsArray.getPtr(), 0, sizeof(AABB) * m_boundsArray.getSize()); cudaMemset(m_keysArray.getPtr(), 0, sizeof(U64) * m_keysArray.getSize()); m_rightIdx = m_intArray.getPtr(); m_leftIdx = m_rightIdx + maxN; m_gamma = m_leftIdx + maxN; m_segIdx = m_gamma + maxN; m_outIdxBest = m_segIdx + maxN; m_outIdxNew = m_outIdxBest + maxN; m_outCostBest = (F32*)(m_outIdxNew + maxN); m_outCostNew = m_outCostBest + maxN; m_outStratCount = (S32*)(m_outCostNew + maxN); m_refBounds = m_boundsArray.getPtr(); m_rightBounds = m_refBounds + maxN; m_leftBounds = m_rightBounds + maxN; m_keys = m_keysArray.getPtr(); m_outKeys = m_keys + maxN; bvh.getTriIndices().setManaged(true); bvh.getTriIndices().resize(maxN); m_refTriIdx = bvh.getTriIndices().getPtr(); m_tris = (const Vec3i*)scene->getTriVtxIndexBuffer().getCudaPtr(); m_verts = (const Vec3f*)scene->getVtxPosBuffer().getCudaPtr(); cuMemsetD32((CUdeviceptr)m_gamma, FW_S32_MIN, maxN); // Set gamma to FW_S32_MIN } void FW::BatchSplitBVHBuilder::freeArrays() { // printf("freeBBArrays()\n"); m_intArray.reset(0); m_boundsArray.reset(0); m_keysArray.reset(0); } void FW::BatchSplitBVHBuilder::doGeneration(S32& N, S32& nSegments, S32 level) { S32* refTriIdx = m_refTriIdx; S32* rightIdx = m_rightIdx; S32* leftIdx = m_leftIdx; S32* gamma = m_gamma; S32* segIdx = m_segIdx; S32* outIdxBest = m_outIdxBest; S32* outIdxNew = m_outIdxNew; F32* outCostBest = m_outCostBest; F32* outCostNew = m_outCostNew; S32* outStratCount = m_outStratCount; AABB* refBounds = m_refBounds; AABB* rightBounds = m_rightBounds; AABB* leftBounds = m_leftBounds; U64* keys = m_keys; U64* outKeys = m_outKeys; typedef thrust::tuple<S32, AABB, U64> TBKTuple; typedef thrust::tuple<S32*, AABB*, U64*> TBKItTuple; typedef thrust::zip_iterator<TBKItTuple> TBKZipIt; TBKZipIt refsTBK(thrust::make_tuple(refTriIdx, refBounds, keys)); auto OneIt = thrust::make_constant_iterator((S32)1); // Remove degenerates. // OPT: For Sweep builder move this out of the loop. If so, for speed, change it to not be a stable_partition. Split builder makes new refs. 
auto mid = thrust::stable_partition(thrust::device, refsTBK, refsTBK + N, [] BHD(const TBKTuple r) { Vec3f size = thrust::get<1>(r).max() - thrust::get<1>(r).min(); return !(min(size) < 0.0f || sum(size) == max(size)); }); S32 newN = thrust::get<0>(mid.get_iterator_tuple()) - refTriIdx; if (newN != N) printf("%d => %d\n", N, newN); N = newN; // Try object split in each dimension for (int dim = 0; dim < 3; dim++) { // Sort in given dimension thrust::sort(thrust::device, refsTBK, refsTBK + N, [dim] BHD(TBKTuple a, TBKTuple b) { F32 ca = thrust::get<1>(a).min()[dim] + thrust::get<1>(a).max()[dim]; F32 cb = thrust::get<1>(b).min()[dim] + thrust::get<1>(b).max()[dim]; U64 ka = thrust::get<2>(a); U64 kb = thrust::get<2>(b); return (ka < kb) || (ka == kb && (ca < cb || (ca == cb && thrust::get<0>(a) < thrust::get<0>(b)))); }); // Sweep right to left and determine bounds; rightIdx is offset from right edge of segment // leftBounds[i] and rightBounds[i] contain the two AABBs for splitting at i. typedef thrust::tuple<AABB, S32> BITuple; typedef thrust::tuple<AABB*, S32*> BIItTuple; typedef thrust::zip_iterator<BIItTuple> BIZipIt; auto BIRevIt(thrust::make_zip_iterator(thrust::make_tuple(thrust::make_reverse_iterator(refBounds + N), OneIt))); auto OutBIRevIt(thrust::make_zip_iterator(thrust::make_tuple(thrust::make_reverse_iterator(rightBounds + N), thrust::make_reverse_iterator(rightIdx + N)))); thrust::inclusive_scan_by_key(thrust::device, thrust::make_reverse_iterator(keys + N), thrust::make_reverse_iterator(keys), BIRevIt, OutBIRevIt, [] BHD(U64 ka, U64 kb) { return ka == kb; }, [] BHD(BITuple a, BITuple b) { return thrust::make_tuple(thrust::get<0>(a) + thrust::get<0>(b), thrust::get<1>(a) + thrust::get<1>(b)); }); // Sweep left to right and determine bounds; leftIdx is offset from left edge of segment BIZipIt OutBIIt(thrust::make_tuple(leftBounds, leftIdx)); // OPT: Don't need to write leftIdx and rightIdx all three times. thrust::exclusive_scan_by_key(thrust::device, keys, keys + N, thrust::make_zip_iterator(thrust::make_tuple(refBounds, OneIt)), OutBIIt, thrust::make_tuple(AABB(), (S32)0), [] BHD(U64 ka, U64 kb) { return ka == kb; }, [] BHD(BITuple a, BITuple b) { return thrust::make_tuple(thrust::get<0>(a) + thrust::get<0>(b), thrust::get<1>(a) + thrust::get<1>(b)); }); // OPT: Store a segment's full AABB into its final BVHNode, since we know its location now // Select lowest SAH. BIBIZipIt bounds(thrust::make_tuple(rightBounds, rightIdx, leftBounds, leftIdx)); typedef thrust::discard_iterator<S32> IDisIt; IDisIt Dis; // OPT: Only need to write the keys out once. Could use discard_iterator on the other two dimensions. // OPT: segIdx is unneeded; should use a discard_iterator to get rid of rightIdx output, but was getting errors. auto outValues = thrust::make_zip_iterator(thrust::make_tuple(dim == 0 ? outCostBest : outCostNew, dim == 0 ? outIdxBest : outIdxNew, segIdx)); auto outEnd = thrust::reduce_by_key(thrust::device, keys, keys + N, thrust::make_transform_iterator(bounds, BoundsToCost()), outKeys, outValues, [] BHD(U64 ka, U64 kb) { return ka == kb; }, [] BHD(FIITuple a, FIITuple b) { return thrust::get<0>(a) < thrust::get<0>(b) ? a : (thrust::get<0>(a) > thrust::get<0>(b) ? b : (abs(thrust::get<1>(a) - thrust::get<2>(a)) < abs(thrust::get<1>(b) - thrust::get<2>(b)) ? 
a : b)); }); nSegments = outEnd.first - outKeys; U64 demoK = outKeys[0]; S32 thisStrategy = stratObjectSplit | (dim << stratBitOffset); if (dim == 0) { // Compute the count auto IIZipIt = thrust::make_zip_iterator(thrust::make_tuple(outIdxBest, segIdx)); // These currently contain left and right counts per segment thrust::transform_exclusive_scan(thrust::device, IIZipIt, IIZipIt + nSegments, //thrust::make_zip_iterator(thrust::make_tuple(outIdxBest + nSegments, segIdx + nSegments)), outStratCount, [] BHD(auto v) { return thrust::get<0>(v) + thrust::get<1>(v); }, (S32)thisStrategy, [thisStrategy] BHD(S32 a, S32 b) { return thisStrategy | (stratNumMask & (a + b)); }); // Compute the SAH of making each segment a leaf S32 maxLeafSize = m_platform.getMaxLeafSize(), minLeafSize = m_platform.getMinLeafSize(); thrust::for_each_n(thrust::device, thrust::counting_iterator<S32>((S32)0), nSegments, [=] BHD(S32 i) { S32 ind = stratNumMask & outStratCount[i]; S32 leafN = outIdxBest[i] + segIdx[i]; F32 leafSAH = FW_F32_MAX; if (leafN <= minLeafSize) leafSAH = FW_F32_MIN; else if (leafN <= maxLeafSize) { AABB bounds = rightBounds[ind]; leafSAH = bounds.area() * getTriangleCost(leafN); } if (leafSAH < outCostBest[i]) { outCostBest[i] = leafSAH; outStratCount[i] = stratLeaf | ind; } }); } else { // OPT: Would rather do this as a conditional_iterator as part of reduce_by_key. thrust::for_each_n(thrust::device, thrust::counting_iterator<S32>((S32)0), nSegments, [=] BHD(S32 i) { if (outCostNew[i] < outCostBest[i]) { outCostBest[i] = outCostNew[i]; outIdxBest[i] = outIdxNew[i]; outStratCount[i] = thisStrategy | (stratNumMask & outStratCount[i]); } }); } printf("dim=%d nSegments=%d keys=%016llx\n", dim, nSegments, demoK); if (level == 62) { cudaDeviceSynchronize(); // XXX for (int i = 0; i < nSegments; i++) { printf("%d 0x%x %d %f\n", i, (U32)outStratCount[i] >> stratBitOffset, stratNumMask & outStratCount[i], outCostBest[i]); } } } // Count how many refs want each kind of strategy to give me indices to them after they're sorted // thrust::inclusive_scan with an output tuple with a value per strategy. Could fold it into the for_each_n and use atomic counters? // Make segIdx be the per-reference index into out* thrust::transform_inclusive_scan(thrust::device, thrust::make_counting_iterator((S32)0), thrust::make_counting_iterator((S32)N), segIdx, [keys] BHD(S32 i) { return (i == 0 || keys[i] == keys[i - 1]) ? 
0 : 1; }, [] BHD(S32 a, S32 b) { return a + b; }); // Sort each segment by its best dimension typedef thrust::tuple<S32, AABB, S32> TBITuple; typedef thrust::tuple<S32*, AABB*, S32*> TBIItTuple; typedef thrust::zip_iterator<TBIItTuple> TBIZipIt; TBIZipIt refsTBI(thrust::make_tuple(refTriIdx, refBounds, segIdx)); // OPT: Think about setting up strats so I can sort by it; maybe this would give good spans for doing separate algorithms in next pass; maybe lets us keep a sorted array per dim thrust::sort(thrust::device, refsTBI, refsTBI + N, [outStratCount] BHD(TBITuple a, TBITuple b) { S32 sa = thrust::get<2>(a); // Segment index in output arrays S32 sb = thrust::get<2>(b); int dim = (stratDimMask & outStratCount[sa]) >> stratBitOffset; S32 la = outStratCount[sa]; // sort by strategy and segment index in reference arrays (only strategy is relevant so far) S32 lb = outStratCount[sb]; F32 ca = thrust::get<1>(a).min()[dim] + thrust::get<1>(a).max()[dim]; // centroid in dim F32 cb = thrust::get<1>(b).min()[dim] + thrust::get<1>(b).max()[dim]; // return (la < lb) || (la == lb && ((sa < sb) || (sa == sb && (ca < cb || (ca == cb && thrust::get<0>(a) < thrust::get<0>(b)))))); return (la < lb) || (la == lb && (ca < cb || (ca == cb && thrust::get<0>(a) < thrust::get<0>(b)))); }); // Update Nactive here so only the active ones get their keys updated // Try to get rid of keys and just use segIdx. Have to be able to put them back in order to make gamma work. // XXX Will splits screw up gamma by inserting nodes between index and what it points to? // Update keys to partition each segment at the best location thrust::for_each(thrust::device, thrust::make_counting_iterator((S32)0), thrust::make_counting_iterator((S32)N), [=] BHD(S32 i) { S32 s = segIdx[i]; // Segment index in output arrays if (leftIdx[i] >= outIdxBest[s]) // My offset within segment is to the right of the split index keys[i] = keys[i] | 1ull << (U64)(63 - level); // If I'm at the start or end of the range and the gamma slot hasn't been claimed yet I record the relative split location. if (leftIdx[i] == 0 && gamma[i] == FW_S32_MIN) gamma[i] = outIdxBest[s]; if (rightIdx[i] == 1 && gamma[i] == FW_S32_MIN) gamma[i] = outIdxBest[s] - leftIdx[i]; // The (negative) offset from i to the relative split location. }); // printf("Done with generation %d.\n", level); } FW::BVHNode* FW::BatchSplitBVHBuilder::makeNodes(S32 N) { printf("makeNodes\n"); S32* gamma = m_gamma; AABB* refBounds = m_refBounds; // In parallel, make all the leaves cudaDeviceSynchronize(); // Needed to allocate managed with ArrayAllocator. LeafNode* leaves = new LeafNode[N]; InnerNode* inner = new InnerNode[N]; // OPT: Store refBounds directly in BVHNodes. Or replace BVHNodes with SOA. thrust::for_each(thrust::device, thrust::make_counting_iterator((S32)0), thrust::make_counting_iterator((S32)N), [=] BHD(S32 i) { // Fill leaf node i leaves[i].m_bounds = refBounds[i]; leaves[i].m_lo = i; leaves[i].m_hi = i + 1; }); // Object splits: // Split location [i] means there are i refs to the left and N - i refs to the right. (=> [0] is unused.) // It means a split between [i-1] and [i]. (Different than Karras 2012.) // gamma[i] is the offset from i to the split of the segment that either starts or ends at i. // If that index is a segment of length 1 then the child is in Leaves; otherwise it's in Inner. // gamma[i] > 0 if i's segment is to the right; <= 0 if to the left. 
thrust::for_each(thrust::device, thrust::make_counting_iterator((S32)0), thrust::make_counting_iterator((S32)N - 1), [=] BHD(S32 i) { // Fill inner node i bool leftIsLeaf, rightIsLeaf; S32 dj = gamma[i]; S32 j = i + dj; if (dj <= 0) { // am a left child leftIsLeaf = gamma[j - 1] > 0; // true if the span starting at j-1 is a child of i's span, not an ancestor rightIsLeaf = dj == 0; } else { // am a right child leftIsLeaf = dj == 1; rightIsLeaf = gamma[j] <= 0; } BVHNode* left = leftIsLeaf ? (BVHNode*)(&leaves[j - 1]) : (BVHNode*)(&inner[j - 1]); BVHNode* right = rightIsLeaf ? (BVHNode*)(&leaves[j]) : (BVHNode*)(&inner[j]); left->m_parent = inner + i; right->m_parent = inner + i; inner[i].m_children[0] = left; inner[i].m_children[1] = right; }); cudaError_t err = cudaGetLastError(); FW_ASSERT(err == cudaSuccess); return inner; // inner[0] is the root node. } FW::BVHNode* FW::BatchSplitBVHBuilder::batchRun(BatchSplitBVHBuilder& BS, AABB& rootBounds) { S32 N = BS.m_bvh.getScene()->getNumTriangles(); S32 maxN = (S32)(BS.m_params.maxDuplication * (float)N); FW_ASSERT(BS.m_platform.getTriangleBatchSize() == n_triBatchSize); FW_ASSERT(BS.m_platform.getNodeBatchSize() == n_nodeBatchSize); FW_ASSERT(BS.m_platform.getSAHTriangleCost() == n_SAHTriangleCost); FW_ASSERT(BS.m_platform.getSAHNodeCost() == n_SAHNodeCost); FW::Scene* scene = BS.m_bvh.getScene(); initBBArrays(maxN, scene, BS.m_bvh); // Do this in every function that uses these so they can be used in device lambdas S32* refTriIdx = m_refTriIdx; AABB* refBounds = m_refBounds; const Vec3i* tris = m_tris; const Vec3f* verts = m_verts; // Determine triangle and root bounds rootBounds = thrust::transform_reduce(thrust::device, thrust::make_counting_iterator(0), thrust::make_counting_iterator(N), [refTriIdx, refBounds, tris, verts] BHD(S32 i) { refTriIdx[i] = i; refBounds[i] = AABB(); for (int j = 0; j < 3; j++) refBounds[i].grow(verts[tris[i][j]]); return refBounds[i]; }, AABB(), [] BHD(AABB a, AABB b) { return a + b; }); // Initialize rest of the members m_minOverlap = rootBounds.area() * BS.m_params.splitAlpha; //printf("rootBounds: "); //rootBounds.print(); // Build by generation S32 nSegments = -1; // Number of nodes (segments), which is the number of unique keys for (S32 level = 0; level < 64; level++) { doGeneration(N, nSegments, level); // Modifies N if (nSegments == N) break; } BVHNode* root = makeNodes(N); // OPT: BS.m_bvh.getTriIndices().compact(); Can't do this yet because we can't realloc managed. freeArrays(); return root; } FW::BatchSplitBVHBuilder::BatchSplitBVHBuilder(FW::BVH& bvh, const FW::BVH::BuildParams& params) : m_bvh(bvh), m_platform(bvh.getPlatform()), m_params(params) { } FW::BatchSplitBVHBuilder::~BatchSplitBVHBuilder(void) { } FW::BVHNode* FW::BatchSplitBVHBuilder::run(void) { printf("BatchSBVH alpha=%g minLeafSize=%d maxLeafSize=%d\n", m_params.splitAlpha, m_platform.getMinLeafSize(), m_platform.getMaxLeafSize()); Timer progressTimer; progressTimer.start(); AABB rootBounds; BVHNode* root = batchRun(*this, rootBounds); printf("BatchSplitBVHBuilder: t=%f duplicates %.0f%%\n", progressTimer.end(), 100.0f, (F32)m_numDuplicates / (F32)m_bvh.getScene()->getNumTriangles() * 100.0f); // Fix everything up on CPU for now. cudaDeviceSynchronize(); root->computeSubtreeValues(m_platform, rootBounds.area(), true, true); return root; }
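The core of the builder above is a chain of key-segmented Thrust primitives: scan_by_key sweeps the bounds, and reduce_by_key picks the cheapest split per segment. As a self-contained illustration of that selection pattern, here is a small, hypothetical reduce_by_key that keeps the minimum cost and its position within each key segment (the data and names are made up; it assumes extended device lambdas are enabled, as the builder itself already requires):

#include <thrust/device_vector.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/reduce.h>
#include <thrust/execution_policy.h>

// For each run of equal keys, keep the (cost, index) pair with the smallest cost.
// This mirrors how the builder reduces per-split SAH costs to one best split per segment.
void minCostPerSegment()
{
    int   h_keys[8] = {0, 0, 0, 1, 1, 2, 2, 2};
    float h_cost[8] = {5.f, 2.f, 7.f, 4.f, 1.f, 9.f, 3.f, 6.f};
    int   h_idx[8]  = {0, 1, 2, 0, 1, 0, 1, 2};

    thrust::device_vector<int>   keys(h_keys, h_keys + 8);
    thrust::device_vector<float> cost(h_cost, h_cost + 8);
    thrust::device_vector<int>   idx(h_idx, h_idx + 8);

    thrust::device_vector<int>   outKeys(8);
    thrust::device_vector<float> outCost(8);
    thrust::device_vector<int>   outIdx(8);

    auto vals    = thrust::make_zip_iterator(thrust::make_tuple(cost.begin(), idx.begin()));
    auto outVals = thrust::make_zip_iterator(thrust::make_tuple(outCost.begin(), outIdx.begin()));

    typedef thrust::tuple<float, int> CI;
    auto end = thrust::reduce_by_key(thrust::device,
        keys.begin(), keys.end(), vals, outKeys.begin(), outVals,
        thrust::equal_to<int>(),
        [] __device__ (CI a, CI b) { return thrust::get<0>(a) <= thrust::get<0>(b) ? a : b; });

    int nSegments = end.first - outKeys.begin();   // 3 segments; best costs are 2, 1, 3
    (void)nSegments;
}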
3a2dcfa917f6ea8e48520540bf0088643b21bba3.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <iostream>
#include <fstream>

using namespace std;

// One thread per input element: each thread reads its value and atomically
// increments the matching bin in global memory.
__global__ void histogram(int *d_bins, const int *d_in, const int BIN_COUNT)
{
    int gridDim_X = 64;
    int gridDim_Y = 64;
    int myId = threadIdx.x + (blockDim.x * blockIdx.x)
             + (gridDim_X * blockDim.x * blockIdx.y)
             + (gridDim_Y * gridDim_X * blockDim.x * blockIdx.z);
    int myItem = d_in[myId];
    int myBin = myItem % BIN_COUNT;
    atomicAdd(&(d_bins[myBin]), 1);
}

int main(int argc, char **argv)
{
    const int ARRAY_SIZE = 4096 * 4096;
    const int ARRAY_BYTES = ARRAY_SIZE * sizeof(int);
    const int BIN_COUNT = 10;
    const int BIN_BYTES = BIN_COUNT * sizeof(int);

    cout << "SEE Output.txt File for OUTPUT __________ HIMANSHU JAIN \n";

    // Read the input file into a heap-allocated array (64 MB is far too large for the stack).
    int *h_in = new int[ARRAY_SIZE];
    ifstream inputfile;
    inputfile.open(argv[1]);
    int i = 0;
    while (i < ARRAY_SIZE && inputfile >> h_in[i]) {
        i++;
    }

    // initialize bins with zero
    int h_bins[BIN_COUNT];
    for (int i = 0; i < BIN_COUNT; i++) {
        h_bins[i] = 0;
    }

    // declare GPU memory pointers
    int *d_in;
    int *d_bins;

    // allocate GPU memory
    hipMalloc((void **)&d_in, ARRAY_BYTES);
    hipMalloc((void **)&d_bins, BIN_BYTES);

    // transfer the arrays to the GPU
    hipMemcpy(d_in, h_in, ARRAY_BYTES, hipMemcpyHostToDevice);
    hipMemcpy(d_bins, h_bins, BIN_BYTES, hipMemcpyHostToDevice);

    // launch the kernel: 64x64x64 blocks of 64 threads covers exactly 4096*4096 elements
    int Block_x = 64;
    int Block_y = 64;
    int Block_z = 64;
    int thread_Block = 64;
    hipLaunchKernelGGL(histogram, dim3(Block_x, Block_y, Block_z), dim3(thread_Block), 0, 0,
                       d_bins, d_in, BIN_COUNT);

    // copy the histogram back from the GPU
    hipMemcpy(h_bins, d_bins, BIN_BYTES, hipMemcpyDeviceToHost);

    // write the output file
    ofstream outputfile;
    outputfile.open("output.txt");
    outputfile << "Homework 5 : CUDA Assignment (HISTOGRAM) : Himanhu Jain \n";
    for (int i = 0; i < BIN_COUNT; i++) {
        outputfile << i << " => " << h_bins[i] << "\n";
    }
    outputfile.close();

    // free GPU and host memory
    hipFree(d_in);
    hipFree(d_bins);
    delete[] h_in;

    return 0;
}
3a2dcfa917f6ea8e48520540bf0088643b21bba3.cu
#include <stdio.h>
#include <cuda_runtime.h>
#include <iostream>
#include <fstream>

using namespace std;

// One thread per input element: each thread reads its value and atomically
// increments the matching bin in global memory.
__global__ void histogram(int *d_bins, const int *d_in, const int BIN_COUNT)
{
    int gridDim_X = 64;
    int gridDim_Y = 64;
    int myId = threadIdx.x + (blockDim.x * blockIdx.x)
             + (gridDim_X * blockDim.x * blockIdx.y)
             + (gridDim_Y * gridDim_X * blockDim.x * blockIdx.z);
    int myItem = d_in[myId];
    int myBin = myItem % BIN_COUNT;
    atomicAdd(&(d_bins[myBin]), 1);
}

int main(int argc, char **argv)
{
    const int ARRAY_SIZE = 4096 * 4096;
    const int ARRAY_BYTES = ARRAY_SIZE * sizeof(int);
    const int BIN_COUNT = 10;
    const int BIN_BYTES = BIN_COUNT * sizeof(int);

    cout << "SEE Output.txt File for OUTPUT __________ HIMANSHU JAIN \n";

    // Read the input file into a heap-allocated array (64 MB is far too large for the stack).
    int *h_in = new int[ARRAY_SIZE];
    ifstream inputfile;
    inputfile.open(argv[1]);
    int i = 0;
    while (i < ARRAY_SIZE && inputfile >> h_in[i]) {
        i++;
    }

    // initialize bins with zero
    int h_bins[BIN_COUNT];
    for (int i = 0; i < BIN_COUNT; i++) {
        h_bins[i] = 0;
    }

    // declare GPU memory pointers
    int *d_in;
    int *d_bins;

    // allocate GPU memory
    cudaMalloc((void **)&d_in, ARRAY_BYTES);
    cudaMalloc((void **)&d_bins, BIN_BYTES);

    // transfer the arrays to the GPU
    cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);
    cudaMemcpy(d_bins, h_bins, BIN_BYTES, cudaMemcpyHostToDevice);

    // launch the kernel: 64x64x64 blocks of 64 threads covers exactly 4096*4096 elements
    int Block_x = 64;
    int Block_y = 64;
    int Block_z = 64;
    int thread_Block = 64;
    histogram<<<dim3(Block_x, Block_y, Block_z), thread_Block>>>(d_bins, d_in, BIN_COUNT);

    // copy the histogram back from the GPU
    cudaMemcpy(h_bins, d_bins, BIN_BYTES, cudaMemcpyDeviceToHost);

    // write the output file
    ofstream outputfile;
    outputfile.open("output.txt");
    outputfile << "Homework 5 : CUDA Assignment (HISTOGRAM) : Himanhu Jain \n";
    for (int i = 0; i < BIN_COUNT; i++) {
        outputfile << i << " => " << h_bins[i] << "\n";
    }
    outputfile.close();

    // free GPU and host memory
    cudaFree(d_in);
    cudaFree(d_bins);
    delete[] h_in;

    return 0;
}
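Because every thread above contends for the same ten global counters, a common refinement is to privatize the bins in shared memory and flush them once per block. The kernel below is a hypothetical alternative sketch of that idea, not part of the original assignment:

// Shared-memory variant: each block accumulates into its own copy of the bins
// and merges them into the global histogram once at the end.
__global__ void histogram_shared(int *d_bins, const int *d_in, int n, int BIN_COUNT)
{
    extern __shared__ int s_bins[];            // BIN_COUNT ints per block
    for (int b = threadIdx.x; b < BIN_COUNT; b += blockDim.x)
        s_bins[b] = 0;
    __syncthreads();

    // grid-stride loop so any grid size covers all n elements
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += gridDim.x * blockDim.x)
        atomicAdd(&s_bins[d_in[i] % BIN_COUNT], 1);
    __syncthreads();

    for (int b = threadIdx.x; b < BIN_COUNT; b += blockDim.x)
        atomicAdd(&d_bins[b], s_bins[b]);
}

// Launched with the bin storage passed as dynamic shared memory, e.g.:
//   histogram_shared<<<256, 256, BIN_COUNT * sizeof(int)>>>(d_bins, d_in, ARRAY_SIZE, BIN_COUNT);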
6ae8443fc7ae265108c9a4ce1572431a1ffb9d14.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <string.h>
#include "ising.h"

// CUDA Kernel
// Each thread updates a tileSize x tileSize tile of spins: the new value of a spin
// is the thresholded 5x5 weighted sum of its (toroidally wrapped) neighbours.
__global__ void computeMoment(int8_t *readArr, int8_t *writeArr, float *weightArr, int n, int tileSize){
    int row_init = blockIdx.x * (blockDim.x * tileSize) + threadIdx.x * tileSize;
    int col_init = blockIdx.y * (blockDim.y * tileSize) + threadIdx.y * tileSize;

    // Assign each thread a tileSize x tileSize tile
    for (int ii = 0; ii < tileSize; ++ii) {
        for (int jj = 0; jj < tileSize; ++jj) {
            int row = row_init + ii;
            int col = col_init + jj;

            // If the coordinates are inside the lattice, update the write array accordingly
            if (row < n && col < n) {
                float influence = 0.0f;
                for (int i = -2; i < 3; i++) {
                    for (int j = -2; j < 3; j++) {
                        // add an extra n so that the modulo behaves like the mathematical
                        // modulo, i.e. it only returns non-negative values
                        int y = (row + i + n) % n;
                        int x = (col + j + n) % n;
                        influence += weightArr[i * 5 + j] * readArr[y * n + x];
                    }
                }
                writeArr[row * n + col] = readArr[row * n + col];
                if (influence < -diff)
                    writeArr[row * n + col] = -1;
                else if (influence > diff)
                    writeArr[row * n + col] = 1;
                // no block-level synchronization is needed here: each thread only writes its own tile
            }
        }
    }
}

void ising(int8_t *G, float *w, int k, int n)
{
    // Allocate memory for the 3 arrays with hipMallocManaged()
    // because they will be used inside the kernel.
    // The returned error values are kept for debugging only.
    int8_t *readArr, *writeArr;
    hipError_t err1 = hipMallocManaged(&readArr, n * n * sizeof(int8_t));
    hipError_t err2 = hipMallocManaged(&writeArr, n * n * sizeof(int8_t));

    float *weightArr_d;
    hipError_t err3 = hipMallocManaged(&weightArr_d, 5 * 5 * sizeof(float));

    // Copy the contents of the input arrays into the ones used inside the kernel
    memcpy(readArr, G, n * n * sizeof(int8_t));
    memcpy(weightArr_d, w, 5 * 5 * sizeof(float));

    // weightCenter points at the middle of the 5x5 stencil so the kernel can use
    // indexes in [-2..2][-2..2]; the centre weight (self-influence) is zeroed.
    float *weightCenter = &weightArr_d[2 * 5 + 2];
    weightCenter[0] = 0.0f;

    // Define the thread tile size, that is the size of the block of
    // moments a single thread will calculate. Set it to 5x5.
    int tileSize = 5;

    for (int i = 1; i <= k; i++) {
        // Use 32x32 threads per block; each block covers 32*tileSize lattice rows/columns,
        // so round the grid size up to fit the input n.
        dim3 dimBlock(32, 32);
        int gridSz = (n + 32 * tileSize - 1) / (32 * tileSize);
        dim3 dimGrid(gridSz, gridSz);

        // Run the kernel on the GPU
        hipLaunchKernelGGL(computeMoment, dimGrid, dimBlock, 0, 0,
                           readArr, writeArr, weightCenter, n, tileSize);

        // Uncomment below to check for launch errors
        //printf("%s\n", hipGetErrorString(hipGetLastError()));

        // Wait for the GPU to finish before accessing on host
        hipDeviceSynchronize();

        // Swap read and write arrays
        int8_t *temp = readArr;
        readArr = writeArr;
        writeArr = temp;
    }

    // The final result is now in readArr; copy the contents back into array G
    memcpy(G, readArr, n * n * sizeof(int8_t));

    hipFree(readArr);
    hipFree(writeArr);
    hipFree(weightArr_d);
}
6ae8443fc7ae265108c9a4ce1572431a1ffb9d14.cu
#include <stdlib.h> #include <string.h> #include "ising.h" // CUDA Kernel __global__ void computeMoment(int8_t *readArr, int8_t *writeArr, float *weightArr, int n, int tileSize){ int row_init = blockIdx.x*(blockDim.x*tileSize) + threadIdx.x*tileSize; int col_init = blockIdx.y*(blockDim.y*tileSize) + threadIdx.y*tileSize; // Assign each thread a tileSizeXtileSize tile for(int ii=0; ii<tileSize; ++ii){ for (int jj=0; jj<tileSize; ++jj){ int row = row_init + ii; int col = col_init + jj; // If coordinates are between boundaries // update the write array accordingly if(row < n && col < n){ float influence = 0.0f; for (int i=-2; i<3; i++) { for (int j=-2; j<3; j++) { //add extra n so that modulo behaves like mathematics modulo //that is return only positive values int y = (row+i+n)%n; int x = (col+j+n)%n; influence += weightArr[i*5 + j]*readArr[y*n + x]; } } writeArr[row*n + col] = readArr[row*n + col]; if (influence<-diff) writeArr[row*n + col] = -1; else if (influence>diff) writeArr[row*n + col] = 1; __syncthreads(); } } } } void ising(int8_t *G, float *w, int k, int n) { // Allocate memory for the 3 arrays with cudaMallocManaged() // because they will be used inside the kernel // The return err values are for debugging only int8_t *readArr, *writeArr; cudaError_t err1 = cudaMallocManaged(&readArr, n*n*sizeof(int8_t)); cudaError_t err2 = cudaMallocManaged(&writeArr,n*n*sizeof(int8_t)); float *weightArr_d; cudaError_t er3 = cudaMallocManaged(&weightArr_d, 5*5*sizeof(float)); // Copy the contents of input arrays inside // the ones we will use inside kernel memcpy(readArr, G, n*n*sizeof(int8_t)); memcpy(weightArr_d, w, 5*5*sizeof(float)); //set valid indexes to [-2..2][-2..2] weightArr_d = &weightArr_d[2*5 + 2]; weightArr_d[0] = 0.0; // Define the thread tile size, that is the size of the block of // moments a single thread will calculate. Set it to 5x5 int tileSize = 5; for (int i=1; i<=k; i++) { // Create blocks of size 32x32 threads per block // The number of blocks will adjust to fit the input n dim3 dimBlock(32, 32); int gridSz = (n + 32*tileSize)/ 32*tileSize; dim3 dimGrid(gridSz, gridSz); // Run the kernel in GPU computeMoment<<<dimGrid, dimBlock>>> (readArr, writeArr, weightArr_d, n, tileSize); // Uncomment below to check for launch errors //printf("%s\n", cudaGetErrorString(cudaGetLastError())); // Wait for GPU to finish before accessing on host cudaDeviceSynchronize(); // Swap read and write arrays int8_t *temp = readArr; readArr = writeArr; writeArr = temp; } //The final result now is in readArr. Copy the contents // in array G memcpy(G, readArr, n*n*sizeof(int)); cudaFree( readArr ); cudaFree( writeArr ); cudaFree( weightArr_d ); }
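Two details in the host loop above are worth flagging: because `/` and `*` associate left-to-right, `int gridSz = (n + 32*tileSize)/ 32*tileSize;` evaluates as `((n + 32*tileSize)/32)*tileSize`, which over-allocates blocks (harmless thanks to the in-kernel bounds check, but wasteful), and the final `memcpy(G, readArr, n*n*sizeof(int))` copies four times more bytes than the `int8_t` buffers hold. A sketch of the presumably intended expressions, reusing the names from the function above:

// Ceiling division so the grid covers all n rows/columns of moments.
int tileSpan = 32 * tileSize;                    // moments covered per block edge
int gridSz   = (n + tileSpan - 1) / tileSpan;    // note the parentheses
dim3 dimGrid(gridSz, gridSz);

// Copy back exactly the bytes that were written (int8_t, not int).
memcpy(G, readArr, (size_t)n * n * sizeof(int8_t));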
255ec5cdfbacb14d11aec9934680d74096b25e43.hip
// !!! This is a file automatically generated by hipify!!! #include <ATen/Dispatch.h> #include <ATen/native/ForeachUtils.h> #include <ATen/native/hip/ForeachFunctors.cuh> namespace at { namespace native { template <template<class> class Op> std::vector<Tensor> foreach_unary_op(TensorList tensors) { std::vector<std::vector<at::Tensor>> tensor_lists; std::vector<at::Tensor> vec_res; vec_res.reserve(tensors.size()); for (const auto& t: tensors) { vec_res.emplace_back(at::native::empty_like(t)); } tensor_lists.emplace_back(tensors.vec()); tensor_lists.emplace_back(std::move(vec_res)); AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(ScalarType::Half, tensors[0].scalar_type(), "foreach_unary_op_cuda", [&]() { using opmath_t = get_opmath_t<scalar_t>::opmath_t; multi_tensor_apply<2>(tensor_lists, UnaryOpFunctor<scalar_t, /* depth */ 2, /* r_args_depth */ 1, /* res_arg_index */ 1>(), Op<opmath_t>()); }); return tensor_lists[1]; } template <template<class> class Op> void foreach_unary_op_(TensorList tensors) { std::vector<std::vector<at::Tensor>> tensor_lists; tensor_lists.emplace_back(tensors.vec()); AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(ScalarType::Half, tensors[0].scalar_type(), "foreach_unary_op_cuda_", [&]() { using opmath_t = get_opmath_t<scalar_t>::opmath_t; multi_tensor_apply<1>(tensor_lists, UnaryOpFunctor<scalar_t, /* depth */ 1, /* r_args_depth */ 1, /* res_arg_index */ 0>(), Op<opmath_t>()); }); } #define FOREACH_UNARY_OP(NAME, NAME1) \ template<typename T> \ struct NAME1 { \ __device__ T operator()(T t) const { return std::NAME(t); } \ }; \ \ std::vector<Tensor> foreach_tensor_##NAME##_cuda(TensorList tensors) { \ check_foreach_api_restrictions(tensors); \ \ if (!can_use_fast_route(tensors)) { \ return at::native::foreach_tensor_##NAME##_slow(tensors); \ } \ \ return foreach_unary_op<NAME1>(tensors); \ } \ \ void foreach_tensor_##NAME##_cuda_(TensorList tensors) { \ check_foreach_api_restrictions(tensors); \ \ if (!can_use_fast_route(tensors)) { \ return at::native::foreach_tensor_##NAME##_slow_(tensors); \ } \ \ foreach_unary_op_<NAME1>(tensors); \ } FOREACH_UNARY_OP(exp, Exp); FOREACH_UNARY_OP(sqrt, Sqrt); }} // namespace at::native
255ec5cdfbacb14d11aec9934680d74096b25e43.cu
#include <ATen/Dispatch.h> #include <ATen/native/ForeachUtils.h> #include <ATen/native/cuda/ForeachFunctors.cuh> namespace at { namespace native { template <template<class> class Op> std::vector<Tensor> foreach_unary_op(TensorList tensors) { std::vector<std::vector<at::Tensor>> tensor_lists; std::vector<at::Tensor> vec_res; vec_res.reserve(tensors.size()); for (const auto& t: tensors) { vec_res.emplace_back(at::native::empty_like(t)); } tensor_lists.emplace_back(tensors.vec()); tensor_lists.emplace_back(std::move(vec_res)); AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(ScalarType::Half, tensors[0].scalar_type(), "foreach_unary_op_cuda", [&]() { using opmath_t = get_opmath_t<scalar_t>::opmath_t; multi_tensor_apply<2>(tensor_lists, UnaryOpFunctor<scalar_t, /* depth */ 2, /* r_args_depth */ 1, /* res_arg_index */ 1>(), Op<opmath_t>()); }); return tensor_lists[1]; } template <template<class> class Op> void foreach_unary_op_(TensorList tensors) { std::vector<std::vector<at::Tensor>> tensor_lists; tensor_lists.emplace_back(tensors.vec()); AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(ScalarType::Half, tensors[0].scalar_type(), "foreach_unary_op_cuda_", [&]() { using opmath_t = get_opmath_t<scalar_t>::opmath_t; multi_tensor_apply<1>(tensor_lists, UnaryOpFunctor<scalar_t, /* depth */ 1, /* r_args_depth */ 1, /* res_arg_index */ 0>(), Op<opmath_t>()); }); } #define FOREACH_UNARY_OP(NAME, NAME1) \ template<typename T> \ struct NAME1 { \ __device__ T operator()(T t) const { return std::NAME(t); } \ }; \ \ std::vector<Tensor> foreach_tensor_##NAME##_cuda(TensorList tensors) { \ check_foreach_api_restrictions(tensors); \ \ if (!can_use_fast_route(tensors)) { \ return at::native::foreach_tensor_##NAME##_slow(tensors); \ } \ \ return foreach_unary_op<NAME1>(tensors); \ } \ \ void foreach_tensor_##NAME##_cuda_(TensorList tensors) { \ check_foreach_api_restrictions(tensors); \ \ if (!can_use_fast_route(tensors)) { \ return at::native::foreach_tensor_##NAME##_slow_(tensors); \ } \ \ foreach_unary_op_<NAME1>(tensors); \ } FOREACH_UNARY_OP(exp, Exp); FOREACH_UNARY_OP(sqrt, Sqrt); }} // namespace at::native
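Because the backslash continuations in FOREACH_UNARY_OP are hard to scan once flattened, here is roughly what `FOREACH_UNARY_OP(exp, Exp);` expands to, written out by hand for readability (same code as the macro above, relying on the surrounding file's includes and helpers):

template<typename T>
struct Exp {
    __device__ T operator()(T t) const { return std::exp(t); }
};

std::vector<Tensor> foreach_tensor_exp_cuda(TensorList tensors) {
    check_foreach_api_restrictions(tensors);
    if (!can_use_fast_route(tensors)) {
        return at::native::foreach_tensor_exp_slow(tensors);
    }
    return foreach_unary_op<Exp>(tensors);
}

void foreach_tensor_exp_cuda_(TensorList tensors) {
    check_foreach_api_restrictions(tensors);
    if (!can_use_fast_route(tensors)) {
        return at::native::foreach_tensor_exp_slow_(tensors);
    }
    foreach_unary_op_<Exp>(tensors);
}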
12520a772fda38fbde6bb613340683340f4dc78a.hip
// !!! This is a file automatically generated by hipify!!! /*! * Copyright 2015-2019 by Contributors * \file multiclass_metric.cc * \brief evaluation metrics for multiclass classification. * \author Kailong Chen, Tianqi Chen */ #include <rabit/rabit.h> #include <xgboost/metric.h> #include <cmath> #include "metric_common.h" #include "../common/math.h" #include "../common/common.h" #if defined(XGBOOST_USE_CUDA) #include <thrust/execution_policy.h> // thrust::hip::par #include <thrust/functional.h> // thrust::plus<> #include <thrust/transform_reduce.h> #include <thrust/iterator/counting_iterator.h> #include "../common/device_helpers.cuh" #endif // XGBOOST_USE_CUDA namespace xgboost { namespace metric { // tag the this file, used by force static link later. DMLC_REGISTRY_FILE_TAG(multiclass_metric); template <typename EvalRowPolicy> class MultiClassMetricsReduction { void CheckLabelError(int32_t label_error, size_t n_class) const { CHECK(label_error >= 0 && label_error < static_cast<int32_t>(n_class)) << "MultiClassEvaluation: label must be in [0, num_class)," << " num_class=" << n_class << " but found " << label_error << " in label"; } public: MultiClassMetricsReduction() = default; PackedReduceResult CpuReduceMetrics( const HostDeviceVector<bst_float>& weights, const HostDeviceVector<bst_float>& labels, const HostDeviceVector<bst_float>& preds, const size_t n_class) const { size_t ndata = labels.Size(); const auto& h_labels = labels.HostVector(); const auto& h_weights = weights.HostVector(); const auto& h_preds = preds.HostVector(); bst_float residue_sum = 0; bst_float weights_sum = 0; int label_error = 0; bool const is_null_weight = weights.Size() == 0; #pragma omp parallel for reduction(+: residue_sum, weights_sum) schedule(static) for (omp_ulong idx = 0; idx < ndata; ++idx) { bst_float weight = is_null_weight ? 1.0f : h_weights[idx]; auto label = static_cast<int>(h_labels[idx]); if (label >= 0 && label < static_cast<int>(n_class)) { residue_sum += EvalRowPolicy::EvalRow( label, h_preds.data() + idx * n_class, n_class) * weight; weights_sum += weight; } else { label_error = label; } } CheckLabelError(label_error, n_class); PackedReduceResult res { residue_sum, weights_sum }; return res; } #if defined(XGBOOST_USE_CUDA) ~MultiClassMetricsReduction() { for (GPUSet::GpuIdType id = *devices_.begin(); id < *devices_.end(); ++id) { dh::safe_cuda(hipSetDevice(id)); size_t index = devices_.Index(id); allocators_.at(index).Free(); } } PackedReduceResult DeviceReduceMetrics( GPUSet::GpuIdType device_id, size_t device_index, const HostDeviceVector<bst_float>& weights, const HostDeviceVector<bst_float>& labels, const HostDeviceVector<bst_float>& preds, const size_t n_class) { size_t n_data = labels.DeviceSize(device_id); thrust::counting_iterator<size_t> begin(0); thrust::counting_iterator<size_t> end = begin + n_data; auto s_labels = labels.DeviceSpan(device_id); auto s_preds = preds.DeviceSpan(device_id); auto s_weights = weights.DeviceSpan(device_id); bool const is_null_weight = weights.Size() == 0; auto s_label_error = label_error_.GetSpan<int32_t>(1); s_label_error[0] = 0; PackedReduceResult result = thrust::transform_reduce( thrust::hip::par(allocators_.at(device_index)), begin, end, [=] XGBOOST_DEVICE(size_t idx) { bst_float weight = is_null_weight ? 
1.0f : s_weights[idx]; bst_float residue = 0; auto label = static_cast<int>(s_labels[idx]); if (label >= 0 && label < static_cast<int32_t>(n_class)) { residue = EvalRowPolicy::EvalRow( label, &s_preds[idx * n_class], n_class) * weight; } else { s_label_error[0] = label; } return PackedReduceResult{ residue, weight }; }, PackedReduceResult(), thrust::plus<PackedReduceResult>()); CheckLabelError(s_label_error[0], n_class); return result; } #endif // XGBOOST_USE_CUDA PackedReduceResult Reduce( const LearnerTrainParam &tparam, GPUSet devices, size_t n_class, const HostDeviceVector<bst_float>& weights, const HostDeviceVector<bst_float>& labels, const HostDeviceVector<bst_float>& preds) { PackedReduceResult result; if (devices.IsEmpty()) { result = CpuReduceMetrics(weights, labels, preds, n_class); } #if defined(XGBOOST_USE_CUDA) else { // NOLINT if (allocators_.empty()) { devices_ = GPUSet::All(tparam.gpu_id, tparam.n_gpus); allocators_.resize(devices_.Size()); } preds.Shard(GPUDistribution::Granular(devices, n_class)); labels.Shard(devices); weights.Shard(devices); std::vector<PackedReduceResult> res_per_device(devices.Size()); #pragma omp parallel for schedule(static, 1) if (devices.Size() > 1) for (GPUSet::GpuIdType id = *devices.begin(); id < *devices.end(); ++id) { dh::safe_cuda(hipSetDevice(id)); size_t index = devices.Index(id); res_per_device.at(index) = DeviceReduceMetrics(id, index, weights, labels, preds, n_class); } for (auto const& res : res_per_device) { result += res; } } #endif // defined(XGBOOST_USE_CUDA) return result; } private: #if defined(XGBOOST_USE_CUDA) dh::PinnedMemory label_error_; GPUSet devices_; std::vector<dh::CubMemory> allocators_; #endif // defined(XGBOOST_USE_CUDA) }; /*! * \brief base class of multi-class evaluation * \tparam Derived the name of subclass */ template<typename Derived> struct EvalMClassBase : public Metric { bst_float Eval(const HostDeviceVector<bst_float> &preds, const MetaInfo &info, bool distributed) override { CHECK_NE(info.labels_.Size(), 0U) << "label set cannot be empty"; CHECK(preds.Size() % info.labels_.Size() == 0) << "label and prediction size not match"; const size_t nclass = preds.Size() / info.labels_.Size(); CHECK_GE(nclass, 1U) << "mlogloss and merror are only used for multi-class classification," << " use logloss for binary classification"; const auto ndata = static_cast<bst_omp_uint>(info.labels_.Size()); GPUSet devices = GPUSet::All(tparam_->gpu_id, tparam_->n_gpus, ndata); auto result = reducer_.Reduce(*tparam_, devices, nclass, info.weights_, info.labels_, preds); double dat[2] { result.Residue(), result.Weights() }; if (distributed) { rabit::Allreduce<rabit::op::Sum>(dat, 2); } return Derived::GetFinal(dat[0], dat[1]); } /*! * \brief to be implemented by subclass, * get evaluation result from one row * \param label label of current instance * \param pred prediction value of current instance * \param nclass number of class in the prediction */ XGBOOST_DEVICE static bst_float EvalRow(int label, const bst_float *pred, size_t nclass); /*! * \brief to be overridden by subclass, final transformation * \param esum the sum statistics returned by EvalRow * \param wsum sum of weight */ inline static bst_float GetFinal(bst_float esum, bst_float wsum) { return esum / wsum; } private: MultiClassMetricsReduction<Derived> reducer_; // used to store error message const char *error_msg_; }; /*! 
\brief match error */ struct EvalMatchError : public EvalMClassBase<EvalMatchError> { const char* Name() const override { return "merror"; } XGBOOST_DEVICE static bst_float EvalRow(int label, const bst_float *pred, size_t nclass) { return common::FindMaxIndex(pred, pred + nclass) != pred + static_cast<int>(label); } }; /*! \brief match error */ struct EvalMultiLogLoss : public EvalMClassBase<EvalMultiLogLoss> { const char* Name() const override { return "mlogloss"; } XGBOOST_DEVICE static bst_float EvalRow(int label, const bst_float *pred, size_t nclass) { const bst_float eps = 1e-16f; auto k = static_cast<size_t>(label); if (pred[k] > eps) { return -::log(pred[k]); } else { return -::log(eps); } } }; XGBOOST_REGISTER_METRIC(MatchError, "merror") .describe("Multiclass classification error.") .set_body([](const char* param) { return new EvalMatchError(); }); XGBOOST_REGISTER_METRIC(MultiLogLoss, "mlogloss") .describe("Multiclass negative loglikelihood.") .set_body([](const char* param) { return new EvalMultiLogLoss(); }); } // namespace metric } // namespace xgboost
12520a772fda38fbde6bb613340683340f4dc78a.cu
/*! * Copyright 2015-2019 by Contributors * \file multiclass_metric.cc * \brief evaluation metrics for multiclass classification. * \author Kailong Chen, Tianqi Chen */ #include <rabit/rabit.h> #include <xgboost/metric.h> #include <cmath> #include "metric_common.h" #include "../common/math.h" #include "../common/common.h" #if defined(XGBOOST_USE_CUDA) #include <thrust/execution_policy.h> // thrust::cuda::par #include <thrust/functional.h> // thrust::plus<> #include <thrust/transform_reduce.h> #include <thrust/iterator/counting_iterator.h> #include "../common/device_helpers.cuh" #endif // XGBOOST_USE_CUDA namespace xgboost { namespace metric { // tag the this file, used by force static link later. DMLC_REGISTRY_FILE_TAG(multiclass_metric); template <typename EvalRowPolicy> class MultiClassMetricsReduction { void CheckLabelError(int32_t label_error, size_t n_class) const { CHECK(label_error >= 0 && label_error < static_cast<int32_t>(n_class)) << "MultiClassEvaluation: label must be in [0, num_class)," << " num_class=" << n_class << " but found " << label_error << " in label"; } public: MultiClassMetricsReduction() = default; PackedReduceResult CpuReduceMetrics( const HostDeviceVector<bst_float>& weights, const HostDeviceVector<bst_float>& labels, const HostDeviceVector<bst_float>& preds, const size_t n_class) const { size_t ndata = labels.Size(); const auto& h_labels = labels.HostVector(); const auto& h_weights = weights.HostVector(); const auto& h_preds = preds.HostVector(); bst_float residue_sum = 0; bst_float weights_sum = 0; int label_error = 0; bool const is_null_weight = weights.Size() == 0; #pragma omp parallel for reduction(+: residue_sum, weights_sum) schedule(static) for (omp_ulong idx = 0; idx < ndata; ++idx) { bst_float weight = is_null_weight ? 1.0f : h_weights[idx]; auto label = static_cast<int>(h_labels[idx]); if (label >= 0 && label < static_cast<int>(n_class)) { residue_sum += EvalRowPolicy::EvalRow( label, h_preds.data() + idx * n_class, n_class) * weight; weights_sum += weight; } else { label_error = label; } } CheckLabelError(label_error, n_class); PackedReduceResult res { residue_sum, weights_sum }; return res; } #if defined(XGBOOST_USE_CUDA) ~MultiClassMetricsReduction() { for (GPUSet::GpuIdType id = *devices_.begin(); id < *devices_.end(); ++id) { dh::safe_cuda(cudaSetDevice(id)); size_t index = devices_.Index(id); allocators_.at(index).Free(); } } PackedReduceResult DeviceReduceMetrics( GPUSet::GpuIdType device_id, size_t device_index, const HostDeviceVector<bst_float>& weights, const HostDeviceVector<bst_float>& labels, const HostDeviceVector<bst_float>& preds, const size_t n_class) { size_t n_data = labels.DeviceSize(device_id); thrust::counting_iterator<size_t> begin(0); thrust::counting_iterator<size_t> end = begin + n_data; auto s_labels = labels.DeviceSpan(device_id); auto s_preds = preds.DeviceSpan(device_id); auto s_weights = weights.DeviceSpan(device_id); bool const is_null_weight = weights.Size() == 0; auto s_label_error = label_error_.GetSpan<int32_t>(1); s_label_error[0] = 0; PackedReduceResult result = thrust::transform_reduce( thrust::cuda::par(allocators_.at(device_index)), begin, end, [=] XGBOOST_DEVICE(size_t idx) { bst_float weight = is_null_weight ? 
1.0f : s_weights[idx]; bst_float residue = 0; auto label = static_cast<int>(s_labels[idx]); if (label >= 0 && label < static_cast<int32_t>(n_class)) { residue = EvalRowPolicy::EvalRow( label, &s_preds[idx * n_class], n_class) * weight; } else { s_label_error[0] = label; } return PackedReduceResult{ residue, weight }; }, PackedReduceResult(), thrust::plus<PackedReduceResult>()); CheckLabelError(s_label_error[0], n_class); return result; } #endif // XGBOOST_USE_CUDA PackedReduceResult Reduce( const LearnerTrainParam &tparam, GPUSet devices, size_t n_class, const HostDeviceVector<bst_float>& weights, const HostDeviceVector<bst_float>& labels, const HostDeviceVector<bst_float>& preds) { PackedReduceResult result; if (devices.IsEmpty()) { result = CpuReduceMetrics(weights, labels, preds, n_class); } #if defined(XGBOOST_USE_CUDA) else { // NOLINT if (allocators_.empty()) { devices_ = GPUSet::All(tparam.gpu_id, tparam.n_gpus); allocators_.resize(devices_.Size()); } preds.Shard(GPUDistribution::Granular(devices, n_class)); labels.Shard(devices); weights.Shard(devices); std::vector<PackedReduceResult> res_per_device(devices.Size()); #pragma omp parallel for schedule(static, 1) if (devices.Size() > 1) for (GPUSet::GpuIdType id = *devices.begin(); id < *devices.end(); ++id) { dh::safe_cuda(cudaSetDevice(id)); size_t index = devices.Index(id); res_per_device.at(index) = DeviceReduceMetrics(id, index, weights, labels, preds, n_class); } for (auto const& res : res_per_device) { result += res; } } #endif // defined(XGBOOST_USE_CUDA) return result; } private: #if defined(XGBOOST_USE_CUDA) dh::PinnedMemory label_error_; GPUSet devices_; std::vector<dh::CubMemory> allocators_; #endif // defined(XGBOOST_USE_CUDA) }; /*! * \brief base class of multi-class evaluation * \tparam Derived the name of subclass */ template<typename Derived> struct EvalMClassBase : public Metric { bst_float Eval(const HostDeviceVector<bst_float> &preds, const MetaInfo &info, bool distributed) override { CHECK_NE(info.labels_.Size(), 0U) << "label set cannot be empty"; CHECK(preds.Size() % info.labels_.Size() == 0) << "label and prediction size not match"; const size_t nclass = preds.Size() / info.labels_.Size(); CHECK_GE(nclass, 1U) << "mlogloss and merror are only used for multi-class classification," << " use logloss for binary classification"; const auto ndata = static_cast<bst_omp_uint>(info.labels_.Size()); GPUSet devices = GPUSet::All(tparam_->gpu_id, tparam_->n_gpus, ndata); auto result = reducer_.Reduce(*tparam_, devices, nclass, info.weights_, info.labels_, preds); double dat[2] { result.Residue(), result.Weights() }; if (distributed) { rabit::Allreduce<rabit::op::Sum>(dat, 2); } return Derived::GetFinal(dat[0], dat[1]); } /*! * \brief to be implemented by subclass, * get evaluation result from one row * \param label label of current instance * \param pred prediction value of current instance * \param nclass number of class in the prediction */ XGBOOST_DEVICE static bst_float EvalRow(int label, const bst_float *pred, size_t nclass); /*! * \brief to be overridden by subclass, final transformation * \param esum the sum statistics returned by EvalRow * \param wsum sum of weight */ inline static bst_float GetFinal(bst_float esum, bst_float wsum) { return esum / wsum; } private: MultiClassMetricsReduction<Derived> reducer_; // used to store error message const char *error_msg_; }; /*! 
\brief match error */ struct EvalMatchError : public EvalMClassBase<EvalMatchError> { const char* Name() const override { return "merror"; } XGBOOST_DEVICE static bst_float EvalRow(int label, const bst_float *pred, size_t nclass) { return common::FindMaxIndex(pred, pred + nclass) != pred + static_cast<int>(label); } }; /*! \brief match error */ struct EvalMultiLogLoss : public EvalMClassBase<EvalMultiLogLoss> { const char* Name() const override { return "mlogloss"; } XGBOOST_DEVICE static bst_float EvalRow(int label, const bst_float *pred, size_t nclass) { const bst_float eps = 1e-16f; auto k = static_cast<size_t>(label); if (pred[k] > eps) { return -std::log(pred[k]); } else { return -std::log(eps); } } }; XGBOOST_REGISTER_METRIC(MatchError, "merror") .describe("Multiclass classification error.") .set_body([](const char* param) { return new EvalMatchError(); }); XGBOOST_REGISTER_METRIC(MultiLogLoss, "mlogloss") .describe("Multiclass negative loglikelihood.") .set_body([](const char* param) { return new EvalMultiLogLoss(); }); } // namespace metric } // namespace xgboost
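The metric code above leans on PackedReduceResult from metric_common.h, whose definition is not shown here. Inferred purely from the usage in this file (two-value construction inside a device lambda, thrust::plus reduction, operator+= on the host, and the Residue()/Weights() accessors), its interface is roughly the following sketch; the real definition may differ in details:

// Assumed shape, inferred from usage in this file only.
struct PackedReduceResult {
    double residue_sum_ { 0 };
    double weights_sum_ { 0 };

    XGBOOST_DEVICE PackedReduceResult() {}
    XGBOOST_DEVICE PackedReduceResult(double residue, double weight)
        : residue_sum_{residue}, weights_sum_{weight} {}

    XGBOOST_DEVICE PackedReduceResult operator+(PackedReduceResult const& other) const {
        return PackedReduceResult{residue_sum_ + other.residue_sum_,
                                  weights_sum_ + other.weights_sum_};
    }
    PackedReduceResult& operator+=(PackedReduceResult const& other) {
        residue_sum_ += other.residue_sum_;
        weights_sum_ += other.weights_sum_;
        return *this;
    }
    double Residue() const { return residue_sum_; }
    double Weights() const { return weights_sum_; }
};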
826b3ead241224a07ba2f24778d3cb972382f15d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <algorithm> #include <cstdlib> #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> #include <math.h> unsigned int N_SIMS, N_RANDS, N_BLK, N_THRD, N_BYTES; const unsigned int MAX_THREADS = 512; // max threads per block // Calculate and return mean of an array of floats float calcMean(float arr[], unsigned int const n) { float sum = 0.0; for (unsigned int i=0; i<n; i++) { sum += (arr[i] / n); } return sum; } float calcRMSE(float arr[], unsigned int const n) { double sum = 0.0; double err = 0.0; for (unsigned int i=0; i<n; i++) { err = abs(arr[i] - M_PI); sum += err * err; } return (float) (sum / n); } __host__ hipEvent_t get_time(void) { hipEvent_t time; hipEventCreate(&time); hipEventRecord(time); return time; } // Estimate pi using Monte Carlo simulations __global__ void est_pi(float *pi, unsigned int N, unsigned int R) { unsigned int const tid = (blockIdx.x * blockDim.x) + threadIdx.x; if (tid < N) { hiprandState_t state; // initialize rand state hiprand_init(tid, 0, 0, &state); // set seed to thread index int inBound = 0; // number of points within circle float x, y; for (int i = 0; i < R; i++) { x = hiprand_uniform(&state)*2 - 1; // x-coord y = hiprand_uniform(&state)*2 - 1; // y-coord inBound += (abs(y) < sqrtf( 1.0 - x*x )); // assume circle of radius=1 } pi[tid] = 4.0f * inBound / R; // pi / 4 = inBound / total } } int main(int argc, char* argv[]) { if (argc == 3) { // get number of simulations based on CMDLINE input N_SIMS = atoi(argv[1]); N_RANDS = atoi(argv[2]); } else { printf("Usage: %s [nSimulations] [nRandomNumbers].\n", argv[0]); return EXIT_FAILURE; } N_BLK = N_SIMS / MAX_THREADS + 1; // min of one block N_THRD = ::min(N_SIMS, MAX_THREADS); // num of threads per block N_BYTES = N_SIMS * sizeof(float); // size of loss array printf("Running %u simulations of %u points each...\n", N_SIMS, N_RANDS); hipEvent_t start = get_time(); // start clock float *h_pi, *d_pi; h_pi = (float*) malloc(N_BYTES); // allocate host output hipMalloc((void **) &d_pi, N_BYTES); hipLaunchKernelGGL(( est_pi), dim3(N_BLK), dim3(N_THRD), 0, 0, d_pi, N_SIMS, N_RANDS); hipMemcpy(h_pi, d_pi, N_BYTES, hipMemcpyDeviceToHost ); // copy back hipEvent_t stop = get_time(); // stop clock hipEventSynchronize(stop); float dur, mean_pi, rmse; mean_pi = calcMean(h_pi, N_SIMS); rmse = calcRMSE(h_pi, N_SIMS); hipEventElapsedTime(&dur, start, stop); printf("\tTook %.3f ms, output = %f, RMSE = %f, total error = %f\n", dur, mean_pi, rmse, abs(mean_pi-M_PI)); return EXIT_SUCCESS; }
826b3ead241224a07ba2f24778d3cb972382f15d.cu
#include <stdio.h> #include <stdlib.h> #include <algorithm> #include <cstdlib> #include <curand.h> #include <curand_kernel.h> #include <math.h> unsigned int N_SIMS, N_RANDS, N_BLK, N_THRD, N_BYTES; const unsigned int MAX_THREADS = 512; // max threads per block // Calculate and return mean of an array of floats float calcMean(float arr[], unsigned int const n) { float sum = 0.0; for (unsigned int i=0; i<n; i++) { sum += (arr[i] / n); } return sum; } float calcRMSE(float arr[], unsigned int const n) { double sum = 0.0; double err = 0.0; for (unsigned int i=0; i<n; i++) { err = abs(arr[i] - M_PI); sum += err * err; } return (float) (sum / n); } __host__ cudaEvent_t get_time(void) { cudaEvent_t time; cudaEventCreate(&time); cudaEventRecord(time); return time; } // Estimate pi using Monte Carlo simulations __global__ void est_pi(float *pi, unsigned int N, unsigned int R) { unsigned int const tid = (blockIdx.x * blockDim.x) + threadIdx.x; if (tid < N) { curandState_t state; // initialize rand state curand_init(tid, 0, 0, &state); // set seed to thread index int inBound = 0; // number of points within circle float x, y; for (int i = 0; i < R; i++) { x = curand_uniform(&state)*2 - 1; // x-coord y = curand_uniform(&state)*2 - 1; // y-coord inBound += (abs(y) < sqrtf( 1.0 - x*x )); // assume circle of radius=1 } pi[tid] = 4.0f * inBound / R; // pi / 4 = inBound / total } } int main(int argc, char* argv[]) { if (argc == 3) { // get number of simulations based on CMDLINE input N_SIMS = atoi(argv[1]); N_RANDS = atoi(argv[2]); } else { printf("Usage: %s [nSimulations] [nRandomNumbers].\n", argv[0]); return EXIT_FAILURE; } N_BLK = N_SIMS / MAX_THREADS + 1; // min of one block N_THRD = std::min(N_SIMS, MAX_THREADS); // num of threads per block N_BYTES = N_SIMS * sizeof(float); // size of loss array printf("Running %u simulations of %u points each...\n", N_SIMS, N_RANDS); cudaEvent_t start = get_time(); // start clock float *h_pi, *d_pi; h_pi = (float*) malloc(N_BYTES); // allocate host output cudaMalloc((void **) &d_pi, N_BYTES); est_pi<<<N_BLK, N_THRD>>>(d_pi, N_SIMS, N_RANDS); cudaMemcpy(h_pi, d_pi, N_BYTES, cudaMemcpyDeviceToHost ); // copy back cudaEvent_t stop = get_time(); // stop clock cudaEventSynchronize(stop); float dur, mean_pi, rmse; mean_pi = calcMean(h_pi, N_SIMS); rmse = calcRMSE(h_pi, N_SIMS); cudaEventElapsedTime(&dur, start, stop); printf("\tTook %.3f ms, output = %f, RMSE = %f, total error = %f\n", dur, mean_pi, rmse, abs(mean_pi-M_PI)); return EXIT_SUCCESS; }
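The Monte Carlo program above never frees h_pi or d_pi, never destroys its timing events, and does not check the kernel launch. A small cleanup/checking sketch that could be appended just before `return EXIT_SUCCESS;` (illustrative only; start, stop, h_pi, and d_pi are the variables declared in main above):

// Report any launch or execution error before trusting the results.
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
    fprintf(stderr, "est_pi kernel failed: %s\n", cudaGetErrorString(err));
    return EXIT_FAILURE;
}

// Release resources created in main().
cudaEventDestroy(start);
cudaEventDestroy(stop);
cudaFree(d_pi);
free(h_pi);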
e40a797fedff5f9b78d8c9ae54fc6b9dca630ca1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //CUDA GPU Implementation of Image Smoothing and gradient processing //Bhallaji Venkatesan and Divya Sampath Kumar // Compile by nvcc -arch compute_53 -std=c++11 -I ~/NVIDIA_CUDA-8.0_Samples/common/inc/ -o GPU_imageread_smooth GPU_imageread_smooth.cu #define _DEFINE_DEPRECATED_HASH_CLASSES 0 #include <stdlib.h> #include <time.h> #include <stdio.h> //#include <cutil_inline.h> #include <helper_cuda.h> #include <helper_cuda_gl.h> #include <helper_cuda_drvapi.h> #include <helper_functions.h> #include <helper_image.h> #include <helper_math.h> #include <helper_string.h> #include <helper_timer.h> //#include "Convolution.h" #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <math.h> #include <assert.h> #include <string.h> #include <errno.h> #include <fcntl.h> #include <unistd.h> #include <iostream> #include <cmath> #include <limits> #include <sstream> #include <hash_set> #include <unordered_set> #include <iterator> #include <vector> #define STRONG_EDGE 0xFF #define NON_EDGE 0x0 #include "bmp.h" using namespace std; std::unordered_set<unsigned int> visitedPixels; char *BMPInFile = "car.bmp"; char *BMPOutFile = "suppress.bmp"; //computeImageGradient(); //suppressNonmaximums(); //texture<float, 2, hipReadModeElementType> deviceMatrixTexture; texture<unsigned char, 2, hipReadModeElementType> deviceMatrixTexture; __device__ __constant__ float deviceXGradientMask[9] = { -1, 0, 1, -2, 0, 2, -1, 0, 1 }; __device__ __constant__ float deviceYGradientMask[9] = { 1, 2, 1, 0, 0, 0, -1, -2, -1 }; __device__ __constant__ float deviceGaussianFilterMask[25] ={ 2, 4, 5, 4, 2, 4, 9, 12, 9, 4, 5, 12, 15, 12, 5, 4, 9, 12, 9, 4, 2, 4, 5, 4, 2 } ; __global__ void deviceGaussianConvolution(unsigned char * output, int matrixWidth) { int outputRow = blockIdx.y * blockDim.y + threadIdx.y; int outputColumn = blockIdx.x * blockDim.x + threadIdx.x; float accumulator = 0.0; #pragma unroll for(int i = -2; i <= 2; ++i) { unsigned matrixColumn = outputColumn + i; #pragma unroll for(int j = -2; j <= 2; ++j) { accumulator += deviceGaussianFilterMask[(2 + i) + (2 + j)] * tex2D(deviceMatrixTexture, matrixColumn, outputRow + j); } } __syncthreads(); output[outputRow * matrixWidth + outputColumn] = accumulator / 159; } __global__ void deviceComputeGradient(unsigned char* outputGradient, unsigned matrixWidth, unsigned int* outputEdgeDirectionClassifications) { int outputRow = blockIdx.y * blockDim.y + threadIdx.y; int outputColumn = blockIdx.x * blockDim.x + threadIdx.x; // Get gradients float xAccumulator = 0.0; float yAccumulator = 0.0; #pragma unroll for(int i = -1; i <= 1; ++i) { unsigned matrixColumn = outputColumn + i; #pragma unroll for(int j = -1; j <= 1; ++j) { int maskIndex = (1 + i)* 3 + (1 + j); //printf("%f\n", tex2D(deviceMatrixTexture, matrixColumn, outputRow + j)); xAccumulator += deviceXGradientMask[maskIndex] * tex2D(deviceMatrixTexture, matrixColumn, outputRow + j); yAccumulator += deviceYGradientMask[maskIndex] * tex2D(deviceMatrixTexture, matrixColumn, outputRow + j); } } int matrixIndex = outputRow * matrixWidth + outputColumn; // Get gradient magnitude outputGradient[matrixIndex] = abs(xAccumulator) + abs(yAccumulator); // Determine edge direction float edgeDirection = atan2(yAccumulator, xAccumulator) * (180 / 3.14159265) + 180.0; // Classify edge directions if((edgeDirection >= 22.5 && edgeDirection < 67.5) || (edgeDirection >= 202.5 && edgeDirection < 247.5)) { outputEdgeDirectionClassifications[matrixIndex] 
= 1; } else if((edgeDirection >= 67.5 && edgeDirection < 112.5) || (edgeDirection >= 247.5 && edgeDirection < 292.5)) { outputEdgeDirectionClassifications[matrixIndex] = 2; } else if((edgeDirection >= 112.5 && edgeDirection < 157.5) || (edgeDirection >= 292.5 && edgeDirection < 337.5)) { outputEdgeDirectionClassifications[matrixIndex] = 3; } else { outputEdgeDirectionClassifications[matrixIndex] = 0; } } void computeGradient(unsigned char * inputMatrix, int matrixWidth, unsigned char * outputGradient, unsigned int* outputEdgeDirections) { // Create timer. //unsigned int timer = 0; //CUT_SAFE_CALL(cutCreateTimer(&timer)); // Compute memory sizes. int matrixMemorySize = matrixWidth * matrixWidth * sizeof(unsigned char); // Set up device arrays. hipArray* deviceMatrixArray = NULL; unsigned char* deviceGradient = NULL; unsigned int* deviceEdgeDirections = NULL; hipChannelFormatDesc channelDesc = hipCreateChannelDesc<unsigned char>(); hipMallocArray(&deviceMatrixArray, &channelDesc, matrixWidth, matrixWidth); hipMalloc((void**)&deviceGradient, matrixMemorySize); hipMalloc((void**)&deviceEdgeDirections, matrixWidth * matrixWidth * sizeof(unsigned int)); // Copy inputs to device. hipMemcpyToArray(deviceMatrixArray, 0, 0, inputMatrix, matrixMemorySize, hipMemcpyHostToDevice); // Set up image matrix as a texture. deviceMatrixTexture.addressMode[0] = hipAddressModeClamp; deviceMatrixTexture.addressMode[1] = hipAddressModeClamp; hipBindTextureToArray(deviceMatrixTexture, deviceMatrixArray); // Start timer. //CUT_SAFE_CALL(cutStartTimer(timer)); // Do it! dim3 dimGrid(matrixWidth / 16, matrixWidth / 16); dim3 dimBlock(16, 16); hipLaunchKernelGGL(( deviceGaussianConvolution), dim3(dimGrid), dim3(dimBlock), 0, 0, deviceGradient, matrixWidth); hipMemcpy(outputGradient, deviceGradient, matrixMemorySize, hipMemcpyDeviceToHost); hipUnbindTexture(deviceMatrixTexture); hipMemcpyToArray(deviceMatrixArray, 0, 0, outputGradient, matrixMemorySize, hipMemcpyHostToDevice); hipBindTextureToArray(deviceMatrixTexture, deviceMatrixArray); hipLaunchKernelGGL(( deviceComputeGradient), dim3(dimGrid), dim3(dimBlock), 0, 0, deviceGradient, matrixWidth, deviceEdgeDirections); // Check for errors. //CUT_CHECK_ERROR("Kernel execution failed!"); // Copy device result to host. 
hipMemcpy(outputGradient, deviceGradient, matrixMemorySize, hipMemcpyDeviceToHost); hipMemcpy(outputEdgeDirections, deviceEdgeDirections, matrixMemorySize, hipMemcpyDeviceToHost); hipFreeArray(deviceMatrixArray); hipFree(deviceGradient); hipFree(deviceEdgeDirections); hipUnbindTexture(deviceMatrixTexture); } void BitMapRead(char *file,struct bmp_header *bmp, struct dib_header *dib, unsigned char **data, unsigned char **palete) { size_t palete_size; int fd; if((fd = open(file, O_RDONLY)) <0) FATAL("Open Source"); if(read(fd, bmp, BMP_SIZE) != BMP_SIZE) FATAL("Read BMP Header"); if(read(fd, dib, DIB_SIZE) != DIB_SIZE) FATAL("Read DIB Header"); assert(dib->bpp ==8); palete_size = bmp->offset - BMP_SIZE - DIB_SIZE; if(palete_size > 0) { *palete = (unsigned char *)malloc(palete_size); int go = read(fd, *palete, palete_size); if(go != palete_size) { FATAL("Read Palete"); } } *data = (unsigned char *)malloc(dib->image_size); if(read(fd, *data, dib->image_size) != dib->image_size) //close(fd); FATAL("Read Image"); close(fd); } void BitMapWrite(char *file, struct bmp_header *bmp, struct dib_header *dib, unsigned char *data, unsigned char *palete) { size_t palete_size; int fd; palete_size = bmp->offset - BMP_SIZE - DIB_SIZE; if((fd = open(file, O_WRONLY | O_CREAT | O_TRUNC,S_IRUSR | S_IWUSR | S_IRGRP)) <0) FATAL("Open Destination"); if(write(fd, bmp, BMP_SIZE) != BMP_SIZE) FATAL("Write BMP Header"); if(write(fd, dib, DIB_SIZE) != DIB_SIZE) FATAL("Write DIB Header"); if(palete_size != 0) { if(write(fd, palete, palete_size) != palete_size) FATAL("Write Palete"); } if(write(fd, data, dib->image_size) != dib->image_size) FATAL("Write Image"); close(fd); } int getClockwisePerpendicularIndex(unsigned i, unsigned j, unsigned int edgeDirectionClassification, int width) { int clockwisePerpendicularI; int clockwisePerpendicularJ; switch(edgeDirectionClassification) { case 0: clockwisePerpendicularI = i - 1; clockwisePerpendicularJ = j; break; case 1: clockwisePerpendicularI = i - 1; clockwisePerpendicularJ = j + 1; break; case 2: clockwisePerpendicularI = i; clockwisePerpendicularJ = j + 1; break; case 3: clockwisePerpendicularI = i + 1; clockwisePerpendicularJ = j + 1; break; } //cout << "\tClockwise perpendicular pixel: (" << clockwisePerpendicularI << ", " << clockwisePerpendicularJ << ") = "; if(clockwisePerpendicularI < 0 || clockwisePerpendicularJ < 0 || clockwisePerpendicularI >= width || clockwisePerpendicularJ >= width) { return -1; } else { return clockwisePerpendicularI * width + clockwisePerpendicularJ; } } int getCounterClockwisePerpendicularIndex(unsigned i, unsigned j, unsigned int edgeDirectionClassification, int width) { int counterClockwisePerpendicularI; int counterClockwisePerpendicularJ; switch(edgeDirectionClassification) { case 0: counterClockwisePerpendicularI = i + 1; counterClockwisePerpendicularJ = j; break; case 1: counterClockwisePerpendicularI = i + 1; counterClockwisePerpendicularJ = j - 1; break; case 2: counterClockwisePerpendicularI = i; counterClockwisePerpendicularJ = j - 1; break; case 3: counterClockwisePerpendicularI = i - 1; counterClockwisePerpendicularJ = j - 1; break; } //cout << "\tCounterclockwise perpendicular pixel: (" << counterClockwisePerpendicularI << ", " << counterClockwisePerpendicularJ << ") = "; if(counterClockwisePerpendicularI < 0 || counterClockwisePerpendicularJ < 0 ||counterClockwisePerpendicularJ >= width || counterClockwisePerpendicularJ >= width) { return -1; } else { return counterClockwisePerpendicularI * width + counterClockwisePerpendicularJ; 
} } void suppressNonmaximums(int width, unsigned int* edgeDirectionClassifications, unsigned char* gradient, int imgsize) { unsigned int* edc_local = (unsigned int *)malloc(imgsize); for(unsigned int i = 0; i < imgsize; ++i) { *(edc_local+i) = *(edgeDirectionClassifications + i); } for(unsigned int i = 0; i < width; ++i) { for(unsigned int j = 0; j < width; ++j) { unsigned int pixelIndex = i * width + j; int clockwisePerpendicularIndex = edc_local[pixelIndex];//getClockwisePerpendicularIndex(i, j, edgeDirectionClassifications[pixelIndex], width); float clockwisePerpendicularValue; float counterClockwisePerpendicularValue; if(clockwisePerpendicularIndex == -1) { clockwisePerpendicularValue = 0; } else { clockwisePerpendicularValue = gradient[clockwisePerpendicularIndex]; } int counterClockwisePerpendicularIndex =edc_local[pixelIndex];// getCounterClockwisePerpendicularIndex(i, j, edgeDirectionClassifications[pixelIndex], width); if(counterClockwisePerpendicularIndex == -1) { counterClockwisePerpendicularValue = 0; } else { if(counterClockwisePerpendicularIndex < imgsize && counterClockwisePerpendicularIndex >= 0) { counterClockwisePerpendicularValue = gradient[counterClockwisePerpendicularIndex]; } } if(gradient[pixelIndex] <= clockwisePerpendicularValue || gradient[pixelIndex] <= counterClockwisePerpendicularValue) { //cout << "\tPixel suppressed." << endl; gradient[pixelIndex] = 0; } else { //cout << "\tPixel retained." << endl; } } } } void visitNeighbors(int i, int j, float lowThreshold, unsigned char* gradientImage, unsigned char* outputEdges, int width, int imgsize) { int pixelIndex = i * width + j; if(i == 0 || j == 0 || i == width - 1 || j == width - 1 || visitedPixels.find(pixelIndex) != visitedPixels.end() ||gradientImage[pixelIndex] < lowThreshold) { //(pixelIndex); visitedPixels.insert(pixelIndex); return; } outputEdges[pixelIndex] =STRONG_EDGE; visitedPixels.insert(pixelIndex); visitNeighbors(i - 1, j - 1, lowThreshold, gradientImage, outputEdges, width, imgsize); visitNeighbors(i - 1, j, lowThreshold, gradientImage, outputEdges, width, imgsize); visitNeighbors(i - 1, j + 1, lowThreshold, gradientImage, outputEdges, width, imgsize); visitNeighbors(i, j + 1, lowThreshold, gradientImage, outputEdges, width, imgsize); visitNeighbors(i + 1, j + 1, lowThreshold, gradientImage, outputEdges, width, imgsize); visitNeighbors(i + 1, j, lowThreshold, gradientImage, outputEdges, width, imgsize); visitNeighbors(i + 1, j - 1, lowThreshold, gradientImage, outputEdges, width, imgsize); visitNeighbors(i, j - 1, lowThreshold, gradientImage, outputEdges, width, imgsize); } void performHysteresis(unsigned char* gradientImage, float highThreshold, float lowThreshold, unsigned char* outputEdges, int width, int imgsize) { for(int i = 0; i < width; ++i) { for(int j = 0; j < width; ++j) { unsigned pixelIndex = i * width + j; // Mark out borders and all pixels below the high threshold. 
if(gradientImage[pixelIndex] >= highThreshold) { visitedPixels.insert(pixelIndex); outputEdges[pixelIndex] = STRONG_EDGE; visitNeighbors(i, j, lowThreshold, gradientImage, outputEdges, width, imgsize); } } } } int main() { //void computeGradient(const float* inputMatrix, int matrixWidth, float* outputGradient) //deviceGaussianConvolution<<<dimGrid, dimBlock>>>(deviceGradient, matrixWidth); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); struct bmp_header bmp; struct dib_header dib; unsigned char *palete = NULL; unsigned char *data = NULL; unsigned char *out = NULL; unsigned int *edgeDirectionClassifications = NULL; BitMapRead(BMPInFile, &bmp, &dib, &data, &palete); out = (unsigned char *)malloc(dib.image_size); edgeDirectionClassifications = (unsigned int *)malloc(dib.image_size); hipEventRecord(start); //Gaussian Smoothening computeGradient(data, dib.width, out,edgeDirectionClassifications); //convolution(data, out, dib.width, gaussianMask, 5, gaussianMaskWeight); BitMapWrite("GPU_Gaussian_Smooth_Gradient.bmp", &bmp, &dib, out, palete); hipEventRecord(stop); hipEventSynchronize(stop); data = NULL; free(data); suppressNonmaximums(dib.width, edgeDirectionClassifications, out, dib.image_size); BitMapWrite("GPU_Gaussian_Smooth_Gradient_suppression.bmp", &bmp, &dib, out, palete); float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); printf("Elapsed Time for smoothing:%f\n",milliseconds); }
e40a797fedff5f9b78d8c9ae54fc6b9dca630ca1.cu
//CUDA GPU Implementation of Image Smoothing and gradient processing //Bhallaji Venkatesan and Divya Sampath Kumar // Compile by nvcc -arch compute_53 -std=c++11 -I ~/NVIDIA_CUDA-8.0_Samples/common/inc/ -o GPU_imageread_smooth GPU_imageread_smooth.cu #define _DEFINE_DEPRECATED_HASH_CLASSES 0 #include <stdlib.h> #include <time.h> #include <stdio.h> //#include <cutil_inline.h> #include <helper_cuda.h> #include <helper_cuda_gl.h> #include <helper_cuda_drvapi.h> #include <helper_functions.h> #include <helper_image.h> #include <helper_math.h> #include <helper_string.h> #include <helper_timer.h> //#include "Convolution.h" #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <math.h> #include <assert.h> #include <string.h> #include <errno.h> #include <fcntl.h> #include <unistd.h> #include <iostream> #include <cmath> #include <limits> #include <sstream> #include <hash_set> #include <unordered_set> #include <iterator> #include <vector> #define STRONG_EDGE 0xFF #define NON_EDGE 0x0 #include "bmp.h" using namespace std; std::unordered_set<unsigned int> visitedPixels; char *BMPInFile = "car.bmp"; char *BMPOutFile = "suppress.bmp"; //computeImageGradient(); //suppressNonmaximums(); //texture<float, 2, cudaReadModeElementType> deviceMatrixTexture; texture<unsigned char, 2, cudaReadModeElementType> deviceMatrixTexture; __device__ __constant__ float deviceXGradientMask[9] = { -1, 0, 1, -2, 0, 2, -1, 0, 1 }; __device__ __constant__ float deviceYGradientMask[9] = { 1, 2, 1, 0, 0, 0, -1, -2, -1 }; __device__ __constant__ float deviceGaussianFilterMask[25] ={ 2, 4, 5, 4, 2, 4, 9, 12, 9, 4, 5, 12, 15, 12, 5, 4, 9, 12, 9, 4, 2, 4, 5, 4, 2 } ; __global__ void deviceGaussianConvolution(unsigned char * output, int matrixWidth) { int outputRow = blockIdx.y * blockDim.y + threadIdx.y; int outputColumn = blockIdx.x * blockDim.x + threadIdx.x; float accumulator = 0.0; #pragma unroll for(int i = -2; i <= 2; ++i) { unsigned matrixColumn = outputColumn + i; #pragma unroll for(int j = -2; j <= 2; ++j) { accumulator += deviceGaussianFilterMask[(2 + i) + (2 + j)] * tex2D(deviceMatrixTexture, matrixColumn, outputRow + j); } } __syncthreads(); output[outputRow * matrixWidth + outputColumn] = accumulator / 159; } __global__ void deviceComputeGradient(unsigned char* outputGradient, unsigned matrixWidth, unsigned int* outputEdgeDirectionClassifications) { int outputRow = blockIdx.y * blockDim.y + threadIdx.y; int outputColumn = blockIdx.x * blockDim.x + threadIdx.x; // Get gradients float xAccumulator = 0.0; float yAccumulator = 0.0; #pragma unroll for(int i = -1; i <= 1; ++i) { unsigned matrixColumn = outputColumn + i; #pragma unroll for(int j = -1; j <= 1; ++j) { int maskIndex = (1 + i)* 3 + (1 + j); //printf("%f\n", tex2D(deviceMatrixTexture, matrixColumn, outputRow + j)); xAccumulator += deviceXGradientMask[maskIndex] * tex2D(deviceMatrixTexture, matrixColumn, outputRow + j); yAccumulator += deviceYGradientMask[maskIndex] * tex2D(deviceMatrixTexture, matrixColumn, outputRow + j); } } int matrixIndex = outputRow * matrixWidth + outputColumn; // Get gradient magnitude outputGradient[matrixIndex] = abs(xAccumulator) + abs(yAccumulator); // Determine edge direction float edgeDirection = atan2(yAccumulator, xAccumulator) * (180 / 3.14159265) + 180.0; // Classify edge directions if((edgeDirection >= 22.5 && edgeDirection < 67.5) || (edgeDirection >= 202.5 && edgeDirection < 247.5)) { outputEdgeDirectionClassifications[matrixIndex] = 1; } else if((edgeDirection >= 67.5 && edgeDirection < 112.5) || (edgeDirection >= 
247.5 && edgeDirection < 292.5)) { outputEdgeDirectionClassifications[matrixIndex] = 2; } else if((edgeDirection >= 112.5 && edgeDirection < 157.5) || (edgeDirection >= 292.5 && edgeDirection < 337.5)) { outputEdgeDirectionClassifications[matrixIndex] = 3; } else { outputEdgeDirectionClassifications[matrixIndex] = 0; } } void computeGradient(unsigned char * inputMatrix, int matrixWidth, unsigned char * outputGradient, unsigned int* outputEdgeDirections) { // Create timer. //unsigned int timer = 0; //CUT_SAFE_CALL(cutCreateTimer(&timer)); // Compute memory sizes. int matrixMemorySize = matrixWidth * matrixWidth * sizeof(unsigned char); // Set up device arrays. cudaArray* deviceMatrixArray = NULL; unsigned char* deviceGradient = NULL; unsigned int* deviceEdgeDirections = NULL; cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<unsigned char>(); cudaMallocArray(&deviceMatrixArray, &channelDesc, matrixWidth, matrixWidth); cudaMalloc((void**)&deviceGradient, matrixMemorySize); cudaMalloc((void**)&deviceEdgeDirections, matrixWidth * matrixWidth * sizeof(unsigned int)); // Copy inputs to device. cudaMemcpyToArray(deviceMatrixArray, 0, 0, inputMatrix, matrixMemorySize, cudaMemcpyHostToDevice); // Set up image matrix as a texture. deviceMatrixTexture.addressMode[0] = cudaAddressModeClamp; deviceMatrixTexture.addressMode[1] = cudaAddressModeClamp; cudaBindTextureToArray(deviceMatrixTexture, deviceMatrixArray); // Start timer. //CUT_SAFE_CALL(cutStartTimer(timer)); // Do it! dim3 dimGrid(matrixWidth / 16, matrixWidth / 16); dim3 dimBlock(16, 16); deviceGaussianConvolution<<<dimGrid, dimBlock>>>(deviceGradient, matrixWidth); cudaMemcpy(outputGradient, deviceGradient, matrixMemorySize, cudaMemcpyDeviceToHost); cudaUnbindTexture(deviceMatrixTexture); cudaMemcpyToArray(deviceMatrixArray, 0, 0, outputGradient, matrixMemorySize, cudaMemcpyHostToDevice); cudaBindTextureToArray(deviceMatrixTexture, deviceMatrixArray); deviceComputeGradient<<<dimGrid, dimBlock>>>(deviceGradient, matrixWidth, deviceEdgeDirections); // Check for errors. //CUT_CHECK_ERROR("Kernel execution failed!"); // Copy device result to host. 
cudaMemcpy(outputGradient, deviceGradient, matrixMemorySize, cudaMemcpyDeviceToHost); cudaMemcpy(outputEdgeDirections, deviceEdgeDirections, matrixMemorySize, cudaMemcpyDeviceToHost); cudaFreeArray(deviceMatrixArray); cudaFree(deviceGradient); cudaFree(deviceEdgeDirections); cudaUnbindTexture(deviceMatrixTexture); } void BitMapRead(char *file,struct bmp_header *bmp, struct dib_header *dib, unsigned char **data, unsigned char **palete) { size_t palete_size; int fd; if((fd = open(file, O_RDONLY)) <0) FATAL("Open Source"); if(read(fd, bmp, BMP_SIZE) != BMP_SIZE) FATAL("Read BMP Header"); if(read(fd, dib, DIB_SIZE) != DIB_SIZE) FATAL("Read DIB Header"); assert(dib->bpp ==8); palete_size = bmp->offset - BMP_SIZE - DIB_SIZE; if(palete_size > 0) { *palete = (unsigned char *)malloc(palete_size); int go = read(fd, *palete, palete_size); if(go != palete_size) { FATAL("Read Palete"); } } *data = (unsigned char *)malloc(dib->image_size); if(read(fd, *data, dib->image_size) != dib->image_size) //close(fd); FATAL("Read Image"); close(fd); } void BitMapWrite(char *file, struct bmp_header *bmp, struct dib_header *dib, unsigned char *data, unsigned char *palete) { size_t palete_size; int fd; palete_size = bmp->offset - BMP_SIZE - DIB_SIZE; if((fd = open(file, O_WRONLY | O_CREAT | O_TRUNC,S_IRUSR | S_IWUSR | S_IRGRP)) <0) FATAL("Open Destination"); if(write(fd, bmp, BMP_SIZE) != BMP_SIZE) FATAL("Write BMP Header"); if(write(fd, dib, DIB_SIZE) != DIB_SIZE) FATAL("Write DIB Header"); if(palete_size != 0) { if(write(fd, palete, palete_size) != palete_size) FATAL("Write Palete"); } if(write(fd, data, dib->image_size) != dib->image_size) FATAL("Write Image"); close(fd); } int getClockwisePerpendicularIndex(unsigned i, unsigned j, unsigned int edgeDirectionClassification, int width) { int clockwisePerpendicularI; int clockwisePerpendicularJ; switch(edgeDirectionClassification) { case 0: clockwisePerpendicularI = i - 1; clockwisePerpendicularJ = j; break; case 1: clockwisePerpendicularI = i - 1; clockwisePerpendicularJ = j + 1; break; case 2: clockwisePerpendicularI = i; clockwisePerpendicularJ = j + 1; break; case 3: clockwisePerpendicularI = i + 1; clockwisePerpendicularJ = j + 1; break; } //cout << "\tClockwise perpendicular pixel: (" << clockwisePerpendicularI << ", " << clockwisePerpendicularJ << ") = "; if(clockwisePerpendicularI < 0 || clockwisePerpendicularJ < 0 || clockwisePerpendicularI >= width || clockwisePerpendicularJ >= width) { return -1; } else { return clockwisePerpendicularI * width + clockwisePerpendicularJ; } } int getCounterClockwisePerpendicularIndex(unsigned i, unsigned j, unsigned int edgeDirectionClassification, int width) { int counterClockwisePerpendicularI; int counterClockwisePerpendicularJ; switch(edgeDirectionClassification) { case 0: counterClockwisePerpendicularI = i + 1; counterClockwisePerpendicularJ = j; break; case 1: counterClockwisePerpendicularI = i + 1; counterClockwisePerpendicularJ = j - 1; break; case 2: counterClockwisePerpendicularI = i; counterClockwisePerpendicularJ = j - 1; break; case 3: counterClockwisePerpendicularI = i - 1; counterClockwisePerpendicularJ = j - 1; break; } //cout << "\tCounterclockwise perpendicular pixel: (" << counterClockwisePerpendicularI << ", " << counterClockwisePerpendicularJ << ") = "; if(counterClockwisePerpendicularI < 0 || counterClockwisePerpendicularJ < 0 ||counterClockwisePerpendicularJ >= width || counterClockwisePerpendicularJ >= width) { return -1; } else { return counterClockwisePerpendicularI * width + 
counterClockwisePerpendicularJ; } } void suppressNonmaximums(int width, unsigned int* edgeDirectionClassifications, unsigned char* gradient, int imgsize) { unsigned int* edc_local = (unsigned int *)malloc(imgsize); for(unsigned int i = 0; i < imgsize; ++i) { *(edc_local+i) = *(edgeDirectionClassifications + i); } for(unsigned int i = 0; i < width; ++i) { for(unsigned int j = 0; j < width; ++j) { unsigned int pixelIndex = i * width + j; int clockwisePerpendicularIndex = edc_local[pixelIndex];//getClockwisePerpendicularIndex(i, j, edgeDirectionClassifications[pixelIndex], width); float clockwisePerpendicularValue; float counterClockwisePerpendicularValue; if(clockwisePerpendicularIndex == -1) { clockwisePerpendicularValue = 0; } else { clockwisePerpendicularValue = gradient[clockwisePerpendicularIndex]; } int counterClockwisePerpendicularIndex =edc_local[pixelIndex];// getCounterClockwisePerpendicularIndex(i, j, edgeDirectionClassifications[pixelIndex], width); if(counterClockwisePerpendicularIndex == -1) { counterClockwisePerpendicularValue = 0; } else { if(counterClockwisePerpendicularIndex < imgsize && counterClockwisePerpendicularIndex >= 0) { counterClockwisePerpendicularValue = gradient[counterClockwisePerpendicularIndex]; } } if(gradient[pixelIndex] <= clockwisePerpendicularValue || gradient[pixelIndex] <= counterClockwisePerpendicularValue) { //cout << "\tPixel suppressed." << endl; gradient[pixelIndex] = 0; } else { //cout << "\tPixel retained." << endl; } } } } void visitNeighbors(int i, int j, float lowThreshold, unsigned char* gradientImage, unsigned char* outputEdges, int width, int imgsize) { int pixelIndex = i * width + j; if(i == 0 || j == 0 || i == width - 1 || j == width - 1 || visitedPixels.find(pixelIndex) != visitedPixels.end() ||gradientImage[pixelIndex] < lowThreshold) { //(pixelIndex); visitedPixels.insert(pixelIndex); return; } outputEdges[pixelIndex] =STRONG_EDGE; visitedPixels.insert(pixelIndex); visitNeighbors(i - 1, j - 1, lowThreshold, gradientImage, outputEdges, width, imgsize); visitNeighbors(i - 1, j, lowThreshold, gradientImage, outputEdges, width, imgsize); visitNeighbors(i - 1, j + 1, lowThreshold, gradientImage, outputEdges, width, imgsize); visitNeighbors(i, j + 1, lowThreshold, gradientImage, outputEdges, width, imgsize); visitNeighbors(i + 1, j + 1, lowThreshold, gradientImage, outputEdges, width, imgsize); visitNeighbors(i + 1, j, lowThreshold, gradientImage, outputEdges, width, imgsize); visitNeighbors(i + 1, j - 1, lowThreshold, gradientImage, outputEdges, width, imgsize); visitNeighbors(i, j - 1, lowThreshold, gradientImage, outputEdges, width, imgsize); } void performHysteresis(unsigned char* gradientImage, float highThreshold, float lowThreshold, unsigned char* outputEdges, int width, int imgsize) { for(int i = 0; i < width; ++i) { for(int j = 0; j < width; ++j) { unsigned pixelIndex = i * width + j; // Mark out borders and all pixels below the high threshold. 
if(gradientImage[pixelIndex] >= highThreshold) { visitedPixels.insert(pixelIndex); outputEdges[pixelIndex] = STRONG_EDGE; visitNeighbors(i, j, lowThreshold, gradientImage, outputEdges, width, imgsize); } } } } int main() { //void computeGradient(const float* inputMatrix, int matrixWidth, float* outputGradient) //deviceGaussianConvolution<<<dimGrid, dimBlock>>>(deviceGradient, matrixWidth); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); struct bmp_header bmp; struct dib_header dib; unsigned char *palete = NULL; unsigned char *data = NULL; unsigned char *out = NULL; unsigned int *edgeDirectionClassifications = NULL; BitMapRead(BMPInFile, &bmp, &dib, &data, &palete); out = (unsigned char *)malloc(dib.image_size); edgeDirectionClassifications = (unsigned int *)malloc(dib.image_size); cudaEventRecord(start); //Gaussian Smoothening computeGradient(data, dib.width, out,edgeDirectionClassifications); //convolution(data, out, dib.width, gaussianMask, 5, gaussianMaskWeight); BitMapWrite("GPU_Gaussian_Smooth_Gradient.bmp", &bmp, &dib, out, palete); cudaEventRecord(stop); cudaEventSynchronize(stop); data = NULL; free(data); suppressNonmaximums(dib.width, edgeDirectionClassifications, out, dib.image_size); BitMapWrite("GPU_Gaussian_Smooth_Gradient_suppression.bmp", &bmp, &dib, out, palete); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); printf("Elapsed Time for smoothing:%f\n",milliseconds); }
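One practical caveat in the hysteresis step above: visitNeighbors recurses once per connected pixel, which can overflow the host stack on large, strongly connected edge regions. A hedged iterative equivalent using an explicit worklist, preserving the original semantics (square width*width image, same visitedPixels and STRONG_EDGE globals declared earlier in this file):

#include <stack>
#include <utility>

// Iterative variant of visitNeighbors; same arguments except imgsize,
// which the recursive version never uses.
void visitNeighborsIterative(int startI, int startJ, float lowThreshold,
                             unsigned char* gradientImage, unsigned char* outputEdges,
                             int width) {
    std::stack<std::pair<int, int> > work;
    work.push(std::make_pair(startI, startJ));
    while (!work.empty()) {
        std::pair<int, int> p = work.top();
        work.pop();
        int i = p.first;
        int j = p.second;
        int pixelIndex = i * width + j;
        if (i == 0 || j == 0 || i == width - 1 || j == width - 1 ||
            visitedPixels.find(pixelIndex) != visitedPixels.end() ||
            gradientImage[pixelIndex] < lowThreshold) {
            visitedPixels.insert(pixelIndex);
            continue;
        }
        outputEdges[pixelIndex] = STRONG_EDGE;
        visitedPixels.insert(pixelIndex);
        // Push the eight neighbours instead of recursing into them.
        for (int di = -1; di <= 1; ++di) {
            for (int dj = -1; dj <= 1; ++dj) {
                if (di != 0 || dj != 0) {
                    work.push(std::make_pair(i + di, j + dj));
                }
            }
        }
    }
}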
68a737db309ca45e7e8718fae469da9ffed49a7d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <string> #include <sstream> #include <fstream> #include <cmath> using namespace std; __global__ void create_histogram(int *hist, int *img, int *num_rows, int *num_cols){ __shared__ int smallMatrix[3][3]; __shared__ int decimal = 0; //each block handles one pixel in the image for histogram //hence each block has one small matrix int i = blockIdx.x; int j = blockIdx.y; int thx = threadIdx.x; if( img[i*num_cols j] < = img[(i - 1 + (thx / 3 ))*num_cols + j - 1 + (thx % 3)] ) { smallMatrix[(thx / 3 )][(thx % 3)] = 0; } else{ smallMatrix[(thx / 3 )][(thx % 3)] = 1; } __syncthreads(); if(threadIdx.x ==0){ decimal = smallMatrix[0][0] * int(pow(2, 7)) + smallMatrix[0][1] * int(pow(2, 6)) + smallMatrix[0][2] * int(pow(2, 5)) + smallMatrix[1][2] * int(pow(2, 4)) + smallMatrix[2][2] * int(pow(2, 3)) + smallMatrix[2][1] * int(pow(2, 2)) + smallMatrix[2][0] * int(pow(2, 1)) + smallMatrix[1][0] * 1; atomicAdd(*hist[decimal], 1); } } double distance(int * a, int *b, int size) { double distance = 0; for (int i = 0; i < size; i ++) { if (a[i] + b[i] == 0) { distance += 0; } else { distance += 0.5 * pow ((a[i]- b[i]), 2) / (a[i] + b[i]); } } // printf("nbefore returning from distance function\n"); return distance; } int find_closest(int ***training_set, int num_persons, int num_training, int size, int * test_image) { double ** dist = new double * [num_persons]; //make an array which will store the comparison values for (int i = 0; i < num_persons; i++) { dist[i] = new double [num_training]; } for (int i = 0; i < num_persons; i++) { //populate dhe distance array for (int j =0; j < num_training; j++) { dist[i][j] = distance(training_set[i][j], test_image, size); } } double closestValue = dist[0][0]; int closest = 1e9 ; for (int i = 0; i < num_persons; i++) { for (int j =0; j < num_training; j++) { if (dist[i][j] < closestValue){ closestValue = dist[i][j]; closest = i; } } } for (int i = 0; i < num_persons; ++i) { delete dist[i]; } delete []dist; // printf("before returning from find closest function\n"); return closest + 1; } int **alloc_2d_matrix(int r, int c) { int** a = new int*[r]; for(int i = 0;i<r;i++) { a[i] = new int[c]; } return a; } void dealloc_2d_matrix(int **a, int r, int c) { for(int i = 0;i<r;i++) { delete a[i]; } delete [] a; } int ** read_image_data(string file_name, int h, int w) { ifstream File; File.open(file_name); //cout << file_name << endl; int** data=alloc_2d_matrix(h,w); int tmp; for (int i = 0; i < h;i++) { for (int j = 0; j < w; j++) { File >> tmp; data[i][j] = tmp; // cout <<data[i][j] ; } } File.close(); return data; } int main() { int nrOfIds = 34; int nrOfPhotosPerId = 30; int num_rows = 125; int num_cols = 94; int histogramSize = 256; int start_s=clock(); int *hist, *d_img, *d_num_rows, *d_num_cols; int *** training_set = new int **[nrOfIds]; //nr of people, nr of images per person, histogram size for (int i = 0; i < nrOfIds; i++) { training_set[i] = alloc_2d_matrix(nrOfPhotosPerId,histogramSize); } for (int i = 0; i < nrOfIds; i++) { //initialize training set to 0 for (int j = 0; j < nrOfPhotosPerId; j++) { for (int e = 0; e < histogramSize; e++) { training_set[i][j][e] = 0; } } } //get file name string filename; for (int w = 1; w <= 9; w++) { for (int q = 1; q <= 5; q++) { //get all the file's names filename = "s0" + to_string(w) + "_0" + to_string(q) + "resized.txt"; //cout<<filename<<endl; int **image = read_image_data(filename, num_rows, num_cols); int **img 
= alloc_2d_matrix((num_rows + 2), (num_cols + 2)); //enhanced image matrix with 0 in the corners for (int i = 0; i < (num_rows + 2); i++) { //initialize enhanced img matrix 0 for (int j = 0; j < (num_cols + 2); j++) { if(i==0 || j==0 || i == num_rows+1 || j == num_cols+1) img[i][j] = 0; else img[i][j] = image[i - 1][j - 1]; } } hipMalloc(void** &hist, sizeof(int)*histogramSize); hipMalloc(void** &d_img, sizeof(int)*num_rows*num_cols); hipMalloc(void** &d_num_cols, sizeof(int)); hipMalloc(void** &d_num_rows, sizeof(int)); hipMemcpy(d_img, img[0], sizeof(int)*num_rows*num_cols, hipMemcpyHostToDevice); hipMemcpy(d_num_rows, num_rows, sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_num_rows, num_cols, sizeof(int), hipMemcpyHostToDevice); dim3 griddim(150,200); hipLaunchKernelGGL(( create_histogram), dim3(griddim),dim3(9), 0, 0, hist, d_img, d_num_rows, d_num_cols); hipMemcpy(training_set[w - 1][q - 1], hist, sizeof(int)*histogramSize, hipMemcpyDeviceToHost); string err = hipGetErrorString(hipGetLastError ()); cout<<err<<endl; hipFree(hist); hipFree(d_img); hipFree(d_num_rows); hipFree(d_num_cols); hipDeviceSynchronize(); //deallocate images dealloc_2d_matrix(image, num_rows, num_cols); dealloc_2d_matrix(img, (num_rows + 2), (num_cols + 2)); } } //TESTING PART string filenames; filenames = "s" + to_string(36) + "_0" + to_string(1) + "resized.txt"; //cout<<filename<<endl; int **image11 = read_image_data(filenames, num_rows, num_cols); int **img11 = alloc_2d_matrix((num_rows + 2), (num_cols + 2)); //enhanced image matrix with 0 in the corners for (int i = 0; i < (num_rows + 2); i++) { //initialize enhanced img matrix 0 for (int j = 0; j < (num_cols + 2); j++) { if(i==0 || j==0 || i == num_rows+1 || j == num_cols+1) img[i][j] = 0; else img[i][j] = image[i - 1][j - 1]; } } int *A = new int[256]; for(int i =0; i< 256; i++){ A[i] = 0; } hipMalloc(void** &hist, sizeof(int)*histogramSize); hipMalloc(void** &d_img, sizeof(int)*num_rows*num_cols); hipMalloc(void** &d_num_cols, sizeof(int)); hipMalloc(void** &d_num_rows, sizeof(int)); hipMemcpy(d_img, img11[0], sizeof(int)*num_rows*num_cols, hipMemcpyHostToDevice); hipMemcpy(d_num_rows, num_rows, sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_num_rows, num_cols, sizeof(int), hipMemcpyHostToDevice); dim3 griddim(150,200); hipLaunchKernelGGL(( create_histogram), dim3(griddim),dim3(9), 0, 0, hist, d_img, d_num_rows, d_num_cols); hipMemcpy(A, hist, sizeof(int)*histogramSize, hipMemcpyDeviceToHost); string err = hipGetErrorString(hipGetLastError ()); cout<<err<<endl; hipFree(hist); hipFree(d_img); hipFree(d_num_rows); hipFree(d_num_cols); hipDeviceSynchronize(); for(int i =0; i<256; i++){ //cout<<A[i]<<endl; } //deallocate images dealloc_2d_matrix(image11, num_rows, num_cols); dealloc_2d_matrix(img11, (num_rows + 2), (num_cols + 2)); int testResultId; testResultId = find_closest(training_set,nrOfIds,nrOfPhotosPerId,histogramSize,A); cout<<testResultId<<endl; cout<<"Sucess!"<<endl; delete [] A; }
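As written, the create_histogram kernel in this file cannot compile: the shared variable decimal carries an initialiser, the centre index drops the + between i*num_cols and j, the comparison is spelled "< =", num_rows and num_cols are int pointers used as plain integers, and atomicAdd is handed a dereferenced value instead of the address of a histogram bin. Below is a minimal compilable sketch of the same LBP neighbourhood test. It is an illustration only: the name create_histogram_sketch, the assumption that the padded image is stored contiguously with pitch num_cols + 2, and the choice to pass the dimensions by value are all introduced here and are not code taken from the original file.

// Sketch only: assumes img is the zero-padded image stored contiguously with
// pitch num_cols + 2, hist has 256 zero-initialised bins, and the kernel is
// launched with one block per interior pixel and 9 threads per block.
__global__ void create_histogram_sketch(int *hist, const int *img,
                                        int num_rows, int num_cols)
{
    __shared__ int smallMatrix[3][3];
    __shared__ int decimal;                    // shared variables cannot have initialisers

    int i = blockIdx.x + 1;                    // interior pixel (skip the padding border)
    int j = blockIdx.y + 1;
    int thx = threadIdx.x;                     // 0..8 -> one cell of the 3x3 neighbourhood
    if (i > num_rows || j > num_cols) return;  // guard against an oversized grid

    int paddedWidth = num_cols + 2;
    int center   = img[i * paddedWidth + j];
    int neighbor = img[(i - 1 + thx / 3) * paddedWidth + (j - 1 + thx % 3)];
    smallMatrix[thx / 3][thx % 3] = (center <= neighbor) ? 0 : 1;
    __syncthreads();

    if (thx == 0) {
        decimal = smallMatrix[0][0] * 128 + smallMatrix[0][1] * 64 + smallMatrix[0][2] * 32 +
                  smallMatrix[1][2] * 16  + smallMatrix[2][2] * 8  + smallMatrix[2][1] * 4  +
                  smallMatrix[2][0] * 2   + smallMatrix[1][0];
        atomicAdd(&hist[decimal], 1);          // atomicAdd takes the address of the bin
    }
}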
68a737db309ca45e7e8718fae469da9ffed49a7d.cu
#include <iostream> #include <string> #include <sstream> #include <fstream> #include <cmath> using namespace std; __global__ void create_histogram(int *hist, int *img, int *num_rows, int *num_cols){ __shared__ int smallMatrix[3][3]; __shared__ int decimal = 0; //each block handles one pixel in the image for histogram //hence each block has one small matrix int i = blockIdx.x; int j = blockIdx.y; int thx = threadIdx.x; if( img[i*num_cols j] < = img[(i - 1 + (thx / 3 ))*num_cols + j - 1 + (thx % 3)] ) { smallMatrix[(thx / 3 )][(thx % 3)] = 0; } else{ smallMatrix[(thx / 3 )][(thx % 3)] = 1; } __syncthreads(); if(threadIdx.x ==0){ decimal = smallMatrix[0][0] * int(pow(2, 7)) + smallMatrix[0][1] * int(pow(2, 6)) + smallMatrix[0][2] * int(pow(2, 5)) + smallMatrix[1][2] * int(pow(2, 4)) + smallMatrix[2][2] * int(pow(2, 3)) + smallMatrix[2][1] * int(pow(2, 2)) + smallMatrix[2][0] * int(pow(2, 1)) + smallMatrix[1][0] * 1; atomicAdd(*hist[decimal], 1); } } double distance(int * a, int *b, int size) { double distance = 0; for (int i = 0; i < size; i ++) { if (a[i] + b[i] == 0) { distance += 0; } else { distance += 0.5 * pow ((a[i]- b[i]), 2) / (a[i] + b[i]); } } // printf("nbefore returning from distance function\n"); return distance; } int find_closest(int ***training_set, int num_persons, int num_training, int size, int * test_image) { double ** dist = new double * [num_persons]; //make an array which will store the comparison values for (int i = 0; i < num_persons; i++) { dist[i] = new double [num_training]; } for (int i = 0; i < num_persons; i++) { //populate dhe distance array for (int j =0; j < num_training; j++) { dist[i][j] = distance(training_set[i][j], test_image, size); } } double closestValue = dist[0][0]; int closest = 1e9 ; for (int i = 0; i < num_persons; i++) { for (int j =0; j < num_training; j++) { if (dist[i][j] < closestValue){ closestValue = dist[i][j]; closest = i; } } } for (int i = 0; i < num_persons; ++i) { delete dist[i]; } delete []dist; // printf("before returning from find closest function\n"); return closest + 1; } int **alloc_2d_matrix(int r, int c) { int** a = new int*[r]; for(int i = 0;i<r;i++) { a[i] = new int[c]; } return a; } void dealloc_2d_matrix(int **a, int r, int c) { for(int i = 0;i<r;i++) { delete a[i]; } delete [] a; } int ** read_image_data(string file_name, int h, int w) { ifstream File; File.open(file_name); //cout << file_name << endl; int** data=alloc_2d_matrix(h,w); int tmp; for (int i = 0; i < h;i++) { for (int j = 0; j < w; j++) { File >> tmp; data[i][j] = tmp; // cout <<data[i][j] ; } } File.close(); return data; } int main() { int nrOfIds = 34; int nrOfPhotosPerId = 30; int num_rows = 125; int num_cols = 94; int histogramSize = 256; int start_s=clock(); int *hist, *d_img, *d_num_rows, *d_num_cols; int *** training_set = new int **[nrOfIds]; //nr of people, nr of images per person, histogram size for (int i = 0; i < nrOfIds; i++) { training_set[i] = alloc_2d_matrix(nrOfPhotosPerId,histogramSize); } for (int i = 0; i < nrOfIds; i++) { //initialize training set to 0 for (int j = 0; j < nrOfPhotosPerId; j++) { for (int e = 0; e < histogramSize; e++) { training_set[i][j][e] = 0; } } } //get file name string filename; for (int w = 1; w <= 9; w++) { for (int q = 1; q <= 5; q++) { //get all the file's names filename = "s0" + to_string(w) + "_0" + to_string(q) + "resized.txt"; //cout<<filename<<endl; int **image = read_image_data(filename, num_rows, num_cols); int **img = alloc_2d_matrix((num_rows + 2), (num_cols + 2)); //enhanced image matrix with 0 in 
the corners for (int i = 0; i < (num_rows + 2); i++) { //initialize enhanced img matrix 0 for (int j = 0; j < (num_cols + 2); j++) { if(i==0 || j==0 || i == num_rows+1 || j == num_cols+1) img[i][j] = 0; else img[i][j] = image[i - 1][j - 1]; } } cudaMalloc(void** &hist, sizeof(int)*histogramSize); cudaMalloc(void** &d_img, sizeof(int)*num_rows*num_cols); cudaMalloc(void** &d_num_cols, sizeof(int)); cudaMalloc(void** &d_num_rows, sizeof(int)); cudaMemcpy(d_img, img[0], sizeof(int)*num_rows*num_cols, cudaMemcpyHostToDevice); cudaMemcpy(d_num_rows, num_rows, sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_num_rows, num_cols, sizeof(int), cudaMemcpyHostToDevice); dim3 griddim(150,200); create_histogram<<<griddim,9>>>( hist, d_img, d_num_rows, d_num_cols); cudaMemcpy(training_set[w - 1][q - 1], hist, sizeof(int)*histogramSize, cudaMemcpyDeviceToHost); string err = cudaGetErrorString(cudaGetLastError ()); cout<<err<<endl; cudaFree(hist); cudaFree(d_img); cudaFree(d_num_rows); cudaFree(d_num_cols); cudaDeviceSynchronize(); //deallocate images dealloc_2d_matrix(image, num_rows, num_cols); dealloc_2d_matrix(img, (num_rows + 2), (num_cols + 2)); } } //TESTING PART string filenames; filenames = "s" + to_string(36) + "_0" + to_string(1) + "resized.txt"; //cout<<filename<<endl; int **image11 = read_image_data(filenames, num_rows, num_cols); int **img11 = alloc_2d_matrix((num_rows + 2), (num_cols + 2)); //enhanced image matrix with 0 in the corners for (int i = 0; i < (num_rows + 2); i++) { //initialize enhanced img matrix 0 for (int j = 0; j < (num_cols + 2); j++) { if(i==0 || j==0 || i == num_rows+1 || j == num_cols+1) img[i][j] = 0; else img[i][j] = image[i - 1][j - 1]; } } int *A = new int[256]; for(int i =0; i< 256; i++){ A[i] = 0; } cudaMalloc(void** &hist, sizeof(int)*histogramSize); cudaMalloc(void** &d_img, sizeof(int)*num_rows*num_cols); cudaMalloc(void** &d_num_cols, sizeof(int)); cudaMalloc(void** &d_num_rows, sizeof(int)); cudaMemcpy(d_img, img11[0], sizeof(int)*num_rows*num_cols, cudaMemcpyHostToDevice); cudaMemcpy(d_num_rows, num_rows, sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_num_rows, num_cols, sizeof(int), cudaMemcpyHostToDevice); dim3 griddim(150,200); create_histogram<<<griddim,9>>>( hist, d_img, d_num_rows, d_num_cols); cudaMemcpy(A, hist, sizeof(int)*histogramSize, cudaMemcpyDeviceToHost); string err = cudaGetErrorString(cudaGetLastError ()); cout<<err<<endl; cudaFree(hist); cudaFree(d_img); cudaFree(d_num_rows); cudaFree(d_num_cols); cudaDeviceSynchronize(); for(int i =0; i<256; i++){ //cout<<A[i]<<endl; } //deallocate images dealloc_2d_matrix(image11, num_rows, num_cols); dealloc_2d_matrix(img11, (num_rows + 2), (num_cols + 2)); int testResultId; testResultId = find_closest(training_set,nrOfIds,nrOfPhotosPerId,histogramSize,A); cout<<testResultId<<endl; cout<<"Sucess!"<<endl; delete [] A; }
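The host side of both copies of this program has matching problems: the allocations are written as cudaMalloc(void** &hist, ...), which is not valid C++; num_rows is copied to the device twice, so the second copy writes num_cols into d_num_rows and d_num_cols is never initialised, and the scalars are passed without taking their addresses; the histogram buffer is never zeroed; and because alloc_2d_matrix allocates each row separately, copying from img[0] transfers only the first padded row rather than the whole image. The fragment below sketches one way the per-image setup could look, matching the kernel sketch above (CUDA spelling; the hipified file would use the hip* equivalents). The flat buffer h_img, the device names d_hist and d_img, and the by-value dimension arguments are assumptions introduced here, not code from the original.

// Sketch only: host-side setup for one training image, assuming the
// create_histogram_sketch kernel shown earlier (dimensions passed by value).
int paddedRows = num_rows + 2, paddedCols = num_cols + 2;
int *h_img = new int[paddedRows * paddedCols]();          // zero-padded, contiguous
for (int r = 0; r < num_rows; ++r)
    for (int c = 0; c < num_cols; ++c)
        h_img[(r + 1) * paddedCols + (c + 1)] = image[r][c];

int *d_hist = nullptr, *d_img = nullptr;
cudaMalloc((void**)&d_hist, histogramSize * sizeof(int)); // note (void**)&ptr, not void** &ptr
cudaMalloc((void**)&d_img, paddedRows * paddedCols * sizeof(int));
cudaMemset(d_hist, 0, histogramSize * sizeof(int));       // bins must start at zero
cudaMemcpy(d_img, h_img, paddedRows * paddedCols * sizeof(int), cudaMemcpyHostToDevice);

dim3 grid(num_rows, num_cols);                            // one block per interior pixel
create_histogram_sketch<<<grid, 9>>>(d_hist, d_img, num_rows, num_cols);
cudaDeviceSynchronize();

cudaMemcpy(training_set[w - 1][q - 1], d_hist, histogramSize * sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(d_hist);
cudaFree(d_img);
delete [] h_img;

Two further points about the surrounding code: the test block near the end of main pads img11 from image11 but actually writes into img and reads from image, which are out of scope at that point; and in find_closest, initialising closest to 1e9 returns an out-of-range id whenever dist[0][0] is already the minimum, so starting from closest = 0 (to match closestValue = dist[0][0]) is the safer choice.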
6219bd4463672bf34f260ee4a37a395bba2203ab.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "rdiv.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *res = NULL; hipMalloc(&res, XSIZE*YSIZE); const unsigned int *fsum = NULL; hipMalloc(&fsum, XSIZE*YSIZE); const float *csum = NULL; hipMalloc(&csum, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( rdiv), dim3(gridBlock),dim3(threadBlock), 0, 0, res,fsum,csum); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( rdiv), dim3(gridBlock),dim3(threadBlock), 0, 0, res,fsum,csum); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( rdiv), dim3(gridBlock),dim3(threadBlock), 0, 0, res,fsum,csum); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
6219bd4463672bf34f260ee4a37a395bba2203ab.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "rdiv.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *res = NULL; cudaMalloc(&res, XSIZE*YSIZE); const unsigned int *fsum = NULL; cudaMalloc(&fsum, XSIZE*YSIZE); const float *csum = NULL; cudaMalloc(&csum, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); rdiv<<<gridBlock,threadBlock>>>(res,fsum,csum); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { rdiv<<<gridBlock,threadBlock>>>(res,fsum,csum); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { rdiv<<<gridBlock,threadBlock>>>(res,fsum,csum); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
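Both versions of this benchmark harness allocate res, fsum and csum with a size of XSIZE*YSIZE, i.e. an element count rather than a byte count, so if the rdiv kernel (defined in rdiv.cu, not shown here) indexes XSIZE*YSIZE elements the buffers are roughly a quarter of the required size. A sketch of the allocations with explicit byte sizes, keeping the variable names used above; dropping the const qualifiers is a simplification made here, not a change present in the file:

// Sketch only: the same three benchmark buffers sized in bytes.
float *res = NULL;
cudaMalloc(&res, XSIZE * YSIZE * sizeof(float));
unsigned int *fsum = NULL;
cudaMalloc(&fsum, XSIZE * YSIZE * sizeof(unsigned int));
float *csum = NULL;
cudaMalloc(&csum, XSIZE * YSIZE * sizeof(float));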
9b014202263ec54e8fa9e14ce40222bf85078a0e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //Udacity HW 6 //Poisson Blending /* Background ========== The goal for this assignment is to take one image (the source) and paste it into another image (the destination) attempting to match the two images so that the pasting is non-obvious. This is known as a "seamless clone". The basic ideas are as follows: 1) Figure out the interior and border of the source image 2) Use the values of the border pixels in the destination image as boundary conditions for solving a Poisson equation that tells us how to blend the images. No pixels from the destination except pixels on the border are used to compute the match. Solving the Poisson Equation ============================ There are multiple ways to solve this equation - we choose an iterative method - specifically the Jacobi method. Iterative methods start with a guess of the solution and then iterate to try and improve the guess until it stops changing. If the problem was well-suited for the method then it will stop and where it stops will be the solution. The Jacobi method is the simplest iterative method and converges slowly - that is we need a lot of iterations to get to the answer, but it is the easiest method to write. Jacobi Iterations ================= Our initial guess is going to be the source image itself. This is a pretty good guess for what the blended image will look like and it means that we won't have to do as many iterations compared to if we had started far from the final solution. ImageGuess_prev (Floating point) ImageGuess_next (Floating point) DestinationImg SourceImg Follow these steps to implement one iteration: 1) For every pixel p in the interior, compute two sums over the four neighboring pixels: Sum1: If the neighbor is in the interior += ImageGuess_prev[neighbor] else if the neighbor in on the border += DestinationImg[neighbor] Sum2: += SourceImg[p] - SourceImg[neighbor] (for all four neighbors) 2) Calculate the new pixel value: float newVal= (Sum1 + Sum2) / 4.f // floating point ImageGuess_next[p] = min(255, max(0, newVal)); //clamp to [0, 255] In this assignment we will do 800 iterations. 
*/ #include "utils.h" #include <thrust/host_vector.h> #include "reference_calc.cpp" #define min(a,b) (((a)<(b))?(a):(b)) #define max(a,b) (((a)>(b))?(a):(b)) __global__ void gen_mask(unsigned int * d_mask, unsigned char * d_red, unsigned char * d_green, unsigned char * d_blue, const size_t numRows, const size_t numCols) { //calculate a 1D offset const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); if ( thread_2D_pos.y >= numCols || thread_2D_pos.x >= numRows ) return; int idx = thread_2D_pos.x * numCols + thread_2D_pos.y; int2 idx_u, idx_d, idx_r, idx_l; int u, d, l, r; if (d_red[idx] != 255 || d_green[idx] != 255 || d_blue[idx] != 255) { idx_u = make_int2( thread_2D_pos.x, blockIdx.y * blockDim.y + threadIdx.y - 1); idx_d = make_int2( thread_2D_pos.x, blockIdx.y * blockDim.y + threadIdx.y + 1); idx_l = make_int2( blockIdx.x * blockDim.x + threadIdx.x + 1, thread_2D_pos.y); idx_r = make_int2( blockIdx.x * blockDim.x + threadIdx.x - 1, thread_2D_pos.y); u = idx_u.x * numCols + idx_u.y; d = idx_d.x * numCols + idx_d.y; l = idx_l.x * numCols + idx_l.y; r = idx_r.x * numCols + idx_r.y; if(d_red[u] == 255 || d_red[d] == 255 || d_red[l] == 255 || d_red[r] == 255 || d_blue[u] == 255 || d_blue[d] == 255 || d_blue[l] == 255 || d_blue[r] == 255 || d_green[u] == 255 || d_green[d] == 255 || d_green[l] == 255 || d_green[r] == 255) d_mask[idx] = 2; else d_mask[idx] = 1; } } __global__ void apply_mask(unsigned int * d_mask, uchar4 * d_image, const size_t numRows, const size_t numCols) { //calculate a 1D offset const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); if ( thread_2D_pos.y >= numCols || thread_2D_pos.x >= numRows ) return; int idx = thread_2D_pos.x * numCols + thread_2D_pos.y; if (d_mask[idx] == 2) { d_image[idx].x = 0; d_image[idx].y = 0; d_image[idx].z = 0; } else if (d_mask[idx] == 1) { d_image[idx].x = 255; d_image[idx].y = 255; d_image[idx].z = 0; } } __global__ void init_guess(unsigned int * d_mask, float * d_guess_color, unsigned char * d_image_color, const size_t numRows, const size_t numCols) { //calculate a 1D offset const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); if ( thread_2D_pos.y >= numCols || thread_2D_pos.x >= numRows ) return; int idx = thread_2D_pos.x * numCols + thread_2D_pos.y; if (d_mask[idx] > 0) { d_guess_color[idx] = (float) d_image_color[idx]; } } __global__ void separateChannels(uchar4* d_sourceImg, int numRows, int numCols, unsigned char* const d_red, unsigned char* const d_green, unsigned char* const d_blue) { const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); if ( thread_2D_pos.y >= numCols || thread_2D_pos.x >= numRows ) return; const int idx = thread_2D_pos.x * numCols + thread_2D_pos.y; uchar4 rgba = d_sourceImg[idx]; d_red[idx] = rgba.x; d_green[idx] = rgba.y; d_blue[idx] = rgba.z; } __global__ void replace_dest(float * d_guess_red, float * d_guess_green, float * d_guess_blue, unsigned int * d_mask, uchar4* const d_destImg, int numRows, int numCols) { const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); const int idx = thread_2D_pos.x * numCols + thread_2D_pos.y; //make sure we don't try and access memory outside the image //by having any threads mapped there return early if (thread_2D_pos.y >= numCols || thread_2D_pos.x >= numRows) return; 
unsigned char red = (unsigned char) d_guess_red[idx]; unsigned char green = (unsigned char) d_guess_green[idx]; unsigned char blue = (unsigned char) d_guess_blue[idx]; //Alpha should be 255 for no transparency uchar4 outputPixel = make_uchar4(red, green, blue, 255); if (d_mask[idx] == 1) { d_destImg[idx] = outputPixel; } } __global__ void solve(float * d_guess, float * d_guess_prev, unsigned int * d_mask, unsigned char* const d_source, unsigned char* const d_dest, const size_t numRows, const size_t numCols) { const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); const int idx = thread_2D_pos.x * numCols + thread_2D_pos.y; //make sure we don't try and access memory outside the image //by having any threads mapped there return early if (thread_2D_pos.y >= numCols || thread_2D_pos.x >= numRows) return; if (d_mask[idx] == 1) { float sum1 = 0.0; float sum2 = 0.0; int2 idx_u, idx_d, idx_r, idx_l; int u, d, l, r; idx_u = make_int2( thread_2D_pos.x, blockIdx.y * blockDim.y + threadIdx.y - 1); idx_d = make_int2( thread_2D_pos.x, blockIdx.y * blockDim.y + threadIdx.y + 1); idx_l = make_int2( blockIdx.x * blockDim.x + threadIdx.x + 1, thread_2D_pos.y); idx_r = make_int2( blockIdx.x * blockDim.x + threadIdx.x - 1, thread_2D_pos.y); u = idx_u.x * numCols + idx_u.y; d = idx_d.x * numCols + idx_d.y; l = idx_l.x * numCols + idx_l.y; r = idx_r.x * numCols + idx_r.y; if (d_mask[u] == 1) sum1 += d_guess_prev[u]; else if (d_mask[u] == 2) sum1 += d_dest[u]; if (d_mask[d] == 1) sum1 += d_guess_prev[d]; else if (d_mask[d] == 2) sum1 += d_dest[d]; if (d_mask[l] == 1) sum1 += d_guess_prev[l]; else if (d_mask[l] == 2) sum1 += d_dest[l]; if (d_mask[r] == 1) sum1 += d_guess_prev[r]; else if (d_mask[r] == 2) sum1 += d_dest[r]; d_guess_prev[idx] = d_guess[idx]; sum2 += 4.f*d_source[idx] - d_source[u] - d_source[d] - d_source[l] - d_source[r]; float newVal = (sum1 + sum2) / 4.f; d_guess[idx] = min(255.0, max(0.0, newVal)); } } void your_blend(const uchar4* const h_sourceImg, //IN const size_t numRows, const size_t numCols, const uchar4* const h_destImg, //IN uchar4* const h_blendedImg) //OUT { const unsigned int n = numRows * numCols; int k = 16; // Set reasonable block size (i.e., number of threads per block) const dim3 threads( k, k, 1); // Compute correct grid size (i.e., number of blocks per kernel launch) // from the image size and and block size. 
const dim3 blocks( numRows/k+1, numCols/k+1, 1); //const int maxThreadsPerBlock = 1024; //int threads = maxThreadsPerBlock; //int blocks = n / maxThreadsPerBlock + 1; //int shared = threads * sizeof(float); unsigned char * d_source_red, * d_source_green, * d_source_blue; unsigned char * d_dest_red, * d_dest_green, * d_dest_blue; float * d_guess_red, * d_guess_blue, * d_guess_green; float * d_guess_prev_red, * d_guess_prev_blue, * d_guess_prev_green; unsigned int * d_mask; uchar4 * d_sourceImg, * d_destImg; // sizes of arrays : size_t p_size = sizeof(unsigned char) * n; size_t f_size = sizeof(float) * n; size_t i_size = sizeof(uchar4) * n; size_t m_size = sizeof(unsigned int) * n; //============================================================================ // allocate the memory : // allocate memory for the three different channels (source and dest) : checkCudaErrors(hipMalloc(&d_source_red, p_size)); checkCudaErrors(hipMalloc(&d_source_green, p_size)); checkCudaErrors(hipMalloc(&d_source_blue, p_size)); checkCudaErrors(hipMalloc(&d_dest_red, p_size)); checkCudaErrors(hipMalloc(&d_dest_green, p_size)); checkCudaErrors(hipMalloc(&d_dest_blue, p_size)); // allocate memory for the previous and current guesses : checkCudaErrors(hipMalloc(&d_guess_red, p_size)); checkCudaErrors(hipMalloc(&d_guess_green, p_size)); checkCudaErrors(hipMalloc(&d_guess_blue, p_size)); checkCudaErrors(hipMalloc(&d_guess_prev_red, p_size)); checkCudaErrors(hipMalloc(&d_guess_prev_green, p_size)); checkCudaErrors(hipMalloc(&d_guess_prev_blue, p_size)); // allocate memory for the source image and copy from host : checkCudaErrors(hipMalloc((void**) &d_sourceImg, i_size)); checkCudaErrors(hipMemcpy(d_sourceImg, h_sourceImg, i_size, hipMemcpyHostToDevice)); // allocate memory for the destination image and copy from host : checkCudaErrors(hipMalloc((void**) &d_destImg, i_size)); checkCudaErrors(hipMemcpy(d_destImg, h_destImg, i_size, hipMemcpyHostToDevice)); // allocate memory for the mask and init to 0 : checkCudaErrors(hipMalloc((void**) &d_mask, m_size)); checkCudaErrors(hipMemset(d_mask, 0, m_size)); //============================================================================ /* 1) Separate out the incoming images into three separate channels */ hipLaunchKernelGGL(( separateChannels), dim3(blocks), dim3(threads), 0, 0, d_sourceImg, numRows, numCols, d_source_red, d_source_green, d_source_blue); hipLaunchKernelGGL(( separateChannels), dim3(blocks), dim3(threads), 0, 0, d_destImg, numRows, numCols, d_dest_red, d_dest_green, d_dest_blue); //hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); /* 2) Compute a mask of the pixels from the source image to be copied The pixels that shouldn't be copied are completely white, they have R=255, G=255, B=255. Any other pixels SHOULD be copied. */ /* 3) Compute the interior and border regions of the mask. An interior pixel has all 4 neighbors also inside the mask. A border pixel is in the mask itself, but has at least one neighbor that isn't. */ hipLaunchKernelGGL(( gen_mask), dim3(blocks), dim3(threads), 0, 0, d_mask, d_source_red, d_source_green, d_source_blue, numRows, numCols); //hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); /* apply_mask<<<blocks, threads>>>(d_mask, d_sourceImg, numRows, numCols); checkCudaErrors(hipMemcpy(h_blendedImg, d_sourceImg, i_size, hipMemcpyDeviceToHost)); */ /* 4) Create two float(!) buffers for each color channel that will act as our guesses. 
Initialize them to the respective color channel of the source image since that will act as our intial guess. */ hipLaunchKernelGGL(( init_guess), dim3(blocks), dim3(threads), 0, 0, d_mask, d_guess_red, d_source_red, numRows, numCols); hipLaunchKernelGGL(( init_guess), dim3(blocks), dim3(threads), 0, 0, d_mask, d_guess_green, d_source_green, numRows, numCols); hipLaunchKernelGGL(( init_guess), dim3(blocks), dim3(threads), 0, 0, d_mask, d_guess_blue, d_source_blue, numRows, numCols); hipLaunchKernelGGL(( init_guess), dim3(blocks), dim3(threads), 0, 0, d_mask, d_guess_prev_red, d_source_red, numRows, numCols); hipLaunchKernelGGL(( init_guess), dim3(blocks), dim3(threads), 0, 0, d_mask, d_guess_prev_green, d_source_green, numRows, numCols); hipLaunchKernelGGL(( init_guess), dim3(blocks), dim3(threads), 0, 0, d_mask, d_guess_prev_blue, d_source_blue, numRows, numCols); /* 5) For each color channel perform the Jacobi iteration described above 800 times. */ for (int i = 0; i < 800; i++) { hipLaunchKernelGGL(( solve), dim3(blocks), dim3(threads), 0, 0, d_guess_red, d_guess_prev_red, d_mask, d_source_red, d_dest_red, numRows, numCols); hipLaunchKernelGGL(( solve), dim3(blocks), dim3(threads), 0, 0, d_guess_green, d_guess_prev_green, d_mask, d_source_green, d_dest_green, numRows, numCols); hipLaunchKernelGGL(( solve), dim3(blocks), dim3(threads), 0, 0, d_guess_blue, d_guess_prev_blue, d_mask, d_source_blue, d_dest_blue, numRows, numCols); //hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); } /* 6) Create the output image by replacing all the interior pixels in the destination image with the result of the Jacobi iterations. Just cast the floating point values to unsigned chars since we have already made sure to clamp them to the correct range. */ hipLaunchKernelGGL(( replace_dest), dim3(blocks), dim3(threads), 0, 0, d_guess_red, d_guess_green, d_guess_blue, d_mask, d_destImg, numRows, numCols); checkCudaErrors(hipMemcpy(h_blendedImg, d_destImg, i_size, hipMemcpyDeviceToHost)); /* uchar4* h_reference = new uchar4[n]; reference_calc(h_sourceImg, numRows, numCols, h_destImg, h_reference); checkResultsEps((unsigned char *)h_reference, (unsigned char *)h_blendedImg, 4 * n, 2, .01); delete[] h_reference; */ }
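The long comment at the top of this file describes the Jacobi step in words; written out (notation introduced here, matching that comment), the update the solve kernel applies to every interior pixel p is

I_p^{(k+1)} = \mathrm{clamp}\!\left( \tfrac{1}{4} \Big( \sum_{q \in N(p) \cap \Omega} I_q^{(k)} \;+\; \sum_{q \in N(p) \cap \partial\Omega} D_q \;+\; \sum_{q \in N(p)} \big( S_p - S_q \big) \Big),\; 0,\; 255 \right)

where N(p) is the set of four neighbours of p, Ω the interior of the mask, ∂Ω its border, S the source image and D the destination image. The kernel accumulates the last sum as 4·S_p − (S_u + S_d + S_l + S_r), which is the same quantity.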
9b014202263ec54e8fa9e14ce40222bf85078a0e.cu
//Udacity HW 6 //Poisson Blending /* Background ========== The goal for this assignment is to take one image (the source) and paste it into another image (the destination) attempting to match the two images so that the pasting is non-obvious. This is known as a "seamless clone". The basic ideas are as follows: 1) Figure out the interior and border of the source image 2) Use the values of the border pixels in the destination image as boundary conditions for solving a Poisson equation that tells us how to blend the images. No pixels from the destination except pixels on the border are used to compute the match. Solving the Poisson Equation ============================ There are multiple ways to solve this equation - we choose an iterative method - specifically the Jacobi method. Iterative methods start with a guess of the solution and then iterate to try and improve the guess until it stops changing. If the problem was well-suited for the method then it will stop and where it stops will be the solution. The Jacobi method is the simplest iterative method and converges slowly - that is we need a lot of iterations to get to the answer, but it is the easiest method to write. Jacobi Iterations ================= Our initial guess is going to be the source image itself. This is a pretty good guess for what the blended image will look like and it means that we won't have to do as many iterations compared to if we had started far from the final solution. ImageGuess_prev (Floating point) ImageGuess_next (Floating point) DestinationImg SourceImg Follow these steps to implement one iteration: 1) For every pixel p in the interior, compute two sums over the four neighboring pixels: Sum1: If the neighbor is in the interior += ImageGuess_prev[neighbor] else if the neighbor in on the border += DestinationImg[neighbor] Sum2: += SourceImg[p] - SourceImg[neighbor] (for all four neighbors) 2) Calculate the new pixel value: float newVal= (Sum1 + Sum2) / 4.f // floating point ImageGuess_next[p] = min(255, max(0, newVal)); //clamp to [0, 255] In this assignment we will do 800 iterations. 
*/ #include "utils.h" #include <thrust/host_vector.h> #include "reference_calc.cpp" #define min(a,b) (((a)<(b))?(a):(b)) #define max(a,b) (((a)>(b))?(a):(b)) __global__ void gen_mask(unsigned int * d_mask, unsigned char * d_red, unsigned char * d_green, unsigned char * d_blue, const size_t numRows, const size_t numCols) { //calculate a 1D offset const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); if ( thread_2D_pos.y >= numCols || thread_2D_pos.x >= numRows ) return; int idx = thread_2D_pos.x * numCols + thread_2D_pos.y; int2 idx_u, idx_d, idx_r, idx_l; int u, d, l, r; if (d_red[idx] != 255 || d_green[idx] != 255 || d_blue[idx] != 255) { idx_u = make_int2( thread_2D_pos.x, blockIdx.y * blockDim.y + threadIdx.y - 1); idx_d = make_int2( thread_2D_pos.x, blockIdx.y * blockDim.y + threadIdx.y + 1); idx_l = make_int2( blockIdx.x * blockDim.x + threadIdx.x + 1, thread_2D_pos.y); idx_r = make_int2( blockIdx.x * blockDim.x + threadIdx.x - 1, thread_2D_pos.y); u = idx_u.x * numCols + idx_u.y; d = idx_d.x * numCols + idx_d.y; l = idx_l.x * numCols + idx_l.y; r = idx_r.x * numCols + idx_r.y; if(d_red[u] == 255 || d_red[d] == 255 || d_red[l] == 255 || d_red[r] == 255 || d_blue[u] == 255 || d_blue[d] == 255 || d_blue[l] == 255 || d_blue[r] == 255 || d_green[u] == 255 || d_green[d] == 255 || d_green[l] == 255 || d_green[r] == 255) d_mask[idx] = 2; else d_mask[idx] = 1; } } __global__ void apply_mask(unsigned int * d_mask, uchar4 * d_image, const size_t numRows, const size_t numCols) { //calculate a 1D offset const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); if ( thread_2D_pos.y >= numCols || thread_2D_pos.x >= numRows ) return; int idx = thread_2D_pos.x * numCols + thread_2D_pos.y; if (d_mask[idx] == 2) { d_image[idx].x = 0; d_image[idx].y = 0; d_image[idx].z = 0; } else if (d_mask[idx] == 1) { d_image[idx].x = 255; d_image[idx].y = 255; d_image[idx].z = 0; } } __global__ void init_guess(unsigned int * d_mask, float * d_guess_color, unsigned char * d_image_color, const size_t numRows, const size_t numCols) { //calculate a 1D offset const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); if ( thread_2D_pos.y >= numCols || thread_2D_pos.x >= numRows ) return; int idx = thread_2D_pos.x * numCols + thread_2D_pos.y; if (d_mask[idx] > 0) { d_guess_color[idx] = (float) d_image_color[idx]; } } __global__ void separateChannels(uchar4* d_sourceImg, int numRows, int numCols, unsigned char* const d_red, unsigned char* const d_green, unsigned char* const d_blue) { const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); if ( thread_2D_pos.y >= numCols || thread_2D_pos.x >= numRows ) return; const int idx = thread_2D_pos.x * numCols + thread_2D_pos.y; uchar4 rgba = d_sourceImg[idx]; d_red[idx] = rgba.x; d_green[idx] = rgba.y; d_blue[idx] = rgba.z; } __global__ void replace_dest(float * d_guess_red, float * d_guess_green, float * d_guess_blue, unsigned int * d_mask, uchar4* const d_destImg, int numRows, int numCols) { const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); const int idx = thread_2D_pos.x * numCols + thread_2D_pos.y; //make sure we don't try and access memory outside the image //by having any threads mapped there return early if (thread_2D_pos.y >= numCols || thread_2D_pos.x >= numRows) return; 
unsigned char red = (unsigned char) d_guess_red[idx]; unsigned char green = (unsigned char) d_guess_green[idx]; unsigned char blue = (unsigned char) d_guess_blue[idx]; //Alpha should be 255 for no transparency uchar4 outputPixel = make_uchar4(red, green, blue, 255); if (d_mask[idx] == 1) { d_destImg[idx] = outputPixel; } } __global__ void solve(float * d_guess, float * d_guess_prev, unsigned int * d_mask, unsigned char* const d_source, unsigned char* const d_dest, const size_t numRows, const size_t numCols) { const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); const int idx = thread_2D_pos.x * numCols + thread_2D_pos.y; //make sure we don't try and access memory outside the image //by having any threads mapped there return early if (thread_2D_pos.y >= numCols || thread_2D_pos.x >= numRows) return; if (d_mask[idx] == 1) { float sum1 = 0.0; float sum2 = 0.0; int2 idx_u, idx_d, idx_r, idx_l; int u, d, l, r; idx_u = make_int2( thread_2D_pos.x, blockIdx.y * blockDim.y + threadIdx.y - 1); idx_d = make_int2( thread_2D_pos.x, blockIdx.y * blockDim.y + threadIdx.y + 1); idx_l = make_int2( blockIdx.x * blockDim.x + threadIdx.x + 1, thread_2D_pos.y); idx_r = make_int2( blockIdx.x * blockDim.x + threadIdx.x - 1, thread_2D_pos.y); u = idx_u.x * numCols + idx_u.y; d = idx_d.x * numCols + idx_d.y; l = idx_l.x * numCols + idx_l.y; r = idx_r.x * numCols + idx_r.y; if (d_mask[u] == 1) sum1 += d_guess_prev[u]; else if (d_mask[u] == 2) sum1 += d_dest[u]; if (d_mask[d] == 1) sum1 += d_guess_prev[d]; else if (d_mask[d] == 2) sum1 += d_dest[d]; if (d_mask[l] == 1) sum1 += d_guess_prev[l]; else if (d_mask[l] == 2) sum1 += d_dest[l]; if (d_mask[r] == 1) sum1 += d_guess_prev[r]; else if (d_mask[r] == 2) sum1 += d_dest[r]; d_guess_prev[idx] = d_guess[idx]; sum2 += 4.f*d_source[idx] - d_source[u] - d_source[d] - d_source[l] - d_source[r]; float newVal = (sum1 + sum2) / 4.f; d_guess[idx] = min(255.0, max(0.0, newVal)); } } void your_blend(const uchar4* const h_sourceImg, //IN const size_t numRows, const size_t numCols, const uchar4* const h_destImg, //IN uchar4* const h_blendedImg) //OUT { const unsigned int n = numRows * numCols; int k = 16; // Set reasonable block size (i.e., number of threads per block) const dim3 threads( k, k, 1); // Compute correct grid size (i.e., number of blocks per kernel launch) // from the image size and and block size. 
const dim3 blocks( numRows/k+1, numCols/k+1, 1); //const int maxThreadsPerBlock = 1024; //int threads = maxThreadsPerBlock; //int blocks = n / maxThreadsPerBlock + 1; //int shared = threads * sizeof(float); unsigned char * d_source_red, * d_source_green, * d_source_blue; unsigned char * d_dest_red, * d_dest_green, * d_dest_blue; float * d_guess_red, * d_guess_blue, * d_guess_green; float * d_guess_prev_red, * d_guess_prev_blue, * d_guess_prev_green; unsigned int * d_mask; uchar4 * d_sourceImg, * d_destImg; // sizes of arrays : size_t p_size = sizeof(unsigned char) * n; size_t f_size = sizeof(float) * n; size_t i_size = sizeof(uchar4) * n; size_t m_size = sizeof(unsigned int) * n; //============================================================================ // allocate the memory : // allocate memory for the three different channels (source and dest) : checkCudaErrors(cudaMalloc(&d_source_red, p_size)); checkCudaErrors(cudaMalloc(&d_source_green, p_size)); checkCudaErrors(cudaMalloc(&d_source_blue, p_size)); checkCudaErrors(cudaMalloc(&d_dest_red, p_size)); checkCudaErrors(cudaMalloc(&d_dest_green, p_size)); checkCudaErrors(cudaMalloc(&d_dest_blue, p_size)); // allocate memory for the previous and current guesses : checkCudaErrors(cudaMalloc(&d_guess_red, p_size)); checkCudaErrors(cudaMalloc(&d_guess_green, p_size)); checkCudaErrors(cudaMalloc(&d_guess_blue, p_size)); checkCudaErrors(cudaMalloc(&d_guess_prev_red, p_size)); checkCudaErrors(cudaMalloc(&d_guess_prev_green, p_size)); checkCudaErrors(cudaMalloc(&d_guess_prev_blue, p_size)); // allocate memory for the source image and copy from host : checkCudaErrors(cudaMalloc((void**) &d_sourceImg, i_size)); checkCudaErrors(cudaMemcpy(d_sourceImg, h_sourceImg, i_size, cudaMemcpyHostToDevice)); // allocate memory for the destination image and copy from host : checkCudaErrors(cudaMalloc((void**) &d_destImg, i_size)); checkCudaErrors(cudaMemcpy(d_destImg, h_destImg, i_size, cudaMemcpyHostToDevice)); // allocate memory for the mask and init to 0 : checkCudaErrors(cudaMalloc((void**) &d_mask, m_size)); checkCudaErrors(cudaMemset(d_mask, 0, m_size)); //============================================================================ /* 1) Separate out the incoming images into three separate channels */ separateChannels<<<blocks, threads>>>(d_sourceImg, numRows, numCols, d_source_red, d_source_green, d_source_blue); separateChannels<<<blocks, threads>>>(d_destImg, numRows, numCols, d_dest_red, d_dest_green, d_dest_blue); //cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); /* 2) Compute a mask of the pixels from the source image to be copied The pixels that shouldn't be copied are completely white, they have R=255, G=255, B=255. Any other pixels SHOULD be copied. */ /* 3) Compute the interior and border regions of the mask. An interior pixel has all 4 neighbors also inside the mask. A border pixel is in the mask itself, but has at least one neighbor that isn't. */ gen_mask<<<blocks, threads>>>(d_mask, d_source_red, d_source_green, d_source_blue, numRows, numCols); //cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); /* apply_mask<<<blocks, threads>>>(d_mask, d_sourceImg, numRows, numCols); checkCudaErrors(cudaMemcpy(h_blendedImg, d_sourceImg, i_size, cudaMemcpyDeviceToHost)); */ /* 4) Create two float(!) buffers for each color channel that will act as our guesses. Initialize them to the respective color channel of the source image since that will act as our intial guess. 
*/ init_guess<<<blocks, threads>>>(d_mask, d_guess_red, d_source_red, numRows, numCols); init_guess<<<blocks, threads>>>(d_mask, d_guess_green, d_source_green, numRows, numCols); init_guess<<<blocks, threads>>>(d_mask, d_guess_blue, d_source_blue, numRows, numCols); init_guess<<<blocks, threads>>>(d_mask, d_guess_prev_red, d_source_red, numRows, numCols); init_guess<<<blocks, threads>>>(d_mask, d_guess_prev_green, d_source_green, numRows, numCols); init_guess<<<blocks, threads>>>(d_mask, d_guess_prev_blue, d_source_blue, numRows, numCols); /* 5) For each color channel perform the Jacobi iteration described above 800 times. */ for (int i = 0; i < 800; i++) { solve<<<blocks, threads>>>(d_guess_red, d_guess_prev_red, d_mask, d_source_red, d_dest_red, numRows, numCols); solve<<<blocks, threads>>>(d_guess_green, d_guess_prev_green, d_mask, d_source_green, d_dest_green, numRows, numCols); solve<<<blocks, threads>>>(d_guess_blue, d_guess_prev_blue, d_mask, d_source_blue, d_dest_blue, numRows, numCols); //cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); } /* 6) Create the output image by replacing all the interior pixels in the destination image with the result of the Jacobi iterations. Just cast the floating point values to unsigned chars since we have already made sure to clamp them to the correct range. */ replace_dest<<<blocks, threads>>>(d_guess_red, d_guess_green, d_guess_blue, d_mask, d_destImg, numRows, numCols); checkCudaErrors(cudaMemcpy(h_blendedImg, d_destImg, i_size, cudaMemcpyDeviceToHost)); /* uchar4* h_reference = new uchar4[n]; reference_calc(h_sourceImg, numRows, numCols, h_destImg, h_reference); checkResultsEps((unsigned char *)h_reference, (unsigned char *)h_blendedImg, 4 * n, 2, .01); delete[] h_reference; */ }
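One detail worth noting in the iteration loop above: solve writes d_guess_prev[idx] from inside the kernel while other blocks may still be reading neighbouring entries of d_guess_prev, so a single sweep can mix values from two iterations. A common race-free alternative is to keep the kernel read-only with respect to the previous buffer and swap the two buffers on the host between iterations. In the sketch below, solve_jacobi stands for the solve kernel with the in-kernel copy into d_guess_prev removed; it is an assumption of this sketch, not a kernel defined in the file.

// Sketch only: ping-pong buffering for the red channel (std::swap from <utility>);
// green and blue follow the same pattern.
for (int i = 0; i < 800; i++) {
    solve_jacobi<<<blocks, threads>>>(d_guess_red, d_guess_prev_red, d_mask,
                                      d_source_red, d_dest_red, numRows, numCols);
    std::swap(d_guess_red, d_guess_prev_red);   // newest values become the next "prev"
}
// after an even number of swaps per channel, the most recent guess lives in
// d_guess_prev_red, so replace_dest would read from that pointer instead.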
page25_sum.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <stdio.h>

__global__ void add(int a, int b, int *c) {
    *c = a + b;
    // must be compiled with CUDA 4.0 or newer and run on a Fermi (sm_20) or later GPU,
    // because of the in-kernel printf, e.g. /opt/cuda42/bin/nvcc -arch sm_20 page25_sum.cu
    printf("I am inside.\n");
}

int main(void) {
    int c;
    int *dev_c;
    hipMalloc((void**)&dev_c, sizeof(int));
    hipLaunchKernelGGL(add, dim3(1), dim3(1), 0, 0, 2, 7, dev_c);
    hipMemcpy(&c, dev_c, sizeof(int), hipMemcpyDeviceToHost);
    printf("2 + 7 = %d\n", c);
    hipFree(dev_c);
    return 0;
}
page25_sum.cu
#include <iostream>
#include <stdio.h>

__global__ void add(int a, int b, int *c) {
    *c = a + b;
    // must be compiled with CUDA 4.0 or newer and run on a Fermi (sm_20) or later GPU,
    // because of the in-kernel printf, e.g. /opt/cuda42/bin/nvcc -arch sm_20 page25_sum.cu
    printf("I am inside.\n");
}

int main(void) {
    int c;
    int *dev_c;
    cudaMalloc((void**)&dev_c, sizeof(int));
    add<<<1,1>>>(2, 7, dev_c);
    cudaMemcpy(&c, dev_c, sizeof(int), cudaMemcpyDeviceToHost);
    printf("2 + 7 = %d\n", c);
    cudaFree(dev_c);
    return 0;
}
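Since the only observable output of this example comes from the in-kernel printf and the copied-back result, launch failures are easy to miss. The sketch below shows the same main with basic status checking added; the CHECK macro is an assumption of this sketch, not part of the original file, and the cuda* calls map one-to-one onto the hip* calls in the hipified version above.

// Sketch only: error-checked variant of the main function, reusing the add kernel defined above.
#include <cstdio>
#define CHECK(call)                                                      \
    do {                                                                 \
        cudaError_t err = (call);                                        \
        if (err != cudaSuccess) {                                        \
            fprintf(stderr, "CUDA error %s at %s:%d\n",                  \
                    cudaGetErrorString(err), __FILE__, __LINE__);        \
            return 1;                                                    \
        }                                                                \
    } while (0)

int main(void) {
    int c;
    int *dev_c;
    CHECK(cudaMalloc((void**)&dev_c, sizeof(int)));
    add<<<1, 1>>>(2, 7, dev_c);
    CHECK(cudaGetLastError());          // catches launch-time errors
    CHECK(cudaDeviceSynchronize());     // waits for the kernel and flushes its printf
    CHECK(cudaMemcpy(&c, dev_c, sizeof(int), cudaMemcpyDeviceToHost));
    printf("2 + 7 = %d\n", c);
    CHECK(cudaFree(dev_c));
    return 0;
}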
5a5f97fad5f2c8f23d94437384d4501ccbac4b8b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include"TypeTools.test.cuh" #include"TypeTools.cuh" #include"../Stacktor/Stacktor.cuh" #include"../../Primitives/Pure/Vector3/Vector3.h" #include"../../../Namespaces/Tests/Tests.h" #ifndef __GNUC__ namespace TypeToolsTest { struct OneElem; struct TwoElem; struct ThreeElem; struct FourElem; template<typename Type> struct OneElemTemplate; template<typename Type> struct TwoElemTemplate; template<typename Type> struct ThreeElemTemplate; template<typename Type> struct FourElemTemplate; class SomethingTerriblyHuge; } TYPE_TOOLS_REDEFINE_1_PART(TypeToolsTest::OneElem, int); TYPE_TOOLS_REDEFINE_2_PART(TypeToolsTest::TwoElem, int, Vector3); TYPE_TOOLS_REDEFINE_3_PART(TypeToolsTest::ThreeElem, int, Vector3, Stacktor<char>); TYPE_TOOLS_REDEFINE_4_PART(TypeToolsTest::FourElem, int, Vector3, Stacktor<char>, Stacktor<Stacktor<short> >); TYPE_TOOLS_REDEFINE_1_PART_TEMPLATE(TypeToolsTest::OneElemTemplate, TemplateType, typename TemplateType); TYPE_TOOLS_REDEFINE_2_PART_TEMPLATE(TypeToolsTest::TwoElemTemplate, TemplateType, TemplateType, typename TemplateType); TYPE_TOOLS_REDEFINE_3_PART_TEMPLATE(TypeToolsTest::ThreeElemTemplate, TemplateType, TemplateType, TemplateType, typename TemplateType); TYPE_TOOLS_REDEFINE_4_PART_TEMPLATE(TypeToolsTest::FourElemTemplate, TemplateType, TemplateType, TemplateType, TemplateType, typename TemplateType); TYPE_TOOLS_REDEFINE_1_PART(TypeToolsTest::SomethingTerriblyHuge, Stacktor<int>); namespace TypeToolsTest { struct OneElem { int elem; __dumb__ void setValues() { elem = 9978; } __dumb__ bool checkValues() const { return (elem == 9978); } TYPE_TOOLS_ADD_COMPONENT_GETTER(OneElem, elem); }; struct TwoElem { int elem0; Vector3 elem1; __dumb__ void setValues() { elem0 = 9978; elem1 = Vector3(213.1f, 213.324f, 29130.28f); } __dumb__ bool checkValues() const { return (elem0 == 9978 && elem1 == Vector3(213.1f, 213.324f, 29130.28f)); } TYPE_TOOLS_ADD_COMPONENT_GETTERS_2(TwoElem, elem0, elem1); }; struct ThreeElem { int elem0; Vector3 elem1; Stacktor<char> elem2; __dumb__ void setValues() { elem0 = 9978; elem1 = Vector3(213.1f, 213.324f, 29130.28f); elem2.clear(); elem2.push(0, 1, 2, 3, 4); } __dumb__ bool checkValues() const { bool stacktorOk = (elem2.size() == 5); if (stacktorOk) for (int i = 0; i < 5; i++) stacktorOk &= (elem2[i] == i); return (elem0 == 9978 && elem1 == Vector3(213.1f, 213.324f, 29130.28f) && stacktorOk); } TYPE_TOOLS_ADD_COMPONENT_GETTERS_3(ThreeElem, elem0, elem1, elem2); }; struct FourElem { int elem0; Vector3 elem1; Stacktor<char> elem2; Stacktor<Stacktor<short> > elem3; __dumb__ void setValues() { elem0 = 9978; elem1 = Vector3(213.1f, 213.324f, 29130.28f); elem2.clear(); elem2.push(0, 1, 2, 3, 4); elem3.clear(); elem3.push(Stacktor<short>(9, 8, 7), Stacktor<short>(5), Stacktor<short>()); } __dumb__ bool checkValues() const { bool stacktorOk = (elem2.size() == 5); if (stacktorOk) for (int i = 0; i < 5; i++) stacktorOk &= (elem2[i] == i); stacktorOk &= (elem3.size() == 3); if (stacktorOk) stacktorOk &= (elem3[0].size() == 3 && elem3[1].size() == 1 && elem3[2].size() == 0); if (stacktorOk) stacktorOk &= (elem3[0][0] == 9 && elem3[0][1] == 8 && elem3[0][2] == 7 && elem3[1][0] == 5); return (elem0 == 9978 && elem1 == Vector3(213.1f, 213.324f, 29130.28f) && stacktorOk); } TYPE_TOOLS_ADD_COMPONENT_GETTERS_4(FourElem, elem0, elem1, elem2, elem3); }; template<typename Type> struct OneElemTemplate { Type elem; __dumb__ void setValues() { elem.setValues(); } __dumb__ bool 
checkValues() const { return elem.checkValues(); } TYPE_TOOLS_ADD_COMPONENT_GETTER(OneElemTemplate, elem); }; template<typename Type> struct TwoElemTemplate { Type elem0, elem1; __dumb__ void setValues() { elem0.setValues(); elem1.setValues(); } __dumb__ bool checkValues() const { return (elem0.checkValues() && elem1.checkValues()); } TYPE_TOOLS_ADD_COMPONENT_GETTERS_2(TwoElemTemplate, elem0, elem1); }; template<typename Type> struct ThreeElemTemplate { Type elem0, elem1, elem2; __dumb__ void setValues() { elem0.setValues(); elem1.setValues(); elem2.setValues(); } __dumb__ bool checkValues() const { return (elem0.checkValues() && elem1.checkValues() && elem2.checkValues()); } TYPE_TOOLS_ADD_COMPONENT_GETTERS_3(ThreeElemTemplate, elem0, elem1, elem2); }; template<typename Type> struct FourElemTemplate { Type elem0, elem1, elem2, elem3; __dumb__ void setValues() { elem0.setValues(); elem1.setValues(); elem2.setValues(); elem3.setValues(); } __dumb__ bool checkValues() const { return (elem0.checkValues() && elem1.checkValues() && elem2.checkValues() && elem3.checkValues()); } TYPE_TOOLS_ADD_COMPONENT_GETTERS_4(FourElemTemplate, elem0, elem1, elem2, elem3); }; class SomethingTerriblyHuge { private: Stacktor<int> data; TYPE_TOOLS_ADD_COMPONENT_GETTER(SomethingTerriblyHuge, data); public: __dumb__ void setValues() { data.clear(); for (int i = 0; i < (1 << 28); i++) data.push(i); printf("PUSHED %d NUMBERS\n", (1 << 28)); } __dumb__ bool checkValues() const { int size = data.size(); if (size == (1 << 28)) { printf("DATA SIZE CORECT\n"); printf("FOR SAKE OF PRESERVING TIME, CHECKING ONLY THE FIRST 256 VALUES\n"); bool pass = true; for (int i = 0; i < 256; i++) pass &= (data[i] == i); return pass; } else { printf("DATA SIZE INCORRECT (%d INSTEAD OF %d)\n", size, (1 << 28)); return false; } } }; } TYPE_TOOLS_IMPLEMENT_1_PART(TypeToolsTest::OneElem); TYPE_TOOLS_IMPLEMENT_2_PART(TypeToolsTest::TwoElem); TYPE_TOOLS_IMPLEMENT_3_PART(TypeToolsTest::ThreeElem); TYPE_TOOLS_IMPLEMENT_4_PART(TypeToolsTest::FourElem); TYPE_TOOLS_IMPLEMENT_1_PART_TEMPLATE(TypeToolsTest::OneElemTemplate, typename TemplateType); TYPE_TOOLS_IMPLEMENT_2_PART_TEMPLATE(TypeToolsTest::TwoElemTemplate, typename TemplateType); TYPE_TOOLS_IMPLEMENT_3_PART_TEMPLATE(TypeToolsTest::ThreeElemTemplate, typename TemplateType); TYPE_TOOLS_IMPLEMENT_4_PART_TEMPLATE(TypeToolsTest::FourElemTemplate, typename TemplateType); TYPE_TOOLS_IMPLEMENT_1_PART(TypeToolsTest::SomethingTerriblyHuge); namespace TypeToolsTest { template<typename Type> __global__ static void checkValues(const Type *object, bool *rv) { if (threadIdx.x == 0 && blockIdx.x == 0) { (*rv) = object->checkValues(); if (*rv) printf("VALUES ARE CORRECT\n"); else printf("VALUES ARE WRONG\n"); } } template<typename Type> inline static bool testType(const std::string &className, bool log = true) { std::cout << "______________________________________" << std::endl; std::cout << "...........TEST STRATED..............." 
<< std::endl; std::cout << "TESTING: " << className << std::endl; std::cout << "--------------------------" << std::endl; char rawMemory[sizeof(Type)]; Type *elem = ((Type*)rawMemory); Type other; TypeTools<Type>::init(*elem); elem->setValues(); TypeTools<Type>::transfer(*elem, other); TypeTools<Type>::swap(*elem, other); char hosCloneRaw[sizeof(Type)]; Type *hosClone = ((Type*)hosCloneRaw); Type *devTarget; bool success = true; if (hipMalloc((void**)&devTarget, sizeof(Type)) == hipSuccess) { if (log) std::cout << "ALLOCATION SUCCESSFUL" << std::endl; if (TypeTools<Type>::prepareForCpyLoad(elem, hosClone, devTarget, 1)) { if (log) std::cout << "CPY LOAD PREPARATION SUCCESSFUL" << std::endl; if (hipMemcpy(devTarget, hosClone, sizeof(Type), hipMemcpyHostToDevice) == hipSuccess) { if (log) std::cout << "UPLOAD SUCCESSFUL" << std::endl; bool *devRv; if (hipMalloc(&devRv, sizeof(bool)) == hipSuccess) { checkValues << <1, 1 >> > (devTarget, devRv); if (hipDeviceSynchronize() == hipSuccess) { if (log) std::cout << "KERNEL EXECUTION COMPLETE" << std::endl; bool kernelRv = false; if (hipMemcpy(&kernelRv, devRv, sizeof(bool), hipMemcpyDeviceToHost) != hipSuccess) { std::cout << "FAILED LOADING RV" << std::endl; kernelRv = false; } success &= kernelRv; if (hipDeviceSynchronize() != hipSuccess) { std::cout << "DEVICE SYNCHRONISATION FAILED" << std::endl; success = false; } } else { std::cout << "KERNEL EXECUTION FAILED" << std::endl; success = false; } if (hipFree(devRv) != hipSuccess) { std::cout << "RETURN VALUE DEALLOCATION FAILED" << std::endl; success = false; } } else { std::cout << "FAILED TO ALLOCATE KERNEL RETURN VALUE"; success = false; } if (TypeTools<Type>::devArrayNeedsToBeDisposed()) { if (TypeTools<Type>::disposeDevArray(devTarget, 1)) { if (log) std::cout << "DISPOSE SUCCESSFUL" << std::endl; } else { std::cout << "DISPOSE ERROR" << std::endl; success = false; } } else if (log) std::cout << "NO DISPOSAL NEEDED" << std::endl; } else { std::cout << "UPLOAD FAILED" << std::endl; success = false; } } else { std::cout << "UPLOAD ERROR" << std::endl; success = false; } if (hipFree(devTarget) == hipSuccess) { if (log) std::cout << "DEALLOCATION SUCCESSFUL" << std::endl; } else { std::cout << "DEALLOCATION ERROR" << std::endl; success = false; } } else { std::cout << "ALLOCATION ERROR" << std::endl; success = false; } TypeTools<Type>::dispose(*elem); std::cout << "...........TEST FINISHED............." << std::endl << std::endl; std::cout << "_______STATUS: " << (success ? "PASS" : "FAIL") << std::endl; return success; } static void testFunction() { int device; if (hipGetDevice(&device) == hipSuccess) { bool success = testType<OneElem>("OneElem"); success &= testType<TwoElem>("TwoElem"); success &= testType<ThreeElem>("ThreeElem"); success &= testType<FourElem>("FourElem"); success &= testType<OneElemTemplate<OneElem> >("OneElemTemplate<OneElem>"); success &= testType<TwoElemTemplate<TwoElem> >("TwoElemTemplate<TwoElem>"); success &= testType<ThreeElemTemplate<ThreeElem> >("ThreeElemTemplate<ThreeElem>"); success &= testType<FourElemTemplate<FourElem> >("FourElemTemplate<FourElem>"); success &= testType<SomethingTerriblyHuge>("SomethingTerriblyHuge"); const int n = 8; std::cout << std::endl << std::endl << std::endl << "ENTER ANYTHING TO RE-RUN THE TEST FOR SomethingTerriblyHuge " << n << " MORE TIMES... 
"; std::string s; std::getline(std::cin, s); if (s.length() > 0) for (int i = 0; i < n; i++) success &= testType<SomethingTerriblyHuge>("SomethingTerriblyHuge", false); std::cout << std::endl << std::endl << std::endl << "============================================================" << std::endl; std::cout << "DONE; FULL TEST RESULT: " << (success ? "PASS" : "FAIL") << std::endl; std::cout << "MAKE SURE, RAM AND VRAM USAGES ARE UNDER CONTROLL..." << std::endl; } else std::cout << "NO ACTIVE CUDA DEVICE FOUND TO RUN THE TEST..." << std::endl; } #else namespace TypeToolsTest { #endif void test() { #ifdef __GNUC__ std::cout << "GCC does not compile what's tested here yet... Sorry..." << std::endl; #else Tests::runTest(testFunction, "RUNNING TESTS FOR DEFAULT IMPLEMENTATIONS OF TYPE_TOOLS"); #endif } }
5a5f97fad5f2c8f23d94437384d4501ccbac4b8b.cu
#include"TypeTools.test.cuh" #include"TypeTools.cuh" #include"../Stacktor/Stacktor.cuh" #include"../../Primitives/Pure/Vector3/Vector3.h" #include"../../../Namespaces/Tests/Tests.h" #ifndef __GNUC__ namespace TypeToolsTest { struct OneElem; struct TwoElem; struct ThreeElem; struct FourElem; template<typename Type> struct OneElemTemplate; template<typename Type> struct TwoElemTemplate; template<typename Type> struct ThreeElemTemplate; template<typename Type> struct FourElemTemplate; class SomethingTerriblyHuge; } TYPE_TOOLS_REDEFINE_1_PART(TypeToolsTest::OneElem, int); TYPE_TOOLS_REDEFINE_2_PART(TypeToolsTest::TwoElem, int, Vector3); TYPE_TOOLS_REDEFINE_3_PART(TypeToolsTest::ThreeElem, int, Vector3, Stacktor<char>); TYPE_TOOLS_REDEFINE_4_PART(TypeToolsTest::FourElem, int, Vector3, Stacktor<char>, Stacktor<Stacktor<short> >); TYPE_TOOLS_REDEFINE_1_PART_TEMPLATE(TypeToolsTest::OneElemTemplate, TemplateType, typename TemplateType); TYPE_TOOLS_REDEFINE_2_PART_TEMPLATE(TypeToolsTest::TwoElemTemplate, TemplateType, TemplateType, typename TemplateType); TYPE_TOOLS_REDEFINE_3_PART_TEMPLATE(TypeToolsTest::ThreeElemTemplate, TemplateType, TemplateType, TemplateType, typename TemplateType); TYPE_TOOLS_REDEFINE_4_PART_TEMPLATE(TypeToolsTest::FourElemTemplate, TemplateType, TemplateType, TemplateType, TemplateType, typename TemplateType); TYPE_TOOLS_REDEFINE_1_PART(TypeToolsTest::SomethingTerriblyHuge, Stacktor<int>); namespace TypeToolsTest { struct OneElem { int elem; __dumb__ void setValues() { elem = 9978; } __dumb__ bool checkValues() const { return (elem == 9978); } TYPE_TOOLS_ADD_COMPONENT_GETTER(OneElem, elem); }; struct TwoElem { int elem0; Vector3 elem1; __dumb__ void setValues() { elem0 = 9978; elem1 = Vector3(213.1f, 213.324f, 29130.28f); } __dumb__ bool checkValues() const { return (elem0 == 9978 && elem1 == Vector3(213.1f, 213.324f, 29130.28f)); } TYPE_TOOLS_ADD_COMPONENT_GETTERS_2(TwoElem, elem0, elem1); }; struct ThreeElem { int elem0; Vector3 elem1; Stacktor<char> elem2; __dumb__ void setValues() { elem0 = 9978; elem1 = Vector3(213.1f, 213.324f, 29130.28f); elem2.clear(); elem2.push(0, 1, 2, 3, 4); } __dumb__ bool checkValues() const { bool stacktorOk = (elem2.size() == 5); if (stacktorOk) for (int i = 0; i < 5; i++) stacktorOk &= (elem2[i] == i); return (elem0 == 9978 && elem1 == Vector3(213.1f, 213.324f, 29130.28f) && stacktorOk); } TYPE_TOOLS_ADD_COMPONENT_GETTERS_3(ThreeElem, elem0, elem1, elem2); }; struct FourElem { int elem0; Vector3 elem1; Stacktor<char> elem2; Stacktor<Stacktor<short> > elem3; __dumb__ void setValues() { elem0 = 9978; elem1 = Vector3(213.1f, 213.324f, 29130.28f); elem2.clear(); elem2.push(0, 1, 2, 3, 4); elem3.clear(); elem3.push(Stacktor<short>(9, 8, 7), Stacktor<short>(5), Stacktor<short>()); } __dumb__ bool checkValues() const { bool stacktorOk = (elem2.size() == 5); if (stacktorOk) for (int i = 0; i < 5; i++) stacktorOk &= (elem2[i] == i); stacktorOk &= (elem3.size() == 3); if (stacktorOk) stacktorOk &= (elem3[0].size() == 3 && elem3[1].size() == 1 && elem3[2].size() == 0); if (stacktorOk) stacktorOk &= (elem3[0][0] == 9 && elem3[0][1] == 8 && elem3[0][2] == 7 && elem3[1][0] == 5); return (elem0 == 9978 && elem1 == Vector3(213.1f, 213.324f, 29130.28f) && stacktorOk); } TYPE_TOOLS_ADD_COMPONENT_GETTERS_4(FourElem, elem0, elem1, elem2, elem3); }; template<typename Type> struct OneElemTemplate { Type elem; __dumb__ void setValues() { elem.setValues(); } __dumb__ bool checkValues() const { return elem.checkValues(); } 
TYPE_TOOLS_ADD_COMPONENT_GETTER(OneElemTemplate, elem); }; template<typename Type> struct TwoElemTemplate { Type elem0, elem1; __dumb__ void setValues() { elem0.setValues(); elem1.setValues(); } __dumb__ bool checkValues() const { return (elem0.checkValues() && elem1.checkValues()); } TYPE_TOOLS_ADD_COMPONENT_GETTERS_2(TwoElemTemplate, elem0, elem1); }; template<typename Type> struct ThreeElemTemplate { Type elem0, elem1, elem2; __dumb__ void setValues() { elem0.setValues(); elem1.setValues(); elem2.setValues(); } __dumb__ bool checkValues() const { return (elem0.checkValues() && elem1.checkValues() && elem2.checkValues()); } TYPE_TOOLS_ADD_COMPONENT_GETTERS_3(ThreeElemTemplate, elem0, elem1, elem2); }; template<typename Type> struct FourElemTemplate { Type elem0, elem1, elem2, elem3; __dumb__ void setValues() { elem0.setValues(); elem1.setValues(); elem2.setValues(); elem3.setValues(); } __dumb__ bool checkValues() const { return (elem0.checkValues() && elem1.checkValues() && elem2.checkValues() && elem3.checkValues()); } TYPE_TOOLS_ADD_COMPONENT_GETTERS_4(FourElemTemplate, elem0, elem1, elem2, elem3); }; class SomethingTerriblyHuge { private: Stacktor<int> data; TYPE_TOOLS_ADD_COMPONENT_GETTER(SomethingTerriblyHuge, data); public: __dumb__ void setValues() { data.clear(); for (int i = 0; i < (1 << 28); i++) data.push(i); printf("PUSHED %d NUMBERS\n", (1 << 28)); } __dumb__ bool checkValues() const { int size = data.size(); if (size == (1 << 28)) { printf("DATA SIZE CORECT\n"); printf("FOR SAKE OF PRESERVING TIME, CHECKING ONLY THE FIRST 256 VALUES\n"); bool pass = true; for (int i = 0; i < 256; i++) pass &= (data[i] == i); return pass; } else { printf("DATA SIZE INCORRECT (%d INSTEAD OF %d)\n", size, (1 << 28)); return false; } } }; } TYPE_TOOLS_IMPLEMENT_1_PART(TypeToolsTest::OneElem); TYPE_TOOLS_IMPLEMENT_2_PART(TypeToolsTest::TwoElem); TYPE_TOOLS_IMPLEMENT_3_PART(TypeToolsTest::ThreeElem); TYPE_TOOLS_IMPLEMENT_4_PART(TypeToolsTest::FourElem); TYPE_TOOLS_IMPLEMENT_1_PART_TEMPLATE(TypeToolsTest::OneElemTemplate, typename TemplateType); TYPE_TOOLS_IMPLEMENT_2_PART_TEMPLATE(TypeToolsTest::TwoElemTemplate, typename TemplateType); TYPE_TOOLS_IMPLEMENT_3_PART_TEMPLATE(TypeToolsTest::ThreeElemTemplate, typename TemplateType); TYPE_TOOLS_IMPLEMENT_4_PART_TEMPLATE(TypeToolsTest::FourElemTemplate, typename TemplateType); TYPE_TOOLS_IMPLEMENT_1_PART(TypeToolsTest::SomethingTerriblyHuge); namespace TypeToolsTest { template<typename Type> __global__ static void checkValues(const Type *object, bool *rv) { if (threadIdx.x == 0 && blockIdx.x == 0) { (*rv) = object->checkValues(); if (*rv) printf("VALUES ARE CORRECT\n"); else printf("VALUES ARE WRONG\n"); } } template<typename Type> inline static bool testType(const std::string &className, bool log = true) { std::cout << "______________________________________" << std::endl; std::cout << "...........TEST STRATED..............." 
<< std::endl; std::cout << "TESTING: " << className << std::endl; std::cout << "--------------------------" << std::endl; char rawMemory[sizeof(Type)]; Type *elem = ((Type*)rawMemory); Type other; TypeTools<Type>::init(*elem); elem->setValues(); TypeTools<Type>::transfer(*elem, other); TypeTools<Type>::swap(*elem, other); char hosCloneRaw[sizeof(Type)]; Type *hosClone = ((Type*)hosCloneRaw); Type *devTarget; bool success = true; if (cudaMalloc((void**)&devTarget, sizeof(Type)) == cudaSuccess) { if (log) std::cout << "ALLOCATION SUCCESSFUL" << std::endl; if (TypeTools<Type>::prepareForCpyLoad(elem, hosClone, devTarget, 1)) { if (log) std::cout << "CPY LOAD PREPARATION SUCCESSFUL" << std::endl; if (cudaMemcpy(devTarget, hosClone, sizeof(Type), cudaMemcpyHostToDevice) == cudaSuccess) { if (log) std::cout << "UPLOAD SUCCESSFUL" << std::endl; bool *devRv; if (cudaMalloc(&devRv, sizeof(bool)) == cudaSuccess) { checkValues << <1, 1 >> > (devTarget, devRv); if (cudaDeviceSynchronize() == cudaSuccess) { if (log) std::cout << "KERNEL EXECUTION COMPLETE" << std::endl; bool kernelRv = false; if (cudaMemcpy(&kernelRv, devRv, sizeof(bool), cudaMemcpyDeviceToHost) != cudaSuccess) { std::cout << "FAILED LOADING RV" << std::endl; kernelRv = false; } success &= kernelRv; if (cudaDeviceSynchronize() != cudaSuccess) { std::cout << "DEVICE SYNCHRONISATION FAILED" << std::endl; success = false; } } else { std::cout << "KERNEL EXECUTION FAILED" << std::endl; success = false; } if (cudaFree(devRv) != cudaSuccess) { std::cout << "RETURN VALUE DEALLOCATION FAILED" << std::endl; success = false; } } else { std::cout << "FAILED TO ALLOCATE KERNEL RETURN VALUE"; success = false; } if (TypeTools<Type>::devArrayNeedsToBeDisposed()) { if (TypeTools<Type>::disposeDevArray(devTarget, 1)) { if (log) std::cout << "DISPOSE SUCCESSFUL" << std::endl; } else { std::cout << "DISPOSE ERROR" << std::endl; success = false; } } else if (log) std::cout << "NO DISPOSAL NEEDED" << std::endl; } else { std::cout << "UPLOAD FAILED" << std::endl; success = false; } } else { std::cout << "UPLOAD ERROR" << std::endl; success = false; } if (cudaFree(devTarget) == cudaSuccess) { if (log) std::cout << "DEALLOCATION SUCCESSFUL" << std::endl; } else { std::cout << "DEALLOCATION ERROR" << std::endl; success = false; } } else { std::cout << "ALLOCATION ERROR" << std::endl; success = false; } TypeTools<Type>::dispose(*elem); std::cout << "...........TEST FINISHED............." << std::endl << std::endl; std::cout << "_______STATUS: " << (success ? "PASS" : "FAIL") << std::endl; return success; } static void testFunction() { int device; if (cudaGetDevice(&device) == cudaSuccess) { bool success = testType<OneElem>("OneElem"); success &= testType<TwoElem>("TwoElem"); success &= testType<ThreeElem>("ThreeElem"); success &= testType<FourElem>("FourElem"); success &= testType<OneElemTemplate<OneElem> >("OneElemTemplate<OneElem>"); success &= testType<TwoElemTemplate<TwoElem> >("TwoElemTemplate<TwoElem>"); success &= testType<ThreeElemTemplate<ThreeElem> >("ThreeElemTemplate<ThreeElem>"); success &= testType<FourElemTemplate<FourElem> >("FourElemTemplate<FourElem>"); success &= testType<SomethingTerriblyHuge>("SomethingTerriblyHuge"); const int n = 8; std::cout << std::endl << std::endl << std::endl << "ENTER ANYTHING TO RE-RUN THE TEST FOR SomethingTerriblyHuge " << n << " MORE TIMES... 
"; std::string s; std::getline(std::cin, s); if (s.length() > 0) for (int i = 0; i < n; i++) success &= testType<SomethingTerriblyHuge>("SomethingTerriblyHuge", false); std::cout << std::endl << std::endl << std::endl << "============================================================" << std::endl; std::cout << "DONE; FULL TEST RESULT: " << (success ? "PASS" : "FAIL") << std::endl; std::cout << "MAKE SURE, RAM AND VRAM USAGES ARE UNDER CONTROLL..." << std::endl; } else std::cout << "NO ACTIVE CUDA DEVICE FOUND TO RUN THE TEST..." << std::endl; } #else namespace TypeToolsTest { #endif void test() { #ifdef __GNUC__ std::cout << "GCC does not compile what's tested here yet... Sorry..." << std::endl; #else Tests::runTest(testFunction, "RUNNING TESTS FOR DEFAULT IMPLEMENTATIONS OF TYPE_TOOLS"); #endif } }
e9a0c0fcf53630a15ac1ed44625f0c037f93b9c3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // nvcc SeayJohnnyHW1.cu -o SeayJohnnyHW1; ./'SeayJohnnyHW1' #include <sys/time.h> #include <stdio.h> #define N 67043328 __global__ void addition(float *A, float *B, float *C, int n, int maxThreads) { int id = threadIdx.x + blockIdx.x*maxThreads; if(id < n) { C[id] = A[id] + B[id]; } } int main ( void ) { /* Getting the device properties in order to create the minimum requried amount of blocks regardless of device. */ hipDeviceProp_t prop; hipGetDeviceProperties( &prop, 0); int maxThreads = prop.maxThreadsPerBlock; double sum, gpuTime, totalTime; float *A_CPU, *B_CPU, *C_CPU; //Pointers for memory on the host float *A_GPU, *B_GPU, *C_GPU; //Pointers for memory on the device timeval start, end; dim3 dimBlock; dim3 dimGrid; //Threads in a block dimBlock.x = maxThreads; dimBlock.y = 1; dimBlock.z = 1; //Blocks in a grid dimGrid.x = ( (N-1)/maxThreads ) + 1; dimGrid.y = 1; dimGrid.z = 1; printf("\n Length of vector:\t\t\t%d\n", N); printf(" Max number of threads per block:\t%d\n", maxThreads); printf(" Number of blocks created:\t\t%d\n\n", dimGrid.x); //Allocate Host (CPU) Memory A_CPU = (float*)malloc(N*sizeof(float)); B_CPU = (float*)malloc(N*sizeof(float)); C_CPU = (float*)malloc(N*sizeof(float)); //Allocate Device (GPU) Memory hipMalloc(&A_GPU,N*sizeof(float)); hipMalloc(&B_GPU,N*sizeof(float)); hipMalloc(&C_GPU,N*sizeof(float)); //Loads values into vectors that we will add. for(long id = 0; id < N; id++) { A_CPU[id] = 1; B_CPU[id] = 0; } //Move A and B vectors from CPU to GPU gettimeofday(&start, NULL); hipMemcpyAsync(A_GPU, A_CPU, N*sizeof(float), hipMemcpyHostToDevice); hipMemcpyAsync(B_GPU, B_CPU, N*sizeof(float), hipMemcpyHostToDevice); gettimeofday(&end, NULL); gpuTime = (end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec); printf(" Time to copy A and B to GPU \t= %.10f ms\n", (gpuTime/1000.0)); totalTime = gpuTime; //Add the two vectors together on the GPU gettimeofday(&start, NULL); hipLaunchKernelGGL(( addition), dim3(dimGrid), dim3(dimBlock), 0, 0, A_GPU, B_GPU, C_GPU, N, maxThreads); gettimeofday(&end, NULL); gpuTime = (end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec); printf(" Time to add A and B on GPU \t= %.10f ms\n", (gpuTime/1000.0)); totalTime += gpuTime; //Move the results from the GPU to the CPU gettimeofday(&start, NULL); hipMemcpyAsync(C_CPU, C_GPU, N*sizeof(float), hipMemcpyDeviceToHost); gettimeofday(&end, NULL); gpuTime = (end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec); printf(" Time to get results from GPU \t= %.10f ms\n", (gpuTime/1000.0)); totalTime += gpuTime; for(int i=0;i<50;i++){printf("-");} printf("\n Total time spent \n interacting with GPU \t\t= %.10f ms\n", (totalTime/1000.0)); sum = 0.0; for(long id = 0; id < N; id++) { sum += C_CPU[id]; } printf("\n Sum of C_CPU from GPU addition = %.10f\n", sum); free(A_CPU); free(B_CPU); free(C_CPU); hipFree(A_GPU); hipFree(B_GPU); hipFree(C_GPU); return(0); }
e9a0c0fcf53630a15ac1ed44625f0c037f93b9c3.cu
// nvcc SeayJohnnyHW1.cu -o SeayJohnnyHW1; ./'SeayJohnnyHW1' #include <sys/time.h> #include <stdio.h> #define N 67043328 __global__ void addition(float *A, float *B, float *C, int n, int maxThreads) { int id = threadIdx.x + blockIdx.x*maxThreads; if(id < n) { C[id] = A[id] + B[id]; } } int main ( void ) { /* Getting the device properties in order to create the minimum requried amount of blocks regardless of device. */ cudaDeviceProp prop; cudaGetDeviceProperties( &prop, 0); int maxThreads = prop.maxThreadsPerBlock; double sum, gpuTime, totalTime; float *A_CPU, *B_CPU, *C_CPU; //Pointers for memory on the host float *A_GPU, *B_GPU, *C_GPU; //Pointers for memory on the device timeval start, end; dim3 dimBlock; dim3 dimGrid; //Threads in a block dimBlock.x = maxThreads; dimBlock.y = 1; dimBlock.z = 1; //Blocks in a grid dimGrid.x = ( (N-1)/maxThreads ) + 1; dimGrid.y = 1; dimGrid.z = 1; printf("\n Length of vector:\t\t\t%d\n", N); printf(" Max number of threads per block:\t%d\n", maxThreads); printf(" Number of blocks created:\t\t%d\n\n", dimGrid.x); //Allocate Host (CPU) Memory A_CPU = (float*)malloc(N*sizeof(float)); B_CPU = (float*)malloc(N*sizeof(float)); C_CPU = (float*)malloc(N*sizeof(float)); //Allocate Device (GPU) Memory cudaMalloc(&A_GPU,N*sizeof(float)); cudaMalloc(&B_GPU,N*sizeof(float)); cudaMalloc(&C_GPU,N*sizeof(float)); //Loads values into vectors that we will add. for(long id = 0; id < N; id++) { A_CPU[id] = 1; B_CPU[id] = 0; } //Move A and B vectors from CPU to GPU gettimeofday(&start, NULL); cudaMemcpyAsync(A_GPU, A_CPU, N*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpyAsync(B_GPU, B_CPU, N*sizeof(float), cudaMemcpyHostToDevice); gettimeofday(&end, NULL); gpuTime = (end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec); printf(" Time to copy A and B to GPU \t= %.10f ms\n", (gpuTime/1000.0)); totalTime = gpuTime; //Add the two vectors together on the GPU gettimeofday(&start, NULL); addition<<<dimGrid, dimBlock>>>(A_GPU, B_GPU, C_GPU, N, maxThreads); gettimeofday(&end, NULL); gpuTime = (end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec); printf(" Time to add A and B on GPU \t= %.10f ms\n", (gpuTime/1000.0)); totalTime += gpuTime; //Move the results from the GPU to the CPU gettimeofday(&start, NULL); cudaMemcpyAsync(C_CPU, C_GPU, N*sizeof(float), cudaMemcpyDeviceToHost); gettimeofday(&end, NULL); gpuTime = (end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec); printf(" Time to get results from GPU \t= %.10f ms\n", (gpuTime/1000.0)); totalTime += gpuTime; for(int i=0;i<50;i++){printf("-");} printf("\n Total time spent \n interacting with GPU \t\t= %.10f ms\n", (totalTime/1000.0)); sum = 0.0; for(long id = 0; id < N; id++) { sum += C_CPU[id]; } printf("\n Sum of C_CPU from GPU addition = %.10f\n", sum); free(A_CPU); free(B_CPU); free(C_CPU); cudaFree(A_GPU); cudaFree(B_GPU); cudaFree(C_GPU); return(0); }
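Both halves of the SeayJohnnyHW1 pair above bracket the cudaMemcpyAsync calls and the addition<<<...>>> launch with gettimeofday but never synchronize before reading the end time, so the kernel interval mainly measures launch overhead and the copy intervals can end before the device-side transfer is done. Below is a self-contained sketch of CUDA event timing, which measures completion on the GPU itself; busyKernel and its sizes are placeholders, not taken from the assignment.

#include <cstdio>
#include <cuda_runtime.h>

__global__ void busyKernel(float* x, int n) {
	int id = blockIdx.x*blockDim.x + threadIdx.x;
	if (id < n) x[id] = x[id]*2.0f + 1.0f;     // placeholder work to time
}

int main() {
	const int n = 1 << 20;
	float* d = NULL;
	cudaMalloc((void**)&d, n*sizeof(float));
	cudaMemset(d, 0, n*sizeof(float));

	cudaEvent_t start, stop;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);

	cudaEventRecord(start, 0);
	busyKernel<<<(n + 255)/256, 256>>>(d, n);
	cudaEventRecord(stop, 0);
	cudaEventSynchronize(stop);                // block until the kernel has finished

	float ms = 0.0f;
	cudaEventElapsedTime(&ms, start, stop);    // elapsed GPU time in milliseconds
	printf("kernel time = %.6f ms\n", ms);

	cudaEventDestroy(start);
	cudaEventDestroy(stop);
	cudaFree(d);
	return 0;
}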
fb1eb19039f63b9a3f399da0d831f0b63a7fb613.hip
// !!! This is a file automatically generated by hipify!!! #include "onmt/cuda/Kernels.cuh" #include <hip/hip_runtime.h> namespace onmt { namespace cuda { namespace kernels { struct AddOp { __device__ __forceinline__ void operator()(float* out, const float* in) { *out += *in; } }; template <typename Op> __global__ void pointwise2_kernel(float* __restrict__ dst, const float* __restrict__ src, int len) { int stride = gridDim.x * blockDim.x; int tid = blockDim.x * blockIdx.x + threadIdx.x; Op op; for (int i = tid; i < len; i += stride) { op(dst + i, src + i); } } template <typename Op> void pointwise2(float* dst, const float* src, int len) { int grid_size = -1; int block_size = -1; hipOccupancyMaxPotentialBlockSize(&grid_size, &block_size, &pointwise2_kernel<Op>); grid_size = (len + block_size - 1) / block_size; hipLaunchKernelGGL(( pointwise2_kernel<Op>), dim3(grid_size), dim3(block_size), 0, 0, dst, src, len); } void add(float* a, const float* b, int len) { pointwise2<AddOp>(a, b, len); } } } }
fb1eb19039f63b9a3f399da0d831f0b63a7fb613.cu
#include "onmt/cuda/Kernels.cuh" #include <cuda_runtime.h> namespace onmt { namespace cuda { namespace kernels { struct AddOp { __device__ __forceinline__ void operator()(float* out, const float* in) { *out += *in; } }; template <typename Op> __global__ void pointwise2_kernel(float* __restrict__ dst, const float* __restrict__ src, int len) { int stride = gridDim.x * blockDim.x; int tid = blockDim.x * blockIdx.x + threadIdx.x; Op op; for (int i = tid; i < len; i += stride) { op(dst + i, src + i); } } template <typename Op> void pointwise2(float* dst, const float* src, int len) { int grid_size = -1; int block_size = -1; cudaOccupancyMaxPotentialBlockSize(&grid_size, &block_size, &pointwise2_kernel<Op>); grid_size = (len + block_size - 1) / block_size; pointwise2_kernel<Op><<<grid_size, block_size>>>(dst, src, len); } void add(float* a, const float* b, int len) { pointwise2<AddOp>(a, b, len); } } } }
e9b3008cc5412ec18cc28523ff703dbe5b159570.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <vector> #include "caffe/layer.hpp" #include "caffe/vision_layers.hpp" namespace caffe { // CUDA kernele for forward template <typename Dtype> __global__ void PReLUForward(const int n, const int channels, const int dim, const Dtype* in, Dtype* out, const Dtype* slope_data, const int div_factor) { CUDA_KERNEL_LOOP(index, n) { int c = (index / dim) % channels / div_factor; out[index] = in[index] > 0 ? in[index] : in[index] * slope_data[c]; } } // CUDA kernel for bottom backward template <typename Dtype> __global__ void PReLUBackward(const int n, const int channels, const int dim, const Dtype* in_diff, const Dtype* in_data, Dtype* out_diff, const Dtype* slope_data, const int div_factor) { CUDA_KERNEL_LOOP(index, n) { int c = (index / dim) % channels / div_factor; out_diff[index] = in_diff[index] * ((in_data[index] > 0) + (in_data[index] <= 0) * slope_data[c]); } } // CUDA kernel for element-wise parameter backward template <typename Dtype> __global__ void PReLUParamBackward(const int n, const Dtype* in_diff, const Dtype* in_data, Dtype* out_diff) { CUDA_KERNEL_LOOP(index, n) { out_diff[index] = in_diff[index] * in_data[index] * (in_data[index] <= 0); } } template <typename Dtype> void PReLULayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); const int count = bottom[0]->count(); const int dim = bottom[0]->count(2); const int channels = bottom[0]->channels(); const Dtype* slope_data = this->blobs_[0]->gpu_data(); const int div_factor = channel_shared_ ? channels : 1; // For in-place computation if (top[0] == bottom[0]) { caffe_copy(count, bottom_data, bottom_memory_.mutable_gpu_data()); } // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( PReLUForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, channels, dim, bottom_data, top_data, slope_data, div_factor); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> void PReLULayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); const int count = bottom[0]->count(); const int dim = bottom[0]->count(2); const int channels = bottom[0]->channels(); // For in-place computation if (top[0] == bottom[0]) { bottom_data = bottom_memory_.gpu_data(); } // Propagte to param // Since to write bottom diff will affect top diff if top and bottom blobs // are identical (in-place computaion), we first compute param backward to // keep top_diff unchanged. 
if (this->param_propagate_down_[0]) { Dtype* slope_diff = this->blobs_[0]->mutable_gpu_diff(); // slope_diff is set as 0, then accumulated over batches // caffe_gpu_set<Dtype>(this->blobs_[0]->count(), Dtype(0), slope_diff); int cdim = channels * dim; Dtype dsum = 0.; for (int n = 0; n < bottom[0]->num(); ++n) { Dtype* temp_buff = multiplier_.mutable_gpu_diff(); // compute element-wise diff // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( PReLUParamBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, cdim, top_diff + top[0]->offset(n), bottom_data + bottom[0]->offset(n), multiplier_.mutable_gpu_diff()); CUDA_POST_KERNEL_CHECK; if (channel_shared_) { Dtype d; caffe_gpu_dot<Dtype>(channels * dim, multiplier_.gpu_diff(), multiplier_.gpu_data(), &d); dsum += d; } else { caffe_gpu_gemv<Dtype>(CblasNoTrans, channels, dim, 1., multiplier_.gpu_diff(), multiplier_.gpu_data(), 1., slope_diff); } } if (channel_shared_) { // caffe_gpu_set(this->blobs_[0]->count(), Dtype(dsum), slope_diff); caffe_gpu_add_scalar(this->blobs_[0]->count(), Dtype(dsum), slope_diff); } } // Propagate to bottom if (propagate_down[0]) { Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const Dtype* slope_data = this->blobs_[0]->gpu_data(); int div_factor = channel_shared_ ? channels : 1; // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( PReLUBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, channels, dim, top_diff, bottom_data, bottom_diff, slope_data, div_factor); CUDA_POST_KERNEL_CHECK; } } INSTANTIATE_LAYER_GPU_FUNCS(PReLULayer); } // namespace caffe
e9b3008cc5412ec18cc28523ff703dbe5b159570.cu
#include <algorithm> #include <vector> #include "caffe/layer.hpp" #include "caffe/vision_layers.hpp" namespace caffe { // CUDA kernele for forward template <typename Dtype> __global__ void PReLUForward(const int n, const int channels, const int dim, const Dtype* in, Dtype* out, const Dtype* slope_data, const int div_factor) { CUDA_KERNEL_LOOP(index, n) { int c = (index / dim) % channels / div_factor; out[index] = in[index] > 0 ? in[index] : in[index] * slope_data[c]; } } // CUDA kernel for bottom backward template <typename Dtype> __global__ void PReLUBackward(const int n, const int channels, const int dim, const Dtype* in_diff, const Dtype* in_data, Dtype* out_diff, const Dtype* slope_data, const int div_factor) { CUDA_KERNEL_LOOP(index, n) { int c = (index / dim) % channels / div_factor; out_diff[index] = in_diff[index] * ((in_data[index] > 0) + (in_data[index] <= 0) * slope_data[c]); } } // CUDA kernel for element-wise parameter backward template <typename Dtype> __global__ void PReLUParamBackward(const int n, const Dtype* in_diff, const Dtype* in_data, Dtype* out_diff) { CUDA_KERNEL_LOOP(index, n) { out_diff[index] = in_diff[index] * in_data[index] * (in_data[index] <= 0); } } template <typename Dtype> void PReLULayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); const int count = bottom[0]->count(); const int dim = bottom[0]->count(2); const int channels = bottom[0]->channels(); const Dtype* slope_data = this->blobs_[0]->gpu_data(); const int div_factor = channel_shared_ ? channels : 1; // For in-place computation if (top[0] == bottom[0]) { caffe_copy(count, bottom_data, bottom_memory_.mutable_gpu_data()); } // NOLINT_NEXT_LINE(whitespace/operators) PReLUForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, channels, dim, bottom_data, top_data, slope_data, div_factor); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> void PReLULayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); const int count = bottom[0]->count(); const int dim = bottom[0]->count(2); const int channels = bottom[0]->channels(); // For in-place computation if (top[0] == bottom[0]) { bottom_data = bottom_memory_.gpu_data(); } // Propagte to param // Since to write bottom diff will affect top diff if top and bottom blobs // are identical (in-place computaion), we first compute param backward to // keep top_diff unchanged. 
if (this->param_propagate_down_[0]) { Dtype* slope_diff = this->blobs_[0]->mutable_gpu_diff(); // slope_diff is set as 0, then accumulated over batches // caffe_gpu_set<Dtype>(this->blobs_[0]->count(), Dtype(0), slope_diff); int cdim = channels * dim; Dtype dsum = 0.; for (int n = 0; n < bottom[0]->num(); ++n) { Dtype* temp_buff = multiplier_.mutable_gpu_diff(); // compute element-wise diff // NOLINT_NEXT_LINE(whitespace/operators) PReLUParamBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( cdim, top_diff + top[0]->offset(n), bottom_data + bottom[0]->offset(n), multiplier_.mutable_gpu_diff()); CUDA_POST_KERNEL_CHECK; if (channel_shared_) { Dtype d; caffe_gpu_dot<Dtype>(channels * dim, multiplier_.gpu_diff(), multiplier_.gpu_data(), &d); dsum += d; } else { caffe_gpu_gemv<Dtype>(CblasNoTrans, channels, dim, 1., multiplier_.gpu_diff(), multiplier_.gpu_data(), 1., slope_diff); } } if (channel_shared_) { // caffe_gpu_set(this->blobs_[0]->count(), Dtype(dsum), slope_diff); caffe_gpu_add_scalar(this->blobs_[0]->count(), Dtype(dsum), slope_diff); } } // Propagate to bottom if (propagate_down[0]) { Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const Dtype* slope_data = this->blobs_[0]->gpu_data(); int div_factor = channel_shared_ ? channels : 1; // NOLINT_NEXT_LINE(whitespace/operators) PReLUBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, channels, dim, top_diff, bottom_data, bottom_diff, slope_data, div_factor); CUDA_POST_KERNEL_CHECK; } } INSTANTIATE_LAYER_GPU_FUNCS(PReLULayer); } // namespace caffe
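The PReLU pair above launches every kernel as <<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>> and writes the body with CUDA_KERNEL_LOOP; those helpers live in Caffe headers that are not part of this pair. The sketch below is an approximate, from-memory rendition of them plus a toy kernel, so the constant 512 and the macro bodies should be treated as assumptions to be checked against caffe/util/device_alternate.hpp.

#include <cstdio>
#include <cuda_runtime.h>

// Approximate stand-ins for Caffe's launch helpers (assumed definitions).
const int CAFFE_CUDA_NUM_THREADS = 512;

inline int CAFFE_GET_BLOCKS(const int n) {
	return (n + CAFFE_CUDA_NUM_THREADS - 1) / CAFFE_CUDA_NUM_THREADS;  // ceil(n / threads)
}

#define CUDA_KERNEL_LOOP(i, n) \
	for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
	     i < (n); \
	     i += blockDim.x * gridDim.x)

__global__ void fill(float* x, float v, int n) {
	CUDA_KERNEL_LOOP(idx, n) { x[idx] = v; }   // grid-stride body, like PReLUForward
}

int main() {
	const int n = 100000;
	float* d = NULL;
	cudaMalloc((void**)&d, n*sizeof(float));
	fill<<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS>>>(d, 1.0f, n);
	cudaDeviceSynchronize();
	printf("launched %d blocks of %d threads\n", CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS);
	cudaFree(d);
	return 0;
}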
e4fab9152d89e321192fe0cbdd8918137c43a08b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <pthread.h> #include "helpers.cuh" #include "sw.cuh" template<bool fixedTop> __global__ void sw_single_block_global(CellDecision* decisions, int* bestScores, int* bestI, int* bestJ, char *seq1, unsigned long len1, char *seq2, unsigned long len2) { // Using abitrary threads, ideally len1 <= len2 if (threadIdx.x == 0) decisions[0] = (CellDecision) {0, Nil}; for (int gridRow = 0; gridRow * blockDim.x < len2; gridRow++) { int j = threadIdx.x + gridRow * blockDim.x; if (j < len2) { decisions[j + 1] = fixedTop ? (CellDecision) {(j + 1) * GAP_PENALTY, Left} : (CellDecision) {0, Nil}; } for (int gridCol = 0; gridCol * blockDim.x < len1; gridCol++) { int iStart = gridCol * blockDim.x; if (iStart + threadIdx.x + 1 <= len1) { decisions[(iStart + threadIdx.x + 1) * (len2+1)] = fixedTop ? (CellDecision) {(iStart + (int)threadIdx.x + 1) * GAP_PENALTY, Above} : (CellDecision) {0, Nil}; } __syncthreads(); char seq2_symbol = '\0'; if (j < len2) seq2_symbol = seq2[j]; for (unsigned long k = 0; k < 2*blockDim.x - 1; k++) { int i = iStart + k - threadIdx.x; if (iStart <= i && i < iStart + blockDim.x && i < len1 && j < len2) { CellDecision current; if (fixedTop) { current = decideCellNW( decisions[i*(len2+1) + j].score + match(seq1[i], seq2_symbol), decisions[i*(len2+1) + (j+1)].score + GAP_PENALTY, decisions[(i+1)*(len2+1) + j].score + GAP_PENALTY ); } else { current = decideCellSW( decisions[i*(len2+1) + j].score + match(seq1[i], seq2_symbol), decisions[i*(len2+1) + (j+1)].score + GAP_PENALTY, decisions[(i+1)*(len2+1) + j].score + GAP_PENALTY ); } decisions[(i+1)*(len2+1) + (j+1)] = current; if (current.score > bestScores[threadIdx.x]) { bestScores[threadIdx.x] = current.score; bestI[threadIdx.x] = i + 1; bestJ[threadIdx.x] = j + 1; } } __syncthreads(); } } } for (unsigned int s= blockDim.x / 2; s > 0; s >>= 1) { if (threadIdx.x < s && threadIdx.x + s < blockDim.x && threadIdx.x + s < len2) { if (bestScores[threadIdx.x] < bestScores[threadIdx.x + s]) { bestScores[threadIdx.x] = bestScores[threadIdx.x + s]; bestI[threadIdx.x] = bestI[threadIdx.x + s]; bestJ[threadIdx.x] = bestJ[threadIdx.x + s]; } } __syncthreads(); } // Bring best values to front of array, if last block if (threadIdx.x == 0) { bestScores[0] = bestScores[0]; bestScores[1] = bestI[0]; bestScores[2] = bestJ[0]; } } AlignedPair* sw_single_block(hipStream_t stream, char *seq1, unsigned long len1, char *seq2, unsigned long len2, bool fixedTop, bool fixedBottom) { AlignedPair* alignedPair; hipMallocManaged(&alignedPair, sizeof(AlignedPair)); char* aligned1; hipMallocManaged(&aligned1, (len1 + len2 +1) * sizeof(char)); alignedPair->seq1 = aligned1; char* aligned2; hipMallocManaged(&aligned2, (len1 + len2 +1) * sizeof(char)); alignedPair->seq2 = aligned2; if (len1 == 0 || len2 == 0) { if (fixedTop && fixedBottom) { if (len1 == 0) { hipMemcpy(aligned2, seq2, len2 * sizeof(char), hipMemcpyDeviceToHost); hipMemset(aligned1, '-', len2*sizeof(char)); aligned1[len2] = '\0'; aligned2[len2] = '\0'; } else { hipMemcpy(aligned1, seq1, len1 * sizeof(char), hipMemcpyDeviceToHost); hipMemset(aligned2, '-', len1*sizeof(char)); aligned1[len1] = '\0'; aligned2[len1] = '\0'; } } else { aligned1[0] = '\0'; aligned2[0] = '\0'; } // printf("%s\n", aligned1); // printf("%s\n", aligned2); return alignedPair; } unsigned int spaceNeeded = (len1+1) * (len2+1) * sizeof(CellDecision); CellDecision* decisions; hipMalloc(&decisions, spaceNeeded); int* bestScores; 
hipMalloc(&bestScores, max(len2, 3L) * sizeof(int)); hipMemset(bestScores, 0, max(len2, 3L) * sizeof(int)); int* bestI; hipMalloc(&bestI, len2 * sizeof(int)); int* bestJ; hipMalloc(&bestJ, len2 * sizeof(int)); if (fixedTop) hipLaunchKernelGGL(( sw_single_block_global<true>), dim3(1), dim3(MAX_THREADS), 0, stream, decisions, bestScores, bestI, bestJ, seq1, len1, seq2, len2); else hipLaunchKernelGGL(( sw_single_block_global<false>), dim3(1), dim3(MAX_THREADS), 0, stream, decisions, bestScores, bestI, bestJ, seq1, len1, seq2, len2); hipStreamSynchronize(stream); BestCell bestCell = (BestCell){0, 0, 0}; hipMemcpy(&(bestCell.score), bestScores, sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(&(bestCell.i), bestScores+1, sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(&(bestCell.j), bestScores+2, sizeof(int), hipMemcpyDeviceToHost); hipLaunchKernelGGL(( backtraceRunner), dim3(1),dim3(1),0,stream, seq1, len1, seq2, len2, decisions, bestCell, fixedBottom, alignedPair); hipStreamSynchronize(stream); // printf("%s\n", aligned1); // printf("%s\n", aligned2); hipFree(decisions); hipFree(bestScores); hipFree(bestI); hipFree(bestJ); return alignedPair; }
e4fab9152d89e321192fe0cbdd8918137c43a08b.cu
#include <stdio.h> #include <pthread.h> #include "helpers.cuh" #include "sw.cuh" template<bool fixedTop> __global__ void sw_single_block_global(CellDecision* decisions, int* bestScores, int* bestI, int* bestJ, char *seq1, unsigned long len1, char *seq2, unsigned long len2) { // Using abitrary threads, ideally len1 <= len2 if (threadIdx.x == 0) decisions[0] = (CellDecision) {0, Nil}; for (int gridRow = 0; gridRow * blockDim.x < len2; gridRow++) { int j = threadIdx.x + gridRow * blockDim.x; if (j < len2) { decisions[j + 1] = fixedTop ? (CellDecision) {(j + 1) * GAP_PENALTY, Left} : (CellDecision) {0, Nil}; } for (int gridCol = 0; gridCol * blockDim.x < len1; gridCol++) { int iStart = gridCol * blockDim.x; if (iStart + threadIdx.x + 1 <= len1) { decisions[(iStart + threadIdx.x + 1) * (len2+1)] = fixedTop ? (CellDecision) {(iStart + (int)threadIdx.x + 1) * GAP_PENALTY, Above} : (CellDecision) {0, Nil}; } __syncthreads(); char seq2_symbol = '\0'; if (j < len2) seq2_symbol = seq2[j]; for (unsigned long k = 0; k < 2*blockDim.x - 1; k++) { int i = iStart + k - threadIdx.x; if (iStart <= i && i < iStart + blockDim.x && i < len1 && j < len2) { CellDecision current; if (fixedTop) { current = decideCellNW( decisions[i*(len2+1) + j].score + match(seq1[i], seq2_symbol), decisions[i*(len2+1) + (j+1)].score + GAP_PENALTY, decisions[(i+1)*(len2+1) + j].score + GAP_PENALTY ); } else { current = decideCellSW( decisions[i*(len2+1) + j].score + match(seq1[i], seq2_symbol), decisions[i*(len2+1) + (j+1)].score + GAP_PENALTY, decisions[(i+1)*(len2+1) + j].score + GAP_PENALTY ); } decisions[(i+1)*(len2+1) + (j+1)] = current; if (current.score > bestScores[threadIdx.x]) { bestScores[threadIdx.x] = current.score; bestI[threadIdx.x] = i + 1; bestJ[threadIdx.x] = j + 1; } } __syncthreads(); } } } for (unsigned int s= blockDim.x / 2; s > 0; s >>= 1) { if (threadIdx.x < s && threadIdx.x + s < blockDim.x && threadIdx.x + s < len2) { if (bestScores[threadIdx.x] < bestScores[threadIdx.x + s]) { bestScores[threadIdx.x] = bestScores[threadIdx.x + s]; bestI[threadIdx.x] = bestI[threadIdx.x + s]; bestJ[threadIdx.x] = bestJ[threadIdx.x + s]; } } __syncthreads(); } // Bring best values to front of array, if last block if (threadIdx.x == 0) { bestScores[0] = bestScores[0]; bestScores[1] = bestI[0]; bestScores[2] = bestJ[0]; } } AlignedPair* sw_single_block(cudaStream_t stream, char *seq1, unsigned long len1, char *seq2, unsigned long len2, bool fixedTop, bool fixedBottom) { AlignedPair* alignedPair; cudaMallocManaged(&alignedPair, sizeof(AlignedPair)); char* aligned1; cudaMallocManaged(&aligned1, (len1 + len2 +1) * sizeof(char)); alignedPair->seq1 = aligned1; char* aligned2; cudaMallocManaged(&aligned2, (len1 + len2 +1) * sizeof(char)); alignedPair->seq2 = aligned2; if (len1 == 0 || len2 == 0) { if (fixedTop && fixedBottom) { if (len1 == 0) { cudaMemcpy(aligned2, seq2, len2 * sizeof(char), cudaMemcpyDeviceToHost); cudaMemset(aligned1, '-', len2*sizeof(char)); aligned1[len2] = '\0'; aligned2[len2] = '\0'; } else { cudaMemcpy(aligned1, seq1, len1 * sizeof(char), cudaMemcpyDeviceToHost); cudaMemset(aligned2, '-', len1*sizeof(char)); aligned1[len1] = '\0'; aligned2[len1] = '\0'; } } else { aligned1[0] = '\0'; aligned2[0] = '\0'; } // printf("%s\n", aligned1); // printf("%s\n", aligned2); return alignedPair; } unsigned int spaceNeeded = (len1+1) * (len2+1) * sizeof(CellDecision); CellDecision* decisions; cudaMalloc(&decisions, spaceNeeded); int* bestScores; cudaMalloc(&bestScores, max(len2, 3L) * sizeof(int)); cudaMemset(bestScores, 
0, max(len2, 3L) * sizeof(int)); int* bestI; cudaMalloc(&bestI, len2 * sizeof(int)); int* bestJ; cudaMalloc(&bestJ, len2 * sizeof(int)); if (fixedTop) sw_single_block_global<true><<<1, MAX_THREADS, 0, stream>>>(decisions, bestScores, bestI, bestJ, seq1, len1, seq2, len2); else sw_single_block_global<false><<<1, MAX_THREADS, 0, stream>>>(decisions, bestScores, bestI, bestJ, seq1, len1, seq2, len2); cudaStreamSynchronize(stream); BestCell bestCell = (BestCell){0, 0, 0}; cudaMemcpy(&(bestCell.score), bestScores, sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(&(bestCell.i), bestScores+1, sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(&(bestCell.j), bestScores+2, sizeof(int), cudaMemcpyDeviceToHost); backtraceRunner<<<1,1,0,stream>>>(seq1, len1, seq2, len2, decisions, bestCell, fixedBottom, alignedPair); cudaStreamSynchronize(stream); // printf("%s\n", aligned1); // printf("%s\n", aligned2); cudaFree(decisions); cudaFree(bestScores); cudaFree(bestI); cudaFree(bestJ); return alignedPair; }
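The sw_single_block_global kernel above finishes with the halving-stride reduction (for s = blockDim.x/2; s > 0; s >>= 1) to keep the best (score, i, j) triple in element 0. Here is the same pattern in isolation, as a generic shared-memory max reduction; maxReduce and its test data are illustrative and not taken from the file.

#include <cstdio>
#include <climits>
#include <cuda_runtime.h>

__global__ void maxReduce(const int* in, int* out, int n) {
	extern __shared__ int sdata[];                 // blockDim.x ints, passed at launch
	int tid = threadIdx.x;
	int i = blockIdx.x*blockDim.x + threadIdx.x;
	sdata[tid] = (i < n) ? in[i] : INT_MIN;        // pad the tail with -infinity
	__syncthreads();
	// Halve the number of active threads each step, keeping the larger value.
	for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
		if (tid < s && sdata[tid] < sdata[tid + s]) sdata[tid] = sdata[tid + s];
		__syncthreads();
	}
	if (tid == 0) out[blockIdx.x] = sdata[0];      // one partial maximum per block
}

int main() {
	const int n = 1000, threads = 256, blocks = (n + threads - 1) / threads;
	int h[n];
	for (int i = 0; i < n; i++) h[i] = (i*37) % 1000;
	int *dIn, *dOut, partial[blocks];
	cudaMalloc((void**)&dIn, n*sizeof(int));
	cudaMalloc((void**)&dOut, blocks*sizeof(int));
	cudaMemcpy(dIn, h, n*sizeof(int), cudaMemcpyHostToDevice);
	maxReduce<<<blocks, threads, threads*sizeof(int)>>>(dIn, dOut, n);
	cudaMemcpy(partial, dOut, blocks*sizeof(int), cudaMemcpyDeviceToHost);
	int best = INT_MIN;
	for (int b = 0; b < blocks; b++) if (partial[b] > best) best = partial[b];
	printf("max = %d (expect 999)\n", best);
	cudaFree(dIn); cudaFree(dOut);
	return 0;
}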
3a1012dc75fcd02a925ff70725efd2927551db0d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* part4.cu */ #include "part4_conf.h" #include "part4_cpu.h" #include "part4_kernel.cu" /* ====================== Main_CPU =========================== */ int main(int argc, char* argv[]) { int *a, *a_device, *b, *b_device; int *solution, *solution_device, *meilleure_solution; int *voisin, *voisin_device, *ij; int i, j, k, score, best_score; int n, temp, m, condition, nb_blocks, nb_solution; int seed = time(NULL); // donnes temporelles clock_t initial_time; /* Initial time in micro-seconds */ clock_t final_time; /* Final time in micro-seconds */ float cpu_time; /* Total time of the cpu in seconds */ float gpu_time; /* Total time of the gpu in seconds */ if (argc < 3) { printf("Please give a data file in argument 1 and the number of iterations in argument 2.\n"); exit(1); } loadInstances(argv[1],n,a,b); nb_solution = atoi(argv[2]); m = n*(n-1)/2; // taille du tableau voisin nb_blocks = m/NB_THREAD; // nombre de blocs if ( m % NB_THREAD != 0) nb_blocks++; // Allocations dynamiques solution = (int*) malloc(n*sizeof(int)); meilleure_solution = (int*) malloc(n*sizeof(int)); voisin = (int*) malloc(n*(n-1)/2*sizeof(int)); ij = (int*) malloc(3*sizeof(int)); hipMalloc( (void **) &a_device, n*n*sizeof(int) ); hipMalloc( (void **) &b_device, n*n*sizeof(int) ); hipMalloc( (void **) &solution_device, n*sizeof(int) ); hipMalloc( (void **) &voisin_device, m*sizeof(int) ); hipMemcpy( a_device, a, n*n*sizeof(int), hipMemcpyHostToDevice ); hipMemcpy( b_device, b, n*n*sizeof(int), hipMemcpyHostToDevice ); /* ================================================================== = = = Multistart CPU : = = = = on lance le hill-climbing nb_solution fois = = (rentr en ligne de commande) = = = ================================================================== */ /* ces instructions sont les mmes que dans pour le multistart du gpu donc je me permets de ne pas les commenter.*/ initial_time = clock(); srand(seed); for (k=0; k<nb_solution; k++) { create(solution,n); score = evaluation(a,b,solution,n); condition = 0; while ( condition == 0 ) { ij[0] = 0; ij[1] = 1; ij[2] = 1; for (i=0; i<(n-1); i++) { for (j=i+1; j<n; j++) { temp = compute_delta_cpu(a, b, solution, i, j, n); if (temp < ij[0]) { ij[0] = temp; ij[1] = i; ij[2] = j; } } } if (ij[0] >= 0) condition = 1; temp = solution[ij[1]]; solution[ij[1]] = solution[ij[2]]; solution[ij[2]] = temp; score = score + ij[0]; } if ( (k == 0) || ( (k != 0) && (score < best_score) ) ) { memcpy(meilleure_solution, solution, n*sizeof(int)); best_score = score; } } final_time = clock(); cpu_time = (final_time - initial_time)*1e-6; // affichage des rsultats finaux sur CPU printf("Le meilleur score trouv par les Hill-climbing avec le CPU est :\n"); printf("z(pi) = %d \n", best_score); printf("pi = [ "); for (k=0; k<n; k++) printf("%d ", meilleure_solution[k]); printf("] \n"); printf("Temps d'excution CPU : %f s\n\n", cpu_time); /* ================================================================== = = = Multistart GPU : = = = = on lance le hill-climbing nb_solution fois = = (rentr en ligne de commande) = = = ================================================================== */ initial_time = clock(); srand(seed); for (k=0; k<nb_solution; k++) { create(solution,n); // gnre une solution pi score = evaluation(a,b,solution,n); // value le score z(pi) condition = 0; // boolen conditionnel pour la boucle // Recherche d'une meilleure solution tant qu'un voisin en propose une while ( condition == 0 ) { 
hipMemcpy( solution_device, solution, n*sizeof(int), hipMemcpyHostToDevice ); // le gpu calcule le dcalage d'un voisin hipLaunchKernelGGL(( main_gpu), dim3(nb_blocks), dim3(NB_THREAD), 0, 0, voisin_device, a_device, b_device, solution_device, n); hipMemcpy( voisin, voisin_device, m*sizeof(int), hipMemcpyDeviceToHost ); // le cpu dfinit les lments de solution permuter pour avoir un meilleur score min_tab(ij, voisin, m, n, condition); // on permute les lments trouvs temp = solution[ij[0]]; solution[ij[0]] = solution[ij[1]]; solution[ij[1]] = temp; // on calcule le nouveau score score = score + ij[2]; } // initialisation de la meilleure solution et du meilleur score l'tape k=0 if (k == 0) { memcpy(meilleure_solution, solution, n*sizeof(int)); best_score = score; } // cre la meilleure solution et le meilleur score si il en a un if ( (k != 0) && (score < best_score) ) { memcpy(meilleure_solution, solution, n*sizeof(int)); best_score = score; } } final_time = clock(); gpu_time = (final_time - initial_time)*1e-6; printf("Le meilleur score trouv par les Hill-climbing avec le GPU est :\n"); printf("z(pi) = %d \n", best_score); printf("pi = [ "); for (k=0; k<n; k++) printf("%d ", meilleure_solution[k]); printf("] \n"); printf("Temps d'excution GPU : %f s\n\n", gpu_time); /* ====================================================== = = = fin de ce merveilleux programme = = = ====================================================== */ // dsallocation des tableaux hipFree(a_device); hipFree(b_device); hipFree(solution_device); hipFree(voisin_device); free(a); free(b); free(ij); free(solution); free(voisin); free(meilleure_solution); }
3a1012dc75fcd02a925ff70725efd2927551db0d.cu
/* part4.cu */ #include "part4_conf.h" #include "part4_cpu.h" #include "part4_kernel.cu" /* ====================== Main_CPU =========================== */ int main(int argc, char* argv[]) { int *a, *a_device, *b, *b_device; int *solution, *solution_device, *meilleure_solution; int *voisin, *voisin_device, *ij; int i, j, k, score, best_score; int n, temp, m, condition, nb_blocks, nb_solution; int seed = time(NULL); // données temporelles clock_t initial_time; /* Initial time in micro-seconds */ clock_t final_time; /* Final time in micro-seconds */ float cpu_time; /* Total time of the cpu in seconds */ float gpu_time; /* Total time of the gpu in seconds */ if (argc < 3) { printf("Please give a data file in argument 1 and the number of iterations in argument 2.\n"); exit(1); } loadInstances(argv[1],n,a,b); nb_solution = atoi(argv[2]); m = n*(n-1)/2; // taille du tableau voisin nb_blocks = m/NB_THREAD; // nombre de blocs if ( m % NB_THREAD != 0) nb_blocks++; // Allocations dynamiques solution = (int*) malloc(n*sizeof(int)); meilleure_solution = (int*) malloc(n*sizeof(int)); voisin = (int*) malloc(n*(n-1)/2*sizeof(int)); ij = (int*) malloc(3*sizeof(int)); cudaMalloc( (void **) &a_device, n*n*sizeof(int) ); cudaMalloc( (void **) &b_device, n*n*sizeof(int) ); cudaMalloc( (void **) &solution_device, n*sizeof(int) ); cudaMalloc( (void **) &voisin_device, m*sizeof(int) ); cudaMemcpy( a_device, a, n*n*sizeof(int), cudaMemcpyHostToDevice ); cudaMemcpy( b_device, b, n*n*sizeof(int), cudaMemcpyHostToDevice ); /* ================================================================== = = = Multistart CPU : = = = = on lance le hill-climbing nb_solution fois = = (rentré en ligne de commande) = = = ================================================================== */ /* ces instructions sont les mêmes que dans pour le multistart du gpu donc je me permets de ne pas les commenter.*/ initial_time = clock(); srand(seed); for (k=0; k<nb_solution; k++) { create(solution,n); score = evaluation(a,b,solution,n); condition = 0; while ( condition == 0 ) { ij[0] = 0; ij[1] = 1; ij[2] = 1; for (i=0; i<(n-1); i++) { for (j=i+1; j<n; j++) { temp = compute_delta_cpu(a, b, solution, i, j, n); if (temp < ij[0]) { ij[0] = temp; ij[1] = i; ij[2] = j; } } } if (ij[0] >= 0) condition = 1; temp = solution[ij[1]]; solution[ij[1]] = solution[ij[2]]; solution[ij[2]] = temp; score = score + ij[0]; } if ( (k == 0) || ( (k != 0) && (score < best_score) ) ) { memcpy(meilleure_solution, solution, n*sizeof(int)); best_score = score; } } final_time = clock(); cpu_time = (final_time - initial_time)*1e-6; // affichage des résultats finaux sur CPU printf("Le meilleur score trouvé par les Hill-climbing avec le CPU est :\n"); printf("z(pi) = %d \n", best_score); printf("pi = [ "); for (k=0; k<n; k++) printf("%d ", meilleure_solution[k]); printf("] \n"); printf("Temps d'exécution CPU : %f s\n\n", cpu_time); /* ================================================================== = = = Multistart GPU : = = = = on lance le hill-climbing nb_solution fois = = (rentré en ligne de commande) = = = ================================================================== */ initial_time = clock(); srand(seed); for (k=0; k<nb_solution; k++) { create(solution,n); // génère une solution pi score = evaluation(a,b,solution,n); // évalue le score z(pi) condition = 0; // booléen conditionnel pour la boucle // Recherche d'une meilleure solution tant qu'un voisin en propose une while ( condition == 0 ) { cudaMemcpy( solution_device, solution, n*sizeof(int), 
cudaMemcpyHostToDevice ); // le gpu calcule le décalage d'un voisin main_gpu<<<nb_blocks, NB_THREAD>>>(voisin_device, a_device, b_device, solution_device, n); cudaMemcpy( voisin, voisin_device, m*sizeof(int), cudaMemcpyDeviceToHost ); // le cpu définit les éléments de solution à permuter pour avoir un meilleur score min_tab(ij, voisin, m, n, condition); // on permute les éléments trouvés temp = solution[ij[0]]; solution[ij[0]] = solution[ij[1]]; solution[ij[1]] = temp; // on calcule le nouveau score score = score + ij[2]; } // initialisation de la meilleure solution et du meilleur score à l'étape k=0 if (k == 0) { memcpy(meilleure_solution, solution, n*sizeof(int)); best_score = score; } // crée la meilleure solution et le meilleur score si il en a un if ( (k != 0) && (score < best_score) ) { memcpy(meilleure_solution, solution, n*sizeof(int)); best_score = score; } } final_time = clock(); gpu_time = (final_time - initial_time)*1e-6; printf("Le meilleur score trouvé par les Hill-climbing avec le GPU est :\n"); printf("z(pi) = %d \n", best_score); printf("pi = [ "); for (k=0; k<n; k++) printf("%d ", meilleure_solution[k]); printf("] \n"); printf("Temps d'exécution GPU : %f s\n\n", gpu_time); /* ====================================================== = = = fin de ce merveilleux programme = = = ====================================================== */ // désallocation des tableaux cudaFree(a_device); cudaFree(b_device); cudaFree(solution_device); cudaFree(voisin_device); free(a); free(b); free(ij); free(solution); free(voisin); free(meilleure_solution); }
af6545caad3c0eac86f7017ad65c2810cf8bd3fc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <time.h> #include <malloc.h> #include <mcheck.h> #include "rocblas.h" #include "mdgpu.h" #include "util.h" #include "cutil.h" #include "cs.h" #include "etree.h" #define blasint int #define _Complex #ifndef _WIN32 extern "C" { #endif #include "cblas.h" #ifndef _WIN32 } #endif #define MatrizGetFk MatrizGetFk_3 #define FactorAux_CPU FactorAux_CPU_3 #define FactorAux_GPU FactorAux_GPU_2 bool GPU = true; double flops = 0; clock_t ticksMalloc = 0; clock_t ticksMallocGPU = 0; clock_t ticksFree = 0; clock_t ticksFreeGPU = 0; clock_t ticksFactorAux = 0; clock_t ticksFactorAux1 = 0; clock_t ticksFactorAux2 = 0; clock_t ticksFactorAux3 = 0; clock_t ticksMemcpy = 0; clock_t ticksMemcpy2 = 0; clock_t ticksMemcpy21 = 0; clock_t ticksMemcpyX = 0; clock_t ticksMerge = 0; clock_t ticksExtendAdd = 0; clock_t ticksSymbolic = 0; clock_t ticksGetFk = 0; clock_t ticksMemcpyHost = 0; clock_t ticksTRSM_GPU = 0; clock_t ticksGEMM_GPU = 0; long countExtendAdd = 0; int countGetFk = 0; long bytesMemcpy2 = 0; long bytesMemcpy21 = 0; int block_size = -1; bool pad = false; FILE* logger; Matriz* MatrizGetFk_1(cs* A, int k, int cols) { clock_t t = tic(); Matriz* m = (Matriz*)my_malloc(sizeof(Matriz)); m->k = k; m->offset = 0; m->n = 0; for (int i = k+cols; i < A->n; i++) { bool b = false; for (int h = 0; h < cols; h++) { if (MatrizGet(A, i, k+h) != 0) { b = true; break; } } if (b) { m->n++; } } m->n += cols; MatrizAlloc(m); m->q = (int*)my_malloc((m->n)*sizeof(int)); for (int i = 0; i < cols; i++) { for (int h = 0; h < cols; h++) { MatrizSet(m, i, h, MatrizGet(A, k+i, k+h)); } m->q[i] = k+i; } int j = 0; for (int i = k+cols; i < A->n; i++) { bool b = false; for (int h = 0; h < cols; h++) { if (MatrizGet(A, i, k+h) != 0) { b = true; break; } } if (b) { for (int h = 0; h < cols; h++) { MatrizSet(m, j+cols, h, MatrizGet(A, i, k+h)); } m->q[j+cols] = i; j++; } } ticksGetFk += toc(t); return m; } Matriz* MatrizGetFk_2(cs* A, int k, int cols) { clock_t t = tic(); int* pos = (int*) my_malloc(cols*sizeof(int)); memset(pos, 0, cols*sizeof(int)); Matriz* m = (Matriz*)my_malloc(sizeof(Matriz)); m->k = k; m->offset = 0; m->n = 0; for (int h = 0; h < cols; h++) { while (A->i[A->p[k+h] + pos[h]] < k+h) { pos[h]++; } } for (int i = k+cols; i < A->n; i++) { bool b = false; for (int h = 0; h < cols; h++) { while (pos[h] < A->p[k+h+1] - A->p[k+h] && A->i[A->p[k+h] + pos[h]] < i) { pos[h]++; } if (pos[h] < A->p[k+h+1] - A->p[k+h] && A->i[A->p[k+h] + pos[h]] == i) { b = true; } } if (b) { m->n++; } } m->n += cols; MatrizAlloc(m); m->q = (int*)my_malloc((m->n)*sizeof(int)); for (int i = 0; i < cols; i++) { for (int h = 0; h < cols; h++) { MatrizSet(m, i, h, MatrizGet(A, k+i, k+h)); } m->q[i] = k+i; } memset(pos, 0, cols*sizeof(int)); int j = 0; for (int i = k+cols; i < A->n; i++) { bool b = false; for (int h = 0; h < cols; h++) { while (pos[h] < A->p[k+h+1] - A->p[k+h] && A->i[A->p[k+h] + pos[h]] < i) { pos[h]++; } if (pos[h] < A->p[k+h+1] - A->p[k+h] && A->i[A->p[k+h] + pos[h]] == i) { MatrizSet(m, j+cols, h, /*MatrizGet(A, i, k+h)*/ A->x[A->p[k+h] + pos[h]]); b = true; } } if (b) { m->q[j+cols] = i; j++; } } my_free(pos); ticksGetFk += toc(t); return m; } Matriz* MatrizGetFk_3(cs* C, int k, int c) { const int MAX_INT = 0x7fffffff; Matriz* M = (Matriz*)my_malloc(sizeof(Matriz)); M->k = k; M->offset = 0; M->n = 0; //int* pos = (int*)malloc(c*sizeof(int)); //int* min_i = (int*)malloc(c*sizeof(int)); //int* min_i2 = 
(int*)malloc(c*sizeof(int)); int pos[c]; int min_i[c]; int min_i2[c]; int minr; int* C_p = &C->p[k]; int* C_i = C->i; FLOTANTE* C_x = C->x; static int* merge = NULL; if (merge == NULL) { merge = (int*) malloc(C->n*sizeof(int)); } clock_t t = tic(); for (int i = 0; i < c; i++) { merge[M->n] = k+i; M->n++; } for (int i = 0; i < c; i++) { int pos_i = C_p[i]; while (C_i[pos_i] < k+c && pos_i < C_p[i+1]) { pos_i++; } pos[i] = pos_i; } int h = 0; while (true) { minr = MAX_INT; if (h == 0) { for (int i = 0; i < c; i++) { if (pos[i]+1 < C_p[i+1]) { min_i[i] = C_i[pos[i]]; min_i2[i] = C_i[pos[i]+1]; } else if (pos[i] < C_p[i+1]) { min_i[i] = C_i[pos[i]]; min_i2[i] = MAX_INT; } else { min_i[i] = MAX_INT; min_i2[i] = MAX_INT; } } } for (int i = 0; i < c; i++) { if (min_i[i] < minr) { minr = min_i[i]; } } if (minr == MAX_INT) { break; } merge[M->n] = minr; M->n++; for (int i = 0; i < c; i++) { if (min_i[i] == minr) { pos[i]++; min_i[i] = min_i2[i]; } } h = h == 0 ? 1 : 0; } MatrizAlloc(M); M->q = (int*)my_malloc(M->n*sizeof(int)); memcpy(M->q, merge, M->n*sizeof(int)); for (int i = 0; i < c; i++) { int p = 0; for (int j = C_p[i]; j < C_p[i+1]; j++) { while (merge[p] < C_i[j]) { p++; } _MatrizSet(M, p, i, C_x[j]); } } ticksGetFk += toc(t); //my_free(pos); //my_free(min_i); //my_free(min_i2); return M; } inline int log2(int x) { int square = 2; int count = 1; while (square < x) { square *= 2; ++count; } return count; } inline int pow2(int x) { int count = 1; for (int i = 0; i < x; i++) { count *= 2; } return count; } inline static int sizeNivel(int x) { return pow2(x); } inline static int comienzoNivel(int x) { return pow2(x)-1; } typedef struct { int x; int ptr; } KMergeTree; const int INF = 0x7fffffff; #define kmerge_min(x, y) (x <= y) Matriz* MatrizGetFk_4(cs* C, int k, int c) { clock_t t; Matriz* M = (Matriz*)my_malloc(sizeof(Matriz)); M->k = k; M->offset = 0; M->n = 0; int* C_p = &C->p[k]; int* C_i = C->i; int i, x1, x2, n1, n2; int niv = log2(c); int n_tree = sizeNivel(niv+1)-1; int pos[c]; KMergeTree tree[n_tree]; KMergeTree treeLoser[n_tree]; int sizeNiveles[niv+1]; int comienzoNiveles[niv+1]; /*int* pos = (int*)malloc(c*sizeof(int)); KMergeTree* tree = (KMergeTree*) my_malloc(n_tree*sizeof(KMergeTree)); KMergeTree* treeLoser = (KMergeTree*) my_malloc(n_tree*sizeof(KMergeTree)); int* sizeNiveles = (int*) my_malloc((niv+1)*sizeof(int)); int* comienzoNiveles = (int*) my_malloc((niv+1)*sizeof(int));*/ for (i = 0; i <= niv; i++) { sizeNiveles[i] = sizeNivel(i); comienzoNiveles[i] = comienzoNivel(i); } t = tic(); M->n = c; for (i = 0; i < c; i++) { pos[i] = C_p[i]; while (pos[i] < C_p[i+1] && C_i[pos[i]] < k+c) { pos[i]++; } } for (i = 0; i < n_tree; i++) { tree[i].x = INF; tree[i].ptr = INF; treeLoser[i].x = INF; treeLoser[i].ptr = INF; } for (i = 0; i < c; i++) { tree[comienzoNiveles[niv]+i].x = treeLoser[comienzoNiveles[niv]+i].x = pos[i] < C_p[i+1] ? C_i[pos[i]] : INF; tree[comienzoNiveles[niv]+i].ptr = treeLoser[comienzoNiveles[niv]+i].ptr = i; } for (int n = niv-1; n >= 0; n--) { for (i = 0; i < sizeNiveles[n]; i++) { x1 = tree[comienzoNiveles[n+1]+i*2].x; x2 = tree[comienzoNiveles[n+1]+i*2+1].x; n1 = tree[comienzoNiveles[n+1]+i*2].ptr; n2 = tree[comienzoNiveles[n+1]+i*2+1].ptr; tree[comienzoNiveles[n]+i].x = kmerge_min(x1,x2) ? x1 : x2; tree[comienzoNiveles[n]+i].ptr = kmerge_min(x1,x2) ? n1 : n2; treeLoser[comienzoNiveles[n]+i].x = kmerge_min(x1,x2) ? x2 : x1; treeLoser[comienzoNiveles[n]+i].ptr = kmerge_min(x1,x2) ? 
n2 : n1; } } KMergeTree winner = tree[0]; int ant; ant = INF; while (winner.x != INF) { if (ant == INF || ant != winner.x) { ant = winner.x; M->n++; } int h = winner.ptr; int m = comienzoNiveles[niv]+h; if (pos[h]+1 < C_p[h+1]) { pos[h]++; treeLoser[m].x = C_i[pos[h]]; } else { treeLoser[m].x = INF; } winner = treeLoser[m]; for (i = niv-1; i >= 0; i--) { //countGetFk++; int p = (m-1)/2; if (!kmerge_min(winner.x, treeLoser[p].x)) { KMergeTree tmp; tmp = winner; winner = treeLoser[p]; treeLoser[p] = tmp; } m = p; } } MatrizAlloc(M); M->q = (int*)my_malloc(M->n*sizeof(int)); for (i = 0; i < c; i++) { pos[i] = C_p[i]; } for (i = 0; i < n_tree; i++) { tree[i].x = INF; tree[i].ptr = INF; treeLoser[i].x = INF; treeLoser[i].ptr = INF; } for (i = 0; i < c; i++) { tree[comienzoNiveles[niv]+i].x = treeLoser[comienzoNiveles[niv]+i].x = C_i[pos[i]]; tree[comienzoNiveles[niv]+i].ptr = treeLoser[comienzoNiveles[niv]+i].ptr = i; } for (int n = niv-1; n >= 0; n--) { for (i = 0; i < sizeNiveles[n]; i++) { x1 = tree[comienzoNiveles[n+1]+i*2].x; x2 = tree[comienzoNiveles[n+1]+i*2+1].x; n1 = tree[comienzoNiveles[n+1]+i*2].ptr; n2 = tree[comienzoNiveles[n+1]+i*2+1].ptr; tree[comienzoNiveles[n]+i].x = kmerge_min(x1,x2) ? x1 : x2; tree[comienzoNiveles[n]+i].ptr = kmerge_min(x1,x2) ? n1 : n2; treeLoser[comienzoNiveles[n]+i].x = kmerge_min(x1,x2) ? x2 : x1; treeLoser[comienzoNiveles[n]+i].ptr = kmerge_min(x1,x2) ? n2 : n1; } } winner = tree[0]; int row = 0; ant = INF; KMergeTree tmp; while (winner.x != INF) { countGetFk++; if (ant == INF || ant != winner.x) { ant = winner.x; row++; } int h = winner.ptr; int m = comienzoNiveles[niv]+h; if (pos[h] < C_p[h+1]) { MatrizSet(M, row-1, h, C->x[pos[h]]); M->q[row-1] = C_i[pos[h]]; } if (pos[h]+1 < C_p[h+1]) { pos[h]++; treeLoser[m].x = C_i[pos[h]]; } else { treeLoser[m].x = INF; } winner = treeLoser[m]; for (i = niv-1; i >= 0; i--) { int p = (m-1)/2; if (!kmerge_min(winner.x, treeLoser[p].x)) { tmp = winner; winner = treeLoser[p]; treeLoser[p] = tmp; } m = p; } } ticksGetFk += toc(t); /*my_free(comienzoNiveles); my_free(sizeNiveles); my_free(treeLoser); my_free(tree); my_free(pos);*/ return M; } void VectorMerge(int* q1, int n1, int* q2, int n2, /* out */ int** q, /* out */ int* n) { int* qq = (int*) my_malloc(sizeof(int)*(n1+n2)); *q = qq; clock_t tick = tic(); int nn = 0; int k1 = 0, k2 = 0; while (k1 < n1 || k2 < n2) { if (k1 < n1 && k2 < n2) { if (q1[k1] < q2[k2]) { if (nn == 0 || qq[nn-1] != q1[k1]) { qq[nn] = q1[k1]; nn++; k1++; } else { k1++; } } else { if (nn == 0 || qq[nn-1] != q2[k2]) { qq[nn] = q2[k2]; nn++; k2++; } else { k2++; } } } else if (k1 < n1) { if (qq[nn-1] != q1[k1]) { qq[nn] = q1[k1]; nn++; } k1++; } else { if (qq[nn-1] != q2[k2]) { qq[nn] = q2[k2]; nn++; } k2++; } } *n = nn; ticksMerge += toc(tick); } void VectorMergeIndices(int* Q, int N, int* q, int n, /* out */ int** ql) { *ql = (int*) my_malloc(sizeof(int)*n); int i = 0, j = 0; while (i < n) { if (q[i] == Q[j]) { (*ql)[i] = j; i++; } j++; } } void AllocFrenteGPU(Frente** F, int nF) { clock_t tick = tic(); int size_x = 0; int size_q = 0; for (int i = 0; i < nF; i++) { int n = F[i]->h_frente->w; size_x += n*n*sizeof(FLOTANTE); size_q += n*sizeof(int); } FLOTANTE* x; cutilSafeCall( hipMalloc((void**)&x, size_x) ); cutilSafeCall( hipMemset(x, 0, size_x) ); /*int* q; cutilSafeCall( hipMalloc((void**)&q, size_q) );*/ //Matriz* f; ///*cutilSafeCall(*/ hipMalloc((void**)&f, size_f) /*)*/; size_x = 0; size_q = 0; for (int i = 0; i < nF; i++) { int n = F[i]->h_frente->w; F[i]->hd_frente = (Matriz*) 
my_malloc(sizeof(Matriz)); F[i]->hd_frente->w = F[i]->h_frente->w; F[i]->hd_frente->n = F[i]->h_frente->n; F[i]->hd_frente->k = F[i]->h_frente->k; F[i]->hd_frente->offset = F[i]->h_frente->offset; F[i]->hd_frente->x = &x[size_x]; size_x += n*n; /*F[i]->hd_frente->q = &q[size_q]; size_q += n;*/ } ticksMallocGPU += toc(tick); } void AllocFrenteGPU(Frente* F) { AllocFrenteGPU(&F, 1); } void FreeFrenteGPU(Frente** F, int n) { clock_t tick = tic(); for (int i = 0; i < n; i++) { if (F[i]->GPU) { my_free(F[i]->h_frente->ql); my_free(F[i]->h_frente->q); my_free(F[i]->h_frente->x); my_free(F[i]->h_frente); if (i == 0) { cutilSafeCall( hipFree(F[i]->hd_frente->x) ); //cutilSafeCall( hipFree(F[i]->hd_frente->q) ); } cutilSafeCall( hipFree(F[i]->hd_frente->ql) ); my_free(F[i]->hd_frente); my_free(F[i]); } else { my_free(F[i]->h_frente->x); my_free(F[i]->h_frente); my_free(F[i]); } } ticksFreeGPU += toc(tick); } void FreeFrenteGPU(Frente* F) { FreeFrenteGPU(&F, 1); } void MoverFrenteAGPU(Frente* F/*, hipStream_t stream*/) { int n = F->h_frente->w; /*static FLOTANTE* buffer = NULL; static int sizeBuffer = 0; if (buffer == NULL || n*F->h_frente->offset*sizeof(FLOTANTE) > sizeBuffer) { if (buffer != NULL) { hipHostFree(buffer); } sizeBuffer = n*F->h_frente->offset*sizeof(FLOTANTE); hipHostMalloc((void**)&buffer, sizeBuffer); } clock_t tick; //tick = tic(); memcpy(buffer, F->h_frente->x, n*F->h_frente->offset*sizeof(FLOTANTE)); //ticksMemcpyHost += toc(tick);*/ clock_t tick; tick = tic(); cutilSafeCall( hipMemcpy(F->hd_frente->x, F->h_frente->x, n*F->h_frente->offset*sizeof(FLOTANTE), hipMemcpyHostToDevice/*, stream*/) ); ticksMemcpyX += toc(tick); } void MoverFrenteQlAGPU(Frente* F) { int n = F->h_frente->n /*- F->h_frente->offset*/; clock_t tick; tick = tic(); cutilSafeCall( hipMalloc((void**)&F->hd_frente->ql, n*sizeof(int)) ); ticksMallocGPU += toc(tick); tick = tic(); cutilSafeCall( hipMemcpy(F->hd_frente->ql, F->h_frente->ql, n*sizeof(int), hipMemcpyHostToDevice) ); ticksMemcpy += toc(tick); } /*void MoverFrenteXACPU(Frente* F) { int n = F->hd_frente->w; clock_t tick = tic(); cutilSafeCall( hipMemcpy(F->h_frente->x, F->hd_frente->x, n*n*sizeof(FLOTANTE), hipMemcpyDeviceToHost) ); ticksMemcpy += toc(tick); }*/ void ExtendAdd_CPU_old(Matriz* src, Matriz* dst) { int qi; int qj; for (int j = 0; j < src->n-src->offset; j++) { qj = src->ql[j]; for (int i = 0; i < src->n-src->offset; i++) { qi = src->ql[i]; MatrizSet(dst, qi, qj, MatrizGet(src, i+src->offset, j+src->offset)+MatrizGet(dst, qi, qj)); } } } void ExtendAdd_CPU(Matriz* src, Matriz* dst) { int qi; int qj; int i, j, cd, cs; int n = src->n-src->offset; FLOTANTE* xd = dst->x; FLOTANTE* xs = src->x; for (j = 0; j < n; j++) { qj = src->ql[j]; cd = qj*dst->w; cs = (j+src->offset)*src->w+src->offset; for (i = 0; i < n; i++) { qi = src->ql[i]; xd[cd+qi] += xs[cs+i]; countExtendAdd++; } } } __global__ void ExtendAdd_GPU_device(int src_offset, int src_n, int dst_n, FLOTANTE* src_x, FLOTANTE* dst_x, int* src_ql, int src_n2) { int i = blockIdx.x * 16 + threadIdx.x; int j = blockIdx.y * 16 + threadIdx.y; __shared__ int p1[16]; __shared__ int p2[16]; if (threadIdx.y == 0) { p1[threadIdx.x] = src_ql[i]; p2[threadIdx.x] = src_ql[blockIdx.y * 16 + threadIdx.x] * dst_n; //printf("p1[%i] = %i; p2[%i] = %i\n", threadIdx.x, p1[threadIdx.x], threadIdx.x, p2[threadIdx.x]); } __syncthreads(); if (i < src_n2 && j < src_n2) { dst_x[p2[threadIdx.y]+p1[threadIdx.x]] += src_x[j*src_n+i]; } } void ExtendAdd_GPU(Frente* src, Frente* dst) { int n = src->h_frente->n; dim3 
block(16,16); dim3 grid(n / 16 + 1, n / 16 + 1); hipLaunchKernelGGL(( ExtendAdd_GPU_device), dim3(grid),dim3(block), 0, 0, src->h_frente->offset, src->h_frente->w, dst->h_frente->w, &src->hd_frente->x[src->h_frente->offset*src->h_frente->w+src->h_frente->offset], dst->hd_frente->x, src->hd_frente->ql, src->h_frente->n-src->h_frente->offset); //hipDeviceSynchronize(); cutilCheckMsg("ExtendAdd_GPU"); } void ExtendAdd(Frente* src, Frente* dst, bool b) { clock_t tick = tic(); if (GPU && !b) { ExtendAdd_GPU(src, dst); } else { ExtendAdd_CPU(src->h_frente, dst->h_frente); } ticksExtendAdd += toc(tick); } __global__ void FactorAux1_GPU_1_device(int c, Matriz* Uk, int* spL_i, FLOTANTE* spL_x) { int i = blockIdx.x * 256 + threadIdx.x; FLOTANTE raiz; int n = Uk->n; int w = Uk->w; raiz = sqrt(Uk->x[c*w+c]); if (i >= c && i < n) { spL_i[i-c] = Uk->q[i]; spL_x[i-c] = Uk->x[c*w+i]/raiz; } } /*void FactorAux_GPU_1(Frente* Uk, cs* spL) { //clock_t tick; int cols = Uk->h_frente->offset; int n = Uk->hd_frente->n; int w = Uk->hd_frente->w; int k = Uk->hd_frente->k; FLOTANTE* x = Uk->hd_frente->x; FLOTANTE f; //printf("\n"); //MatrizPrint(Uk->d_frente, "%f "); for (int c = 0; c < cols; c++) { //tick = tic(); //FactorAux1_GPU(c, Uk, spL); //ticksFactorAux1 += toc(tick); FLOTANTE* d_spL_x; cutilSafeCall( hipMalloc((void**)&d_spL_x, (n-c)*sizeof(FLOTANTE)) ); int* d_spL_i; cutilSafeCall( hipMalloc((void**)&d_spL_i, (n-c)*sizeof(int)) ); dim3 block(256); int s = n / 256 + 1; dim3 grid(s); FactorAux1_GPU_1_device<<<grid,block>>>(c, Uk->d_frente, d_spL_i, d_spL_x); cutilCheckMsg("FactorAux1_GPU"); cutilSafeCall( hipMemcpy(&spL->i[spL->p[k+c]], d_spL_i, (n-c)*sizeof(int), hipMemcpyDeviceToHost) ); cutilSafeCall( hipMemcpy(&spL->x[spL->p[k+c]], d_spL_x, (n-c)*sizeof(FLOTANTE), hipMemcpyDeviceToHost) ); hipFree(d_spL_i); hipFree(d_spL_x); //tick = tic(); //FactorAux2_GPU_CUBLAS(c, Uk); //ticksFactorAux2 += toc(tick); cutilSafeCall( hipMemcpy((void**)&f, x+(c*w+c), sizeof(FLOTANTE), hipMemcpyDeviceToHost) ); mdgpu_cublasXsyr('L', n-c-1, (FLOTANTE)-1.0/f, &x[(c+0)*w+(c+1)], 1, &x[(c+1)*w+(c+1)], w); cublasStatus status = hipblasGetError(); if (status != HIPBLAS_STATUS_SUCCESS) { printf("Error en cublaSgemm()"); exit(1); } //hipDeviceSynchronize(); } //printf("\n"); //MatrizPrint(Uk->d_frente, "%f "); }*/ __global__ void FactorAux1_GPU_2_device_1(FLOTANTE* x) { x[0] = sqrt(x[0]); } __global__ void FactorAux1_GPU_2_device_2(int n, FLOTANTE* x) { int i = blockIdx.x * 256 + threadIdx.x; FLOTANTE f = x[0]; if (i > 0 && i < n) { x[i] /= f; } } void FactorAux_GPU_2(Frente** F, int nF, cs* spL) { clock_t tick; clock_t tick2; cublasStatus status; if (block_size == -1) { printf("block_size == -1\n"); exit(1); } #define cols(i) (F[i]->h_frente->offset) int b = block_size; int max_bloques = 16; static FLOTANTE* bloqueP = NULL; static FLOTANTE** bloques = NULL; static FLOTANTE* d_bloqueP = NULL; static FLOTANTE** d_bloques = NULL; if (bloqueP == NULL) { cutilSafeCall( hipHostMalloc((void**)&bloqueP, max_bloques*b*b*sizeof(FLOTANTE)) ); //bloqueP = (FLOTANTE*) my_malloc(max_bloques*b*b*sizeof(FLOTANTE)); bloques = (FLOTANTE**) my_malloc(max_bloques*sizeof(FLOTANTE*)); for (int i = 0; i < max_bloques; i++) { bloques[i] = &bloqueP[i*b*b]; } cutilSafeCall( hipMalloc((void**)&d_bloqueP, max_bloques*b*b*sizeof(FLOTANTE)) ); d_bloques = (FLOTANTE**) my_malloc(max_bloques*sizeof(FLOTANTE*)); for (int i = 0; i < max_bloques; i++) { d_bloques[i] = &d_bloqueP[i*b*b]; } } int max_cols = 0; for (int i = 0; i < nF; i++) { if (cols(i) > max_cols) 
{ max_cols = cols(i); } } for (int j = 0; j < nF; j++) { F[j]->tiempoFact = 0; } for (int i = 0; i < max_cols; i += b) { for (int j = 0; j < nF; j++) { clock_t tini = tic(); if (i < cols(j)) { FLOTANTE* x = F[j]->hd_frente->x; int w = F[j]->hd_frente->w; int nb = min(i+b,cols(j)); int b2 = nb-i; FLOTANTE* bloque = bloques[j]; bytesMemcpy2 += b2*b2*sizeof(FLOTANTE); if (j == 0) { //cutilSafeCall( hipMemcpy2D(bloque, b*sizeof(FLOTANTE), &x[i*w+i], w*sizeof(FLOTANTE), b2*sizeof(FLOTANTE), b2, hipMemcpyDeviceToHost); //); } } if (i < cols(j)) { if (j < nF-1) { int j2 = j+1; FLOTANTE* x = F[j2]->hd_frente->x; int w = F[j2]->hd_frente->w; int nb = min(i+b,cols(j2)); int b2 = nb-i; FLOTANTE* bloque = bloques[j2]; bytesMemcpy2 += b2*b2*sizeof(FLOTANTE); tick = tic(); //cutilSafeCall( hipMemcpy2DAsync(bloque, b*sizeof(FLOTANTE), &x[i*w+i], w*sizeof(FLOTANTE), b2*sizeof(FLOTANTE), b2, hipMemcpyDeviceToHost); //); //hipDeviceSynchronize(); ticksMemcpy21 += toc(tick); } int nb = min(i+b,cols(j)); int b2 = nb-i; FLOTANTE* bloque = bloques[j]; tick = tic(); int cb = 0; for (int c = 0; c < b2; c++) { mdgpu_cblasXscal(b2-c, 1.0/sqrt(bloque[cb+c]), &bloque[cb+c], 1); mdgpu_cblasXsyr(CblasColMajor, CblasLower, b2-c-1, -1.0, &bloque[cb+c+1], 1, &bloque[cb+b+c+1], b); cb += b; } ticksFactorAux1 += toc(tick); hipDeviceSynchronize(); } if (i < cols(j)) { FLOTANTE* x = F[j]->hd_frente->x; int w = F[j]->hd_frente->w; int nb = min(i+b,cols(j)); int b2 = nb-i; FLOTANTE* bloque = bloques[j]; tick = tic(); //cutilSafeCall( hipMemcpy2D(&x[i*w+i], w*sizeof(FLOTANTE), bloque, b*sizeof(FLOTANTE), b2*sizeof(FLOTANTE), b2, hipMemcpyHostToDevice); //); ticksMemcpy2 += toc(tick); } if (i < cols(j)) { FLOTANTE* x = F[j]->hd_frente->x; int w = F[j]->hd_frente->w; int n = F[j]->hd_frente->n; int nb = min(i+b,cols(j)); int b2 = nb-i; if (n-nb > 0) { tick = tic(); mdgpu_cublasXtrsm('R', 'L', 'T', 'N', n-nb, nb-i, 1.0f, &x[i*w+i], w, &x[i*w+i+b2], w); hipDeviceSynchronize(); status = hipblasGetError(); ticksTRSM_GPU += toc(tick); if (status != HIPBLAS_STATUS_SUCCESS) { printf("Error en cublaXtrsm()"); exit(1); } tick = tic(); mdgpu_cublasXgemm('N', 'T', n-nb, n-nb, b2, -1.0f, &x[i*w+i+b2], w, &x[i*w+i+b2], w, 1.0f, &x[(i+b2)*w+i+b2], w); hipDeviceSynchronize(); status = hipblasGetError(); ticksGEMM_GPU += toc(tick); if (status != HIPBLAS_STATUS_SUCCESS) { printf("Error en cublaXgemm()"); exit(1); } } } hipDeviceSynchronize(); F[j]->tiempoFact += toc(tini); } /*tick = tic(); for (int j = 0; j < nF; j++) { if (i < cols(j)) { FLOTANTE* x = F[j]->hd_frente->x; int w = F[j]->hd_frente->w; int nb = min(i+b,cols(j)); int b2 = nb-i; FLOTANTE* bloque = bloques[j]; bytesMemcpy2 += b2*b2*sizeof(FLOTANTE); //cutilSafeCall( hipMemcpy2D(bloque, b*sizeof(FLOTANTE), &x[i*w+i], w*sizeof(FLOTANTE), b2*sizeof(FLOTANTE), b2, hipMemcpyDeviceToHost); //); } } hipDeviceSynchronize(); ticksMemcpy21 += toc(tick); tick = tic(); for (int j = 0; j < nF; j++) { if (i < cols(j)) { int nb = min(i+b,cols(j)); int b2 = nb-i; FLOTANTE* bloque = bloques[j]; int cb = 0; for (int c = 0; c < b2; c++) { mdgpu_cblasXscal(b2-c, 1.0/sqrt(bloque[cb+c]), &bloque[cb+c], 1); mdgpu_cblasXsyr(CblasColMajor, CblasLower, b2-c-1, -1.0, &bloque[cb+c+1], 1, &bloque[cb+b+c+1], b); cb += b; } } } ticksFactorAux1 += toc(tick); tick = tic(); for (int j = 0; j < nF; j++) { if (i < cols(j)) { FLOTANTE* x = F[j]->hd_frente->x; int w = F[j]->hd_frente->w; int nb = min(i+b,cols(j)); int b2 = nb-i; FLOTANTE* bloque = bloques[j]; //cutilSafeCall( hipMemcpy2D(&x[i*w+i], w*sizeof(FLOTANTE), 
bloque, b*sizeof(FLOTANTE), b2*sizeof(FLOTANTE), b2, hipMemcpyHostToDevice); //); } } hipDeviceSynchronize(); ticksMemcpy2 += toc(tick); tick2 = tic(); for (int j = 0; j < nF; j++) { if (i < cols(j)) { FLOTANTE* x = F[j]->hd_frente->x; int w = F[j]->hd_frente->w; int n = F[j]->hd_frente->n; int nb = min(i+b,cols(j)); int b2 = nb-i; if (n-nb > 0) { tick = tic(); mdgpu_cublasXtrsm('R', 'L', 'T', 'N', n-nb, nb-i, 1.0f, &x[i*w+i], w, &x[i*w+i+b2], w); hipDeviceSynchronize(); status = hipblasGetError(); ticksTRSM_GPU += toc(tick); if (status != HIPBLAS_STATUS_SUCCESS) { printf("Error en cublaXtrsm()"); exit(1); } tick = tic(); mdgpu_cublasXgemm('N', 'T', n-nb, n-nb, b2, -1.0f, &x[i*w+i+b2], w, &x[i*w+i+b2], w, 1.0f, &x[(i+b2)*w+i+b2], w); hipDeviceSynchronize(); status = hipblasGetError(); ticksGEMM_GPU += toc(tick); if (status != HIPBLAS_STATUS_SUCCESS) { printf("Error en cublaXgemm()"); exit(1); } } } } hipDeviceSynchronize(); ticksFactorAux2 += toc(tick2);*/ } printf("\n"); for (int j = 0; j < nF; j++) { printf("Procesando frente %i, size = %i, cols = %i, tiempo = %.10f\n", F[j]->h_frente->k, F[j]->h_frente->n, F[j]->h_frente->offset, ticks2seg(F[j]->tiempoFact)); } tick = tic(); int* spL_i; for (int j = 0; j < nF; j++) { FLOTANTE* x = F[j]->hd_frente->x; int w = F[j]->hd_frente->w; int n = F[j]->hd_frente->n; int k = F[j]->hd_frente->k; for (int c = 0; c < cols(j); c++) { spL_i = &spL->i[spL->p[k+c]-c]; memcpy(&spL_i[c], &F[j]->h_frente->q[c], (n-c)*sizeof(int)); //cutilSafeCall( hipMemcpy(&spL_i[c], &F[j]->h_frente->q[c], (n-c)*sizeof(int), hipMemcpyHostToHost) ); // TODO: traer L en bloques mas grandes //cutilSafeCall( hipMemcpy(&spL->x[spL->p[k+c]], &x[c*w+c], (n-c)*sizeof(float), hipMemcpyDeviceToHost) ); } } ticksFactorAux3 += toc(tick); #undef cols } void FactorAux_CPU_1(Frente* Uk, cs* spL) { //clock_t tick; int cols = Uk->h_frente->offset; FLOTANTE raiz; int x; int n = Uk->h_frente->n; int k = Uk->h_frente->k; for (int c = 0; c < cols; c++) { //FactorAux1_CPU(c, Uk->h_frente, spL); flops += (n-c) + 1; raiz = sqrt(MatrizGet(Uk->h_frente, c, c)); for (int i = c; i < n; i++) { x = spL->p[k+c] + i - c; spL->i[x] = Uk->h_frente->q[i]; spL->x[x] = MatrizGet(Uk->h_frente, i, c)/raiz; //printf("%i %i %lf %lf\n", i, c, raiz, MatrizGet(Uk, i, c)); if (MatrizGet(Uk->h_frente, c, c) < 0) { putchar('X'); fflush(stdout); //printf("La matriz no es definida positiva!!!\n"); //exit(1); } } //FactorAux2_CPU(c, Uk->h_frente); for (int j = c+1; j < n; j++) { for (int i = j; i < n; i++) { MatrizAdd(Uk->h_frente, i, j, - MatrizGet(Uk->h_frente, i, c) * MatrizGet(Uk->h_frente, j, c) / MatrizGet(Uk->h_frente, c, c)); } } } } void FactorAux_CPU_2(Frente* Uk, cs* spL) { //clock_t tick; int cols = Uk->h_frente->offset; FLOTANTE raiz; int x; int n = Uk->h_frente->n; int k = Uk->h_frente->k; for (int c = 0; c < cols; c++) { //FactorAux1_CPU(c, Uk->h_frente, spL); flops += (n-c) + 1; raiz = sqrt(MatrizGet(Uk->h_frente, c, c)); for (int i = c; i < n; i++) { x = spL->p[k+c] + i - c; spL->i[x] = Uk->h_frente->q[i]; spL->x[x] = MatrizGet(Uk->h_frente, i, c)/raiz; //printf("%i %i %lf %lf\n", i, c, raiz, MatrizGet(Uk, i, c)); if (MatrizGet(Uk->h_frente, c, c) < 0) { putchar('X'); fflush(stdout); //printf("La matriz no es definida positiva!!!\n"); //exit(1); } } //FactorAux2_CPU(c, Uk->h_frente); FLOTANTE f = (FLOTANTE)-1.0 / MatrizGet(Uk->h_frente, c, c); flops += (n-c)*(n-c-1); mdgpu_cblasXsyr(CblasColMajor, CblasLower, n-c-1, f, &Uk->h_frente->x[(c+0)*n+(c+1)], 1, &Uk->h_frente->x[(c+1)*n+(c+1)], n); } } void 
FactorAux_CPU_3(Frente* Uk, cs* spL) { //clock_t tick; int cols = Uk->h_frente->offset; FLOTANTE raiz; int x; int n = Uk->h_frente->n; int w = Uk->h_frente->w; int k = Uk->h_frente->k; //printf("\nk = %i\ncols = %i\n", k, cols); //MatrizPrint(Uk->h_frente, "%f "); int b = block_size == -1 ? n : block_size; for (int i = 0; i < cols; i += b) { int nb = min(i+b,cols); int b2 = nb-i; for (int c = i; c < nb; c++) { raiz = sqrt(MatrizGet(Uk->h_frente, c, c)); for (int j = c; j < nb; j++) { MatrizSet(Uk->h_frente, j, c, MatrizGet(Uk->h_frente, j, c)/raiz); } mdgpu_cblasXsyr(CblasColMajor, CblasLower, nb-c-1, -1.0, Uk->h_frente->x+((c+0)*w+(c+1)), 1, Uk->h_frente->x+((c+1)*w+(c+1)), w); } if (n-nb > 0) { mdgpu_cblasXtrsm(CblasColMajor, CblasRight, CblasLower, CblasTrans, CblasNonUnit, n-nb, nb-i, 1.0f, Uk->h_frente->x+(i*w+i), w, Uk->h_frente->x+(i*w+i+b2), w); //printf("\n"); //MatrizPrint(Uk->h_frente, "%f "); mdgpu_cblasXgemm(CblasColMajor, CblasNoTrans, CblasTrans, n-nb, n-nb, b2, -1.0f, &Uk->h_frente->x[i*w+i+b2], w, &Uk->h_frente->x[i*w+i+b2], w, 1.0f, &Uk->h_frente->x[(i+b2)*w+i+b2], w); } } //printf("\n"); //MatrizPrint(Uk->h_frente, "%f "); for (int c = 0; c < cols; c++) { for (int j = c; j < n; j++) { x = spL->p[k+c] + j - c; spL->i[x] = Uk->h_frente->q[j]; spL->x[x] = MatrizGet(Uk->h_frente, j, c); } } for (int c = 0; c < cols; c++) { flops += (n-c) + 1; flops += (n-c)*(n-c-1); } } void FactorAux(Frente* Uk, cs* spL) { clock_t tick = tic(); if (GPU) { FactorAux_GPU(&Uk, 1, spL); } else { FactorAux_CPU(Uk, spL); } ticksFactorAux += toc(tick); } void FactorAux(Frente** U, int n_U, cs* spL) { clock_t tick = tic(); if (GPU) { /*for (int i = 0; i < n_U; i++) { FactorAux_GPU(&U[i], 1, spL); }*/ FactorAux_GPU(U, n_U, spL); } else { for (int i = 0; i < n_U; i++) { FactorAux_CPU(U[i], spL); } } ticksFactorAux += toc(tick); } void Factor(cs* A, cs* spL, struct ETree** listaNodos, int cantNodos) { Frente** stack = (Frente**) my_malloc(cantNodos*sizeof(Frente*)); int n_stack = 0; for (int c = 0; c < cantNodos; c++) { struct ETree* tree = listaNodos[c]; if (c % 20 == 0) { //putchar('.'); //fflush(stdout); } if (tree->nHijos == 0) { continue; } Frente** F = (Frente**) my_malloc(tree->nHijos*sizeof(Frente*)); for (int i = 0; i < tree->nHijos; i++) { F[i] = (Frente*) my_malloc(sizeof(Frente)); F[i]->h_frente = MatrizGetFk(A, tree->hijos[i]->nodo, tree->hijos[i]->cols); F[i]->GPU = false; F[i]->h_frente->offset = tree->hijos[i]->cols; } if (GPU) { AllocFrenteGPU(F, tree->nHijos); } for (int i = 0; i < tree->nHijos; i++) { if (GPU) { MoverFrenteAGPU(F[i]); F[i]->GPU = true; } } Frente** U = (Frente**) my_malloc(sizeof(Frente*)*tree->nHijos); for (int i = 0; i < tree->nHijos; i++) { if (tree->hijos[tree->nHijos-1-i]->nHijos == 0) { U[tree->nHijos-1-i] = NULL; } else { U[tree->nHijos-1-i] = stack[--n_stack]; } } Frente** U2 = (Frente**) my_malloc(sizeof(Frente*)*tree->nHijos); for (int i = 0; i < tree->nHijos; i++) { int* q; int n; if (U[i] == NULL) { U2[i] = F[i]; //U2[i]->h_frente->offset = tree->hijos[i]->cols; } else { VectorMerge(F[i]->h_frente->q, F[i]->h_frente->n, U[i]->h_frente->q + U[i]->h_frente->offset, U[i]->h_frente->n - U[i]->h_frente->offset, &q, &n); VectorMergeIndices(q, n, F[i]->h_frente->q, F[i]->h_frente->n, &F[i]->h_frente->ql); VectorMergeIndices(q, n, U[i]->h_frente->q, U[i]->h_frente->n, &U[i]->h_frente->ql); U2[i] = (Frente*) my_malloc(sizeof(Frente)); U2[i]->GPU = false; U2[i]->h_frente = (Matriz*) my_malloc(sizeof(Matriz)); U2[i]->h_frente->n = n; U2[i]->h_frente->q = q; 
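		// U2[i] is the assembled front for child i: its index set q is the sorted union of
		// F[i]'s row indices and the rows of the child's update matrix U[i] below its pivot
		// block. VectorMergeIndices precomputes ql, the position of each source row/column
		// inside q, so ExtendAdd can scatter both contributions into U2[i] with one indexed
		// add per entry.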
U2[i]->h_frente->k = tree->hijos[i]->nodo; U2[i]->h_frente->offset = tree->hijos[i]->cols; MatrizAlloc(U2[i]->h_frente); if (GPU) { AllocFrenteGPU(U2[i]); U2[i]->GPU = true; } if (GPU) { MoverFrenteQlAGPU(F[i]); MoverFrenteQlAGPU(U[i]); } F[i]->h_frente->offset = 0; ExtendAdd(F[i], U2[i], false); ExtendAdd(U[i], U2[i], false); } } FactorAux(U2, tree->nHijos, spL); int n = U2[0]->h_frente->n - U2[0]->h_frente->offset; int* q = (int*) my_malloc(n*sizeof(int)); memcpy(q, U2[0]->h_frente->q + U2[0]->h_frente->offset, n*sizeof(int)); for (int i = 1; i < tree->nHijos; i++) { int n2; int* q2; VectorMerge(q, n, U2[i]->h_frente->q + U2[i]->h_frente->offset, U2[i]->h_frente->n - U2[i]->h_frente->offset, &q2, &n2); my_free(q); n = n2; q = q2; } for (int i = 0; i < tree->nHijos; i++) { VectorMergeIndices(q, n, U2[i]->h_frente->q + U2[i]->h_frente->offset, U2[i]->h_frente->n - U2[i]->h_frente->offset, &U2[i]->h_frente->ql); } Frente* Uk = (Frente*) my_malloc(sizeof(Frente)); Uk->GPU = false; Uk->h_frente = (Matriz*) my_malloc(sizeof(Matriz)); Uk->h_frente->n = n; Uk->h_frente->q = q; Uk->h_frente->k = tree->nodo; Uk->h_frente->offset = 0; MatrizAlloc(Uk->h_frente); if (GPU) { AllocFrenteGPU(Uk); Uk->GPU = true; } for (int i = 0; i < tree->nHijos; i++) { if (GPU) { MoverFrenteQlAGPU(U2[i]); } ExtendAdd(U2[i], Uk, false); } FreeFrenteGPU(F, tree->nHijos); for (int i = 0; i < tree->nHijos; i++) { if (U[i] != NULL) { FreeFrenteGPU(U[i]); FreeFrenteGPU(U2[i]); } } my_free(U); my_free(U2); my_free(F); //fprintf(logger, "%i ", Uk->h_frente->n); stack[n_stack++] = Uk; } Frente* U = stack[--n_stack]; Frente* F = (Frente*) my_malloc(sizeof(Frente)); F->GPU = false; F->h_frente = MatrizGetFk(A, listaNodos[cantNodos-1]->nodo, listaNodos[cantNodos-1]->cols); F->h_frente->offset = /*listaNodos[cantNodos-1]->cols*/ 0; if (GPU) { AllocFrenteGPU(F); MoverFrenteAGPU(F/*, 0*/); F->GPU = true; } int n; int* q; VectorMerge(U->h_frente->q, U->h_frente->n, F->h_frente->q, F->h_frente->n, &q, &n); VectorMergeIndices(q, n, U->h_frente->q, U->h_frente->n, &U->h_frente->ql); VectorMergeIndices(q, n, F->h_frente->q, F->h_frente->n, &F->h_frente->ql); Frente* U2 = (Frente*) my_malloc(sizeof(Frente)); U2->GPU = false; U2->h_frente = (Matriz*) my_malloc(sizeof(Matriz)); U2->h_frente->n = n; U2->h_frente->q = q; U2->h_frente->k = listaNodos[cantNodos-1]->nodo; U2->h_frente->offset = listaNodos[cantNodos-1]->cols; MatrizAlloc(U2->h_frente); if (GPU) { AllocFrenteGPU(U2); U2->GPU = true; } if (GPU) { MoverFrenteQlAGPU(F); MoverFrenteQlAGPU(U); } ExtendAdd(F, U2, false); ExtendAdd(U, U2, false); my_free(F->h_frente->x); my_free(F->h_frente->q); my_free(F->h_frente); my_free(F); my_free(U->h_frente->x); my_free(U->h_frente); my_free(U); FactorAux(U2, spL); my_free(U2->h_frente->x); my_free(U2->h_frente->q); my_free(U2->h_frente); } void testVectorMerge() { int q1[] = {2, 5}; int q2[] = {0, 1, 2, 3, 4, 5}; int n; int* q; VectorMerge(q1, 2, q2, 6, &q, &n); VectorPrint(q, n); int* ql1; VectorMergeIndices(q, n, q1, 2, &ql1); VectorPrint(ql1, 2); } /* true for off-diagonal entries */ static int dropdiag (int i, int j, FLOTANTE aij, void *other) { return (i != j) ;} /* C = A + triu(A,1)' */ static cs *make_sym (cs *A) { cs *AT, *C ; AT = cs_transpose (A, 1) ; /* AT = A' */ cs_fkeep (AT, &dropdiag, NULL) ; /* drop diagonal entries from AT */ C = cs_add (A, AT, 1, 1) ; /* C = A+AT */ cs_spfree (AT) ; return (C) ; } /* infinity-norm of x */ static FLOTANTE norm (FLOTANTE *x, int n) { int i ; FLOTANTE normx = 0 ; for (i = 0 ; i < n ; i++) normx = 
CS_MAX (normx, fabs (x [i])) ; return (normx) ; } void help() { printf("mdgpu [-gpu|-cpu] [-amd] [-sn|-rn c] [-b bs] [-pad] matriz\n"); exit(1); } void test() { FILE* f = fopen("../../mat/nos4.mtx", "r"); cs* T = cs_load(f); cs* C = cs_compress(T); cs_spfree(T); Matriz* m = MatrizGetFk_3(C, 0, 5); MatrizPrint(m, "%3.4f "); } int main(int argc, char** argv) { mallopt(M_MMAP_MAX, 0); //test(); //exit(1); //printf("\nMemory: %i KB\n", memory_usage() ); cublasStatus status; status = hipblasInit(); if (status != HIPBLAS_STATUS_SUCCESS) { printf("Error en hipblasInit(%i)", status); exit(1); } logger = fopen("salida.txt", "w"); clock_t mainTick = 0; clock_t factorTick = 0; clock_t loadTick = 0; mainTick = tic(); GPU = false; bool amd = false; bool supernodos = false; bool relaxednodos = false; int relaxednodosMaxCeros = 0; for (int i = 1; i < argc-1; i++) { //printf("%i -> %s\n", i, argv[i]); if (strcmp(argv[i], "-gpu") == 0) { //printf("gpu\n"); GPU = true; } else if (strcmp(argv[i], "-cpu") == 0) { //printf("cpu\n"); GPU = false; } else if (strcmp(argv[i], "-amd") == 0) { //printf("amd\n"); amd = true; } else if (strcmp(argv[i], "-sn") == 0) { //printf("sn\n"); supernodos = true; } else if (strcmp(argv[i], "-rn") == 0) { //printf("sn\n"); relaxednodos = true; relaxednodosMaxCeros = atoi(argv[i+1]); } else if (strcmp(argv[i], "-b") == 0) { block_size = atoi(argv[i+1]); } else if (strcmp(argv[i], "-pad") == 0) { pad = true; } else if (strcmp(argv[i], "-help") == 0) { help(); } } loadTick = tic(); FILE* f = fopen(argv[argc-1], "r"); cs* T = cs_load(f); cs* C_low = cs_compress(T); cs_spfree(T); loadTick = toc(loadTick); //printf("\nMemory: %i KB\n", memory_usage() ); ticksSymbolic = tic(); cs* A_low = NULL; cs* A = NULL; //MatrizSpy(C_low); int* amd_perm; int* amd_pinv; if (amd) { cs* C_up = cs_transpose(C_low, 1); //cs_spfree(C_low); //printf("\nC = \n"); //MatrizPrint(C, "%10.4lf "); amd_perm = cs_amd(1, C_up); amd_pinv = cs_pinv(amd_perm, C_up->n); cs* A_amd_U = cs_symperm(C_up, amd_pinv, 1); cs_spfree(C_up); A_low = cs_transpose(A_amd_U, 1); cs_spfree(A_amd_U); //printf("\nA_amd_U = \n"); //MatrizPrint(A_amd_U, "%10.4lf "); A = make_sym(A_low); //printf("\nA = \n"); //MatrizPrint(A, "%10.4lf "); } else { A_low = C_low; A = make_sym(A_low); } /*if (amd) { printf("amd_perm: "); VectorPrint(amd_perm, A->n); printf("amd_pinv: "); VectorPrint(amd_pinv, A->n); }*/ int* parent = cs_etree(A, 0); int* post = cs_post(parent, A->n); int* count = cs_counts(A, parent, post, 0); //printf("counts: "); //VectorPrint(count, A->n); //printf("sum counts: "); //VectorPrint(spL->p, A->n); //printf("etree: "); //VectorPrint(parent, A->n); //printf("post: "); //VectorPrint(post, A->n); struct ETree** listaNodos; //printf("\nMemory: %i KB\n", memory_usage() ); int cantNodos; struct ETree* tree = GetETree(A, parent, post, count, &listaNodos, &cantNodos); //printf("Memory ETree: %i KB\n", memory_usage() ); //PrintETree(tree, 0); //printf("\n---------------------\n\n"); //PrintETree(listaNodos[cantNodos-1], 0); printf("\nCant Nodos = %i\n", cantNodos); struct ETree** listaNodosSuper; int cantNodosSuper; if (supernodos) { GetSuperETree(listaNodos, cantNodos, &listaNodosSuper, &cantNodosSuper); printf("\nCant SuperNodos = %i\n", cantNodosSuper); } else { listaNodosSuper = listaNodos; cantNodosSuper = cantNodos; } //PrintETree(listaNodosSuper[cantNodosSuper-1], 0); if (relaxednodos) { int* permRelaxedETree; GetRelaxedETree(listaNodos, cantNodos, relaxednodosMaxCeros, count, &permRelaxedETree, &listaNodosSuper, 
&cantNodosSuper); my_free(listaNodos); printf("\nCant RelaxedNodos = %i\n", cantNodosSuper); int* ipermRelaxedETree = cs_pinv(permRelaxedETree, cantNodos); cs* tmp1 = cs_transpose(A_low, 1); cs_spfree(A_low); cs* tmp2 = cs_symperm(tmp1, ipermRelaxedETree, 1); cs_spfree(tmp1); A_low = cs_transpose(tmp2, 1); cs_spfree(tmp2); int* count2 = (int*)malloc(cantNodos*sizeof(int)); cs_pvec(permRelaxedETree, count, count2, cantNodos); count = count2; int* amd_perm2 = (int*)malloc(cantNodos*sizeof(int)); cs_pvec(permRelaxedETree, amd_perm, amd_perm2, cantNodos); amd_perm = amd_perm2; } //MatrizPrint(A, "%f "); //printf("\nMemory: %i KB\n", memory_usage() ); cs* spL = (cs*) my_malloc(sizeof(cs)); spL->n = A->n; spL->m = A->m; spL->p = (int*) my_malloc((spL->n+1)*sizeof(int)); int nz = 0; for (int i = 0; i < spL->n; i++) { spL->p[i] = nz; nz += count[i]; } spL->p[spL->n] = nz; spL->nzmax = nz; spL->nz = -1; spL->i = (int*) my_malloc(nz*sizeof(int)); memset(spL->i, 0, nz*sizeof(int)); //spL->x = (FLOTANTE*) my_malloc(nz*sizeof(FLOTANTE)); printf("L.nz = %i\n", nz); //printf("\nMemory: %i KB\n", memory_usage() ); my_free(parent); my_free(post); my_free(count); ticksSymbolic = toc(ticksSymbolic); cs_spfree(A); //mtrace(); factorTick = tic(); Factor(A_low, spL, listaNodosSuper, cantNodosSuper); factorTick = toc(factorTick); //muntrace(); printf("\n"); //MatrizSpy(spL); //cs_print(spL, 0); //MatrizPrint(logger, spL); //MatrizPrint(spL, "%f "); mainTick = toc(mainTick); printf("\n"); printf("MFlops = %.2f\n", flops/1000000.0); printf("MFlops/s = %.3f\n", flops/1000000.0/ticks2seg(ticksFactorAux)); printf("\n"); printf("Total = %.6f\n\n", ticks2seg(mainTick)); printf("Factor = %.6f Factorizacion numerica total\n", ticks2seg(factorTick)); printf("FactorAux = %.6f Factorizacion numerica solo calculo\n", ticks2seg(ticksFactorAux)); printf(" BLAS CPU = %.6f\n", ticks2seg(ticksFactorAux1)); printf(" BLAS GPU = %.6f\n", ticks2seg(ticksFactorAux2)); printf(" TRSM GPU = %.6f\n", ticks2seg(ticksTRSM_GPU)); printf(" GEMM GPU = %.6f\n", ticks2seg(ticksGEMM_GPU)); printf(" Otros = %.6f\n", ticks2seg(ticksFactorAux3)); printf("Memcpy = %.6f\n", ticks2seg(ticksMemcpy)+ticks2seg(ticksMemcpy2)+ticks2seg(ticksMemcpy21)+ticks2seg(ticksMemcpyX)); printf(" Memcpy2 = %.6f Copia paneles CPU<->GPU\n", ticks2seg(ticksMemcpy2+ticksMemcpy21)); //printf(" Memcpy21 = %.6f\n", ticks2seg(ticksMemcpy21)); printf(" MemcpyX = %.6f Copia inicial del frente CPU->GPU\n", ticks2seg(ticksMemcpyX)); printf(" Otros = %.6f\n", ticks2seg(ticksMemcpy)); //("MemcpyHost = %.6f\n", ticks2seg(ticksMemcpyHost)); printf("Merge = %.6f\n", ticks2seg(ticksMerge)); printf("GetFk = %.6f Formar el frente\n", ticks2seg(ticksGetFk)); printf("ExtendAdd = %.6f Suma extendida\n", ticks2seg(ticksExtendAdd)); printf("Malloc CPU = %.6f\n", ticks2seg(ticksMalloc)); printf("Free CPU = %.6f\n", ticks2seg(ticksFree)); printf("Malloc GPU = %.6f\n", ticks2seg(ticksMallocGPU)); printf("Free GPU = %.6f\n\n", ticks2seg(ticksFreeGPU)); printf("Load = %.6f\n", ticks2seg(loadTick)); printf("Symbolic = %.6f\n", ticks2seg(ticksSymbolic)); //printf("\ncountExtendAdd = %li\n", countExtendAdd); //printf("countGetFk = %i\n", countGetFk); //printf("bytesMemcpy2 = %li\n", bytesMemcpy2); //printf("bytesMemcpy21 = %li\n", bytesMemcpy21); fclose(logger); exit(1); A = make_sym(A_low); FLOTANTE* b = (FLOTANTE*) my_malloc(A->n*sizeof(FLOTANTE)); FLOTANTE* x = (FLOTANTE*) my_malloc(A->n*sizeof(FLOTANTE)); FLOTANTE* x2 = (FLOTANTE*) my_malloc(A->n*sizeof(FLOTANTE)); for (int i = 0; i < A->n; i++) { 
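		// This loop builds a synthetic right-hand side b[i] = 1 + i/n (x starts as a copy of b).
		// The code that follows permutes b with amd_perm, solves L L^T x = b via
		// cs_lsolve/cs_ltsolve, and prints the scaled residual
		// norm(C*x2 - b) / (cs_norm(A)*norm(x) + norm(b)). Note that this check is currently
		// unreachable: exit(1) above returns before this point, and spL->x is never allocated
		// (its allocation is commented out during the symbolic phase).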
		b[i] = 1 + ((FLOTANTE) i) / A->n;
		x[i] = b[i];
	}
	if (amd) {
		cs_pvec(amd_perm, b, x, A->n);
	}
	cs_lsolve(spL, x);
	cs_ltsolve(spL, x);
	if (amd) {
		cs_ipvec(amd_perm, x, x2, A->n);
	}
	FLOTANTE* resid = (FLOTANTE*) my_malloc(A->n*sizeof(FLOTANTE));
	for (int i = 0; i < A->n; i++) {
		resid[i] = -b[i];
	}
	cs* C = make_sym(C_low);
	cs_gaxpy(C, x2, resid);
	printf ("resid: %8.2e\n", norm (resid,A->n) / (cs_norm (A) * norm (x,A->n) + norm (b,A->n))) ;
	printf ("resid: %8.2e\n", norm (resid,A->n)) ;
	hipDeviceReset();
	hipblasShutdown();
	printf("FIN\n");
	//getchar();
	exit(0);
}
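/*
 * Minimal sketch (not part of the original sources). The per-column work that
 * FactorAux_CPU_3 performs on the pivot block of a front is the right-looking
 * Cholesky step: divide the pivot column by the square root of its diagonal
 * entry, then apply a rank-1 update to the trailing submatrix (the
 * mdgpu_cblasXscal/mdgpu_cblasXsyr calls above). The toy below applies that step
 * to every column of a small SPD matrix in plain loops, with the same
 * column-major lower storage; the names chol_right_looking and the demo main
 * are illustrative only.
 */
#include <cstdio>
#include <cmath>

// In-place right-looking Cholesky of the lower triangle of an n x n SPD matrix
// stored column-major with leading dimension n; returns false on a non-positive
// pivot (the original code prints 'X' in that case instead).
static bool chol_right_looking(double* a, int n) {
	for (int c = 0; c < n; c++) {
		double piv = a[c*n + c];
		if (piv <= 0.0) return false;
		double r = sqrt(piv);
		for (int i = c; i < n; i++) a[c*n + i] /= r;    // scale pivot column (Xscal)
		for (int j = c + 1; j < n; j++)                 // rank-1 update of the
			for (int i = j; i < n; i++)                 // trailing submatrix (Xsyr)
				a[j*n + i] -= a[c*n + i] * a[c*n + j];
	}
	return true;
}

int main() {
	// 3x3 SPD example, lower triangle stored column-major; the expected L has
	// diagonal (2, 2, 2).
	double a[9] = { 4, 2, 2,   0, 5, 3,   0, 0, 6 };
	if (chol_right_looking(a, 3)) {
		for (int j = 0; j < 3; j++)
			for (int i = j; i < 3; i++)
				printf("L(%d,%d) = %g\n", i, j, a[j*3 + i]);
	}
	return 0;
}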
af6545caad3c0eac86f7017ad65c2810cf8bd3fc.cu
#include <stdio.h> #include <time.h> #include <malloc.h> #include <mcheck.h> #include "cublas.h" #include "mdgpu.h" #include "util.h" #include "cutil.h" #include "cs.h" #include "etree.h" #define blasint int #define _Complex #ifndef _WIN32 extern "C" { #endif #include "cblas.h" #ifndef _WIN32 } #endif #define MatrizGetFk MatrizGetFk_3 #define FactorAux_CPU FactorAux_CPU_3 #define FactorAux_GPU FactorAux_GPU_2 bool GPU = true; double flops = 0; clock_t ticksMalloc = 0; clock_t ticksMallocGPU = 0; clock_t ticksFree = 0; clock_t ticksFreeGPU = 0; clock_t ticksFactorAux = 0; clock_t ticksFactorAux1 = 0; clock_t ticksFactorAux2 = 0; clock_t ticksFactorAux3 = 0; clock_t ticksMemcpy = 0; clock_t ticksMemcpy2 = 0; clock_t ticksMemcpy21 = 0; clock_t ticksMemcpyX = 0; clock_t ticksMerge = 0; clock_t ticksExtendAdd = 0; clock_t ticksSymbolic = 0; clock_t ticksGetFk = 0; clock_t ticksMemcpyHost = 0; clock_t ticksTRSM_GPU = 0; clock_t ticksGEMM_GPU = 0; long countExtendAdd = 0; int countGetFk = 0; long bytesMemcpy2 = 0; long bytesMemcpy21 = 0; int block_size = -1; bool pad = false; FILE* logger; Matriz* MatrizGetFk_1(cs* A, int k, int cols) { clock_t t = tic(); Matriz* m = (Matriz*)my_malloc(sizeof(Matriz)); m->k = k; m->offset = 0; m->n = 0; for (int i = k+cols; i < A->n; i++) { bool b = false; for (int h = 0; h < cols; h++) { if (MatrizGet(A, i, k+h) != 0) { b = true; break; } } if (b) { m->n++; } } m->n += cols; MatrizAlloc(m); m->q = (int*)my_malloc((m->n)*sizeof(int)); for (int i = 0; i < cols; i++) { for (int h = 0; h < cols; h++) { MatrizSet(m, i, h, MatrizGet(A, k+i, k+h)); } m->q[i] = k+i; } int j = 0; for (int i = k+cols; i < A->n; i++) { bool b = false; for (int h = 0; h < cols; h++) { if (MatrizGet(A, i, k+h) != 0) { b = true; break; } } if (b) { for (int h = 0; h < cols; h++) { MatrizSet(m, j+cols, h, MatrizGet(A, i, k+h)); } m->q[j+cols] = i; j++; } } ticksGetFk += toc(t); return m; } Matriz* MatrizGetFk_2(cs* A, int k, int cols) { clock_t t = tic(); int* pos = (int*) my_malloc(cols*sizeof(int)); memset(pos, 0, cols*sizeof(int)); Matriz* m = (Matriz*)my_malloc(sizeof(Matriz)); m->k = k; m->offset = 0; m->n = 0; for (int h = 0; h < cols; h++) { while (A->i[A->p[k+h] + pos[h]] < k+h) { pos[h]++; } } for (int i = k+cols; i < A->n; i++) { bool b = false; for (int h = 0; h < cols; h++) { while (pos[h] < A->p[k+h+1] - A->p[k+h] && A->i[A->p[k+h] + pos[h]] < i) { pos[h]++; } if (pos[h] < A->p[k+h+1] - A->p[k+h] && A->i[A->p[k+h] + pos[h]] == i) { b = true; } } if (b) { m->n++; } } m->n += cols; MatrizAlloc(m); m->q = (int*)my_malloc((m->n)*sizeof(int)); for (int i = 0; i < cols; i++) { for (int h = 0; h < cols; h++) { MatrizSet(m, i, h, MatrizGet(A, k+i, k+h)); } m->q[i] = k+i; } memset(pos, 0, cols*sizeof(int)); int j = 0; for (int i = k+cols; i < A->n; i++) { bool b = false; for (int h = 0; h < cols; h++) { while (pos[h] < A->p[k+h+1] - A->p[k+h] && A->i[A->p[k+h] + pos[h]] < i) { pos[h]++; } if (pos[h] < A->p[k+h+1] - A->p[k+h] && A->i[A->p[k+h] + pos[h]] == i) { MatrizSet(m, j+cols, h, /*MatrizGet(A, i, k+h)*/ A->x[A->p[k+h] + pos[h]]); b = true; } } if (b) { m->q[j+cols] = i; j++; } } my_free(pos); ticksGetFk += toc(t); return m; } Matriz* MatrizGetFk_3(cs* C, int k, int c) { const int MAX_INT = 0x7fffffff; Matriz* M = (Matriz*)my_malloc(sizeof(Matriz)); M->k = k; M->offset = 0; M->n = 0; //int* pos = (int*)malloc(c*sizeof(int)); //int* min_i = (int*)malloc(c*sizeof(int)); //int* min_i2 = (int*)malloc(c*sizeof(int)); int pos[c]; int min_i[c]; int min_i2[c]; int minr; int* C_p = &C->p[k]; 
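	// MatrizGetFk_3 assembles the frontal matrix for supernode k with c pivot columns:
	// the first c entries of the index set are rows k..k+c-1, and the remaining entries
	// are the merged (sorted, duplicate-free) row patterns of those c columns of the CSC
	// matrix below row k+c. A second pass scatters the numerical values of each column
	// into the front at the positions given by the merged index list.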
int* C_i = C->i; FLOTANTE* C_x = C->x; static int* merge = NULL; if (merge == NULL) { merge = (int*) malloc(C->n*sizeof(int)); } clock_t t = tic(); for (int i = 0; i < c; i++) { merge[M->n] = k+i; M->n++; } for (int i = 0; i < c; i++) { int pos_i = C_p[i]; while (C_i[pos_i] < k+c && pos_i < C_p[i+1]) { pos_i++; } pos[i] = pos_i; } int h = 0; while (true) { minr = MAX_INT; if (h == 0) { for (int i = 0; i < c; i++) { if (pos[i]+1 < C_p[i+1]) { min_i[i] = C_i[pos[i]]; min_i2[i] = C_i[pos[i]+1]; } else if (pos[i] < C_p[i+1]) { min_i[i] = C_i[pos[i]]; min_i2[i] = MAX_INT; } else { min_i[i] = MAX_INT; min_i2[i] = MAX_INT; } } } for (int i = 0; i < c; i++) { if (min_i[i] < minr) { minr = min_i[i]; } } if (minr == MAX_INT) { break; } merge[M->n] = minr; M->n++; for (int i = 0; i < c; i++) { if (min_i[i] == minr) { pos[i]++; min_i[i] = min_i2[i]; } } h = h == 0 ? 1 : 0; } MatrizAlloc(M); M->q = (int*)my_malloc(M->n*sizeof(int)); memcpy(M->q, merge, M->n*sizeof(int)); for (int i = 0; i < c; i++) { int p = 0; for (int j = C_p[i]; j < C_p[i+1]; j++) { while (merge[p] < C_i[j]) { p++; } _MatrizSet(M, p, i, C_x[j]); } } ticksGetFk += toc(t); //my_free(pos); //my_free(min_i); //my_free(min_i2); return M; } inline int log2(int x) { int square = 2; int count = 1; while (square < x) { square *= 2; ++count; } return count; } inline int pow2(int x) { int count = 1; for (int i = 0; i < x; i++) { count *= 2; } return count; } inline static int sizeNivel(int x) { return pow2(x); } inline static int comienzoNivel(int x) { return pow2(x)-1; } typedef struct { int x; int ptr; } KMergeTree; const int INF = 0x7fffffff; #define kmerge_min(x, y) (x <= y) Matriz* MatrizGetFk_4(cs* C, int k, int c) { clock_t t; Matriz* M = (Matriz*)my_malloc(sizeof(Matriz)); M->k = k; M->offset = 0; M->n = 0; int* C_p = &C->p[k]; int* C_i = C->i; int i, x1, x2, n1, n2; int niv = log2(c); int n_tree = sizeNivel(niv+1)-1; int pos[c]; KMergeTree tree[n_tree]; KMergeTree treeLoser[n_tree]; int sizeNiveles[niv+1]; int comienzoNiveles[niv+1]; /*int* pos = (int*)malloc(c*sizeof(int)); KMergeTree* tree = (KMergeTree*) my_malloc(n_tree*sizeof(KMergeTree)); KMergeTree* treeLoser = (KMergeTree*) my_malloc(n_tree*sizeof(KMergeTree)); int* sizeNiveles = (int*) my_malloc((niv+1)*sizeof(int)); int* comienzoNiveles = (int*) my_malloc((niv+1)*sizeof(int));*/ for (i = 0; i <= niv; i++) { sizeNiveles[i] = sizeNivel(i); comienzoNiveles[i] = comienzoNivel(i); } t = tic(); M->n = c; for (i = 0; i < c; i++) { pos[i] = C_p[i]; while (pos[i] < C_p[i+1] && C_i[pos[i]] < k+c) { pos[i]++; } } for (i = 0; i < n_tree; i++) { tree[i].x = INF; tree[i].ptr = INF; treeLoser[i].x = INF; treeLoser[i].ptr = INF; } for (i = 0; i < c; i++) { tree[comienzoNiveles[niv]+i].x = treeLoser[comienzoNiveles[niv]+i].x = pos[i] < C_p[i+1] ? C_i[pos[i]] : INF; tree[comienzoNiveles[niv]+i].ptr = treeLoser[comienzoNiveles[niv]+i].ptr = i; } for (int n = niv-1; n >= 0; n--) { for (i = 0; i < sizeNiveles[n]; i++) { x1 = tree[comienzoNiveles[n+1]+i*2].x; x2 = tree[comienzoNiveles[n+1]+i*2+1].x; n1 = tree[comienzoNiveles[n+1]+i*2].ptr; n2 = tree[comienzoNiveles[n+1]+i*2+1].ptr; tree[comienzoNiveles[n]+i].x = kmerge_min(x1,x2) ? x1 : x2; tree[comienzoNiveles[n]+i].ptr = kmerge_min(x1,x2) ? n1 : n2; treeLoser[comienzoNiveles[n]+i].x = kmerge_min(x1,x2) ? x2 : x1; treeLoser[comienzoNiveles[n]+i].ptr = kmerge_min(x1,x2) ? 
n2 : n1; } } KMergeTree winner = tree[0]; int ant; ant = INF; while (winner.x != INF) { if (ant == INF || ant != winner.x) { ant = winner.x; M->n++; } int h = winner.ptr; int m = comienzoNiveles[niv]+h; if (pos[h]+1 < C_p[h+1]) { pos[h]++; treeLoser[m].x = C_i[pos[h]]; } else { treeLoser[m].x = INF; } winner = treeLoser[m]; for (i = niv-1; i >= 0; i--) { //countGetFk++; int p = (m-1)/2; if (!kmerge_min(winner.x, treeLoser[p].x)) { KMergeTree tmp; tmp = winner; winner = treeLoser[p]; treeLoser[p] = tmp; } m = p; } } MatrizAlloc(M); M->q = (int*)my_malloc(M->n*sizeof(int)); for (i = 0; i < c; i++) { pos[i] = C_p[i]; } for (i = 0; i < n_tree; i++) { tree[i].x = INF; tree[i].ptr = INF; treeLoser[i].x = INF; treeLoser[i].ptr = INF; } for (i = 0; i < c; i++) { tree[comienzoNiveles[niv]+i].x = treeLoser[comienzoNiveles[niv]+i].x = C_i[pos[i]]; tree[comienzoNiveles[niv]+i].ptr = treeLoser[comienzoNiveles[niv]+i].ptr = i; } for (int n = niv-1; n >= 0; n--) { for (i = 0; i < sizeNiveles[n]; i++) { x1 = tree[comienzoNiveles[n+1]+i*2].x; x2 = tree[comienzoNiveles[n+1]+i*2+1].x; n1 = tree[comienzoNiveles[n+1]+i*2].ptr; n2 = tree[comienzoNiveles[n+1]+i*2+1].ptr; tree[comienzoNiveles[n]+i].x = kmerge_min(x1,x2) ? x1 : x2; tree[comienzoNiveles[n]+i].ptr = kmerge_min(x1,x2) ? n1 : n2; treeLoser[comienzoNiveles[n]+i].x = kmerge_min(x1,x2) ? x2 : x1; treeLoser[comienzoNiveles[n]+i].ptr = kmerge_min(x1,x2) ? n2 : n1; } } winner = tree[0]; int row = 0; ant = INF; KMergeTree tmp; while (winner.x != INF) { countGetFk++; if (ant == INF || ant != winner.x) { ant = winner.x; row++; } int h = winner.ptr; int m = comienzoNiveles[niv]+h; if (pos[h] < C_p[h+1]) { MatrizSet(M, row-1, h, C->x[pos[h]]); M->q[row-1] = C_i[pos[h]]; } if (pos[h]+1 < C_p[h+1]) { pos[h]++; treeLoser[m].x = C_i[pos[h]]; } else { treeLoser[m].x = INF; } winner = treeLoser[m]; for (i = niv-1; i >= 0; i--) { int p = (m-1)/2; if (!kmerge_min(winner.x, treeLoser[p].x)) { tmp = winner; winner = treeLoser[p]; treeLoser[p] = tmp; } m = p; } } ticksGetFk += toc(t); /*my_free(comienzoNiveles); my_free(sizeNiveles); my_free(treeLoser); my_free(tree); my_free(pos);*/ return M; } void VectorMerge(int* q1, int n1, int* q2, int n2, /* out */ int** q, /* out */ int* n) { int* qq = (int*) my_malloc(sizeof(int)*(n1+n2)); *q = qq; clock_t tick = tic(); int nn = 0; int k1 = 0, k2 = 0; while (k1 < n1 || k2 < n2) { if (k1 < n1 && k2 < n2) { if (q1[k1] < q2[k2]) { if (nn == 0 || qq[nn-1] != q1[k1]) { qq[nn] = q1[k1]; nn++; k1++; } else { k1++; } } else { if (nn == 0 || qq[nn-1] != q2[k2]) { qq[nn] = q2[k2]; nn++; k2++; } else { k2++; } } } else if (k1 < n1) { if (qq[nn-1] != q1[k1]) { qq[nn] = q1[k1]; nn++; } k1++; } else { if (qq[nn-1] != q2[k2]) { qq[nn] = q2[k2]; nn++; } k2++; } } *n = nn; ticksMerge += toc(tick); } void VectorMergeIndices(int* Q, int N, int* q, int n, /* out */ int** ql) { *ql = (int*) my_malloc(sizeof(int)*n); int i = 0, j = 0; while (i < n) { if (q[i] == Q[j]) { (*ql)[i] = j; i++; } j++; } } void AllocFrenteGPU(Frente** F, int nF) { clock_t tick = tic(); int size_x = 0; int size_q = 0; for (int i = 0; i < nF; i++) { int n = F[i]->h_frente->w; size_x += n*n*sizeof(FLOTANTE); size_q += n*sizeof(int); } FLOTANTE* x; cutilSafeCall( cudaMalloc((void**)&x, size_x) ); cutilSafeCall( cudaMemset(x, 0, size_x) ); /*int* q; cutilSafeCall( cudaMalloc((void**)&q, size_q) );*/ //Matriz* f; ///*cutilSafeCall(*/ cudaMalloc((void**)&f, size_f) /*)*/; size_x = 0; size_q = 0; for (int i = 0; i < nF; i++) { int n = F[i]->h_frente->w; F[i]->hd_frente = (Matriz*) 
my_malloc(sizeof(Matriz)); F[i]->hd_frente->w = F[i]->h_frente->w; F[i]->hd_frente->n = F[i]->h_frente->n; F[i]->hd_frente->k = F[i]->h_frente->k; F[i]->hd_frente->offset = F[i]->h_frente->offset; F[i]->hd_frente->x = &x[size_x]; size_x += n*n; /*F[i]->hd_frente->q = &q[size_q]; size_q += n;*/ } ticksMallocGPU += toc(tick); } void AllocFrenteGPU(Frente* F) { AllocFrenteGPU(&F, 1); } void FreeFrenteGPU(Frente** F, int n) { clock_t tick = tic(); for (int i = 0; i < n; i++) { if (F[i]->GPU) { my_free(F[i]->h_frente->ql); my_free(F[i]->h_frente->q); my_free(F[i]->h_frente->x); my_free(F[i]->h_frente); if (i == 0) { cutilSafeCall( cudaFree(F[i]->hd_frente->x) ); //cutilSafeCall( cudaFree(F[i]->hd_frente->q) ); } cutilSafeCall( cudaFree(F[i]->hd_frente->ql) ); my_free(F[i]->hd_frente); my_free(F[i]); } else { my_free(F[i]->h_frente->x); my_free(F[i]->h_frente); my_free(F[i]); } } ticksFreeGPU += toc(tick); } void FreeFrenteGPU(Frente* F) { FreeFrenteGPU(&F, 1); } void MoverFrenteAGPU(Frente* F/*, cudaStream_t stream*/) { int n = F->h_frente->w; /*static FLOTANTE* buffer = NULL; static int sizeBuffer = 0; if (buffer == NULL || n*F->h_frente->offset*sizeof(FLOTANTE) > sizeBuffer) { if (buffer != NULL) { cudaFreeHost(buffer); } sizeBuffer = n*F->h_frente->offset*sizeof(FLOTANTE); cudaMallocHost((void**)&buffer, sizeBuffer); } clock_t tick; //tick = tic(); memcpy(buffer, F->h_frente->x, n*F->h_frente->offset*sizeof(FLOTANTE)); //ticksMemcpyHost += toc(tick);*/ clock_t tick; tick = tic(); cutilSafeCall( cudaMemcpy(F->hd_frente->x, F->h_frente->x, n*F->h_frente->offset*sizeof(FLOTANTE), cudaMemcpyHostToDevice/*, stream*/) ); ticksMemcpyX += toc(tick); } void MoverFrenteQlAGPU(Frente* F) { int n = F->h_frente->n /*- F->h_frente->offset*/; clock_t tick; tick = tic(); cutilSafeCall( cudaMalloc((void**)&F->hd_frente->ql, n*sizeof(int)) ); ticksMallocGPU += toc(tick); tick = tic(); cutilSafeCall( cudaMemcpy(F->hd_frente->ql, F->h_frente->ql, n*sizeof(int), cudaMemcpyHostToDevice) ); ticksMemcpy += toc(tick); } /*void MoverFrenteXACPU(Frente* F) { int n = F->hd_frente->w; clock_t tick = tic(); cutilSafeCall( cudaMemcpy(F->h_frente->x, F->hd_frente->x, n*n*sizeof(FLOTANTE), cudaMemcpyDeviceToHost) ); ticksMemcpy += toc(tick); }*/ void ExtendAdd_CPU_old(Matriz* src, Matriz* dst) { int qi; int qj; for (int j = 0; j < src->n-src->offset; j++) { qj = src->ql[j]; for (int i = 0; i < src->n-src->offset; i++) { qi = src->ql[i]; MatrizSet(dst, qi, qj, MatrizGet(src, i+src->offset, j+src->offset)+MatrizGet(dst, qi, qj)); } } } void ExtendAdd_CPU(Matriz* src, Matriz* dst) { int qi; int qj; int i, j, cd, cs; int n = src->n-src->offset; FLOTANTE* xd = dst->x; FLOTANTE* xs = src->x; for (j = 0; j < n; j++) { qj = src->ql[j]; cd = qj*dst->w; cs = (j+src->offset)*src->w+src->offset; for (i = 0; i < n; i++) { qi = src->ql[i]; xd[cd+qi] += xs[cs+i]; countExtendAdd++; } } } __global__ void ExtendAdd_GPU_device(int src_offset, int src_n, int dst_n, FLOTANTE* src_x, FLOTANTE* dst_x, int* src_ql, int src_n2) { int i = blockIdx.x * 16 + threadIdx.x; int j = blockIdx.y * 16 + threadIdx.y; __shared__ int p1[16]; __shared__ int p2[16]; if (threadIdx.y == 0) { p1[threadIdx.x] = src_ql[i]; p2[threadIdx.x] = src_ql[blockIdx.y * 16 + threadIdx.x] * dst_n; //printf("p1[%i] = %i; p2[%i] = %i\n", threadIdx.x, p1[threadIdx.x], threadIdx.x, p2[threadIdx.x]); } __syncthreads(); if (i < src_n2 && j < src_n2) { dst_x[p2[threadIdx.y]+p1[threadIdx.x]] += src_x[j*src_n+i]; } } void ExtendAdd_GPU(Frente* src, Frente* dst) { int n = 
src->h_frente->n; dim3 block(16,16); dim3 grid(n / 16 + 1, n / 16 + 1); ExtendAdd_GPU_device<<<grid,block>>>(src->h_frente->offset, src->h_frente->w, dst->h_frente->w, &src->hd_frente->x[src->h_frente->offset*src->h_frente->w+src->h_frente->offset], dst->hd_frente->x, src->hd_frente->ql, src->h_frente->n-src->h_frente->offset); //cudaThreadSynchronize(); cutilCheckMsg("ExtendAdd_GPU"); } void ExtendAdd(Frente* src, Frente* dst, bool b) { clock_t tick = tic(); if (GPU && !b) { ExtendAdd_GPU(src, dst); } else { ExtendAdd_CPU(src->h_frente, dst->h_frente); } ticksExtendAdd += toc(tick); } __global__ void FactorAux1_GPU_1_device(int c, Matriz* Uk, int* spL_i, FLOTANTE* spL_x) { int i = blockIdx.x * 256 + threadIdx.x; FLOTANTE raiz; int n = Uk->n; int w = Uk->w; raiz = sqrt(Uk->x[c*w+c]); if (i >= c && i < n) { spL_i[i-c] = Uk->q[i]; spL_x[i-c] = Uk->x[c*w+i]/raiz; } } /*void FactorAux_GPU_1(Frente* Uk, cs* spL) { //clock_t tick; int cols = Uk->h_frente->offset; int n = Uk->hd_frente->n; int w = Uk->hd_frente->w; int k = Uk->hd_frente->k; FLOTANTE* x = Uk->hd_frente->x; FLOTANTE f; //printf("\n"); //MatrizPrint(Uk->d_frente, "%f "); for (int c = 0; c < cols; c++) { //tick = tic(); //FactorAux1_GPU(c, Uk, spL); //ticksFactorAux1 += toc(tick); FLOTANTE* d_spL_x; cutilSafeCall( cudaMalloc((void**)&d_spL_x, (n-c)*sizeof(FLOTANTE)) ); int* d_spL_i; cutilSafeCall( cudaMalloc((void**)&d_spL_i, (n-c)*sizeof(int)) ); dim3 block(256); int s = n / 256 + 1; dim3 grid(s); FactorAux1_GPU_1_device<<<grid,block>>>(c, Uk->d_frente, d_spL_i, d_spL_x); cutilCheckMsg("FactorAux1_GPU"); cutilSafeCall( cudaMemcpy(&spL->i[spL->p[k+c]], d_spL_i, (n-c)*sizeof(int), cudaMemcpyDeviceToHost) ); cutilSafeCall( cudaMemcpy(&spL->x[spL->p[k+c]], d_spL_x, (n-c)*sizeof(FLOTANTE), cudaMemcpyDeviceToHost) ); cudaFree(d_spL_i); cudaFree(d_spL_x); //tick = tic(); //FactorAux2_GPU_CUBLAS(c, Uk); //ticksFactorAux2 += toc(tick); cutilSafeCall( cudaMemcpy((void**)&f, x+(c*w+c), sizeof(FLOTANTE), cudaMemcpyDeviceToHost) ); mdgpu_cublasXsyr('L', n-c-1, (FLOTANTE)-1.0/f, &x[(c+0)*w+(c+1)], 1, &x[(c+1)*w+(c+1)], w); cublasStatus status = cublasGetError(); if (status != CUBLAS_STATUS_SUCCESS) { printf("Error en cublaSgemm()"); exit(1); } //cudaThreadSynchronize(); } //printf("\n"); //MatrizPrint(Uk->d_frente, "%f "); }*/ __global__ void FactorAux1_GPU_2_device_1(FLOTANTE* x) { x[0] = sqrt(x[0]); } __global__ void FactorAux1_GPU_2_device_2(int n, FLOTANTE* x) { int i = blockIdx.x * 256 + threadIdx.x; FLOTANTE f = x[0]; if (i > 0 && i < n) { x[i] /= f; } } void FactorAux_GPU_2(Frente** F, int nF, cs* spL) { clock_t tick; clock_t tick2; cublasStatus status; if (block_size == -1) { printf("block_size == -1\n"); exit(1); } #define cols(i) (F[i]->h_frente->offset) int b = block_size; int max_bloques = 16; static FLOTANTE* bloqueP = NULL; static FLOTANTE** bloques = NULL; static FLOTANTE* d_bloqueP = NULL; static FLOTANTE** d_bloques = NULL; if (bloqueP == NULL) { cutilSafeCall( cudaMallocHost((void**)&bloqueP, max_bloques*b*b*sizeof(FLOTANTE)) ); //bloqueP = (FLOTANTE*) my_malloc(max_bloques*b*b*sizeof(FLOTANTE)); bloques = (FLOTANTE**) my_malloc(max_bloques*sizeof(FLOTANTE*)); for (int i = 0; i < max_bloques; i++) { bloques[i] = &bloqueP[i*b*b]; } cutilSafeCall( cudaMalloc((void**)&d_bloqueP, max_bloques*b*b*sizeof(FLOTANTE)) ); d_bloques = (FLOTANTE**) my_malloc(max_bloques*sizeof(FLOTANTE*)); for (int i = 0; i < max_bloques; i++) { d_bloques[i] = &d_bloqueP[i*b*b]; } } int max_cols = 0; for (int i = 0; i < nF; i++) { if (cols(i) > max_cols) { 
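			// max_cols is the widest pivot-column count among the fronts in this batch.
			// The panel loop below walks all fronts in lock-step panels of width block_size:
			// each diagonal panel is copied to the host, factored with CBLAS (scal + syr),
			// copied back, and the trailing block is then updated on the GPU with
			// CUBLAS trsm + gemm.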
max_cols = cols(i); } } for (int j = 0; j < nF; j++) { F[j]->tiempoFact = 0; } for (int i = 0; i < max_cols; i += b) { for (int j = 0; j < nF; j++) { clock_t tini = tic(); if (i < cols(j)) { FLOTANTE* x = F[j]->hd_frente->x; int w = F[j]->hd_frente->w; int nb = min(i+b,cols(j)); int b2 = nb-i; FLOTANTE* bloque = bloques[j]; bytesMemcpy2 += b2*b2*sizeof(FLOTANTE); if (j == 0) { //cutilSafeCall( cudaMemcpy2D(bloque, b*sizeof(FLOTANTE), &x[i*w+i], w*sizeof(FLOTANTE), b2*sizeof(FLOTANTE), b2, cudaMemcpyDeviceToHost); //); } } if (i < cols(j)) { if (j < nF-1) { int j2 = j+1; FLOTANTE* x = F[j2]->hd_frente->x; int w = F[j2]->hd_frente->w; int nb = min(i+b,cols(j2)); int b2 = nb-i; FLOTANTE* bloque = bloques[j2]; bytesMemcpy2 += b2*b2*sizeof(FLOTANTE); tick = tic(); //cutilSafeCall( cudaMemcpy2DAsync(bloque, b*sizeof(FLOTANTE), &x[i*w+i], w*sizeof(FLOTANTE), b2*sizeof(FLOTANTE), b2, cudaMemcpyDeviceToHost); //); //cudaThreadSynchronize(); ticksMemcpy21 += toc(tick); } int nb = min(i+b,cols(j)); int b2 = nb-i; FLOTANTE* bloque = bloques[j]; tick = tic(); int cb = 0; for (int c = 0; c < b2; c++) { mdgpu_cblasXscal(b2-c, 1.0/sqrt(bloque[cb+c]), &bloque[cb+c], 1); mdgpu_cblasXsyr(CblasColMajor, CblasLower, b2-c-1, -1.0, &bloque[cb+c+1], 1, &bloque[cb+b+c+1], b); cb += b; } ticksFactorAux1 += toc(tick); cudaThreadSynchronize(); } if (i < cols(j)) { FLOTANTE* x = F[j]->hd_frente->x; int w = F[j]->hd_frente->w; int nb = min(i+b,cols(j)); int b2 = nb-i; FLOTANTE* bloque = bloques[j]; tick = tic(); //cutilSafeCall( cudaMemcpy2D(&x[i*w+i], w*sizeof(FLOTANTE), bloque, b*sizeof(FLOTANTE), b2*sizeof(FLOTANTE), b2, cudaMemcpyHostToDevice); //); ticksMemcpy2 += toc(tick); } if (i < cols(j)) { FLOTANTE* x = F[j]->hd_frente->x; int w = F[j]->hd_frente->w; int n = F[j]->hd_frente->n; int nb = min(i+b,cols(j)); int b2 = nb-i; if (n-nb > 0) { tick = tic(); mdgpu_cublasXtrsm('R', 'L', 'T', 'N', n-nb, nb-i, 1.0f, &x[i*w+i], w, &x[i*w+i+b2], w); cudaThreadSynchronize(); status = cublasGetError(); ticksTRSM_GPU += toc(tick); if (status != CUBLAS_STATUS_SUCCESS) { printf("Error en cublaXtrsm()"); exit(1); } tick = tic(); mdgpu_cublasXgemm('N', 'T', n-nb, n-nb, b2, -1.0f, &x[i*w+i+b2], w, &x[i*w+i+b2], w, 1.0f, &x[(i+b2)*w+i+b2], w); cudaThreadSynchronize(); status = cublasGetError(); ticksGEMM_GPU += toc(tick); if (status != CUBLAS_STATUS_SUCCESS) { printf("Error en cublaXgemm()"); exit(1); } } } cudaThreadSynchronize(); F[j]->tiempoFact += toc(tini); } /*tick = tic(); for (int j = 0; j < nF; j++) { if (i < cols(j)) { FLOTANTE* x = F[j]->hd_frente->x; int w = F[j]->hd_frente->w; int nb = min(i+b,cols(j)); int b2 = nb-i; FLOTANTE* bloque = bloques[j]; bytesMemcpy2 += b2*b2*sizeof(FLOTANTE); //cutilSafeCall( cudaMemcpy2D(bloque, b*sizeof(FLOTANTE), &x[i*w+i], w*sizeof(FLOTANTE), b2*sizeof(FLOTANTE), b2, cudaMemcpyDeviceToHost); //); } } cudaThreadSynchronize(); ticksMemcpy21 += toc(tick); tick = tic(); for (int j = 0; j < nF; j++) { if (i < cols(j)) { int nb = min(i+b,cols(j)); int b2 = nb-i; FLOTANTE* bloque = bloques[j]; int cb = 0; for (int c = 0; c < b2; c++) { mdgpu_cblasXscal(b2-c, 1.0/sqrt(bloque[cb+c]), &bloque[cb+c], 1); mdgpu_cblasXsyr(CblasColMajor, CblasLower, b2-c-1, -1.0, &bloque[cb+c+1], 1, &bloque[cb+b+c+1], b); cb += b; } } } ticksFactorAux1 += toc(tick); tick = tic(); for (int j = 0; j < nF; j++) { if (i < cols(j)) { FLOTANTE* x = F[j]->hd_frente->x; int w = F[j]->hd_frente->w; int nb = min(i+b,cols(j)); int b2 = nb-i; FLOTANTE* bloque = bloques[j]; //cutilSafeCall( cudaMemcpy2D(&x[i*w+i], 
w*sizeof(FLOTANTE), bloque, b*sizeof(FLOTANTE), b2*sizeof(FLOTANTE), b2, cudaMemcpyHostToDevice); //); } } cudaThreadSynchronize(); ticksMemcpy2 += toc(tick); tick2 = tic(); for (int j = 0; j < nF; j++) { if (i < cols(j)) { FLOTANTE* x = F[j]->hd_frente->x; int w = F[j]->hd_frente->w; int n = F[j]->hd_frente->n; int nb = min(i+b,cols(j)); int b2 = nb-i; if (n-nb > 0) { tick = tic(); mdgpu_cublasXtrsm('R', 'L', 'T', 'N', n-nb, nb-i, 1.0f, &x[i*w+i], w, &x[i*w+i+b2], w); cudaThreadSynchronize(); status = cublasGetError(); ticksTRSM_GPU += toc(tick); if (status != CUBLAS_STATUS_SUCCESS) { printf("Error en cublaXtrsm()"); exit(1); } tick = tic(); mdgpu_cublasXgemm('N', 'T', n-nb, n-nb, b2, -1.0f, &x[i*w+i+b2], w, &x[i*w+i+b2], w, 1.0f, &x[(i+b2)*w+i+b2], w); cudaThreadSynchronize(); status = cublasGetError(); ticksGEMM_GPU += toc(tick); if (status != CUBLAS_STATUS_SUCCESS) { printf("Error en cublaXgemm()"); exit(1); } } } } cudaThreadSynchronize(); ticksFactorAux2 += toc(tick2);*/ } printf("\n"); for (int j = 0; j < nF; j++) { printf("Procesando frente %i, size = %i, cols = %i, tiempo = %.10f\n", F[j]->h_frente->k, F[j]->h_frente->n, F[j]->h_frente->offset, ticks2seg(F[j]->tiempoFact)); } tick = tic(); int* spL_i; for (int j = 0; j < nF; j++) { FLOTANTE* x = F[j]->hd_frente->x; int w = F[j]->hd_frente->w; int n = F[j]->hd_frente->n; int k = F[j]->hd_frente->k; for (int c = 0; c < cols(j); c++) { spL_i = &spL->i[spL->p[k+c]-c]; memcpy(&spL_i[c], &F[j]->h_frente->q[c], (n-c)*sizeof(int)); //cutilSafeCall( cudaMemcpy(&spL_i[c], &F[j]->h_frente->q[c], (n-c)*sizeof(int), cudaMemcpyHostToHost) ); // TODO: traer L en bloques mas grandes //cutilSafeCall( cudaMemcpy(&spL->x[spL->p[k+c]], &x[c*w+c], (n-c)*sizeof(float), cudaMemcpyDeviceToHost) ); } } ticksFactorAux3 += toc(tick); #undef cols } void FactorAux_CPU_1(Frente* Uk, cs* spL) { //clock_t tick; int cols = Uk->h_frente->offset; FLOTANTE raiz; int x; int n = Uk->h_frente->n; int k = Uk->h_frente->k; for (int c = 0; c < cols; c++) { //FactorAux1_CPU(c, Uk->h_frente, spL); flops += (n-c) + 1; raiz = sqrt(MatrizGet(Uk->h_frente, c, c)); for (int i = c; i < n; i++) { x = spL->p[k+c] + i - c; spL->i[x] = Uk->h_frente->q[i]; spL->x[x] = MatrizGet(Uk->h_frente, i, c)/raiz; //printf("%i %i %lf %lf\n", i, c, raiz, MatrizGet(Uk, i, c)); if (MatrizGet(Uk->h_frente, c, c) < 0) { putchar('X'); fflush(stdout); //printf("La matriz no es definida positiva!!!\n"); //exit(1); } } //FactorAux2_CPU(c, Uk->h_frente); for (int j = c+1; j < n; j++) { for (int i = j; i < n; i++) { MatrizAdd(Uk->h_frente, i, j, - MatrizGet(Uk->h_frente, i, c) * MatrizGet(Uk->h_frente, j, c) / MatrizGet(Uk->h_frente, c, c)); } } } } void FactorAux_CPU_2(Frente* Uk, cs* spL) { //clock_t tick; int cols = Uk->h_frente->offset; FLOTANTE raiz; int x; int n = Uk->h_frente->n; int k = Uk->h_frente->k; for (int c = 0; c < cols; c++) { //FactorAux1_CPU(c, Uk->h_frente, spL); flops += (n-c) + 1; raiz = sqrt(MatrizGet(Uk->h_frente, c, c)); for (int i = c; i < n; i++) { x = spL->p[k+c] + i - c; spL->i[x] = Uk->h_frente->q[i]; spL->x[x] = MatrizGet(Uk->h_frente, i, c)/raiz; //printf("%i %i %lf %lf\n", i, c, raiz, MatrizGet(Uk, i, c)); if (MatrizGet(Uk->h_frente, c, c) < 0) { putchar('X'); fflush(stdout); //printf("La matriz no es definida positiva!!!\n"); //exit(1); } } //FactorAux2_CPU(c, Uk->h_frente); FLOTANTE f = (FLOTANTE)-1.0 / MatrizGet(Uk->h_frente, c, c); flops += (n-c)*(n-c-1); mdgpu_cblasXsyr(CblasColMajor, CblasLower, n-c-1, f, &Uk->h_frente->x[(c+0)*n+(c+1)], 1, 
&Uk->h_frente->x[(c+1)*n+(c+1)], n); } } void FactorAux_CPU_3(Frente* Uk, cs* spL) { //clock_t tick; int cols = Uk->h_frente->offset; FLOTANTE raiz; int x; int n = Uk->h_frente->n; int w = Uk->h_frente->w; int k = Uk->h_frente->k; //printf("\nk = %i\ncols = %i\n", k, cols); //MatrizPrint(Uk->h_frente, "%f "); int b = block_size == -1 ? n : block_size; for (int i = 0; i < cols; i += b) { int nb = min(i+b,cols); int b2 = nb-i; for (int c = i; c < nb; c++) { raiz = sqrt(MatrizGet(Uk->h_frente, c, c)); for (int j = c; j < nb; j++) { MatrizSet(Uk->h_frente, j, c, MatrizGet(Uk->h_frente, j, c)/raiz); } mdgpu_cblasXsyr(CblasColMajor, CblasLower, nb-c-1, -1.0, Uk->h_frente->x+((c+0)*w+(c+1)), 1, Uk->h_frente->x+((c+1)*w+(c+1)), w); } if (n-nb > 0) { mdgpu_cblasXtrsm(CblasColMajor, CblasRight, CblasLower, CblasTrans, CblasNonUnit, n-nb, nb-i, 1.0f, Uk->h_frente->x+(i*w+i), w, Uk->h_frente->x+(i*w+i+b2), w); //printf("\n"); //MatrizPrint(Uk->h_frente, "%f "); mdgpu_cblasXgemm(CblasColMajor, CblasNoTrans, CblasTrans, n-nb, n-nb, b2, -1.0f, &Uk->h_frente->x[i*w+i+b2], w, &Uk->h_frente->x[i*w+i+b2], w, 1.0f, &Uk->h_frente->x[(i+b2)*w+i+b2], w); } } //printf("\n"); //MatrizPrint(Uk->h_frente, "%f "); for (int c = 0; c < cols; c++) { for (int j = c; j < n; j++) { x = spL->p[k+c] + j - c; spL->i[x] = Uk->h_frente->q[j]; spL->x[x] = MatrizGet(Uk->h_frente, j, c); } } for (int c = 0; c < cols; c++) { flops += (n-c) + 1; flops += (n-c)*(n-c-1); } } void FactorAux(Frente* Uk, cs* spL) { clock_t tick = tic(); if (GPU) { FactorAux_GPU(&Uk, 1, spL); } else { FactorAux_CPU(Uk, spL); } ticksFactorAux += toc(tick); } void FactorAux(Frente** U, int n_U, cs* spL) { clock_t tick = tic(); if (GPU) { /*for (int i = 0; i < n_U; i++) { FactorAux_GPU(&U[i], 1, spL); }*/ FactorAux_GPU(U, n_U, spL); } else { for (int i = 0; i < n_U; i++) { FactorAux_CPU(U[i], spL); } } ticksFactorAux += toc(tick); } void Factor(cs* A, cs* spL, struct ETree** listaNodos, int cantNodos) { Frente** stack = (Frente**) my_malloc(cantNodos*sizeof(Frente*)); int n_stack = 0; for (int c = 0; c < cantNodos; c++) { struct ETree* tree = listaNodos[c]; if (c % 20 == 0) { //putchar('.'); //fflush(stdout); } if (tree->nHijos == 0) { continue; } Frente** F = (Frente**) my_malloc(tree->nHijos*sizeof(Frente*)); for (int i = 0; i < tree->nHijos; i++) { F[i] = (Frente*) my_malloc(sizeof(Frente)); F[i]->h_frente = MatrizGetFk(A, tree->hijos[i]->nodo, tree->hijos[i]->cols); F[i]->GPU = false; F[i]->h_frente->offset = tree->hijos[i]->cols; } if (GPU) { AllocFrenteGPU(F, tree->nHijos); } for (int i = 0; i < tree->nHijos; i++) { if (GPU) { MoverFrenteAGPU(F[i]); F[i]->GPU = true; } } Frente** U = (Frente**) my_malloc(sizeof(Frente*)*tree->nHijos); for (int i = 0; i < tree->nHijos; i++) { if (tree->hijos[tree->nHijos-1-i]->nHijos == 0) { U[tree->nHijos-1-i] = NULL; } else { U[tree->nHijos-1-i] = stack[--n_stack]; } } Frente** U2 = (Frente**) my_malloc(sizeof(Frente*)*tree->nHijos); for (int i = 0; i < tree->nHijos; i++) { int* q; int n; if (U[i] == NULL) { U2[i] = F[i]; //U2[i]->h_frente->offset = tree->hijos[i]->cols; } else { VectorMerge(F[i]->h_frente->q, F[i]->h_frente->n, U[i]->h_frente->q + U[i]->h_frente->offset, U[i]->h_frente->n - U[i]->h_frente->offset, &q, &n); VectorMergeIndices(q, n, F[i]->h_frente->q, F[i]->h_frente->n, &F[i]->h_frente->ql); VectorMergeIndices(q, n, U[i]->h_frente->q, U[i]->h_frente->n, &U[i]->h_frente->ql); U2[i] = (Frente*) my_malloc(sizeof(Frente)); U2[i]->GPU = false; U2[i]->h_frente = (Matriz*) my_malloc(sizeof(Matriz)); 
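				// Children are processed in postorder, so each child's update matrix sits on
				// top of the frontal stack when its parent is assembled; U[] was popped above
				// in reverse child order to match the push order.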
U2[i]->h_frente->n = n; U2[i]->h_frente->q = q; U2[i]->h_frente->k = tree->hijos[i]->nodo; U2[i]->h_frente->offset = tree->hijos[i]->cols; MatrizAlloc(U2[i]->h_frente); if (GPU) { AllocFrenteGPU(U2[i]); U2[i]->GPU = true; } if (GPU) { MoverFrenteQlAGPU(F[i]); MoverFrenteQlAGPU(U[i]); } F[i]->h_frente->offset = 0; ExtendAdd(F[i], U2[i], false); ExtendAdd(U[i], U2[i], false); } } FactorAux(U2, tree->nHijos, spL); int n = U2[0]->h_frente->n - U2[0]->h_frente->offset; int* q = (int*) my_malloc(n*sizeof(int)); memcpy(q, U2[0]->h_frente->q + U2[0]->h_frente->offset, n*sizeof(int)); for (int i = 1; i < tree->nHijos; i++) { int n2; int* q2; VectorMerge(q, n, U2[i]->h_frente->q + U2[i]->h_frente->offset, U2[i]->h_frente->n - U2[i]->h_frente->offset, &q2, &n2); my_free(q); n = n2; q = q2; } for (int i = 0; i < tree->nHijos; i++) { VectorMergeIndices(q, n, U2[i]->h_frente->q + U2[i]->h_frente->offset, U2[i]->h_frente->n - U2[i]->h_frente->offset, &U2[i]->h_frente->ql); } Frente* Uk = (Frente*) my_malloc(sizeof(Frente)); Uk->GPU = false; Uk->h_frente = (Matriz*) my_malloc(sizeof(Matriz)); Uk->h_frente->n = n; Uk->h_frente->q = q; Uk->h_frente->k = tree->nodo; Uk->h_frente->offset = 0; MatrizAlloc(Uk->h_frente); if (GPU) { AllocFrenteGPU(Uk); Uk->GPU = true; } for (int i = 0; i < tree->nHijos; i++) { if (GPU) { MoverFrenteQlAGPU(U2[i]); } ExtendAdd(U2[i], Uk, false); } FreeFrenteGPU(F, tree->nHijos); for (int i = 0; i < tree->nHijos; i++) { if (U[i] != NULL) { FreeFrenteGPU(U[i]); FreeFrenteGPU(U2[i]); } } my_free(U); my_free(U2); my_free(F); //fprintf(logger, "%i ", Uk->h_frente->n); stack[n_stack++] = Uk; } Frente* U = stack[--n_stack]; Frente* F = (Frente*) my_malloc(sizeof(Frente)); F->GPU = false; F->h_frente = MatrizGetFk(A, listaNodos[cantNodos-1]->nodo, listaNodos[cantNodos-1]->cols); F->h_frente->offset = /*listaNodos[cantNodos-1]->cols*/ 0; if (GPU) { AllocFrenteGPU(F); MoverFrenteAGPU(F/*, 0*/); F->GPU = true; } int n; int* q; VectorMerge(U->h_frente->q, U->h_frente->n, F->h_frente->q, F->h_frente->n, &q, &n); VectorMergeIndices(q, n, U->h_frente->q, U->h_frente->n, &U->h_frente->ql); VectorMergeIndices(q, n, F->h_frente->q, F->h_frente->n, &F->h_frente->ql); Frente* U2 = (Frente*) my_malloc(sizeof(Frente)); U2->GPU = false; U2->h_frente = (Matriz*) my_malloc(sizeof(Matriz)); U2->h_frente->n = n; U2->h_frente->q = q; U2->h_frente->k = listaNodos[cantNodos-1]->nodo; U2->h_frente->offset = listaNodos[cantNodos-1]->cols; MatrizAlloc(U2->h_frente); if (GPU) { AllocFrenteGPU(U2); U2->GPU = true; } if (GPU) { MoverFrenteQlAGPU(F); MoverFrenteQlAGPU(U); } ExtendAdd(F, U2, false); ExtendAdd(U, U2, false); my_free(F->h_frente->x); my_free(F->h_frente->q); my_free(F->h_frente); my_free(F); my_free(U->h_frente->x); my_free(U->h_frente); my_free(U); FactorAux(U2, spL); my_free(U2->h_frente->x); my_free(U2->h_frente->q); my_free(U2->h_frente); } void testVectorMerge() { int q1[] = {2, 5}; int q2[] = {0, 1, 2, 3, 4, 5}; int n; int* q; VectorMerge(q1, 2, q2, 6, &q, &n); VectorPrint(q, n); int* ql1; VectorMergeIndices(q, n, q1, 2, &ql1); VectorPrint(ql1, 2); } /* true for off-diagonal entries */ static int dropdiag (int i, int j, FLOTANTE aij, void *other) { return (i != j) ;} /* C = A + triu(A,1)' */ static cs *make_sym (cs *A) { cs *AT, *C ; AT = cs_transpose (A, 1) ; /* AT = A' */ cs_fkeep (AT, &dropdiag, NULL) ; /* drop diagonal entries from AT */ C = cs_add (A, AT, 1, 1) ; /* C = A+AT */ cs_spfree (AT) ; return (C) ; } /* infinity-norm of x */ static FLOTANTE norm (FLOTANTE *x, int n) { int i ; 
FLOTANTE normx = 0 ; for (i = 0 ; i < n ; i++) normx = CS_MAX (normx, fabs (x [i])) ; return (normx) ; } void help() { printf("mdgpu [-gpu|-cpu] [-amd] [-sn|-rn c] [-b bs] [-pad] matriz\n"); exit(1); } void test() { FILE* f = fopen("../../mat/nos4.mtx", "r"); cs* T = cs_load(f); cs* C = cs_compress(T); cs_spfree(T); Matriz* m = MatrizGetFk_3(C, 0, 5); MatrizPrint(m, "%3.4f "); } int main(int argc, char** argv) { mallopt(M_MMAP_MAX, 0); //test(); //exit(1); //printf("\nMemory: %i KB\n", memory_usage() ); cublasStatus status; status = cublasInit(); if (status != CUBLAS_STATUS_SUCCESS) { printf("Error en cublasInit(%i)", status); exit(1); } logger = fopen("salida.txt", "w"); clock_t mainTick = 0; clock_t factorTick = 0; clock_t loadTick = 0; mainTick = tic(); GPU = false; bool amd = false; bool supernodos = false; bool relaxednodos = false; int relaxednodosMaxCeros = 0; for (int i = 1; i < argc-1; i++) { //printf("%i -> %s\n", i, argv[i]); if (strcmp(argv[i], "-gpu") == 0) { //printf("gpu\n"); GPU = true; } else if (strcmp(argv[i], "-cpu") == 0) { //printf("cpu\n"); GPU = false; } else if (strcmp(argv[i], "-amd") == 0) { //printf("amd\n"); amd = true; } else if (strcmp(argv[i], "-sn") == 0) { //printf("sn\n"); supernodos = true; } else if (strcmp(argv[i], "-rn") == 0) { //printf("sn\n"); relaxednodos = true; relaxednodosMaxCeros = atoi(argv[i+1]); } else if (strcmp(argv[i], "-b") == 0) { block_size = atoi(argv[i+1]); } else if (strcmp(argv[i], "-pad") == 0) { pad = true; } else if (strcmp(argv[i], "-help") == 0) { help(); } } loadTick = tic(); FILE* f = fopen(argv[argc-1], "r"); cs* T = cs_load(f); cs* C_low = cs_compress(T); cs_spfree(T); loadTick = toc(loadTick); //printf("\nMemory: %i KB\n", memory_usage() ); ticksSymbolic = tic(); cs* A_low = NULL; cs* A = NULL; //MatrizSpy(C_low); int* amd_perm; int* amd_pinv; if (amd) { cs* C_up = cs_transpose(C_low, 1); //cs_spfree(C_low); //printf("\nC = \n"); //MatrizPrint(C, "%10.4lf "); amd_perm = cs_amd(1, C_up); amd_pinv = cs_pinv(amd_perm, C_up->n); cs* A_amd_U = cs_symperm(C_up, amd_pinv, 1); cs_spfree(C_up); A_low = cs_transpose(A_amd_U, 1); cs_spfree(A_amd_U); //printf("\nA_amd_U = \n"); //MatrizPrint(A_amd_U, "%10.4lf "); A = make_sym(A_low); //printf("\nA = \n"); //MatrizPrint(A, "%10.4lf "); } else { A_low = C_low; A = make_sym(A_low); } /*if (amd) { printf("amd_perm: "); VectorPrint(amd_perm, A->n); printf("amd_pinv: "); VectorPrint(amd_pinv, A->n); }*/ int* parent = cs_etree(A, 0); int* post = cs_post(parent, A->n); int* count = cs_counts(A, parent, post, 0); //printf("counts: "); //VectorPrint(count, A->n); //printf("sum counts: "); //VectorPrint(spL->p, A->n); //printf("etree: "); //VectorPrint(parent, A->n); //printf("post: "); //VectorPrint(post, A->n); struct ETree** listaNodos; //printf("\nMemory: %i KB\n", memory_usage() ); int cantNodos; struct ETree* tree = GetETree(A, parent, post, count, &listaNodos, &cantNodos); //printf("Memory ETree: %i KB\n", memory_usage() ); //PrintETree(tree, 0); //printf("\n---------------------\n\n"); //PrintETree(listaNodos[cantNodos-1], 0); printf("\nCant Nodos = %i\n", cantNodos); struct ETree** listaNodosSuper; int cantNodosSuper; if (supernodos) { GetSuperETree(listaNodos, cantNodos, &listaNodosSuper, &cantNodosSuper); printf("\nCant SuperNodos = %i\n", cantNodosSuper); } else { listaNodosSuper = listaNodos; cantNodosSuper = cantNodos; } //PrintETree(listaNodosSuper[cantNodosSuper-1], 0); if (relaxednodos) { int* permRelaxedETree; GetRelaxedETree(listaNodos, cantNodos, relaxednodosMaxCeros, 
count, &permRelaxedETree, &listaNodosSuper, &cantNodosSuper); my_free(listaNodos); printf("\nCant RelaxedNodos = %i\n", cantNodosSuper); int* ipermRelaxedETree = cs_pinv(permRelaxedETree, cantNodos); cs* tmp1 = cs_transpose(A_low, 1); cs_spfree(A_low); cs* tmp2 = cs_symperm(tmp1, ipermRelaxedETree, 1); cs_spfree(tmp1); A_low = cs_transpose(tmp2, 1); cs_spfree(tmp2); int* count2 = (int*)malloc(cantNodos*sizeof(int)); cs_pvec(permRelaxedETree, count, count2, cantNodos); count = count2; int* amd_perm2 = (int*)malloc(cantNodos*sizeof(int)); cs_pvec(permRelaxedETree, amd_perm, amd_perm2, cantNodos); amd_perm = amd_perm2; } //MatrizPrint(A, "%f "); //printf("\nMemory: %i KB\n", memory_usage() ); cs* spL = (cs*) my_malloc(sizeof(cs)); spL->n = A->n; spL->m = A->m; spL->p = (int*) my_malloc((spL->n+1)*sizeof(int)); int nz = 0; for (int i = 0; i < spL->n; i++) { spL->p[i] = nz; nz += count[i]; } spL->p[spL->n] = nz; spL->nzmax = nz; spL->nz = -1; spL->i = (int*) my_malloc(nz*sizeof(int)); memset(spL->i, 0, nz*sizeof(int)); //spL->x = (FLOTANTE*) my_malloc(nz*sizeof(FLOTANTE)); printf("L.nz = %i\n", nz); //printf("\nMemory: %i KB\n", memory_usage() ); my_free(parent); my_free(post); my_free(count); ticksSymbolic = toc(ticksSymbolic); cs_spfree(A); //mtrace(); factorTick = tic(); Factor(A_low, spL, listaNodosSuper, cantNodosSuper); factorTick = toc(factorTick); //muntrace(); printf("\n"); //MatrizSpy(spL); //cs_print(spL, 0); //MatrizPrint(logger, spL); //MatrizPrint(spL, "%f "); mainTick = toc(mainTick); printf("\n"); printf("MFlops = %.2f\n", flops/1000000.0); printf("MFlops/s = %.3f\n", flops/1000000.0/ticks2seg(ticksFactorAux)); printf("\n"); printf("Total = %.6f\n\n", ticks2seg(mainTick)); printf("Factor = %.6f Factorizacion numerica total\n", ticks2seg(factorTick)); printf("FactorAux = %.6f Factorizacion numerica solo calculo\n", ticks2seg(ticksFactorAux)); printf(" BLAS CPU = %.6f\n", ticks2seg(ticksFactorAux1)); printf(" BLAS GPU = %.6f\n", ticks2seg(ticksFactorAux2)); printf(" TRSM GPU = %.6f\n", ticks2seg(ticksTRSM_GPU)); printf(" GEMM GPU = %.6f\n", ticks2seg(ticksGEMM_GPU)); printf(" Otros = %.6f\n", ticks2seg(ticksFactorAux3)); printf("Memcpy = %.6f\n", ticks2seg(ticksMemcpy)+ticks2seg(ticksMemcpy2)+ticks2seg(ticksMemcpy21)+ticks2seg(ticksMemcpyX)); printf(" Memcpy2 = %.6f Copia paneles CPU<->GPU\n", ticks2seg(ticksMemcpy2+ticksMemcpy21)); //printf(" Memcpy21 = %.6f\n", ticks2seg(ticksMemcpy21)); printf(" MemcpyX = %.6f Copia inicial del frente CPU->GPU\n", ticks2seg(ticksMemcpyX)); printf(" Otros = %.6f\n", ticks2seg(ticksMemcpy)); //("MemcpyHost = %.6f\n", ticks2seg(ticksMemcpyHost)); printf("Merge = %.6f\n", ticks2seg(ticksMerge)); printf("GetFk = %.6f Formar el frente\n", ticks2seg(ticksGetFk)); printf("ExtendAdd = %.6f Suma extendida\n", ticks2seg(ticksExtendAdd)); printf("Malloc CPU = %.6f\n", ticks2seg(ticksMalloc)); printf("Free CPU = %.6f\n", ticks2seg(ticksFree)); printf("Malloc GPU = %.6f\n", ticks2seg(ticksMallocGPU)); printf("Free GPU = %.6f\n\n", ticks2seg(ticksFreeGPU)); printf("Load = %.6f\n", ticks2seg(loadTick)); printf("Symbolic = %.6f\n", ticks2seg(ticksSymbolic)); //printf("\ncountExtendAdd = %li\n", countExtendAdd); //printf("countGetFk = %i\n", countGetFk); //printf("bytesMemcpy2 = %li\n", bytesMemcpy2); //printf("bytesMemcpy21 = %li\n", bytesMemcpy21); fclose(logger); exit(1); A = make_sym(A_low); FLOTANTE* b = (FLOTANTE*) my_malloc(A->n*sizeof(FLOTANTE)); FLOTANTE* x = (FLOTANTE*) my_malloc(A->n*sizeof(FLOTANTE)); FLOTANTE* x2 = (FLOTANTE*) 
my_malloc(A->n*sizeof(FLOTANTE)); for (int i = 0; i < A->n; i++) { b[i] = 1 + ((FLOTANTE) i) / A->n; x[i] = b[i]; } if (amd) { cs_pvec(amd_perm, b, x, A->n); } cs_lsolve(spL, x); cs_ltsolve(spL, x); if (amd) { cs_ipvec(amd_perm, x, x2, A->n); } FLOTANTE* resid = (FLOTANTE*) my_malloc(A->n*sizeof(FLOTANTE)); for (int i = 0; i < A->n; i++) { resid[i] = -b[i]; } cs* C = make_sym(C_low); cs_gaxpy(C, x2, resid); printf ("resid: %8.2e\n", norm (resid,A->n) / (cs_norm (A) * norm (x,A->n) + norm (b,A->n))) ; printf ("resid: %8.2e\n", norm (resid,A->n)) ; cudaThreadExit(); cublasShutdown(); printf("FIN\n"); //getchar(); exit(0); }
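For readers checking the accuracy report at the end of main(): the two `resid:` lines combine the infinity-norm helper defined above with cs_norm (CSparse's matrix 1-norm), so, up to the AMD permutation bookkeeping, the first printed value is the scaled residual below and the second is its unscaled numerator. This is only a restatement of the existing printf calls in the notation of the code (A the symmetrized matrix, x the computed solution, b the right-hand side):

\[
  r \;=\; \frac{\lVert A x - b \rVert_\infty}{\lVert A \rVert_1 \, \lVert x \rVert_\infty + \lVert b \rVert_\infty}
\]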
1139f4b0f294dcaac5e055ed8cd0fda9ef546653.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "amir_cuda_util/cuda_util.h"
#include "deform_roi_pool.h"
#include "deform_roi_pool_cuda_kernel.cuh"

namespace amirstan {
namespace plugin {
using namespace amirstan::cuda;

template <typename scalar_t>
void DeformRoIPoolForwardCUDAKernelLauncher(
    scalar_t *input, scalar_t *rois, scalar_t *offset, scalar_t *output,
    int pooled_height, int pooled_width, int output_size, int channels,
    int height, int width, float spatial_scale, int sampling_ratio,
    float gamma, hipStream_t stream) {
  hipLaunchKernelGGL((deform_roi_pool_forward_cuda_kernel<scalar_t>),
                     dim3(GET_BLOCKS(output_size)), dim3(CUDA_NUM_THREADS), 0,
                     stream, output_size, input, rois, offset, output,
                     pooled_height, pooled_width,
                     static_cast<scalar_t>(spatial_scale), sampling_ratio,
                     static_cast<scalar_t>(gamma), channels, height, width);
}

void deform_roi_pool_forward(float *input, float *rois, float *offset,
                             float *output, int pooled_height,
                             int pooled_width, int output_size, int channels,
                             int height, int width, float spatial_scale,
                             int sampling_ratio, float gamma,
                             hipStream_t stream) {
  DeformRoIPoolForwardCUDAKernelLauncher<float>(
      input, rois, offset, output, pooled_height, pooled_width, output_size,
      channels, height, width, spatial_scale, sampling_ratio, gamma, stream);
}

}  // namespace plugin
}  // namespace amirstan
1139f4b0f294dcaac5e055ed8cd0fda9ef546653.cu
#include "amir_cuda_util/cuda_util.h"
#include "deform_roi_pool.h"
#include "deform_roi_pool_cuda_kernel.cuh"

namespace amirstan {
namespace plugin {
using namespace amirstan::cuda;

template <typename scalar_t>
void DeformRoIPoolForwardCUDAKernelLauncher(
    scalar_t *input, scalar_t *rois, scalar_t *offset, scalar_t *output,
    int pooled_height, int pooled_width, int output_size, int channels,
    int height, int width, float spatial_scale, int sampling_ratio,
    float gamma, cudaStream_t stream) {
  deform_roi_pool_forward_cuda_kernel<scalar_t>
      <<<GET_BLOCKS(output_size), CUDA_NUM_THREADS, 0, stream>>>(
          output_size, input, rois, offset, output, pooled_height,
          pooled_width, static_cast<scalar_t>(spatial_scale), sampling_ratio,
          static_cast<scalar_t>(gamma), channels, height, width);
}

void deform_roi_pool_forward(float *input, float *rois, float *offset,
                             float *output, int pooled_height,
                             int pooled_width, int output_size, int channels,
                             int height, int width, float spatial_scale,
                             int sampling_ratio, float gamma,
                             cudaStream_t stream) {
  DeformRoIPoolForwardCUDAKernelLauncher<float>(
      input, rois, offset, output, pooled_height, pooled_width, output_size,
      channels, height, width, spatial_scale, sampling_ratio, gamma, stream);
}

}  // namespace plugin
}  // namespace amirstan
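A minimal host-side sketch of how the plugin entry point above could be driven, in case it helps to see the argument order in context. The buffer sizes, the 5-float ROI layout (batch index plus corner coordinates), the (num_rois, 2, pooled_h, pooled_w) offset layout, and the assumption that deform_roi_pool.h declares the entry point are all illustrative choices; only the deform_roi_pool_forward signature itself comes from the file.

// Hypothetical driver for deform_roi_pool_forward (not part of the plugin).
// ROI and offset layouts are assumed, mirroring the usual mmcv convention.
#include <cuda_runtime.h>
#include "deform_roi_pool.h"

void example_deform_roi_pool() {
  const int channels = 1, height = 32, width = 32;
  const int pooled_h = 7, pooled_w = 7, num_rois = 1;
  const int output_size = num_rois * channels * pooled_h * pooled_w;

  float *d_input, *d_rois, *d_offset, *d_output;
  cudaMalloc(&d_input, channels * height * width * sizeof(float));
  cudaMalloc(&d_rois, num_rois * 5 * sizeof(float));  // assumed ROI layout
  cudaMalloc(&d_offset, num_rois * 2 * pooled_h * pooled_w * sizeof(float));
  cudaMalloc(&d_output, output_size * sizeof(float));
  cudaMemset(d_input, 0, channels * height * width * sizeof(float));
  cudaMemset(d_offset, 0, num_rois * 2 * pooled_h * pooled_w * sizeof(float));

  cudaStream_t stream;
  cudaStreamCreate(&stream);
  amirstan::plugin::deform_roi_pool_forward(
      d_input, d_rois, d_offset, d_output, pooled_h, pooled_w, output_size,
      channels, height, width, /*spatial_scale=*/1.0f, /*sampling_ratio=*/2,
      /*gamma=*/0.1f, stream);
  cudaStreamSynchronize(stream);

  cudaStreamDestroy(stream);
  cudaFree(d_input); cudaFree(d_rois); cudaFree(d_offset); cudaFree(d_output);
}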
5218d76f2557279de511ac0b2730fcd86052c119.hip
// !!! This is a file automatically generated by hipify!!! #include"header_simulation_engine.h" #define NUMITERATION 10 #define NUMPROJECTIONS 14 #define DETECTORWIDTH 100 #define DETECTORHEIGHT 100 #define NUMLOCALX 32 #define NUMLOCALY 32 #define PITCH 0.1 int main(){ char buffer[50]; int i, idxProjection, idxImigrationLocal, idxImigrationImage, *imageReal, *imageRealVertical, *imageRealHorizontal, idxIteration, objectPointNum=50*50*50, numLocal=NUMLOCALX*NUMLOCALY, detectorNumWidth=DETECTORWIDTH/PITCH, detectorNumHeight=DETECTORHEIGHT/PITCH, detectorNum=detectorNumWidth*detectorNumHeight, *cor, *cor_dev, numBlockAdjust=(numLocal*NUMPROJECTIONS)/32+1, driftX=detectorNumWidth/2-NUMLOCALX/2, driftY=detectorNumHeight/2-NUMLOCALY/2; float *object, *image, *imageVertical, *imageHorizontal, *image_dev, *imageVertical_dev, *imageHorizontal_dev, *psfPartial, *psfPartial_dev, psfBuffer, integrationAdjust, integrationPsf; FILE *fidRecon, *fidImageReal, *fidPSF; object=(float *)malloc(sizeof(float)*objectPointNum); image=(float *)malloc(sizeof(float)*detectorNumWidth*detectorNumHeight*NUMPROJECTIONS); imageVertical=(float *)malloc(sizeof(float)*detectorNumWidth*NUMPROJECTIONS); imageHorizontal=(float *)malloc(sizeof(float)*detectorNumWidth*NUMPROJECTIONS); imageReal=(int *)malloc(sizeof(int)*detectorNumWidth*detectorNumHeight*NUMPROJECTIONS); imageRealVertical=(int *)malloc(sizeof(int)*detectorNumWidth*NUMPROJECTIONS); imageRealHorizontal=(int *)malloc(sizeof(int)*detectorNumWidth*NUMPROJECTIONS); hipMalloc((void**)&image_dev,sizeof(float)*detectorNumWidth*detectorNumHeight*NUMPROJECTIONS); hipMalloc((void**)&imageVertical_dev,sizeof(float)*detectorNumWidth*NUMPROJECTIONS); hipMalloc((void**)&imageHorizontal_dev,sizeof(float)*detectorNumWidth*NUMPROJECTIONS); cor=(int *)malloc(sizeof(int)*2); hipMalloc((void**)&cor_dev,sizeof(int)*2); psfPartial=(float *)malloc(sizeof(float)*numLocal); hipMalloc((void**)&psfPartial_dev,sizeof(float)*numLocal); ///* fidImageReal=fopen("G:\\research\\data\\projectionImage.bin", "rb"); fread(imageReal, sizeof(int), detectorNumWidth*detectorNumHeight*NUMPROJECTIONS, fidImageReal); fclose(fidImageReal); //*/ ///* fidImageReal=fopen("G:\\research\\data\\projectionImageVerticalSlit.bin", "rb"); fread(imageRealVertical, sizeof(int), detectorNumWidth*NUMPROJECTIONS, fidImageReal); fclose(fidImageReal); fidImageReal=fopen("G:\\research\\data\\projectionImageHorizontalSlit.bin", "rb"); fread(imageRealHorizontal, sizeof(int), detectorNumWidth*NUMPROJECTIONS, fidImageReal); fclose(fidImageReal); //*/ for(i=0;i<objectPointNum;i++){ object[i]=1; } for(idxIteration=0;idxIteration<NUMITERATION;idxIteration++){ ///* for(i=0;i<detectorNumWidth*detectorNumHeight*NUMPROJECTIONS;i++){ image[i]=0; } hipMemcpy(image_dev, image, sizeof(float)*detectorNumWidth*detectorNumHeight*NUMPROJECTIONS, hipMemcpyHostToDevice); //*/ ///* for(i=0;i<detectorNumWidth*NUMPROJECTIONS;i++){ imageVertical[i]=0; } hipMemcpy(imageVertical_dev, imageVertical, sizeof(float)*detectorNumWidth*NUMPROJECTIONS, hipMemcpyHostToDevice); for(i=0;i<detectorNumWidth*NUMPROJECTIONS;i++){ imageHorizontal[i]=0; } hipMemcpy(imageHorizontal_dev, imageHorizontal, sizeof(float)*detectorNumWidth*NUMPROJECTIONS, hipMemcpyHostToDevice); forwardProjectionAllPoints(image, image_dev, cor, cor_dev, psfPartial, psfPartial_dev, NUMPROJECTIONS, object, objectPointNum, NUMLOCALX, NUMLOCALY, detectorNumWidth, detectorNumHeight, detectorNum, 'p'); forwardProjectionAllPoints(imageVertical, imageVertical_dev, cor, cor_dev, psfPartial, 
psfPartial_dev, NUMPROJECTIONS, object, objectPointNum, NUMLOCALX, NUMLOCALY, detectorNumWidth, detectorNumHeight, detectorNumWidth, 'v'); forwardProjectionAllPoints(imageHorizontal, imageHorizontal_dev, cor, cor_dev, psfPartial, psfPartial_dev, NUMPROJECTIONS, object, objectPointNum, NUMLOCALX, NUMLOCALY, detectorNumWidth, detectorNumHeight, detectorNumWidth, 'h'); for(i=0;i<objectPointNum;i++){ integrationAdjust=0; integrationPsf=0; for(idxProjection=0;idxProjection<NUMPROJECTIONS;idxProjection++){ sprintf(buffer, "G:\\research\\data\\PSF%d.bin", idxProjection); fidPSF=fopen(buffer, "rb"); fseek(fidPSF, ((numLocal+2)*i)*4, SEEK_SET); fread(cor, sizeof(int), 2, fidPSF); for(idxImigrationLocal=0;idxImigrationLocal<numLocal;idxImigrationLocal++){ fread(&psfBuffer, sizeof(float), 1, fidPSF); idxImigrationImage=(cor[0]+driftX+idxImigrationLocal/32)*detectorNumHeight+(cor[1]+driftY+idxImigrationLocal%32); if(idxImigrationImage>=0&&idxImigrationImage<detectorNum){ if(image[idxImigrationImage+detectorNum*idxProjection]){ integrationAdjust=integrationAdjust+imageReal[idxImigrationImage+detectorNum*idxProjection]/image[idxImigrationImage+detectorNum*idxProjection]*psfBuffer; integrationPsf=integrationPsf+psfBuffer; } } } fclose(fidPSF); ///* sprintf(buffer, "G:\\research\\data\\vslitPSF%d.bin", idxProjection); fidPSF=fopen(buffer, "rb"); fseek(fidPSF, ((NUMLOCALX+1)*i)*4, SEEK_SET); fread(cor, sizeof(int), 1, fidPSF); for(idxImigrationLocal=0;idxImigrationLocal<NUMLOCALX;idxImigrationLocal++){ fread(&psfBuffer, sizeof(float), 1, fidPSF); idxImigrationImage=cor[0]+driftX+idxImigrationLocal; if(idxImigrationImage>=0&&idxImigrationImage<detectorNumWidth){ if(imageVertical[idxImigrationImage+detectorNumWidth*idxProjection]){ integrationAdjust=integrationAdjust+imageRealVertical[idxImigrationImage+detectorNumWidth*idxProjection]/imageVertical[idxImigrationImage+detectorNumWidth*idxProjection]*psfBuffer; integrationPsf=integrationPsf+psfBuffer; } } } fclose(fidPSF); sprintf(buffer, "G:\\research\\data\\hslitPSF%d.bin", idxProjection); fidPSF=fopen(buffer, "rb"); fseek(fidPSF, ((NUMLOCALX+1)*i)*4, SEEK_SET); fread(cor, sizeof(int), 1, fidPSF); for(idxImigrationLocal=0;idxImigrationLocal<NUMLOCALX;idxImigrationLocal++){ fread(&psfBuffer, sizeof(float), 1, fidPSF); idxImigrationImage=cor[0]+driftX+idxImigrationLocal; if(idxImigrationImage>=0&&idxImigrationImage<detectorNumWidth){ if(imageHorizontal[idxImigrationImage+detectorNumWidth*idxProjection]){ integrationAdjust=integrationAdjust+imageRealHorizontal[idxImigrationImage+detectorNumWidth*idxProjection]/imageHorizontal[idxImigrationImage+detectorNumWidth*idxProjection]*psfBuffer; integrationPsf=integrationPsf+psfBuffer; } } } fclose(fidPSF); //*/ } object[i]=object[i]*integrationAdjust/integrationPsf; } printf("Iteration %d is finished!\n", idxIteration); fidRecon=fopen("G:\\research\\data\\recon.bin", "wb"); fwrite(object, sizeof(float), objectPointNum, fidRecon); fclose(fidRecon); } fidRecon=fopen("G:\\research\\data\\recon.bin", "wb"); fwrite(object, sizeof(float), objectPointNum, fidRecon); fclose(fidRecon); getchar(); }
5218d76f2557279de511ac0b2730fcd86052c119.cu
#include"header_simulation_engine.h" #define NUMITERATION 10 #define NUMPROJECTIONS 14 #define DETECTORWIDTH 100 #define DETECTORHEIGHT 100 #define NUMLOCALX 32 #define NUMLOCALY 32 #define PITCH 0.1 int main(){ char buffer[50]; int i, idxProjection, idxImigrationLocal, idxImigrationImage, *imageReal, *imageRealVertical, *imageRealHorizontal, idxIteration, objectPointNum=50*50*50, numLocal=NUMLOCALX*NUMLOCALY, detectorNumWidth=DETECTORWIDTH/PITCH, detectorNumHeight=DETECTORHEIGHT/PITCH, detectorNum=detectorNumWidth*detectorNumHeight, *cor, *cor_dev, numBlockAdjust=(numLocal*NUMPROJECTIONS)/32+1, driftX=detectorNumWidth/2-NUMLOCALX/2, driftY=detectorNumHeight/2-NUMLOCALY/2; float *object, *image, *imageVertical, *imageHorizontal, *image_dev, *imageVertical_dev, *imageHorizontal_dev, *psfPartial, *psfPartial_dev, psfBuffer, integrationAdjust, integrationPsf; FILE *fidRecon, *fidImageReal, *fidPSF; object=(float *)malloc(sizeof(float)*objectPointNum); image=(float *)malloc(sizeof(float)*detectorNumWidth*detectorNumHeight*NUMPROJECTIONS); imageVertical=(float *)malloc(sizeof(float)*detectorNumWidth*NUMPROJECTIONS); imageHorizontal=(float *)malloc(sizeof(float)*detectorNumWidth*NUMPROJECTIONS); imageReal=(int *)malloc(sizeof(int)*detectorNumWidth*detectorNumHeight*NUMPROJECTIONS); imageRealVertical=(int *)malloc(sizeof(int)*detectorNumWidth*NUMPROJECTIONS); imageRealHorizontal=(int *)malloc(sizeof(int)*detectorNumWidth*NUMPROJECTIONS); cudaMalloc((void**)&image_dev,sizeof(float)*detectorNumWidth*detectorNumHeight*NUMPROJECTIONS); cudaMalloc((void**)&imageVertical_dev,sizeof(float)*detectorNumWidth*NUMPROJECTIONS); cudaMalloc((void**)&imageHorizontal_dev,sizeof(float)*detectorNumWidth*NUMPROJECTIONS); cor=(int *)malloc(sizeof(int)*2); cudaMalloc((void**)&cor_dev,sizeof(int)*2); psfPartial=(float *)malloc(sizeof(float)*numLocal); cudaMalloc((void**)&psfPartial_dev,sizeof(float)*numLocal); ///* fidImageReal=fopen("G:\\research\\data\\projectionImage.bin", "rb"); fread(imageReal, sizeof(int), detectorNumWidth*detectorNumHeight*NUMPROJECTIONS, fidImageReal); fclose(fidImageReal); //*/ ///* fidImageReal=fopen("G:\\research\\data\\projectionImageVerticalSlit.bin", "rb"); fread(imageRealVertical, sizeof(int), detectorNumWidth*NUMPROJECTIONS, fidImageReal); fclose(fidImageReal); fidImageReal=fopen("G:\\research\\data\\projectionImageHorizontalSlit.bin", "rb"); fread(imageRealHorizontal, sizeof(int), detectorNumWidth*NUMPROJECTIONS, fidImageReal); fclose(fidImageReal); //*/ for(i=0;i<objectPointNum;i++){ object[i]=1; } for(idxIteration=0;idxIteration<NUMITERATION;idxIteration++){ ///* for(i=0;i<detectorNumWidth*detectorNumHeight*NUMPROJECTIONS;i++){ image[i]=0; } cudaMemcpy(image_dev, image, sizeof(float)*detectorNumWidth*detectorNumHeight*NUMPROJECTIONS, cudaMemcpyHostToDevice); //*/ ///* for(i=0;i<detectorNumWidth*NUMPROJECTIONS;i++){ imageVertical[i]=0; } cudaMemcpy(imageVertical_dev, imageVertical, sizeof(float)*detectorNumWidth*NUMPROJECTIONS, cudaMemcpyHostToDevice); for(i=0;i<detectorNumWidth*NUMPROJECTIONS;i++){ imageHorizontal[i]=0; } cudaMemcpy(imageHorizontal_dev, imageHorizontal, sizeof(float)*detectorNumWidth*NUMPROJECTIONS, cudaMemcpyHostToDevice); forwardProjectionAllPoints(image, image_dev, cor, cor_dev, psfPartial, psfPartial_dev, NUMPROJECTIONS, object, objectPointNum, NUMLOCALX, NUMLOCALY, detectorNumWidth, detectorNumHeight, detectorNum, 'p'); forwardProjectionAllPoints(imageVertical, imageVertical_dev, cor, cor_dev, psfPartial, psfPartial_dev, NUMPROJECTIONS, object, objectPointNum, 
NUMLOCALX, NUMLOCALY, detectorNumWidth, detectorNumHeight, detectorNumWidth, 'v'); forwardProjectionAllPoints(imageHorizontal, imageHorizontal_dev, cor, cor_dev, psfPartial, psfPartial_dev, NUMPROJECTIONS, object, objectPointNum, NUMLOCALX, NUMLOCALY, detectorNumWidth, detectorNumHeight, detectorNumWidth, 'h'); for(i=0;i<objectPointNum;i++){ integrationAdjust=0; integrationPsf=0; for(idxProjection=0;idxProjection<NUMPROJECTIONS;idxProjection++){ sprintf(buffer, "G:\\research\\data\\PSF%d.bin", idxProjection); fidPSF=fopen(buffer, "rb"); fseek(fidPSF, ((numLocal+2)*i)*4, SEEK_SET); fread(cor, sizeof(int), 2, fidPSF); for(idxImigrationLocal=0;idxImigrationLocal<numLocal;idxImigrationLocal++){ fread(&psfBuffer, sizeof(float), 1, fidPSF); idxImigrationImage=(cor[0]+driftX+idxImigrationLocal/32)*detectorNumHeight+(cor[1]+driftY+idxImigrationLocal%32); if(idxImigrationImage>=0&&idxImigrationImage<detectorNum){ if(image[idxImigrationImage+detectorNum*idxProjection]){ integrationAdjust=integrationAdjust+imageReal[idxImigrationImage+detectorNum*idxProjection]/image[idxImigrationImage+detectorNum*idxProjection]*psfBuffer; integrationPsf=integrationPsf+psfBuffer; } } } fclose(fidPSF); ///* sprintf(buffer, "G:\\research\\data\\vslitPSF%d.bin", idxProjection); fidPSF=fopen(buffer, "rb"); fseek(fidPSF, ((NUMLOCALX+1)*i)*4, SEEK_SET); fread(cor, sizeof(int), 1, fidPSF); for(idxImigrationLocal=0;idxImigrationLocal<NUMLOCALX;idxImigrationLocal++){ fread(&psfBuffer, sizeof(float), 1, fidPSF); idxImigrationImage=cor[0]+driftX+idxImigrationLocal; if(idxImigrationImage>=0&&idxImigrationImage<detectorNumWidth){ if(imageVertical[idxImigrationImage+detectorNumWidth*idxProjection]){ integrationAdjust=integrationAdjust+imageRealVertical[idxImigrationImage+detectorNumWidth*idxProjection]/imageVertical[idxImigrationImage+detectorNumWidth*idxProjection]*psfBuffer; integrationPsf=integrationPsf+psfBuffer; } } } fclose(fidPSF); sprintf(buffer, "G:\\research\\data\\hslitPSF%d.bin", idxProjection); fidPSF=fopen(buffer, "rb"); fseek(fidPSF, ((NUMLOCALX+1)*i)*4, SEEK_SET); fread(cor, sizeof(int), 1, fidPSF); for(idxImigrationLocal=0;idxImigrationLocal<NUMLOCALX;idxImigrationLocal++){ fread(&psfBuffer, sizeof(float), 1, fidPSF); idxImigrationImage=cor[0]+driftX+idxImigrationLocal; if(idxImigrationImage>=0&&idxImigrationImage<detectorNumWidth){ if(imageHorizontal[idxImigrationImage+detectorNumWidth*idxProjection]){ integrationAdjust=integrationAdjust+imageRealHorizontal[idxImigrationImage+detectorNumWidth*idxProjection]/imageHorizontal[idxImigrationImage+detectorNumWidth*idxProjection]*psfBuffer; integrationPsf=integrationPsf+psfBuffer; } } } fclose(fidPSF); //*/ } object[i]=object[i]*integrationAdjust/integrationPsf; } printf("Iteration %d is finished!\n", idxIteration); fidRecon=fopen("G:\\research\\data\\recon.bin", "wb"); fwrite(object, sizeof(float), objectPointNum, fidRecon); fclose(fidRecon); } fidRecon=fopen("G:\\research\\data\\recon.bin", "wb"); fwrite(object, sizeof(float), objectPointNum, fidRecon); fclose(fidRecon); getchar(); }
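The voxel update applied at the bottom of each iteration above, object[i] = object[i]*integrationAdjust/integrationPsf, has the standard ML-EM form, accumulated jointly over the full-detector ('p'), vertical-slit ('v') and horizontal-slit ('h') projections. Writing p_{ij} for the PSF weight streamed from the per-projection files, y_j for the measured counts (the imageReal* arrays) and \hat{y}_j^{(k)} for the current forward projection (the image* arrays), and with both sums skipping bins whose forward projection is zero exactly as the code does, the loop computes roughly:

\[
  x_i^{(k+1)} \;=\; x_i^{(k)} \,
  \frac{\sum_j p_{ij}\, y_j / \hat{y}_j^{(k)}}{\sum_j p_{ij}}
\]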
46dbbf2bcd301229492c595dbb166660401b2b41.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <fstream> #include <cutil.h> #include <types.h> //#include <classical/strength/ahat.h> //#include <classical/strength/all.h> #include <classical/strength/affinity.h> #include <thrust/detail/integer_traits.h> #include <float.h> #include <specific_spmv.h> #include <util.h> #include <thrust/iterator/transform_iterator.h> #include <thrust/transform.h> #include <thrust/logical.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/random.h> #include <sm_utils.inl> /* * Note: * This implementation assumes that off-diag entries all have the opposite sign * comparing to the diag entry. This is true for most practical cases. * It would even work if the offending off-diag entries are just a few. * But if there are many off-diag entries violate this assumption, * the interpolation based on this strength would be inaccurate. * This is explained in "Intro to Algebraic multigrid" by K. Stuben. 
*/ namespace amgx { namespace { int level_counter = 0; bool is_random_initialized = false; } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> Strength_Affinity<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::Strength_Affinity(AMG_Config &cfg, const std::string &cfg_scope) : Strength_AffinityBase<TConfig_d>(cfg, cfg_scope) { AMG_Config default_cfg; std::string default_cfg_scope = "default"; affinity_iters = cfg.AMG_Config::template getParameter<int>("affinity_iterations", cfg_scope); n_TV = cfg.AMG_Config::template getParameter<int>("affinity_vectors", cfg_scope); if (n_TV > 32) { FatalError("Strength affinity: Test vectors > 32 is not supported, exiting", AMGX_ERR_NOT_SUPPORTED_TARGET); } solver = new MulticolorGaussSeidelSolver<TConfig_d>(cfg, cfg_scope); this->solver->set_max_iters(affinity_iters); } /************************************************************************* * "random" hash function for both device and host ************************************************************************/ __host__ __device__ __forceinline__ static float ourHash(const int i) { unsigned int a = i; a = (a + 0x7ed55d16) + (a << 12); a = (a ^ 0xc761c23c) + (a >> 19); a = (a + 0x165667b1) + (a << 5); a = (a ^ 0xd3a2646c) + (a << 9); a = (a + 0xfd7046c5) + (a << 3); a = (a ^ 0xb55a4f09) + (a >> 16); return (a ^ 0x4a51e590) / (float)UINT_MAX; } struct prg { float a, b; __host__ __device__ prg(float _a = -1.f, float _b = 1.f) : a(_a), b(_b) {}; __host__ __device__ float operator()(const unsigned int n) const { #if 1 thrust::default_random_engine rng; thrust::uniform_real_distribution<float> dist(a, b); rng.discard(n); return dist(rng); #else float ru = ourHash(n); return (ru - 0.5f) * 2.0f; #endif } }; template <class Vector> void initRandom(Vector &vec, int size) { vec.resize(size); thrust::counting_iterator<unsigned int> index_sequence_begin(0); thrust::transform(index_sequence_begin, index_sequence_begin + size, vec.begin(), prg(-1.f, 1.f)); } // Device utils template <class T> static __device__ __inline__ T cahedRead (const T *ar) { return utils::Ld<utils::LD_NC>::load(ar); } // ! 
shfl is defined for int/float only in arch 600 template< typename ValueType > __forceinline__ __device__ void reduce_inwarp_mul( const ValueType vValueA, const ValueType vValueB, const int vecId, const int n_per_warp, volatile ValueType *smem, double *s_xy) { ValueType sum = vValueA * vValueB; #pragma unroll for ( int offset = 16 / n_per_warp ; offset > 0 ; offset /= 2 ) { sum += utils::shfl_xor(sum, offset, 16); } *s_xy = sum; } template< typename ValueType > __forceinline__ __device__ void reduce_inblock_mul( const int blockSize, const ValueType vValueA, const ValueType vValueB, const int vecId, const int n_per_block, volatile ValueType *smem, double *s_xy) { ValueType sum = vValueA * vValueB; smem[threadIdx.x] = sum; __syncthreads(); if (blockSize >= 512) { if (vecId < 256 / n_per_block) { smem[threadIdx.x] = sum = sum + smem[threadIdx.x + 256 / n_per_block]; } __syncthreads(); } if (blockSize >= 256) { if (vecId < 128 / n_per_block) { smem[threadIdx.x] = sum = sum + smem[threadIdx.x + 128 / n_per_block]; } __syncthreads(); } if (blockSize >= 128) { if (vecId < 64 / n_per_block) { smem[threadIdx.x] = sum = sum + smem[threadIdx.x + 64 / n_per_block]; } __syncthreads(); } #pragma unroll for ( int offset = 16 ; offset > 0 ; offset /= 2 ) if ( vecId < offset ) { smem[threadIdx.x] = sum = sum + smem[threadIdx.x + offset]; } if ( vecId == 0 ) { // If laneId=0, then sum is in smem[threadIdx.x]. *s_xy = sum; } } /************************************************************************* * Computes affinity matrix (device) ************************************************************************/ template< typename IndexType, typename ValueTypeA, typename ValueTypeB, int kCtaSize > __global__ void computeAffinity_1x1_Kernel(const int *A_rows, const int *A_cols, const ValueTypeB *X, const int nTV, const int A_num_rows, ValueTypeA *affinity ) { const ValueTypeB epsilon = 1.e-12; const int tid = blockDim.x * blockIdx.x + threadIdx.x; const int vid = tid % nTV; const int num_vecs_per_warp = 32 / nTV; const int num_rows_per_iter = gridDim.x * blockDim.x / nTV; //const int num_vecs_per_block = blockDim.x / nTV; ValueTypeB *smem = NULL; double s_xx, s_xy, s_yy; for ( int aRowId = tid / nTV ; aRowId < A_num_rows ; aRowId += num_rows_per_iter ) { const int aRowBegin = A_rows[aRowId]; const int aRowEnd = A_rows[aRowId + 1]; ValueTypeB vValueA = X[aRowId * nTV + vid]; reduce_inwarp_mul(vValueA, vValueA, vid, num_vecs_per_warp, smem, &s_xx); #if 0 if (num_vecs_per_warp > 0) { reduce_inwarp_mul(vValueA, vValueA, vid, num_vecs_per_warp, smem, &s_xx); } else { reduce_inblock_mul(kCtaSize, vValueA, vValueA, vid, num_vecs_per_block, smem, &s_xx); } #endif for ( IndexType aRowIt = aRowBegin ; aRowIt < aRowEnd; aRowIt++ ) { IndexType aColId = A_cols[aRowIt]; if (aColId != aRowId) { ValueTypeB vValueB = cahedRead(X + aColId * nTV + vid); reduce_inwarp_mul(vValueA, vValueB, vid, num_vecs_per_warp, smem, &s_xy); reduce_inwarp_mul(vValueB, vValueB, vid, num_vecs_per_warp, smem, &s_yy); if (vid == 0) { affinity[aRowIt] = (s_xy * s_xy / (s_xx * s_yy + epsilon)); } } } } } /************************************************************************* * Computes the strength matrix and the connection weights (device) ************************************************************************/ template< typename IndexType, typename ValueType, int kCtaSize, bool singleGPU > __global__ void computeStrongConnectionsAndWeightsFromAffinityKernel( const int *A_rows, const int *A_cols, const ValueType *A_vals, int A_num_rows, bool *s_con, 
float *weights, ValueType alpha, int64_t base_index) { // One warp works on each row and hence one iteration handles // num_warps*numBlock rows. This means atomicAdd() is inevitable. const int num_warps = kCtaSize / 32; const int num_rows_per_iter = num_warps * gridDim.x; __shared__ volatile ValueType smem[kCtaSize]; __shared__ volatile ValueType s_threshold[num_warps]; const int warpId = threadIdx.x / 32; const int laneId = threadIdx.x % 32; for ( int aRowId = blockIdx.x * num_warps + warpId ; aRowId < A_num_rows ; aRowId += num_rows_per_iter ) { ValueType maxVal(0); // get diagonal, min/max off-idagonals const int aRowBegin = A_rows[aRowId ]; const int aRowEnd = A_rows[aRowId + 1]; for ( IndexType aRowIt = aRowBegin + laneId ; utils::any( aRowIt < aRowEnd ) ; aRowIt += 32 ) { IndexType aColId = aRowIt < aRowEnd ? A_cols[aRowIt] : -1; ValueType aValue = aRowIt < aRowEnd ? A_vals[aRowIt] : ValueType(0); bool is_off_diagonal = aRowIt < aRowEnd && aColId != aRowId; if ( is_off_diagonal ) { maxVal = max( maxVal, aValue ); } } // init weights[] with a random number if ( laneId == 0 ) { if ( singleGPU ) { atomicAdd( &weights[aRowId], ourHash(aRowId) ); } else { atomicAdd( &weights[aRowId], ourHash( (int) base_index + aRowId) ); } } smem[threadIdx.x] = maxVal; #pragma unroll for ( int offset = 16 ; offset > 0 ; offset /= 2 ) if ( laneId < offset ) { smem[threadIdx.x] = maxVal = max( maxVal, smem[threadIdx.x + offset] ); } if ( laneId == 0 ) { // If laneId=0, then maxVal or minVal is in smem[threadIdx.x]. s_threshold[warpId] = smem[threadIdx.x] * alpha; } // sum of the column of S for ( IndexType aRowIt = aRowBegin + laneId ; utils::any( aRowIt < aRowEnd ) ; aRowIt += 32 ) { IndexType aColId = aRowIt < aRowEnd ? A_cols[aRowIt] : -1; ValueType aValue = aRowIt < aRowEnd ? 
A_vals[aRowIt] : ValueType(0); bool is_off_diagonal = aRowIt < aRowEnd && aColId != aRowId; bool is_strongly_connected = is_off_diagonal && (aValue > s_threshold[warpId]); if ( is_strongly_connected && aRowIt < aRowEnd && aColId < A_num_rows) { atomicAdd( &weights[aColId], 1.0f ); } if ( aRowIt < aRowEnd ) { s_con[aRowIt] = is_strongly_connected; } } } } // Save to file template <class T_Config> void dump_matrix_vector(const char *fname, const Matrix<T_Config> &A, const Vector<T_Config> &solution) { unsigned int num_nz = A.get_num_nz(); unsigned int num_rows = A.get_num_rows(); unsigned int vec_sz = solution.size(); std::ofstream fout; fout.open(fname, std::ofstream::out | std::ofstream::app); fout << num_rows << " " << num_nz << " " << vec_sz << std::endl; typename Vector<T_Config>::value_type val; for (int i = 0; i < num_rows + 1; i++) { fout << A.row_offsets[i] << std::endl; } for (int i = 0; i < num_nz; i++) { fout << A.col_indices[i] << std::endl; } fout.precision(14); fout.width(16); for (int i = 0; i < num_nz; i++) { val = A.values[i]; fout << val << std::endl; } for (int i = 0; i < vec_sz; i++) { val = solution[i]; fout << val << std::endl; } fout << std::endl; fout.close(); } /************************************************************************* * Computes the strength matrix and the connection weights (device) ************************************************************************/ template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void Strength_Affinity<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >:: computeStrongConnectionsAndWeights_1x1(Matrix_d &A, BVector &s_con, FVector &weights, const double max_row_sum) { typedef TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> TConfig_d; typedef typename Matrix_d::index_type IndexType; typedef typename Matrix_d::value_type ValueTypeA; typedef typename Vector<TConfig_d>::value_type ValueTypeB; bool level_opt_flag = ((int) max_row_sum != -1); m_aff.copy(A); if (!is_random_initialized) { unsigned int nTV_num_rows = this->n_TV * A.get_num_rows(); m_rhs.resize(nTV_num_rows, 0.0); initRandom(m_x, nTV_num_rows); if (level_opt_flag) { is_random_initialized = true; } level_counter = n_TV; } else { this->n_TV = min(level_counter, 32); unsigned int nTV_num_rows = this->n_TV * A.get_num_rows(); m_rhs.resize(nTV_num_rows, 0.0); if (m_x.size() < nTV_num_rows) { initRandom(m_x, nTV_num_rows); } } level_counter *= 2; this->solver->setup_no_throw(m_aff, false); TimerCPU timer(true); timer.start(); for (int iter = 0; iter < affinity_iters; iter ++) { this->solver->batch_smooth_1x1_fast(m_aff, this->n_TV, m_rhs, m_x); } hipDeviceSynchronize(); double elapsed = timer.elapsed(); // End of GS check // get the raw pointers for everything I need const IndexType *offsets_ptr = m_aff.row_offsets.raw(); const IndexType *column_indices_ptr = m_aff.col_indices.raw(); bool *s_con_ptr = s_con.raw(); float *weights_ptr = weights.raw(); ValueTypeA *m_aff_values_ptr = m_aff.values.raw(); //m_aff_values.raw(); ValueTypeB *m_x_ptr = m_x.raw(); // choose a blocksize. 
Use 1 warp per row const int blockSize = 256; const int numWarps = blockSize / 32; const int numBlocks = (int) (A.get_num_rows() + numWarps - 1) / numWarps; if (A.get_num_rows() > 0) { hipLaunchKernelGGL(( computeAffinity_1x1_Kernel<IndexType, ValueTypeA, ValueTypeB, blockSize>) , dim3(numBlocks), dim3(blockSize), 0, 0, A.row_offsets.raw(), A.col_indices.raw(), m_x_ptr, this->n_TV, A.get_num_rows(), m_aff_values_ptr ); if (A.is_matrix_singleGPU()) hipLaunchKernelGGL(( computeStrongConnectionsAndWeightsFromAffinityKernel<IndexType, ValueTypeA, blockSize, true>) , dim3(numBlocks), dim3(blockSize), 0, 0, m_aff.row_offsets.raw(), m_aff.col_indices.raw(), m_aff_values_ptr, m_aff.get_num_rows(), s_con.raw(), weights.raw(), this->alpha, int64_t(0)); else hipLaunchKernelGGL(( computeStrongConnectionsAndWeightsFromAffinityKernel<IndexType, ValueTypeA, blockSize, false>) , dim3(numBlocks), dim3(blockSize), 0, 0, m_aff.row_offsets.raw(), m_aff.col_indices.raw(), m_aff_values_ptr, m_aff.get_num_rows(), s_con.raw(), weights.raw(), this->alpha, A.manager->base_index()); } if (!A.is_matrix_singleGPU() && A.currentView() == OWNED) { // Need to add neighbors contribution to my weights weights.dirtybit = 1; A.manager->add_from_halo(weights, weights.tag); } cudaCheckError(); } /**************************************** * Explict instantiations ***************************************/ #define AMGX_CASE_LINE(CASE) template class Strength_AffinityBase<TemplateMode<CASE>::Type>; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE #define AMGX_CASE_LINE(CASE) template class Strength_Affinity<TemplateMode<CASE>::Type>; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE } // namespace amgx
46dbbf2bcd301229492c595dbb166660401b2b41.cu
/* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <fstream> #include <cutil.h> #include <types.h> //#include <classical/strength/ahat.h> //#include <classical/strength/all.h> #include <classical/strength/affinity.h> #include <thrust/detail/integer_traits.h> #include <float.h> #include <specific_spmv.h> #include <util.h> #include <thrust/iterator/transform_iterator.h> #include <thrust/transform.h> #include <thrust/logical.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/random.h> #include <sm_utils.inl> /* * Note: * This implementation assumes that off-diag entries all have the opposite sign * comparing to the diag entry. This is true for most practical cases. * It would even work if the offending off-diag entries are just a few. * But if there are many off-diag entries violate this assumption, * the interpolation based on this strength would be inaccurate. * This is explained in "Intro to Algebraic multigrid" by K. Stuben. 
*/ namespace amgx { namespace { int level_counter = 0; bool is_random_initialized = false; } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> Strength_Affinity<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::Strength_Affinity(AMG_Config &cfg, const std::string &cfg_scope) : Strength_AffinityBase<TConfig_d>(cfg, cfg_scope) { AMG_Config default_cfg; std::string default_cfg_scope = "default"; affinity_iters = cfg.AMG_Config::template getParameter<int>("affinity_iterations", cfg_scope); n_TV = cfg.AMG_Config::template getParameter<int>("affinity_vectors", cfg_scope); if (n_TV > 32) { FatalError("Strength affinity: Test vectors > 32 is not supported, exiting", AMGX_ERR_NOT_SUPPORTED_TARGET); } solver = new MulticolorGaussSeidelSolver<TConfig_d>(cfg, cfg_scope); this->solver->set_max_iters(affinity_iters); } /************************************************************************* * "random" hash function for both device and host ************************************************************************/ __host__ __device__ __forceinline__ static float ourHash(const int i) { unsigned int a = i; a = (a + 0x7ed55d16) + (a << 12); a = (a ^ 0xc761c23c) + (a >> 19); a = (a + 0x165667b1) + (a << 5); a = (a ^ 0xd3a2646c) + (a << 9); a = (a + 0xfd7046c5) + (a << 3); a = (a ^ 0xb55a4f09) + (a >> 16); return (a ^ 0x4a51e590) / (float)UINT_MAX; } struct prg { float a, b; __host__ __device__ prg(float _a = -1.f, float _b = 1.f) : a(_a), b(_b) {}; __host__ __device__ float operator()(const unsigned int n) const { #if 1 thrust::default_random_engine rng; thrust::uniform_real_distribution<float> dist(a, b); rng.discard(n); return dist(rng); #else float ru = ourHash(n); return (ru - 0.5f) * 2.0f; #endif } }; template <class Vector> void initRandom(Vector &vec, int size) { vec.resize(size); thrust::counting_iterator<unsigned int> index_sequence_begin(0); thrust::transform(index_sequence_begin, index_sequence_begin + size, vec.begin(), prg(-1.f, 1.f)); } // Device utils template <class T> static __device__ __inline__ T cahedRead (const T *ar) { return utils::Ld<utils::LD_NC>::load(ar); } // ! 
shfl is defined for int/float only in arch 600 template< typename ValueType > __forceinline__ __device__ void reduce_inwarp_mul( const ValueType vValueA, const ValueType vValueB, const int vecId, const int n_per_warp, volatile ValueType *smem, double *s_xy) { ValueType sum = vValueA * vValueB; #pragma unroll for ( int offset = 16 / n_per_warp ; offset > 0 ; offset /= 2 ) { sum += utils::shfl_xor(sum, offset, 16); } *s_xy = sum; } template< typename ValueType > __forceinline__ __device__ void reduce_inblock_mul( const int blockSize, const ValueType vValueA, const ValueType vValueB, const int vecId, const int n_per_block, volatile ValueType *smem, double *s_xy) { ValueType sum = vValueA * vValueB; smem[threadIdx.x] = sum; __syncthreads(); if (blockSize >= 512) { if (vecId < 256 / n_per_block) { smem[threadIdx.x] = sum = sum + smem[threadIdx.x + 256 / n_per_block]; } __syncthreads(); } if (blockSize >= 256) { if (vecId < 128 / n_per_block) { smem[threadIdx.x] = sum = sum + smem[threadIdx.x + 128 / n_per_block]; } __syncthreads(); } if (blockSize >= 128) { if (vecId < 64 / n_per_block) { smem[threadIdx.x] = sum = sum + smem[threadIdx.x + 64 / n_per_block]; } __syncthreads(); } #pragma unroll for ( int offset = 16 ; offset > 0 ; offset /= 2 ) if ( vecId < offset ) { smem[threadIdx.x] = sum = sum + smem[threadIdx.x + offset]; } if ( vecId == 0 ) { // If laneId=0, then sum is in smem[threadIdx.x]. *s_xy = sum; } } /************************************************************************* * Computes affinity matrix (device) ************************************************************************/ template< typename IndexType, typename ValueTypeA, typename ValueTypeB, int kCtaSize > __global__ void computeAffinity_1x1_Kernel(const int *A_rows, const int *A_cols, const ValueTypeB *X, const int nTV, const int A_num_rows, ValueTypeA *affinity ) { const ValueTypeB epsilon = 1.e-12; const int tid = blockDim.x * blockIdx.x + threadIdx.x; const int vid = tid % nTV; const int num_vecs_per_warp = 32 / nTV; const int num_rows_per_iter = gridDim.x * blockDim.x / nTV; //const int num_vecs_per_block = blockDim.x / nTV; ValueTypeB *smem = NULL; double s_xx, s_xy, s_yy; for ( int aRowId = tid / nTV ; aRowId < A_num_rows ; aRowId += num_rows_per_iter ) { const int aRowBegin = A_rows[aRowId]; const int aRowEnd = A_rows[aRowId + 1]; ValueTypeB vValueA = X[aRowId * nTV + vid]; reduce_inwarp_mul(vValueA, vValueA, vid, num_vecs_per_warp, smem, &s_xx); #if 0 if (num_vecs_per_warp > 0) { reduce_inwarp_mul(vValueA, vValueA, vid, num_vecs_per_warp, smem, &s_xx); } else { reduce_inblock_mul(kCtaSize, vValueA, vValueA, vid, num_vecs_per_block, smem, &s_xx); } #endif for ( IndexType aRowIt = aRowBegin ; aRowIt < aRowEnd; aRowIt++ ) { IndexType aColId = A_cols[aRowIt]; if (aColId != aRowId) { ValueTypeB vValueB = cahedRead(X + aColId * nTV + vid); reduce_inwarp_mul(vValueA, vValueB, vid, num_vecs_per_warp, smem, &s_xy); reduce_inwarp_mul(vValueB, vValueB, vid, num_vecs_per_warp, smem, &s_yy); if (vid == 0) { affinity[aRowIt] = (s_xy * s_xy / (s_xx * s_yy + epsilon)); } } } } } /************************************************************************* * Computes the strength matrix and the connection weights (device) ************************************************************************/ template< typename IndexType, typename ValueType, int kCtaSize, bool singleGPU > __global__ void computeStrongConnectionsAndWeightsFromAffinityKernel( const int *A_rows, const int *A_cols, const ValueType *A_vals, int A_num_rows, bool *s_con, 
float *weights, ValueType alpha, int64_t base_index) { // One warp works on each row and hence one iteration handles // num_warps*numBlock rows. This means atomicAdd() is inevitable. const int num_warps = kCtaSize / 32; const int num_rows_per_iter = num_warps * gridDim.x; __shared__ volatile ValueType smem[kCtaSize]; __shared__ volatile ValueType s_threshold[num_warps]; const int warpId = threadIdx.x / 32; const int laneId = threadIdx.x % 32; for ( int aRowId = blockIdx.x * num_warps + warpId ; aRowId < A_num_rows ; aRowId += num_rows_per_iter ) { ValueType maxVal(0); // get diagonal, min/max off-idagonals const int aRowBegin = A_rows[aRowId ]; const int aRowEnd = A_rows[aRowId + 1]; for ( IndexType aRowIt = aRowBegin + laneId ; utils::any( aRowIt < aRowEnd ) ; aRowIt += 32 ) { IndexType aColId = aRowIt < aRowEnd ? A_cols[aRowIt] : -1; ValueType aValue = aRowIt < aRowEnd ? A_vals[aRowIt] : ValueType(0); bool is_off_diagonal = aRowIt < aRowEnd && aColId != aRowId; if ( is_off_diagonal ) { maxVal = max( maxVal, aValue ); } } // init weights[] with a random number if ( laneId == 0 ) { if ( singleGPU ) { atomicAdd( &weights[aRowId], ourHash(aRowId) ); } else { atomicAdd( &weights[aRowId], ourHash( (int) base_index + aRowId) ); } } smem[threadIdx.x] = maxVal; #pragma unroll for ( int offset = 16 ; offset > 0 ; offset /= 2 ) if ( laneId < offset ) { smem[threadIdx.x] = maxVal = max( maxVal, smem[threadIdx.x + offset] ); } if ( laneId == 0 ) { // If laneId=0, then maxVal or minVal is in smem[threadIdx.x]. s_threshold[warpId] = smem[threadIdx.x] * alpha; } // sum of the column of S for ( IndexType aRowIt = aRowBegin + laneId ; utils::any( aRowIt < aRowEnd ) ; aRowIt += 32 ) { IndexType aColId = aRowIt < aRowEnd ? A_cols[aRowIt] : -1; ValueType aValue = aRowIt < aRowEnd ? 
A_vals[aRowIt] : ValueType(0); bool is_off_diagonal = aRowIt < aRowEnd && aColId != aRowId; bool is_strongly_connected = is_off_diagonal && (aValue > s_threshold[warpId]); if ( is_strongly_connected && aRowIt < aRowEnd && aColId < A_num_rows) { atomicAdd( &weights[aColId], 1.0f ); } if ( aRowIt < aRowEnd ) { s_con[aRowIt] = is_strongly_connected; } } } } // Save to file template <class T_Config> void dump_matrix_vector(const char *fname, const Matrix<T_Config> &A, const Vector<T_Config> &solution) { unsigned int num_nz = A.get_num_nz(); unsigned int num_rows = A.get_num_rows(); unsigned int vec_sz = solution.size(); std::ofstream fout; fout.open(fname, std::ofstream::out | std::ofstream::app); fout << num_rows << " " << num_nz << " " << vec_sz << std::endl; typename Vector<T_Config>::value_type val; for (int i = 0; i < num_rows + 1; i++) { fout << A.row_offsets[i] << std::endl; } for (int i = 0; i < num_nz; i++) { fout << A.col_indices[i] << std::endl; } fout.precision(14); fout.width(16); for (int i = 0; i < num_nz; i++) { val = A.values[i]; fout << val << std::endl; } for (int i = 0; i < vec_sz; i++) { val = solution[i]; fout << val << std::endl; } fout << std::endl; fout.close(); } /************************************************************************* * Computes the strength matrix and the connection weights (device) ************************************************************************/ template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void Strength_Affinity<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >:: computeStrongConnectionsAndWeights_1x1(Matrix_d &A, BVector &s_con, FVector &weights, const double max_row_sum) { typedef TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> TConfig_d; typedef typename Matrix_d::index_type IndexType; typedef typename Matrix_d::value_type ValueTypeA; typedef typename Vector<TConfig_d>::value_type ValueTypeB; bool level_opt_flag = ((int) max_row_sum != -1); m_aff.copy(A); if (!is_random_initialized) { unsigned int nTV_num_rows = this->n_TV * A.get_num_rows(); m_rhs.resize(nTV_num_rows, 0.0); initRandom(m_x, nTV_num_rows); if (level_opt_flag) { is_random_initialized = true; } level_counter = n_TV; } else { this->n_TV = min(level_counter, 32); unsigned int nTV_num_rows = this->n_TV * A.get_num_rows(); m_rhs.resize(nTV_num_rows, 0.0); if (m_x.size() < nTV_num_rows) { initRandom(m_x, nTV_num_rows); } } level_counter *= 2; this->solver->setup_no_throw(m_aff, false); TimerCPU timer(true); timer.start(); for (int iter = 0; iter < affinity_iters; iter ++) { this->solver->batch_smooth_1x1_fast(m_aff, this->n_TV, m_rhs, m_x); } cudaDeviceSynchronize(); double elapsed = timer.elapsed(); // End of GS check // get the raw pointers for everything I need const IndexType *offsets_ptr = m_aff.row_offsets.raw(); const IndexType *column_indices_ptr = m_aff.col_indices.raw(); bool *s_con_ptr = s_con.raw(); float *weights_ptr = weights.raw(); ValueTypeA *m_aff_values_ptr = m_aff.values.raw(); //m_aff_values.raw(); ValueTypeB *m_x_ptr = m_x.raw(); // choose a blocksize. 
Use 1 warp per row const int blockSize = 256; const int numWarps = blockSize / 32; const int numBlocks = (int) (A.get_num_rows() + numWarps - 1) / numWarps; if (A.get_num_rows() > 0) { computeAffinity_1x1_Kernel<IndexType, ValueTypeA, ValueTypeB, blockSize> <<< numBlocks, blockSize>>>( A.row_offsets.raw(), A.col_indices.raw(), m_x_ptr, this->n_TV, A.get_num_rows(), m_aff_values_ptr ); if (A.is_matrix_singleGPU()) computeStrongConnectionsAndWeightsFromAffinityKernel<IndexType, ValueTypeA, blockSize, true> <<< numBlocks, blockSize>>>( m_aff.row_offsets.raw(), m_aff.col_indices.raw(), m_aff_values_ptr, m_aff.get_num_rows(), s_con.raw(), weights.raw(), this->alpha, int64_t(0)); else computeStrongConnectionsAndWeightsFromAffinityKernel<IndexType, ValueTypeA, blockSize, false> <<< numBlocks, blockSize>>>( m_aff.row_offsets.raw(), m_aff.col_indices.raw(), m_aff_values_ptr, m_aff.get_num_rows(), s_con.raw(), weights.raw(), this->alpha, A.manager->base_index()); } if (!A.is_matrix_singleGPU() && A.currentView() == OWNED) { // Need to add neighbors contribution to my weights weights.dirtybit = 1; A.manager->add_from_halo(weights, weights.tag); } cudaCheckError(); } /**************************************** * Explict instantiations ***************************************/ #define AMGX_CASE_LINE(CASE) template class Strength_AffinityBase<TemplateMode<CASE>::Type>; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE #define AMGX_CASE_LINE(CASE) template class Strength_Affinity<TemplateMode<CASE>::Type>; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE } // namespace amgx
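For reference, the per-edge value stored by computeAffinity_1x1_Kernel above is a normalized, cosine-squared style similarity between the relaxed test vectors at the two endpoints of an edge. With x^{(k)} denoting the k-th of the n_TV smoothed random vectors, the kernel's s_xy, s_xx and s_yy reductions amount to:

\[
  \mathrm{affinity}_{ij} \;=\;
  \frac{\Bigl(\sum_{k} x_i^{(k)} x_j^{(k)}\Bigr)^{2}}
       {\Bigl(\sum_{k} \bigl(x_i^{(k)}\bigr)^{2}\Bigr)
        \Bigl(\sum_{k} \bigl(x_j^{(k)}\bigr)^{2}\Bigr) + \varepsilon},
  \qquad \varepsilon = 10^{-12},
\]

after which computeStrongConnectionsAndWeightsFromAffinityKernel marks an off-diagonal entry as strongly connected when its affinity exceeds alpha times the row maximum.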
cb47d092c06ac187b7f50d806730e9e4f5d733b9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /////////////////////////////////////////////////////////////////////////////// // filters.cu // // Contains implementation of `filters` library /////////////////////////////////////////////////////////////////////////////// #include "filters.hpp" #include <cassert> #include <cstdlib> #include <opencv2/imgproc.hpp> #include "filters_errors.hpp" namespace filters { namespace { // // Private globals // //! Maximum size of the squared kernel const auto KSizeMax = 64; //! Fixed size constant buffer for convolution filter kernels __constant__ float c_kernel[KSizeMax * KSizeMax]; //! Number of threads in both X and Y dimensions in the block const auto K = 32; //! Whether to print logs to stdout or not bool g_verbose = false; //! Number of cuda device to use unsigned g_devnum = 0; // // Private functions // /** * @brief Retrieves attribute of the device * @details * * @param attr attribute to get * @return attribute value */ int get_attribute(hipDeviceAttribute_t attr) { int value; check_errors(hipDeviceGetAttribute(&value, attr, g_devnum)); return value; } /** * @brief Gets from environment, whether verbosity should be enabled * @details * @return verbosity status */ bool obtain_verbosity() { const auto verbose = std::getenv("VERBOSE"); if(verbose == nullptr || (std::strcmp(verbose, "0") == 0)) { return false; } return true; } /** * @brief Gets from environment number of device to select * @details * @return number of device to select */ int obtain_devnum() { const auto devnum = std::getenv("DEVNUM"); if(devnum == nullptr) { return 0; } return atoi(devnum); } /** * @brief Prints device attributes * @details */ void print_attributes() { printf("Device attributes:\n"); printf(" hipDeviceAttributeMaxThreadsPerBlock=%d\n", get_attribute(hipDeviceAttributeMaxThreadsPerBlock)); printf(" hipDeviceAttributeMaxBlockDimX=%d\n", get_attribute(hipDeviceAttributeMaxBlockDimX)); printf(" hipDeviceAttributeMaxBlockDimY=%d\n", get_attribute(hipDeviceAttributeMaxBlockDimY)); printf(" hipDeviceAttributeMaxBlockDimZ=%d\n", get_attribute(hipDeviceAttributeMaxBlockDimZ)); printf(" hipDeviceAttributeMaxGridDimX=%d\n", get_attribute(hipDeviceAttributeMaxGridDimX)); printf(" hipDeviceAttributeMaxGridDimY=%d\n", get_attribute(hipDeviceAttributeMaxGridDimY)); printf(" hipDeviceAttributeMaxGridDimZ=%d\n", get_attribute(hipDeviceAttributeMaxGridDimZ)); printf(" hipDeviceAttributeMaxSharedMemoryPerBlock=%d\n", get_attribute(hipDeviceAttributeMaxSharedMemoryPerBlock)); printf(" hipDeviceAttributeTotalConstantMemory=%d\n", get_attribute(hipDeviceAttributeTotalConstantMemory)); printf(" hipDeviceAttributeWarpSize=%d\n", get_attribute(hipDeviceAttributeWarpSize)); printf(" hipDeviceAttributeClockRate=%d\n", get_attribute(hipDeviceAttributeClockRate)); printf(" hipDeviceAttributeMultiprocessorCount=%d\n", get_attribute(hipDeviceAttributeMultiprocessorCount)); printf(" hipDeviceAttributeMemoryClockRate=%d\n", get_attribute(hipDeviceAttributeMemoryClockRate)); printf(" hipDeviceAttributeL2CacheSize=%d\n", get_attribute(hipDeviceAttributeL2CacheSize)); printf(" hipDeviceAttributeMaxThreadsPerMultiProcessor=%d\n", get_attribute(hipDeviceAttributeMaxThreadsPerMultiProcessor)); printf(" hipDeviceAttributeComputeCapabilityMajor=%d\n", get_attribute(hipDeviceAttributeComputeCapabilityMajor)); printf(" hipDeviceAttributeComputeCapabilityMinor=%d\n", get_attribute(hipDeviceAttributeComputeCapabilityMinor)); printf(" 
hipDeviceAttributeMaxSharedMemoryPerMultiprocessor=%d\n", get_attribute(hipDeviceAttributeMaxSharedMemoryPerMultiprocessor)); printf(" hipDeviceAttributeMaxRegistersPerMultiprocessor=%d\n", get_attribute(hipDeviceAttributeMaxRegistersPerMultiprocessor)); } } // namespace __host__ void init() { // Obtain environment variables g_verbose = obtain_verbosity(); g_devnum = obtain_devnum(); // If verbose, print device attributes if(g_verbose) { print_attributes(); } // Select proper device check_errors(hipSetDevice(g_devnum)); } __host__ void cleanup() { check_errors(hipDeviceReset()); } std::pair<uchar* /*d_img*/, size_t /*d_pitch*/> create_image(size_t cols, size_t rows) { uchar* d_img; size_t d_pitch; check_errors(hipMallocPitch(&d_img, &d_pitch, cols * sizeof(uchar), rows)); return {d_img, d_pitch}; } void free_image(uchar* d_img) { check_errors(hipFree(d_img)); } void set_image(uchar* d_dst, size_t d_dpitch, const uchar* src, size_t spitch, size_t cols, size_t rows) { const auto width = (cols * sizeof(uchar)); const auto height = rows; check_errors(hipMemcpy2D(d_dst, d_dpitch, src, spitch, width, height, hipMemcpyHostToDevice)); } void get_image(uchar* dst, size_t dpitch, const uchar* d_src, size_t d_spitch, size_t cols, size_t rows) { const auto width = (cols * sizeof(uchar)); const auto height = rows; check_errors(hipMemcpy2D(dst, dpitch, d_src, d_spitch, width, height, hipMemcpyDeviceToHost)); } __host__ void set_kernel(const float* kernel, size_t ksize) { // Ensure proper size of the kernel assert(ksize <= KSizeMax); // Copy data from host kernel to constant memory check_errors(hipMemcpyToSymbol(c_kernel, kernel, ksize * ksize * sizeof(float))); } __host__ cv::Mat filter2d(const cv::Mat& src, const cv::Mat& kernel) { auto dst = cv::Mat(src.rows, src.cols, src.type()); filter2d(src, kernel, dst); return dst; } __host__ void filter2d(const cv::Mat& src, const cv::Mat& kernel, cv::Mat& dst) { // Ensure, that images have equal size assert(src.rows == dst.rows); assert(src.cols == dst.cols); const auto cols = src.cols; const auto rows = src.rows; // Ensure, that image is laid without spaces between assert(src.isContinuous() && dst.isContinuous()); const auto spitch = cols * sizeof(uchar); const auto dpitch = cols * sizeof(uchar); // Ensure proper type of images assert(src.type() == CV_8UC1 && dst.type() == CV_8UC1); const auto src_data = (const uchar*) src.data; const auto dst_data = (uchar*) dst.data; // Ensure, that kernel is squared assert(kernel.rows == kernel.cols); const auto ksize = kernel.rows; // Ensure proper type of kernel assert(kernel.type() == CV_32F); assert(kernel.isContinuous()); const auto kernel_data = (const float*) kernel.data; // Invoke low-level filtering method filter2d(src_data, spitch, cols, rows, kernel_data, ksize, dst_data, dpitch); } __host__ void filter2d( const uchar* src, size_t spitch, size_t cols, size_t rows, const float* kernel, size_t ksize, uchar* dst, size_t dpitch) { // Allocate memories uchar* d_src; size_t d_spitch; std::tie(d_src, d_spitch) = create_image(cols, rows); uchar* d_dst; size_t d_dpitch; std::tie(d_dst, d_dpitch) = create_image(cols, rows); // Copy input data set_image(d_src, d_spitch, src, spitch, cols, rows); set_kernel(kernel, ksize); // Launch filtering CUDA kernel filter2d_launch(d_src, d_spitch, cols, rows, ksize, d_dst, d_dpitch); // Wait for kernel launch to be done check_errors(hipDeviceSynchronize()); // Copy output data get_image(dst, dpitch, d_dst, d_dpitch, cols, rows); // Free memories free_image(d_src); free_image(d_dst); } 
__host__ void filter2d_launch( const uchar* d_src, size_t d_spitch, size_t cols, size_t rows, size_t ksize, uchar* d_dst, size_t d_dpitch) { // Let use as much threads in block as possible const auto dim_block = dim3(K, K); // Use as much KxK blocks as needed for this image const auto dim_grid_x = ((cols+K-1) / K); const auto dim_grid_y = ((rows+K-1) / K); const auto dim_grid = dim3(dim_grid_x, dim_grid_y); // Invoke algorithm hipLaunchKernelGGL(( filter2d_kernel), dim3(dim_grid), dim3(dim_block), 0, 0, d_src, d_spitch, cols, rows, ksize, d_dst, d_dpitch); // Check errors in kernel invocation check_errors(hipGetLastError()); } __global__ void filter2d_kernel( const uchar* src, size_t spitch, size_t cols, size_t rows, size_t ksize, uchar* dst, size_t dpitch) { // We need shared memory buffer to cache pixels from image. // In general, in every pixel of the block we must provide access to // surrounding halfes of the kernel (which at the end gives full kernel size) constexpr auto BufferSizeMax = (K + KSizeMax); // Note that we are declaring buffer using 2D notation, instead of 1D notation // This is because, after benchmarking, iterating over rows of fixed size // works faster than iterating over dynamic size array. __shared__ uchar s_buffer[BufferSizeMax][BufferSizeMax]; // Cache source image into shared memory // Each thread has to fetch every K-th element starting from that thread's // position inside the block. We are incrementing with K, because buffer // must contain also surrounding kernel elements. const auto buffer_size = (K + ksize); for(int m = threadIdx.y; m < buffer_size; m += K) { for(int n = threadIdx.x; n < buffer_size; n += K) { // Note that we are not caching result of ksize/2 // Benchmark showed, that current variant is better const int y = (m + blockIdx.y*K - ksize/2); const int x = (n + blockIdx.x*K - ksize/2); // If we are out of bound of the image, assume that buffer is zero if(x < 0 || y < 0 || x > cols || y > rows) { s_buffer[m][n] = 0; continue; } // Store copy of source image pixel into the buffer s_buffer[m][n] = src[y*spitch + x]; } } // Wait until all threads has done caching __syncthreads(); // Perform convolution on shared memory buffer const int i = (blockIdx.y*K + threadIdx.y); const int j = (blockIdx.x*K + threadIdx.x); const int half_ksize = (ksize / 2); // Check, if we are at the image border if((i > (rows - half_ksize)) || (i < half_ksize) || (j > (cols - half_ksize)) || (j < half_ksize)) { // Do not calculate nor write anything (aka BORDER_NONE) return; } // Calculate partial sums of buffer pixels with kernel's elements // Note that we are iterating only over kernel using pointer (benchmarks) auto sum = 0.0f; auto kernel = c_kernel; for(int m = 0; m < ksize; ++m) { for(int n = 0; n < ksize; ++n) { const auto y = (threadIdx.y + m); const auto x = (threadIdx.x + n); const auto buffer_v = s_buffer[y][x]; const auto kernel_v = *(kernel++); sum += (buffer_v * kernel_v); } } // Store final sum in the destination image dst[i*dpitch + j] = sum; } } // namespace filters
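// Note on the tile load in filter2d_kernel above: out-of-image positions are zero-filled
// with the test `x > cols || y > rows`, but valid pixel indices run 0..cols-1 and
// 0..rows-1, so the strict `>=` form below is the safe bound. This device helper is an
// illustrative sketch only; its name is an assumption, not part of the file above.
__device__ unsigned char load_pixel_or_zero(const unsigned char* src, size_t spitch,
                                            int cols, int rows, int x, int y)
{
    // Zero-pad everything outside [0, cols) x [0, rows).
    if (x < 0 || y < 0 || x >= cols || y >= rows) {
        return 0;
    }
    return src[y * spitch + x];
}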
cb47d092c06ac187b7f50d806730e9e4f5d733b9.cu
/////////////////////////////////////////////////////////////////////////////// // filters.cu // // Contains implementation of `filters` library /////////////////////////////////////////////////////////////////////////////// #include "filters.hpp" #include <cassert> #include <cstdlib> #include <opencv2/imgproc.hpp> #include "filters_errors.hpp" namespace filters { namespace { // // Private globals // //! Maximum size of the squared kernel const auto KSizeMax = 64; //! Fixed size constant buffer for convolution filter kernels __constant__ float c_kernel[KSizeMax * KSizeMax]; //! Number of threads in both X and Y dimensions in the block const auto K = 32; //! Whether to print logs to stdout or not bool g_verbose = false; //! Number of cuda device to use unsigned g_devnum = 0; // // Private functions // /** * @brief Retrieves attribute of the device * @details * * @param attr attribute to get * @return attribute value */ int get_attribute(cudaDeviceAttr attr) { int value; check_errors(cudaDeviceGetAttribute(&value, attr, g_devnum)); return value; } /** * @brief Gets from environment, whether verbosity should be enabled * @details * @return verbosity status */ bool obtain_verbosity() { const auto verbose = std::getenv("VERBOSE"); if(verbose == nullptr || (std::strcmp(verbose, "0") == 0)) { return false; } return true; } /** * @brief Gets from environment number of device to select * @details * @return number of device to select */ int obtain_devnum() { const auto devnum = std::getenv("DEVNUM"); if(devnum == nullptr) { return 0; } return atoi(devnum); } /** * @brief Prints device attributes * @details */ void print_attributes() { printf("Device attributes:\n"); printf(" cudaDevAttrMaxThreadsPerBlock=%d\n", get_attribute(cudaDevAttrMaxThreadsPerBlock)); printf(" cudaDevAttrMaxBlockDimX=%d\n", get_attribute(cudaDevAttrMaxBlockDimX)); printf(" cudaDevAttrMaxBlockDimY=%d\n", get_attribute(cudaDevAttrMaxBlockDimY)); printf(" cudaDevAttrMaxBlockDimZ=%d\n", get_attribute(cudaDevAttrMaxBlockDimZ)); printf(" cudaDevAttrMaxGridDimX=%d\n", get_attribute(cudaDevAttrMaxGridDimX)); printf(" cudaDevAttrMaxGridDimY=%d\n", get_attribute(cudaDevAttrMaxGridDimY)); printf(" cudaDevAttrMaxGridDimZ=%d\n", get_attribute(cudaDevAttrMaxGridDimZ)); printf(" cudaDevAttrMaxSharedMemoryPerBlock=%d\n", get_attribute(cudaDevAttrMaxSharedMemoryPerBlock)); printf(" cudaDevAttrTotalConstantMemory=%d\n", get_attribute(cudaDevAttrTotalConstantMemory)); printf(" cudaDevAttrWarpSize=%d\n", get_attribute(cudaDevAttrWarpSize)); printf(" cudaDevAttrClockRate=%d\n", get_attribute(cudaDevAttrClockRate)); printf(" cudaDevAttrMultiProcessorCount=%d\n", get_attribute(cudaDevAttrMultiProcessorCount)); printf(" cudaDevAttrMemoryClockRate=%d\n", get_attribute(cudaDevAttrMemoryClockRate)); printf(" cudaDevAttrL2CacheSize=%d\n", get_attribute(cudaDevAttrL2CacheSize)); printf(" cudaDevAttrMaxThreadsPerMultiProcessor=%d\n", get_attribute(cudaDevAttrMaxThreadsPerMultiProcessor)); printf(" cudaDevAttrComputeCapabilityMajor=%d\n", get_attribute(cudaDevAttrComputeCapabilityMajor)); printf(" cudaDevAttrComputeCapabilityMinor=%d\n", get_attribute(cudaDevAttrComputeCapabilityMinor)); printf(" cudaDevAttrMaxSharedMemoryPerMultiprocessor=%d\n", get_attribute(cudaDevAttrMaxSharedMemoryPerMultiprocessor)); printf(" cudaDevAttrMaxRegistersPerMultiprocessor=%d\n", get_attribute(cudaDevAttrMaxRegistersPerMultiprocessor)); } } // namespace __host__ void init() { // Obtain environment variables g_verbose = obtain_verbosity(); g_devnum = obtain_devnum(); // If 
verbose, print device attributes if(g_verbose) { print_attributes(); } // Select proper device check_errors(cudaSetDevice(g_devnum)); } __host__ void cleanup() { check_errors(cudaDeviceReset()); } std::pair<uchar* /*d_img*/, size_t /*d_pitch*/> create_image(size_t cols, size_t rows) { uchar* d_img; size_t d_pitch; check_errors(cudaMallocPitch(&d_img, &d_pitch, cols * sizeof(uchar), rows)); return {d_img, d_pitch}; } void free_image(uchar* d_img) { check_errors(cudaFree(d_img)); } void set_image(uchar* d_dst, size_t d_dpitch, const uchar* src, size_t spitch, size_t cols, size_t rows) { const auto width = (cols * sizeof(uchar)); const auto height = rows; check_errors(cudaMemcpy2D(d_dst, d_dpitch, src, spitch, width, height, cudaMemcpyHostToDevice)); } void get_image(uchar* dst, size_t dpitch, const uchar* d_src, size_t d_spitch, size_t cols, size_t rows) { const auto width = (cols * sizeof(uchar)); const auto height = rows; check_errors(cudaMemcpy2D(dst, dpitch, d_src, d_spitch, width, height, cudaMemcpyDeviceToHost)); } __host__ void set_kernel(const float* kernel, size_t ksize) { // Ensure proper size of the kernel assert(ksize <= KSizeMax); // Copy data from host kernel to constant memory check_errors(cudaMemcpyToSymbol(c_kernel, kernel, ksize * ksize * sizeof(float))); } __host__ cv::Mat filter2d(const cv::Mat& src, const cv::Mat& kernel) { auto dst = cv::Mat(src.rows, src.cols, src.type()); filter2d(src, kernel, dst); return dst; } __host__ void filter2d(const cv::Mat& src, const cv::Mat& kernel, cv::Mat& dst) { // Ensure, that images have equal size assert(src.rows == dst.rows); assert(src.cols == dst.cols); const auto cols = src.cols; const auto rows = src.rows; // Ensure, that image is laid without spaces between assert(src.isContinuous() && dst.isContinuous()); const auto spitch = cols * sizeof(uchar); const auto dpitch = cols * sizeof(uchar); // Ensure proper type of images assert(src.type() == CV_8UC1 && dst.type() == CV_8UC1); const auto src_data = (const uchar*) src.data; const auto dst_data = (uchar*) dst.data; // Ensure, that kernel is squared assert(kernel.rows == kernel.cols); const auto ksize = kernel.rows; // Ensure proper type of kernel assert(kernel.type() == CV_32F); assert(kernel.isContinuous()); const auto kernel_data = (const float*) kernel.data; // Invoke low-level filtering method filter2d(src_data, spitch, cols, rows, kernel_data, ksize, dst_data, dpitch); } __host__ void filter2d( const uchar* src, size_t spitch, size_t cols, size_t rows, const float* kernel, size_t ksize, uchar* dst, size_t dpitch) { // Allocate memories uchar* d_src; size_t d_spitch; std::tie(d_src, d_spitch) = create_image(cols, rows); uchar* d_dst; size_t d_dpitch; std::tie(d_dst, d_dpitch) = create_image(cols, rows); // Copy input data set_image(d_src, d_spitch, src, spitch, cols, rows); set_kernel(kernel, ksize); // Launch filtering CUDA kernel filter2d_launch(d_src, d_spitch, cols, rows, ksize, d_dst, d_dpitch); // Wait for kernel launch to be done check_errors(cudaDeviceSynchronize()); // Copy output data get_image(dst, dpitch, d_dst, d_dpitch, cols, rows); // Free memories free_image(d_src); free_image(d_dst); } __host__ void filter2d_launch( const uchar* d_src, size_t d_spitch, size_t cols, size_t rows, size_t ksize, uchar* d_dst, size_t d_dpitch) { // Let use as much threads in block as possible const auto dim_block = dim3(K, K); // Use as much KxK blocks as needed for this image const auto dim_grid_x = ((cols+K-1) / K); const auto dim_grid_y = ((rows+K-1) / K); const auto dim_grid = 
dim3(dim_grid_x, dim_grid_y); // Invoke algorithm filter2d_kernel<<<dim_grid, dim_block>>>( d_src, d_spitch, cols, rows, ksize, d_dst, d_dpitch); // Check errors in kernel invocation check_errors(cudaGetLastError()); } __global__ void filter2d_kernel( const uchar* src, size_t spitch, size_t cols, size_t rows, size_t ksize, uchar* dst, size_t dpitch) { // We need shared memory buffer to cache pixels from image. // In general, in every pixel of the block we must provide access to // surrounding halfes of the kernel (which at the end gives full kernel size) constexpr auto BufferSizeMax = (K + KSizeMax); // Note that we are declaring buffer using 2D notation, instead of 1D notation // This is because, after benchmarking, iterating over rows of fixed size // works faster than iterating over dynamic size array. __shared__ uchar s_buffer[BufferSizeMax][BufferSizeMax]; // Cache source image into shared memory // Each thread has to fetch every K-th element starting from that thread's // position inside the block. We are incrementing with K, because buffer // must contain also surrounding kernel elements. const auto buffer_size = (K + ksize); for(int m = threadIdx.y; m < buffer_size; m += K) { for(int n = threadIdx.x; n < buffer_size; n += K) { // Note that we are not caching result of ksize/2 // Benchmark showed, that current variant is better const int y = (m + blockIdx.y*K - ksize/2); const int x = (n + blockIdx.x*K - ksize/2); // If we are out of bound of the image, assume that buffer is zero if(x < 0 || y < 0 || x > cols || y > rows) { s_buffer[m][n] = 0; continue; } // Store copy of source image pixel into the buffer s_buffer[m][n] = src[y*spitch + x]; } } // Wait until all threads has done caching __syncthreads(); // Perform convolution on shared memory buffer const int i = (blockIdx.y*K + threadIdx.y); const int j = (blockIdx.x*K + threadIdx.x); const int half_ksize = (ksize / 2); // Check, if we are at the image border if((i > (rows - half_ksize)) || (i < half_ksize) || (j > (cols - half_ksize)) || (j < half_ksize)) { // Do not calculate nor write anything (aka BORDER_NONE) return; } // Calculate partial sums of buffer pixels with kernel's elements // Note that we are iterating only over kernel using pointer (benchmarks) auto sum = 0.0f; auto kernel = c_kernel; for(int m = 0; m < ksize; ++m) { for(int n = 0; n < ksize; ++n) { const auto y = (threadIdx.y + m); const auto x = (threadIdx.x + n); const auto buffer_v = s_buffer[y][x]; const auto kernel_v = *(kernel++); sum += (buffer_v * kernel_v); } } // Store final sum in the destination image dst[i*dpitch + j] = sum; } } // namespace filters
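// filter2d_launch above sizes the grid with a ceiling division and then surfaces launch
// failures through cudaGetLastError(). A minimal sketch of the same pattern; run_kernel
// and the inline error handling are stand-ins for the real filter2d_kernel and the
// project's check_errors() helper (assumptions, not this file's API).
#include <cstdio>
#include <cuda_runtime.h>

inline unsigned int ceil_div(size_t n, unsigned int block)
{
    return static_cast<unsigned int>((n + block - 1) / block);
}

__global__ void run_kernel() {}  // placeholder body, stands in for filter2d_kernel

void launch_example(size_t cols, size_t rows, unsigned int K)
{
    const dim3 dim_block(K, K);                                  // K*K threads per block
    const dim3 dim_grid(ceil_div(cols, K), ceil_div(rows, K));   // cover the whole image
    run_kernel<<<dim_grid, dim_block>>>();
    const cudaError_t err = cudaGetLastError();                  // catches bad configurations
    if (err != cudaSuccess) {
        std::fprintf(stderr, "launch failed: %s\n", cudaGetErrorString(err));
    }
}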
0e6661dead6bdbfd7c5b747807c38e64687054e4.hip
// !!! This is a file automatically generated by hipify!!!
#include"material.h"
#include"../param/param.h"
#include"../gpu.h"
#include<cstdio>

extern __constant__ float d_coef[5][4];

void MATERIAL::init_gpu_full(int deviceid,PARAM &param,MATERIAL &mat){
  nx=mat.nx;
  nz=mat.nz;
  int nz1=param.a_nz1[deviceid];
  int nz2=param.a_nz2[deviceid];
  int tnz=nz2-nz1+1;
  safecall(hipMemcpyToSymbol(d_coef,g_coef,sizeof(float)*20));
  usetable=mat.usetable;
  if(usetable){
    num_mat=mat.num_mat;
    usetable=mat.usetable;
    safecall(hipMalloc((void**)&(tbl_BU ),sizeof(float)*num_mat));
    safecall(hipMalloc((void**)&(tbl_BW ),sizeof(float)*num_mat));
    safecall(hipMalloc((void**)&(tbl_MU ),sizeof(float)*num_mat));
    safecall(hipMalloc((void**)&(tbl_MUA),sizeof(float)*num_mat));
    safecall(hipMalloc((void**)&(tbl_LAM),sizeof(float)*num_mat));
    safecall(hipMalloc((void**)&(index),sizeof(float)*nx*tnz));
    index -=nz1*nx;
    safecall(hipMemcpy( tbl_BU , mat.tbl_BU , sizeof(float)*num_mat,hipMemcpyHostToDevice));
    safecall(hipMemcpy( tbl_BW , mat.tbl_BW , sizeof(float)*num_mat,hipMemcpyHostToDevice));
    safecall(hipMemcpy( tbl_MU , mat.tbl_MU , sizeof(float)*num_mat,hipMemcpyHostToDevice));
    safecall(hipMemcpy( tbl_MUA, mat.tbl_MUA, sizeof(float)*num_mat,hipMemcpyHostToDevice));
    safecall(hipMemcpy( tbl_LAM, mat.tbl_LAM, sizeof(float)*num_mat,hipMemcpyHostToDevice));
    safecall(hipMemcpy( index + nz1*nx, mat.index + nz1*nx, sizeof(float)*nx*tnz,hipMemcpyHostToDevice));
  }else{
    safecall(hipMalloc((void**)&(BU ),sizeof(float)*nx*tnz));  BU  -=nz1*nx;
    safecall(hipMalloc((void**)&(BW ),sizeof(float)*nx*tnz));  BW  -=nz1*nx;
    safecall(hipMalloc((void**)&(MU ),sizeof(float)*nx*tnz));  MU  -=nz1*nx;
    safecall(hipMalloc((void**)&(MUA),sizeof(float)*nx*tnz));  MUA -=nz1*nx;
    safecall(hipMalloc((void**)&(LAM),sizeof(float)*nx*tnz));  LAM -=nz1*nx;
    safecall(hipMemcpy( BU  + nz1*nx, mat.BU  + nz1*nx, sizeof(float)*nx*tnz,hipMemcpyHostToDevice));
    safecall(hipMemcpy( BW  + nz1*nx, mat.BW  + nz1*nx, sizeof(float)*nx*tnz,hipMemcpyHostToDevice));
    safecall(hipMemcpy( MU  + nz1*nx, mat.MU  + nz1*nx, sizeof(float)*nx*tnz,hipMemcpyHostToDevice));
    safecall(hipMemcpy( MUA + nz1*nx, mat.MUA + nz1*nx, sizeof(float)*nx*tnz,hipMemcpyHostToDevice));
    safecall(hipMemcpy( LAM + nz1*nx, mat.LAM + nz1*nx, sizeof(float)*nx*tnz,hipMemcpyHostToDevice));
  }
}
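// init_gpu_full above subtracts nz1*nx from each device pointer so the slab owned by
// this device can be indexed with global z-row indices (nz1..nz2) instead of local
// ones. A minimal CUDA sketch of that idiom (the hipMalloc call above maps directly
// onto cudaMalloc); the function name is illustrative only.
#include <cstddef>
#include <cuda_runtime.h>

float* alloc_slab_with_global_rows(int nx, int nz1, int nz2)
{
    const int tnz = nz2 - nz1 + 1;     // number of rows owned by this device
    float* d_ptr = nullptr;
    cudaMalloc(reinterpret_cast<void**>(&d_ptr), sizeof(float) * nx * tnz);
    // After the shift, d_ptr[iz*nx + ix] is valid for iz in [nz1, nz2];
    // add nz1*nx back before freeing the allocation.
    return d_ptr - static_cast<std::ptrdiff_t>(nz1) * nx;
}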
0e6661dead6bdbfd7c5b747807c38e64687054e4.cu
#include"material.h" #include"../param/param.h" #include"../gpu.h" #include<cstdio> extern __constant__ float d_coef[5][4]; void MATERIAL::init_gpu_full(int deviceid,PARAM &param,MATERIAL &mat){ nx=mat.nx; nz=mat.nz; int nz1=param.a_nz1[deviceid]; int nz2=param.a_nz2[deviceid]; int tnz=nz2-nz1+1; safecall(cudaMemcpyToSymbol(d_coef,g_coef,sizeof(float)*20)); usetable=mat.usetable; if(usetable){ num_mat=mat.num_mat; usetable=mat.usetable; safecall(cudaMalloc((void**)&(tbl_BU ),sizeof(float)*num_mat)); safecall(cudaMalloc((void**)&(tbl_BW ),sizeof(float)*num_mat)); safecall(cudaMalloc((void**)&(tbl_MU ),sizeof(float)*num_mat)); safecall(cudaMalloc((void**)&(tbl_MUA),sizeof(float)*num_mat)); safecall(cudaMalloc((void**)&(tbl_LAM),sizeof(float)*num_mat)); safecall(cudaMalloc((void**)&(index),sizeof(float)*nx*tnz)); index -=nz1*nx; safecall(cudaMemcpy( tbl_BU , mat.tbl_BU , sizeof(float)*num_mat,cudaMemcpyHostToDevice)); safecall(cudaMemcpy( tbl_BW , mat.tbl_BW , sizeof(float)*num_mat,cudaMemcpyHostToDevice)); safecall(cudaMemcpy( tbl_MU , mat.tbl_MU , sizeof(float)*num_mat,cudaMemcpyHostToDevice)); safecall(cudaMemcpy( tbl_MUA, mat.tbl_MUA, sizeof(float)*num_mat,cudaMemcpyHostToDevice)); safecall(cudaMemcpy( tbl_LAM, mat.tbl_LAM, sizeof(float)*num_mat,cudaMemcpyHostToDevice)); safecall(cudaMemcpy( index + nz1*nx, mat.index + nz1*nx, sizeof(float)*nx*tnz,cudaMemcpyHostToDevice)); }else{ safecall(cudaMalloc((void**)&(BU ),sizeof(float)*nx*tnz)); BU -=nz1*nx; safecall(cudaMalloc((void**)&(BW ),sizeof(float)*nx*tnz)); BW -=nz1*nx; safecall(cudaMalloc((void**)&(MU ),sizeof(float)*nx*tnz)); MU -=nz1*nx; safecall(cudaMalloc((void**)&(MUA),sizeof(float)*nx*tnz)); MUA -=nz1*nx; safecall(cudaMalloc((void**)&(LAM),sizeof(float)*nx*tnz)); LAM -=nz1*nx; safecall(cudaMemcpy( BU + nz1*nx, mat.BU + nz1*nx, sizeof(float)*nx*tnz,cudaMemcpyHostToDevice)); safecall(cudaMemcpy( BW + nz1*nx, mat.BW + nz1*nx, sizeof(float)*nx*tnz,cudaMemcpyHostToDevice)); safecall(cudaMemcpy( MU + nz1*nx, mat.MU + nz1*nx, sizeof(float)*nx*tnz,cudaMemcpyHostToDevice)); safecall(cudaMemcpy( MUA + nz1*nx, mat.MUA + nz1*nx, sizeof(float)*nx*tnz,cudaMemcpyHostToDevice)); safecall(cudaMemcpy( LAM + nz1*nx, mat.LAM + nz1*nx, sizeof(float)*nx*tnz,cudaMemcpyHostToDevice)); } }
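// The first copy in init_gpu_full above pushes the 5x4 coefficient table into
// __constant__ memory with cudaMemcpyToSymbol; sizeof(float)*20 matches the symbol
// size exactly. A minimal self-contained sketch of that upload; the symbol and
// function names here are illustrative, not the ones used above.
#include <cuda_runtime.h>

__constant__ float c_coef_example[5][4];

void upload_coef_example(const float host_coef[5][4])
{
    // One-time copy before any kernel that reads c_coef_example is launched.
    cudaMemcpyToSymbol(c_coef_example, host_coef, sizeof(float) * 5 * 4);
}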
44c5721ee1e2810f33b55f3dd4c36698d1f0e5ee.hip
// !!! This is a file automatically generated by hipify!!! #include <vector> #include "caffe/filler.hpp" #include "caffe/layer.hpp" #include "caffe/util/im2col.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" #ifdef USE_GREENTEA #include "caffe/greentea/greentea.hpp" #include "caffe/greentea/greentea_im2col.hpp" #include "caffe/greentea/greentea_math_functions.hpp" #endif namespace caffe { template<typename Dtype> void ConvolutionSKLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_ROCM // CUDA backend code for (int i = 0; i < bottom.size(); ++i) { const Dtype* bottom_data = bottom[i]->gpu_data(); Dtype* top_data = top[i]->mutable_gpu_data(); Dtype* col_data = col_buffer()->mutable_gpu_data(); const Dtype* weight = this->blobs_[0]->gpu_data(); int weight_offset = M_ * K_; int col_offset = K_ * N_; int top_offset = M_ * N_; for (int n = 0; n < num_; ++n) { // First, im2col im2col_sk_gpu(bottom_data + bottom[i]->offset(n), channels_, height_, width_, kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, kstride_h_, kstride_w_, col_data); // Second, innerproduct with groups for (int g = 0; g < group_; ++g) { caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, N_, K_, (Dtype) 1., weight + weight_offset * g, col_data + col_offset * g, (Dtype) 0., top_data + top[i]->offset(n) + top_offset * g); } // Third, add bias if (bias_term_) { caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_output_, N_, 1, (Dtype) 1., this->blobs_[1]->gpu_data(), bias_multiplier_.gpu_data(), (Dtype) 1., top_data + top[i]->offset(n)); } } } #endif } else { // GreenTea backend code #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_context_->id()); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( this->device_context_->id()); for (int i = 0; i < bottom.size(); ++i) { const cl_mem bottom_data = (cl_mem) (bottom[i]->gpu_data()); cl_mem top_data = (cl_mem) (top[i]->mutable_gpu_data()); cl_mem col_data = (cl_mem) (col_buffer()->mutable_gpu_data()); const cl_mem weight = (cl_mem) (this->blobs_[0]->gpu_data()); int weight_offset = M_ * K_; int col_offset = K_ * N_; int top_offset = M_ * N_; for (int n = 0; n < num_; ++n) { // First, im2col greentea_im2col_sk_gpu<Dtype>(&program, &ctx, bottom_data, bottom[i]->offset(n), channels_, height_, width_, kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, kstride_h_, kstride_w_, col_data); // Second, innerproduct with groups for (int g = 0; g < group_; ++g) { greentea_gpu_gemm<Dtype>(this->device_context_->id(), CblasNoTrans, CblasNoTrans, M_, N_, K_, (Dtype) 1., weight, weight_offset * g, col_data, col_offset * g, (Dtype) 0., top_data, top[i]->offset(n) + top_offset * g); } // Third, add bias if (bias_term_) { greentea_gpu_gemm<Dtype>(this->device_context_->id(), CblasNoTrans, CblasNoTrans, num_output_, N_, 1, (Dtype) 1., (cl_mem) (this->blobs_[1]->gpu_data()), 0, (cl_mem) (bias_multiplier_.gpu_data()), 0, (Dtype) 1., top_data, top[i]->offset(n)); } } } #endif } } template<typename Dtype> void ConvolutionSKLayer<Dtype>::Backward_gpu( const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_ROCM const Dtype* weight = NULL; Dtype* weight_diff = NULL; if (this->param_propagate_down_[0]) { weight = this->blobs_[0]->gpu_data(); weight_diff = 
this->blobs_[0]->mutable_gpu_diff(); caffe_gpu_set(this->blobs_[0]->count(), Dtype(0), weight_diff); } Dtype* bias_diff = NULL; if (bias_term_ && this->param_propagate_down_[1]) { bias_diff = this->blobs_[1]->mutable_gpu_diff(); caffe_gpu_set(this->blobs_[1]->count(), Dtype(0), bias_diff); } const int weight_offset = M_ * K_; const int col_offset = K_ * N_; const int top_offset = M_ * N_; for (int i = 0; i < top.size(); ++i) { const Dtype* top_diff = NULL; // Bias gradient, if necessary. if (bias_term_ && this->param_propagate_down_[1]) { top_diff = top[i]->gpu_diff(); for (int n = 0; n < num_; ++n) { caffe_gpu_gemv<Dtype>(CblasNoTrans, num_output_, N_, 1., top_diff + top[0]->offset(n), bias_multiplier_.gpu_data(), 1., bias_diff); } } if (this->param_propagate_down_[0] || propagate_down[i]) { if (!top_diff) { top_diff = top[i]->gpu_diff(); } Dtype* col_data = col_buffer()->mutable_gpu_data(); Dtype* col_diff = col_buffer()->mutable_gpu_diff(); const Dtype* bottom_data = bottom[i]->gpu_data(); Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); for (int n = 0; n < num_; ++n) { // Since we saved memory in the forward pass by not storing all col // data, we will need to recompute them. im2col_sk_gpu(bottom_data + bottom[i]->offset(n), channels_, height_, width_, kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, kstride_h_, kstride_w_, col_data); // gradient w.r.t. weight. Note that we will accumulate diffs. if (this->param_propagate_down_[0]) { for (int g = 0; g < group_; ++g) { caffe_gpu_gemm<Dtype>( CblasNoTrans, CblasTrans, M_, K_, N_, (Dtype) 1., top_diff + top[i]->offset(n) + top_offset * g, col_data + col_offset * g, (Dtype) 1., weight_diff + weight_offset * g); } } // gradient w.r.t. bottom data, if necessary if (propagate_down[i]) { for (int g = 0; g < group_; ++g) { caffe_gpu_gemm<Dtype>( CblasTrans, CblasNoTrans, K_, N_, M_, (Dtype) 1., weight + weight_offset * g, top_diff + top[i]->offset(n) + top_offset * g, (Dtype) 0., col_diff + col_offset * g); } // col2im back to the data col2im_sk_gpu(col_diff, channels_, height_, width_, kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, kstride_h_, kstride_w_, bottom_diff + bottom[i]->offset(n)); } } } } #endif } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_context_->id()); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( this->device_context_->id()); cl_mem weight = NULL; cl_mem weight_diff = NULL; if (this->param_propagate_down_[0]) { weight = (cl_mem) (this->blobs_[0]->gpu_data()); weight_diff = (cl_mem) (this->blobs_[0]->mutable_gpu_diff()); greentea_gpu_set(this->device_context_->id(), this->blobs_[0]->count(), Dtype(0), weight_diff, 0); } cl_mem bias_diff = NULL; if (bias_term_ && this->param_propagate_down_[1]) { bias_diff = (cl_mem) (this->blobs_[1]->mutable_gpu_diff()); greentea_gpu_set(this->device_context_->id(), this->blobs_[1]->count(), Dtype(0), bias_diff, 0); } const int weight_offset = M_ * K_; const int col_offset = K_ * N_; const int top_offset = M_ * N_; for (int i = 0; i < top.size(); ++i) { cl_mem top_diff = NULL; // Bias gradient, if necessary. 
if (bias_term_ && this->param_propagate_down_[1]) { top_diff = (cl_mem) (top[i]->gpu_diff()); for (int n = 0; n < num_; ++n) { greentea_gpu_gemv(this->device_context_->id(), CblasNoTrans, num_output_, N_, (Dtype) 1., top_diff, top[0]->offset(n), (cl_mem) (bias_multiplier_.gpu_data()), 0, (Dtype) 1., bias_diff, 0); } } if (this->param_propagate_down_[0] || propagate_down[i]) { if (!top_diff) { top_diff = (cl_mem) (top[i]->gpu_diff()); } cl_mem col_data = (cl_mem) (col_buffer()->mutable_gpu_data()); cl_mem col_diff = (cl_mem) (col_buffer()->mutable_gpu_diff()); const cl_mem bottom_data = (cl_mem) (bottom[i]->gpu_data()); cl_mem bottom_diff = (cl_mem) (bottom[i]->mutable_gpu_diff()); for (int n = 0; n < num_; ++n) { // Since we saved memory in the forward pass by not storing all col // data, we will need to recompute them. greentea_im2col_sk_gpu<Dtype>(&program, &ctx, bottom_data, bottom[i]->offset(n), channels_, height_, width_, kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, kstride_h_, kstride_w_, col_data); // gradient w.r.t. weight. Note that we will accumulate diffs. if (this->param_propagate_down_[0]) { for (int g = 0; g < group_; ++g) { greentea_gpu_gemm<Dtype>(this->device_context_->id(), CblasNoTrans, CblasTrans, M_, K_, N_, (Dtype) 1., top_diff, top[i]->offset(n) + top_offset * g, col_data, col_offset * g, (Dtype) 1., weight_diff, weight_offset * g); } } // gradient w.r.t. bottom data, if necessary if (propagate_down[i]) { for (int g = 0; g < group_; ++g) { greentea_gpu_gemm<Dtype>(this->device_context_->id(), CblasTrans, CblasNoTrans, K_, N_, M_, (Dtype) 1., weight, weight_offset * g, top_diff, top[i]->offset(n) + top_offset * g, (Dtype) 0., col_diff, col_offset * g); } // col2im back to the data greentea_col2im_sk_gpu<Dtype>(&program, &ctx, col_diff, channels_, height_, width_, kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, kstride_h_, kstride_w_, bottom_diff, bottom[i]->offset(n)); } } } } #endif } } template<typename Dtype> shared_ptr< Blob<Dtype> > ConvolutionSKLayer<Dtype>::col_buffer() { return this->device_context_-> template Buffer<Dtype>(this->device_context_->current_queue_id()); } INSTANTIATE_LAYER_GPU_FUNCS(ConvolutionSKLayer); } // namespace caffe
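// Forward and backward above share one weight blob and one im2col buffer across all
// groups; group g simply works at fixed offsets weight_offset = M_*K_, col_offset =
// K_*N_ and top_offset = M_*N_ into those buffers. A small self-contained sketch of
// that offset arithmetic (plain pointers, not Caffe types; names are illustrative).
#include <cstddef>

struct GroupSlices {
    const float* weight_g;  // M x K weight slice for group g
    const float* col_g;     // K x N im2col slice for group g
    float*       top_g;     // M x N output slice for group g
};

inline GroupSlices slices_for_group(const float* weight, const float* col, float* top,
                                    std::size_t M, std::size_t K, std::size_t N,
                                    std::size_t g)
{
    return GroupSlices{weight + M * K * g, col + K * N * g, top + M * N * g};
}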
44c5721ee1e2810f33b55f3dd4c36698d1f0e5ee.cu
#include <vector> #include "caffe/filler.hpp" #include "caffe/layer.hpp" #include "caffe/util/im2col.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" #ifdef USE_GREENTEA #include "caffe/greentea/greentea.hpp" #include "caffe/greentea/greentea_im2col.hpp" #include "caffe/greentea/greentea_math_functions.hpp" #endif namespace caffe { template<typename Dtype> void ConvolutionSKLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA // CUDA backend code for (int i = 0; i < bottom.size(); ++i) { const Dtype* bottom_data = bottom[i]->gpu_data(); Dtype* top_data = top[i]->mutable_gpu_data(); Dtype* col_data = col_buffer()->mutable_gpu_data(); const Dtype* weight = this->blobs_[0]->gpu_data(); int weight_offset = M_ * K_; int col_offset = K_ * N_; int top_offset = M_ * N_; for (int n = 0; n < num_; ++n) { // First, im2col im2col_sk_gpu(bottom_data + bottom[i]->offset(n), channels_, height_, width_, kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, kstride_h_, kstride_w_, col_data); // Second, innerproduct with groups for (int g = 0; g < group_; ++g) { caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, N_, K_, (Dtype) 1., weight + weight_offset * g, col_data + col_offset * g, (Dtype) 0., top_data + top[i]->offset(n) + top_offset * g); } // Third, add bias if (bias_term_) { caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_output_, N_, 1, (Dtype) 1., this->blobs_[1]->gpu_data(), bias_multiplier_.gpu_data(), (Dtype) 1., top_data + top[i]->offset(n)); } } } #endif } else { // GreenTea backend code #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_context_->id()); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( this->device_context_->id()); for (int i = 0; i < bottom.size(); ++i) { const cl_mem bottom_data = (cl_mem) (bottom[i]->gpu_data()); cl_mem top_data = (cl_mem) (top[i]->mutable_gpu_data()); cl_mem col_data = (cl_mem) (col_buffer()->mutable_gpu_data()); const cl_mem weight = (cl_mem) (this->blobs_[0]->gpu_data()); int weight_offset = M_ * K_; int col_offset = K_ * N_; int top_offset = M_ * N_; for (int n = 0; n < num_; ++n) { // First, im2col greentea_im2col_sk_gpu<Dtype>(&program, &ctx, bottom_data, bottom[i]->offset(n), channels_, height_, width_, kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, kstride_h_, kstride_w_, col_data); // Second, innerproduct with groups for (int g = 0; g < group_; ++g) { greentea_gpu_gemm<Dtype>(this->device_context_->id(), CblasNoTrans, CblasNoTrans, M_, N_, K_, (Dtype) 1., weight, weight_offset * g, col_data, col_offset * g, (Dtype) 0., top_data, top[i]->offset(n) + top_offset * g); } // Third, add bias if (bias_term_) { greentea_gpu_gemm<Dtype>(this->device_context_->id(), CblasNoTrans, CblasNoTrans, num_output_, N_, 1, (Dtype) 1., (cl_mem) (this->blobs_[1]->gpu_data()), 0, (cl_mem) (bias_multiplier_.gpu_data()), 0, (Dtype) 1., top_data, top[i]->offset(n)); } } } #endif } } template<typename Dtype> void ConvolutionSKLayer<Dtype>::Backward_gpu( const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA const Dtype* weight = NULL; Dtype* weight_diff = NULL; if (this->param_propagate_down_[0]) { weight = this->blobs_[0]->gpu_data(); weight_diff = this->blobs_[0]->mutable_gpu_diff(); caffe_gpu_set(this->blobs_[0]->count(), 
Dtype(0), weight_diff); } Dtype* bias_diff = NULL; if (bias_term_ && this->param_propagate_down_[1]) { bias_diff = this->blobs_[1]->mutable_gpu_diff(); caffe_gpu_set(this->blobs_[1]->count(), Dtype(0), bias_diff); } const int weight_offset = M_ * K_; const int col_offset = K_ * N_; const int top_offset = M_ * N_; for (int i = 0; i < top.size(); ++i) { const Dtype* top_diff = NULL; // Bias gradient, if necessary. if (bias_term_ && this->param_propagate_down_[1]) { top_diff = top[i]->gpu_diff(); for (int n = 0; n < num_; ++n) { caffe_gpu_gemv<Dtype>(CblasNoTrans, num_output_, N_, 1., top_diff + top[0]->offset(n), bias_multiplier_.gpu_data(), 1., bias_diff); } } if (this->param_propagate_down_[0] || propagate_down[i]) { if (!top_diff) { top_diff = top[i]->gpu_diff(); } Dtype* col_data = col_buffer()->mutable_gpu_data(); Dtype* col_diff = col_buffer()->mutable_gpu_diff(); const Dtype* bottom_data = bottom[i]->gpu_data(); Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); for (int n = 0; n < num_; ++n) { // Since we saved memory in the forward pass by not storing all col // data, we will need to recompute them. im2col_sk_gpu(bottom_data + bottom[i]->offset(n), channels_, height_, width_, kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, kstride_h_, kstride_w_, col_data); // gradient w.r.t. weight. Note that we will accumulate diffs. if (this->param_propagate_down_[0]) { for (int g = 0; g < group_; ++g) { caffe_gpu_gemm<Dtype>( CblasNoTrans, CblasTrans, M_, K_, N_, (Dtype) 1., top_diff + top[i]->offset(n) + top_offset * g, col_data + col_offset * g, (Dtype) 1., weight_diff + weight_offset * g); } } // gradient w.r.t. bottom data, if necessary if (propagate_down[i]) { for (int g = 0; g < group_; ++g) { caffe_gpu_gemm<Dtype>( CblasTrans, CblasNoTrans, K_, N_, M_, (Dtype) 1., weight + weight_offset * g, top_diff + top[i]->offset(n) + top_offset * g, (Dtype) 0., col_diff + col_offset * g); } // col2im back to the data col2im_sk_gpu(col_diff, channels_, height_, width_, kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, kstride_h_, kstride_w_, bottom_diff + bottom[i]->offset(n)); } } } } #endif } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_context_->id()); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( this->device_context_->id()); cl_mem weight = NULL; cl_mem weight_diff = NULL; if (this->param_propagate_down_[0]) { weight = (cl_mem) (this->blobs_[0]->gpu_data()); weight_diff = (cl_mem) (this->blobs_[0]->mutable_gpu_diff()); greentea_gpu_set(this->device_context_->id(), this->blobs_[0]->count(), Dtype(0), weight_diff, 0); } cl_mem bias_diff = NULL; if (bias_term_ && this->param_propagate_down_[1]) { bias_diff = (cl_mem) (this->blobs_[1]->mutable_gpu_diff()); greentea_gpu_set(this->device_context_->id(), this->blobs_[1]->count(), Dtype(0), bias_diff, 0); } const int weight_offset = M_ * K_; const int col_offset = K_ * N_; const int top_offset = M_ * N_; for (int i = 0; i < top.size(); ++i) { cl_mem top_diff = NULL; // Bias gradient, if necessary. 
if (bias_term_ && this->param_propagate_down_[1]) { top_diff = (cl_mem) (top[i]->gpu_diff()); for (int n = 0; n < num_; ++n) { greentea_gpu_gemv(this->device_context_->id(), CblasNoTrans, num_output_, N_, (Dtype) 1., top_diff, top[0]->offset(n), (cl_mem) (bias_multiplier_.gpu_data()), 0, (Dtype) 1., bias_diff, 0); } } if (this->param_propagate_down_[0] || propagate_down[i]) { if (!top_diff) { top_diff = (cl_mem) (top[i]->gpu_diff()); } cl_mem col_data = (cl_mem) (col_buffer()->mutable_gpu_data()); cl_mem col_diff = (cl_mem) (col_buffer()->mutable_gpu_diff()); const cl_mem bottom_data = (cl_mem) (bottom[i]->gpu_data()); cl_mem bottom_diff = (cl_mem) (bottom[i]->mutable_gpu_diff()); for (int n = 0; n < num_; ++n) { // Since we saved memory in the forward pass by not storing all col // data, we will need to recompute them. greentea_im2col_sk_gpu<Dtype>(&program, &ctx, bottom_data, bottom[i]->offset(n), channels_, height_, width_, kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, kstride_h_, kstride_w_, col_data); // gradient w.r.t. weight. Note that we will accumulate diffs. if (this->param_propagate_down_[0]) { for (int g = 0; g < group_; ++g) { greentea_gpu_gemm<Dtype>(this->device_context_->id(), CblasNoTrans, CblasTrans, M_, K_, N_, (Dtype) 1., top_diff, top[i]->offset(n) + top_offset * g, col_data, col_offset * g, (Dtype) 1., weight_diff, weight_offset * g); } } // gradient w.r.t. bottom data, if necessary if (propagate_down[i]) { for (int g = 0; g < group_; ++g) { greentea_gpu_gemm<Dtype>(this->device_context_->id(), CblasTrans, CblasNoTrans, K_, N_, M_, (Dtype) 1., weight, weight_offset * g, top_diff, top[i]->offset(n) + top_offset * g, (Dtype) 0., col_diff, col_offset * g); } // col2im back to the data greentea_col2im_sk_gpu<Dtype>(&program, &ctx, col_diff, channels_, height_, width_, kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, kstride_h_, kstride_w_, bottom_diff, bottom[i]->offset(n)); } } } } #endif } } template<typename Dtype> shared_ptr< Blob<Dtype> > ConvolutionSKLayer<Dtype>::col_buffer() { return this->device_context_-> template Buffer<Dtype>(this->device_context_->current_queue_id()); } INSTANTIATE_LAYER_GPU_FUNCS(ConvolutionSKLayer); } // namespace caffe
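// The bias gradient above is a GEMV of top_diff (num_output_ x N_) against
// bias_multiplier_ (which Caffe conventionally fills with ones) using beta = 1, i.e.
// an accumulated row sum per output channel. A minimal CPU reference of that
// reduction, illustrative only and not part of Caffe.
void accumulate_bias_diff(const float* top_diff, int num_output, int N, float* bias_diff)
{
    for (int c = 0; c < num_output; ++c) {
        float sum = 0.0f;
        for (int n = 0; n < N; ++n) {
            sum += top_diff[c * N + n];
        }
        bias_diff[c] += sum;  // accumulate, matching beta = 1 in the gemv call above
    }
}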
926cbe36c8b897c1cca8eb20102de918d94cc73a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* ============================================================================ Name : ergasia4_final.cu Author : Christophoros Bekos ([email protected]) Version : Copyright : @ Description : CUDA compute reciprocals ============================================================================ */ #include <iostream> #include <numeric> #include <stdlib.h> #include <stdio.h> #include <sys/time.h> #define threads_per_warp 32 #define num_of_threads 256 __device__ void sigmoid(float& z) { z = 1.0 / (1.0 + exp(-(z))); } __device__ void backpropagate_some_cols(float* result, int rows_per_block, int col_length, float* matrix, float* vector, int last_block, int size, float* sigm_der) { // README : // each block uses rows threads // each block modifies rows columns ( cols columns per block) // each thread modifies one column , column's length is col_length // cols : number of columns that this block will modify // one last block has less job to do, this one takes parameter last_block == 1 // and size (after index exceeds size in last block, no computation must be made) int thread_id = threadIdx.y * blockDim.x + threadIdx.x; int block_id = blockIdx.x; extern __shared__ float shared[]; float* temp = shared; float* m = &temp[rows_per_block]; float* v = &m[col_length * rows_per_block]; float* res = &v[col_length * rows_per_block]; // move data in shared memory for (int i = thread_id * col_length; i < thread_id * col_length + col_length; i++) { m[i] = matrix[i]; } v[thread_id] = 0; v[thread_id] = vector[thread_id] * (thread_id < col_length); __syncthreads(); int cnt = 0; for (int i = thread_id * col_length; i < thread_id * col_length + col_length; i++) { m[i] = m[i] * v[cnt]; cnt++; } __syncthreads(); temp[thread_id] = 0; for (int i = thread_id * col_length; i < thread_id * col_length + col_length; i++) { temp[thread_id] += m[i]; } __syncthreads(); result[thread_id] = temp[thread_id] * sigm_der[thread_id]; } __global__ void backpropagate(float* result, int rows_per_block, int col_length, float* matrix, float* vector, int last_block, int size, float* sigm_der) { int block_id = blockIdx.y * gridDim.x + blockIdx.x; int thread_id = threadIdx.y * blockDim.x + threadIdx.x; backpropagate_some_cols(&result[block_id * rows_per_block], rows_per_block, col_length, &matrix[block_id * rows_per_block], vector, (block_id == last_block), size, &sigm_der[block_id * rows_per_block]); } __device__ void hadamard_product_small(float* sh_a, float* sh_b, float* sh_res, int multiplier, int size) { int thread_id = threadIdx.y * blockDim.x + threadIdx.x; // start the computations for (int i = thread_id * multiplier; i < thread_id * multiplier + multiplier; i++) { sh_res[i] = sh_b[i] * sh_a[i] * ((int) (i < size)); } // result is stored in sh_b vector\ //done } __device__ void array_sum_small(float* sha, float& result, int size, int start) { int thread_id = threadIdx.y * blockDim.x + threadIdx.x; // start the computations for (int i = threads_per_warp; i < num_of_threads; i = i * 2) { // switch 1 : even warps add their's neighbors contents switch ((int) floor(thread_id / (double) i) % 2) { case 0: // thread_id % i == even // add the "more next vector" sha[thread_id] = sha[thread_id] + sha[i + thread_id] * ((int) (start + thread_id + i < size)); break; default: // thread_id % i == odd // do nothing break; } __syncthreads(); // switch2 : odd warps clean up their content switch ((int) floor(thread_id / (double) i) % 2) { case 0: // thread_id % i == 
even // do nothing break; default: // thread_id % i == odd // clean up sha[thread_id] = 0; //__syncthreads(); break; } __syncthreads(); } // loop ended, sha[0:threads_per_warp] got the sum if (thread_id == 0) { for (int i = 0; i < threads_per_warp; i++) { result = result + sha[i]; sha[i] = 0; } } } __device__ void mull_feedforward_one_col(float* result, int rows, int cols, float* matrix, float* vector, int multiplier, int size, float bias, float* sigm_der) { int thread_id = threadIdx.y * blockDim.x + threadIdx.x; int block_id = blockIdx.y * gridDim.x + blockIdx.x; extern __shared__ float shared[]; float* temp = shared; float* m = &temp[rows * multiplier]; float* v = &m[rows * multiplier]; float* res = &v[rows * multiplier]; for (int i = thread_id * multiplier; i < thread_id * multiplier + multiplier; i++) { m[i] = matrix[i] * ((i < size)); } for (int i = thread_id * multiplier; i < thread_id * multiplier + multiplier; i++) { v[i] = vector[i] * ((i < size)); } for (int i = thread_id * multiplier; i < thread_id * multiplier + multiplier; i++) { res[i] = 0.0; } for (int i = thread_id * multiplier; i < thread_id * multiplier + multiplier; i++) { temp[i] = 0.0; } __syncthreads(); hadamard_product_small(m, v, temp, multiplier, size); __syncthreads(); for (int i = multiplier - 1; i >= 0; i--) { array_sum_small(&temp[i * num_of_threads], res[0], size, (i * num_of_threads)); __syncthreads(); } if (thread_id == 0) { float tmp = (res[thread_id] + bias); sigmoid(tmp); result[block_id] = tmp; sigm_der[block_id] = tmp * (1 - tmp); } } __global__ void feedforward(float* result, int rows, int cols, float* matrix, float* vector, int multiplier, int size, float* biases, float* sigm_der) { int block_id = blockIdx.y * gridDim.x + blockIdx.x; mull_feedforward_one_col(result, rows, cols, &matrix[block_id * size], vector, multiplier, size, biases[block_id], sigm_der); } __global__ void compute_d_L(float* a, float* y, float* sigm_der, float* d_L) { extern __shared__ float shared[]; int thread_id = threadIdx.y * blockDim.x + threadIdx.x; shared[thread_id] = a[thread_id]; shared[thread_id] = shared[thread_id] - y[thread_id]; shared[thread_id] = shared[thread_id] * sigm_der[thread_id]; d_L[thread_id] = shared[thread_id]; } void cpu_feedforward(float* a_old, int rows, int cols, float** a_new, float* w, float* b, float* sigm_der_result); void train(int num_of_layers, int* s, float** w, float** b, float** alfa, float** delta, float** sigm_derivative); float getRandom(int min, int max); float* transformOutput(int output, int size); float* cost_derivative(float* a, float* y, int size); float* mull_feedforward(int rows, int cols, float* matrix, float* vector); float* hadamard_product(int size, float* a, float* b); void cpu_backpropagate(float* d_L, int rows, int cols, float** d_new, float* sigm_der, float* w); void cuda_train(int num_of_layers, int* s, float** w, float** b, float** alfa, float** delta, float** sigm_derivative,float* gpu_y,int* rows_for_backprop,hipStream_t default_stream); int main(void) { // SECTION 1 : // define network's size : int num_of_layers = 3; int* s = new int[num_of_layers]; // size of layers int* rows_for_backprop = new int[num_of_layers - 1]; rows_for_backprop[0] = 0; // always zero , it's not used rows_for_backprop[1] = 100; s[0] = 784; s[1] = 30; s[2] = 10; // SECTION 2 : // define network's structures float **w, **gpu_w; float **b, **gpu_b, **sigm_derivative, **gpu_sigm_derivative, **delta, **gpu_delta, **alfa, **gpu_alfa; float* gpu_y; hipMalloc((void**) &gpu_y, sizeof(float) * 
(s[num_of_layers - 1])); //float **c_w, **c_b; w = new float*[num_of_layers]; gpu_w = new float*[num_of_layers]; //c_w = new float*[num_of_layers]; b = new float*[num_of_layers]; gpu_b = new float*[num_of_layers]; //c_b = new float*[num_of_layers]; delta = new float*[num_of_layers]; sigm_derivative = new float*[num_of_layers]; alfa = new float*[num_of_layers]; gpu_delta = new float*[num_of_layers]; gpu_sigm_derivative = new float*[num_of_layers]; gpu_alfa = new float*[num_of_layers]; alfa[0] = new float[s[0]]; hipMalloc((void**) &gpu_alfa[0], sizeof(float) * (s[0])); w[0] = NULL; b[0] = NULL; gpu_w[0] = NULL; gpu_b[0] = NULL; //c_w[0] = NULL; //c_b[0] = NULL; sigm_derivative[0] = NULL; delta[0] = NULL; for (int i = 1; i < num_of_layers; i++) { w[i] = new float[s[i - 1] * s[i]]; hipMalloc((void**) &gpu_w[i], sizeof(float) * (s[i - 1] * s[i])); //c_w[i] = new float[s[i - 1] * s[i]]; sigm_derivative[i] = new float[s[i]]; hipMalloc((void**) &gpu_sigm_derivative[i], sizeof(float) * (s[i])); b[i] = new float[s[i]]; hipMalloc((void**) &gpu_b[i], sizeof(float) * (s[i])); //c_b[i] = new float[s[i]]; delta[i] = new float[s[i]]; hipMalloc((void**) &gpu_delta[i], sizeof(float) * (s[i])); alfa[i] = new float[s[i]]; hipMalloc((void**) &gpu_alfa[i], sizeof(float) * (s[i])); } for (int i = 1; i < num_of_layers; i++) { for (int j = 0; j < s[i]; j++) { b[i][j] = 1; } } for (int i = 1; i < num_of_layers; i++) { for (int j = 0; j < s[i - 1] * s[i]; j++) { w[i][j] = 0.5; } } // SECTION 3 : // Cuda initial data transfer hipStream_t default_stream; hipStreamCreate(&default_stream); for (int i = 1; i < num_of_layers; i++) { hipMemcpyAsync(gpu_w[i], w[i], sizeof(float) * (s[i - 1] * s[i]), hipMemcpyHostToDevice, default_stream); hipMemcpyAsync(gpu_b[i], b[i], sizeof(float) * (s[i]), hipMemcpyHostToDevice, default_stream); } hipStreamSynchronize(default_stream); // SECTION 4 : // train function - missing : update_sums(...) and gradient_descent(...) 
(check c++ code in the other file) struct timeval t1, t2; double time_c, time_h; gettimeofday(&t1, 0); train(num_of_layers, s, w, b, alfa, delta, sigm_derivative); gettimeofday(&t2, 0); time_c = (1000000.0 * (t2.tv_sec - t1.tv_sec) + t2.tv_usec - t1.tv_usec) / 1000.0; gettimeofday(&t1, 0); cuda_train(num_of_layers, s, gpu_w, b, gpu_alfa, gpu_delta,gpu_sigm_derivative,gpu_y,rows_for_backprop,default_stream); gettimeofday(&t2, 0); time_h = (1000000.0 * (t2.tv_sec - t1.tv_sec) + t2.tv_usec - t1.tv_usec) / 1000.0; printf("gpu time %0.6f , cpu time %0.6f \n", time_c, time_h); printf("Accelaration %0.6f %\n", ((time_h / time_c) * 100)); printf("success\n"); return 0; } void cuda_train(int num_of_layers, int* s, float** w, float** b, float** alfa, float** delta, float** sigm_derivative,float* gpu_y,int* rows_for_backprop,hipStream_t default_stream) { // float learning_rate = 0.5; int epochs = 1; int batch_size = 1; int yd = 0; float* y, *cost; int blocks = 0; int numofthreads = 256; int multiplier; float cache = 11000 * sizeof(float); float* a = new float[s[0]]; int num_of_blocks; int rows_per_block; int last_block; int size_for_last_block; for (int ep = 0; ep < epochs; ep += (batch_size)) { // reset_sums(); --> NO CUDA VERSION OF IT for (int batch = 0; batch < batch_size; batch++) { // alfa[0] = read_tuple(ep + batch, &y_int); --> NO CUDA VERSION OF IT // since we don't read alfa[0] from file (in order to proper simulate it) // we will update alfa[0] with random values in each iteration // in any case, time would be wasted ,in order alfa[0] to be transfered in gpu for (int i = 0; i < s[0]; i++) { a[i] = getRandom(-1, 1); } // same goes for yd (y desired) READING VERSION FOR .CU FILE ISN'T YET CREATED yd = 0; // feedforward(&alfa[0]); hipMemcpy(alfa[0], a, sizeof(float) * (s[0]), hipMemcpyHostToDevice); for (int i = 1; i < num_of_layers; i++) { multiplier = floor(s[i - 1] / numofthreads) + 1; if (s[i - 1] < numofthreads) { multiplier = 1; } hipLaunchKernelGGL(( feedforward), dim3(s[i]), dim3(numofthreads), cache, 0, alfa[i], numofthreads, s[i], w[i], alfa[i - 1], multiplier, s[i - 1], b[i], sigm_derivative[i]); if (i == 1) { // while gpu running , compute y and store it in cuda y = transformOutput(yd, s[num_of_layers - 1]); hipMemcpyAsync(gpu_y, y, sizeof(float) * (s[num_of_layers - 1]), hipMemcpyHostToDevice, default_stream); } hipDeviceSynchronize(); // no need to copy data back -> all implementation in cuda } // wait for y copy - just to be sure - actually y copy must has been done way before you reach this statement hipStreamSynchronize (default_stream); // feedforward completed, compute cost_derivative hipLaunchKernelGGL(( compute_d_L), dim3(1), dim3(s[num_of_layers - 1]), s[num_of_layers - 1] * sizeof(float), 0, alfa[num_of_layers - 1], gpu_y, sigm_derivative[num_of_layers - 1], delta[num_of_layers - 1]); hipDeviceSynchronize(); // backpropagate the error for (int i = num_of_layers - 2; i > 0; i--) { rows_per_block = rows_for_backprop[i]; num_of_blocks = floor(s[i] / rows_per_block) + 1; last_block = floor(s[i] / rows_per_block); size_for_last_block = s[i] - floor(s[i] / rows_per_block) * rows_per_block; hipLaunchKernelGGL(( backpropagate), dim3(num_of_blocks), dim3(rows_per_block), cache, 0, delta[i], rows_per_block, s[i+1], w[i + 1],delta[i + 1], last_block, size_for_last_block,sigm_derivative[i - 1]); hipDeviceSynchronize(); } // update_sums(); --> NO CUDA VERSION OF IT } // gradient_descent(learning_rate, batch_size); --> NO CUDA VERSION OF IT } } void train(int num_of_layers, int* s, 
float** w, float** b, float** alfa, float** delta, float** sigm_derivative) { // float learning_rate = 0.5; int epochs = 1; int batch_size = 1; int yd = 0; float* y, *cost; for (int ep = 0; ep < epochs; ep += (batch_size)) { // reset_sums(); --> NO CUDA VERSION OF IT for (int batch = 0; batch < batch_size; batch++) { // alfa[0] = read_tuple(ep + batch, &y_int); --> NO CUDA VERSION OF IT // since we don't read alfa[0] from file (in order to proper simulate it) // we will update alfa[0] with random values in each iteration // in any case, time would be wasted ,in order alfa[0] to be transfered in gpu for (int i = 0; i < s[0]; i++) { alfa[0][i] = getRandom(-1, 1); } // same goes for yd (y desired) READING VERSION FOR .CU FILE ISN'T YET CREATED yd = 0; y = transformOutput(yd, s[num_of_layers - 1]); // feedforward(&alfa[0]); for (int i = 1; i < num_of_layers; i++) { cpu_feedforward(alfa[i - 1], s[i - 1], s[i], &alfa[i], w[i], b[i], sigm_derivative[i]); } // NO TIME TO WRITE A CUDA IMPLEMENTATIION FOR THEM cost = cost_derivative(alfa[num_of_layers - 1], y, s[num_of_layers - 1]); delta[num_of_layers - 1] = hadamard_product(s[num_of_layers - 1], cost, sigm_derivative[num_of_layers - 1]); // backpropagate(delta[num_of_layers-1]); for (int i = num_of_layers - 2; i > 0; i--) { cpu_backpropagate(delta[i + 1], s[i], s[i + 1], &delta[i], sigm_derivative[i], w[i + 1]); } // update_sums(); --> NO CUDA VERSION OF IT } // gradient_descent(learning_rate, batch_size); --> NO CUDA VERSION OF IT } } float getRandom(int min, int max) { return (((max - min) * ((float) rand() / (float) RAND_MAX) + min) * 100) / 100; } float* transformOutput(int output, int size) { // transforms a singleton input (named output:int) into // a vector (named result:*double) float* result = new float[size]; for (int i = 0; i < size; i++) { result[i] = 0; } result[output] = 1; return result; } float* cost_derivative(float* a, float* y, int size) { // derivative of C with respect to a (a == output layer's content ) float* result = new float[size]; for (int i = 0; i < size; i++) { result[i] = a[i] - y[i]; } return result; } // FOR FEEDFORWARD IN CPU float* hadamard_product(int size, float* a, float* b) { // returns the datamard product for vectors a and b // (return a.*b in matlab) // size = length of arrays a and b float* result = new float[size]; for (int i = 0; i < size; i++) { result[i] = a[i] * b[i]; } return result; } float* mull_feedforward(int rows, int cols, float* matrix, float* vector) { // TESTED // returns "cols x 1" vector float* temp = NULL; float* res = new float[cols]; for (int j = 0; j < cols; j++) { temp = hadamard_product(rows, &matrix[j * rows], vector); res[j] = 0; for (int i = 0; i < rows; i++) { res[j] += temp[i]; } delete[] temp; } return res; } void vector_add(int size, float* a, float* b) { for (int i = 0; i < size; i++) { a[i] += b[i]; } } float sigm(float z) { return 1.0 / (1.0 + exp(-z)); } void sigmoid(float** z, int size) { for (int i = 0; i < size; i++) { (*z)[i] = sigm(((*z)[i])); } } float* compute_z(float* a, float* w, float* b, int rows, int cols) { float* result = mull_feedforward(rows, cols, w, a); vector_add(cols, result, b); return result; } void compute_sigm_der(float* a, float* result, int size) { for (int i = 0; i < size; i++) { result[i] = a[i] * (1 - a[i]); } } void cpu_feedforward(float* a_old, int rows, int cols, float** a_new, float* w, float* b, float* sigm_der_result) { a_new[0] = compute_z(a_old, w, b, rows, cols); sigmoid(&a_new[0], cols); compute_sigm_der(a_new[0], sigm_der_result, 
cols);
}

// FOR BACKPROPAGATE
float* mull_backpropagate(int rows, int cols, float* matrix, float* vector) {
	// TESTED
	// returns "rows x 1" vector
	float* temp = NULL;
	float* res = new float[rows];
	for (int j = 0; j < rows; j++) {
		temp = hadamard_product(cols, &matrix[j * cols], vector);
		res[j] = 0;
		for (int i = 0; i < cols; i++) {
			res[j] += temp[i];
		}
		delete[] temp;
	}
	return res;
}

void cpu_backpropagate(float* d_L, int rows, int cols, float** d_new,
		float* sigm_der, float* w) {
	float* w_d;
	w_d = mull_backpropagate(rows, cols, w, d_L);
	d_new[0] = hadamard_product(rows, w_d, sigm_der);
	delete[] w_d;
}
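// cuda_train above requests 11000*sizeof(float) (44 kB) of dynamic shared memory for
// every launch, which sits close to the classic 48 kB per-block limit. A minimal
// sketch of validating such a request against the device limit before launching;
// the function name and error handling are illustrative only.
#include <cstdio>
#include <cuda_runtime.h>

bool shared_request_fits(size_t bytes_requested, int device)
{
    cudaDeviceProp prop{};
    cudaGetDeviceProperties(&prop, device);
    // sharedMemPerBlock is the per-block shared memory limit reported by the device.
    if (bytes_requested > prop.sharedMemPerBlock) {
        std::fprintf(stderr, "shared memory request %zu exceeds limit %zu\n",
                     bytes_requested, static_cast<size_t>(prop.sharedMemPerBlock));
        return false;
    }
    return true;
}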
926cbe36c8b897c1cca8eb20102de918d94cc73a.cu
/* ============================================================================ Name : ergasia4_final.cu Author : Christophoros Bekos ([email protected]) Version : Copyright : @ Description : CUDA compute reciprocals ============================================================================ */ #include <iostream> #include <numeric> #include <stdlib.h> #include <stdio.h> #include <sys/time.h> #define threads_per_warp 32 #define num_of_threads 256 __device__ void sigmoid(float& z) { z = 1.0 / (1.0 + exp(-(z))); } __device__ void backpropagate_some_cols(float* result, int rows_per_block, int col_length, float* matrix, float* vector, int last_block, int size, float* sigm_der) { // README : // each block uses rows threads // each block modifies rows columns ( cols columns per block) // each thread modifies one column , column's length is col_length // cols : number of columns that this block will modify // one last block has less job to do, this one takes parameter last_block == 1 // and size (after index exceeds size in last block, no computation must be made) int thread_id = threadIdx.y * blockDim.x + threadIdx.x; int block_id = blockIdx.x; extern __shared__ float shared[]; float* temp = shared; float* m = &temp[rows_per_block]; float* v = &m[col_length * rows_per_block]; float* res = &v[col_length * rows_per_block]; // move data in shared memory for (int i = thread_id * col_length; i < thread_id * col_length + col_length; i++) { m[i] = matrix[i]; } v[thread_id] = 0; v[thread_id] = vector[thread_id] * (thread_id < col_length); __syncthreads(); int cnt = 0; for (int i = thread_id * col_length; i < thread_id * col_length + col_length; i++) { m[i] = m[i] * v[cnt]; cnt++; } __syncthreads(); temp[thread_id] = 0; for (int i = thread_id * col_length; i < thread_id * col_length + col_length; i++) { temp[thread_id] += m[i]; } __syncthreads(); result[thread_id] = temp[thread_id] * sigm_der[thread_id]; } __global__ void backpropagate(float* result, int rows_per_block, int col_length, float* matrix, float* vector, int last_block, int size, float* sigm_der) { int block_id = blockIdx.y * gridDim.x + blockIdx.x; int thread_id = threadIdx.y * blockDim.x + threadIdx.x; backpropagate_some_cols(&result[block_id * rows_per_block], rows_per_block, col_length, &matrix[block_id * rows_per_block], vector, (block_id == last_block), size, &sigm_der[block_id * rows_per_block]); } __device__ void hadamard_product_small(float* sh_a, float* sh_b, float* sh_res, int multiplier, int size) { int thread_id = threadIdx.y * blockDim.x + threadIdx.x; // start the computations for (int i = thread_id * multiplier; i < thread_id * multiplier + multiplier; i++) { sh_res[i] = sh_b[i] * sh_a[i] * ((int) (i < size)); } // result is stored in sh_b vector\ //done } __device__ void array_sum_small(float* sha, float& result, int size, int start) { int thread_id = threadIdx.y * blockDim.x + threadIdx.x; // start the computations for (int i = threads_per_warp; i < num_of_threads; i = i * 2) { // switch 1 : even warps add their's neighbors contents switch ((int) floor(thread_id / (double) i) % 2) { case 0: // thread_id % i == even // add the "more next vector" sha[thread_id] = sha[thread_id] + sha[i + thread_id] * ((int) (start + thread_id + i < size)); break; default: // thread_id % i == odd // do nothing break; } __syncthreads(); // switch2 : odd warps clean up their content switch ((int) floor(thread_id / (double) i) % 2) { case 0: // thread_id % i == even // do nothing break; default: // thread_id % i == odd // clean up sha[thread_id] = 
0; //__syncthreads(); break; } __syncthreads(); } // loop ended, sha[0:threads_per_warp] got the sum if (thread_id == 0) { for (int i = 0; i < threads_per_warp; i++) { result = result + sha[i]; sha[i] = 0; } } } __device__ void mull_feedforward_one_col(float* result, int rows, int cols, float* matrix, float* vector, int multiplier, int size, float bias, float* sigm_der) { int thread_id = threadIdx.y * blockDim.x + threadIdx.x; int block_id = blockIdx.y * gridDim.x + blockIdx.x; extern __shared__ float shared[]; float* temp = shared; float* m = &temp[rows * multiplier]; float* v = &m[rows * multiplier]; float* res = &v[rows * multiplier]; for (int i = thread_id * multiplier; i < thread_id * multiplier + multiplier; i++) { m[i] = matrix[i] * ((i < size)); } for (int i = thread_id * multiplier; i < thread_id * multiplier + multiplier; i++) { v[i] = vector[i] * ((i < size)); } for (int i = thread_id * multiplier; i < thread_id * multiplier + multiplier; i++) { res[i] = 0.0; } for (int i = thread_id * multiplier; i < thread_id * multiplier + multiplier; i++) { temp[i] = 0.0; } __syncthreads(); hadamard_product_small(m, v, temp, multiplier, size); __syncthreads(); for (int i = multiplier - 1; i >= 0; i--) { array_sum_small(&temp[i * num_of_threads], res[0], size, (i * num_of_threads)); __syncthreads(); } if (thread_id == 0) { float tmp = (res[thread_id] + bias); sigmoid(tmp); result[block_id] = tmp; sigm_der[block_id] = tmp * (1 - tmp); } } __global__ void feedforward(float* result, int rows, int cols, float* matrix, float* vector, int multiplier, int size, float* biases, float* sigm_der) { int block_id = blockIdx.y * gridDim.x + blockIdx.x; mull_feedforward_one_col(result, rows, cols, &matrix[block_id * size], vector, multiplier, size, biases[block_id], sigm_der); } __global__ void compute_d_L(float* a, float* y, float* sigm_der, float* d_L) { extern __shared__ float shared[]; int thread_id = threadIdx.y * blockDim.x + threadIdx.x; shared[thread_id] = a[thread_id]; shared[thread_id] = shared[thread_id] - y[thread_id]; shared[thread_id] = shared[thread_id] * sigm_der[thread_id]; d_L[thread_id] = shared[thread_id]; } void cpu_feedforward(float* a_old, int rows, int cols, float** a_new, float* w, float* b, float* sigm_der_result); void train(int num_of_layers, int* s, float** w, float** b, float** alfa, float** delta, float** sigm_derivative); float getRandom(int min, int max); float* transformOutput(int output, int size); float* cost_derivative(float* a, float* y, int size); float* mull_feedforward(int rows, int cols, float* matrix, float* vector); float* hadamard_product(int size, float* a, float* b); void cpu_backpropagate(float* d_L, int rows, int cols, float** d_new, float* sigm_der, float* w); void cuda_train(int num_of_layers, int* s, float** w, float** b, float** alfa, float** delta, float** sigm_derivative,float* gpu_y,int* rows_for_backprop,cudaStream_t default_stream); int main(void) { // SECTION 1 : // define network's size : int num_of_layers = 3; int* s = new int[num_of_layers]; // size of layers int* rows_for_backprop = new int[num_of_layers - 1]; rows_for_backprop[0] = 0; // always zero , it's not used rows_for_backprop[1] = 100; s[0] = 784; s[1] = 30; s[2] = 10; // SECTION 2 : // define network's structures float **w, **gpu_w; float **b, **gpu_b, **sigm_derivative, **gpu_sigm_derivative, **delta, **gpu_delta, **alfa, **gpu_alfa; float* gpu_y; cudaMalloc((void**) &gpu_y, sizeof(float) * (s[num_of_layers - 1])); //float **c_w, **c_b; w = new float*[num_of_layers]; gpu_w = new 
float*[num_of_layers]; //c_w = new float*[num_of_layers]; b = new float*[num_of_layers]; gpu_b = new float*[num_of_layers]; //c_b = new float*[num_of_layers]; delta = new float*[num_of_layers]; sigm_derivative = new float*[num_of_layers]; alfa = new float*[num_of_layers]; gpu_delta = new float*[num_of_layers]; gpu_sigm_derivative = new float*[num_of_layers]; gpu_alfa = new float*[num_of_layers]; alfa[0] = new float[s[0]]; cudaMalloc((void**) &gpu_alfa[0], sizeof(float) * (s[0])); w[0] = NULL; b[0] = NULL; gpu_w[0] = NULL; gpu_b[0] = NULL; //c_w[0] = NULL; //c_b[0] = NULL; sigm_derivative[0] = NULL; delta[0] = NULL; for (int i = 1; i < num_of_layers; i++) { w[i] = new float[s[i - 1] * s[i]]; cudaMalloc((void**) &gpu_w[i], sizeof(float) * (s[i - 1] * s[i])); //c_w[i] = new float[s[i - 1] * s[i]]; sigm_derivative[i] = new float[s[i]]; cudaMalloc((void**) &gpu_sigm_derivative[i], sizeof(float) * (s[i])); b[i] = new float[s[i]]; cudaMalloc((void**) &gpu_b[i], sizeof(float) * (s[i])); //c_b[i] = new float[s[i]]; delta[i] = new float[s[i]]; cudaMalloc((void**) &gpu_delta[i], sizeof(float) * (s[i])); alfa[i] = new float[s[i]]; cudaMalloc((void**) &gpu_alfa[i], sizeof(float) * (s[i])); } for (int i = 1; i < num_of_layers; i++) { for (int j = 0; j < s[i]; j++) { b[i][j] = 1; } } for (int i = 1; i < num_of_layers; i++) { for (int j = 0; j < s[i - 1] * s[i]; j++) { w[i][j] = 0.5; } } // SECTION 3 : // Cuda initial data transfer cudaStream_t default_stream; cudaStreamCreate(&default_stream); for (int i = 1; i < num_of_layers; i++) { cudaMemcpyAsync(gpu_w[i], w[i], sizeof(float) * (s[i - 1] * s[i]), cudaMemcpyHostToDevice, default_stream); cudaMemcpyAsync(gpu_b[i], b[i], sizeof(float) * (s[i]), cudaMemcpyHostToDevice, default_stream); } cudaStreamSynchronize(default_stream); // SECTION 4 : // train function - missing : update_sums(...) and gradient_descent(...) 
// (check c++ code in the other file)

	struct timeval t1, t2;
	double time_c, time_h;

	// time_h: host (CPU) training time, time_c: CUDA (GPU) training time
	gettimeofday(&t1, 0);
	train(num_of_layers, s, w, b, alfa, delta, sigm_derivative);
	gettimeofday(&t2, 0);
	time_h = (1000000.0 * (t2.tv_sec - t1.tv_sec) + t2.tv_usec - t1.tv_usec) / 1000.0;

	gettimeofday(&t1, 0);
	cuda_train(num_of_layers, s, gpu_w, b, gpu_alfa, gpu_delta, gpu_sigm_derivative,
			gpu_y, rows_for_backprop, default_stream);
	gettimeofday(&t2, 0);
	time_c = (1000000.0 * (t2.tv_sec - t1.tv_sec) + t2.tv_usec - t1.tv_usec) / 1000.0;

	printf("gpu time %0.6f , cpu time %0.6f \n", time_c, time_h);
	printf("Acceleration %0.6f %%\n", ((time_h / time_c) * 100));
	printf("success\n");
	return 0;
}

void cuda_train(int num_of_layers, int* s, float** w, float** b, float** alfa,
		float** delta, float** sigm_derivative, float* gpu_y,
		int* rows_for_backprop, cudaStream_t default_stream) {
	// float learning_rate = 0.5;
	int epochs = 1;
	int batch_size = 1;
	int yd = 0;
	float* y, *cost;
	int blocks = 0;
	int numofthreads = 256;
	int multiplier;
	float cache = 11000 * sizeof(float);
	float* a = new float[s[0]];
	int num_of_blocks;
	int rows_per_block;
	int last_block;
	int size_for_last_block;
	for (int ep = 0; ep < epochs; ep += (batch_size)) {
		// reset_sums(); --> NO CUDA VERSION OF IT
		for (int batch = 0; batch < batch_size; batch++) {
			// alfa[0] = read_tuple(ep + batch, &y_int); --> NO CUDA VERSION OF IT
			// since we don't read alfa[0] from a file (in order to simulate it properly),
			// we update alfa[0] with random values in each iteration;
			// in any case, time would still be spent transferring alfa[0] to the gpu
			for (int i = 0; i < s[0]; i++) {
				a[i] = getRandom(-1, 1);
			}
			// same goes for yd (y desired): a reading version for the .cu file isn't created yet
			yd = 0;
			// feedforward(&alfa[0]);
			cudaMemcpy(alfa[0], a, sizeof(float) * (s[0]), cudaMemcpyHostToDevice);
			for (int i = 1; i < num_of_layers; i++) {
				multiplier = floor(s[i - 1] / numofthreads) + 1;
				if (s[i - 1] < numofthreads) {
					multiplier = 1;
				}
				feedforward<<<s[i], numofthreads, cache>>>(alfa[i], numofthreads,
						s[i], w[i], alfa[i - 1], multiplier, s[i - 1], b[i],
						sigm_derivative[i]);
				if (i == 1) {
					// while the gpu is running, compute y and store it on the device
					y = transformOutput(yd, s[num_of_layers - 1]);
					cudaMemcpyAsync(gpu_y, y, sizeof(float) * (s[num_of_layers - 1]),
							cudaMemcpyHostToDevice, default_stream);
				}
				cudaDeviceSynchronize();
				// no need to copy data back -> the whole implementation stays in cuda
			}
			// wait for the y copy - just to be sure; in practice the copy should have
			// completed long before this statement is reached
			cudaStreamSynchronize(default_stream);
			// feedforward completed, compute cost_derivative
			compute_d_L<<<1, s[num_of_layers - 1], s[num_of_layers - 1] * sizeof(float)>>>(
					alfa[num_of_layers - 1], gpu_y, sigm_derivative[num_of_layers - 1],
					delta[num_of_layers - 1]);
			cudaDeviceSynchronize();
			// backpropagate the error
			for (int i = num_of_layers - 2; i > 0; i--) {
				rows_per_block = rows_for_backprop[i];
				num_of_blocks = floor(s[i] / rows_per_block) + 1;
				last_block = floor(s[i] / rows_per_block);
				size_for_last_block = s[i] - floor(s[i] / rows_per_block) * rows_per_block;
				// use the sigmoid derivative of layer i, matching cpu_backpropagate() below
				backpropagate<<<num_of_blocks, rows_per_block, cache>>>(delta[i],
						rows_per_block, s[i + 1], w[i + 1], delta[i + 1], last_block,
						size_for_last_block, sigm_derivative[i]);
				cudaDeviceSynchronize();
			}
			// update_sums(); --> NO CUDA VERSION OF IT
		}
		// gradient_descent(learning_rate, batch_size); --> NO CUDA VERSION OF IT
	}
}

void train(int num_of_layers, int* s, float** w, float** b, float** alfa,
		float** delta, float** sigm_derivative) {
	// float learning_rate = 0.5;
	int epochs = 1;
	int batch_size = 1;
	int yd = 0;
	float* y, *cost;
	for (int ep = 0; ep < epochs; ep += (batch_size)) {
		// reset_sums(); --> NO CUDA VERSION OF IT
		for (int batch = 0; batch < batch_size; batch++) {
			// alfa[0] = read_tuple(ep + batch, &y_int); --> NO CUDA VERSION OF IT
			// since we don't read alfa[0] from a file (in order to simulate it properly),
			// we update alfa[0] with random values in each iteration;
			// in any case, time would still be spent transferring alfa[0] to the gpu
			for (int i = 0; i < s[0]; i++) {
				alfa[0][i] = getRandom(-1, 1);
			}
			// same goes for yd (y desired): a reading version for the .cu file isn't created yet
			yd = 0;
			y = transformOutput(yd, s[num_of_layers - 1]);
			// feedforward(&alfa[0]);
			for (int i = 1; i < num_of_layers; i++) {
				cpu_feedforward(alfa[i - 1], s[i - 1], s[i], &alfa[i], w[i], b[i],
						sigm_derivative[i]);
			}
			// NO TIME TO WRITE A CUDA IMPLEMENTATION FOR THEM
			cost = cost_derivative(alfa[num_of_layers - 1], y, s[num_of_layers - 1]);
			delta[num_of_layers - 1] = hadamard_product(s[num_of_layers - 1], cost,
					sigm_derivative[num_of_layers - 1]);
			// backpropagate(delta[num_of_layers-1]);
			for (int i = num_of_layers - 2; i > 0; i--) {
				cpu_backpropagate(delta[i + 1], s[i], s[i + 1], &delta[i],
						sigm_derivative[i], w[i + 1]);
			}
			// update_sums(); --> NO CUDA VERSION OF IT
		}
		// gradient_descent(learning_rate, batch_size); --> NO CUDA VERSION OF IT
	}
}

float getRandom(int min, int max) {
	return (((max - min) * ((float) rand() / (float) RAND_MAX) + min) * 100) / 100;
}

float* transformOutput(int output, int size) {
	// transforms a singleton input (named output:int) into
	// a vector (named result:*double)
	float* result = new float[size];
	for (int i = 0; i < size; i++) {
		result[i] = 0;
	}
	result[output] = 1;
	return result;
}

float* cost_derivative(float* a, float* y, int size) {
	// derivative of C with respect to a (a == output layer's content)
	float* result = new float[size];
	for (int i = 0; i < size; i++) {
		result[i] = a[i] - y[i];
	}
	return result;
}

// FOR FEEDFORWARD IN CPU
float* hadamard_product(int size, float* a, float* b) {
	// returns the Hadamard product of vectors a and b
	// (return a.*b in matlab)
	// size = length of arrays a and b
	float* result = new float[size];
	for (int i = 0; i < size; i++) {
		result[i] = a[i] * b[i];
	}
	return result;
}

float* mull_feedforward(int rows, int cols, float* matrix, float* vector) {
	// TESTED
	// returns "cols x 1" vector
	float* temp = NULL;
	float* res = new float[cols];
	for (int j = 0; j < cols; j++) {
		temp = hadamard_product(rows, &matrix[j * rows], vector);
		res[j] = 0;
		for (int i = 0; i < rows; i++) {
			res[j] += temp[i];
		}
		delete[] temp;
	}
	return res;
}

void vector_add(int size, float* a, float* b) {
	for (int i = 0; i < size; i++) {
		a[i] += b[i];
	}
}

float sigm(float z) {
	return 1.0 / (1.0 + exp(-z));
}

void sigmoid(float** z, int size) {
	for (int i = 0; i < size; i++) {
		(*z)[i] = sigm(((*z)[i]));
	}
}

float* compute_z(float* a, float* w, float* b, int rows, int cols) {
	float* result = mull_feedforward(rows, cols, w, a);
	vector_add(cols, result, b);
	return result;
}

void compute_sigm_der(float* a, float* result, int size) {
	for (int i = 0; i < size; i++) {
		result[i] = a[i] * (1 - a[i]);
	}
}

void cpu_feedforward(float* a_old, int rows, int cols, float** a_new, float* w,
		float* b, float* sigm_der_result) {
	a_new[0] = compute_z(a_old, w, b, rows, cols);
	sigmoid(&a_new[0], cols);
	compute_sigm_der(a_new[0], sigm_der_result, cols);
}

// FOR BACKPROPAGATE
float* mull_backpropagate(int rows, int cols, float* matrix,
		float* vector) {
	// TESTED
	// returns "rows x 1" vector
	float* temp = NULL;
	float* res = new float[rows];
	for (int j = 0; j < rows; j++) {
		temp = hadamard_product(cols, &matrix[j * cols], vector);
		res[j] = 0;
		for (int i = 0; i < cols; i++) {
			res[j] += temp[i];
		}
		delete[] temp;
	}
	return res;
}

void cpu_backpropagate(float* d_L, int rows, int cols, float** d_new,
		float* sigm_der, float* w) {
	float* w_d;
	w_d = mull_backpropagate(rows, cols, w, d_L);
	d_new[0] = hadamard_product(rows, w_d, sigm_der);
	delete[] w_d;
}
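
/*
 * A minimal cross-check sketch, not part of the original program: since the
 * file keeps both a GPU feedforward kernel and a CPU reference
 * (cpu_feedforward), one way to spot divergence between them is to run a single
 * layer through both paths and compare element-wise. The helper name
 * check_feedforward_layer and the tolerance parameter are illustrative
 * assumptions; the launch configuration mirrors the one used in cuda_train().
 */
bool check_feedforward_layer(int s_in, int s_out, float* h_w, float* h_b,
		float* h_a_in, float tolerance) {
	int numofthreads = 256;
	float cache = 11000 * sizeof(float);
	int multiplier = (s_in < numofthreads) ? 1 : (s_in / numofthreads) + 1;

	// CPU reference for this layer
	float* a_cpu = NULL;
	float* sigm_der_cpu = new float[s_out];
	cpu_feedforward(h_a_in, s_in, s_out, &a_cpu, h_w, h_b, sigm_der_cpu);

	// GPU version of the same layer
	float *d_w, *d_b, *d_a_in, *d_a_out, *d_sigm_der;
	cudaMalloc((void**) &d_w, sizeof(float) * s_in * s_out);
	cudaMalloc((void**) &d_b, sizeof(float) * s_out);
	cudaMalloc((void**) &d_a_in, sizeof(float) * s_in);
	cudaMalloc((void**) &d_a_out, sizeof(float) * s_out);
	cudaMalloc((void**) &d_sigm_der, sizeof(float) * s_out);
	cudaMemcpy(d_w, h_w, sizeof(float) * s_in * s_out, cudaMemcpyHostToDevice);
	cudaMemcpy(d_b, h_b, sizeof(float) * s_out, cudaMemcpyHostToDevice);
	cudaMemcpy(d_a_in, h_a_in, sizeof(float) * s_in, cudaMemcpyHostToDevice);

	feedforward<<<s_out, numofthreads, cache>>>(d_a_out, numofthreads, s_out,
			d_w, d_a_in, multiplier, s_in, d_b, d_sigm_der);
	cudaDeviceSynchronize();

	float* a_gpu = new float[s_out];
	cudaMemcpy(a_gpu, d_a_out, sizeof(float) * s_out, cudaMemcpyDeviceToHost);

	// element-wise comparison; this only reports divergence, it does not
	// assert which path is the correct one
	bool ok = true;
	for (int i = 0; i < s_out; i++) {
		float diff = a_gpu[i] - a_cpu[i];
		if (diff < 0) diff = -diff;
		if (diff > tolerance) {
			printf("mismatch at %d: gpu=%f cpu=%f\n", i, a_gpu[i], a_cpu[i]);
			ok = false;
		}
	}

	delete[] a_cpu;
	delete[] sigm_der_cpu;
	delete[] a_gpu;
	cudaFree(d_w); cudaFree(d_b); cudaFree(d_a_in);
	cudaFree(d_a_out); cudaFree(d_sigm_der);
	return ok;
}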
ccc0a64f7b940e7516cfea1ba13406b16fae0bf1.hip
// !!! This is a file automatically generated by hipify!!! /* * SynapticComm.cu * * Created on: 10/12/2010 * Author: rcamargo */ #include "Connections.hpp" #include <cstdio> #include <cassert> #include <hip/hip_runtime.h> // Necessary to allow better eclipse integration #include <hip/hip_runtime_api.h> // Necessary to allow better eclipse integration #include <device_launch_parameters.h> // Necessary to allow better eclipse integration #include <hip/device_functions.h> // Necessary to allow better eclipse integration extern void checkCUDAError(const char *msg); // TODO: remove connections from Connections ConnGpu* createGpuConnections( Connections *conn, int destType, int nTypes, int *nNeurons, int nGroups ) { // Contains the structures with the connections for each neuron group ConnGpu *connGpuTypeHost = (ConnGpu *)malloc(nGroups*sizeof(ConnGpu)); int nConnectionsTotal[nGroups]; int nNeuronsPerGroup = nNeurons[destType]/nGroups; int nGroupsExtraNeuron = (nNeurons[destType] % nGroups); int nNeuronsExtraGroups = nGroupsExtraNeuron * (nNeuronsPerGroup + 1); int nNeuronsInPreviousGroups = 0; for (int group=0; group<nGroups; group++) { ConnGpu & connGpu = connGpuTypeHost[group]; nConnectionsTotal[group] = 0; connGpu.nNeuronsGroup = nNeuronsPerGroup; connGpu.nNeuronsInPreviousGroups = nNeuronsInPreviousGroups; if ( group < nGroupsExtraNeuron ) connGpu.nNeuronsGroup++; nNeuronsInPreviousGroups += connGpu.nNeuronsGroup; } /** * Counts the total number of connections for the group */ for (int srcType=0; srcType < nTypes; srcType++) { for (int neuron=0; neuron < nNeurons[srcType]; neuron++) { std::vector<Conn> & connList = conn->getConnArray(neuron + srcType*CONN_NEURON_TYPE); for (int conn=0; conn<connList.size(); conn++) { if (connList[conn].dest / CONN_NEURON_TYPE == destType) { int destNeuron = connList[conn].dest % CONN_NEURON_TYPE; int group = destNeuron / nNeuronsPerGroup; if (nNeurons[destType] % nGroups != 0) { if (destNeuron < nNeuronsExtraGroups) group = destNeuron / (nNeuronsPerGroup+1); else group = nGroupsExtraNeuron + ((destNeuron - nNeuronsExtraGroups) / nNeuronsPerGroup); } nConnectionsTotal[group]++; // assert (destNeuron >= connGpuTypeHost[group].nNeuronsInPreviousGroups); // assert (destNeuron < connGpuTypeHost[group].nNeuronsInPreviousGroups + connGpuTypeHost[group].nNeuronsGroup); } } } } for (int group=0; group<nGroups; group++) { ConnGpu & connGpu = connGpuTypeHost[group]; connGpu.nConnectionsTotal = nConnectionsTotal[group]; checkCUDAError("Allocation error 0 at [SynapticComm.cu]:"); /** * Allocates the memory to keep the connection information in the GPU and CPU */ hipMalloc( (void **) &(connGpu.srcDevice), nConnectionsTotal[group]*sizeof(int) ); connGpu.srcHost = (int *)malloc( nConnectionsTotal[group]*sizeof(int) ); checkCUDAError("Allocation error 1 at [SynapticComm.cu]:"); hipMalloc( (void **) &(connGpu.destDevice), nConnectionsTotal[group]*sizeof(int) ); connGpu.destHost = (int *)malloc( nConnectionsTotal[group]*sizeof(int) ); checkCUDAError("Allocation error 2 at [SynapticComm.cu]:"); hipMalloc( (void **) &(connGpu.synapseDevice), nConnectionsTotal[group]*sizeof(ucomp) ); connGpu.synapseHost = (ucomp *)malloc( nConnectionsTotal[group]*sizeof(ucomp) ); checkCUDAError("Allocation error 3 at [SynapticComm.cu]:"); hipMalloc( (void **) &(connGpu.weightDevice), nConnectionsTotal[group]*sizeof(ftype) ); connGpu.weightHost = (ftype *)malloc( nConnectionsTotal[group]*sizeof(ftype) ); checkCUDAError("Allocation error 4 at [SynapticComm.cu]:"); hipMalloc( (void **) 
&(connGpu.delayDevice), nConnectionsTotal[group]*sizeof(ftype) ); connGpu.delayHost = (ftype *)malloc( nConnectionsTotal[group]*sizeof(ftype) ); checkCUDAError("Allocation error 5 at [SynapticComm.cu]:"); } /** * Copies the connection info data to the host memory */ int memPosList[nGroups]; for (int group=0; group<nGroups; group++) memPosList[group] = 0; for (int srcType=0; srcType < nTypes; srcType++) { for (int neuron=0; neuron < nNeurons[srcType]; neuron++) { std::vector<Conn> & connList = conn->getConnArray(neuron + srcType*CONN_NEURON_TYPE); for (int conn=0; conn<connList.size(); conn++) { if (connList[conn].dest / CONN_NEURON_TYPE == destType) { Conn & connStr = connList[conn]; int destNeuron = connStr.dest % CONN_NEURON_TYPE; int group = destNeuron / nNeuronsPerGroup; if (nNeurons[destType] % nGroups != 0) { if (destNeuron < nNeuronsExtraGroups) group = destNeuron / (nNeuronsPerGroup+1); else group = nGroupsExtraNeuron + ((destNeuron - nNeuronsExtraGroups) / nNeuronsPerGroup); } ConnGpu & connGpu = connGpuTypeHost[group]; int memPos = memPosList[group]; connGpu.srcHost[memPos] = neuron + srcType*CONN_NEURON_TYPE; connGpu.destHost[memPos] = connStr.dest; // TODO: can move to another vector connGpu.synapseHost[memPos] = connStr.synapse; // TODO: can move to another vector connGpu.weightHost[memPos] = connStr.weigth; // TODO: can move to another vector connGpu.delayHost[memPos] = connStr.delay; // TODO: can move to another vector memPosList[group]++; } } } } /** * Copies the connection info data to the device memory */ for (int group=0; group<nGroups; group++) { assert (memPosList[group] == nConnectionsTotal[group]); ConnGpu & connGpu = connGpuTypeHost[group]; hipMemcpy( connGpu.srcDevice, connGpu.srcHost, nConnectionsTotal[group]*sizeof(int), hipMemcpyHostToDevice); hipMemcpy( connGpu.destDevice, connGpu.destHost, nConnectionsTotal[group]*sizeof(int), hipMemcpyHostToDevice); hipMemcpy( connGpu.synapseDevice, connGpu.synapseHost, nConnectionsTotal[group]*sizeof(ucomp), hipMemcpyHostToDevice); hipMemcpy( connGpu.weightDevice, connGpu.weightHost, nConnectionsTotal[group]*sizeof(ftype), hipMemcpyHostToDevice); hipMemcpy( connGpu.delayDevice, connGpu.delayHost, nConnectionsTotal[group]*sizeof(ftype), hipMemcpyHostToDevice); checkCUDAError("Memcopy error at [SynapticComm.cu]:"); } return connGpuTypeHost; } /** * Counts the number of spikes received at each synapse by each neuron of a given type * Used only to test if the implementation of createGpuConnections is working */ int **countReceivedSpikesCpu(ConnGpu *connGpuList, int nNeurons, int nGroups, ucomp **nGeneratedSpikes) { int nSynapses = 2; int **nReceivedSpikes = new int *[nSynapses]; nReceivedSpikes[0] = new int[nNeurons]; nReceivedSpikes[1] = new int[nNeurons]; for (int i=0; i<nNeurons; i++) { nReceivedSpikes[0][i] = 0; nReceivedSpikes[1][i] = 0; } int typeTmp = connGpuList[0].destHost[0]/CONN_NEURON_TYPE; int nConsideredSynapses = 0; int nAddedSpikes = 0; for (int group = 0; group < nGroups; group++) { ConnGpu & connGpu = connGpuList[group]; for (int iConn = 0; iConn < connGpu.nConnectionsTotal; iConn++) { assert (typeTmp == connGpu.destHost[iConn]/CONN_NEURON_TYPE); nReceivedSpikes[ connGpu.synapseHost[iConn] ][ connGpu.destHost[iConn]%CONN_NEURON_TYPE ] += nGeneratedSpikes[ connGpu.srcHost[iConn]/CONN_NEURON_TYPE ][ connGpu.srcHost[iConn]%CONN_NEURON_TYPE ]; nConsideredSynapses++; nAddedSpikes += nGeneratedSpikes[ connGpu.srcHost[iConn]/CONN_NEURON_TYPE ][ connGpu.srcHost[iConn]%CONN_NEURON_TYPE ]; } //printf("###\n"); } printf 
("nConsideredSynapses = %d nAddedSpikes = %d\n", nConsideredSynapses, nAddedSpikes); //printf("###\n"); return nReceivedSpikes; } /** * Count the number of spikes delivered to each neuron * TODO: change ConnGpu connGpuDev to reference */ __device__ void countReceivedSpikesG(int nNeurons, ConnGpu connGpuDev, int *nReceivedSpikesShared, ucomp **nGeneratedSpikesDev, int *randomSpikeDestDev) { int rcvSpkListSize = connGpuDev.nNeuronsGroup * blockDim.x; for (int iConn = 0; iConn < connGpuDev.nConnectionsTotal; iConn += blockDim.x) { if (iConn+threadIdx.x < connGpuDev.nConnectionsTotal) { int destNeuron = (connGpuDev.destDevice[iConn+threadIdx.x]%CONN_NEURON_TYPE) - connGpuDev.nNeuronsInPreviousGroups; int threadStartPos = connGpuDev.synapseDevice[iConn+threadIdx.x] * rcvSpkListSize + threadIdx.x * connGpuDev.nNeuronsGroup; int srcNeuron = connGpuDev.srcDevice[iConn+threadIdx.x]; nReceivedSpikesShared[ threadStartPos + destNeuron ] += nGeneratedSpikesDev[ srcNeuron/CONN_NEURON_TYPE ][ srcNeuron%CONN_NEURON_TYPE ]; } } int threadStartPosRnd = threadIdx.x * connGpuDev.nNeuronsGroup; int iRnd = 0; int destNeuron = randomSpikeDestDev[ threadIdx.x ]; int maxNeuron = connGpuDev.nNeuronsInPreviousGroups + connGpuDev.nNeuronsGroup; while (destNeuron >= 0 && destNeuron < maxNeuron) { destNeuron -= connGpuDev.nNeuronsInPreviousGroups; if (destNeuron >= 0) nReceivedSpikesShared[ threadStartPosRnd + destNeuron ]++; iRnd += blockDim.x; destNeuron = randomSpikeDestDev[iRnd+threadIdx.x]; } /** * Here we consider nThreads == nNeuronsGroup; */ for (int i = 1; i < blockDim.x; i++) { if (threadIdx.x < connGpuDev.nNeuronsGroup) { nReceivedSpikesShared[threadIdx.x] += nReceivedSpikesShared[i * connGpuDev.nNeuronsGroup + threadIdx.x]; } } if (threadIdx.x < connGpuDev.nNeuronsGroup) nReceivedSpikesShared[connGpuDev.nNeuronsGroup + threadIdx.x] = 0; for (int i = 0; i < blockDim.x; i++) { if (threadIdx.x < connGpuDev.nNeuronsGroup) { nReceivedSpikesShared[connGpuDev.nNeuronsGroup + threadIdx.x] += nReceivedSpikesShared[rcvSpkListSize + i * connGpuDev.nNeuronsGroup + threadIdx.x]; } } //nReceivedSpikesShared[connGpuDev.nNeuronsInPreviousGroups + threadIdx.x] = nReceivedSpikesShared[threadIdx.x]; } /** * Move the current spikes to accommodate the new spikes */ __device__ void moveCurrentSpikesG(HinesStruct *hList, ConnGpu connGpuDev, int nNeurons, ftype *spikeListGlobal, ftype *weightListGlobal, int *spikeListPosGlobal, int *spikeListSizeGlobal, int *startPosCurr, int *startPosNew) { //int neuron = blockIdx.x * blockDim.x + threadIdx.x; int neuron = connGpuDev.nNeuronsInPreviousGroups + threadIdx.x; if (threadIdx.x >= connGpuDev.nNeuronsGroup) return; HinesStruct & h = hList[neuron]; ftype currTime = h.currStep * h.dt; startPosNew[connGpuDev.nNeuronsGroup + threadIdx.x] = startPosCurr[threadIdx.x] + startPosNew[threadIdx.x] + startPosCurr[connGpuDev.nNeuronsGroup + threadIdx.x]; startPosCurr[connGpuDev.nNeuronsGroup + threadIdx.x] = startPosCurr[threadIdx.x] + startPosNew[threadIdx.x]; startPosNew[threadIdx.x] = startPosCurr[threadIdx.x]; startPosCurr[threadIdx.x] = 0; int *synSpikeListPos = spikeListPosGlobal + neuron * h.synapseListSize; /** * Scans the spike list, copying the new generated spikes and the existing ones. */ int synapseListSize = 2; for (int syn=0; syn < synapseListSize ; syn++) { /* * Move the current spikes to the their final positions * TODO: Works only with 2 synapses, when synapse 0 is AMPA and synapse 1 is GABA!!! 
*/ ftype remThresh = currTime - (3 * (h.tau[2*syn] + h.tau[2*syn+1]) ); int synPos = syn * connGpuDev.nNeuronsGroup + threadIdx.x; if ( startPosCurr[synPos] <= synSpikeListPos[syn]) { int pos = startPosCurr[synPos]; int spk = synSpikeListPos[syn]; int lastSpk = synSpikeListPos[syn] + startPosNew[synPos] - startPosCurr[synPos]; for (; spk < lastSpk; spk++) { // Copy only the spikes not expired if (spikeListGlobal[spk * nNeurons + neuron] > remThresh) { spikeListGlobal[pos * nNeurons + neuron] = spikeListGlobal[spk * nNeurons + neuron]; weightListGlobal[pos * nNeurons + neuron] = weightListGlobal[spk * nNeurons + neuron]; pos++; } //else spikeListGlobal[spk * nNeurons + neuron] = 0; } } else { int pos = startPosNew[synPos]-1; int spk = synSpikeListPos[syn] + startPosNew[synPos] - startPosCurr[synPos] - 1; int lastSpk = synSpikeListPos[syn]; for (; spk >= lastSpk; spk--) { // Copy only the spikes not expired if (spikeListGlobal[spk * nNeurons + neuron] > remThresh) { spikeListGlobal[pos * nNeurons + neuron] = spikeListGlobal[spk * nNeurons + neuron]; weightListGlobal[pos * nNeurons + neuron] = weightListGlobal[spk * nNeurons + neuron]; pos--; } //else spikeListGlobal[spk * nNeurons + neuron] = 0; } } } } /** * Count the number of current spikes to keep in the spikeList */ __device__ void countCurrentSpikesG(HinesStruct *hList, ConnGpu connGpuDev, int nNeurons, ftype *spikeListGlobal, ftype *weightListGlobal, int *spikeListPosGlobal, int *spikeListSizeGlobal, int *nSpikesToKeepShared) { //int neuron = blockIdx.x * blockDim.x + threadIdx.x; int neuron = connGpuDev.nNeuronsInPreviousGroups + threadIdx.x; if (threadIdx.x >= connGpuDev.nNeuronsGroup) return; HinesStruct & h = hList[neuron]; ftype currTime = h.currStep * h.dt; //int rcvSpkListSize = connGpuDev.nNeuronsGroup * blockDim.x; int *synSpikeListPos = spikeListPosGlobal + neuron * h.synapseListSize; // SYNAPSE_AMPA 0, SYNAPSE_GABA 1 // TODO: Works only when synapse 0 is AMPA and synapse 1 is GABA!!! int synapseListSize = 2; for (int syn = 0; syn<synapseListSize; syn++) { int nSpikesToKeep = 0; ftype remThresh = currTime - (3 * (h.tau[2*syn] + h.tau[2*syn+1]) ); int spk = synSpikeListPos[syn]; int lastSpk = (syn < synapseListSize-1) ? 
synSpikeListPos[syn+1] : spikeListSizeGlobal[neuron]; int spkMovePos = -1; for (; spk < lastSpk && spkMovePos == -1; spk++) { if (spikeListGlobal[spk * nNeurons + neuron] > remThresh) nSpikesToKeep++; else spkMovePos = spk; } for (; spk < lastSpk; spk++) { if (spikeListGlobal[spk * nNeurons + neuron] > remThresh) { nSpikesToKeep++; spikeListGlobal[spkMovePos * nNeurons + neuron] = spikeListGlobal[spk* nNeurons + neuron]; weightListGlobal[spkMovePos * nNeurons + neuron] = weightListGlobal[spk* nNeurons + neuron]; spkMovePos++; } } if (syn==0) nSpikesToKeepShared[threadIdx.x] = nSpikesToKeep; else nSpikesToKeepShared[connGpuDev.nNeuronsGroup + threadIdx.x] = nSpikesToKeep; } } /** * Copy the new spikes to the vector */ __device__ void deliverGeneratedSpikesG(ConnGpu connGpuDev, int nNeurons, int *sharedMem, int *startPosNew, ucomp **nGeneratedSpikesDev, ftype **genSpikeTimeListDev, ftype *randomSpikeTimesDev, int *randomSpikeDestDev, ftype *spikeListGlobal, ftype *weightListGlobal) { int spikeTimeListSize = 5; // ############################################# //const int nSynapses = 2; int *srcNeuronShared = sharedMem; int *destNeuronShared = srcNeuronShared + blockDim.x; int *nSpikesSrcShared = destNeuronShared + blockDim.x; int *newPosThreadShared = nSpikesSrcShared + blockDim.x; int srcNeuron, srcType, nSpikesSource; int destNeuron, destSyn, synPosL, synPosG; ftype weight, delay, *genSpikeTimes; /** * Copy the spikes received from other neurons */ for (int iConn = 0; iConn < connGpuDev.nConnectionsTotal; iConn += blockDim.x) { __syncthreads(); nSpikesSrcShared[threadIdx.x] = 0; newPosThreadShared[threadIdx.x] = threadIdx.x; if (iConn+threadIdx.x < connGpuDev.nConnectionsTotal) { srcNeuron = connGpuDev.srcDevice[iConn+threadIdx.x]; srcType = srcNeuron/CONN_NEURON_TYPE; srcNeuron = srcNeuron%CONN_NEURON_TYPE; nSpikesSource = nGeneratedSpikesDev[ srcType ][ srcNeuron ]; weight = connGpuDev.weightDevice[iConn+threadIdx.x]; destNeuron = connGpuDev.destDevice[iConn+threadIdx.x]%CONN_NEURON_TYPE; destSyn = connGpuDev.synapseDevice[iConn+threadIdx.x]; synPosL = destSyn * connGpuDev.nNeuronsGroup + (destNeuron-connGpuDev.nNeuronsInPreviousGroups); synPosG = startPosNew[synPosL]; delay = connGpuDev.delayDevice[iConn+threadIdx.x]; genSpikeTimes = genSpikeTimeListDev[srcType] + spikeTimeListSize * srcNeuron; // Put in Shared srcNeuronShared[threadIdx.x] = srcNeuron + srcType * CONN_NEURON_TYPE; destNeuronShared[threadIdx.x] = destNeuron + destSyn * CONN_NEURON_TYPE; nSpikesSrcShared[threadIdx.x] = nSpikesSource; } __syncthreads(); if (nSpikesSource > 0 && iConn+threadIdx.x < connGpuDev.nConnectionsTotal) { //if ( srcNeuronShared[0] != srcNeuronShared[threadIdx.x] ) { // only valid if there are no repeated connections for (int i=0; i < threadIdx.x; i++ ) { if ( destNeuronShared[i] == destNeuronShared[threadIdx.x] ) { synPosG += nSpikesSrcShared[i]; newPosThreadShared[i] = threadIdx.x; } } //} for (int i = 0; i < nSpikesSource; synPosG++, i++) { spikeListGlobal[ synPosG * nNeurons + destNeuron ] = genSpikeTimes[i] + delay; weightListGlobal[ synPosG * nNeurons + destNeuron ] = weight; } if (threadIdx.x == newPosThreadShared[threadIdx.x]) startPosNew[synPosL] = synPosG; } } __syncthreads(); /** * Copy the random spikes * Only works when random spikes are delivered to synapse 0 */ { int iRnd = 0; int destNeuron = randomSpikeDestDev[ threadIdx.x ]%CONN_NEURON_TYPE; int maxNeuron = connGpuDev.nNeuronsGroup + connGpuDev.nNeuronsInPreviousGroups; while (destNeuron >= 0 && destNeuron < maxNeuron) { destNeuron 
-= connGpuDev.nNeuronsInPreviousGroups; destNeuronShared[threadIdx.x] = destNeuron; if ( destNeuron >= 0 ) { int spikePos = startPosNew[destNeuron] * nNeurons + (destNeuron + connGpuDev.nNeuronsInPreviousGroups); if (threadIdx.x > 0) { for (int i = 1; i <= threadIdx.x && destNeuronShared[threadIdx.x - i] == destNeuron; i++) { spikePos += nNeurons; startPosNew[destNeuron]++; } } spikeListGlobal[ spikePos ] = randomSpikeTimesDev[iRnd+threadIdx.x]; weightListGlobal[ spikePos ] = 1; startPosNew[destNeuron]++; } iRnd += blockDim.x; destNeuron = randomSpikeDestDev[iRnd+threadIdx.x]%CONN_NEURON_TYPE ; } } } // HinesStruct *hList, int nSteps, int nNeurons, ftype *spikeListGlobal, ftype *weightListGlobal, int *spikeListPosGlobal, int *spikeListStartGlobal, ftype *vmListGlobal __global__ void performCommunicationsG(int nNeurons, ConnGpu *connGpuListDev, ucomp **nGeneratedSpikesDev, ftype **genSpikeTimeListDev, HinesStruct *hList, ftype *spikeListGlobal, ftype *weightListGlobal, int *spikeListPosGlobal, int *spikeListSizeGlobal, ftype *randomSpikeTimesDev, int *randomSpikeDestDev) { int group = blockIdx.x; ConnGpu connGpuDev = connGpuListDev[group]; int neuron = connGpuDev.nNeuronsInPreviousGroups + threadIdx.x; extern __shared__ ftype sharedMem[]; int nSynapses = 2; int *nReceivedSpikesShared = (int *)sharedMem; int *nSpikesToKeepShared = nReceivedSpikesShared + connGpuDev.nNeuronsGroup * nSynapses; int *sharedMemNext = nSpikesToKeepShared + connGpuDev.nNeuronsGroup * nSynapses; int receivedSpikesSize = connGpuDev.nNeuronsGroup * blockDim.x; for (int i=0; i < receivedSpikesSize; i += blockDim.x) { nReceivedSpikesShared[i + threadIdx.x] = 0; // synapse 0 nReceivedSpikesShared[i + threadIdx.x + receivedSpikesSize] = 0; // synapse 1 } /** * Counts the number of spikes from other neurons and from a random source that will be added */ countReceivedSpikesG(nNeurons, connGpuDev, nReceivedSpikesShared, nGeneratedSpikesDev, randomSpikeDestDev); /** * Counts the number of current spikes in spikeList to keep for the next kernel call */ nSpikesToKeepShared[threadIdx.x] = 0; nSpikesToKeepShared[connGpuDev.nNeuronsGroup + threadIdx.x] = 0; countCurrentSpikesG(hList, connGpuDev, nNeurons, spikeListGlobal, weightListGlobal, spikeListPosGlobal, spikeListSizeGlobal, nSpikesToKeepShared); /** * Copy the data to the CPU for debugging * Here we consider nThreads == nNeuronsGroup; */ if (threadIdx.x < connGpuDev.nNeuronsGroup ) { spikeListSizeGlobal[neuron] = nReceivedSpikesShared[threadIdx.x] + nReceivedSpikesShared[threadIdx.x + connGpuDev.nNeuronsGroup] + nSpikesToKeepShared[threadIdx.x] + nSpikesToKeepShared[threadIdx.x + connGpuDev.nNeuronsGroup]; } /** * Move the current spikes in the spikeList vector */ int *startPosCurr = nSpikesToKeepShared; int *startPosNew = nReceivedSpikesShared; moveCurrentSpikesG(hList, connGpuDev, nNeurons, spikeListGlobal, weightListGlobal, spikeListPosGlobal, spikeListSizeGlobal, startPosCurr, startPosNew); if (threadIdx.x < connGpuDev.nNeuronsGroup ) { spikeListPosGlobal[2*neuron] = startPosCurr[threadIdx.x]; spikeListPosGlobal[2*neuron+1] = startPosCurr[threadIdx.x + connGpuDev.nNeuronsGroup]; } deliverGeneratedSpikesG(connGpuDev, nNeurons, sharedMemNext, startPosNew, nGeneratedSpikesDev, genSpikeTimeListDev, randomSpikeTimesDev, randomSpikeDestDev, spikeListGlobal, weightListGlobal); } // HinesStruct *hList, int nSteps, int nNeurons, ftype *spikeListGlobal, ftype *weightListGlobal, int *spikeListPosGlobal, int *spikeListStartGlobal, ftype *vmListGlobal __global__ void 
performCommunicationsG_Step1(int nNeurons, ConnGpu *connGpuListDev, ucomp **nGeneratedSpikesDev, ftype **genSpikeTimeListDev, HinesStruct *hList, ftype *spikeListGlobal, ftype *weightListGlobal, int *spikeListPosGlobal, int *spikeListSizeGlobal, ftype *randomSpikeTimesDev, int *randomSpikeDestDev, ftype *tmpDevMemory) { int group = blockIdx.x; ConnGpu connGpuDev = connGpuListDev[group]; int neuron = connGpuDev.nNeuronsInPreviousGroups + threadIdx.x; extern __shared__ ftype sharedMem[]; int nSynapses = 2; int *nReceivedSpikesShared = (int *)sharedMem; int *nSpikesToKeepShared = nReceivedSpikesShared + connGpuDev.nNeuronsGroup * nSynapses; int receivedSpikesSize = connGpuDev.nNeuronsGroup * blockDim.x; for (int i=0; i < receivedSpikesSize; i += blockDim.x) { nReceivedSpikesShared[i + threadIdx.x] = 0; // synapse 0 nReceivedSpikesShared[i + threadIdx.x + receivedSpikesSize] = 0; // synapse 1 } /** * Counts the number of spikes from other neurons and from a random source that will be added */ countReceivedSpikesG(nNeurons, connGpuDev, nReceivedSpikesShared, nGeneratedSpikesDev, randomSpikeDestDev); /** * Counts the number of current spikes in spikeList to keep for the next kernel call */ nSpikesToKeepShared[threadIdx.x] = 0; nSpikesToKeepShared[connGpuDev.nNeuronsGroup + threadIdx.x] = 0; countCurrentSpikesG(hList, connGpuDev, nNeurons, spikeListGlobal, weightListGlobal, spikeListPosGlobal, spikeListSizeGlobal, nSpikesToKeepShared); /** * Copy the data to the CPU for debugging * Here we consider nThreads == nNeuronsGroup; */ if (threadIdx.x < connGpuDev.nNeuronsGroup ) { spikeListSizeGlobal[neuron] = nReceivedSpikesShared[threadIdx.x] + nReceivedSpikesShared[threadIdx.x + connGpuDev.nNeuronsGroup] + nSpikesToKeepShared[threadIdx.x] + nSpikesToKeepShared[threadIdx.x + connGpuDev.nNeuronsGroup]; } /** * Move the current spikes in the spikeList vector */ int *startPosCurr = nSpikesToKeepShared; int *startPosNew = nReceivedSpikesShared; moveCurrentSpikesG(hList, connGpuDev, nNeurons, spikeListGlobal, weightListGlobal, spikeListPosGlobal, spikeListSizeGlobal, startPosCurr, startPosNew); if (threadIdx.x < connGpuDev.nNeuronsGroup ) { spikeListPosGlobal[2*neuron] = startPosCurr[threadIdx.x]; spikeListPosGlobal[2*neuron+1] = startPosCurr[threadIdx.x + connGpuDev.nNeuronsGroup]; } /* * Used only for benchmarking */ if (threadIdx.x < connGpuDev.nNeuronsGroup ) { tmpDevMemory[neuron] = startPosNew[threadIdx.x]; tmpDevMemory[nNeurons + neuron] = startPosNew [connGpuDev.nNeuronsGroup + threadIdx.x]; } } __global__ void performCommunicationsG_Step2(int nNeurons, ConnGpu *connGpuListDev, ucomp **nGeneratedSpikesDev, ftype **genSpikeTimeListDev, HinesStruct *hList, ftype *spikeListGlobal, ftype *weightListGlobal, int *spikeListPosGlobal, int *spikeListSizeGlobal, ftype *randomSpikeTimesDev, int *randomSpikeDestDev, ftype *tmpDevMemory) { int group = blockIdx.x; ConnGpu connGpuDev = connGpuListDev[group]; extern __shared__ ftype sharedMem[]; int neuron = connGpuDev.nNeuronsInPreviousGroups + threadIdx.x; int nSynapses = 2; int *startPosNew = (int *)sharedMem; int *sharedMemNext = startPosNew + connGpuDev.nNeuronsGroup * nSynapses; /* * Used only for benchmarking */ if (threadIdx.x < connGpuDev.nNeuronsGroup ) { startPosNew[threadIdx.x] = tmpDevMemory[neuron]; startPosNew [connGpuDev.nNeuronsGroup + threadIdx.x] = tmpDevMemory[nNeurons + neuron]; } deliverGeneratedSpikesG(connGpuDev, nNeurons, sharedMemNext, startPosNew, nGeneratedSpikesDev, genSpikeTimeListDev, randomSpikeTimesDev, randomSpikeDestDev, 
spikeListGlobal, weightListGlobal); }
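
/*
 * A minimal host-side sketch, not part of the original file: the comment above
 * countReceivedSpikesCpu() says it exists only to test createGpuConnections().
 * The hypothetical helper below shows one way that test could be driven; the
 * function name checkConnectionsForType is an assumption, and the Connections
 * object, neuron counts and per-type generated-spike counts are expected to be
 * provided by the caller exactly as elsewhere in this project.
 */
void checkConnectionsForType(Connections *connections, int destType, int nTypes,
		int *nNeurons, int nGroups, ucomp **nGeneratedSpikes) {

	// Build the per-group GPU connection structures for this destination type.
	ConnGpu *connGpuList = createGpuConnections(connections, destType, nTypes,
			nNeurons, nGroups);

	// Re-count on the CPU how many spikes each neuron should receive per synapse.
	int **nReceived = countReceivedSpikesCpu(connGpuList, nNeurons[destType],
			nGroups, nGeneratedSpikes);

	long totalAmpa = 0, totalGaba = 0;
	for (int n = 0; n < nNeurons[destType]; n++) {
		totalAmpa += nReceived[0][n]; // synapse 0 (AMPA)
		totalGaba += nReceived[1][n]; // synapse 1 (GABA)
	}
	printf("destType=%d: %ld AMPA and %ld GABA spikes expected\n",
			destType, totalAmpa, totalGaba);

	delete[] nReceived[0];
	delete[] nReceived[1];
	delete[] nReceived;
}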
ccc0a64f7b940e7516cfea1ba13406b16fae0bf1.cu
/* * SynapticComm.cu * * Created on: 10/12/2010 * Author: rcamargo */ #include "Connections.hpp" #include <cstdio> #include <cassert> #include <cuda.h> // Necessary to allow better eclipse integration #include <cuda_runtime_api.h> // Necessary to allow better eclipse integration #include <device_launch_parameters.h> // Necessary to allow better eclipse integration #include <device_functions.h> // Necessary to allow better eclipse integration extern void checkCUDAError(const char *msg); // TODO: remove connections from Connections ConnGpu* createGpuConnections( Connections *conn, int destType, int nTypes, int *nNeurons, int nGroups ) { // Contains the structures with the connections for each neuron group ConnGpu *connGpuTypeHost = (ConnGpu *)malloc(nGroups*sizeof(ConnGpu)); int nConnectionsTotal[nGroups]; int nNeuronsPerGroup = nNeurons[destType]/nGroups; int nGroupsExtraNeuron = (nNeurons[destType] % nGroups); int nNeuronsExtraGroups = nGroupsExtraNeuron * (nNeuronsPerGroup + 1); int nNeuronsInPreviousGroups = 0; for (int group=0; group<nGroups; group++) { ConnGpu & connGpu = connGpuTypeHost[group]; nConnectionsTotal[group] = 0; connGpu.nNeuronsGroup = nNeuronsPerGroup; connGpu.nNeuronsInPreviousGroups = nNeuronsInPreviousGroups; if ( group < nGroupsExtraNeuron ) connGpu.nNeuronsGroup++; nNeuronsInPreviousGroups += connGpu.nNeuronsGroup; } /** * Counts the total number of connections for the group */ for (int srcType=0; srcType < nTypes; srcType++) { for (int neuron=0; neuron < nNeurons[srcType]; neuron++) { std::vector<Conn> & connList = conn->getConnArray(neuron + srcType*CONN_NEURON_TYPE); for (int conn=0; conn<connList.size(); conn++) { if (connList[conn].dest / CONN_NEURON_TYPE == destType) { int destNeuron = connList[conn].dest % CONN_NEURON_TYPE; int group = destNeuron / nNeuronsPerGroup; if (nNeurons[destType] % nGroups != 0) { if (destNeuron < nNeuronsExtraGroups) group = destNeuron / (nNeuronsPerGroup+1); else group = nGroupsExtraNeuron + ((destNeuron - nNeuronsExtraGroups) / nNeuronsPerGroup); } nConnectionsTotal[group]++; // assert (destNeuron >= connGpuTypeHost[group].nNeuronsInPreviousGroups); // assert (destNeuron < connGpuTypeHost[group].nNeuronsInPreviousGroups + connGpuTypeHost[group].nNeuronsGroup); } } } } for (int group=0; group<nGroups; group++) { ConnGpu & connGpu = connGpuTypeHost[group]; connGpu.nConnectionsTotal = nConnectionsTotal[group]; checkCUDAError("Allocation error 0 at [SynapticComm.cu]:"); /** * Allocates the memory to keep the connection information in the GPU and CPU */ cudaMalloc( (void **) &(connGpu.srcDevice), nConnectionsTotal[group]*sizeof(int) ); connGpu.srcHost = (int *)malloc( nConnectionsTotal[group]*sizeof(int) ); checkCUDAError("Allocation error 1 at [SynapticComm.cu]:"); cudaMalloc( (void **) &(connGpu.destDevice), nConnectionsTotal[group]*sizeof(int) ); connGpu.destHost = (int *)malloc( nConnectionsTotal[group]*sizeof(int) ); checkCUDAError("Allocation error 2 at [SynapticComm.cu]:"); cudaMalloc( (void **) &(connGpu.synapseDevice), nConnectionsTotal[group]*sizeof(ucomp) ); connGpu.synapseHost = (ucomp *)malloc( nConnectionsTotal[group]*sizeof(ucomp) ); checkCUDAError("Allocation error 3 at [SynapticComm.cu]:"); cudaMalloc( (void **) &(connGpu.weightDevice), nConnectionsTotal[group]*sizeof(ftype) ); connGpu.weightHost = (ftype *)malloc( nConnectionsTotal[group]*sizeof(ftype) ); checkCUDAError("Allocation error 4 at [SynapticComm.cu]:"); cudaMalloc( (void **) &(connGpu.delayDevice), nConnectionsTotal[group]*sizeof(ftype) ); connGpu.delayHost 
= (ftype *)malloc( nConnectionsTotal[group]*sizeof(ftype) ); checkCUDAError("Allocation error 5 at [SynapticComm.cu]:"); } /** * Copies the connection info data to the host memory */ int memPosList[nGroups]; for (int group=0; group<nGroups; group++) memPosList[group] = 0; for (int srcType=0; srcType < nTypes; srcType++) { for (int neuron=0; neuron < nNeurons[srcType]; neuron++) { std::vector<Conn> & connList = conn->getConnArray(neuron + srcType*CONN_NEURON_TYPE); for (int conn=0; conn<connList.size(); conn++) { if (connList[conn].dest / CONN_NEURON_TYPE == destType) { Conn & connStr = connList[conn]; int destNeuron = connStr.dest % CONN_NEURON_TYPE; int group = destNeuron / nNeuronsPerGroup; if (nNeurons[destType] % nGroups != 0) { if (destNeuron < nNeuronsExtraGroups) group = destNeuron / (nNeuronsPerGroup+1); else group = nGroupsExtraNeuron + ((destNeuron - nNeuronsExtraGroups) / nNeuronsPerGroup); } ConnGpu & connGpu = connGpuTypeHost[group]; int memPos = memPosList[group]; connGpu.srcHost[memPos] = neuron + srcType*CONN_NEURON_TYPE; connGpu.destHost[memPos] = connStr.dest; // TODO: can move to another vector connGpu.synapseHost[memPos] = connStr.synapse; // TODO: can move to another vector connGpu.weightHost[memPos] = connStr.weigth; // TODO: can move to another vector connGpu.delayHost[memPos] = connStr.delay; // TODO: can move to another vector memPosList[group]++; } } } } /** * Copies the connection info data to the device memory */ for (int group=0; group<nGroups; group++) { assert (memPosList[group] == nConnectionsTotal[group]); ConnGpu & connGpu = connGpuTypeHost[group]; cudaMemcpy( connGpu.srcDevice, connGpu.srcHost, nConnectionsTotal[group]*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy( connGpu.destDevice, connGpu.destHost, nConnectionsTotal[group]*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy( connGpu.synapseDevice, connGpu.synapseHost, nConnectionsTotal[group]*sizeof(ucomp), cudaMemcpyHostToDevice); cudaMemcpy( connGpu.weightDevice, connGpu.weightHost, nConnectionsTotal[group]*sizeof(ftype), cudaMemcpyHostToDevice); cudaMemcpy( connGpu.delayDevice, connGpu.delayHost, nConnectionsTotal[group]*sizeof(ftype), cudaMemcpyHostToDevice); checkCUDAError("Memcopy error at [SynapticComm.cu]:"); } return connGpuTypeHost; } /** * Counts the number of spikes received at each synapse by each neuron of a given type * Used only to test if the implementation of createGpuConnections is working */ int **countReceivedSpikesCpu(ConnGpu *connGpuList, int nNeurons, int nGroups, ucomp **nGeneratedSpikes) { int nSynapses = 2; int **nReceivedSpikes = new int *[nSynapses]; nReceivedSpikes[0] = new int[nNeurons]; nReceivedSpikes[1] = new int[nNeurons]; for (int i=0; i<nNeurons; i++) { nReceivedSpikes[0][i] = 0; nReceivedSpikes[1][i] = 0; } int typeTmp = connGpuList[0].destHost[0]/CONN_NEURON_TYPE; int nConsideredSynapses = 0; int nAddedSpikes = 0; for (int group = 0; group < nGroups; group++) { ConnGpu & connGpu = connGpuList[group]; for (int iConn = 0; iConn < connGpu.nConnectionsTotal; iConn++) { assert (typeTmp == connGpu.destHost[iConn]/CONN_NEURON_TYPE); nReceivedSpikes[ connGpu.synapseHost[iConn] ][ connGpu.destHost[iConn]%CONN_NEURON_TYPE ] += nGeneratedSpikes[ connGpu.srcHost[iConn]/CONN_NEURON_TYPE ][ connGpu.srcHost[iConn]%CONN_NEURON_TYPE ]; nConsideredSynapses++; nAddedSpikes += nGeneratedSpikes[ connGpu.srcHost[iConn]/CONN_NEURON_TYPE ][ connGpu.srcHost[iConn]%CONN_NEURON_TYPE ]; } //printf("###\n"); } printf ("nConsideredSynapses = %d nAddedSpikes = %d\n", nConsideredSynapses, 
nAddedSpikes); //printf("###\n"); return nReceivedSpikes; } /** * Count the number of spikes delivered to each neuron * TODO: change ConnGpu connGpuDev to reference */ __device__ void countReceivedSpikesG(int nNeurons, ConnGpu connGpuDev, int *nReceivedSpikesShared, ucomp **nGeneratedSpikesDev, int *randomSpikeDestDev) { int rcvSpkListSize = connGpuDev.nNeuronsGroup * blockDim.x; for (int iConn = 0; iConn < connGpuDev.nConnectionsTotal; iConn += blockDim.x) { if (iConn+threadIdx.x < connGpuDev.nConnectionsTotal) { int destNeuron = (connGpuDev.destDevice[iConn+threadIdx.x]%CONN_NEURON_TYPE) - connGpuDev.nNeuronsInPreviousGroups; int threadStartPos = connGpuDev.synapseDevice[iConn+threadIdx.x] * rcvSpkListSize + threadIdx.x * connGpuDev.nNeuronsGroup; int srcNeuron = connGpuDev.srcDevice[iConn+threadIdx.x]; nReceivedSpikesShared[ threadStartPos + destNeuron ] += nGeneratedSpikesDev[ srcNeuron/CONN_NEURON_TYPE ][ srcNeuron%CONN_NEURON_TYPE ]; } } int threadStartPosRnd = threadIdx.x * connGpuDev.nNeuronsGroup; int iRnd = 0; int destNeuron = randomSpikeDestDev[ threadIdx.x ]; int maxNeuron = connGpuDev.nNeuronsInPreviousGroups + connGpuDev.nNeuronsGroup; while (destNeuron >= 0 && destNeuron < maxNeuron) { destNeuron -= connGpuDev.nNeuronsInPreviousGroups; if (destNeuron >= 0) nReceivedSpikesShared[ threadStartPosRnd + destNeuron ]++; iRnd += blockDim.x; destNeuron = randomSpikeDestDev[iRnd+threadIdx.x]; } /** * Here we consider nThreads == nNeuronsGroup; */ for (int i = 1; i < blockDim.x; i++) { if (threadIdx.x < connGpuDev.nNeuronsGroup) { nReceivedSpikesShared[threadIdx.x] += nReceivedSpikesShared[i * connGpuDev.nNeuronsGroup + threadIdx.x]; } } if (threadIdx.x < connGpuDev.nNeuronsGroup) nReceivedSpikesShared[connGpuDev.nNeuronsGroup + threadIdx.x] = 0; for (int i = 0; i < blockDim.x; i++) { if (threadIdx.x < connGpuDev.nNeuronsGroup) { nReceivedSpikesShared[connGpuDev.nNeuronsGroup + threadIdx.x] += nReceivedSpikesShared[rcvSpkListSize + i * connGpuDev.nNeuronsGroup + threadIdx.x]; } } //nReceivedSpikesShared[connGpuDev.nNeuronsInPreviousGroups + threadIdx.x] = nReceivedSpikesShared[threadIdx.x]; } /** * Move the current spikes to accommodate the new spikes */ __device__ void moveCurrentSpikesG(HinesStruct *hList, ConnGpu connGpuDev, int nNeurons, ftype *spikeListGlobal, ftype *weightListGlobal, int *spikeListPosGlobal, int *spikeListSizeGlobal, int *startPosCurr, int *startPosNew) { //int neuron = blockIdx.x * blockDim.x + threadIdx.x; int neuron = connGpuDev.nNeuronsInPreviousGroups + threadIdx.x; if (threadIdx.x >= connGpuDev.nNeuronsGroup) return; HinesStruct & h = hList[neuron]; ftype currTime = h.currStep * h.dt; startPosNew[connGpuDev.nNeuronsGroup + threadIdx.x] = startPosCurr[threadIdx.x] + startPosNew[threadIdx.x] + startPosCurr[connGpuDev.nNeuronsGroup + threadIdx.x]; startPosCurr[connGpuDev.nNeuronsGroup + threadIdx.x] = startPosCurr[threadIdx.x] + startPosNew[threadIdx.x]; startPosNew[threadIdx.x] = startPosCurr[threadIdx.x]; startPosCurr[threadIdx.x] = 0; int *synSpikeListPos = spikeListPosGlobal + neuron * h.synapseListSize; /** * Scans the spike list, copying the new generated spikes and the existing ones. */ int synapseListSize = 2; for (int syn=0; syn < synapseListSize ; syn++) { /* * Move the current spikes to the their final positions * TODO: Works only with 2 synapses, when synapse 0 is AMPA and synapse 1 is GABA!!! 
*/ ftype remThresh = currTime - (3 * (h.tau[2*syn] + h.tau[2*syn+1]) ); int synPos = syn * connGpuDev.nNeuronsGroup + threadIdx.x; if ( startPosCurr[synPos] <= synSpikeListPos[syn]) { int pos = startPosCurr[synPos]; int spk = synSpikeListPos[syn]; int lastSpk = synSpikeListPos[syn] + startPosNew[synPos] - startPosCurr[synPos]; for (; spk < lastSpk; spk++) { // Copy only the spikes not expired if (spikeListGlobal[spk * nNeurons + neuron] > remThresh) { spikeListGlobal[pos * nNeurons + neuron] = spikeListGlobal[spk * nNeurons + neuron]; weightListGlobal[pos * nNeurons + neuron] = weightListGlobal[spk * nNeurons + neuron]; pos++; } //else spikeListGlobal[spk * nNeurons + neuron] = 0; } } else { int pos = startPosNew[synPos]-1; int spk = synSpikeListPos[syn] + startPosNew[synPos] - startPosCurr[synPos] - 1; int lastSpk = synSpikeListPos[syn]; for (; spk >= lastSpk; spk--) { // Copy only the spikes not expired if (spikeListGlobal[spk * nNeurons + neuron] > remThresh) { spikeListGlobal[pos * nNeurons + neuron] = spikeListGlobal[spk * nNeurons + neuron]; weightListGlobal[pos * nNeurons + neuron] = weightListGlobal[spk * nNeurons + neuron]; pos--; } //else spikeListGlobal[spk * nNeurons + neuron] = 0; } } } } /** * Count the number of current spikes to keep in the spikeList */ __device__ void countCurrentSpikesG(HinesStruct *hList, ConnGpu connGpuDev, int nNeurons, ftype *spikeListGlobal, ftype *weightListGlobal, int *spikeListPosGlobal, int *spikeListSizeGlobal, int *nSpikesToKeepShared) { //int neuron = blockIdx.x * blockDim.x + threadIdx.x; int neuron = connGpuDev.nNeuronsInPreviousGroups + threadIdx.x; if (threadIdx.x >= connGpuDev.nNeuronsGroup) return; HinesStruct & h = hList[neuron]; ftype currTime = h.currStep * h.dt; //int rcvSpkListSize = connGpuDev.nNeuronsGroup * blockDim.x; int *synSpikeListPos = spikeListPosGlobal + neuron * h.synapseListSize; // SYNAPSE_AMPA 0, SYNAPSE_GABA 1 // TODO: Works only when synapse 0 is AMPA and synapse 1 is GABA!!! int synapseListSize = 2; for (int syn = 0; syn<synapseListSize; syn++) { int nSpikesToKeep = 0; ftype remThresh = currTime - (3 * (h.tau[2*syn] + h.tau[2*syn+1]) ); int spk = synSpikeListPos[syn]; int lastSpk = (syn < synapseListSize-1) ? 
synSpikeListPos[syn+1] : spikeListSizeGlobal[neuron]; int spkMovePos = -1; for (; spk < lastSpk && spkMovePos == -1; spk++) { if (spikeListGlobal[spk * nNeurons + neuron] > remThresh) nSpikesToKeep++; else spkMovePos = spk; } for (; spk < lastSpk; spk++) { if (spikeListGlobal[spk * nNeurons + neuron] > remThresh) { nSpikesToKeep++; spikeListGlobal[spkMovePos * nNeurons + neuron] = spikeListGlobal[spk* nNeurons + neuron]; weightListGlobal[spkMovePos * nNeurons + neuron] = weightListGlobal[spk* nNeurons + neuron]; spkMovePos++; } } if (syn==0) nSpikesToKeepShared[threadIdx.x] = nSpikesToKeep; else nSpikesToKeepShared[connGpuDev.nNeuronsGroup + threadIdx.x] = nSpikesToKeep; } } /** * Copy the new spikes to the vector */ __device__ void deliverGeneratedSpikesG(ConnGpu connGpuDev, int nNeurons, int *sharedMem, int *startPosNew, ucomp **nGeneratedSpikesDev, ftype **genSpikeTimeListDev, ftype *randomSpikeTimesDev, int *randomSpikeDestDev, ftype *spikeListGlobal, ftype *weightListGlobal) { int spikeTimeListSize = 5; // ############################################# //const int nSynapses = 2; int *srcNeuronShared = sharedMem; int *destNeuronShared = srcNeuronShared + blockDim.x; int *nSpikesSrcShared = destNeuronShared + blockDim.x; int *newPosThreadShared = nSpikesSrcShared + blockDim.x; int srcNeuron, srcType, nSpikesSource; int destNeuron, destSyn, synPosL, synPosG; ftype weight, delay, *genSpikeTimes; /** * Copy the spikes received from other neurons */ for (int iConn = 0; iConn < connGpuDev.nConnectionsTotal; iConn += blockDim.x) { __syncthreads(); nSpikesSrcShared[threadIdx.x] = 0; newPosThreadShared[threadIdx.x] = threadIdx.x; if (iConn+threadIdx.x < connGpuDev.nConnectionsTotal) { srcNeuron = connGpuDev.srcDevice[iConn+threadIdx.x]; srcType = srcNeuron/CONN_NEURON_TYPE; srcNeuron = srcNeuron%CONN_NEURON_TYPE; nSpikesSource = nGeneratedSpikesDev[ srcType ][ srcNeuron ]; weight = connGpuDev.weightDevice[iConn+threadIdx.x]; destNeuron = connGpuDev.destDevice[iConn+threadIdx.x]%CONN_NEURON_TYPE; destSyn = connGpuDev.synapseDevice[iConn+threadIdx.x]; synPosL = destSyn * connGpuDev.nNeuronsGroup + (destNeuron-connGpuDev.nNeuronsInPreviousGroups); synPosG = startPosNew[synPosL]; delay = connGpuDev.delayDevice[iConn+threadIdx.x]; genSpikeTimes = genSpikeTimeListDev[srcType] + spikeTimeListSize * srcNeuron; // Put in Shared srcNeuronShared[threadIdx.x] = srcNeuron + srcType * CONN_NEURON_TYPE; destNeuronShared[threadIdx.x] = destNeuron + destSyn * CONN_NEURON_TYPE; nSpikesSrcShared[threadIdx.x] = nSpikesSource; } __syncthreads(); if (nSpikesSource > 0 && iConn+threadIdx.x < connGpuDev.nConnectionsTotal) { //if ( srcNeuronShared[0] != srcNeuronShared[threadIdx.x] ) { // only valid if there are no repeated connections for (int i=0; i < threadIdx.x; i++ ) { if ( destNeuronShared[i] == destNeuronShared[threadIdx.x] ) { synPosG += nSpikesSrcShared[i]; newPosThreadShared[i] = threadIdx.x; } } //} for (int i = 0; i < nSpikesSource; synPosG++, i++) { spikeListGlobal[ synPosG * nNeurons + destNeuron ] = genSpikeTimes[i] + delay; weightListGlobal[ synPosG * nNeurons + destNeuron ] = weight; } if (threadIdx.x == newPosThreadShared[threadIdx.x]) startPosNew[synPosL] = synPosG; } } __syncthreads(); /** * Copy the random spikes * Only works when random spikes are delivered to synapse 0 */ { int iRnd = 0; int destNeuron = randomSpikeDestDev[ threadIdx.x ]%CONN_NEURON_TYPE; int maxNeuron = connGpuDev.nNeuronsGroup + connGpuDev.nNeuronsInPreviousGroups; while (destNeuron >= 0 && destNeuron < maxNeuron) { destNeuron 
-= connGpuDev.nNeuronsInPreviousGroups; destNeuronShared[threadIdx.x] = destNeuron; if ( destNeuron >= 0 ) { int spikePos = startPosNew[destNeuron] * nNeurons + (destNeuron + connGpuDev.nNeuronsInPreviousGroups); if (threadIdx.x > 0) { for (int i = 1; i <= threadIdx.x && destNeuronShared[threadIdx.x - i] == destNeuron; i++) { spikePos += nNeurons; startPosNew[destNeuron]++; } } spikeListGlobal[ spikePos ] = randomSpikeTimesDev[iRnd+threadIdx.x]; weightListGlobal[ spikePos ] = 1; startPosNew[destNeuron]++; } iRnd += blockDim.x; destNeuron = randomSpikeDestDev[iRnd+threadIdx.x]%CONN_NEURON_TYPE ; } } } // HinesStruct *hList, int nSteps, int nNeurons, ftype *spikeListGlobal, ftype *weightListGlobal, int *spikeListPosGlobal, int *spikeListStartGlobal, ftype *vmListGlobal __global__ void performCommunicationsG(int nNeurons, ConnGpu *connGpuListDev, ucomp **nGeneratedSpikesDev, ftype **genSpikeTimeListDev, HinesStruct *hList, ftype *spikeListGlobal, ftype *weightListGlobal, int *spikeListPosGlobal, int *spikeListSizeGlobal, ftype *randomSpikeTimesDev, int *randomSpikeDestDev) { int group = blockIdx.x; ConnGpu connGpuDev = connGpuListDev[group]; int neuron = connGpuDev.nNeuronsInPreviousGroups + threadIdx.x; extern __shared__ ftype sharedMem[]; int nSynapses = 2; int *nReceivedSpikesShared = (int *)sharedMem; int *nSpikesToKeepShared = nReceivedSpikesShared + connGpuDev.nNeuronsGroup * nSynapses; int *sharedMemNext = nSpikesToKeepShared + connGpuDev.nNeuronsGroup * nSynapses; int receivedSpikesSize = connGpuDev.nNeuronsGroup * blockDim.x; for (int i=0; i < receivedSpikesSize; i += blockDim.x) { nReceivedSpikesShared[i + threadIdx.x] = 0; // synapse 0 nReceivedSpikesShared[i + threadIdx.x + receivedSpikesSize] = 0; // synapse 1 } /** * Counts the number of spikes from other neurons and from a random source that will be added */ countReceivedSpikesG(nNeurons, connGpuDev, nReceivedSpikesShared, nGeneratedSpikesDev, randomSpikeDestDev); /** * Counts the number of current spikes in spikeList to keep for the next kernel call */ nSpikesToKeepShared[threadIdx.x] = 0; nSpikesToKeepShared[connGpuDev.nNeuronsGroup + threadIdx.x] = 0; countCurrentSpikesG(hList, connGpuDev, nNeurons, spikeListGlobal, weightListGlobal, spikeListPosGlobal, spikeListSizeGlobal, nSpikesToKeepShared); /** * Copy the data to the CPU for debugging * Here we consider nThreads == nNeuronsGroup; */ if (threadIdx.x < connGpuDev.nNeuronsGroup ) { spikeListSizeGlobal[neuron] = nReceivedSpikesShared[threadIdx.x] + nReceivedSpikesShared[threadIdx.x + connGpuDev.nNeuronsGroup] + nSpikesToKeepShared[threadIdx.x] + nSpikesToKeepShared[threadIdx.x + connGpuDev.nNeuronsGroup]; } /** * Move the current spikes in the spikeList vector */ int *startPosCurr = nSpikesToKeepShared; int *startPosNew = nReceivedSpikesShared; moveCurrentSpikesG(hList, connGpuDev, nNeurons, spikeListGlobal, weightListGlobal, spikeListPosGlobal, spikeListSizeGlobal, startPosCurr, startPosNew); if (threadIdx.x < connGpuDev.nNeuronsGroup ) { spikeListPosGlobal[2*neuron] = startPosCurr[threadIdx.x]; spikeListPosGlobal[2*neuron+1] = startPosCurr[threadIdx.x + connGpuDev.nNeuronsGroup]; } deliverGeneratedSpikesG(connGpuDev, nNeurons, sharedMemNext, startPosNew, nGeneratedSpikesDev, genSpikeTimeListDev, randomSpikeTimesDev, randomSpikeDestDev, spikeListGlobal, weightListGlobal); } // HinesStruct *hList, int nSteps, int nNeurons, ftype *spikeListGlobal, ftype *weightListGlobal, int *spikeListPosGlobal, int *spikeListStartGlobal, ftype *vmListGlobal __global__ void 
performCommunicationsG_Step1(int nNeurons, ConnGpu *connGpuListDev, ucomp **nGeneratedSpikesDev, ftype **genSpikeTimeListDev, HinesStruct *hList, ftype *spikeListGlobal, ftype *weightListGlobal, int *spikeListPosGlobal, int *spikeListSizeGlobal, ftype *randomSpikeTimesDev, int *randomSpikeDestDev, ftype *tmpDevMemory) { int group = blockIdx.x; ConnGpu connGpuDev = connGpuListDev[group]; int neuron = connGpuDev.nNeuronsInPreviousGroups + threadIdx.x; extern __shared__ ftype sharedMem[]; int nSynapses = 2; int *nReceivedSpikesShared = (int *)sharedMem; int *nSpikesToKeepShared = nReceivedSpikesShared + connGpuDev.nNeuronsGroup * nSynapses; int receivedSpikesSize = connGpuDev.nNeuronsGroup * blockDim.x; for (int i=0; i < receivedSpikesSize; i += blockDim.x) { nReceivedSpikesShared[i + threadIdx.x] = 0; // synapse 0 nReceivedSpikesShared[i + threadIdx.x + receivedSpikesSize] = 0; // synapse 1 } /** * Counts the number of spikes from other neurons and from a random source that will be added */ countReceivedSpikesG(nNeurons, connGpuDev, nReceivedSpikesShared, nGeneratedSpikesDev, randomSpikeDestDev); /** * Counts the number of current spikes in spikeList to keep for the next kernel call */ nSpikesToKeepShared[threadIdx.x] = 0; nSpikesToKeepShared[connGpuDev.nNeuronsGroup + threadIdx.x] = 0; countCurrentSpikesG(hList, connGpuDev, nNeurons, spikeListGlobal, weightListGlobal, spikeListPosGlobal, spikeListSizeGlobal, nSpikesToKeepShared); /** * Copy the data to the CPU for debugging * Here we consider nThreads == nNeuronsGroup; */ if (threadIdx.x < connGpuDev.nNeuronsGroup ) { spikeListSizeGlobal[neuron] = nReceivedSpikesShared[threadIdx.x] + nReceivedSpikesShared[threadIdx.x + connGpuDev.nNeuronsGroup] + nSpikesToKeepShared[threadIdx.x] + nSpikesToKeepShared[threadIdx.x + connGpuDev.nNeuronsGroup]; } /** * Move the current spikes in the spikeList vector */ int *startPosCurr = nSpikesToKeepShared; int *startPosNew = nReceivedSpikesShared; moveCurrentSpikesG(hList, connGpuDev, nNeurons, spikeListGlobal, weightListGlobal, spikeListPosGlobal, spikeListSizeGlobal, startPosCurr, startPosNew); if (threadIdx.x < connGpuDev.nNeuronsGroup ) { spikeListPosGlobal[2*neuron] = startPosCurr[threadIdx.x]; spikeListPosGlobal[2*neuron+1] = startPosCurr[threadIdx.x + connGpuDev.nNeuronsGroup]; } /* * Used only for benchmarking */ if (threadIdx.x < connGpuDev.nNeuronsGroup ) { tmpDevMemory[neuron] = startPosNew[threadIdx.x]; tmpDevMemory[nNeurons + neuron] = startPosNew [connGpuDev.nNeuronsGroup + threadIdx.x]; } } __global__ void performCommunicationsG_Step2(int nNeurons, ConnGpu *connGpuListDev, ucomp **nGeneratedSpikesDev, ftype **genSpikeTimeListDev, HinesStruct *hList, ftype *spikeListGlobal, ftype *weightListGlobal, int *spikeListPosGlobal, int *spikeListSizeGlobal, ftype *randomSpikeTimesDev, int *randomSpikeDestDev, ftype *tmpDevMemory) { int group = blockIdx.x; ConnGpu connGpuDev = connGpuListDev[group]; extern __shared__ ftype sharedMem[]; int neuron = connGpuDev.nNeuronsInPreviousGroups + threadIdx.x; int nSynapses = 2; int *startPosNew = (int *)sharedMem; int *sharedMemNext = startPosNew + connGpuDev.nNeuronsGroup * nSynapses; /* * Used only for benchmarking */ if (threadIdx.x < connGpuDev.nNeuronsGroup ) { startPosNew[threadIdx.x] = tmpDevMemory[neuron]; startPosNew [connGpuDev.nNeuronsGroup + threadIdx.x] = tmpDevMemory[nNeurons + neuron]; } deliverGeneratedSpikesG(connGpuDev, nNeurons, sharedMemNext, startPosNew, nGeneratedSpikesDev, genSpikeTimeListDev, randomSpikeTimesDev, randomSpikeDestDev, 
spikeListGlobal, weightListGlobal); }
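
/*
 * An illustrative sketch, not part of the original file: the destination-neuron
 * to connection-group mapping is written out twice inside createGpuConnections()
 * (once when counting connections, once when filling the host arrays). A small
 * hypothetical helper with the same arithmetic makes the intent easier to read:
 * the first (nNeurons % nGroups) groups hold one extra neuron each, and every
 * later group holds nNeuronsPerGroup neurons.
 */
static int neuronToGroup(int destNeuron, int nNeuronsDest, int nGroups) {
	int nNeuronsPerGroup    = nNeuronsDest / nGroups;
	int nGroupsExtraNeuron  = nNeuronsDest % nGroups;
	int nNeuronsExtraGroups = nGroupsExtraNeuron * (nNeuronsPerGroup + 1);

	if (nGroupsExtraNeuron == 0)
		return destNeuron / nNeuronsPerGroup;        // all groups have the same size
	if (destNeuron < nNeuronsExtraGroups)
		return destNeuron / (nNeuronsPerGroup + 1);  // one of the larger groups
	return nGroupsExtraNeuron
			+ (destNeuron - nNeuronsExtraGroups) / nNeuronsPerGroup;
}
/*
 * Example: with nNeuronsDest = 10 and nGroups = 3 the groups hold {4, 3, 3}
 * neurons, so neuronToGroup(3, 10, 3) == 0 and neuronToGroup(4, 10, 3) == 1.
 */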
0448047bb3a3b7a67cf4fe778944c022af61e2ca.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include "utils.hpp" #include "update.hpp" #include "memoryManager.hpp" #include "cuStinger.hpp" using namespace std; #include <stdio.h> #include <string.h> // void initializeCuStinger(cuStingerConfig); // __global__ void devInitVertexData(cuStinger* custing,uint8_t* temp) __global__ void devInitVertexData(cuStinger::cusVertexData *dVD,vertexId_t nv,uint8_t* temp) { if(threadIdx.x!=0 || blockIdx.x!=0) DEV_CUSTINGER_ERROR("Number of threads and thread blocks for initializing vertex should always be one"); // cuStinger::cusVertexData *dVD = custing->dVD; dVD->mem = temp; int32_t pos=0; dVD->adj = (cuStinger::cusEdgeData**)(dVD->getMem() + pos); pos+=sizeof(cuStinger::cusEdgeData*)*nv; dVD->edMem = (uint8_t**)(dVD->getMem() + pos); pos+=sizeof(uint8_t*)*nv; dVD->used = (length_t*)(dVD->getMem() + pos); pos+=sizeof(length_t)*nv; dVD->max = (length_t*)(dVD->getMem() + pos); pos+=sizeof(length_t)*nv; dVD->vw = (vweight_t*)(dVD->getMem() + pos); pos+=sizeof(vweight_t)*nv; dVD->vt = (vtype_t*)(dVD->getMem() + pos); pos+=sizeof(vtype_t)*nv; } void cuStinger::initVertexDataPointers(cuStinger::cusVertexData *dVD, uint8_t* temp){ // devInitVertexData<<<1,1>>>( d_cuStinger,temp); hipLaunchKernelGGL(( devInitVertexData), dim3(1),dim3(1), 0, 0, dVD,nv,temp); } __global__ void devInitEdgeData(cuStinger* custing, int verticesPerThreadBlock){ vertexId_t v_init=blockIdx.x*verticesPerThreadBlock+threadIdx.x; length_t nv = custing->getMaxNV(); for (vertexId_t v_hat=0; v_hat<verticesPerThreadBlock; v_hat+=blockDim.x){ vertexId_t v=v_init+v_hat; if(v>=nv) break; //epv = edge per vertex length_t epv = custing->dVD->getMax()[v]; int32_t pos=0; cuStinger::cusEdgeData *dED = custing->dVD->adj[v]; dED->mem = custing->dVD->edMem[v]; dED->dst = (vertexId_t*)(dED->getMem() + pos); pos+=sizeof(vertexId_t)*epv; dED->ew = (eweight_t*)(dED->getMem() + pos); pos+=sizeof(eweight_t)*epv; dED->et = (etype_t*)(dED->getMem() + pos); pos+=sizeof(etype_t)*epv; dED->t1 = (timestamp_t*)(dED->getMem() + pos); pos+=sizeof(timestamp_t)*epv; dED->t2 = (timestamp_t*)(dED->getMem() + pos); pos+=sizeof(timestamp_t)*epv; } } void cuStinger::initEdgeDataPointers(){ dim3 numBlocks(1, 1); int32_t threads=64; dim3 threadsPerBlock(threads, 1); numBlocks.x = ceil((float)nv/(float)threads); if (numBlocks.x>16000){ numBlocks.x=16000; } int32_t verticesPerThreadBlock = threads; if(numBlocks.x>1) verticesPerThreadBlock = ceil(float(nv)/float(numBlocks.x-1)); hipLaunchKernelGGL(( devInitEdgeData), dim3(numBlocks),dim3(threadsPerBlock), 0, 0, d_cuStinger,verticesPerThreadBlock); } __global__ void devMakeGPUStinger(vertexId_t* d_off, length_t* d_adj, int verticesPerThreadBlock,cuStinger* custing){ length_t* d_utilized = custing->dVD->getUsed(); length_t* d_max = custing->dVD->getMax(); int32_t v_init=blockIdx.x*verticesPerThreadBlock; for (int v_hat=0; v_hat<verticesPerThreadBlock; v_hat++){ int32_t v=v_init+v_hat; if(v>=custing->nv) break; cuStinger::cusEdgeData* adjv = custing->dVD->adj[v]; for(int32_t e=threadIdx.x; e<d_utilized[v]; e+=blockDim.x){ adjv->dst[e]=d_adj[d_off[v]+e]; } for(int32_t e=threadIdx.x + d_utilized[v]; e < d_max[v]; e+=blockDim.x){ adjv->dst[e]=DELETION_MARKER; } } } void cuStinger::internalCSRTocuStinger(length_t* h_off, vertexId_t* h_adj, length_t ne){ length_t* d_off = (length_t*)allocDeviceArray(nv+1,sizeof(length_t)); vertexId_t* d_adj = 
(vertexId_t*)allocDeviceArray(ne,sizeof(vertexId_t)); copyArrayHostToDevice(h_off,d_off,nv+1,sizeof(length_t)); copyArrayHostToDevice(h_adj,d_adj,ne,sizeof(vertexId_t)); dim3 numBlocks(1, 1); int32_t threads=64; dim3 threadsPerBlock(threads, 1); numBlocks.x = ceil((float)nv/(float)threads); if (numBlocks.x>16000){ numBlocks.x=16000; } int32_t verticesPerThreadBlock = ceil(float(nv)/float(numBlocks.x-1)); hipLaunchKernelGGL(( devMakeGPUStinger), dim3(numBlocks),dim3(threadsPerBlock), 0, 0, d_off,d_adj,verticesPerThreadBlock, d_cuStinger); freeDeviceArray(d_adj); freeDeviceArray(d_off); } #define SUM_BLOCK_SIZE 512 __global__ void devSumArray(length_t * input, length_t * output, length_t len) { __shared__ length_t partialSum[2 * SUM_BLOCK_SIZE]; //Load a segment of the input vector into shared memory length_t tid = threadIdx.x, start = 2 * blockIdx.x * SUM_BLOCK_SIZE; if (start + tid < len) partialSum[tid] = input[start + tid]; else partialSum[tid] = 0; if (start + SUM_BLOCK_SIZE + tid < len) partialSum[SUM_BLOCK_SIZE + tid] = input[start + SUM_BLOCK_SIZE + tid]; else partialSum[SUM_BLOCK_SIZE + tid] = 0; //Traverse the reduction tree for (int stride = SUM_BLOCK_SIZE; stride >= 1; stride >>= 1) { __syncthreads(); if (tid < stride) partialSum[tid] += partialSum[tid+stride]; } //Write the computed sum of the block to the output vector at the correct index if (tid == 0) output[blockIdx.x] = partialSum[0]; } length_t cuStinger::sumDeviceArray(length_t* arr, length_t len){ length_t numOutputElements = len / (SUM_BLOCK_SIZE<<1); if (len % (SUM_BLOCK_SIZE<<1)) { numOutputElements++; } length_t* d_out = (length_t*)allocDeviceArray(len, sizeof(length_t*)); hipLaunchKernelGGL(( devSumArray), dim3(numOutputElements),dim3(SUM_BLOCK_SIZE), 0, 0, arr,d_out,len); length_t* h_out = (length_t*)allocHostArray(len, sizeof(length_t*)); length_t sum=0; copyArrayDeviceToHost(d_out, h_out, len, sizeof(length_t)); for(int i=0; i<numOutputElements; i++){ sum+=h_out[i]; } freeHostArray(h_out); freeDeviceArray(d_out); return sum; } __global__ void deviceCopyMultipleAdjacencies(cuStinger* custing, cuStinger::cusVertexData* olddVD, vertexId_t* requireUpdates, length_t requireCount ,length_t verticesPerThreadBlock) { // int32_t** d_cuadj = custing->d_adj; // length_t* d_utilized = custing->getDeviceUsed(); length_t v_init=blockIdx.x*verticesPerThreadBlock; for (int v_hat=0; v_hat<verticesPerThreadBlock; v_hat++){ if((v_init+v_hat)>=requireCount) break; vertexId_t v=requireUpdates[v_init+v_hat]; cuStinger::cusEdgeData *dED = custing->dVD->adj[v]; cuStinger::cusEdgeData *olddED = olddVD->adj[v]; //epv = edge per vertex length_t epv = olddVD->getMax()[v]; int32_t pos=0; dED->mem = custing->dVD->edMem[v]; dED->dst = (vertexId_t*)(dED->getMem() + pos); pos+=sizeof(vertexId_t)*epv; dED->ew = (eweight_t*)(dED->getMem() + pos); pos+=sizeof(eweight_t)*epv; dED->et = (etype_t*)(dED->getMem() + pos); pos+=sizeof(etype_t)*epv; dED->t1 = (timestamp_t*)(dED->getMem() + pos); pos+=sizeof(timestamp_t)*epv; dED->t2 = (timestamp_t*)(dED->getMem() + pos); pos+=sizeof(timestamp_t)*epv; __syncthreads(); for(length_t e=threadIdx.x; e<olddVD->getUsed()[v]; e+=blockDim.x){ dED->dst[e] = olddED->dst[e]; if(custing->isSemantic){ dED->ew[e] = olddED->ew[e]; } else if(custing->useEWeight){ dED->ew[e] = olddED->ew[e]; dED->et[e] = olddED->et[e]; dED->t1[e] = olddED->t1[e]; dED->t2[e] = olddED->t1[e]; } } } } void cuStinger::copyMultipleAdjacencies(cusVertexData* olddVD, vertexId_t* requireUpdates, length_t requireCount){ dim3 numBlocks(1, 1); 
int32_t threads=32; dim3 threadsPerBlock(threads, 1); numBlocks.x = ceil((float)requireCount); if (numBlocks.x>16000){ numBlocks.x=16000; } int32_t verticesPerThreadBlock; if(numBlocks.x == requireCount) verticesPerThreadBlock=1; else verticesPerThreadBlock = ceil(float(requireCount)/float(numBlocks.x-1)); hipLaunchKernelGGL(( deviceCopyMultipleAdjacencies), dim3(numBlocks),dim3(threadsPerBlock), 0, 0, d_cuStinger, olddVD, requireUpdates, requireCount, verticesPerThreadBlock); checkLastCudaError("Error in the first update sweep"); } __global__ void deviceCheckForDuplicateEdges(cuStinger* custing, length_t verticesPerThreadBlock) { vertexId_t v_init=blockIdx.x*verticesPerThreadBlock+threadIdx.x; length_t nv = custing->getMaxNV(); __shared__ int dupFound; for (vertexId_t v_hat=0; v_hat<verticesPerThreadBlock; v_hat++){ vertexId_t v=v_init+v_hat; if(v>=nv) break; length_t edges = custing->dVD->getUsed()[v]; cuStinger::cusEdgeData *dED = custing->dVD->adj[v]; // if(v ==45788 && threadIdx.x==0){ // for(length_t e=0; e<edges; e++) // printf("%d ,",dED->dst[e]); // printf("\n"); // } for (length_t e=0; e<edges; e++){ vertexId_t currDest=dED->dst[e]; dupFound=-1; __syncthreads(); for (length_t e2=0; e2<edges; e2+=blockDim.x){ vertexId_t currDest2 = dED->dst[e2]; if(currDest==currDest2 && e!=e2){ dupFound=e2; } } __syncthreads(); if(dupFound!=-1) printf("DUP FOUND IN CUSTIGER\n"); } } } void cuStinger::checkDuplicateEdges(){ dim3 numBlocks(1, 1); int32_t threads=32; dim3 threadsPerBlock(threads, 1); int32_t verticesPerThreadBlock; numBlocks.x = ceil((float)nv/(float)threads); if (numBlocks.x>16000){ numBlocks.x=16000; } verticesPerThreadBlock = ceil(float(nv)/float(numBlocks.x)); // cout << "checkDuplicateEdges : " << verticesPerThreadBlock<< endl; // cout << "checkDuplicateEdges : " << numBlocks.x << endl; // cout << "Deletions : " << threadsPerBlock.x << endl; hipLaunchKernelGGL(( deviceCheckForDuplicateEdges), dim3(numBlocks),dim3(threadsPerBlock), 0, 0, d_cuStinger, verticesPerThreadBlock); checkLastCudaError("Error in the first update sweep"); }
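Before the .cu twin of this file below, a minimal stand-alone sketch of the two-stage sum that devSumArray and sumDeviceArray implement above: one partial sum per block computed in shared memory, finished on the host. It assumes plain cudaMalloc/cudaMemcpy instead of the repo's allocDeviceArray/copyArrayDeviceToHost helpers and sizes the partial-sum buffer at one element per block; names like blockSum are illustrative only.

#include <cstdio>
#include <vector>
#include <cuda_runtime.h>

#define SUM_BLOCK 512

__global__ void blockSum(const int* in, int* out, int len)
{
    __shared__ int partial[2 * SUM_BLOCK];
    int tid = threadIdx.x;
    int start = 2 * blockIdx.x * SUM_BLOCK;
    // Each block loads up to 2*SUM_BLOCK elements, padding with zeros past the end.
    partial[tid]             = (start + tid < len)             ? in[start + tid]             : 0;
    partial[SUM_BLOCK + tid] = (start + SUM_BLOCK + tid < len) ? in[start + SUM_BLOCK + tid] : 0;
    // Shared-memory tree reduction, same shape as devSumArray above.
    for (int stride = SUM_BLOCK; stride >= 1; stride >>= 1) {
        __syncthreads();
        if (tid < stride) partial[tid] += partial[tid + stride];
    }
    if (tid == 0) out[blockIdx.x] = partial[0];
}

int main()
{
    const int len = 100000;
    const int numOut = (len + 2 * SUM_BLOCK - 1) / (2 * SUM_BLOCK);   // one partial sum per block

    std::vector<int> h_in(len, 1);                                    // known answer: sum == len
    int *d_in = NULL, *d_out = NULL;
    cudaMalloc(&d_in, len * sizeof(int));
    cudaMalloc(&d_out, numOut * sizeof(int));
    cudaMemcpy(d_in, h_in.data(), len * sizeof(int), cudaMemcpyHostToDevice);

    blockSum<<<numOut, SUM_BLOCK>>>(d_in, d_out, len);

    // Finish the reduction on the host, as sumDeviceArray does.
    std::vector<int> h_out(numOut);
    cudaMemcpy(h_out.data(), d_out, numOut * sizeof(int), cudaMemcpyDeviceToHost);
    long long sum = 0;
    for (int i = 0; i < numOut; ++i) sum += h_out[i];
    printf("sum = %lld (expected %d)\n", sum, len);

    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}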
0448047bb3a3b7a67cf4fe778944c022af61e2ca.cu
#include <thrust/device_vector.h> #include <thrust/host_vector.h> #include "utils.hpp" #include "update.hpp" #include "memoryManager.hpp" #include "cuStinger.hpp" using namespace std; #include <stdio.h> #include <string.h> // void initializeCuStinger(cuStingerConfig); // __global__ void devInitVertexData(cuStinger* custing,uint8_t* temp) __global__ void devInitVertexData(cuStinger::cusVertexData *dVD,vertexId_t nv,uint8_t* temp) { if(threadIdx.x!=0 || blockIdx.x!=0) DEV_CUSTINGER_ERROR("Number of threads and thread blocks for initializing vertex should always be one"); // cuStinger::cusVertexData *dVD = custing->dVD; dVD->mem = temp; int32_t pos=0; dVD->adj = (cuStinger::cusEdgeData**)(dVD->getMem() + pos); pos+=sizeof(cuStinger::cusEdgeData*)*nv; dVD->edMem = (uint8_t**)(dVD->getMem() + pos); pos+=sizeof(uint8_t*)*nv; dVD->used = (length_t*)(dVD->getMem() + pos); pos+=sizeof(length_t)*nv; dVD->max = (length_t*)(dVD->getMem() + pos); pos+=sizeof(length_t)*nv; dVD->vw = (vweight_t*)(dVD->getMem() + pos); pos+=sizeof(vweight_t)*nv; dVD->vt = (vtype_t*)(dVD->getMem() + pos); pos+=sizeof(vtype_t)*nv; } void cuStinger::initVertexDataPointers(cuStinger::cusVertexData *dVD, uint8_t* temp){ // devInitVertexData<<<1,1>>>( d_cuStinger,temp); devInitVertexData<<<1,1>>>( dVD,nv,temp); } __global__ void devInitEdgeData(cuStinger* custing, int verticesPerThreadBlock){ vertexId_t v_init=blockIdx.x*verticesPerThreadBlock+threadIdx.x; length_t nv = custing->getMaxNV(); for (vertexId_t v_hat=0; v_hat<verticesPerThreadBlock; v_hat+=blockDim.x){ vertexId_t v=v_init+v_hat; if(v>=nv) break; //epv = edge per vertex length_t epv = custing->dVD->getMax()[v]; int32_t pos=0; cuStinger::cusEdgeData *dED = custing->dVD->adj[v]; dED->mem = custing->dVD->edMem[v]; dED->dst = (vertexId_t*)(dED->getMem() + pos); pos+=sizeof(vertexId_t)*epv; dED->ew = (eweight_t*)(dED->getMem() + pos); pos+=sizeof(eweight_t)*epv; dED->et = (etype_t*)(dED->getMem() + pos); pos+=sizeof(etype_t)*epv; dED->t1 = (timestamp_t*)(dED->getMem() + pos); pos+=sizeof(timestamp_t)*epv; dED->t2 = (timestamp_t*)(dED->getMem() + pos); pos+=sizeof(timestamp_t)*epv; } } void cuStinger::initEdgeDataPointers(){ dim3 numBlocks(1, 1); int32_t threads=64; dim3 threadsPerBlock(threads, 1); numBlocks.x = ceil((float)nv/(float)threads); if (numBlocks.x>16000){ numBlocks.x=16000; } int32_t verticesPerThreadBlock = threads; if(numBlocks.x>1) verticesPerThreadBlock = ceil(float(nv)/float(numBlocks.x-1)); devInitEdgeData<<<numBlocks,threadsPerBlock>>>( d_cuStinger,verticesPerThreadBlock); } __global__ void devMakeGPUStinger(vertexId_t* d_off, length_t* d_adj, int verticesPerThreadBlock,cuStinger* custing){ length_t* d_utilized = custing->dVD->getUsed(); length_t* d_max = custing->dVD->getMax(); int32_t v_init=blockIdx.x*verticesPerThreadBlock; for (int v_hat=0; v_hat<verticesPerThreadBlock; v_hat++){ int32_t v=v_init+v_hat; if(v>=custing->nv) break; cuStinger::cusEdgeData* adjv = custing->dVD->adj[v]; for(int32_t e=threadIdx.x; e<d_utilized[v]; e+=blockDim.x){ adjv->dst[e]=d_adj[d_off[v]+e]; } for(int32_t e=threadIdx.x + d_utilized[v]; e < d_max[v]; e+=blockDim.x){ adjv->dst[e]=DELETION_MARKER; } } } void cuStinger::internalCSRTocuStinger(length_t* h_off, vertexId_t* h_adj, length_t ne){ length_t* d_off = (length_t*)allocDeviceArray(nv+1,sizeof(length_t)); vertexId_t* d_adj = (vertexId_t*)allocDeviceArray(ne,sizeof(vertexId_t)); copyArrayHostToDevice(h_off,d_off,nv+1,sizeof(length_t)); copyArrayHostToDevice(h_adj,d_adj,ne,sizeof(vertexId_t)); dim3 numBlocks(1, 1); 
int32_t threads=64; dim3 threadsPerBlock(threads, 1); numBlocks.x = ceil((float)nv/(float)threads); if (numBlocks.x>16000){ numBlocks.x=16000; } int32_t verticesPerThreadBlock = ceil(float(nv)/float(numBlocks.x-1)); devMakeGPUStinger<<<numBlocks,threadsPerBlock>>>(d_off,d_adj,verticesPerThreadBlock, d_cuStinger); freeDeviceArray(d_adj); freeDeviceArray(d_off); } #define SUM_BLOCK_SIZE 512 __global__ void devSumArray(length_t * input, length_t * output, length_t len) { __shared__ length_t partialSum[2 * SUM_BLOCK_SIZE]; //Load a segment of the input vector into shared memory length_t tid = threadIdx.x, start = 2 * blockIdx.x * SUM_BLOCK_SIZE; if (start + tid < len) partialSum[tid] = input[start + tid]; else partialSum[tid] = 0; if (start + SUM_BLOCK_SIZE + tid < len) partialSum[SUM_BLOCK_SIZE + tid] = input[start + SUM_BLOCK_SIZE + tid]; else partialSum[SUM_BLOCK_SIZE + tid] = 0; //Traverse the reduction tree for (int stride = SUM_BLOCK_SIZE; stride >= 1; stride >>= 1) { __syncthreads(); if (tid < stride) partialSum[tid] += partialSum[tid+stride]; } //Write the computed sum of the block to the output vector at the correct index if (tid == 0) output[blockIdx.x] = partialSum[0]; } length_t cuStinger::sumDeviceArray(length_t* arr, length_t len){ length_t numOutputElements = len / (SUM_BLOCK_SIZE<<1); if (len % (SUM_BLOCK_SIZE<<1)) { numOutputElements++; } length_t* d_out = (length_t*)allocDeviceArray(len, sizeof(length_t*)); devSumArray<<<numOutputElements,SUM_BLOCK_SIZE>>>(arr,d_out,len); length_t* h_out = (length_t*)allocHostArray(len, sizeof(length_t*)); length_t sum=0; copyArrayDeviceToHost(d_out, h_out, len, sizeof(length_t)); for(int i=0; i<numOutputElements; i++){ sum+=h_out[i]; } freeHostArray(h_out); freeDeviceArray(d_out); return sum; } __global__ void deviceCopyMultipleAdjacencies(cuStinger* custing, cuStinger::cusVertexData* olddVD, vertexId_t* requireUpdates, length_t requireCount ,length_t verticesPerThreadBlock) { // int32_t** d_cuadj = custing->d_adj; // length_t* d_utilized = custing->getDeviceUsed(); length_t v_init=blockIdx.x*verticesPerThreadBlock; for (int v_hat=0; v_hat<verticesPerThreadBlock; v_hat++){ if((v_init+v_hat)>=requireCount) break; vertexId_t v=requireUpdates[v_init+v_hat]; cuStinger::cusEdgeData *dED = custing->dVD->adj[v]; cuStinger::cusEdgeData *olddED = olddVD->adj[v]; //epv = edge per vertex length_t epv = olddVD->getMax()[v]; int32_t pos=0; dED->mem = custing->dVD->edMem[v]; dED->dst = (vertexId_t*)(dED->getMem() + pos); pos+=sizeof(vertexId_t)*epv; dED->ew = (eweight_t*)(dED->getMem() + pos); pos+=sizeof(eweight_t)*epv; dED->et = (etype_t*)(dED->getMem() + pos); pos+=sizeof(etype_t)*epv; dED->t1 = (timestamp_t*)(dED->getMem() + pos); pos+=sizeof(timestamp_t)*epv; dED->t2 = (timestamp_t*)(dED->getMem() + pos); pos+=sizeof(timestamp_t)*epv; __syncthreads(); for(length_t e=threadIdx.x; e<olddVD->getUsed()[v]; e+=blockDim.x){ dED->dst[e] = olddED->dst[e]; if(custing->isSemantic){ dED->ew[e] = olddED->ew[e]; } else if(custing->useEWeight){ dED->ew[e] = olddED->ew[e]; dED->et[e] = olddED->et[e]; dED->t1[e] = olddED->t1[e]; dED->t2[e] = olddED->t1[e]; } } } } void cuStinger::copyMultipleAdjacencies(cusVertexData* olddVD, vertexId_t* requireUpdates, length_t requireCount){ dim3 numBlocks(1, 1); int32_t threads=32; dim3 threadsPerBlock(threads, 1); numBlocks.x = ceil((float)requireCount); if (numBlocks.x>16000){ numBlocks.x=16000; } int32_t verticesPerThreadBlock; if(numBlocks.x == requireCount) verticesPerThreadBlock=1; else verticesPerThreadBlock = 
ceil(float(requireCount)/float(numBlocks.x-1)); deviceCopyMultipleAdjacencies<<<numBlocks,threadsPerBlock>>>(d_cuStinger, olddVD, requireUpdates, requireCount, verticesPerThreadBlock); checkLastCudaError("Error in the first update sweep"); } __global__ void deviceCheckForDuplicateEdges(cuStinger* custing, length_t verticesPerThreadBlock) { vertexId_t v_init=blockIdx.x*verticesPerThreadBlock+threadIdx.x; length_t nv = custing->getMaxNV(); __shared__ int dupFound; for (vertexId_t v_hat=0; v_hat<verticesPerThreadBlock; v_hat++){ vertexId_t v=v_init+v_hat; if(v>=nv) break; length_t edges = custing->dVD->getUsed()[v]; cuStinger::cusEdgeData *dED = custing->dVD->adj[v]; // if(v ==45788 && threadIdx.x==0){ // for(length_t e=0; e<edges; e++) // printf("%d ,",dED->dst[e]); // printf("\n"); // } for (length_t e=0; e<edges; e++){ vertexId_t currDest=dED->dst[e]; dupFound=-1; __syncthreads(); for (length_t e2=0; e2<edges; e2+=blockDim.x){ vertexId_t currDest2 = dED->dst[e2]; if(currDest==currDest2 && e!=e2){ dupFound=e2; } } __syncthreads(); if(dupFound!=-1) printf("DUP FOUND IN CUSTIGER\n"); } } } void cuStinger::checkDuplicateEdges(){ dim3 numBlocks(1, 1); int32_t threads=32; dim3 threadsPerBlock(threads, 1); int32_t verticesPerThreadBlock; numBlocks.x = ceil((float)nv/(float)threads); if (numBlocks.x>16000){ numBlocks.x=16000; } verticesPerThreadBlock = ceil(float(nv)/float(numBlocks.x)); // cout << "checkDuplicateEdges : " << verticesPerThreadBlock<< endl; // cout << "checkDuplicateEdges : " << numBlocks.x << endl; // cout << "Deletions : " << threadsPerBlock.x << endl; deviceCheckForDuplicateEdges<<<numBlocks,threadsPerBlock>>>(d_cuStinger, verticesPerThreadBlock); checkLastCudaError("Error in the first update sweep"); }
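A stand-alone sketch of the launch-sizing pattern that initEdgeDataPointers, internalCSRTocuStinger and checkDuplicateEdges share: cap the grid at 16000 blocks and hand each block a contiguous range of vertices. It uses a plain ceiling division rather than the numBlocks.x-1 variant above, and the kernel only counts visits; touchVertices and the counter are illustrative, not part of cuStinger.

#include <cstdio>
#include <cuda_runtime.h>

__global__ void touchVertices(int nv, int verticesPerBlock, int* counter)
{
    int vFirst = blockIdx.x * verticesPerBlock;
    // Threads of a block stride through the block's vertex range, as devInitEdgeData does.
    for (int vHat = threadIdx.x; vHat < verticesPerBlock; vHat += blockDim.x) {
        int v = vFirst + vHat;
        if (v >= nv) break;
        atomicAdd(counter, 1);   // stand-in for the per-vertex work on dVD->adj[v]
    }
}

int main()
{
    const int nv = 1 << 20;
    const int threads = 64;

    int blocks = (nv + threads - 1) / threads;      // ceil(nv / threads)
    if (blocks > 16000) blocks = 16000;             // same cap as the launches above
    int verticesPerBlock = (nv + blocks - 1) / blocks;

    int* d_counter = NULL;
    cudaMalloc(&d_counter, sizeof(int));
    cudaMemset(d_counter, 0, sizeof(int));

    touchVertices<<<blocks, threads>>>(nv, verticesPerBlock, d_counter);

    int visited = 0;
    cudaMemcpy(&visited, d_counter, sizeof(int), cudaMemcpyDeviceToHost);
    printf("visited %d of %d vertices\n", visited, nv);

    cudaFree(d_counter);
    return 0;
}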
c734d0ac28fe792b520b1f207e51e5ce9b7643d1.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "recombine.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; unsigned int *p0 = NULL; hipMalloc(&p0, XSIZE*YSIZE); unsigned int *p1 = NULL; hipMalloc(&p1, XSIZE*YSIZE); unsigned int *off = NULL; hipMalloc(&off, XSIZE*YSIZE); unsigned int cols = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( recombine), dim3(gridBlock),dim3(threadBlock), 0, 0, p0,p1,off,cols); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( recombine), dim3(gridBlock),dim3(threadBlock), 0, 0, p0,p1,off,cols); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( recombine), dim3(gridBlock),dim3(threadBlock), 0, 0, p0,p1,off,cols); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
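The harness above passes XSIZE*YSIZE directly to hipMalloc/cudaMalloc, which is a byte count, so each buffer holds one byte per element rather than one unsigned int. That may be acceptable for a pure launch benchmark, but a sketch of the usual element-count-to-bytes convention follows, with a hypothetical deviceAlloc helper that is not part of the harness.

#include <cstdio>
#include <cuda_runtime.h>

// Hypothetical helper: allocates `elements` values of type T and reports failures.
template <typename T>
T* deviceAlloc(size_t elements)
{
    T* ptr = NULL;
    cudaError_t err = cudaMalloc(&ptr, elements * sizeof(T));   // cudaMalloc takes bytes
    if (err != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed: %s\n", cudaGetErrorString(err));
        return NULL;
    }
    return ptr;
}

int main()
{
    const size_t XSIZE = 240, YSIZE = 240;
    unsigned int* p0 = deviceAlloc<unsigned int>(XSIZE * YSIZE);
    printf("allocated %zu bytes for %zu unsigned ints\n",
           XSIZE * YSIZE * sizeof(unsigned int), XSIZE * YSIZE);
    cudaFree(p0);
    return 0;
}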
c734d0ac28fe792b520b1f207e51e5ce9b7643d1.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "recombine.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; unsigned int *p0 = NULL; cudaMalloc(&p0, XSIZE*YSIZE); unsigned int *p1 = NULL; cudaMalloc(&p1, XSIZE*YSIZE); unsigned int *off = NULL; cudaMalloc(&off, XSIZE*YSIZE); unsigned int cols = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); recombine<<<gridBlock,threadBlock>>>(p0,p1,off,cols); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { recombine<<<gridBlock,threadBlock>>>(p0,p1,off,cols); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { recombine<<<gridBlock,threadBlock>>>(p0,p1,off,cols); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
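Both versions of the harness time 1000 launches with steady_clock but only synchronize after the first warm-up launch, so the measured interval is largely kernel enqueue time. A minimal sketch of event-based timing for the same loop, assuming a trivial stand-in kernel in place of recombine().

#include <cstdio>
#include <cuda_runtime.h>

__global__ void dummy(unsigned int* p) { p[threadIdx.x] = threadIdx.x; }

int main()
{
    unsigned int* d_p = NULL;
    cudaMalloc(&d_p, 256 * sizeof(unsigned int));

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    dummy<<<1, 256>>>(d_p);             // warm-up
    cudaEventRecord(start);
    for (int i = 0; i < 1000; ++i)
        dummy<<<1, 256>>>(d_p);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);         // wait until all 1000 launches have finished

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);
    float usPerLaunch = (ms * 1000.0f) / 1000.0f;   // total microseconds divided by launch count
    printf("1000 launches: %.3f ms total, %.3f us each\n", ms, usPerLaunch);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_p);
    return 0;
}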
f41afdc6cc14dcf6bb467ca6a8143f1240ff9946.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <unittest/unittest.h> #include <thrust/partition.h> #include <thrust/count.h> #include <thrust/execution_policy.h> template<typename ExecutionPolicy, typename Iterator1, typename Predicate, typename Iterator2> __global__ void partition_kernel(ExecutionPolicy exec, Iterator1 first, Iterator1 last, Predicate pred, Iterator2 result) { *result = thrust::partition(exec, first, last, pred); } template<typename T> struct is_even { __host__ __device__ bool operator()(T x) const { return ((int) x % 2) == 0; } }; template<typename ExecutionPolicy> void TestPartitionDevice(ExecutionPolicy exec) { typedef int T; typedef typename thrust::device_vector<T>::iterator iterator; thrust::device_vector<T> data(5); data[0] = 1; data[1] = 2; data[2] = 1; data[3] = 1; data[4] = 2; thrust::device_vector<iterator> result(1); hipLaunchKernelGGL(( partition_kernel), dim3(1),dim3(1), 0, 0, exec, data.begin(), data.end(), is_even<T>(), result.begin()); thrust::device_vector<T> ref(5); ref[0] = 2; ref[1] = 2; ref[2] = 1; ref[3] = 1; ref[4] = 1; ASSERT_EQUAL(2, (iterator)result[0] - data.begin()); ASSERT_EQUAL(ref, data); } void TestPartitionDeviceSeq() { TestPartitionDevice(thrust::seq); } DECLARE_UNITTEST(TestPartitionDeviceSeq); void TestPartitionDeviceDevice() { TestPartitionDevice(thrust::device); } DECLARE_UNITTEST(TestPartitionDeviceDevice); template<typename ExecutionPolicy, typename Iterator1, typename Iterator2, typename Predicate, typename Iterator3> __global__ void partition_kernel(ExecutionPolicy exec, Iterator1 first, Iterator1 last, Iterator2 stencil_first, Predicate pred, Iterator3 result) { *result = thrust::partition(exec, first, last, stencil_first, pred); } template<typename ExecutionPolicy> void TestPartitionStencilDevice(ExecutionPolicy exec) { typedef int T; typedef typename thrust::device_vector<T>::iterator iterator; thrust::device_vector<T> data(5); data[0] = 0; data[1] = 1; data[2] = 0; data[3] = 0; data[4] = 1; thrust::device_vector<T> stencil(5); stencil[0] = 1; stencil[1] = 2; stencil[2] = 1; stencil[3] = 1; stencil[4] = 2; thrust::device_vector<iterator> result(1); hipLaunchKernelGGL(( partition_kernel), dim3(1),dim3(1), 0, 0, exec, data.begin(), data.end(), stencil.begin(), is_even<T>(), result.begin()); thrust::device_vector<T> ref(5); ref[0] = 1; ref[1] = 1; ref[2] = 0; ref[3] = 0; ref[4] = 0; ASSERT_EQUAL(2, (iterator)result[0] - data.begin()); ASSERT_EQUAL(ref, data); } void TestPartitionStencilDeviceSeq() { TestPartitionStencilDevice(thrust::seq); } DECLARE_UNITTEST(TestPartitionStencilDeviceSeq); void TestPartitionStencilDeviceDevice() { TestPartitionStencilDevice(thrust::device); } DECLARE_UNITTEST(TestPartitionStencilDeviceDevice); template<typename ExecutionPolicy, typename Iterator1, typename Iterator2, typename Iterator3, typename Predicate, typename Iterator4> __global__ void partition_copy_kernel(ExecutionPolicy exec, Iterator1 first, Iterator1 last, Iterator2 true_result, Iterator3 false_result, Predicate pred, Iterator4 result) { *result = thrust::partition_copy(exec, first, last, true_result, false_result, pred); } template<typename ExecutionPolicy> void TestPartitionCopyDevice(ExecutionPolicy exec) { typedef int T; typedef thrust::device_vector<T>::iterator iterator; thrust::device_vector<T> data(5); data[0] = 1; data[1] = 2; data[2] = 1; data[3] = 1; data[4] = 2; thrust::device_vector<int> true_results(2); thrust::device_vector<int> false_results(3); typedef 
thrust::pair<iterator,iterator> pair_type; thrust::device_vector<pair_type> iterators(1); hipLaunchKernelGGL(( partition_copy_kernel), dim3(1),dim3(1), 0, 0, exec, data.begin(), data.end(), true_results.begin(), false_results.begin(), is_even<T>(), iterators.begin()); thrust::device_vector<T> true_ref(2); true_ref[0] = 2; true_ref[1] = 2; thrust::device_vector<T> false_ref(3); false_ref[0] = 1; false_ref[1] = 1; false_ref[2] = 1; pair_type ends = iterators[0]; ASSERT_EQUAL(2, ends.first - true_results.begin()); ASSERT_EQUAL(3, ends.second - false_results.begin()); ASSERT_EQUAL(true_ref, true_results); ASSERT_EQUAL(false_ref, false_results); } void TestPartitionCopyDeviceSeq() { TestPartitionCopyDevice(thrust::seq); } DECLARE_UNITTEST(TestPartitionCopyDeviceSeq); void TestPartitionCopyDeviceDevice() { TestPartitionCopyDevice(thrust::device); } DECLARE_UNITTEST(TestPartitionCopyDeviceDevice); template<typename ExecutionPolicy, typename Iterator1, typename Iterator2, typename Iterator3, typename Iterator4, typename Predicate, typename Iterator5> __global__ void partition_copy_kernel(ExecutionPolicy exec, Iterator1 first, Iterator1 last, Iterator2 stencil_first, Iterator3 true_result, Iterator4 false_result, Predicate pred, Iterator5 result) { *result = thrust::partition_copy(exec, first, last, stencil_first, true_result, false_result, pred); } template<typename ExecutionPolicy> void TestPartitionCopyStencilDevice(ExecutionPolicy exec) { typedef int T; thrust::device_vector<int> data(5); data[0] = 0; data[1] = 1; data[2] = 0; data[3] = 0; data[4] = 1; thrust::device_vector<int> stencil(5); stencil[0] = 1; stencil[1] = 2; stencil[2] = 1; stencil[3] = 1; stencil[4] = 2; thrust::device_vector<int> true_results(2); thrust::device_vector<int> false_results(3); typedef typename thrust::device_vector<int>::iterator iterator; typedef thrust::pair<iterator,iterator> pair_type; thrust::device_vector<pair_type> iterators(1); hipLaunchKernelGGL(( partition_copy_kernel), dim3(1),dim3(1), 0, 0, exec, data.begin(), data.end(), stencil.begin(), true_results.begin(), false_results.begin(), is_even<T>(), iterators.begin()); pair_type ends = iterators[0]; thrust::device_vector<int> true_ref(2); true_ref[0] = 1; true_ref[1] = 1; thrust::device_vector<int> false_ref(3); false_ref[0] = 0; false_ref[1] = 0; false_ref[2] = 0; ASSERT_EQUAL(2, ends.first - true_results.begin()); ASSERT_EQUAL(3, ends.second - false_results.begin()); ASSERT_EQUAL(true_ref, true_results); ASSERT_EQUAL(false_ref, false_results); } void TestPartitionCopyStencilDeviceSeq() { TestPartitionCopyStencilDevice(thrust::seq); } DECLARE_UNITTEST(TestPartitionCopyStencilDeviceSeq); void TestPartitionCopyStencilDeviceDevice() { TestPartitionCopyStencilDevice(thrust::device); } DECLARE_UNITTEST(TestPartitionCopyStencilDeviceDevice); template<typename ExecutionPolicy, typename Iterator1, typename Predicate, typename Iterator2, typename Iterator3> __global__ void stable_partition_kernel(ExecutionPolicy exec, Iterator1 first, Iterator1 last, Predicate pred, Iterator2 result, Iterator3 is_supported) { #if (__CUDA_ARCH__ >= 200) *is_supported = true; *result = thrust::stable_partition(exec, first, last, pred); #else *is_supported = false; #endif } template<typename ExecutionPolicy> void TestStablePartitionDevice(ExecutionPolicy exec) { typedef int T; typedef typename thrust::device_vector<T>::iterator iterator; thrust::device_vector<T> data(5); data[0] = 1; data[1] = 2; data[2] = 1; data[3] = 1; data[4] = 2; thrust::device_vector<iterator> result(1); 
thrust::device_vector<bool> is_supported(1); hipLaunchKernelGGL(( stable_partition_kernel), dim3(1),dim3(1), 0, 0, exec, data.begin(), data.end(), is_even<T>(), result.begin(), is_supported.begin()); if(is_supported[0]) { thrust::device_vector<T> ref(5); ref[0] = 2; ref[1] = 2; ref[2] = 1; ref[3] = 1; ref[4] = 1; ASSERT_EQUAL(2, (iterator)result[0] - data.begin()); ASSERT_EQUAL(ref, data); } } void TestStablePartitionDeviceSeq() { TestStablePartitionDevice(thrust::seq); } DECLARE_UNITTEST(TestStablePartitionDeviceSeq); void TestStablePartitionDeviceDevice() { TestStablePartitionDevice(thrust::device); } DECLARE_UNITTEST(TestStablePartitionDeviceDevice); template<typename ExecutionPolicy, typename Iterator1, typename Iterator2, typename Predicate, typename Iterator3, typename Iterator4> __global__ void stable_partition_kernel(ExecutionPolicy exec, Iterator1 first, Iterator1 last, Iterator2 stencil_first, Predicate pred, Iterator3 result, Iterator4 is_supported) { #if (__CUDA_ARCH__ >= 200) *is_supported = true; *result = thrust::stable_partition(exec, first, last, stencil_first, pred); #else *is_supported = false; #endif } template<typename ExecutionPolicy> void TestStablePartitionStencilDevice(ExecutionPolicy exec) { typedef int T; typedef typename thrust::device_vector<T>::iterator iterator; thrust::device_vector<T> data(5); data[0] = 0; data[1] = 1; data[2] = 0; data[3] = 0; data[4] = 1; thrust::device_vector<T> stencil(5); stencil[0] = 1; stencil[1] = 2; stencil[2] = 1; stencil[3] = 1; stencil[4] = 2; thrust::device_vector<iterator> result(1); thrust::device_vector<bool> is_supported(1); hipLaunchKernelGGL(( stable_partition_kernel), dim3(1),dim3(1), 0, 0, exec, data.begin(), data.end(), stencil.begin(), is_even<T>(), result.begin(), is_supported.begin()); if(is_supported[0]) { thrust::device_vector<T> ref(5); ref[0] = 1; ref[1] = 1; ref[2] = 0; ref[3] = 0; ref[4] = 0; ASSERT_EQUAL(2, (iterator)result[0] - data.begin()); ASSERT_EQUAL(ref, data); } } void TestStablePartitionStencilDeviceSeq() { TestStablePartitionStencilDevice(thrust::seq); } DECLARE_UNITTEST(TestStablePartitionStencilDeviceSeq); void TestStablePartitionStencilDeviceDevice() { TestStablePartitionStencilDevice(thrust::device); } DECLARE_UNITTEST(TestStablePartitionStencilDeviceDevice); template<typename ExecutionPolicy, typename Iterator1, typename Iterator2, typename Iterator3, typename Predicate, typename Iterator4> __global__ void stable_partition_copy_kernel(ExecutionPolicy exec, Iterator1 first, Iterator1 last, Iterator2 true_result, Iterator3 false_result, Predicate pred, Iterator4 result) { *result = thrust::stable_partition_copy(exec, first, last, true_result, false_result, pred); } template<typename ExecutionPolicy> void TestStablePartitionCopyDevice(ExecutionPolicy exec) { typedef int T; typedef thrust::device_vector<T>::iterator iterator; thrust::device_vector<T> data(5); data[0] = 1; data[1] = 2; data[2] = 1; data[3] = 1; data[4] = 2; thrust::device_vector<int> true_results(2); thrust::device_vector<int> false_results(3); typedef thrust::pair<iterator,iterator> pair_type; thrust::device_vector<pair_type> iterators(1); hipLaunchKernelGGL(( stable_partition_copy_kernel), dim3(1),dim3(1), 0, 0, exec, data.begin(), data.end(), true_results.begin(), false_results.begin(), is_even<T>(), iterators.begin()); thrust::device_vector<T> true_ref(2); true_ref[0] = 2; true_ref[1] = 2; thrust::device_vector<T> false_ref(3); false_ref[0] = 1; false_ref[1] = 1; false_ref[2] = 1; pair_type ends = iterators[0]; ASSERT_EQUAL(2, 
ends.first - true_results.begin()); ASSERT_EQUAL(3, ends.second - false_results.begin()); ASSERT_EQUAL(true_ref, true_results); ASSERT_EQUAL(false_ref, false_results); } void TestStablePartitionCopyDeviceSeq() { TestStablePartitionCopyDevice(thrust::seq); } DECLARE_UNITTEST(TestStablePartitionCopyDeviceSeq); void TestStablePartitionCopyDeviceDevice() { TestStablePartitionCopyDevice(thrust::device); } DECLARE_UNITTEST(TestStablePartitionCopyDeviceDevice); template<typename ExecutionPolicy, typename Iterator1, typename Iterator2, typename Iterator3, typename Iterator4, typename Predicate, typename Iterator5> __global__ void stable_partition_copy_kernel(ExecutionPolicy exec, Iterator1 first, Iterator1 last, Iterator2 stencil_first, Iterator3 true_result, Iterator4 false_result, Predicate pred, Iterator5 result) { *result = thrust::stable_partition_copy(exec, first, last, stencil_first, true_result, false_result, pred); } template<typename ExecutionPolicy> void TestStablePartitionCopyStencilDevice(ExecutionPolicy exec) { typedef int T; thrust::device_vector<int> data(5); data[0] = 0; data[1] = 1; data[2] = 0; data[3] = 0; data[4] = 1; thrust::device_vector<int> stencil(5); stencil[0] = 1; stencil[1] = 2; stencil[2] = 1; stencil[3] = 1; stencil[4] = 2; thrust::device_vector<int> true_results(2); thrust::device_vector<int> false_results(3); typedef typename thrust::device_vector<int>::iterator iterator; typedef thrust::pair<iterator,iterator> pair_type; thrust::device_vector<pair_type> iterators(1); hipLaunchKernelGGL(( stable_partition_copy_kernel), dim3(1),dim3(1), 0, 0, exec, data.begin(), data.end(), stencil.begin(), true_results.begin(), false_results.begin(), is_even<T>(), iterators.begin()); pair_type ends = iterators[0]; thrust::device_vector<int> true_ref(2); true_ref[0] = 1; true_ref[1] = 1; thrust::device_vector<int> false_ref(3); false_ref[0] = 0; false_ref[1] = 0; false_ref[2] = 0; ASSERT_EQUAL(2, ends.first - true_results.begin()); ASSERT_EQUAL(3, ends.second - false_results.begin()); ASSERT_EQUAL(true_ref, true_results); ASSERT_EQUAL(false_ref, false_results); } void TestStablePartitionCopyStencilDeviceSeq() { TestStablePartitionCopyStencilDevice(thrust::seq); } DECLARE_UNITTEST(TestStablePartitionCopyStencilDeviceSeq); void TestStablePartitionCopyStencilDeviceDevice() { TestStablePartitionCopyStencilDevice(thrust::device); } DECLARE_UNITTEST(TestStablePartitionCopyStencilDeviceDevice); void TestPartitionCudaStreams() { typedef thrust::device_vector<int> Vector; typedef typename Vector::value_type T; typedef typename Vector::iterator Iterator; Vector data(5); data[0] = 1; data[1] = 2; data[2] = 1; data[3] = 1; data[4] = 2; hipStream_t s; hipStreamCreate(&s); Iterator iter = thrust::partition(thrust::hip::par.on(s), data.begin(), data.end(), is_even<T>()); Vector ref(5); ref[0] = 2; ref[1] = 2; ref[2] = 1; ref[3] = 1; ref[4] = 1; ASSERT_EQUAL(iter - data.begin(), 2); ASSERT_EQUAL(data, ref); hipStreamDestroy(s); } DECLARE_UNITTEST(TestPartitionCudaStreams);
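Before the .cu twin below, a stand-alone sketch of the in-kernel pattern these tests exercise: thrust::partition invoked from a single-thread kernel with the thrust::seq policy, here on raw device pointers instead of device_vector iterators, with the split point written back through an extra device pointer. Names such as partition_inplace are illustrative.

#include <cstdio>
#include <cuda_runtime.h>
#include <thrust/partition.h>
#include <thrust/execution_policy.h>

struct is_even {
    __host__ __device__ bool operator()(int x) const { return (x % 2) == 0; }
};

// One thread partitions the range sequentially and records where the split falls.
__global__ void partition_inplace(int* first, int* last, int* split_index)
{
    int* mid = thrust::partition(thrust::seq, first, last, is_even());
    *split_index = static_cast<int>(mid - first);   // count of elements satisfying the predicate
}

int main()
{
    int h_data[5] = {1, 2, 1, 1, 2};
    int *d_data = NULL, *d_split = NULL;
    cudaMalloc(&d_data, sizeof(h_data));
    cudaMalloc(&d_split, sizeof(int));
    cudaMemcpy(d_data, h_data, sizeof(h_data), cudaMemcpyHostToDevice);

    partition_inplace<<<1, 1>>>(d_data, d_data + 5, d_split);

    int split = 0;
    cudaMemcpy(&split, d_split, sizeof(int), cudaMemcpyDeviceToHost);
    cudaMemcpy(h_data, d_data, sizeof(h_data), cudaMemcpyDeviceToHost);
    printf("split = %d, data = [%d %d %d %d %d]\n", split,
           h_data[0], h_data[1], h_data[2], h_data[3], h_data[4]);

    cudaFree(d_data);
    cudaFree(d_split);
    return 0;
}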
f41afdc6cc14dcf6bb467ca6a8143f1240ff9946.cu
#include <unittest/unittest.h> #include <thrust/partition.h> #include <thrust/count.h> #include <thrust/execution_policy.h> template<typename ExecutionPolicy, typename Iterator1, typename Predicate, typename Iterator2> __global__ void partition_kernel(ExecutionPolicy exec, Iterator1 first, Iterator1 last, Predicate pred, Iterator2 result) { *result = thrust::partition(exec, first, last, pred); } template<typename T> struct is_even { __host__ __device__ bool operator()(T x) const { return ((int) x % 2) == 0; } }; template<typename ExecutionPolicy> void TestPartitionDevice(ExecutionPolicy exec) { typedef int T; typedef typename thrust::device_vector<T>::iterator iterator; thrust::device_vector<T> data(5); data[0] = 1; data[1] = 2; data[2] = 1; data[3] = 1; data[4] = 2; thrust::device_vector<iterator> result(1); partition_kernel<<<1,1>>>(exec, data.begin(), data.end(), is_even<T>(), result.begin()); thrust::device_vector<T> ref(5); ref[0] = 2; ref[1] = 2; ref[2] = 1; ref[3] = 1; ref[4] = 1; ASSERT_EQUAL(2, (iterator)result[0] - data.begin()); ASSERT_EQUAL(ref, data); } void TestPartitionDeviceSeq() { TestPartitionDevice(thrust::seq); } DECLARE_UNITTEST(TestPartitionDeviceSeq); void TestPartitionDeviceDevice() { TestPartitionDevice(thrust::device); } DECLARE_UNITTEST(TestPartitionDeviceDevice); template<typename ExecutionPolicy, typename Iterator1, typename Iterator2, typename Predicate, typename Iterator3> __global__ void partition_kernel(ExecutionPolicy exec, Iterator1 first, Iterator1 last, Iterator2 stencil_first, Predicate pred, Iterator3 result) { *result = thrust::partition(exec, first, last, stencil_first, pred); } template<typename ExecutionPolicy> void TestPartitionStencilDevice(ExecutionPolicy exec) { typedef int T; typedef typename thrust::device_vector<T>::iterator iterator; thrust::device_vector<T> data(5); data[0] = 0; data[1] = 1; data[2] = 0; data[3] = 0; data[4] = 1; thrust::device_vector<T> stencil(5); stencil[0] = 1; stencil[1] = 2; stencil[2] = 1; stencil[3] = 1; stencil[4] = 2; thrust::device_vector<iterator> result(1); partition_kernel<<<1,1>>>(exec, data.begin(), data.end(), stencil.begin(), is_even<T>(), result.begin()); thrust::device_vector<T> ref(5); ref[0] = 1; ref[1] = 1; ref[2] = 0; ref[3] = 0; ref[4] = 0; ASSERT_EQUAL(2, (iterator)result[0] - data.begin()); ASSERT_EQUAL(ref, data); } void TestPartitionStencilDeviceSeq() { TestPartitionStencilDevice(thrust::seq); } DECLARE_UNITTEST(TestPartitionStencilDeviceSeq); void TestPartitionStencilDeviceDevice() { TestPartitionStencilDevice(thrust::device); } DECLARE_UNITTEST(TestPartitionStencilDeviceDevice); template<typename ExecutionPolicy, typename Iterator1, typename Iterator2, typename Iterator3, typename Predicate, typename Iterator4> __global__ void partition_copy_kernel(ExecutionPolicy exec, Iterator1 first, Iterator1 last, Iterator2 true_result, Iterator3 false_result, Predicate pred, Iterator4 result) { *result = thrust::partition_copy(exec, first, last, true_result, false_result, pred); } template<typename ExecutionPolicy> void TestPartitionCopyDevice(ExecutionPolicy exec) { typedef int T; typedef thrust::device_vector<T>::iterator iterator; thrust::device_vector<T> data(5); data[0] = 1; data[1] = 2; data[2] = 1; data[3] = 1; data[4] = 2; thrust::device_vector<int> true_results(2); thrust::device_vector<int> false_results(3); typedef thrust::pair<iterator,iterator> pair_type; thrust::device_vector<pair_type> iterators(1); partition_copy_kernel<<<1,1>>>(exec, data.begin(), data.end(), true_results.begin(), 
false_results.begin(), is_even<T>(), iterators.begin()); thrust::device_vector<T> true_ref(2); true_ref[0] = 2; true_ref[1] = 2; thrust::device_vector<T> false_ref(3); false_ref[0] = 1; false_ref[1] = 1; false_ref[2] = 1; pair_type ends = iterators[0]; ASSERT_EQUAL(2, ends.first - true_results.begin()); ASSERT_EQUAL(3, ends.second - false_results.begin()); ASSERT_EQUAL(true_ref, true_results); ASSERT_EQUAL(false_ref, false_results); } void TestPartitionCopyDeviceSeq() { TestPartitionCopyDevice(thrust::seq); } DECLARE_UNITTEST(TestPartitionCopyDeviceSeq); void TestPartitionCopyDeviceDevice() { TestPartitionCopyDevice(thrust::device); } DECLARE_UNITTEST(TestPartitionCopyDeviceDevice); template<typename ExecutionPolicy, typename Iterator1, typename Iterator2, typename Iterator3, typename Iterator4, typename Predicate, typename Iterator5> __global__ void partition_copy_kernel(ExecutionPolicy exec, Iterator1 first, Iterator1 last, Iterator2 stencil_first, Iterator3 true_result, Iterator4 false_result, Predicate pred, Iterator5 result) { *result = thrust::partition_copy(exec, first, last, stencil_first, true_result, false_result, pred); } template<typename ExecutionPolicy> void TestPartitionCopyStencilDevice(ExecutionPolicy exec) { typedef int T; thrust::device_vector<int> data(5); data[0] = 0; data[1] = 1; data[2] = 0; data[3] = 0; data[4] = 1; thrust::device_vector<int> stencil(5); stencil[0] = 1; stencil[1] = 2; stencil[2] = 1; stencil[3] = 1; stencil[4] = 2; thrust::device_vector<int> true_results(2); thrust::device_vector<int> false_results(3); typedef typename thrust::device_vector<int>::iterator iterator; typedef thrust::pair<iterator,iterator> pair_type; thrust::device_vector<pair_type> iterators(1); partition_copy_kernel<<<1,1>>>(exec, data.begin(), data.end(), stencil.begin(), true_results.begin(), false_results.begin(), is_even<T>(), iterators.begin()); pair_type ends = iterators[0]; thrust::device_vector<int> true_ref(2); true_ref[0] = 1; true_ref[1] = 1; thrust::device_vector<int> false_ref(3); false_ref[0] = 0; false_ref[1] = 0; false_ref[2] = 0; ASSERT_EQUAL(2, ends.first - true_results.begin()); ASSERT_EQUAL(3, ends.second - false_results.begin()); ASSERT_EQUAL(true_ref, true_results); ASSERT_EQUAL(false_ref, false_results); } void TestPartitionCopyStencilDeviceSeq() { TestPartitionCopyStencilDevice(thrust::seq); } DECLARE_UNITTEST(TestPartitionCopyStencilDeviceSeq); void TestPartitionCopyStencilDeviceDevice() { TestPartitionCopyStencilDevice(thrust::device); } DECLARE_UNITTEST(TestPartitionCopyStencilDeviceDevice); template<typename ExecutionPolicy, typename Iterator1, typename Predicate, typename Iterator2, typename Iterator3> __global__ void stable_partition_kernel(ExecutionPolicy exec, Iterator1 first, Iterator1 last, Predicate pred, Iterator2 result, Iterator3 is_supported) { #if (__CUDA_ARCH__ >= 200) *is_supported = true; *result = thrust::stable_partition(exec, first, last, pred); #else *is_supported = false; #endif } template<typename ExecutionPolicy> void TestStablePartitionDevice(ExecutionPolicy exec) { typedef int T; typedef typename thrust::device_vector<T>::iterator iterator; thrust::device_vector<T> data(5); data[0] = 1; data[1] = 2; data[2] = 1; data[3] = 1; data[4] = 2; thrust::device_vector<iterator> result(1); thrust::device_vector<bool> is_supported(1); stable_partition_kernel<<<1,1>>>(exec, data.begin(), data.end(), is_even<T>(), result.begin(), is_supported.begin()); if(is_supported[0]) { thrust::device_vector<T> ref(5); ref[0] = 2; ref[1] = 2; ref[2] = 1; 
ref[3] = 1; ref[4] = 1; ASSERT_EQUAL(2, (iterator)result[0] - data.begin()); ASSERT_EQUAL(ref, data); } } void TestStablePartitionDeviceSeq() { TestStablePartitionDevice(thrust::seq); } DECLARE_UNITTEST(TestStablePartitionDeviceSeq); void TestStablePartitionDeviceDevice() { TestStablePartitionDevice(thrust::device); } DECLARE_UNITTEST(TestStablePartitionDeviceDevice); template<typename ExecutionPolicy, typename Iterator1, typename Iterator2, typename Predicate, typename Iterator3, typename Iterator4> __global__ void stable_partition_kernel(ExecutionPolicy exec, Iterator1 first, Iterator1 last, Iterator2 stencil_first, Predicate pred, Iterator3 result, Iterator4 is_supported) { #if (__CUDA_ARCH__ >= 200) *is_supported = true; *result = thrust::stable_partition(exec, first, last, stencil_first, pred); #else *is_supported = false; #endif } template<typename ExecutionPolicy> void TestStablePartitionStencilDevice(ExecutionPolicy exec) { typedef int T; typedef typename thrust::device_vector<T>::iterator iterator; thrust::device_vector<T> data(5); data[0] = 0; data[1] = 1; data[2] = 0; data[3] = 0; data[4] = 1; thrust::device_vector<T> stencil(5); stencil[0] = 1; stencil[1] = 2; stencil[2] = 1; stencil[3] = 1; stencil[4] = 2; thrust::device_vector<iterator> result(1); thrust::device_vector<bool> is_supported(1); stable_partition_kernel<<<1,1>>>(exec, data.begin(), data.end(), stencil.begin(), is_even<T>(), result.begin(), is_supported.begin()); if(is_supported[0]) { thrust::device_vector<T> ref(5); ref[0] = 1; ref[1] = 1; ref[2] = 0; ref[3] = 0; ref[4] = 0; ASSERT_EQUAL(2, (iterator)result[0] - data.begin()); ASSERT_EQUAL(ref, data); } } void TestStablePartitionStencilDeviceSeq() { TestStablePartitionStencilDevice(thrust::seq); } DECLARE_UNITTEST(TestStablePartitionStencilDeviceSeq); void TestStablePartitionStencilDeviceDevice() { TestStablePartitionStencilDevice(thrust::device); } DECLARE_UNITTEST(TestStablePartitionStencilDeviceDevice); template<typename ExecutionPolicy, typename Iterator1, typename Iterator2, typename Iterator3, typename Predicate, typename Iterator4> __global__ void stable_partition_copy_kernel(ExecutionPolicy exec, Iterator1 first, Iterator1 last, Iterator2 true_result, Iterator3 false_result, Predicate pred, Iterator4 result) { *result = thrust::stable_partition_copy(exec, first, last, true_result, false_result, pred); } template<typename ExecutionPolicy> void TestStablePartitionCopyDevice(ExecutionPolicy exec) { typedef int T; typedef thrust::device_vector<T>::iterator iterator; thrust::device_vector<T> data(5); data[0] = 1; data[1] = 2; data[2] = 1; data[3] = 1; data[4] = 2; thrust::device_vector<int> true_results(2); thrust::device_vector<int> false_results(3); typedef thrust::pair<iterator,iterator> pair_type; thrust::device_vector<pair_type> iterators(1); stable_partition_copy_kernel<<<1,1>>>(exec, data.begin(), data.end(), true_results.begin(), false_results.begin(), is_even<T>(), iterators.begin()); thrust::device_vector<T> true_ref(2); true_ref[0] = 2; true_ref[1] = 2; thrust::device_vector<T> false_ref(3); false_ref[0] = 1; false_ref[1] = 1; false_ref[2] = 1; pair_type ends = iterators[0]; ASSERT_EQUAL(2, ends.first - true_results.begin()); ASSERT_EQUAL(3, ends.second - false_results.begin()); ASSERT_EQUAL(true_ref, true_results); ASSERT_EQUAL(false_ref, false_results); } void TestStablePartitionCopyDeviceSeq() { TestStablePartitionCopyDevice(thrust::seq); } DECLARE_UNITTEST(TestStablePartitionCopyDeviceSeq); void TestStablePartitionCopyDeviceDevice() { 
TestStablePartitionCopyDevice(thrust::device); } DECLARE_UNITTEST(TestStablePartitionCopyDeviceDevice); template<typename ExecutionPolicy, typename Iterator1, typename Iterator2, typename Iterator3, typename Iterator4, typename Predicate, typename Iterator5> __global__ void stable_partition_copy_kernel(ExecutionPolicy exec, Iterator1 first, Iterator1 last, Iterator2 stencil_first, Iterator3 true_result, Iterator4 false_result, Predicate pred, Iterator5 result) { *result = thrust::stable_partition_copy(exec, first, last, stencil_first, true_result, false_result, pred); } template<typename ExecutionPolicy> void TestStablePartitionCopyStencilDevice(ExecutionPolicy exec) { typedef int T; thrust::device_vector<int> data(5); data[0] = 0; data[1] = 1; data[2] = 0; data[3] = 0; data[4] = 1; thrust::device_vector<int> stencil(5); stencil[0] = 1; stencil[1] = 2; stencil[2] = 1; stencil[3] = 1; stencil[4] = 2; thrust::device_vector<int> true_results(2); thrust::device_vector<int> false_results(3); typedef typename thrust::device_vector<int>::iterator iterator; typedef thrust::pair<iterator,iterator> pair_type; thrust::device_vector<pair_type> iterators(1); stable_partition_copy_kernel<<<1,1>>>(exec, data.begin(), data.end(), stencil.begin(), true_results.begin(), false_results.begin(), is_even<T>(), iterators.begin()); pair_type ends = iterators[0]; thrust::device_vector<int> true_ref(2); true_ref[0] = 1; true_ref[1] = 1; thrust::device_vector<int> false_ref(3); false_ref[0] = 0; false_ref[1] = 0; false_ref[2] = 0; ASSERT_EQUAL(2, ends.first - true_results.begin()); ASSERT_EQUAL(3, ends.second - false_results.begin()); ASSERT_EQUAL(true_ref, true_results); ASSERT_EQUAL(false_ref, false_results); } void TestStablePartitionCopyStencilDeviceSeq() { TestStablePartitionCopyStencilDevice(thrust::seq); } DECLARE_UNITTEST(TestStablePartitionCopyStencilDeviceSeq); void TestStablePartitionCopyStencilDeviceDevice() { TestStablePartitionCopyStencilDevice(thrust::device); } DECLARE_UNITTEST(TestStablePartitionCopyStencilDeviceDevice); void TestPartitionCudaStreams() { typedef thrust::device_vector<int> Vector; typedef typename Vector::value_type T; typedef typename Vector::iterator Iterator; Vector data(5); data[0] = 1; data[1] = 2; data[2] = 1; data[3] = 1; data[4] = 2; cudaStream_t s; cudaStreamCreate(&s); Iterator iter = thrust::partition(thrust::cuda::par.on(s), data.begin(), data.end(), is_even<T>()); Vector ref(5); ref[0] = 2; ref[1] = 2; ref[2] = 1; ref[3] = 1; ref[4] = 1; ASSERT_EQUAL(iter - data.begin(), 2); ASSERT_EQUAL(data, ref); cudaStreamDestroy(s); } DECLARE_UNITTEST(TestPartitionCudaStreams);
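A host-side counterpart to the partition_copy kernels above: the same Thrust call made directly on device_vectors, with the returned iterator pair used to trim the two outputs. A minimal sketch, independent of the unittest framework; the generous initial output sizes are an assumption, not something the tests require.

#include <cstdio>
#include <thrust/device_vector.h>
#include <thrust/partition.h>
#include <thrust/pair.h>

struct is_even {
    __host__ __device__ bool operator()(int x) const { return (x % 2) == 0; }
};

int main()
{
    thrust::device_vector<int> data(5);
    data[0] = 1; data[1] = 2; data[2] = 1; data[3] = 1; data[4] = 2;

    thrust::device_vector<int> evens(5), odds(5);   // generous upper bounds
    typedef thrust::device_vector<int>::iterator iterator;

    // partition_copy returns the ends of the "true" and "false" output ranges.
    thrust::pair<iterator, iterator> ends =
        thrust::partition_copy(data.begin(), data.end(),
                               evens.begin(), odds.begin(), is_even());

    evens.resize(ends.first - evens.begin());       // 2 even values for this input
    odds.resize(ends.second - odds.begin());        // 3 odd values for this input
    printf("%zu evens, %zu odds\n", (size_t)evens.size(), (size_t)odds.size());
    return 0;
}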
1a711cdeb907785a220facac039409f47afc0fa2.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <rocblas.h> #include <hip/hip_runtime.h> __device__ double* t3_s_d; __device__ double* t3_d; #include "header.h" #include "ourinclude.h" #define T1 16 #define T2 16 #define Tcomm 16 hipblasHandle_t handle; double* output_d; size_t current_i_size; extern "C" void ttgt_init() { hipblasCreate(&handle); output_d = NULL; current_i_size = 0; } extern "C" void set_dev_mem_d(int h1d, int h2d, int h3d, int p4d, int p5d,int p6d) { int size_t3; size_t3 = h1d*h2d*h3d*p4d*p5d*p6d; t3_d = (double *) getGpuMem(size_t3*sizeof(double)); hipMemset(t3_d,0,size_t3*sizeof(double)); } extern "C" void dev_mem_d_(Integer * h1d, Integer * h2d, Integer * h3d, Integer * p4d, Integer * p5d, Integer * p6d) { set_dev_mem_d((int) *h1d, (int) *h2d, (int) *h3d, (int) *p4d, (int) *p5d, (int) *p6d); } extern "C" void dev_release() { freeGpuMem(t3_d); freeGpuMem(t3_s_d); } extern "C" void dev_release_() { dev_release(); } extern "C" void sd_t_d1_cuda(int h1d, int h2d, int h3d, int h7d, int p4d, int p5d, int p6d, double *triplesx, double *t2sub, double *v2sub, int id) { double* output_d; static int count = 0; if(count == 0) { ttgt_init(); count++; } size_t size_triplesx,size_block_triplesx,size_el_block_triplesx,size_t2sub,size_v2sub; size_t i; double *t2sub_d,*v2sub_d; size_triplesx= p4d * p5d * h1d * h3d * h2d * p6d *sizeof(double); size_t2sub=h7d*p4d*p5d*h1d*sizeof(double); size_v2sub=h3d*h2d*p6d*h7d*sizeof(double); int i1[4], i2[4], o[6]; i1[0] = h7d; i1[1] = p4d; i1[2] = p5d; i1[3] = h1d; i2[0] = h3d; i2[1] = h2d; i2[2] = p6d; i2[3] = h7d; o[0] = p4d; o[1] = p5d; o[2] = h1d; o[3] = h3d; o[4] = h2d; o[5] = p6d; hipblasOperation_t transa, transb; transa = HIPBLAS_OP_T; transb = HIPBLAS_OP_T; size_t m,n,k; m = p4d*p5d*h1d; k = h7d; n = h3d*h2d*p6d; double alpha, beta; alpha = 1; beta = 0; t2sub_d=(double*)getGpuMem(size_t2sub); v2sub_d=(double*)getGpuMem(size_v2sub); //if(size_triplesx > current_i_size) { output_d=(double*)getGpuMem(size_triplesx); current_i_size = size_triplesx; } if(output_d == NULL) { exit(0); } int perm[6]; //double beta; switch(id) { case 1: perm[0] = 3; perm[1] = 4; perm[2] = 2; perm[3] = 5; perm[4] = 1; perm[5] = 0; beta = -1.0; break; case 2: perm[0] = 3; perm[1] = 2; perm[2] = 4; perm[3] = 5; perm[4] = 1; perm[5] = 0; beta = 1.0; break; case 3: perm[0] = 2; perm[1] = 3; perm[2] = 4; perm[3] = 5; perm[4] = 1; perm[5] = 0; beta = -1.0; break; case 4: perm[0] = 3; perm[1] = 4; perm[2] = 2; perm[3] = 1; perm[4] = 0; perm[5] = 5; beta = -1.0; break; case 5: perm[0] = 3; perm[1] = 2; perm[2] = 4; perm[3] = 1; perm[4] = 0; perm[5] = 5; beta = 1.0; break; case 6: perm[0] = 2; perm[1] = 3; perm[2] = 4; perm[3] = 1; perm[4] = 0; perm[5] = 5; beta = -1.0; break; case 7: perm[0] = 3; perm[1] = 4; perm[2] = 2; perm[3] = 1; perm[4] = 5; perm[5] = 0; beta = 1.0; break; case 8: perm[0] = 3; perm[1] = 2; perm[2] = 4; perm[3] = 1; perm[4] = 5; perm[5] = 0; beta = -1.0; break; case 9: perm[0] = 2; perm[1] = 3; perm[2] = 4; perm[3] = 1; perm[4] = 5; perm[5] = 0; beta = 1.0; break; } hipblasDgemm(handle, transa, transb, m, n, k, &alpha, t2sub_d, h7d, v2sub_d, n, &beta, output_d, m); ttlg_transpose(6, o, perm, output_d, t3_d, 1, beta); hipDeviceSynchronize(); freeGpuMem(t2sub_d); freeGpuMem(v2sub_d); freeGpuMem(output_d); } /*----------------------------------------------------------------------* *triplesx[h3,h1,p6,p5,p4] -= t2sub[h7,p4,p5,h1] * v2sub[h3,p6,h7] 
*----------------------------------------------------------------------*/ extern "C" void sd_t_d1_1_cuda(int h1d, int h2d, int h3d, int h7d, int p4d, int p5d, int p6d, double *triplesx, double *t2sub, double *v2sub) { sd_t_d1_cuda( h1d, h2d, h3d, h7d, p4d, p5d, p6d, triplesx, t2sub, v2sub, 1); } extern "C" void sd_t_d1_1_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* h7d, Integer* p4d, Integer* p5d, Integer* p6d, double *triplesx, double *t2sub, double *v2sub) { sd_t_d1_1_cuda((int)*h1d,(int)*h2d,(int)*h3d,(int)*h7d,(int)*p4d,(int)*p5d,(int)*p6d,triplesx,t2sub,v2sub); } /*----------------------------------------------------------------------* *triplesx[h3,h1,h2,p5,p4] += t2sub[h7,p4,p5,h1] * v2sub[h3,h2,h7] *----------------------------------------------------------------------*/ extern "C" void sd_t_d1_2_cuda(int h1d, int h2d, int h3d, int h7d, int p4d, int p5d, int p6d, double *triplesx, double *t2sub, double *v2sub) { sd_t_d1_cuda( h1d, h2d, h3d, h7d, p4d, p5d, p6d, triplesx, t2sub, v2sub, 2); } extern "C" void sd_t_d1_2_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* h7d, Integer* p4d, Integer* p5d, Integer* p6d, double *triplesx, double *t2sub, double *v2sub) { sd_t_d1_2_cuda((int)*h1d,(int)*h2d,(int)*h3d,(int)*h7d,(int)*p4d,(int)*p5d,(int)*p6d,triplesx,t2sub,v2sub); } /*----------------------------------------------------------------------* *triplesx[h1,h3,p5,p4] -= t2sub[h7,p4,p5,h1] * v2sub[h3,h7] *----------------------------------------------------------------------*/ extern "C" void sd_t_d1_3_cuda(int h1d, int h2d, int h3d, int h7d, int p4d, int p5d, int p6d, double *triplesx, double *t2sub, double *v2sub) { sd_t_d1_cuda( h1d, h2d, h3d, h7d, p4d, p5d, p6d, triplesx, t2sub, v2sub, 3); } extern "C" void sd_t_d1_3_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* h7d, Integer* p4d, Integer* p5d, Integer* p6d, double *triplesx, double *t2sub, double *v2sub) { sd_t_d1_3_cuda((int)*h1d,(int)*h2d,(int)*h3d,(int)*h7d,(int)*p4d,(int)*p5d,(int)*p6d,triplesx,t2sub,v2sub); } /*----------------------------------------------------------------------* *triplesx[h3,h1,p5,p4,p6] -= t2sub[h7,p4,p5,h1] * v2sub[h3,p6,h7] *----------------------------------------------------------------------*/ extern "C" void sd_t_d1_4_cuda(int h1d, int h2d, int h3d, int h7d, int p4d, int p5d, int p6d, double *triplesx, double *t2sub, double *v2sub) { sd_t_d1_cuda( h1d, h2d, h3d, h7d, p4d, p5d, p6d, triplesx, t2sub, v2sub, 4); } extern "C" void sd_t_d1_4_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* h7d, Integer* p4d, Integer* p5d, Integer* p6d, double *triplesx, double *t2sub, double *v2sub) { sd_t_d1_4_cuda((int)*h1d,(int)*h2d,(int)*h3d,(int)*h7d,(int)*p4d,(int)*p5d,(int)*p6d,triplesx,t2sub,v2sub); } /*----------------------------------------------------------------------* *triplesx[h3,h1,h2,p5,p4,p6] += t2sub[h7,p4,p5,h1] * v2sub[h3,h2,p6,h7] *----------------------------------------------------------------------*/ extern "C" void sd_t_d1_5_cuda(int h1d, int h2d, int h3d, int h7d, int p4d, int p5d, int p6d, double *triplesx, double *t2sub, double *v2sub) { sd_t_d1_cuda( h1d, h2d, h3d, h7d, p4d, p5d, p6d, triplesx, t2sub, v2sub, 5); } extern "C" void sd_t_d1_5_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* h7d, Integer* p4d, Integer* p5d, Integer* p6d, double *triplesx, double *t2sub, double *v2sub) { sd_t_d1_5_cuda((int)*h1d,(int)*h2d,(int)*h3d,(int)*h7d,(int)*p4d,(int)*p5d,(int)*p6d,triplesx,t2sub,v2sub); } 
/*----------------------------------------------------------------------* *triplesx[h1,h3,p5,p4,p6] -= t2sub[h7,p4,p5,h1] * v2sub[h3,p6,h7] *----------------------------------------------------------------------*/ extern "C" void sd_t_d1_6_cuda(int h1d, int h2d, int h3d, int h7d, int p4d, int p5d, int p6d, double *triplesx, double *t2sub, double *v2sub) { sd_t_d1_cuda( h1d, h2d, h3d, h7d, p4d, p5d, p6d, triplesx, t2sub, v2sub, 6); } extern "C" void sd_t_d1_6_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* h7d, Integer* p4d, Integer* p5d, Integer* p6d, double *triplesx, double *t2sub, double *v2sub) { sd_t_d1_6_cuda((int)*h1d,(int)*h2d,(int)*h3d,(int)*h7d,(int)*p4d,(int)*p5d,(int)*p6d,triplesx,t2sub,v2sub); } /*----------------------------------------------------------------------* *triplesx[h3,h1,p5,p6,p4] += t2sub[h7,p4,p5,h1] * v2sub[h3,p6,h7] *----------------------------------------------------------------------*/ extern "C" void sd_t_d1_7_cuda(int h1d, int h2d, int h3d, int h7d, int p4d, int p5d, int p6d, double *triplesx, double *t2sub, double *v2sub) { sd_t_d1_cuda( h1d, h2d, h3d, h7d, p4d, p5d, p6d, triplesx, t2sub, v2sub, 7); } extern "C" void sd_t_d1_7_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* h7d, Integer* p4d, Integer* p5d, Integer* p6d, double *triplesx, double *t2sub, double *v2sub) { sd_t_d1_7_cuda((int)*h1d,(int)*h2d,(int)*h3d,(int)*h7d,(int)*p4d,(int)*p5d,(int)*p6d,triplesx,t2sub,v2sub); } /*----------------------------------------------------------------------* *triplesx[h3,h1,h2,p5,p6,p4] -= t2sub[h7,p4,p5,h1] * v2sub[h3,h2,p6,h7] *----------------------------------------------------------------------*/ extern "C" void sd_t_d1_8_cuda(int h1d, int h2d, int h3d, int h7d, int p4d, int p5d, int p6d, double *triplesx, double *t2sub, double *v2sub) { sd_t_d1_cuda( h1d, h2d, h3d, h7d, p4d, p5d, p6d, triplesx, t2sub, v2sub, 8); } extern "C" void sd_t_d1_8_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* h7d, Integer* p4d, Integer* p5d, Integer* p6d, double *triplesx, double *t2sub, double *v2sub) { sd_t_d1_8_cuda((int)*h1d,(int)*h2d,(int)*h3d,(int)*h7d,(int)*p4d,(int)*p5d,(int)*p6d,triplesx,t2sub,v2sub); } /*----------------------------------------------------------------------* *triplesx[h1,h3,p5,p6,p4] += t2sub[h7,p4,p5,h1] * v2sub[h3,p6,h7] *----------------------------------------------------------------------*/ extern "C" void sd_t_d1_9_cuda(int h1d, int h2d, int h3d, int h7d, int p4d, int p5d, int p6d, double *triplesx, double *t2sub, double *v2sub) { sd_t_d1_cuda( h1d, h2d, h3d, h7d, p4d, p5d, p6d, triplesx, t2sub, v2sub, 9); } extern "C" void sd_t_d1_9_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* h7d, Integer* p4d, Integer* p5d, Integer* p6d, double *triplesx, double *t2sub, double *v2sub) { sd_t_d1_9_cuda((int)*h1d,(int)*h2d,(int)*h3d,(int)*h7d,(int)*p4d,(int)*p5d,(int)*p6d,triplesx,t2sub,v2sub); } extern "C" void sd_t_d2_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, int p7d, double *triplesx, double *t2sub, double *v2sub, int id) { size_t size_triplesx,size_block_triplesx,size_el_block_triplesx,size_t2sub,size_v2sub; double* output_d; size_t i; double *t2sub_d,*v2sub_d; size_triplesx= p4d * p5d * h1d * h3d * h2d * p6d *sizeof(double); size_t2sub=p7d*p4d*p5d*h1d*sizeof(double); size_v2sub=h3d*h2d*p6d*p7d*sizeof(double); int i1[4], i2[4], o[6]; i1[0] = p7d; i1[1] = p4d; i1[2] = h1d; i1[3] = h2d; i2[0] = p7d; i2[1] = h3d; i2[2] = p6d; i2[3] = p5d; o[0] = p4d; o[1] = h1d; o[2] = h2d; o[3] = h3d; o[4] = p6d; o[5] = 
p5d; hipblasOperation_t transa, transb; transa = HIPBLAS_OP_T; transb = HIPBLAS_OP_N; size_t m,n,k; m = p4d*h1d*h2d; k = p7d; n = h3d*p6d*p5d; double alpha, beta; alpha = 1; beta = 0; t2sub_d=(double*)getGpuMem(size_t2sub); v2sub_d=(double*)getGpuMem(size_v2sub); //if(size_triplesx > current_i_size) { output_d=(double*)getGpuMem(size_triplesx); current_i_size = size_triplesx; } if(output_d == NULL) { exit(0); } int perm[6]; //double beta; switch(id) { case 1: perm[0] = 3; perm[1] = 2; perm[2] = 1; perm[3] = 4; perm[4] = 5; perm[5] = 0; beta = -1.0; break; case 2: perm[0] = 2; perm[1] = 1; perm[2] = 3; perm[3] = 4; perm[4] = 5; perm[5] = 0; beta = -1.0; break; case 3: perm[0] = 2; perm[1] = 3; perm[2] = 1; perm[3] = 4; perm[4] = 5; perm[5] = 0; beta = 1.0; break; case 4: perm[0] = 3; perm[1] = 2; perm[2] = 1; perm[3] = 4; perm[4] = 0; perm[5] = 5; beta = 1.0; break; case 5: perm[0] = 2; perm[1] = 1; perm[2] = 3; perm[3] = 4; perm[4] = 0; perm[5] = 5; beta = 1.0; break; case 6: perm[0] = 2; perm[1] = 3; perm[2] = 1; perm[3] = 4; perm[4] = 0; perm[5] = 5; beta = -1.0; break; case 7: perm[0] = 3; perm[1] = 2; perm[2] = 1; perm[3] = 0; perm[4] = 4; perm[5] = 5; beta = -1.0; break; case 8: perm[0] = 2; perm[1] = 1; perm[2] = 3; perm[3] = 0; perm[4] = 4; perm[5] = 5; beta = -1.0; break; case 9: perm[0] = 2; perm[1] = 3; perm[2] = 1; perm[3] = 0; perm[4] = 4; perm[5] = 5; beta = 1.0; break; } hipblasDgemm(handle, transa, transb, m, n, k, &alpha, t2sub_d, p7d, v2sub_d, n, &beta, output_d, m); ttlg_transpose(6, o, perm, output_d, t3_d, 1, beta); hipDeviceSynchronize(); freeGpuMem(t2sub_d); freeGpuMem(v2sub_d); freeGpuMem(output_d); } /*----------------------------------------------------------------------* *t3[h3,h2,h1,p6,p4] -= t2[p7,p4,h1,h2] * v2[p7,h3,p6] *----------------------------------------------------------------------*/ extern "C" void sd_t_d2_1_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, int p7d, double *t3, double *t2, double *v2) { sd_t_d2_cuda(h1d, h2d, h3d, p4d, p5d, p6d, p7d, t3, t2, v2, 1); } extern "C" void sd_t_d2_1_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* p4d, Integer* p5d, Integer* p6d, Integer* p7d, double *t3, double *t2, double *v2) { sd_t_d2_1_cuda((int)*h1d,(int)*h2d,(int)*h3d,(int)*p4d,(int)*p5d,(int)*p6d,(int)*p7d,t3,t2,v2); } /*----------------------------------------------------------------------* *t3[h2,h1,h3,p4] -= t2[p7,p4,h1,h2] * v2[p7,h3] *----------------------------------------------------------------------*/ extern "C" void sd_t_d2_2_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, int p7d, double *t3, double *t2, double *v2) { sd_t_d2_cuda(h1d, h2d, h3d, p4d, p5d, p6d, p7d, t3, t2, v2, 2); } extern "C" void sd_t_d2_2_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* p4d, Integer* p5d, Integer* p6d, Integer* p7d, double *t3, double *t2, double *v2) { sd_t_d2_2_cuda((int)*h1d,(int)*h2d,(int)*h3d,(int)*p4d,(int)*p5d,(int)*p6d,(int)*p7d,t3,t2,v2); } /*----------------------------------------------------------------------* *t3[h2,h3,h1,p6,p4] += t2[p7,p4,h1,h2] * v2[p7,h3,p6] *----------------------------------------------------------------------*/ extern "C" void sd_t_d2_3_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, int p7d, double *t3, double *t2, double *v2) { sd_t_d2_cuda(h1d, h2d, h3d, p4d, p5d, p6d, p7d, t3, t2, v2, 3); } extern "C" void sd_t_d2_3_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* p4d, Integer* p5d, Integer* p6d, Integer* p7d, double *t3, double *t2, double *v2) { 
sd_t_d2_3_cuda((int)*h1d,(int)*h2d,(int)*h3d,(int)*p4d,(int)*p5d,(int)*p6d,(int)*p7d,t3,t2,v2); } /*----------------------------------------------------------------------* *t3[h3,h2,h1,p6,p4,p5] += t2[p7,p4,h1,h2] * v2[p7,h3,p6,p5] *----------------------------------------------------------------------*/ extern "C" void sd_t_d2_4_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, int p7d, double *t3, double *t2, double *v2) { sd_t_d2_cuda(h1d, h2d, h3d, p4d, p5d, p6d, p7d, t3, t2, v2, 4); } extern "C" void sd_t_d2_4_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* p4d, Integer* p5d, Integer* p6d, Integer* p7d, double *t3, double *t2, double *v2) { sd_t_d2_4_cuda((int)*h1d,(int)*h2d,(int)*h3d,(int)*p4d,(int)*p5d,(int)*p6d,(int)*p7d,t3,t2,v2); } /*----------------------------------------------------------------------* *t3[h2,h1,h3,p4,p5] += t2[p7,p4,h1,h2] * v2[p7,h3,p5] *----------------------------------------------------------------------*/ extern "C" void sd_t_d2_5_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, int p7d, double *t3, double *t2, double *v2) { sd_t_d2_cuda(h1d, h2d, h3d, p4d, p5d, p6d, p7d, t3, t2, v2, 5); } extern "C" void sd_t_d2_5_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* p4d, Integer* p5d, Integer* p6d, Integer* p7d, double *t3, double *t2, double *v2) { sd_t_d2_5_cuda((int)*h1d,(int)*h2d,(int)*h3d,(int)*p4d,(int)*p5d,(int)*p6d,(int)*p7d,t3,t2,v2); } /*----------------------------------------------------------------------* *t3[h2,h3,h1,p6,p4,p5] -= t2[p7,p4,h1,h2] * v2[p7,h3,p6,p5] *----------------------------------------------------------------------*/ extern "C" void sd_t_d2_6_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, int p7d, double *t3, double *t2, double *v2) { sd_t_d2_cuda(h1d, h2d, h3d, p4d, p5d, p6d, p7d, t3, t2, v2, 6); } extern "C" void sd_t_d2_6_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* p4d, Integer* p5d, Integer* p6d, Integer* p7d, double *t3, double *t2, double *v2) { sd_t_d2_6_cuda((int)*h1d,(int)*h2d,(int)*h3d,(int)*p4d,(int)*p5d,(int)*p6d,(int)*p7d,t3,t2,v2); } /*----------------------------------------------------------------------* *t3[h3,h2,h1,p4,p6,p5] -= t2[p7,p4,h1,h2] * v2[p7,h3,p6,p5] *----------------------------------------------------------------------*/ extern "C" void sd_t_d2_7_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, int p7d, double *t3, double *t2, double *v2) { sd_t_d2_cuda(h1d, h2d, h3d, p4d, p5d, p6d, p7d, t3, t2, v2, 7); } extern "C" void sd_t_d2_7_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* p4d, Integer* p5d, Integer* p6d, Integer* p7d, double *t3, double *t2, double *v2) { sd_t_d2_7_cuda((int)*h1d,(int)*h2d,(int)*h3d,(int)*p4d,(int)*p5d,(int)*p6d,(int)*p7d,t3,t2,v2); } /*----------------------------------------------------------------------* *t3[h2,h1,h3,p4,p6,p5] -= t2[p7,p4,h1,h2] * v2[p7,h3,p6,p5] *----------------------------------------------------------------------*/ extern "C" void sd_t_d2_8_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, int p7d, double *t3, double *t2, double *v2) { sd_t_d2_cuda(h1d, h2d, h3d, p4d, p5d, p6d, p7d, t3, t2, v2, 8); } extern "C" void sd_t_d2_8_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* p4d, Integer* p5d, Integer* p6d, Integer* p7d, double *t3, double *t2, double *v2) { sd_t_d2_8_cuda((int)*h1d,(int)*h2d,(int)*h3d,(int)*p4d,(int)*p5d,(int)*p6d,(int)*p7d,t3,t2,v2); } /*----------------------------------------------------------------------* *t3[h2,h3,h1,p4,p6,p5] += 
t2[p7,p4,h1,h2] * v2[p7,h3,p6,p5] *----------------------------------------------------------------------*/ extern "C" void sd_t_d2_9_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, int p7d, double *t3, double *t2, double *v2) { sd_t_d2_cuda(h1d, h2d, h3d, p4d, p5d, p6d, p7d, t3, t2, v2, 9); } extern "C" void sd_t_d2_9_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* p4d, Integer* p5d, Integer* p6d, Integer* p7d, double *t3, double *t2, double *v2) { sd_t_d2_9_cuda((int)*h1d,(int)*h2d,(int)*h3d,(int)*p4d,(int)*p5d,(int)*p6d,(int)*p7d,t3,t2,v2); } #define MAX_h3 64 /* IMPORTANT!!!! t3_d must be passed as parameter to kernel function. A __global__ function can't access the global variable directly*/ __global__ void compute_energy_kernel(int h1d,int h2d,int h3d,int p4d,int p5d,int p6d,double* eval1,double* eval2,double* eval3,double* eval4,double* eval5,double* eval6, double* energy, double factor, int total_size, double* t3d, double* t3_sd) { int h1,h2,p6,p4,p5, h3,i=0; double e1,e2,e4,e5,e6; // __shared__ double t2_shm[MAX_h3]; __shared__ double energy_s[T1]; __shared__ double energy2_s[T1]; double inner_fac; int limit; int rest_x=blockIdx.x; int thread_x = T2*T1 * rest_x + threadIdx.x; if(threadIdx.x==0) { energy[blockIdx.x]=0; energy[blockIdx.x+gridDim.x]=0; energy_s[threadIdx.x] = 0.0; energy2_s[threadIdx.x] = 0.0; } for(int j =0; j<T2*T1;j++) { thread_x = T2*T1*blockIdx.x + j; rest_x = thread_x; __syncthreads(); h2=rest_x%h2d; rest_x=rest_x/h2d; h1=rest_x%h1d; rest_x=rest_x/h1d; p6=rest_x%p6d; rest_x=rest_x/p6d; p5=rest_x%p5d; rest_x=rest_x/p5d; p4=rest_x%p4d; e1 = eval1[h1]; e2 = eval2[h2]; e4 = eval4[p4]; e5 = eval5[p5]; e6 = eval6[p6]; /* for(p4=0;p4<p4d;p4++) for(p5 = 0;p5<p5d;p5++) for(p6=0;p6<p6d;p6++) for(h1= 0;h1<h1d;h1++) for(h2=0;h2<h2d;h2++) for(h3=0;h3<h3d;h3++) { inner_fac = -eval4[p4]-eval5[p5]-eval6[p6]+eval1[h1] +eval2[h2]+eval3[h3]; energy_s[0]+=factor*t3d[i]*t3d[i]/inner_fac; energy2_s[0]+=factor*t3d[i]*(t3_sd[i]+t3d[i])/inner_fac; i++; } */ if(thread_x<total_size) for(int i=0;i<h3d;i++) { inner_fac = -e4-e5-e6+e1+e2+eval3[i]; //t2_shm[i]; //ckbn avoid e1 in case we need just (T) energy_s[threadIdx.x] += factor* t3d[thread_x*h3d+i]*t3d[thread_x*h3d+i]/inner_fac; energy2_s[threadIdx.x] += factor* t3d[thread_x*h3d+i]*(t3_sd[thread_x*h3d+i]+t3d[thread_x*h3d+i])/inner_fac; } __syncthreads(); } if(threadIdx.x==0) { /* limit = blockDim.x; if (blockIdx.x == (gridDim.x-1)) limit = total_size%blockDim.x; for(int i=0;i<limit;i++) { energy[blockIdx.x]+=energy_s[i]; energy[blockIdx.x+gridDim.x]+=energy2_s[i]; } */ energy[blockIdx.x] = energy_s[0]; energy[blockIdx.x+gridDim.x] = energy2_s[0]; } __syncthreads(); } extern "C" void compute_energy(double factor, double* energy, double* eval1, double* eval2,double* eval3,double* eval4,double* eval5,double* eval6,int h1d, int h2d, int h3d, int p4d, int p5d,int p6d, double* host1, double* host2) //ckbn en_comment, double* total_d, double* total_s) { double* energy_d, *energy_h; double* eval_d1,*eval_d2,*eval_d3,*eval_d4,*eval_d5,*eval_d6; int size_energy = 2*sizeof(double); int total_block = DIV_UB((h1d*h2d*p4d*p5d*p6d), (T2*T1)); // int total_block = 1; int total_elements = h1d*h2d*p4d*p5d*p6d; energy_d = (double*)getGpuMem(size_energy*total_block*2); int i=0,in; double* t3 = (double*)malloc(sizeof(double)*h3d*total_elements); double* ts3 = (double*)malloc(sizeof(double)*h3d*total_elements); energy_h = (double*)getHostMem(size_energy*2*total_block); eval_d1 = (double*)getGpuMem(h1d*sizeof(double)); eval_d2 = 
(double*)getGpuMem(h2d*sizeof(double)); eval_d3 = (double*)getGpuMem(h3d*sizeof(double)); eval_d4 = (double*)getGpuMem(p4d*sizeof(double)); eval_d5 = (double*)getGpuMem(p5d*sizeof(double)); eval_d6 = (double*)getGpuMem(p6d*sizeof(double)); CUDA_SAFE(hipMemcpy(eval_d1, eval1, h1d*sizeof(double), hipMemcpyHostToDevice)); CUDA_SAFE(hipMemcpy(eval_d2, eval2, h2d*sizeof(double), hipMemcpyHostToDevice)); CUDA_SAFE(hipMemcpy(eval_d3, eval3, h3d*sizeof(double), hipMemcpyHostToDevice)); CUDA_SAFE(hipMemcpy(eval_d4, eval4, p4d*sizeof(double), hipMemcpyHostToDevice)); CUDA_SAFE(hipMemcpy(eval_d5, eval5, p5d*sizeof(double), hipMemcpyHostToDevice)); CUDA_SAFE(hipMemcpy(eval_d6, eval6, p6d*sizeof(double), hipMemcpyHostToDevice)); /* for test only */ //printf("host 2 is %f %f\n", host2[0], host2[1]); // CUDA_SAFE(hipMemcpy(t3_s_d, host2, total_elements*h3d*sizeof(double), hipMemcpyHostToDevice)); dim3 dimBlock(1); //T2*T1); dim3 dimGrid(total_block); hipLaunchKernelGGL(( compute_energy_kernel), dim3(dimGrid),dim3(dimBlock),0, 0, h1d,h2d,h3d,p4d,p5d,p6d, eval_d1,eval_d2,eval_d3,eval_d4,eval_d5,eval_d6,energy_d, factor, h1d*h2d*p4d*p5d*p6d, t3_d, t3_s_d); hipDeviceSynchronize(); //CHECK_ERR("Kernel execution failed"); CUDA_SAFE(hipMemcpy(((char *) energy_h) , ((char *) energy_d) , size_energy*total_block*2, hipMemcpyDeviceToHost)); for(int i=1;i<dimGrid.x;i++) { energy_h[0]+=energy_h[i]; energy_h[dimGrid.x]+=energy_h[i+dimGrid.x]; } // printf("CUDA energy_h is %f %f %d %d %d %d %d %d\n", energy_h[0], energy_h[dimGrid.x]); //, total_size, h1d, h2d, p4d, p5d,p6d); /* CUDA_SAFE(hipMemcpy(((char *) t3) , ((char *) t3_d) , sizeof(double)*h3d*total_elements, hipMemcpyDeviceToHost)); CUDA_SAFE(hipMemcpy(((char *) ts3) , ((char *) t3_s_d) , sizeof(double)*h3d*total_elements, hipMemcpyDeviceToHost)); total_s[0]=0.0, total_d[0]=0.0; for(int i=0;i<h3d*total_elements;i++) { total_s[0] += ts3[i]; total_d[0] += t3[i]; } */ // printf("Total doubles and singles %f, %f\n", total_d, total_s); energy[0] = energy_h[0]; energy[1] = energy_h[dimGrid.x]; freeGpuMem(energy_d); freeGpuMem(eval_d1); freeGpuMem(eval_d2); freeGpuMem(eval_d3); freeGpuMem(eval_d4); freeGpuMem(eval_d5); freeGpuMem(eval_d6); freeHostMem(energy_h); } extern "C" void compute_en_(double * factor, double * energy, double * eval1,double* eval2,double* eval3,double* eval4,double* eval5,double* eval6, Integer * h1d, Integer * h2d, Integer * h3d, Integer * p4d, Integer * p5d, Integer * p6d, double* host1, double* host2) //ckbn en_comment,double* total_d, double* total_s) { compute_energy((double) *factor, energy, eval1,eval2, eval3, eval4, eval5, eval6,(int) *h1d, (int) *h2d, (int) *h3d, (int) *p4d, (int) *p5d, (int) *p6d, host1, host2); //ckbn en_comment ,total_d, total_s); } //__device__ double* t3_d; extern "C" void set_dev_mem_s(int h1d, int h2d, int h3d, int p4d, int p5d,int p6d) { int size_t3; size_t3 = h1d*h2d*h3d*p4d*p5d*p6d; t3_s_d = (double *) getGpuMem(size_t3*sizeof(double)); hipMemset(t3_s_d,0,size_t3*sizeof(double)); } extern "C" void dev_mem_s_(Integer * h1d, Integer * h2d, Integer * h3d, Integer * p4d, Integer * p5d, Integer * p6d) { set_dev_mem_s((int) *h1d, (int) *h2d, (int) *h3d, (int) *p4d, (int) *p5d, (int) *p6d); } /*----------------------------------------------------------------------* *t3[h3,h2,h1,p6,p5,p4] -= t2[p4,h1] * v2[h3,h2,p6,p5] *----------------------------------------------------------------------*/ #define T1 16 #define T2 16 #define Tcomm 16 __global__ void sd_t_s1_1_kernel(int h1d,int h2d,int h3d,int p4d,int p6d,int 
p4ld_t2,int h1ld_t2,int h3ld_v2,int h2ld_v2,int p6ld_v2,int h3ld_t3,int h2ld_t3,int h1ld_t3,int p6ld_t3,int p4ld_t3, double *t2_d, double *v2_d,int p4, int total_x, double* t3d) { int h1,h2,h3,p6; __shared__ double t2_shm[T1*2*Tcomm]; for(int i=threadIdx.x;i<h1d*p4d;i+=blockDim.x) if(i<h1d*p4d) t2_shm[i] = t2_d[i]; int rest_x=blockIdx.x; int thread_x = T2*T1 * rest_x + threadIdx.x; rest_x = thread_x; __syncthreads(); /* the following computation may need to happen inside the loop */ for(int i=0;i<total_x;i+=gridDim.x*blockDim.x) { rest_x += i; h3=rest_x%h3d; rest_x=rest_x/h3d; h2=rest_x%h2d; rest_x=rest_x/h2d; p6=rest_x%p6d; if((thread_x+i)<total_x) for(h1=0;h1<h1d;h1++) for(p4=0;p4<p4d;p4++) { t3d[h3*h3ld_t3+h2*h2ld_t3+h1*h1ld_t3+p6*p6ld_t3+p4*p4ld_t3]+=t2_shm[h1*p4d+p4]*v2_d[h3*h3ld_v2+h2*h2ld_v2+p6*p6ld_v2]; } } __syncthreads(); } extern "C" void sd_t_s1_1_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, double *t3, double *t2, double *v2) { double st, et; //ckbn st = timer(); size_t p7ld_t2, p4ld_t2, h1ld_t2, h2ld_v2, p7ld_v2, h3ld_v2, p6ld_v2, p5ld_v2, h3ld_t3, h2ld_t3, h1ld_t3, p6ld_t3, p5ld_t3, p4ld_t3; size_t size_t3, size_block_t3, size_el_block_t3, size_t2, size_v2; hipStream_t *streams; size_t nstreams, i; double *t2_d, *v2_d, *t3_p; size_t3 = h3d * h2d * h1d * p6d * p5d * p4d * sizeof(double); size_t2 = p4d * h1d * sizeof(double); size_v2 = h3d * h2d * p6d * p5d * sizeof(double); nstreams = 1; size_block_t3 = size_t3 / nstreams; size_el_block_t3 = size_block_t3 / sizeof(double); //CUDA_SAFE(hipMalloc((void**) &t3_d, size_t3)); //CUDA_SAFE(hipMalloc((void**) &t2_d, size_t2)); //CUDA_SAFE(hipMalloc((void**) &v2_d, size_v2)); // t3_d = (double *) getGpuMem(size_t3); t2_d = (double *) getGpuMem(size_t2); v2_d = (double *) getGpuMem(size_v2); t3_p = (double *) getHostMem(size_t3); streams = (hipStream_t *) malloc(nstreams * sizeof(hipStream_t)); assert(streams != NULL); for (i = 0; i < nstreams; ++i) { CUDA_SAFE(hipStreamCreate(&streams[i])); } CUDA_SAFE(hipMemcpy(t2_d, t2, size_t2, hipMemcpyHostToDevice)); CUDA_SAFE(hipMemcpy(v2_d, v2, size_v2, hipMemcpyHostToDevice)); p4ld_t2 = 1; h1ld_t2 = p4d; h3ld_v2 = 1; h2ld_v2 = h3d; p6ld_v2 = h3d * h2d; // p5ld_v2 = p6d * h3d * p7d; h3ld_t3 = 1; h2ld_t3 = h3d; h1ld_t3 = h2d * h3d; p6ld_t3 = h1d * h2d * h3d; // p5ld_t3 = p6d * h1d * h2d * h3d; p4ld_t3 = p5d * p6d * h1d * h2d * h3d; int total_x = h3d*h2d*p6d*p5d; dim3 dimBlock(T2*T1);dim3 dimGrid(DIV_UB(total_x,T2*T1), 1); for(i=0;i<nstreams;++i){ hipLaunchKernelGGL(( sd_t_s1_1_kernel), dim3(dimGrid),dim3(dimBlock),0,streams[i], h1d,h2d,h3d,p4d,p5d*p6d,p4ld_t2,h1ld_t2,h3ld_v2,h2ld_v2,p6ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p4ld_t3,t2_d,v2_d,i,total_x, t3_s_d); CHECK_ERR("Kernel execution failed"); } /* st = timer(); for (i = 0; i < nstreams; ++i) { CUDA_SAFE(hipMemcpyAsync(((char *) t3_p) + i * size_block_t3, ((char *) t3_s_d) + i * size_block_t3, size_block_t3, hipMemcpyDeviceToHost, streams[i])); } stream = 0; while (stream < nstreams) { while (hipStreamQuery(streams[stream]) != hipSuccess); double *src = &t3_p[stream * size_el_block_t3]; double *dst = &t3[stream * size_el_block_t3]; for (i = 0; i < size_el_block_t3; ++i) { dst[i] = src[i]; } stream++; } */ hipDeviceSynchronize(); // CUDA_SAFE(hipMemcpy(((char *) t3) , ((char *) t3_s_d) , size_t3, hipMemcpyDeviceToHost)); for (i = 0; i < nstreams; ++i) { hipStreamDestroy(streams[i]); } // freeGpuMem(t3_d); freeGpuMem(t2_d); freeGpuMem(v2_d); // hipFree(t2_d); // hipFree(v2_d); freeHostMem(t3_p); free(streams); } #undef T1 #undef 
T2 #undef Tcomm extern "C" void sd_t_s1_1_cuda_(Integer * h1d, Integer * h2d, Integer * h3d, Integer * p4d, Integer * p5d, Integer * p6d, double *t3, double *t2, double *v2) { sd_t_s1_1_cuda((int) *h1d, (int) *h2d, (int) *h3d, (int) *p4d, (int) *p5d, (int) *p6d, t3, t2, v2); } /*----------------------------------------------------------------------* *t3[h3,h1,h2,p6,p5,p4] -= t2[p4,h1] * v2[h3,h2,p6,p5] *----------------------------------------------------------------------*/ #define T1 16 #define T2 16 #define Tcomm 16 __global__ void sd_t_s1_2_kernel(int h1d,int h2d,int h3d,int p4d,int p6d,int p4ld_t2,int h1ld_t2,int h3ld_v2,int h2ld_v2,int p6ld_v2,int h3ld_t3,int h2ld_t3,int h1ld_t3,int p6ld_t3,int p4ld_t3,double *t2_d, double *v2_d,int p4, int total_x, double* t3d) { int h1,h2,h3,p6; __shared__ double t2_shm[T1*2*Tcomm]; for(int i=threadIdx.x;i<h1d*p4d;i+=blockDim.x) if(i<h1d*p4d) t2_shm[i] = t2_d[i]; int rest_x=blockIdx.x; int thread_x = T2*T1 * rest_x + threadIdx.x; rest_x = thread_x; __syncthreads(); /* the following computation may need to happen inside the loop */ for(int i=0;i<total_x;i+=gridDim.x*blockDim.x) { rest_x += i; h3=rest_x%h3d; rest_x=rest_x/h3d; h2=rest_x%h2d; rest_x=rest_x/h2d; p6=rest_x%p6d; if((thread_x+i)<total_x) for(h1=0;h1<h1d;h1++) for(p4=0;p4<p4d;p4++) { t3d[h3*h3ld_t3+h2*h2ld_t3+h1*h1ld_t3+p6*p6ld_t3+p4*p4ld_t3]-=t2_shm[h1*p4d+p4]*v2_d[h3*h3ld_v2+h2*h2ld_v2+p6*p6ld_v2]; } } __syncthreads(); } extern "C" void sd_t_s1_2_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, double *t3, double *t2, double *v2) { double st, et; //ckbn st = timer(); size_t p7ld_t2, p4ld_t2, h1ld_t2, h2ld_v2, p7ld_v2, h3ld_v2, p6ld_v2, p5ld_v2, h3ld_t3, h2ld_t3, h1ld_t3, p6ld_t3, p5ld_t3, p4ld_t3; size_t size_t3, size_block_t3, size_el_block_t3, size_t2, size_v2; hipStream_t *streams; size_t nstreams, i; double *t2_d, *v2_d, *t3_p; size_t3 = h3d * h2d * h1d * p6d * p5d * p4d * sizeof(double); size_t2 = p4d * h1d * sizeof(double); size_v2 = h3d * h2d * p6d * p5d * sizeof(double); nstreams = 1; size_block_t3 = size_t3 / nstreams; size_el_block_t3 = size_block_t3 / sizeof(double); /* if(first==1) { t3_d = (double *) getGpuMem(size_t3); hipMemset(t3_d,0,size_t3*sizeof(double)); first = 0; }*/ //CUDA_SAFE(hipMalloc((void**) &t2_d, size_t2)); //CUDA_SAFE(hipMalloc((void**) &v2_d, size_v2)); t2_d = (double *) getGpuMem(size_t2); v2_d = (double *) getGpuMem(size_v2); t3_p = (double *) getHostMem(size_t3); streams = (hipStream_t *) malloc(nstreams * sizeof(hipStream_t)); /* assert(streams != NULL); for (i = 0; i < nstreams; ++i) { CUDA_SAFE(hipStreamCreate(&streams[i])); }*/ CUDA_SAFE(hipMemcpy(t2_d, t2, size_t2, hipMemcpyHostToDevice)); CUDA_SAFE(hipMemcpy(v2_d, v2, size_v2, hipMemcpyHostToDevice)); p4ld_t2 = 1; h1ld_t2 = p4d ; h3ld_v2 = 1; h2ld_v2 = h3d; p6ld_v2 = h3d * h2d; // p5ld_v2 = p6d * h3d * p7d; h3ld_t3 = 1; h1ld_t3 = h3d; h2ld_t3 = h1d * h3d; p6ld_t3 = h1d * h2d * h3d; // p5ld_t3 = p6d * h1d * h2d * h3d; p4ld_t3 = p5d * p6d * h1d * h2d * h3d; int total_x = h3d*h2d*p6d*p5d; dim3 dimBlock(T2*T1);dim3 dimGrid(DIV_UB(total_x,T2*T1), 1); // for(i=0;i<nstreams;++i){ hipLaunchKernelGGL(( sd_t_s1_2_kernel), dim3(dimGrid),dim3(dimBlock),0, 0, h1d,h2d,h3d,p4d,p5d*p6d,p4ld_t2,h1ld_t2,h3ld_v2,h2ld_v2,p6ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p4ld_t3,t2_d,v2_d,i,total_x, t3_s_d); CHECK_ERR("Kernel execution failed"); // } /* for (i = 0; i < nstreams; ++i) { CUDA_SAFE(hipMemcpyAsync(((char *) t3_p) + i * size_block_t3, ((char *) t3_s_d) + i * size_block_t3, size_block_t3, 
hipMemcpyDeviceToHost, streams[i])); } stream = 0; while (stream < nstreams) { while (hipStreamQuery(streams[stream]) != hipSuccess); double *src = &t3_p[stream * size_el_block_t3]; double *dst = &t3[stream * size_el_block_t3]; for (i = 0; i < size_el_block_t3; ++i) { dst[i] = src[i]; } stream++; }*/ hipDeviceSynchronize(); // CUDA_SAFE(hipMemcpy(((char *) t3) , ((char *) t3_s_d) , size_t3, hipMemcpyDeviceToHost)); /* for (i = 0; i < nstreams; ++i) { hipStreamDestroy(streams[i]); }*/ freeGpuMem(t2_d); freeGpuMem(v2_d); freeHostMem(t3_p); free(streams); } extern "C" void sd_t_s1_2_cuda_(Integer * h1d, Integer * h2d, Integer * h3d, Integer * p4d, Integer * p5d, Integer * p6d, double *t3, double *t2, double *v2) { sd_t_s1_2_cuda((int) *h1d, (int) *h2d, (int) *h3d, (int) *p4d, (int) *p5d, (int) *p6d, t3, t2, v2); } extern "C" void sd_t_s1_3_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, double *t3, double *t2, double *v2) { double st, et; //ckbn st = timer(); size_t p7ld_t2, p4ld_t2, h1ld_t2, h2ld_v2, p7ld_v2, h3ld_v2, p6ld_v2, p5ld_v2, h3ld_t3, h2ld_t3, h1ld_t3, p6ld_t3, p5ld_t3, p4ld_t3; size_t size_t3, size_block_t3, size_el_block_t3, size_t2, size_v2; hipStream_t *streams; size_t nstreams, i; double *t2_d, *v2_d, *t3_p; size_t3 = h3d * h2d * h1d * p6d * p5d * p4d * sizeof(double); size_t2 = p4d * h1d * sizeof(double); size_v2 = h3d * h2d * p6d * p5d * sizeof(double); nstreams = 1; size_block_t3 = size_t3 / nstreams; size_el_block_t3 = size_block_t3 / sizeof(double); /* if(first==1) { t3_d = (double *) getGpuMem(size_t3); hipMemset(t3_d,0,size_t3*sizeof(double)); first = 0; } */ t2_d = (double *) getGpuMem(size_t2); v2_d = (double *) getGpuMem(size_v2); t3_p = (double *) getHostMem(size_t3); streams = (hipStream_t *) malloc(nstreams * sizeof(hipStream_t)); assert(streams != NULL); for (i = 0; i < nstreams; ++i) { CUDA_SAFE(hipStreamCreate(&streams[i])); } CUDA_SAFE(hipMemcpy(t2_d, t2, size_t2, hipMemcpyHostToDevice)); CUDA_SAFE(hipMemcpy(v2_d, v2, size_v2, hipMemcpyHostToDevice)); p4ld_t2 = 1; h1ld_t2 = p4d ; h3ld_v2 = 1; h2ld_v2 = h3d; p6ld_v2 = h3d * h2d; // p5ld_v2 = p6d * h3d * p7d; h1ld_t3 = 1; h3ld_t3 = h1d; h2ld_t3 = h1d * h3d; p6ld_t3 = h1d * h2d * h3d; // p5ld_t3 = p6d * h1d * h2d * h3d; p4ld_t3 = p5d * p6d * h1d * h2d * h3d; int total_x = h3d*h2d*p6d*p5d; dim3 dimBlock(T2*T1);dim3 dimGrid(DIV_UB(total_x,T2*T1), 1); for(i=0;i<nstreams;++i){ hipLaunchKernelGGL(( sd_t_s1_1_kernel), dim3(dimGrid),dim3(dimBlock),0,streams[i], h1d,h2d,h3d,p4d,p5d*p6d,p4ld_t2,h1ld_t2,h3ld_v2,h2ld_v2,p6ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p4ld_t3,t2_d,v2_d,i,total_x, t3_s_d); CHECK_ERR("Kernel execution failed"); } /* for (i = 0; i < nstreams; ++i) { CUDA_SAFE(hipMemcpyAsync(((char *) t3_p) + i * size_block_t3, ((char *) t3_s_d) + i * size_block_t3, size_block_t3, hipMemcpyDeviceToHost, streams[i])); } stream = 0; while (stream < nstreams) { while (hipStreamQuery(streams[stream]) != hipSuccess); double *src = &t3_p[stream * size_el_block_t3]; double *dst = &t3[stream * size_el_block_t3]; for (i = 0; i < size_el_block_t3; ++i) { dst[i] = src[i]; } stream++; } */ hipDeviceSynchronize(); //CUDA_SAFE(hipMemcpy(((char *) t3) , ((char *) t3_s_d) , size_t3, hipMemcpyDeviceToHost)); for (i = 0; i < nstreams; ++i) { hipStreamDestroy(streams[i]); } // freeGpuMem(t3_d); freeGpuMem(t2_d); freeGpuMem(v2_d); freeHostMem(t3_p); free(streams); } #undef T1 #undef T2 #undef Tcomm extern "C" void sd_t_s1_3_cuda_(Integer * h1d, Integer * h2d, Integer * h3d, Integer * p4d, Integer * p5d, Integer * p6d, double 
*t3, double *t2, double *v2) { sd_t_s1_3_cuda((int) *h1d, (int) *h2d, (int) *h3d, (int) *p4d, (int) *p5d, (int) *p6d, t3, t2, v2); } /*----------------------------------------------------------------------* *t3[h3,h2,h1,p6,p4,p5] -= t2[p4,h1] * v2[h3,h2,p6,p5] *----------------------------------------------------------------------*/ #define T1 16 #define T2 16 #define Tcomm 16 __global__ void sd_t_s1_4_kernel(int h1d,int h2d,int h3d,int p4d,int p5d,int p6d,int p4ld_t2,int h1ld_t2,int h3ld_v2,int h2ld_v2,int p6ld_v2,int p5ld_v2,int h3ld_t3,int h2ld_t3,int h1ld_t3,int p6ld_t3,int p5ld_t3,int p4ld_t3,double *t3d, double *t2_d, double *v2_d,int p4, int total_x) { int h1,h2,h3,p6,p5; __shared__ double t2_shm[T1*2*Tcomm]; for(int i=threadIdx.x;i<h1d*p4d;i+=blockDim.x) if(i<h1d*p4d) t2_shm[i] = t2_d[i]; int rest_x=blockIdx.x; int thread_x = T2*T1 * rest_x + threadIdx.x; rest_x = thread_x; __syncthreads(); /* the following computation may need to happen inside the loop */ for(int i=0;i<total_x;i+=gridDim.x*blockDim.x) { rest_x += i; h3=rest_x%h3d; rest_x=rest_x/h3d; h2=rest_x%h2d; rest_x=rest_x/h2d; p6=rest_x%p6d; rest_x=rest_x/p6d; p5=rest_x%p5d; if((thread_x+i)<total_x) for(h1=0;h1<h1d;h1++) for(p4=0;p4<p4d;p4++) { t3d[h3*h3ld_t3+h2*h2ld_t3+h1*h1ld_t3+p6*p6ld_t3+p5*p5ld_t3+p4*p4ld_t3]-=t2_shm[h1*p4d+p4]*v2_d[h3*h3ld_v2+h2*h2ld_v2+p6*p6ld_v2+p5*p5ld_v2]; } } __syncthreads(); } extern "C" void sd_t_s1_4_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, double *t3, double *t2, double *v2) { double st, et; //ckbn st = timer(); size_t p7ld_t2, p4ld_t2, h1ld_t2, h2ld_v2, p7ld_v2, h3ld_v2, p6ld_v2, p5ld_v2, h3ld_t3, h2ld_t3, h1ld_t3, p6ld_t3, p5ld_t3, p4ld_t3; size_t size_t3, size_block_t3, size_el_block_t3, size_t2, size_v2; hipStream_t *streams; size_t nstreams, i; double *t2_d, *v2_d, *t3_p; size_t3 = h3d * h2d * h1d * p6d * p5d * p4d * sizeof(double); size_t2 = p4d * h1d * sizeof(double); size_v2 = h3d * h2d * p6d * p5d * sizeof(double); nstreams = 1; size_block_t3 = size_t3 / nstreams; size_el_block_t3 = size_block_t3 / sizeof(double); /* if(first==1) { t3_d = (double *) getGpuMem(size_t3); hipMemset(t3_d,0,size_t3*sizeof(double)); first = 0; } */ // t3_d = (double *) getGpuMem(size_t3); t2_d = (double *) getGpuMem(size_t2); v2_d = (double *) getGpuMem(size_v2); t3_p = (double *) getHostMem(size_t3); streams = (hipStream_t *) malloc(nstreams * sizeof(hipStream_t)); /* assert(streams != NULL); for (i = 0; i < nstreams; ++i) { CUDA_SAFE(hipStreamCreate(&streams[i])); }*/ CUDA_SAFE(hipMemcpy(t2_d, t2, size_t2, hipMemcpyHostToDevice)); CUDA_SAFE(hipMemcpy(v2_d, v2, size_v2, hipMemcpyHostToDevice)); p4ld_t2 = 1; h1ld_t2 = p4d; h3ld_v2 = 1; h2ld_v2 = h3d; p6ld_v2 = h3d * h2d; p5ld_v2 = p6d * h3d * h2d; h3ld_t3 = 1; h2ld_t3 = h3d; h1ld_t3 = h2d * h3d; p6ld_t3 = h1d * h2d * h3d; p4ld_t3 = p6d * h1d * h2d * h3d; p5ld_t3 = p4d * p6d * h1d * h2d * h3d; int total_x = h3d*h2d*p6d*p5d; dim3 dimBlock(T2*T1);dim3 dimGrid(DIV_UB(total_x,T2*T1), 1); i=0; // for(i=0;i<nstreams;++i){ hipLaunchKernelGGL(( sd_t_s1_4_kernel), dim3(dimGrid),dim3(dimBlock),0, 0, h1d,h2d,h3d,p4d,p5d,p6d,p4ld_t2,h1ld_t2,h3ld_v2,h2ld_v2,p6ld_v2,p5ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p5ld_t3,p4ld_t3,t3_s_d,t2_d,v2_d,i,total_x); //sd_t_s1_4_kernel<<<dimGrid,dimBlock,0,streams[i]>>>(h1d,h2d,h3d,p4d,p5d,p6d,p4ld_t2,h1ld_t2,h3ld_v2,h2ld_v2,p6ld_v2,p5ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p5ld_t3,p4ld_t3,t3_d,t2_d,v2_d,i,total_x); CHECK_ERR("Kernel execution failed"); // } hipDeviceSynchronize(); /* CUDA_SAFE(hipMemcpy(((char *) t3_p) , 
((char *) t3_d) , size_block_t3, hipMemcpyDeviceToHost)); printf("Time for Async DeviceToHost %f\n", et-st); stream = 0; // while (stream < nstreams) { // while (hipStreamQuery(streams[stream]) != hipSuccess); double *src = t3_p; //[stream * size_el_block_t3]; double *dst = t3; //[stream * size_el_block_t3]; for (i = 0; i < size_el_block_t3; ++i) { dst[i] -= src[i]; } // stream++; // } */ // hipDeviceSynchronize(); /* for (i = 0; i < nstreams; ++i) { hipStreamDestroy(streams[i]); }*/ // freeGpuMem(t3_d); freeGpuMem(t2_d); freeGpuMem(v2_d); freeHostMem(t3_p); free(streams); } #undef T1 #undef T2 #undef Tcomm extern "C" void sd_t_s1_4_cuda_(Integer * h1d, Integer * h2d, Integer * h3d, Integer * p4d, Integer * p5d, Integer * p6d, double *t3, double *t2, double *v2) { sd_t_s1_4_cuda((int) *h1d, (int) *h2d, (int) *h3d, (int) *p4d, (int) *p5d, (int) *p6d, t3, t2, v2); } /*----------------------------------------------------------------------* *t3[h3,h1,h2,p6,p4,p5] -= t2[p4,h1] * v2[h3,h2,p6,p5] *----------------------------------------------------------------------*/ #define T1 16 #define T2 16 #define Tcomm 16 __global__ void sd_t_s1_5_kernel(int h1d,int h2d,int h3d,int p4d,int p5d,int p6d,int p4ld_t2,int h1ld_t2,int h3ld_v2,int h2ld_v2,int p6ld_v2,int p5ld_v2,int h3ld_t3,int h2ld_t3,int h1ld_t3,int p6ld_t3,int p5ld_t3,int p4ld_t3,double *t3d, double *t2_d, double *v2_d,int p4, int total_x) { int h1,h2,h3,p6,p5; __shared__ double t2_shm[T1*2*Tcomm]; for(int i=threadIdx.x;i<h1d*p4d;i+=blockDim.x) if(i<h1d*p4d) t2_shm[i] = t2_d[i]; int rest_x=blockIdx.x; int thread_x = T2*T1 * rest_x + threadIdx.x; rest_x = thread_x; __syncthreads(); /* the following computation may need to happen inside the loop */ for(int i=0;i<total_x;i+=gridDim.x*blockDim.x) { rest_x += i; h3=rest_x%h3d; rest_x=rest_x/h3d; h2=rest_x%h2d; rest_x=rest_x/h2d; p6=rest_x%p6d; rest_x=rest_x/p6d; p5=rest_x%p5d; if((thread_x+i)<total_x) for(h1=0;h1<h1d;h1++) for(p4=0;p4<p4d;p4++) { t3d[h3*h3ld_t3+h2*h2ld_t3+h1*h1ld_t3+p6*p6ld_t3+p5*p5ld_t3+p4*p4ld_t3]+=t2_shm[h1*p4d+p4]*v2_d[h3*h3ld_v2+h2*h2ld_v2+p6*p6ld_v2+p5*p5ld_v2]; } } __syncthreads(); } extern "C" void sd_t_s1_5_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, double *t3, double *t2, double *v2) { double st, et; //ckbn st = timer(); size_t p7ld_t2, p4ld_t2, h1ld_t2, h2ld_v2, p7ld_v2, h3ld_v2, p6ld_v2, p5ld_v2, h3ld_t3, h2ld_t3, h1ld_t3, p6ld_t3, p5ld_t3, p4ld_t3; size_t size_t3, size_block_t3, size_el_block_t3, size_t2, size_v2; hipStream_t *streams; size_t nstreams, i; double *t2_d, *v2_d, *t3_p; size_t3 = h3d * h2d * h1d * p6d * p5d * p4d * sizeof(double); size_t2 = p4d * h1d * sizeof(double); size_v2 = h3d * h2d * p6d * p5d * sizeof(double); nstreams = 1; size_block_t3 = size_t3 / nstreams; size_el_block_t3 = size_block_t3 / sizeof(double); /* if(first==1) { t3_d = (double *) getGpuMem(size_t3); hipMemset(t3_d,0,size_t3*sizeof(double)); first = 0; } */ // t3_d = (double *) getGpuMem(size_t3); t2_d = (double *) getGpuMem(size_t2); v2_d = (double *) getGpuMem(size_v2); t3_p = (double *) getHostMem(size_t3); streams = (hipStream_t *) malloc(nstreams * sizeof(hipStream_t)); assert(streams != NULL); for (i = 0; i < nstreams; ++i) { CUDA_SAFE(hipStreamCreate(&streams[i])); } CUDA_SAFE(hipMemcpy(t2_d, t2, size_t2, hipMemcpyHostToDevice)); CUDA_SAFE(hipMemcpy(v2_d, v2, size_v2, hipMemcpyHostToDevice)); p4ld_t2 = 1; h1ld_t2 = p4d ; h3ld_v2 = 1; h2ld_v2 = h3d; p6ld_v2 = h3d * h2d; p5ld_v2 = p6d * h3d * h2d; h3ld_t3 = 1; h1ld_t3 = h3d; h2ld_t3 = h1d * h3d; p6ld_t3 = h1d * 
h2d * h3d; p4ld_t3 = p6d * h1d * h2d * h3d; p5ld_t3 = p4d * p6d * h1d * h2d * h3d; int total_x = h3d*h2d*p6d*p5d; dim3 dimBlock(T2*T1);dim3 dimGrid(DIV_UB(total_x,T2*T1), 1); for(i=0;i<nstreams;++i){ hipLaunchKernelGGL(( sd_t_s1_5_kernel), dim3(dimGrid),dim3(dimBlock),0,streams[i], h1d,h2d,h3d,p4d,p5d,p6d,p4ld_t2,h1ld_t2,h3ld_v2,h2ld_v2,p6ld_v2,p5ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p5ld_t3,p4ld_t3,t3_s_d,t2_d,v2_d,i,total_x); CHECK_ERR("Kernel execution failed"); } /* for (i = 0; i < nstreams; ++i) { CUDA_SAFE(hipMemcpyAsync(((char *) t3_p) + i * size_block_t3, ((char *) t3_s_d) + i * size_block_t3, size_block_t3, hipMemcpyDeviceToHost, streams[i])); } stream = 0; while (stream < nstreams) { while (hipStreamQuery(streams[stream]) != hipSuccess); double *src = &t3_p[stream * size_el_block_t3]; double *dst = &t3[stream * size_el_block_t3]; for (i = 0; i < size_el_block_t3; ++i) { dst[i] = src[i]; } stream++; } */ hipDeviceSynchronize(); //CUDA_SAFE(hipMemcpy(((char *) t3) , ((char *) t3_s_d) , size_t3, hipMemcpyDeviceToHost)); for (i = 0; i < nstreams; ++i) { hipStreamDestroy(streams[i]); } // freeGpuMem(t3_d); freeGpuMem(t2_d); freeGpuMem(v2_d); freeHostMem(t3_p); free(streams); } #undef T1 #undef T2 #undef Tcomm extern "C" void sd_t_s1_5_cuda_(Integer * h1d, Integer * h2d, Integer * h3d, Integer * p4d, Integer * p5d, Integer * p6d, double *t3, double *t2, double *v2) { sd_t_s1_5_cuda((int) *h1d, (int) *h2d, (int) *h3d, (int) *p4d, (int) *p5d, (int) *p6d, t3, t2, v2); } /*----------------------------------------------------------------------* *t3[h1,h3,h2,p6,p4,p5] -= t2[p4,h1] * v2[h3,h2,p6,p5] *----------------------------------------------------------------------*/ #define T1 16 #define T2 16 #define Tcomm 16 __global__ void sd_t_s1_6_kernel(int h1d,int h2d,int h3d,int p4d,int p5d,int p6d,int p4ld_t2,int h1ld_t2,int h3ld_v2,int h2ld_v2,int p6ld_v2,int p5ld_v2,int h3ld_t3,int h2ld_t3,int h1ld_t3,int p6ld_t3,int p5ld_t3,int p4ld_t3,double *t3d, double *t2_d, double *v2_d,int p4, int total_x) { int h1,h2,h3,p6,p5; __shared__ double t2_shm[T1*2*Tcomm]; for(int i=threadIdx.x;i<h1d*p4d;i+=blockDim.x) if(i<h1d*p4d) t2_shm[i] = t2_d[i]; int rest_x=blockIdx.x; int thread_x = T2*T1 * rest_x + threadIdx.x; rest_x = thread_x; __syncthreads(); /* the following computation may need to happen inside the loop */ for(int i=0;i<total_x;i+=gridDim.x*blockDim.x) { rest_x += i; h3=rest_x%h3d; rest_x=rest_x/h3d; h2=rest_x%h2d; rest_x=rest_x/h2d; p6=rest_x%p6d; rest_x=rest_x/p6d; p5=rest_x%p5d; if((thread_x+i)<total_x) for(h1=0;h1<h1d;h1++) for(p4=0;p4<p4d;p4++) { t3d[h3*h3ld_t3+h2*h2ld_t3+h1*h1ld_t3+p6*p6ld_t3+p5*p5ld_t3+p4*p4ld_t3]-=t2_shm[h1*p4d+p4]*v2_d[h3*h3ld_v2+h2*h2ld_v2+p6*p6ld_v2+p5*p5ld_v2]; } } __syncthreads(); } extern "C" void sd_t_s1_6_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, double *t3, double *t2, double *v2) { double st, et; //ckbn st = timer(); size_t p7ld_t2, p4ld_t2, h1ld_t2, h2ld_v2, p7ld_v2, h3ld_v2, p6ld_v2, p5ld_v2, h3ld_t3, h2ld_t3, h1ld_t3, p6ld_t3, p5ld_t3, p4ld_t3; size_t size_t3, size_block_t3, size_el_block_t3, size_t2, size_v2; hipStream_t *streams; size_t nstreams, i; double *t2_d, *v2_d, *t3_p; size_t3 = h3d * h2d * h1d * p6d * p5d * p4d * sizeof(double); size_t2 = p4d * h1d * sizeof(double); size_v2 = h3d * h2d * p6d * p5d * sizeof(double); nstreams = 1; size_block_t3 = size_t3 / nstreams; size_el_block_t3 = size_block_t3 / sizeof(double); /* if(first==1) { t3_d = (double *) getGpuMem(size_t3); hipMemset(t3_d,0,size_t3*sizeof(double)); first = 0; } */ // 
t3_d = (double *) getGpuMem(size_t3); t2_d = (double *) getGpuMem(size_t2); v2_d = (double *) getGpuMem(size_v2); t3_p = (double *) getHostMem(size_t3); streams = (hipStream_t *) malloc(nstreams * sizeof(hipStream_t)); assert(streams != NULL); for (i = 0; i < nstreams; ++i) { CUDA_SAFE(hipStreamCreate(&streams[i])); } CUDA_SAFE(hipMemcpy(t2_d, t2, size_t2, hipMemcpyHostToDevice)); CUDA_SAFE(hipMemcpy(v2_d, v2, size_v2, hipMemcpyHostToDevice)); p4ld_t2 = 1; h1ld_t2 = p4d; h3ld_v2 = 1; h2ld_v2 = h3d; p6ld_v2 = h3d * h2d; p5ld_v2 = p6d * h3d * h2d; h1ld_t3 = 1; h3ld_t3 = h1d; h2ld_t3 = h1d * h3d; p6ld_t3 = h1d * h2d * h3d; p4ld_t3 = p6d * h1d * h2d * h3d; p5ld_t3 = p4d * p6d * h1d * h2d * h3d; int total_x = h3d*h2d*p6d*p5d; dim3 dimBlock(T2*T1);dim3 dimGrid(DIV_UB(total_x,T2*T1), 1); for(i=0;i<nstreams;++i){ hipLaunchKernelGGL(( sd_t_s1_6_kernel), dim3(dimGrid),dim3(dimBlock),0,streams[i], h1d,h2d,h3d,p4d,p5d,p6d,p4ld_t2,h1ld_t2,h3ld_v2,h2ld_v2,p6ld_v2,p5ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p5ld_t3,p4ld_t3,t3_s_d,t2_d,v2_d,i,total_x); CHECK_ERR("Kernel execution failed"); } /* for (i = 0; i < nstreams; ++i) { CUDA_SAFE(hipMemcpyAsync(((char *) t3_p) + i * size_block_t3, ((char *) t3_s_d) + i * size_block_t3, size_block_t3, hipMemcpyDeviceToHost, streams[i])); } stream = 0; while (stream < nstreams) { while (hipStreamQuery(streams[stream]) != hipSuccess); double *src = &t3_p[stream * size_el_block_t3]; double *dst = &t3[stream * size_el_block_t3]; for (i = 0; i < size_el_block_t3; ++i) { dst[i] = src[i]; } stream++; }*/ hipDeviceSynchronize(); //CUDA_SAFE(hipMemcpy(((char *) t3) , ((char *) t3_s_d) , size_t3, hipMemcpyDeviceToHost)); for (i = 0; i < nstreams; ++i) { hipStreamDestroy(streams[i]); } // freeGpuMem(t3_d); freeGpuMem(t2_d); freeGpuMem(v2_d); freeHostMem(t3_p); free(streams); } #undef T1 #undef T2 #undef Tcomm extern "C" void sd_t_s1_6_cuda_(Integer * h1d, Integer * h2d, Integer * h3d, Integer * p4d, Integer * p5d, Integer * p6d, double *t3, double *t2, double *v2) { sd_t_s1_6_cuda((int) *h1d, (int) *h2d, (int) *h3d, (int) *p4d, (int) *p5d, (int) *p6d, t3, t2, v2); } /*----------------------------------------------------------------------* *t3[h3,h2,h1,p4,p6,p5] -= t2[p4,h1] * v2[h3,h2,p6,p5] *----------------------------------------------------------------------*/ #define T1 16 #define T2 16 #define Tcomm 16 __global__ void sd_t_s1_7_kernel(int h1d,int h2d,int h3d,int p4d,int p6d,int p4ld_t2,int h1ld_t2,int h3ld_v2,int h2ld_v2,int p6ld_v2,int h3ld_t3,int h2ld_t3,int h1ld_t3,int p6ld_t3,int p4ld_t3,double *t3d, double *t2_d, double *v2_d,int p4, int total_x) { int h1,h2,h3,p6; __shared__ double t2_shm[T1*2*Tcomm]; for(int i=threadIdx.x;i<h1d*p4d;i+=blockDim.x) if(i<h1d*p4d) t2_shm[i] = t2_d[i]; int rest_x=blockIdx.x; int thread_x = T2*T1 * rest_x + threadIdx.x; rest_x = thread_x; __syncthreads(); /* the following computation may need to happen inside the loop */ for(int i=0;i<total_x;i+=gridDim.x*blockDim.x) { rest_x += i; h3=rest_x%h3d; rest_x=rest_x/h3d; h2=rest_x%h2d; rest_x=rest_x/h2d; p6=rest_x%p6d; if((thread_x+i)<total_x) for(h1=0;h1<h1d;h1++) for(p4=0;p4<p4d;p4++) { t3d[h3*h3ld_t3+h2*h2ld_t3+h1*h1ld_t3+p6*p6ld_t3+p4*p4ld_t3]+=t2_shm[h1*p4d+p4]*v2_d[h3*h3ld_v2+h2*h2ld_v2+p6*p6ld_v2]; } } __syncthreads(); } extern "C" void sd_t_s1_7_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, double *t3, double *t2, double *v2) { double st, et; //ckbn st = timer(); size_t p7ld_t2, p4ld_t2, h1ld_t2, h2ld_v2, p7ld_v2, h3ld_v2, p6ld_v2, p5ld_v2, h3ld_t3, h2ld_t3, h1ld_t3, p6ld_t3, 
p5ld_t3, p4ld_t3; size_t size_t3, size_block_t3, size_el_block_t3, size_t2, size_v2; hipStream_t *streams; size_t nstreams, i; double *t2_d, *v2_d, *t3_p; size_t3 = h3d * h2d * h1d * p6d * p5d * p4d * sizeof(double); size_t2 = p4d * h1d * sizeof(double); size_v2 = h3d * h2d * p6d * p5d * sizeof(double); nstreams = 1; size_block_t3 = size_t3 / nstreams; size_el_block_t3 = size_block_t3 / sizeof(double); /* if(first==1) { t3_d = (double *) getGpuMem(size_t3); hipMemset(t3_d,0,size_t3*sizeof(double)); first = 0; } */ // t3_d = (double *) getGpuMem(size_t3); t2_d = (double *) getGpuMem(size_t2); v2_d = (double *) getGpuMem(size_v2); t3_p = (double *) getHostMem(size_t3); streams = (hipStream_t *) malloc(nstreams * sizeof(hipStream_t)); assert(streams != NULL); for (i = 0; i < nstreams; ++i) { CUDA_SAFE(hipStreamCreate(&streams[i])); } CUDA_SAFE(hipMemcpy(t2_d, t2, size_t2, hipMemcpyHostToDevice)); CUDA_SAFE(hipMemcpy(v2_d, v2, size_v2, hipMemcpyHostToDevice)); p4ld_t2 = 1; h1ld_t2 = p4d; h3ld_v2 = 1; h2ld_v2 = h3d; p6ld_v2 = h3d * h2d; // p5ld_v2 = p6d * h3d * p7d; h3ld_t3 = 1; h2ld_t3 = h3d; h1ld_t3 = h2d * h3d; p4ld_t3 = h1d * h2d * h3d; // p5ld_t3 = p6d * h1d * h2d * h3d; p6ld_t3 = p4d * h1d * h2d * h3d; int total_x = h3d*h2d*p6d*p5d; dim3 dimBlock(T2*T1);dim3 dimGrid(DIV_UB(total_x,T2*T1), 1); for(i=0;i<nstreams;++i){ hipLaunchKernelGGL(( sd_t_s1_7_kernel), dim3(dimGrid),dim3(dimBlock),0,streams[i], h1d,h2d,h3d,p4d,p5d*p6d,p4ld_t2,h1ld_t2,h3ld_v2,h2ld_v2,p6ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p4ld_t3,t3_s_d,t2_d,v2_d,i,total_x); CHECK_ERR("Kernel execution failed"); } /* for (i = 0; i < nstreams; ++i) { CUDA_SAFE(hipMemcpyAsync(((char *) t3_p) + i * size_block_t3, ((char *) t3_s_d) + i * size_block_t3, size_block_t3, hipMemcpyDeviceToHost, streams[i])); } stream = 0; while (stream < nstreams) { while (hipStreamQuery(streams[stream]) != hipSuccess); double *src = &t3_p[stream * size_el_block_t3]; double *dst = &t3[stream * size_el_block_t3]; for (i = 0; i < size_el_block_t3; ++i) { dst[i] = src[i]; } stream++; }*/ hipDeviceSynchronize(); //CUDA_SAFE(hipMemcpy(((char *) t3) , ((char *) t3_s_d) , size_t3, hipMemcpyDeviceToHost)); for (i = 0; i < nstreams; ++i) { hipStreamDestroy(streams[i]); } // freeGpuMem(t3_d); freeGpuMem(t2_d); freeGpuMem(v2_d); freeHostMem(t3_p); free(streams); } #undef T1 #undef T2 #undef Tcomm extern "C" void sd_t_s1_7_cuda_(Integer * h1d, Integer * h2d, Integer * h3d, Integer * p4d, Integer * p5d, Integer * p6d, double *t3, double *t2, double *v2) { sd_t_s1_7_cuda((int) *h1d, (int) *h2d, (int) *h3d, (int) *p4d, (int) *p5d, (int) *p6d, t3, t2, v2); } #define T1 16 #define T2 16 #define Tcomm 16 __global__ void sd_t_s1_8_kernel(int h1d,int h2d,int h3d,int p4d,int p6d,int p4ld_t2,int h1ld_t2,int h3ld_v2,int h2ld_v2,int p6ld_v2,int h3ld_t3,int h2ld_t3,int h1ld_t3,int p6ld_t3,int p4ld_t3,double *t3d, double *t2_d, double *v2_d,int p4, int total_x) { int h1,h2,h3,p6; __shared__ double t2_shm[T1*2*Tcomm]; for(int i=threadIdx.x;i<h1d*p4d;i+=blockDim.x) if(i<h1d*p4d) t2_shm[i] = t2_d[i]; int rest_x=blockIdx.x; int thread_x = T2*T1 * rest_x + threadIdx.x; rest_x = thread_x; __syncthreads(); /* the following computation may need to happen inside the loop */ for(int i=0;i<total_x;i+=gridDim.x*blockDim.x) { rest_x += i; h3=rest_x%h3d; rest_x=rest_x/h3d; h2=rest_x%h2d; rest_x=rest_x/h2d; p6=rest_x%p6d; if((thread_x+i)<total_x) for(h1=0;h1<h1d;h1++) for(p4=0;p4<p4d;p4++) { 
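/* Innermost accumulation (descriptive comment): this thread owns one decoded
   (h3,h2,p6) triple, where p6 runs over the fused p6d*p5d extent passed in by
   the host wrapper; for every (h1,p4) it subtracts the singles term
   t2[p4,h1] * v2[h3,h2,p6] (t2 staged in shared memory as t2_shm) from the
   addressed element of the t3 accumulator. */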
t3d[h3*h3ld_t3+h2*h2ld_t3+h1*h1ld_t3+p6*p6ld_t3+p4*p4ld_t3]-=t2_shm[h1*p4d+p4]*v2_d[h3*h3ld_v2+h2*h2ld_v2+p6*p6ld_v2]; } } __syncthreads(); } /*----------------------------------------------------------------------* *t3[h3,h1,h2,p4,p6,p5] -= t2[p4,h1] * v2[h3,h2,p6,p5] *----------------------------------------------------------------------*/ #define T1 16 #define T2 16 #define Tcomm 16 extern "C" void sd_t_s1_8_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, double *t3, double *t2, double *v2) { double st, et; //ckbn st = timer(); size_t p7ld_t2, p4ld_t2, h1ld_t2, h2ld_v2, p7ld_v2, h3ld_v2, p6ld_v2, p5ld_v2, h3ld_t3, h2ld_t3, h1ld_t3, p6ld_t3, p5ld_t3, p4ld_t3; size_t size_t3, size_block_t3, size_el_block_t3, size_t2, size_v2; hipStream_t *streams; size_t nstreams, i; double *t2_d, *v2_d, *t3_p; size_t3 = h3d * h2d * h1d * p6d * p5d * p4d * sizeof(double); size_t2 = p4d * h1d * sizeof(double); size_v2 = h3d * h2d * p6d * p5d * sizeof(double); nstreams = 1; size_block_t3 = size_t3 / nstreams; size_el_block_t3 = size_block_t3 / sizeof(double); /* if(first==1) { t3_d = (double *) getGpuMem(size_t3); hipMemset(t3_d,0,size_t3*sizeof(double)); first = 0; } */ // t3_d = (double *) getGpuMem(size_t3); t2_d = (double *) getGpuMem(size_t2); v2_d = (double *) getGpuMem(size_v2); t3_p = (double *) getHostMem(size_t3); streams = (hipStream_t *) malloc(nstreams * sizeof(hipStream_t)); assert(streams != NULL); for (i = 0; i < nstreams; ++i) { CUDA_SAFE(hipStreamCreate(&streams[i])); } CUDA_SAFE(hipMemcpy(t2_d, t2, size_t2, hipMemcpyHostToDevice)); CUDA_SAFE(hipMemcpy(v2_d, v2, size_v2, hipMemcpyHostToDevice)); p4ld_t2 = 1; h1ld_t2 = p4d; h3ld_v2 = 1; h2ld_v2 = h3d; p6ld_v2 = h3d * h2d; // p5ld_v2 = p6d * h3d * p7d; h3ld_t3 = 1; h1ld_t3 = h3d; h2ld_t3 = h1d * h3d; p4ld_t3 = h1d * h2d * h3d; // p5ld_t3 = p6d * h1d * h2d * h3d; p6ld_t3 = p4d * h1d * h2d * h3d; int total_x = h3d*h2d*p6d*p5d; dim3 dimBlock(T2*T1);dim3 dimGrid(DIV_UB(total_x,T2*T1), 1); for(i=0;i<nstreams;++i){ hipLaunchKernelGGL(( sd_t_s1_8_kernel), dim3(dimGrid),dim3(dimBlock),0,streams[i], h1d,h2d,h3d,p4d,p5d*p6d,p4ld_t2,h1ld_t2,h3ld_v2,h2ld_v2,p6ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p4ld_t3,t3_s_d,t2_d,v2_d,i,total_x); CHECK_ERR("Kernel execution failed"); } /* for (i = 0; i < nstreams; ++i) { CUDA_SAFE(hipMemcpyAsync(((char *) t3_p) + i * size_block_t3, ((char *) t3_s_d) + i * size_block_t3, size_block_t3, hipMemcpyDeviceToHost, streams[i])); } stream = 0; while (stream < nstreams) { while (hipStreamQuery(streams[stream]) != hipSuccess); double *src = &t3_p[stream * size_el_block_t3]; double *dst = &t3[stream * size_el_block_t3]; for (i = 0; i < size_el_block_t3; ++i) { dst[i] = src[i]; } stream++; }*/ hipDeviceSynchronize(); // CUDA_SAFE(hipMemcpy(((char *) t3) , ((char *) t3_s_d) , size_t3, hipMemcpyDeviceToHost)); for (i = 0; i < nstreams; ++i) { hipStreamDestroy(streams[i]); } // freeGpuMem(t3_d); freeGpuMem(t2_d); freeGpuMem(v2_d); freeHostMem(t3_p); free(streams); } extern "C" void sd_t_s1_8_cuda_(Integer * h1d, Integer * h2d, Integer * h3d, Integer * p4d, Integer * p5d, Integer * p6d, double *t3, double *t2, double *v2) { sd_t_s1_8_cuda((int) *h1d, (int) *h2d, (int) *h3d, (int) *p4d, (int) *p5d, (int) *p6d, t3, t2, v2); } /*----------------------------------------------------------------------* *t3[h1,h3,h2,p4,p6,p5] -= t2[p4,h1] * v2[h3,h2,p6,p5] *----------------------------------------------------------------------*/ extern "C" void sd_t_s1_9_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, double *t3, 
double *t2, double *v2) { double st, et; //ckbn st = timer(); size_t p7ld_t2, p4ld_t2, h1ld_t2, h2ld_v2, p7ld_v2, h3ld_v2, p6ld_v2, p5ld_v2, h3ld_t3, h2ld_t3, h1ld_t3, p6ld_t3, p5ld_t3, p4ld_t3; size_t size_t3, size_block_t3, size_el_block_t3, size_t2, size_v2; hipStream_t *streams; size_t nstreams, i; double *t2_d, *v2_d, *t3_p; size_t3 = h3d * h2d * h1d * p6d * p5d * p4d * sizeof(double); size_t2 = p4d * h1d * sizeof(double); size_v2 = h3d * h2d * p6d * p5d * sizeof(double); nstreams = 1; size_block_t3 = size_t3 / nstreams; size_el_block_t3 = size_block_t3 / sizeof(double); /* if(first==1) { t3_d = (double *) getGpuMem(size_t3); hipMemset(t3_d,0,size_t3*sizeof(double)); first = 0; } */ // t3_d = (double *) getGpuMem(size_t3); t2_d = (double *) getGpuMem(size_t2); v2_d = (double *) getGpuMem(size_v2); t3_p = (double *) getHostMem(size_t3); streams = (hipStream_t *) malloc(nstreams * sizeof(hipStream_t)); assert(streams != NULL); for (i = 0; i < nstreams; ++i) { CUDA_SAFE(hipStreamCreate(&streams[i])); } CUDA_SAFE(hipMemcpy(t2_d, t2, size_t2, hipMemcpyHostToDevice)); CUDA_SAFE(hipMemcpy(v2_d, v2, size_v2, hipMemcpyHostToDevice)); p4ld_t2 = 1; h1ld_t2 = p4d; h3ld_v2 = 1; h2ld_v2 = h3d; p6ld_v2 = h3d * h2d; // p5ld_v2 = p6d * h3d * p7d; h1ld_t3 = 1; h3ld_t3 = h1d; h2ld_t3 = h1d * h3d; p4ld_t3 = h1d * h2d * h3d; // p5ld_t3 = p6d * h1d * h2d * h3d; p6ld_t3 = p4d * h1d * h2d * h3d; int total_x = h3d*h2d*p6d*p5d; dim3 dimBlock(T2*T1);dim3 dimGrid(DIV_UB(total_x,T2*T1), 1); for(i=0;i<nstreams;++i){ hipLaunchKernelGGL(( sd_t_s1_7_kernel), dim3(dimGrid),dim3(dimBlock),0,streams[i], h1d,h2d,h3d,p4d,p5d*p6d,p4ld_t2,h1ld_t2,h3ld_v2,h2ld_v2,p6ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p4ld_t3,t3_s_d,t2_d,v2_d,i,total_x); CHECK_ERR("Kernel execution failed"); } /* for (i = 0; i < nstreams; ++i) { CUDA_SAFE(hipMemcpyAsync(((char *) t3_p) + i * size_block_t3, ((char *) t3_s_d) + i * size_block_t3, size_block_t3, hipMemcpyDeviceToHost, streams[i])); } stream = 0; while (stream < nstreams) { while (hipStreamQuery(streams[stream]) != hipSuccess); double *src = &t3_p[stream * size_el_block_t3]; double *dst = &t3[stream * size_el_block_t3]; for (i = 0; i < size_el_block_t3; ++i) { dst[i] = src[i]; } stream++; }*/ hipDeviceSynchronize(); //CUDA_SAFE(hipMemcpy(((char *) t3) , ((char *) t3_s_d) , size_t3, hipMemcpyDeviceToHost)); // printf("out is %lf\n", t3_p[0]); for (i = 0; i < nstreams; ++i) { hipStreamDestroy(streams[i]); } //freeGpuMem(t3_d); freeGpuMem(t2_d); freeGpuMem(v2_d); freeHostMem(t3_p); free(streams); } extern "C" void sd_t_s1_9_cuda_(Integer * h1d, Integer * h2d, Integer * h3d, Integer * p4d, Integer * p5d, Integer * p6d, double *t3, double *t2, double *v2) { sd_t_s1_9_cuda((int) *h1d, (int) *h2d, (int) *h3d, (int) *p4d, (int) *p5d, (int) *p6d, t3, t2, v2); }
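/* ---------------------------------------------------------------------------
 * Hedged usage sketch (not part of the original source). A minimal view of how
 * a host driver is expected to call the Fortran-callable wrappers defined in
 * this file; the tile extents and the restriction to a single sd_t_s1_1_cuda_
 * call are purely illustrative, and dev_mem_d_ / dev_release_ are assumed to
 * be declared alongside dev_mem_s_ as the accumulator setup and release
 * bindings.
 *
 *   void example_driver(void) {
 *     Integer h1d = 4, h2d = 4, h3d = 4, p4d = 8, p5d = 8, p6d = 8;
 *     double *t3 = (double*)calloc((size_t)(h1d*h2d*h3d*p4d*p5d*p6d), sizeof(double));
 *     double *t2 = (double*)calloc((size_t)(p4d*h1d), sizeof(double));
 *     double *v2 = (double*)calloc((size_t)(h3d*h2d*p6d*p5d), sizeof(double));
 *     dev_mem_d_(&h1d, &h2d, &h3d, &p4d, &p5d, &p6d);  // zero device accumulator t3_d
 *     dev_mem_s_(&h1d, &h2d, &h3d, &p4d, &p5d, &p6d);  // zero device accumulator t3_s_d
 *     sd_t_s1_1_cuda_(&h1d, &h2d, &h3d, &p4d, &p5d, &p6d, t3, t2, v2);
 *     dev_release_();                                  // free t3_d and t3_s_d
 *     free(t3); free(t2); free(v2);
 *   }
 *
 * The d1/d2 wrappers and compute_en_ follow the same pattern: results are
 * accumulated in the device-resident t3_d / t3_s_d buffers set up above, and
 * the energy reduction reads those device buffers rather than the host t3.
 * ------------------------------------------------------------------------- */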
1a711cdeb907785a220facac039409f47afc0fa2.cu
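/* CUDA implementation of the sd_t_* triples contractions and the associated
 * energy reduction (the HIP translation of the same routines appears above).
 * Design, as implemented below: each sd_t_d1_N / sd_t_d2_N doubles contribution
 * is one cublasDgemm over the shared contraction index (h7 or p7, described by
 * the i1/i2/o arrays) followed by a 6-index ttlg_transpose that routes the
 * result into the device-resident accumulator t3_d, with the case-dependent
 * perm[] and beta encoding the target index order and the sign of the term;
 * the sd_t_s1_N singles contributions are hand-written kernels that accumulate
 * rank-1 t2[p4,h1] * v2[h3,h2,p6,p5] updates into t3_s_d; compute_energy then
 * reduces t3_d and t3_s_d against the orbital-energy denominators built from
 * eval1..eval6. Device and host scratch buffers come from the getGpuMem /
 * getHostMem helpers declared elsewhere. */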
#include <cuda_runtime.h> #include <cublas_v2.h> #include <cuda.h> __device__ double* t3_s_d; __device__ double* t3_d; #include "header.h" #include "ourinclude.h" #define T1 16 #define T2 16 #define Tcomm 16 cublasHandle_t handle; double* output_d; size_t current_i_size; extern "C" void ttgt_init() { cublasCreate(&handle); output_d = NULL; current_i_size = 0; } extern "C" void set_dev_mem_d(int h1d, int h2d, int h3d, int p4d, int p5d,int p6d) { int size_t3; size_t3 = h1d*h2d*h3d*p4d*p5d*p6d; t3_d = (double *) getGpuMem(size_t3*sizeof(double)); cudaMemset(t3_d,0,size_t3*sizeof(double)); } extern "C" void dev_mem_d_(Integer * h1d, Integer * h2d, Integer * h3d, Integer * p4d, Integer * p5d, Integer * p6d) { set_dev_mem_d((int) *h1d, (int) *h2d, (int) *h3d, (int) *p4d, (int) *p5d, (int) *p6d); } extern "C" void dev_release() { freeGpuMem(t3_d); freeGpuMem(t3_s_d); } extern "C" void dev_release_() { dev_release(); } extern "C" void sd_t_d1_cuda(int h1d, int h2d, int h3d, int h7d, int p4d, int p5d, int p6d, double *triplesx, double *t2sub, double *v2sub, int id) { double* output_d; static int count = 0; if(count == 0) { ttgt_init(); count++; } size_t size_triplesx,size_block_triplesx,size_el_block_triplesx,size_t2sub,size_v2sub; size_t i; double *t2sub_d,*v2sub_d; size_triplesx= p4d * p5d * h1d * h3d * h2d * p6d *sizeof(double); size_t2sub=h7d*p4d*p5d*h1d*sizeof(double); size_v2sub=h3d*h2d*p6d*h7d*sizeof(double); int i1[4], i2[4], o[6]; i1[0] = h7d; i1[1] = p4d; i1[2] = p5d; i1[3] = h1d; i2[0] = h3d; i2[1] = h2d; i2[2] = p6d; i2[3] = h7d; o[0] = p4d; o[1] = p5d; o[2] = h1d; o[3] = h3d; o[4] = h2d; o[5] = p6d; cublasOperation_t transa, transb; transa = CUBLAS_OP_T; transb = CUBLAS_OP_T; size_t m,n,k; m = p4d*p5d*h1d; k = h7d; n = h3d*h2d*p6d; double alpha, beta; alpha = 1; beta = 0; t2sub_d=(double*)getGpuMem(size_t2sub); v2sub_d=(double*)getGpuMem(size_v2sub); //if(size_triplesx > current_i_size) { output_d=(double*)getGpuMem(size_triplesx); current_i_size = size_triplesx; } if(output_d == NULL) { exit(0); } int perm[6]; //double beta; switch(id) { case 1: perm[0] = 3; perm[1] = 4; perm[2] = 2; perm[3] = 5; perm[4] = 1; perm[5] = 0; beta = -1.0; break; case 2: perm[0] = 3; perm[1] = 2; perm[2] = 4; perm[3] = 5; perm[4] = 1; perm[5] = 0; beta = 1.0; break; case 3: perm[0] = 2; perm[1] = 3; perm[2] = 4; perm[3] = 5; perm[4] = 1; perm[5] = 0; beta = -1.0; break; case 4: perm[0] = 3; perm[1] = 4; perm[2] = 2; perm[3] = 1; perm[4] = 0; perm[5] = 5; beta = -1.0; break; case 5: perm[0] = 3; perm[1] = 2; perm[2] = 4; perm[3] = 1; perm[4] = 0; perm[5] = 5; beta = 1.0; break; case 6: perm[0] = 2; perm[1] = 3; perm[2] = 4; perm[3] = 1; perm[4] = 0; perm[5] = 5; beta = -1.0; break; case 7: perm[0] = 3; perm[1] = 4; perm[2] = 2; perm[3] = 1; perm[4] = 5; perm[5] = 0; beta = 1.0; break; case 8: perm[0] = 3; perm[1] = 2; perm[2] = 4; perm[3] = 1; perm[4] = 5; perm[5] = 0; beta = -1.0; break; case 9: perm[0] = 2; perm[1] = 3; perm[2] = 4; perm[3] = 1; perm[4] = 5; perm[5] = 0; beta = 1.0; break; } cublasDgemm(handle, transa, transb, m, n, k, &alpha, t2sub_d, h7d, v2sub_d, n, &beta, output_d, m); ttlg_transpose(6, o, perm, output_d, t3_d, 1, beta); cudaThreadSynchronize(); freeGpuMem(t2sub_d); freeGpuMem(v2sub_d); freeGpuMem(output_d); } /*----------------------------------------------------------------------* *triplesx[h3,h1,p6,p5,p4] -= t2sub[h7,p4,p5,h1] * v2sub[h3,p6,h7] *----------------------------------------------------------------------*/ extern "C" void sd_t_d1_1_cuda(int h1d, int h2d, int h3d, int 
h7d, int p4d, int p5d, int p6d, double *triplesx, double *t2sub, double *v2sub) { sd_t_d1_cuda( h1d, h2d, h3d, h7d, p4d, p5d, p6d, triplesx, t2sub, v2sub, 1); } extern "C" void sd_t_d1_1_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* h7d, Integer* p4d, Integer* p5d, Integer* p6d, double *triplesx, double *t2sub, double *v2sub) { sd_t_d1_1_cuda((int)*h1d,(int)*h2d,(int)*h3d,(int)*h7d,(int)*p4d,(int)*p5d,(int)*p6d,triplesx,t2sub,v2sub); } /*----------------------------------------------------------------------* *triplesx[h3,h1,h2,p5,p4] += t2sub[h7,p4,p5,h1] * v2sub[h3,h2,h7] *----------------------------------------------------------------------*/ extern "C" void sd_t_d1_2_cuda(int h1d, int h2d, int h3d, int h7d, int p4d, int p5d, int p6d, double *triplesx, double *t2sub, double *v2sub) { sd_t_d1_cuda( h1d, h2d, h3d, h7d, p4d, p5d, p6d, triplesx, t2sub, v2sub, 2); } extern "C" void sd_t_d1_2_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* h7d, Integer* p4d, Integer* p5d, Integer* p6d, double *triplesx, double *t2sub, double *v2sub) { sd_t_d1_2_cuda((int)*h1d,(int)*h2d,(int)*h3d,(int)*h7d,(int)*p4d,(int)*p5d,(int)*p6d,triplesx,t2sub,v2sub); } /*----------------------------------------------------------------------* *triplesx[h1,h3,p5,p4] -= t2sub[h7,p4,p5,h1] * v2sub[h3,h7] *----------------------------------------------------------------------*/ extern "C" void sd_t_d1_3_cuda(int h1d, int h2d, int h3d, int h7d, int p4d, int p5d, int p6d, double *triplesx, double *t2sub, double *v2sub) { sd_t_d1_cuda( h1d, h2d, h3d, h7d, p4d, p5d, p6d, triplesx, t2sub, v2sub, 3); } extern "C" void sd_t_d1_3_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* h7d, Integer* p4d, Integer* p5d, Integer* p6d, double *triplesx, double *t2sub, double *v2sub) { sd_t_d1_3_cuda((int)*h1d,(int)*h2d,(int)*h3d,(int)*h7d,(int)*p4d,(int)*p5d,(int)*p6d,triplesx,t2sub,v2sub); } /*----------------------------------------------------------------------* *triplesx[h3,h1,p5,p4,p6] -= t2sub[h7,p4,p5,h1] * v2sub[h3,p6,h7] *----------------------------------------------------------------------*/ extern "C" void sd_t_d1_4_cuda(int h1d, int h2d, int h3d, int h7d, int p4d, int p5d, int p6d, double *triplesx, double *t2sub, double *v2sub) { sd_t_d1_cuda( h1d, h2d, h3d, h7d, p4d, p5d, p6d, triplesx, t2sub, v2sub, 4); } extern "C" void sd_t_d1_4_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* h7d, Integer* p4d, Integer* p5d, Integer* p6d, double *triplesx, double *t2sub, double *v2sub) { sd_t_d1_4_cuda((int)*h1d,(int)*h2d,(int)*h3d,(int)*h7d,(int)*p4d,(int)*p5d,(int)*p6d,triplesx,t2sub,v2sub); } /*----------------------------------------------------------------------* *triplesx[h3,h1,h2,p5,p4,p6] += t2sub[h7,p4,p5,h1] * v2sub[h3,h2,p6,h7] *----------------------------------------------------------------------*/ extern "C" void sd_t_d1_5_cuda(int h1d, int h2d, int h3d, int h7d, int p4d, int p5d, int p6d, double *triplesx, double *t2sub, double *v2sub) { sd_t_d1_cuda( h1d, h2d, h3d, h7d, p4d, p5d, p6d, triplesx, t2sub, v2sub, 5); } extern "C" void sd_t_d1_5_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* h7d, Integer* p4d, Integer* p5d, Integer* p6d, double *triplesx, double *t2sub, double *v2sub) { sd_t_d1_5_cuda((int)*h1d,(int)*h2d,(int)*h3d,(int)*h7d,(int)*p4d,(int)*p5d,(int)*p6d,triplesx,t2sub,v2sub); } /*----------------------------------------------------------------------* *triplesx[h1,h3,p5,p4,p6] -= t2sub[h7,p4,p5,h1] * v2sub[h3,p6,h7] 
*----------------------------------------------------------------------*/ extern "C" void sd_t_d1_6_cuda(int h1d, int h2d, int h3d, int h7d, int p4d, int p5d, int p6d, double *triplesx, double *t2sub, double *v2sub) { sd_t_d1_cuda( h1d, h2d, h3d, h7d, p4d, p5d, p6d, triplesx, t2sub, v2sub, 6); } extern "C" void sd_t_d1_6_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* h7d, Integer* p4d, Integer* p5d, Integer* p6d, double *triplesx, double *t2sub, double *v2sub) { sd_t_d1_6_cuda((int)*h1d,(int)*h2d,(int)*h3d,(int)*h7d,(int)*p4d,(int)*p5d,(int)*p6d,triplesx,t2sub,v2sub); } /*----------------------------------------------------------------------* *triplesx[h3,h1,p5,p6,p4] += t2sub[h7,p4,p5,h1] * v2sub[h3,p6,h7] *----------------------------------------------------------------------*/ extern "C" void sd_t_d1_7_cuda(int h1d, int h2d, int h3d, int h7d, int p4d, int p5d, int p6d, double *triplesx, double *t2sub, double *v2sub) { sd_t_d1_cuda( h1d, h2d, h3d, h7d, p4d, p5d, p6d, triplesx, t2sub, v2sub, 7); } extern "C" void sd_t_d1_7_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* h7d, Integer* p4d, Integer* p5d, Integer* p6d, double *triplesx, double *t2sub, double *v2sub) { sd_t_d1_7_cuda((int)*h1d,(int)*h2d,(int)*h3d,(int)*h7d,(int)*p4d,(int)*p5d,(int)*p6d,triplesx,t2sub,v2sub); } /*----------------------------------------------------------------------* *triplesx[h3,h1,h2,p5,p6,p4] -= t2sub[h7,p4,p5,h1] * v2sub[h3,h2,p6,h7] *----------------------------------------------------------------------*/ extern "C" void sd_t_d1_8_cuda(int h1d, int h2d, int h3d, int h7d, int p4d, int p5d, int p6d, double *triplesx, double *t2sub, double *v2sub) { sd_t_d1_cuda( h1d, h2d, h3d, h7d, p4d, p5d, p6d, triplesx, t2sub, v2sub, 8); } extern "C" void sd_t_d1_8_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* h7d, Integer* p4d, Integer* p5d, Integer* p6d, double *triplesx, double *t2sub, double *v2sub) { sd_t_d1_8_cuda((int)*h1d,(int)*h2d,(int)*h3d,(int)*h7d,(int)*p4d,(int)*p5d,(int)*p6d,triplesx,t2sub,v2sub); } /*----------------------------------------------------------------------* *triplesx[h1,h3,p5,p6,p4] += t2sub[h7,p4,p5,h1] * v2sub[h3,p6,h7] *----------------------------------------------------------------------*/ extern "C" void sd_t_d1_9_cuda(int h1d, int h2d, int h3d, int h7d, int p4d, int p5d, int p6d, double *triplesx, double *t2sub, double *v2sub) { sd_t_d1_cuda( h1d, h2d, h3d, h7d, p4d, p5d, p6d, triplesx, t2sub, v2sub, 9); } extern "C" void sd_t_d1_9_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* h7d, Integer* p4d, Integer* p5d, Integer* p6d, double *triplesx, double *t2sub, double *v2sub) { sd_t_d1_9_cuda((int)*h1d,(int)*h2d,(int)*h3d,(int)*h7d,(int)*p4d,(int)*p5d,(int)*p6d,triplesx,t2sub,v2sub); } extern "C" void sd_t_d2_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, int p7d, double *triplesx, double *t2sub, double *v2sub, int id) { size_t size_triplesx,size_block_triplesx,size_el_block_triplesx,size_t2sub,size_v2sub; double* output_d; size_t i; double *t2sub_d,*v2sub_d; size_triplesx= p4d * p5d * h1d * h3d * h2d * p6d *sizeof(double); size_t2sub=p7d*p4d*p5d*h1d*sizeof(double); size_v2sub=h3d*h2d*p6d*p7d*sizeof(double); int i1[4], i2[4], o[6]; i1[0] = p7d; i1[1] = p4d; i1[2] = h1d; i1[3] = h2d; i2[0] = p7d; i2[1] = h3d; i2[2] = p6d; i2[3] = p5d; o[0] = p4d; o[1] = h1d; o[2] = h2d; o[3] = h3d; o[4] = p6d; o[5] = p5d; cublasOperation_t transa, transb; transa = CUBLAS_OP_T; transb = CUBLAS_OP_N; size_t m,n,k; m = p4d*h1d*h2d; k = p7d; n = h3d*p6d*p5d; 
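/* Contraction step (descriptive comment): per the i1/i2/o descriptors above,
   the DGEMM below is set up to contract over the common index p7, forming the
   intermediate output[p4,h1,h2,h3,p6,p5] = sum_p7 t2sub[p7,p4,h1,h2] * v2sub[p7,h3,p6,p5]
   with m = p4d*h1d*h2d, n = h3d*p6d*p5d, k = p7d; the case-dependent perm[]
   and beta chosen in the switch below then route this intermediate into t3_d
   in the index order, and with the sign, of the requested sd_t_d2_N term. */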
double alpha, beta; alpha = 1; beta = 0; t2sub_d=(double*)getGpuMem(size_t2sub); v2sub_d=(double*)getGpuMem(size_v2sub); //if(size_triplesx > current_i_size) { output_d=(double*)getGpuMem(size_triplesx); current_i_size = size_triplesx; } if(output_d == NULL) { exit(0); } int perm[6]; //double beta; switch(id) { case 1: perm[0] = 3; perm[1] = 2; perm[2] = 1; perm[3] = 4; perm[4] = 5; perm[5] = 0; beta = -1.0; break; case 2: perm[0] = 2; perm[1] = 1; perm[2] = 3; perm[3] = 4; perm[4] = 5; perm[5] = 0; beta = -1.0; break; case 3: perm[0] = 2; perm[1] = 3; perm[2] = 1; perm[3] = 4; perm[4] = 5; perm[5] = 0; beta = 1.0; break; case 4: perm[0] = 3; perm[1] = 2; perm[2] = 1; perm[3] = 4; perm[4] = 0; perm[5] = 5; beta = 1.0; break; case 5: perm[0] = 2; perm[1] = 1; perm[2] = 3; perm[3] = 4; perm[4] = 0; perm[5] = 5; beta = 1.0; break; case 6: perm[0] = 2; perm[1] = 3; perm[2] = 1; perm[3] = 4; perm[4] = 0; perm[5] = 5; beta = -1.0; break; case 7: perm[0] = 3; perm[1] = 2; perm[2] = 1; perm[3] = 0; perm[4] = 4; perm[5] = 5; beta = -1.0; break; case 8: perm[0] = 2; perm[1] = 1; perm[2] = 3; perm[3] = 0; perm[4] = 4; perm[5] = 5; beta = -1.0; break; case 9: perm[0] = 2; perm[1] = 3; perm[2] = 1; perm[3] = 0; perm[4] = 4; perm[5] = 5; beta = 1.0; break; } cublasDgemm(handle, transa, transb, m, n, k, &alpha, t2sub_d, p7d, v2sub_d, n, &beta, output_d, m); ttlg_transpose(6, o, perm, output_d, t3_d, 1, beta); cudaThreadSynchronize(); freeGpuMem(t2sub_d); freeGpuMem(v2sub_d); freeGpuMem(output_d); } /*----------------------------------------------------------------------* *t3[h3,h2,h1,p6,p4] -= t2[p7,p4,h1,h2] * v2[p7,h3,p6] *----------------------------------------------------------------------*/ extern "C" void sd_t_d2_1_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, int p7d, double *t3, double *t2, double *v2) { sd_t_d2_cuda(h1d, h2d, h3d, p4d, p5d, p6d, p7d, t3, t2, v2, 1); } extern "C" void sd_t_d2_1_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* p4d, Integer* p5d, Integer* p6d, Integer* p7d, double *t3, double *t2, double *v2) { sd_t_d2_1_cuda((int)*h1d,(int)*h2d,(int)*h3d,(int)*p4d,(int)*p5d,(int)*p6d,(int)*p7d,t3,t2,v2); } /*----------------------------------------------------------------------* *t3[h2,h1,h3,p4] -= t2[p7,p4,h1,h2] * v2[p7,h3] *----------------------------------------------------------------------*/ extern "C" void sd_t_d2_2_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, int p7d, double *t3, double *t2, double *v2) { sd_t_d2_cuda(h1d, h2d, h3d, p4d, p5d, p6d, p7d, t3, t2, v2, 2); } extern "C" void sd_t_d2_2_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* p4d, Integer* p5d, Integer* p6d, Integer* p7d, double *t3, double *t2, double *v2) { sd_t_d2_2_cuda((int)*h1d,(int)*h2d,(int)*h3d,(int)*p4d,(int)*p5d,(int)*p6d,(int)*p7d,t3,t2,v2); } /*----------------------------------------------------------------------* *t3[h2,h3,h1,p6,p4] += t2[p7,p4,h1,h2] * v2[p7,h3,p6] *----------------------------------------------------------------------*/ extern "C" void sd_t_d2_3_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, int p7d, double *t3, double *t2, double *v2) { sd_t_d2_cuda(h1d, h2d, h3d, p4d, p5d, p6d, p7d, t3, t2, v2, 3); } extern "C" void sd_t_d2_3_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* p4d, Integer* p5d, Integer* p6d, Integer* p7d, double *t3, double *t2, double *v2) { sd_t_d2_3_cuda((int)*h1d,(int)*h2d,(int)*h3d,(int)*p4d,(int)*p5d,(int)*p6d,(int)*p7d,t3,t2,v2); } 
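/* Hedged reference sketch (not part of the original source): a plain host-side
 * loop nest for the pre-permutation intermediate targeted by sd_t_d2_cuda,
 * written under the assumption that the first index of each descriptor is the
 * fastest-varying one (Fortran-style layout, matching i1 = [p7,p4,h1,h2],
 * i2 = [p7,h3,p6,p5] and o = [p4,h1,h2,h3,p6,p5] above). It is illustrative
 * only and is not called anywhere in this file. */
static void sd_t_d2_reference_intermediate(int h1d, int h2d, int h3d,
                                           int p4d, int p5d, int p6d, int p7d,
                                           const double *t2sub, const double *v2sub,
                                           double *out /* size p4d*h1d*h2d*h3d*p6d*p5d */)
{
  for (int p5 = 0; p5 < p5d; ++p5)
    for (int p6 = 0; p6 < p6d; ++p6)
      for (int h3 = 0; h3 < h3d; ++h3)
        for (int h2 = 0; h2 < h2d; ++h2)
          for (int h1 = 0; h1 < h1d; ++h1)
            for (int p4 = 0; p4 < p4d; ++p4) {
              double acc = 0.0;
              for (int p7 = 0; p7 < p7d; ++p7) {
                /* t2sub(p7,p4,h1,h2) and v2sub(p7,h3,p6,p5), first index fastest */
                acc += t2sub[p7 + p7d * (p4 + p4d * (h1 + h1d * h2))]
                     * v2sub[p7 + p7d * (h3 + h3d * (p6 + p6d * p5))];
              }
              /* out(p4,h1,h2,h3,p6,p5), first index fastest */
              out[p4 + p4d * (h1 + h1d * (h2 + h2d * (h3 + h3d * (p6 + p6d * p5))))] = acc;
            }
}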
/*----------------------------------------------------------------------* *t3[h3,h2,h1,p6,p4,p5] += t2[p7,p4,h1,h2] * v2[p7,h3,p6,p5] *----------------------------------------------------------------------*/ extern "C" void sd_t_d2_4_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, int p7d, double *t3, double *t2, double *v2) { sd_t_d2_cuda(h1d, h2d, h3d, p4d, p5d, p6d, p7d, t3, t2, v2, 4); } extern "C" void sd_t_d2_4_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* p4d, Integer* p5d, Integer* p6d, Integer* p7d, double *t3, double *t2, double *v2) { sd_t_d2_4_cuda((int)*h1d,(int)*h2d,(int)*h3d,(int)*p4d,(int)*p5d,(int)*p6d,(int)*p7d,t3,t2,v2); } /*----------------------------------------------------------------------* *t3[h2,h1,h3,p4,p5] += t2[p7,p4,h1,h2] * v2[p7,h3,p5] *----------------------------------------------------------------------*/ extern "C" void sd_t_d2_5_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, int p7d, double *t3, double *t2, double *v2) { sd_t_d2_cuda(h1d, h2d, h3d, p4d, p5d, p6d, p7d, t3, t2, v2, 5); } extern "C" void sd_t_d2_5_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* p4d, Integer* p5d, Integer* p6d, Integer* p7d, double *t3, double *t2, double *v2) { sd_t_d2_5_cuda((int)*h1d,(int)*h2d,(int)*h3d,(int)*p4d,(int)*p5d,(int)*p6d,(int)*p7d,t3,t2,v2); } /*----------------------------------------------------------------------* *t3[h2,h3,h1,p6,p4,p5] -= t2[p7,p4,h1,h2] * v2[p7,h3,p6,p5] *----------------------------------------------------------------------*/ extern "C" void sd_t_d2_6_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, int p7d, double *t3, double *t2, double *v2) { sd_t_d2_cuda(h1d, h2d, h3d, p4d, p5d, p6d, p7d, t3, t2, v2, 6); } extern "C" void sd_t_d2_6_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* p4d, Integer* p5d, Integer* p6d, Integer* p7d, double *t3, double *t2, double *v2) { sd_t_d2_6_cuda((int)*h1d,(int)*h2d,(int)*h3d,(int)*p4d,(int)*p5d,(int)*p6d,(int)*p7d,t3,t2,v2); } /*----------------------------------------------------------------------* *t3[h3,h2,h1,p4,p6,p5] -= t2[p7,p4,h1,h2] * v2[p7,h3,p6,p5] *----------------------------------------------------------------------*/ extern "C" void sd_t_d2_7_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, int p7d, double *t3, double *t2, double *v2) { sd_t_d2_cuda(h1d, h2d, h3d, p4d, p5d, p6d, p7d, t3, t2, v2, 7); } extern "C" void sd_t_d2_7_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* p4d, Integer* p5d, Integer* p6d, Integer* p7d, double *t3, double *t2, double *v2) { sd_t_d2_7_cuda((int)*h1d,(int)*h2d,(int)*h3d,(int)*p4d,(int)*p5d,(int)*p6d,(int)*p7d,t3,t2,v2); } /*----------------------------------------------------------------------* *t3[h2,h1,h3,p4,p6,p5] -= t2[p7,p4,h1,h2] * v2[p7,h3,p6,p5] *----------------------------------------------------------------------*/ extern "C" void sd_t_d2_8_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, int p7d, double *t3, double *t2, double *v2) { sd_t_d2_cuda(h1d, h2d, h3d, p4d, p5d, p6d, p7d, t3, t2, v2, 8); } extern "C" void sd_t_d2_8_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* p4d, Integer* p5d, Integer* p6d, Integer* p7d, double *t3, double *t2, double *v2) { sd_t_d2_8_cuda((int)*h1d,(int)*h2d,(int)*h3d,(int)*p4d,(int)*p5d,(int)*p6d,(int)*p7d,t3,t2,v2); } /*----------------------------------------------------------------------* *t3[h2,h3,h1,p4,p6,p5] += t2[p7,p4,h1,h2] * v2[p7,h3,p6,p5] 
*----------------------------------------------------------------------*/ extern "C" void sd_t_d2_9_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, int p7d, double *t3, double *t2, double *v2) { sd_t_d2_cuda(h1d, h2d, h3d, p4d, p5d, p6d, p7d, t3, t2, v2, 9); } extern "C" void sd_t_d2_9_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* p4d, Integer* p5d, Integer* p6d, Integer* p7d, double *t3, double *t2, double *v2) { sd_t_d2_9_cuda((int)*h1d,(int)*h2d,(int)*h3d,(int)*p4d,(int)*p5d,(int)*p6d,(int)*p7d,t3,t2,v2); } #define MAX_h3 64 /* IMPORTANT!!!! t3_d must be passed as parameter to kernel function. A __global__ function can't access the global variable directly*/ __global__ void compute_energy_kernel(int h1d,int h2d,int h3d,int p4d,int p5d,int p6d,double* eval1,double* eval2,double* eval3,double* eval4,double* eval5,double* eval6, double* energy, double factor, int total_size, double* t3d, double* t3_sd) { int h1,h2,p6,p4,p5, h3,i=0; double e1,e2,e4,e5,e6; // __shared__ double t2_shm[MAX_h3]; __shared__ double energy_s[T1]; __shared__ double energy2_s[T1]; double inner_fac; int limit; int rest_x=blockIdx.x; int thread_x = T2*T1 * rest_x + threadIdx.x; if(threadIdx.x==0) { energy[blockIdx.x]=0; energy[blockIdx.x+gridDim.x]=0; energy_s[threadIdx.x] = 0.0; energy2_s[threadIdx.x] = 0.0; } for(int j =0; j<T2*T1;j++) { thread_x = T2*T1*blockIdx.x + j; rest_x = thread_x; __syncthreads(); h2=rest_x%h2d; rest_x=rest_x/h2d; h1=rest_x%h1d; rest_x=rest_x/h1d; p6=rest_x%p6d; rest_x=rest_x/p6d; p5=rest_x%p5d; rest_x=rest_x/p5d; p4=rest_x%p4d; e1 = eval1[h1]; e2 = eval2[h2]; e4 = eval4[p4]; e5 = eval5[p5]; e6 = eval6[p6]; /* for(p4=0;p4<p4d;p4++) for(p5 = 0;p5<p5d;p5++) for(p6=0;p6<p6d;p6++) for(h1= 0;h1<h1d;h1++) for(h2=0;h2<h2d;h2++) for(h3=0;h3<h3d;h3++) { inner_fac = -eval4[p4]-eval5[p5]-eval6[p6]+eval1[h1] +eval2[h2]+eval3[h3]; energy_s[0]+=factor*t3d[i]*t3d[i]/inner_fac; energy2_s[0]+=factor*t3d[i]*(t3_sd[i]+t3d[i])/inner_fac; i++; } */ if(thread_x<total_size) for(int i=0;i<h3d;i++) { inner_fac = -e4-e5-e6+e1+e2+eval3[i]; //t2_shm[i]; //ckbn avoid e1 in case we need just (T) energy_s[threadIdx.x] += factor* t3d[thread_x*h3d+i]*t3d[thread_x*h3d+i]/inner_fac; energy2_s[threadIdx.x] += factor* t3d[thread_x*h3d+i]*(t3_sd[thread_x*h3d+i]+t3d[thread_x*h3d+i])/inner_fac; } __syncthreads(); } if(threadIdx.x==0) { /* limit = blockDim.x; if (blockIdx.x == (gridDim.x-1)) limit = total_size%blockDim.x; for(int i=0;i<limit;i++) { energy[blockIdx.x]+=energy_s[i]; energy[blockIdx.x+gridDim.x]+=energy2_s[i]; } */ energy[blockIdx.x] = energy_s[0]; energy[blockIdx.x+gridDim.x] = energy2_s[0]; } __syncthreads(); } extern "C" void compute_energy(double factor, double* energy, double* eval1, double* eval2,double* eval3,double* eval4,double* eval5,double* eval6,int h1d, int h2d, int h3d, int p4d, int p5d,int p6d, double* host1, double* host2) //ckbn en_comment, double* total_d, double* total_s) { double* energy_d, *energy_h; double* eval_d1,*eval_d2,*eval_d3,*eval_d4,*eval_d5,*eval_d6; int size_energy = 2*sizeof(double); int total_block = DIV_UB((h1d*h2d*p4d*p5d*p6d), (T2*T1)); // int total_block = 1; int total_elements = h1d*h2d*p4d*p5d*p6d; energy_d = (double*)getGpuMem(size_energy*total_block*2); int i=0,in; double* t3 = (double*)malloc(sizeof(double)*h3d*total_elements); double* ts3 = (double*)malloc(sizeof(double)*h3d*total_elements); energy_h = (double*)getHostMem(size_energy*2*total_block); eval_d1 = (double*)getGpuMem(h1d*sizeof(double)); eval_d2 = (double*)getGpuMem(h2d*sizeof(double)); 
eval_d3 = (double*)getGpuMem(h3d*sizeof(double)); eval_d4 = (double*)getGpuMem(p4d*sizeof(double)); eval_d5 = (double*)getGpuMem(p5d*sizeof(double)); eval_d6 = (double*)getGpuMem(p6d*sizeof(double)); CUDA_SAFE(cudaMemcpy(eval_d1, eval1, h1d*sizeof(double), cudaMemcpyHostToDevice)); CUDA_SAFE(cudaMemcpy(eval_d2, eval2, h2d*sizeof(double), cudaMemcpyHostToDevice)); CUDA_SAFE(cudaMemcpy(eval_d3, eval3, h3d*sizeof(double), cudaMemcpyHostToDevice)); CUDA_SAFE(cudaMemcpy(eval_d4, eval4, p4d*sizeof(double), cudaMemcpyHostToDevice)); CUDA_SAFE(cudaMemcpy(eval_d5, eval5, p5d*sizeof(double), cudaMemcpyHostToDevice)); CUDA_SAFE(cudaMemcpy(eval_d6, eval6, p6d*sizeof(double), cudaMemcpyHostToDevice)); /* for test only */ //printf("host 2 is %f %f\n", host2[0], host2[1]); // CUDA_SAFE(cudaMemcpy(t3_s_d, host2, total_elements*h3d*sizeof(double), cudaMemcpyHostToDevice)); dim3 dimBlock(1); //T2*T1); dim3 dimGrid(total_block); compute_energy_kernel<<<dimGrid,dimBlock,0>>>(h1d,h2d,h3d,p4d,p5d,p6d, eval_d1,eval_d2,eval_d3,eval_d4,eval_d5,eval_d6,energy_d, factor, h1d*h2d*p4d*p5d*p6d, t3_d, t3_s_d); cudaThreadSynchronize(); //CHECK_ERR("Kernel execution failed"); CUDA_SAFE(cudaMemcpy(((char *) energy_h) , ((char *) energy_d) , size_energy*total_block*2, cudaMemcpyDeviceToHost)); for(int i=1;i<dimGrid.x;i++) { energy_h[0]+=energy_h[i]; energy_h[dimGrid.x]+=energy_h[i+dimGrid.x]; } // printf("CUDA energy_h is %f %f %d %d %d %d %d %d\n", energy_h[0], energy_h[dimGrid.x]); //, total_size, h1d, h2d, p4d, p5d,p6d); /* CUDA_SAFE(cudaMemcpy(((char *) t3) , ((char *) t3_d) , sizeof(double)*h3d*total_elements, cudaMemcpyDeviceToHost)); CUDA_SAFE(cudaMemcpy(((char *) ts3) , ((char *) t3_s_d) , sizeof(double)*h3d*total_elements, cudaMemcpyDeviceToHost)); total_s[0]=0.0, total_d[0]=0.0; for(int i=0;i<h3d*total_elements;i++) { total_s[0] += ts3[i]; total_d[0] += t3[i]; } */ // printf("Total doubles and singles %f, %f\n", total_d, total_s); energy[0] = energy_h[0]; energy[1] = energy_h[dimGrid.x]; freeGpuMem(energy_d); freeGpuMem(eval_d1); freeGpuMem(eval_d2); freeGpuMem(eval_d3); freeGpuMem(eval_d4); freeGpuMem(eval_d5); freeGpuMem(eval_d6); freeHostMem(energy_h); } extern "C" void compute_en_(double * factor, double * energy, double * eval1,double* eval2,double* eval3,double* eval4,double* eval5,double* eval6, Integer * h1d, Integer * h2d, Integer * h3d, Integer * p4d, Integer * p5d, Integer * p6d, double* host1, double* host2) //ckbn en_comment,double* total_d, double* total_s) { compute_energy((double) *factor, energy, eval1,eval2, eval3, eval4, eval5, eval6,(int) *h1d, (int) *h2d, (int) *h3d, (int) *p4d, (int) *p5d, (int) *p6d, host1, host2); //ckbn en_comment ,total_d, total_s); } //__device__ double* t3_d; extern "C" void set_dev_mem_s(int h1d, int h2d, int h3d, int p4d, int p5d,int p6d) { int size_t3; size_t3 = h1d*h2d*h3d*p4d*p5d*p6d; t3_s_d = (double *) getGpuMem(size_t3*sizeof(double)); cudaMemset(t3_s_d,0,size_t3*sizeof(double)); } extern "C" void dev_mem_s_(Integer * h1d, Integer * h2d, Integer * h3d, Integer * p4d, Integer * p5d, Integer * p6d) { set_dev_mem_s((int) *h1d, (int) *h2d, (int) *h3d, (int) *p4d, (int) *p5d, (int) *p6d); } /*----------------------------------------------------------------------* *t3[h3,h2,h1,p6,p5,p4] -= t2[p4,h1] * v2[h3,h2,p6,p5] *----------------------------------------------------------------------*/ #define T1 16 #define T2 16 #define Tcomm 16 __global__ void sd_t_s1_1_kernel(int h1d,int h2d,int h3d,int p4d,int p6d,int p4ld_t2,int h1ld_t2,int h3ld_v2,int h2ld_v2,int p6ld_v2,int 
h3ld_t3,int h2ld_t3,int h1ld_t3,int p6ld_t3,int p4ld_t3, double *t2_d, double *v2_d,int p4, int total_x, double* t3d) { int h1,h2,h3,p6; __shared__ double t2_shm[T1*2*Tcomm]; for(int i=threadIdx.x;i<h1d*p4d;i+=blockDim.x) if(i<h1d*p4d) t2_shm[i] = t2_d[i]; int rest_x=blockIdx.x; int thread_x = T2*T1 * rest_x + threadIdx.x; rest_x = thread_x; __syncthreads(); /* the following computation may need to happen inside the loop */ for(int i=0;i<total_x;i+=gridDim.x*blockDim.x) { rest_x += i; h3=rest_x%h3d; rest_x=rest_x/h3d; h2=rest_x%h2d; rest_x=rest_x/h2d; p6=rest_x%p6d; if((thread_x+i)<total_x) for(h1=0;h1<h1d;h1++) for(p4=0;p4<p4d;p4++) { t3d[h3*h3ld_t3+h2*h2ld_t3+h1*h1ld_t3+p6*p6ld_t3+p4*p4ld_t3]+=t2_shm[h1*p4d+p4]*v2_d[h3*h3ld_v2+h2*h2ld_v2+p6*p6ld_v2]; } } __syncthreads(); } extern "C" void sd_t_s1_1_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, double *t3, double *t2, double *v2) { double st, et; //ckbn st = timer(); size_t p7ld_t2, p4ld_t2, h1ld_t2, h2ld_v2, p7ld_v2, h3ld_v2, p6ld_v2, p5ld_v2, h3ld_t3, h2ld_t3, h1ld_t3, p6ld_t3, p5ld_t3, p4ld_t3; size_t size_t3, size_block_t3, size_el_block_t3, size_t2, size_v2; cudaStream_t *streams; size_t nstreams, i; double *t2_d, *v2_d, *t3_p; size_t3 = h3d * h2d * h1d * p6d * p5d * p4d * sizeof(double); size_t2 = p4d * h1d * sizeof(double); size_v2 = h3d * h2d * p6d * p5d * sizeof(double); nstreams = 1; size_block_t3 = size_t3 / nstreams; size_el_block_t3 = size_block_t3 / sizeof(double); //CUDA_SAFE(cudaMalloc((void**) &t3_d, size_t3)); //CUDA_SAFE(cudaMalloc((void**) &t2_d, size_t2)); //CUDA_SAFE(cudaMalloc((void**) &v2_d, size_v2)); // t3_d = (double *) getGpuMem(size_t3); t2_d = (double *) getGpuMem(size_t2); v2_d = (double *) getGpuMem(size_v2); t3_p = (double *) getHostMem(size_t3); streams = (cudaStream_t *) malloc(nstreams * sizeof(cudaStream_t)); assert(streams != NULL); for (i = 0; i < nstreams; ++i) { CUDA_SAFE(cudaStreamCreate(&streams[i])); } CUDA_SAFE(cudaMemcpy(t2_d, t2, size_t2, cudaMemcpyHostToDevice)); CUDA_SAFE(cudaMemcpy(v2_d, v2, size_v2, cudaMemcpyHostToDevice)); p4ld_t2 = 1; h1ld_t2 = p4d; h3ld_v2 = 1; h2ld_v2 = h3d; p6ld_v2 = h3d * h2d; // p5ld_v2 = p6d * h3d * p7d; h3ld_t3 = 1; h2ld_t3 = h3d; h1ld_t3 = h2d * h3d; p6ld_t3 = h1d * h2d * h3d; // p5ld_t3 = p6d * h1d * h2d * h3d; p4ld_t3 = p5d * p6d * h1d * h2d * h3d; int total_x = h3d*h2d*p6d*p5d; dim3 dimBlock(T2*T1);dim3 dimGrid(DIV_UB(total_x,T2*T1), 1); for(i=0;i<nstreams;++i){ sd_t_s1_1_kernel<<<dimGrid,dimBlock,0,streams[i]>>>(h1d,h2d,h3d,p4d,p5d*p6d,p4ld_t2,h1ld_t2,h3ld_v2,h2ld_v2,p6ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p4ld_t3,t2_d,v2_d,i,total_x, t3_s_d); CHECK_ERR("Kernel execution failed"); } /* st = timer(); for (i = 0; i < nstreams; ++i) { CUDA_SAFE(cudaMemcpyAsync(((char *) t3_p) + i * size_block_t3, ((char *) t3_s_d) + i * size_block_t3, size_block_t3, cudaMemcpyDeviceToHost, streams[i])); } stream = 0; while (stream < nstreams) { while (cudaStreamQuery(streams[stream]) != cudaSuccess); double *src = &t3_p[stream * size_el_block_t3]; double *dst = &t3[stream * size_el_block_t3]; for (i = 0; i < size_el_block_t3; ++i) { dst[i] = src[i]; } stream++; } */ cudaThreadSynchronize(); // CUDA_SAFE(cudaMemcpy(((char *) t3) , ((char *) t3_s_d) , size_t3, cudaMemcpyDeviceToHost)); for (i = 0; i < nstreams; ++i) { cudaStreamDestroy(streams[i]); } // freeGpuMem(t3_d); freeGpuMem(t2_d); freeGpuMem(v2_d); // cudaFree(t2_d); // cudaFree(v2_d); freeHostMem(t3_p); free(streams); } #undef T1 #undef T2 #undef Tcomm extern "C" void sd_t_s1_1_cuda_(Integer * h1d, Integer 
* h2d, Integer * h3d, Integer * p4d, Integer * p5d, Integer * p6d, double *t3, double *t2, double *v2) { sd_t_s1_1_cuda((int) *h1d, (int) *h2d, (int) *h3d, (int) *p4d, (int) *p5d, (int) *p6d, t3, t2, v2); } /*----------------------------------------------------------------------* *t3[h3,h1,h2,p6,p5,p4] -= t2[p4,h1] * v2[h3,h2,p6,p5] *----------------------------------------------------------------------*/ #define T1 16 #define T2 16 #define Tcomm 16 __global__ void sd_t_s1_2_kernel(int h1d,int h2d,int h3d,int p4d,int p6d,int p4ld_t2,int h1ld_t2,int h3ld_v2,int h2ld_v2,int p6ld_v2,int h3ld_t3,int h2ld_t3,int h1ld_t3,int p6ld_t3,int p4ld_t3,double *t2_d, double *v2_d,int p4, int total_x, double* t3d) { int h1,h2,h3,p6; __shared__ double t2_shm[T1*2*Tcomm]; for(int i=threadIdx.x;i<h1d*p4d;i+=blockDim.x) if(i<h1d*p4d) t2_shm[i] = t2_d[i]; int rest_x=blockIdx.x; int thread_x = T2*T1 * rest_x + threadIdx.x; rest_x = thread_x; __syncthreads(); /* the following computation may need to happen inside the loop */ for(int i=0;i<total_x;i+=gridDim.x*blockDim.x) { rest_x += i; h3=rest_x%h3d; rest_x=rest_x/h3d; h2=rest_x%h2d; rest_x=rest_x/h2d; p6=rest_x%p6d; if((thread_x+i)<total_x) for(h1=0;h1<h1d;h1++) for(p4=0;p4<p4d;p4++) { t3d[h3*h3ld_t3+h2*h2ld_t3+h1*h1ld_t3+p6*p6ld_t3+p4*p4ld_t3]-=t2_shm[h1*p4d+p4]*v2_d[h3*h3ld_v2+h2*h2ld_v2+p6*p6ld_v2]; } } __syncthreads(); } extern "C" void sd_t_s1_2_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, double *t3, double *t2, double *v2) { double st, et; //ckbn st = timer(); size_t p7ld_t2, p4ld_t2, h1ld_t2, h2ld_v2, p7ld_v2, h3ld_v2, p6ld_v2, p5ld_v2, h3ld_t3, h2ld_t3, h1ld_t3, p6ld_t3, p5ld_t3, p4ld_t3; size_t size_t3, size_block_t3, size_el_block_t3, size_t2, size_v2; cudaStream_t *streams; size_t nstreams, i; double *t2_d, *v2_d, *t3_p; size_t3 = h3d * h2d * h1d * p6d * p5d * p4d * sizeof(double); size_t2 = p4d * h1d * sizeof(double); size_v2 = h3d * h2d * p6d * p5d * sizeof(double); nstreams = 1; size_block_t3 = size_t3 / nstreams; size_el_block_t3 = size_block_t3 / sizeof(double); /* if(first==1) { t3_d = (double *) getGpuMem(size_t3); cudaMemset(t3_d,0,size_t3*sizeof(double)); first = 0; }*/ //CUDA_SAFE(cudaMalloc((void**) &t2_d, size_t2)); //CUDA_SAFE(cudaMalloc((void**) &v2_d, size_v2)); t2_d = (double *) getGpuMem(size_t2); v2_d = (double *) getGpuMem(size_v2); t3_p = (double *) getHostMem(size_t3); streams = (cudaStream_t *) malloc(nstreams * sizeof(cudaStream_t)); /* assert(streams != NULL); for (i = 0; i < nstreams; ++i) { CUDA_SAFE(cudaStreamCreate(&streams[i])); }*/ CUDA_SAFE(cudaMemcpy(t2_d, t2, size_t2, cudaMemcpyHostToDevice)); CUDA_SAFE(cudaMemcpy(v2_d, v2, size_v2, cudaMemcpyHostToDevice)); p4ld_t2 = 1; h1ld_t2 = p4d ; h3ld_v2 = 1; h2ld_v2 = h3d; p6ld_v2 = h3d * h2d; // p5ld_v2 = p6d * h3d * p7d; h3ld_t3 = 1; h1ld_t3 = h3d; h2ld_t3 = h1d * h3d; p6ld_t3 = h1d * h2d * h3d; // p5ld_t3 = p6d * h1d * h2d * h3d; p4ld_t3 = p5d * p6d * h1d * h2d * h3d; int total_x = h3d*h2d*p6d*p5d; dim3 dimBlock(T2*T1);dim3 dimGrid(DIV_UB(total_x,T2*T1), 1); // for(i=0;i<nstreams;++i){ sd_t_s1_2_kernel<<<dimGrid,dimBlock,0>>>(h1d,h2d,h3d,p4d,p5d*p6d,p4ld_t2,h1ld_t2,h3ld_v2,h2ld_v2,p6ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p4ld_t3,t2_d,v2_d,i,total_x, t3_s_d); CHECK_ERR("Kernel execution failed"); // } /* for (i = 0; i < nstreams; ++i) { CUDA_SAFE(cudaMemcpyAsync(((char *) t3_p) + i * size_block_t3, ((char *) t3_s_d) + i * size_block_t3, size_block_t3, cudaMemcpyDeviceToHost, streams[i])); } stream = 0; while (stream < nstreams) { while 
(cudaStreamQuery(streams[stream]) != cudaSuccess); double *src = &t3_p[stream * size_el_block_t3]; double *dst = &t3[stream * size_el_block_t3]; for (i = 0; i < size_el_block_t3; ++i) { dst[i] = src[i]; } stream++; }*/ cudaThreadSynchronize(); // CUDA_SAFE(cudaMemcpy(((char *) t3) , ((char *) t3_s_d) , size_t3, cudaMemcpyDeviceToHost)); /* for (i = 0; i < nstreams; ++i) { cudaStreamDestroy(streams[i]); }*/ freeGpuMem(t2_d); freeGpuMem(v2_d); freeHostMem(t3_p); free(streams); } extern "C" void sd_t_s1_2_cuda_(Integer * h1d, Integer * h2d, Integer * h3d, Integer * p4d, Integer * p5d, Integer * p6d, double *t3, double *t2, double *v2) { sd_t_s1_2_cuda((int) *h1d, (int) *h2d, (int) *h3d, (int) *p4d, (int) *p5d, (int) *p6d, t3, t2, v2); } extern "C" void sd_t_s1_3_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, double *t3, double *t2, double *v2) { double st, et; //ckbn st = timer(); size_t p7ld_t2, p4ld_t2, h1ld_t2, h2ld_v2, p7ld_v2, h3ld_v2, p6ld_v2, p5ld_v2, h3ld_t3, h2ld_t3, h1ld_t3, p6ld_t3, p5ld_t3, p4ld_t3; size_t size_t3, size_block_t3, size_el_block_t3, size_t2, size_v2; cudaStream_t *streams; size_t nstreams, i; double *t2_d, *v2_d, *t3_p; size_t3 = h3d * h2d * h1d * p6d * p5d * p4d * sizeof(double); size_t2 = p4d * h1d * sizeof(double); size_v2 = h3d * h2d * p6d * p5d * sizeof(double); nstreams = 1; size_block_t3 = size_t3 / nstreams; size_el_block_t3 = size_block_t3 / sizeof(double); /* if(first==1) { t3_d = (double *) getGpuMem(size_t3); cudaMemset(t3_d,0,size_t3*sizeof(double)); first = 0; } */ t2_d = (double *) getGpuMem(size_t2); v2_d = (double *) getGpuMem(size_v2); t3_p = (double *) getHostMem(size_t3); streams = (cudaStream_t *) malloc(nstreams * sizeof(cudaStream_t)); assert(streams != NULL); for (i = 0; i < nstreams; ++i) { CUDA_SAFE(cudaStreamCreate(&streams[i])); } CUDA_SAFE(cudaMemcpy(t2_d, t2, size_t2, cudaMemcpyHostToDevice)); CUDA_SAFE(cudaMemcpy(v2_d, v2, size_v2, cudaMemcpyHostToDevice)); p4ld_t2 = 1; h1ld_t2 = p4d ; h3ld_v2 = 1; h2ld_v2 = h3d; p6ld_v2 = h3d * h2d; // p5ld_v2 = p6d * h3d * p7d; h1ld_t3 = 1; h3ld_t3 = h1d; h2ld_t3 = h1d * h3d; p6ld_t3 = h1d * h2d * h3d; // p5ld_t3 = p6d * h1d * h2d * h3d; p4ld_t3 = p5d * p6d * h1d * h2d * h3d; int total_x = h3d*h2d*p6d*p5d; dim3 dimBlock(T2*T1);dim3 dimGrid(DIV_UB(total_x,T2*T1), 1); for(i=0;i<nstreams;++i){ sd_t_s1_1_kernel<<<dimGrid,dimBlock,0,streams[i]>>>(h1d,h2d,h3d,p4d,p5d*p6d,p4ld_t2,h1ld_t2,h3ld_v2,h2ld_v2,p6ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p4ld_t3,t2_d,v2_d,i,total_x, t3_s_d); CHECK_ERR("Kernel execution failed"); } /* for (i = 0; i < nstreams; ++i) { CUDA_SAFE(cudaMemcpyAsync(((char *) t3_p) + i * size_block_t3, ((char *) t3_s_d) + i * size_block_t3, size_block_t3, cudaMemcpyDeviceToHost, streams[i])); } stream = 0; while (stream < nstreams) { while (cudaStreamQuery(streams[stream]) != cudaSuccess); double *src = &t3_p[stream * size_el_block_t3]; double *dst = &t3[stream * size_el_block_t3]; for (i = 0; i < size_el_block_t3; ++i) { dst[i] = src[i]; } stream++; } */ cudaThreadSynchronize(); //CUDA_SAFE(cudaMemcpy(((char *) t3) , ((char *) t3_s_d) , size_t3, cudaMemcpyDeviceToHost)); for (i = 0; i < nstreams; ++i) { cudaStreamDestroy(streams[i]); } // freeGpuMem(t3_d); freeGpuMem(t2_d); freeGpuMem(v2_d); freeHostMem(t3_p); free(streams); } #undef T1 #undef T2 #undef Tcomm extern "C" void sd_t_s1_3_cuda_(Integer * h1d, Integer * h2d, Integer * h3d, Integer * p4d, Integer * p5d, Integer * p6d, double *t3, double *t2, double *v2) { sd_t_s1_3_cuda((int) *h1d, (int) *h2d, (int) *h3d, (int) *p4d, 
(int) *p5d, (int) *p6d, t3, t2, v2); } /*----------------------------------------------------------------------* *t3[h3,h2,h1,p6,p4,p5] -= t2[p4,h1] * v2[h3,h2,p6,p5] *----------------------------------------------------------------------*/ #define T1 16 #define T2 16 #define Tcomm 16 __global__ void sd_t_s1_4_kernel(int h1d,int h2d,int h3d,int p4d,int p5d,int p6d,int p4ld_t2,int h1ld_t2,int h3ld_v2,int h2ld_v2,int p6ld_v2,int p5ld_v2,int h3ld_t3,int h2ld_t3,int h1ld_t3,int p6ld_t3,int p5ld_t3,int p4ld_t3,double *t3d, double *t2_d, double *v2_d,int p4, int total_x) { int h1,h2,h3,p6,p5; __shared__ double t2_shm[T1*2*Tcomm]; for(int i=threadIdx.x;i<h1d*p4d;i+=blockDim.x) if(i<h1d*p4d) t2_shm[i] = t2_d[i]; int rest_x=blockIdx.x; int thread_x = T2*T1 * rest_x + threadIdx.x; rest_x = thread_x; __syncthreads(); /* the following computation may need to happen inside the loop */ for(int i=0;i<total_x;i+=gridDim.x*blockDim.x) { rest_x += i; h3=rest_x%h3d; rest_x=rest_x/h3d; h2=rest_x%h2d; rest_x=rest_x/h2d; p6=rest_x%p6d; rest_x=rest_x/p6d; p5=rest_x%p5d; if((thread_x+i)<total_x) for(h1=0;h1<h1d;h1++) for(p4=0;p4<p4d;p4++) { t3d[h3*h3ld_t3+h2*h2ld_t3+h1*h1ld_t3+p6*p6ld_t3+p5*p5ld_t3+p4*p4ld_t3]-=t2_shm[h1*p4d+p4]*v2_d[h3*h3ld_v2+h2*h2ld_v2+p6*p6ld_v2+p5*p5ld_v2]; } } __syncthreads(); } extern "C" void sd_t_s1_4_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, double *t3, double *t2, double *v2) { double st, et; //ckbn st = timer(); size_t p7ld_t2, p4ld_t2, h1ld_t2, h2ld_v2, p7ld_v2, h3ld_v2, p6ld_v2, p5ld_v2, h3ld_t3, h2ld_t3, h1ld_t3, p6ld_t3, p5ld_t3, p4ld_t3; size_t size_t3, size_block_t3, size_el_block_t3, size_t2, size_v2; cudaStream_t *streams; size_t nstreams, i; double *t2_d, *v2_d, *t3_p; size_t3 = h3d * h2d * h1d * p6d * p5d * p4d * sizeof(double); size_t2 = p4d * h1d * sizeof(double); size_v2 = h3d * h2d * p6d * p5d * sizeof(double); nstreams = 1; size_block_t3 = size_t3 / nstreams; size_el_block_t3 = size_block_t3 / sizeof(double); /* if(first==1) { t3_d = (double *) getGpuMem(size_t3); cudaMemset(t3_d,0,size_t3*sizeof(double)); first = 0; } */ // t3_d = (double *) getGpuMem(size_t3); t2_d = (double *) getGpuMem(size_t2); v2_d = (double *) getGpuMem(size_v2); t3_p = (double *) getHostMem(size_t3); streams = (cudaStream_t *) malloc(nstreams * sizeof(cudaStream_t)); /* assert(streams != NULL); for (i = 0; i < nstreams; ++i) { CUDA_SAFE(cudaStreamCreate(&streams[i])); }*/ CUDA_SAFE(cudaMemcpy(t2_d, t2, size_t2, cudaMemcpyHostToDevice)); CUDA_SAFE(cudaMemcpy(v2_d, v2, size_v2, cudaMemcpyHostToDevice)); p4ld_t2 = 1; h1ld_t2 = p4d; h3ld_v2 = 1; h2ld_v2 = h3d; p6ld_v2 = h3d * h2d; p5ld_v2 = p6d * h3d * h2d; h3ld_t3 = 1; h2ld_t3 = h3d; h1ld_t3 = h2d * h3d; p6ld_t3 = h1d * h2d * h3d; p4ld_t3 = p6d * h1d * h2d * h3d; p5ld_t3 = p4d * p6d * h1d * h2d * h3d; int total_x = h3d*h2d*p6d*p5d; dim3 dimBlock(T2*T1);dim3 dimGrid(DIV_UB(total_x,T2*T1), 1); i=0; // for(i=0;i<nstreams;++i){ sd_t_s1_4_kernel<<<dimGrid,dimBlock,0>>>(h1d,h2d,h3d,p4d,p5d,p6d,p4ld_t2,h1ld_t2,h3ld_v2,h2ld_v2,p6ld_v2,p5ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p5ld_t3,p4ld_t3,t3_s_d,t2_d,v2_d,i,total_x); //sd_t_s1_4_kernel<<<dimGrid,dimBlock,0,streams[i]>>>(h1d,h2d,h3d,p4d,p5d,p6d,p4ld_t2,h1ld_t2,h3ld_v2,h2ld_v2,p6ld_v2,p5ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p5ld_t3,p4ld_t3,t3_d,t2_d,v2_d,i,total_x); CHECK_ERR("Kernel execution failed"); // } cudaThreadSynchronize(); /* CUDA_SAFE(cudaMemcpy(((char *) t3_p) , ((char *) t3_d) , size_block_t3, cudaMemcpyDeviceToHost)); printf("Time for Async DeviceToHost %f\n", et-st); stream = 
0; // while (stream < nstreams) { // while (cudaStreamQuery(streams[stream]) != cudaSuccess); double *src = t3_p; //[stream * size_el_block_t3]; double *dst = t3; //[stream * size_el_block_t3]; for (i = 0; i < size_el_block_t3; ++i) { dst[i] -= src[i]; } // stream++; // } */ // cudaThreadSynchronize(); /* for (i = 0; i < nstreams; ++i) { cudaStreamDestroy(streams[i]); }*/ // freeGpuMem(t3_d); freeGpuMem(t2_d); freeGpuMem(v2_d); freeHostMem(t3_p); free(streams); } #undef T1 #undef T2 #undef Tcomm extern "C" void sd_t_s1_4_cuda_(Integer * h1d, Integer * h2d, Integer * h3d, Integer * p4d, Integer * p5d, Integer * p6d, double *t3, double *t2, double *v2) { sd_t_s1_4_cuda((int) *h1d, (int) *h2d, (int) *h3d, (int) *p4d, (int) *p5d, (int) *p6d, t3, t2, v2); } /*----------------------------------------------------------------------* *t3[h3,h1,h2,p6,p4,p5] -= t2[p4,h1] * v2[h3,h2,p6,p5] *----------------------------------------------------------------------*/ #define T1 16 #define T2 16 #define Tcomm 16 __global__ void sd_t_s1_5_kernel(int h1d,int h2d,int h3d,int p4d,int p5d,int p6d,int p4ld_t2,int h1ld_t2,int h3ld_v2,int h2ld_v2,int p6ld_v2,int p5ld_v2,int h3ld_t3,int h2ld_t3,int h1ld_t3,int p6ld_t3,int p5ld_t3,int p4ld_t3,double *t3d, double *t2_d, double *v2_d,int p4, int total_x) { int h1,h2,h3,p6,p5; __shared__ double t2_shm[T1*2*Tcomm]; for(int i=threadIdx.x;i<h1d*p4d;i+=blockDim.x) if(i<h1d*p4d) t2_shm[i] = t2_d[i]; int rest_x=blockIdx.x; int thread_x = T2*T1 * rest_x + threadIdx.x; rest_x = thread_x; __syncthreads(); /* the following computation may need to happen inside the loop */ for(int i=0;i<total_x;i+=gridDim.x*blockDim.x) { rest_x += i; h3=rest_x%h3d; rest_x=rest_x/h3d; h2=rest_x%h2d; rest_x=rest_x/h2d; p6=rest_x%p6d; rest_x=rest_x/p6d; p5=rest_x%p5d; if((thread_x+i)<total_x) for(h1=0;h1<h1d;h1++) for(p4=0;p4<p4d;p4++) { t3d[h3*h3ld_t3+h2*h2ld_t3+h1*h1ld_t3+p6*p6ld_t3+p5*p5ld_t3+p4*p4ld_t3]+=t2_shm[h1*p4d+p4]*v2_d[h3*h3ld_v2+h2*h2ld_v2+p6*p6ld_v2+p5*p5ld_v2]; } } __syncthreads(); } extern "C" void sd_t_s1_5_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, double *t3, double *t2, double *v2) { double st, et; //ckbn st = timer(); size_t p7ld_t2, p4ld_t2, h1ld_t2, h2ld_v2, p7ld_v2, h3ld_v2, p6ld_v2, p5ld_v2, h3ld_t3, h2ld_t3, h1ld_t3, p6ld_t3, p5ld_t3, p4ld_t3; size_t size_t3, size_block_t3, size_el_block_t3, size_t2, size_v2; cudaStream_t *streams; size_t nstreams, i; double *t2_d, *v2_d, *t3_p; size_t3 = h3d * h2d * h1d * p6d * p5d * p4d * sizeof(double); size_t2 = p4d * h1d * sizeof(double); size_v2 = h3d * h2d * p6d * p5d * sizeof(double); nstreams = 1; size_block_t3 = size_t3 / nstreams; size_el_block_t3 = size_block_t3 / sizeof(double); /* if(first==1) { t3_d = (double *) getGpuMem(size_t3); cudaMemset(t3_d,0,size_t3*sizeof(double)); first = 0; } */ // t3_d = (double *) getGpuMem(size_t3); t2_d = (double *) getGpuMem(size_t2); v2_d = (double *) getGpuMem(size_v2); t3_p = (double *) getHostMem(size_t3); streams = (cudaStream_t *) malloc(nstreams * sizeof(cudaStream_t)); assert(streams != NULL); for (i = 0; i < nstreams; ++i) { CUDA_SAFE(cudaStreamCreate(&streams[i])); } CUDA_SAFE(cudaMemcpy(t2_d, t2, size_t2, cudaMemcpyHostToDevice)); CUDA_SAFE(cudaMemcpy(v2_d, v2, size_v2, cudaMemcpyHostToDevice)); p4ld_t2 = 1; h1ld_t2 = p4d ; h3ld_v2 = 1; h2ld_v2 = h3d; p6ld_v2 = h3d * h2d; p5ld_v2 = p6d * h3d * h2d; h3ld_t3 = 1; h1ld_t3 = h3d; h2ld_t3 = h1d * h3d; p6ld_t3 = h1d * h2d * h3d; p4ld_t3 = p6d * h1d * h2d * h3d; p5ld_t3 = p4d * p6d * h1d * h2d * h3d; int total_x = 
h3d*h2d*p6d*p5d; dim3 dimBlock(T2*T1);dim3 dimGrid(DIV_UB(total_x,T2*T1), 1); for(i=0;i<nstreams;++i){ sd_t_s1_5_kernel<<<dimGrid,dimBlock,0,streams[i]>>>(h1d,h2d,h3d,p4d,p5d,p6d,p4ld_t2,h1ld_t2,h3ld_v2,h2ld_v2,p6ld_v2,p5ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p5ld_t3,p4ld_t3,t3_s_d,t2_d,v2_d,i,total_x); CHECK_ERR("Kernel execution failed"); } /* for (i = 0; i < nstreams; ++i) { CUDA_SAFE(cudaMemcpyAsync(((char *) t3_p) + i * size_block_t3, ((char *) t3_s_d) + i * size_block_t3, size_block_t3, cudaMemcpyDeviceToHost, streams[i])); } stream = 0; while (stream < nstreams) { while (cudaStreamQuery(streams[stream]) != cudaSuccess); double *src = &t3_p[stream * size_el_block_t3]; double *dst = &t3[stream * size_el_block_t3]; for (i = 0; i < size_el_block_t3; ++i) { dst[i] = src[i]; } stream++; } */ cudaThreadSynchronize(); //CUDA_SAFE(cudaMemcpy(((char *) t3) , ((char *) t3_s_d) , size_t3, cudaMemcpyDeviceToHost)); for (i = 0; i < nstreams; ++i) { cudaStreamDestroy(streams[i]); } // freeGpuMem(t3_d); freeGpuMem(t2_d); freeGpuMem(v2_d); freeHostMem(t3_p); free(streams); } #undef T1 #undef T2 #undef Tcomm extern "C" void sd_t_s1_5_cuda_(Integer * h1d, Integer * h2d, Integer * h3d, Integer * p4d, Integer * p5d, Integer * p6d, double *t3, double *t2, double *v2) { sd_t_s1_5_cuda((int) *h1d, (int) *h2d, (int) *h3d, (int) *p4d, (int) *p5d, (int) *p6d, t3, t2, v2); } /*----------------------------------------------------------------------* *t3[h1,h3,h2,p6,p4,p5] -= t2[p4,h1] * v2[h3,h2,p6,p5] *----------------------------------------------------------------------*/ #define T1 16 #define T2 16 #define Tcomm 16 __global__ void sd_t_s1_6_kernel(int h1d,int h2d,int h3d,int p4d,int p5d,int p6d,int p4ld_t2,int h1ld_t2,int h3ld_v2,int h2ld_v2,int p6ld_v2,int p5ld_v2,int h3ld_t3,int h2ld_t3,int h1ld_t3,int p6ld_t3,int p5ld_t3,int p4ld_t3,double *t3d, double *t2_d, double *v2_d,int p4, int total_x) { int h1,h2,h3,p6,p5; __shared__ double t2_shm[T1*2*Tcomm]; for(int i=threadIdx.x;i<h1d*p4d;i+=blockDim.x) if(i<h1d*p4d) t2_shm[i] = t2_d[i]; int rest_x=blockIdx.x; int thread_x = T2*T1 * rest_x + threadIdx.x; rest_x = thread_x; __syncthreads(); /* the following computation may need to happen inside the loop */ for(int i=0;i<total_x;i+=gridDim.x*blockDim.x) { rest_x += i; h3=rest_x%h3d; rest_x=rest_x/h3d; h2=rest_x%h2d; rest_x=rest_x/h2d; p6=rest_x%p6d; rest_x=rest_x/p6d; p5=rest_x%p5d; if((thread_x+i)<total_x) for(h1=0;h1<h1d;h1++) for(p4=0;p4<p4d;p4++) { t3d[h3*h3ld_t3+h2*h2ld_t3+h1*h1ld_t3+p6*p6ld_t3+p5*p5ld_t3+p4*p4ld_t3]-=t2_shm[h1*p4d+p4]*v2_d[h3*h3ld_v2+h2*h2ld_v2+p6*p6ld_v2+p5*p5ld_v2]; } } __syncthreads(); } extern "C" void sd_t_s1_6_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, double *t3, double *t2, double *v2) { double st, et; //ckbn st = timer(); size_t p7ld_t2, p4ld_t2, h1ld_t2, h2ld_v2, p7ld_v2, h3ld_v2, p6ld_v2, p5ld_v2, h3ld_t3, h2ld_t3, h1ld_t3, p6ld_t3, p5ld_t3, p4ld_t3; size_t size_t3, size_block_t3, size_el_block_t3, size_t2, size_v2; cudaStream_t *streams; size_t nstreams, i; double *t2_d, *v2_d, *t3_p; size_t3 = h3d * h2d * h1d * p6d * p5d * p4d * sizeof(double); size_t2 = p4d * h1d * sizeof(double); size_v2 = h3d * h2d * p6d * p5d * sizeof(double); nstreams = 1; size_block_t3 = size_t3 / nstreams; size_el_block_t3 = size_block_t3 / sizeof(double); /* if(first==1) { t3_d = (double *) getGpuMem(size_t3); cudaMemset(t3_d,0,size_t3*sizeof(double)); first = 0; } */ // t3_d = (double *) getGpuMem(size_t3); t2_d = (double *) getGpuMem(size_t2); v2_d = (double *) getGpuMem(size_v2); t3_p 
= (double *) getHostMem(size_t3); streams = (cudaStream_t *) malloc(nstreams * sizeof(cudaStream_t)); assert(streams != NULL); for (i = 0; i < nstreams; ++i) { CUDA_SAFE(cudaStreamCreate(&streams[i])); } CUDA_SAFE(cudaMemcpy(t2_d, t2, size_t2, cudaMemcpyHostToDevice)); CUDA_SAFE(cudaMemcpy(v2_d, v2, size_v2, cudaMemcpyHostToDevice)); p4ld_t2 = 1; h1ld_t2 = p4d; h3ld_v2 = 1; h2ld_v2 = h3d; p6ld_v2 = h3d * h2d; p5ld_v2 = p6d * h3d * h2d; h1ld_t3 = 1; h3ld_t3 = h1d; h2ld_t3 = h1d * h3d; p6ld_t3 = h1d * h2d * h3d; p4ld_t3 = p6d * h1d * h2d * h3d; p5ld_t3 = p4d * p6d * h1d * h2d * h3d; int total_x = h3d*h2d*p6d*p5d; dim3 dimBlock(T2*T1);dim3 dimGrid(DIV_UB(total_x,T2*T1), 1); for(i=0;i<nstreams;++i){ sd_t_s1_6_kernel<<<dimGrid,dimBlock,0,streams[i]>>>(h1d,h2d,h3d,p4d,p5d,p6d,p4ld_t2,h1ld_t2,h3ld_v2,h2ld_v2,p6ld_v2,p5ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p5ld_t3,p4ld_t3,t3_s_d,t2_d,v2_d,i,total_x); CHECK_ERR("Kernel execution failed"); } /* for (i = 0; i < nstreams; ++i) { CUDA_SAFE(cudaMemcpyAsync(((char *) t3_p) + i * size_block_t3, ((char *) t3_s_d) + i * size_block_t3, size_block_t3, cudaMemcpyDeviceToHost, streams[i])); } stream = 0; while (stream < nstreams) { while (cudaStreamQuery(streams[stream]) != cudaSuccess); double *src = &t3_p[stream * size_el_block_t3]; double *dst = &t3[stream * size_el_block_t3]; for (i = 0; i < size_el_block_t3; ++i) { dst[i] = src[i]; } stream++; }*/ cudaThreadSynchronize(); //CUDA_SAFE(cudaMemcpy(((char *) t3) , ((char *) t3_s_d) , size_t3, cudaMemcpyDeviceToHost)); for (i = 0; i < nstreams; ++i) { cudaStreamDestroy(streams[i]); } // freeGpuMem(t3_d); freeGpuMem(t2_d); freeGpuMem(v2_d); freeHostMem(t3_p); free(streams); } #undef T1 #undef T2 #undef Tcomm extern "C" void sd_t_s1_6_cuda_(Integer * h1d, Integer * h2d, Integer * h3d, Integer * p4d, Integer * p5d, Integer * p6d, double *t3, double *t2, double *v2) { sd_t_s1_6_cuda((int) *h1d, (int) *h2d, (int) *h3d, (int) *p4d, (int) *p5d, (int) *p6d, t3, t2, v2); } /*----------------------------------------------------------------------* *t3[h3,h2,h1,p4,p6,p5] -= t2[p4,h1] * v2[h3,h2,p6,p5] *----------------------------------------------------------------------*/ #define T1 16 #define T2 16 #define Tcomm 16 __global__ void sd_t_s1_7_kernel(int h1d,int h2d,int h3d,int p4d,int p6d,int p4ld_t2,int h1ld_t2,int h3ld_v2,int h2ld_v2,int p6ld_v2,int h3ld_t3,int h2ld_t3,int h1ld_t3,int p6ld_t3,int p4ld_t3,double *t3d, double *t2_d, double *v2_d,int p4, int total_x) { int h1,h2,h3,p6; __shared__ double t2_shm[T1*2*Tcomm]; for(int i=threadIdx.x;i<h1d*p4d;i+=blockDim.x) if(i<h1d*p4d) t2_shm[i] = t2_d[i]; int rest_x=blockIdx.x; int thread_x = T2*T1 * rest_x + threadIdx.x; rest_x = thread_x; __syncthreads(); /* the following computation may need to happen inside the loop */ for(int i=0;i<total_x;i+=gridDim.x*blockDim.x) { rest_x += i; h3=rest_x%h3d; rest_x=rest_x/h3d; h2=rest_x%h2d; rest_x=rest_x/h2d; p6=rest_x%p6d; if((thread_x+i)<total_x) for(h1=0;h1<h1d;h1++) for(p4=0;p4<p4d;p4++) { t3d[h3*h3ld_t3+h2*h2ld_t3+h1*h1ld_t3+p6*p6ld_t3+p4*p4ld_t3]+=t2_shm[h1*p4d+p4]*v2_d[h3*h3ld_v2+h2*h2ld_v2+p6*p6ld_v2]; } } __syncthreads(); } extern "C" void sd_t_s1_7_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, double *t3, double *t2, double *v2) { double st, et; //ckbn st = timer(); size_t p7ld_t2, p4ld_t2, h1ld_t2, h2ld_v2, p7ld_v2, h3ld_v2, p6ld_v2, p5ld_v2, h3ld_t3, h2ld_t3, h1ld_t3, p6ld_t3, p5ld_t3, p4ld_t3; size_t size_t3, size_block_t3, size_el_block_t3, size_t2, size_v2; cudaStream_t *streams; size_t nstreams, i; double 
*t2_d, *v2_d, *t3_p; size_t3 = h3d * h2d * h1d * p6d * p5d * p4d * sizeof(double); size_t2 = p4d * h1d * sizeof(double); size_v2 = h3d * h2d * p6d * p5d * sizeof(double); nstreams = 1; size_block_t3 = size_t3 / nstreams; size_el_block_t3 = size_block_t3 / sizeof(double); /* if(first==1) { t3_d = (double *) getGpuMem(size_t3); cudaMemset(t3_d,0,size_t3*sizeof(double)); first = 0; } */ // t3_d = (double *) getGpuMem(size_t3); t2_d = (double *) getGpuMem(size_t2); v2_d = (double *) getGpuMem(size_v2); t3_p = (double *) getHostMem(size_t3); streams = (cudaStream_t *) malloc(nstreams * sizeof(cudaStream_t)); assert(streams != NULL); for (i = 0; i < nstreams; ++i) { CUDA_SAFE(cudaStreamCreate(&streams[i])); } CUDA_SAFE(cudaMemcpy(t2_d, t2, size_t2, cudaMemcpyHostToDevice)); CUDA_SAFE(cudaMemcpy(v2_d, v2, size_v2, cudaMemcpyHostToDevice)); p4ld_t2 = 1; h1ld_t2 = p4d; h3ld_v2 = 1; h2ld_v2 = h3d; p6ld_v2 = h3d * h2d; // p5ld_v2 = p6d * h3d * p7d; h3ld_t3 = 1; h2ld_t3 = h3d; h1ld_t3 = h2d * h3d; p4ld_t3 = h1d * h2d * h3d; // p5ld_t3 = p6d * h1d * h2d * h3d; p6ld_t3 = p4d * h1d * h2d * h3d; int total_x = h3d*h2d*p6d*p5d; dim3 dimBlock(T2*T1);dim3 dimGrid(DIV_UB(total_x,T2*T1), 1); for(i=0;i<nstreams;++i){ sd_t_s1_7_kernel<<<dimGrid,dimBlock,0,streams[i]>>>(h1d,h2d,h3d,p4d,p5d*p6d,p4ld_t2,h1ld_t2,h3ld_v2,h2ld_v2,p6ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p4ld_t3,t3_s_d,t2_d,v2_d,i,total_x); CHECK_ERR("Kernel execution failed"); } /* for (i = 0; i < nstreams; ++i) { CUDA_SAFE(cudaMemcpyAsync(((char *) t3_p) + i * size_block_t3, ((char *) t3_s_d) + i * size_block_t3, size_block_t3, cudaMemcpyDeviceToHost, streams[i])); } stream = 0; while (stream < nstreams) { while (cudaStreamQuery(streams[stream]) != cudaSuccess); double *src = &t3_p[stream * size_el_block_t3]; double *dst = &t3[stream * size_el_block_t3]; for (i = 0; i < size_el_block_t3; ++i) { dst[i] = src[i]; } stream++; }*/ cudaThreadSynchronize(); //CUDA_SAFE(cudaMemcpy(((char *) t3) , ((char *) t3_s_d) , size_t3, cudaMemcpyDeviceToHost)); for (i = 0; i < nstreams; ++i) { cudaStreamDestroy(streams[i]); } // freeGpuMem(t3_d); freeGpuMem(t2_d); freeGpuMem(v2_d); freeHostMem(t3_p); free(streams); } #undef T1 #undef T2 #undef Tcomm extern "C" void sd_t_s1_7_cuda_(Integer * h1d, Integer * h2d, Integer * h3d, Integer * p4d, Integer * p5d, Integer * p6d, double *t3, double *t2, double *v2) { sd_t_s1_7_cuda((int) *h1d, (int) *h2d, (int) *h3d, (int) *p4d, (int) *p5d, (int) *p6d, t3, t2, v2); } #define T1 16 #define T2 16 #define Tcomm 16 __global__ void sd_t_s1_8_kernel(int h1d,int h2d,int h3d,int p4d,int p6d,int p4ld_t2,int h1ld_t2,int h3ld_v2,int h2ld_v2,int p6ld_v2,int h3ld_t3,int h2ld_t3,int h1ld_t3,int p6ld_t3,int p4ld_t3,double *t3d, double *t2_d, double *v2_d,int p4, int total_x) { int h1,h2,h3,p6; __shared__ double t2_shm[T1*2*Tcomm]; for(int i=threadIdx.x;i<h1d*p4d;i+=blockDim.x) if(i<h1d*p4d) t2_shm[i] = t2_d[i]; int rest_x=blockIdx.x; int thread_x = T2*T1 * rest_x + threadIdx.x; rest_x = thread_x; __syncthreads(); /* the following computation may need to happen inside the loop */ for(int i=0;i<total_x;i+=gridDim.x*blockDim.x) { rest_x += i; h3=rest_x%h3d; rest_x=rest_x/h3d; h2=rest_x%h2d; rest_x=rest_x/h2d; p6=rest_x%p6d; if((thread_x+i)<total_x) for(h1=0;h1<h1d;h1++) for(p4=0;p4<p4d;p4++) { t3d[h3*h3ld_t3+h2*h2ld_t3+h1*h1ld_t3+p6*p6ld_t3+p4*p4ld_t3]-=t2_shm[h1*p4d+p4]*v2_d[h3*h3ld_v2+h2*h2ld_v2+p6*p6ld_v2]; } } __syncthreads(); } /*----------------------------------------------------------------------* *t3[h3,h1,h2,p4,p6,p5] -= t2[p4,h1] * 
v2[h3,h2,p6,p5] *----------------------------------------------------------------------*/ #define T1 16 #define T2 16 #define Tcomm 16 extern "C" void sd_t_s1_8_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, double *t3, double *t2, double *v2) { double st, et; //ckbn st = timer(); size_t p7ld_t2, p4ld_t2, h1ld_t2, h2ld_v2, p7ld_v2, h3ld_v2, p6ld_v2, p5ld_v2, h3ld_t3, h2ld_t3, h1ld_t3, p6ld_t3, p5ld_t3, p4ld_t3; size_t size_t3, size_block_t3, size_el_block_t3, size_t2, size_v2; cudaStream_t *streams; size_t nstreams, i; double *t2_d, *v2_d, *t3_p; size_t3 = h3d * h2d * h1d * p6d * p5d * p4d * sizeof(double); size_t2 = p4d * h1d * sizeof(double); size_v2 = h3d * h2d * p6d * p5d * sizeof(double); nstreams = 1; size_block_t3 = size_t3 / nstreams; size_el_block_t3 = size_block_t3 / sizeof(double); /* if(first==1) { t3_d = (double *) getGpuMem(size_t3); cudaMemset(t3_d,0,size_t3*sizeof(double)); first = 0; } */ // t3_d = (double *) getGpuMem(size_t3); t2_d = (double *) getGpuMem(size_t2); v2_d = (double *) getGpuMem(size_v2); t3_p = (double *) getHostMem(size_t3); streams = (cudaStream_t *) malloc(nstreams * sizeof(cudaStream_t)); assert(streams != NULL); for (i = 0; i < nstreams; ++i) { CUDA_SAFE(cudaStreamCreate(&streams[i])); } CUDA_SAFE(cudaMemcpy(t2_d, t2, size_t2, cudaMemcpyHostToDevice)); CUDA_SAFE(cudaMemcpy(v2_d, v2, size_v2, cudaMemcpyHostToDevice)); p4ld_t2 = 1; h1ld_t2 = p4d; h3ld_v2 = 1; h2ld_v2 = h3d; p6ld_v2 = h3d * h2d; // p5ld_v2 = p6d * h3d * p7d; h3ld_t3 = 1; h1ld_t3 = h3d; h2ld_t3 = h1d * h3d; p4ld_t3 = h1d * h2d * h3d; // p5ld_t3 = p6d * h1d * h2d * h3d; p6ld_t3 = p4d * h1d * h2d * h3d; int total_x = h3d*h2d*p6d*p5d; dim3 dimBlock(T2*T1);dim3 dimGrid(DIV_UB(total_x,T2*T1), 1); for(i=0;i<nstreams;++i){ sd_t_s1_8_kernel<<<dimGrid,dimBlock,0,streams[i]>>>(h1d,h2d,h3d,p4d,p5d*p6d,p4ld_t2,h1ld_t2,h3ld_v2,h2ld_v2,p6ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p4ld_t3,t3_s_d,t2_d,v2_d,i,total_x); CHECK_ERR("Kernel execution failed"); } /* for (i = 0; i < nstreams; ++i) { CUDA_SAFE(cudaMemcpyAsync(((char *) t3_p) + i * size_block_t3, ((char *) t3_s_d) + i * size_block_t3, size_block_t3, cudaMemcpyDeviceToHost, streams[i])); } stream = 0; while (stream < nstreams) { while (cudaStreamQuery(streams[stream]) != cudaSuccess); double *src = &t3_p[stream * size_el_block_t3]; double *dst = &t3[stream * size_el_block_t3]; for (i = 0; i < size_el_block_t3; ++i) { dst[i] = src[i]; } stream++; }*/ cudaThreadSynchronize(); // CUDA_SAFE(cudaMemcpy(((char *) t3) , ((char *) t3_s_d) , size_t3, cudaMemcpyDeviceToHost)); for (i = 0; i < nstreams; ++i) { cudaStreamDestroy(streams[i]); } // freeGpuMem(t3_d); freeGpuMem(t2_d); freeGpuMem(v2_d); freeHostMem(t3_p); free(streams); } extern "C" void sd_t_s1_8_cuda_(Integer * h1d, Integer * h2d, Integer * h3d, Integer * p4d, Integer * p5d, Integer * p6d, double *t3, double *t2, double *v2) { sd_t_s1_8_cuda((int) *h1d, (int) *h2d, (int) *h3d, (int) *p4d, (int) *p5d, (int) *p6d, t3, t2, v2); } /*----------------------------------------------------------------------* *t3[h1,h3,h2,p4,p6,p5] -= t2[p4,h1] * v2[h3,h2,p6,p5] *----------------------------------------------------------------------*/ extern "C" void sd_t_s1_9_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, double *t3, double *t2, double *v2) { double st, et; //ckbn st = timer(); size_t p7ld_t2, p4ld_t2, h1ld_t2, h2ld_v2, p7ld_v2, h3ld_v2, p6ld_v2, p5ld_v2, h3ld_t3, h2ld_t3, h1ld_t3, p6ld_t3, p5ld_t3, p4ld_t3; size_t size_t3, size_block_t3, size_el_block_t3, size_t2, size_v2; 
cudaStream_t *streams; size_t nstreams, i; double *t2_d, *v2_d, *t3_p; size_t3 = h3d * h2d * h1d * p6d * p5d * p4d * sizeof(double); size_t2 = p4d * h1d * sizeof(double); size_v2 = h3d * h2d * p6d * p5d * sizeof(double); nstreams = 1; size_block_t3 = size_t3 / nstreams; size_el_block_t3 = size_block_t3 / sizeof(double); /* if(first==1) { t3_d = (double *) getGpuMem(size_t3); cudaMemset(t3_d,0,size_t3*sizeof(double)); first = 0; } */ // t3_d = (double *) getGpuMem(size_t3); t2_d = (double *) getGpuMem(size_t2); v2_d = (double *) getGpuMem(size_v2); t3_p = (double *) getHostMem(size_t3); streams = (cudaStream_t *) malloc(nstreams * sizeof(cudaStream_t)); assert(streams != NULL); for (i = 0; i < nstreams; ++i) { CUDA_SAFE(cudaStreamCreate(&streams[i])); } CUDA_SAFE(cudaMemcpy(t2_d, t2, size_t2, cudaMemcpyHostToDevice)); CUDA_SAFE(cudaMemcpy(v2_d, v2, size_v2, cudaMemcpyHostToDevice)); p4ld_t2 = 1; h1ld_t2 = p4d; h3ld_v2 = 1; h2ld_v2 = h3d; p6ld_v2 = h3d * h2d; // p5ld_v2 = p6d * h3d * p7d; h1ld_t3 = 1; h3ld_t3 = h1d; h2ld_t3 = h1d * h3d; p4ld_t3 = h1d * h2d * h3d; // p5ld_t3 = p6d * h1d * h2d * h3d; p6ld_t3 = p4d * h1d * h2d * h3d; int total_x = h3d*h2d*p6d*p5d; dim3 dimBlock(T2*T1);dim3 dimGrid(DIV_UB(total_x,T2*T1), 1); for(i=0;i<nstreams;++i){ sd_t_s1_7_kernel<<<dimGrid,dimBlock,0,streams[i]>>>(h1d,h2d,h3d,p4d,p5d*p6d,p4ld_t2,h1ld_t2,h3ld_v2,h2ld_v2,p6ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p4ld_t3,t3_s_d,t2_d,v2_d,i,total_x); CHECK_ERR("Kernel execution failed"); } /* for (i = 0; i < nstreams; ++i) { CUDA_SAFE(cudaMemcpyAsync(((char *) t3_p) + i * size_block_t3, ((char *) t3_s_d) + i * size_block_t3, size_block_t3, cudaMemcpyDeviceToHost, streams[i])); } stream = 0; while (stream < nstreams) { while (cudaStreamQuery(streams[stream]) != cudaSuccess); double *src = &t3_p[stream * size_el_block_t3]; double *dst = &t3[stream * size_el_block_t3]; for (i = 0; i < size_el_block_t3; ++i) { dst[i] = src[i]; } stream++; }*/ cudaThreadSynchronize(); //CUDA_SAFE(cudaMemcpy(((char *) t3) , ((char *) t3_s_d) , size_t3, cudaMemcpyDeviceToHost)); // printf("out is %lf\n", t3_p[0]); for (i = 0; i < nstreams; ++i) { cudaStreamDestroy(streams[i]); } //freeGpuMem(t3_d); freeGpuMem(t2_d); freeGpuMem(v2_d); freeHostMem(t3_p); free(streams); } extern "C" void sd_t_s1_9_cuda_(Integer * h1d, Integer * h2d, Integer * h3d, Integer * p4d, Integer * p5d, Integer * p6d, double *t3, double *t2, double *v2) { sd_t_s1_9_cuda((int) *h1d, (int) *h2d, (int) *h3d, (int) *p4d, (int) *p5d, (int) *p6d, t3, t2, v2); }
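/* --------------------------------------------------------------------
 * Added note (not part of the original file): the nine sd_t_s1_* drivers
 * above all issue the same singles update
 *     t3[...]  +=/-=  t2[p4,h1] * v2[h3,h2,p6,p5]
 * and differ only in the output index order, which is encoded entirely in
 * the *ld_t3 stride arguments (the dimension listed first is fastest
 * varying).  The helper below is an illustrative sketch of how such strides
 * are derived for an arbitrary index order; the function name is
 * hypothetical.  For example, with extents {h3d, h2d, h1d, p6d, p5d, p4d}
 * (the t3[h3,h2,h1,p6,p5,p4] case) it yields stride[0] = 1 = h3ld_t3,
 * stride[1] = h3d = h2ld_t3, stride[2] = h2d*h3d = h1ld_t3, and so on,
 * matching the values set by hand in sd_t_s1_1_cuda.
 * -------------------------------------------------------------------- */
static void build_strides(int ndim, const int *extent, size_t *stride)
{
  /* stride[d] = distance in elements between consecutive values of index d,
     assuming dimension 0 is the fastest-varying one */
  size_t s = 1;
  for (int d = 0; d < ndim; ++d) { stride[d] = s; s *= (size_t)extent[d]; }
}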
ac071c840e37cde580a77d0e3c4ceb63f7e6c904.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "oneflow/core/framework/framework.h" #include "oneflow/user/kernels/op_kernel_state_wrapper.h" #include "oneflow/core/kernel/random_generator.h" #include "oneflow/core/kernel/kernel_util.h" #include "oneflow/core/common/data_type.h" namespace oneflow { namespace { template<typename T> __global__ void MaskAndScaleGpu(const int64_t n, float scale, const T* x, const int8_t* mask, T* y) { CUDA_1D_KERNEL_LOOP(i, n) { y[i] = x[i] * static_cast<T>(mask[i]) * scale; } } template<typename T> __global__ void MaskAndScaleAddGpu(const int64_t n, float scale, const T* x, const int8_t* mask, const T* addend, T* y) { CUDA_1D_KERNEL_LOOP(i, n) { y[i] = x[i] * static_cast<T>(mask[i]) * scale + addend[i]; } } template<> __global__ void MaskAndScaleGpu<half>(const int64_t n, float scale, const half* x, const int8_t* mask, half* y) { const int64_t h2_n = n / 2; half2 h2_scale = __float2half2_rn(scale); const auto* x_h2 = reinterpret_cast<const half2*>(x); const auto* mask_c2 = reinterpret_cast<const char2*>(mask); auto* y_h2 = reinterpret_cast<half2*>(y); CUDA_1D_KERNEL_LOOP(i, h2_n) { char2 mask_val = mask_c2[i]; half2 one_or_zero_h2; one_or_zero_h2.x = mask_val.x; one_or_zero_h2.y = mask_val.y; y_h2[i] = __hmul2(__hmul2(x_h2[i], one_or_zero_h2), h2_scale); } if (n % 2 != 0 && blockIdx.x == 0 && threadIdx.x == 0) { const int64_t last_idx = n - 1; half one_or_zero = mask[last_idx]; y[last_idx] = __hmul(__hmul(x[last_idx], one_or_zero), h2_scale.x); } } template<> __global__ void MaskAndScaleAddGpu<half>(const int64_t n, float scale, const half* x, const int8_t* mask, const half* addend, half* y) { const int64_t h2_n = n / 2; half2 h2_scale = __float2half2_rn(scale); const auto* x_h2 = reinterpret_cast<const half2*>(x); const auto* addend_h2 = reinterpret_cast<const half2*>(addend); const auto* mask_c2 = reinterpret_cast<const char2*>(mask); auto* y_h2 = reinterpret_cast<half2*>(y); CUDA_1D_KERNEL_LOOP(i, h2_n) { char2 mask_val = mask_c2[i]; half2 one_or_zero_h2; one_or_zero_h2.x = mask_val.x; one_or_zero_h2.y = mask_val.y; y_h2[i] = __hadd2(__hmul2(__hmul2(x_h2[i], one_or_zero_h2), h2_scale), addend_h2[i]); } if (n % 2 != 0 && blockIdx.x == 0 && threadIdx.x == 0) { const int64_t last_idx = n - 1; half one_or_zero = mask[last_idx]; y[last_idx] = __hadd(__hmul(__hmul(x[last_idx], one_or_zero), h2_scale.x), addend[last_idx]); } } template<typename T> void MaskAndScale(DeviceCtx* ctx, const int64_t n, float scale, const T* x, const int8_t* mask, T* y) { hipLaunchKernelGGL(( MaskAndScaleGpu<T>), dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(), n, scale, x, mask, y); } template<> void MaskAndScale<half>(DeviceCtx* ctx, const int64_t n, float scale, const half* x, const int8_t* mask, half* y) { hipLaunchKernelGGL(( MaskAndScaleGpu<half>) , dim3(BlocksNum4ThreadsNum(RoundUp(n, 2) / 2)), dim3(kCudaThreadsNumPerBlock), 
0, ctx->cuda_stream(), n, scale, x, mask, y); } template<typename T> void MaskAndScaleAdd(DeviceCtx* ctx, const int64_t n, float scale, const T* x, const int8_t* mask, const T* addend, T* y) { hipLaunchKernelGGL(( MaskAndScaleAddGpu<T>) , dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(), n, scale, x, mask, addend, y); } template<> void MaskAndScaleAdd<half>(DeviceCtx* ctx, const int64_t n, float scale, const half* x, const int8_t* mask, const half* addend, half* y) { hipLaunchKernelGGL(( MaskAndScaleAddGpu<half>) , dim3(BlocksNum4ThreadsNum(RoundUp(n, 2) / 2)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(), n, scale, x, mask, addend, y); } template<typename T> class DropoutKernelGPU final : public user_op::OpKernel { public: DropoutKernelGPU() = default; ~DropoutKernelGPU() = default; private: void Compute(user_op::KernelComputeContext* ctx) const override { const user_op::Tensor* in = ctx->Tensor4ArgNameAndIndex("in", 0); const user_op::Tensor* mask = ctx->Tensor4ArgNameAndIndex("mask", 0); user_op::Tensor* out = ctx->Tensor4ArgNameAndIndex("out", 0); const float scale = ctx->Attr<float>("scale"); if (ctx->has_input("_add_to_output", 0)) { const user_op::Tensor* addend = ctx->Tensor4ArgNameAndIndex("_add_to_output", 0); MaskAndScaleAdd<T>(ctx->device_ctx(), in->shape().elem_cnt(), scale, in->dptr<T>(), mask->dptr<int8_t>(), addend->dptr<T>(), out->mut_dptr<T>()); } else { MaskAndScale<T>(ctx->device_ctx(), in->shape().elem_cnt(), scale, in->dptr<T>(), mask->dptr<int8_t>(), out->mut_dptr<T>()); } } bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; #define REGISTER_DROPOUT_KERNEL_GPU(dtype) \ REGISTER_USER_KERNEL("dropout").SetCreateFn<DropoutKernelGPU<dtype>>().SetIsMatchedHob( \ (user_op::HobDeviceTag() == "gpu") \ & (user_op::HobDataType("out", 0) == GetDataType<dtype>::value)); REGISTER_DROPOUT_KERNEL_GPU(half) REGISTER_DROPOUT_KERNEL_GPU(float) REGISTER_DROPOUT_KERNEL_GPU(double) template<typename T> class DropoutGradKernelGPU final : public user_op::OpKernel { public: DropoutGradKernelGPU() = default; ~DropoutGradKernelGPU() = default; private: void Compute(user_op::KernelComputeContext* ctx) const override { const user_op::Tensor* dy = ctx->Tensor4ArgNameAndIndex("dy", 0); const user_op::Tensor* mask = ctx->Tensor4ArgNameAndIndex("mask", 0); user_op::Tensor* dx = ctx->Tensor4ArgNameAndIndex("dx", 0); const float scale = ctx->Attr<float>("scale"); MaskAndScale<T>(ctx->device_ctx(), dy->shape().elem_cnt(), scale, dy->dptr<T>(), mask->dptr<int8_t>(), dx->mut_dptr<T>()); } bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; #define REGISTER_DROPOUT_GRAD_KERNEL_GPU(dtype) \ REGISTER_USER_KERNEL("dropout_grad") \ .SetCreateFn<DropoutGradKernelGPU<dtype>>() \ .SetIsMatchedHob((user_op::HobDeviceTag() == "gpu") \ & (user_op::HobDataType("dx", 0) == GetDataType<dtype>::value)) \ .SetInplaceProposalFn([](const user_op::InferContext&, \ user_op::AddInplaceArgPair AddInplaceArgPairFn) -> Maybe<void> { \ OF_RETURN_IF_ERROR(AddInplaceArgPairFn("dx", 0, "dy", 0, true)); \ return Maybe<void>::Ok(); \ }); REGISTER_DROPOUT_GRAD_KERNEL_GPU(half) REGISTER_DROPOUT_GRAD_KERNEL_GPU(float) REGISTER_DROPOUT_GRAD_KERNEL_GPU(double) } // namespace } // namespace oneflow
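// --------------------------------------------------------------------
// Added note (not part of the OneFlow source): the half specializations above
// process the data two elements at a time by reinterpreting the int8 mask as
// char2 and the half tensors as half2, so each thread performs one packed
// __hmul2/__hadd2; an odd trailing element is handled exactly once by thread 0
// of block 0, which is why the launches size the grid with RoundUp(n, 2) / 2.
// The kernel below is a minimal sketch of that packing pattern applied to a
// plain scale-by-constant update (hypothetical name, same CUDA_1D_KERNEL_LOOP
// macro as above); it assumes x and y are at least 4-byte aligned so the
// half2 casts are valid, as the kernels above also implicitly do.
// --------------------------------------------------------------------
__global__ void ScaleHalfPackedSketch(const int64_t n, float scale, const half* x, half* y) {
  const int64_t h2_n = n / 2;
  const half2 s2 = __float2half2_rn(scale);
  const auto* x2 = reinterpret_cast<const half2*>(x);
  auto* y2 = reinterpret_cast<half2*>(y);
  CUDA_1D_KERNEL_LOOP(i, h2_n) { y2[i] = __hmul2(x2[i], s2); }
  if (n % 2 != 0 && blockIdx.x == 0 && threadIdx.x == 0) {
    y[n - 1] = __hmul(x[n - 1], s2.x);  // odd-length tail, handled once
  }
}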
ac071c840e37cde580a77d0e3c4ceb63f7e6c904.cu
/* Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "oneflow/core/framework/framework.h" #include "oneflow/user/kernels/op_kernel_state_wrapper.h" #include "oneflow/core/kernel/random_generator.h" #include "oneflow/core/kernel/kernel_util.h" #include "oneflow/core/common/data_type.h" namespace oneflow { namespace { template<typename T> __global__ void MaskAndScaleGpu(const int64_t n, float scale, const T* x, const int8_t* mask, T* y) { CUDA_1D_KERNEL_LOOP(i, n) { y[i] = x[i] * static_cast<T>(mask[i]) * scale; } } template<typename T> __global__ void MaskAndScaleAddGpu(const int64_t n, float scale, const T* x, const int8_t* mask, const T* addend, T* y) { CUDA_1D_KERNEL_LOOP(i, n) { y[i] = x[i] * static_cast<T>(mask[i]) * scale + addend[i]; } } template<> __global__ void MaskAndScaleGpu<half>(const int64_t n, float scale, const half* x, const int8_t* mask, half* y) { const int64_t h2_n = n / 2; half2 h2_scale = __float2half2_rn(scale); const auto* x_h2 = reinterpret_cast<const half2*>(x); const auto* mask_c2 = reinterpret_cast<const char2*>(mask); auto* y_h2 = reinterpret_cast<half2*>(y); CUDA_1D_KERNEL_LOOP(i, h2_n) { char2 mask_val = mask_c2[i]; half2 one_or_zero_h2; one_or_zero_h2.x = mask_val.x; one_or_zero_h2.y = mask_val.y; y_h2[i] = __hmul2(__hmul2(x_h2[i], one_or_zero_h2), h2_scale); } if (n % 2 != 0 && blockIdx.x == 0 && threadIdx.x == 0) { const int64_t last_idx = n - 1; half one_or_zero = mask[last_idx]; y[last_idx] = __hmul(__hmul(x[last_idx], one_or_zero), h2_scale.x); } } template<> __global__ void MaskAndScaleAddGpu<half>(const int64_t n, float scale, const half* x, const int8_t* mask, const half* addend, half* y) { const int64_t h2_n = n / 2; half2 h2_scale = __float2half2_rn(scale); const auto* x_h2 = reinterpret_cast<const half2*>(x); const auto* addend_h2 = reinterpret_cast<const half2*>(addend); const auto* mask_c2 = reinterpret_cast<const char2*>(mask); auto* y_h2 = reinterpret_cast<half2*>(y); CUDA_1D_KERNEL_LOOP(i, h2_n) { char2 mask_val = mask_c2[i]; half2 one_or_zero_h2; one_or_zero_h2.x = mask_val.x; one_or_zero_h2.y = mask_val.y; y_h2[i] = __hadd2(__hmul2(__hmul2(x_h2[i], one_or_zero_h2), h2_scale), addend_h2[i]); } if (n % 2 != 0 && blockIdx.x == 0 && threadIdx.x == 0) { const int64_t last_idx = n - 1; half one_or_zero = mask[last_idx]; y[last_idx] = __hadd(__hmul(__hmul(x[last_idx], one_or_zero), h2_scale.x), addend[last_idx]); } } template<typename T> void MaskAndScale(DeviceCtx* ctx, const int64_t n, float scale, const T* x, const int8_t* mask, T* y) { MaskAndScaleGpu<T><<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>( n, scale, x, mask, y); } template<> void MaskAndScale<half>(DeviceCtx* ctx, const int64_t n, float scale, const half* x, const int8_t* mask, half* y) { MaskAndScaleGpu<half> <<<BlocksNum4ThreadsNum(RoundUp(n, 2) / 2), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>( n, scale, x, mask, y); } template<typename T> void MaskAndScaleAdd(DeviceCtx* ctx, const int64_t n, float scale, const T* x, 
const int8_t* mask, const T* addend, T* y) { MaskAndScaleAddGpu<T> <<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>( n, scale, x, mask, addend, y); } template<> void MaskAndScaleAdd<half>(DeviceCtx* ctx, const int64_t n, float scale, const half* x, const int8_t* mask, const half* addend, half* y) { MaskAndScaleAddGpu<half> <<<BlocksNum4ThreadsNum(RoundUp(n, 2) / 2), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>( n, scale, x, mask, addend, y); } template<typename T> class DropoutKernelGPU final : public user_op::OpKernel { public: DropoutKernelGPU() = default; ~DropoutKernelGPU() = default; private: void Compute(user_op::KernelComputeContext* ctx) const override { const user_op::Tensor* in = ctx->Tensor4ArgNameAndIndex("in", 0); const user_op::Tensor* mask = ctx->Tensor4ArgNameAndIndex("mask", 0); user_op::Tensor* out = ctx->Tensor4ArgNameAndIndex("out", 0); const float scale = ctx->Attr<float>("scale"); if (ctx->has_input("_add_to_output", 0)) { const user_op::Tensor* addend = ctx->Tensor4ArgNameAndIndex("_add_to_output", 0); MaskAndScaleAdd<T>(ctx->device_ctx(), in->shape().elem_cnt(), scale, in->dptr<T>(), mask->dptr<int8_t>(), addend->dptr<T>(), out->mut_dptr<T>()); } else { MaskAndScale<T>(ctx->device_ctx(), in->shape().elem_cnt(), scale, in->dptr<T>(), mask->dptr<int8_t>(), out->mut_dptr<T>()); } } bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; #define REGISTER_DROPOUT_KERNEL_GPU(dtype) \ REGISTER_USER_KERNEL("dropout").SetCreateFn<DropoutKernelGPU<dtype>>().SetIsMatchedHob( \ (user_op::HobDeviceTag() == "gpu") \ & (user_op::HobDataType("out", 0) == GetDataType<dtype>::value)); REGISTER_DROPOUT_KERNEL_GPU(half) REGISTER_DROPOUT_KERNEL_GPU(float) REGISTER_DROPOUT_KERNEL_GPU(double) template<typename T> class DropoutGradKernelGPU final : public user_op::OpKernel { public: DropoutGradKernelGPU() = default; ~DropoutGradKernelGPU() = default; private: void Compute(user_op::KernelComputeContext* ctx) const override { const user_op::Tensor* dy = ctx->Tensor4ArgNameAndIndex("dy", 0); const user_op::Tensor* mask = ctx->Tensor4ArgNameAndIndex("mask", 0); user_op::Tensor* dx = ctx->Tensor4ArgNameAndIndex("dx", 0); const float scale = ctx->Attr<float>("scale"); MaskAndScale<T>(ctx->device_ctx(), dy->shape().elem_cnt(), scale, dy->dptr<T>(), mask->dptr<int8_t>(), dx->mut_dptr<T>()); } bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; #define REGISTER_DROPOUT_GRAD_KERNEL_GPU(dtype) \ REGISTER_USER_KERNEL("dropout_grad") \ .SetCreateFn<DropoutGradKernelGPU<dtype>>() \ .SetIsMatchedHob((user_op::HobDeviceTag() == "gpu") \ & (user_op::HobDataType("dx", 0) == GetDataType<dtype>::value)) \ .SetInplaceProposalFn([](const user_op::InferContext&, \ user_op::AddInplaceArgPair AddInplaceArgPairFn) -> Maybe<void> { \ OF_RETURN_IF_ERROR(AddInplaceArgPairFn("dx", 0, "dy", 0, true)); \ return Maybe<void>::Ok(); \ }); REGISTER_DROPOUT_GRAD_KERNEL_GPU(half) REGISTER_DROPOUT_GRAD_KERNEL_GPU(float) REGISTER_DROPOUT_GRAD_KERNEL_GPU(double) } // namespace } // namespace oneflow
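// --------------------------------------------------------------------
// Added note (not part of either file): this .cu file and the .hip file above
// contain the same kernel source; they differ mainly in kernel-launch syntax.
// A HIP launch such as
//   hipLaunchKernelGGL((MaskAndScaleGpu<T>), dim3(BlocksNum4ThreadsNum(n)),
//                      dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(),
//                      n, scale, x, mask, y);
// corresponds to the CUDA triple-chevron form used here,
//   MaskAndScaleGpu<T><<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0,
//                        ctx->cuda_stream()>>>(n, scale, x, mask, y);
// with the shared-memory byte count (0) and the stream passed explicitly as
// the third and fourth arguments of the macro.
// --------------------------------------------------------------------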
ca375f8e1404485d61b87f145bb02caf6c1c20de.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime.h> #include "rocblas.h" #include "common.h" const int num_submatrix = 8; void copyElements(float* out, float* entry, unsigned long long eRows, unsigned long long eCols, unsigned long long oRows, unsigned long long oCols, unsigned long long x, unsigned long long y, unsigned long long ofA, unsigned long long ofB){ unsigned long long counterRows = eRows; unsigned long long counterCols = eCols; if(ofA){ counterRows = ofA; } if(ofB){ counterCols = ofB; } for(unsigned long long i = 0; i < counterRows; ++i){ for(unsigned long long j = 0; j < counterCols; ++j){ out[x*eRows*oCols + (i*oCols) + (y*eCols + j)] = entry[i*eCols + j]; } } } void msplitm(char transa, char transb, unsigned long long m, unsigned long long n, unsigned long long k, float alpha, const float *A, int lda, const float *B, int ldb, float beta, float *C, int ldc) { float* A_d; float* B_d; float* C_d; unsigned long long A_sz = m * k; unsigned long long B_sz = n * k; unsigned long long C_sz = m * n; unsigned long long MAX = (unsigned long long )m* (unsigned long long) n / num_submatrix; MAX -= MAX % k; printf("MAX: %d\n", MAX); printf("B_sz: %d\n",B_sz); unsigned long long numSubMatrixB = B_sz / MAX; printf("SubmatriciesB: %d\n", numSubMatrixB); unsigned long long SMB_sz = B_sz / numSubMatrixB; printf("SMB_sz: %d\n", SMB_sz); unsigned long long subCols = B_sz / (numSubMatrixB * k); printf("subCols: %d\n", subCols); unsigned long long numSubMatrixA = A_sz / MAX; unsigned long long SMA_sz = A_sz / numSubMatrixA; unsigned long long subRows = A_sz / (numSubMatrixA * k); printf("subrows: %d\n", subRows); printf("SMA_sz: %d\n", SMA_sz); printf("submatriciesA: %d\n", numSubMatrixA); unsigned long long overflowA = m % subRows; unsigned long long overflowB = n % subCols; printf("overflowB: %d\n", overflowB); printf("overflowA: %d\n", overflowA); float** B_split = (float**)malloc(sizeof(float*) * (numSubMatrixB + 1)); for(int i = 0; i < numSubMatrixB + 1; ++i){ float* temp = (float*) malloc( sizeof(float)*subCols * k ); for(int j = 0; j < k; ++j){ for(int x = 0; x < subCols; ++x){ if(i * subCols + x < n){ temp[j * subCols + x] = B[j * n + (i*subCols + x)]; }else{ temp[j *subCols + x] = 0; } } } hipMalloc((void**) &B_split[i], sizeof(float) * subCols * k); hipMemcpy(B_split[i], temp, sizeof(float)*subCols*k, hipMemcpyHostToDevice); free(temp); } for(unsigned long long i = 0; i < numSubMatrixA + 1; ++i){ if(overflowA == 0 && i == numSubMatrixA){ break; } float* temp = (float*) malloc( sizeof(float)*subRows * k ); for(int j = 0; j < subRows; ++j){ for(int x = 0; x < k; ++x){ if(i * subRows + j < m){ temp[j * k + x] = A[i*subRows*k + j*k + x]; }else{ temp[j * k + x] = 0; } } } float* temp2 = 0; float* temp3 = 0; hipMalloc((void**) &temp2, sizeof(float) * subRows * k); hipMalloc((void**) &temp3, sizeof(float) * subCols * subRows); hipMemcpy(temp2, temp, sizeof(float)*subRows*k, hipMemcpyHostToDevice); free(temp); printf("Running multiply for row group %d\n", i); temp = (float*)malloc(sizeof(float)*subRows*subCols); for(int x = 0; x < numSubMatrixB + 1; ++x){ if(overflowB == 0 && x == numSubMatrixB){ break; } doMultiply2Matrices(subRows, k, temp2, k, subCols, B_split[x], temp3, alpha); hipMemcpy(temp, temp3, sizeof(float)*subRows*subCols,hipMemcpyDeviceToHost); if(x == numSubMatrixB && i == numSubMatrixA){ copyElements(C, temp, subRows, subCols, m, n, i, x, overflowA, overflowB, beta); }else if(x == numSubMatrixB){ copyElements(C, temp, subRows, 
subCols, m, n, i, x, 0, overflowB, beta); }else if(i == numSubMatrixA){ copyElements(C, temp, subRows, subCols, m, n, i, x, overflowA, 0, beta); }else{ copyElements(C, temp, subRows, subCols, m, n, i, x, 0, 0, beta); } } free(temp); /* release the host staging buffer for this row group */ hipFree(temp2); hipFree(temp3); } for(int i = 0; i < numSubMatrixB + 1; ++i){ hipFree(B_split[i]); } free(B_split); /* release the cached B column panels */ }
ca375f8e1404485d61b87f145bb02caf6c1c20de.cu
#include <stdio.h> #include <cuda_runtime.h> #include "cublas_v2.h" #include "common.h" const int num_submatrix = 8; void copyElements(float* out, float* entry, unsigned long long eRows, unsigned long long eCols, unsigned long long oRows, unsigned long long oCols, unsigned long long x, unsigned long long y, unsigned long long ofA, unsigned long long ofB){ unsigned long long counterRows = eRows; unsigned long long counterCols = eCols; if(ofA){ counterRows = ofA; } if(ofB){ counterCols = ofB; } for(unsigned long long i = 0; i < counterRows; ++i){ for(unsigned long long j = 0; j < counterCols; ++j){ out[x*eRows*oCols + (i*oCols) + (y*eCols + j)] = entry[i*eCols + j]; } } } void msplitm(char transa, char transb, unsigned long long m, unsigned long long n, unsigned long long k, float alpha, const float *A, int lda, const float *B, int ldb, float beta, float *C, int ldc) { float* A_d; float* B_d; float* C_d; unsigned long long A_sz = m * k; unsigned long long B_sz = n * k; unsigned long long C_sz = m * n; unsigned long long MAX = (unsigned long long )m* (unsigned long long) n / num_submatrix; MAX -= MAX % k; printf("MAX: %d\n", MAX); printf("B_sz: %d\n",B_sz); unsigned long long numSubMatrixB = B_sz / MAX; printf("SubmatriciesB: %d\n", numSubMatrixB); unsigned long long SMB_sz = B_sz / numSubMatrixB; printf("SMB_sz: %d\n", SMB_sz); unsigned long long subCols = B_sz / (numSubMatrixB * k); printf("subCols: %d\n", subCols); unsigned long long numSubMatrixA = A_sz / MAX; unsigned long long SMA_sz = A_sz / numSubMatrixA; unsigned long long subRows = A_sz / (numSubMatrixA * k); printf("subrows: %d\n", subRows); printf("SMA_sz: %d\n", SMA_sz); printf("submatriciesA: %d\n", numSubMatrixA); unsigned long long overflowA = m % subRows; unsigned long long overflowB = n % subCols; printf("overflowB: %d\n", overflowB); printf("overflowA: %d\n", overflowA); float** B_split = (float**)malloc(sizeof(float*) * (numSubMatrixB + 1)); for(int i = 0; i < numSubMatrixB + 1; ++i){ float* temp = (float*) malloc( sizeof(float)*subCols * k ); for(int j = 0; j < k; ++j){ for(int x = 0; x < subCols; ++x){ if(i * subCols + x < n){ temp[j * subCols + x] = B[j * n + (i*subCols + x)]; }else{ temp[j *subCols + x] = 0; } } } cudaMalloc((void**) &B_split[i], sizeof(float) * subCols * k); cudaMemcpy(B_split[i], temp, sizeof(float)*subCols*k, cudaMemcpyHostToDevice); free(temp); } for(unsigned long long i = 0; i < numSubMatrixA + 1; ++i){ if(overflowA == 0 && i == numSubMatrixA){ break; } float* temp = (float*) malloc( sizeof(float)*subRows * k ); for(int j = 0; j < subRows; ++j){ for(int x = 0; x < k; ++x){ if(i * subRows + j < m){ temp[j * k + x] = A[i*subRows*k + j*k + x]; }else{ temp[j * k + x] = 0; } } } float* temp2 = 0; float* temp3 = 0; cudaMalloc((void**) &temp2, sizeof(float) * subRows * k); cudaMalloc((void**) &temp3, sizeof(float) * subCols * subRows); cudaMemcpy(temp2, temp, sizeof(float)*subRows*k, cudaMemcpyHostToDevice); free(temp); printf("Running multiply for row group %d\n", i); temp = (float*)malloc(sizeof(float)*subRows*subCols); for(int x = 0; x < numSubMatrixB + 1; ++x){ if(overflowB == 0 && x == numSubMatrixB){ break; } doMultiply2Matrices(subRows, k, temp2, k, subCols, B_split[x], temp3, alpha); cudaMemcpy(temp, temp3, sizeof(float)*subRows*subCols,cudaMemcpyDeviceToHost); if(x == numSubMatrixB && i == numSubMatrixA){ copyElements(C, temp, subRows, subCols, m, n, i, x, overflowA, overflowB, beta); }else if(x == numSubMatrixB){ copyElements(C, temp, subRows, subCols, m, n, i, x, 0, overflowB, beta); }else 
if(i == numSubMatrixA){ copyElements(C, temp, subRows, subCols, m, n, i, x, overflowA, 0, beta); }else{ copyElements(C, temp, subRows, subCols, m, n, i, x, 0, 0, beta); } } free(temp); /* release the host staging buffer for this row group */ cudaFree(temp2); cudaFree(temp3); } for(int i = 0; i < numSubMatrixB + 1; ++i){ cudaFree(B_split[i]); } free(B_split); /* release the cached B column panels */ }
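A note on the msplitm pair above: copyElements is defined with ten parameters, but every call site passes an eleventh argument, beta (the cleanup added at the end of the loop only addresses the leaked host buffer and B panels, not this mismatch). The sketch below shows one way the signature could be reconciled, under the assumption that beta is meant to blend into the existing C entries with GEMM semantics (C = alpha*A*B + beta*C); copyElementsSketch and that blending rule are assumptions, not the original author's definition.

// Sketch of copyElements with the extra beta argument that msplitm passes at
// every call site. The blend out = entry + beta*out assumes GEMM semantics
// (C = alpha*A*B + beta*C); the original intent may differ.
void copyElementsSketch(float* out, const float* entry,
                        unsigned long long eRows, unsigned long long eCols,
                        unsigned long long oRows, unsigned long long oCols,
                        unsigned long long x, unsigned long long y,
                        unsigned long long ofA, unsigned long long ofB,
                        float beta) {
  (void)oRows;  // kept for drop-in parity; unused in the original as well
  unsigned long long counterRows = ofA ? ofA : eRows;  // clip a partial row tile
  unsigned long long counterCols = ofB ? ofB : eCols;  // clip a partial column tile
  for (unsigned long long i = 0; i < counterRows; ++i) {
    for (unsigned long long j = 0; j < counterCols; ++j) {
      unsigned long long dst = x * eRows * oCols + i * oCols + y * eCols + j;
      out[dst] = entry[i * eCols + j] + beta * out[dst];
    }
  }
}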
2f75b8a617ef4574518a2bdd834062b65f2a4142.hip
// !!! This is a file automatically generated by hipify!!! /* * This CUDA-Cusparse code can handle/work with any type of the input mxArrays, * GPUarray or standard matlab CPU array as input {prhs[0]/prhs[1] := mxGPUArray or CPU Array}[double/complex double] * Sparse/Dense vector-sparse/dense vector addition Z=CuMatlab_addV(Sparse/Dense(X),Sparse/Dense(Y), alpha). * Z= alpha*X+Y * Developed at UCL, Institute of Neurology, 12 Queen Square, WC1N 3AR, London * Wellcome Trust Centre for Neuroimaging * Part of the project SPM(http://www.fil.ion.ucl.ac.uk/spm) * Copyright 2018 * Kevin Bronik */ #include "matrix.h" #include "mex.h" #include "gpu/mxGPUArray.h" #include <cusparse_v2.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include "SPARSEHELPER.h" #include "ERRORCHK.h" #include <omp.h> // Input Arguments #define INPUTDENSEA prhs[0] #define INPUTSPARSEB prhs[1] #define ALPHA prhs[2] //#define BETA prhs[3] // Output Arguments #define OUTPUTMATRIX plhs[0] extern "C" static void mexCuMatlab_sparseDSR(int nlhs, mxArray *plhs[], int nrhs, mxArray const *prhs[]) { int nDevices; hipError_t errCode =hipGetDeviceCount(&nDevices); //int nDevices; //hipGetDeviceCount(&nDevices); if (errCode != hipSuccess){ printf("Error! No CUDA devices found! \n"); return; } char const * const InputErrMsg = "Invalid input to MEX file, number of input arguments must be three."; char const * const OutputErrMsg = "Invalid output to MEX file, number of output arguments must be one."; if ((nrhs!=3)) { mexErrMsgIdAndTxt("MATLAB:mexatexit:invalidInput", InputErrMsg); } if ((nlhs!=1)) { mexErrMsgIdAndTxt("MATLAB:mexatexit:invalidInput", OutputErrMsg); } char *input_buf0; input_buf0 = mxArrayToString(INPUTDENSEA); if ((mxIsChar(INPUTDENSEA))){ mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Input(FIRST ARGUMENT) must be array, or gpuArray object not %s\n",input_buf0); } char *input_buf1; input_buf1 = mxArrayToString(INPUTSPARSEB); if ((mxIsChar(INPUTSPARSEB))){ mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Input(SECOND ARGUMENT) must be array, or gpuArray object not %s\n",input_buf1); } char *input_buf2; input_buf2 = mxArrayToString(ALPHA); if ((mxIsChar(ALPHA))){ mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Input(THIRD ARGUMENT) must be scalar not %s\n",input_buf2); } if (mxIsGPUArray(INPUTDENSEA) && mxIsGPUArray(INPUTSPARSEB)) { mxGPUArray const *INPUTDENSEGPUA; mxGPUArray const *INPUTSPARSEGPUB; /* Initialize the MathWorks GPU API. 
*/ mxInitGPU(); INPUTDENSEGPUA = mxGPUCreateFromMxArray(INPUTDENSEA); INPUTSPARSEGPUB = mxGPUCreateFromMxArray(INPUTSPARSEB); if((!mxGPUIsSparse(INPUTDENSEGPUA))&& (mxGPUIsSparse(INPUTSPARSEGPUB)) ){ const mwSize *dimsGPUSA; dimsGPUSA=mxGPUGetDimensions(INPUTDENSEGPUA); int numARows, numAColumns; numARows = (int)dimsGPUSA[0]; /* gets number of rows of A */ numAColumns = (int)dimsGPUSA[1]; /* gets number of columns of A */ const mwSize *dimsGPUSB; dimsGPUSB=mxGPUGetDimensions(INPUTSPARSEGPUB); int numBRows, numBColumns; numBRows = (int)dimsGPUSB[0]; /* gets number of rows of B */ numBColumns = (int)dimsGPUSB[1]; /* gets number of columns of B */ if ( (((numARows!= 1) && (numAColumns!= 1))) ||(((numBRows!= 1) && (numBColumns!= 1)))) { mxGPUDestroyGPUArray(INPUTDENSEGPUA); mxGPUDestroyGPUArray(INPUTSPARSEGPUB); mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Invalid input to MEX file, first/second arguments must be dense/sparse column vector."); } if ( mxGPUGetNumberOfElements(INPUTDENSEGPUA)!=mxGPUGetNumberOfElements(INPUTSPARSEGPUB)) { mxGPUDestroyGPUArray(INPUTDENSEGPUA); mxGPUDestroyGPUArray(INPUTSPARSEGPUB); mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Invalid input to MEX file, row number of dense vector(first argument) must be equal to row numbers of sparse vector(second argument)."); } if ( (numARows!=numBRows)&& (numAColumns!=numBColumns) ) { mxGPUDestroyGPUArray(INPUTDENSEGPUA); mxGPUDestroyGPUArray(INPUTSPARSEGPUB); mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Invalid input to MEX file, dense vector (first argument) and sparse vector(second argument) must be both row or column vectors."); } const double alpha= mxGetScalar(ALPHA); mwIndex nnz2; mxArray * tempx = mxGPUCreateMxArrayOnCPU(INPUTSPARSEGPUB); nnz2 = *(mxGetJc(tempx) + numBColumns); int nnz= static_cast<int> (nnz2); int *pointerrow =0; mxArray *row_sort; if (numBColumns == 1) { row_sort =mxCreateNumericMatrix(nnz, 1, mxINT32_CLASS, mxREAL); pointerrow = (int *)mxGetInt32s(row_sort); Ir_DataGetSetIXY(tempx , pointerrow, nnz); } if (numBRows == 1) { row_sort =mxCreateNumericMatrix(nnz, 1, mxINT32_CLASS, mxREAL); pointerrow = (int *)mxGetInt32s(row_sort); Jc_GetSetIXY(tempx , pointerrow); } double *pointerval = (double *)mxGetDoubles(tempx); size_t pivot_dimensionsrow[1] = {nnz}; size_t pivot_dimensionsvalue[1] = {nnz}; mxGPUArray *row_sortA = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsrow, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE); int *xrow_sortA=(int *)mxGPUGetData(row_sortA); gpuErrchk(hipMemcpy(xrow_sortA, pointerrow, nnz * sizeof(*xrow_sortA), hipMemcpyHostToDevice)); mxGPUArray *val_sortA = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsvalue, mxDOUBLE_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE); double *xval_sortA=(double*)mxGPUGetData(val_sortA); gpuErrchk(hipMemcpy(xval_sortA, pointerval, nnz * sizeof(*xval_sortA), hipMemcpyHostToDevice)); hipsparseHandle_t handle; cusparseSafeCall(hipsparseCreate(&handle)); hipsparseMatDescr_t descrA; cusparseSafeCall(hipsparseCreateMatDescr(&descrA)); hipsparseSetMatType(descrA, HIPSPARSE_MATRIX_TYPE_GENERAL); hipsparseSetMatIndexBase(descrA, HIPSPARSE_INDEX_BASE_ONE); double const *d_A_dense; d_A_dense = (double const *)(mxGPUGetDataReadOnly(INPUTDENSEGPUA)); mxGPUDestroyGPUArray(INPUTDENSEGPUA); mxGPUDestroyGPUArray(INPUTSPARSEGPUB); mxDestroyArray(row_sort); mxDestroyArray(tempx); double *VALOUT=0; mxGPUArray *VAL; if (numAColumns == 1) { size_t pivot_dimensionsvalueV[1] = {numARows}; VAL = mxGPUCreateGPUArray(1, (mwSize*) 
pivot_dimensionsvalueV, mxDOUBLE_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE); VALOUT = (double *)mxGPUGetData(VAL); gpuErrchk(hipMemcpy(VALOUT, d_A_dense, sizeof(double) * numARows , hipMemcpyDeviceToDevice)); } if (numARows == 1) { size_t pivot_dimensionsvalueV[2] = {1,numBColumns}; VAL = mxGPUCreateGPUArray(2, (mwSize*) pivot_dimensionsvalueV, mxDOUBLE_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE); VALOUT = (double *)mxGPUGetData(VAL); gpuErrchk(hipMemcpy(VALOUT, d_A_dense, sizeof(double) * numBColumns , hipMemcpyDeviceToDevice)); } cusparseSafeCall(cusparseDaxpyi( handle, nnz, &alpha, xval_sortA, xrow_sortA, VALOUT, HIPSPARSE_INDEX_BASE_ONE)); mxGPUDestroyGPUArray(row_sortA); mxGPUDestroyGPUArray(val_sortA); OUTPUTMATRIX = mxGPUCreateMxArrayOnGPU(VAL); mxGPUDestroyGPUArray(VAL); hipsparseDestroyMatDescr(descrA); hipsparseDestroy(handle); } else{ mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Incorrect input arguments! %s\n"); } } //////////////////////////////////////////////////////////////////////////////////// else if (!(mxIsGPUArray(INPUTDENSEA)) && !(mxIsGPUArray(INPUTSPARSEB))){ // if ((mxGetClassID(INPUTSPARSEA) != mxDOUBLE_CLASS) || (mxGetClassID(INPUTSPARSEB) != mxDOUBLE_CLASS)) { // mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", // "Invalid input to MEX file, input(FIRST and SECOND ARGUMENTS) must be double precision."); // } if((!mxIsSparse(INPUTDENSEA))&& (mxIsSparse(INPUTSPARSEB)) ){ mxInitGPU(); const mwSize *dimsCPUA; dimsCPUA=mxGetDimensions(INPUTDENSEA); int numARows = (int)dimsCPUA[0]; /* gets number of rows of A */ int numAColumns = (int)dimsCPUA[1]; /* gets number of columns of A */ const mwSize *dimsCPUB; dimsCPUB=mxGetDimensions(INPUTSPARSEB); int numBRows = (int)dimsCPUB[0]; /* gets number of rows of B */ int numBColumns = (int)dimsCPUB[1]; /* gets number of columns of B */ if ( (((numARows!= 1) && (numAColumns!= 1))) ||(((numBRows!= 1) && (numBColumns!= 1)))) { mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Invalid input to MEX file, first/second arguments must be dense/sparse column vector."); } if ( mxGetNumberOfElements(INPUTDENSEA)!=mxGetNumberOfElements(INPUTSPARSEB)) { mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Invalid input to MEX file, row number of dense vector(first argument) must be equal to row numbers of sparse vector(second argument)."); } if ( (numARows!=numBRows)&& (numAColumns!=numBColumns) ) { mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Invalid input to MEX file, dense vector (first argument) and sparse vector(second argument) must be both row or column vectors."); } const double alpha= mxGetScalar(ALPHA); mwIndex nnz2; nnz2 = *(mxGetJc(INPUTSPARSEB) + numBColumns); int nnz= static_cast<int> (nnz2); int *pointerrow =0; mxArray *row_sort; if (numBColumns == 1) { row_sort =mxCreateNumericMatrix(nnz, 1, mxINT32_CLASS, mxREAL); pointerrow = (int *)mxGetInt32s(row_sort); Ir_DataGetSetIXY(INPUTSPARSEB , pointerrow, nnz); } if (numBRows == 1) { row_sort =mxCreateNumericMatrix(nnz, 1, mxINT32_CLASS, mxREAL); pointerrow = (int *)mxGetInt32s(row_sort); Jc_GetSetIXY(INPUTSPARSEB , pointerrow); } double *pointerval = (double *)mxGetDoubles(INPUTSPARSEB); size_t pivot_dimensionsrow[1] = {nnz}; size_t pivot_dimensionsvalue[1] = {nnz}; mxGPUArray *row_sortA = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsrow, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE); int *xrow_sortA=(int *)mxGPUGetData(row_sortA); gpuErrchk(hipMemcpy(xrow_sortA, pointerrow, nnz * sizeof(*xrow_sortA), hipMemcpyHostToDevice)); mxGPUArray *val_sortA = 
mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsvalue, mxDOUBLE_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE); double *xval_sortA=(double*)mxGPUGetData(val_sortA); gpuErrchk(hipMemcpy(xval_sortA, pointerval, nnz * sizeof(*xval_sortA), hipMemcpyHostToDevice)); hipsparseHandle_t handle; cusparseSafeCall(hipsparseCreate(&handle)); hipsparseMatDescr_t descrA; cusparseSafeCall(hipsparseCreateMatDescr(&descrA)); hipsparseSetMatType(descrA, HIPSPARSE_MATRIX_TYPE_GENERAL); hipsparseSetMatIndexBase(descrA, HIPSPARSE_INDEX_BASE_ONE); double *h_A_dense1; h_A_dense1 = (double *)mxGetDoubles(INPUTDENSEA); mxDestroyArray(row_sort); double *VALOUT=0; mxGPUArray *VAL; if (numAColumns == 1) { size_t pivot_dimensionsvalueV[1] = {numARows}; VAL = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsvalueV, mxDOUBLE_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE); VALOUT = (double *)mxGPUGetData(VAL); gpuErrchk(hipMemcpy(VALOUT, h_A_dense1, sizeof(double) * numARows , hipMemcpyHostToDevice)); } if (numARows == 1) { size_t pivot_dimensionsvalueV[2] = {1,numBColumns}; VAL = mxGPUCreateGPUArray(2, (mwSize*) pivot_dimensionsvalueV, mxDOUBLE_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE); VALOUT = (double *)mxGPUGetData(VAL); gpuErrchk(hipMemcpy(VALOUT, h_A_dense1, sizeof(double) * numBColumns , hipMemcpyHostToDevice)); } cusparseSafeCall(cusparseDaxpyi( handle, nnz, &alpha, xval_sortA, xrow_sortA, VALOUT, HIPSPARSE_INDEX_BASE_ONE)); mxGPUDestroyGPUArray(row_sortA); mxGPUDestroyGPUArray(val_sortA); OUTPUTMATRIX = mxGPUCreateMxArrayOnGPU(VAL); mxGPUDestroyGPUArray(VAL); hipsparseDestroyMatDescr(descrA); hipsparseDestroy(handle); } else{ mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Incorrect input arguments! %s\n"); } } // else{ mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Incorrect input arguments! %s\n"); } }
2f75b8a617ef4574518a2bdd834062b65f2a4142.cu
/* * This CUDA-Cusparse code can handle/work with any type of the input mxArrays, * GPUarray or standard matlab CPU array as input {prhs[0]/prhs[1] := mxGPUArray or CPU Array}[double/complex double] * Sparse/Dense vector-sparse/dense vector addition Z=CuMatlab_addV(Sparse/Dense(X),Sparse/Dense(Y), alpha). * Z= alpha*X+Y * Developed at UCL, Institute of Neurology, 12 Queen Square, WC1N 3AR, London * Wellcome Trust Centre for Neuroimaging * Part of the project SPM(http://www.fil.ion.ucl.ac.uk/spm) * Copyright 2018 * Kevin Bronik */ #include "matrix.h" #include "mex.h" #include "gpu/mxGPUArray.h" #include <cusparse_v2.h> #include <cuda.h> #include <cuda_runtime.h> #include "SPARSEHELPER.h" #include "ERRORCHK.h" #include <omp.h> // Input Arguments #define INPUTDENSEA prhs[0] #define INPUTSPARSEB prhs[1] #define ALPHA prhs[2] //#define BETA prhs[3] // Output Arguments #define OUTPUTMATRIX plhs[0] extern "C" static void mexCuMatlab_sparseDSR(int nlhs, mxArray *plhs[], int nrhs, mxArray const *prhs[]) { int nDevices; cudaError_t errCode =cudaGetDeviceCount(&nDevices); //int nDevices; //cudaGetDeviceCount(&nDevices); if (errCode != cudaSuccess){ printf("Error! No CUDA devices found! \n"); return; } char const * const InputErrMsg = "Invalid input to MEX file, number of input arguments must be three."; char const * const OutputErrMsg = "Invalid output to MEX file, number of output arguments must be one."; if ((nrhs!=3)) { mexErrMsgIdAndTxt("MATLAB:mexatexit:invalidInput", InputErrMsg); } if ((nlhs!=1)) { mexErrMsgIdAndTxt("MATLAB:mexatexit:invalidInput", OutputErrMsg); } char *input_buf0; input_buf0 = mxArrayToString(INPUTDENSEA); if ((mxIsChar(INPUTDENSEA))){ mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Input(FIRST ARGUMENT) must be array, or gpuArray object not %s\n",input_buf0); } char *input_buf1; input_buf1 = mxArrayToString(INPUTSPARSEB); if ((mxIsChar(INPUTSPARSEB))){ mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Input(SECOND ARGUMENT) must be array, or gpuArray object not %s\n",input_buf1); } char *input_buf2; input_buf2 = mxArrayToString(ALPHA); if ((mxIsChar(ALPHA))){ mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Input(THIRD ARGUMENT) must be scalar not %s\n",input_buf2); } if (mxIsGPUArray(INPUTDENSEA) && mxIsGPUArray(INPUTSPARSEB)) { mxGPUArray const *INPUTDENSEGPUA; mxGPUArray const *INPUTSPARSEGPUB; /* Initialize the MathWorks GPU API. 
*/ mxInitGPU(); INPUTDENSEGPUA = mxGPUCreateFromMxArray(INPUTDENSEA); INPUTSPARSEGPUB = mxGPUCreateFromMxArray(INPUTSPARSEB); if((!mxGPUIsSparse(INPUTDENSEGPUA))&& (mxGPUIsSparse(INPUTSPARSEGPUB)) ){ const mwSize *dimsGPUSA; dimsGPUSA=mxGPUGetDimensions(INPUTDENSEGPUA); int numARows, numAColumns; numARows = (int)dimsGPUSA[0]; /* gets number of rows of A */ numAColumns = (int)dimsGPUSA[1]; /* gets number of columns of A */ const mwSize *dimsGPUSB; dimsGPUSB=mxGPUGetDimensions(INPUTSPARSEGPUB); int numBRows, numBColumns; numBRows = (int)dimsGPUSB[0]; /* gets number of rows of B */ numBColumns = (int)dimsGPUSB[1]; /* gets number of columns of B */ if ( (((numARows!= 1) && (numAColumns!= 1))) ||(((numBRows!= 1) && (numBColumns!= 1)))) { mxGPUDestroyGPUArray(INPUTDENSEGPUA); mxGPUDestroyGPUArray(INPUTSPARSEGPUB); mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Invalid input to MEX file, first/second arguments must be dense/sparse column vector."); } if ( mxGPUGetNumberOfElements(INPUTDENSEGPUA)!=mxGPUGetNumberOfElements(INPUTSPARSEGPUB)) { mxGPUDestroyGPUArray(INPUTDENSEGPUA); mxGPUDestroyGPUArray(INPUTSPARSEGPUB); mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Invalid input to MEX file, row number of dense vector(first argument) must be equal to row numbers of sparse vector(second argument)."); } if ( (numARows!=numBRows)&& (numAColumns!=numBColumns) ) { mxGPUDestroyGPUArray(INPUTDENSEGPUA); mxGPUDestroyGPUArray(INPUTSPARSEGPUB); mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Invalid input to MEX file, dense vector (first argument) and sparse vector(second argument) must be both row or column vectors."); } const double alpha= mxGetScalar(ALPHA); mwIndex nnz2; mxArray * tempx = mxGPUCreateMxArrayOnCPU(INPUTSPARSEGPUB); nnz2 = *(mxGetJc(tempx) + numBColumns); int nnz= static_cast<int> (nnz2); int *pointerrow =0; mxArray *row_sort; if (numBColumns == 1) { row_sort =mxCreateNumericMatrix(nnz, 1, mxINT32_CLASS, mxREAL); pointerrow = (int *)mxGetInt32s(row_sort); Ir_DataGetSetIXY(tempx , pointerrow, nnz); } if (numBRows == 1) { row_sort =mxCreateNumericMatrix(nnz, 1, mxINT32_CLASS, mxREAL); pointerrow = (int *)mxGetInt32s(row_sort); Jc_GetSetIXY(tempx , pointerrow); } double *pointerval = (double *)mxGetDoubles(tempx); size_t pivot_dimensionsrow[1] = {nnz}; size_t pivot_dimensionsvalue[1] = {nnz}; mxGPUArray *row_sortA = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsrow, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE); int *xrow_sortA=(int *)mxGPUGetData(row_sortA); gpuErrchk(cudaMemcpy(xrow_sortA, pointerrow, nnz * sizeof(*xrow_sortA), cudaMemcpyHostToDevice)); mxGPUArray *val_sortA = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsvalue, mxDOUBLE_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE); double *xval_sortA=(double*)mxGPUGetData(val_sortA); gpuErrchk(cudaMemcpy(xval_sortA, pointerval, nnz * sizeof(*xval_sortA), cudaMemcpyHostToDevice)); cusparseHandle_t handle; cusparseSafeCall(cusparseCreate(&handle)); cusparseMatDescr_t descrA; cusparseSafeCall(cusparseCreateMatDescr(&descrA)); cusparseSetMatType(descrA, CUSPARSE_MATRIX_TYPE_GENERAL); cusparseSetMatIndexBase(descrA, CUSPARSE_INDEX_BASE_ONE); double const *d_A_dense; d_A_dense = (double const *)(mxGPUGetDataReadOnly(INPUTDENSEGPUA)); mxGPUDestroyGPUArray(INPUTDENSEGPUA); mxGPUDestroyGPUArray(INPUTSPARSEGPUB); mxDestroyArray(row_sort); mxDestroyArray(tempx); double *VALOUT=0; mxGPUArray *VAL; if (numAColumns == 1) { size_t pivot_dimensionsvalueV[1] = {numARows}; VAL = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsvalueV, 
mxDOUBLE_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE); VALOUT = (double *)mxGPUGetData(VAL); gpuErrchk(cudaMemcpy(VALOUT, d_A_dense, sizeof(double) * numARows , cudaMemcpyDeviceToDevice)); } if (numARows == 1) { size_t pivot_dimensionsvalueV[2] = {1,numBColumns}; VAL = mxGPUCreateGPUArray(2, (mwSize*) pivot_dimensionsvalueV, mxDOUBLE_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE); VALOUT = (double *)mxGPUGetData(VAL); gpuErrchk(cudaMemcpy(VALOUT, d_A_dense, sizeof(double) * numBColumns , cudaMemcpyDeviceToDevice)); } cusparseSafeCall(cusparseDaxpyi( handle, nnz, &alpha, xval_sortA, xrow_sortA, VALOUT, CUSPARSE_INDEX_BASE_ONE)); mxGPUDestroyGPUArray(row_sortA); mxGPUDestroyGPUArray(val_sortA); OUTPUTMATRIX = mxGPUCreateMxArrayOnGPU(VAL); mxGPUDestroyGPUArray(VAL); cusparseDestroyMatDescr(descrA); cusparseDestroy(handle); } else{ mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Incorrect input arguments! %s\n"); } } //////////////////////////////////////////////////////////////////////////////////// else if (!(mxIsGPUArray(INPUTDENSEA)) && !(mxIsGPUArray(INPUTSPARSEB))){ // if ((mxGetClassID(INPUTSPARSEA) != mxDOUBLE_CLASS) || (mxGetClassID(INPUTSPARSEB) != mxDOUBLE_CLASS)) { // mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", // "Invalid input to MEX file, input(FIRST and SECOND ARGUMENTS) must be double precision."); // } if((!mxIsSparse(INPUTDENSEA))&& (mxIsSparse(INPUTSPARSEB)) ){ mxInitGPU(); const mwSize *dimsCPUA; dimsCPUA=mxGetDimensions(INPUTDENSEA); int numARows = (int)dimsCPUA[0]; /* gets number of rows of A */ int numAColumns = (int)dimsCPUA[1]; /* gets number of columns of A */ const mwSize *dimsCPUB; dimsCPUB=mxGetDimensions(INPUTSPARSEB); int numBRows = (int)dimsCPUB[0]; /* gets number of rows of B */ int numBColumns = (int)dimsCPUB[1]; /* gets number of columns of B */ if ( (((numARows!= 1) && (numAColumns!= 1))) ||(((numBRows!= 1) && (numBColumns!= 1)))) { mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Invalid input to MEX file, first/second arguments must be dense/sparse column vector."); } if ( mxGetNumberOfElements(INPUTDENSEA)!=mxGetNumberOfElements(INPUTSPARSEB)) { mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Invalid input to MEX file, row number of dense vector(first argument) must be equal to row numbers of sparse vector(second argument)."); } if ( (numARows!=numBRows)&& (numAColumns!=numBColumns) ) { mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Invalid input to MEX file, dense vector (first argument) and sparse vector(second argument) must be both row or column vectors."); } const double alpha= mxGetScalar(ALPHA); mwIndex nnz2; nnz2 = *(mxGetJc(INPUTSPARSEB) + numBColumns); int nnz= static_cast<int> (nnz2); int *pointerrow =0; mxArray *row_sort; if (numBColumns == 1) { row_sort =mxCreateNumericMatrix(nnz, 1, mxINT32_CLASS, mxREAL); pointerrow = (int *)mxGetInt32s(row_sort); Ir_DataGetSetIXY(INPUTSPARSEB , pointerrow, nnz); } if (numBRows == 1) { row_sort =mxCreateNumericMatrix(nnz, 1, mxINT32_CLASS, mxREAL); pointerrow = (int *)mxGetInt32s(row_sort); Jc_GetSetIXY(INPUTSPARSEB , pointerrow); } double *pointerval = (double *)mxGetDoubles(INPUTSPARSEB); size_t pivot_dimensionsrow[1] = {nnz}; size_t pivot_dimensionsvalue[1] = {nnz}; mxGPUArray *row_sortA = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsrow, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE); int *xrow_sortA=(int *)mxGPUGetData(row_sortA); gpuErrchk(cudaMemcpy(xrow_sortA, pointerrow, nnz * sizeof(*xrow_sortA), cudaMemcpyHostToDevice)); mxGPUArray *val_sortA = mxGPUCreateGPUArray(1, 
(mwSize*) pivot_dimensionsvalue, mxDOUBLE_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE); double *xval_sortA=(double*)mxGPUGetData(val_sortA); gpuErrchk(cudaMemcpy(xval_sortA, pointerval, nnz * sizeof(*xval_sortA), cudaMemcpyHostToDevice)); cusparseHandle_t handle; cusparseSafeCall(cusparseCreate(&handle)); cusparseMatDescr_t descrA; cusparseSafeCall(cusparseCreateMatDescr(&descrA)); cusparseSetMatType(descrA, CUSPARSE_MATRIX_TYPE_GENERAL); cusparseSetMatIndexBase(descrA, CUSPARSE_INDEX_BASE_ONE); double *h_A_dense1; h_A_dense1 = (double *)mxGetDoubles(INPUTDENSEA); mxDestroyArray(row_sort); double *VALOUT=0; mxGPUArray *VAL; if (numAColumns == 1) { size_t pivot_dimensionsvalueV[1] = {numARows}; VAL = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsvalueV, mxDOUBLE_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE); VALOUT = (double *)mxGPUGetData(VAL); gpuErrchk(cudaMemcpy(VALOUT, h_A_dense1, sizeof(double) * numARows , cudaMemcpyHostToDevice)); } if (numARows == 1) { size_t pivot_dimensionsvalueV[2] = {1,numBColumns}; VAL = mxGPUCreateGPUArray(2, (mwSize*) pivot_dimensionsvalueV, mxDOUBLE_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE); VALOUT = (double *)mxGPUGetData(VAL); gpuErrchk(cudaMemcpy(VALOUT, h_A_dense1, sizeof(double) * numBColumns , cudaMemcpyHostToDevice)); } cusparseSafeCall(cusparseDaxpyi( handle, nnz, &alpha, xval_sortA, xrow_sortA, VALOUT, CUSPARSE_INDEX_BASE_ONE)); mxGPUDestroyGPUArray(row_sortA); mxGPUDestroyGPUArray(val_sortA); OUTPUTMATRIX = mxGPUCreateMxArrayOnGPU(VAL); mxGPUDestroyGPUArray(VAL); cusparseDestroyMatDescr(descrA); cusparseDestroy(handle); } else{ mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Incorrect input arguments! %s\n"); } } // else{ mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Incorrect input arguments! %s\n"); } }
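The numerical core of the CuMatlab_addV MEX file above is a single cusparseDaxpyi call, y = alpha * x_sparse + y, applied to the dense vector copied into VAL. The sketch below exercises that call without the MATLAB/mxGPUArray plumbing; sizes and values are arbitrary test data, and cusparseDaxpyi is a legacy routine that newer CUSPARSE releases deprecate in favor of the generic axpby interface.

#include <cstdio>
#include <cuda_runtime.h>
#include <cusparse_v2.h>

// Standalone y = alpha * x_sparse + y, mirroring the cusparseDaxpyi call above.
// Indices are 1-based to match CUSPARSE_INDEX_BASE_ONE as used in the MEX file.
int main() {
  const int n = 8, nnz = 3;
  const double alpha = 2.0;
  int h_ind[nnz] = {1, 4, 7};
  double h_val[nnz] = {10.0, 20.0, 30.0};
  double h_y[n] = {1, 1, 1, 1, 1, 1, 1, 1};

  int* d_ind; double *d_val, *d_y;
  cudaMalloc(&d_ind, nnz * sizeof(int));
  cudaMalloc(&d_val, nnz * sizeof(double));
  cudaMalloc(&d_y, n * sizeof(double));
  cudaMemcpy(d_ind, h_ind, nnz * sizeof(int), cudaMemcpyHostToDevice);
  cudaMemcpy(d_val, h_val, nnz * sizeof(double), cudaMemcpyHostToDevice);
  cudaMemcpy(d_y, h_y, n * sizeof(double), cudaMemcpyHostToDevice);

  cusparseHandle_t handle;
  cusparseCreate(&handle);
  cusparseDaxpyi(handle, nnz, &alpha, d_val, d_ind, d_y, CUSPARSE_INDEX_BASE_ONE);

  cudaMemcpy(h_y, d_y, n * sizeof(double), cudaMemcpyDeviceToHost);
  for (int i = 0; i < n; ++i) printf("y[%d] = %f\n", i, h_y[i]);  // expect y[0]=21, y[3]=41, y[6]=61

  cusparseDestroy(handle);
  cudaFree(d_ind); cudaFree(d_val); cudaFree(d_y);
  return 0;
}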
66e471dc892f347818343cd04f4001fe491436a8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* CUDA implementation of Gauss-Jordan elimination algorithm. * * Gauss-Jordan elimination method * =============================== * * This function solves a set of linear equations using the Gauss-Jordan elimination method. * Considering a set of N equations with N unknowns, this can be written in matrix form as * an NxN matrix of coefficients and a Nx1 column vector of right-hand side values. * * For example, consider the following problem with 3 equations and 3 unknowns (N=3): * * A x + B y + C z = MM * D x + E y + F z = NN * G x + H y + J z = PP * * We can write this as follows in matrix form: * * [ A B C ] [ x ] = [ MM ] * [ D E F ] [ y ] = [ NN ] * [ G H I ] [ z ] = [ PP ] * * or, [A]*[X] = [B] where [A] is the matrix of coefficients and [B] is the vector of * right-hand side values. * * The Gauss Jordan elimiation method solves the system of equations in the following * manner. First, we form the augmented matrix (A|B): * * [ A B C | MM ] * [ D E F | NN ] * [ G H I | PP ] * * and then the augmented matrix is manipulated until its left side has the reduced * row-echelon form. That is to say that any individual row may be multiplied * by a scalar factor, and any linear combination of rows may be added to another * row. Finally, two rows may be swapped without affecting the solution. * * When the manipulations are complete and the left side of the matrix has the desired * form, the right side then corresponds to the solution of the system. * * * Description of the cuda_gaussjordan function * ============================================ * * This algorithm is designed to perform many solutions of the Gauss Jordan elimination * method in parallel. One limitation of the algorithm implemented here is that for * each solution the number of equations and unknowns (N) must be identical. * * Parameters: * * alpha: Coefficients matrices. The matrix of coefficients for a single solution is * a vector of NxN, where N is the number of equations. This array stores the * coefficients for the entire set of M input problems, concatenated end to end, * and hence the total size of the array is MxNxN. * * beta: Vector of right hand side values, concatenated together for all input problems. * For a set of M inputs, the size of the vector is MxN. Upon completion, this * vector contains the results vector X for each solution. * * skip_calculation: An input vector which allows the calculation to be skipped for * a particular solution. For a set of M inputs, the size of this * vector is M. * * singular: An output vector used to report whether a given solution is singular. For * a set of M inputs, this vector has size M. Memory needs to be allocated * by the calling the function. * * n_equations: The number of equations and unknowns for a single solution. This is * equal to the size N. * * n_equations_pow2: The next highest power of 2 greater than n_equations. * * * Calling the cuda_gaussjordan function * ===================================== * * When calling the function, the blocks and threads must be set up correctly, as well * as the shared memory space, as shown in the following example code. 
* * dim3 threads(1, 1, 1); * dim3 blocks(1, 1, 1); * * threads.x = n_equations + 1; * threads.y = n_equations; * blocks.x = n_solutions; * blocks.y = 1; * * int const shared_size = sizeof(float) * * ( (threads.x * threads.y) + n_parameters_pow2 + n_parameters_pow2 ); * * int * singular; * CUDA_CHECK_STATUS(hipMalloc((void**)&singular, n_solutions * sizeof(int))); * * cuda_gaussjordan<<< blocks, threads, shared_size >>>( * alpha, * beta, * skip_calculation, * singular, * n_equations, * n_equations_pow2); * */ #include "cuda_gaussjordan.cuh" __global__ void cuda_gaussjordan( float * delta, float const * beta, float const * alpha, int const * skip_calculation, int * singular, std::size_t const n_equations, std::size_t const n_equations_pow2) { extern __shared__ float extern_array[]; //shared memory between threads of a single block, //used for storing the calculation_matrix, the //abs_row vector, and the abs_row_index vector // In this routine we will store the augmented matrix (A|B), referred to here // as the calculation matrix in a shared memory space which is visible to all // threads within a block. Also stored in shared memory are two vectors which // are used to find the largest element in each row (the pivot). These vectors // are called abs_row and abs_row_index. // // Sizes of data stored in shared memory: // // calculation_matrix: n_equations * (n_equations+1) // abs_row: n_equations_pow2 // abs_row_index: n_equations_pow2 // // Note that each thread represents an element of the augmented matrix, with // the column and row indicated by the x and y index of the thread. Each // solution is calculated within one block, and the solution index is the // block index x value. int const col_index = threadIdx.x; //column index in the calculation_matrix int const row_index = threadIdx.y; //row index in the calculation_matrix int const solution_index = blockIdx.x; int const n_col = blockDim.x; //number of columns in calculation matrix (=threads.x) int const n_row = blockDim.y; //number of rows in calculation matrix (=threads.y) int const alpha_size = blockDim.y * blockDim.y; //number of entries in alpha matrix for one solution (NxN) if (skip_calculation[solution_index]) return; float p; //local variable used in pivot calculation float * calculation_matrix = extern_array; //point to the shared memory float * abs_row = extern_array + n_equations * (n_equations + 1); //abs_row is located after the calculation_matrix //within the shared memory int * abs_row_index = (int *)(abs_row + n_equations_pow2); //abs_row_index is located after abs_row // //note that although the shared memory is defined as //float, we are storing data of type int in this //part of the shared memory //initialize the singular vector if (col_index == 0 && row_index == 0) { singular[solution_index] = 0; } //initialize abs_row and abs_row_index, using only the threads on the diagonal if (col_index == row_index) { abs_row[col_index + (n_equations_pow2 - n_equations)] = 0.0f; abs_row_index[col_index + (n_equations_pow2 - n_equations)] = col_index + (n_equations_pow2 - n_equations); } //initialize the calculation_matrix (alpha and beta, concatenated, for one solution) if (col_index != n_equations) calculation_matrix[row_index*n_col + col_index] = alpha[solution_index * alpha_size + row_index * n_equations + col_index]; else calculation_matrix[row_index*n_col + col_index] = beta[solution_index * n_equations + row_index]; //wait for thread synchronization __syncthreads(); //start of main outer loop over the rows of the calculation 
matrix for (int current_row = 0; current_row < n_equations; current_row++) { // work in only one row, skipping the last column if (row_index == current_row && col_index != n_equations) { //save the absolute values of the current row abs_row[col_index] = abs(calculation_matrix[row_index * n_col + col_index]); //save the column indices abs_row_index[col_index] = col_index; __threadfence(); //find the largest absolute value in the current row and write its index in abs_row_index[0] for (int n = 2; n <= n_equations_pow2; n = n * 2) { if (col_index < (n_equations_pow2 / n)) { if (abs_row[abs_row_index[col_index]] < abs_row[abs_row_index[col_index + (n_equations_pow2 / n)]]) { abs_row_index[col_index] = abs_row_index[col_index + (n_equations_pow2 / n)]; } } } } __syncthreads(); //singularity check - if all values in the row are zero, no solution exists if (row_index == current_row && col_index != n_equations) { if (abs_row[abs_row_index[0]] == 0.0f) { singular[solution_index] = 1; } } //devide the row by the biggest value in the row if (row_index == current_row) { calculation_matrix[row_index * n_col + col_index] = calculation_matrix[row_index * n_col + col_index] / calculation_matrix[row_index * n_col + abs_row_index[0]]; } __syncthreads(); //The value of the largest element of the current row was found, and then current //row was divided by this value such that the largest value of the current row //is equal to one. // //Next, the matrix is manipulated to reduce to zero all other entries in the column //in which the largest value was found. To do this, the values in the current row //are scaled appropriately and substracted from the other rows of the matrix. // //For each element of the matrix that is not in the current row, calculate the value //to be subtracted and let each thread store this value in the scalar variable p. p = calculation_matrix[current_row * n_col + col_index] * calculation_matrix[row_index * n_col + abs_row_index[0]]; __syncthreads(); if (row_index != current_row) { calculation_matrix[row_index * n_col + col_index] = calculation_matrix[row_index * n_col + col_index] - p; } __syncthreads(); } //At this point, if the solution exists, the calculation matrix has been reduced to the //identity matrix on the left side, and the solution vector on the right side. However //we have not swapped rows during the procedure, so the identity matrix is out of order. // //For example, starting with the following augmented matrix as input: // // [ 3 2 -4 | 4 ] // [ 2 3 3 | 15 ] // [ 5 -3 1 | 14 ] // //we will obtain: // // [ 0 0 1 | 2 ] // [ 0 1 0 | 1 ] // [ 1 0 0 | 3 ] // //Which needs to be re-arranged to obtain the correct solution vector. In the final //step, each thread checks to see if its value equals 1, and if so it assigns the value //in its rightmost column to the appropriate entry in the beta vector. The solution is //stored in beta upon completetion. if (col_index != n_equations && calculation_matrix[row_index * n_col + col_index] == 1) delta[n_row * solution_index + col_index] = calculation_matrix[row_index * n_col + n_equations]; __syncthreads(); }
66e471dc892f347818343cd04f4001fe491436a8.cu
/* CUDA implementation of Gauss-Jordan elimination algorithm. * * Gauss-Jordan elimination method * =============================== * * This function solves a set of linear equations using the Gauss-Jordan elimination method. * Considering a set of N equations with N unknowns, this can be written in matrix form as * an NxN matrix of coefficients and a Nx1 column vector of right-hand side values. * * For example, consider the following problem with 3 equations and 3 unknowns (N=3): * * A x + B y + C z = MM * D x + E y + F z = NN * G x + H y + J z = PP * * We can write this as follows in matrix form: * * [ A B C ] [ x ] = [ MM ] * [ D E F ] [ y ] = [ NN ] * [ G H I ] [ z ] = [ PP ] * * or, [A]*[X] = [B] where [A] is the matrix of coefficients and [B] is the vector of * right-hand side values. * * The Gauss Jordan elimiation method solves the system of equations in the following * manner. First, we form the augmented matrix (A|B): * * [ A B C | MM ] * [ D E F | NN ] * [ G H I | PP ] * * and then the augmented matrix is manipulated until its left side has the reduced * row-echelon form. That is to say that any individual row may be multiplied * by a scalar factor, and any linear combination of rows may be added to another * row. Finally, two rows may be swapped without affecting the solution. * * When the manipulations are complete and the left side of the matrix has the desired * form, the right side then corresponds to the solution of the system. * * * Description of the cuda_gaussjordan function * ============================================ * * This algorithm is designed to perform many solutions of the Gauss Jordan elimination * method in parallel. One limitation of the algorithm implemented here is that for * each solution the number of equations and unknowns (N) must be identical. * * Parameters: * * alpha: Coefficients matrices. The matrix of coefficients for a single solution is * a vector of NxN, where N is the number of equations. This array stores the * coefficients for the entire set of M input problems, concatenated end to end, * and hence the total size of the array is MxNxN. * * beta: Vector of right hand side values, concatenated together for all input problems. * For a set of M inputs, the size of the vector is MxN. Upon completion, this * vector contains the results vector X for each solution. * * skip_calculation: An input vector which allows the calculation to be skipped for * a particular solution. For a set of M inputs, the size of this * vector is M. * * singular: An output vector used to report whether a given solution is singular. For * a set of M inputs, this vector has size M. Memory needs to be allocated * by the calling the function. * * n_equations: The number of equations and unknowns for a single solution. This is * equal to the size N. * * n_equations_pow2: The next highest power of 2 greater than n_equations. * * * Calling the cuda_gaussjordan function * ===================================== * * When calling the function, the blocks and threads must be set up correctly, as well * as the shared memory space, as shown in the following example code. 
* * dim3 threads(1, 1, 1); * dim3 blocks(1, 1, 1); * * threads.x = n_equations + 1; * threads.y = n_equations; * blocks.x = n_solutions; * blocks.y = 1; * * int const shared_size = sizeof(float) * * ( (threads.x * threads.y) + n_parameters_pow2 + n_parameters_pow2 ); * * int * singular; * CUDA_CHECK_STATUS(cudaMalloc((void**)&singular, n_solutions * sizeof(int))); * * cuda_gaussjordan<<< blocks, threads, shared_size >>>( * alpha, * beta, * skip_calculation, * singular, * n_equations, * n_equations_pow2); * */ #include "cuda_gaussjordan.cuh" __global__ void cuda_gaussjordan( float * delta, float const * beta, float const * alpha, int const * skip_calculation, int * singular, std::size_t const n_equations, std::size_t const n_equations_pow2) { extern __shared__ float extern_array[]; //shared memory between threads of a single block, //used for storing the calculation_matrix, the //abs_row vector, and the abs_row_index vector // In this routine we will store the augmented matrix (A|B), referred to here // as the calculation matrix in a shared memory space which is visible to all // threads within a block. Also stored in shared memory are two vectors which // are used to find the largest element in each row (the pivot). These vectors // are called abs_row and abs_row_index. // // Sizes of data stored in shared memory: // // calculation_matrix: n_equations * (n_equations+1) // abs_row: n_equations_pow2 // abs_row_index: n_equations_pow2 // // Note that each thread represents an element of the augmented matrix, with // the column and row indicated by the x and y index of the thread. Each // solution is calculated within one block, and the solution index is the // block index x value. int const col_index = threadIdx.x; //column index in the calculation_matrix int const row_index = threadIdx.y; //row index in the calculation_matrix int const solution_index = blockIdx.x; int const n_col = blockDim.x; //number of columns in calculation matrix (=threads.x) int const n_row = blockDim.y; //number of rows in calculation matrix (=threads.y) int const alpha_size = blockDim.y * blockDim.y; //number of entries in alpha matrix for one solution (NxN) if (skip_calculation[solution_index]) return; float p; //local variable used in pivot calculation float * calculation_matrix = extern_array; //point to the shared memory float * abs_row = extern_array + n_equations * (n_equations + 1); //abs_row is located after the calculation_matrix //within the shared memory int * abs_row_index = (int *)(abs_row + n_equations_pow2); //abs_row_index is located after abs_row // //note that although the shared memory is defined as //float, we are storing data of type int in this //part of the shared memory //initialize the singular vector if (col_index == 0 && row_index == 0) { singular[solution_index] = 0; } //initialize abs_row and abs_row_index, using only the threads on the diagonal if (col_index == row_index) { abs_row[col_index + (n_equations_pow2 - n_equations)] = 0.0f; abs_row_index[col_index + (n_equations_pow2 - n_equations)] = col_index + (n_equations_pow2 - n_equations); } //initialize the calculation_matrix (alpha and beta, concatenated, for one solution) if (col_index != n_equations) calculation_matrix[row_index*n_col + col_index] = alpha[solution_index * alpha_size + row_index * n_equations + col_index]; else calculation_matrix[row_index*n_col + col_index] = beta[solution_index * n_equations + row_index]; //wait for thread synchronization __syncthreads(); //start of main outer loop over the rows of the calculation 
matrix for (int current_row = 0; current_row < n_equations; current_row++) { // work in only one row, skipping the last column if (row_index == current_row && col_index != n_equations) { //save the absolute values of the current row abs_row[col_index] = abs(calculation_matrix[row_index * n_col + col_index]); //save the column indices abs_row_index[col_index] = col_index; __threadfence(); //find the largest absolute value in the current row and write its index in abs_row_index[0] for (int n = 2; n <= n_equations_pow2; n = n * 2) { if (col_index < (n_equations_pow2 / n)) { if (abs_row[abs_row_index[col_index]] < abs_row[abs_row_index[col_index + (n_equations_pow2 / n)]]) { abs_row_index[col_index] = abs_row_index[col_index + (n_equations_pow2 / n)]; } } } } __syncthreads(); //singularity check - if all values in the row are zero, no solution exists if (row_index == current_row && col_index != n_equations) { if (abs_row[abs_row_index[0]] == 0.0f) { singular[solution_index] = 1; } } //devide the row by the biggest value in the row if (row_index == current_row) { calculation_matrix[row_index * n_col + col_index] = calculation_matrix[row_index * n_col + col_index] / calculation_matrix[row_index * n_col + abs_row_index[0]]; } __syncthreads(); //The value of the largest element of the current row was found, and then current //row was divided by this value such that the largest value of the current row //is equal to one. // //Next, the matrix is manipulated to reduce to zero all other entries in the column //in which the largest value was found. To do this, the values in the current row //are scaled appropriately and substracted from the other rows of the matrix. // //For each element of the matrix that is not in the current row, calculate the value //to be subtracted and let each thread store this value in the scalar variable p. p = calculation_matrix[current_row * n_col + col_index] * calculation_matrix[row_index * n_col + abs_row_index[0]]; __syncthreads(); if (row_index != current_row) { calculation_matrix[row_index * n_col + col_index] = calculation_matrix[row_index * n_col + col_index] - p; } __syncthreads(); } //At this point, if the solution exists, the calculation matrix has been reduced to the //identity matrix on the left side, and the solution vector on the right side. However //we have not swapped rows during the procedure, so the identity matrix is out of order. // //For example, starting with the following augmented matrix as input: // // [ 3 2 -4 | 4 ] // [ 2 3 3 | 15 ] // [ 5 -3 1 | 14 ] // //we will obtain: // // [ 0 0 1 | 2 ] // [ 0 1 0 | 1 ] // [ 1 0 0 | 3 ] // //Which needs to be re-arranged to obtain the correct solution vector. In the final //step, each thread checks to see if its value equals 1, and if so it assigns the value //in its rightmost column to the appropriate entry in the beta vector. The solution is //stored in beta upon completetion. if (col_index != n_equations && calculation_matrix[row_index * n_col + col_index] == 1) delta[n_row * solution_index + col_index] = calculation_matrix[row_index * n_col + n_equations]; __syncthreads(); }
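cuda_gaussjordan reduces each augmented matrix (A|B) to reduced row-echelon form with the rows left out of order, then scatters the solution into delta. When validating a single solution it is handy to have a plain CPU reference; the sketch below is such a reference with explicit partial pivoting, not a transcription of the kernel. It reuses the 3x3 example from the kernel's header comment, taking the first right-hand side value as 3 so that the stated solution x=3, y=1, z=2 actually satisfies all three equations (with 4, as printed in the comment, the first equation does not balance).

#include <cstdio>
#include <cmath>
#include <utility>
#include <vector>

// Plain CPU Gauss-Jordan with partial pivoting: solves A x = b in place,
// leaving the solution in b. Returns false if the system is (numerically) singular.
bool GaussJordanReference(std::vector<double>& a, std::vector<double>& b, int n) {
  for (int col = 0; col < n; ++col) {
    // Pick the row with the largest pivot magnitude in this column.
    int pivot = col;
    for (int r = col + 1; r < n; ++r)
      if (std::fabs(a[r * n + col]) > std::fabs(a[pivot * n + col])) pivot = r;
    if (std::fabs(a[pivot * n + col]) < 1e-12) return false;
    for (int c = 0; c < n; ++c) std::swap(a[col * n + c], a[pivot * n + c]);
    std::swap(b[col], b[pivot]);
    // Normalize the pivot row, then eliminate the column from all other rows.
    double inv = 1.0 / a[col * n + col];
    for (int c = 0; c < n; ++c) a[col * n + c] *= inv;
    b[col] *= inv;
    for (int r = 0; r < n; ++r) {
      if (r == col) continue;
      double f = a[r * n + col];
      for (int c = 0; c < n; ++c) a[r * n + c] -= f * a[col * n + c];
      b[r] -= f * b[col];
    }
  }
  return true;
}

int main() {
  // 3x3 system from the kernel's header comment, first RHS taken as 3.
  std::vector<double> a = {3, 2, -4, 2, 3, 3, 5, -3, 1};
  std::vector<double> b = {3, 15, 14};
  if (GaussJordanReference(a, b, 3)) printf("x=%f y=%f z=%f\n", b[0], b[1], b[2]);  // expect 3, 1, 2
  return 0;
}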
169a50b7d16a211a58454748db77c2d89081ba4d.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "warmup.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *A = NULL; hipMalloc(&A, XSIZE*YSIZE); float *B = NULL; hipMalloc(&B, XSIZE*YSIZE); float *C = NULL; hipMalloc(&C, XSIZE*YSIZE); const int n = 1; int offset = 2; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( warmup), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,C,n,offset); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( warmup), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,C,n,offset); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( warmup), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,C,n,offset); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
169a50b7d16a211a58454748db77c2d89081ba4d.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "warmup.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *A = NULL; cudaMalloc(&A, XSIZE*YSIZE); float *B = NULL; cudaMalloc(&B, XSIZE*YSIZE); float *C = NULL; cudaMalloc(&C, XSIZE*YSIZE); const int n = 1; int offset = 2; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); warmup<<<gridBlock,threadBlock>>>(A,B,C,n,offset); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { warmup<<<gridBlock,threadBlock>>>(A,B,C,n,offset); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { warmup<<<gridBlock,threadBlock>>>(A,B,C,n,offset); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
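The benchmark driver above times 1000 kernel launches with std::chrono but never synchronizes the device before reading the end timestamp, so the reported microseconds mostly reflect launch/enqueue cost (plus whatever stalls the launch queue imposes) rather than the kernels' execution time on the device. A sketch of event-based timing follows; the kernel and sizes are placeholders, and the point is the cudaEventRecord / cudaEventSynchronize / cudaEventElapsedTime pattern.

#include <cstdio>
#include <cuda_runtime.h>

// Placeholder workload for the timing demonstration.
__global__ void BusyKernel(float* data, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) data[i] = data[i] * 2.0f + 1.0f;
}

int main() {
  const int n = 1 << 20, iters = 1000;
  float* d;
  cudaMalloc(&d, n * sizeof(float));
  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);

  BusyKernel<<<(n + 255) / 256, 256>>>(d, n);  // one warm-up launch
  cudaEventRecord(start);
  for (int it = 0; it < iters; ++it) BusyKernel<<<(n + 255) / 256, 256>>>(d, n);
  cudaEventRecord(stop);
  cudaEventSynchronize(stop);  // wait until the stop event has actually happened

  float ms = 0.0f;
  cudaEventElapsedTime(&ms, start, stop);  // elapsed device time between the events
  printf("avg kernel time: %f us\n", 1000.0f * ms / iters);

  cudaEventDestroy(start);
  cudaEventDestroy(stop);
  cudaFree(d);
  return 0;
}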
bf4f2816de41b46053a301f3bb6fb9a888dc6f01.hip
// !!! This is a file automatically generated by hipify!!! /*************************************************************************** *cr *cr (C) Copyright 2007 The Board of Trustees of the *cr University of Illinois *cr All Rights Reserved *cr ***************************************************************************/ #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include "args.h" #include "model.h" #include "tpacf_kernel.cu" #define CUDA_ERRCK { hipError_t err; \ if ((err = hipGetLastError()) != hipSuccess) { \ printf("CUDA error: %s, line %d\n", hipGetErrorString(err), __LINE__); \ return -1; }} extern unsigned int NUM_SETS; extern unsigned int NUM_ELEMENTS; int main( int argc, char** argv) { struct pb_TimerSet timers; struct pb_Parameters *params; pb_InitializeTimerSet( &timers ); params = pb_ReadParameters( &argc, argv ); options args; parse_args(argc, argv, &args); pb_SwitchToTimer( &timers, pb_TimerID_COMPUTE ); NUM_ELEMENTS = args.npoints; NUM_SETS = args.random_count; int num_elements = NUM_ELEMENTS; printf("Min distance: %f arcmin\n", min_arcmin); printf("Max distance: %f arcmin\n", max_arcmin); printf("Bins per dec: %i\n", bins_per_dec); printf("Total bins : %i\n", NUM_BINS); //read in files unsigned mem_size = (1+NUM_SETS)*num_elements*sizeof(struct cartesian); unsigned f_mem_size = (1+NUM_SETS)*num_elements*sizeof(REAL); // container for all the points read from files struct cartesian *h_all_data; h_all_data = (struct cartesian*) malloc(mem_size); // Until I can get libs fixed // iterator for data files struct cartesian *working = h_all_data; // go through and read all data and random points into h_all_data pb_SwitchToTimer( &timers, pb_TimerID_IO ); readdatafile(params->inpFiles[0], working, num_elements); pb_SwitchToTimer( &timers, pb_TimerID_COMPUTE ); working += num_elements; for(int i = 0; i < (NUM_SETS); i++) { pb_SwitchToTimer( &timers, pb_TimerID_IO ); readdatafile(params->inpFiles[i+1], working, num_elements); pb_SwitchToTimer( &timers, pb_TimerID_COMPUTE ); working += num_elements; } // split into x, y, and z arrays REAL * h_x_data = (REAL*) malloc (3*f_mem_size); REAL * h_y_data = h_x_data + NUM_ELEMENTS*(NUM_SETS+1); REAL * h_z_data = h_y_data + NUM_ELEMENTS*(NUM_SETS+1); for(int i = 0; i < (NUM_SETS+1); ++i) { for(int j = 0; j < NUM_ELEMENTS; ++j) { h_x_data[i*NUM_ELEMENTS+j] = h_all_data[i*NUM_ELEMENTS+j].x; h_y_data[i*NUM_ELEMENTS+j] = h_all_data[i*NUM_ELEMENTS+j].y; h_z_data[i*NUM_ELEMENTS+j] = h_all_data[i*NUM_ELEMENTS+j].z; } } // from on use x, y, and z arrays, free h_all_data free(h_all_data); pb_SwitchToTimer( &timers, pb_TimerID_COPY ); // allocate cuda memory to hold all points REAL * d_x_data; hipMalloc((void**) & d_x_data, 3*f_mem_size); CUDA_ERRCK REAL * d_y_data = d_x_data + NUM_ELEMENTS*(NUM_SETS+1); REAL * d_z_data = d_y_data + NUM_ELEMENTS*(NUM_SETS+1); // allocate cuda memory to hold final histograms // (1 for dd, and NUM_SETS for dr and rr apiece) hist_t * d_hists; hipMalloc((void**) & d_hists, NUM_BINS*(NUM_SETS*2+1)*sizeof(hist_t) ); CUDA_ERRCK pb_SwitchToTimer( &timers, pb_TimerID_COMPUTE ); // allocate system memory for final histograms hist_t *new_hists = (hist_t *) malloc(NUM_BINS*(NUM_SETS*2+1)* sizeof(hist_t)); // Initialize the boundary constants for bin search initBinB( &timers ); CUDA_ERRCK // **===------------------ Kick off TPACF on CUDA------------------===** pb_SwitchToTimer( &timers, pb_TimerID_COPY ); hipMemcpy(d_x_data, h_x_data, 3*f_mem_size, hipMemcpyHostToDevice); CUDA_ERRCK 
pb_SwitchToTimer( &timers, pb_TimerID_KERNEL ); TPACF(d_hists, d_x_data, d_y_data, d_z_data); pb_SwitchToTimer( &timers, pb_TimerID_COPY ); hipMemcpy(new_hists, d_hists, NUM_BINS*(NUM_SETS*2+1)* sizeof(hist_t), hipMemcpyDeviceToHost); CUDA_ERRCK pb_SwitchToTimer( &timers, pb_TimerID_COMPUTE ); // **===-----------------------------------------------------------===** // references into output histograms hist_t *dd_hist = new_hists; hist_t *rr_hist = dd_hist + NUM_BINS; hist_t *dr_hist = rr_hist + NUM_BINS*NUM_SETS; // add up values within dr and rr int rr[NUM_BINS]; for(int i=0; i<NUM_BINS; i++) { rr[i] = 0; } for(int i=0; i<NUM_SETS; i++) { for(int j=0; j<NUM_BINS; j++) { rr[j] += rr_hist[i*NUM_BINS + j]; } } int dr[NUM_BINS]; for(int i=0; i<NUM_BINS; i++) { dr[i] = 0; } for(int i=0; i<NUM_SETS; i++) { for(int j=0; j<NUM_BINS; j++) { dr[j] += dr_hist[i*NUM_BINS + j]; } } //int dd_t = 0; //int dr_t = 0; //int rr_t = 0; FILE *outfile; if ((outfile = fopen(params->outFile, "w")) == NULL) { fprintf(stderr, "Unable to open output file %s for writing, " "assuming stdout\n", params->outFile); outfile = stdout; } pb_SwitchToTimer( &timers, pb_TimerID_IO ); // print out final histograms + omega (while calculating omega) for(int i=0; i<NUM_BINS; i++) { //REAL w = (100.0 * dd_hist[i] - dr[i]) / rr[i] + 1.0; //fprintf(outfile, "%f\n", w); fprintf(outfile, "%d\n%d\n%d\n", dd_hist[i], dr[i], rr[i]); // dd_t += dd_hist[i]; // dr_t += dr[i]; // rr_t += rr[i]; } pb_SwitchToTimer( &timers, pb_TimerID_COMPUTE ); if(outfile != stdout) fclose(outfile); // cleanup memory free(new_hists); free( h_x_data); pb_SwitchToTimer( &timers, pb_TimerID_COPY ); hipFree( d_hists ); hipFree( d_x_data ); pb_SwitchToTimer(&timers, pb_TimerID_NONE); pb_PrintTimerSet(&timers); pb_FreeParameters(params); }
bf4f2816de41b46053a301f3bb6fb9a888dc6f01.cu
/*************************************************************************** *cr *cr (C) Copyright 2007 The Board of Trustees of the *cr University of Illinois *cr All Rights Reserved *cr ***************************************************************************/ #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include "args.h" #include "model.h" #include "tpacf_kernel.cu" #define CUDA_ERRCK { cudaError_t err; \ if ((err = cudaGetLastError()) != cudaSuccess) { \ printf("CUDA error: %s, line %d\n", cudaGetErrorString(err), __LINE__); \ return -1; }} extern unsigned int NUM_SETS; extern unsigned int NUM_ELEMENTS; int main( int argc, char** argv) { struct pb_TimerSet timers; struct pb_Parameters *params; pb_InitializeTimerSet( &timers ); params = pb_ReadParameters( &argc, argv ); options args; parse_args(argc, argv, &args); pb_SwitchToTimer( &timers, pb_TimerID_COMPUTE ); NUM_ELEMENTS = args.npoints; NUM_SETS = args.random_count; int num_elements = NUM_ELEMENTS; printf("Min distance: %f arcmin\n", min_arcmin); printf("Max distance: %f arcmin\n", max_arcmin); printf("Bins per dec: %i\n", bins_per_dec); printf("Total bins : %i\n", NUM_BINS); //read in files unsigned mem_size = (1+NUM_SETS)*num_elements*sizeof(struct cartesian); unsigned f_mem_size = (1+NUM_SETS)*num_elements*sizeof(REAL); // container for all the points read from files struct cartesian *h_all_data; h_all_data = (struct cartesian*) malloc(mem_size); // Until I can get libs fixed // iterator for data files struct cartesian *working = h_all_data; // go through and read all data and random points into h_all_data pb_SwitchToTimer( &timers, pb_TimerID_IO ); readdatafile(params->inpFiles[0], working, num_elements); pb_SwitchToTimer( &timers, pb_TimerID_COMPUTE ); working += num_elements; for(int i = 0; i < (NUM_SETS); i++) { pb_SwitchToTimer( &timers, pb_TimerID_IO ); readdatafile(params->inpFiles[i+1], working, num_elements); pb_SwitchToTimer( &timers, pb_TimerID_COMPUTE ); working += num_elements; } // split into x, y, and z arrays REAL * h_x_data = (REAL*) malloc (3*f_mem_size); REAL * h_y_data = h_x_data + NUM_ELEMENTS*(NUM_SETS+1); REAL * h_z_data = h_y_data + NUM_ELEMENTS*(NUM_SETS+1); for(int i = 0; i < (NUM_SETS+1); ++i) { for(int j = 0; j < NUM_ELEMENTS; ++j) { h_x_data[i*NUM_ELEMENTS+j] = h_all_data[i*NUM_ELEMENTS+j].x; h_y_data[i*NUM_ELEMENTS+j] = h_all_data[i*NUM_ELEMENTS+j].y; h_z_data[i*NUM_ELEMENTS+j] = h_all_data[i*NUM_ELEMENTS+j].z; } } // from on use x, y, and z arrays, free h_all_data free(h_all_data); pb_SwitchToTimer( &timers, pb_TimerID_COPY ); // allocate cuda memory to hold all points REAL * d_x_data; cudaMalloc((void**) & d_x_data, 3*f_mem_size); CUDA_ERRCK REAL * d_y_data = d_x_data + NUM_ELEMENTS*(NUM_SETS+1); REAL * d_z_data = d_y_data + NUM_ELEMENTS*(NUM_SETS+1); // allocate cuda memory to hold final histograms // (1 for dd, and NUM_SETS for dr and rr apiece) hist_t * d_hists; cudaMalloc((void**) & d_hists, NUM_BINS*(NUM_SETS*2+1)*sizeof(hist_t) ); CUDA_ERRCK pb_SwitchToTimer( &timers, pb_TimerID_COMPUTE ); // allocate system memory for final histograms hist_t *new_hists = (hist_t *) malloc(NUM_BINS*(NUM_SETS*2+1)* sizeof(hist_t)); // Initialize the boundary constants for bin search initBinB( &timers ); CUDA_ERRCK // **===------------------ Kick off TPACF on CUDA------------------===** pb_SwitchToTimer( &timers, pb_TimerID_COPY ); cudaMemcpy(d_x_data, h_x_data, 3*f_mem_size, cudaMemcpyHostToDevice); CUDA_ERRCK pb_SwitchToTimer( &timers, pb_TimerID_KERNEL ); TPACF(d_hists, 
d_x_data, d_y_data, d_z_data); pb_SwitchToTimer( &timers, pb_TimerID_COPY ); cudaMemcpy(new_hists, d_hists, NUM_BINS*(NUM_SETS*2+1)* sizeof(hist_t), cudaMemcpyDeviceToHost); CUDA_ERRCK pb_SwitchToTimer( &timers, pb_TimerID_COMPUTE ); // **===-----------------------------------------------------------===** // references into output histograms hist_t *dd_hist = new_hists; hist_t *rr_hist = dd_hist + NUM_BINS; hist_t *dr_hist = rr_hist + NUM_BINS*NUM_SETS; // add up values within dr and rr int rr[NUM_BINS]; for(int i=0; i<NUM_BINS; i++) { rr[i] = 0; } for(int i=0; i<NUM_SETS; i++) { for(int j=0; j<NUM_BINS; j++) { rr[j] += rr_hist[i*NUM_BINS + j]; } } int dr[NUM_BINS]; for(int i=0; i<NUM_BINS; i++) { dr[i] = 0; } for(int i=0; i<NUM_SETS; i++) { for(int j=0; j<NUM_BINS; j++) { dr[j] += dr_hist[i*NUM_BINS + j]; } } //int dd_t = 0; //int dr_t = 0; //int rr_t = 0; FILE *outfile; if ((outfile = fopen(params->outFile, "w")) == NULL) { fprintf(stderr, "Unable to open output file %s for writing, " "assuming stdout\n", params->outFile); outfile = stdout; } pb_SwitchToTimer( &timers, pb_TimerID_IO ); // print out final histograms + omega (while calculating omega) for(int i=0; i<NUM_BINS; i++) { //REAL w = (100.0 * dd_hist[i] - dr[i]) / rr[i] + 1.0; //fprintf(outfile, "%f\n", w); fprintf(outfile, "%d\n%d\n%d\n", dd_hist[i], dr[i], rr[i]); // dd_t += dd_hist[i]; // dr_t += dr[i]; // rr_t += rr[i]; } pb_SwitchToTimer( &timers, pb_TimerID_COMPUTE ); if(outfile != stdout) fclose(outfile); // cleanup memory free(new_hists); free( h_x_data); pb_SwitchToTimer( &timers, pb_TimerID_COPY ); cudaFree( d_hists ); cudaFree( d_x_data ); pb_SwitchToTimer(&timers, pb_TimerID_NONE); pb_PrintTimerSet(&timers); pb_FreeParameters(params); }
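Both versions of this benchmark use the same CUDA_ERRCK pattern, differing only in the runtime prefix (hip vs. cuda). A minimal sketch of that pattern as a HIP helper function; the helper name and signature are hypothetical and not part of the sources above.

#include <hip/hip_runtime.h>
#include <stdio.h>

// Hypothetical helper equivalent to the CUDA_ERRCK macro above: query the
// last runtime error after a launch or API call and report it.
static inline int checkLastHipError(const char *where)
{
    hipError_t err = hipGetLastError();
    if (err != hipSuccess) {
        printf("HIP error: %s (%s)\n", hipGetErrorString(err), where);
        return -1;
    }
    return 0;
}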
02ef93bc15fca738a608b1a542269a8dd2722f07.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <device_launch_parameters.h> #include <algorithm> #define SIMULATION_IMPL #include "CSimulationCuda.cuh" #include "OdeSolvers.cuh" #include "CudaLaunchHelpers.cuh" using namespace wing2d::simulation; using namespace wing2d::simulation::cuda; static __device__ float4 GetHeatMapColor(float value) { value = fminf(fmaxf(value, 0.0f), 1.0f); static const size_t stages = 7; static const float3 heatMap[stages] = { {0.0f, 0.0f, 0.0f}, {0.0f, 0.0f, 1.0f}, {0.0f, 1.0f, 1.0f}, {0.0f, 1.0f, 0.0f}, {1.0f, 1.0f, 0.0f}, {1.0f, 0.0f, 0.0f}, {1.0f, 1.0f, 1.0f} }; value *= stages - 1; int idx1 = int(value); int idx2 = idx1 + 1; float fract1 = value - float(idx1); auto result = heatMap[idx1] + fract1 * (heatMap[idx2] - heatMap[idx1]); return make_float4(result, 1.0f); } static __global__ void ColorParticlesKernel(const float* __restrict__ pDt, const size_t nParticles, const float2* __restrict__ lastVel, const float2* __restrict__ nextVel, float4* __restrict__ color) { const auto threadId = blockIdx.x * blockDim.x + threadIdx.x; if (threadId >= nParticles) return; auto force = (nextVel[threadId] - lastVel[threadId]) / *pDt; color[threadId] = GetHeatMapColor(logf(length(force) + 1.0f) / 10.0f + 0.15f); } static __global__ void ColorParticlesKernel2(const size_t nParticles, const float* __restrict__ pressures, const TIndex* __restrict__ oldIndices, float4* __restrict__ color) { const auto threadId = blockIdx.x * blockDim.x + threadIdx.x; if (threadId >= nParticles) return; auto oldIdx = oldIndices[threadId]; auto pressure = pressures[threadId]; color[oldIdx] = GetHeatMapColor(logf(pressure + 1.0f) / 10.0f + 0.15f); } static float2 TupleToVec(const SimulationState::vec2& v) { return make_float2(std::get<0>(v), std::get<1>(v)); } static SimulationState::vec2 VecToTuple2D(const float2& v) { return std::make_tuple(v.x, v.y); }; static SimulationState::vec4 VecToTuple4D(const float4& v) { return std::make_tuple(v.x, v.y, v.z, v.w); }; static Segments_t BuildWalls(const SimulationState& state) { auto corner = make_float2(state.worldSize.width / 2.0f, state.worldSize.height / 2.0f); auto topLeft = make_float2(-corner.x, corner.y); auto topRight = make_float2(corner.x, corner.y); auto bottomRight = make_float2(corner.x, -corner.y); auto bottomLeft = make_float2(-corner.x, -corner.y); Segments_t result; result.emplace_back(std::make_tuple(topRight, topLeft)); //result.emplace_back(std::make_tuple(bottomRight, topRight)); result.emplace_back(std::make_tuple(bottomLeft, bottomRight)); //result.emplace_back(std::make_tuple(topLeft, bottomLeft)); return result; } static Segments_t BuildAirfoil(const SimulationState& state) { Segments_t result; for (size_t i = 0; i < state.airfoil.size() - 1; ++i) result.emplace_back(std::make_tuple(TupleToVec(state.airfoil[i]), TupleToVec(state.airfoil[i + 1]))); result.emplace_back(std::make_tuple(TupleToVec(state.airfoil.back()), TupleToVec(state.airfoil.front()))); return result; } std::unique_ptr<ISimulation> wing2d::simulation::cuda::CreateSimulation() { return std::make_unique<CSimulationCuda>(); } void CSimulationCuda::ResetState(const SimulationState& state) { if (!state.IsValid()) throw std::runtime_error("state is invalid"); m_state = state; CopyToGPU(); m_derivativeSolver = std::make_unique<CDerivativeSolver>(m_state.particles, m_state.particleRad, BuildAirfoil(m_state), BuildWalls(m_state)); m_odeSolver = 
std::make_unique<CForwardEulerSolver>(m_derivativeSolver.get()); } float CSimulationCuda::Update(float dt) { for (int i = 0; i < 16; ++i) { m_odeSolver->NextState(ComputeMinDeltaTime(dt), m_curOdeState, m_nextOdeState); m_nextOdeState.swap(m_curOdeState); } ColorParticles2(); return dt; } void CSimulationCuda::ColorParticles() { dim3 blockDim(kBlockSize); dim3 gridDim(GridSize(m_state.particles, kBlockSize)); const float2* lastVel = m_curOdeState.data().get() + m_state.particles; const float2* nextVel = m_nextOdeState.data().get() + m_state.particles; float4* colors = m_deviceColors.data().get(); hipLaunchKernelGGL(( ColorParticlesKernel) , dim3(gridDim), dim3(blockDim) , 0, 0, m_minDeltaTime.dt.get(), m_state.particles, lastVel, nextVel, colors); CudaCheckError(); m_hostColors = m_deviceColors; std::transform(m_hostColors.cbegin(), m_hostColors.cend(), m_state.color.begin(), VecToTuple4D); } void CSimulationCuda::ColorParticles2() { dim3 blockDim(kBlockSize); dim3 gridDim(GridSize(m_state.particles, kBlockSize)); auto pressures = m_derivativeSolver->GetPressures().data().get(); auto oldIndices = m_derivativeSolver->GetParticlesIndices().data().get(); auto resultColors = m_deviceColors.data().get(); hipLaunchKernelGGL(( ColorParticlesKernel2) , dim3(gridDim), dim3(blockDim) , 0, 0, m_state.particles, pressures, oldIndices, resultColors); CudaCheckError(); m_hostColors = m_deviceColors; std::transform(m_hostColors.cbegin(), m_hostColors.cend(), m_state.color.begin(), VecToTuple4D); } const SimulationState& CSimulationCuda::GetState() { m_hostOdeState = m_curOdeState; std::transform(m_hostOdeState.cbegin(), m_hostOdeState.cbegin() + m_state.particles, m_state.pos.begin(), VecToTuple2D); std::transform(m_hostOdeState.cbegin() + m_state.particles, m_hostOdeState.cend(), m_state.vel.begin(), VecToTuple2D); return m_state; } void CSimulationCuda::CopyToGPU() { const size_t& particles = m_state.particles; PinnedHostVector2D_t posBuf(particles); PinnedHostVector2D_t velBuf(particles); m_curOdeState.resize(particles * 2); m_nextOdeState.resize(particles * 2); m_hostOdeState.resize(particles * 2); m_deviceColors.resize(particles); m_hostColors.resize(particles); std::transform(m_state.pos.cbegin(), m_state.pos.cend(), posBuf.begin(), TupleToVec); std::transform(m_state.vel.cbegin(), m_state.vel.cend(), velBuf.begin(), TupleToVec); thrust::copy_n(posBuf.cbegin(), particles, m_curOdeState.begin()); thrust::copy_n(velBuf.cbegin(), particles, m_curOdeState.begin() + particles); }
02ef93bc15fca738a608b1a542269a8dd2722f07.cu
#include <cuda.h> #include <cuda_runtime.h> #include <device_launch_parameters.h> #include <algorithm> #define SIMULATION_IMPL #include "CSimulationCuda.cuh" #include "OdeSolvers.cuh" #include "CudaLaunchHelpers.cuh" using namespace wing2d::simulation; using namespace wing2d::simulation::cuda; static __device__ float4 GetHeatMapColor(float value) { value = fminf(fmaxf(value, 0.0f), 1.0f); static const size_t stages = 7; static const float3 heatMap[stages] = { {0.0f, 0.0f, 0.0f}, {0.0f, 0.0f, 1.0f}, {0.0f, 1.0f, 1.0f}, {0.0f, 1.0f, 0.0f}, {1.0f, 1.0f, 0.0f}, {1.0f, 0.0f, 0.0f}, {1.0f, 1.0f, 1.0f} }; value *= stages - 1; int idx1 = int(value); int idx2 = idx1 + 1; float fract1 = value - float(idx1); auto result = heatMap[idx1] + fract1 * (heatMap[idx2] - heatMap[idx1]); return make_float4(result, 1.0f); } static __global__ void ColorParticlesKernel(const float* __restrict__ pDt, const size_t nParticles, const float2* __restrict__ lastVel, const float2* __restrict__ nextVel, float4* __restrict__ color) { const auto threadId = blockIdx.x * blockDim.x + threadIdx.x; if (threadId >= nParticles) return; auto force = (nextVel[threadId] - lastVel[threadId]) / *pDt; color[threadId] = GetHeatMapColor(logf(length(force) + 1.0f) / 10.0f + 0.15f); } static __global__ void ColorParticlesKernel2(const size_t nParticles, const float* __restrict__ pressures, const TIndex* __restrict__ oldIndices, float4* __restrict__ color) { const auto threadId = blockIdx.x * blockDim.x + threadIdx.x; if (threadId >= nParticles) return; auto oldIdx = oldIndices[threadId]; auto pressure = pressures[threadId]; color[oldIdx] = GetHeatMapColor(logf(pressure + 1.0f) / 10.0f + 0.15f); } static float2 TupleToVec(const SimulationState::vec2& v) { return make_float2(std::get<0>(v), std::get<1>(v)); } static SimulationState::vec2 VecToTuple2D(const float2& v) { return std::make_tuple(v.x, v.y); }; static SimulationState::vec4 VecToTuple4D(const float4& v) { return std::make_tuple(v.x, v.y, v.z, v.w); }; static Segments_t BuildWalls(const SimulationState& state) { auto corner = make_float2(state.worldSize.width / 2.0f, state.worldSize.height / 2.0f); auto topLeft = make_float2(-corner.x, corner.y); auto topRight = make_float2(corner.x, corner.y); auto bottomRight = make_float2(corner.x, -corner.y); auto bottomLeft = make_float2(-corner.x, -corner.y); Segments_t result; result.emplace_back(std::make_tuple(topRight, topLeft)); //result.emplace_back(std::make_tuple(bottomRight, topRight)); result.emplace_back(std::make_tuple(bottomLeft, bottomRight)); //result.emplace_back(std::make_tuple(topLeft, bottomLeft)); return result; } static Segments_t BuildAirfoil(const SimulationState& state) { Segments_t result; for (size_t i = 0; i < state.airfoil.size() - 1; ++i) result.emplace_back(std::make_tuple(TupleToVec(state.airfoil[i]), TupleToVec(state.airfoil[i + 1]))); result.emplace_back(std::make_tuple(TupleToVec(state.airfoil.back()), TupleToVec(state.airfoil.front()))); return result; } std::unique_ptr<ISimulation> wing2d::simulation::cuda::CreateSimulation() { return std::make_unique<CSimulationCuda>(); } void CSimulationCuda::ResetState(const SimulationState& state) { if (!state.IsValid()) throw std::runtime_error("state is invalid"); m_state = state; CopyToGPU(); m_derivativeSolver = std::make_unique<CDerivativeSolver>(m_state.particles, m_state.particleRad, BuildAirfoil(m_state), BuildWalls(m_state)); m_odeSolver = std::make_unique<CForwardEulerSolver>(m_derivativeSolver.get()); } float CSimulationCuda::Update(float dt) { for (int i = 
0; i < 16; ++i) { m_odeSolver->NextState(ComputeMinDeltaTime(dt), m_curOdeState, m_nextOdeState); m_nextOdeState.swap(m_curOdeState); } ColorParticles2(); return dt; } void CSimulationCuda::ColorParticles() { dim3 blockDim(kBlockSize); dim3 gridDim(GridSize(m_state.particles, kBlockSize)); const float2* lastVel = m_curOdeState.data().get() + m_state.particles; const float2* nextVel = m_nextOdeState.data().get() + m_state.particles; float4* colors = m_deviceColors.data().get(); ColorParticlesKernel <<<gridDim, blockDim >>> (m_minDeltaTime.dt.get(), m_state.particles, lastVel, nextVel, colors); CudaCheckError(); m_hostColors = m_deviceColors; std::transform(m_hostColors.cbegin(), m_hostColors.cend(), m_state.color.begin(), VecToTuple4D); } void CSimulationCuda::ColorParticles2() { dim3 blockDim(kBlockSize); dim3 gridDim(GridSize(m_state.particles, kBlockSize)); auto pressures = m_derivativeSolver->GetPressures().data().get(); auto oldIndices = m_derivativeSolver->GetParticlesIndices().data().get(); auto resultColors = m_deviceColors.data().get(); ColorParticlesKernel2 <<<gridDim, blockDim >>> (m_state.particles, pressures, oldIndices, resultColors); CudaCheckError(); m_hostColors = m_deviceColors; std::transform(m_hostColors.cbegin(), m_hostColors.cend(), m_state.color.begin(), VecToTuple4D); } const SimulationState& CSimulationCuda::GetState() { m_hostOdeState = m_curOdeState; std::transform(m_hostOdeState.cbegin(), m_hostOdeState.cbegin() + m_state.particles, m_state.pos.begin(), VecToTuple2D); std::transform(m_hostOdeState.cbegin() + m_state.particles, m_hostOdeState.cend(), m_state.vel.begin(), VecToTuple2D); return m_state; } void CSimulationCuda::CopyToGPU() { const size_t& particles = m_state.particles; PinnedHostVector2D_t posBuf(particles); PinnedHostVector2D_t velBuf(particles); m_curOdeState.resize(particles * 2); m_nextOdeState.resize(particles * 2); m_hostOdeState.resize(particles * 2); m_deviceColors.resize(particles); m_hostColors.resize(particles); std::transform(m_state.pos.cbegin(), m_state.pos.cend(), posBuf.begin(), TupleToVec); std::transform(m_state.vel.cbegin(), m_state.vel.cend(), velBuf.begin(), TupleToVec); thrust::copy_n(posBuf.cbegin(), particles, m_curOdeState.begin()); thrust::copy_n(velBuf.cbegin(), particles, m_curOdeState.begin() + particles); }
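The substantive difference between the two files above is the kernel launch form: the CUDA version uses the <<<grid, block>>> syntax, while the hipified version uses hipLaunchKernelGGL. A minimal standalone sketch of that mapping with a hypothetical kernel and sizes, not taken from the simulation code:

#include <hip/hip_runtime.h>

__global__ void scaleKernel(float *data, float factor, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) data[i] *= factor;
}

void launchScale(float *d_data, float factor, int n)
{
    dim3 block(128);
    dim3 grid((n + block.x - 1) / block.x);
    // CUDA form:  scaleKernel<<<grid, block>>>(d_data, factor, n);
    // HIP form produced by hipify (shared memory = 0, default stream):
    hipLaunchKernelGGL(scaleKernel, grid, block, 0, 0, d_data, factor, n);
}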
f1f0fbe6391b7ffd37bb10885e3a5d17e6eba4ea.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //////////////////////////////////////////////////////////////////////////////// // Copyright (c) 2014-2019, Lawrence Livermore National Security, LLC. // Produced at the Lawrence Livermore National Laboratory. // Written by the LBANN Research Team (B. Van Essen, et al.) listed in // the CONTRIBUTORS file. <[email protected]> // // LLNL-CODE-697807. // All rights reserved. // // This file is part of LBANN: Livermore Big Artificial Neural Network // Toolkit. For details, see http://software.llnl.gov/LBANN or // https://github.com/LLNL/LBANN. // // Licensed under the Apache License, Version 2.0 (the "Licensee"); you // may not use this file except in compliance with the License. You may // obtain a copy of the License at: // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or // implied. See the License for the specific language governing // permissions and limitations under the license. //////////////////////////////////////////////////////////////////////////////// #define LBANN_CHANNELWISE_MEAN_LAYER_INSTANTIATE #include "lbann/layers/misc/channelwise_mean.hpp" #include "lbann/utils/gpu/helpers.hpp" namespace lbann { namespace { template <El::Int block_size, typename TensorDataType> __global__ void mean_kernel(El::Int num_channels, El::Int channel_size, El::Int width, const TensorDataType* __restrict__ input, El::Int input_ldim, TensorDataType* __restrict__ output, El::Int output_ldim) { // Indices const El::Int tid = threadIdx.x; const El::Int gidx = threadIdx.x + blockIdx.x * blockDim.x; const El::Int bidy = blockIdx.y; const El::Int bidz = blockIdx.z; const El::Int nthreadsx = blockDim.x * gridDim.x; const El::Int nblocksy = gridDim.y; const El::Int nblocksz = gridDim.z; // Compute local contribution for each channel for (El::Int col = bidz; col < width; col += nblocksz) { for (El::Int channel = bidy; channel < num_channels; channel += nblocksy) { // Sum for each thread TensorDataType private_sum = 0; for (El::Int i = gidx; i < channel_size; i += nthreadsx) { private_sum += input[i + channel*channel_size + col*input_ldim]; } // Shared memory reduction to get sum for each block /// @todo unroll loops __shared__ TensorDataType shared_sums[block_size]; shared_sums[tid] = private_sum; for (El::Int stride = block_size / 2; stride > 0; stride /= 2) { __syncthreads(); if (tid < stride) { shared_sums[tid] += shared_sums[tid + stride]; } } if (tid == 0) { gpu_lib::atomic_add(&output[channel + col * output_ldim], shared_sums[0] / TensorDataType(channel_size)); } } } } template <typename TensorDataType> __global__ void backprop_kernel(El::Int num_channels, El::Int channel_size, El::Int width, const TensorDataType* __restrict__ gradient_wrt_output, El::Int gradient_wrt_output_ldim, TensorDataType* __restrict__ gradient_wrt_input, El::Int gradient_wrt_input_ldim) { // Indices const El::Int gidx = threadIdx.x + blockIdx.x * blockDim.x; const El::Int bidy = blockIdx.y; const El::Int bidz = blockIdx.z; const El::Int nthreadsx = blockDim.x * gridDim.x; const El::Int nblocksy = gridDim.y; const El::Int nblocksz = gridDim.z; // Compute local contribution for each channel for (El::Int col = bidz; col < width; col += nblocksz) { for (El::Int channel = bidy; channel < num_channels; channel += nblocksy) { const auto& dy = 
gradient_wrt_output[channel + col * gradient_wrt_output_ldim]; const auto& dx = dy / TensorDataType(channel_size); for (El::Int i = gidx; i < channel_size; i += nthreadsx) { gradient_wrt_input[i + channel*channel_size + col*gradient_wrt_input_ldim] = dx; } } } } } // namespace template <typename TensorDataType, data_layout Layout, El::Device Device> void channelwise_mean_layer<TensorDataType, Layout, Device>::fp_compute() { // Local matrices const auto& local_input = this->get_local_prev_activations(); auto& local_output = this->get_local_activations(); // Dimensions // Note: channel_size is the number of input entries per channel and // local_width is the number of local mini-batch samples. const auto& input_dims = this->get_input_dims(); const El::Int num_channels = input_dims[0]; const El::Int channel_size = std::accumulate(input_dims.begin() + 1, input_dims.end(), 1, std::multiplies<int>()); const auto& local_width = local_input.Width(); // Compute channel-wise mean El::Zero(local_output); if (!local_input.IsEmpty()) { auto multisync = El::MakeMultiSync(gpu::get_sync_info(local_output), gpu::get_sync_info(local_input)); constexpr El::Int block_size = 256; dim3 block_dims, grid_dims; block_dims.x = block_size; grid_dims.x = (channel_size + block_size - 1) / block_size; grid_dims.y = num_channels; grid_dims.z = local_width; gpu_lib::clip_grid_dims(grid_dims); hydrogen::gpu::LaunchKernel( mean_kernel<block_size, TensorDataType>, grid_dims, block_dims, 0, multisync, num_channels, channel_size, local_width, local_input.LockedBuffer(), local_input.LDim(), local_output.Buffer(), local_output.LDim()); } } template <typename TensorDataType, data_layout Layout, El::Device Device> void channelwise_mean_layer<TensorDataType, Layout, Device>::bp_compute() { // Local matrices const auto& local_gradient_wrt_output = this->get_local_prev_error_signals(); auto& local_gradient_wrt_input = this->get_local_error_signals(); // Dimensions // Note: channel_size is the number of input entries per channel and // local_width is the number of local mini-batch samples. const auto& input_dims = this->get_input_dims(); const El::Int num_channels = input_dims[0]; const El::Int channel_size = std::accumulate(input_dims.begin() + 1, input_dims.end(), 1, std::multiplies<int>()); const auto& local_width = local_gradient_wrt_input.Width(); // Compute gradients if (!local_gradient_wrt_input.IsEmpty()) { auto multisync = El::MakeMultiSync(gpu::get_sync_info(local_gradient_wrt_input), gpu::get_sync_info(local_gradient_wrt_output)); constexpr El::Int block_size = 256; dim3 block_dims, grid_dims; block_dims.x = block_size; grid_dims.x = (channel_size + block_size - 1) / block_size; grid_dims.y = num_channels; grid_dims.z = local_width; gpu_lib::clip_grid_dims(grid_dims); hydrogen::gpu::LaunchKernel( backprop_kernel<TensorDataType>, grid_dims, block_dims, 0, multisync, num_channels, channel_size, local_width, local_gradient_wrt_output.LockedBuffer(), local_gradient_wrt_output.LDim(), local_gradient_wrt_input.Buffer(), local_gradient_wrt_input.LDim()); } } #define PROTO(T) \ template class channelwise_mean_layer<T, data_layout::DATA_PARALLEL, El::Device::GPU> #define LBANN_INSTANTIATE_GPU_HALF #include "lbann/macros/instantiate.hpp" } // namespace lbann
f1f0fbe6391b7ffd37bb10885e3a5d17e6eba4ea.cu
//////////////////////////////////////////////////////////////////////////////// // Copyright (c) 2014-2019, Lawrence Livermore National Security, LLC. // Produced at the Lawrence Livermore National Laboratory. // Written by the LBANN Research Team (B. Van Essen, et al.) listed in // the CONTRIBUTORS file. <[email protected]> // // LLNL-CODE-697807. // All rights reserved. // // This file is part of LBANN: Livermore Big Artificial Neural Network // Toolkit. For details, see http://software.llnl.gov/LBANN or // https://github.com/LLNL/LBANN. // // Licensed under the Apache License, Version 2.0 (the "Licensee"); you // may not use this file except in compliance with the License. You may // obtain a copy of the License at: // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or // implied. See the License for the specific language governing // permissions and limitations under the license. //////////////////////////////////////////////////////////////////////////////// #define LBANN_CHANNELWISE_MEAN_LAYER_INSTANTIATE #include "lbann/layers/misc/channelwise_mean.hpp" #include "lbann/utils/gpu/helpers.hpp" namespace lbann { namespace { template <El::Int block_size, typename TensorDataType> __global__ void mean_kernel(El::Int num_channels, El::Int channel_size, El::Int width, const TensorDataType* __restrict__ input, El::Int input_ldim, TensorDataType* __restrict__ output, El::Int output_ldim) { // Indices const El::Int tid = threadIdx.x; const El::Int gidx = threadIdx.x + blockIdx.x * blockDim.x; const El::Int bidy = blockIdx.y; const El::Int bidz = blockIdx.z; const El::Int nthreadsx = blockDim.x * gridDim.x; const El::Int nblocksy = gridDim.y; const El::Int nblocksz = gridDim.z; // Compute local contribution for each channel for (El::Int col = bidz; col < width; col += nblocksz) { for (El::Int channel = bidy; channel < num_channels; channel += nblocksy) { // Sum for each thread TensorDataType private_sum = 0; for (El::Int i = gidx; i < channel_size; i += nthreadsx) { private_sum += input[i + channel*channel_size + col*input_ldim]; } // Shared memory reduction to get sum for each block /// @todo unroll loops __shared__ TensorDataType shared_sums[block_size]; shared_sums[tid] = private_sum; for (El::Int stride = block_size / 2; stride > 0; stride /= 2) { __syncthreads(); if (tid < stride) { shared_sums[tid] += shared_sums[tid + stride]; } } if (tid == 0) { gpu_lib::atomic_add(&output[channel + col * output_ldim], shared_sums[0] / TensorDataType(channel_size)); } } } } template <typename TensorDataType> __global__ void backprop_kernel(El::Int num_channels, El::Int channel_size, El::Int width, const TensorDataType* __restrict__ gradient_wrt_output, El::Int gradient_wrt_output_ldim, TensorDataType* __restrict__ gradient_wrt_input, El::Int gradient_wrt_input_ldim) { // Indices const El::Int gidx = threadIdx.x + blockIdx.x * blockDim.x; const El::Int bidy = blockIdx.y; const El::Int bidz = blockIdx.z; const El::Int nthreadsx = blockDim.x * gridDim.x; const El::Int nblocksy = gridDim.y; const El::Int nblocksz = gridDim.z; // Compute local contribution for each channel for (El::Int col = bidz; col < width; col += nblocksz) { for (El::Int channel = bidy; channel < num_channels; channel += nblocksy) { const auto& dy = gradient_wrt_output[channel + col * gradient_wrt_output_ldim]; const auto& dx = dy / 
TensorDataType(channel_size); for (El::Int i = gidx; i < channel_size; i += nthreadsx) { gradient_wrt_input[i + channel*channel_size + col*gradient_wrt_input_ldim] = dx; } } } } } // namespace template <typename TensorDataType, data_layout Layout, El::Device Device> void channelwise_mean_layer<TensorDataType, Layout, Device>::fp_compute() { // Local matrices const auto& local_input = this->get_local_prev_activations(); auto& local_output = this->get_local_activations(); // Dimensions // Note: channel_size is the number of input entries per channel and // local_width is the number of local mini-batch samples. const auto& input_dims = this->get_input_dims(); const El::Int num_channels = input_dims[0]; const El::Int channel_size = std::accumulate(input_dims.begin() + 1, input_dims.end(), 1, std::multiplies<int>()); const auto& local_width = local_input.Width(); // Compute channel-wise mean El::Zero(local_output); if (!local_input.IsEmpty()) { auto multisync = El::MakeMultiSync(gpu::get_sync_info(local_output), gpu::get_sync_info(local_input)); constexpr El::Int block_size = 256; dim3 block_dims, grid_dims; block_dims.x = block_size; grid_dims.x = (channel_size + block_size - 1) / block_size; grid_dims.y = num_channels; grid_dims.z = local_width; gpu_lib::clip_grid_dims(grid_dims); hydrogen::gpu::LaunchKernel( mean_kernel<block_size, TensorDataType>, grid_dims, block_dims, 0, multisync, num_channels, channel_size, local_width, local_input.LockedBuffer(), local_input.LDim(), local_output.Buffer(), local_output.LDim()); } } template <typename TensorDataType, data_layout Layout, El::Device Device> void channelwise_mean_layer<TensorDataType, Layout, Device>::bp_compute() { // Local matrices const auto& local_gradient_wrt_output = this->get_local_prev_error_signals(); auto& local_gradient_wrt_input = this->get_local_error_signals(); // Dimensions // Note: channel_size is the number of input entries per channel and // local_width is the number of local mini-batch samples. const auto& input_dims = this->get_input_dims(); const El::Int num_channels = input_dims[0]; const El::Int channel_size = std::accumulate(input_dims.begin() + 1, input_dims.end(), 1, std::multiplies<int>()); const auto& local_width = local_gradient_wrt_input.Width(); // Compute gradients if (!local_gradient_wrt_input.IsEmpty()) { auto multisync = El::MakeMultiSync(gpu::get_sync_info(local_gradient_wrt_input), gpu::get_sync_info(local_gradient_wrt_output)); constexpr El::Int block_size = 256; dim3 block_dims, grid_dims; block_dims.x = block_size; grid_dims.x = (channel_size + block_size - 1) / block_size; grid_dims.y = num_channels; grid_dims.z = local_width; gpu_lib::clip_grid_dims(grid_dims); hydrogen::gpu::LaunchKernel( backprop_kernel<TensorDataType>, grid_dims, block_dims, 0, multisync, num_channels, channel_size, local_width, local_gradient_wrt_output.LockedBuffer(), local_gradient_wrt_output.LDim(), local_gradient_wrt_input.Buffer(), local_gradient_wrt_input.LDim()); } } #define PROTO(T) \ template class channelwise_mean_layer<T, data_layout::DATA_PARALLEL, El::Device::GPU> #define LBANN_INSTANTIATE_GPU_HALF #include "lbann/macros/instantiate.hpp" } // namespace lbann
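mean_kernel above accumulates per-thread partial sums and then folds them with a shared-memory tree reduction inside each block. A minimal standalone sketch of that reduction pattern with a hypothetical kernel (the block size must be a power of two and match blockDim.x):

template <int BLOCK_SIZE>
__global__ void blockSumKernel(const float *in, float *out, int n)
{
    __shared__ float sums[BLOCK_SIZE];
    const int tid = threadIdx.x;
    const int gid = blockIdx.x * blockDim.x + threadIdx.x;

    // Each thread contributes one value (zero when out of range).
    sums[tid] = (gid < n) ? in[gid] : 0.0f;

    // Shared-memory tree reduction, as in mean_kernel above.
    for (int stride = BLOCK_SIZE / 2; stride > 0; stride /= 2) {
        __syncthreads();
        if (tid < stride) sums[tid] += sums[tid + stride];
    }

    // One atomic add per block into the single output value.
    if (tid == 0) atomicAdd(out, sums[0]);
}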
c3424d1e7c5f0d676d1f2c01b61e6cce7430be57.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "task_generatesubsparseoffset.cuh"

template<typename T>
__global__ void d_setParamsUsed( DeviceMemory<T>* mem, int nParamsUsed )
{
    mem->nParamsUsed = nParamsUsed;
}

template<typename T>
void hd_generateSubSparseOffsets( DeviceMemory<T>& mem )
{
    int nParamsUsed = 0;
    int curOffset = 0;

    hipMemcpy( mem.h_paramsUsedBuffer, mem.paramsUsed, sizeof( bool ) * mem.nParamPoints, hipMemcpyDeviceToHost );

    for( int i = 0; i < mem.nParams; i++ )
    {
        if( mem.h_paramsUsedBuffer[i % mem.nParamPoints] )
        {
            nParamsUsed++;
        }
        else
        {
            curOffset++;
        }
        mem.h_subSparseOffsetBuffer[i] = curOffset;
    }

    mem.nParamsUsed = nParamsUsed;

    hipMemcpy( mem.subSparseOffsets, mem.h_subSparseOffsetBuffer, sizeof( int ) * mem.nParams, hipMemcpyHostToDevice );

    hipLaunchKernelGGL(( d_setParamsUsed), dim3(1), dim3(1), 0, 0, mem.d_mem, nParamsUsed );
}

template void hd_generateSubSparseOffsets( DeviceMemory<float>& mem );
template void hd_generateSubSparseOffsets( DeviceMemory<double>& mem );
c3424d1e7c5f0d676d1f2c01b61e6cce7430be57.cu
#include "task_generatesubsparseoffset.cuh" template<typename T> __global__ void d_setParamsUsed( DeviceMemory<T>* mem, int nParamsUsed ) { mem->nParamsUsed = nParamsUsed; } template<typename T> void hd_generateSubSparseOffsets( DeviceMemory<T>& mem ) { int nParamsUsed = 0; int curOffset = 0; cudaMemcpy( mem.h_paramsUsedBuffer, mem.paramsUsed, sizeof( bool ) * mem.nParamPoints, cudaMemcpyDeviceToHost ); for( int i = 0; i < mem.nParams; i++ ) { if( mem.h_paramsUsedBuffer[i % mem.nParamPoints] ) { nParamsUsed++; } else { curOffset++; } mem.h_subSparseOffsetBuffer[i] = curOffset; } mem.nParamsUsed = nParamsUsed; cudaMemcpy( mem.subSparseOffsets, mem.h_subSparseOffsetBuffer, sizeof( int ) * mem.nParams, cudaMemcpyHostToDevice ); d_setParamsUsed<<< 1, 1 >>>( mem.d_mem, nParamsUsed ); } template void hd_generateSubSparseOffsets( DeviceMemory<float>& mem ); template void hd_generateSubSparseOffsets( DeviceMemory<double>& mem );
3e127cf1c17736ba7c726bc531cfea3f8c3c707b.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void SetElement(float *vector, int position, float what)
{
    vector[position] = what;
}
3e127cf1c17736ba7c726bc531cfea3f8c3c707b.cu
#include "includes.h" __global__ void SetElement(float *vector , int position , float what) { vector[position] = what; }
cc8576681871080a739237f12c8ee600bcfc8fbc.hip
// !!! This is a file automatically generated by hipify!!! // include libraries #include <stdio.h> #include <math.h> #include <omp.h> #include "rocblas.h" #include "hip/hip_runtime.h" #define nstreams 1 int main () { // banner printf ("\n\nGPU DGEMM Exercise\n"); printf ( "==========================================\n"); printf ( "\nTiled Matrix-Matrix Multiplication\n"); printf ( "Using NVIDIA cuBLAS Library\n"); // echo device data int idevice = 0; hipSetDevice(idevice); hipDeviceProp_t dprops; hipGetDeviceProperties( &dprops, idevice ); printf ("\nDevice name = %s, with compute capability %d.%d \n", dprops.name, dprops.major, dprops.minor); // define parameters int n = 32768; // matrix dimension - all matrices being multiplied will be square int m = 4096; // tile size - tiles will be square, n must be divisible by m !! printf ("\nMatrix sizes: %d x %d, tile size: %d x %d\n", n,n,m,m); if ( ( n % m ) != 0 ) { printf ("\nmatrix size (n) has to be devisible by tile size (m) !"); return 0 ; } printf ("Number of Streams: %d", nstreams); // allocate arrays double *a; double *b; double *c; a = (double *) malloc ( n*n*sizeof(double) ); b = (double *) malloc ( n*n*sizeof(double) ); c = (double *) malloc ( n*n*sizeof(double) ); // initialize data #pragma omp parallel for for ( int row = 0; row<n; row++ ) { for ( int col = 0; col<n; col++ ) { // data in row-major format a[row*n+col] = row + col; b[row*n+col] = (row == col ) ? 1.0 : 0.0; c[row*n+col] = 0.0; } } // create communcations arrays double *pa; double *pb; double *pc; hipMallocManaged ( &pa, m*m*sizeof(double) ); hipMallocManaged ( &pb, m*m*sizeof(double) ); hipMallocManaged ( &pc, m*m*sizeof(double) ); // create a handle to cuBlas hipblasHandle_t cublasHandle; hipblasCreate( &cublasHandle ); int ntiles = n/m; // record start time hipEvent_t t_start; hipEvent_t t_end; hipEvent_t compute_end; hipEventCreate (&t_start); hipEventCreate (&t_end); hipEventCreate (&compute_end); hipEventRecord (t_start,0); // caches for indices of previous tile to write back results // from pinned buffer to original result matrix int prowtile; int pcoltile; // PERFORM MULTIPLICATION { double alpha = 1.0; double beta = 1.0; int itile = 0; // loop over inner tile dimension for ( int iktile = 0; iktile < ntiles; iktile++ ) { // loop over row tiles for ( int irowtile = 0; irowtile < ntiles; irowtile++ ) { // loop over column tiles for ( int icoltile = 0; icoltile < ntiles; icoltile++ ) { if ( itile >= 1 ) { hipEventSynchronize (compute_end); // needed since hipblasDgemm call is asynchronous and copy is only done // on page fault (if results has already been written to pc // copy result in pinned buffer back to global matrix # pragma omp parallel for for ( int i=0; i<m; i++ ) { for ( int j=0; j<m; j++ ) { c[(prowtile*m+i)*n+pcoltile*m+j] = pc[i*m+j]; } } } // copy next tile to pinned buffer # pragma omp parallel for for ( int i = 0; i < m; i++ ) { for ( int j = 0; j < m; j++ ) { pa[i*m+j] = a[(irowtile*m+i)*n+iktile*m+j]; pb[i*m+j] = b[(iktile*m+i)*n+icoltile*m+j]; pc[i*m+j] = c[(irowtile*m+i)*n+icoltile*m+j]; } } // perform dgemm hipblasDgemm ( cublasHandle, HIPBLAS_OP_T, HIPBLAS_OP_T, m, m, m, &alpha, pa, m, pb, m, &beta, pc, m ); hipEventRecord (compute_end, 0); prowtile = irowtile; pcoltile = icoltile; // go to next tile itile++; } } } hipEventSynchronize (compute_end); // be sure that last hipblasDgemm call has finished // copy result in pinned buffer back to source # pragma omp parallel for for ( int i=0; i<m; i++ ) { for ( int j=0; j<m; j++ ) { 
c[(prowtile*m+i)*n+pcoltile*m+j] = pc[i*m+j]; } } } // END OF PERFORM MULTIPLICATION // record end time hipEventRecord (t_end,0); hipEventSynchronize(t_end); float et; hipEventElapsedTime (&et, t_start, t_end); // check results printf ("\nchecking results: "); bool correct = true; int num_errors = 0; double abs_error, sum_abs_errors = 0; # pragma omp parallel for for ( int row = 0; row < n; row++ ) { for ( int col = 0; col < n; col++ ) { abs_error = fabs(c[row * n + col] - a[row * n + col] ); sum_abs_errors += abs_error; if ( abs_error > 10e-1 ) { printf ("FAILED\n\nerror: c[%d]: %f != a[%d]: %f", row * n + col, c[row * n + col], row * n + col, a[row * n + col]); correct = false; ++num_errors; break; } } } // report results if ( correct ) { printf ("SUCCESS"); printf ("\nSum abs errors: %f", sum_abs_errors); printf ("\nNumber of errors: %d", num_errors); printf("\nExecution time: %4.4f seconds\n", (double)et/1000.); // hipEventElapsedTime is in milliseconds printf( "Gflop/s: %4.4f \n\n\n", 2.0e-6*n*n*n/et); // 2( * and + ) *n (inner dimension)*n^2(result size)/(time in ms.) } else { printf ("\nResult not correct (%d errors), check your code !\n", num_errors); } // clean up hipblasDestroy ( cublasHandle ); hipEventDestroy ( t_start ); hipEventDestroy ( t_end ); hipFree ( pa ); hipFree ( pb ); hipFree ( pc ); free (a); free (b); free (c); }
cc8576681871080a739237f12c8ee600bcfc8fbc.cu
// include libraries #include <stdio.h> #include <math.h> #include <omp.h> #include "cublas_v2.h" #include "cuda.h" #define nstreams 1 int main () { // banner printf ("\n\nGPU DGEMM Exercise\n"); printf ( "==========================================\n"); printf ( "\nTiled Matrix-Matrix Multiplication\n"); printf ( "Using NVIDIA cuBLAS Library\n"); // echo device data int idevice = 0; cudaSetDevice(idevice); cudaDeviceProp dprops; cudaGetDeviceProperties( &dprops, idevice ); printf ("\nDevice name = %s, with compute capability %d.%d \n", dprops.name, dprops.major, dprops.minor); // define parameters int n = 32768; // matrix dimension - all matrices being multiplied will be square int m = 4096; // tile size - tiles will be square, n must be divisible by m !! printf ("\nMatrix sizes: %d x %d, tile size: %d x %d\n", n,n,m,m); if ( ( n % m ) != 0 ) { printf ("\nmatrix size (n) has to be devisible by tile size (m) !"); return 0 ; } printf ("Number of Streams: %d", nstreams); // allocate arrays double *a; double *b; double *c; a = (double *) malloc ( n*n*sizeof(double) ); b = (double *) malloc ( n*n*sizeof(double) ); c = (double *) malloc ( n*n*sizeof(double) ); // initialize data #pragma omp parallel for for ( int row = 0; row<n; row++ ) { for ( int col = 0; col<n; col++ ) { // data in row-major format a[row*n+col] = row + col; b[row*n+col] = (row == col ) ? 1.0 : 0.0; c[row*n+col] = 0.0; } } // create communcations arrays double *pa; double *pb; double *pc; cudaMallocManaged ( &pa, m*m*sizeof(double) ); cudaMallocManaged ( &pb, m*m*sizeof(double) ); cudaMallocManaged ( &pc, m*m*sizeof(double) ); // create a handle to cuBlas cublasHandle_t cublasHandle; cublasCreate( &cublasHandle ); int ntiles = n/m; // record start time cudaEvent_t t_start; cudaEvent_t t_end; cudaEvent_t compute_end; cudaEventCreate (&t_start); cudaEventCreate (&t_end); cudaEventCreate (&compute_end); cudaEventRecord (t_start,0); // caches for indices of previous tile to write back results // from pinned buffer to original result matrix int prowtile; int pcoltile; // PERFORM MULTIPLICATION { double alpha = 1.0; double beta = 1.0; int itile = 0; // loop over inner tile dimension for ( int iktile = 0; iktile < ntiles; iktile++ ) { // loop over row tiles for ( int irowtile = 0; irowtile < ntiles; irowtile++ ) { // loop over column tiles for ( int icoltile = 0; icoltile < ntiles; icoltile++ ) { if ( itile >= 1 ) { cudaEventSynchronize (compute_end); // needed since cublasDgemm call is asynchronous and copy is only done // on page fault (if results has already been written to pc // copy result in pinned buffer back to global matrix # pragma omp parallel for for ( int i=0; i<m; i++ ) { for ( int j=0; j<m; j++ ) { c[(prowtile*m+i)*n+pcoltile*m+j] = pc[i*m+j]; } } } // copy next tile to pinned buffer # pragma omp parallel for for ( int i = 0; i < m; i++ ) { for ( int j = 0; j < m; j++ ) { pa[i*m+j] = a[(irowtile*m+i)*n+iktile*m+j]; pb[i*m+j] = b[(iktile*m+i)*n+icoltile*m+j]; pc[i*m+j] = c[(irowtile*m+i)*n+icoltile*m+j]; } } // perform dgemm cublasDgemm ( cublasHandle, CUBLAS_OP_T, CUBLAS_OP_T, m, m, m, &alpha, pa, m, pb, m, &beta, pc, m ); cudaEventRecord (compute_end, 0); prowtile = irowtile; pcoltile = icoltile; // go to next tile itile++; } } } cudaEventSynchronize (compute_end); // be sure that last cublasDgemm call has finished // copy result in pinned buffer back to source # pragma omp parallel for for ( int i=0; i<m; i++ ) { for ( int j=0; j<m; j++ ) { c[(prowtile*m+i)*n+pcoltile*m+j] = pc[i*m+j]; } } } // END OF PERFORM 
MULTIPLICATION // record end time cudaEventRecord (t_end,0); cudaEventSynchronize(t_end); float et; cudaEventElapsedTime (&et, t_start, t_end); // check results printf ("\nchecking results: "); bool correct = true; int num_errors = 0; double abs_error, sum_abs_errors = 0; # pragma omp parallel for for ( int row = 0; row < n; row++ ) { for ( int col = 0; col < n; col++ ) { abs_error = fabs(c[row * n + col] - a[row * n + col] ); sum_abs_errors += abs_error; if ( abs_error > 10e-1 ) { printf ("FAILED\n\nerror: c[%d]: %f != a[%d]: %f", row * n + col, c[row * n + col], row * n + col, a[row * n + col]); correct = false; ++num_errors; break; } } } // report results if ( correct ) { printf ("SUCCESS"); printf ("\nSum abs errors: %f", sum_abs_errors); printf ("\nNumber of errors: %d", num_errors); printf("\nExecution time: %4.4f seconds\n", (double)et/1000.); // cudaEventElapsedTime is in milliseconds printf( "Gflop/s: %4.4f \n\n\n", 2.0e-6*n*n*n/et); // 2( * and + ) *n (inner dimension)*n^2(result size)/(time in ms.) } else { printf ("\nResult not correct (%d errors), check your code !\n", num_errors); } // clean up cublasDestroy ( cublasHandle ); cudaEventDestroy ( t_start ); cudaEventDestroy ( t_end ); cudaFree ( pa ); cudaFree ( pb ); cudaFree ( pc ); free (a); free (b); free (c); }
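Inside the tiling loop above, each tile product reduces to a single DGEMM call on device-resident buffers. A minimal sketch of one such call in isolation, for n-by-n column-major matrices (hypothetical wrapper name; status checking omitted):

#include "cublas_v2.h"

// Compute C = alpha*A*B + beta*C for n-by-n column-major matrices that are
// already resident on the device.
void gemmOnce(cublasHandle_t handle, int n,
              const double *d_A, const double *d_B, double *d_C)
{
    const double alpha = 1.0;
    const double beta  = 0.0;
    cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N,
                n, n, n,
                &alpha, d_A, n,
                d_B, n,
                &beta, d_C, n);
}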
4c405881b28c25633db8aa91b7cacad3d38889dc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //-------------------------------------------------------------------------------- // NVIDIA(R) GVDB VOXELS // Copyright 2017, NVIDIA Corporation. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the distribution. // 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, // BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT // SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE // OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Version 1.0: Rama Hoetzlein, 5/1/2017 //---------------------------------------------------------------------------------- // File: cuda_gvdb_copydata.cu // // GVDB Data Transfers // - CopyData 3D volume into sub-volume // - CopyDataZYX 3D volume into sub-volume with ZYX swizzle // - RetreiveData 3D sub-volume into cuda buffer // - CopyTexToBuf 2D texture into cuda buffer // - CopyBufToTex cuda buffer into 2D texture //----------------------------------------------- #include "cuda_math.cuh" // Zero memory of 3D volume extern "C" __global__ void kernelFillTex ( int3 res, int dsize, hipSurfaceObject volTexOut ) { uint3 t = blockIdx * make_uint3(blockDim.x, blockDim.y, blockDim.z) + threadIdx; if ( t.x >= res.x || t.y >= res.y || t.z >= res.z ) return; surf3Dwrite ( 0, volTexOut, t.x*dsize, t.y, t.z ); } // Copy 3D texture into sub-volume of another 3D texture (char) extern "C" __global__ void kernelCopyTexC ( int3 offs, int3 res, hipTextureObject_t volTexInC, hipSurfaceObject volTexOut ) { uint3 t = blockIdx * make_uint3(blockDim.x, blockDim.y, blockDim.z) + threadIdx; if ( t.x >= res.x || t.y >= res.y || t.z >= res.z ) return; uchar val = tex3D<unsigned int>( volTexInC, t.x, t.y, t.z ); surf3Dwrite ( val, volTexOut, (t.x+offs.x)*sizeof(char), (t.y+offs.y), (t.z+offs.z) ); } // Copy 3D texture into sub-volume of another 3D texture (float) extern "C" __global__ void kernelCopyTexF ( int3 offs, int3 res, hipTextureObject_t volTexInF, hipSurfaceObject volTexOut ) { uint3 t = blockIdx * make_uint3(blockDim.x, blockDim.y, blockDim.z) + threadIdx; if ( t.x >= res.x || t.y >= res.y || t.z >= res.z ) return; float val = tex3D<float> ( volTexInF, t.x, t.y, t.z ); surf3Dwrite ( val, volTexOut, (t.x+offs.x)*sizeof(float), (t.y+offs.y), (t.z+offs.z) ); } // Copy linear memory as 3D volume into sub-volume of a 3D texture extern 
"C" __global__ void kernelCopyBufToTexC ( int3 offs, int3 res, uchar* inbuf, hipSurfaceObject volTexOut) { uint3 t = blockIdx * make_uint3(blockDim.x, blockDim.y, blockDim.z) + threadIdx; if ( t.x >= res.x || t.y >= res.y || t.z >= res.z ) return; unsigned char val = inbuf[ (t.z*res.y + t.y)*res.x + t.x ]; surf3Dwrite ( val, volTexOut, (t.x+offs.x)*sizeof(uchar), (t.y+offs.y), (t.z+offs.z) ); } // Copy linear memory as 3D volume into sub-volume of a 3D texture extern "C" __global__ void kernelCopyBufToTexF ( int3 offs, int3 res, float* inbuf, hipSurfaceObject volTexOut) { uint3 t = blockIdx * make_uint3(blockDim.x, blockDim.y, blockDim.z) + threadIdx; if ( t.x >= res.x || t.y >= res.y || t.z >= res.z ) return; float val = inbuf[ (t.z*res.y + t.y)*res.x + t.x ]; surf3Dwrite ( val, volTexOut, (t.x+offs.x)*sizeof(float), (t.y+offs.y), (t.z+offs.z) ); } // Copy 3D texture into sub-volume of another 3D texture with ZYX swizzle (float) extern "C" __global__ void kernelCopyTexZYX ( int3 offs, int3 res, hipTextureObject_t volTexInF, hipSurfaceObject volTexOut ) { uint3 t = blockIdx * make_uint3(blockDim.x, blockDim.y, blockDim.z) + threadIdx; if ( t.x >= res.x || t.y >= res.y || t.z >= res.z ) return; float val = tex3D<float>( volTexInF, t.z, t.y, t.x ); surf3Dwrite ( val, volTexOut, (t.x+offs.x)*sizeof(float), (t.y+offs.y), (t.z+offs.z) ); } // Retrieve 3D texture into linear memory (float) extern "C" __global__ void kernelRetrieveTexXYZ ( int3 offs, int3 brickRes, float* buf, hipTextureObject_t volTexInF ) { uint3 t = blockIdx * make_uint3(blockDim.x, blockDim.y, blockDim.z) + threadIdx; if ( t.x >= brickRes.x || t.y >= brickRes.y || t.z >= brickRes.z ) return; float val = tex3D<float> ( volTexInF, t.x+offs.x, t.y+offs.y, t.z+offs.z ); buf[ (t.x*brickRes.y + t.y)*brickRes.x + t.z ] = val; } // Copy 2D slice of 3D texture into 2D linear buffer extern "C" __global__ void kernelSliceTexToBufF ( int slice, int3 res, float* outbuf, hipTextureObject_t volTexInF ) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if ( x >= res.x || y >= res.y ) return; float val = tex3D<float> ( volTexInF, x, y, slice ); outbuf[ y*res.x + x ] = val; } extern "C" __global__ void kernelSliceTexToBufC ( int slice, int3 res, uchar* outbuf, hipTextureObject_t volTexInC ) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if ( x >= res.x || y >= res.y ) return; uchar val = tex3D<uchar> ( volTexInC, x, y, slice ); outbuf[ y*res.x + x ] = val; } // Copy 2D linear buffer into the 2D slice of a 3D texture extern "C" __global__ void kernelSliceBufToTexF ( int slice, int3 res, float* inbuf, hipSurfaceObject volTexOut ) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if ( x >= res.x || y >= res.y ) return; float val = inbuf[ y*res.x + x ]; surf3Dwrite ( val, volTexOut, x*sizeof(float), y, slice ); } extern "C" __global__ void kernelSliceBufToTexC ( int slice, int3 res, uchar* inbuf, hipSurfaceObject volTexOut ) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if ( x >= res.x || y >= res.y ) return; uchar val = inbuf[ y*res.x + x ]; surf3Dwrite ( val, volTexOut, x*sizeof(uchar), y, slice ); }
4c405881b28c25633db8aa91b7cacad3d38889dc.cu
//-------------------------------------------------------------------------------- // NVIDIA(R) GVDB VOXELS // Copyright 2017, NVIDIA Corporation. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the distribution. // 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, // BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT // SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE // OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Version 1.0: Rama Hoetzlein, 5/1/2017 //---------------------------------------------------------------------------------- // File: cuda_gvdb_copydata.cu // // GVDB Data Transfers // - CopyData 3D volume into sub-volume // - CopyDataZYX 3D volume into sub-volume with ZYX swizzle // - RetreiveData 3D sub-volume into cuda buffer // - CopyTexToBuf 2D texture into cuda buffer // - CopyBufToTex cuda buffer into 2D texture //----------------------------------------------- #include "cuda_math.cuh" // Zero memory of 3D volume extern "C" __global__ void kernelFillTex ( int3 res, int dsize, CUsurfObject volTexOut ) { uint3 t = blockIdx * make_uint3(blockDim.x, blockDim.y, blockDim.z) + threadIdx; if ( t.x >= res.x || t.y >= res.y || t.z >= res.z ) return; surf3Dwrite ( 0, volTexOut, t.x*dsize, t.y, t.z ); } // Copy 3D texture into sub-volume of another 3D texture (char) extern "C" __global__ void kernelCopyTexC ( int3 offs, int3 res, CUtexObject volTexInC, CUsurfObject volTexOut ) { uint3 t = blockIdx * make_uint3(blockDim.x, blockDim.y, blockDim.z) + threadIdx; if ( t.x >= res.x || t.y >= res.y || t.z >= res.z ) return; uchar val = tex3D<unsigned int>( volTexInC, t.x, t.y, t.z ); surf3Dwrite ( val, volTexOut, (t.x+offs.x)*sizeof(char), (t.y+offs.y), (t.z+offs.z) ); } // Copy 3D texture into sub-volume of another 3D texture (float) extern "C" __global__ void kernelCopyTexF ( int3 offs, int3 res, CUtexObject volTexInF, CUsurfObject volTexOut ) { uint3 t = blockIdx * make_uint3(blockDim.x, blockDim.y, blockDim.z) + threadIdx; if ( t.x >= res.x || t.y >= res.y || t.z >= res.z ) return; float val = tex3D<float> ( volTexInF, t.x, t.y, t.z ); surf3Dwrite ( val, volTexOut, (t.x+offs.x)*sizeof(float), (t.y+offs.y), (t.z+offs.z) ); } // Copy linear memory as 3D volume into sub-volume of a 3D texture extern "C" __global__ void kernelCopyBufToTexC ( int3 offs, int3 res, uchar* inbuf, CUsurfObject volTexOut) { uint3 t = 
blockIdx * make_uint3(blockDim.x, blockDim.y, blockDim.z) + threadIdx; if ( t.x >= res.x || t.y >= res.y || t.z >= res.z ) return; unsigned char val = inbuf[ (t.z*res.y + t.y)*res.x + t.x ]; surf3Dwrite ( val, volTexOut, (t.x+offs.x)*sizeof(uchar), (t.y+offs.y), (t.z+offs.z) ); } // Copy linear memory as 3D volume into sub-volume of a 3D texture extern "C" __global__ void kernelCopyBufToTexF ( int3 offs, int3 res, float* inbuf, CUsurfObject volTexOut) { uint3 t = blockIdx * make_uint3(blockDim.x, blockDim.y, blockDim.z) + threadIdx; if ( t.x >= res.x || t.y >= res.y || t.z >= res.z ) return; float val = inbuf[ (t.z*res.y + t.y)*res.x + t.x ]; surf3Dwrite ( val, volTexOut, (t.x+offs.x)*sizeof(float), (t.y+offs.y), (t.z+offs.z) ); } // Copy 3D texture into sub-volume of another 3D texture with ZYX swizzle (float) extern "C" __global__ void kernelCopyTexZYX ( int3 offs, int3 res, CUtexObject volTexInF, CUsurfObject volTexOut ) { uint3 t = blockIdx * make_uint3(blockDim.x, blockDim.y, blockDim.z) + threadIdx; if ( t.x >= res.x || t.y >= res.y || t.z >= res.z ) return; float val = tex3D<float>( volTexInF, t.z, t.y, t.x ); surf3Dwrite ( val, volTexOut, (t.x+offs.x)*sizeof(float), (t.y+offs.y), (t.z+offs.z) ); } // Retrieve 3D texture into linear memory (float) extern "C" __global__ void kernelRetrieveTexXYZ ( int3 offs, int3 brickRes, float* buf, CUtexObject volTexInF ) { uint3 t = blockIdx * make_uint3(blockDim.x, blockDim.y, blockDim.z) + threadIdx; if ( t.x >= brickRes.x || t.y >= brickRes.y || t.z >= brickRes.z ) return; float val = tex3D<float> ( volTexInF, t.x+offs.x, t.y+offs.y, t.z+offs.z ); buf[ (t.x*brickRes.y + t.y)*brickRes.x + t.z ] = val; } // Copy 2D slice of 3D texture into 2D linear buffer extern "C" __global__ void kernelSliceTexToBufF ( int slice, int3 res, float* outbuf, CUtexObject volTexInF ) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if ( x >= res.x || y >= res.y ) return; float val = tex3D<float> ( volTexInF, x, y, slice ); outbuf[ y*res.x + x ] = val; } extern "C" __global__ void kernelSliceTexToBufC ( int slice, int3 res, uchar* outbuf, CUtexObject volTexInC ) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if ( x >= res.x || y >= res.y ) return; uchar val = tex3D<uchar> ( volTexInC, x, y, slice ); outbuf[ y*res.x + x ] = val; } // Copy 2D linear buffer into the 2D slice of a 3D texture extern "C" __global__ void kernelSliceBufToTexF ( int slice, int3 res, float* inbuf, CUsurfObject volTexOut ) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if ( x >= res.x || y >= res.y ) return; float val = inbuf[ y*res.x + x ]; surf3Dwrite ( val, volTexOut, x*sizeof(float), y, slice ); } extern "C" __global__ void kernelSliceBufToTexC ( int slice, int3 res, uchar* inbuf, CUsurfObject volTexOut ) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if ( x >= res.x || y >= res.y ) return; uchar val = inbuf[ y*res.x + x ]; surf3Dwrite ( val, volTexOut, x*sizeof(uchar), y, slice ); }
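Every kernel in the file above writes through a surface object with surf3Dwrite, where the x coordinate is a byte offset while y and z are element indices. A minimal sketch of that pattern with a hypothetical fill kernel:

#include <cuda_runtime.h>

__global__ void fillSurface(cudaSurfaceObject_t surf, int3 res, float value)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    int z = blockIdx.z * blockDim.z + threadIdx.z;
    if (x >= res.x || y >= res.y || z >= res.z) return;

    // Only x is scaled by the element size because surf3Dwrite expects a
    // byte offset in x; y and z stay as element indices.
    surf3Dwrite(value, surf, x * (int)sizeof(float), y, z);
}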
06e485a82af7e1dcf3c160e7652a193de2e5e9dd.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime.h> #include "bfs_rec.h" #define QMAXLENGTH 10240000*10 #define GM_BUFF_SIZE 10240000*10 #ifndef THREADS_PER_BLOCK_FLAT //block size for flat parallelism #define THREADS_PER_BLOCK_FLAT 128 #endif #ifndef NUM_BLOCKS_FLAT #define NUM_BLOCKS_FLAT 256 #endif #ifndef THREADS_PER_BLOCK // nested kernel block size //#define THREADS_PER_BLOCK 64 #define THREADS_PER_BLOCK 128 #endif #ifndef CONSOLIDATE_LEVEL #define CONSOLIDATE_LEVEL 0 #endif #define STREAMS 0 #include "bfs_rec_kernel.cu" int *d_vertexArray; int *d_edgeArray; int *d_levelArray; int *d_work_queue; char *d_frontier; char *d_update; unsigned int *d_queue_length; unsigned int *d_nonstop; dim3 dimGrid(1,1,1); // thread+bitmap dim3 dimBlock(1,1,1); //char *update = new char [noNodeTotal] (); //int *queue = new int [queue_max_length]; unsigned int queue_max_length = QMAXLENGTH; unsigned int queue_length = 0; unsigned int nonstop = 0; inline void cudaCheckError(const char* file, int line, hipError_t ce) { if (ce != hipSuccess){ printf("Error: file %s, line %d %s\n", file, line, hipGetErrorString(ce)); exit(1); } } void prepare_gpu() { start_time = gettime_ms(); hipFree(NULL); end_time = gettime_ms(); init_time += end_time - start_time; if (DEBUG) { fprintf(stderr, "Choose CUDA device: %d\n", config.device_num); fprintf(stderr, "hipSetDevice:\t\t%lf\n",end_time-start_time); } start_time = gettime_ms(); size_t limit = 0; if (DEBUG) { cudaCheckError( __FILE__, __LINE__, hipDeviceGetLimit(&limit, hipLimitMallocHeapSize)); printf("cudaLimistMallocHeapSize: %u\n", (unsigned)limit); } limit = 102400000; cudaCheckError( __FILE__, __LINE__, hipDeviceSetLimit(hipLimitMallocHeapSize, limit)); if (DEBUG) { cudaCheckError( __FILE__, __LINE__, hipDeviceGetLimit(&limit, hipLimitMallocHeapSize)); printf("cudaLimistMallocHeapSize: %u\n", (unsigned)limit); } end_time = gettime_ms(); //fprintf(stderr, "Set Heap Size:\t\t%.2lf ms.\n", end_time-start_time); /* Allocate GPU memory */ start_time = gettime_ms(); cudaCheckError( __FILE__, __LINE__, hipMalloc( (void**)&d_vertexArray, sizeof(int)*(noNodeTotal+1) ) ); cudaCheckError( __FILE__, __LINE__, hipMalloc( (void**)&d_edgeArray, sizeof(int)*noEdgeTotal ) ); cudaCheckError( __FILE__, __LINE__, hipMalloc( (void**)&d_levelArray, sizeof(int)*noNodeTotal ) ); printf("DEBUG levelArray : %d \n", noNodeTotal); //cudaCheckError( __LINE__, hipMalloc( (void**)&d_nonstop, sizeof(unsigned int) ) ); end_time = gettime_ms(); d_malloc_time += end_time - start_time; start_time = gettime_ms(); cudaCheckError( __FILE__, __LINE__, hipMemcpy( d_vertexArray, graph.vertexArray, sizeof(int)*(noNodeTotal+1), hipMemcpyHostToDevice) ); cudaCheckError( __FILE__, __LINE__, hipMemcpy( d_edgeArray, graph.edgeArray, sizeof(int)*noEdgeTotal, hipMemcpyHostToDevice) ); //copy the level array from CPU to GPU cudaCheckError( __FILE__, __LINE__, hipMemcpy( d_levelArray, graph.levelArray, sizeof(int)*noNodeTotal, hipMemcpyHostToDevice) ); end_time = gettime_ms(); h2d_memcpy_time += end_time - start_time; } void clean_gpu() { hipFree(d_vertexArray); hipFree(d_edgeArray); hipFree(d_levelArray); } // ---------------------------------------------------------- // version #0 - flat parallelism - level-based BFS traversal // ---------------------------------------------------------- void bfs_flat_gpu() { /* prepare GPU */ bool queue_empty = false; bool *d_queue_empty; cudaCheckError( __FILE__, __LINE__, hipMalloc( &d_queue_empty, sizeof(bool)) ); // 
printf("Grid configuration gridxblocks, %d x %d\n", NUM_BLOCKS_FLAT, THREADS_PER_BLOCK_FLAT); if (DEBUG) printf("===> GPU #%d - flat parallelism, gridSize: %d, blockSize: %d\n", config.solution, 1, THREADS_PER_BLOCK_FLAT); unsigned level = 0; //level-based traversal while (!queue_empty){ cudaCheckError( __FILE__, __LINE__, hipMemset( d_queue_empty, true, sizeof(bool)) ); hipLaunchKernelGGL(( bfs_kernel_flat), dim3(1), dim3(THREADS_PER_BLOCK_FLAT), 0, 0, level,noNodeTotal, d_vertexArray, d_edgeArray, d_levelArray, d_queue_empty); cudaCheckError( __FILE__, __LINE__, hipGetLastError()); cudaCheckError( __FILE__, __LINE__, hipMemcpy( &queue_empty, d_queue_empty, sizeof(bool), hipMemcpyDeviceToHost) ); level++; } if (DEBUG) printf("===> GPU #%d - flat parallelism.\n", config.solution); } // ---------------------------------------------------------- // version #1 - dynamic parallelism - naive // ---------------------------------------------------------- void bfs_rec_dp_naive_gpu() { hipEvent_t start, stop; float time; /* prepare GPU */ int children = graph.vertexArray[source+1]-graph.vertexArray[source]; unsigned block_size = min (children, THREADS_PER_BLOCK); if (DEBUG) printf("===> GPU #%d - nested parallelism naive gridSize: %d bockSize: %d.\n", config.solution, 1, block_size); hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); hipLaunchKernelGGL(( bfs_kernel_dp), dim3(1),dim3(block_size), 0, 0, source, d_vertexArray, d_edgeArray, d_levelArray); cudaCheckError( __FILE__, __LINE__, hipGetLastError()); cudaCheckError( __FILE__, __LINE__, hipDeviceSynchronize()); hipEventRecord(stop, 0); hipEventSynchronize(stop); //Display time hipEventElapsedTime(&time, start, stop); printf("\tParallel Job time: %.2f ms", time); if (DEBUG) printf("===> GPU #%d - nested parallelism naive.\n", config.solution); } // ---------------------------------------------------------- // version #2 - dynamic parallelism - hierarchical // ---------------------------------------------------------- void bfs_rec_dp_hier_gpu() { //recursive BFS traversal - hierarchical int children = graph.vertexArray[source+1]-graph.vertexArray[source]; hipLaunchKernelGGL(( bfs_kernel_dp_hier), dim3(children), dim3(THREADS_PER_BLOCK), 0, 0, source, d_vertexArray, d_edgeArray, d_levelArray); cudaCheckError( __FILE__, __LINE__, hipGetLastError()); cudaCheckError( __FILE__, __LINE__, hipDeviceSynchronize()); if (DEBUG) printf("===> GPU #%d - nested parallelism hierarchical %f.\n", config.solution, gettime_ms()-start_time); } // ---------------------------------------------------------- // version #3 - dynamic parallelism - consolidation // ---------------------------------------------------------- void bfs_rec_dp_cons_gpu() { //recursive BFS traversal - dynamic parallelism consolidation unsigned int *d_buffer; unsigned int *d_idx; cudaCheckError( __FILE__, __LINE__, hipMalloc( &d_buffer, sizeof(unsigned int)*GM_BUFF_SIZE) ); cudaCheckError( __FILE__, __LINE__, hipMalloc( &d_idx, sizeof(unsigned int)) ); hipLaunchKernelGGL(( bfs_kernel_dp_cons_prepare), dim3(1),dim3(1), 0, 0, d_levelArray, d_buffer, d_idx, source); int children = 1; switch (config.solution) { case 3: if (DEBUG) fprintf(stdout, "warp level consolidation\n"); hipLaunchKernelGGL(( bfs_kernel_dp_warp_cons), dim3(children), dim3(THREADS_PER_BLOCK), 0, 0, d_vertexArray, d_edgeArray, d_levelArray, d_buffer, children, d_buffer, d_idx); //bfs_kernel_dp_warp_cons_back<<<children, THREADS_PER_BLOCK>>>(d_vertexArray, d_edgeArray, d_levelArray, // d_buffer, d_buffer, 
d_idx); //bfs_kernel_dp_warp_malloc_cons<<<children, THREADS_PER_BLOCK>>>(d_vertexArray, d_edgeArray, d_levelArray, // d_buffer, d_buffer, d_idx); break; case 4: if (DEBUG) fprintf(stdout, "block level consolidation\n"); hipLaunchKernelGGL(( bfs_kernel_dp_block_cons), dim3(children), dim3(THREADS_PER_BLOCK), 0, 0, d_vertexArray, d_edgeArray, d_levelArray, d_buffer, children, d_buffer, d_idx); //bfs_kernel_dp_block_malloc_cons<<<children, THREADS_PER_BLOCK>>>(d_vertexArray, d_edgeArray, d_levelArray, // d_buffer, d_buffer, d_idx); break; case 5: // queue and buffer are different // buffer stores the active working set unsigned int *d_queue; unsigned int *d_qidx; unsigned int *d_count; cudaCheckError( __FILE__, __LINE__, hipMalloc( &d_queue, sizeof(unsigned int)*GM_BUFF_SIZE) ); cudaCheckError( __FILE__, __LINE__, hipMalloc( &d_qidx, sizeof(unsigned int)) ); cudaCheckError( __FILE__, __LINE__, hipMalloc( &d_count, sizeof(unsigned int)) ); cudaCheckError( __FILE__, __LINE__, hipMemset( d_qidx, 0, sizeof(unsigned int)) ); cudaCheckError( __FILE__, __LINE__, hipMemset( d_count, 0, sizeof(unsigned int)) ); if (DEBUG) fprintf(stdout, "grid level consolidation\n"); // by default, it utilize malloc hipLaunchKernelGGL(( dp_grid_cons_init), dim3(1),dim3(1), 0, 0, ); hipLaunchKernelGGL(( bfs_kernel_dp_grid_cons), dim3(children), dim3(THREADS_PER_BLOCK), 0, 0, d_vertexArray, d_edgeArray, d_levelArray, d_buffer, d_idx, d_queue, d_qidx, d_count); /* bfs_kernel_dp_grid_malloc_cons<<<children, THREADS_PER_BLOCK>>>(d_vertexArray, d_edgeArray, d_levelArray, d_buffer, d_idx, d_queue, d_qidx, d_count); */ break; default: printf("Unsopported solutions\n"); exit(0); } hipDeviceSynchronize(); cudaCheckError( __FILE__, __LINE__, hipGetLastError()); cudaCheckError( __FILE__, __LINE__, hipDeviceSynchronize()); if (DEBUG) printf("===> GPU #%d - nested parallelism consolidation %f.\n", config.solution, end_time-start_time); //gpu_print<<<1,1>>>(d_idx); cudaCheckError( __FILE__, __LINE__, hipFree(d_buffer) ); cudaCheckError( __FILE__, __LINE__, hipFree(d_idx) ); #if (CONSOLIDATE_LEVEL==2) cudaCheckError( __FILE__, __LINE__, hipFree(d_queue) ); cudaCheckError( __FILE__, __LINE__, hipFree(d_qidx) ); cudaCheckError( __FILE__, __LINE__, hipFree(d_count) ); #endif } // ---------------------------------------------------------- // version #6 - recursive GPU bfs // ---------------------------------------------------------- void bfs_rec() { hipEvent_t start, stop; float time; /* prepare GPU */ int children = graph.vertexArray[source+1]-graph.vertexArray[source]; unsigned block_size = min (children, THREADS_PER_BLOCK); if (DEBUG) printf("===> GPU #%d - BFS rec. 
gridSize: %d blockSize: %d\n", config.solution, 1, block_size); hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); hipLaunchKernelGGL(( bfs_kernel_rec), dim3(1),dim3(block_size), 0, 0, source, d_vertexArray, d_edgeArray, d_levelArray); cudaCheckError( __FILE__, __LINE__, hipGetLastError()); cudaCheckError( __FILE__, __LINE__, hipDeviceSynchronize()); hipEventRecord(stop, 0); hipEventSynchronize(stop); //Display time hipEventElapsedTime(&time, start, stop); printf("\tParallel Job time: %.2f ms", time); if (DEBUG) printf("===> GPU #%d - BFS rec.\n", config.solution); } // ---------------------------------------------------------- // version #7 - flat parallelism pure GPU // ---------------------------------------------------------- void bfs_flat_pure_gpu() { hipEvent_t start, stop; float time; /* prepare GPU */ // bool queue_empty = false; // bool *d_queue_empty; // cudaCheckError( __FILE__, __LINE__, hipMalloc( &d_queue_empty, sizeof(bool)) ); //printf("Grid configuration gridxblocks, %d x %d\n", NUM_BLOCKS_FLAT, THREADS_PER_BLOCK_FLAT); if (DEBUG) printf("===> GPU #%d - flat pure gpu parallelism. gridSize: 1, blockSize: 1\n", config.solution); unsigned level = 0; //level-based traversal // while (!queue_empty){ // cudaCheckError( __FILE__, __LINE__, hipMemset( d_queue_empty, true, sizeof(bool)) ); hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); hipLaunchKernelGGL(( bfs_kernel_flat_gpu), dim3(1), dim3(1), 0, 0, level, noNodeTotal, d_vertexArray, d_edgeArray, d_levelArray); cudaCheckError( __FILE__, __LINE__, hipGetLastError()); // cudaCheckError( __FILE__, __LINE__, hipMemcpy( &queue_empty, d_queue_empty, sizeof(bool), hipMemcpyDeviceToHost) ); cudaCheckError( __FILE__, __LINE__, hipDeviceSynchronize()); hipEventRecord(stop, 0); hipEventSynchronize(stop); //Display time hipEventElapsedTime(&time, start, stop); printf("\tParallel Job time: %.2f ms\n", time); // level++; // } if (DEBUG) printf("===> GPU #%d - flat pure gpu parallelism.\n", config.solution); } // ---------------------------------------------------------- // version #8 - GPU rec BFS Optimized // ---------------------------------------------------------- void bfs_flat_rec_optimized() { hipEvent_t start, stop; float time; /* prepare GPU */ // bool queue_empty = false; // bool *d_queue_empty; // cudaCheckError( __FILE__, __LINE__, hipMalloc( &d_queue_empty, sizeof(bool)) ); // printf("Grid configuration gridxblocks, %d x %d\n", NUM_BLOCKS_FLAT, THREADS_PER_BLOCK_FLAT); unsigned level = 0; if (DEBUG) printf("===> GPU #%d - rec gpu optimized parallelism. 
gridSize: %d, blockSize: %d\n", config.solution, 1, 32); // unsigned block_size = min(THREADS_PER_BLOCK, noNodeTotal); // unsigned grid_size = (noNodeTotal+ block_size-1)/block_size; //level-based traversal // cudaCheckError( __FILE__, __LINE__, hipMemset( d_queue_empty, false, sizeof(bool)) ); hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); hipLaunchKernelGGL(( bfs_kernel_recOptimized), dim3(1), dim3(32), 0, 0, level, noNodeTotal, d_vertexArray, d_edgeArray, d_levelArray); cudaCheckError( __FILE__, __LINE__, hipGetLastError()); // cudaCheckError( __FILE__, __LINE__, hipMemcpy( &queue_empty, d_queue_empty, sizeof(bool), hipMemcpyDeviceToHost) ); cudaCheckError( __FILE__, __LINE__, hipDeviceSynchronize()); hipEventRecord(stop, 0); hipEventSynchronize(stop); //Display time hipEventElapsedTime(&time, start, stop); printf("\tParallel Job time: %.2f ms\n", time); if (DEBUG) printf("===> GPU #%d - rec gpu optimized parallelism.\n", config.solution); } void BFS_REC_GPU() { cudaCheckError( __FILE__, __LINE__, hipSetDevice(config.device_num) ); cudaCheckError( __FILE__, __LINE__, hipDeviceReset()); prepare_gpu(); #ifdef GPU_PROFILE hipLaunchKernelGGL(( reset_gpu_statistics), dim3(1),dim3(1), 0, 0, ); hipDeviceSynchronize(); #endif #ifdef GPU_WORKEFFICIENCY hipLaunchKernelGGL(( reset_gpu_statisticsWE), dim3(1),dim3(1), 0, 0, ); hipDeviceSynchronize(); #endif start_time = gettime_ms(); switch (config.solution) { case 0: bfs_flat_gpu(); // break; case 1: bfs_rec_dp_naive_gpu(); // break; case 2: bfs_rec_dp_hier_gpu(); // break; case 3: case 4: case 5: bfs_rec_dp_cons_gpu(); // break; case 6: bfs_rec(); // break; case 7: bfs_flat_pure_gpu(); // break; case 8: bfs_flat_rec_optimized(); // break; default: break; } cudaCheckError( __FILE__, __LINE__, hipDeviceSynchronize() ); end_time = gettime_ms(); ker_exe_time += end_time - start_time; #ifdef GPU_PROFILE hipLaunchKernelGGL(( gpu_statistics), dim3(1),dim3(1), 0, 0, config.solution); hipDeviceSynchronize(); #endif #ifdef GPU_WORKEFFICIENCY hipLaunchKernelGGL(( gpu_statisticsWE), dim3(1),dim3(1), 0, 0, config.solution); hipDeviceSynchronize(); #endif //copy the level array from GPU to CPU; start_time = gettime_ms(); cudaCheckError( __FILE__, __LINE__, hipMemcpy( graph.levelArray, d_levelArray, sizeof(unsigned)*noNodeTotal, hipMemcpyDeviceToHost) ); end_time = gettime_ms(); d2h_memcpy_time += end_time - start_time; clean_gpu(); }
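The host loop in bfs_flat_gpu relaunches bfs_kernel_flat once per BFS level and stops when a whole launch adds nothing to the frontier (d_queue_empty stays true). The kernel itself is defined in bfs_rec_kernel.cu, which is not included here; the sketch below shows what such a level-synchronous kernel typically looks like. The name bfs_kernel_flat_sketch and the convention that unvisited vertices hold a negative level are assumptions.

// Hedged sketch of a level-synchronous BFS kernel over a CSR graph; not the
// project's bfs_kernel_flat, just the general pattern it relies on.
__global__ void bfs_kernel_flat_sketch(unsigned level, int numNodes,
                                       const int *vertexArray, const int *edgeArray,
                                       int *levelArray, bool *queue_empty)
{
    // grid-stride loop over all vertices
    for (int node = blockIdx.x * blockDim.x + threadIdx.x; node < numNodes;
         node += gridDim.x * blockDim.x) {
        if (levelArray[node] != (int)level) continue;   // expand only the current frontier
        for (int e = vertexArray[node]; e < vertexArray[node + 1]; ++e) {
            int nbr = edgeArray[e];
            if (levelArray[nbr] < 0) {                  // assumed unvisited marker (-1)
                levelArray[nbr] = (int)level + 1;       // benign race: all writers store level+1
                *queue_empty = false;                   // another level pass is needed
            }
        }
    }
}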
06e485a82af7e1dcf3c160e7652a193de2e5e9dd.cu
#include <stdio.h> #include <cuda.h> #include "bfs_rec.h" #define QMAXLENGTH 10240000*10 #define GM_BUFF_SIZE 10240000*10 #ifndef THREADS_PER_BLOCK_FLAT //block size for flat parallelism #define THREADS_PER_BLOCK_FLAT 128 #endif #ifndef NUM_BLOCKS_FLAT #define NUM_BLOCKS_FLAT 256 #endif #ifndef THREADS_PER_BLOCK // nested kernel block size //#define THREADS_PER_BLOCK 64 #define THREADS_PER_BLOCK 128 #endif #ifndef CONSOLIDATE_LEVEL #define CONSOLIDATE_LEVEL 0 #endif #define STREAMS 0 #include "bfs_rec_kernel.cu" int *d_vertexArray; int *d_edgeArray; int *d_levelArray; int *d_work_queue; char *d_frontier; char *d_update; unsigned int *d_queue_length; unsigned int *d_nonstop; dim3 dimGrid(1,1,1); // thread+bitmap dim3 dimBlock(1,1,1); //char *update = new char [noNodeTotal] (); //int *queue = new int [queue_max_length]; unsigned int queue_max_length = QMAXLENGTH; unsigned int queue_length = 0; unsigned int nonstop = 0; inline void cudaCheckError(const char* file, int line, cudaError_t ce) { if (ce != cudaSuccess){ printf("Error: file %s, line %d %s\n", file, line, cudaGetErrorString(ce)); exit(1); } } void prepare_gpu() { start_time = gettime_ms(); cudaFree(NULL); end_time = gettime_ms(); init_time += end_time - start_time; if (DEBUG) { fprintf(stderr, "Choose CUDA device: %d\n", config.device_num); fprintf(stderr, "cudaSetDevice:\t\t%lf\n",end_time-start_time); } start_time = gettime_ms(); size_t limit = 0; if (DEBUG) { cudaCheckError( __FILE__, __LINE__, cudaDeviceGetLimit(&limit, cudaLimitMallocHeapSize)); printf("cudaLimistMallocHeapSize: %u\n", (unsigned)limit); } limit = 102400000; cudaCheckError( __FILE__, __LINE__, cudaDeviceSetLimit(cudaLimitMallocHeapSize, limit)); if (DEBUG) { cudaCheckError( __FILE__, __LINE__, cudaDeviceGetLimit(&limit, cudaLimitMallocHeapSize)); printf("cudaLimistMallocHeapSize: %u\n", (unsigned)limit); } end_time = gettime_ms(); //fprintf(stderr, "Set Heap Size:\t\t%.2lf ms.\n", end_time-start_time); /* Allocate GPU memory */ start_time = gettime_ms(); cudaCheckError( __FILE__, __LINE__, cudaMalloc( (void**)&d_vertexArray, sizeof(int)*(noNodeTotal+1) ) ); cudaCheckError( __FILE__, __LINE__, cudaMalloc( (void**)&d_edgeArray, sizeof(int)*noEdgeTotal ) ); cudaCheckError( __FILE__, __LINE__, cudaMalloc( (void**)&d_levelArray, sizeof(int)*noNodeTotal ) ); printf("DEBUG levelArray : %d \n", noNodeTotal); //cudaCheckError( __LINE__, cudaMalloc( (void**)&d_nonstop, sizeof(unsigned int) ) ); end_time = gettime_ms(); d_malloc_time += end_time - start_time; start_time = gettime_ms(); cudaCheckError( __FILE__, __LINE__, cudaMemcpy( d_vertexArray, graph.vertexArray, sizeof(int)*(noNodeTotal+1), cudaMemcpyHostToDevice) ); cudaCheckError( __FILE__, __LINE__, cudaMemcpy( d_edgeArray, graph.edgeArray, sizeof(int)*noEdgeTotal, cudaMemcpyHostToDevice) ); //copy the level array from CPU to GPU cudaCheckError( __FILE__, __LINE__, cudaMemcpy( d_levelArray, graph.levelArray, sizeof(int)*noNodeTotal, cudaMemcpyHostToDevice) ); end_time = gettime_ms(); h2d_memcpy_time += end_time - start_time; } void clean_gpu() { cudaFree(d_vertexArray); cudaFree(d_edgeArray); cudaFree(d_levelArray); } // ---------------------------------------------------------- // version #0 - flat parallelism - level-based BFS traversal // ---------------------------------------------------------- void bfs_flat_gpu() { /* prepare GPU */ bool queue_empty = false; bool *d_queue_empty; cudaCheckError( __FILE__, __LINE__, cudaMalloc( &d_queue_empty, sizeof(bool)) ); // printf("Grid configuration gridxblocks, %d x 
%d\n", NUM_BLOCKS_FLAT, THREADS_PER_BLOCK_FLAT); if (DEBUG) printf("===> GPU #%d - flat parallelism, gridSize: %d, blockSize: %d\n", config.solution, 1, THREADS_PER_BLOCK_FLAT); unsigned level = 0; //level-based traversal while (!queue_empty){ cudaCheckError( __FILE__, __LINE__, cudaMemset( d_queue_empty, true, sizeof(bool)) ); bfs_kernel_flat<<<1, THREADS_PER_BLOCK_FLAT>>>(level,noNodeTotal, d_vertexArray, d_edgeArray, d_levelArray, d_queue_empty); cudaCheckError( __FILE__, __LINE__, cudaGetLastError()); cudaCheckError( __FILE__, __LINE__, cudaMemcpy( &queue_empty, d_queue_empty, sizeof(bool), cudaMemcpyDeviceToHost) ); level++; } if (DEBUG) printf("===> GPU #%d - flat parallelism.\n", config.solution); } // ---------------------------------------------------------- // version #1 - dynamic parallelism - naive // ---------------------------------------------------------- void bfs_rec_dp_naive_gpu() { cudaEvent_t start, stop; float time; /* prepare GPU */ int children = graph.vertexArray[source+1]-graph.vertexArray[source]; unsigned block_size = min (children, THREADS_PER_BLOCK); if (DEBUG) printf("===> GPU #%d - nested parallelism naive gridSize: %d bockSize: %d.\n", config.solution, 1, block_size); cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); bfs_kernel_dp<<<1,block_size>>>(source, d_vertexArray, d_edgeArray, d_levelArray); cudaCheckError( __FILE__, __LINE__, cudaGetLastError()); cudaCheckError( __FILE__, __LINE__, cudaDeviceSynchronize()); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); //Display time cudaEventElapsedTime(&time, start, stop); printf("\tParallel Job time: %.2f ms", time); if (DEBUG) printf("===> GPU #%d - nested parallelism naive.\n", config.solution); } // ---------------------------------------------------------- // version #2 - dynamic parallelism - hierarchical // ---------------------------------------------------------- void bfs_rec_dp_hier_gpu() { //recursive BFS traversal - hierarchical int children = graph.vertexArray[source+1]-graph.vertexArray[source]; bfs_kernel_dp_hier<<<children, THREADS_PER_BLOCK>>>(source, d_vertexArray, d_edgeArray, d_levelArray); cudaCheckError( __FILE__, __LINE__, cudaGetLastError()); cudaCheckError( __FILE__, __LINE__, cudaDeviceSynchronize()); if (DEBUG) printf("===> GPU #%d - nested parallelism hierarchical %f.\n", config.solution, gettime_ms()-start_time); } // ---------------------------------------------------------- // version #3 - dynamic parallelism - consolidation // ---------------------------------------------------------- void bfs_rec_dp_cons_gpu() { //recursive BFS traversal - dynamic parallelism consolidation unsigned int *d_buffer; unsigned int *d_idx; cudaCheckError( __FILE__, __LINE__, cudaMalloc( &d_buffer, sizeof(unsigned int)*GM_BUFF_SIZE) ); cudaCheckError( __FILE__, __LINE__, cudaMalloc( &d_idx, sizeof(unsigned int)) ); bfs_kernel_dp_cons_prepare<<<1,1>>>(d_levelArray, d_buffer, d_idx, source); int children = 1; switch (config.solution) { case 3: if (DEBUG) fprintf(stdout, "warp level consolidation\n"); bfs_kernel_dp_warp_cons<<<children, THREADS_PER_BLOCK>>>(d_vertexArray, d_edgeArray, d_levelArray, d_buffer, children, d_buffer, d_idx); //bfs_kernel_dp_warp_cons_back<<<children, THREADS_PER_BLOCK>>>(d_vertexArray, d_edgeArray, d_levelArray, // d_buffer, d_buffer, d_idx); //bfs_kernel_dp_warp_malloc_cons<<<children, THREADS_PER_BLOCK>>>(d_vertexArray, d_edgeArray, d_levelArray, // d_buffer, d_buffer, d_idx); break; case 4: if (DEBUG) fprintf(stdout, "block level 
consolidation\n"); bfs_kernel_dp_block_cons<<<children, THREADS_PER_BLOCK>>>(d_vertexArray, d_edgeArray, d_levelArray, d_buffer, children, d_buffer, d_idx); //bfs_kernel_dp_block_malloc_cons<<<children, THREADS_PER_BLOCK>>>(d_vertexArray, d_edgeArray, d_levelArray, // d_buffer, d_buffer, d_idx); break; case 5: // queue and buffer are different // buffer stores the active working set unsigned int *d_queue; unsigned int *d_qidx; unsigned int *d_count; cudaCheckError( __FILE__, __LINE__, cudaMalloc( &d_queue, sizeof(unsigned int)*GM_BUFF_SIZE) ); cudaCheckError( __FILE__, __LINE__, cudaMalloc( &d_qidx, sizeof(unsigned int)) ); cudaCheckError( __FILE__, __LINE__, cudaMalloc( &d_count, sizeof(unsigned int)) ); cudaCheckError( __FILE__, __LINE__, cudaMemset( d_qidx, 0, sizeof(unsigned int)) ); cudaCheckError( __FILE__, __LINE__, cudaMemset( d_count, 0, sizeof(unsigned int)) ); if (DEBUG) fprintf(stdout, "grid level consolidation\n"); // by default, it utilize malloc dp_grid_cons_init<<<1,1>>>(); bfs_kernel_dp_grid_cons<<<children, THREADS_PER_BLOCK>>>(d_vertexArray, d_edgeArray, d_levelArray, d_buffer, d_idx, d_queue, d_qidx, d_count); /* bfs_kernel_dp_grid_malloc_cons<<<children, THREADS_PER_BLOCK>>>(d_vertexArray, d_edgeArray, d_levelArray, d_buffer, d_idx, d_queue, d_qidx, d_count); */ break; default: printf("Unsopported solutions\n"); exit(0); } cudaDeviceSynchronize(); cudaCheckError( __FILE__, __LINE__, cudaGetLastError()); cudaCheckError( __FILE__, __LINE__, cudaDeviceSynchronize()); if (DEBUG) printf("===> GPU #%d - nested parallelism consolidation %f.\n", config.solution, end_time-start_time); //gpu_print<<<1,1>>>(d_idx); cudaCheckError( __FILE__, __LINE__, cudaFree(d_buffer) ); cudaCheckError( __FILE__, __LINE__, cudaFree(d_idx) ); #if (CONSOLIDATE_LEVEL==2) cudaCheckError( __FILE__, __LINE__, cudaFree(d_queue) ); cudaCheckError( __FILE__, __LINE__, cudaFree(d_qidx) ); cudaCheckError( __FILE__, __LINE__, cudaFree(d_count) ); #endif } // ---------------------------------------------------------- // version #6 - recursive GPU bfs // ---------------------------------------------------------- void bfs_rec() { cudaEvent_t start, stop; float time; /* prepare GPU */ int children = graph.vertexArray[source+1]-graph.vertexArray[source]; unsigned block_size = min (children, THREADS_PER_BLOCK); if (DEBUG) printf("===> GPU #%d - BFS rec. gridSize: %d blockSize: %d\n", config.solution, 1, block_size); cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); bfs_kernel_rec<<<1,block_size>>>(source, d_vertexArray, d_edgeArray, d_levelArray); cudaCheckError( __FILE__, __LINE__, cudaGetLastError()); cudaCheckError( __FILE__, __LINE__, cudaDeviceSynchronize()); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); //Display time cudaEventElapsedTime(&time, start, stop); printf("\tParallel Job time: %.2f ms", time); if (DEBUG) printf("===> GPU #%d - BFS rec.\n", config.solution); } // ---------------------------------------------------------- // version #7 - flat parallelism pure GPU // ---------------------------------------------------------- void bfs_flat_pure_gpu() { cudaEvent_t start, stop; float time; /* prepare GPU */ // bool queue_empty = false; // bool *d_queue_empty; // cudaCheckError( __FILE__, __LINE__, cudaMalloc( &d_queue_empty, sizeof(bool)) ); //printf("Grid configuration gridxblocks, %d x %d\n", NUM_BLOCKS_FLAT, THREADS_PER_BLOCK_FLAT); if (DEBUG) printf("===> GPU #%d - flat pure gpu parallelism. 
gridSize: 1, blockSize: 1\n", config.solution); unsigned level = 0; //level-based traversal // while (!queue_empty){ // cudaCheckError( __FILE__, __LINE__, cudaMemset( d_queue_empty, true, sizeof(bool)) ); cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); bfs_kernel_flat_gpu<<<1, 1>>>(level, noNodeTotal, d_vertexArray, d_edgeArray, d_levelArray); cudaCheckError( __FILE__, __LINE__, cudaGetLastError()); // cudaCheckError( __FILE__, __LINE__, cudaMemcpy( &queue_empty, d_queue_empty, sizeof(bool), cudaMemcpyDeviceToHost) ); cudaCheckError( __FILE__, __LINE__, cudaDeviceSynchronize()); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); //Display time cudaEventElapsedTime(&time, start, stop); printf("\tParallel Job time: %.2f ms\n", time); // level++; // } if (DEBUG) printf("===> GPU #%d - flat pure gpu parallelism.\n", config.solution); } // ---------------------------------------------------------- // version #8 - GPU rec BFS Optimized // ---------------------------------------------------------- void bfs_flat_rec_optimized() { cudaEvent_t start, stop; float time; /* prepare GPU */ // bool queue_empty = false; // bool *d_queue_empty; // cudaCheckError( __FILE__, __LINE__, cudaMalloc( &d_queue_empty, sizeof(bool)) ); // printf("Grid configuration gridxblocks, %d x %d\n", NUM_BLOCKS_FLAT, THREADS_PER_BLOCK_FLAT); unsigned level = 0; if (DEBUG) printf("===> GPU #%d - rec gpu optimized parallelism. gridSize: %d, blockSize: %d\n", config.solution, 1, 32); // unsigned block_size = min(THREADS_PER_BLOCK, noNodeTotal); // unsigned grid_size = (noNodeTotal+ block_size-1)/block_size; //level-based traversal // cudaCheckError( __FILE__, __LINE__, cudaMemset( d_queue_empty, false, sizeof(bool)) ); cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); bfs_kernel_recOptimized<<<1, 32>>>(level, noNodeTotal, d_vertexArray, d_edgeArray, d_levelArray); cudaCheckError( __FILE__, __LINE__, cudaGetLastError()); // cudaCheckError( __FILE__, __LINE__, cudaMemcpy( &queue_empty, d_queue_empty, sizeof(bool), cudaMemcpyDeviceToHost) ); cudaCheckError( __FILE__, __LINE__, cudaDeviceSynchronize()); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); //Display time cudaEventElapsedTime(&time, start, stop); printf("\tParallel Job time: %.2f ms\n", time); if (DEBUG) printf("===> GPU #%d - rec gpu optimized parallelism.\n", config.solution); } void BFS_REC_GPU() { cudaCheckError( __FILE__, __LINE__, cudaSetDevice(config.device_num) ); cudaCheckError( __FILE__, __LINE__, cudaDeviceReset()); prepare_gpu(); #ifdef GPU_PROFILE reset_gpu_statistics<<<1,1>>>(); cudaDeviceSynchronize(); #endif #ifdef GPU_WORKEFFICIENCY reset_gpu_statisticsWE<<<1,1>>>(); cudaDeviceSynchronize(); #endif start_time = gettime_ms(); switch (config.solution) { case 0: bfs_flat_gpu(); // break; case 1: bfs_rec_dp_naive_gpu(); // break; case 2: bfs_rec_dp_hier_gpu(); // break; case 3: case 4: case 5: bfs_rec_dp_cons_gpu(); // break; case 6: bfs_rec(); // break; case 7: bfs_flat_pure_gpu(); // break; case 8: bfs_flat_rec_optimized(); // break; default: break; } cudaCheckError( __FILE__, __LINE__, cudaDeviceSynchronize() ); end_time = gettime_ms(); ker_exe_time += end_time - start_time; #ifdef GPU_PROFILE gpu_statistics<<<1,1>>>(config.solution); cudaDeviceSynchronize(); #endif #ifdef GPU_WORKEFFICIENCY gpu_statisticsWE<<<1,1>>>(config.solution); cudaDeviceSynchronize(); #endif //copy the level array from GPU to CPU; start_time = gettime_ms(); cudaCheckError( __FILE__, __LINE__, cudaMemcpy( 
graph.levelArray, d_levelArray, sizeof(unsigned)*noNodeTotal, cudaMemcpyDeviceToHost) ); end_time = gettime_ms(); d2h_memcpy_time += end_time - start_time; clean_gpu(); }
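The dynamic-parallelism variants (bfs_rec_dp_naive_gpu and friends) launch only a single kernel from the host; all further frontier expansion happens through device-side launches. bfs_kernel_dp is defined in bfs_rec_kernel.cu and is not reproduced here, so the sketch below is an assumption of the naive pattern: one child grid per newly relaxed vertex, compiled with -rdc=true and linked against cudadevrt.

// Hedged sketch of a naive dynamic-parallelism BFS kernel (not the project's
// bfs_kernel_dp). Levels only ever decrease, so the recursion terminates, but
// redundant child launches are possible -- that is the cost of the naive scheme.
__global__ void bfs_kernel_dp_sketch(int node, const int *vertexArray,
                                     const int *edgeArray, int *levelArray)
{
    int first  = vertexArray[node];
    int degree = vertexArray[node + 1] - first;

    for (int e = threadIdx.x; e < degree; e += blockDim.x) {
        int nbr = edgeArray[first + e];
        // relax: unvisited (assumed -1) or reachable on a shorter path via `node`
        if (levelArray[nbr] < 0 || levelArray[nbr] > levelArray[node] + 1) {
            levelArray[nbr] = levelArray[node] + 1;
            int children = vertexArray[nbr + 1] - vertexArray[nbr];
            if (children > 0) {
                int threads = children < 128 ? children : 128;
                // device-side launch (dynamic parallelism); deep graphs can hit the
                // device runtime's default launch-nesting limit
                bfs_kernel_dp_sketch<<<1, threads>>>(nbr, vertexArray, edgeArray, levelArray);
            }
        }
    }
}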
d7bd3484b2d68b0a9a83d2e9b253c17835caeb7f.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
 * JCuda - Java bindings for NVIDIA CUDA driver and runtime API
 * http://www.jcuda.org
 *
 *
 * This code is based on the NVIDIA 'reduction' CUDA sample,
 * Copyright 1993-2010 NVIDIA Corporation.
 */
extern "C"
__global__ void threads(float *g_idata, unsigned int n)
{
    // perform first level of reduction,
    // reading from global memory, writing to shared memory
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
    unsigned int gridSize = blockDim.x*2*gridDim.x;

    if (i < n)
    {
        printf("Hello world %d - %d : %f\n", tid, i, g_idata[i]);
    }
}
d7bd3484b2d68b0a9a83d2e9b253c17835caeb7f.cu
/*
 * JCuda - Java bindings for NVIDIA CUDA driver and runtime API
 * http://www.jcuda.org
 *
 *
 * This code is based on the NVIDIA 'reduction' CUDA sample,
 * Copyright 1993-2010 NVIDIA Corporation.
 */
extern "C"
__global__ void threads(float *g_idata, unsigned int n)
{
    // perform first level of reduction,
    // reading from global memory, writing to shared memory
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
    unsigned int gridSize = blockDim.x*2*gridDim.x;

    if (i < n)
    {
        printf("Hello world %d - %d : %f\n", tid, i, g_idata[i]);
    }
}
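The threads kernel above just prints one line per in-range thread, so a host driver only has to stage a small array and synchronize before exiting so the device-side printf buffer is flushed. The main below is an assumption (it is not part of the JCuda sample, which launches the kernel from Java) and presumes the kernel definition is compiled into the same program.

// Hypothetical host driver for the `threads` kernel above.
#include <cstdio>
#include <cuda_runtime.h>

extern "C" __global__ void threads(float *g_idata, unsigned int n);  // defined above

int main()
{
    const unsigned int n = 8;
    float h_data[n];
    for (unsigned int i = 0; i < n; ++i) h_data[i] = 0.5f * i;

    float *d_data = nullptr;
    cudaMalloc(&d_data, n * sizeof(float));
    cudaMemcpy(d_data, h_data, n * sizeof(float), cudaMemcpyHostToDevice);

    threads<<<1, 32>>>(d_data, n);   // one block of 32 threads covers n = 8
    cudaDeviceSynchronize();         // flush device-side printf output

    cudaFree(d_data);
    return 0;
}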
1b4bd84e97faa29a4582368068b6841b1dbf43dc.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/detail/copy_if.cuh> #include <cudf/detail/gather.hpp> #include <cudf/detail/indexalator.cuh> #include <cudf/detail/nvtx/ranges.hpp> #include <cudf/detail/search.hpp> #include <cudf/detail/valid_if.cuh> #include <cudf/dictionary/dictionary_column_view.hpp> #include <cudf/dictionary/dictionary_factories.hpp> #include <cudf/dictionary/update_keys.hpp> #include <cudf/table/table.hpp> #include <cudf/table/table_view.hpp> #include <thrust/scatter.h> #include <thrust/sequence.h> #include <thrust/transform.h> namespace cudf { namespace dictionary { namespace detail { namespace { /** * @brief Return a new dictionary by removing identified keys from the provided dictionary. * * This is a common utility for `remove_keys` and `remove_unused_keys` detail functions. * It will create a new dictionary with the remaining keys and create new indices values * to go with these new keys. * * @tparam KeysKeeper Function bool(size_type) that takes keys position index * and returns true if that key is to be used in the output dictionary. * @param dictionary_column The column to use for creating the new dictionary. * @param keys_to_keep_fn Called to determine which keys in `dictionary_column` to keep. * @param mr Device memory resource used to allocate the returned column's device memory. * @param stream CUDA stream used for device memory operations and kernel launches. 
*/ template <typename KeysKeeper> std::unique_ptr<column> remove_keys_fn( dictionary_column_view const& dictionary_column, KeysKeeper keys_to_keep_fn, rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource(), hipStream_t stream = 0) { auto const keys_view = dictionary_column.keys(); auto execpol = rmm::exec_policy(stream); auto const indices_type = dictionary_column.indices().type(); auto const max_size = dictionary_column.size(); // create/init indices map array auto map_indices = make_fixed_width_column(indices_type, keys_view.size(), mask_state::UNALLOCATED, stream); auto map_itr = cudf::detail::indexalator_factory::make_output_iterator(map_indices->mutable_view()); // init to max to identify new nulls thrust::fill(execpol->on(stream), map_itr, map_itr + keys_view.size(), max_size); // all valid indices are less than this value // build keys column and indices map std::unique_ptr<column> keys_column = [&] { // create keys positions column to identify original key positions after removing they keys auto keys_positions = [&] { auto positions = make_fixed_width_column( indices_type, keys_view.size(), cudf::mask_state::UNALLOCATED, stream); auto itr = cudf::detail::indexalator_factory::make_output_iterator(positions->mutable_view()); thrust::sequence(execpol->on(stream), itr, itr + keys_view.size()); return positions; }(); // copy the non-removed keys ( keys_to_keep_fn(idx)==true ) auto table_keys = cudf::detail::copy_if( table_view{{keys_view, keys_positions->view()}}, keys_to_keep_fn, mr, stream) ->release(); auto const filtered_view = table_keys[1]->view(); auto filtered_itr = cudf::detail::indexalator_factory::make_input_iterator(filtered_view); auto positions_itr = cudf::detail::indexalator_factory::make_input_iterator(keys_positions->view()); // build indices mapper // Example scatter([0,1,2][0,2,4][max,max,max,max,max]) => [0,max,1,max,2] thrust::scatter(execpol->on(stream), positions_itr, positions_itr + filtered_view.size(), filtered_itr, map_itr); return std::move(table_keys.front()); }(); // create non-nullable indices view with offset applied -- this is used as a gather map column_view indices_view(dictionary_column.indices().type(), dictionary_column.size(), dictionary_column.indices().head(), nullptr, 0, dictionary_column.offset()); // create new indices column // Example: gather([0,max,1,max,2],[4,0,3,1,2,2,2,4,0]) => [2,0,max,max,1,1,1,2,0] auto table_indices = cudf::detail::gather(table_view{{map_indices->view()}}, indices_view, cudf::detail::out_of_bounds_policy::NULLIFY, cudf::detail::negative_index_policy::NOT_ALLOWED, mr, stream) ->release(); std::unique_ptr<column> indices_column(std::move(table_indices.front())); // compute new nulls -- merge the existing nulls with the newly created ones (value<0) auto const offset = dictionary_column.offset(); auto d_null_mask = dictionary_column.null_mask(); auto indices_itr = cudf::detail::indexalator_factory::make_input_iterator(indices_column->view()); auto new_nulls = cudf::detail::valid_if( thrust::make_counting_iterator<size_type>(0), thrust::make_counting_iterator<size_type>(dictionary_column.size()), [offset, d_null_mask, indices_itr, max_size] __device__(size_type idx) { if (d_null_mask && !bit_is_set(d_null_mask, idx + offset)) return false; return (indices_itr[idx] < max_size); // new nulls have max values }, stream, mr); rmm::device_buffer new_null_mask = (new_nulls.second > 0) ? 
std::move(new_nulls.first) : rmm::device_buffer{0, stream, mr}; // create column with keys_column and indices_column return make_dictionary_column( std::move(keys_column), std::move(indices_column), std::move(new_null_mask), new_nulls.second); } } // namespace std::unique_ptr<column> remove_keys( dictionary_column_view const& dictionary_column, column_view const& keys_to_remove, rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource(), hipStream_t stream = 0) { CUDF_EXPECTS(!keys_to_remove.has_nulls(), "keys_to_remove must not have nulls"); auto const keys_view = dictionary_column.keys(); CUDF_EXPECTS(keys_view.type() == keys_to_remove.type(), "keys types must match"); // locate keys to remove by searching the keys column auto const matches = cudf::detail::contains(keys_view, keys_to_remove, mr, stream); auto d_matches = matches->view().data<bool>(); // call common utility method to keep the keys not matched to keys_to_remove auto key_matcher = [d_matches] __device__(size_type idx) { return !d_matches[idx]; }; return remove_keys_fn(dictionary_column, key_matcher, mr, stream); } std::unique_ptr<column> remove_unused_keys( dictionary_column_view const& dictionary_column, rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource(), hipStream_t stream = 0) { // locate the keys to remove auto const keys_size = dictionary_column.keys_size(); column_view indices_view = dictionary_column.get_indices_annotated(); // search the indices values with key indices to look for any holes auto const matches = [&] { // build keys index to verify against indices values rmm::device_uvector<uint32_t> keys_positions(keys_size, stream); thrust::sequence( rmm::exec_policy(stream)->on(stream), keys_positions.begin(), keys_positions.end()); // wrap the indices for comparison in contains() column_view keys_positions_view(data_type{type_id::UINT32}, keys_size, keys_positions.data()); return cudf::detail::contains(keys_positions_view, indices_view, mr, stream); }(); auto d_matches = matches->view().data<bool>(); // call common utility method to keep the keys that match auto key_matcher = [d_matches] __device__(size_type idx) { return d_matches[idx]; }; return remove_keys_fn(dictionary_column, key_matcher, mr, stream); } } // namespace detail // external APIs std::unique_ptr<column> remove_keys(dictionary_column_view const& dictionary_column, column_view const& keys_to_remove, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::remove_keys(dictionary_column, keys_to_remove, mr); } std::unique_ptr<column> remove_unused_keys(dictionary_column_view const& dictionary_column, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::remove_unused_keys(dictionary_column, mr); } } // namespace dictionary } // namespace cudf
1b4bd84e97faa29a4582368068b6841b1dbf43dc.cu
/* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/detail/copy_if.cuh> #include <cudf/detail/gather.hpp> #include <cudf/detail/indexalator.cuh> #include <cudf/detail/nvtx/ranges.hpp> #include <cudf/detail/search.hpp> #include <cudf/detail/valid_if.cuh> #include <cudf/dictionary/dictionary_column_view.hpp> #include <cudf/dictionary/dictionary_factories.hpp> #include <cudf/dictionary/update_keys.hpp> #include <cudf/table/table.hpp> #include <cudf/table/table_view.hpp> #include <thrust/scatter.h> #include <thrust/sequence.h> #include <thrust/transform.h> namespace cudf { namespace dictionary { namespace detail { namespace { /** * @brief Return a new dictionary by removing identified keys from the provided dictionary. * * This is a common utility for `remove_keys` and `remove_unused_keys` detail functions. * It will create a new dictionary with the remaining keys and create new indices values * to go with these new keys. * * @tparam KeysKeeper Function bool(size_type) that takes keys position index * and returns true if that key is to be used in the output dictionary. * @param dictionary_column The column to use for creating the new dictionary. * @param keys_to_keep_fn Called to determine which keys in `dictionary_column` to keep. * @param mr Device memory resource used to allocate the returned column's device memory. * @param stream CUDA stream used for device memory operations and kernel launches. 
*/ template <typename KeysKeeper> std::unique_ptr<column> remove_keys_fn( dictionary_column_view const& dictionary_column, KeysKeeper keys_to_keep_fn, rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource(), cudaStream_t stream = 0) { auto const keys_view = dictionary_column.keys(); auto execpol = rmm::exec_policy(stream); auto const indices_type = dictionary_column.indices().type(); auto const max_size = dictionary_column.size(); // create/init indices map array auto map_indices = make_fixed_width_column(indices_type, keys_view.size(), mask_state::UNALLOCATED, stream); auto map_itr = cudf::detail::indexalator_factory::make_output_iterator(map_indices->mutable_view()); // init to max to identify new nulls thrust::fill(execpol->on(stream), map_itr, map_itr + keys_view.size(), max_size); // all valid indices are less than this value // build keys column and indices map std::unique_ptr<column> keys_column = [&] { // create keys positions column to identify original key positions after removing they keys auto keys_positions = [&] { auto positions = make_fixed_width_column( indices_type, keys_view.size(), cudf::mask_state::UNALLOCATED, stream); auto itr = cudf::detail::indexalator_factory::make_output_iterator(positions->mutable_view()); thrust::sequence(execpol->on(stream), itr, itr + keys_view.size()); return positions; }(); // copy the non-removed keys ( keys_to_keep_fn(idx)==true ) auto table_keys = cudf::detail::copy_if( table_view{{keys_view, keys_positions->view()}}, keys_to_keep_fn, mr, stream) ->release(); auto const filtered_view = table_keys[1]->view(); auto filtered_itr = cudf::detail::indexalator_factory::make_input_iterator(filtered_view); auto positions_itr = cudf::detail::indexalator_factory::make_input_iterator(keys_positions->view()); // build indices mapper // Example scatter([0,1,2][0,2,4][max,max,max,max,max]) => [0,max,1,max,2] thrust::scatter(execpol->on(stream), positions_itr, positions_itr + filtered_view.size(), filtered_itr, map_itr); return std::move(table_keys.front()); }(); // create non-nullable indices view with offset applied -- this is used as a gather map column_view indices_view(dictionary_column.indices().type(), dictionary_column.size(), dictionary_column.indices().head(), nullptr, 0, dictionary_column.offset()); // create new indices column // Example: gather([0,max,1,max,2],[4,0,3,1,2,2,2,4,0]) => [2,0,max,max,1,1,1,2,0] auto table_indices = cudf::detail::gather(table_view{{map_indices->view()}}, indices_view, cudf::detail::out_of_bounds_policy::NULLIFY, cudf::detail::negative_index_policy::NOT_ALLOWED, mr, stream) ->release(); std::unique_ptr<column> indices_column(std::move(table_indices.front())); // compute new nulls -- merge the existing nulls with the newly created ones (value<0) auto const offset = dictionary_column.offset(); auto d_null_mask = dictionary_column.null_mask(); auto indices_itr = cudf::detail::indexalator_factory::make_input_iterator(indices_column->view()); auto new_nulls = cudf::detail::valid_if( thrust::make_counting_iterator<size_type>(0), thrust::make_counting_iterator<size_type>(dictionary_column.size()), [offset, d_null_mask, indices_itr, max_size] __device__(size_type idx) { if (d_null_mask && !bit_is_set(d_null_mask, idx + offset)) return false; return (indices_itr[idx] < max_size); // new nulls have max values }, stream, mr); rmm::device_buffer new_null_mask = (new_nulls.second > 0) ? 
std::move(new_nulls.first) : rmm::device_buffer{0, stream, mr}; // create column with keys_column and indices_column return make_dictionary_column( std::move(keys_column), std::move(indices_column), std::move(new_null_mask), new_nulls.second); } } // namespace std::unique_ptr<column> remove_keys( dictionary_column_view const& dictionary_column, column_view const& keys_to_remove, rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource(), cudaStream_t stream = 0) { CUDF_EXPECTS(!keys_to_remove.has_nulls(), "keys_to_remove must not have nulls"); auto const keys_view = dictionary_column.keys(); CUDF_EXPECTS(keys_view.type() == keys_to_remove.type(), "keys types must match"); // locate keys to remove by searching the keys column auto const matches = cudf::detail::contains(keys_view, keys_to_remove, mr, stream); auto d_matches = matches->view().data<bool>(); // call common utility method to keep the keys not matched to keys_to_remove auto key_matcher = [d_matches] __device__(size_type idx) { return !d_matches[idx]; }; return remove_keys_fn(dictionary_column, key_matcher, mr, stream); } std::unique_ptr<column> remove_unused_keys( dictionary_column_view const& dictionary_column, rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource(), cudaStream_t stream = 0) { // locate the keys to remove auto const keys_size = dictionary_column.keys_size(); column_view indices_view = dictionary_column.get_indices_annotated(); // search the indices values with key indices to look for any holes auto const matches = [&] { // build keys index to verify against indices values rmm::device_uvector<uint32_t> keys_positions(keys_size, stream); thrust::sequence( rmm::exec_policy(stream)->on(stream), keys_positions.begin(), keys_positions.end()); // wrap the indices for comparison in contains() column_view keys_positions_view(data_type{type_id::UINT32}, keys_size, keys_positions.data()); return cudf::detail::contains(keys_positions_view, indices_view, mr, stream); }(); auto d_matches = matches->view().data<bool>(); // call common utility method to keep the keys that match auto key_matcher = [d_matches] __device__(size_type idx) { return d_matches[idx]; }; return remove_keys_fn(dictionary_column, key_matcher, mr, stream); } } // namespace detail // external APIs std::unique_ptr<column> remove_keys(dictionary_column_view const& dictionary_column, column_view const& keys_to_remove, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::remove_keys(dictionary_column, keys_to_remove, mr); } std::unique_ptr<column> remove_unused_keys(dictionary_column_view const& dictionary_column, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::remove_unused_keys(dictionary_column, mr); } } // namespace dictionary } // namespace cudf
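The heart of remove_keys_fn is the two-step index remap documented in its comments: scatter the surviving key positions into a map initialized to a sentinel (max_size), then gather the dictionary's indices through that map so removed keys turn into the sentinel and later become nulls. The standalone Thrust sketch below reproduces exactly the example from those comments on plain ints; it is illustrative only, uses none of the cudf machinery, and the sentinel value 9 stands in for max_size.

// Standalone Thrust demo of the scatter/gather remap used by remove_keys_fn.
#include <thrust/device_vector.h>
#include <thrust/scatter.h>
#include <thrust/gather.h>
#include <thrust/copy.h>
#include <vector>
#include <cstdio>

int main()
{
    const int sentinel = 9;                         // plays the role of max_size
    thrust::device_vector<int> map(5, sentinel);    // [9,9,9,9,9]

    // keys at old positions 0,2,4 survive and become new keys 0,1,2
    std::vector<int> h_new_pos = {0, 1, 2};
    std::vector<int> h_old_pos = {0, 2, 4};
    thrust::device_vector<int> new_pos = h_new_pos;
    thrust::device_vector<int> old_pos = h_old_pos;
    thrust::scatter(new_pos.begin(), new_pos.end(), old_pos.begin(), map.begin());
    // map is now [0,9,1,9,2] -- matches scatter([0,1,2],[0,2,4],[max,...]) in the comments

    // rewrite the dictionary indices through the map; sentinel entries become new nulls
    std::vector<int> h_indices = {4, 0, 3, 1, 2, 2, 2, 4, 0};
    thrust::device_vector<int> indices = h_indices;
    thrust::device_vector<int> remapped(indices.size());
    thrust::gather(indices.begin(), indices.end(), map.begin(), remapped.begin());

    std::vector<int> h_out(remapped.size());
    thrust::copy(remapped.begin(), remapped.end(), h_out.begin());
    for (int v : h_out) std::printf("%d ", v);      // prints: 2 0 9 9 1 1 1 2 0
    std::printf("\n");
    return 0;
}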
281ba5578ba4575c80ad341b53d50cfdbee87807.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Copyright (c) Facebook, Inc. and its affiliates. * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. */ #include <faiss/gpu/impl/L2Norm.cuh> #include <faiss/impl/FaissAssert.h> #include <faiss/gpu/utils/ConversionOperators.cuh> #include <faiss/gpu/utils/DeviceDefs.cuh> #include <faiss/gpu/utils/DeviceUtils.h> #include <faiss/gpu/utils/Float16.cuh> #include <faiss/gpu/utils/MathOperators.cuh> #include <faiss/gpu/utils/PtxUtils.cuh> #include <faiss/gpu/utils/StaticUtils.h> #include <faiss/gpu/utils/Reductions.cuh> namespace faiss { namespace gpu { // Input: (batch x dim) // Output: (batch norm) // Done under the presumption that the dimension size is not too large // (<10k or so), since there wouldn't be enough parallelism applying a // single block to the problem. Also that each vector is large enough // (>64), since a single block works on multiple rows' norms at the // same time. // T: the type we are doing the math in (e.g., float, half) // TVec: the potentially vectorized type we are loading in (e.g., // float4, half2) template <typename T, typename TVec, typename IndexType, int RowTileSize, bool NormLoop, bool NormSquared> __global__ void l2NormRowMajor(Tensor<TVec, 2, true, IndexType> input, Tensor<float, 1, true, IndexType> output) { extern __shared__ char smemByte[]; // #warps * RowTileSize elements float* smem = (float*) smemByte; IndexType numWarps = utils::divUp(blockDim.x, kWarpSize); IndexType laneId = getLaneId(); IndexType warpId = threadIdx.x / kWarpSize; bool lastRowTile = (blockIdx.x == (gridDim.x - 1)); IndexType rowStart = RowTileSize * blockIdx.x; // accumulate in f32 float rowNorm[RowTileSize]; if (lastRowTile) { // We are handling the very end of the input matrix rows for (IndexType row = 0; row < input.getSize(0) - rowStart; ++row) { if (NormLoop) { rowNorm[0] = 0; for (IndexType col = threadIdx.x; col < input.getSize(1); col += blockDim.x) { TVec val = input[rowStart + row][col]; val = Math<TVec>::mul(val, val); rowNorm[0] = rowNorm[0] + Math<TVec>::reduceAdd(val); } } else { TVec val = input[rowStart + row][threadIdx.x]; val = Math<TVec>::mul(val, val); rowNorm[0] = Math<TVec>::reduceAdd(val); } rowNorm[0] = warpReduceAllSum(rowNorm[0]); if (laneId == 0) { smem[row * numWarps + warpId] = rowNorm[0]; } } } else { // We are guaranteed that all RowTileSize rows are available in // [rowStart, rowStart + RowTileSize) if (NormLoop) { // A single block of threads is not big enough to span each // vector TVec tmp[RowTileSize]; #pragma unroll for (int row = 0; row < RowTileSize; ++row) { rowNorm[row] = 0; } for (IndexType col = threadIdx.x; col < input.getSize(1); col += blockDim.x) { #pragma unroll for (int row = 0; row < RowTileSize; ++row) { tmp[row] = input[rowStart + row][col]; } #pragma unroll for (int row = 0; row < RowTileSize; ++row) { tmp[row] = Math<TVec>::mul(tmp[row], tmp[row]); } #pragma unroll for (int row = 0; row < RowTileSize; ++row) { rowNorm[row] = rowNorm[row] + Math<TVec>::reduceAdd(tmp[row]); } } } else { TVec tmp[RowTileSize]; // A block of threads is the exact size of the vector #pragma unroll for (int row = 0; row < RowTileSize; ++row) { tmp[row] = input[rowStart + row][threadIdx.x]; } #pragma unroll for (int row = 0; row < RowTileSize; ++row) { tmp[row] = Math<TVec>::mul(tmp[row], tmp[row]); } #pragma unroll for (int row = 0; row < RowTileSize; ++row) { rowNorm[row] = 
Math<TVec>::reduceAdd(tmp[row]); } } // Sum up all parts in each warp #pragma unroll for (int row = 0; row < RowTileSize; ++row) { rowNorm[row] = warpReduceAllSum(rowNorm[row]); } if (laneId == 0) { #pragma unroll for (int row = 0; row < RowTileSize; ++row) { smem[row * numWarps + warpId] = rowNorm[row]; } } } __syncthreads(); // Sum across warps if (warpId == 0) { #pragma unroll for (int row = 0; row < RowTileSize; ++row) { rowNorm[row] = laneId < numWarps ? smem[row * numWarps + laneId] : 0; } #pragma unroll for (int row = 0; row < RowTileSize; ++row) { rowNorm[row] = warpReduceAllSum(rowNorm[row]); } // Write out answer if (laneId == 0) { #pragma unroll for (int row = 0; row < RowTileSize; ++row) { int outCol = rowStart + row; if (lastRowTile) { if (outCol < output.getSize(0)) { output[outCol] = NormSquared ? ConvertTo<float>::to(rowNorm[row]) : sqrtf(ConvertTo<float>::to(rowNorm[row])); } } else { output[outCol] = NormSquared ? ConvertTo<float>::to(rowNorm[row]) : sqrtf(ConvertTo<float>::to(rowNorm[row])); } } } } } // Input: (dim x batch) // Output: (batch norm) // Handles the case where `input` is column major. A single thread calculates // the norm of each vector instead of a block-wide reduction. template <typename T, typename IndexType, bool NormSquared> __global__ void l2NormColMajor(Tensor<T, 2, true, IndexType> input, Tensor<float, 1, true, IndexType> output) { // grid-stride loop to handle all batch elements for (IndexType batch = blockIdx.x * blockDim.x + threadIdx.x; batch < input.getSize(1); batch += gridDim.x * blockDim.x) { float sum = 0; // This is still a coalesced load from the memory for (IndexType dim = 0; dim < input.getSize(0); ++dim) { // Just do the math in float32, even if the input is float16 float v = ConvertTo<float>::to(input[dim][batch]); sum += v * v; } if (!NormSquared) { sum = sqrtf(sum); } output[batch] = ConvertTo<float>::to(sum); } } template <typename T, typename TVec, typename IndexType> void runL2Norm(Tensor<T, 2, true, IndexType>& input, bool inputRowMajor, Tensor<float, 1, true, IndexType>& output, bool normSquared, hipStream_t stream) { IndexType maxThreads = (IndexType) getMaxThreadsCurrentDevice(); constexpr int rowTileSize = 8; #define RUN_L2_ROW_MAJOR(TYPE_T, TYPE_TVEC, INPUT) \ do { \ if (normLoop) { \ if (normSquared) { \ hipLaunchKernelGGL(( l2NormRowMajor<TYPE_T, TYPE_TVEC, IndexType, rowTileSize, true, true>) \ , dim3(grid), dim3(block), smem, stream, INPUT, output); \ } else { \ hipLaunchKernelGGL(( l2NormRowMajor<TYPE_T, TYPE_TVEC, IndexType, rowTileSize, true, false>) \ , dim3(grid), dim3(block), smem, stream, INPUT, output); \ } \ } else { \ if (normSquared) { \ hipLaunchKernelGGL(( l2NormRowMajor<TYPE_T, TYPE_TVEC, IndexType, rowTileSize, false, true>) \ , dim3(grid), dim3(block), smem, stream, INPUT, output); \ } else { \ hipLaunchKernelGGL(( l2NormRowMajor<TYPE_T, TYPE_TVEC, IndexType, rowTileSize, false, false>) \ , dim3(grid), dim3(block), smem, stream, INPUT, output); \ } \ } \ } while (0) if (inputRowMajor) { // // Row-major kernel /// if (input.template canCastResize<TVec>()) { // Can load using the vectorized type auto inputV = input.template castResize<TVec>(); auto dim = inputV.getSize(1); bool normLoop = dim > maxThreads; auto numThreads = ::min(dim, maxThreads); auto grid = dim3(utils::divUp(inputV.getSize(0), rowTileSize)); auto block = dim3(numThreads); auto smem = sizeof(float) * rowTileSize * utils::divUp(numThreads, kWarpSize); RUN_L2_ROW_MAJOR(T, TVec, inputV); } else { // Can't load using the vectorized type auto 
dim = input.getSize(1); bool normLoop = dim > maxThreads; auto numThreads = ::min(dim, maxThreads); auto grid = dim3(utils::divUp(input.getSize(0), rowTileSize)); auto block = dim3(numThreads); auto smem = sizeof(float) * rowTileSize * utils::divUp(numThreads, kWarpSize); RUN_L2_ROW_MAJOR(T, T, input); } } else { // // Column-major kernel // // Just use a fixed-sized block, since the kernel threads are fully // independent auto block = 128; // Cap the grid size at 2^16 since there is a grid-stride loop to handle // processing everything auto grid = (int) ::min(utils::divUp(input.getSize(1), (IndexType) block), (IndexType) 65536); if (normSquared) { hipLaunchKernelGGL(( l2NormColMajor<T, IndexType, true>), dim3(grid), dim3(block), 0, stream, input, output); } else { hipLaunchKernelGGL(( l2NormColMajor<T, IndexType, false>), dim3(grid), dim3(block), 0, stream, input, output); } } #undef RUN_L2 CUDA_TEST_ERROR(); } void runL2Norm(Tensor<float, 2, true>& input, bool inputRowMajor, Tensor<float, 1, true>& output, bool normSquared, hipStream_t stream) { if (input.canUseIndexType<int>()) { runL2Norm<float, float4, int>( input, inputRowMajor, output, normSquared, stream); } else { auto inputCast = input.castIndexType<long>(); auto outputCast = output.castIndexType<long>(); runL2Norm<float, float4, long>( inputCast, inputRowMajor, outputCast, normSquared, stream); } } void runL2Norm(Tensor<half, 2, true>& input, bool inputRowMajor, Tensor<float, 1, true>& output, bool normSquared, hipStream_t stream) { if (input.canUseIndexType<int>()) { runL2Norm<half, half2, int>( input, inputRowMajor, output, normSquared, stream); } else { auto inputCast = input.castIndexType<long>(); auto outputCast = output.castIndexType<long>(); runL2Norm<half, half2, long>( inputCast, inputRowMajor, outputCast, normSquared, stream); } } } } // namespace
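For reference, the core of the row-major path is: one block accumulates the sum of squares of one (or a few) rows, reduces it, and optionally takes the square root. The kernel below is a deliberately simplified standalone version of that idea, not the faiss kernel above: no RowTileSize batching, no vectorized loads, no half support, and a plain shared-memory tree reduction instead of warp reductions. The name and the power-of-two block-size requirement are assumptions.

// Simplified row-wise L2 norm: one block per row, shared-memory reduction.
#include <cuda_runtime.h>
#include <math.h>

__global__ void rowL2Norm(const float* __restrict__ x, float* __restrict__ out,
                          int dim, bool normSquared)
{
    extern __shared__ float smem[];
    const float* row = x + (size_t)blockIdx.x * dim;

    // each thread accumulates a strided partial sum of squares
    float sum = 0.f;
    for (int c = threadIdx.x; c < dim; c += blockDim.x) {
        float v = row[c];
        sum += v * v;
    }
    smem[threadIdx.x] = sum;
    __syncthreads();

    // tree reduction in shared memory (blockDim.x assumed to be a power of two)
    for (int s = blockDim.x / 2; s > 0; s >>= 1) {
        if (threadIdx.x < s) smem[threadIdx.x] += smem[threadIdx.x + s];
        __syncthreads();
    }
    if (threadIdx.x == 0)
        out[blockIdx.x] = normSquared ? smem[0] : sqrtf(smem[0]);
}

// launch: one block per row, e.g.
//   rowL2Norm<<<numRows, 256, 256 * sizeof(float)>>>(d_x, d_out, dim, true);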
281ba5578ba4575c80ad341b53d50cfdbee87807.cu
/** * Copyright (c) Facebook, Inc. and its affiliates. * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. */ #include <faiss/gpu/impl/L2Norm.cuh> #include <faiss/impl/FaissAssert.h> #include <faiss/gpu/utils/ConversionOperators.cuh> #include <faiss/gpu/utils/DeviceDefs.cuh> #include <faiss/gpu/utils/DeviceUtils.h> #include <faiss/gpu/utils/Float16.cuh> #include <faiss/gpu/utils/MathOperators.cuh> #include <faiss/gpu/utils/PtxUtils.cuh> #include <faiss/gpu/utils/StaticUtils.h> #include <faiss/gpu/utils/Reductions.cuh> namespace faiss { namespace gpu { // Input: (batch x dim) // Output: (batch norm) // Done under the presumption that the dimension size is not too large // (<10k or so), since there wouldn't be enough parallelism applying a // single block to the problem. Also that each vector is large enough // (>64), since a single block works on multiple rows' norms at the // same time. // T: the type we are doing the math in (e.g., float, half) // TVec: the potentially vectorized type we are loading in (e.g., // float4, half2) template <typename T, typename TVec, typename IndexType, int RowTileSize, bool NormLoop, bool NormSquared> __global__ void l2NormRowMajor(Tensor<TVec, 2, true, IndexType> input, Tensor<float, 1, true, IndexType> output) { extern __shared__ char smemByte[]; // #warps * RowTileSize elements float* smem = (float*) smemByte; IndexType numWarps = utils::divUp(blockDim.x, kWarpSize); IndexType laneId = getLaneId(); IndexType warpId = threadIdx.x / kWarpSize; bool lastRowTile = (blockIdx.x == (gridDim.x - 1)); IndexType rowStart = RowTileSize * blockIdx.x; // accumulate in f32 float rowNorm[RowTileSize]; if (lastRowTile) { // We are handling the very end of the input matrix rows for (IndexType row = 0; row < input.getSize(0) - rowStart; ++row) { if (NormLoop) { rowNorm[0] = 0; for (IndexType col = threadIdx.x; col < input.getSize(1); col += blockDim.x) { TVec val = input[rowStart + row][col]; val = Math<TVec>::mul(val, val); rowNorm[0] = rowNorm[0] + Math<TVec>::reduceAdd(val); } } else { TVec val = input[rowStart + row][threadIdx.x]; val = Math<TVec>::mul(val, val); rowNorm[0] = Math<TVec>::reduceAdd(val); } rowNorm[0] = warpReduceAllSum(rowNorm[0]); if (laneId == 0) { smem[row * numWarps + warpId] = rowNorm[0]; } } } else { // We are guaranteed that all RowTileSize rows are available in // [rowStart, rowStart + RowTileSize) if (NormLoop) { // A single block of threads is not big enough to span each // vector TVec tmp[RowTileSize]; #pragma unroll for (int row = 0; row < RowTileSize; ++row) { rowNorm[row] = 0; } for (IndexType col = threadIdx.x; col < input.getSize(1); col += blockDim.x) { #pragma unroll for (int row = 0; row < RowTileSize; ++row) { tmp[row] = input[rowStart + row][col]; } #pragma unroll for (int row = 0; row < RowTileSize; ++row) { tmp[row] = Math<TVec>::mul(tmp[row], tmp[row]); } #pragma unroll for (int row = 0; row < RowTileSize; ++row) { rowNorm[row] = rowNorm[row] + Math<TVec>::reduceAdd(tmp[row]); } } } else { TVec tmp[RowTileSize]; // A block of threads is the exact size of the vector #pragma unroll for (int row = 0; row < RowTileSize; ++row) { tmp[row] = input[rowStart + row][threadIdx.x]; } #pragma unroll for (int row = 0; row < RowTileSize; ++row) { tmp[row] = Math<TVec>::mul(tmp[row], tmp[row]); } #pragma unroll for (int row = 0; row < RowTileSize; ++row) { rowNorm[row] = Math<TVec>::reduceAdd(tmp[row]); } } // Sum up all parts in each warp #pragma unroll for (int row = 0; 
row < RowTileSize; ++row) { rowNorm[row] = warpReduceAllSum(rowNorm[row]); } if (laneId == 0) { #pragma unroll for (int row = 0; row < RowTileSize; ++row) { smem[row * numWarps + warpId] = rowNorm[row]; } } } __syncthreads(); // Sum across warps if (warpId == 0) { #pragma unroll for (int row = 0; row < RowTileSize; ++row) { rowNorm[row] = laneId < numWarps ? smem[row * numWarps + laneId] : 0; } #pragma unroll for (int row = 0; row < RowTileSize; ++row) { rowNorm[row] = warpReduceAllSum(rowNorm[row]); } // Write out answer if (laneId == 0) { #pragma unroll for (int row = 0; row < RowTileSize; ++row) { int outCol = rowStart + row; if (lastRowTile) { if (outCol < output.getSize(0)) { output[outCol] = NormSquared ? ConvertTo<float>::to(rowNorm[row]) : sqrtf(ConvertTo<float>::to(rowNorm[row])); } } else { output[outCol] = NormSquared ? ConvertTo<float>::to(rowNorm[row]) : sqrtf(ConvertTo<float>::to(rowNorm[row])); } } } } } // Input: (dim x batch) // Output: (batch norm) // Handles the case where `input` is column major. A single thread calculates // the norm of each vector instead of a block-wide reduction. template <typename T, typename IndexType, bool NormSquared> __global__ void l2NormColMajor(Tensor<T, 2, true, IndexType> input, Tensor<float, 1, true, IndexType> output) { // grid-stride loop to handle all batch elements for (IndexType batch = blockIdx.x * blockDim.x + threadIdx.x; batch < input.getSize(1); batch += gridDim.x * blockDim.x) { float sum = 0; // This is still a coalesced load from the memory for (IndexType dim = 0; dim < input.getSize(0); ++dim) { // Just do the math in float32, even if the input is float16 float v = ConvertTo<float>::to(input[dim][batch]); sum += v * v; } if (!NormSquared) { sum = sqrtf(sum); } output[batch] = ConvertTo<float>::to(sum); } } template <typename T, typename TVec, typename IndexType> void runL2Norm(Tensor<T, 2, true, IndexType>& input, bool inputRowMajor, Tensor<float, 1, true, IndexType>& output, bool normSquared, cudaStream_t stream) { IndexType maxThreads = (IndexType) getMaxThreadsCurrentDevice(); constexpr int rowTileSize = 8; #define RUN_L2_ROW_MAJOR(TYPE_T, TYPE_TVEC, INPUT) \ do { \ if (normLoop) { \ if (normSquared) { \ l2NormRowMajor<TYPE_T, TYPE_TVEC, IndexType, rowTileSize, true, true> \ <<<grid, block, smem, stream>>>(INPUT, output); \ } else { \ l2NormRowMajor<TYPE_T, TYPE_TVEC, IndexType, rowTileSize, true, false> \ <<<grid, block, smem, stream>>>(INPUT, output); \ } \ } else { \ if (normSquared) { \ l2NormRowMajor<TYPE_T, TYPE_TVEC, IndexType, rowTileSize, false, true> \ <<<grid, block, smem, stream>>>(INPUT, output); \ } else { \ l2NormRowMajor<TYPE_T, TYPE_TVEC, IndexType, rowTileSize, false, false> \ <<<grid, block, smem, stream>>>(INPUT, output); \ } \ } \ } while (0) if (inputRowMajor) { // // Row-major kernel /// if (input.template canCastResize<TVec>()) { // Can load using the vectorized type auto inputV = input.template castResize<TVec>(); auto dim = inputV.getSize(1); bool normLoop = dim > maxThreads; auto numThreads = std::min(dim, maxThreads); auto grid = dim3(utils::divUp(inputV.getSize(0), rowTileSize)); auto block = dim3(numThreads); auto smem = sizeof(float) * rowTileSize * utils::divUp(numThreads, kWarpSize); RUN_L2_ROW_MAJOR(T, TVec, inputV); } else { // Can't load using the vectorized type auto dim = input.getSize(1); bool normLoop = dim > maxThreads; auto numThreads = std::min(dim, maxThreads); auto grid = dim3(utils::divUp(input.getSize(0), rowTileSize)); auto block = dim3(numThreads); auto smem = sizeof(float) 
* rowTileSize * utils::divUp(numThreads, kWarpSize); RUN_L2_ROW_MAJOR(T, T, input); } } else { // // Column-major kernel // // Just use a fixed-sized block, since the kernel threads are fully // independent auto block = 128; // Cap the grid size at 2^16 since there is a grid-stride loop to handle // processing everything auto grid = (int) std::min(utils::divUp(input.getSize(1), (IndexType) block), (IndexType) 65536); if (normSquared) { l2NormColMajor<T, IndexType, true><<<grid, block, 0, stream>>>( input, output); } else { l2NormColMajor<T, IndexType, false><<<grid, block, 0, stream>>>( input, output); } } #undef RUN_L2 CUDA_TEST_ERROR(); } void runL2Norm(Tensor<float, 2, true>& input, bool inputRowMajor, Tensor<float, 1, true>& output, bool normSquared, cudaStream_t stream) { if (input.canUseIndexType<int>()) { runL2Norm<float, float4, int>( input, inputRowMajor, output, normSquared, stream); } else { auto inputCast = input.castIndexType<long>(); auto outputCast = output.castIndexType<long>(); runL2Norm<float, float4, long>( inputCast, inputRowMajor, outputCast, normSquared, stream); } } void runL2Norm(Tensor<half, 2, true>& input, bool inputRowMajor, Tensor<float, 1, true>& output, bool normSquared, cudaStream_t stream) { if (input.canUseIndexType<int>()) { runL2Norm<half, half2, int>( input, inputRowMajor, output, normSquared, stream); } else { auto inputCast = input.castIndexType<long>(); auto outputCast = output.castIndexType<long>(); runL2Norm<half, half2, long>( inputCast, inputRowMajor, outputCast, normSquared, stream); } } } } // namespace
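// Note for readers: l2NormRowMajor tiles RowTileSize rows per block and reduces each row's
// partial sums first within a warp, then across warps through shared memory. Also note that
// the trailing "#undef RUN_L2" does not match the "RUN_L2_ROW_MAJOR" macro defined above, so
// that macro is never actually undefined (benign here, but easy to miss).
// The sketch below is a minimal, self-contained illustration of the same reduction pattern
// with one block per row and no row tiling or vectorized loads. It is NOT the faiss
// implementation; the kernel name simpleL2NormRowMajor and the launch parameters are
// illustrative assumptions.
#include <cuda_runtime.h>
#include <cstdio>
#include <cmath>

__global__ void simpleL2NormRowMajor(const float* input, float* output, int dim) {
    extern __shared__ float warpSums[];           // one partial sum per warp
    int row = blockIdx.x;
    int lane = threadIdx.x & 31;
    int warp = threadIdx.x >> 5;

    // Each thread accumulates a strided slice of its row.
    float sum = 0.f;
    for (int col = threadIdx.x; col < dim; col += blockDim.x) {
        float v = input[row * dim + col];
        sum += v * v;
    }

    // Reduce within the warp using shuffles.
    for (int offset = 16; offset > 0; offset >>= 1) {
        sum += __shfl_down_sync(0xffffffff, sum, offset);
    }
    if (lane == 0) warpSums[warp] = sum;
    __syncthreads();

    // The first warp reduces the per-warp partials and writes the norm.
    if (warp == 0) {
        int numWarps = (blockDim.x + 31) / 32;
        float total = (lane < numWarps) ? warpSums[lane] : 0.f;
        for (int offset = 16; offset > 0; offset >>= 1) {
            total += __shfl_down_sync(0xffffffff, total, offset);
        }
        if (lane == 0) output[row] = sqrtf(total);
    }
}

int main() {
    const int rows = 4, dim = 1000, kThreads = 128;
    float* h = new float[rows * dim];
    for (int i = 0; i < rows * dim; ++i) h[i] = 1.f;   // each norm should be sqrt(dim)

    float *dIn, *dOut;
    cudaMalloc(&dIn, rows * dim * sizeof(float));
    cudaMalloc(&dOut, rows * sizeof(float));
    cudaMemcpy(dIn, h, rows * dim * sizeof(float), cudaMemcpyHostToDevice);

    int smem = ((kThreads + 31) / 32) * sizeof(float);
    simpleL2NormRowMajor<<<rows, kThreads, smem>>>(dIn, dOut, dim);

    float out[rows];
    cudaMemcpy(out, dOut, rows * sizeof(float), cudaMemcpyDeviceToHost);
    printf("norm of row 0: %f (expected %f)\n", out[0], sqrtf((float)dim));
    cudaFree(dIn); cudaFree(dOut); delete[] h;
    return 0;
}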
5de9d5ff51bc4305bc6494d51226cad9fc5f7eb1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. #include <ATen/ATen.h> #include <ATen/hip/HIPContext.h> #include <THH/THH.h> #include <THH/THHAtomics.cuh> #include <THH/THHDeviceUtils.cuh> #include "rotate_rect_ops.h" // TODO make it in a common file #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ i += blockDim.x * gridDim.x) template <typename T> __device__ inline void get_rotated_rect_bounding_box(const T* pts, int& leftMost, int& topMost, int& rightMost, int& bottomMost, const int width, const int height) { // const T* P = pts; // leftMost = int(max(round(min(min(P[0],P[2]),min(P[4],P[6]))),0.0)); // rightMost= int(min(round(max(max(P[0],P[2]),max(P[4],P[6]))),width-1.0)); // topMost= int(max(round(min(min(P[1],P[3]),min(P[5],P[7]))),0.0)); // bottomMost= int(min(round(max(max(P[1],P[3]),max(P[5],P[7]))),height-1.0)); leftMost = int(max(min(min(pts[0], pts[2]), min(pts[4], pts[6])), 0.0)); topMost = int(max(min(min(pts[1], pts[3]), min(pts[5], pts[7])), 0.0)); rightMost = int(min(max(max(pts[0], pts[2]), max(pts[4], pts[6])) + 1, width - 1.0)); bottomMost = int(min(max(max(pts[1], pts[3]), max(pts[5], pts[7])) + 1, height - 1.0)); } template <typename T> __global__ void RRoIPoolFForward(const int nthreads, const T* bottom_data, const float spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const T* bottom_rois, T* top_data, int* argmax_data) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; const T* offset_bottom_rois = bottom_rois + n * 6; int roi_batch_ind = offset_bottom_rois[0]; T P[8]; compute_roi_pool_pts(offset_bottom_rois, P, spatial_scale, pooled_height, pooled_width, ph, pw); int leftMost, topMost, rightMost, bottomMost; get_rotated_rect_bounding_box(P, leftMost, topMost, rightMost, bottomMost, width, height); T maxval = 0; int maxidx = -1; const T* offset_bottom_data = bottom_data + (roi_batch_ind * channels + c) * height * width; T AB[2]; AB[0] = P[0] - P[2]; AB[1] = P[1] - P[3]; T ABAB = AB[0]*AB[0] +AB[1]*AB[1]; T AC[2]; AC[0] = P[4] - P[2]; AC[1] = P[5] - P[3]; T ACAC = AC[0]*AC[0] + AC[1]*AC[1]; for (int hh = topMost; hh < bottomMost+1; ++hh) { for (int ww = leftMost; ww < rightMost+1; ++ww) { T AP[2]; AP[0] = ww - P[2]; AP[1] = hh - P[3]; T ABAP = AB[0]*AP[0] + AB[1]*AP[1]; T ACAP = AC[0]*AP[0] + AC[1]*AP[1]; if ( ABAP >= 1e-3 && (ABAB - ABAP) > -1e-3 && ACAP >= 1e-3 && (ACAC - ACAP) > -1e-3 ) { int bottom_index = hh * width + ww; if (offset_bottom_data[bottom_index] > maxval) { maxval = offset_bottom_data[bottom_index]; maxidx = bottom_index; } } } } top_data[index] = maxval; argmax_data[index] = maxidx; // T bin_size_h = static_cast<T>(roi_height) // / static_cast<T>(pooled_height); // T bin_size_w = static_cast<T>(roi_width) // / static_cast<T>(pooled_width); // int hstart = static_cast<int>(floor(static_cast<T>(ph) // * bin_size_h)); // int wstart = static_cast<int>(floor(static_cast<T>(pw) // * bin_size_w)); // int hend = static_cast<int>(ceil(static_cast<T>(ph + 1) // * bin_size_h)); // int wend = static_cast<int>(ceil(static_cast<T>(pw + 1) // * bin_size_w)); // // Add roi offsets and clip to input 
boundaries // hstart = min(max(hstart + roi_start_h, 0), height); // hend = min(max(hend + roi_start_h, 0), height); // wstart = min(max(wstart + roi_start_w, 0), width); // wend = min(max(wend + roi_start_w, 0), width); // bool is_empty = (hend <= hstart) || (wend <= wstart); // // Define an empty pooling region to be zero // T maxval = is_empty ? 0 : -FLT_MAX; // // If nothing is pooled, argmax = -1 causes nothing to be backprop'd // int maxidx = -1; // const T* offset_bottom_data = // bottom_data + (roi_batch_ind * channels + c) * height * width; // for (int h = hstart; h < hend; ++h) { // for (int w = wstart; w < wend; ++w) { // int bottom_index = h * width + w; // if (offset_bottom_data[bottom_index] > maxval) { // maxval = offset_bottom_data[bottom_index]; // maxidx = bottom_index; // } // } // } // top_data[index] = maxval; // argmax_data[index] = maxidx; } } template <typename T> __global__ void RRoIPoolFBackward(const int nthreads, const T* top_diff, const int* argmax_data, const int num_rois, const float spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, T* bottom_diff, const T* bottom_rois) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; const T* offset_bottom_rois = bottom_rois + n * 6; int roi_batch_ind = offset_bottom_rois[0]; int bottom_offset = (roi_batch_ind * channels + c) * height * width; int top_offset = (n * channels + c) * pooled_height * pooled_width; const T* offset_top_diff = top_diff + top_offset; T* offset_bottom_diff = bottom_diff + bottom_offset; const int* offset_argmax_data = argmax_data + top_offset; int argmax = offset_argmax_data[ph * pooled_width + pw]; if (argmax != -1) { atomicAdd( offset_bottom_diff + argmax, static_cast<T>(offset_top_diff[ph * pooled_width + pw])); } } } std::tuple<at::Tensor, at::Tensor> RROIPool_forward_cuda(const at::Tensor& input, const at::Tensor& rois, const float spatial_scale, const int pooled_height, const int pooled_width) { AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor"); AT_ASSERTM(rois.type().is_cuda(), "rois must be a CUDA tensor"); auto num_rois = rois.size(0); auto channels = input.size(1); auto height = input.size(2); auto width = input.size(3); auto output = at::empty({num_rois, channels, pooled_height, pooled_width}, input.options()); auto output_size = num_rois * pooled_height * pooled_width * channels; auto argmax = at::zeros({num_rois, channels, pooled_height, pooled_width}, input.options().dtype(at::kInt)); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); dim3 grid(::min(THCCeilDiv((long)output_size, 512L), 4096L)); dim3 block(512); if (output.numel() == 0) { THCudaCheck(hipGetLastError()); return std::make_tuple(output, argmax); } AT_DISPATCH_FLOATING_TYPES(input.type(), "RROIPool_forward", [&] { hipLaunchKernelGGL(( RRoIPoolFForward<scalar_t>), dim3(grid), dim3(block), 0, stream, output_size, input.contiguous().data<scalar_t>(), spatial_scale, channels, height, width, pooled_height, pooled_width, rois.contiguous().data<scalar_t>(), output.data<scalar_t>(), argmax.data<int>()); }); THCudaCheck(hipGetLastError()); return std::make_tuple(output, argmax); } // TODO remove the dependency on input and use instead its sizes -> save memory at::Tensor 
RROIPool_backward_cuda(const at::Tensor& grad, const at::Tensor& input, const at::Tensor& rois, const at::Tensor& argmax, const float spatial_scale, const int pooled_height, const int pooled_width, const int batch_size, const int channels, const int height, const int width) { AT_ASSERTM(grad.type().is_cuda(), "grad must be a CUDA tensor"); AT_ASSERTM(rois.type().is_cuda(), "rois must be a CUDA tensor"); // TODO add more checks auto num_rois = rois.size(0); auto grad_input = at::zeros({batch_size, channels, height, width}, grad.options()); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); dim3 grid(::min(THCCeilDiv((long)grad.numel(), 512L), 4096L)); dim3 block(512); // handle possibly empty gradients if (grad.numel() == 0) { THCudaCheck(hipGetLastError()); return grad_input; } AT_DISPATCH_FLOATING_TYPES(grad.type(), "RROIPool_backward", [&] { hipLaunchKernelGGL(( RRoIPoolFBackward<scalar_t>), dim3(grid), dim3(block), 0, stream, grad.numel(), grad.contiguous().data<scalar_t>(), argmax.data<int>(), num_rois, spatial_scale, channels, height, width, pooled_height, pooled_width, grad_input.data<scalar_t>(), rois.contiguous().data<scalar_t>()); }); THCudaCheck(hipGetLastError()); return grad_input; }
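// Note for readers: RRoIPoolFForward decides whether a grid cell (ww, hh) lies inside the
// rotated pooling bin by projecting AP onto the two edge vectors AB and AC that share the
// corner (P[2], P[3]) and checking 0 <= AB.AP <= AB.AB and 0 <= AC.AP <= AC.AC, up to a
// small tolerance. The host-side sketch below isolates that test; the function name
// pointInRotatedRect and the epsilon value are illustrative, not part of this extension.
#include <cstdio>

// pts holds the corners as x0,y0,x1,y1,x2,y2,x3,y3, with (x1,y1) adjacent to both (x0,y0)
// and (x2,y2), matching how the kernel uses P[0..5].
bool pointInRotatedRect(const float* pts, float x, float y) {
    const float eps = 1e-3f;
    float ABx = pts[0] - pts[2], ABy = pts[1] - pts[3];
    float ACx = pts[4] - pts[2], ACy = pts[5] - pts[3];
    float APx = x - pts[2],      APy = y - pts[3];

    float ABAB = ABx * ABx + ABy * ABy;   // |AB|^2
    float ACAC = ACx * ACx + ACy * ACy;   // |AC|^2
    float ABAP = ABx * APx + ABy * APy;   // projection of AP onto AB, scaled by |AB|
    float ACAP = ACx * APx + ACy * APy;   // projection of AP onto AC, scaled by |AC|

    return ABAP >= eps && (ABAB - ABAP) > -eps &&
           ACAP >= eps && (ACAC - ACAP) > -eps;
}

int main() {
    // Axis-aligned 4x2 rectangle with corners (4,0),(0,0),(0,2),(4,2) in the layout above.
    float pts[8] = {4.f, 0.f, 0.f, 0.f, 0.f, 2.f, 4.f, 2.f};
    printf("(2,1) inside: %d\n", pointInRotatedRect(pts, 2.f, 1.f));   // expected 1
    printf("(5,1) inside: %d\n", pointInRotatedRect(pts, 5.f, 1.f));   // expected 0
    return 0;
}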
5de9d5ff51bc4305bc6494d51226cad9fc5f7eb1.cu
// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. #include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <THC/THC.h> #include <THC/THCAtomics.cuh> #include <THC/THCDeviceUtils.cuh> #include "rotate_rect_ops.h" // TODO make it in a common file #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ i += blockDim.x * gridDim.x) template <typename T> __device__ inline void get_rotated_rect_bounding_box(const T* pts, int& leftMost, int& topMost, int& rightMost, int& bottomMost, const int width, const int height) { // const T* P = pts; // leftMost = int(max(round(min(min(P[0],P[2]),min(P[4],P[6]))),0.0)); // rightMost= int(min(round(max(max(P[0],P[2]),max(P[4],P[6]))),width-1.0)); // topMost= int(max(round(min(min(P[1],P[3]),min(P[5],P[7]))),0.0)); // bottomMost= int(min(round(max(max(P[1],P[3]),max(P[5],P[7]))),height-1.0)); leftMost = int(max(min(min(pts[0], pts[2]), min(pts[4], pts[6])), 0.0)); topMost = int(max(min(min(pts[1], pts[3]), min(pts[5], pts[7])), 0.0)); rightMost = int(min(max(max(pts[0], pts[2]), max(pts[4], pts[6])) + 1, width - 1.0)); bottomMost = int(min(max(max(pts[1], pts[3]), max(pts[5], pts[7])) + 1, height - 1.0)); } template <typename T> __global__ void RRoIPoolFForward(const int nthreads, const T* bottom_data, const float spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const T* bottom_rois, T* top_data, int* argmax_data) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; const T* offset_bottom_rois = bottom_rois + n * 6; int roi_batch_ind = offset_bottom_rois[0]; T P[8]; compute_roi_pool_pts(offset_bottom_rois, P, spatial_scale, pooled_height, pooled_width, ph, pw); int leftMost, topMost, rightMost, bottomMost; get_rotated_rect_bounding_box(P, leftMost, topMost, rightMost, bottomMost, width, height); T maxval = 0; int maxidx = -1; const T* offset_bottom_data = bottom_data + (roi_batch_ind * channels + c) * height * width; T AB[2]; AB[0] = P[0] - P[2]; AB[1] = P[1] - P[3]; T ABAB = AB[0]*AB[0] +AB[1]*AB[1]; T AC[2]; AC[0] = P[4] - P[2]; AC[1] = P[5] - P[3]; T ACAC = AC[0]*AC[0] + AC[1]*AC[1]; for (int hh = topMost; hh < bottomMost+1; ++hh) { for (int ww = leftMost; ww < rightMost+1; ++ww) { T AP[2]; AP[0] = ww - P[2]; AP[1] = hh - P[3]; T ABAP = AB[0]*AP[0] + AB[1]*AP[1]; T ACAP = AC[0]*AP[0] + AC[1]*AP[1]; if ( ABAP >= 1e-3 && (ABAB - ABAP) > -1e-3 && ACAP >= 1e-3 && (ACAC - ACAP) > -1e-3 ) { int bottom_index = hh * width + ww; if (offset_bottom_data[bottom_index] > maxval) { maxval = offset_bottom_data[bottom_index]; maxidx = bottom_index; } } } } top_data[index] = maxval; argmax_data[index] = maxidx; // T bin_size_h = static_cast<T>(roi_height) // / static_cast<T>(pooled_height); // T bin_size_w = static_cast<T>(roi_width) // / static_cast<T>(pooled_width); // int hstart = static_cast<int>(floor(static_cast<T>(ph) // * bin_size_h)); // int wstart = static_cast<int>(floor(static_cast<T>(pw) // * bin_size_w)); // int hend = static_cast<int>(ceil(static_cast<T>(ph + 1) // * bin_size_h)); // int wend = static_cast<int>(ceil(static_cast<T>(pw + 1) // * bin_size_w)); // // Add roi offsets and clip to input boundaries // hstart = min(max(hstart + roi_start_h, 0), height); // hend = min(max(hend 
+ roi_start_h, 0), height); // wstart = min(max(wstart + roi_start_w, 0), width); // wend = min(max(wend + roi_start_w, 0), width); // bool is_empty = (hend <= hstart) || (wend <= wstart); // // Define an empty pooling region to be zero // T maxval = is_empty ? 0 : -FLT_MAX; // // If nothing is pooled, argmax = -1 causes nothing to be backprop'd // int maxidx = -1; // const T* offset_bottom_data = // bottom_data + (roi_batch_ind * channels + c) * height * width; // for (int h = hstart; h < hend; ++h) { // for (int w = wstart; w < wend; ++w) { // int bottom_index = h * width + w; // if (offset_bottom_data[bottom_index] > maxval) { // maxval = offset_bottom_data[bottom_index]; // maxidx = bottom_index; // } // } // } // top_data[index] = maxval; // argmax_data[index] = maxidx; } } template <typename T> __global__ void RRoIPoolFBackward(const int nthreads, const T* top_diff, const int* argmax_data, const int num_rois, const float spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, T* bottom_diff, const T* bottom_rois) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; const T* offset_bottom_rois = bottom_rois + n * 6; int roi_batch_ind = offset_bottom_rois[0]; int bottom_offset = (roi_batch_ind * channels + c) * height * width; int top_offset = (n * channels + c) * pooled_height * pooled_width; const T* offset_top_diff = top_diff + top_offset; T* offset_bottom_diff = bottom_diff + bottom_offset; const int* offset_argmax_data = argmax_data + top_offset; int argmax = offset_argmax_data[ph * pooled_width + pw]; if (argmax != -1) { atomicAdd( offset_bottom_diff + argmax, static_cast<T>(offset_top_diff[ph * pooled_width + pw])); } } } std::tuple<at::Tensor, at::Tensor> RROIPool_forward_cuda(const at::Tensor& input, const at::Tensor& rois, const float spatial_scale, const int pooled_height, const int pooled_width) { AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor"); AT_ASSERTM(rois.type().is_cuda(), "rois must be a CUDA tensor"); auto num_rois = rois.size(0); auto channels = input.size(1); auto height = input.size(2); auto width = input.size(3); auto output = at::empty({num_rois, channels, pooled_height, pooled_width}, input.options()); auto output_size = num_rois * pooled_height * pooled_width * channels; auto argmax = at::zeros({num_rois, channels, pooled_height, pooled_width}, input.options().dtype(at::kInt)); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); dim3 grid(std::min(THCCeilDiv((long)output_size, 512L), 4096L)); dim3 block(512); if (output.numel() == 0) { THCudaCheck(cudaGetLastError()); return std::make_tuple(output, argmax); } AT_DISPATCH_FLOATING_TYPES(input.type(), "RROIPool_forward", [&] { RRoIPoolFForward<scalar_t><<<grid, block, 0, stream>>>( output_size, input.contiguous().data<scalar_t>(), spatial_scale, channels, height, width, pooled_height, pooled_width, rois.contiguous().data<scalar_t>(), output.data<scalar_t>(), argmax.data<int>()); }); THCudaCheck(cudaGetLastError()); return std::make_tuple(output, argmax); } // TODO remove the dependency on input and use instead its sizes -> save memory at::Tensor RROIPool_backward_cuda(const at::Tensor& grad, const at::Tensor& input, const at::Tensor& rois, const at::Tensor& argmax, const float spatial_scale, 
const int pooled_height, const int pooled_width, const int batch_size, const int channels, const int height, const int width) { AT_ASSERTM(grad.type().is_cuda(), "grad must be a CUDA tensor"); AT_ASSERTM(rois.type().is_cuda(), "rois must be a CUDA tensor"); // TODO add more checks auto num_rois = rois.size(0); auto grad_input = at::zeros({batch_size, channels, height, width}, grad.options()); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); dim3 grid(std::min(THCCeilDiv((long)grad.numel(), 512L), 4096L)); dim3 block(512); // handle possibly empty gradients if (grad.numel() == 0) { THCudaCheck(cudaGetLastError()); return grad_input; } AT_DISPATCH_FLOATING_TYPES(grad.type(), "RROIPool_backward", [&] { RRoIPoolFBackward<scalar_t><<<grid, block, 0, stream>>>( grad.numel(), grad.contiguous().data<scalar_t>(), argmax.data<int>(), num_rois, spatial_scale, channels, height, width, pooled_height, pooled_width, grad_input.data<scalar_t>(), rois.contiguous().data<scalar_t>()); }); THCudaCheck(cudaGetLastError()); return grad_input; }
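// Note for readers: both the forward and backward launches above cap the grid at 4096
// blocks of 512 threads and rely on CUDA_1D_KERNEL_LOOP (a grid-stride loop), so every
// one of the N output elements is still visited when N exceeds 4096 * 512. A minimal
// sketch of that pattern; fillIota, kBlock and kMaxGrid are illustrative names.
#include <cuda_runtime.h>
#include <cstdio>
#include <algorithm>

__global__ void fillIota(int* out, int n) {
    // Grid-stride loop: each thread handles indices i, i + gridDim*blockDim, ...
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
         i += blockDim.x * gridDim.x) {
        out[i] = i;
    }
}

int main() {
    const int n = 5000000;                    // larger than 4096 * 512 = 2,097,152
    const int kBlock = 512, kMaxGrid = 4096;
    int grid = std::min((n + kBlock - 1) / kBlock, kMaxGrid);

    int* d;
    cudaMalloc(&d, n * sizeof(int));
    fillIota<<<grid, kBlock>>>(d, n);

    int last;
    cudaMemcpy(&last, d + n - 1, sizeof(int), cudaMemcpyDeviceToHost);
    printf("last element: %d (expected %d)\n", last, n - 1);   // covered despite capped grid
    cudaFree(d);
    return 0;
}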
3e313a72cf28b8af453a1ab54577daf19d6860c3.hip
// !!! This is a file automatically generated by hipify!!!
/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                           License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#if !defined CUDA_DISABLER

#include "column_filter.hpp"

namespace filter
{
    template void linearColumn<float4, float4>(PtrStepSzb src, PtrStepSzb dst, const float* kernel, int ksize, int anchor, int brd_type, int cc, hipStream_t stream);
}

#endif /* CUDA_DISABLER */
3e313a72cf28b8af453a1ab54577daf19d6860c3.cu
/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                           License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#if !defined CUDA_DISABLER

#include "column_filter.hpp"

namespace filter
{
    template void linearColumn<float4, float4>(PtrStepSzb src, PtrStepSzb dst, const float* kernel, int ksize, int anchor, int brd_type, int cc, cudaStream_t stream);
}

#endif /* CUDA_DISABLER */
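// Note for readers: this file pair exists only to explicitly instantiate one
// specialization of the linearColumn template (float4 in, float4 out), presumably so each
// specialization compiles in its own translation unit. A self-contained sketch of the
// explicit-instantiation syntax follows; scaleKernel and scaleBy are illustrative names,
// not OpenCV API.
#include <cuda_runtime.h>
#include <cstdio>

namespace demo
{
    template <typename T>
    __global__ void scaleKernel(T* data, T factor, int n)
    {
        int i = blockIdx.x * blockDim.x + threadIdx.x;
        if (i < n) data[i] *= factor;
    }

    template <typename T>
    void scaleBy(T* data, T factor, int n, cudaStream_t stream)
    {
        scaleKernel<T><<<(n + 255) / 256, 256, 0, stream>>>(data, factor, n);
    }

    // Explicit instantiation: emits code for this specialization in this translation unit,
    // so other units can link against it without seeing the template definition.
    template void scaleBy<float>(float*, float, int, cudaStream_t);
}

int main()
{
    const int n = 8;
    float h[n] = {1, 2, 3, 4, 5, 6, 7, 8};
    float* d;
    cudaMalloc(&d, n * sizeof(float));
    cudaMemcpy(d, h, n * sizeof(float), cudaMemcpyHostToDevice);
    demo::scaleBy<float>(d, 2.f, n, 0);             // default stream
    cudaMemcpy(h, d, n * sizeof(float), cudaMemcpyDeviceToHost);
    printf("h[7] = %f (expected 16)\n", h[7]);
    cudaFree(d);
    return 0;
}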
fd277ce9d2affeb2ba7d42dd51ff69f095d9c1eb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <string.h> #include <cutil.h> #include "params.h" #include "common.h" #include "bnd.h" #include "GPU.h" #include "Boundary.h" #include "cosmo.h" #include "Allocation.h" #include "Io.h" #include "Explicit.h" #include "Atomic.h" #ifdef WMPI #include "communication.h" #include "Interface.h" #endif //********************************************************** //********************************************************** extern "C" int Mainloop(int rank, int *pos, int *neigh, int ic_rank); //********************************************************** //********************************************************** #define CUERR() //printf("\n %s on %d \n",hipGetErrorString(hipGetLastError()),ic_rank) #ifndef max #define max( a, b ) ( ((a) > (b)) ? (a) : (b) ) #endif #ifndef min #define min( a, b ) ( ((a) < (b)) ? (a) : (b) ) #endif #define NCELLS3 (NCELLX+NBOUND2)*(NCELLY+NBOUND2)*(NCELLZ+NBOUND2) #define N_INT 2048 #define A_INT_MAX 0.166667 //********************************************************** //********************************************************** int Mainloop(int rank, int *pos, int *neigh, int ic_rank) { if(rank==0) printf("Mainloop entered by proc %d\n",rank); float tnext; dim3 blockion(NCELLX); // USED BY IONISATION dim3 gridion(NCELLY,NCELLZ); dim3 bcool(BLOCKCOOL); // USED BY COOLING dim3 gcool(GRIDCOOLX,GRIDCOOLY); dim3 blocksimple(NCELLX); // USED BY ADVECTION THREADS dim3 gridsimple(NCELLY,NCELLZ); #ifdef SDISCRETE int nthreadsource=min(nsource,128); dim3 gridsource((int)(round((float)(nsource)/float(nthreadsource)))); dim3 blocksource(nthreadsource); #endif #ifndef WMPI dim3 blockboundx(NCELLY); dim3 gridboundx(NCELLZ); dim3 blockboundy(NCELLX); dim3 gridboundy(NCELLZ); dim3 blockboundz(NCELLX); dim3 gridboundz(NCELLY); for (int igrp=0;igrp<NGRP;igrp++) { if(boundary==0) // transmissive boundary conditions { hipLaunchKernelGGL(( cusetboundarytrans_xp), dim3(gridboundx),dim3(blockboundx), 0, 0, cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NCELLS3*3); hipLaunchKernelGGL(( cusetboundarytrans_yp), dim3(gridboundy),dim3(blockboundy), 0, 0, cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NCELLS3*3); hipLaunchKernelGGL(( cusetboundarytrans_zp), dim3(gridboundz),dim3(blockboundz), 0, 0, cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NCELLS3*3); hipLaunchKernelGGL(( cusetboundarytrans_xm), dim3(gridboundx),dim3(blockboundx), 0, 0, cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NCELLS3*3); hipLaunchKernelGGL(( cusetboundarytrans_ym), dim3(gridboundy),dim3(blockboundy), 0, 0, cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NCELLS3*3); hipLaunchKernelGGL(( cusetboundarytrans_zm), dim3(gridboundz),dim3(blockboundz), 0, 0, cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NCELLS3*3); } else if(boundary==1) // reflexive boundary conditions { hipLaunchKernelGGL(( cusetboundaryref_zp), dim3(gridboundz),dim3(blockboundz), 0, 0, cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NCELLS3*3); hipLaunchKernelGGL(( cusetboundaryref_zm), dim3(gridboundz),dim3(blockboundz), 0, 0, cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NCELLS3*3); hipLaunchKernelGGL(( cusetboundaryref_yp), dim3(gridboundy),dim3(blockboundy), 0, 0, cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NCELLS3*3); hipLaunchKernelGGL(( 
cusetboundaryref_ym), dim3(gridboundy),dim3(blockboundy), 0, 0, cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NCELLS3*3); hipLaunchKernelGGL(( cusetboundaryref_xp), dim3(gridboundx),dim3(blockboundx), 0, 0, cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NCELLS3*3); hipLaunchKernelGGL(( cusetboundaryref_xm), dim3(gridboundx),dim3(blockboundx), 0, 0, cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NCELLS3*3); } else if(boundary==2) // Periodic boundary conditions { hipLaunchKernelGGL(( cusetboundaryper_xp), dim3(gridboundx),dim3(blockboundx), 0, 0, cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NCELLS3*3); hipLaunchKernelGGL(( cusetboundaryper_yp), dim3(gridboundy),dim3(blockboundy), 0, 0, cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NCELLS3*3); hipLaunchKernelGGL(( cusetboundaryper_zp), dim3(gridboundz),dim3(blockboundz), 0, 0, cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NCELLS3*3); hipLaunchKernelGGL(( cusetboundaryper_xm), dim3(gridboundx),dim3(blockboundx), 0, 0, cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NCELLS3*3); hipLaunchKernelGGL(( cusetboundaryper_ym), dim3(gridboundy),dim3(blockboundy), 0, 0, cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NCELLS3*3); hipLaunchKernelGGL(( cusetboundaryper_zm), dim3(gridboundz),dim3(blockboundz), 0, 0, cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NCELLS3*3); } else if(boundary==3) // Mixed boundary conditions { hipLaunchKernelGGL(( cusetboundarytrans_xp), dim3(gridboundx),dim3(blockboundx), 0, 0, cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NCELLS3*3); hipLaunchKernelGGL(( cusetboundaryref_yp) , dim3(gridboundy),dim3(blockboundy), 0, 0, cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NCELLS3*3); hipLaunchKernelGGL(( cusetboundaryref_zp) , dim3(gridboundz),dim3(blockboundz), 0, 0, cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NCELLS3*3); hipLaunchKernelGGL(( cusetboundarytrans_xm), dim3(gridboundx),dim3(blockboundx), 0, 0, cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NCELLS3*3); hipLaunchKernelGGL(( cusetboundaryref_ym) , dim3(gridboundy),dim3(blockboundy), 0, 0, cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NCELLS3*3); hipLaunchKernelGGL(( cusetboundaryref_zm) , dim3(gridboundz),dim3(blockboundz), 0, 0, cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NCELLS3*3); } } #else dim3 blockboundx(NCELLY); dim3 gridboundx(NCELLZ); dim3 blockboundy(NCELLX); dim3 gridboundy(NCELLZ); dim3 blockboundz(NCELLX); dim3 gridboundz(NCELLY); if(neigh[5]!=rank) { exchange_zp(cuegy, cuflx, cuegy_new, buff, neigh, pos[2]%2); exchange_zm(cuegy, cuflx, cuegy_new, buff, neigh, pos[2]%2); } else { hipLaunchKernelGGL(( cusetboundaryper_zp), dim3(gridboundz),dim3(blockboundz), 0, 0, cuegy, cuxion, cudensity, cutemperature, cuflx); hipLaunchKernelGGL(( cusetboundaryper_zm), dim3(gridboundz),dim3(blockboundz), 0, 0, cuegy, cuxion, cudensity, cutemperature, cuflx); } if(neigh[3]!=rank) { exchange_yp(cuegy, cuflx, cuegy_new, buff, neigh, pos[1]%2); exchange_ym(cuegy, cuflx, cuegy_new, buff, neigh, pos[1]%2); } else { hipLaunchKernelGGL(( cusetboundaryper_yp), dim3(gridboundy),dim3(blockboundy), 0, 0, cuegy, cuxion, cudensity, cutemperature, cuflx); hipLaunchKernelGGL(( cusetboundaryper_ym), dim3(gridboundy),dim3(blockboundy), 0, 0, cuegy, cuxion, cudensity, cutemperature, cuflx); } if(neigh[1]!=rank) 
{ exchange_xp(cuegy, cuflx, cuegy_new, buff, neigh, pos[0]%2); exchange_xm(cuegy, cuflx, cuegy_new, buff, neigh, pos[0]%2); } else { hipLaunchKernelGGL(( cusetboundaryper_xp), dim3(gridboundx),dim3(blockboundx), 0, 0, cuegy, cuxion, cudensity, cutemperature, cuflx); hipLaunchKernelGGL(( cusetboundaryper_xm), dim3(gridboundx),dim3(blockboundx), 0, 0, cuegy, cuxion, cudensity, cutemperature, cuflx); } if(boundary==0) { if(pos[0]==0)hipLaunchKernelGGL(( cusetboundarytrans_xm), dim3(gridboundx),dim3(blockboundx), 0, 0, cuegy, cuxion, cudensity, cutemperature, cuflx); if(pos[1]==0)hipLaunchKernelGGL(( cusetboundarytrans_ym), dim3(gridboundx),dim3(blockboundx), 0, 0, cuegy, cuxion, cudensity, cutemperature, cuflx); if(pos[2]==0)hipLaunchKernelGGL(( cusetboundarytrans_zm), dim3(gridboundx),dim3(blockboundx), 0, 0, cuegy, cuxion, cudensity, cutemperature, cuflx); if(pos[0]==(NGPUX-1))hipLaunchKernelGGL(( cusetboundarytrans_xp), dim3(gridboundx),dim3(blockboundx), 0, 0, cuegy, cuxion, cudensity, cutemperature, cuflx); if(pos[1]==(NGPUY-1))hipLaunchKernelGGL(( cusetboundarytrans_yp), dim3(gridboundx),dim3(blockboundx), 0, 0, cuegy, cuxion, cudensity, cutemperature, cuflx); if(pos[2]==(NGPUZ-1))hipLaunchKernelGGL(( cusetboundarytrans_zp), dim3(gridboundx),dim3(blockboundx), 0, 0, cuegy, cuxion, cudensity, cutemperature, cuflx); } #endif #ifndef COSMO dt=courantnumber*dx/3./c; if(rank==0) printf("dx=%e cfl=%e dt=%e\n",dx,courantnumber,dt); tnext=t;//+ndumps*dt; #else aexp=astart; #ifndef FLAT_COSMO t=a2tgen(aexp,omegam,omegav,Hubble0);// Hubble0 in sec-1 #else t=a2t(aexp,omegav,Hubble0);// Hubble0 in sec-1 #endif tnext=t; float tstart=t; if(rank==0) printf("aexp= %f tstart=%f tmax=%f\n",aexp,t/unit_time,tmax/unit_time); #ifndef FLAT_COSMO if(rank==0) printf("Building Expansion factor table"); float da=(A_INT_MAX-aexp)/N_INT; float a_int[N_INT],t_int[N_INT]; for(int i_int=0;i_int<N_INT;i_int++) { a_int[i]=aexp+i_int*da; t_int[i]=a2tgen(a_int[i],omegam,omegav,Hubble0); // Hubble0 in sec-1 } int n_int=0; #endif #endif // some variables for field update int changefield=0; int forcedump; int ifield=0; // 1 because tfield stores the NEXT field float tfield; if(fieldlist){ while(t>=tlist[ifield]) { ifield++; } tfield=tlist[ifield]; if(rank==0) printf("ICs (tstart=%f) between field #%d (t=%f) and field #%d (t=%f)\n",t/unit_time,ifield-1,tlist[ifield-1]/unit_time,ifield,tlist[ifield]/unit_time); if(rank==0) printf("starting with NEXT field #%d @ tfield =%f with tstart=%f\n",ifield,tlist[ifield]/unit_time,t/unit_time);// -1 because tfield stores the NEXT field } // log file FILE *logfile; if(rank==0) logfile=fopen("log.out","w"); #ifdef TIMINGS FILE *timefile; if(rank==0) { timefile=fopen("time.out","w"); } #endif //float ft=1./powf(2.,20); float ft=1.; #ifdef COSMO float factfesc=1.; #endif float *factgrp; factgrp=(float*)malloc(NGRP*sizeof(float)); FACTGRP; unsigned int timer; float q0=0.,q1=0.,q3; #ifdef TIMINGS float q4,q7,q8,q9,q10,q11; double time_old,time_new; #endif if(rank==0) { cutCreateTimer(&timer); cutStartTimer(timer); } // MAIN LOOP STARTS HERE ======================================================>>>> // ============================================================================>>>> // ============================================================================>>>> // ============================================================================>>>> // ============================================================================>>>> // 
============================================================================>>>> hipDeviceSynchronize(); #ifdef WMPI mpisynch(); #endif cuDumpResults(0,t,aexp,0); while(t<=tmax) { hipDeviceSynchronize(); #ifdef WMPI get_elapsed(&time_old); mpisynch(); #endif if(rank==0) { q3=q1-q0; q0=cutGetTimerValue(timer); } #ifndef COSMO dt=courantnumber*dx/3./c*ft; if(((nstep%ndisp)==0)&&(rank==0)) { printf(" ------------------ \n"); printf(" Step= %d Time= %f dt=%f tnext=%f cgpu (msec)=%f\n",nstep,t/unit_time,dt/unit_time,tnext/unit_time,q3); printf(" ------------------ \n"); } #else dt=courantnumber*dx/3./c*ft; if(((nstep%ndisp)==0)&&(rank==0)) { printf(" ------------------------------\n"); printf(" Step= %d Time= %f Elapsed= %f dt= %f aexp=%f z=%f fesc=%f clump= %f Next tfield=%f cgpu=%f\n",nstep,t/unit_time,(t-tstart)/unit_time,dt/unit_time,aexp,1./aexp-1.,factfesc*fesc,clump,tfield/unit_time,q3); printf(" ----------------------------- \n"); fprintf(logfile,"%d %f %f %f %f %f %f %f\n",nstep,t/unit_time,(t-tstart)/unit_time,dt/unit_time,aexp,1./aexp-1.,tfield/unit_time,q3); } #endif if(fieldlist) { // we must not go further than the next field if(dt>=tfield-t) { #ifdef WMPI if(rank==0) printf("last timestep with field #%d : next field= %f t=%f t+dt=%f\n",ifield,tfield/unit_time,t/unit_time,(t+dt)/unit_time); if(((tfield-t)/unit_time)==0.) { if(rank==0) printf("WARNING FIELD DT=O -> switch immediatly to next field\n"); cuGetField(ifield,ic_rank); changefield=0; ifield++; tfield=tlist[ifield]; ft=1./powf(2.,20); } else { changefield=1; dt=tfield-t; if(rank==0) printf("dt set to %f\n",dt/unit_time); } #else if(rank==0) printf("last timestep with field #%d : next field= %f t=%f t+dt=%f\n",ifield,tfield/unit_time,t/unit_time,(t+dt)/unit_time); if(((tfield-t)/unit_time)==0.) { if(rank==0) printf("WARNING FIELD DT=O -> switch immediatly to next field\n"); cuGetField(ifield,ic_rank); changefield=0; ifield++; tfield=tlist[ifield]; ft=1./powf(2.,20); } else { changefield=1; dt=tfield-t; if(rank==0) printf("dt set to %f\n",dt/unit_time); } #endif } } //================================== UNSPLIT 3D SCHEME============================= for (int igrp=0;igrp<NGRP;igrp++) { #ifdef COSMO hipLaunchKernelGGL(( cuComputeELF), dim3(gridsimple),dim3(blocksimple), 0, 0, cuegy+igrp*NCELLS3, cuflx+igrp*NCELLS3*3, cusrc0, cuegy_new+igrp*NCELLS3, c, dx, dt, nstep,aexp); #else hipLaunchKernelGGL(( cuComputeELF), dim3(gridsimple),dim3(blocksimple), 0, 0, cuegy+igrp*NCELLS3, cuflx+igrp*NCELLS3*3, cusrc0, cuegy_new+igrp*NCELLS3, c, dx, dt, nstep,1.); #endif hipDeviceSynchronize(); CUERR(); if(verbose) puts("Hyperbolic Egy ok"); #ifdef COSMO hipLaunchKernelGGL(( cuComputeF_TOTAL_LF), dim3(gridsimple),dim3(blocksimple), 0, 0, cuflx+igrp*NCELLS3*3,cudedd,cusrc0,cuflx_new+igrp*NCELLS3*3,c,dx,dt,nstep,cuegy+igrp*NCELLS3, aexp); #else hipLaunchKernelGGL(( cuComputeF_TOTAL_LF), dim3(gridsimple),dim3(blocksimple), 0, 0, cuflx+igrp*NCELLS3*3,cudedd,cusrc0,cuflx_new+igrp*NCELLS3*3,c,dx,dt,nstep,cuegy+igrp*NCELLS3,1.); #endif hipDeviceSynchronize(); CUERR(); #ifdef SDISCRETE #ifdef COSMO if(kf!=0.) 
factfesc=exp(kf-powf(aexp/a0,af)); hipLaunchKernelGGL(( cuAddSource), dim3(gridsource),dim3(blocksource), 0, 0, cuegy_new+igrp*NCELLS3,cuflx_new+igrp*NCELLS3*3,cusrc0,cusrc0pos,dt*fesc*factfesc*factgrp[igrp],dx,nsource,aexp,c); #else hipLaunchKernelGGL(( cuAddSource), dim3(gridsource),dim3(blocksource), 0, 0, cuegy_new+igrp*NCELLS3,cuflx_new+igrp*NCELLS3*3,cusrc0,cusrc0pos,dt*fesc*factgrp[igrp],dx,nsource,1.,c); #endif CUERR(); if(verbose) puts("Add Source ok"); #endif if(verbose) puts("Hyperbolic Flux ok"); hipDeviceSynchronize(); } #ifdef TIMINGS #ifdef WMPI mpisynch(); #endif if(rank==0) { q11=cutGetTimerValue(timer); } #endif #ifdef TESTCOOL #ifdef COSMO hipLaunchKernelGGL(( cuComputeIon), dim3(gridion),dim3(blockion), 0, 0, cuegy_new, cuflx_new, cuxion, cudensity, cutemperature, dt/cooling, c, egy_min,unit_number,aexp); #else hipLaunchKernelGGL(( cuComputeIon), dim3(gridion),dim3(blockion), 0, 0, cuegy_new, cuflx_new, cuxion, cudensity, cutemperature, dt/cooling, c, egy_min,unit_number,1.); #endif #endif CUERR(); if(verbose) puts("Chemistry ok"); hipDeviceSynchronize(); #ifdef WMPI mpisynch(); #endif #ifdef TIMINGS if(rank==0) { q4=cutGetTimerValue(timer); } #endif // Here cuegy is used to store the temperature #ifdef COSMO float hubblet=Hubble0*sqrtf(omegam/aexp+omegav*(aexp*aexp))/aexp; hipLaunchKernelGGL(( cuComputeTemp), dim3(gcool),dim3(bcool), 0, 0, cuxion, cudensity, cutemperature, cuegy_new, fudgecool, c, dt/cooling, unit_number, ncvgcool, aexp, hubblet, cuflx_new, clump); #else hipLaunchKernelGGL(( cuComputeTemp), dim3(gcool),dim3(bcool), 0, 0, cuxion, cudensity, cutemperature, cuegy_new, fudgecool, c, dt/cooling, unit_number, ncvgcool, 1., 0., cuflx_new, clump); #endif CUERR(); if(verbose) puts("Cooling ok"); hipDeviceSynchronize(); #ifdef WMPI mpisynch(); #endif #ifdef TIMINGS hipDeviceSynchronize(); #ifdef WMPI mpisynch(); #endif if(rank==0) { q8=cutGetTimerValue(timer); } #endif hipMemcpy(cuegy,cuegy_new,NCELLS3*sizeof(float)*NGRP,hipMemcpyDeviceToDevice); hipMemcpy(cuflx,cuflx_new,NCELLS3*sizeof(float)*3*NGRP,hipMemcpyDeviceToDevice); #ifdef TIMINGS hipDeviceSynchronize(); #ifdef WMPI mpisynch(); #endif if(rank==0) { q10=cutGetTimerValue(timer); } #endif if(verbose) puts("Dealing with boundaries"); #ifndef WMPI for (int igrp=0;igrp<NGRP;igrp++) { if(boundary==0) // transmissive boundary conditions { hipLaunchKernelGGL(( cusetboundarytrans_xp), dim3(gridboundx),dim3(blockboundx), 0, 0, cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NGRP*3); hipLaunchKernelGGL(( cusetboundarytrans_yp), dim3(gridboundy),dim3(blockboundy), 0, 0, cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NGRP*3); hipLaunchKernelGGL(( cusetboundarytrans_zp), dim3(gridboundz),dim3(blockboundz), 0, 0, cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NGRP*3); hipLaunchKernelGGL(( cusetboundarytrans_xm), dim3(gridboundx),dim3(blockboundx), 0, 0, cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NGRP*3); hipLaunchKernelGGL(( cusetboundarytrans_ym), dim3(gridboundy),dim3(blockboundy), 0, 0, cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NGRP*3); hipLaunchKernelGGL(( cusetboundarytrans_zm), dim3(gridboundz),dim3(blockboundz), 0, 0, cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NGRP*3); } else if(boundary==1) // reflexive boundary conditions { hipLaunchKernelGGL(( cusetboundaryref_zp), dim3(gridboundz),dim3(blockboundz), 0, 0, cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NCELLS3*3); 
hipLaunchKernelGGL(( cusetboundaryref_zm), dim3(gridboundz),dim3(blockboundz), 0, 0, cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NCELLS3*3); hipLaunchKernelGGL(( cusetboundaryref_yp), dim3(gridboundy),dim3(blockboundy), 0, 0, cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NCELLS3*3); hipLaunchKernelGGL(( cusetboundaryref_ym), dim3(gridboundy),dim3(blockboundy), 0, 0, cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NCELLS3*3); hipLaunchKernelGGL(( cusetboundaryref_xp), dim3(gridboundx),dim3(blockboundx), 0, 0, cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NCELLS3*3); hipLaunchKernelGGL(( cusetboundaryref_xm), dim3(gridboundx),dim3(blockboundx), 0, 0, cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NCELLS3*3); } else if(boundary==2) // Periodic boundary conditions { hipLaunchKernelGGL(( cusetboundaryper_xp), dim3(gridboundx),dim3(blockboundx), 0, 0, cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NCELLS3*3); hipLaunchKernelGGL(( cusetboundaryper_yp), dim3(gridboundy),dim3(blockboundy), 0, 0, cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NCELLS3*3); hipLaunchKernelGGL(( cusetboundaryper_zp), dim3(gridboundz),dim3(blockboundz), 0, 0, cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NCELLS3*3); hipLaunchKernelGGL(( cusetboundaryper_xm), dim3(gridboundx),dim3(blockboundx), 0, 0, cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NCELLS3*3); hipLaunchKernelGGL(( cusetboundaryper_ym), dim3(gridboundy),dim3(blockboundy), 0, 0, cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NCELLS3*3); hipLaunchKernelGGL(( cusetboundaryper_zm), dim3(gridboundz),dim3(blockboundz), 0, 0, cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NCELLS3*3); } else if(boundary==3) // Mixed boundary conditions { hipLaunchKernelGGL(( cusetboundarytrans_xp), dim3(gridboundx),dim3(blockboundx), 0, 0, cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NCELLS3*3); hipLaunchKernelGGL(( cusetboundaryref_yp) , dim3(gridboundy),dim3(blockboundy), 0, 0, cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NCELLS3*3); hipLaunchKernelGGL(( cusetboundaryref_zp) , dim3(gridboundz),dim3(blockboundz), 0, 0, cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NCELLS3*3); hipLaunchKernelGGL(( cusetboundarytrans_xm), dim3(gridboundx),dim3(blockboundx), 0, 0, cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NCELLS3*3); hipLaunchKernelGGL(( cusetboundaryref_ym) , dim3(gridboundy),dim3(blockboundy), 0, 0, cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NCELLS3*3); hipLaunchKernelGGL(( cusetboundaryref_zm) , dim3(gridboundz),dim3(blockboundz), 0, 0, cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NCELLS3*3); } } #else if(neigh[5]!=rank) { exchange_zp(cuegy, cuflx, cuegy_new, buff, neigh, pos[2]%2); exchange_zm(cuegy, cuflx, cuegy_new, buff, neigh, pos[2]%2); } else { hipLaunchKernelGGL(( cusetboundaryper_zp), dim3(gridboundz),dim3(blockboundz), 0, 0, cuegy, cuxion, cudensity, cutemperature, cuflx); hipLaunchKernelGGL(( cusetboundaryper_zm), dim3(gridboundz),dim3(blockboundz), 0, 0, cuegy, cuxion, cudensity, cutemperature, cuflx); } if(neigh[3]!=rank) { exchange_yp(cuegy, cuflx, cuegy_new, buff, neigh, pos[1]%2); exchange_ym(cuegy, cuflx, cuegy_new, buff, neigh, pos[1]%2); } else { hipLaunchKernelGGL(( cusetboundaryper_yp), dim3(gridboundy),dim3(blockboundy), 0, 0, cuegy, 
cuxion, cudensity, cutemperature, cuflx); hipLaunchKernelGGL(( cusetboundaryper_ym), dim3(gridboundy),dim3(blockboundy), 0, 0, cuegy, cuxion, cudensity, cutemperature, cuflx); } if(neigh[1]!=rank) { exchange_xp(cuegy, cuflx, cuegy_new, buff, neigh, pos[0]%2); exchange_xm(cuegy, cuflx, cuegy_new, buff, neigh, pos[0]%2); } else { hipLaunchKernelGGL(( cusetboundaryper_xp), dim3(gridboundx),dim3(blockboundx), 0, 0, cuegy, cuxion, cudensity, cutemperature, cuflx); hipLaunchKernelGGL(( cusetboundaryper_xm), dim3(gridboundx),dim3(blockboundx), 0, 0, cuegy, cuxion, cudensity, cutemperature, cuflx); } if(boundary==0) { //printf("coucou\n"); if(pos[0]==0)hipLaunchKernelGGL(( cusetboundarytrans_xm), dim3(gridboundx),dim3(blockboundx), 0, 0, cuegy, cuxion, cudensity, cutemperature, cuflx); if(pos[1]==0)hipLaunchKernelGGL(( cusetboundarytrans_ym), dim3(gridboundx),dim3(blockboundx), 0, 0, cuegy, cuxion, cudensity, cutemperature, cuflx); if(pos[2]==0)hipLaunchKernelGGL(( cusetboundarytrans_zm), dim3(gridboundx),dim3(blockboundx), 0, 0, cuegy, cuxion, cudensity, cutemperature, cuflx); if(pos[0]==(NGPUX-1))hipLaunchKernelGGL(( cusetboundarytrans_xp), dim3(gridboundx),dim3(blockboundx), 0, 0, cuegy, cuxion, cudensity, cutemperature, cuflx); if(pos[1]==(NGPUY-1))hipLaunchKernelGGL(( cusetboundarytrans_yp), dim3(gridboundx),dim3(blockboundx), 0, 0, cuegy, cuxion, cudensity, cutemperature, cuflx); if(pos[2]==(NGPUZ-1))hipLaunchKernelGGL(( cusetboundarytrans_zp), dim3(gridboundx),dim3(blockboundx), 0, 0, cuegy, cuxion, cudensity, cutemperature, cuflx); } #endif hipDeviceSynchronize(); #ifdef WMPI mpisynch(); #endif #ifdef TIMINGS if(rank==0) { q7=cutGetTimerValue(timer); } #endif //printf("proc %d ready to dump\n",ic_rank); if(((nstep%ndumps)==0)||(forcedump)) { ntsteps=ntsteps+1; forcedump=0; #ifdef COSMO #ifdef FLAT_COSMO float aexpdump=t2a(t+dt,omegav,Hubble0); #else if(t+dt>t_int_max) { aexpdump=(a_int[int_step+2]-a_int[int_step+1])/(t_int[int_step+2]-t_int[int_step+1])*(t+dt-t_int[int_step+1]); } else { aexpdump=(a_int[int_step+1]-a_int[int_step])/(t_int[int_step+1]-t_int[int_step])*(t+dt-t_int[int_step]); } #endif cuDumpResults(ntsteps,t+dt,aexpdump,ic_rank); #else cuDumpResults(ntsteps,t+dt,0.,ic_rank); #endif tnext=tnext+ndumps*dt/ft; if(rank==0) printf("tnext=%f\n",tnext/unit_time); } //-------------------------------------------------------------------- // Dealing with fieldlists //-------------------------------------------------------------------- ft=fminf(ft*2.,1.); if(fieldlist) { if(changefield) { int ercode; #ifdef WMPI ercode=cuGetField(ifield,ic_rank); #else ercode=cuGetField(ifield,0); #endif if(ercode==38) { if(rank==0) { fclose(logfile); fclose(timefile); } abort(); } forcedump=0; changefield=0; ifield++; tfield=tlist[ifield]; ft=1./powf(2.,20); //ft=1.; } } // UPDATING VARIABLES t=t+dt; if(t>tmax) { puts("t > tmax -----> run will be terminated"); } #ifdef COSMO #ifdef FLAT_COSMO aexp=t2a(t,omegav,Hubble0); // A CHANGER PAR INTERPOLATION #else if(t>t_int_max) { int_step++; } aexp=(a_int[int_step+1]-a_int[int_step])/(t_int[int_step+1]-t_int[int_step])*(t-t_int[int_step]); #endif c=c_r/aexp; #endif hipDeviceSynchronize(); #ifdef WMPI mpisynch(); #endif if(rank==0) { q1=cutGetTimerValue(timer); } nstep++; if(nstep==nmax) { if(rank==0) puts("Max number of steps achieved: STOP"); break; } hipDeviceSynchronize(); #ifdef WMPI get_elapsed(&time_new); time_new=time_new-time_old; mpireducemax(&time_new); mpisynch(); #endif #ifdef TIMINGS if(rank==0){ q9=cutGetTimerValue(timer); printf("transport=%f 
chem=%f cool=%f update=%f bound=%f IO=%f,grand total=%f time_new=%lf\n",q11-q0,q4-q11,q8-q4,q10-q8,q7-q10,q9-q7,q9-q0,time_new); fprintf(timefile,"%d %f %f %f %f %f %f %f\n",nstep-1,q11-q0,q4-q11,q8-q4,q10-q8,q7-q10,q9-q7,q9-q0,time_new); } #endif hipDeviceSynchronize(); #ifdef WMPI mpisynch(); #endif } if(rank==0) fclose(logfile); #ifdef TIMINGS if(rank==0) fclose(timefile); #endif return 0; }
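// Note for readers: under #ifndef FLAT_COSMO, Mainloop builds lookup tables a_int[]/t_int[]
// so that aexp(t) can be recovered by linear interpolation instead of inverting a2tgen()
// every step. Two things stand out in the source as shown: the table-building loop indexes
// with `i` while its loop variable is `i_int` (which looks like it would not compile unless
// `i` is declared elsewhere), and the aexp interpolation in the main loop appears to omit
// the a_int[int_step] base term. The sketch below shows the intended build-and-interpolate
// pattern with consistent indexing; buildExpansionTable and interpAexp are illustrative
// names, and the placeholder t(a) mapping stands in for a2tgen(), which is not shown here.
#include <cstdio>
#include <cmath>

const int kNInt = 2048;

// Placeholder for a2tgen(): t(a) = a^(3/2), just to have a monotonic mapping for the demo.
static float placeholderA2T(float a) { return std::pow(a, 1.5f); }

void buildExpansionTable(float aStart, float aMax, float* aTab, float* tTab)
{
    float da = (aMax - aStart) / kNInt;
    for (int i = 0; i < kNInt; ++i) {            // consistent index, unlike the source loop
        aTab[i] = aStart + i * da;
        tTab[i] = placeholderA2T(aTab[i]);
    }
}

// Linear interpolation of aexp at time t, advancing `step` as t crosses table entries,
// mirroring how the main loop advances int_step.
float interpAexp(float t, const float* aTab, const float* tTab, int* step)
{
    while (*step + 1 < kNInt - 1 && t > tTab[*step + 1]) ++(*step);
    int s = *step;
    float frac = (t - tTab[s]) / (tTab[s + 1] - tTab[s]);
    return aTab[s] + frac * (aTab[s + 1] - aTab[s]);   // note the aTab[s] base term
}

int main()
{
    float aTab[kNInt], tTab[kNInt];
    buildExpansionTable(0.01f, 0.166667f, aTab, tTab);
    int step = 0;
    float t = placeholderA2T(0.05f);             // query at a time whose exact aexp is 0.05
    printf("aexp(t) ~ %f (expected ~0.05)\n", interpAexp(t, aTab, tTab, &step));
    return 0;
}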
fd277ce9d2affeb2ba7d42dd51ff69f095d9c1eb.cu
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <cutil.h> #include "params.h" #include "common.h" #include "bnd.h" #include "GPU.h" #include "Boundary.h" #include "cosmo.h" #include "Allocation.h" #include "Io.h" #include "Explicit.h" #include "Atomic.h" #ifdef WMPI #include "communication.h" #include "Interface.h" #endif //********************************************************** //********************************************************** extern "C" int Mainloop(int rank, int *pos, int *neigh, int ic_rank); //********************************************************** //********************************************************** #define CUERR() //printf("\n %s on %d \n",cudaGetErrorString(cudaGetLastError()),ic_rank) #ifndef max #define max( a, b ) ( ((a) > (b)) ? (a) : (b) ) #endif #ifndef min #define min( a, b ) ( ((a) < (b)) ? (a) : (b) ) #endif #define NCELLS3 (NCELLX+NBOUND2)*(NCELLY+NBOUND2)*(NCELLZ+NBOUND2) #define N_INT 2048 #define A_INT_MAX 0.166667 //********************************************************** //********************************************************** int Mainloop(int rank, int *pos, int *neigh, int ic_rank) { if(rank==0) printf("Mainloop entered by proc %d\n",rank); float tnext; dim3 blockion(NCELLX); // USED BY IONISATION dim3 gridion(NCELLY,NCELLZ); dim3 bcool(BLOCKCOOL); // USED BY COOLING dim3 gcool(GRIDCOOLX,GRIDCOOLY); dim3 blocksimple(NCELLX); // USED BY ADVECTION THREADS dim3 gridsimple(NCELLY,NCELLZ); #ifdef SDISCRETE int nthreadsource=min(nsource,128); dim3 gridsource((int)(round((float)(nsource)/float(nthreadsource)))); dim3 blocksource(nthreadsource); #endif #ifndef WMPI dim3 blockboundx(NCELLY); dim3 gridboundx(NCELLZ); dim3 blockboundy(NCELLX); dim3 gridboundy(NCELLZ); dim3 blockboundz(NCELLX); dim3 gridboundz(NCELLY); for (int igrp=0;igrp<NGRP;igrp++) { if(boundary==0) // transmissive boundary conditions { cusetboundarytrans_xp<<<gridboundx,blockboundx>>>(cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NCELLS3*3); cusetboundarytrans_yp<<<gridboundy,blockboundy>>>(cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NCELLS3*3); cusetboundarytrans_zp<<<gridboundz,blockboundz>>>(cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NCELLS3*3); cusetboundarytrans_xm<<<gridboundx,blockboundx>>>(cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NCELLS3*3); cusetboundarytrans_ym<<<gridboundy,blockboundy>>>(cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NCELLS3*3); cusetboundarytrans_zm<<<gridboundz,blockboundz>>>(cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NCELLS3*3); } else if(boundary==1) // reflexive boundary conditions { cusetboundaryref_zp<<<gridboundz,blockboundz>>>(cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NCELLS3*3); cusetboundaryref_zm<<<gridboundz,blockboundz>>>(cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NCELLS3*3); cusetboundaryref_yp<<<gridboundy,blockboundy>>>(cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NCELLS3*3); cusetboundaryref_ym<<<gridboundy,blockboundy>>>(cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NCELLS3*3); cusetboundaryref_xp<<<gridboundx,blockboundx>>>(cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NCELLS3*3); cusetboundaryref_xm<<<gridboundx,blockboundx>>>(cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NCELLS3*3); } else if(boundary==2) // Periodic boundary conditions { 
cusetboundaryper_xp<<<gridboundx,blockboundx>>>(cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NCELLS3*3); cusetboundaryper_yp<<<gridboundy,blockboundy>>>(cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NCELLS3*3); cusetboundaryper_zp<<<gridboundz,blockboundz>>>(cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NCELLS3*3); cusetboundaryper_xm<<<gridboundx,blockboundx>>>(cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NCELLS3*3); cusetboundaryper_ym<<<gridboundy,blockboundy>>>(cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NCELLS3*3); cusetboundaryper_zm<<<gridboundz,blockboundz>>>(cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NCELLS3*3); } else if(boundary==3) // Mixed boundary conditions { cusetboundarytrans_xp<<<gridboundx,blockboundx>>>(cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NCELLS3*3); cusetboundaryref_yp <<<gridboundy,blockboundy>>>(cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NCELLS3*3); cusetboundaryref_zp <<<gridboundz,blockboundz>>>(cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NCELLS3*3); cusetboundarytrans_xm<<<gridboundx,blockboundx>>>(cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NCELLS3*3); cusetboundaryref_ym <<<gridboundy,blockboundy>>>(cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NCELLS3*3); cusetboundaryref_zm <<<gridboundz,blockboundz>>>(cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NCELLS3*3); } } #else dim3 blockboundx(NCELLY); dim3 gridboundx(NCELLZ); dim3 blockboundy(NCELLX); dim3 gridboundy(NCELLZ); dim3 blockboundz(NCELLX); dim3 gridboundz(NCELLY); if(neigh[5]!=rank) { exchange_zp(cuegy, cuflx, cuegy_new, buff, neigh, pos[2]%2); exchange_zm(cuegy, cuflx, cuegy_new, buff, neigh, pos[2]%2); } else { cusetboundaryper_zp<<<gridboundz,blockboundz>>>(cuegy, cuxion, cudensity, cutemperature, cuflx); cusetboundaryper_zm<<<gridboundz,blockboundz>>>(cuegy, cuxion, cudensity, cutemperature, cuflx); } if(neigh[3]!=rank) { exchange_yp(cuegy, cuflx, cuegy_new, buff, neigh, pos[1]%2); exchange_ym(cuegy, cuflx, cuegy_new, buff, neigh, pos[1]%2); } else { cusetboundaryper_yp<<<gridboundy,blockboundy>>>(cuegy, cuxion, cudensity, cutemperature, cuflx); cusetboundaryper_ym<<<gridboundy,blockboundy>>>(cuegy, cuxion, cudensity, cutemperature, cuflx); } if(neigh[1]!=rank) { exchange_xp(cuegy, cuflx, cuegy_new, buff, neigh, pos[0]%2); exchange_xm(cuegy, cuflx, cuegy_new, buff, neigh, pos[0]%2); } else { cusetboundaryper_xp<<<gridboundx,blockboundx>>>(cuegy, cuxion, cudensity, cutemperature, cuflx); cusetboundaryper_xm<<<gridboundx,blockboundx>>>(cuegy, cuxion, cudensity, cutemperature, cuflx); } if(boundary==0) { if(pos[0]==0) cusetboundarytrans_xm<<<gridboundx,blockboundx>>>(cuegy, cuxion, cudensity, cutemperature, cuflx); if(pos[1]==0) cusetboundarytrans_ym<<<gridboundx,blockboundx>>>(cuegy, cuxion, cudensity, cutemperature, cuflx); if(pos[2]==0) cusetboundarytrans_zm<<<gridboundx,blockboundx>>>(cuegy, cuxion, cudensity, cutemperature, cuflx); if(pos[0]==(NGPUX-1)) cusetboundarytrans_xp<<<gridboundx,blockboundx>>>(cuegy, cuxion, cudensity, cutemperature, cuflx); if(pos[1]==(NGPUY-1)) cusetboundarytrans_yp<<<gridboundx,blockboundx>>>(cuegy, cuxion, cudensity, cutemperature, cuflx); if(pos[2]==(NGPUZ-1)) cusetboundarytrans_zp<<<gridboundx,blockboundx>>>(cuegy, cuxion, cudensity, cutemperature, cuflx); } #endif #ifndef COSMO dt=courantnumber*dx/3./c; if(rank==0) 
printf("dx=%e cfl=%e dt=%e\n",dx,courantnumber,dt); tnext=t;//+ndumps*dt; #else aexp=astart; #ifndef FLAT_COSMO t=a2tgen(aexp,omegam,omegav,Hubble0);// Hubble0 in sec-1 #else t=a2t(aexp,omegav,Hubble0);// Hubble0 in sec-1 #endif tnext=t; float tstart=t; if(rank==0) printf("aexp= %f tstart=%f tmax=%f\n",aexp,t/unit_time,tmax/unit_time); #ifndef FLAT_COSMO if(rank==0) printf("Building Expansion factor table"); float da=(A_INT_MAX-aexp)/N_INT; float a_int[N_INT],t_int[N_INT]; for(int i_int=0;i_int<N_INT;i_int++) { a_int[i]=aexp+i_int*da; t_int[i]=a2tgen(a_int[i],omegam,omegav,Hubble0); // Hubble0 in sec-1 } int n_int=0; #endif #endif // some variables for field update int changefield=0; int forcedump; int ifield=0; // 1 because tfield stores the NEXT field float tfield; if(fieldlist){ while(t>=tlist[ifield]) { ifield++; } tfield=tlist[ifield]; if(rank==0) printf("ICs (tstart=%f) between field #%d (t=%f) and field #%d (t=%f)\n",t/unit_time,ifield-1,tlist[ifield-1]/unit_time,ifield,tlist[ifield]/unit_time); if(rank==0) printf("starting with NEXT field #%d @ tfield =%f with tstart=%f\n",ifield,tlist[ifield]/unit_time,t/unit_time);// -1 because tfield stores the NEXT field } // log file FILE *logfile; if(rank==0) logfile=fopen("log.out","w"); #ifdef TIMINGS FILE *timefile; if(rank==0) { timefile=fopen("time.out","w"); } #endif //float ft=1./powf(2.,20); float ft=1.; #ifdef COSMO float factfesc=1.; #endif float *factgrp; factgrp=(float*)malloc(NGRP*sizeof(float)); FACTGRP; unsigned int timer; float q0=0.,q1=0.,q3; #ifdef TIMINGS float q4,q7,q8,q9,q10,q11; double time_old,time_new; #endif if(rank==0) { cutCreateTimer(&timer); cutStartTimer(timer); } // MAIN LOOP STARTS HERE ======================================================>>>> // ============================================================================>>>> // ============================================================================>>>> // ============================================================================>>>> // ============================================================================>>>> // ============================================================================>>>> cudaThreadSynchronize(); #ifdef WMPI mpisynch(); #endif cuDumpResults(0,t,aexp,0); while(t<=tmax) { cudaThreadSynchronize(); #ifdef WMPI get_elapsed(&time_old); mpisynch(); #endif if(rank==0) { q3=q1-q0; q0=cutGetTimerValue(timer); } #ifndef COSMO dt=courantnumber*dx/3./c*ft; if(((nstep%ndisp)==0)&&(rank==0)) { printf(" ------------------ \n"); printf(" Step= %d Time= %f dt=%f tnext=%f cgpu (msec)=%f\n",nstep,t/unit_time,dt/unit_time,tnext/unit_time,q3); printf(" ------------------ \n"); } #else dt=courantnumber*dx/3./c*ft; if(((nstep%ndisp)==0)&&(rank==0)) { printf(" ------------------------------\n"); printf(" Step= %d Time= %f Elapsed= %f dt= %f aexp=%f z=%f fesc=%f clump= %f Next tfield=%f cgpu=%f\n",nstep,t/unit_time,(t-tstart)/unit_time,dt/unit_time,aexp,1./aexp-1.,factfesc*fesc,clump,tfield/unit_time,q3); printf(" ----------------------------- \n"); fprintf(logfile,"%d %f %f %f %f %f %f %f\n",nstep,t/unit_time,(t-tstart)/unit_time,dt/unit_time,aexp,1./aexp-1.,tfield/unit_time,q3); } #endif if(fieldlist) { // we must not go further than the next field if(dt>=tfield-t) { #ifdef WMPI if(rank==0) printf("last timestep with field #%d : next field= %f t=%f t+dt=%f\n",ifield,tfield/unit_time,t/unit_time,(t+dt)/unit_time); if(((tfield-t)/unit_time)==0.) 
{ if(rank==0) printf("WARNING FIELD DT=O -> switch immediatly to next field\n"); cuGetField(ifield,ic_rank); changefield=0; ifield++; tfield=tlist[ifield]; ft=1./powf(2.,20); } else { changefield=1; dt=tfield-t; if(rank==0) printf("dt set to %f\n",dt/unit_time); } #else if(rank==0) printf("last timestep with field #%d : next field= %f t=%f t+dt=%f\n",ifield,tfield/unit_time,t/unit_time,(t+dt)/unit_time); if(((tfield-t)/unit_time)==0.) { if(rank==0) printf("WARNING FIELD DT=O -> switch immediatly to next field\n"); cuGetField(ifield,ic_rank); changefield=0; ifield++; tfield=tlist[ifield]; ft=1./powf(2.,20); } else { changefield=1; dt=tfield-t; if(rank==0) printf("dt set to %f\n",dt/unit_time); } #endif } } //================================== UNSPLIT 3D SCHEME============================= for (int igrp=0;igrp<NGRP;igrp++) { #ifdef COSMO cuComputeELF<<<gridsimple,blocksimple>>>(cuegy+igrp*NCELLS3, cuflx+igrp*NCELLS3*3, cusrc0, cuegy_new+igrp*NCELLS3, c, dx, dt, nstep,aexp); #else cuComputeELF<<<gridsimple,blocksimple>>>(cuegy+igrp*NCELLS3, cuflx+igrp*NCELLS3*3, cusrc0, cuegy_new+igrp*NCELLS3, c, dx, dt, nstep,1.); #endif cudaThreadSynchronize(); CUERR(); if(verbose) puts("Hyperbolic Egy ok"); #ifdef COSMO cuComputeF_TOTAL_LF<<<gridsimple,blocksimple>>>(cuflx+igrp*NCELLS3*3,cudedd,cusrc0,cuflx_new+igrp*NCELLS3*3,c,dx,dt,nstep,cuegy+igrp*NCELLS3, aexp); #else cuComputeF_TOTAL_LF<<<gridsimple,blocksimple>>>(cuflx+igrp*NCELLS3*3,cudedd,cusrc0,cuflx_new+igrp*NCELLS3*3,c,dx,dt,nstep,cuegy+igrp*NCELLS3,1.); #endif cudaThreadSynchronize(); CUERR(); #ifdef SDISCRETE #ifdef COSMO if(kf!=0.) factfesc=exp(kf-powf(aexp/a0,af)); cuAddSource<<<gridsource,blocksource>>>(cuegy_new+igrp*NCELLS3,cuflx_new+igrp*NCELLS3*3,cusrc0,cusrc0pos,dt*fesc*factfesc*factgrp[igrp],dx,nsource,aexp,c); #else cuAddSource<<<gridsource,blocksource>>>(cuegy_new+igrp*NCELLS3,cuflx_new+igrp*NCELLS3*3,cusrc0,cusrc0pos,dt*fesc*factgrp[igrp],dx,nsource,1.,c); #endif CUERR(); if(verbose) puts("Add Source ok"); #endif if(verbose) puts("Hyperbolic Flux ok"); cudaThreadSynchronize(); } #ifdef TIMINGS #ifdef WMPI mpisynch(); #endif if(rank==0) { q11=cutGetTimerValue(timer); } #endif #ifdef TESTCOOL #ifdef COSMO cuComputeIon<<<gridion,blockion>>>(cuegy_new, cuflx_new, cuxion, cudensity, cutemperature, dt/cooling, c, egy_min,unit_number,aexp); #else cuComputeIon<<<gridion,blockion>>>(cuegy_new, cuflx_new, cuxion, cudensity, cutemperature, dt/cooling, c, egy_min,unit_number,1.); #endif #endif CUERR(); if(verbose) puts("Chemistry ok"); cudaThreadSynchronize(); #ifdef WMPI mpisynch(); #endif #ifdef TIMINGS if(rank==0) { q4=cutGetTimerValue(timer); } #endif // Here cuegy is used to store the temperature #ifdef COSMO float hubblet=Hubble0*sqrtf(omegam/aexp+omegav*(aexp*aexp))/aexp; cuComputeTemp<<<gcool,bcool>>>( cuxion, cudensity, cutemperature, cuegy_new, fudgecool, c, dt/cooling, unit_number, ncvgcool, aexp, hubblet, cuflx_new, clump); #else cuComputeTemp<<<gcool,bcool>>>( cuxion, cudensity, cutemperature, cuegy_new, fudgecool, c, dt/cooling, unit_number, ncvgcool, 1., 0., cuflx_new, clump); #endif CUERR(); if(verbose) puts("Cooling ok"); cudaThreadSynchronize(); #ifdef WMPI mpisynch(); #endif #ifdef TIMINGS cudaThreadSynchronize(); #ifdef WMPI mpisynch(); #endif if(rank==0) { q8=cutGetTimerValue(timer); } #endif cudaMemcpy(cuegy,cuegy_new,NCELLS3*sizeof(float)*NGRP,cudaMemcpyDeviceToDevice); cudaMemcpy(cuflx,cuflx_new,NCELLS3*sizeof(float)*3*NGRP,cudaMemcpyDeviceToDevice); #ifdef TIMINGS cudaThreadSynchronize(); #ifdef WMPI mpisynch(); #endif 
if(rank==0) { q10=cutGetTimerValue(timer); } #endif if(verbose) puts("Dealing with boundaries"); #ifndef WMPI for (int igrp=0;igrp<NGRP;igrp++) { if(boundary==0) // transmissive boundary conditions { cusetboundarytrans_xp<<<gridboundx,blockboundx>>>(cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NGRP*3); cusetboundarytrans_yp<<<gridboundy,blockboundy>>>(cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NGRP*3); cusetboundarytrans_zp<<<gridboundz,blockboundz>>>(cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NGRP*3); cusetboundarytrans_xm<<<gridboundx,blockboundx>>>(cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NGRP*3); cusetboundarytrans_ym<<<gridboundy,blockboundy>>>(cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NGRP*3); cusetboundarytrans_zm<<<gridboundz,blockboundz>>>(cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NGRP*3); } else if(boundary==1) // reflexive boundary conditions { cusetboundaryref_zp<<<gridboundz,blockboundz>>>(cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NCELLS3*3); cusetboundaryref_zm<<<gridboundz,blockboundz>>>(cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NCELLS3*3); cusetboundaryref_yp<<<gridboundy,blockboundy>>>(cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NCELLS3*3); cusetboundaryref_ym<<<gridboundy,blockboundy>>>(cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NCELLS3*3); cusetboundaryref_xp<<<gridboundx,blockboundx>>>(cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NCELLS3*3); cusetboundaryref_xm<<<gridboundx,blockboundx>>>(cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NCELLS3*3); } else if(boundary==2) // Periodic boundary conditions { cusetboundaryper_xp<<<gridboundx,blockboundx>>>(cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NCELLS3*3); cusetboundaryper_yp<<<gridboundy,blockboundy>>>(cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NCELLS3*3); cusetboundaryper_zp<<<gridboundz,blockboundz>>>(cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NCELLS3*3); cusetboundaryper_xm<<<gridboundx,blockboundx>>>(cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NCELLS3*3); cusetboundaryper_ym<<<gridboundy,blockboundy>>>(cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NCELLS3*3); cusetboundaryper_zm<<<gridboundz,blockboundz>>>(cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NCELLS3*3); } else if(boundary==3) // Mixed boundary conditions { cusetboundarytrans_xp<<<gridboundx,blockboundx>>>(cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NCELLS3*3); cusetboundaryref_yp <<<gridboundy,blockboundy>>>(cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NCELLS3*3); cusetboundaryref_zp <<<gridboundz,blockboundz>>>(cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NCELLS3*3); cusetboundarytrans_xm<<<gridboundx,blockboundx>>>(cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NCELLS3*3); cusetboundaryref_ym <<<gridboundy,blockboundy>>>(cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NCELLS3*3); cusetboundaryref_zm <<<gridboundz,blockboundz>>>(cuegy+igrp*NCELLS3, cuxion, cudensity, cutemperature, cuflx+igrp*NCELLS3*3); } } #else if(neigh[5]!=rank) { exchange_zp(cuegy, cuflx, cuegy_new, buff, neigh, pos[2]%2); exchange_zm(cuegy, cuflx, cuegy_new, buff, neigh, pos[2]%2); } else { 
cusetboundaryper_zp<<<gridboundz,blockboundz>>>(cuegy, cuxion, cudensity, cutemperature, cuflx); cusetboundaryper_zm<<<gridboundz,blockboundz>>>(cuegy, cuxion, cudensity, cutemperature, cuflx); } if(neigh[3]!=rank) { exchange_yp(cuegy, cuflx, cuegy_new, buff, neigh, pos[1]%2); exchange_ym(cuegy, cuflx, cuegy_new, buff, neigh, pos[1]%2); } else { cusetboundaryper_yp<<<gridboundy,blockboundy>>>(cuegy, cuxion, cudensity, cutemperature, cuflx); cusetboundaryper_ym<<<gridboundy,blockboundy>>>(cuegy, cuxion, cudensity, cutemperature, cuflx); } if(neigh[1]!=rank) { exchange_xp(cuegy, cuflx, cuegy_new, buff, neigh, pos[0]%2); exchange_xm(cuegy, cuflx, cuegy_new, buff, neigh, pos[0]%2); } else { cusetboundaryper_xp<<<gridboundx,blockboundx>>>(cuegy, cuxion, cudensity, cutemperature, cuflx); cusetboundaryper_xm<<<gridboundx,blockboundx>>>(cuegy, cuxion, cudensity, cutemperature, cuflx); } if(boundary==0) { //printf("coucou\n"); if(pos[0]==0) cusetboundarytrans_xm<<<gridboundx,blockboundx>>>(cuegy, cuxion, cudensity, cutemperature, cuflx); if(pos[1]==0) cusetboundarytrans_ym<<<gridboundx,blockboundx>>>(cuegy, cuxion, cudensity, cutemperature, cuflx); if(pos[2]==0) cusetboundarytrans_zm<<<gridboundx,blockboundx>>>(cuegy, cuxion, cudensity, cutemperature, cuflx); if(pos[0]==(NGPUX-1)) cusetboundarytrans_xp<<<gridboundx,blockboundx>>>(cuegy, cuxion, cudensity, cutemperature, cuflx); if(pos[1]==(NGPUY-1)) cusetboundarytrans_yp<<<gridboundx,blockboundx>>>(cuegy, cuxion, cudensity, cutemperature, cuflx); if(pos[2]==(NGPUZ-1)) cusetboundarytrans_zp<<<gridboundx,blockboundx>>>(cuegy, cuxion, cudensity, cutemperature, cuflx); } #endif cudaThreadSynchronize(); #ifdef WMPI mpisynch(); #endif #ifdef TIMINGS if(rank==0) { q7=cutGetTimerValue(timer); } #endif //printf("proc %d ready to dump\n",ic_rank); if(((nstep%ndumps)==0)||(forcedump)) { ntsteps=ntsteps+1; forcedump=0; #ifdef COSMO #ifdef FLAT_COSMO float aexpdump=t2a(t+dt,omegav,Hubble0); #else if(t+dt>t_int_max) { aexpdump=(a_int[int_step+2]-a_int[int_step+1])/(t_int[int_step+2]-t_int[int_step+1])*(t+dt-t_int[int_step+1]); } else { aexpdump=(a_int[int_step+1]-a_int[int_step])/(t_int[int_step+1]-t_int[int_step])*(t+dt-t_int[int_step]); } #endif cuDumpResults(ntsteps,t+dt,aexpdump,ic_rank); #else cuDumpResults(ntsteps,t+dt,0.,ic_rank); #endif tnext=tnext+ndumps*dt/ft; if(rank==0) printf("tnext=%f\n",tnext/unit_time); } //-------------------------------------------------------------------- // Dealing with fieldlists //-------------------------------------------------------------------- ft=fminf(ft*2.,1.); if(fieldlist) { if(changefield) { int ercode; #ifdef WMPI ercode=cuGetField(ifield,ic_rank); #else ercode=cuGetField(ifield,0); #endif if(ercode==38) { if(rank==0) { fclose(logfile); fclose(timefile); } abort(); } forcedump=0; changefield=0; ifield++; tfield=tlist[ifield]; ft=1./powf(2.,20); //ft=1.; } } // UPDATING VARIABLES t=t+dt; if(t>tmax) { puts("t > tmax -----> run will be terminated"); } #ifdef COSMO #ifdef FLAT_COSMO aexp=t2a(t,omegav,Hubble0); // A CHANGER PAR INTERPOLATION #else if(t>t_int_max) { int_step++; } aexp=(a_int[int_step+1]-a_int[int_step])/(t_int[int_step+1]-t_int[int_step])*(t-t_int[int_step]); #endif c=c_r/aexp; #endif cudaThreadSynchronize(); #ifdef WMPI mpisynch(); #endif if(rank==0) { q1=cutGetTimerValue(timer); } nstep++; if(nstep==nmax) { if(rank==0) puts("Max number of steps achieved: STOP"); break; } cudaThreadSynchronize(); #ifdef WMPI get_elapsed(&time_new); time_new=time_new-time_old; mpireducemax(&time_new); mpisynch(); 
#endif #ifdef TIMINGS if(rank==0){ q9=cutGetTimerValue(timer); printf("transport=%f chem=%f cool=%f update=%f bound=%f IO=%f,grand total=%f time_new=%lf\n",q11-q0,q4-q11,q8-q4,q10-q8,q7-q10,q9-q7,q9-q0,time_new); fprintf(timefile,"%d %f %f %f %f %f %f %f %lf\n",nstep-1,q11-q0,q4-q11,q8-q4,q10-q8,q7-q10,q9-q7,q9-q0,time_new); } #endif cudaThreadSynchronize(); #ifdef WMPI mpisynch(); #endif } if(rank==0) fclose(logfile); #ifdef TIMINGS if(rank==0) fclose(timefile); #endif return 0; }
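//**********************************************************
// Sketch (not part of the original Mainloop above): in the COSMO branch, cosmic
// time t is mapped to the expansion factor aexp through the precomputed
// (t_int, a_int) table built before the main loop. The helper below illustrates
// that table lookup with standard piecewise-linear interpolation; the function
// name aexp_from_table and the explicit a_int[i] offset term are assumptions of
// this sketch, not code taken from the file above.
//**********************************************************
static float aexp_from_table(float t, const float *t_int, const float *a_int, int n)
{
  // locate the interval [t_int[i], t_int[i+1]] containing t (t_int is increasing)
  int i=0;
  while(i<n-2 && t>t_int[i+1]) i++;
  // linear interpolation inside that interval
  float slope=(a_int[i+1]-a_int[i])/(t_int[i+1]-t_int[i]);
  return a_int[i]+slope*(t-t_int[i]);
}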
Spmv.hip
// !!! This is a file automatically generated by hipify!!! #include "cudacommon.h" #include <cassert> #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include <iostream> #include "OptionParser.h" #include "ResultDatabase.h" #include "Spmv.h" #include "util.h" using namespace std; texture<float, 1> vecTex; // vector textures texture<int2, 1> vecTexD; // Texture Readers (used so kernels can be templated) struct texReaderSP { __device__ __forceinline__ float operator()(const int idx) const { return tex1Dfetch(vecTex, idx); } }; struct texReaderDP { __device__ __forceinline__ double operator()(const int idx) const { int2 v = tex1Dfetch(vecTexD, idx); #if (__CUDA_ARCH__ < 130) // Devices before arch 130 don't support DP, and having the // __hiloint2double() intrinsic will cause compilation to fail. // This return statement added as a workaround -- it will compile, // but since the arch doesn't support DP, it will never be called return 0; #else return __hiloint2double(v.y, v.x); #endif } }; // Forward declarations for kernels template <typename fpType, typename texReader> __global__ void spmv_csr_scalar_kernel(const fpType * __restrict__ val, const int * __restrict__ cols, const int * __restrict__ rowDelimiters, const int dim, fpType * __restrict__ out); template <typename fpType, typename texReader> __global__ void spmv_csr_vector_kernel(const fpType * __restrict__ val, const int * __restrict__ cols, const int * __restrict__ rowDelimiters, const int dim, fpType * __restrict__ out); template <typename fpType, typename texReader> __global__ void spmv_ellpackr_kernel(const fpType * __restrict__ val, const int * __restrict__ cols, const int * __restrict__ rowLengths, const int dim, fpType * __restrict__ out); template <typename fpType> __global__ void zero(fpType * __restrict__ a, const int size); // **************************************************************************** // Function: addBenchmarkSpecOptions // // Purpose: // Add benchmark specific options parsing. 
// // Arguments: // op: the options parser / parameter database // // Programmer: Lukasz Wesolowski // Creation: June 21, 2010 // Returns: nothing // // **************************************************************************** void addBenchmarkSpecOptions(OptionParser &op) { op.addOption("iterations", OPT_INT, "100", "Number of SpMV iterations " "per pass"); op.addOption("mm_filename", OPT_STRING, "random", "Name of file " "which stores the matrix in Matrix Market format"); op.addOption("maxval", OPT_FLOAT, "10", "Maximum value for random " "matrices"); } // **************************************************************************** // Function: spmvCpu // // Purpose: // Runs sparse matrix vector multiplication on the CPU // // Arguements: // val: array holding the non-zero values for the matrix // cols: array of column indices for each element of A // rowDelimiters: array of size dim+1 holding indices to rows of A; // last element is the index one past the last // element of A // vec: dense vector of size dim to be used for multiplication // dim: number of rows/columns in the matrix // out: input - buffer of size dim // output - result from the spmv calculation // // Programmer: Lukasz Wesolowski // Creation: June 23, 2010 // Returns: // nothing directly // out indirectly through a pointer // **************************************************************************** template <typename floatType> void spmvCpu(const floatType *val, const int *cols, const int *rowDelimiters, const floatType *vec, int dim, floatType *out) { for (int i=0; i<dim; i++) { floatType t = 0; for (int j = rowDelimiters[i]; j < rowDelimiters[i + 1]; j++) { int col = cols[j]; t += val[j] * vec[col]; } out[i] = t; } } // **************************************************************************** // Function: verifyResults // // Purpose: // Verifies correctness of GPU results by comparing to CPU results // // Arguments: // cpuResults: array holding the CPU result vector // gpuResults: array hodling the GPU result vector // size: number of elements per vector // pass: optional iteration number // // Programmer: Lukasz Wesolowski // Creation: June 23, 2010 // Returns: // nothing // prints "Passed" if the vectors agree within a relative error of // MAX_RELATIVE_ERROR and "FAILED" if they are different // **************************************************************************** template <typename floatType> bool verifyResults(const floatType *cpuResults, const floatType *gpuResults, const int size, const int pass = -1) { bool passed = true; for (int i = 0; i < size; i++) { if (fabs(cpuResults[i] - gpuResults[i]) / cpuResults[i] > MAX_RELATIVE_ERROR) { // cout << "Mismatch at i: "<< i << " ref: " << cpuResults[i] << // " dev: " << gpuResults[i] << endl; passed = false; } } if (pass != -1) { cout << "Pass "<<pass<<": "; } if (passed) { cout << "Passed" << endl; } else { cout << "---FAILED---" << endl; } return passed; } template <typename floatType, typename texReader> void csrTest(ResultDatabase& resultDB, OptionParser& op, floatType* h_val, int* h_cols, int* h_rowDelimiters, floatType* h_vec, floatType* h_out, int numRows, int numNonZeroes, floatType* refOut, bool padded) { // Device data structures floatType *d_val, *d_vec, *d_out; int *d_cols, *d_rowDelimiters; // Allocate device memory CUDA_SAFE_CALL(hipMalloc(&d_val, numNonZeroes * sizeof(floatType))); CUDA_SAFE_CALL(hipMalloc(&d_cols, numNonZeroes * sizeof(int))); CUDA_SAFE_CALL(hipMalloc(&d_vec, numRows * sizeof(floatType))); CUDA_SAFE_CALL(hipMalloc(&d_out, 
numRows * sizeof(floatType))); CUDA_SAFE_CALL(hipMalloc(&d_rowDelimiters, (numRows+1) * sizeof(int))); // Setup events for timing hipEvent_t start, stop; CUDA_SAFE_CALL(hipEventCreate(&start)); CUDA_SAFE_CALL(hipEventCreate(&stop)); // Transfer data to device CUDA_SAFE_CALL(hipEventRecord(start, 0)); CUDA_SAFE_CALL(hipMemcpy(d_val, h_val, numNonZeroes * sizeof(floatType), hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipMemcpy(d_cols, h_cols, numNonZeroes * sizeof(int), hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipMemcpy(d_vec, h_vec, numRows * sizeof(floatType), hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipMemcpy(d_rowDelimiters, h_rowDelimiters, (numRows+1) * sizeof(int), hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipEventRecord(stop, 0)); CUDA_SAFE_CALL(hipEventSynchronize(stop)); float iTransferTime, oTransferTime; CUDA_SAFE_CALL(hipEventElapsedTime(&iTransferTime, start, stop)); iTransferTime *= 1.e-3; // Bind texture for position string suffix; if (sizeof(floatType) == sizeof(float)) { hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>(); CUDA_SAFE_CALL(hipBindTexture(0, vecTex, d_vec, channelDesc, numRows * sizeof(float))); suffix = "-SP"; } else { hipChannelFormatDesc channelDesc = hipCreateChannelDesc<int2>(); CUDA_SAFE_CALL(hipBindTexture(0, vecTexD, d_vec, channelDesc, numRows * sizeof(int2))); suffix = "-DP"; } // Setup thread configuration int nBlocksScalar = (int) ceil((floatType) numRows / BLOCK_SIZE); int nBlocksVector = (int) ceil(numRows / (floatType)(BLOCK_SIZE / WARP_SIZE)); int passes = op.getOptionInt("passes"); int iters = op.getOptionInt("iterations"); // Results description info char atts[TEMP_BUFFER_SIZE]; sprintf(atts, "%d_elements_%d_rows", numNonZeroes, numRows); string prefix = ""; prefix += (padded) ? "Padded_" : ""; double gflop = 2 * (double) numNonZeroes / 1e9; cout << "CSR Scalar Kernel\n"; for (int k=0; k<passes; k++) { // Run Scalar Kernel CUDA_SAFE_CALL(hipEventRecord(start, 0)); for (int j = 0; j < iters; j++) { hipLaunchKernelGGL(( spmv_csr_scalar_kernel<floatType, texReader>) , dim3(nBlocksScalar), dim3(BLOCK_SIZE), 0, 0, d_val, d_cols, d_rowDelimiters, numRows, d_out); } CUDA_SAFE_CALL(hipEventRecord(stop, 0)); CUDA_SAFE_CALL(hipEventSynchronize(stop)); float scalarKernelTime; CUDA_SAFE_CALL(hipEventElapsedTime(&scalarKernelTime, start, stop)); // Transfer data back to host CUDA_SAFE_CALL(hipEventRecord(start, 0)); CUDA_SAFE_CALL(hipMemcpy(h_out, d_out, numRows * sizeof(floatType), hipMemcpyDeviceToHost)); CUDA_SAFE_CALL(hipEventRecord(stop, 0)); CUDA_SAFE_CALL(hipEventSynchronize(stop)); CUDA_SAFE_CALL(hipEventElapsedTime(&oTransferTime, start, stop)); oTransferTime *= 1.e-3; // Compare reference solution to GPU result if (! 
verifyResults(refOut, h_out, numRows, k)) { return; // If results don't match, don't report performance } scalarKernelTime = (scalarKernelTime / (float)iters) * 1.e-3; string testName = prefix+"CSR-Scalar"+suffix; double totalTransfer = iTransferTime + oTransferTime; resultDB.AddResult(testName, atts, "Gflop/s", gflop/(scalarKernelTime)); resultDB.AddResult(testName+"_PCIe", atts, "Gflop/s", gflop / (scalarKernelTime+totalTransfer)); } hipLaunchKernelGGL(( zero<floatType>), dim3(nBlocksScalar), dim3(BLOCK_SIZE), 0, 0, d_out, numRows); hipDeviceSynchronize(); cout << "CSR Vector Kernel\n"; for (int k=0; k<passes; k++) { // Run Vector Kernel CUDA_SAFE_CALL(hipEventRecord(start, 0)); for (int j = 0; j < iters; j++) { hipLaunchKernelGGL(( spmv_csr_vector_kernel<floatType, texReader>) , dim3(nBlocksVector), dim3(BLOCK_SIZE), 0, 0, d_val, d_cols, d_rowDelimiters, numRows, d_out); } CUDA_SAFE_CALL(hipEventRecord(stop, 0)); CUDA_SAFE_CALL(hipEventSynchronize(stop)); float vectorKernelTime; CUDA_SAFE_CALL(hipEventElapsedTime(&vectorKernelTime, start, stop)); CUDA_SAFE_CALL(hipMemcpy(h_out, d_out, numRows * sizeof(floatType), hipMemcpyDeviceToHost)); hipDeviceSynchronize(); // Compare reference solution to GPU result if (! verifyResults(refOut, h_out, numRows, k)) { return; // If results don't match, don't report performance } vectorKernelTime = (vectorKernelTime / (float)iters) * 1.e-3; string testName = prefix+"CSR-Vector"+suffix; double totalTransfer = iTransferTime + oTransferTime; resultDB.AddResult(testName, atts, "Gflop/s", gflop/vectorKernelTime); resultDB.AddResult(testName+"_PCIe", atts, "Gflop/s", gflop/(vectorKernelTime+totalTransfer)); } // Free device memory CUDA_SAFE_CALL(hipFree(d_rowDelimiters)); CUDA_SAFE_CALL(hipFree(d_vec)); CUDA_SAFE_CALL(hipFree(d_out)); CUDA_SAFE_CALL(hipFree(d_val)); CUDA_SAFE_CALL(hipFree(d_cols)); CUDA_SAFE_CALL(hipUnbindTexture(vecTexD)); CUDA_SAFE_CALL(hipUnbindTexture(vecTex)); CUDA_SAFE_CALL(hipEventDestroy(start)); CUDA_SAFE_CALL(hipEventDestroy(stop)); } template <typename floatType, typename texReader> void ellPackTest(ResultDatabase& resultDB, OptionParser& op, floatType* h_val, int* h_cols, int* h_rowDelimiters, floatType* h_vec, floatType* h_out, int numRows, int numNonZeroes, floatType* refOut, bool padded, int paddedSize) { int *h_rowLengths; CUDA_SAFE_CALL(hipHostMalloc(&h_rowLengths, paddedSize * sizeof(int))); int maxrl = 0; for (int k=0; k<numRows; k++) { h_rowLengths[k] = h_rowDelimiters[k+1] - h_rowDelimiters[k]; if (h_rowLengths[k] > maxrl) { maxrl = h_rowLengths[k]; } } for (int p=numRows; p < paddedSize; p++) { h_rowLengths[p] = 0; } // Column major format host data structures int cmSize = padded ? 
paddedSize : numRows; floatType *h_valcm; CUDA_SAFE_CALL(hipHostMalloc(&h_valcm, maxrl * cmSize * sizeof(floatType))); int *h_colscm; CUDA_SAFE_CALL(hipHostMalloc(&h_colscm, maxrl * cmSize * sizeof(int))); convertToColMajor(h_val, h_cols, numRows, h_rowDelimiters, h_valcm, h_colscm, h_rowLengths, maxrl, padded); // Device data structures floatType *d_val, *d_vec, *d_out; int *d_cols, *d_rowLengths; // Allocate device memory CUDA_SAFE_CALL(hipMalloc(&d_val, maxrl*cmSize * sizeof(floatType))); CUDA_SAFE_CALL(hipMalloc(&d_cols, maxrl*cmSize * sizeof(int))); CUDA_SAFE_CALL(hipMalloc(&d_vec, numRows * sizeof(floatType))); CUDA_SAFE_CALL(hipMalloc(&d_out, paddedSize * sizeof(floatType))); CUDA_SAFE_CALL(hipMalloc(&d_rowLengths, cmSize * sizeof(int))); // Transfer data to device CUDA_SAFE_CALL(hipMemcpy(d_val, h_valcm, maxrl*cmSize * sizeof(floatType), hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipMemcpy(d_cols, h_colscm, maxrl*cmSize * sizeof(int), hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipMemcpy(d_vec, h_vec, numRows * sizeof(floatType), hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipMemcpy(d_rowLengths, h_rowLengths, cmSize * sizeof(int), hipMemcpyHostToDevice)); // Bind texture for position if (sizeof(floatType) == sizeof(float)) { hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>(); CUDA_SAFE_CALL(hipBindTexture(0, vecTex, d_vec, channelDesc, numRows * sizeof(float))); } else { hipChannelFormatDesc channelDesc = hipCreateChannelDesc<int2>(); CUDA_SAFE_CALL(hipBindTexture(0, vecTexD, d_vec, channelDesc, numRows * sizeof(int2))); } int nBlocks = (int) ceil((floatType) cmSize / BLOCK_SIZE); int passes = op.getOptionInt("passes"); int iters = op.getOptionInt("iterations"); hipEvent_t start, stop; CUDA_SAFE_CALL(hipEventCreate(&start)); CUDA_SAFE_CALL(hipEventCreate(&stop)); for (int k=0; k<passes; k++) { CUDA_SAFE_CALL(hipEventRecord(start, 0)); for (int j = 0; j < iters; j++) { hipLaunchKernelGGL(( spmv_ellpackr_kernel<floatType, texReader>), dim3(nBlocks), dim3(BLOCK_SIZE), 0, 0, d_val, d_cols, d_rowLengths, cmSize, d_out); } CUDA_SAFE_CALL(hipEventRecord(stop, 0)); CUDA_SAFE_CALL(hipEventSynchronize(stop)); float totalKernelTime; CUDA_SAFE_CALL(hipEventElapsedTime(&totalKernelTime, start, stop)); totalKernelTime *= 1.e-3; CUDA_SAFE_CALL(hipMemcpy(h_out, d_out, cmSize * sizeof(floatType), hipMemcpyDeviceToHost)); // Compare reference solution to GPU result if (! verifyResults(refOut, h_out, numRows, k)) { return; } char atts[TEMP_BUFFER_SIZE]; char benchName[TEMP_BUFFER_SIZE]; double avgTime = totalKernelTime / (float)iters; sprintf(atts, "%d_elements_%d_rows", numNonZeroes, cmSize); double gflop = 2 * (double) numNonZeroes / 1e9; bool dpTest = (sizeof(floatType) == sizeof(double)) ? true : false; sprintf(benchName, "%sELLPACKR-%s", padded ? "Padded_":"", dpTest ? 
"DP":"SP"); resultDB.AddResult(benchName, atts, "Gflop/s", gflop/avgTime); } // Free device memory CUDA_SAFE_CALL(hipFree(d_rowLengths)); CUDA_SAFE_CALL(hipFree(d_vec)); CUDA_SAFE_CALL(hipFree(d_out)); CUDA_SAFE_CALL(hipFree(d_val)); CUDA_SAFE_CALL(hipFree(d_cols)); if (sizeof(floatType) == sizeof(double)) { CUDA_SAFE_CALL(hipUnbindTexture(vecTexD)); } else { CUDA_SAFE_CALL(hipUnbindTexture(vecTex)); } CUDA_SAFE_CALL(hipEventDestroy(start)); CUDA_SAFE_CALL(hipEventDestroy(stop)); CUDA_SAFE_CALL(hipHostFree(h_rowLengths)); CUDA_SAFE_CALL(hipHostFree(h_valcm)); CUDA_SAFE_CALL(hipHostFree(h_colscm)); } // **************************************************************************** // Function: RunTest // // Purpose: // Executes a run of the sparse matrix - vector multiplication benchmark // in either single or double precision // // Arguments: // resultDB: stores results from the benchmark // op: the options parser / parameter database // nRows: number of rows in generated matrix // // Returns: nothing // // Programmer: Lukasz Wesolowski // Creation: June 21, 2010 // // Modifications: // // **************************************************************************** template <typename floatType, typename texReader> void RunTest(ResultDatabase &resultDB, OptionParser &op, int nRows=0) { // Host data structures // Array of values in the sparse matrix floatType *h_val, *h_valPad; // Array of column indices for each value in h_val int *h_cols, *h_colsPad; // Array of indices to the start of each row in h_Val int *h_rowDelimiters, *h_rowDelimitersPad; // Dense vector and space for dev/cpu reference solution floatType *h_vec, *h_out, *refOut; // nItems = number of non zero elems int nItems, nItemsPadded, numRows; // This benchmark either reads in a matrix market input file or // generates a random matrix string inFileName = op.getOptionString("mm_filename"); if (inFileName == "random") { numRows = nRows; nItems = numRows * numRows / 100; // 1% of entries will be non-zero float maxval = op.getOptionFloat("maxval"); CUDA_SAFE_CALL(hipHostMalloc(&h_val, nItems * sizeof(floatType))); CUDA_SAFE_CALL(hipHostMalloc(&h_cols, nItems * sizeof(int))); CUDA_SAFE_CALL(hipHostMalloc(&h_rowDelimiters, (numRows + 1) * sizeof(int))); fill(h_val, nItems, maxval); initRandomMatrix(h_cols, h_rowDelimiters, nItems, numRows); } else { char filename[FIELD_LENGTH]; strcpy(filename, inFileName.c_str()); readMatrix(filename, &h_val, &h_cols, &h_rowDelimiters, &nItems, &numRows); } // Set up remaining host data CUDA_SAFE_CALL(hipHostMalloc(&h_vec, numRows * sizeof(floatType))); refOut = new floatType[numRows]; CUDA_SAFE_CALL(hipHostMalloc(&h_rowDelimitersPad, (numRows + 1) * sizeof(int))); fill(h_vec, numRows, op.getOptionFloat("maxval")); // Set up the padded data structures int paddedSize = numRows + (PAD_FACTOR - numRows % PAD_FACTOR); CUDA_SAFE_CALL(hipHostMalloc(&h_out, paddedSize * sizeof(floatType))); convertToPadded(h_val, h_cols, numRows, h_rowDelimiters, &h_valPad, &h_colsPad, h_rowDelimitersPad, &nItemsPadded); // Compute reference solution spmvCpu(h_val, h_cols, h_rowDelimiters, h_vec, numRows, refOut); // Test CSR kernels on normal data cout << "CSR Test\n"; csrTest<floatType, texReader>(resultDB, op, h_val, h_cols, h_rowDelimiters, h_vec, h_out, numRows, nItems, refOut, false); // Test CSR kernels on padded data cout << "CSR Test -- Padded Data\n"; csrTest<floatType, texReader>(resultDB, op, h_valPad, h_colsPad, h_rowDelimitersPad, h_vec, h_out, numRows, nItemsPadded, refOut, true); // Test ELLPACKR kernel cout 
<< "ELLPACKR Test\n"; ellPackTest<floatType, texReader>(resultDB, op, h_val, h_cols, h_rowDelimiters, h_vec, h_out, numRows, nItems, refOut, false, paddedSize); delete[] refOut; CUDA_SAFE_CALL(hipHostFree(h_val)); CUDA_SAFE_CALL(hipHostFree(h_cols)); CUDA_SAFE_CALL(hipHostFree(h_rowDelimiters)); CUDA_SAFE_CALL(hipHostFree(h_vec)); CUDA_SAFE_CALL(hipHostFree(h_out)); CUDA_SAFE_CALL(hipHostFree(h_valPad)); CUDA_SAFE_CALL(hipHostFree(h_colsPad)); CUDA_SAFE_CALL(hipHostFree(h_rowDelimitersPad)); } // **************************************************************************** // Function: RunBenchmark // // Purpose: // Executes the sparse matrix - vector multiplication benchmark // // Arguments: // resultDB: stores results from the benchmark // op: the options parser / parameter database // // Returns: nothing // // Programmer: Lukasz Wesolowski // Creation: June 21, 2010 // // Modifications: // // **************************************************************************** void RunBenchmark(ResultDatabase &resultDB, OptionParser &op) { int device; hipGetDevice(&device); hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, device); bool doDouble = false; if ((deviceProp.major == 1 && deviceProp.minor >= 3) || (deviceProp.major >= 2)) { doDouble = true; } int probSizes[4] = {1024, 8192, 12288, 16384}; int sizeClass = op.getOptionInt("size") - 1; cout <<"Single precision tests:\n"; RunTest<float, texReaderSP>(resultDB, op, probSizes[sizeClass]); if (doDouble) { cout <<"Double precision tests:\n"; RunTest<double, texReaderDP>(resultDB, op, probSizes[sizeClass]); } else { std::cout << "Double precision not supported by chosen device, skipping" << std::endl; // driver script still needs entries for all tests, even if not run int nPasses = (int)op.getOptionInt( "passes" ); for( unsigned int p = 0; p < nPasses; p++ ) { resultDB.AddResult( (const char*)"CSR-Scalar-DP", "N/A", "Gflop/s", FLT_MAX ); resultDB.AddResult( (const char*)"CSR-Scalar-DP_PCIe", "N/A", "Gflop/s", FLT_MAX ); resultDB.AddResult( (const char*)"CSR-Vector-DP", "N/A", "Gflop/s", FLT_MAX ); resultDB.AddResult( (const char*)"CSR-Vector-DP_PCIe", "N/A", "Gflop/s", FLT_MAX ); resultDB.AddResult( (const char*)"ELLPACKR-DP", "N/A", "Gflop/s", FLT_MAX ); resultDB.AddResult( (const char*)"ELLPACKR-DP_PCIe", "N/A", "Gflop/s", FLT_MAX ); resultDB.AddResult( (const char*)"Padded_CSR-Scalar-DP", "N/A", "Gflop/s", FLT_MAX ); resultDB.AddResult( (const char*)"Padded_CSR-Scalar-DP_PCIe", "N/A", "Gflop/s", FLT_MAX ); resultDB.AddResult( (const char*)"Padded_CSR-Vector-DP", "N/A", "Gflop/s", FLT_MAX ); resultDB.AddResult( (const char*)"Padded_CSR-Vector-DP_PCIe", "N/A", "Gflop/s", FLT_MAX ); } } } // **************************************************************************** // Function: spmv_csr_scalar_kernel // // Purpose: // Computes sparse matrix - vector multiplication on the GPU using // the CSR data storage format, using a thread per row of the sparse // matrix; based on Bell (SC09) and Baskaran (IBM Tech Report) // // Arguments: // val: array holding the non-zero values for the matrix // cols: array of column indices for each element of the sparse matrix // rowDelimiters: array of size dim+1 holding indices to rows of the matrix // last element is the index one past the last // element of the matrix // dim: number of rows in the matrix // out: output - result from the spmv calculation // // Returns: nothing // out indirectly through a pointer // // Programmer: Lukasz Wesolowski // Creation: June 28, 2010 // // Modifications: // // 
**************************************************************************** template <typename fpType, typename texReader> __global__ void spmv_csr_scalar_kernel(const fpType * __restrict__ val, const int * __restrict__ cols, const int * __restrict__ rowDelimiters, const int dim, fpType * __restrict__ out) { int myRow = blockIdx.x * blockDim.x + threadIdx.x; texReader vecTexReader; if (myRow < dim) { fpType t = 0.0f; int start = rowDelimiters[myRow]; int end = rowDelimiters[myRow+1]; for (int j = start; j < end; j++) { int col = cols[j]; t += val[j] * vecTexReader(col); } out[myRow] = t; } } // **************************************************************************** // Function: spmv_csr_vector_kernel // // Purpose: // Computes sparse matrix - vector multiplication on the GPU using // the CSR data storage format, using a warp per row of the sparse // matrix; based on Bell (SC09) and Baskaran (IBM Tech Report) // // Arguments: // val: array holding the non-zero values for the matrix // cols: array of column indices for each element of the sparse matrix // rowDelimiters: array of size dim+1 holding indices to rows of the matrix // last element is the index one past the last // element of the matrix // dim: number of rows in the matrix // out: output - result from the spmv calculation // // Returns: nothing // out indirectly through a pointer // // Programmer: Lukasz Wesolowski // Creation: June 28, 2010 // // Modifications: // // **************************************************************************** template <typename fpType, typename texReader> __global__ void spmv_csr_vector_kernel(const fpType * __restrict__ val, const int * __restrict__ cols, const int * __restrict__ rowDelimiters, const int dim, fpType * __restrict__ out) { // Thread ID in block int t = threadIdx.x; // Thread ID within warp int id = t & (warpSize-1); int warpsPerBlock = blockDim.x / warpSize; // One row per warp int myRow = (blockIdx.x * warpsPerBlock) + (t / warpSize); // Texture reader for the dense vector texReader vecTexReader; __shared__ volatile fpType partialSums[BLOCK_SIZE]; if (myRow < dim) { int warpStart = rowDelimiters[myRow]; int warpEnd = rowDelimiters[myRow+1]; fpType mySum = 0; for (int j = warpStart + id; j < warpEnd; j += warpSize) { int col = cols[j]; mySum += val[j] * vecTexReader(col); } partialSums[t] = mySum; // Reduce partial sums if (id < 16) partialSums[t] += partialSums[t+16]; if (id < 8) partialSums[t] += partialSums[t+ 8]; if (id < 4) partialSums[t] += partialSums[t+ 4]; if (id < 2) partialSums[t] += partialSums[t+ 2]; if (id < 1) partialSums[t] += partialSums[t+ 1]; // Write result if (id == 0) { out[myRow] = partialSums[t]; } } } // **************************************************************************** // Function: spmv_ellpackr_kernel // // Purpose: // Computes sparse matrix - vector multiplication on the GPU using // the ELLPACK-R data storage format; based on Vazquez et al (Univ. 
of // Almeria Tech Report 2009) // // Arguments: // val: array holding the non-zero values for the matrix in column // major format and padded with zeros up to the length of longest row // cols: array of column indices for each element of the sparse matrix // rowLengths: array storing the length of each row of the sparse matrix // dim: number of rows in the matrix // out: output - result from the spmv calculation // // Returns: nothing directly // out indirectly through a pointer // // Programmer: Lukasz Wesolowski // Creation: June 29, 2010 // // Modifications: // // **************************************************************************** template <typename fpType, typename texReader> __global__ void spmv_ellpackr_kernel(const fpType * __restrict__ val, const int * __restrict__ cols, const int * __restrict__ rowLengths, const int dim, fpType * __restrict__ out) { int t = blockIdx.x * blockDim.x + threadIdx.x; texReader vecTexReader; if (t < dim) { fpType result = 0.0f; int max = rowLengths[t]; for (int i = 0; i < max; i++) { int ind = i*dim+t; result += val[ind] * vecTexReader(cols[ind]); } out[t] = result; } } template <typename fpType> __global__ void zero(fpType * __restrict__ a, const int size) { int t = blockIdx.x * blockDim.x + threadIdx.x; if (t < size) a[t] = 0; }
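// ****************************************************************************
// Sketch (not part of the original benchmark): a tiny worked example of the CSR
// layout consumed by spmvCpu and the spmv_csr_* kernels above. The 4x4 matrix,
// the helper name csrLayoutExample and the literal arrays are illustrative
// assumptions only.
// ****************************************************************************
static void csrLayoutExample()
{
    //     | 10  0  0  2 |
    // A = |  0  5  0  0 |   vec = {1, 2, 3, 4}   =>   A * vec = {18, 10, 24, 4}
    //     |  3  0  7  0 |
    //     |  0  0  0  1 |
    float val[]           = {10, 2, 5, 3, 7, 1};  // non-zero values, stored row by row
    int   cols[]          = { 0, 3, 1, 0, 2, 3};  // column index of each non-zero
    int   rowDelimiters[] = { 0, 2, 3, 5, 6};     // row i occupies [rowDelimiters[i], rowDelimiters[i+1])
    float vec[]           = { 1, 2, 3, 4};
    float out[4];
    // out[i] = sum over j in row i of val[j] * vec[cols[j]]  ->  {18, 10, 24, 4}
    spmvCpu<float>(val, cols, rowDelimiters, vec, 4, out);
}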
Spmv.cu
#include "cudacommon.h" #include <cassert> #include <cuda.h> #include <cuda_runtime_api.h> #include <iostream> #include "OptionParser.h" #include "ResultDatabase.h" #include "Spmv.h" #include "util.h" using namespace std; texture<float, 1> vecTex; // vector textures texture<int2, 1> vecTexD; // Texture Readers (used so kernels can be templated) struct texReaderSP { __device__ __forceinline__ float operator()(const int idx) const { return tex1Dfetch(vecTex, idx); } }; struct texReaderDP { __device__ __forceinline__ double operator()(const int idx) const { int2 v = tex1Dfetch(vecTexD, idx); #if (__CUDA_ARCH__ < 130) // Devices before arch 130 don't support DP, and having the // __hiloint2double() intrinsic will cause compilation to fail. // This return statement added as a workaround -- it will compile, // but since the arch doesn't support DP, it will never be called return 0; #else return __hiloint2double(v.y, v.x); #endif } }; // Forward declarations for kernels template <typename fpType, typename texReader> __global__ void spmv_csr_scalar_kernel(const fpType * __restrict__ val, const int * __restrict__ cols, const int * __restrict__ rowDelimiters, const int dim, fpType * __restrict__ out); template <typename fpType, typename texReader> __global__ void spmv_csr_vector_kernel(const fpType * __restrict__ val, const int * __restrict__ cols, const int * __restrict__ rowDelimiters, const int dim, fpType * __restrict__ out); template <typename fpType, typename texReader> __global__ void spmv_ellpackr_kernel(const fpType * __restrict__ val, const int * __restrict__ cols, const int * __restrict__ rowLengths, const int dim, fpType * __restrict__ out); template <typename fpType> __global__ void zero(fpType * __restrict__ a, const int size); // **************************************************************************** // Function: addBenchmarkSpecOptions // // Purpose: // Add benchmark specific options parsing. 
// // Arguments: // op: the options parser / parameter database // // Programmer: Lukasz Wesolowski // Creation: June 21, 2010 // Returns: nothing // // **************************************************************************** void addBenchmarkSpecOptions(OptionParser &op) { op.addOption("iterations", OPT_INT, "100", "Number of SpMV iterations " "per pass"); op.addOption("mm_filename", OPT_STRING, "random", "Name of file " "which stores the matrix in Matrix Market format"); op.addOption("maxval", OPT_FLOAT, "10", "Maximum value for random " "matrices"); } // **************************************************************************** // Function: spmvCpu // // Purpose: // Runs sparse matrix vector multiplication on the CPU // // Arguements: // val: array holding the non-zero values for the matrix // cols: array of column indices for each element of A // rowDelimiters: array of size dim+1 holding indices to rows of A; // last element is the index one past the last // element of A // vec: dense vector of size dim to be used for multiplication // dim: number of rows/columns in the matrix // out: input - buffer of size dim // output - result from the spmv calculation // // Programmer: Lukasz Wesolowski // Creation: June 23, 2010 // Returns: // nothing directly // out indirectly through a pointer // **************************************************************************** template <typename floatType> void spmvCpu(const floatType *val, const int *cols, const int *rowDelimiters, const floatType *vec, int dim, floatType *out) { for (int i=0; i<dim; i++) { floatType t = 0; for (int j = rowDelimiters[i]; j < rowDelimiters[i + 1]; j++) { int col = cols[j]; t += val[j] * vec[col]; } out[i] = t; } } // **************************************************************************** // Function: verifyResults // // Purpose: // Verifies correctness of GPU results by comparing to CPU results // // Arguments: // cpuResults: array holding the CPU result vector // gpuResults: array hodling the GPU result vector // size: number of elements per vector // pass: optional iteration number // // Programmer: Lukasz Wesolowski // Creation: June 23, 2010 // Returns: // nothing // prints "Passed" if the vectors agree within a relative error of // MAX_RELATIVE_ERROR and "FAILED" if they are different // **************************************************************************** template <typename floatType> bool verifyResults(const floatType *cpuResults, const floatType *gpuResults, const int size, const int pass = -1) { bool passed = true; for (int i = 0; i < size; i++) { if (fabs(cpuResults[i] - gpuResults[i]) / cpuResults[i] > MAX_RELATIVE_ERROR) { // cout << "Mismatch at i: "<< i << " ref: " << cpuResults[i] << // " dev: " << gpuResults[i] << endl; passed = false; } } if (pass != -1) { cout << "Pass "<<pass<<": "; } if (passed) { cout << "Passed" << endl; } else { cout << "---FAILED---" << endl; } return passed; } template <typename floatType, typename texReader> void csrTest(ResultDatabase& resultDB, OptionParser& op, floatType* h_val, int* h_cols, int* h_rowDelimiters, floatType* h_vec, floatType* h_out, int numRows, int numNonZeroes, floatType* refOut, bool padded) { // Device data structures floatType *d_val, *d_vec, *d_out; int *d_cols, *d_rowDelimiters; // Allocate device memory CUDA_SAFE_CALL(cudaMalloc(&d_val, numNonZeroes * sizeof(floatType))); CUDA_SAFE_CALL(cudaMalloc(&d_cols, numNonZeroes * sizeof(int))); CUDA_SAFE_CALL(cudaMalloc(&d_vec, numRows * sizeof(floatType))); 
CUDA_SAFE_CALL(cudaMalloc(&d_out, numRows * sizeof(floatType))); CUDA_SAFE_CALL(cudaMalloc(&d_rowDelimiters, (numRows+1) * sizeof(int))); // Setup events for timing cudaEvent_t start, stop; CUDA_SAFE_CALL(cudaEventCreate(&start)); CUDA_SAFE_CALL(cudaEventCreate(&stop)); // Transfer data to device CUDA_SAFE_CALL(cudaEventRecord(start, 0)); CUDA_SAFE_CALL(cudaMemcpy(d_val, h_val, numNonZeroes * sizeof(floatType), cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMemcpy(d_cols, h_cols, numNonZeroes * sizeof(int), cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMemcpy(d_vec, h_vec, numRows * sizeof(floatType), cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMemcpy(d_rowDelimiters, h_rowDelimiters, (numRows+1) * sizeof(int), cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaEventRecord(stop, 0)); CUDA_SAFE_CALL(cudaEventSynchronize(stop)); float iTransferTime, oTransferTime; CUDA_SAFE_CALL(cudaEventElapsedTime(&iTransferTime, start, stop)); iTransferTime *= 1.e-3; // Bind texture for position string suffix; if (sizeof(floatType) == sizeof(float)) { cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>(); CUDA_SAFE_CALL(cudaBindTexture(0, vecTex, d_vec, channelDesc, numRows * sizeof(float))); suffix = "-SP"; } else { cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<int2>(); CUDA_SAFE_CALL(cudaBindTexture(0, vecTexD, d_vec, channelDesc, numRows * sizeof(int2))); suffix = "-DP"; } // Setup thread configuration int nBlocksScalar = (int) ceil((floatType) numRows / BLOCK_SIZE); int nBlocksVector = (int) ceil(numRows / (floatType)(BLOCK_SIZE / WARP_SIZE)); int passes = op.getOptionInt("passes"); int iters = op.getOptionInt("iterations"); // Results description info char atts[TEMP_BUFFER_SIZE]; sprintf(atts, "%d_elements_%d_rows", numNonZeroes, numRows); string prefix = ""; prefix += (padded) ? "Padded_" : ""; double gflop = 2 * (double) numNonZeroes / 1e9; cout << "CSR Scalar Kernel\n"; for (int k=0; k<passes; k++) { // Run Scalar Kernel CUDA_SAFE_CALL(cudaEventRecord(start, 0)); for (int j = 0; j < iters; j++) { spmv_csr_scalar_kernel<floatType, texReader> <<<nBlocksScalar, BLOCK_SIZE>>> (d_val, d_cols, d_rowDelimiters, numRows, d_out); } CUDA_SAFE_CALL(cudaEventRecord(stop, 0)); CUDA_SAFE_CALL(cudaEventSynchronize(stop)); float scalarKernelTime; CUDA_SAFE_CALL(cudaEventElapsedTime(&scalarKernelTime, start, stop)); // Transfer data back to host CUDA_SAFE_CALL(cudaEventRecord(start, 0)); CUDA_SAFE_CALL(cudaMemcpy(h_out, d_out, numRows * sizeof(floatType), cudaMemcpyDeviceToHost)); CUDA_SAFE_CALL(cudaEventRecord(stop, 0)); CUDA_SAFE_CALL(cudaEventSynchronize(stop)); CUDA_SAFE_CALL(cudaEventElapsedTime(&oTransferTime, start, stop)); oTransferTime *= 1.e-3; // Compare reference solution to GPU result if (! 
verifyResults(refOut, h_out, numRows, k)) { return; // If results don't match, don't report performance } scalarKernelTime = (scalarKernelTime / (float)iters) * 1.e-3; string testName = prefix+"CSR-Scalar"+suffix; double totalTransfer = iTransferTime + oTransferTime; resultDB.AddResult(testName, atts, "Gflop/s", gflop/(scalarKernelTime)); resultDB.AddResult(testName+"_PCIe", atts, "Gflop/s", gflop / (scalarKernelTime+totalTransfer)); } zero<floatType><<<nBlocksScalar, BLOCK_SIZE>>>(d_out, numRows); cudaThreadSynchronize(); cout << "CSR Vector Kernel\n"; for (int k=0; k<passes; k++) { // Run Vector Kernel CUDA_SAFE_CALL(cudaEventRecord(start, 0)); for (int j = 0; j < iters; j++) { spmv_csr_vector_kernel<floatType, texReader> <<<nBlocksVector, BLOCK_SIZE>>> (d_val, d_cols, d_rowDelimiters, numRows, d_out); } CUDA_SAFE_CALL(cudaEventRecord(stop, 0)); CUDA_SAFE_CALL(cudaEventSynchronize(stop)); float vectorKernelTime; CUDA_SAFE_CALL(cudaEventElapsedTime(&vectorKernelTime, start, stop)); CUDA_SAFE_CALL(cudaMemcpy(h_out, d_out, numRows * sizeof(floatType), cudaMemcpyDeviceToHost)); cudaThreadSynchronize(); // Compare reference solution to GPU result if (! verifyResults(refOut, h_out, numRows, k)) { return; // If results don't match, don't report performance } vectorKernelTime = (vectorKernelTime / (float)iters) * 1.e-3; string testName = prefix+"CSR-Vector"+suffix; double totalTransfer = iTransferTime + oTransferTime; resultDB.AddResult(testName, atts, "Gflop/s", gflop/vectorKernelTime); resultDB.AddResult(testName+"_PCIe", atts, "Gflop/s", gflop/(vectorKernelTime+totalTransfer)); } // Free device memory CUDA_SAFE_CALL(cudaFree(d_rowDelimiters)); CUDA_SAFE_CALL(cudaFree(d_vec)); CUDA_SAFE_CALL(cudaFree(d_out)); CUDA_SAFE_CALL(cudaFree(d_val)); CUDA_SAFE_CALL(cudaFree(d_cols)); CUDA_SAFE_CALL(cudaUnbindTexture(vecTexD)); CUDA_SAFE_CALL(cudaUnbindTexture(vecTex)); CUDA_SAFE_CALL(cudaEventDestroy(start)); CUDA_SAFE_CALL(cudaEventDestroy(stop)); } template <typename floatType, typename texReader> void ellPackTest(ResultDatabase& resultDB, OptionParser& op, floatType* h_val, int* h_cols, int* h_rowDelimiters, floatType* h_vec, floatType* h_out, int numRows, int numNonZeroes, floatType* refOut, bool padded, int paddedSize) { int *h_rowLengths; CUDA_SAFE_CALL(cudaMallocHost(&h_rowLengths, paddedSize * sizeof(int))); int maxrl = 0; for (int k=0; k<numRows; k++) { h_rowLengths[k] = h_rowDelimiters[k+1] - h_rowDelimiters[k]; if (h_rowLengths[k] > maxrl) { maxrl = h_rowLengths[k]; } } for (int p=numRows; p < paddedSize; p++) { h_rowLengths[p] = 0; } // Column major format host data structures int cmSize = padded ? 
paddedSize : numRows; floatType *h_valcm; CUDA_SAFE_CALL(cudaMallocHost(&h_valcm, maxrl * cmSize * sizeof(floatType))); int *h_colscm; CUDA_SAFE_CALL(cudaMallocHost(&h_colscm, maxrl * cmSize * sizeof(int))); convertToColMajor(h_val, h_cols, numRows, h_rowDelimiters, h_valcm, h_colscm, h_rowLengths, maxrl, padded); // Device data structures floatType *d_val, *d_vec, *d_out; int *d_cols, *d_rowLengths; // Allocate device memory CUDA_SAFE_CALL(cudaMalloc(&d_val, maxrl*cmSize * sizeof(floatType))); CUDA_SAFE_CALL(cudaMalloc(&d_cols, maxrl*cmSize * sizeof(int))); CUDA_SAFE_CALL(cudaMalloc(&d_vec, numRows * sizeof(floatType))); CUDA_SAFE_CALL(cudaMalloc(&d_out, paddedSize * sizeof(floatType))); CUDA_SAFE_CALL(cudaMalloc(&d_rowLengths, cmSize * sizeof(int))); // Transfer data to device CUDA_SAFE_CALL(cudaMemcpy(d_val, h_valcm, maxrl*cmSize * sizeof(floatType), cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMemcpy(d_cols, h_colscm, maxrl*cmSize * sizeof(int), cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMemcpy(d_vec, h_vec, numRows * sizeof(floatType), cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMemcpy(d_rowLengths, h_rowLengths, cmSize * sizeof(int), cudaMemcpyHostToDevice)); // Bind texture for position if (sizeof(floatType) == sizeof(float)) { cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>(); CUDA_SAFE_CALL(cudaBindTexture(0, vecTex, d_vec, channelDesc, numRows * sizeof(float))); } else { cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<int2>(); CUDA_SAFE_CALL(cudaBindTexture(0, vecTexD, d_vec, channelDesc, numRows * sizeof(int2))); } int nBlocks = (int) ceil((floatType) cmSize / BLOCK_SIZE); int passes = op.getOptionInt("passes"); int iters = op.getOptionInt("iterations"); cudaEvent_t start, stop; CUDA_SAFE_CALL(cudaEventCreate(&start)); CUDA_SAFE_CALL(cudaEventCreate(&stop)); for (int k=0; k<passes; k++) { CUDA_SAFE_CALL(cudaEventRecord(start, 0)); for (int j = 0; j < iters; j++) { spmv_ellpackr_kernel<floatType, texReader><<<nBlocks, BLOCK_SIZE>>> (d_val, d_cols, d_rowLengths, cmSize, d_out); } CUDA_SAFE_CALL(cudaEventRecord(stop, 0)); CUDA_SAFE_CALL(cudaEventSynchronize(stop)); float totalKernelTime; CUDA_SAFE_CALL(cudaEventElapsedTime(&totalKernelTime, start, stop)); totalKernelTime *= 1.e-3; CUDA_SAFE_CALL(cudaMemcpy(h_out, d_out, cmSize * sizeof(floatType), cudaMemcpyDeviceToHost)); // Compare reference solution to GPU result if (! verifyResults(refOut, h_out, numRows, k)) { return; } char atts[TEMP_BUFFER_SIZE]; char benchName[TEMP_BUFFER_SIZE]; double avgTime = totalKernelTime / (float)iters; sprintf(atts, "%d_elements_%d_rows", numNonZeroes, cmSize); double gflop = 2 * (double) numNonZeroes / 1e9; bool dpTest = (sizeof(floatType) == sizeof(double)) ? true : false; sprintf(benchName, "%sELLPACKR-%s", padded ? "Padded_":"", dpTest ? 
"DP":"SP"); resultDB.AddResult(benchName, atts, "Gflop/s", gflop/avgTime); } // Free device memory CUDA_SAFE_CALL(cudaFree(d_rowLengths)); CUDA_SAFE_CALL(cudaFree(d_vec)); CUDA_SAFE_CALL(cudaFree(d_out)); CUDA_SAFE_CALL(cudaFree(d_val)); CUDA_SAFE_CALL(cudaFree(d_cols)); if (sizeof(floatType) == sizeof(double)) { CUDA_SAFE_CALL(cudaUnbindTexture(vecTexD)); } else { CUDA_SAFE_CALL(cudaUnbindTexture(vecTex)); } CUDA_SAFE_CALL(cudaEventDestroy(start)); CUDA_SAFE_CALL(cudaEventDestroy(stop)); CUDA_SAFE_CALL(cudaFreeHost(h_rowLengths)); CUDA_SAFE_CALL(cudaFreeHost(h_valcm)); CUDA_SAFE_CALL(cudaFreeHost(h_colscm)); } // **************************************************************************** // Function: RunTest // // Purpose: // Executes a run of the sparse matrix - vector multiplication benchmark // in either single or double precision // // Arguments: // resultDB: stores results from the benchmark // op: the options parser / parameter database // nRows: number of rows in generated matrix // // Returns: nothing // // Programmer: Lukasz Wesolowski // Creation: June 21, 2010 // // Modifications: // // **************************************************************************** template <typename floatType, typename texReader> void RunTest(ResultDatabase &resultDB, OptionParser &op, int nRows=0) { // Host data structures // Array of values in the sparse matrix floatType *h_val, *h_valPad; // Array of column indices for each value in h_val int *h_cols, *h_colsPad; // Array of indices to the start of each row in h_Val int *h_rowDelimiters, *h_rowDelimitersPad; // Dense vector and space for dev/cpu reference solution floatType *h_vec, *h_out, *refOut; // nItems = number of non zero elems int nItems, nItemsPadded, numRows; // This benchmark either reads in a matrix market input file or // generates a random matrix string inFileName = op.getOptionString("mm_filename"); if (inFileName == "random") { numRows = nRows; nItems = numRows * numRows / 100; // 1% of entries will be non-zero float maxval = op.getOptionFloat("maxval"); CUDA_SAFE_CALL(cudaMallocHost(&h_val, nItems * sizeof(floatType))); CUDA_SAFE_CALL(cudaMallocHost(&h_cols, nItems * sizeof(int))); CUDA_SAFE_CALL(cudaMallocHost(&h_rowDelimiters, (numRows + 1) * sizeof(int))); fill(h_val, nItems, maxval); initRandomMatrix(h_cols, h_rowDelimiters, nItems, numRows); } else { char filename[FIELD_LENGTH]; strcpy(filename, inFileName.c_str()); readMatrix(filename, &h_val, &h_cols, &h_rowDelimiters, &nItems, &numRows); } // Set up remaining host data CUDA_SAFE_CALL(cudaMallocHost(&h_vec, numRows * sizeof(floatType))); refOut = new floatType[numRows]; CUDA_SAFE_CALL(cudaMallocHost(&h_rowDelimitersPad, (numRows + 1) * sizeof(int))); fill(h_vec, numRows, op.getOptionFloat("maxval")); // Set up the padded data structures int paddedSize = numRows + (PAD_FACTOR - numRows % PAD_FACTOR); CUDA_SAFE_CALL(cudaMallocHost(&h_out, paddedSize * sizeof(floatType))); convertToPadded(h_val, h_cols, numRows, h_rowDelimiters, &h_valPad, &h_colsPad, h_rowDelimitersPad, &nItemsPadded); // Compute reference solution spmvCpu(h_val, h_cols, h_rowDelimiters, h_vec, numRows, refOut); // Test CSR kernels on normal data cout << "CSR Test\n"; csrTest<floatType, texReader>(resultDB, op, h_val, h_cols, h_rowDelimiters, h_vec, h_out, numRows, nItems, refOut, false); // Test CSR kernels on padded data cout << "CSR Test -- Padded Data\n"; csrTest<floatType, texReader>(resultDB, op, h_valPad, h_colsPad, h_rowDelimitersPad, h_vec, h_out, numRows, nItemsPadded, refOut, true); // Test 
ELLPACKR kernel cout << "ELLPACKR Test\n"; ellPackTest<floatType, texReader>(resultDB, op, h_val, h_cols, h_rowDelimiters, h_vec, h_out, numRows, nItems, refOut, false, paddedSize); delete[] refOut; CUDA_SAFE_CALL(cudaFreeHost(h_val)); CUDA_SAFE_CALL(cudaFreeHost(h_cols)); CUDA_SAFE_CALL(cudaFreeHost(h_rowDelimiters)); CUDA_SAFE_CALL(cudaFreeHost(h_vec)); CUDA_SAFE_CALL(cudaFreeHost(h_out)); CUDA_SAFE_CALL(cudaFreeHost(h_valPad)); CUDA_SAFE_CALL(cudaFreeHost(h_colsPad)); CUDA_SAFE_CALL(cudaFreeHost(h_rowDelimitersPad)); } // **************************************************************************** // Function: RunBenchmark // // Purpose: // Executes the sparse matrix - vector multiplication benchmark // // Arguments: // resultDB: stores results from the benchmark // op: the options parser / parameter database // // Returns: nothing // // Programmer: Lukasz Wesolowski // Creation: June 21, 2010 // // Modifications: // // **************************************************************************** void RunBenchmark(ResultDatabase &resultDB, OptionParser &op) { int device; cudaGetDevice(&device); cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, device); bool doDouble = false; if ((deviceProp.major == 1 && deviceProp.minor >= 3) || (deviceProp.major >= 2)) { doDouble = true; } int probSizes[4] = {1024, 8192, 12288, 16384}; int sizeClass = op.getOptionInt("size") - 1; cout <<"Single precision tests:\n"; RunTest<float, texReaderSP>(resultDB, op, probSizes[sizeClass]); if (doDouble) { cout <<"Double precision tests:\n"; RunTest<double, texReaderDP>(resultDB, op, probSizes[sizeClass]); } else { std::cout << "Double precision not supported by chosen device, skipping" << std::endl; // driver script still needs entries for all tests, even if not run int nPasses = (int)op.getOptionInt( "passes" ); for( unsigned int p = 0; p < nPasses; p++ ) { resultDB.AddResult( (const char*)"CSR-Scalar-DP", "N/A", "Gflop/s", FLT_MAX ); resultDB.AddResult( (const char*)"CSR-Scalar-DP_PCIe", "N/A", "Gflop/s", FLT_MAX ); resultDB.AddResult( (const char*)"CSR-Vector-DP", "N/A", "Gflop/s", FLT_MAX ); resultDB.AddResult( (const char*)"CSR-Vector-DP_PCIe", "N/A", "Gflop/s", FLT_MAX ); resultDB.AddResult( (const char*)"ELLPACKR-DP", "N/A", "Gflop/s", FLT_MAX ); resultDB.AddResult( (const char*)"ELLPACKR-DP_PCIe", "N/A", "Gflop/s", FLT_MAX ); resultDB.AddResult( (const char*)"Padded_CSR-Scalar-DP", "N/A", "Gflop/s", FLT_MAX ); resultDB.AddResult( (const char*)"Padded_CSR-Scalar-DP_PCIe", "N/A", "Gflop/s", FLT_MAX ); resultDB.AddResult( (const char*)"Padded_CSR-Vector-DP", "N/A", "Gflop/s", FLT_MAX ); resultDB.AddResult( (const char*)"Padded_CSR-Vector-DP_PCIe", "N/A", "Gflop/s", FLT_MAX ); } } } // **************************************************************************** // Function: spmv_csr_scalar_kernel // // Purpose: // Computes sparse matrix - vector multiplication on the GPU using // the CSR data storage format, using a thread per row of the sparse // matrix; based on Bell (SC09) and Baskaran (IBM Tech Report) // // Arguments: // val: array holding the non-zero values for the matrix // cols: array of column indices for each element of the sparse matrix // rowDelimiters: array of size dim+1 holding indices to rows of the matrix // last element is the index one past the last // element of the matrix // dim: number of rows in the matrix // out: output - result from the spmv calculation // // Returns: nothing // out indirectly through a pointer // // Programmer: Lukasz Wesolowski // Creation: June 28, 
2010 // // Modifications: // // **************************************************************************** template <typename fpType, typename texReader> __global__ void spmv_csr_scalar_kernel(const fpType * __restrict__ val, const int * __restrict__ cols, const int * __restrict__ rowDelimiters, const int dim, fpType * __restrict__ out) { int myRow = blockIdx.x * blockDim.x + threadIdx.x; texReader vecTexReader; if (myRow < dim) { fpType t = 0.0f; int start = rowDelimiters[myRow]; int end = rowDelimiters[myRow+1]; for (int j = start; j < end; j++) { int col = cols[j]; t += val[j] * vecTexReader(col); } out[myRow] = t; } } // **************************************************************************** // Function: spmv_csr_vector_kernel // // Purpose: // Computes sparse matrix - vector multiplication on the GPU using // the CSR data storage format, using a warp per row of the sparse // matrix; based on Bell (SC09) and Baskaran (IBM Tech Report) // // Arguments: // val: array holding the non-zero values for the matrix // cols: array of column indices for each element of the sparse matrix // rowDelimiters: array of size dim+1 holding indices to rows of the matrix // last element is the index one past the last // element of the matrix // dim: number of rows in the matrix // out: output - result from the spmv calculation // // Returns: nothing // out indirectly through a pointer // // Programmer: Lukasz Wesolowski // Creation: June 28, 2010 // // Modifications: // // **************************************************************************** template <typename fpType, typename texReader> __global__ void spmv_csr_vector_kernel(const fpType * __restrict__ val, const int * __restrict__ cols, const int * __restrict__ rowDelimiters, const int dim, fpType * __restrict__ out) { // Thread ID in block int t = threadIdx.x; // Thread ID within warp int id = t & (warpSize-1); int warpsPerBlock = blockDim.x / warpSize; // One row per warp int myRow = (blockIdx.x * warpsPerBlock) + (t / warpSize); // Texture reader for the dense vector texReader vecTexReader; __shared__ volatile fpType partialSums[BLOCK_SIZE]; if (myRow < dim) { int warpStart = rowDelimiters[myRow]; int warpEnd = rowDelimiters[myRow+1]; fpType mySum = 0; for (int j = warpStart + id; j < warpEnd; j += warpSize) { int col = cols[j]; mySum += val[j] * vecTexReader(col); } partialSums[t] = mySum; // Reduce partial sums if (id < 16) partialSums[t] += partialSums[t+16]; if (id < 8) partialSums[t] += partialSums[t+ 8]; if (id < 4) partialSums[t] += partialSums[t+ 4]; if (id < 2) partialSums[t] += partialSums[t+ 2]; if (id < 1) partialSums[t] += partialSums[t+ 1]; // Write result if (id == 0) { out[myRow] = partialSums[t]; } } } // **************************************************************************** // Function: spmv_ellpackr_kernel // // Purpose: // Computes sparse matrix - vector multiplication on the GPU using // the ELLPACK-R data storage format; based on Vazquez et al (Univ. 
of // Almeria Tech Report 2009) // // Arguments: // val: array holding the non-zero values for the matrix in column // major format and padded with zeros up to the length of longest row // cols: array of column indices for each element of the sparse matrix // rowLengths: array storing the length of each row of the sparse matrix // dim: number of rows in the matrix // out: output - result from the spmv calculation // // Returns: nothing directly // out indirectly through a pointer // // Programmer: Lukasz Wesolowski // Creation: June 29, 2010 // // Modifications: // // **************************************************************************** template <typename fpType, typename texReader> __global__ void spmv_ellpackr_kernel(const fpType * __restrict__ val, const int * __restrict__ cols, const int * __restrict__ rowLengths, const int dim, fpType * __restrict__ out) { int t = blockIdx.x * blockDim.x + threadIdx.x; texReader vecTexReader; if (t < dim) { fpType result = 0.0f; int max = rowLengths[t]; for (int i = 0; i < max; i++) { int ind = i*dim+t; result += val[ind] * vecTexReader(cols[ind]); } out[t] = result; } } template <typename fpType> __global__ void zero(fpType * __restrict__ a, const int size) { int t = blockIdx.x * blockDim.x + threadIdx.x; if (t < size) a[t] = 0; }
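// ----------------------------------------------------------------------------
// Hedged illustration (not part of the SHOC benchmark source above): a minimal
// host-side reference of the product computed by spmv_ellpackr_kernel. It
// assumes the same column-major layout produced by convertToColMajor(): entry i
// of row t lives at val[i*dim + t], and rowLengths[t] gives the number of
// stored entries for row t. The name spmv_ellpackr_cpu is illustrative only.
template <typename fpType>
void spmv_ellpackr_cpu(const fpType *val,        // column-major, padded values
                       const int    *cols,       // column indices, same layout
                       const int    *rowLengths, // entries stored per row
                       const fpType *vec,        // dense input vector
                       const int     dim,        // number of rows
                       fpType       *out)
{
    for (int t = 0; t < dim; t++)
    {
        fpType result = 0;
        for (int i = 0; i < rowLengths[t]; i++)
        {
            int ind = i * dim + t;               // column-major index, as in the kernel
            result += val[ind] * vec[cols[ind]];
        }
        out[t] = result;
    }
}
// Because consecutive threads t, t+1, ... of the GPU kernel read val[i*dim + t],
// val[i*dim + t + 1] for a fixed i, their loads of val and cols are coalesced;
// the loop above just makes that stride-1-in-t layout explicit on the CPU.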
c1d45a8b385baa645489930bed587c6300cc88b4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <omp.h> __global__ void scalarAdd(int *data, int inc) { int i = blockIdx.x * blockDim.x + threadIdx.x; data[i] += inc; } int main(int argc, char *argv[]) { // Get the number of CUDA devices. int numDevices; hipGetDeviceCount(&numDevices); // Allocate and initialize data. size_t numElements = 8192 * numDevices; size_t numBytes = sizeof(int) * numElements; int *data = (int *)malloc(numBytes); memset(data, 0, numBytes); int inc = 7; // Create as many CPU threads as there are CUDA devices. Each CPU thread controls a different device, processing its portion of the data. omp_set_num_threads(numDevices); // All variables declared inside an "omp parallel" scope are local to each CPU thread. // highlight: #pragma omp parallel { // This block is multi-threaded by CPU // So we use sync operations in it // CPU will switch when one thread blocks // Get the number of CPU threads and the thread number of the current CPU thread. // 0 <= threadNum <= numThreads - 1 int numThreads = omp_get_num_threads(); // the thread ID.. // what bad names for the two API... int threadNum = omp_get_thread_num(); // Set device to be used for GPU executions. int deviceNum = threadNum % numDevices; hipSetDevice(deviceNum); // Calculate the number of elements per CPU thread and the number of bytes per CPU thread. size_t numElementsPerThread = numElements / numThreads; size_t numBytesPerThread = sizeof(int) * numElementsPerThread; // Calculate the offset to the original data for the current CPU thread. int *h = data + numElementsPerThread * threadNum; // Allocate device memory to temporarily hold the portion of the data of the current CPU thread. int *d; hipMalloc((void **)&d, numBytesPerThread); // Copy the portion of the data of the current CPU thread from host memory to device memory. hipMemcpy(d, h, numBytesPerThread, hipMemcpyHostToDevice); // Invoke the kernel for the current portion of the data. hipLaunchKernelGGL(( scalarAdd), dim3(numElementsPerThread / 128), dim3(128), 0, 0, d, inc); // Copy the portion of the data of the current CPU thread from device memory to host memory. hipMemcpy(h, d, numBytesPerThread, hipMemcpyDeviceToHost); // Deallocate the temporary device memory. hipFree(d); } for (int i = 0; i < numElements; ++i) { int actual = data[i]; int expected = 0 + inc; if (actual != expected) { printf("data[%d] = %d, expected = %d\n", i, actual, expected); break; } } // Cleanup. for (int i = 0; i < numDevices; ++i) { hipSetDevice(i); hipDeviceReset(); } free(data); }
c1d45a8b385baa645489930bed587c6300cc88b4.cu
#include <stdio.h> #include <omp.h> __global__ void scalarAdd(int *data, int inc) { int i = blockIdx.x * blockDim.x + threadIdx.x; data[i] += inc; } int main(int argc, char *argv[]) { // Get the number of CUDA devices. int numDevices; cudaGetDeviceCount(&numDevices); // Allocate and initialize data. size_t numElements = 8192 * numDevices; size_t numBytes = sizeof(int) * numElements; int *data = (int *)malloc(numBytes); memset(data, 0, numBytes); int inc = 7; // Create as many CPU threads as there are CUDA devices. Each CPU thread controls a different device, processing its portion of the data. omp_set_num_threads(numDevices); // All variables declared inside an "omp parallel" scope are local to each CPU thread. // highlight: #pragma omp parallel { // This block is multi-threaded by CPU // So we use sync operations in it // CPU will switch when one thread blocks // Get the number of CPU threads and the thread number of the current CPU thread. // 0 <= threadNum <= numThreads - 1 int numThreads = omp_get_num_threads(); // the thread ID.. // what bad names for the two API... int threadNum = omp_get_thread_num(); // Set device to be used for GPU executions. int deviceNum = threadNum % numDevices; cudaSetDevice(deviceNum); // Calculate the number of elements per CPU thread and the number of bytes per CPU thread. size_t numElementsPerThread = numElements / numThreads; size_t numBytesPerThread = sizeof(int) * numElementsPerThread; // Calculate the offset to the original data for the current CPU thread. int *h = data + numElementsPerThread * threadNum; // Allocate device memory to temporarily hold the portion of the data of the current CPU thread. int *d; cudaMalloc((void **)&d, numBytesPerThread); // Copy the portion of the data of the current CPU thread from host memory to device memory. cudaMemcpy(d, h, numBytesPerThread, cudaMemcpyHostToDevice); // Invoke the kernel for the current portion of the data. scalarAdd<<<numElementsPerThread / 128, 128>>>(d, inc); // Copy the portion of the data of the current CPU thread from device memory to host memory. cudaMemcpy(h, d, numBytesPerThread, cudaMemcpyDeviceToHost); // Deallocate the temporary device memory. cudaFree(d); } for (int i = 0; i < numElements; ++i) { int actual = data[i]; int expected = 0 + inc; if (actual != expected) { printf("data[%d] = %d, expected = %d\n", i, actual, expected); break; } } // Cleanup. for (int i = 0; i < numDevices; ++i) { cudaSetDevice(i); cudaDeviceReset(); } free(data); }
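// ----------------------------------------------------------------------------
// Hedged sketch (not part of the original sample above): the same per-device
// data partitioning driven from a single host thread, using one stream per
// device and asynchronous copies instead of one OpenMP thread per device. The
// helper name scalarAddAllDevices and the kernel name scalarAddSketch are
// illustrative. For cudaMemcpyAsync to actually overlap with kernel execution,
// h_data would need to be pinned (cudaMallocHost) rather than malloc'ed.
#include <cuda_runtime.h>
#include <vector>

__global__ void scalarAddSketch(int *data, int inc)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    data[i] += inc;
}

void scalarAddAllDevices(int *h_data, size_t numElements, int inc, int numDevices)
{
    // Assume numElements is a multiple of numDevices and of 128, as in the sample.
    size_t perDev      = numElements / numDevices;
    size_t perDevBytes = perDev * sizeof(int);

    std::vector<cudaStream_t> streams(numDevices);
    std::vector<int *>        d_bufs(numDevices);

    // Issue copy-in, kernel, copy-out for every device without blocking.
    for (int dev = 0; dev < numDevices; ++dev)
    {
        cudaSetDevice(dev);
        cudaStreamCreate(&streams[dev]);
        cudaMalloc(&d_bufs[dev], perDevBytes);
        cudaMemcpyAsync(d_bufs[dev], h_data + dev * perDev, perDevBytes,
                        cudaMemcpyHostToDevice, streams[dev]);
        scalarAddSketch<<<(int)(perDev / 128), 128, 0, streams[dev]>>>(d_bufs[dev], inc);
        cudaMemcpyAsync(h_data + dev * perDev, d_bufs[dev], perDevBytes,
                        cudaMemcpyDeviceToHost, streams[dev]);
    }
    // Wait for every device to finish, then release its resources.
    for (int dev = 0; dev < numDevices; ++dev)
    {
        cudaSetDevice(dev);
        cudaStreamSynchronize(streams[dev]);
        cudaFree(d_bufs[dev]);
        cudaStreamDestroy(streams[dev]);
    }
}
// Error checking is omitted for brevity; the original sample's verification loop
// (comparing each element against 0 + inc) applies unchanged.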
2b580ff648375081672d4f0ddff8325845af6679.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #pragma once #include <thrust/device_ptr.h> #include <thrust/device_malloc.h> #include <thrust/device_vector.h> #include <thrust/copy.h> #include "fmmtl/config.hpp" #include "fmmtl/dispatch/S2T/S2T_Compressed.hpp" #include "fmmtl/dispatch/S2T/S2T_Blocked_CSR.cu" struct Data { unsigned num_sources; unsigned num_targets; unsigned num_threads_per_block; unsigned num_blocks; Data(unsigned s, unsigned t, unsigned b) : num_sources(s), num_targets(t), num_threads_per_block(256), num_blocks(b) { } }; template <typename T> inline T* gpu_new(unsigned n) { return thrust::raw_pointer_cast(thrust::device_malloc<T>(n)); } template <typename Container> inline typename Container::value_type* gpu_copy(const Container& c) { typedef typename Container::value_type c_value; // Allocate thrust::device_ptr<c_value> dptr = thrust::device_malloc<c_value>(c.size()); // Copy //thrust::uninitialized_copy(c.begin(), c.end(), dptr); thrust::copy(c.begin(), c.end(), dptr); // Return return thrust::raw_pointer_cast(dptr); } template <typename T> inline void gpu_free(T* p) { thrust::device_free(thrust::device_pointer_cast<void>(p)); } template <typename Kernel> S2T_Compressed<Kernel>::S2T_Compressed() : data_(0) { } template <typename Kernel> S2T_Compressed<Kernel>::S2T_Compressed( std::vector<std::pair<unsigned,unsigned> >& target_ranges, std::vector<unsigned>& source_range_ptrs, std::vector<std::pair<unsigned,unsigned> >& source_ranges, const std::vector<source_type>& sources, const std::vector<target_type>& targets) : data_(new Data(sources.size(), targets.size(), target_ranges.size())), target_ranges_(gpu_copy(target_ranges)), source_range_ptrs_(gpu_copy(source_range_ptrs)), source_ranges_(gpu_copy(source_ranges)), sources_(gpu_copy(sources)), targets_(gpu_copy(targets)) { } template <typename Kernel> S2T_Compressed<Kernel>::~S2T_Compressed() { delete data_; gpu_free(target_ranges_); gpu_free(source_range_ptrs_); gpu_free(source_ranges_); gpu_free(sources_); gpu_free(targets_); } /** A functor that indexes an array as one type but returns another type */ template <typename T1, typename T2> class tricky_cast { T1* a_; public: __host__ __device__ tricky_cast(T1* a) : a_(a) {} __host__ __device__ T2 operator[](unsigned blockidx) const { return *((T2*)(a_ + blockidx)); } }; template <typename Kernel> void S2T_Compressed<Kernel>::execute( const Kernel& K, const std::vector<charge_type>& charges, std::vector<result_type>& results) { typedef Kernel kernel_type; typedef typename kernel_type::source_type source_type; typedef typename kernel_type::target_type target_type; typedef typename kernel_type::charge_type charge_type; typedef typename kernel_type::result_type result_type; // XXX: Using a device_vector here was giving "floating point exceptions"... // XXX: device_vector doesn't like the Vec? 
charge_type* d_charges = gpu_copy(charges); result_type* d_results = gpu_copy(results); Data* data = reinterpret_cast<Data*>(data_); const unsigned num_tpb = 256; //data->num_threads_per_block; const unsigned num_blocks = data->num_blocks; #if defined(FMMTL_DEBUG) std::cout << "Launching GPU Kernel: (blocks, threads/block) = (" << num_blocks << ", " << num_tpb << ")" << std::endl; #endif typedef thrust::pair<unsigned,unsigned> upair; // Launch kernel <<<grid_size, block_size>>> hipLaunchKernelGGL(( blocked_p2p<num_tpb>), dim3(num_blocks),dim3(num_tpb), 0, 0, K, target_ranges_, tricky_cast<unsigned, upair>(source_range_ptrs_), source_ranges_, sources_, //thrust::raw_pointer_cast(d_charges.data()), d_charges, targets_, d_results); //thrust::raw_pointer_cast(d_results.data())); FMMTL_CUDA_CHECK; // Copy results back thrust::device_ptr<result_type> d_results_ptr = thrust::device_pointer_cast(d_results); thrust::copy(d_results_ptr, d_results_ptr + results.size(), results.begin()); gpu_free(d_results); gpu_free(d_charges); } /** A functor that maps blockidx -> (target_begin,target_end) */ template <unsigned BLOCKDIM> class block_range { unsigned N_; public: __host__ __device__ block_range(unsigned N) : N_(N) {} __host__ __device__ thrust::pair<unsigned,unsigned> operator[](unsigned blockidx) const { return thrust::make_pair(blockidx * BLOCKDIM, min(blockidx * BLOCKDIM + BLOCKDIM, N_)); } }; /** A functor that returns a constant */ template <typename T> class constant { T value_; public: __host__ __device__ constant(T value) : value_(value) {} __host__ __device__ T operator[](unsigned) const { return value_; } }; template <typename Kernel> void S2T_Compressed<Kernel>::execute(const Kernel& K, const std::vector<source_type>& s, const std::vector<charge_type>& c, const std::vector<target_type>& t, std::vector<result_type>& r) { typedef Kernel kernel_type; typedef typename kernel_type::source_type source_type; typedef typename kernel_type::target_type target_type; typedef typename kernel_type::charge_type charge_type; typedef typename kernel_type::result_type result_type; source_type* d_sources = gpu_copy(s); charge_type* d_charges = gpu_copy(c); target_type* d_targets = gpu_copy(t); result_type* d_results = gpu_copy(r); // XXX: device_vector doesn't like our vector? //thrust::device_vector<source_type> d_sources(s); //thrust::device_vector<charge_type> d_charges(c); //thrust::device_vector<target_type> d_targets(t); //thrust::device_vector<result_type> d_results(r); const unsigned num_tpb = 256; const unsigned num_blocks = (t.size() + num_tpb - 1) / num_tpb; #if defined(FMMTL_DEBUG) std::cout << "Launching GPU Kernel: (blocks, threads/block) = (" << num_blocks << ", " << num_tpb << ")" << std::endl; #endif typedef thrust::pair<unsigned,unsigned> upair; // Launch kernel <<<grid_size, block_size>>> hipLaunchKernelGGL(( blocked_p2p<num_tpb>), dim3(num_blocks), dim3(num_tpb), 0, 0, K, block_range<num_tpb>(t.size()), constant<upair>(upair(0,1)), constant<upair>(upair(0,s.size())), d_sources, d_charges, d_targets, d_results); //thrust::raw_pointer_cast(d_sources.data()), //thrust::raw_pointer_cast(d_charges.data()), //thrust::raw_pointer_cast(d_targets.data()), //thrust::raw_pointer_cast(d_results.data())); FMMTL_CUDA_CHECK; // Copy results back and assign thrust::device_ptr<result_type> d_results_ptr = thrust::device_pointer_cast(d_results); thrust::copy(d_results_ptr, d_results_ptr + r.size(), r.begin()); gpu_free(d_sources); gpu_free(d_charges); gpu_free(d_targets); gpu_free(d_results); }
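// ----------------------------------------------------------------------------
// Illustrative usage of the gpu_copy()/gpu_free() helpers defined above, kept
// separate from the S2T_Compressed machinery: a host vector is mirrored on the
// device with thrust::device_malloc + thrust::copy, read back through a
// thrust::device_ptr (exactly as execute() does for d_results), and released
// with thrust::device_free. The name demo_gpu_copy_roundtrip is illustrative.
#include <cassert>
#include <vector>

inline void demo_gpu_copy_roundtrip()
{
    std::vector<float> h(1024, 1.0f);

    // Raw device pointer backed by thrust::device_malloc, filled from h.
    float *d = gpu_copy(h);

    // Wrap the raw pointer again to use thrust algorithms on it.
    thrust::device_ptr<float> dptr = thrust::device_pointer_cast(d);
    std::vector<float> back(h.size());
    thrust::copy(dptr, dptr + h.size(), back.begin());

    assert(back == h);

    // Must be released with thrust::device_free (via gpu_free), not cudaFree/hipFree.
    gpu_free(d);
}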
2b580ff648375081672d4f0ddff8325845af6679.cu
#pragma once #include <thrust/device_ptr.h> #include <thrust/device_malloc.h> #include <thrust/device_vector.h> #include <thrust/copy.h> #include "fmmtl/config.hpp" #include "fmmtl/dispatch/S2T/S2T_Compressed.hpp" #include "fmmtl/dispatch/S2T/S2T_Blocked_CSR.cu" struct Data { unsigned num_sources; unsigned num_targets; unsigned num_threads_per_block; unsigned num_blocks; Data(unsigned s, unsigned t, unsigned b) : num_sources(s), num_targets(t), num_threads_per_block(256), num_blocks(b) { } }; template <typename T> inline T* gpu_new(unsigned n) { return thrust::raw_pointer_cast(thrust::device_malloc<T>(n)); } template <typename Container> inline typename Container::value_type* gpu_copy(const Container& c) { typedef typename Container::value_type c_value; // Allocate thrust::device_ptr<c_value> dptr = thrust::device_malloc<c_value>(c.size()); // Copy //thrust::uninitialized_copy(c.begin(), c.end(), dptr); thrust::copy(c.begin(), c.end(), dptr); // Return return thrust::raw_pointer_cast(dptr); } template <typename T> inline void gpu_free(T* p) { thrust::device_free(thrust::device_pointer_cast<void>(p)); } template <typename Kernel> S2T_Compressed<Kernel>::S2T_Compressed() : data_(0) { } template <typename Kernel> S2T_Compressed<Kernel>::S2T_Compressed( std::vector<std::pair<unsigned,unsigned> >& target_ranges, std::vector<unsigned>& source_range_ptrs, std::vector<std::pair<unsigned,unsigned> >& source_ranges, const std::vector<source_type>& sources, const std::vector<target_type>& targets) : data_(new Data(sources.size(), targets.size(), target_ranges.size())), target_ranges_(gpu_copy(target_ranges)), source_range_ptrs_(gpu_copy(source_range_ptrs)), source_ranges_(gpu_copy(source_ranges)), sources_(gpu_copy(sources)), targets_(gpu_copy(targets)) { } template <typename Kernel> S2T_Compressed<Kernel>::~S2T_Compressed() { delete data_; gpu_free(target_ranges_); gpu_free(source_range_ptrs_); gpu_free(source_ranges_); gpu_free(sources_); gpu_free(targets_); } /** A functor that indexes an array as one type but returns another type */ template <typename T1, typename T2> class tricky_cast { T1* a_; public: __host__ __device__ tricky_cast(T1* a) : a_(a) {} __host__ __device__ T2 operator[](unsigned blockidx) const { return *((T2*)(a_ + blockidx)); } }; template <typename Kernel> void S2T_Compressed<Kernel>::execute( const Kernel& K, const std::vector<charge_type>& charges, std::vector<result_type>& results) { typedef Kernel kernel_type; typedef typename kernel_type::source_type source_type; typedef typename kernel_type::target_type target_type; typedef typename kernel_type::charge_type charge_type; typedef typename kernel_type::result_type result_type; // XXX: Using a device_vector here was giving "floating point exceptions"... // XXX: device_vector doesn't like the Vec? 
charge_type* d_charges = gpu_copy(charges); result_type* d_results = gpu_copy(results); Data* data = reinterpret_cast<Data*>(data_); const unsigned num_tpb = 256; //data->num_threads_per_block; const unsigned num_blocks = data->num_blocks; #if defined(FMMTL_DEBUG) std::cout << "Launching GPU Kernel: (blocks, threads/block) = (" << num_blocks << ", " << num_tpb << ")" << std::endl; #endif typedef thrust::pair<unsigned,unsigned> upair; // Launch kernel <<<grid_size, block_size>>> blocked_p2p<num_tpb><<<num_blocks,num_tpb>>>( K, target_ranges_, tricky_cast<unsigned, upair>(source_range_ptrs_), source_ranges_, sources_, //thrust::raw_pointer_cast(d_charges.data()), d_charges, targets_, d_results); //thrust::raw_pointer_cast(d_results.data())); FMMTL_CUDA_CHECK; // Copy results back thrust::device_ptr<result_type> d_results_ptr = thrust::device_pointer_cast(d_results); thrust::copy(d_results_ptr, d_results_ptr + results.size(), results.begin()); gpu_free(d_results); gpu_free(d_charges); } /** A functor that maps blockidx -> (target_begin,target_end) */ template <unsigned BLOCKDIM> class block_range { unsigned N_; public: __host__ __device__ block_range(unsigned N) : N_(N) {} __host__ __device__ thrust::pair<unsigned,unsigned> operator[](unsigned blockidx) const { return thrust::make_pair(blockidx * BLOCKDIM, min(blockidx * BLOCKDIM + BLOCKDIM, N_)); } }; /** A functor that returns a constant */ template <typename T> class constant { T value_; public: __host__ __device__ constant(T value) : value_(value) {} __host__ __device__ T operator[](unsigned) const { return value_; } }; template <typename Kernel> void S2T_Compressed<Kernel>::execute(const Kernel& K, const std::vector<source_type>& s, const std::vector<charge_type>& c, const std::vector<target_type>& t, std::vector<result_type>& r) { typedef Kernel kernel_type; typedef typename kernel_type::source_type source_type; typedef typename kernel_type::target_type target_type; typedef typename kernel_type::charge_type charge_type; typedef typename kernel_type::result_type result_type; source_type* d_sources = gpu_copy(s); charge_type* d_charges = gpu_copy(c); target_type* d_targets = gpu_copy(t); result_type* d_results = gpu_copy(r); // XXX: device_vector doesn't like our vector? //thrust::device_vector<source_type> d_sources(s); //thrust::device_vector<charge_type> d_charges(c); //thrust::device_vector<target_type> d_targets(t); //thrust::device_vector<result_type> d_results(r); const unsigned num_tpb = 256; const unsigned num_blocks = (t.size() + num_tpb - 1) / num_tpb; #if defined(FMMTL_DEBUG) std::cout << "Launching GPU Kernel: (blocks, threads/block) = (" << num_blocks << ", " << num_tpb << ")" << std::endl; #endif typedef thrust::pair<unsigned,unsigned> upair; // Launch kernel <<<grid_size, block_size>>> blocked_p2p<num_tpb><<<num_blocks, num_tpb>>>( K, block_range<num_tpb>(t.size()), constant<upair>(upair(0,1)), constant<upair>(upair(0,s.size())), d_sources, d_charges, d_targets, d_results); //thrust::raw_pointer_cast(d_sources.data()), //thrust::raw_pointer_cast(d_charges.data()), //thrust::raw_pointer_cast(d_targets.data()), //thrust::raw_pointer_cast(d_results.data())); FMMTL_CUDA_CHECK; // Copy results back and assign thrust::device_ptr<result_type> d_results_ptr = thrust::device_pointer_cast(d_results); thrust::copy(d_results_ptr, d_results_ptr + r.size(), r.begin()); gpu_free(d_sources); gpu_free(d_charges); gpu_free(d_targets); gpu_free(d_results); }
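// ----------------------------------------------------------------------------
// Hedged illustration of the tricky_cast<T1,T2> helper defined above: it indexes
// a plain unsigned array but yields thrust::pair<unsigned,unsigned> values,
// which appears to be how source_range_ptrs_ (consecutive unsigned offsets) is
// handed to blocked_p2p as (begin,end) pairs. Since operator[] is marked
// __host__ __device__, it can be exercised on the host. The function name
// demo_tricky_cast is illustrative only.
#include <cstdio>
#include <thrust/pair.h>

inline void demo_tricky_cast()
{
    // Consecutive range delimiters: index b reads (ptrs[b], ptrs[b+1]).
    unsigned ptrs[4] = { 0, 10, 25, 40 };

    typedef thrust::pair<unsigned,unsigned> upair;
    tricky_cast<unsigned, upair> ranges(ptrs);

    for (unsigned b = 0; b + 1 < 4; ++b)
    {
        upair r = ranges[b];   // reinterprets &ptrs[b] as a upair*
        std::printf("index %u: range [%u, %u)\n", b, r.first, r.second);
    }
}
// This relies on thrust::pair<unsigned,unsigned> having the same layout as two
// adjacent unsigned values, which is the same assumption the original cast makes.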
40bf1bc9bcd7c3a92e283a800200f247714313f0.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <time.h> #include <limits.h> #include <algorithm> #include <list> #include "CudaDevInfo.h" #include "ObjectMultiPool.h" class DemoVecSpec { public: // Constructors DemoVecSpec(size_t sz_ = 0) : sz(sz_) {} DemoVecSpec(const DemoVecSpec &other) : sz(other.size()) {} size_t size() const { return sz; } void setSize(size_t val) { sz = val; } bool onGpu() const { return false; } //* Allocate a vector of size_t bytes. //* \return pointer to the allocated vector static void *allocate(size_t nb, //*< number of bytes to allocate bool *on_g //*< If not NULL, returns true if //*allocated vector is on GPU ) { if (on_g != NULL) *on_g = false; return new char[nb]; } //* Deallocate vector allocated by allocate() static void deallocate(void * vec) { delete [] (char *) vec; } private: size_t sz; //*< object size }; // DemoVecSpec #if defined(__NVCC__) class D_DemoVecSpec : public DemoVecSpec { public: D_DemoVecSpec(size_t sz_ = 0) : DemoVecSpec(sz_) {} D_DemoVecSpec(const DemoVecSpec & other) : DemoVecSpec(other) {} bool onGpu() const { return true; } //* Allocate a vector of size_t bytes. //* \return pointer to the allocated vector static void *allocate(size_t nb, //*< number of bytes to allocate bool *on_g //*< If not NULL, returns true if //*allocated vector is on GPU ) { if (on_g != NULL) *on_g = true; void *vec; gpuErrChk(hipMalloc(&vec, nb), "cuda_ObjectMultiPool:D_DemoVecSpec:alloc_error", ""); return vec; } //* Deallocate vector allocated by allocate() static void deallocate(void * vec) { gpuErrChk(hipFree(vec), "cuda_ObjectMultiPool:D_DemoVecSpec:alloc_error", ""); } }; // D_DemoVecSpec #endif // defined(__NVCC__) //* A vector template class template<typename data_t, class frm_spec_t> class DemoVec : public GenericPoolObject { public: DemoVec(const frm_spec_t &spec_, GenericObjectPool * const pool) : GenericPoolObject(pool), spec(spec_), tst_indx(0), len(0), data((data_t *) spec_.allocate(spec_.size()*sizeof(data_t), &on_gpu)) {} ~DemoVec() { if(data != NULL) spec.deallocate(data); } unsigned tstIndx() const { return tst_indx; } void setTstIndx(unsigned val) { tst_indx = val; } size_t size() const {return spec.size();} bool onGpu() const { return on_gpu; } size_t length() const {return len; } void setLength(size_t val) {len = val; } operator data_t * () { return data; } operator const data_t * () const {return data; } private: const frm_spec_t &spec; unsigned tst_indx; //*< an index for testing purposes; bool on_gpu; size_t len; data_t *data; }; #ifdef __NVCC__ typedef D_DemoVecSpec spec_t; #else typedef DemoVecSpec spec_t; #endif typedef DemoVec<short, spec_t> vec_t; typedef std::list<vec_t *> vec_list_t; typedef ObjectMultiPool<vec_t, spec_t> mpl_t ; typedef ObjectPool<vec_t, spec_t> pool_t ; void print_status(const mpl_t &mpl, const vec_list_t vec_list) { printf("+++++++++++++++++\n"); mpl.print(); if(vec_list.empty()) printf("vec_list is empty\n"); else { printf("vec_list:"); vec_list_t::const_iterator itr; unsigned k=0; for(itr = vec_list.begin(); itr != vec_list.end(); itr++) { const vec_t *p = *itr; printf("%s%4u%s 0x%06lX:0x%06lX", (k++%5)? 
", ":",\n ", p->tstIndx(), p->onGpu()?"G":"C", (unsigned long) p->length(), (unsigned long)p->size()); } } printf("\n------------------\n"); } void help(const char *cmnd) { fprintf(stderr, "USAGE:\n" " %s [-v] <n_tst> [<min_size> [<rnd_seed> [<max_size>]]]\n" " The function tests the operation of ObjectMultiPool by running \n" " <n_tst> (unsigned int) random test of get() or put() from or to the\n" " MultiPool, respectively. The objects in the MultiPool are of class DemoVec\n" " (defined here), which can be specified with any size from <min_size> to\n" " <max_size>.\n" " The default for <min_size> is 1 and for max_size it is 0x%08lX\n" " In each test the function randomly either:\n" " A. Calls get() to get a DemoVec object of a random size and insers\n" " it into a FIFO.\n" " B. removes a DemoVec object from the FIFO and calls put() to put\n" " this object back into the MultiPool.\n" " (if the FIFO is empty B is not done). Correctness is tested and the\n" " status of the FIFO and the MultiPool are tested after each iteration.\n" " If -v is specified status is printed after each iteration.\n" , cmnd, (unsigned long)mpl_t::defaultMaxSize()); } int main(int argc, const char *argv[]) { int err = EXIT_SUCCESS; unsigned n_tst; //*< number of test to run unsigned i_tst; //*< test index size_t min_size; //*< minimum block size unsigned rnd_seed = 0; //*< seed of random number generator unsigned long msz=1, mxsz = (unsigned long)mpl_t::defaultMaxSize(); int n_opts = 0; bool verbose = false; if(argc <= 1) { help(argv[0]); return EXIT_SUCCESS; } if(!strcmp(argv[1], "-v")) { verbose = true; n_opts++; } if(argc-n_opts<2 || argc-n_opts>5 || sscanf(argv[n_opts+1], "%u", &n_tst) != 1 || (argc-n_opts >= 3 && sscanf(argv[n_opts+2], "%lu", &msz) != 1) || (argc-n_opts>=4 && sscanf(argv[n_opts+3], "%u", &rnd_seed) != 1) || (argc-n_opts>=5 && sscanf(argv[n_opts+4], "%lu", &mxsz) != 1) ) { fprintf(stderr, "Illegal arguments\n\n"); help(argv[0]); exit(EXIT_FAILURE); } size_t max_size = size_t(mxsz); min_size = size_t(msz); srand(rnd_seed); spec_t spec; vec_list_t vec_list; vec_list_t::iterator itr; size_t ttl_get = 0; unsigned last_get = 0; size_t max_vec_list = 0; mpl_t mpl(spec, min_size, max_size); clock_t start_time = clock(); if(verbose) { printf("Initial state:\n"); print_status(mpl, vec_list); } for(i_tst= 0; i_tst < n_tst;i_tst++ ) { // make the probability of get 7/16, slightly less than half. This makes // emptying vec_list have a probability of 1. 
unsigned rnd = rand(); bool do_get = vec_list.empty() || ((rnd & 0xF) < 7); if(do_get) { size_t sz = size_t(floor(exp(double(rand())*log(double(max_size))/RAND_MAX))); if(verbose) printf("%u: get object of size %lu=0x%06lX\n", i_tst, (unsigned long) sz, (unsigned long) sz); size_t ttl = mpl.total(); vec_t & vec = mpl.get(sz); assert(vec.size() <= ((pool_t *)(vec.pool))->elmntSize()); vec.setTstIndx(i_tst); vec.setLength(sz); vec_list.push_back(&vec); max_vec_list = ::max(vec_list.size(), max_vec_list); if(ttl == mpl.total()) last_get = ttl_get; ttl_get++; } else{ unsigned indx = rand() % vec_list.size(); bool use_discard = bool(rnd & 0x10); if(verbose) printf("%u %s object %u ", i_tst, use_discard?"discard":"put", indx); for(itr = vec_list.begin(); indx-- > 0; itr++); vec_t *pvec = *itr; if(verbose) printf("object index=%u, size=%lu=0x%06lX\n", pvec->tstIndx(), (unsigned long)pvec->size(), (unsigned long)pvec->size()); assert(pvec->size() <= ((pool_t *)(pvec->pool))->elmntSize()); if(use_discard) pvec->discard(); else mpl.put(*pvec); vec_list.erase(itr); } if(verbose) print_status(mpl, vec_list); } if(verbose) { printf("Status at end:\n"); print_status(mpl, vec_list); } // Clear vec_list if(!verbose) { double elapsed = double(clock()-start_time)/CLOCKS_PER_SEC; printf("\n====== Done. in %f sec. (%f microsec/test) =====\n", elapsed, elapsed*1E6/n_tst); } else printf("\n====== Done. =====\n"); if(verbose) printf("\n****** Clearing up ******\n\n"); for(itr = vec_list.begin(); itr != vec_list.end();) { mpl.put(**itr); itr = vec_list.erase(itr); if(verbose) print_status(mpl, vec_list); } printf("Total gets: %lu, total blocks in pool: %lu, last_get: %u, max_vec_list=%lu\n", (unsigned long) ttl_get, (unsigned long) mpl.total(), last_get, (unsigned long)max_vec_list); print_status(mpl, vec_list); return err; }
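// ----------------------------------------------------------------------------
// Hedged side note on the size distribution used in the test loop above:
// exp(rand()*log(max_size)/RAND_MAX) draws sizes log-uniformly in [1, max_size],
// so every factor-of-two size band is hit about equally often and small pools
// get exercised as much as large ones. A standalone sketch of the same draw
// (the name log_uniform_size is illustrative only):
#include <cmath>
#include <cstdlib>

inline size_t log_uniform_size(size_t max_size)
{
    // rand()/RAND_MAX is uniform in [0,1]; scaling the *logarithm* of the size
    // makes log(sz) uniform, i.e. sz itself is log-uniform in [1, max_size].
    double u = double(std::rand()) / RAND_MAX;
    return size_t(std::floor(std::exp(u * std::log(double(max_size)))));
}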
40bf1bc9bcd7c3a92e283a800200f247714313f0.cu
#include <stdio.h> #include <stdlib.h> #include <time.h> #include <limits.h> #include <algorithm> #include <list> #include "CudaDevInfo.h" #include "ObjectMultiPool.h" class DemoVecSpec { public: // Constructors DemoVecSpec(size_t sz_ = 0) : sz(sz_) {} DemoVecSpec(const DemoVecSpec &other) : sz(other.size()) {} size_t size() const { return sz; } void setSize(size_t val) { sz = val; } bool onGpu() const { return false; } //* Allocate a vector of size_t bytes. //* \return pointer to the allocated vector static void *allocate(size_t nb, //*< number of bytes to allocate bool *on_g //*< If not NULL, returns true if //*allocated vector is on GPU ) { if (on_g != NULL) *on_g = false; return new char[nb]; } //* Deallocate vector allocated by allocate() static void deallocate(void * vec) { delete [] (char *) vec; } private: size_t sz; //*< object size }; // DemoVecSpec #if defined(__NVCC__) class D_DemoVecSpec : public DemoVecSpec { public: D_DemoVecSpec(size_t sz_ = 0) : DemoVecSpec(sz_) {} D_DemoVecSpec(const DemoVecSpec & other) : DemoVecSpec(other) {} bool onGpu() const { return true; } //* Allocate a vector of size_t bytes. //* \return pointer to the allocated vector static void *allocate(size_t nb, //*< number of bytes to allocate bool *on_g //*< If not NULL, returns true if //*allocated vector is on GPU ) { if (on_g != NULL) *on_g = true; void *vec; gpuErrChk(cudaMalloc(&vec, nb), "cuda_ObjectMultiPool:D_DemoVecSpec:alloc_error", ""); return vec; } //* Deallocate vector allocated by allocate() static void deallocate(void * vec) { gpuErrChk(cudaFree(vec), "cuda_ObjectMultiPool:D_DemoVecSpec:alloc_error", ""); } }; // D_DemoVecSpec #endif // defined(__NVCC__) //* A vector template class template<typename data_t, class frm_spec_t> class DemoVec : public GenericPoolObject { public: DemoVec(const frm_spec_t &spec_, GenericObjectPool * const pool) : GenericPoolObject(pool), spec(spec_), tst_indx(0), len(0), data((data_t *) spec_.allocate(spec_.size()*sizeof(data_t), &on_gpu)) {} ~DemoVec() { if(data != NULL) spec.deallocate(data); } unsigned tstIndx() const { return tst_indx; } void setTstIndx(unsigned val) { tst_indx = val; } size_t size() const {return spec.size();} bool onGpu() const { return on_gpu; } size_t length() const {return len; } void setLength(size_t val) {len = val; } operator data_t * () { return data; } operator const data_t * () const {return data; } private: const frm_spec_t &spec; unsigned tst_indx; //*< an index for testing purposes; bool on_gpu; size_t len; data_t *data; }; #ifdef __NVCC__ typedef D_DemoVecSpec spec_t; #else typedef DemoVecSpec spec_t; #endif typedef DemoVec<short, spec_t> vec_t; typedef std::list<vec_t *> vec_list_t; typedef ObjectMultiPool<vec_t, spec_t> mpl_t ; typedef ObjectPool<vec_t, spec_t> pool_t ; void print_status(const mpl_t &mpl, const vec_list_t vec_list) { printf("+++++++++++++++++\n"); mpl.print(); if(vec_list.empty()) printf("vec_list is empty\n"); else { printf("vec_list:"); vec_list_t::const_iterator itr; unsigned k=0; for(itr = vec_list.begin(); itr != vec_list.end(); itr++) { const vec_t *p = *itr; printf("%s%4u%s 0x%06lX:0x%06lX", (k++%5)? 
", ":",\n ", p->tstIndx(), p->onGpu()?"G":"C", (unsigned long) p->length(), (unsigned long)p->size()); } } printf("\n------------------\n"); } void help(const char *cmnd) { fprintf(stderr, "USAGE:\n" " %s [-v] <n_tst> [<min_size> [<rnd_seed> [<max_size>]]]\n" " The function tests the operation of ObjectMultiPool by running \n" " <n_tst> (unsigned int) random test of get() or put() from or to the\n" " MultiPool, respectively. The objects in the MultiPool are of class DemoVec\n" " (defined here), which can be specified with any size from <min_size> to\n" " <max_size>.\n" " The default for <min_size> is 1 and for max_size it is 0x%08lX\n" " In each test the function randomly either:\n" " A. Calls get() to get a DemoVec object of a random size and insers\n" " it into a FIFO.\n" " B. removes a DemoVec object from the FIFO and calls put() to put\n" " this object back into the MultiPool.\n" " (if the FIFO is empty B is not done). Correctness is tested and the\n" " status of the FIFO and the MultiPool are tested after each iteration.\n" " If -v is specified status is printed after each iteration.\n" , cmnd, (unsigned long)mpl_t::defaultMaxSize()); } int main(int argc, const char *argv[]) { int err = EXIT_SUCCESS; unsigned n_tst; //*< number of test to run unsigned i_tst; //*< test index size_t min_size; //*< minimum block size unsigned rnd_seed = 0; //*< seed of random number generator unsigned long msz=1, mxsz = (unsigned long)mpl_t::defaultMaxSize(); int n_opts = 0; bool verbose = false; if(argc <= 1) { help(argv[0]); return EXIT_SUCCESS; } if(!strcmp(argv[1], "-v")) { verbose = true; n_opts++; } if(argc-n_opts<2 || argc-n_opts>5 || sscanf(argv[n_opts+1], "%u", &n_tst) != 1 || (argc-n_opts >= 3 && sscanf(argv[n_opts+2], "%lu", &msz) != 1) || (argc-n_opts>=4 && sscanf(argv[n_opts+3], "%u", &rnd_seed) != 1) || (argc-n_opts>=5 && sscanf(argv[n_opts+4], "%lu", &mxsz) != 1) ) { fprintf(stderr, "Illegal arguments\n\n"); help(argv[0]); exit(EXIT_FAILURE); } size_t max_size = size_t(mxsz); min_size = size_t(msz); srand(rnd_seed); spec_t spec; vec_list_t vec_list; vec_list_t::iterator itr; size_t ttl_get = 0; unsigned last_get = 0; size_t max_vec_list = 0; mpl_t mpl(spec, min_size, max_size); clock_t start_time = clock(); if(verbose) { printf("Initial state:\n"); print_status(mpl, vec_list); } for(i_tst= 0; i_tst < n_tst;i_tst++ ) { // make the probability of get 7/16, slightly less than half. This makes // emptying vec_list have a probability of 1. 
unsigned rnd = rand(); bool do_get = vec_list.empty() || ((rnd & 0xF) < 7); if(do_get) { size_t sz = size_t(floor(exp(double(rand())*log(double(max_size))/RAND_MAX))); if(verbose) printf("%u: get object of size %lu=0x%06lX\n", i_tst, (unsigned long) sz, (unsigned long) sz); size_t ttl = mpl.total(); vec_t & vec = mpl.get(sz); assert(vec.size() <= ((pool_t *)(vec.pool))->elmntSize()); vec.setTstIndx(i_tst); vec.setLength(sz); vec_list.push_back(&vec); max_vec_list = std::max(vec_list.size(), max_vec_list); if(ttl == mpl.total()) last_get = ttl_get; ttl_get++; } else{ unsigned indx = rand() % vec_list.size(); bool use_discard = bool(rnd & 0x10); if(verbose) printf("%u %s object %u ", i_tst, use_discard?"discard":"put", indx); for(itr = vec_list.begin(); indx-- > 0; itr++); vec_t *pvec = *itr; if(verbose) printf("object index=%u, size=%lu=0x%06lX\n", pvec->tstIndx(), (unsigned long)pvec->size(), (unsigned long)pvec->size()); assert(pvec->size() <= ((pool_t *)(pvec->pool))->elmntSize()); if(use_discard) pvec->discard(); else mpl.put(*pvec); vec_list.erase(itr); } if(verbose) print_status(mpl, vec_list); } if(verbose) { printf("Status at end:\n"); print_status(mpl, vec_list); } // Clear vec_list if(!verbose) { double elapsed = double(clock()-start_time)/CLOCKS_PER_SEC; printf("\n====== Done. in %f sec. (%f microsec/test) =====\n", elapsed, elapsed*1E6/n_tst); } else printf("\n====== Done. =====\n"); if(verbose) printf("\n****** Clearing up ******\n\n"); for(itr = vec_list.begin(); itr != vec_list.end();) { mpl.put(**itr); itr = vec_list.erase(itr); if(verbose) print_status(mpl, vec_list); } printf("Total gets: %lu, total blocks in pool: %lu, last_get: %u, max_vec_list=%lu\n", (unsigned long) ttl_get, (unsigned long) mpl.total(), last_get, (unsigned long)max_vec_list); print_status(mpl, vec_list); return err; }
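// ----------------------------------------------------------------------------
// Hypothetical sketch (ObjectMultiPool.h is not shown in this file, so this is
// an assumption about a plausible bucketing scheme, not its actual
// implementation): multi-pools like the one exercised above commonly map a
// requested size to a discrete pool by rounding up to the next power of two,
// which would be consistent with the test's assertion that
// vec.size() <= ((pool_t*)vec.pool)->elmntSize(). The name round_up_pow2 is
// illustrative only.
#include <cstddef>

inline size_t round_up_pow2(size_t n)
{
    // Smallest power of two >= n (assumes n >= 1).
    size_t p = 1;
    while (p < n)
        p <<= 1;
    return p;
}
// e.g. under this scheme a request for 3000 elements would be served from a
// 4096-element pool, so the returned object's capacity always covers the request.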
e7ac064e8c69a4557eeb95401531002af01045b6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. // //M*/ //////////////////////////////////////////////////////////////////////////////// // // NVIDIA CUDA implementation of Brox et al Optical Flow algorithm // // Algorithm is explained in the original paper: // T. Brox, A. Bruhn, N. Papenberg, J. Weickert: // High accuracy optical flow estimation based on a theory for warping. // ECCV 2004. // // Implementation by Mikhail Smirnov // email: [email protected], [email protected] // // Credits for help with the code to: // Alexey Mendelenko, Anton Obukhov, and Alexander Kharlamov. 
// //////////////////////////////////////////////////////////////////////////////// #include <iostream> #include <vector> #include <memory> #include "opencv2/core/cuda/utility.hpp" #include "opencv2/cudalegacy/NPP_staging.hpp" #include "opencv2/cudalegacy/NCVBroxOpticalFlow.hpp" #include <opencv2/cudev/ptr2d/texture.hpp> typedef NCVVectorAlloc<Ncv32f> FloatVector; typedef cv::cudev::TexturePtr<float> Ptr2D; typedef cv::cudev::Texture<float> Texture; ///////////////////////////////////////////////////////////////////////////////////////// // Implementation specific constants ///////////////////////////////////////////////////////////////////////////////////////// __device__ const float eps2 = 1e-6f; ///////////////////////////////////////////////////////////////////////////////////////// // Additional defines ///////////////////////////////////////////////////////////////////////////////////////// // rounded up division inline int iDivUp(int a, int b) { return (a + b - 1)/b; } ///////////////////////////////////////////////////////////////////////////////////////// // SUPPLEMENTARY FUNCTIONS ///////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////// /// \brief performs pointwise summation of two vectors stored in device memory /// \param d_res - pointer to resulting vector (device memory) /// \param d_op1 - term #1 (device memory) /// \param d_op2 - term #2 (device memory) /// \param len - vector size /////////////////////////////////////////////////////////////////////////////// __global__ void pointwise_add(float *d_res, const float *d_op1, const float *d_op2, const int len) { const int pos = blockIdx.x*blockDim.x + threadIdx.x; if(pos >= len) return; d_res[pos] = d_op1[pos] + d_op2[pos]; } /////////////////////////////////////////////////////////////////////////////// /// \brief wrapper for summation kernel. /// Computes \b op1 + \b op2 and stores result to \b res /// \param res array, containing op1 + op2 (device memory) /// \param op1 term #1 (device memory) /// \param op2 term #2 (device memory) /// \param count vector size /////////////////////////////////////////////////////////////////////////////// static void add(float *res, const float *op1, const float *op2, const int count, hipStream_t stream) { dim3 threads(256); dim3 blocks(iDivUp(count, threads.x)); hipLaunchKernelGGL(( pointwise_add), dim3(blocks), dim3(threads), 0, stream, res, op1, op2, count); } /////////////////////////////////////////////////////////////////////////////// /// \brief wrapper for summation kernel. 
/// Increments \b res by \b rhs /// \param res initial vector, will be replaced with result (device memory) /// \param rhs increment (device memory) /// \param count vector size /////////////////////////////////////////////////////////////////////////////// static void add(float *res, const float *rhs, const int count, hipStream_t stream) { add(res, res, rhs, count, stream); } /////////////////////////////////////////////////////////////////////////////// /// \brief kernel for scaling vector by scalar /// \param d_res scaled vector (device memory) /// \param d_src source vector (device memory) /// \param scale scalar to scale by /// \param len vector size (number of elements) /////////////////////////////////////////////////////////////////////////////// __global__ void scaleVector(float *d_res, const float *d_src, float scale, const int len) { const int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= len) return; d_res[pos] = d_src[pos] * scale; } /////////////////////////////////////////////////////////////////////////////// /// \brief scale vector by scalar /// /// kernel wrapper /// \param d_res scaled vector (device memory) /// \param d_src source vector (device memory) /// \param scale scalar to scale by /// \param len vector size (number of elements) /// \param stream CUDA stream /////////////////////////////////////////////////////////////////////////////// static void ScaleVector(float *d_res, const float *d_src, float scale, const int len, hipStream_t stream) { dim3 threads(256); dim3 blocks(iDivUp(len, threads.x)); hipLaunchKernelGGL(( scaleVector), dim3(blocks), dim3(threads), 0, stream, d_res, d_src, scale, len); } const int SOR_TILE_WIDTH = 32; const int SOR_TILE_HEIGHT = 6; const int PSOR_TILE_WIDTH = 32; const int PSOR_TILE_HEIGHT = 6; const int PSOR_PITCH = PSOR_TILE_WIDTH + 4; const int PSOR_HEIGHT = PSOR_TILE_HEIGHT + 4; /////////////////////////////////////////////////////////////////////////////// ///\brief Utility function. Compute smooth term diffusivity along x axis ///\param s (out) pointer to memory location for result (diffusivity) ///\param pos (in) position within shared memory array containing \b u ///\param u (in) shared memory array containing \b u ///\param v (in) shared memory array containing \b v ///\param du (in) shared memory array containing \b du ///\param dv (in) shared memory array containing \b dv /////////////////////////////////////////////////////////////////////////////// __forceinline__ __device__ void diffusivity_along_x(float *s, int pos, const float *u, const float *v, const float *du, const float *dv) { //x derivative between pixels (i,j) and (i-1,j) const int left = pos-1; float u_x = u[pos] + du[pos] - u[left] - du[left]; float v_x = v[pos] + dv[pos] - v[left] - dv[left]; const int up = pos + PSOR_PITCH; const int down = pos - PSOR_PITCH; const int up_left = up - 1; const int down_left = down-1; //y derivative between pixels (i,j) and (i-1,j) float u_y = 0.25f*(u[up] + du[up] + u[up_left] + du[up_left] - u[down] - du[down] - u[down_left] - du[down_left]); float v_y = 0.25f*(v[up] + dv[up] + v[up_left] + dv[up_left] - v[down] - dv[down] - v[down_left] - dv[down_left]); *s = 0.5f / sqrtf(u_x*u_x + v_x*v_x + u_y*u_y + v_y*v_y + eps2); } /////////////////////////////////////////////////////////////////////////////// ///\brief Utility function. 
Compute smooth term diffusivity along y axis ///\param s (out) pointer to memory location for result (diffusivity) ///\param pos (in) position within shared memory array containing \b u ///\param u (in) shared memory array containing \b u ///\param v (in) shared memory array containing \b v ///\param du (in) shared memory array containing \b du ///\param dv (in) shared memory array containing \b dv /////////////////////////////////////////////////////////////////////////////// __forceinline__ __device__ void diffusivity_along_y(float *s, int pos, const float *u, const float *v, const float *du, const float *dv) { //y derivative between pixels (i,j) and (i,j-1) const int down = pos-PSOR_PITCH; float u_y = u[pos] + du[pos] - u[down] - du[down]; float v_y = v[pos] + dv[pos] - v[down] - dv[down]; const int right = pos + 1; const int left = pos - 1; const int down_right = down + 1; const int down_left = down - 1; //x derivative between pixels (i,j) and (i,j-1); float u_x = 0.25f*(u[right] + u[down_right] + du[right] + du[down_right] - u[left] - u[down_left] - du[left] - du[down_left]); float v_x = 0.25f*(v[right] + v[down_right] + dv[right] + dv[down_right] - v[left] - v[down_left] - dv[left] - dv[down_left]); *s = 0.5f/sqrtf(u_x*u_x + v_x*v_x + u_y*u_y + v_y*v_y + eps2); } /////////////////////////////////////////////////////////////////////////////// ///\brief Utility function. Load element of 2D global memory to shared memory ///\param smem pointer to shared memory array ///\param is shared memory array column ///\param js shared memory array row ///\param w number of columns in global memory array ///\param h number of rows in global memory array ///\param p global memory array pitch in floats /////////////////////////////////////////////////////////////////////////////// __forceinline__ __device__ void load_array_element(Ptr2D texSrc, float *smem, int is, int js, int i, int j, int w, int h, int p) { //position within shared memory array const int ijs = js * PSOR_PITCH + is; //mirror reflection across borders i = max(i, -i-1); i = min(i, w-i+w-1); j = max(j, -j-1); j = min(j, h-j+h-1); const int pos = j * p + i; smem[ijs] = texSrc(pos); } /////////////////////////////////////////////////////////////////////////////// ///\brief Utility function. 
Load part (tile) of 2D global memory to shared memory ///\param smem pointer to target shared memory array ///\param ig column number within source ///\param jg row number within source ///\param w number of columns in global memory array ///\param h number of rows in global memory array ///\param p global memory array pitch in floats /////////////////////////////////////////////////////////////////////////////// __forceinline__ __device__ void load_array(Ptr2D texSrc, float *smem, int ig, int jg, int w, int h, int p) { const int i = threadIdx.x + 2; const int j = threadIdx.y + 2; load_array_element(texSrc, smem, i, j, ig, jg, w, h, p);//load current pixel __syncthreads(); if(threadIdx.y < 2) { //load bottom shadow elements load_array_element(texSrc, smem, i, j-2, ig, jg-2, w, h, p); if(threadIdx.x < 2) { //load bottom right shadow elements load_array_element(texSrc, smem, i+PSOR_TILE_WIDTH, j-2, ig+PSOR_TILE_WIDTH, jg-2, w, h, p); //load middle right shadow elements load_array_element(texSrc, smem, i+PSOR_TILE_WIDTH, j, ig+PSOR_TILE_WIDTH, jg, w, h, p); } else if(threadIdx.x >= PSOR_TILE_WIDTH-2) { //load bottom left shadow elements load_array_element(texSrc, smem, i-PSOR_TILE_WIDTH, j-2, ig-PSOR_TILE_WIDTH, jg-2, w, h, p); //load middle left shadow elements load_array_element(texSrc, smem, i-PSOR_TILE_WIDTH, j, ig-PSOR_TILE_WIDTH, jg, w, h, p); } } else if(threadIdx.y >= PSOR_TILE_HEIGHT-2) { //load upper shadow elements load_array_element(texSrc, smem, i, j+2, ig, jg+2, w, h, p); if(threadIdx.x < 2) { //load upper right shadow elements load_array_element(texSrc, smem, i+PSOR_TILE_WIDTH, j+2, ig+PSOR_TILE_WIDTH, jg+2, w, h, p); //load middle right shadow elements load_array_element(texSrc, smem, i+PSOR_TILE_WIDTH, j, ig+PSOR_TILE_WIDTH, jg, w, h, p); } else if(threadIdx.x >= PSOR_TILE_WIDTH-2) { //load upper left shadow elements load_array_element(texSrc, smem, i-PSOR_TILE_WIDTH, j+2, ig-PSOR_TILE_WIDTH, jg+2, w, h, p); //load middle left shadow elements load_array_element(texSrc, smem, i-PSOR_TILE_WIDTH, j, ig-PSOR_TILE_WIDTH, jg, w, h, p); } } else { //load middle shadow elements if(threadIdx.x < 2) { //load middle right shadow elements load_array_element(texSrc, smem, i+PSOR_TILE_WIDTH, j, ig+PSOR_TILE_WIDTH, jg, w, h, p); } else if(threadIdx.x >= PSOR_TILE_WIDTH-2) { //load middle left shadow elements load_array_element(texSrc, smem, i-PSOR_TILE_WIDTH, j, ig-PSOR_TILE_WIDTH, jg, w, h, p); } } __syncthreads(); } /////////////////////////////////////////////////////////////////////////////// /// \brief computes matrix of linearised system for \c du, \c dv /// Computed values reside in GPU memory. \n /// Matrix computation is divided into two steps. 
This kernel performs first step\n /// - compute smoothness term diffusivity between pixels - psi dash smooth /// - compute robustness factor in the data term - psi dash data /// \param diffusivity_x (in/out) diffusivity between pixels along x axis in smoothness term /// \param diffusivity_y (in/out) diffusivity between pixels along y axis in smoothness term /// \param denominator_u (in/out) precomputed part of expression for new du value in SOR iteration /// \param denominator_v (in/out) precomputed part of expression for new dv value in SOR iteration /// \param numerator_dudv (in/out) precomputed part of expression for new du and dv value in SOR iteration /// \param numerator_u (in/out) precomputed part of expression for new du value in SOR iteration /// \param numerator_v (in/out) precomputed part of expression for new dv value in SOR iteration /// \param w (in) frame width /// \param h (in) frame height /// \param pitch (in) pitch in floats /// \param alpha (in) alpha in Brox model (flow smoothness) /// \param gamma (in) gamma in Brox model (edge importance) /////////////////////////////////////////////////////////////////////////////// __global__ void prepare_sor_stage_1_tex(Ptr2D texU, Ptr2D texV, Ptr2D texDu, Ptr2D texDv, Ptr2D texI0, Ptr2D texI1, Ptr2D texIx, Ptr2D texIxx, Ptr2D texIx0, Ptr2D texIy, Ptr2D texIyy, Ptr2D texIy0, Ptr2D texIxy, float *diffusivity_x, float *diffusivity_y, float *denominator_u, float *denominator_v, float *numerator_dudv, float *numerator_u, float *numerator_v, int w, int h, int s, float alpha, float gamma) { __shared__ float u[PSOR_PITCH * PSOR_HEIGHT]; __shared__ float v[PSOR_PITCH * PSOR_HEIGHT]; __shared__ float du[PSOR_PITCH * PSOR_HEIGHT]; __shared__ float dv[PSOR_PITCH * PSOR_HEIGHT]; //position within tile const int i = threadIdx.x; const int j = threadIdx.y; //position within smem arrays const int ijs = (j+2) * PSOR_PITCH + i + 2; //position within global memory const int ig = blockIdx.x * blockDim.x + threadIdx.x; const int jg = blockIdx.y * blockDim.y + threadIdx.y; const int ijg = jg * s + ig; //position within texture float x = (float)ig + 0.5f; float y = (float)jg + 0.5f; //load u and v to smem load_array(texU, u, ig, jg, w, h, s); load_array(texV, v, ig, jg, w, h, s); load_array(texDu, du, ig, jg, w, h, s); load_array(texDv, dv, ig, jg, w, h, s); //warped position float wx = (x + u[ijs])/(float)w; float wy = (y + v[ijs])/(float)h; x /= (float)w; y /= (float)h; //compute image derivatives const float Iz = texI1(wy, wx) - texI0(y,x); const float Ix = texIx(wy, wx); const float Ixz = Ix - texIx0(y, x); const float Ixy = texIxy(wy, wx); const float Ixx = texIxx(wy, wx); const float Iy = texIy(wy, wx); const float Iyz = Iy - texIy0(y, x); const float Iyy = texIyy(wy, wx); //compute data term float q0, q1, q2; q0 = Iz + Ix * du[ijs] + Iy * dv[ijs]; q1 = Ixz + Ixx * du[ijs] + Ixy * dv[ijs]; q2 = Iyz + Ixy * du[ijs] + Iyy * dv[ijs]; float data_term = 0.5f * rsqrtf(q0*q0 + gamma*(q1*q1 + q2*q2) + eps2); //scale data term by 1/alpha data_term /= alpha; //compute smoothness term (diffusivity) float sx, sy; if(ig >= w || jg >= h) return; diffusivity_along_x(&sx, ijs, u, v, du, dv); diffusivity_along_y(&sy, ijs, u, v, du, dv); if(ig == 0) sx = 0.0f; if(jg == 0) sy = 0.0f; numerator_dudv[ijg] = data_term * (Ix*Iy + gamma * Ixy*(Ixx + Iyy)); numerator_u[ijg] = data_term * (Ix*Iz + gamma * (Ixx*Ixz + Ixy*Iyz)); numerator_v[ijg] = data_term * (Iy*Iz + gamma * (Iyy*Iyz + Ixy*Ixz)); denominator_u[ijg] = data_term * (Ix*Ix + gamma * (Ixy*Ixy + Ixx*Ixx)); 
denominator_v[ijg] = data_term * (Iy*Iy + gamma * (Ixy*Ixy + Iyy*Iyy)); diffusivity_x[ijg] = sx; diffusivity_y[ijg] = sy; } /////////////////////////////////////////////////////////////////////////////// ///\brief computes matrix of linearised system for \c du, \c dv ///\param inv_denominator_u ///\param inv_denominator_v ///\param w ///\param h ///\param s /////////////////////////////////////////////////////////////////////////////// __global__ void prepare_sor_stage_2(Ptr2D texDiffX, Ptr2D texDiffY, float *inv_denominator_u, float *inv_denominator_v, int w, int h, int s) { __shared__ float sx[(PSOR_TILE_WIDTH+1) * (PSOR_TILE_HEIGHT+1)]; __shared__ float sy[(PSOR_TILE_WIDTH+1) * (PSOR_TILE_HEIGHT+1)]; //position within tile const int i = threadIdx.x; const int j = threadIdx.y; //position within smem arrays const int ijs = j*(PSOR_TILE_WIDTH+1) + i; //position within global memory const int ig = blockIdx.x * blockDim.x + threadIdx.x; const int jg = blockIdx.y * blockDim.y + threadIdx.y; const int ijg = jg*s + ig; int inside = ig < w && jg < h; float denom_u; float denom_v; if(inside) { denom_u = inv_denominator_u[ijg]; denom_v = inv_denominator_v[ijg]; } if(inside) { sx[ijs] = texDiffX(ijg); sy[ijs] = texDiffY(ijg); } else { sx[ijs] = 0.0f; sy[ijs] = 0.0f; } int up = ijs+PSOR_TILE_WIDTH+1; if(j == PSOR_TILE_HEIGHT-1) { if(jg < h-1 && inside) sy[up] = texDiffY(ijg + s); else sy[up] = 0.0f; } int right = ijs + 1; if(threadIdx.x == PSOR_TILE_WIDTH-1) { if(ig < w-1 && inside) sx[right] = texDiffX(ijg + 1); else sx[right] = 0.0f; } __syncthreads(); float diffusivity_sum; diffusivity_sum = sx[ijs] + sx[ijs+1] + sy[ijs] + sy[ijs+PSOR_TILE_WIDTH+1]; if(inside) { denom_u += diffusivity_sum; denom_v += diffusivity_sum; inv_denominator_u[ijg] = 1.0f/denom_u; inv_denominator_v[ijg] = 1.0f/denom_v; } } ///////////////////////////////////////////////////////////////////////////////////////// // Red-Black SOR ///////////////////////////////////////////////////////////////////////////////////////// template<int isBlack> __global__ void sor_pass(Ptr2D texU, Ptr2D texV, Ptr2D texDu, Ptr2D texDv, Ptr2D texDiffX, Ptr2D texDiffY, float *new_du, float *new_dv, const float *g_inv_denominator_u, const float *g_inv_denominator_v, const float *g_numerator_u, const float *g_numerator_v, const float *g_numerator_dudv, float omega, int width, int height, int stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if(i >= width || j >= height) return; const int pos = j * stride + i; const int pos_r = i < width - 1 ? pos + 1 : pos; const int pos_u = j < height - 1 ? pos + stride : pos; const int pos_d = j > 0 ? pos - stride : pos; const int pos_l = i > 0 ? 
pos - 1 : pos; //load smooth term float s_up, s_left, s_right, s_down; s_left = texDiffX(pos); s_down = texDiffY(pos); if(i < width-1) s_right = texDiffX(pos_r); else s_right = 0.0f; //Neumann BC if(j < height-1) s_up = texDiffY(pos_u); else s_up = 0.0f; //Neumann BC //load u, v and du, dv float u_up, u_left, u_right, u_down, u; float v_up, v_left, v_right, v_down, v; float du_up, du_left, du_right, du_down, du; float dv_up, dv_left, dv_right, dv_down, dv; u_left = texU(pos_l); u_right = texU(pos_r); u_down = texU(pos_d); u_up = texU(pos_u); u = texU(pos); v_left = texV(pos_l); v_right = texV(pos_r); v_down = texV(pos_d); v = texV(pos); v_up = texV(pos_u); du = texDu(pos); du_left = texDu(pos_l); du_right = texDu(pos_r); du_down = texDu(pos_d); du_up = texDu(pos_u); dv = texDv(pos); dv_left = texDv(pos_l); dv_right = texDv(pos_r); dv_down = texDv(pos_d); dv_up = texDv(pos_u); float numerator_dudv = g_numerator_dudv[pos]; if((i+j)%2 == isBlack) { // update du float numerator_u = (s_left*(u_left + du_left) + s_up*(u_up + du_up) + s_right*(u_right + du_right) + s_down*(u_down + du_down) - u * (s_left + s_right + s_up + s_down) - g_numerator_u[pos] - numerator_dudv*dv); du = (1.0f - omega) * du + omega * g_inv_denominator_u[pos] * numerator_u; // update dv float numerator_v = (s_left*(v_left + dv_left) + s_up*(v_up + dv_up) + s_right*(v_right + dv_right) + s_down*(v_down + dv_down) - v * (s_left + s_right + s_up + s_down) - g_numerator_v[pos] - numerator_dudv*du); dv = (1.0f - omega) * dv + omega * g_inv_denominator_v[pos] * numerator_v; } new_du[pos] = du; new_dv[pos] = dv; } /////////////////////////////////////////////////////////////////////////////// // utility functions /////////////////////////////////////////////////////////////////////////////// namespace { struct ImagePyramid { std::vector<FloatVector*> img0; std::vector<FloatVector*> img1; std::vector<Ncv32u> w; std::vector<Ncv32u> h; explicit ImagePyramid(int outer_iterations) { img0.reserve(outer_iterations); img1.reserve(outer_iterations); w.reserve(outer_iterations); h.reserve(outer_iterations); } ~ImagePyramid() { w.clear(); h.clear(); for (int i = static_cast<int>(img0.size()) - 1; i >= 0; --i) { delete img1[i]; delete img0[i]; } img0.clear(); img1.clear(); } }; } ///////////////////////////////////////////////////////////////////////////////////////// // MAIN FUNCTION ///////////////////////////////////////////////////////////////////////////////////////// NCVStatus NCVBroxOpticalFlow(const NCVBroxOpticalFlowDescriptor desc, INCVMemAllocator &gpu_mem_allocator, const NCVMatrix<Ncv32f> &frame0, const NCVMatrix<Ncv32f> &frame1, NCVMatrix<Ncv32f> &uOut, NCVMatrix<Ncv32f> &vOut, hipStream_t stream) { ncvAssertPrintReturn(desc.alpha > 0.0f , "Invalid alpha" , NCV_INCONSISTENT_INPUT); ncvAssertPrintReturn(desc.gamma >= 0.0f , "Invalid gamma" , NCV_INCONSISTENT_INPUT); ncvAssertPrintReturn(desc.number_of_inner_iterations > 0 , "Invalid number of inner iterations" , NCV_INCONSISTENT_INPUT); ncvAssertPrintReturn(desc.number_of_outer_iterations > 0 , "Invalid number of outer iterations" , NCV_INCONSISTENT_INPUT); ncvAssertPrintReturn(desc.number_of_solver_iterations > 0, "Invalid number of solver iterations", NCV_INCONSISTENT_INPUT); const Ncv32u kSourceWidth = frame0.width(); const Ncv32u kSourceHeight = frame0.height(); ncvAssertPrintReturn(frame1.width() == kSourceWidth && frame1.height() == kSourceHeight, "Frame dims do not match", NCV_INCONSISTENT_INPUT); ncvAssertReturn(uOut.width() == kSourceWidth && vOut.width() == kSourceWidth 
&& uOut.height() == kSourceHeight && vOut.height() == kSourceHeight, NCV_INCONSISTENT_INPUT); ncvAssertReturn(gpu_mem_allocator.isInitialized(), NCV_ALLOCATOR_NOT_INITIALIZED); bool kSkipProcessing = gpu_mem_allocator.isCounting(); int cuda_device; ncvAssertCUDAReturn(hipGetDevice(&cuda_device), NCV_CUDA_ERROR); hipDeviceProp_t device_props; ncvAssertCUDAReturn(hipGetDeviceProperties(&device_props, cuda_device), NCV_CUDA_ERROR); Ncv32u alignmentValue = gpu_mem_allocator.alignment (); const Ncv32u kStrideAlignmentFloat = alignmentValue / sizeof(float); const Ncv32u kSourcePitch = alignUp(kSourceWidth, kStrideAlignmentFloat) * sizeof(float); const Ncv32f scale_factor = desc.scale_factor; const Ncv32f alpha = desc.alpha; const Ncv32f gamma = desc.gamma; const Ncv32u kSizeInPixelsAligned = alignUp(kSourceWidth, kStrideAlignmentFloat)*kSourceHeight; #if defined SAFE_VECTOR_DECL #undef SAFE_VECTOR_DECL #endif #define SAFE_VECTOR_DECL(name, allocator, size) \ FloatVector name((allocator), (size)); \ ncvAssertReturn(name.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); // matrix elements SAFE_VECTOR_DECL(diffusivity_x, gpu_mem_allocator, kSizeInPixelsAligned); SAFE_VECTOR_DECL(diffusivity_y, gpu_mem_allocator, kSizeInPixelsAligned); SAFE_VECTOR_DECL(denom_u, gpu_mem_allocator, kSizeInPixelsAligned); SAFE_VECTOR_DECL(denom_v, gpu_mem_allocator, kSizeInPixelsAligned); SAFE_VECTOR_DECL(num_dudv, gpu_mem_allocator, kSizeInPixelsAligned); SAFE_VECTOR_DECL(num_u, gpu_mem_allocator, kSizeInPixelsAligned); SAFE_VECTOR_DECL(num_v, gpu_mem_allocator, kSizeInPixelsAligned); // flow components SAFE_VECTOR_DECL(u, gpu_mem_allocator, kSizeInPixelsAligned); SAFE_VECTOR_DECL(v, gpu_mem_allocator, kSizeInPixelsAligned); SAFE_VECTOR_DECL(u_new, gpu_mem_allocator, kSizeInPixelsAligned); SAFE_VECTOR_DECL(v_new, gpu_mem_allocator, kSizeInPixelsAligned); // flow increments SAFE_VECTOR_DECL(du, gpu_mem_allocator, kSizeInPixelsAligned); SAFE_VECTOR_DECL(dv, gpu_mem_allocator, kSizeInPixelsAligned); SAFE_VECTOR_DECL(du_new, gpu_mem_allocator, kSizeInPixelsAligned); SAFE_VECTOR_DECL(dv_new, gpu_mem_allocator, kSizeInPixelsAligned); // temporary storage SAFE_VECTOR_DECL(device_buffer, gpu_mem_allocator, alignUp(kSourceWidth, kStrideAlignmentFloat) * alignUp(kSourceHeight, kStrideAlignmentFloat)); // image derivatives SAFE_VECTOR_DECL(Ix, gpu_mem_allocator, kSizeInPixelsAligned); SAFE_VECTOR_DECL(Ixx, gpu_mem_allocator, kSizeInPixelsAligned); SAFE_VECTOR_DECL(Ix0, gpu_mem_allocator, kSizeInPixelsAligned); SAFE_VECTOR_DECL(Iy, gpu_mem_allocator, kSizeInPixelsAligned); SAFE_VECTOR_DECL(Iyy, gpu_mem_allocator, kSizeInPixelsAligned); SAFE_VECTOR_DECL(Iy0, gpu_mem_allocator, kSizeInPixelsAligned); SAFE_VECTOR_DECL(Ixy, gpu_mem_allocator, kSizeInPixelsAligned); // spatial derivative filter size const int kDFilterSize = 5; SAFE_VECTOR_DECL(derivativeFilter, gpu_mem_allocator, kDFilterSize); if (!kSkipProcessing) { const float derivativeFilterHost[kDFilterSize] = {1.0f, -8.0f, 0.0f, 8.0f, -1.0f}; ncvAssertCUDAReturn(hipMemcpy(derivativeFilter.ptr(), derivativeFilterHost, sizeof(float) * kDFilterSize, hipMemcpyHostToDevice), NCV_CUDA_ERROR); } //prepare image pyramid ImagePyramid pyr(desc.number_of_outer_iterations); hipChannelFormatDesc channel_desc = hipCreateChannelDesc<float>(); float scale = 1.0f; //cuda arrays for frames std::unique_ptr<FloatVector> pI0(new FloatVector(gpu_mem_allocator, kSizeInPixelsAligned)); ncvAssertReturn(pI0->isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); std::unique_ptr<FloatVector> pI1(new 
FloatVector(gpu_mem_allocator, kSizeInPixelsAligned)); ncvAssertReturn(pI1->isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); if (!kSkipProcessing) { //copy frame data to device size_t dst_width_in_bytes = alignUp(kSourceWidth, kStrideAlignmentFloat) * sizeof(float); size_t src_width_in_bytes = kSourceWidth * sizeof(float); size_t src_pitch_in_bytes = frame0.pitch(); ncvAssertCUDAReturn( hipMemcpy2DAsync(pI0->ptr(), dst_width_in_bytes, frame0.ptr(), src_pitch_in_bytes, src_width_in_bytes, kSourceHeight, hipMemcpyDeviceToDevice, stream), NCV_CUDA_ERROR ); ncvAssertCUDAReturn( hipMemcpy2DAsync(pI1->ptr(), dst_width_in_bytes, frame1.ptr(), src_pitch_in_bytes, src_width_in_bytes, kSourceHeight, hipMemcpyDeviceToDevice, stream), NCV_CUDA_ERROR ); } FloatVector* I0 = pI0.release(); FloatVector* I1 = pI1.release(); //prepare pyramid pyr.img0.push_back(I0); pyr.img1.push_back(I1); pyr.w.push_back(kSourceWidth); pyr.h.push_back(kSourceHeight); scale *= scale_factor; Ncv32u prev_level_width = kSourceWidth; Ncv32u prev_level_height = kSourceHeight; while((prev_level_width > 15) && (prev_level_height > 15) && (static_cast<Ncv32u>(pyr.img0.size()) < desc.number_of_outer_iterations)) { //current resolution Ncv32u level_width = static_cast<Ncv32u>(ceilf(kSourceWidth * scale)); Ncv32u level_height = static_cast<Ncv32u>(ceilf(kSourceHeight * scale)); Ncv32u level_width_aligned = alignUp(level_width, kStrideAlignmentFloat); Ncv32u buffer_size = alignUp(level_width, kStrideAlignmentFloat) * level_height; // buffer size in floats Ncv32u prev_level_pitch = alignUp(prev_level_width, kStrideAlignmentFloat) * sizeof(float); std::unique_ptr<FloatVector> level_frame0(new FloatVector(gpu_mem_allocator, buffer_size)); ncvAssertReturn(level_frame0->isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); std::unique_ptr<FloatVector> level_frame1(new FloatVector(gpu_mem_allocator, buffer_size)); ncvAssertReturn(level_frame1->isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); if (!kSkipProcessing) { ncvAssertCUDAReturn(hipStreamSynchronize(stream), NCV_CUDA_ERROR); NcvSize32u srcSize (prev_level_width, prev_level_height); NcvSize32u dstSize (level_width, level_height); NcvRect32u srcROI (0, 0, prev_level_width, prev_level_height); NcvRect32u dstROI (0, 0, level_width, level_height); // frame 0 ncvAssertReturnNcvStat( nppiStResize_32f_C1R (I0->ptr(), srcSize, prev_level_pitch, srcROI, level_frame0->ptr(), dstSize, level_width_aligned * sizeof (float), dstROI, scale_factor, scale_factor, nppStSupersample) ); // frame 1 ncvAssertReturnNcvStat( nppiStResize_32f_C1R (I1->ptr(), srcSize, prev_level_pitch, srcROI, level_frame1->ptr(), dstSize, level_width_aligned * sizeof (float), dstROI, scale_factor, scale_factor, nppStSupersample) ); } I0 = level_frame0.release(); I1 = level_frame1.release(); //store pointers pyr.img0.push_back(I0); pyr.img1.push_back(I1); pyr.w.push_back(level_width); pyr.h.push_back(level_height); scale *= scale_factor; prev_level_width = level_width; prev_level_height = level_height; } if (!kSkipProcessing) { //initial values for flow is 0 ncvAssertCUDAReturn(hipMemsetAsync(u.ptr(), 0, kSizeInPixelsAligned * sizeof(float), stream), NCV_CUDA_ERROR); ncvAssertCUDAReturn(hipMemsetAsync(v.ptr(), 0, kSizeInPixelsAligned * sizeof(float), stream), NCV_CUDA_ERROR); //select images with lowest resolution ncvAssertCUDAReturn(hipStreamSynchronize(stream), NCV_CUDA_ERROR); FloatVector* ptrU = &u; FloatVector* ptrV = &v; FloatVector* ptrUNew = &u_new; FloatVector* ptrVNew = &v_new; std::vector<FloatVector*>::const_reverse_iterator img0Iter = 
pyr.img0.rbegin(); std::vector<FloatVector*>::const_reverse_iterator img1Iter = pyr.img1.rbegin(); //outer loop //warping fixed point iteration while(!pyr.w.empty()) { //current grid dimensions const Ncv32u kLevelWidth = pyr.w.back(); const Ncv32u kLevelHeight = pyr.h.back(); const Ncv32u kLevelStride = alignUp(kLevelWidth, kStrideAlignmentFloat); //size of current image in bytes const int kLevelSizeInBytes = kLevelStride * kLevelHeight * sizeof(float); //number of points at current resolution const int kLevelSizeInPixels = kLevelStride * kLevelHeight; //initial guess for du and dv ncvAssertCUDAReturn(hipMemsetAsync(du.ptr(), 0, kLevelSizeInBytes, stream), NCV_CUDA_ERROR); ncvAssertCUDAReturn(hipMemsetAsync(dv.ptr(), 0, kLevelSizeInBytes, stream), NCV_CUDA_ERROR); I0 = *img0Iter; I1 = *img1Iter; ++img0Iter; ++img1Iter; Texture texI0(kLevelHeight, kLevelWidth, I0->ptr(), kLevelStride * sizeof(float), true, hipFilterModeLinear, hipAddressModeMirror); Texture texI1(kLevelHeight, kLevelWidth, I1->ptr(), kLevelStride * sizeof(float), true, hipFilterModeLinear, hipAddressModeMirror); //compute derivatives dim3 dBlocks(iDivUp(kLevelWidth, 32), iDivUp(kLevelHeight, 6)); //dim3 dThreads(32, 6); const int kPitchTex = kLevelStride * sizeof(float); NcvSize32u srcSize(kLevelWidth, kLevelHeight); Ncv32u nSrcStep = kLevelStride * sizeof(float); NcvRect32u oROI(0, 0, kLevelWidth, kLevelHeight); // Ix0 ncvAssertReturnNcvStat( nppiStFilterRowBorder_32f_C1R (I0->ptr(), srcSize, nSrcStep, Ix0.ptr(), srcSize, nSrcStep, oROI, nppStBorderMirror, derivativeFilter.ptr(), kDFilterSize, kDFilterSize/2, 1.0f/12.0f) ); // Iy0 ncvAssertReturnNcvStat( nppiStFilterColumnBorder_32f_C1R (I0->ptr(), srcSize, nSrcStep, Iy0.ptr(), srcSize, nSrcStep, oROI, nppStBorderMirror, derivativeFilter.ptr(), kDFilterSize, kDFilterSize/2, 1.0f/12.0f) ); // Ix ncvAssertReturnNcvStat( nppiStFilterRowBorder_32f_C1R (I1->ptr(), srcSize, nSrcStep, Ix.ptr(), srcSize, nSrcStep, oROI, nppStBorderMirror, derivativeFilter.ptr(), kDFilterSize, kDFilterSize/2, 1.0f/12.0f) ); // Iy ncvAssertReturnNcvStat( nppiStFilterColumnBorder_32f_C1R (I1->ptr(), srcSize, nSrcStep, Iy.ptr(), srcSize, nSrcStep, oROI, nppStBorderMirror, derivativeFilter.ptr(), kDFilterSize, kDFilterSize/2, 1.0f/12.0f) ); // Ixx ncvAssertReturnNcvStat( nppiStFilterRowBorder_32f_C1R (Ix.ptr(), srcSize, nSrcStep, Ixx.ptr(), srcSize, nSrcStep, oROI, nppStBorderMirror, derivativeFilter.ptr(), kDFilterSize, kDFilterSize/2, 1.0f/12.0f) ); // Iyy ncvAssertReturnNcvStat( nppiStFilterColumnBorder_32f_C1R (Iy.ptr(), srcSize, nSrcStep, Iyy.ptr(), srcSize, nSrcStep, oROI, nppStBorderMirror, derivativeFilter.ptr(), kDFilterSize, kDFilterSize/2, 1.0f/12.0f) ); // Ixy ncvAssertReturnNcvStat( nppiStFilterRowBorder_32f_C1R (Iy.ptr(), srcSize, nSrcStep, Ixy.ptr(), srcSize, nSrcStep, oROI, nppStBorderMirror, derivativeFilter.ptr(), kDFilterSize, kDFilterSize/2, 1.0f/12.0f) ); Texture texIx(kLevelHeight, kLevelWidth, Ix.ptr(), kPitchTex, true, hipFilterModeLinear, hipAddressModeMirror); Texture texIxx(kLevelHeight, kLevelWidth, Ixx.ptr(), kPitchTex, true, hipFilterModeLinear, hipAddressModeMirror); Texture texIx0(kLevelHeight, kLevelWidth, Ix0.ptr(), kPitchTex, true, hipFilterModeLinear, hipAddressModeMirror); Texture texIy(kLevelHeight, kLevelWidth, Iy.ptr(), kPitchTex, true, hipFilterModeLinear, hipAddressModeMirror); Texture texIyy(kLevelHeight, kLevelWidth, Iyy.ptr(), kPitchTex, true, hipFilterModeLinear, hipAddressModeMirror); Texture texIy0(kLevelHeight, kLevelWidth, Iy0.ptr(), kPitchTex, true, 
hipFilterModeLinear, hipAddressModeMirror); Texture texIxy(kLevelHeight, kLevelWidth, Ixy.ptr(), kPitchTex, true, hipFilterModeLinear, hipAddressModeMirror); Texture texDiffX(kLevelSizeInBytes, diffusivity_x.ptr()); Texture texDiffY(kLevelSizeInBytes, diffusivity_y.ptr()); // flow Texture texU(kLevelSizeInBytes, ptrU->ptr()); Texture texV(kLevelSizeInBytes, ptrV->ptr()); // flow increments Texture texDu(kLevelSizeInBytes, du.ptr()); Texture texDv(kLevelSizeInBytes, dv.ptr()); Texture texDuNew(kLevelSizeInBytes, du_new.ptr()); Texture texDvNew(kLevelSizeInBytes, dv_new.ptr()); dim3 psor_blocks(iDivUp(kLevelWidth, PSOR_TILE_WIDTH), iDivUp(kLevelHeight, PSOR_TILE_HEIGHT)); dim3 psor_threads(PSOR_TILE_WIDTH, PSOR_TILE_HEIGHT); dim3 sor_blocks(iDivUp(kLevelWidth, SOR_TILE_WIDTH), iDivUp(kLevelHeight, SOR_TILE_HEIGHT)); dim3 sor_threads(SOR_TILE_WIDTH, SOR_TILE_HEIGHT); // inner loop // lagged nonlinearity fixed point iteration ncvAssertCUDAReturn(hipStreamSynchronize(stream), NCV_CUDA_ERROR); for (Ncv32u current_inner_iteration = 0; current_inner_iteration < desc.number_of_inner_iterations; ++current_inner_iteration) { //compute coefficients hipLaunchKernelGGL(( prepare_sor_stage_1_tex), dim3(psor_blocks), dim3(psor_threads), 0, stream, texU, texV, texDu, texDv, texI0, texI1, texIx, texIxx, texIx0, texIy, texIyy, texIy0, texIxy, diffusivity_x.ptr(), diffusivity_y.ptr(), denom_u.ptr(), denom_v.ptr(), num_dudv.ptr(), num_u.ptr(), num_v.ptr(), kLevelWidth, kLevelHeight, kLevelStride, alpha, gamma); ncvAssertCUDALastErrorReturn(NCV_CUDA_ERROR); hipLaunchKernelGGL(( prepare_sor_stage_2), dim3(psor_blocks), dim3(psor_threads), 0, stream, texDiffX, texDiffY, denom_u.ptr(), denom_v.ptr(), kLevelWidth, kLevelHeight, kLevelStride); ncvAssertCUDALastErrorReturn(NCV_CUDA_ERROR); //solve linear system for (Ncv32u solver_iteration = 0; solver_iteration < desc.number_of_solver_iterations; ++solver_iteration) { float omega = 1.99f; hipLaunchKernelGGL(( sor_pass<0>), dim3(sor_blocks), dim3(sor_threads), 0, stream, texU, texV, texDu, texDv, texDiffX, texDiffY, du_new.ptr(), dv_new.ptr(), denom_u.ptr(), denom_v.ptr(), num_u.ptr(), num_v.ptr(), num_dudv.ptr(), omega, kLevelWidth, kLevelHeight, kLevelStride); ncvAssertCUDALastErrorReturn(NCV_CUDA_ERROR); hipLaunchKernelGGL(( sor_pass<1>), dim3(sor_blocks), dim3(sor_threads), 0, stream, texU, texV, texDuNew, texDvNew, texDiffX, texDiffY, du.ptr(), dv.ptr(), denom_u.ptr(), denom_v.ptr(), num_u.ptr(), num_v.ptr(),num_dudv.ptr(), omega, kLevelWidth, kLevelHeight, kLevelStride); ncvAssertCUDALastErrorReturn(NCV_CUDA_ERROR); }//end of solver loop }// end of inner loop //update u and v add(ptrU->ptr(), du.ptr(), kLevelSizeInPixels, stream); ncvAssertCUDALastErrorReturn(NCV_CUDA_ERROR); add(ptrV->ptr(), dv.ptr(), kLevelSizeInPixels, stream); ncvAssertCUDALastErrorReturn(NCV_CUDA_ERROR); //prolongate using texture pyr.w.pop_back(); pyr.h.pop_back(); if (!pyr.w.empty()) { //compute new image size Ncv32u nw = pyr.w.back(); Ncv32u nh = pyr.h.back(); Ncv32u ns = alignUp(nw, kStrideAlignmentFloat); dim3 p_blocks(iDivUp(nw, 32), iDivUp(nh, 8)); //dim3 p_threads(32, 8); NcvSize32u inner_srcSize (kLevelWidth, kLevelHeight); NcvSize32u dstSize (nw, nh); NcvRect32u srcROI (0, 0, kLevelWidth, kLevelHeight); NcvRect32u dstROI (0, 0, nw, nh); ncvAssertReturnNcvStat( nppiStResize_32f_C1R (ptrU->ptr(), inner_srcSize, kLevelStride * sizeof (float), srcROI, ptrUNew->ptr(), dstSize, ns * sizeof (float), dstROI, 1.0f/scale_factor, 1.0f/scale_factor, nppStBicubic) ); 
ScaleVector(ptrUNew->ptr(), ptrUNew->ptr(), 1.0f/scale_factor, ns * nh, stream); ncvAssertCUDALastErrorReturn(NCV_CUDA_ERROR); ncvAssertReturnNcvStat( nppiStResize_32f_C1R (ptrV->ptr(), inner_srcSize, kLevelStride * sizeof (float), srcROI, ptrVNew->ptr(), dstSize, ns * sizeof (float), dstROI, 1.0f/scale_factor, 1.0f/scale_factor, nppStBicubic) ); ScaleVector(ptrVNew->ptr(), ptrVNew->ptr(), 1.0f/scale_factor, ns * nh, stream); ncvAssertCUDALastErrorReturn((int)NCV_CUDA_ERROR); cv::cuda::device::swap<FloatVector*>(ptrU, ptrUNew); cv::cuda::device::swap<FloatVector*>(ptrV, ptrVNew); } scale /= scale_factor; } // end of warping iterations ncvAssertCUDAReturn(hipStreamSynchronize(stream), (int)NCV_CUDA_ERROR); ncvAssertCUDAReturn( hipMemcpy2DAsync (uOut.ptr(), uOut.pitch(), ptrU->ptr(), kSourcePitch, kSourceWidth*sizeof(float), kSourceHeight, hipMemcpyDeviceToDevice, stream), (int)NCV_CUDA_ERROR ); ncvAssertCUDAReturn( hipMemcpy2DAsync (vOut.ptr(), vOut.pitch(), ptrV->ptr(), kSourcePitch, kSourceWidth*sizeof(float), kSourceHeight, hipMemcpyDeviceToDevice, stream), (int)NCV_CUDA_ERROR ); ncvAssertCUDAReturn(hipStreamSynchronize(stream), (int)NCV_CUDA_ERROR); } return NCV_SUCCESS; }
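The .hip translation above and the CUDA original that follows take all of their tuning parameters through NCVBroxOpticalFlowDescriptor, whose fields are checked at the top of NCVBroxOpticalFlow (alpha must be positive, gamma non-negative, every iteration count positive). A minimal sketch of filling that descriptor follows; the numeric values are illustrative assumptions only and are not taken from either file.

#include "opencv2/cudalegacy/NCVBroxOpticalFlow.hpp"

// Hedged sketch: the numeric values below are illustrative assumptions only.
static NCVBroxOpticalFlowDescriptor makeBroxDescriptor()
{
    NCVBroxOpticalFlowDescriptor desc;
    desc.alpha = 0.197f;                      // flow smoothness weight, must be > 0
    desc.gamma = 50.0f;                       // gradient-constancy (edge) weight, must be >= 0
    desc.scale_factor = 0.8f;                 // pyramid downscale factor per level
    desc.number_of_inner_iterations  = 5;     // lagged-nonlinearity fixed-point iterations
    desc.number_of_outer_iterations  = 150;   // upper bound on pyramid levels (warping iterations)
    desc.number_of_solver_iterations = 10;    // red-black SOR sweeps per inner iteration
    return desc;
}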
e7ac064e8c69a4557eeb95401531002af01045b6.cu
/*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. // //M*/ //////////////////////////////////////////////////////////////////////////////// // // NVIDIA CUDA implementation of Brox et al Optical Flow algorithm // // Algorithm is explained in the original paper: // T. Brox, A. Bruhn, N. Papenberg, J. Weickert: // High accuracy optical flow estimation based on a theory for warping. // ECCV 2004. // // Implementation by Mikhail Smirnov // email: [email protected], [email protected] // // Credits for help with the code to: // Alexey Mendelenko, Anton Obukhov, and Alexander Kharlamov. 
// //////////////////////////////////////////////////////////////////////////////// #include <iostream> #include <vector> #include <memory> #include "opencv2/core/cuda/utility.hpp" #include "opencv2/cudalegacy/NPP_staging.hpp" #include "opencv2/cudalegacy/NCVBroxOpticalFlow.hpp" #include <opencv2/cudev/ptr2d/texture.hpp> typedef NCVVectorAlloc<Ncv32f> FloatVector; typedef cv::cudev::TexturePtr<float> Ptr2D; typedef cv::cudev::Texture<float> Texture; ///////////////////////////////////////////////////////////////////////////////////////// // Implementation specific constants ///////////////////////////////////////////////////////////////////////////////////////// __device__ const float eps2 = 1e-6f; ///////////////////////////////////////////////////////////////////////////////////////// // Additional defines ///////////////////////////////////////////////////////////////////////////////////////// // rounded up division inline int iDivUp(int a, int b) { return (a + b - 1)/b; } ///////////////////////////////////////////////////////////////////////////////////////// // SUPPLEMENTARY FUNCTIONS ///////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////// /// \brief performs pointwise summation of two vectors stored in device memory /// \param d_res - pointer to resulting vector (device memory) /// \param d_op1 - term #1 (device memory) /// \param d_op2 - term #2 (device memory) /// \param len - vector size /////////////////////////////////////////////////////////////////////////////// __global__ void pointwise_add(float *d_res, const float *d_op1, const float *d_op2, const int len) { const int pos = blockIdx.x*blockDim.x + threadIdx.x; if(pos >= len) return; d_res[pos] = d_op1[pos] + d_op2[pos]; } /////////////////////////////////////////////////////////////////////////////// /// \brief wrapper for summation kernel. /// Computes \b op1 + \b op2 and stores result to \b res /// \param res array, containing op1 + op2 (device memory) /// \param op1 term #1 (device memory) /// \param op2 term #2 (device memory) /// \param count vector size /////////////////////////////////////////////////////////////////////////////// static void add(float *res, const float *op1, const float *op2, const int count, cudaStream_t stream) { dim3 threads(256); dim3 blocks(iDivUp(count, threads.x)); pointwise_add<<<blocks, threads, 0, stream>>>(res, op1, op2, count); } /////////////////////////////////////////////////////////////////////////////// /// \brief wrapper for summation kernel. 
/// Increments \b res by \b rhs /// \param res initial vector, will be replaced with result (device memory) /// \param rhs increment (device memory) /// \param count vector size /////////////////////////////////////////////////////////////////////////////// static void add(float *res, const float *rhs, const int count, cudaStream_t stream) { add(res, res, rhs, count, stream); } /////////////////////////////////////////////////////////////////////////////// /// \brief kernel for scaling vector by scalar /// \param d_res scaled vector (device memory) /// \param d_src source vector (device memory) /// \param scale scalar to scale by /// \param len vector size (number of elements) /////////////////////////////////////////////////////////////////////////////// __global__ void scaleVector(float *d_res, const float *d_src, float scale, const int len) { const int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= len) return; d_res[pos] = d_src[pos] * scale; } /////////////////////////////////////////////////////////////////////////////// /// \brief scale vector by scalar /// /// kernel wrapper /// \param d_res scaled vector (device memory) /// \param d_src source vector (device memory) /// \param scale scalar to scale by /// \param len vector size (number of elements) /// \param stream CUDA stream /////////////////////////////////////////////////////////////////////////////// static void ScaleVector(float *d_res, const float *d_src, float scale, const int len, cudaStream_t stream) { dim3 threads(256); dim3 blocks(iDivUp(len, threads.x)); scaleVector<<<blocks, threads, 0, stream>>>(d_res, d_src, scale, len); } const int SOR_TILE_WIDTH = 32; const int SOR_TILE_HEIGHT = 6; const int PSOR_TILE_WIDTH = 32; const int PSOR_TILE_HEIGHT = 6; const int PSOR_PITCH = PSOR_TILE_WIDTH + 4; const int PSOR_HEIGHT = PSOR_TILE_HEIGHT + 4; /////////////////////////////////////////////////////////////////////////////// ///\brief Utility function. Compute smooth term diffusivity along x axis ///\param s (out) pointer to memory location for result (diffusivity) ///\param pos (in) position within shared memory array containing \b u ///\param u (in) shared memory array containing \b u ///\param v (in) shared memory array containing \b v ///\param du (in) shared memory array containing \b du ///\param dv (in) shared memory array containing \b dv /////////////////////////////////////////////////////////////////////////////// __forceinline__ __device__ void diffusivity_along_x(float *s, int pos, const float *u, const float *v, const float *du, const float *dv) { //x derivative between pixels (i,j) and (i-1,j) const int left = pos-1; float u_x = u[pos] + du[pos] - u[left] - du[left]; float v_x = v[pos] + dv[pos] - v[left] - dv[left]; const int up = pos + PSOR_PITCH; const int down = pos - PSOR_PITCH; const int up_left = up - 1; const int down_left = down-1; //y derivative between pixels (i,j) and (i-1,j) float u_y = 0.25f*(u[up] + du[up] + u[up_left] + du[up_left] - u[down] - du[down] - u[down_left] - du[down_left]); float v_y = 0.25f*(v[up] + dv[up] + v[up_left] + dv[up_left] - v[down] - dv[down] - v[down_left] - dv[down_left]); *s = 0.5f / sqrtf(u_x*u_x + v_x*v_x + u_y*u_y + v_y*v_y + eps2); } /////////////////////////////////////////////////////////////////////////////// ///\brief Utility function. 
Compute smooth term diffusivity along y axis ///\param s (out) pointer to memory location for result (diffusivity) ///\param pos (in) position within shared memory array containing \b u ///\param u (in) shared memory array containing \b u ///\param v (in) shared memory array containing \b v ///\param du (in) shared memory array containing \b du ///\param dv (in) shared memory array containing \b dv /////////////////////////////////////////////////////////////////////////////// __forceinline__ __device__ void diffusivity_along_y(float *s, int pos, const float *u, const float *v, const float *du, const float *dv) { //y derivative between pixels (i,j) and (i,j-1) const int down = pos-PSOR_PITCH; float u_y = u[pos] + du[pos] - u[down] - du[down]; float v_y = v[pos] + dv[pos] - v[down] - dv[down]; const int right = pos + 1; const int left = pos - 1; const int down_right = down + 1; const int down_left = down - 1; //x derivative between pixels (i,j) and (i,j-1); float u_x = 0.25f*(u[right] + u[down_right] + du[right] + du[down_right] - u[left] - u[down_left] - du[left] - du[down_left]); float v_x = 0.25f*(v[right] + v[down_right] + dv[right] + dv[down_right] - v[left] - v[down_left] - dv[left] - dv[down_left]); *s = 0.5f/sqrtf(u_x*u_x + v_x*v_x + u_y*u_y + v_y*v_y + eps2); } /////////////////////////////////////////////////////////////////////////////// ///\brief Utility function. Load element of 2D global memory to shared memory ///\param smem pointer to shared memory array ///\param is shared memory array column ///\param js shared memory array row ///\param w number of columns in global memory array ///\param h number of rows in global memory array ///\param p global memory array pitch in floats /////////////////////////////////////////////////////////////////////////////// __forceinline__ __device__ void load_array_element(Ptr2D texSrc, float *smem, int is, int js, int i, int j, int w, int h, int p) { //position within shared memory array const int ijs = js * PSOR_PITCH + is; //mirror reflection across borders i = max(i, -i-1); i = min(i, w-i+w-1); j = max(j, -j-1); j = min(j, h-j+h-1); const int pos = j * p + i; smem[ijs] = texSrc(pos); } /////////////////////////////////////////////////////////////////////////////// ///\brief Utility function. 
Load part (tile) of 2D global memory to shared memory ///\param smem pointer to target shared memory array ///\param ig column number within source ///\param jg row number within source ///\param w number of columns in global memory array ///\param h number of rows in global memory array ///\param p global memory array pitch in floats /////////////////////////////////////////////////////////////////////////////// __forceinline__ __device__ void load_array(Ptr2D texSrc, float *smem, int ig, int jg, int w, int h, int p) { const int i = threadIdx.x + 2; const int j = threadIdx.y + 2; load_array_element(texSrc, smem, i, j, ig, jg, w, h, p);//load current pixel __syncthreads(); if(threadIdx.y < 2) { //load bottom shadow elements load_array_element(texSrc, smem, i, j-2, ig, jg-2, w, h, p); if(threadIdx.x < 2) { //load bottom right shadow elements load_array_element(texSrc, smem, i+PSOR_TILE_WIDTH, j-2, ig+PSOR_TILE_WIDTH, jg-2, w, h, p); //load middle right shadow elements load_array_element(texSrc, smem, i+PSOR_TILE_WIDTH, j, ig+PSOR_TILE_WIDTH, jg, w, h, p); } else if(threadIdx.x >= PSOR_TILE_WIDTH-2) { //load bottom left shadow elements load_array_element(texSrc, smem, i-PSOR_TILE_WIDTH, j-2, ig-PSOR_TILE_WIDTH, jg-2, w, h, p); //load middle left shadow elements load_array_element(texSrc, smem, i-PSOR_TILE_WIDTH, j, ig-PSOR_TILE_WIDTH, jg, w, h, p); } } else if(threadIdx.y >= PSOR_TILE_HEIGHT-2) { //load upper shadow elements load_array_element(texSrc, smem, i, j+2, ig, jg+2, w, h, p); if(threadIdx.x < 2) { //load upper right shadow elements load_array_element(texSrc, smem, i+PSOR_TILE_WIDTH, j+2, ig+PSOR_TILE_WIDTH, jg+2, w, h, p); //load middle right shadow elements load_array_element(texSrc, smem, i+PSOR_TILE_WIDTH, j, ig+PSOR_TILE_WIDTH, jg, w, h, p); } else if(threadIdx.x >= PSOR_TILE_WIDTH-2) { //load upper left shadow elements load_array_element(texSrc, smem, i-PSOR_TILE_WIDTH, j+2, ig-PSOR_TILE_WIDTH, jg+2, w, h, p); //load middle left shadow elements load_array_element(texSrc, smem, i-PSOR_TILE_WIDTH, j, ig-PSOR_TILE_WIDTH, jg, w, h, p); } } else { //load middle shadow elements if(threadIdx.x < 2) { //load middle right shadow elements load_array_element(texSrc, smem, i+PSOR_TILE_WIDTH, j, ig+PSOR_TILE_WIDTH, jg, w, h, p); } else if(threadIdx.x >= PSOR_TILE_WIDTH-2) { //load middle left shadow elements load_array_element(texSrc, smem, i-PSOR_TILE_WIDTH, j, ig-PSOR_TILE_WIDTH, jg, w, h, p); } } __syncthreads(); } /////////////////////////////////////////////////////////////////////////////// /// \brief computes matrix of linearised system for \c du, \c dv /// Computed values reside in GPU memory. \n /// Matrix computation is divided into two steps. 
This kernel performs first step\n /// - compute smoothness term diffusivity between pixels - psi dash smooth /// - compute robustness factor in the data term - psi dash data /// \param diffusivity_x (in/out) diffusivity between pixels along x axis in smoothness term /// \param diffusivity_y (in/out) diffusivity between pixels along y axis in smoothness term /// \param denominator_u (in/out) precomputed part of expression for new du value in SOR iteration /// \param denominator_v (in/out) precomputed part of expression for new dv value in SOR iteration /// \param numerator_dudv (in/out) precomputed part of expression for new du and dv value in SOR iteration /// \param numerator_u (in/out) precomputed part of expression for new du value in SOR iteration /// \param numerator_v (in/out) precomputed part of expression for new dv value in SOR iteration /// \param w (in) frame width /// \param h (in) frame height /// \param pitch (in) pitch in floats /// \param alpha (in) alpha in Brox model (flow smoothness) /// \param gamma (in) gamma in Brox model (edge importance) /////////////////////////////////////////////////////////////////////////////// __global__ void prepare_sor_stage_1_tex(Ptr2D texU, Ptr2D texV, Ptr2D texDu, Ptr2D texDv, Ptr2D texI0, Ptr2D texI1, Ptr2D texIx, Ptr2D texIxx, Ptr2D texIx0, Ptr2D texIy, Ptr2D texIyy, Ptr2D texIy0, Ptr2D texIxy, float *diffusivity_x, float *diffusivity_y, float *denominator_u, float *denominator_v, float *numerator_dudv, float *numerator_u, float *numerator_v, int w, int h, int s, float alpha, float gamma) { __shared__ float u[PSOR_PITCH * PSOR_HEIGHT]; __shared__ float v[PSOR_PITCH * PSOR_HEIGHT]; __shared__ float du[PSOR_PITCH * PSOR_HEIGHT]; __shared__ float dv[PSOR_PITCH * PSOR_HEIGHT]; //position within tile const int i = threadIdx.x; const int j = threadIdx.y; //position within smem arrays const int ijs = (j+2) * PSOR_PITCH + i + 2; //position within global memory const int ig = blockIdx.x * blockDim.x + threadIdx.x; const int jg = blockIdx.y * blockDim.y + threadIdx.y; const int ijg = jg * s + ig; //position within texture float x = (float)ig + 0.5f; float y = (float)jg + 0.5f; //load u and v to smem load_array(texU, u, ig, jg, w, h, s); load_array(texV, v, ig, jg, w, h, s); load_array(texDu, du, ig, jg, w, h, s); load_array(texDv, dv, ig, jg, w, h, s); //warped position float wx = (x + u[ijs])/(float)w; float wy = (y + v[ijs])/(float)h; x /= (float)w; y /= (float)h; //compute image derivatives const float Iz = texI1(wy, wx) - texI0(y,x); const float Ix = texIx(wy, wx); const float Ixz = Ix - texIx0(y, x); const float Ixy = texIxy(wy, wx); const float Ixx = texIxx(wy, wx); const float Iy = texIy(wy, wx); const float Iyz = Iy - texIy0(y, x); const float Iyy = texIyy(wy, wx); //compute data term float q0, q1, q2; q0 = Iz + Ix * du[ijs] + Iy * dv[ijs]; q1 = Ixz + Ixx * du[ijs] + Ixy * dv[ijs]; q2 = Iyz + Ixy * du[ijs] + Iyy * dv[ijs]; float data_term = 0.5f * rsqrtf(q0*q0 + gamma*(q1*q1 + q2*q2) + eps2); //scale data term by 1/alpha data_term /= alpha; //compute smoothness term (diffusivity) float sx, sy; if(ig >= w || jg >= h) return; diffusivity_along_x(&sx, ijs, u, v, du, dv); diffusivity_along_y(&sy, ijs, u, v, du, dv); if(ig == 0) sx = 0.0f; if(jg == 0) sy = 0.0f; numerator_dudv[ijg] = data_term * (Ix*Iy + gamma * Ixy*(Ixx + Iyy)); numerator_u[ijg] = data_term * (Ix*Iz + gamma * (Ixx*Ixz + Ixy*Iyz)); numerator_v[ijg] = data_term * (Iy*Iz + gamma * (Iyy*Iyz + Ixy*Ixz)); denominator_u[ijg] = data_term * (Ix*Ix + gamma * (Ixy*Ixy + Ixx*Ixx)); 
denominator_v[ijg] = data_term * (Iy*Iy + gamma * (Ixy*Ixy + Iyy*Iyy)); diffusivity_x[ijg] = sx; diffusivity_y[ijg] = sy; } /////////////////////////////////////////////////////////////////////////////// ///\brief computes matrix of linearised system for \c du, \c dv ///\param inv_denominator_u ///\param inv_denominator_v ///\param w ///\param h ///\param s /////////////////////////////////////////////////////////////////////////////// __global__ void prepare_sor_stage_2(Ptr2D texDiffX, Ptr2D texDiffY, float *inv_denominator_u, float *inv_denominator_v, int w, int h, int s) { __shared__ float sx[(PSOR_TILE_WIDTH+1) * (PSOR_TILE_HEIGHT+1)]; __shared__ float sy[(PSOR_TILE_WIDTH+1) * (PSOR_TILE_HEIGHT+1)]; //position within tile const int i = threadIdx.x; const int j = threadIdx.y; //position within smem arrays const int ijs = j*(PSOR_TILE_WIDTH+1) + i; //position within global memory const int ig = blockIdx.x * blockDim.x + threadIdx.x; const int jg = blockIdx.y * blockDim.y + threadIdx.y; const int ijg = jg*s + ig; int inside = ig < w && jg < h; float denom_u; float denom_v; if(inside) { denom_u = inv_denominator_u[ijg]; denom_v = inv_denominator_v[ijg]; } if(inside) { sx[ijs] = texDiffX(ijg); sy[ijs] = texDiffY(ijg); } else { sx[ijs] = 0.0f; sy[ijs] = 0.0f; } int up = ijs+PSOR_TILE_WIDTH+1; if(j == PSOR_TILE_HEIGHT-1) { if(jg < h-1 && inside) sy[up] = texDiffY(ijg + s); else sy[up] = 0.0f; } int right = ijs + 1; if(threadIdx.x == PSOR_TILE_WIDTH-1) { if(ig < w-1 && inside) sx[right] = texDiffX(ijg + 1); else sx[right] = 0.0f; } __syncthreads(); float diffusivity_sum; diffusivity_sum = sx[ijs] + sx[ijs+1] + sy[ijs] + sy[ijs+PSOR_TILE_WIDTH+1]; if(inside) { denom_u += diffusivity_sum; denom_v += diffusivity_sum; inv_denominator_u[ijg] = 1.0f/denom_u; inv_denominator_v[ijg] = 1.0f/denom_v; } } ///////////////////////////////////////////////////////////////////////////////////////// // Red-Black SOR ///////////////////////////////////////////////////////////////////////////////////////// template<int isBlack> __global__ void sor_pass(Ptr2D texU, Ptr2D texV, Ptr2D texDu, Ptr2D texDv, Ptr2D texDiffX, Ptr2D texDiffY, float *new_du, float *new_dv, const float *g_inv_denominator_u, const float *g_inv_denominator_v, const float *g_numerator_u, const float *g_numerator_v, const float *g_numerator_dudv, float omega, int width, int height, int stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if(i >= width || j >= height) return; const int pos = j * stride + i; const int pos_r = i < width - 1 ? pos + 1 : pos; const int pos_u = j < height - 1 ? pos + stride : pos; const int pos_d = j > 0 ? pos - stride : pos; const int pos_l = i > 0 ? 
pos - 1 : pos; //load smooth term float s_up, s_left, s_right, s_down; s_left = texDiffX(pos); s_down = texDiffY(pos); if(i < width-1) s_right = texDiffX(pos_r); else s_right = 0.0f; //Neumann BC if(j < height-1) s_up = texDiffY(pos_u); else s_up = 0.0f; //Neumann BC //load u, v and du, dv float u_up, u_left, u_right, u_down, u; float v_up, v_left, v_right, v_down, v; float du_up, du_left, du_right, du_down, du; float dv_up, dv_left, dv_right, dv_down, dv; u_left = texU(pos_l); u_right = texU(pos_r); u_down = texU(pos_d); u_up = texU(pos_u); u = texU(pos); v_left = texV(pos_l); v_right = texV(pos_r); v_down = texV(pos_d); v = texV(pos); v_up = texV(pos_u); du = texDu(pos); du_left = texDu(pos_l); du_right = texDu(pos_r); du_down = texDu(pos_d); du_up = texDu(pos_u); dv = texDv(pos); dv_left = texDv(pos_l); dv_right = texDv(pos_r); dv_down = texDv(pos_d); dv_up = texDv(pos_u); float numerator_dudv = g_numerator_dudv[pos]; if((i+j)%2 == isBlack) { // update du float numerator_u = (s_left*(u_left + du_left) + s_up*(u_up + du_up) + s_right*(u_right + du_right) + s_down*(u_down + du_down) - u * (s_left + s_right + s_up + s_down) - g_numerator_u[pos] - numerator_dudv*dv); du = (1.0f - omega) * du + omega * g_inv_denominator_u[pos] * numerator_u; // update dv float numerator_v = (s_left*(v_left + dv_left) + s_up*(v_up + dv_up) + s_right*(v_right + dv_right) + s_down*(v_down + dv_down) - v * (s_left + s_right + s_up + s_down) - g_numerator_v[pos] - numerator_dudv*du); dv = (1.0f - omega) * dv + omega * g_inv_denominator_v[pos] * numerator_v; } new_du[pos] = du; new_dv[pos] = dv; } /////////////////////////////////////////////////////////////////////////////// // utility functions /////////////////////////////////////////////////////////////////////////////// namespace { struct ImagePyramid { std::vector<FloatVector*> img0; std::vector<FloatVector*> img1; std::vector<Ncv32u> w; std::vector<Ncv32u> h; explicit ImagePyramid(int outer_iterations) { img0.reserve(outer_iterations); img1.reserve(outer_iterations); w.reserve(outer_iterations); h.reserve(outer_iterations); } ~ImagePyramid() { w.clear(); h.clear(); for (int i = static_cast<int>(img0.size()) - 1; i >= 0; --i) { delete img1[i]; delete img0[i]; } img0.clear(); img1.clear(); } }; } ///////////////////////////////////////////////////////////////////////////////////////// // MAIN FUNCTION ///////////////////////////////////////////////////////////////////////////////////////// NCVStatus NCVBroxOpticalFlow(const NCVBroxOpticalFlowDescriptor desc, INCVMemAllocator &gpu_mem_allocator, const NCVMatrix<Ncv32f> &frame0, const NCVMatrix<Ncv32f> &frame1, NCVMatrix<Ncv32f> &uOut, NCVMatrix<Ncv32f> &vOut, cudaStream_t stream) { ncvAssertPrintReturn(desc.alpha > 0.0f , "Invalid alpha" , NCV_INCONSISTENT_INPUT); ncvAssertPrintReturn(desc.gamma >= 0.0f , "Invalid gamma" , NCV_INCONSISTENT_INPUT); ncvAssertPrintReturn(desc.number_of_inner_iterations > 0 , "Invalid number of inner iterations" , NCV_INCONSISTENT_INPUT); ncvAssertPrintReturn(desc.number_of_outer_iterations > 0 , "Invalid number of outer iterations" , NCV_INCONSISTENT_INPUT); ncvAssertPrintReturn(desc.number_of_solver_iterations > 0, "Invalid number of solver iterations", NCV_INCONSISTENT_INPUT); const Ncv32u kSourceWidth = frame0.width(); const Ncv32u kSourceHeight = frame0.height(); ncvAssertPrintReturn(frame1.width() == kSourceWidth && frame1.height() == kSourceHeight, "Frame dims do not match", NCV_INCONSISTENT_INPUT); ncvAssertReturn(uOut.width() == kSourceWidth && vOut.width() == 
kSourceWidth && uOut.height() == kSourceHeight && vOut.height() == kSourceHeight, NCV_INCONSISTENT_INPUT); ncvAssertReturn(gpu_mem_allocator.isInitialized(), NCV_ALLOCATOR_NOT_INITIALIZED); bool kSkipProcessing = gpu_mem_allocator.isCounting(); int cuda_device; ncvAssertCUDAReturn(cudaGetDevice(&cuda_device), NCV_CUDA_ERROR); cudaDeviceProp device_props; ncvAssertCUDAReturn(cudaGetDeviceProperties(&device_props, cuda_device), NCV_CUDA_ERROR); Ncv32u alignmentValue = gpu_mem_allocator.alignment (); const Ncv32u kStrideAlignmentFloat = alignmentValue / sizeof(float); const Ncv32u kSourcePitch = alignUp(kSourceWidth, kStrideAlignmentFloat) * sizeof(float); const Ncv32f scale_factor = desc.scale_factor; const Ncv32f alpha = desc.alpha; const Ncv32f gamma = desc.gamma; const Ncv32u kSizeInPixelsAligned = alignUp(kSourceWidth, kStrideAlignmentFloat)*kSourceHeight; #if defined SAFE_VECTOR_DECL #undef SAFE_VECTOR_DECL #endif #define SAFE_VECTOR_DECL(name, allocator, size) \ FloatVector name((allocator), (size)); \ ncvAssertReturn(name.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); // matrix elements SAFE_VECTOR_DECL(diffusivity_x, gpu_mem_allocator, kSizeInPixelsAligned); SAFE_VECTOR_DECL(diffusivity_y, gpu_mem_allocator, kSizeInPixelsAligned); SAFE_VECTOR_DECL(denom_u, gpu_mem_allocator, kSizeInPixelsAligned); SAFE_VECTOR_DECL(denom_v, gpu_mem_allocator, kSizeInPixelsAligned); SAFE_VECTOR_DECL(num_dudv, gpu_mem_allocator, kSizeInPixelsAligned); SAFE_VECTOR_DECL(num_u, gpu_mem_allocator, kSizeInPixelsAligned); SAFE_VECTOR_DECL(num_v, gpu_mem_allocator, kSizeInPixelsAligned); // flow components SAFE_VECTOR_DECL(u, gpu_mem_allocator, kSizeInPixelsAligned); SAFE_VECTOR_DECL(v, gpu_mem_allocator, kSizeInPixelsAligned); SAFE_VECTOR_DECL(u_new, gpu_mem_allocator, kSizeInPixelsAligned); SAFE_VECTOR_DECL(v_new, gpu_mem_allocator, kSizeInPixelsAligned); // flow increments SAFE_VECTOR_DECL(du, gpu_mem_allocator, kSizeInPixelsAligned); SAFE_VECTOR_DECL(dv, gpu_mem_allocator, kSizeInPixelsAligned); SAFE_VECTOR_DECL(du_new, gpu_mem_allocator, kSizeInPixelsAligned); SAFE_VECTOR_DECL(dv_new, gpu_mem_allocator, kSizeInPixelsAligned); // temporary storage SAFE_VECTOR_DECL(device_buffer, gpu_mem_allocator, alignUp(kSourceWidth, kStrideAlignmentFloat) * alignUp(kSourceHeight, kStrideAlignmentFloat)); // image derivatives SAFE_VECTOR_DECL(Ix, gpu_mem_allocator, kSizeInPixelsAligned); SAFE_VECTOR_DECL(Ixx, gpu_mem_allocator, kSizeInPixelsAligned); SAFE_VECTOR_DECL(Ix0, gpu_mem_allocator, kSizeInPixelsAligned); SAFE_VECTOR_DECL(Iy, gpu_mem_allocator, kSizeInPixelsAligned); SAFE_VECTOR_DECL(Iyy, gpu_mem_allocator, kSizeInPixelsAligned); SAFE_VECTOR_DECL(Iy0, gpu_mem_allocator, kSizeInPixelsAligned); SAFE_VECTOR_DECL(Ixy, gpu_mem_allocator, kSizeInPixelsAligned); // spatial derivative filter size const int kDFilterSize = 5; SAFE_VECTOR_DECL(derivativeFilter, gpu_mem_allocator, kDFilterSize); if (!kSkipProcessing) { const float derivativeFilterHost[kDFilterSize] = {1.0f, -8.0f, 0.0f, 8.0f, -1.0f}; ncvAssertCUDAReturn(cudaMemcpy(derivativeFilter.ptr(), derivativeFilterHost, sizeof(float) * kDFilterSize, cudaMemcpyHostToDevice), NCV_CUDA_ERROR); } //prepare image pyramid ImagePyramid pyr(desc.number_of_outer_iterations); cudaChannelFormatDesc channel_desc = cudaCreateChannelDesc<float>(); float scale = 1.0f; //cuda arrays for frames std::unique_ptr<FloatVector> pI0(new FloatVector(gpu_mem_allocator, kSizeInPixelsAligned)); ncvAssertReturn(pI0->isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); std::unique_ptr<FloatVector> pI1(new 
FloatVector(gpu_mem_allocator, kSizeInPixelsAligned)); ncvAssertReturn(pI1->isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); if (!kSkipProcessing) { //copy frame data to device size_t dst_width_in_bytes = alignUp(kSourceWidth, kStrideAlignmentFloat) * sizeof(float); size_t src_width_in_bytes = kSourceWidth * sizeof(float); size_t src_pitch_in_bytes = frame0.pitch(); ncvAssertCUDAReturn( cudaMemcpy2DAsync(pI0->ptr(), dst_width_in_bytes, frame0.ptr(), src_pitch_in_bytes, src_width_in_bytes, kSourceHeight, cudaMemcpyDeviceToDevice, stream), NCV_CUDA_ERROR ); ncvAssertCUDAReturn( cudaMemcpy2DAsync(pI1->ptr(), dst_width_in_bytes, frame1.ptr(), src_pitch_in_bytes, src_width_in_bytes, kSourceHeight, cudaMemcpyDeviceToDevice, stream), NCV_CUDA_ERROR ); } FloatVector* I0 = pI0.release(); FloatVector* I1 = pI1.release(); //prepare pyramid pyr.img0.push_back(I0); pyr.img1.push_back(I1); pyr.w.push_back(kSourceWidth); pyr.h.push_back(kSourceHeight); scale *= scale_factor; Ncv32u prev_level_width = kSourceWidth; Ncv32u prev_level_height = kSourceHeight; while((prev_level_width > 15) && (prev_level_height > 15) && (static_cast<Ncv32u>(pyr.img0.size()) < desc.number_of_outer_iterations)) { //current resolution Ncv32u level_width = static_cast<Ncv32u>(ceilf(kSourceWidth * scale)); Ncv32u level_height = static_cast<Ncv32u>(ceilf(kSourceHeight * scale)); Ncv32u level_width_aligned = alignUp(level_width, kStrideAlignmentFloat); Ncv32u buffer_size = alignUp(level_width, kStrideAlignmentFloat) * level_height; // buffer size in floats Ncv32u prev_level_pitch = alignUp(prev_level_width, kStrideAlignmentFloat) * sizeof(float); std::unique_ptr<FloatVector> level_frame0(new FloatVector(gpu_mem_allocator, buffer_size)); ncvAssertReturn(level_frame0->isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); std::unique_ptr<FloatVector> level_frame1(new FloatVector(gpu_mem_allocator, buffer_size)); ncvAssertReturn(level_frame1->isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); if (!kSkipProcessing) { ncvAssertCUDAReturn(cudaStreamSynchronize(stream), NCV_CUDA_ERROR); NcvSize32u srcSize (prev_level_width, prev_level_height); NcvSize32u dstSize (level_width, level_height); NcvRect32u srcROI (0, 0, prev_level_width, prev_level_height); NcvRect32u dstROI (0, 0, level_width, level_height); // frame 0 ncvAssertReturnNcvStat( nppiStResize_32f_C1R (I0->ptr(), srcSize, prev_level_pitch, srcROI, level_frame0->ptr(), dstSize, level_width_aligned * sizeof (float), dstROI, scale_factor, scale_factor, nppStSupersample) ); // frame 1 ncvAssertReturnNcvStat( nppiStResize_32f_C1R (I1->ptr(), srcSize, prev_level_pitch, srcROI, level_frame1->ptr(), dstSize, level_width_aligned * sizeof (float), dstROI, scale_factor, scale_factor, nppStSupersample) ); } I0 = level_frame0.release(); I1 = level_frame1.release(); //store pointers pyr.img0.push_back(I0); pyr.img1.push_back(I1); pyr.w.push_back(level_width); pyr.h.push_back(level_height); scale *= scale_factor; prev_level_width = level_width; prev_level_height = level_height; } if (!kSkipProcessing) { //initial values for flow is 0 ncvAssertCUDAReturn(cudaMemsetAsync(u.ptr(), 0, kSizeInPixelsAligned * sizeof(float), stream), NCV_CUDA_ERROR); ncvAssertCUDAReturn(cudaMemsetAsync(v.ptr(), 0, kSizeInPixelsAligned * sizeof(float), stream), NCV_CUDA_ERROR); //select images with lowest resolution ncvAssertCUDAReturn(cudaStreamSynchronize(stream), NCV_CUDA_ERROR); FloatVector* ptrU = &u; FloatVector* ptrV = &v; FloatVector* ptrUNew = &u_new; FloatVector* ptrVNew = &v_new; std::vector<FloatVector*>::const_reverse_iterator 
img0Iter = pyr.img0.rbegin(); std::vector<FloatVector*>::const_reverse_iterator img1Iter = pyr.img1.rbegin(); //outer loop //warping fixed point iteration while(!pyr.w.empty()) { //current grid dimensions const Ncv32u kLevelWidth = pyr.w.back(); const Ncv32u kLevelHeight = pyr.h.back(); const Ncv32u kLevelStride = alignUp(kLevelWidth, kStrideAlignmentFloat); //size of current image in bytes const int kLevelSizeInBytes = kLevelStride * kLevelHeight * sizeof(float); //number of points at current resolution const int kLevelSizeInPixels = kLevelStride * kLevelHeight; //initial guess for du and dv ncvAssertCUDAReturn(cudaMemsetAsync(du.ptr(), 0, kLevelSizeInBytes, stream), NCV_CUDA_ERROR); ncvAssertCUDAReturn(cudaMemsetAsync(dv.ptr(), 0, kLevelSizeInBytes, stream), NCV_CUDA_ERROR); I0 = *img0Iter; I1 = *img1Iter; ++img0Iter; ++img1Iter; Texture texI0(kLevelHeight, kLevelWidth, I0->ptr(), kLevelStride * sizeof(float), true, cudaFilterModeLinear, cudaAddressModeMirror); Texture texI1(kLevelHeight, kLevelWidth, I1->ptr(), kLevelStride * sizeof(float), true, cudaFilterModeLinear, cudaAddressModeMirror); //compute derivatives dim3 dBlocks(iDivUp(kLevelWidth, 32), iDivUp(kLevelHeight, 6)); //dim3 dThreads(32, 6); const int kPitchTex = kLevelStride * sizeof(float); NcvSize32u srcSize(kLevelWidth, kLevelHeight); Ncv32u nSrcStep = kLevelStride * sizeof(float); NcvRect32u oROI(0, 0, kLevelWidth, kLevelHeight); // Ix0 ncvAssertReturnNcvStat( nppiStFilterRowBorder_32f_C1R (I0->ptr(), srcSize, nSrcStep, Ix0.ptr(), srcSize, nSrcStep, oROI, nppStBorderMirror, derivativeFilter.ptr(), kDFilterSize, kDFilterSize/2, 1.0f/12.0f) ); // Iy0 ncvAssertReturnNcvStat( nppiStFilterColumnBorder_32f_C1R (I0->ptr(), srcSize, nSrcStep, Iy0.ptr(), srcSize, nSrcStep, oROI, nppStBorderMirror, derivativeFilter.ptr(), kDFilterSize, kDFilterSize/2, 1.0f/12.0f) ); // Ix ncvAssertReturnNcvStat( nppiStFilterRowBorder_32f_C1R (I1->ptr(), srcSize, nSrcStep, Ix.ptr(), srcSize, nSrcStep, oROI, nppStBorderMirror, derivativeFilter.ptr(), kDFilterSize, kDFilterSize/2, 1.0f/12.0f) ); // Iy ncvAssertReturnNcvStat( nppiStFilterColumnBorder_32f_C1R (I1->ptr(), srcSize, nSrcStep, Iy.ptr(), srcSize, nSrcStep, oROI, nppStBorderMirror, derivativeFilter.ptr(), kDFilterSize, kDFilterSize/2, 1.0f/12.0f) ); // Ixx ncvAssertReturnNcvStat( nppiStFilterRowBorder_32f_C1R (Ix.ptr(), srcSize, nSrcStep, Ixx.ptr(), srcSize, nSrcStep, oROI, nppStBorderMirror, derivativeFilter.ptr(), kDFilterSize, kDFilterSize/2, 1.0f/12.0f) ); // Iyy ncvAssertReturnNcvStat( nppiStFilterColumnBorder_32f_C1R (Iy.ptr(), srcSize, nSrcStep, Iyy.ptr(), srcSize, nSrcStep, oROI, nppStBorderMirror, derivativeFilter.ptr(), kDFilterSize, kDFilterSize/2, 1.0f/12.0f) ); // Ixy ncvAssertReturnNcvStat( nppiStFilterRowBorder_32f_C1R (Iy.ptr(), srcSize, nSrcStep, Ixy.ptr(), srcSize, nSrcStep, oROI, nppStBorderMirror, derivativeFilter.ptr(), kDFilterSize, kDFilterSize/2, 1.0f/12.0f) ); Texture texIx(kLevelHeight, kLevelWidth, Ix.ptr(), kPitchTex, true, cudaFilterModeLinear, cudaAddressModeMirror); Texture texIxx(kLevelHeight, kLevelWidth, Ixx.ptr(), kPitchTex, true, cudaFilterModeLinear, cudaAddressModeMirror); Texture texIx0(kLevelHeight, kLevelWidth, Ix0.ptr(), kPitchTex, true, cudaFilterModeLinear, cudaAddressModeMirror); Texture texIy(kLevelHeight, kLevelWidth, Iy.ptr(), kPitchTex, true, cudaFilterModeLinear, cudaAddressModeMirror); Texture texIyy(kLevelHeight, kLevelWidth, Iyy.ptr(), kPitchTex, true, cudaFilterModeLinear, cudaAddressModeMirror); Texture texIy0(kLevelHeight, kLevelWidth, 
Iy0.ptr(), kPitchTex, true, cudaFilterModeLinear, cudaAddressModeMirror); Texture texIxy(kLevelHeight, kLevelWidth, Ixy.ptr(), kPitchTex, true, cudaFilterModeLinear, cudaAddressModeMirror); Texture texDiffX(kLevelSizeInBytes, diffusivity_x.ptr()); Texture texDiffY(kLevelSizeInBytes, diffusivity_y.ptr()); // flow Texture texU(kLevelSizeInBytes, ptrU->ptr()); Texture texV(kLevelSizeInBytes, ptrV->ptr()); // flow increments Texture texDu(kLevelSizeInBytes, du.ptr()); Texture texDv(kLevelSizeInBytes, dv.ptr()); Texture texDuNew(kLevelSizeInBytes, du_new.ptr()); Texture texDvNew(kLevelSizeInBytes, dv_new.ptr()); dim3 psor_blocks(iDivUp(kLevelWidth, PSOR_TILE_WIDTH), iDivUp(kLevelHeight, PSOR_TILE_HEIGHT)); dim3 psor_threads(PSOR_TILE_WIDTH, PSOR_TILE_HEIGHT); dim3 sor_blocks(iDivUp(kLevelWidth, SOR_TILE_WIDTH), iDivUp(kLevelHeight, SOR_TILE_HEIGHT)); dim3 sor_threads(SOR_TILE_WIDTH, SOR_TILE_HEIGHT); // inner loop // lagged nonlinearity fixed point iteration ncvAssertCUDAReturn(cudaStreamSynchronize(stream), NCV_CUDA_ERROR); for (Ncv32u current_inner_iteration = 0; current_inner_iteration < desc.number_of_inner_iterations; ++current_inner_iteration) { //compute coefficients prepare_sor_stage_1_tex<<<psor_blocks, psor_threads, 0, stream>>> (texU, texV, texDu, texDv, texI0, texI1, texIx, texIxx, texIx0, texIy, texIyy, texIy0, texIxy, diffusivity_x.ptr(), diffusivity_y.ptr(), denom_u.ptr(), denom_v.ptr(), num_dudv.ptr(), num_u.ptr(), num_v.ptr(), kLevelWidth, kLevelHeight, kLevelStride, alpha, gamma); ncvAssertCUDALastErrorReturn(NCV_CUDA_ERROR); prepare_sor_stage_2<<<psor_blocks, psor_threads, 0, stream>>>(texDiffX, texDiffY, denom_u.ptr(), denom_v.ptr(), kLevelWidth, kLevelHeight, kLevelStride); ncvAssertCUDALastErrorReturn(NCV_CUDA_ERROR); //solve linear system for (Ncv32u solver_iteration = 0; solver_iteration < desc.number_of_solver_iterations; ++solver_iteration) { float omega = 1.99f; sor_pass<0><<<sor_blocks, sor_threads, 0, stream>>>(texU, texV, texDu, texDv, texDiffX, texDiffY, du_new.ptr(), dv_new.ptr(), denom_u.ptr(), denom_v.ptr(), num_u.ptr(), num_v.ptr(), num_dudv.ptr(), omega, kLevelWidth, kLevelHeight, kLevelStride); ncvAssertCUDALastErrorReturn(NCV_CUDA_ERROR); sor_pass<1><<<sor_blocks, sor_threads, 0, stream>>>(texU, texV, texDuNew, texDvNew, texDiffX, texDiffY, du.ptr(), dv.ptr(), denom_u.ptr(), denom_v.ptr(), num_u.ptr(), num_v.ptr(),num_dudv.ptr(), omega, kLevelWidth, kLevelHeight, kLevelStride); ncvAssertCUDALastErrorReturn(NCV_CUDA_ERROR); }//end of solver loop }// end of inner loop //update u and v add(ptrU->ptr(), du.ptr(), kLevelSizeInPixels, stream); ncvAssertCUDALastErrorReturn(NCV_CUDA_ERROR); add(ptrV->ptr(), dv.ptr(), kLevelSizeInPixels, stream); ncvAssertCUDALastErrorReturn(NCV_CUDA_ERROR); //prolongate using texture pyr.w.pop_back(); pyr.h.pop_back(); if (!pyr.w.empty()) { //compute new image size Ncv32u nw = pyr.w.back(); Ncv32u nh = pyr.h.back(); Ncv32u ns = alignUp(nw, kStrideAlignmentFloat); dim3 p_blocks(iDivUp(nw, 32), iDivUp(nh, 8)); //dim3 p_threads(32, 8); NcvSize32u inner_srcSize (kLevelWidth, kLevelHeight); NcvSize32u dstSize (nw, nh); NcvRect32u srcROI (0, 0, kLevelWidth, kLevelHeight); NcvRect32u dstROI (0, 0, nw, nh); ncvAssertReturnNcvStat( nppiStResize_32f_C1R (ptrU->ptr(), inner_srcSize, kLevelStride * sizeof (float), srcROI, ptrUNew->ptr(), dstSize, ns * sizeof (float), dstROI, 1.0f/scale_factor, 1.0f/scale_factor, nppStBicubic) ); ScaleVector(ptrUNew->ptr(), ptrUNew->ptr(), 1.0f/scale_factor, ns * nh, stream); 
ncvAssertCUDALastErrorReturn(NCV_CUDA_ERROR); ncvAssertReturnNcvStat( nppiStResize_32f_C1R (ptrV->ptr(), inner_srcSize, kLevelStride * sizeof (float), srcROI, ptrVNew->ptr(), dstSize, ns * sizeof (float), dstROI, 1.0f/scale_factor, 1.0f/scale_factor, nppStBicubic) ); ScaleVector(ptrVNew->ptr(), ptrVNew->ptr(), 1.0f/scale_factor, ns * nh, stream); ncvAssertCUDALastErrorReturn((int)NCV_CUDA_ERROR); cv::cuda::device::swap<FloatVector*>(ptrU, ptrUNew); cv::cuda::device::swap<FloatVector*>(ptrV, ptrVNew); } scale /= scale_factor; } // end of warping iterations ncvAssertCUDAReturn(cudaStreamSynchronize(stream), (int)NCV_CUDA_ERROR); ncvAssertCUDAReturn( cudaMemcpy2DAsync (uOut.ptr(), uOut.pitch(), ptrU->ptr(), kSourcePitch, kSourceWidth*sizeof(float), kSourceHeight, cudaMemcpyDeviceToDevice, stream), (int)NCV_CUDA_ERROR ); ncvAssertCUDAReturn( cudaMemcpy2DAsync (vOut.ptr(), vOut.pitch(), ptrV->ptr(), kSourcePitch, kSourceWidth*sizeof(float), kSourceHeight, cudaMemcpyDeviceToDevice, stream), (int)NCV_CUDA_ERROR ); ncvAssertCUDAReturn(cudaStreamSynchronize(stream), (int)NCV_CUDA_ERROR); } return NCV_SUCCESS; }
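// Illustrative sketch (not taken from the file above). The solver above sizes its launch grids
// with iDivUp and pads row strides with alignUp; a minimal host-side version of those two helpers,
// plus the level-stride computation they are used for, could look like this. The alignment value
// and the level dimensions below are assumptions.
#include <cstdio>

static unsigned int iDivUp(unsigned int total, unsigned int grain)
{
    return (total + grain - 1) / grain;          // ceiling division: blocks needed to cover `total`
}

static unsigned int alignUp(unsigned int what, unsigned int alignment)
{
    return iDivUp(what, alignment) * alignment;  // round up to the next multiple of `alignment`
}

int main()
{
    const unsigned int kStrideAlignmentFloat = 32;       // assumed alignment, in floats
    unsigned int kLevelWidth = 613, kLevelHeight = 461;  // an arbitrary pyramid level
    unsigned int kLevelStride = alignUp(kLevelWidth, kStrideAlignmentFloat);
    unsigned int bx = iDivUp(kLevelWidth, 32), by = iDivUp(kLevelHeight, 6);
    printf("stride = %u floats, derivative grid = %u x %u blocks\n", kLevelStride, bx, by);
    return 0;
}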
38ad51fca4280494d2c1cbdd177b9c8635a00115.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"

#define N 9
#define threads 3

__global__ void mulKernel(int* A, int* X, int* B)
{
    __shared__ int a[threads * threads];
    __shared__ int x[threads];
    __shared__ int parcial[threads];

    int res = 0;
    int row = threadIdx.y + blockIdx.y * blockDim.y;
    int col = threadIdx.x + blockIdx.x * blockDim.x;
    int iA = col + row * N;
    int tidA = threadIdx.x + threads * threadIdx.y;
    int tidX = threadIdx.y;

    if (row >= N || col >= N)
        return;

    a[tidA] = A[iA];
    if (threadIdx.x == 0)
        x[tidX] = B[row];
    __syncthreads();

    res = a[tidA] * x[tidX];
    atomicAdd(&parcial[tidX], res);
    __syncthreads();

    if (threadIdx.x == 0)
        atomicAdd(&B[col], parcial[tidX]);
}

//lab2-6
__global__ void kernel6(int* A, int* posmin)
{
    //__shared__ int sh[threads];
    __shared__ int locPosMin;
    int loc;
    int index = threadIdx.x + blockIdx.x * blockDim.x;

    loc = A[index];
    if (threadIdx.x == 0)
        locPosMin = INT_MAX;
    __syncthreads();

    if (loc > 0)
        atomicMin(&locPosMin, loc);
    __syncthreads();

    if (threadIdx.x == 0)
        atomicMin(posmin, locPosMin);
    __syncthreads();

    if (threadIdx.x == 0)
        locPosMin = *posmin;
    __syncthreads();

    if (loc <= 0)
        loc = locPosMin;
    A[index] = loc;
}

int main()
{
    return 0;
}
38ad51fca4280494d2c1cbdd177b9c8635a00115.cu
#include <stdio.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"

#define N 9
#define threads 3

__global__ void mulKernel(int* A, int* X, int* B)
{
    __shared__ int a[threads * threads];
    __shared__ int x[threads];
    __shared__ int parcial[threads];

    int res = 0;
    int row = threadIdx.y + blockIdx.y * blockDim.y;
    int col = threadIdx.x + blockIdx.x * blockDim.x;
    int iA = col + row * N;
    int tidA = threadIdx.x + threads * threadIdx.y;
    int tidX = threadIdx.y;

    if (row >= N || col >= N)
        return;

    a[tidA] = A[iA];
    if (threadIdx.x == 0)
        x[tidX] = B[row];
    __syncthreads();

    res = a[tidA] * x[tidX];
    atomicAdd(&parcial[tidX], res);
    __syncthreads();

    if (threadIdx.x == 0)
        atomicAdd(&B[col], parcial[tidX]);
}

//lab2-6
__global__ void kernel6(int* A, int* posmin)
{
    //__shared__ int sh[threads];
    __shared__ int locPosMin;
    int loc;
    int index = threadIdx.x + blockIdx.x * blockDim.x;

    loc = A[index];
    if (threadIdx.x == 0)
        locPosMin = INT_MAX;
    __syncthreads();

    if (loc > 0)
        atomicMin(&locPosMin, loc);
    __syncthreads();

    if (threadIdx.x == 0)
        atomicMin(posmin, locPosMin);
    __syncthreads();

    if (threadIdx.x == 0)
        locPosMin = *posmin;
    __syncthreads();

    if (loc <= 0)
        loc = locPosMin;
    A[index] = loc;
}

int main()
{
    return 0;
}
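// Illustrative sketch (not part of the dataset files above): a host-side driver for kernel6,
// which replaces every non-positive element of A with the smallest positive element of A.
// It assumes it is compiled in the same translation unit as the .cu file above, so N, threads
// and kernel6 are visible; the input values and the demo entry-point name are made up here.
#include <climits>
#include <cstdio>
#include <cuda_runtime.h>

int run_kernel6_demo()
{
    // Every block's local positive minimum is 3, so the result is deterministic here.
    int h_A[N] = { 3, -1, 7, 3, 5, -2, 3, 4, 6 };

    int *d_A = nullptr, *d_posmin = nullptr;
    cudaMalloc(&d_A, N * sizeof(int));
    cudaMalloc(&d_posmin, sizeof(int));

    int init = INT_MAX;   // seed the global minimum before the atomicMin reduction
    cudaMemcpy(d_A, h_A, N * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_posmin, &init, sizeof(int), cudaMemcpyHostToDevice);

    kernel6<<<N / threads, threads>>>(d_A, d_posmin);
    cudaMemcpy(h_A, d_A, N * sizeof(int), cudaMemcpyDeviceToHost);

    for (int i = 0; i < N; ++i)
        printf("%d ", h_A[i]);   // the -1 and -2 slots now hold 3
    printf("\n");

    cudaFree(d_A);
    cudaFree(d_posmin);
    return 0;
}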
8b59ed54119c17d10952b104f18c8e5ba6eddc03.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "cuda_kernel.h" ////////////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void index_max_cuda_kernel( float *index_vals, // [batch_size, 32, num_block] int *indices, // [batch_size, num_block] float *max_vals, // [batch_size, A_num_block * 32] float *max_vals_scatter, // [batch_size, 32, num_block] long batch_size, long A_num_block, long B_num_block, long num_block ) { long batch_idx = blockIdx.x; long thread_idx = threadIdx.x; long num_thread = blockDim.x; extern __shared__ float buffer[]; int *max_buffer = (int*)buffer; for (int i = 0; i < A_num_block * 32; i = i + num_thread) { int idx = i + thread_idx; if (idx < A_num_block * 32) { max_buffer[idx] = -1e8; } } __syncthreads(); int *indices_pt = &indices[batch_idx * num_block]; float *index_vals_pt = &index_vals[batch_idx * num_block * 32]; for (int idx_start = 0; idx_start < 32 * num_block; idx_start = idx_start + num_thread) { int idx = idx_start + thread_idx; int A_block_idx = indices_pt[idx % num_block] / B_num_block; atomicMax(&max_buffer[A_block_idx * 32 + idx / num_block], (int)(index_vals_pt[idx] * 1000)); } __syncthreads(); float *max_vals_pt = &max_vals[batch_idx * A_num_block * 32]; for (int i = 0; i < A_num_block * 32; i = i + num_thread) { int idx = i + thread_idx; if (idx < A_num_block * 32) { max_vals_pt[idx] = (float)max_buffer[idx] / 1000.; } } float *max_vals_scatter_pt = &max_vals_scatter[batch_idx * num_block * 32]; for (int idx_start = 0; idx_start < 32 * num_block; idx_start = idx_start + num_thread) { int idx = idx_start + thread_idx; int A_block_idx = indices_pt[idx % num_block] / B_num_block; max_vals_scatter_pt[idx] = (float)max_buffer[A_block_idx * 32 + idx / num_block] / 1000.; } } __global__ void mm_to_sparse_cuda_kernel( float *dense_A, // [batch_size, A_num_block, dim, 32] float *dense_B, // [batch_size, B_num_block, dim, 32] int *indices, // [batch_size, num_block] float *sparse_C, // [batch_size, num_block, 32, 32] long batch_size, long A_num_block, long B_num_block, long dim, long num_block ) { long batch_idx = blockIdx.y; long block_idx = blockIdx.x * blockDim.y + threadIdx.y; long thread_idx = threadIdx.x; __shared__ float buffer[4096]; float *A_buffer = &buffer[threadIdx.y * 1024]; // [2, 8, 32] float *B_buffer = &buffer[threadIdx.y * 1024 + 512]; // [2, 8, 32] long batch_idx__block_idx = batch_idx * num_block + block_idx; long AB_block_idx = indices[batch_idx__block_idx]; float *dense_A_pt = &dense_A[(batch_idx * A_num_block + AB_block_idx / B_num_block) * dim * 32]; float *dense_B_pt = &dense_B[(batch_idx * B_num_block + AB_block_idx % B_num_block) * dim * 32]; int reg_1_idx = thread_idx / 8; // [0000000011111111222222223333333344444444555555556666666677777777] int reg_2_idx = thread_idx % 8; // [0123456701234567012345670123456701234567012345670123456701234567] float reg_1[8]; float reg_2[8]; float reg_array[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; #pragma unroll for (int i = 0; i < 4; i++) { A_buffer[i * 64 + thread_idx] = dense_A_pt[i * 64 + thread_idx]; B_buffer[i * 64 + thread_idx] = dense_B_pt[i * 64 + thread_idx]; } __syncthreads(); #pragma unroll for (int i = 0; i < 4; i++) { reg_1[i] = A_buffer[reg_1_idx * 4 + i]; reg_2[i] = B_buffer[reg_2_idx * 4 + i]; } for (int dim_stride = 1; dim_stride < (dim / 8); dim_stride++) { #pragma unroll 
for (int i = 0; i < 4; i++) { A_buffer[(dim_stride % 2) * 256 + i * 64 + thread_idx] = dense_A_pt[dim_stride * 256 + i * 64 + thread_idx]; B_buffer[(dim_stride % 2) * 256 + i * 64 + thread_idx] = dense_B_pt[dim_stride * 256 + i * 64 + thread_idx]; } #pragma unroll for (int mini_dim_idx = 1; mini_dim_idx < 8; mini_dim_idx++) { #pragma unroll for (int i = 0; i < 4; i++) { reg_1[(mini_dim_idx % 2) * 4 + i] = A_buffer[((dim_stride - 1) % 2) * 256 + mini_dim_idx * 32 + reg_1_idx * 4 + i]; reg_2[(mini_dim_idx % 2) * 4 + i] = B_buffer[((dim_stride - 1) % 2) * 256 + mini_dim_idx * 32 + reg_2_idx * 4 + i]; } #pragma unroll for (int i = 0; i < 4; i++) { #pragma unroll for (int j = 0; j < 4; j++) { reg_array[i * 4 + j] += reg_1[((mini_dim_idx - 1) % 2) * 4 + i] * reg_2[((mini_dim_idx - 1) % 2) * 4 + j]; } } } __syncthreads(); #pragma unroll for (int i = 0; i < 4; i++) { reg_1[i] = A_buffer[(dim_stride % 2) * 256 + reg_1_idx * 4 + i]; reg_2[i] = B_buffer[(dim_stride % 2) * 256 + reg_2_idx * 4 + i]; } #pragma unroll for (int i = 0; i < 4; i++) { #pragma unroll for (int j = 0; j < 4; j++) { reg_array[i * 4 + j] += reg_1[4 + i] * reg_2[4 + j]; } } } #pragma unroll for (int mini_dim_idx = 1; mini_dim_idx < 8; mini_dim_idx++) { #pragma unroll for (int i = 0; i < 4; i++) { reg_1[(mini_dim_idx % 2) * 4 + i] = A_buffer[256 + mini_dim_idx * 32 + reg_1_idx * 4 + i]; reg_2[(mini_dim_idx % 2) * 4 + i] = B_buffer[256 + mini_dim_idx * 32 + reg_2_idx * 4 + i]; } #pragma unroll for (int i = 0; i < 4; i++) { #pragma unroll for (int j = 0; j < 4; j++) { reg_array[i * 4 + j] += reg_1[((mini_dim_idx - 1) % 2) * 4 + i] * reg_2[((mini_dim_idx - 1) % 2) * 4 + j]; } } } #pragma unroll for (int i = 0; i < 4; i++) { #pragma unroll for (int j = 0; j < 4; j++) { reg_array[i * 4 + j] += reg_1[4 + i] * reg_2[4 + j]; } } __syncthreads(); float *C_buffer = &buffer[threadIdx.y * 1024]; // [32, 32] #pragma unroll for (int i = 0; i < 4; i++) { #pragma unroll for (int j = 0; j < 4; j++) { C_buffer[(reg_2_idx * 4 + j) * 32 + reg_1_idx * 4 + i] = reg_array[i * 4 + j]; } } __syncthreads(); float *sparse_C_pt = &sparse_C[batch_idx__block_idx * 1024]; #pragma unroll for (int i = 0; i < 16; i++) { sparse_C_pt[i * 64 + thread_idx] = C_buffer[i * 64 + thread_idx]; } } __global__ void sparse_dense_mm_cuda_kernel( float *sparse_A, // [batch_size, num_block, 32, 32] int *indices, // [batch_size, num_block] float *dense_B, // [batch_size, B_num_block, dim, 32] float *dense_C, // [batch_size, A_num_block, dim, 32] long batch_size, long A_num_block, long B_num_block, long dim, long num_block ) { long batch_idx = blockIdx.y; long block_idx = blockIdx.x * blockDim.y + threadIdx.y; long thread_idx = threadIdx.x; __shared__ float buffer[6144]; float *A_buffer = &buffer[threadIdx.y * 3072]; // [32, 32] float *B_buffer = &buffer[threadIdx.y * 3072 + 1024]; // [32, 64] long batch_idx__block_idx = batch_idx * num_block + block_idx; float *sparse_A_pt = &sparse_A[batch_idx__block_idx * 1024]; #pragma unroll for (int i = 0; i < 8; i++) { A_buffer[i * 128 + thread_idx] = sparse_A_pt[i * 128 + thread_idx]; } long AB_block_idx = indices[batch_idx__block_idx]; float *dense_B_pt = &dense_B[(batch_idx * B_num_block + AB_block_idx % B_num_block) * 32 * dim]; float *dense_C_pt = &dense_C[(batch_idx * A_num_block + AB_block_idx / B_num_block) * 32 * dim]; // [0000000011111111222222223333333344444444555555556666666677777777] // [0123456701234567012345670123456701234567012345670123456701234567] int reg_1_idx = thread_idx / 8; int reg_2_idx = thread_idx % 8; float 
reg_1[8]; float reg_2[8]; float reg_array[16]; for (int dim_stride = 0; dim_stride < dim; dim_stride = dim_stride + 64) { #pragma unroll for (int i = 0; i < 16; i++) { B_buffer[i * 128 + thread_idx] = dense_B_pt[dim_stride * 32 + i * 128 + thread_idx]; } #pragma unroll for (int i = 0; i < 16; i++) { reg_array[i] = 0; } __syncthreads(); #pragma unroll for (int i = 0; i < 4; i++) { reg_1[i] = B_buffer[(reg_1_idx * 4 + i) * 32]; reg_2[i] = A_buffer[reg_2_idx * 4 + i]; } #pragma unroll for (int mini_dim_idx = 1; mini_dim_idx < 32; mini_dim_idx++) { #pragma unroll for (int i = 0; i < 4; i++) { reg_1[(mini_dim_idx % 2) * 4 + i] = B_buffer[(reg_1_idx * 4 + i) * 32 + mini_dim_idx]; reg_2[(mini_dim_idx % 2) * 4 + i] = A_buffer[mini_dim_idx * 32 + reg_2_idx * 4 + i]; } #pragma unroll for (int i = 0; i < 4; i++) { #pragma unroll for (int j = 0; j < 4; j++) { reg_array[i * 4 + j] += reg_1[((mini_dim_idx - 1) % 2) * 4 + i] * reg_2[((mini_dim_idx - 1) % 2) * 4 + j]; } } } #pragma unroll for (int i = 0; i < 4; i++) { #pragma unroll for (int j = 0; j < 4; j++) { reg_array[i * 4 + j] += reg_1[4 + i] * reg_2[4 + j]; } } __syncthreads(); float *C_buffer = &buffer[threadIdx.y * 3072 + 1024]; // [64, 32] #pragma unroll for (int i = 0; i < 4; i++) { #pragma unroll for (int j = 0; j < 4; j++) { C_buffer[(reg_1_idx * 4 + i) * 32 + reg_2_idx * 4 + j] = reg_array[i * 4 + j]; } } __syncthreads(); #pragma unroll for (int i = 0; i < 16; i++) { atomicAdd(&dense_C_pt[dim_stride * 32 + i * 128 + thread_idx], C_buffer[i * 128 + thread_idx]); } __syncthreads(); } } __global__ void reduce_sum_cuda_kernel( float *sparse_A, // [batch_size, num_block, 32, 32] int *indices, // [batch_size, num_block] float *dense_C, // [batch_size, A_num_block, 32] long batch_size, long A_num_block, long B_num_block, long num_block ) { long batch_idx = blockIdx.y; long block_idx = blockIdx.x * blockDim.y + threadIdx.y; long thread_idx = threadIdx.x; long batch_idx__block_idx = batch_idx * num_block + block_idx; long AB_block_idx = indices[batch_idx__block_idx]; float *sparse_A_pt = &sparse_A[batch_idx__block_idx * 1024]; float reg_array[16]; float value = 0; #pragma unroll for (int i = 0; i < 8; i++) { reg_array[i] = sparse_A_pt[i * 32 + thread_idx]; } #pragma unroll for (int stride = 8; stride < 32; stride = stride + 8) { #pragma unroll for (int i = 0; i < 8; i++) { reg_array[(stride + i) % 16] = sparse_A_pt[(stride + i) * 32 + thread_idx]; } #pragma unroll for (int i = 0; i < 8; i++) { value = value + reg_array[(stride - 8 + i) % 16]; } } #pragma unroll for (int i = 0; i < 8; i++) { value = value + reg_array[8 + i]; } float *dense_C_pt = &dense_C[(batch_idx * A_num_block + AB_block_idx / B_num_block) * 32]; atomicAdd(&dense_C_pt[thread_idx], value); } __global__ void scatter_cuda_kernel( float *dense_A, // [batch_size, A_num_block, 32] int *indices, // [batch_size, num_block] float *sparse_C, // [batch_size, num_block, 32, 32] long batch_size, long A_num_block, long B_num_block, long num_block ) { long batch_idx = blockIdx.y; long block_idx = blockIdx.x * blockDim.y + threadIdx.y; long thread_idx = threadIdx.x; long batch_idx__block_idx = batch_idx * num_block + block_idx; long AB_block_idx = indices[batch_idx__block_idx]; float *dense_A_pt = &dense_A[(batch_idx * A_num_block + AB_block_idx / B_num_block) * 32]; float *sparse_C_pt = &sparse_C[(batch_idx * num_block + block_idx) * 1024]; float value = dense_A_pt[thread_idx]; #pragma unroll for (int i = 0; i < 32; i++) { sparse_C_pt[i * 32 + thread_idx] = value; } }
8b59ed54119c17d10952b104f18c8e5ba6eddc03.cu
#include "cuda_kernel.h" ////////////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void index_max_cuda_kernel( float *index_vals, // [batch_size, 32, num_block] int *indices, // [batch_size, num_block] float *max_vals, // [batch_size, A_num_block * 32] float *max_vals_scatter, // [batch_size, 32, num_block] long batch_size, long A_num_block, long B_num_block, long num_block ) { long batch_idx = blockIdx.x; long thread_idx = threadIdx.x; long num_thread = blockDim.x; extern __shared__ float buffer[]; int *max_buffer = (int*)buffer; for (int i = 0; i < A_num_block * 32; i = i + num_thread) { int idx = i + thread_idx; if (idx < A_num_block * 32) { max_buffer[idx] = -1e8; } } __syncthreads(); int *indices_pt = &indices[batch_idx * num_block]; float *index_vals_pt = &index_vals[batch_idx * num_block * 32]; for (int idx_start = 0; idx_start < 32 * num_block; idx_start = idx_start + num_thread) { int idx = idx_start + thread_idx; int A_block_idx = indices_pt[idx % num_block] / B_num_block; atomicMax(&max_buffer[A_block_idx * 32 + idx / num_block], (int)(index_vals_pt[idx] * 1000)); } __syncthreads(); float *max_vals_pt = &max_vals[batch_idx * A_num_block * 32]; for (int i = 0; i < A_num_block * 32; i = i + num_thread) { int idx = i + thread_idx; if (idx < A_num_block * 32) { max_vals_pt[idx] = (float)max_buffer[idx] / 1000.; } } float *max_vals_scatter_pt = &max_vals_scatter[batch_idx * num_block * 32]; for (int idx_start = 0; idx_start < 32 * num_block; idx_start = idx_start + num_thread) { int idx = idx_start + thread_idx; int A_block_idx = indices_pt[idx % num_block] / B_num_block; max_vals_scatter_pt[idx] = (float)max_buffer[A_block_idx * 32 + idx / num_block] / 1000.; } } __global__ void mm_to_sparse_cuda_kernel( float *dense_A, // [batch_size, A_num_block, dim, 32] float *dense_B, // [batch_size, B_num_block, dim, 32] int *indices, // [batch_size, num_block] float *sparse_C, // [batch_size, num_block, 32, 32] long batch_size, long A_num_block, long B_num_block, long dim, long num_block ) { long batch_idx = blockIdx.y; long block_idx = blockIdx.x * blockDim.y + threadIdx.y; long thread_idx = threadIdx.x; __shared__ float buffer[4096]; float *A_buffer = &buffer[threadIdx.y * 1024]; // [2, 8, 32] float *B_buffer = &buffer[threadIdx.y * 1024 + 512]; // [2, 8, 32] long batch_idx__block_idx = batch_idx * num_block + block_idx; long AB_block_idx = indices[batch_idx__block_idx]; float *dense_A_pt = &dense_A[(batch_idx * A_num_block + AB_block_idx / B_num_block) * dim * 32]; float *dense_B_pt = &dense_B[(batch_idx * B_num_block + AB_block_idx % B_num_block) * dim * 32]; int reg_1_idx = thread_idx / 8; // [0000000011111111222222223333333344444444555555556666666677777777] int reg_2_idx = thread_idx % 8; // [0123456701234567012345670123456701234567012345670123456701234567] float reg_1[8]; float reg_2[8]; float reg_array[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; #pragma unroll for (int i = 0; i < 4; i++) { A_buffer[i * 64 + thread_idx] = dense_A_pt[i * 64 + thread_idx]; B_buffer[i * 64 + thread_idx] = dense_B_pt[i * 64 + thread_idx]; } __syncthreads(); #pragma unroll for (int i = 0; i < 4; i++) { reg_1[i] = A_buffer[reg_1_idx * 4 + i]; reg_2[i] = B_buffer[reg_2_idx * 4 + i]; } for (int dim_stride = 1; dim_stride < (dim / 8); dim_stride++) { #pragma unroll for (int i = 0; i < 4; i++) { A_buffer[(dim_stride % 2) * 256 + i * 64 + thread_idx] = 
dense_A_pt[dim_stride * 256 + i * 64 + thread_idx]; B_buffer[(dim_stride % 2) * 256 + i * 64 + thread_idx] = dense_B_pt[dim_stride * 256 + i * 64 + thread_idx]; } #pragma unroll for (int mini_dim_idx = 1; mini_dim_idx < 8; mini_dim_idx++) { #pragma unroll for (int i = 0; i < 4; i++) { reg_1[(mini_dim_idx % 2) * 4 + i] = A_buffer[((dim_stride - 1) % 2) * 256 + mini_dim_idx * 32 + reg_1_idx * 4 + i]; reg_2[(mini_dim_idx % 2) * 4 + i] = B_buffer[((dim_stride - 1) % 2) * 256 + mini_dim_idx * 32 + reg_2_idx * 4 + i]; } #pragma unroll for (int i = 0; i < 4; i++) { #pragma unroll for (int j = 0; j < 4; j++) { reg_array[i * 4 + j] += reg_1[((mini_dim_idx - 1) % 2) * 4 + i] * reg_2[((mini_dim_idx - 1) % 2) * 4 + j]; } } } __syncthreads(); #pragma unroll for (int i = 0; i < 4; i++) { reg_1[i] = A_buffer[(dim_stride % 2) * 256 + reg_1_idx * 4 + i]; reg_2[i] = B_buffer[(dim_stride % 2) * 256 + reg_2_idx * 4 + i]; } #pragma unroll for (int i = 0; i < 4; i++) { #pragma unroll for (int j = 0; j < 4; j++) { reg_array[i * 4 + j] += reg_1[4 + i] * reg_2[4 + j]; } } } #pragma unroll for (int mini_dim_idx = 1; mini_dim_idx < 8; mini_dim_idx++) { #pragma unroll for (int i = 0; i < 4; i++) { reg_1[(mini_dim_idx % 2) * 4 + i] = A_buffer[256 + mini_dim_idx * 32 + reg_1_idx * 4 + i]; reg_2[(mini_dim_idx % 2) * 4 + i] = B_buffer[256 + mini_dim_idx * 32 + reg_2_idx * 4 + i]; } #pragma unroll for (int i = 0; i < 4; i++) { #pragma unroll for (int j = 0; j < 4; j++) { reg_array[i * 4 + j] += reg_1[((mini_dim_idx - 1) % 2) * 4 + i] * reg_2[((mini_dim_idx - 1) % 2) * 4 + j]; } } } #pragma unroll for (int i = 0; i < 4; i++) { #pragma unroll for (int j = 0; j < 4; j++) { reg_array[i * 4 + j] += reg_1[4 + i] * reg_2[4 + j]; } } __syncthreads(); float *C_buffer = &buffer[threadIdx.y * 1024]; // [32, 32] #pragma unroll for (int i = 0; i < 4; i++) { #pragma unroll for (int j = 0; j < 4; j++) { C_buffer[(reg_2_idx * 4 + j) * 32 + reg_1_idx * 4 + i] = reg_array[i * 4 + j]; } } __syncthreads(); float *sparse_C_pt = &sparse_C[batch_idx__block_idx * 1024]; #pragma unroll for (int i = 0; i < 16; i++) { sparse_C_pt[i * 64 + thread_idx] = C_buffer[i * 64 + thread_idx]; } } __global__ void sparse_dense_mm_cuda_kernel( float *sparse_A, // [batch_size, num_block, 32, 32] int *indices, // [batch_size, num_block] float *dense_B, // [batch_size, B_num_block, dim, 32] float *dense_C, // [batch_size, A_num_block, dim, 32] long batch_size, long A_num_block, long B_num_block, long dim, long num_block ) { long batch_idx = blockIdx.y; long block_idx = blockIdx.x * blockDim.y + threadIdx.y; long thread_idx = threadIdx.x; __shared__ float buffer[6144]; float *A_buffer = &buffer[threadIdx.y * 3072]; // [32, 32] float *B_buffer = &buffer[threadIdx.y * 3072 + 1024]; // [32, 64] long batch_idx__block_idx = batch_idx * num_block + block_idx; float *sparse_A_pt = &sparse_A[batch_idx__block_idx * 1024]; #pragma unroll for (int i = 0; i < 8; i++) { A_buffer[i * 128 + thread_idx] = sparse_A_pt[i * 128 + thread_idx]; } long AB_block_idx = indices[batch_idx__block_idx]; float *dense_B_pt = &dense_B[(batch_idx * B_num_block + AB_block_idx % B_num_block) * 32 * dim]; float *dense_C_pt = &dense_C[(batch_idx * A_num_block + AB_block_idx / B_num_block) * 32 * dim]; // [0000000011111111222222223333333344444444555555556666666677777777] // [0123456701234567012345670123456701234567012345670123456701234567] int reg_1_idx = thread_idx / 8; int reg_2_idx = thread_idx % 8; float reg_1[8]; float reg_2[8]; float reg_array[16]; for (int dim_stride = 0; dim_stride < dim; 
dim_stride = dim_stride + 64) { #pragma unroll for (int i = 0; i < 16; i++) { B_buffer[i * 128 + thread_idx] = dense_B_pt[dim_stride * 32 + i * 128 + thread_idx]; } #pragma unroll for (int i = 0; i < 16; i++) { reg_array[i] = 0; } __syncthreads(); #pragma unroll for (int i = 0; i < 4; i++) { reg_1[i] = B_buffer[(reg_1_idx * 4 + i) * 32]; reg_2[i] = A_buffer[reg_2_idx * 4 + i]; } #pragma unroll for (int mini_dim_idx = 1; mini_dim_idx < 32; mini_dim_idx++) { #pragma unroll for (int i = 0; i < 4; i++) { reg_1[(mini_dim_idx % 2) * 4 + i] = B_buffer[(reg_1_idx * 4 + i) * 32 + mini_dim_idx]; reg_2[(mini_dim_idx % 2) * 4 + i] = A_buffer[mini_dim_idx * 32 + reg_2_idx * 4 + i]; } #pragma unroll for (int i = 0; i < 4; i++) { #pragma unroll for (int j = 0; j < 4; j++) { reg_array[i * 4 + j] += reg_1[((mini_dim_idx - 1) % 2) * 4 + i] * reg_2[((mini_dim_idx - 1) % 2) * 4 + j]; } } } #pragma unroll for (int i = 0; i < 4; i++) { #pragma unroll for (int j = 0; j < 4; j++) { reg_array[i * 4 + j] += reg_1[4 + i] * reg_2[4 + j]; } } __syncthreads(); float *C_buffer = &buffer[threadIdx.y * 3072 + 1024]; // [64, 32] #pragma unroll for (int i = 0; i < 4; i++) { #pragma unroll for (int j = 0; j < 4; j++) { C_buffer[(reg_1_idx * 4 + i) * 32 + reg_2_idx * 4 + j] = reg_array[i * 4 + j]; } } __syncthreads(); #pragma unroll for (int i = 0; i < 16; i++) { atomicAdd(&dense_C_pt[dim_stride * 32 + i * 128 + thread_idx], C_buffer[i * 128 + thread_idx]); } __syncthreads(); } } __global__ void reduce_sum_cuda_kernel( float *sparse_A, // [batch_size, num_block, 32, 32] int *indices, // [batch_size, num_block] float *dense_C, // [batch_size, A_num_block, 32] long batch_size, long A_num_block, long B_num_block, long num_block ) { long batch_idx = blockIdx.y; long block_idx = blockIdx.x * blockDim.y + threadIdx.y; long thread_idx = threadIdx.x; long batch_idx__block_idx = batch_idx * num_block + block_idx; long AB_block_idx = indices[batch_idx__block_idx]; float *sparse_A_pt = &sparse_A[batch_idx__block_idx * 1024]; float reg_array[16]; float value = 0; #pragma unroll for (int i = 0; i < 8; i++) { reg_array[i] = sparse_A_pt[i * 32 + thread_idx]; } #pragma unroll for (int stride = 8; stride < 32; stride = stride + 8) { #pragma unroll for (int i = 0; i < 8; i++) { reg_array[(stride + i) % 16] = sparse_A_pt[(stride + i) * 32 + thread_idx]; } #pragma unroll for (int i = 0; i < 8; i++) { value = value + reg_array[(stride - 8 + i) % 16]; } } #pragma unroll for (int i = 0; i < 8; i++) { value = value + reg_array[8 + i]; } float *dense_C_pt = &dense_C[(batch_idx * A_num_block + AB_block_idx / B_num_block) * 32]; atomicAdd(&dense_C_pt[thread_idx], value); } __global__ void scatter_cuda_kernel( float *dense_A, // [batch_size, A_num_block, 32] int *indices, // [batch_size, num_block] float *sparse_C, // [batch_size, num_block, 32, 32] long batch_size, long A_num_block, long B_num_block, long num_block ) { long batch_idx = blockIdx.y; long block_idx = blockIdx.x * blockDim.y + threadIdx.y; long thread_idx = threadIdx.x; long batch_idx__block_idx = batch_idx * num_block + block_idx; long AB_block_idx = indices[batch_idx__block_idx]; float *dense_A_pt = &dense_A[(batch_idx * A_num_block + AB_block_idx / B_num_block) * 32]; float *sparse_C_pt = &sparse_C[(batch_idx * num_block + block_idx) * 1024]; float value = dense_A_pt[thread_idx]; #pragma unroll for (int i = 0; i < 32; i++) { sparse_C_pt[i * 32 + thread_idx] = value; } }
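// Illustrative sketch (not part of the dataset files above): the kernels in the pair above
// address 32x32 tiles through a flat `indices` array, recovering each tile's (A-block, B-block)
// pair as AB_block_idx / B_num_block and AB_block_idx % B_num_block. The tiny standalone program
// below only demonstrates that packing convention; the numbers are made up.
#include <cstdio>

static long pack_block(long a_block, long b_block, long B_num_block)
{
    return a_block * B_num_block + b_block;      // how an entry of `indices` encodes a tile
}

static void unpack_block(long ab_block_idx, long B_num_block, long *a_block, long *b_block)
{
    *a_block = ab_block_idx / B_num_block;       // block index into dense_A
    *b_block = ab_block_idx % B_num_block;       // block index into dense_B
}

int main()
{
    const long B_num_block = 8;                  // assumed number of 32-row blocks in B
    long idx = pack_block(3, 5, B_num_block);
    long a, b;
    unpack_block(idx, B_num_block, &a, &b);
    printf("indices entry %ld -> A block %ld, B block %ld\n", idx, a, b);
    return 0;
}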
3e472c8751a0f93e40456775c453bb188c0ee8e1.hip
// !!! This is a file automatically generated by hipify!!! #include <cupy/complex.cuh> #include <hipcub/hipcub.hpp> #include <cub/device/device_segmented_reduce.cuh> #include "cupy_cub.h" #include <stdexcept> using namespace cub; /* ------------------------------------ Minimum boilerplate to support complex numbers ------------------------------------ */ // - This works only because all data fields in the *Traits struct are not // used in <hipcub/hipcub.hpp>. // - The Max() and Lowest() below are chosen to comply with NumPy's lexical // ordering; note that std::numeric_limits<T> does not support complex // numbers as in general the comparison is ill defined. // - DO NOT USE THIS STUB for supporting CUB sorting!!!!!! template <> struct FpLimits<complex<float>> { static __host__ __device__ __forceinline__ complex<float> Max() { return (complex<float>(FLT_MAX, FLT_MAX)); } static __host__ __device__ __forceinline__ complex<float> Lowest() { return (complex<float>(FLT_MAX * float(-1), FLT_MAX * float(-1))); } }; template <> struct FpLimits<complex<double>> { static __host__ __device__ __forceinline__ complex<double> Max() { return (complex<double>(DBL_MAX, DBL_MAX)); } static __host__ __device__ __forceinline__ complex<double> Lowest() { return (complex<double>(DBL_MAX * double(-1), DBL_MAX * double(-1))); } }; template <> struct NumericTraits<complex<float>> : BaseTraits<FLOATING_POINT, true, false, unsigned int, complex<float>> {}; template <> struct NumericTraits<complex<double>> : BaseTraits<FLOATING_POINT, true, false, unsigned long long, complex<double>> {}; /* ------------------------------------ end of boilerplate ------------------------------------ */ /* ------------------------------------ "Patches" to CUB ------------------------------------ These stubs are needed because CUB does not handle NaNs properly, while NumPy has certain behaviors with which we must comply. TODO(leofang): support half precision? */ // // Max() // // specialization for float for handling NaNs template <> __host__ __device__ __forceinline__ float Max::operator()(const float &a, const float &b) const { // NumPy behavior: NaN is always chosen! if (isnan(a)) {return a;} else if (isnan(b)) {return b;} else {return CUB_MAX(a, b);} } // specialization for double for handling NaNs template <> __host__ __device__ __forceinline__ double Max::operator()(const double &a, const double &b) const { // NumPy behavior: NaN is always chosen! 
if (isnan(a)) {return a;} else if (isnan(b)) {return b;} else {return CUB_MAX(a, b);} } // specialization for complex<float> for handling NaNs template <> __host__ __device__ __forceinline__ complex<float> Max::operator()(const complex<float> &a, const complex<float> &b) const { // - TODO(leofang): just call max() here when the bug in cupy/complex.cuh is fixed // - NumPy behavior: If both a and b contain NaN, the first argument is chosen // - isnan() and max() are defined in cupy/complex.cuh if (isnan(a)) {return a;} else if (isnan(b)) {return b;} else {return max(a, b);} } // specialization for complex<double> for handling NaNs template <> __host__ __device__ __forceinline__ complex<double> Max::operator()(const complex<double> &a, const complex<double> &b) const { // - TODO(leofang): just call max() here when the bug in cupy/complex.cuh is fixed // - NumPy behavior: If both a and b contain NaN, the first argument is chosen // - isnan() and max() are defined in cupy/complex.cuh if (isnan(a)) {return a;} else if (isnan(b)) {return b;} else {return max(a, b);} } // // Min() // // specialization for float for handling NaNs template <> __host__ __device__ __forceinline__ float Min::operator()(const float &a, const float &b) const { // NumPy behavior: NaN is always chosen! if (isnan(a)) {return a;} else if (isnan(b)) {return b;} else {return CUB_MIN(a, b);} } // specialization for double for handling NaNs template <> __host__ __device__ __forceinline__ double Min::operator()(const double &a, const double &b) const { // NumPy behavior: NaN is always chosen! if (isnan(a)) {return a;} else if (isnan(b)) {return b;} else {return CUB_MIN(a, b);} } // specialization for complex<float> for handling NaNs template <> __host__ __device__ __forceinline__ complex<float> Min::operator()(const complex<float> &a, const complex<float> &b) const { // - TODO(leofang): just call min() here when the bug in cupy/complex.cuh is fixed // - NumPy behavior: If both a and b contain NaN, the first argument is chosen // - isnan() and min() are defined in cupy/complex.cuh if (isnan(a)) {return a;} else if (isnan(b)) {return b;} else {return min(a, b);} } // specialization for complex<double> for handling NaNs template <> __host__ __device__ __forceinline__ complex<double> Min::operator()(const complex<double> &a, const complex<double> &b) const { // - TODO(leofang): just call min() here when the bug in cupy/complex.cuh is fixed // - NumPy behavior: If both a and b contain NaN, the first argument is chosen // - isnan() and min() are defined in cupy/complex.cuh if (isnan(a)) {return a;} else if (isnan(b)) {return b;} else {return min(a, b);} } // // ArgMax() // // specialization for float for handling NaNs template <> __host__ __device__ __forceinline__ KeyValuePair<int, float> ArgMax::operator()( const KeyValuePair<int, float> &a, const KeyValuePair<int, float> &b) const { if (isnan(a.value)) return a; else if (isnan(b.value)) return b; else if ((b.value > a.value) || ((a.value == b.value) && (b.key < a.key))) return b; else return a; } // specialization for double for handling NaNs template <> __host__ __device__ __forceinline__ KeyValuePair<int, double> ArgMax::operator()( const KeyValuePair<int, double> &a, const KeyValuePair<int, double> &b) const { if (isnan(a.value)) return a; else if (isnan(b.value)) return b; else if ((b.value > a.value) || ((a.value == b.value) && (b.key < a.key))) return b; else return a; } // specialization for complex<float> for handling NaNs template <> __host__ __device__ __forceinline__ 
KeyValuePair<int, complex<float>> ArgMax::operator()( const KeyValuePair<int, complex<float>> &a, const KeyValuePair<int, complex<float>> &b) const { if (isnan(a.value)) return a; else if (isnan(b.value)) return b; else if ((b.value > a.value) || ((a.value == b.value) && (b.key < a.key))) return b; else return a; } // specialization for complex<double> for handling NaNs template <> __host__ __device__ __forceinline__ KeyValuePair<int, complex<double>> ArgMax::operator()( const KeyValuePair<int, complex<double>> &a, const KeyValuePair<int, complex<double>> &b) const { if (isnan(a.value)) return a; else if (isnan(b.value)) return b; else if ((b.value > a.value) || ((a.value == b.value) && (b.key < a.key))) return b; else return a; } // // ArgMin() // // specialization for float for handling NaNs template <> __host__ __device__ __forceinline__ KeyValuePair<int, float> ArgMin::operator()( const KeyValuePair<int, float> &a, const KeyValuePair<int, float> &b) const { if (isnan(a.value)) return a; else if (isnan(b.value)) return b; else if ((b.value < a.value) || ((a.value == b.value) && (b.key < a.key))) return b; else return a; } // specialization for double for handling NaNs template <> __host__ __device__ __forceinline__ KeyValuePair<int, double> ArgMin::operator()( const KeyValuePair<int, double> &a, const KeyValuePair<int, double> &b) const { if (isnan(a.value)) return a; else if (isnan(b.value)) return b; else if ((b.value < a.value) || ((a.value == b.value) && (b.key < a.key))) return b; else return a; } // specialization for complex<float> for handling NaNs template <> __host__ __device__ __forceinline__ KeyValuePair<int, complex<float>> ArgMin::operator()( const KeyValuePair<int, complex<float>> &a, const KeyValuePair<int, complex<float>> &b) const { if (isnan(a.value)) return a; else if (isnan(b.value)) return b; else if ((b.value < a.value) || ((a.value == b.value) && (b.key < a.key))) return b; else return a; } // specialization for complex<double> for handling NaNs template <> __host__ __device__ __forceinline__ KeyValuePair<int, complex<double>> ArgMin::operator()( const KeyValuePair<int, complex<double>> &a, const KeyValuePair<int, complex<double>> &b) const { if (isnan(a.value)) return a; else if (isnan(b.value)) return b; else if ((b.value < a.value) || ((a.value == b.value) && (b.key < a.key))) return b; else return a; } /* ------------------------------------ End of "patches" ------------------------------------ */ // // **** dtype_dispatcher **** // // This is implemented with reference to the following implementation. // https://github.com/rapidsai/cudf/blob/branch-0.6/cpp/src/utilities/type_dispatcher.hpp // template <class functor_t, typename... Ts> void dtype_dispatcher(int dtype_id, functor_t f, Ts&&... 
args) { switch (dtype_id) { case CUPY_CUB_INT8: return f.template operator()<char>(std::forward<Ts>(args)...); case CUPY_CUB_INT16: return f.template operator()<short>(std::forward<Ts>(args)...); case CUPY_CUB_INT32: return f.template operator()<int>(std::forward<Ts>(args)...); case CUPY_CUB_INT64: return f.template operator()<long>(std::forward<Ts>(args)...); case CUPY_CUB_UINT8: return f.template operator()<unsigned char>(std::forward<Ts>(args)...); case CUPY_CUB_UINT16: return f.template operator()<unsigned short>(std::forward<Ts>(args)...); case CUPY_CUB_UINT32: return f.template operator()<unsigned int>(std::forward<Ts>(args)...); case CUPY_CUB_UINT64: return f.template operator()<unsigned long>(std::forward<Ts>(args)...); case CUPY_CUB_FLOAT32: return f.template operator()<float>(std::forward<Ts>(args)...); case CUPY_CUB_FLOAT64: return f.template operator()<double>(std::forward<Ts>(args)...); case CUPY_CUB_COMPLEX64: return f.template operator()<complex<float>>(std::forward<Ts>(args)...); case CUPY_CUB_COMPLEX128: return f.template operator()<complex<double>>(std::forward<Ts>(args)...); default: throw std::runtime_error("Unsupported dtype ID"); } } // // **** CUB Sum **** // struct _cub_reduce_sum { template <typename T> void operator()(void* workspace, size_t& workspace_size, void* x, void* y, int num_items, hipStream_t s) { DeviceReduce::Sum(workspace, workspace_size, static_cast<T*>(x), static_cast<T*>(y), num_items, s); } }; struct _cub_segmented_reduce_sum { template <typename T> void operator()(void* workspace, size_t& workspace_size, void* x, void* y, int num_segments, void* offset_start, void* offset_end, hipStream_t s) { DeviceSegmentedReduce::Sum(workspace, workspace_size, static_cast<T*>(x), static_cast<T*>(y), num_segments, static_cast<int*>(offset_start), static_cast<int*>(offset_end), s); } }; // // **** CUB Min **** // struct _cub_reduce_min { template <typename T> void operator()(void* workspace, size_t& workspace_size, void* x, void* y, int num_items, hipStream_t s) { DeviceReduce::Min(workspace, workspace_size, static_cast<T*>(x), static_cast<T*>(y), num_items, s); } }; struct _cub_segmented_reduce_min { template <typename T> void operator()(void* workspace, size_t& workspace_size, void* x, void* y, int num_segments, void* offset_start, void* offset_end, hipStream_t s) { DeviceSegmentedReduce::Min(workspace, workspace_size, static_cast<T*>(x), static_cast<T*>(y), num_segments, static_cast<int*>(offset_start), static_cast<int*>(offset_end), s); } }; // // **** CUB Max **** // struct _cub_reduce_max { template <typename T> void operator()(void* workspace, size_t& workspace_size, void* x, void* y, int num_items, hipStream_t s) { DeviceReduce::Max(workspace, workspace_size, static_cast<T*>(x), static_cast<T*>(y), num_items, s); } }; struct _cub_segmented_reduce_max { template <typename T> void operator()(void* workspace, size_t& workspace_size, void* x, void* y, int num_segments, void* offset_start, void* offset_end, hipStream_t s) { DeviceSegmentedReduce::Max(workspace, workspace_size, static_cast<T*>(x), static_cast<T*>(y), num_segments, static_cast<int*>(offset_start), static_cast<int*>(offset_end), s); } }; // // **** CUB ArgMin **** // struct _cub_reduce_argmin { template <typename T> void operator()(void* workspace, size_t& workspace_size, void* x, void* y, int num_items, hipStream_t s) { DeviceReduce::ArgMin(workspace, workspace_size, static_cast<T*>(x), static_cast<KeyValuePair<int, T>*>(y), num_items, s); } }; // TODO(leofang): add _cub_segmented_reduce_argmin 
// // **** CUB ArgMax **** // struct _cub_reduce_argmax { template <typename T> void operator()(void* workspace, size_t& workspace_size, void* x, void* y, int num_items, hipStream_t s) { DeviceReduce::ArgMax(workspace, workspace_size, static_cast<T*>(x), static_cast<KeyValuePair<int, T>*>(y), num_items, s); } }; // TODO(leofang): add _cub_segmented_reduce_argmax // // APIs exposed to CuPy // /* -------- device reduce -------- */ void cub_device_reduce(void* workspace, size_t& workspace_size, void* x, void* y, int num_items, hipStream_t stream, int op, int dtype_id) { switch(op) { case CUPY_CUB_SUM: return dtype_dispatcher(dtype_id, _cub_reduce_sum(), workspace, workspace_size, x, y, num_items, stream); case CUPY_CUB_MIN: return dtype_dispatcher(dtype_id, _cub_reduce_min(), workspace, workspace_size, x, y, num_items, stream); case CUPY_CUB_MAX: return dtype_dispatcher(dtype_id, _cub_reduce_max(), workspace, workspace_size, x, y, num_items, stream); case CUPY_CUB_ARGMIN: return dtype_dispatcher(dtype_id, _cub_reduce_argmin(), workspace, workspace_size, x, y, num_items, stream); case CUPY_CUB_ARGMAX: return dtype_dispatcher(dtype_id, _cub_reduce_argmax(), workspace, workspace_size, x, y, num_items, stream); default: throw std::runtime_error("Unsupported operation"); } } size_t cub_device_reduce_get_workspace_size(void* x, void* y, int num_items, hipStream_t stream, int op, int dtype_id) { size_t workspace_size = 0; cub_device_reduce(NULL, workspace_size, x, y, num_items, stream, op, dtype_id); return workspace_size; } /* -------- device segmented reduce -------- */ void cub_device_segmented_reduce(void* workspace, size_t& workspace_size, void* x, void* y, int num_segments, void* offset_start, void* offset_end, hipStream_t stream, int op, int dtype_id) { switch(op) { case CUPY_CUB_SUM: return dtype_dispatcher(dtype_id, _cub_segmented_reduce_sum(), workspace, workspace_size, x, y, num_segments, offset_start, offset_end, stream); case CUPY_CUB_MIN: return dtype_dispatcher(dtype_id, _cub_segmented_reduce_min(), workspace, workspace_size, x, y, num_segments, offset_start, offset_end, stream); case CUPY_CUB_MAX: return dtype_dispatcher(dtype_id, _cub_segmented_reduce_max(), workspace, workspace_size, x, y, num_segments, offset_start, offset_end, stream); default: throw std::runtime_error("Unsupported operation"); } } size_t cub_device_segmented_reduce_get_workspace_size(void* x, void* y, int num_segments, void* offset_start, void* offset_end, hipStream_t stream, int op, int dtype_id) { size_t workspace_size = 0; cub_device_segmented_reduce(NULL, workspace_size, x, y, num_segments, offset_start, offset_end, stream, op, dtype_id); return workspace_size; }
3e472c8751a0f93e40456775c453bb188c0ee8e1.cu
#include <cupy/complex.cuh> #include <cub/device/device_reduce.cuh> #include <cub/device/device_segmented_reduce.cuh> #include "cupy_cub.h" #include <stdexcept> using namespace cub; /* ------------------------------------ Minimum boilerplate to support complex numbers ------------------------------------ */ // - This works only because all data fields in the *Traits struct are not // used in <cub/device/device_reduce.cuh>. // - The Max() and Lowest() below are chosen to comply with NumPy's lexical // ordering; note that std::numeric_limits<T> does not support complex // numbers as in general the comparison is ill defined. // - DO NOT USE THIS STUB for supporting CUB sorting!!!!!! template <> struct FpLimits<complex<float>> { static __host__ __device__ __forceinline__ complex<float> Max() { return (complex<float>(FLT_MAX, FLT_MAX)); } static __host__ __device__ __forceinline__ complex<float> Lowest() { return (complex<float>(FLT_MAX * float(-1), FLT_MAX * float(-1))); } }; template <> struct FpLimits<complex<double>> { static __host__ __device__ __forceinline__ complex<double> Max() { return (complex<double>(DBL_MAX, DBL_MAX)); } static __host__ __device__ __forceinline__ complex<double> Lowest() { return (complex<double>(DBL_MAX * double(-1), DBL_MAX * double(-1))); } }; template <> struct NumericTraits<complex<float>> : BaseTraits<FLOATING_POINT, true, false, unsigned int, complex<float>> {}; template <> struct NumericTraits<complex<double>> : BaseTraits<FLOATING_POINT, true, false, unsigned long long, complex<double>> {}; /* ------------------------------------ end of boilerplate ------------------------------------ */ /* ------------------------------------ "Patches" to CUB ------------------------------------ These stubs are needed because CUB does not handle NaNs properly, while NumPy has certain behaviors with which we must comply. TODO(leofang): support half precision? */ // // Max() // // specialization for float for handling NaNs template <> __host__ __device__ __forceinline__ float Max::operator()(const float &a, const float &b) const { // NumPy behavior: NaN is always chosen! if (isnan(a)) {return a;} else if (isnan(b)) {return b;} else {return CUB_MAX(a, b);} } // specialization for double for handling NaNs template <> __host__ __device__ __forceinline__ double Max::operator()(const double &a, const double &b) const { // NumPy behavior: NaN is always chosen! 
if (isnan(a)) {return a;} else if (isnan(b)) {return b;} else {return CUB_MAX(a, b);} } // specialization for complex<float> for handling NaNs template <> __host__ __device__ __forceinline__ complex<float> Max::operator()(const complex<float> &a, const complex<float> &b) const { // - TODO(leofang): just call max() here when the bug in cupy/complex.cuh is fixed // - NumPy behavior: If both a and b contain NaN, the first argument is chosen // - isnan() and max() are defined in cupy/complex.cuh if (isnan(a)) {return a;} else if (isnan(b)) {return b;} else {return max(a, b);} } // specialization for complex<double> for handling NaNs template <> __host__ __device__ __forceinline__ complex<double> Max::operator()(const complex<double> &a, const complex<double> &b) const { // - TODO(leofang): just call max() here when the bug in cupy/complex.cuh is fixed // - NumPy behavior: If both a and b contain NaN, the first argument is chosen // - isnan() and max() are defined in cupy/complex.cuh if (isnan(a)) {return a;} else if (isnan(b)) {return b;} else {return max(a, b);} } // // Min() // // specialization for float for handling NaNs template <> __host__ __device__ __forceinline__ float Min::operator()(const float &a, const float &b) const { // NumPy behavior: NaN is always chosen! if (isnan(a)) {return a;} else if (isnan(b)) {return b;} else {return CUB_MIN(a, b);} } // specialization for double for handling NaNs template <> __host__ __device__ __forceinline__ double Min::operator()(const double &a, const double &b) const { // NumPy behavior: NaN is always chosen! if (isnan(a)) {return a;} else if (isnan(b)) {return b;} else {return CUB_MIN(a, b);} } // specialization for complex<float> for handling NaNs template <> __host__ __device__ __forceinline__ complex<float> Min::operator()(const complex<float> &a, const complex<float> &b) const { // - TODO(leofang): just call min() here when the bug in cupy/complex.cuh is fixed // - NumPy behavior: If both a and b contain NaN, the first argument is chosen // - isnan() and min() are defined in cupy/complex.cuh if (isnan(a)) {return a;} else if (isnan(b)) {return b;} else {return min(a, b);} } // specialization for complex<double> for handling NaNs template <> __host__ __device__ __forceinline__ complex<double> Min::operator()(const complex<double> &a, const complex<double> &b) const { // - TODO(leofang): just call min() here when the bug in cupy/complex.cuh is fixed // - NumPy behavior: If both a and b contain NaN, the first argument is chosen // - isnan() and min() are defined in cupy/complex.cuh if (isnan(a)) {return a;} else if (isnan(b)) {return b;} else {return min(a, b);} } // // ArgMax() // // specialization for float for handling NaNs template <> __host__ __device__ __forceinline__ KeyValuePair<int, float> ArgMax::operator()( const KeyValuePair<int, float> &a, const KeyValuePair<int, float> &b) const { if (isnan(a.value)) return a; else if (isnan(b.value)) return b; else if ((b.value > a.value) || ((a.value == b.value) && (b.key < a.key))) return b; else return a; } // specialization for double for handling NaNs template <> __host__ __device__ __forceinline__ KeyValuePair<int, double> ArgMax::operator()( const KeyValuePair<int, double> &a, const KeyValuePair<int, double> &b) const { if (isnan(a.value)) return a; else if (isnan(b.value)) return b; else if ((b.value > a.value) || ((a.value == b.value) && (b.key < a.key))) return b; else return a; } // specialization for complex<float> for handling NaNs template <> __host__ __device__ __forceinline__ 
KeyValuePair<int, complex<float>> ArgMax::operator()( const KeyValuePair<int, complex<float>> &a, const KeyValuePair<int, complex<float>> &b) const { if (isnan(a.value)) return a; else if (isnan(b.value)) return b; else if ((b.value > a.value) || ((a.value == b.value) && (b.key < a.key))) return b; else return a; } // specialization for complex<double> for handling NaNs template <> __host__ __device__ __forceinline__ KeyValuePair<int, complex<double>> ArgMax::operator()( const KeyValuePair<int, complex<double>> &a, const KeyValuePair<int, complex<double>> &b) const { if (isnan(a.value)) return a; else if (isnan(b.value)) return b; else if ((b.value > a.value) || ((a.value == b.value) && (b.key < a.key))) return b; else return a; } // // ArgMin() // // specialization for float for handling NaNs template <> __host__ __device__ __forceinline__ KeyValuePair<int, float> ArgMin::operator()( const KeyValuePair<int, float> &a, const KeyValuePair<int, float> &b) const { if (isnan(a.value)) return a; else if (isnan(b.value)) return b; else if ((b.value < a.value) || ((a.value == b.value) && (b.key < a.key))) return b; else return a; } // specialization for double for handling NaNs template <> __host__ __device__ __forceinline__ KeyValuePair<int, double> ArgMin::operator()( const KeyValuePair<int, double> &a, const KeyValuePair<int, double> &b) const { if (isnan(a.value)) return a; else if (isnan(b.value)) return b; else if ((b.value < a.value) || ((a.value == b.value) && (b.key < a.key))) return b; else return a; } // specialization for complex<float> for handling NaNs template <> __host__ __device__ __forceinline__ KeyValuePair<int, complex<float>> ArgMin::operator()( const KeyValuePair<int, complex<float>> &a, const KeyValuePair<int, complex<float>> &b) const { if (isnan(a.value)) return a; else if (isnan(b.value)) return b; else if ((b.value < a.value) || ((a.value == b.value) && (b.key < a.key))) return b; else return a; } // specialization for complex<double> for handling NaNs template <> __host__ __device__ __forceinline__ KeyValuePair<int, complex<double>> ArgMin::operator()( const KeyValuePair<int, complex<double>> &a, const KeyValuePair<int, complex<double>> &b) const { if (isnan(a.value)) return a; else if (isnan(b.value)) return b; else if ((b.value < a.value) || ((a.value == b.value) && (b.key < a.key))) return b; else return a; } /* ------------------------------------ End of "patches" ------------------------------------ */ // // **** dtype_dispatcher **** // // This is implemented with reference to the following implementation. // https://github.com/rapidsai/cudf/blob/branch-0.6/cpp/src/utilities/type_dispatcher.hpp // template <class functor_t, typename... Ts> void dtype_dispatcher(int dtype_id, functor_t f, Ts&&... 
args) { switch (dtype_id) { case CUPY_CUB_INT8: return f.template operator()<char>(std::forward<Ts>(args)...); case CUPY_CUB_INT16: return f.template operator()<short>(std::forward<Ts>(args)...); case CUPY_CUB_INT32: return f.template operator()<int>(std::forward<Ts>(args)...); case CUPY_CUB_INT64: return f.template operator()<long>(std::forward<Ts>(args)...); case CUPY_CUB_UINT8: return f.template operator()<unsigned char>(std::forward<Ts>(args)...); case CUPY_CUB_UINT16: return f.template operator()<unsigned short>(std::forward<Ts>(args)...); case CUPY_CUB_UINT32: return f.template operator()<unsigned int>(std::forward<Ts>(args)...); case CUPY_CUB_UINT64: return f.template operator()<unsigned long>(std::forward<Ts>(args)...); case CUPY_CUB_FLOAT32: return f.template operator()<float>(std::forward<Ts>(args)...); case CUPY_CUB_FLOAT64: return f.template operator()<double>(std::forward<Ts>(args)...); case CUPY_CUB_COMPLEX64: return f.template operator()<complex<float>>(std::forward<Ts>(args)...); case CUPY_CUB_COMPLEX128: return f.template operator()<complex<double>>(std::forward<Ts>(args)...); default: throw std::runtime_error("Unsupported dtype ID"); } } // // **** CUB Sum **** // struct _cub_reduce_sum { template <typename T> void operator()(void* workspace, size_t& workspace_size, void* x, void* y, int num_items, cudaStream_t s) { DeviceReduce::Sum(workspace, workspace_size, static_cast<T*>(x), static_cast<T*>(y), num_items, s); } }; struct _cub_segmented_reduce_sum { template <typename T> void operator()(void* workspace, size_t& workspace_size, void* x, void* y, int num_segments, void* offset_start, void* offset_end, cudaStream_t s) { DeviceSegmentedReduce::Sum(workspace, workspace_size, static_cast<T*>(x), static_cast<T*>(y), num_segments, static_cast<int*>(offset_start), static_cast<int*>(offset_end), s); } }; // // **** CUB Min **** // struct _cub_reduce_min { template <typename T> void operator()(void* workspace, size_t& workspace_size, void* x, void* y, int num_items, cudaStream_t s) { DeviceReduce::Min(workspace, workspace_size, static_cast<T*>(x), static_cast<T*>(y), num_items, s); } }; struct _cub_segmented_reduce_min { template <typename T> void operator()(void* workspace, size_t& workspace_size, void* x, void* y, int num_segments, void* offset_start, void* offset_end, cudaStream_t s) { DeviceSegmentedReduce::Min(workspace, workspace_size, static_cast<T*>(x), static_cast<T*>(y), num_segments, static_cast<int*>(offset_start), static_cast<int*>(offset_end), s); } }; // // **** CUB Max **** // struct _cub_reduce_max { template <typename T> void operator()(void* workspace, size_t& workspace_size, void* x, void* y, int num_items, cudaStream_t s) { DeviceReduce::Max(workspace, workspace_size, static_cast<T*>(x), static_cast<T*>(y), num_items, s); } }; struct _cub_segmented_reduce_max { template <typename T> void operator()(void* workspace, size_t& workspace_size, void* x, void* y, int num_segments, void* offset_start, void* offset_end, cudaStream_t s) { DeviceSegmentedReduce::Max(workspace, workspace_size, static_cast<T*>(x), static_cast<T*>(y), num_segments, static_cast<int*>(offset_start), static_cast<int*>(offset_end), s); } }; // // **** CUB ArgMin **** // struct _cub_reduce_argmin { template <typename T> void operator()(void* workspace, size_t& workspace_size, void* x, void* y, int num_items, cudaStream_t s) { DeviceReduce::ArgMin(workspace, workspace_size, static_cast<T*>(x), static_cast<KeyValuePair<int, T>*>(y), num_items, s); } }; // TODO(leofang): add 
_cub_segmented_reduce_argmin // // **** CUB ArgMax **** // struct _cub_reduce_argmax { template <typename T> void operator()(void* workspace, size_t& workspace_size, void* x, void* y, int num_items, cudaStream_t s) { DeviceReduce::ArgMax(workspace, workspace_size, static_cast<T*>(x), static_cast<KeyValuePair<int, T>*>(y), num_items, s); } }; // TODO(leofang): add _cub_segmented_reduce_argmax // // APIs exposed to CuPy // /* -------- device reduce -------- */ void cub_device_reduce(void* workspace, size_t& workspace_size, void* x, void* y, int num_items, cudaStream_t stream, int op, int dtype_id) { switch(op) { case CUPY_CUB_SUM: return dtype_dispatcher(dtype_id, _cub_reduce_sum(), workspace, workspace_size, x, y, num_items, stream); case CUPY_CUB_MIN: return dtype_dispatcher(dtype_id, _cub_reduce_min(), workspace, workspace_size, x, y, num_items, stream); case CUPY_CUB_MAX: return dtype_dispatcher(dtype_id, _cub_reduce_max(), workspace, workspace_size, x, y, num_items, stream); case CUPY_CUB_ARGMIN: return dtype_dispatcher(dtype_id, _cub_reduce_argmin(), workspace, workspace_size, x, y, num_items, stream); case CUPY_CUB_ARGMAX: return dtype_dispatcher(dtype_id, _cub_reduce_argmax(), workspace, workspace_size, x, y, num_items, stream); default: throw std::runtime_error("Unsupported operation"); } } size_t cub_device_reduce_get_workspace_size(void* x, void* y, int num_items, cudaStream_t stream, int op, int dtype_id) { size_t workspace_size = 0; cub_device_reduce(NULL, workspace_size, x, y, num_items, stream, op, dtype_id); return workspace_size; } /* -------- device segmented reduce -------- */ void cub_device_segmented_reduce(void* workspace, size_t& workspace_size, void* x, void* y, int num_segments, void* offset_start, void* offset_end, cudaStream_t stream, int op, int dtype_id) { switch(op) { case CUPY_CUB_SUM: return dtype_dispatcher(dtype_id, _cub_segmented_reduce_sum(), workspace, workspace_size, x, y, num_segments, offset_start, offset_end, stream); case CUPY_CUB_MIN: return dtype_dispatcher(dtype_id, _cub_segmented_reduce_min(), workspace, workspace_size, x, y, num_segments, offset_start, offset_end, stream); case CUPY_CUB_MAX: return dtype_dispatcher(dtype_id, _cub_segmented_reduce_max(), workspace, workspace_size, x, y, num_segments, offset_start, offset_end, stream); default: throw std::runtime_error("Unsupported operation"); } } size_t cub_device_segmented_reduce_get_workspace_size(void* x, void* y, int num_segments, void* offset_start, void* offset_end, cudaStream_t stream, int op, int dtype_id) { size_t workspace_size = 0; cub_device_segmented_reduce(NULL, workspace_size, x, y, num_segments, offset_start, offset_end, stream, op, dtype_id); return workspace_size; }
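// Illustrative sketch (not part of the dataset files above): the two-phase workspace pattern that
// cub_device_reduce_get_workspace_size and cub_device_reduce wrap. The first call with a null
// workspace only reports the required byte count; the second call runs the reduction. The input
// values below are made up.
#include <cstdio>
#include <cuda_runtime.h>
#include <cub/cub.cuh>

int main()
{
    const int num_items = 4;
    float h_in[num_items] = {1.f, 2.f, 3.f, 4.f};

    float *d_in = nullptr, *d_out = nullptr;
    cudaMalloc(&d_in, num_items * sizeof(float));
    cudaMalloc(&d_out, sizeof(float));
    cudaMemcpy(d_in, h_in, num_items * sizeof(float), cudaMemcpyHostToDevice);

    void *d_temp = nullptr;
    size_t temp_bytes = 0;
    cub::DeviceReduce::Sum(d_temp, temp_bytes, d_in, d_out, num_items);  // size query
    cudaMalloc(&d_temp, temp_bytes);
    cub::DeviceReduce::Sum(d_temp, temp_bytes, d_in, d_out, num_items);  // actual reduction

    float h_out = 0.f;
    cudaMemcpy(&h_out, d_out, sizeof(float), cudaMemcpyDeviceToHost);
    printf("sum = %f\n", h_out);   // 10.0

    cudaFree(d_temp);
    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}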
8ec6f4494e1e8d84ae64f5936e6cc41a64bc87e8.hip
// !!! This is a file automatically generated by hipify!!!
/**
 * David Dao, Johannes Rausch, Michal Szymczak
 * TU Munich
 * Sep 2015
 */
#include <stdio.h>
#include "mex.h"
#include <iostream>
#include <vector>
#include <cmath>
#include <hip/hip_runtime.h>
#include <rocblas.h>
#include <cula.h>
#include <cula_lapack.h>

using namespace std;

void checkStatus(culaStatus status)
{
    char buf[80];
    if(!status)
        return;

    culaGetErrorInfoString(status, culaGetErrorInfo(), buf, sizeof(buf));
    printf("%s\n %d", buf, status);
    culaShutdown();
    //exit(EXIT_FAILURE);
}

/* Input arguments */
#define IN_A prhs[0]
#define IN_B prhs[1]
#define IN_X prhs[2]

/* Output arguments */
#define OUT_X plhs[0]

/* Gateway routine */
void mexFunction( int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[] )
{
    double *matrix_A, *matrix_B, *matrix_X;
    int N, D;
    int iter = 0;

    /* Get the sizes of each input argument */
    N = mxGetM(IN_A);
    D = mxGetN(IN_B);

    /* Assign pointers to the input arguments */
    matrix_A = mxGetPr(IN_A);
    matrix_B = mxGetPr(IN_B);
    matrix_X = mxGetPr(IN_X);

    int matrix_pivot[N];

    culaStatus status;
    status = culaInitialize();
    checkStatus(status);

    status = culaDsgesv(N, D, matrix_A, N, matrix_pivot, matrix_B, N, matrix_X, N, &iter);
    checkStatus(status);

    culaShutdown();
    return;
}
8ec6f4494e1e8d84ae64f5936e6cc41a64bc87e8.cu
/**
 * David Dao, Johannes Rausch, Michal Szymczak
 * TU Munich
 * Sep 2015
 */
#include <stdio.h>
#include "mex.h"
#include <iostream>
#include <vector>
#include <cmath>
#include <cuda_runtime.h>
#include <cublas_v2.h>
#include <cula.h>
#include <cula_lapack.h>

using namespace std;

void checkStatus(culaStatus status)
{
    char buf[80];
    if(!status)
        return;

    culaGetErrorInfoString(status, culaGetErrorInfo(), buf, sizeof(buf));
    printf("%s\n %d", buf, status);
    culaShutdown();
    //exit(EXIT_FAILURE);
}

/* Input arguments */
#define IN_A prhs[0]
#define IN_B prhs[1]
#define IN_X prhs[2]

/* Output arguments */
#define OUT_X plhs[0]

/* Gateway routine */
void mexFunction( int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[] )
{
    double *matrix_A, *matrix_B, *matrix_X;
    int N, D;
    int iter = 0;

    /* Get the sizes of each input argument */
    N = mxGetM(IN_A);
    D = mxGetN(IN_B);

    /* Assign pointers to the input arguments */
    matrix_A = mxGetPr(IN_A);
    matrix_B = mxGetPr(IN_B);
    matrix_X = mxGetPr(IN_X);

    int matrix_pivot[N];

    culaStatus status;
    status = culaInitialize();
    checkStatus(status);

    status = culaDsgesv(N, D, matrix_A, N, matrix_pivot, matrix_B, N, matrix_X, N, &iter);
    checkStatus(status);

    culaShutdown();
    return;
}
e4c7ecaf8f6ac08e6333c6eea4598993678b913e.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include "shared/kernels.h"

int main(void)
{
    int cuda_devices = 0;
    hipGetDeviceCount(&cuda_devices);
    if(cuda_devices == 0) {
        printf("No Cuda hardware found. Exiting.\n");
        return 0;
    }

    if(run_tests() != 0){
        printf("CUDA tests failed! Exiting.\n");
        return 0;
    }

    return 0;
}
e4c7ecaf8f6ac08e6333c6eea4598993678b913e.cu
#include <stdio.h>
#include <cuda_runtime.h>
#include "shared/kernels.h"

int main(void)
{
    int cuda_devices = 0;
    cudaGetDeviceCount(&cuda_devices);
    if(cuda_devices == 0) {
        printf("No Cuda hardware found. Exiting.\n");
        return 0;
    }

    if(run_tests() != 0){
        printf("CUDA tests failed! Exiting.\n");
        return 0;
    }

    return 0;
}
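The harness above ignores the status code returned by cudaGetDeviceCount, which can fail for reasons other than "zero devices" (for example a missing or mismatched driver). A slightly more defensive detection helper, shown here only as a sketch and not part of the original project, checks the cudaError_t as well:

#include <stdio.h>
#include <cuda_runtime.h>

// Sketch only: device detection that also reports driver/runtime errors.
static int detect_cuda_devices(void)
{
    int cuda_devices = 0;
    cudaError_t err = cudaGetDeviceCount(&cuda_devices);
    if (err != cudaSuccess) {
        printf("cudaGetDeviceCount failed: %s\n", cudaGetErrorString(err));
        return 0;
    }
    return cuda_devices;
}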
4cc87d7665b557b35cb575a9cd485c3b9500c3de.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include"Header.h" //============================================================================================================================================================ //============================================================================================================================================================ //============================================================================================================================================================ // This.cu file contains the implementation of core CUDA kernels required to implement a deep feed - forward convolutional network. //============================================================================================================================================================ //============================================================================================================================================================ //============================================================================================================================================================ //============================================================================================================================================================ template<int> __global__ void MaxPoolingForward(float *s, float *c, int *Indx, int CRC, int SRC, int Src1, int Src2, int NumCh) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* This cuda kernel implements the forward pass for maxpooling. The input channel size is CRCxCRC, the pooling size is Src1xSrc1, the pooling stride is Src2xSrc2, and the output channel size is SRCxSRC where SRC = CRC/Src2. The index of the maximum output in each pooling square is stored in the Indx matrix to speed up the backward pass through the maxpooling stage. */ /**** Argument List ****/ /* NumCh:- Number of output channels per image. Total number of channels in a convolutional layer is NumCh*BatchSize. Src1:- Pooling size is Src1Src1. Src2:- Pooling stride. CRC:- Input channel size is CRCCRC (output channel size of previous stage before applying maxpooling). SRC:- Output channel size is SRCSRC after applying pooling. c:- Input buffer that conatians all input channels (output channels of the previous layer). Indx:- Output buffer to store the positon of the maximum value in each pooling square to be used by MaxPoolingBackward in the backward pass. s:- Output buffer where this cuda kernel stores the all output channels after applying maximum pooling. 
*/ //------------------------------------------------------------------------------------------------------------------------------------------------- int iss = blockIdx.x*blockDim.x + threadIdx.x; int SN = SRC*SRC; if (iss < SN) { int n = blockIdx.y + NumCh*blockIdx.z; int icx = Src1 * (iss % SRC); int icy = Src1 * (iss / SRC); int is = iss + n*SN; int ic = (n*CRC + icy)*CRC + icx; float max = -9.9e+30f; int index; for (int i = -1; i < Src2 - 1; i++) for (int j = -1; j < Src2 - 1; j++) { int j1 = icx + j, i1 = icy + i, ix = ic + i*CRC + j; if (j1 >= 0 && i1 >= 0 && j1 < CRC && i1 < CRC && c[ix] > max) { index = ix; max = c[index]; } } s[is] = max; Indx[is] = index; } } //============================================================================================================================================================ template<int> __global__ void MaxPoolingBackward(float *c, float *s, int *Indx, int SN, int NumCh) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* This cuda kernel implements the backward pass for maxpooling. The only job of this cuda function is to propagate back the error signal through the maxpooling stage, there is no parameter update required because the maxpooling stage has no trainable parameters. The error signal will only be passed to the location of the maximum value in each pooling square using the Indx matrix which stores those maximum values. */ /**** Argument List ****/ /* NumCh:- Number of output channels per image. Total number of channels in a convolutional layer is NumCh*BatchSize. SN:- The channel size on the output side of the mapooling stage, SN = SRCSRC. s:- Input buffer that contains the error signal with respect to the activations of all channels on the output side of the maxpooling stage. Indx:- Input buffer where the positon of the maximum value in each pooling square was stored in the forward pass by MaxPoolingForward. c:- Output buffer where this cuda kernel stores the error signal with respect to the activations of all channels on the input side of the maxpooling stage. */ //------------------------------------------------------------------------------------------------------------------------------------------------- int iss = blockIdx.x*blockDim.x + threadIdx.x; if (iss < SN) { int n = blockIdx.y + NumCh*blockIdx.z; int is = n*SN + iss; atomicAdd(c + Indx[is], s[is]); } } //============================================================================================================================================================ template<const int BLOCKSIZE> __global__ void GlobalAvgPoolingForward(float *S, float *C, int NumCh, int ChSize) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* This cuda kernel implements the forward pass of the global average pooling stage used after the last convolutional layer. The input channel size is ChSize which is reduced to a single value that is equal to the average of all values in the channel. */ /**** Argument List ****/ /* NumCh:- Number of output channels per image. Total number of channels in a convolutional layer is NumCh*BatchSize. ChSize:- Input Channel size. c:- Input buffer that conatians all input channels (output channels of the previous layer). s:- Output buffer where this cuda kernel stores the all output channels after applying global average pooling. 
*/ //------------------------------------------------------------------------------------------------------------------------------------------------- __shared__ float s[BLOCKSIZE]; int is = threadIdx.x; if (is < ChSize) { int n = blockIdx.x + NumCh*blockIdx.y; int b = blockIdx.y; int ig1 = n*ChSize + is; int ig2 = n + b; float sum = 0.0f; while (ig1 < (n + 1)*ChSize) { sum += C[ig1]; ig1 += BLOCKSIZE; } s[is] = sum; __syncthreads(); int i = blockDim.x / 2; while (i > 0 && is + i < ChSize) { if (is < i) s[is] += s[is + i]; __syncthreads(); i /= 2; } if (is == 0) S[ig2] = s[0] / ChSize; } } //============================================================================================================================================================ template<const int BLOCKSIZE> __global__ void GlobalAvgPoolingBackward(float *C, float *S, int NumCh, int ChSize) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* This cuda kernel implements the backward pass of the global average pooling stage. This stage has no trainable parameters, and therefore this function only propagates back the error signal through the average pooling stage. */ /**** Argument List ****/ /* NumCh:- Number of output channels per image. Total number of channels in a convolutional layer is NumCh*BatchSize. ChSize:- Input Channel size. s:- Input buffer that contains the error signal with respect to the activations of all channels on the output side of the average pooling stage. c:- Output buffer where this cuda kernel stores the error signal with respect to the activations of all channels on the input side of the average pooling stage. */ //------------------------------------------------------------------------------------------------------------------------------------------------- __shared__ float s_temp; int is = threadIdx.x; if (is < ChSize) { int n = blockIdx.x + NumCh*blockIdx.y; int b = blockIdx.y; int ig1 = n*ChSize + is; int ig2 = n + b; if (is == 0)s_temp = S[ig2]; __syncthreads(); float temp = s_temp / ChSize; while (ig1 < (n + 1)*ChSize) { C[ig1] = temp; ig1 += BLOCKSIZE; } } } //============================================================================================================================================================ template < const int SIZE, const int BLOCKSIZE > __global__ void Softmax(float *y, int *t, int *Indx, float *mse, float *count) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* This cuda kernel implements both the forward and backward passes of the softmax function. Because the softmax stage is the last stage in the network, doing the backward propagation of the error signal immediately after doing the forward pass in the same cuda function is more efficient and adds minimal cost. The implementation is slightly complex because it takes into consideration the possibility of overflow, and the thread block size limitation to 1024 cuda threads per block. */ /**** Argument List ****/ /* Indx:- input buffer that stores the indices or locations of the images in the current batch. t:- input buffer that contains the image labels. count:- output variable to store the total number of images that were correctly classified for the training set or validation set. 
mse:- output variable to accumulate the mean square error of the training set or validation set y:- input/output buffer where the inputs to the softmax function are stored and where this kernel stores the error signal at the input side of the softmax stage. */ //------------------------------------------------------------------------------------------------------------------------------------------------- __shared__ float s[BLOCKSIZE]; __shared__ double sD[BLOCKSIZE]; __shared__ int indx[BLOCKSIZE], s_t; int is = threadIdx.x; int n = blockIdx.x; int ig = n*SIZE + is, ig1 = ig; if (is == 0) { int indx1 = Indx[n]; s_t = t[indx1]; } float tempy[8], max = -9.9e+30f, mse1; double tempyD[8], sum; int k = 0, ix; while (ig < (n + 1)*SIZE) { tempy[k] = y[ig]; if (tempy[k]>max){ max = tempy[k]; ix = k; } k++; ig += BLOCKSIZE; } s[is] = max; indx[is] = is + BLOCKSIZE*ix; __syncthreads(); int i = BLOCKSIZE / 2; while (i > 0) { if (is < i && is + i < SIZE) { if (s[is + i] > s[is]) { s[is] = s[is + i]; indx[is] = indx[is + i]; } } __syncthreads(); i /= 2; } max = s[0]; //---------------------------------------------------------------------------------------------- ig = ig1; sum = 0.0; k = 0; float a = 1.0; while (ig < (n + 1)*SIZE) { if (max > 700){ a = (700 / max); tempy[k] *= a; } tempyD[k] = exp(double(tempy[k])); sum += tempyD[k]; k++; ig += BLOCKSIZE; } sD[is] = sum; __syncthreads(); i = BLOCKSIZE / 2; while (i > 0) { if (is < i && is + i < SIZE) sD[is] += sD[is + i]; __syncthreads(); i /= 2; } sum = sD[0]; //---------------------------------------------------------------------------------------------- ig = ig1; mse1 = 0.0f; k = 0; while (ig < (n + 1)*SIZE) { float temp = float(tempyD[k] / (sum + 2.0e-20)); if (is + k*BLOCKSIZE == s_t) temp -= 1.0f; y[ig] = a*temp; mse1 += temp * temp; k++; ig += BLOCKSIZE; } s[is] = mse1; __syncthreads(); i = BLOCKSIZE / 2; while (i > 0) { if (is < i && is + i < SIZE) s[is] += s[is + i]; __syncthreads(); i /= 2; } if (is == 0) { mse[n] += s[0]; if (indx[0] == s_t) atomicAdd(count, 1.0f); } } //============================================================================================================================================================ template < const int SIZE, const int BLOCKSIZE > __global__ void SoftmaxInference(float *ys, float *y, int *t, float *mse) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* This cuda kernel implements the softmax function used in the inference stage. This function is similar in structure to the Softmax cuda function used in the training phase. However, rather than propagating back the error signal, this function calculates and stores the whole output label for each test image in Ys which then can be analyzed to calculate the mean square error, the confusion matrix, and the classification rate of the test set. */ /**** Argument List ****/ /* t:- input buffer that contains the image labels. mse:- output variable to accumulate the mean square error of the test set. y:- input buffer where the inputs to the softmax function are stored. ys:- output buffer where this kernel stores the whole predicted labels of the test images. 
*/ //------------------------------------------------------------------------------------------------------------------------------------------------- __shared__ float s[BLOCKSIZE]; __shared__ double sD[BLOCKSIZE]; __shared__ int s_t; int is = threadIdx.x; int n = blockIdx.x; int ig = n*SIZE + is, ig1 = ig; if (is == 0){ s_t = t[n]; } float tempy[8], max = -9.9e+30f, mse1; double tempyD[8], sum; int k = 0; while (ig < (n + 1)*SIZE) { tempy[k] = y[ig]; max = fmaxf(max, tempy[k]); k++; ig += BLOCKSIZE; } s[is] = max; __syncthreads(); int i = BLOCKSIZE / 2; while (i > 0) { if (is < i && is + i < SIZE) { s[is] = fmaxf(s[is], s[is + i]); } __syncthreads(); i /= 2; } max = s[0]; //---------------------------------------------------------------------------------------------- ig = ig1; sum = 0.0; k = 0; while (ig < (n + 1)*SIZE) { if (max > 700)tempy[k] *= 700 / max; tempyD[k] = exp(double(tempy[k])); sum += tempyD[k]; k++; ig += BLOCKSIZE; } sD[is] = sum; __syncthreads(); i = BLOCKSIZE / 2; while (i > 0) { if (is < i && is + i < SIZE) sD[is] += sD[is + i]; __syncthreads(); i /= 2; } sum = sD[0]; //---------------------------------------------------------------------------------------------- ig = ig1; mse1 = 0.0f; k = 0; while (ig < (n + 1)*SIZE) { float temp = float(tempyD[k] / (sum + 2.0e-20)); ys[ig] += temp; if (is + k*BLOCKSIZE == s_t) temp -= 1.0f; mse1 += temp * temp; k++; ig += BLOCKSIZE; } s[is] = mse1; __syncthreads(); i = BLOCKSIZE / 2; while (i > 0) { if (is < i && is + i < SIZE) s[is] += s[is + i]; __syncthreads(); i /= 2; } if (is == 0) { mse[n] += s[0]; } } //============================================================================================================================================================ template < const int SIZE, const int BLOCKSIZE > __global__ void LogSigmoid(float *y, int *t, int *Indx, float *mse, float *count) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* This cuda kernel implements the forward and backward passes of the log sigmoid function 1/(1+exp(-x)). */ /**** Argument List ****/ /* Indx:- input buffer that stores the indices or locations of the images in the current batch. t:- input buffer that contains the image labels. count:- output variable to store the total number of images that were correctly classified for the training set or validation set. mse:- output variable to accumulate the mean square error of the training set or validation set y:- input/output buffer where the inputs to the log sigmoid function are stored and where this kernel stores the error signal at the input side of the softmax stage. 
*/ //------------------------------------------------------------------------------------------------------------------------------------------------- __shared__ float s[BLOCKSIZE]; __shared__ int indx[BLOCKSIZE], s_t; int is = threadIdx.x; int n = blockIdx.x; int ig = n*SIZE + is, ig1 = ig; if (is == 0) { int indx1 = Indx[n]; s_t = t[indx1]; } float tempy[8], max = -9.9e+30f, mse1; int k = 0, ix; while (ig < (n + 1)*SIZE) { tempy[k] = y[ig]; if (tempy[k]>max){ max = tempy[k]; ix = k; } k++; ig += BLOCKSIZE; } s[is] = max; indx[is] = is + BLOCKSIZE*ix; __syncthreads(); int i = BLOCKSIZE / 2; while (i > 0) { if (is < i && is + i < SIZE) { if (s[is + i] > s[is]) { s[is] = s[is + i]; indx[is] = indx[is + i]; } } __syncthreads(); i /= 2; } //---------------------------------------------------------------------------------------------- ig = ig1; mse1 = 0.0f; k = 0; while (ig < (n + 1)*SIZE) { float temp = float(1.0f / (1.0f + expf(-tempy[k]))); if (is + k*BLOCKSIZE == s_t) temp -= 1.0f; y[ig] = temp; mse1 += temp * temp; k++; ig += BLOCKSIZE; } s[is] = mse1; __syncthreads(); i = BLOCKSIZE / 2; while (i > 0) { if (is < i && is + i < SIZE) s[is] += s[is + i]; __syncthreads(); i /= 2; } if (is == 0) { mse[n] += s[0]; if (indx[0] == s_t) atomicAdd(count, 1.0f); } } //============================================================================================================================================================ template < const int SIZE, const int BLOCKSIZE > __global__ void LogSigmoidInference(float *ys, float *y) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* This cuda kernel implements the log sigmoid function 1/(1+exp(-x)) used in the Inference stage. It stores the whole output label of each test image in ys. */ /**** Argument List ****/ /* y:- input buffer where the inputs to the softmax function are stored. ys:- output buffer where this kernel stores the whole predicted labels of the test images. */ //------------------------------------------------------------------------------------------------------------------------------------------------- int is = threadIdx.x; int n = blockIdx.x; int ig = n*SIZE + is; while (ig < (n + 1)*SIZE) { float temp = float(1.0f / (1.0f + expf(-y[ig]))); ys[ig] += temp; ig += BLOCKSIZE; } } //============================================================================================================================================================ template < int> __global__ void Add_Mtx(float *c, float *a, int SIZE) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* This cuda kernel is an auxiliary cuda function that adds two GPU matrices c = c + a. */ /**** Argument List ****/ /* a:- input matrix. c:- input/output matrix to store c = c + a.. SIZE:- size of the input/output matrices. 
*/ //------------------------------------------------------------------------------------------------------------------------------------------------- int i = blockIdx.x*blockDim.x + threadIdx.x; int stride = blockDim.x*gridDim.x; while (i < SIZE) { c[i] += a[i]; i += stride; } } //============================================================================================================================================================ template < int> __global__ void Update_SGD_WDecay(float *c, float *a, float lr, float lmda, int SIZE) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* This cuda kernel updates the parameters in matrix w with the derivatives in matrix dw. The upate equation implements steepest gradient decent with L2 regularization (weight decay). */ /**** Argument List ****/ /* lr:- learning rate. lmda:- weight decay parameter. dw:- input buffer that contains the derivatives. w:- input/output buffer to store the updated values of the trainable parameters in matrix w. SIZE:- size of the input/output matrices. */ //------------------------------------------------------------------------------------------------------------------------------------------------- int i = blockIdx.x*blockDim.x + threadIdx.x; int stride = blockDim.x*gridDim.x; while (i < SIZE) { c[i] = (1 - lr*lmda)*c[i] - lr * a[i]; i += stride; } } //============================================================================================================================================================ template < int> __global__ void Update_RMSprop1(float *w, float *v, float *dw, float lr, float lmda, int SIZE, int iter) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* This cuda kernel updates the parameters in matrix w with the current derivatives in matrix dw and the running averages of the derivatives in matrix v. The update equation implements Root Mean Square Propagation (RMSprop) with L2 regularization (weight decay). The initialization of the running average of the derivatives based on the time step (iteration number) is borrowed from the Adam algorithm. */ /**** Argument List ****/ /* lr:- learning rate. lmda:- weight decay parameter. iter:- current training iteration. dw:- input buffer that contains the derivatives. v:- input/output buffer that maintains the running average of the squared derivative per parameter. w:- input/output buffer to store the updated values of the trainable parameters in matrix w. SIZE:- size of the input/output matrices. 
*/ //------------------------------------------------------------------------------------------------------------------------------------------------- int i = blockIdx.x*blockDim.x + threadIdx.x; int stride = blockDim.x*gridDim.x; while (i < SIZE) { float gamma = 0.999f; v[i] = gamma*v[i] + (1 - gamma)*dw[i] * dw[i]; float m = v[i] / (1 - powf(gamma, float(iter))); w[i] = (1 - lr*lmda)*w[i] - lr* dw[i] / (sqrtf(m) + 0.00000001); i += stride; } } //============================================================================================================================================================ template < int> __global__ void Update_RMSprop2(float *A, float *V, float *DA, float lr, float lmda, int SIZE, int iter) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* Update_RMSprop2 is similar to Update_RMSprop1, but it is used for smaller matrices. */ /**** Argument List ****/ /* lr:- learning rate. lmda:- weight decay parameter. iter:- current training iteration. dA:- input buffer that contains the derivatives. V:- input/output buffer that maintains the running average of the squared derivative per parameter. A:- input/output buffer to store the updated values of the trainable parameters in matrix w. SIZE:- size of the input/output matrices. */ //------------------------------------------------------------------------------------------------------------------------------------------------- int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < SIZE) { float gamma = 0.999f; V[i] = gamma*V[i] + (1 - gamma)*DA[i] * DA[i]; float m = V[i] / (1 - powf(gamma, float(iter))); A[i] = (1 - lr*lmda)*A[i] - lr* DA[i] / (sqrtf(m) + 0.00000001); } } //============================================================================================================================================================ template<int> __global__ void DataAugmentation(float *XCrop, unsigned char *Red, unsigned char *Green, unsigned char *Blue, unsigned int * Height, unsigned int *Width, size_t *Start, int *Indx, unsigned int *Crop, float *RGB, float *Cropf, int IR1) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* This cuda kernel implements data augmentations which is applied at the start of each training iteration. The augmentation is applied to a batch of images where the Red matrix stores the Red channels for all images, Green matrix stores the Green channels for all images, and the Blue matrix stores the Blue channels for all images. The Height and Width matrices store the height and width of each image in the batch, and the Start matrix stores the starting address of each image in the Red, Green, and Blue buffers. The Indx matrix stores the indices of the images in the current batch selected by the reshuffle algorithm. This function crops a random rectangular with size randomly selected to be between 8% and 100% of the image size and with aspect ratio randomly selected to be between 3/4 and 4/3. Then the cropped rectangular is fitted to the square window size of the network. Random horizontal flipping and color augmentation is also added. Bilinear interpolation is used for scaling. */ /**** Argument List ****/ /* Height:- input buffer that holds the height of all images in the batch. Width:- input buffer that holds the width of all images in the batch. 
Start:- input buffer that holds the starting position of all images in the batch. Red:- input buffer where the red input channels for all images in the batch are stored. Green:- input buffer where the green input channels for all images in the batch are stored. Blue:- input buffer where the blue input channels for all images in the batch are stored. Indx:- input buffer that contains the indices of the images in the current batch selected by the reshuffle algorithm. Crop:- input buffer that contains integer random values used to choose the cropping position for each image, and decide on horizontal flipping. RGB:- input buffer that contains 3 random values per image each added to one of the RGB channels for colour augmentation. Cropf:- input buffer that contains 2 random values per image, one decides the amount of scaling, and the other decides the amount of change to the aspect ratio. XCrop:- output buffer to store a batch of data augmented images. */ //------------------------------------------------------------------------------------------------------------------------------------------------- __shared__ unsigned int s_crop[3], s_height, s_width, s_indx; __shared__ float s_RGB[3], s_cropf[2]; __shared__ size_t s_start; int is = threadIdx.x; int ii = is + blockIdx.x*blockDim.x; int n = blockIdx.y; if (is < 3) { s_crop[is] = Crop[3 * n + is]; s_RGB[is] = RGB[3 * n + is]; if (is < 2) s_cropf[is] = Cropf[2 * n + is]; } int ISize1 = IR1*IR1; if (ii < ISize1) { int ix = ii % IR1; int iy = ii / IR1; if (is == 0) { s_indx = Indx[n]; s_height = Height[s_indx]; s_width = Width[s_indx]; s_start = Start[s_indx]; } __syncthreads(); int H = s_height; int W = s_width; int Hc, Wc; size_t start = s_start; float a = 0.08f + s_cropf[0] * (1.0f - 0.08f); //float a = 0.1914 + s_cropf[0] * (0.765625 - 0.1914);// float minHW = fminf(float(H), float(W)); float smax = fminf(1.3333333f, (W*W) / (minHW*minHW*a)); float smin = fmaxf(0.75f, (minHW*minHW*a) / (H*H)); float s = smin + s_cropf[1] * (smax - smin); Wc = int(minHW*sqrtf(a*s)); Hc = int(minHW*sqrtf(a / s)); float ScaleH = float(IR1 - 1) / float(Hc - 1); float ScaleW = float(IR1 - 1) / float(Wc - 1); int xd = s_crop[0] % (H + 1 - Hc); int yd = s_crop[1] % (W + 1 - Wc); int flip = s_crop[2] % 10; int ic = ix + IR1*iy + 3 * n*ISize1; float ixs, iys; int ixs1, ixs2, iys1, iys2; ixs = float(ix) / ScaleH + float(xd); if (flip < 5) iys = float(iy) / ScaleW + float(yd); else iys = float(IR1 - 1 - iy) / ScaleW + float(yd); ixs1 = floorf(ixs); ixs2 = ceilf(ixs); iys1 = floorf(iys); iys2 = ceilf(iys); if (iys1 < iys2) { if (ixs1 < ixs2) { int ia1 = ixs1 + iys1*H + start; int ia2 = ixs2 + iys1*H + start; int ia3 = ixs1 + iys2*H + start; int ia4 = ixs2 + iys2*H + start; float t1 = Red[ia1] * (ixs2 - ixs) + Red[ia2] * (ixs - ixs1); float t2 = Red[ia3] * (ixs2 - ixs) + Red[ia4] * (ixs - ixs1); XCrop[ic] = (s_RGB[0] + t1*(iys2 - iys) + t2*(iys - iys1)) / 255.0f - 0.5f; t1 = Green[ia1] * (ixs2 - ixs) + Green[ia2] * (ixs - ixs1); t2 = Green[ia3] * (ixs2 - ixs) + Green[ia4] * (ixs - ixs1); XCrop[ic + ISize1] = (s_RGB[1] + t1*(iys2 - iys) + t2*(iys - iys1)) / 255.0f - 0.5f; t1 = Blue[ia1] * (ixs2 - ixs) + Blue[ia2] * (ixs - ixs1); t2 = Blue[ia3] * (ixs2 - ixs) + Blue[ia4] * (ixs - ixs1); XCrop[ic + 2 * ISize1] = (s_RGB[2] + t1*(iys2 - iys) + t2*(iys - iys1)) / 255.0f - 0.5f; } else { int ia1 = ixs1 + iys1*H + start; int ia2 = ixs1 + iys2*H + start; XCrop[ic] = (s_RGB[0] + Red[ia1] * (iys2 - iys) + Red[ia2] * (iys - iys1)) / 255.0f - 0.5f; XCrop[ic + ISize1] = (s_RGB[1] + 
Green[ia1] * (iys2 - iys) + Green[ia2] * (iys - iys1)) / 255.0f - 0.5f;; XCrop[ic + 2 * ISize1] = (s_RGB[2] + Blue[ia1] * (iys2 - iys) + Blue[ia2] * (iys - iys1)) / 255.0f - 0.5f; } } else { if (ixs1 < ixs2) { int ia1 = ixs1 + iys1*H + start; int ia2 = ixs2 + iys1*H + start; XCrop[ic] = (s_RGB[0] + Red[ia1] * (ixs2 - ixs) + Red[ia2] * (ixs - ixs1)) / 255.0f - 0.5f; XCrop[ic + ISize1] = (s_RGB[1] + Green[ia1] * (ixs2 - ixs) + Green[ia2] * (ixs - ixs1)) / 255.0f - 0.5f; XCrop[ic + 2 * ISize1] = (s_RGB[2] + Blue[ia1] * (ixs2 - ixs) + Blue[ia2] * (ixs - ixs1)) / 255.0f - 0.5f; } else { int ia1 = ixs1 + iys1*H + start; XCrop[ic] = (s_RGB[0] + Red[ia1]) / 255.0f - 0.5f; XCrop[ic + ISize1] = (s_RGB[1] + Green[ia1]) / 255.0f - 0.5f; XCrop[ic + 2 * ISize1] = (s_RGB[2] + Blue[ia1]) / 255.0f - 0.5f; } } } } //============================================================================================================================================================ template<const int EpochT> __global__ void DataAugmentationInference(float *XCrop, unsigned char *Red, unsigned char *Green, unsigned char *Blue, unsigned int * Height, unsigned int *Width, size_t *Start, int *MTX, unsigned int *Flip, int epoch, int IR1) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* This cuda kernel implements data augmentations for test images in the inference stage. This function can do Single-Crop and Multi-Crop inference based on the EpochTs value which contains the number of crops per image. If EpochTs is equal to 1 then this function will do a single crop-crop inference. If EpochTs >1 this function will do a multi-crop inference. The cropping locations and scales for each test image are stored in the MTX matrix. Each crop will be horizontally flipped with 0.5 probability. The Prediction for each test image is equal to the average predictions of all the crops stored in the MTX matrix. EpochTs is a control variable stored in "ControlVariables.h". Bilinear interpolation is used for scaling. */ /**** Argument List ****/ /* Height:- input buffer that holds the height of all images in the batch. Width:- input buffer that holds the width of all images in the batch. Start:- input buffer that holds the starting position of all images in the batch. Red:- input buffer where the red input channels for all images in the batch are stored. Green:- input buffer where the green input channels for all images in the batch are stored. Blue:- input buffer where the blue input channels for all images in the batch are stored. MTX:- input buffer that contains the cropping positions and amount of scaling applied to all images in the batch. Flip:- input buffer that contains one random value per image that is used to decide on horizontal flipping. epoch:- represents the crop number in multi-crop inference. XCrop:- output buffer to store a test batch of augmented images. 
*/ //------------------------------------------------------------------------------------------------------------------------------------------------- __shared__ unsigned int s_height, s_width, s_flip, s_mtx[3]; __shared__ size_t s_start; int is = threadIdx.x; int ii = is + blockIdx.x*blockDim.x; int n = blockIdx.y; if (is < 3) s_mtx[is] = MTX[n * 3 * EpochT + 3 * epoch + is]; int ISize1 = IR1*IR1; if (ii < ISize1) { int ix = ii % IR1; int iy = ii / IR1; if (is == 0) { s_height = Height[n]; s_width = Width[n]; s_start = Start[n]; s_flip = Flip[n]; } __syncthreads(); int H = s_height; int W = s_width; int Hs, Ws; float Hf = H, Wf = W; size_t start = s_start; if (H > W) { Ws = s_mtx[0]; Hs = Ws*(Hf / Wf); } else { Hs = s_mtx[0]; Ws = Hs*(Wf / Hf); } int xd = s_mtx[1]; int flip = s_flip % 10; int yd = s_mtx[2]; int ic = ix + IR1*iy + 3 * n*ISize1; float ixs, iys; int ixs1, ixs2, iys1, iys2; ixs = (ix + xd)*((Hf - 1) / (Hs - 1)); if (flip < 5) iys = (iy + yd)*((Wf - 1) / (Ws - 1)); else iys = (IR1 - 1 - iy + yd)*((Wf - 1) / (Ws - 1)); //else iys = (Ws - 1 - iy - yd)*((Wf - 1) / (Ws - 1)); ixs1 = floorf(ixs); ixs2 = ceilf(ixs); iys1 = floorf(iys); iys2 = ceilf(iys); if (iys1 < iys2) { if (ixs1 < ixs2) { int ia1 = ixs1 + iys1*H + start; int ia2 = ixs2 + iys1*H + start; int ia3 = ixs1 + iys2*H + start; int ia4 = ixs2 + iys2*H + start; float t1 = Red[ia1] * (ixs2 - ixs) + Red[ia2] * (ixs - ixs1); float t2 = Red[ia3] * (ixs2 - ixs) + Red[ia4] * (ixs - ixs1); XCrop[ic] = (t1*(iys2 - iys) + t2*(iys - iys1)) / 255.0f - 0.5f; t1 = Green[ia1] * (ixs2 - ixs) + Green[ia2] * (ixs - ixs1); t2 = Green[ia3] * (ixs2 - ixs) + Green[ia4] * (ixs - ixs1); XCrop[ic + ISize1] = (t1*(iys2 - iys) + t2*(iys - iys1)) / 255.0f - 0.5f; t1 = Blue[ia1] * (ixs2 - ixs) + Blue[ia2] * (ixs - ixs1); t2 = Blue[ia3] * (ixs2 - ixs) + Blue[ia4] * (ixs - ixs1); XCrop[ic + 2 * ISize1] = (t1*(iys2 - iys) + t2*(iys - iys1)) / 255.0f - 0.5f; } else { int ia1 = ixs1 + iys1*H + start; int ia2 = ixs1 + iys2*H + start; XCrop[ic] = (Red[ia1] * (iys2 - iys) + Red[ia2] * (iys - iys1)) / 255.0f - 0.5f; XCrop[ic + ISize1] = (Green[ia1] * (iys2 - iys) + Green[ia2] * (iys - iys1)) / 255.0f - 0.5f;; XCrop[ic + 2 * ISize1] = (Blue[ia1] * (iys2 - iys) + Blue[ia2] * (iys - iys1)) / 255.0f - 0.5f; } } else { if (ixs1 < ixs2) { int ia1 = ixs1 + iys1*H + start; int ia2 = ixs2 + iys1*H + start; XCrop[ic] = (Red[ia1] * (ixs2 - ixs) + Red[ia2] * (ixs - ixs1)) / 255.0f - 0.5f; XCrop[ic + ISize1] = (Green[ia1] * (ixs2 - ixs) + Green[ia2] * (ixs - ixs1)) / 255.0f - 0.5f; XCrop[ic + 2 * ISize1] = (Blue[ia1] * (ixs2 - ixs) + Blue[ia2] * (ixs - ixs1)) / 255.0f - 0.5f; } else { int ia1 = ixs1 + iys1*H + start; XCrop[ic] = Red[ia1] / 255.0f - 0.5f; XCrop[ic + ISize1] = Green[ia1] / 255.0f - 0.5f; XCrop[ic + 2 * ISize1] = Blue[ia1] / 255.0f - 0.5f; } } } } //============================================================================================================================================================ template<int> __global__ void DataAugmentationValidate(float *XCrop, unsigned char *Red, unsigned char *Green, unsigned char *Blue, unsigned int * Height, unsigned int *Width, size_t *Start, int *Indx, int IR1) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* This cuda kernel is a simplified version of DataAugmentation used with the validation images. 
A single central crop with size equal to 224/256 of the maximum square size in the image is used to calculate the validation error. Bilinear interpolation is used for scaling. */ /**** Argument List ****/ /* Height:- input buffer that holds the height of all images in the batch. Width:- input buffer that holds the width of all images in the batch. Start:- input buffer that holds the starting position of all images in the batch. Red:- input buffer where the red input channels for all images in the batch are stored. Green:- input buffer where the green input channels for all images in the batch are stored. Blue:- input buffer where the blue input channels for all images in the batch are stored. Indx:- input buffer that contains the indices of the images in the current batch selected by the reshuffle algorithm. XCrop:- output buffer to store a batch of data augmented images. */ //------------------------------------------------------------------------------------------------------------------------------------------------- __shared__ unsigned int s_height, s_width, s_indx; __shared__ size_t s_start; int is = threadIdx.x; int ii = is + blockIdx.x*blockDim.x; int ISize1 = IR1*IR1; if (ii < ISize1) { int ix = ii % IR1; int iy = ii / IR1; int n = blockIdx.y; if (is == 0) { s_indx = Indx[n]; s_height = Height[s_indx]; s_width = Width[s_indx]; s_start = Start[s_indx]; } __syncthreads(); int H = s_height; int W = s_width; int Hs, Ws; float Hf = H, Wf = W; size_t start = s_start; if (H > W) { Ws = IR1;//1.143f*IR1; // Hs = Ws*(Hf / Wf); } else { Hs = IR1;//1.143f*IR1; // Ws = Hs*(Wf / Hf); } int xd = (Hs - IR1) / 2; int yd = (Ws - IR1) / 2; int ic = ix + IR1*iy + 3 * n*ISize1; float ixs, iys; int ixs1, ixs2, iys1, iys2; ixs = (ix + xd)*((Hf - 1) / (Hs - 1)); iys = (iy + yd)*((Wf - 1) / (Ws - 1)); //if (flip == 0) iys = (iy + yd)*((Wf - 1) / (Ws - 1)); else iys = (IR1 - 1 - iy + yd)*((Wf - 1) / (Ws - 1)); //else iys = (Ws - 1 - iy - yd)*((Wf - 1) / (Ws - 1)); ixs1 = floorf(ixs); ixs2 = ceilf(ixs); iys1 = floorf(iys); iys2 = ceilf(iys); if (iys1 < iys2) { if (ixs1 < ixs2) { int ia1 = ixs1 + iys1*H + start; int ia2 = ixs2 + iys1*H + start; int ia3 = ixs1 + iys2*H + start; int ia4 = ixs2 + iys2*H + start; float t1 = Red[ia1] * (ixs2 - ixs) + Red[ia2] * (ixs - ixs1); float t2 = Red[ia3] * (ixs2 - ixs) + Red[ia4] * (ixs - ixs1); XCrop[ic] = (t1*(iys2 - iys) + t2*(iys - iys1)) / 255.0f - 0.5f; t1 = Green[ia1] * (ixs2 - ixs) + Green[ia2] * (ixs - ixs1); t2 = Green[ia3] * (ixs2 - ixs) + Green[ia4] * (ixs - ixs1); XCrop[ic + ISize1] = (t1*(iys2 - iys) + t2*(iys - iys1)) / 255.0f - 0.5f; t1 = Blue[ia1] * (ixs2 - ixs) + Blue[ia2] * (ixs - ixs1); t2 = Blue[ia3] * (ixs2 - ixs) + Blue[ia4] * (ixs - ixs1); XCrop[ic + 2 * ISize1] = (t1*(iys2 - iys) + t2*(iys - iys1)) / 255.0f - 0.5f; } else { int ia1 = ixs1 + iys1*H + start; int ia2 = ixs1 + iys2*H + start; XCrop[ic] = (Red[ia1] * (iys2 - iys) + Red[ia2] * (iys - iys1)) / 255.0f - 0.5f; XCrop[ic + ISize1] = (Green[ia1] * (iys2 - iys) + Green[ia2] * (iys - iys1)) / 255.0f - 0.5f;; XCrop[ic + 2 * ISize1] = (Blue[ia1] * (iys2 - iys) + Blue[ia2] * (iys - iys1)) / 255.0f - 0.5f; } } else { if (ixs1 < ixs2) { int ia1 = ixs1 + iys1*H + start; int ia2 = ixs2 + iys1*H + start; XCrop[ic] = (Red[ia1] * (ixs2 - ixs) + Red[ia2] * (ixs - ixs1)) / 255.0f - 0.5f; XCrop[ic + ISize1] = (Green[ia1] * (ixs2 - ixs) + Green[ia2] * (ixs - ixs1)) / 255.0f - 0.5f; XCrop[ic + 2 * ISize1] = (Blue[ia1] * (ixs2 - ixs) + Blue[ia2] * (ixs - ixs1)) / 255.0f - 0.5f; } else { int ia1 = ixs1 + iys1*H 
+ start; XCrop[ic] = Red[ia1] / 255.0f - 0.5f; XCrop[ic + ISize1] = Green[ia1] / 255.0f - 0.5f; XCrop[ic + 2 * ISize1] = Blue[ia1] / 255.0f - 0.5f; } } } } //============================================================================================================================================================ template<int BLOCKSIZE> __global__ void BatchNormForward1a(float *SMU, float *X, int NumCh, int ChSize) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* This cuda kernel calcualtes the mean and variance per thread block. */ /**** Argument list****/ /* NumCh:- Number of output channels per image. Total number of channels in a convolutional layer is NumCh*BatchSize. ChSize:- The size of each output channel. X :- input buffer that contains the activations of all output channels before applying BN. SMU:- output buffer where this function stores all means and variances calculated per thread block. */ //------------------------------------------------------------------------------------------------------------------------------------------------- __shared__ float s1[BLOCKSIZE], s2[BLOCKSIZE]; int is = threadIdx.x; int n = blockIdx.x + NumCh*blockIdx.y; int ig = is + n*ChSize; float temp, sum = 0, sum_sq = 0; while (ig < (n + 1)*ChSize) { temp = X[ig]; sum += temp; sum_sq += temp*temp; ig += BLOCKSIZE; } s1[is] = sum; s2[is] = sum_sq; __syncthreads(); int i = BLOCKSIZE / 2; while (i > 0) { if (is < i) { s1[is] += s1[is + i]; s2[is] += s2[is + i]; } __syncthreads(); i /= 2; } if (is == 0) { SMU[2 * n] = s1[0]; SMU[2 * n + 1] = s2[0]; } } //============================================================================================================================================================ template<int BLOCKSIZE> __global__ void BatchNormForward1b(float *SMU, int Ch, int TotalChSize) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* This cuda kernel accumulates the means and variances per thread block calculated by BatchNormForward1a to calculate the mean and variance per output channel. The reason for this two stage calculation of the means and variances is caused by the layout of the output channels in the GPU memory used by the convolutional layer implementation of the cudnn.lib library. The layout is CNHW where the order of the tensor inner dimensions is Width, Height, N for image index and Channel. If the layout was NCHW the calculations of the means and variances can easily and efficiently be implemented in a single stage. Anyway splitting the calculation into two consecutive stages adds minimal overhead. */ /**** Argument list****/ /* NumCh:- Number of output channels per image. Total number of channels in a convolutional layer is NumCh*BatchSize. TotalChSize:- The size of each output channel across all images in the batch TotalChSize = ChSize*BatchSize. SMU:- Output buffer where this function calculates and stores a total of NumCh mean-variance pairs for each of the NumCh output channels. 
*/ //------------------------------------------------------------------------------------------------------------------------------------------------- int is = blockIdx.x*blockDim.x + threadIdx.x; if (is < Ch) { int ix = 2 * is; int size = 2 * Ch*BatchSize; float sum = 0.0f, sum_sq = 0.0f; while (ix < size) { sum += SMU[ix]; sum_sq += SMU[ix + 1]; ix += 2 * Ch; } float temp = TotalChSize; sum /= temp; SMU[2 * is] = sum; temp = sum_sq / temp - sum*sum; SMU[2 * is + 1] = sqrtf(temp + 0.0001); } } //============================================================================================================================================================ template<int BLOCKSIZE> __global__ void BatchNormForward2(float *Y, float *X, float *SMU, float *Param, int NumCh, int ChSize) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* This cuda kernel uses the means and variances calculated by BatchNormForward1a and BatchNormForward1b to apply batch normalization to the output channels. */ /**** Argument list***/ /* NumCh:- Number of output channels per image. Total number of channels in a convolutional layer is NumCh*BatchSize ChSize:- The size of each output channel. Param:- A buffer that contains the BN trainable parameters beta and gamma. There is a total of NumCh beta-gamma pairs, one for each of the NumCh output channels. X :- input buffer that contains the activations of all output channels before applying BN. SMU:- input buffer that contains a means-variance pair per output channel, and each pair will be used to normalize the activations of the corresponding output channel. Y :- output buffer where this function stores the normalized activations of all output channels. */ //------------------------------------------------------------------------------------------------------------------------------------------------- __shared__ float s_param[2], s_smu[2]; int is = threadIdx.x; int ix = blockIdx.x*BLOCKSIZE + is; int n = blockIdx.y; int b = blockIdx.z; if (is < 2) { s_param[is] = Param[2 * n + is]; s_smu[is] = SMU[2 * n + is]; } __syncthreads(); if (ix < ChSize) { int ig = (NumCh*b + n)*ChSize + ix; float temp = (X[ig] - s_smu[0]) / s_smu[1]; temp = s_param[0] * temp + s_param[1]; Y[ig] = fmaxf(temp, 0); } } //============================================================================================================================================================ template<int BLOCKSIZE> __global__ void BatchNormBackward2(float *DParam, float *Derv, float *Param, float *SMU, float *DY, float *X, int NumCh, int ChSize) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* This cuda kernel is the first stage that propagates the error signal back through the batch normalization stage. DY contains the error signal at the output side of the BN stage. This kernel calculates the derivatives for the BN trainable parameters in DParam, and partially propagates the error signal back to the inputs of the BN stage and stores these intermediate values in Derv. */ /****Argument list****/ /* NumCh:- Number of output channels per image. Total number of channels in a convolutional layer is NumCh*BatchSize ChSize:- The size of each output channel. Param:- input buffer that contains the BN trainable parameters beta and gamma. There is a total of NumCh beta-gamma pairs, one for each of the NumCh output channels. 
X :- input buffer that contains the activations of all output channels before applying BN. SMU:- input buffer that contains a means-variance pair per output channel, and each pair will be used to normalize the activations of the corresponding output channel. DY :- input buffer that contains the error signal at the outputs of the BN stage. The derivatives of the lost function with respect to the outputs of BN. Derv:- output buffer where this function calculates and stores a total of NumCh pairs of intermediate values that will used by the next stage to propagate back the error signal to the inputs of the BN stage. DParam:- output buffer where this function calculates and stores the derivatives of beta and gamma, the trainable parameters of BN. */ //------------------------------------------------------------------------------------------------------------------------------------------------- __shared__ float s1[BLOCKSIZE], s2[BLOCKSIZE], s3[BLOCKSIZE]; __shared__ float s_smu[2], s_param[2]; int is = threadIdx.x; int n = blockIdx.x; int b = blockIdx.y; int n2 = n + NumCh*b; if (is < 2) { s_smu[is] = SMU[2 * n + is]; s_param[is] = Param[2 * n + is]; } __syncthreads(); float mu = s_smu[0], inv_sigma = 1.0f / s_smu[1], gamma = s_param[0], beta = s_param[1]; int ig = is + n2*ChSize; float temp1, temp2, temp, sum1 = 0.0f, sum2 = 0.0f, sum3 = 0.0f; float a = gamma*inv_sigma; float b1 = beta - a*mu; while (ig < (n2 + 1)*ChSize) { temp2 = X[ig]; temp = a*temp2 + b1; if (temp>0) { temp1 = DY[ig]; sum1 += temp1; sum2 += temp1*(temp2 - mu) * inv_sigma; sum3 += temp1*(temp2 - mu); } else { DY[ig] = 0; } ig += BLOCKSIZE; } s1[is] = sum1; s2[is] = sum2; s3[is] = sum3; __syncthreads(); int i = BLOCKSIZE / 2; while (i > 0) { if (is < i) { s1[is] += s1[is + i]; s2[is] += s2[is + i]; s3[is] += s3[is + i]; } __syncthreads(); i /= 2; } if (is == 0) { atomicAdd(DParam + 2 * n, s2[0]); atomicAdd(DParam + 2 * n + 1, s1[0]); atomicAdd(Derv + 2 * n, gamma*s1[0]); atomicAdd(Derv + 2 * n + 1, gamma*s3[0]); } } //============================================================================================================================================================ template<int BLOCKSIZE> __global__ void BatchNormBackward1(float *X, float *DY, float *Param, float *SMU, float *Derv, int NumCh, int ChSize) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* This cuda kernel completes the back propagation of the error signal through the BN stage. */ /****Argument list****/ /* NumCh:- Number of output channels per image. Total number of channels in a convolutional layer is NumCh*BatchSize ChSize:- The size of each output channel. Param:- input buffer that contains the BN trainable parameters beta and gamma. There is a total of NumCh beta-gamma pairs, one for each of the NumCh output channels. SMU:- input buffer that contains a means-variance pair per output channel, and each pair will be used to normalize the activations of the corresponding output channel. DY :- input buffer that contains the error signal at the outputs of the BN stage. The derivatives of the lost function with respect to the outputs of BN. Derv:- input buffer which contains a total of NumCh pairs of intermediate values that will used by the this function to propagate back ther error signal to the inputs (X) of the BN stage. X :- output buffer where this function calculates and stores the error signal with respect to the inputs of the BN stage. 
*/ //------------------------------------------------------------------------------------------------------------------------------------------------- __shared__ float s_smu[2], s_derv[2], s_gamma; int is = threadIdx.x; int ix = blockIdx.x*BLOCKSIZE + is; int n = blockIdx.y; int b = blockIdx.z; if (is < 2) { s_smu[is] = SMU[2 * n + is]; s_derv[is] = Derv[2 * n + is]; if (is == 0) { s_gamma = Param[2 * n]; } } __syncthreads(); if (ix < ChSize) { float temp; float mu = s_smu[0], inv_sigma = 1.0f / s_smu[1]; float derv1 = s_derv[0], derv2 = s_derv[1], inv_m = 1.0f / (BatchSize*ChSize); int ig = (NumCh*b + n)*ChSize + ix; temp = inv_sigma*(s_gamma*DY[ig] - derv1*inv_m - (X[ig] - mu)*derv2*inv_m*inv_sigma*inv_sigma); X[ig] = temp; } } //============================================================================================================================================================ template<int BLOCKSIZE> __global__ void BatchNormForward22(float *Y, float *X, float *Y0, bool *F, float *SMU, float *Param, int NumCh, int ChSize) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* BatchNormForward22 is similar to BatchNormForward2, but it has an additional input Y0, which is an input from a residual connection. Also this kernel stores the sign of the output in F to be used in the backward pass. Therefore, when the current stage (Layer) has an additional input coming from a previous stage through a residual connection, BatchNormForward22 is used instead of BatchNormForward2. */ /****Argument list****/ /* NumCh :- Number of output channels per image. Total number of channels in a convolutional layer is NumCh*BatchSize ChSize :- The size of each output channel. Param :- A buffer that contains the BN trainable parameters beta and gamma. There is a total of NumCh beta-gamma pairs, one for each of the NumCh output channels. X :- input buffer that contains the activations of all output channels before applying BN. Y0 :- input buffer that contains the activations of the jump-ahead residual connections. SMU :- input buffer that contains a means-variance pair per output channel, and each pair will be used to normalize the activations of the corresponding output channel. F :- output buffer that holds the signs of each output element in Y0 which will be used in the backward pass to propagate the error signal through the ReLUs. Y :- output buffer where this function stores the normalized activations of all output channels. */ //------------------------------------------------------------------------------------------------------------------------------------------------- __shared__ float s_param[2], s_smu[2]; int is = threadIdx.x; int ix = blockIdx.x*BLOCKSIZE + is; int n = blockIdx.y; int b = blockIdx.z; if (is < 2) { s_param[is] = Param[2 * n + is]; s_smu[is] = SMU[2 * n + is]; } __syncthreads(); if (ix < ChSize) { int ig = (NumCh*b + n)*ChSize + ix; float temp = (X[ig] - s_smu[0]) / s_smu[1]; temp = s_param[0] * temp + s_param[1] + Y0[ig]; temp = fmaxf(temp, 0); Y[ig] = temp; F[ig] = (temp>0) ? 
1 : 0; } } //============================================================================================================================================================ template<int BLOCKSIZE> __global__ void BatchNormBackward22(float *DParam, float *Derv, float *Param, float *SMU, float *DY, bool *F, float *X, int NumCh, int ChSize) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* BatchNormBackward22 is similar to BatchNormBackward2, but it has an additional input F, which is the sign of the BN output in forward pass. Therefore, when the current stage (Layer) has an additional input coming from a previous stage through a residual connection, BatchNormBackward22 is used instead of BatchNormBackward2. */ /**** Argument list****/ /* NumCh:- Number of output channels per image. Total number of channels in a convolutional layer is NumCh*BatchSize ChSize:- The size of each output channel. Param:- input buffer that contains the BN trainable parameters beta and gamma. There is a total of NumCh beta-gamma pairs, one for each of the NumCh output channels. X :- input buffer that contains the activations of all output channels before applying BN. F :- input buffer that is used to propagate the error signal back through the ReLU activation function. SMU:- input buffer that contains a means-variance pair per output channel, and each pair will be used to normalize the activations of the corresponding output channel. DY :- input buffer that contains the error signal at the outputs of the BN stage. The derivatives of the lost function with respect to the outputs of BN. Derv:- output buffer where this function calculates and stores a total of NumCh pairs of intermediate values that will used by the next stage to propagate back the error signal to the inputs of the BN stage. DParam:- output buffer where this function calculates and stores the derivatives of beta and gamma, the trainable parameters of BN. 
*/ //------------------------------------------------------------------------------------------------------------------------------------------------- __shared__ float s1[BLOCKSIZE], s2[BLOCKSIZE], s3[BLOCKSIZE]; __shared__ float s_smu[2], s_param[2]; int is = threadIdx.x; int n = blockIdx.x; int b = blockIdx.y; int n2 = n + NumCh*b; if (is < 2) { s_smu[is] = SMU[2 * n + is]; s_param[is] = Param[2 * n + is]; } __syncthreads(); float mu = s_smu[0], inv_sigma = 1.0f / s_smu[1], gamma = s_param[0]; int ig = is + n2*ChSize; float temp1, temp2, sum1 = 0.0f, sum2 = 0.0f, sum3 = 0.0f; while (ig < (n2 + 1)*ChSize) { temp2 = X[ig]; if (F[ig]>0) { temp1 = DY[ig]; sum1 += temp1; sum2 += temp1*(temp2 - mu) * inv_sigma; sum3 += temp1*(temp2 - mu); } else { DY[ig] = 0; } ig += BLOCKSIZE; } s1[is] = sum1; s2[is] = sum2; s3[is] = sum3; __syncthreads(); int i = BLOCKSIZE / 2; while (i > 0) { if (is < i) { s1[is] += s1[is + i]; s2[is] += s2[is + i]; s3[is] += s3[is + i]; } __syncthreads(); i /= 2; } if (is == 0) { atomicAdd(DParam + 2 * n, s2[0]); atomicAdd(DParam + 2 * n + 1, s1[0]); atomicAdd(Derv + 2 * n, gamma*s1[0]); atomicAdd(Derv + 2 * n + 1, gamma*s3[0]); } } //============================================================================================================================================================ template<int BLOCKSIZE> __global__ void BatchNormForwardT1b(float *SMU, float *SMUs, int NumCh, int count) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* BatchNormForwardT1b is similar to BatchNormForward1b, but it has an extra output SMUs to accumulate the means and variances from all training images. This kernel will only be executed after the last training epoch. After training stops these accumulated values will averaged by AdjustFixedMeansStds. */ /**** Argument list****/ /* NumCh:- Number of output channels per image. Total number of channels in a convolutional layer is NumCh*BatchSize TotalChSize:- The size of each output channel across all images in the batch TotalChSize = ChSize*BatchSize. SMU:- input buffer that contains a means-variance pair per output channel, and each pair will be used to normalize the activations of the corresponding output channel. SMUs:- Output buffer where this function calculates and stores a total of NumCh fixed mean-variance pairs that will be used in the inference stage. 
*/ //------------------------------------------------------------------------------------------------------------------------------------------------- int is = blockIdx.x*blockDim.x + threadIdx.x; if (is < NumCh) { int ix = 2 * is; int size = 2 * NumCh*BatchSize; float sum = 0.0f, sum_sq = 0.0f; while (ix < size) { sum += SMU[ix]; sum_sq += SMU[ix + 1]; ix += 2 * NumCh; } float temp = count; sum /= temp; SMU[2 * is] = sum; SMUs[2 * is] += sum; temp = sum_sq / temp - sum*sum; SMU[2 * is + 1] = sqrtf(temp + 0.0001); SMUs[2 * is + 1] += temp; } } //============================================================================================================================================================ template<int> __global__ void AdjustFixedMeansStds(float *SMU, int NumCh) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* This cuda kernel uses the accumulated values of means and variances gathered by BatchNormForwardT1b, to calculate the fixed means and variances that will be used in the inference stage. */ /**** Argument list****/ /* NumCh:- Number of output channels. SMU:- input buffer that contains the accumulated means and variances for all training data. */ //------------------------------------------------------------------------------------------------------------------------------------------------- int ig = threadIdx.x + blockIdx.x*blockDim.x; float temp = float(TrainSizeM / BatchSize); if (ig < NumCh) { float temp_value = SMU[ig] / temp; if (ig % 2 == 1) { temp_value = sqrtf(temp_value + 0.0001); } SMU[ig] = temp_value; } } //============================================================================================================================================================ template<int SIZE> __global__ void RGBrandPCA(float *RGBrand, float *rand1) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* This cuda kernel calculates a set of 3 stochastic values per image to be added to the 3 RGB channels for the purpose of colour augmentation. For each random variable in the input buffer rand1 this kernel will calculate a corresponding stochastic value in RGBrand based on PCA analysis of the RGB pixel values of the whole training set. */ /****Argument List****/ /* rand1:- input buffer of random values drawn from a normal distribution with zero mean and unit variance. RGBrand:- output buffer to store the resulting stochastic RGB values, 3 per image. */ //------------------------------------------------------------------------------------------------------------------------------------------------- int is = threadIdx.x; int ig = is + blockIdx.x*blockDim.x; if (ig < SIZE) { float alpha1 = rand1[3 * ig] * 6.9514; float alpha2 = rand1[3 * ig + 1] * 17.3739; float alpha3 = rand1[3 * ig + 2] * 305.65817; float vr1 = -0.4000, vr2 = -0.7061, vr3 = 0.58426; float vg1 = 0.80526, vg2 = 0.0336, vg3 = 0.59196; float vb1 = -0.4376, vb2 = 0.7073, vb3 = 0.55517; RGBrand[3 * ig] = vr1*alpha1 + vr2*alpha2 + vr3*alpha3; RGBrand[3 * ig + 1] = vg1*alpha1 + vg2*alpha2 + vg3*alpha3; RGBrand[3 * ig + 2] = vb1*alpha1 + vb2*alpha2 + vb3*alpha3; } } //============================================================================================================================================================
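// Illustrative sketch only (not part of the original source): one possible host-side wrapper for the RGBrandPCA kernel above. The names d_RGBrand and
// d_rand1 are assumed device pointers of 3*BatchSize floats each, and d_rand1 is assumed to already hold samples drawn from N(0,1); BatchSize is the
// compile-time constant already used by the other kernels in this file.
static void LaunchRGBrandPCA(float *d_RGBrand, float *d_rand1)
{
    // One thread per image; the template argument is only used as the bound of the guard inside the kernel.
    const int threads = 256;
    const int blocks = (BatchSize + threads - 1) / threads;
    RGBrandPCA<BatchSize><<<blocks, threads>>>(d_RGBrand, d_rand1);
}
//============================================================================================================================================================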
4cc87d7665b557b35cb575a9cd485c3b9500c3de.cu
#include"Header.h" //============================================================================================================================================================ //============================================================================================================================================================ //============================================================================================================================================================ // This.cu file contains the implementation of core CUDA kernels required to implement a deep feed - forward convolutional network. //============================================================================================================================================================ //============================================================================================================================================================ //============================================================================================================================================================ //============================================================================================================================================================ template<int> __global__ void MaxPoolingForward(float *s, float *c, int *Indx, int CRC, int SRC, int Src1, int Src2, int NumCh) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* This cuda kernel implements the forward pass for maxpooling. The input channel size is CRCxCRC, the pooling size is Src1xSrc1, the pooling stride is Src2xSrc2, and the output channel size is SRCxSRC where SRC = CRC/Src2. The index of the maximum output in each pooling square is stored in the Indx matrix to speed up the backward pass through the maxpooling stage. */ /**** Argument List ****/ /* NumCh:- Number of output channels per image. Total number of channels in a convolutional layer is NumCh*BatchSize. Src1:- Pooling size is Src1×Src1. Src2:- Pooling stride. CRC:- Input channel size is CRC×CRC (output channel size of previous stage before applying maxpooling). SRC:- Output channel size is SRC×SRC after applying pooling. c:- Input buffer that conatians all input channels (output channels of the previous layer). Indx:- Output buffer to store the positon of the maximum value in each pooling square to be used by MaxPoolingBackward in the backward pass. s:- Output buffer where this cuda kernel stores the all output channels after applying maximum pooling. 
*/ //------------------------------------------------------------------------------------------------------------------------------------------------- int iss = blockIdx.x*blockDim.x + threadIdx.x; int SN = SRC*SRC; if (iss < SN) { int n = blockIdx.y + NumCh*blockIdx.z; int icx = Src1 * (iss % SRC); int icy = Src1 * (iss / SRC); int is = iss + n*SN; int ic = (n*CRC + icy)*CRC + icx; float max = -9.9e+30f; int index; for (int i = -1; i < Src2 - 1; i++) for (int j = -1; j < Src2 - 1; j++) { int j1 = icx + j, i1 = icy + i, ix = ic + i*CRC + j; if (j1 >= 0 && i1 >= 0 && j1 < CRC && i1 < CRC && c[ix] > max) { index = ix; max = c[index]; } } s[is] = max; Indx[is] = index; } } //============================================================================================================================================================ template<int> __global__ void MaxPoolingBackward(float *c, float *s, int *Indx, int SN, int NumCh) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* This cuda kernel implements the backward pass for maxpooling. The only job of this cuda function is to propagate back the error signal through the maxpooling stage, there is no parameter update required because the maxpooling stage has no trainable parameters. The error signal will only be passed to the location of the maximum value in each pooling square using the Indx matrix which stores those maximum values. */ /**** Argument List ****/ /* NumCh:- Number of output channels per image. Total number of channels in a convolutional layer is NumCh*BatchSize. SN:- The channel size on the output side of the mapooling stage, SN = SRC×SRC. s:- Input buffer that contains the error signal with respect to the activations of all channels on the output side of the maxpooling stage. Indx:- Input buffer where the positon of the maximum value in each pooling square was stored in the forward pass by MaxPoolingForward. c:- Output buffer where this cuda kernel stores the error signal with respect to the activations of all channels on the input side of the maxpooling stage. */ //------------------------------------------------------------------------------------------------------------------------------------------------- int iss = blockIdx.x*blockDim.x + threadIdx.x; if (iss < SN) { int n = blockIdx.y + NumCh*blockIdx.z; int is = n*SN + iss; atomicAdd(c + Indx[is], s[is]); } } //============================================================================================================================================================ template<const int BLOCKSIZE> __global__ void GlobalAvgPoolingForward(float *S, float *C, int NumCh, int ChSize) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* This cuda kernel implements the forward pass of the global average pooling stage used after the last convolutional layer. The input channel size is ChSize which is reduced to a single value that is equal to the average of all values in the channel. */ /**** Argument List ****/ /* NumCh:- Number of output channels per image. Total number of channels in a convolutional layer is NumCh*BatchSize. ChSize:- Input Channel size. c:- Input buffer that conatians all input channels (output channels of the previous layer). s:- Output buffer where this cuda kernel stores the all output channels after applying global average pooling. 
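A possible launch configuration (illustrative, inferred from the reduction below): one block of BLOCKSIZE threads per (channel, image) pair, with BLOCKSIZE a power of two that matches the template argument, e.g. GlobalAvgPoolingForward<256><<<dim3(NumCh, BatchSize), 256>>>(S, C, NumCh, ChSize);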
*/ //------------------------------------------------------------------------------------------------------------------------------------------------- __shared__ float s[BLOCKSIZE]; int is = threadIdx.x; if (is < ChSize) { int n = blockIdx.x + NumCh*blockIdx.y; int b = blockIdx.y; int ig1 = n*ChSize + is; int ig2 = n + b; float sum = 0.0f; while (ig1 < (n + 1)*ChSize) { sum += C[ig1]; ig1 += BLOCKSIZE; } s[is] = sum; __syncthreads(); int i = blockDim.x / 2; while (i > 0 && is + i < ChSize) { if (is < i) s[is] += s[is + i]; __syncthreads(); i /= 2; } if (is == 0) S[ig2] = s[0] / ChSize; } } //============================================================================================================================================================ template<const int BLOCKSIZE> __global__ void GlobalAvgPoolingBackward(float *C, float *S, int NumCh, int ChSize) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* This cuda kernel implements the backward pass of the global average pooling stage. This stage has no trainable parameters, and therefore this function only propagates back the error signal through the average pooling stage. */ /**** Argument List ****/ /* NumCh:- Number of output channels per image. Total number of channels in a convolutional layer is NumCh*BatchSize. ChSize:- Input Channel size. s:- Input buffer that contains the error signal with respect to the activations of all channels on the output side of the average pooling stage. c:- Output buffer where this cuda kernel stores the error signal with respect to the activations of all channels on the input side of the average pooling stage. */ //------------------------------------------------------------------------------------------------------------------------------------------------- __shared__ float s_temp; int is = threadIdx.x; if (is < ChSize) { int n = blockIdx.x + NumCh*blockIdx.y; int b = blockIdx.y; int ig1 = n*ChSize + is; int ig2 = n + b; if (is == 0)s_temp = S[ig2]; __syncthreads(); float temp = s_temp / ChSize; while (ig1 < (n + 1)*ChSize) { C[ig1] = temp; ig1 += BLOCKSIZE; } } } //============================================================================================================================================================ template < const int SIZE, const int BLOCKSIZE > __global__ void Softmax(float *y, int *t, int *Indx, float *mse, float *count) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* This cuda kernel implements both the forward and backward passes of the softmax function. Because the softmax stage is the last stage in the network, doing the backward propagation of the error signal immediately after doing the forward pass in the same cuda function is more efficient and adds minimal cost. The implementation is slightly complex because it takes into consideration the possibility of overflow, and the thread block size limitation to 1024 cuda threads per block. */ /**** Argument List ****/ /* Indx:- input buffer that stores the indices or locations of the images in the current batch. t:- input buffer that contains the image labels. count:- output variable to store the total number of images that were correctly classified for the training set or validation set. 
mse:- output variable to accumulate the mean square error of the training set or validation set y:- input/output buffer where the inputs to the softmax function are stored and where this kernel stores the error signal at the input side of the softmax stage. */ //------------------------------------------------------------------------------------------------------------------------------------------------- __shared__ float s[BLOCKSIZE]; __shared__ double sD[BLOCKSIZE]; __shared__ int indx[BLOCKSIZE], s_t; int is = threadIdx.x; int n = blockIdx.x; int ig = n*SIZE + is, ig1 = ig; if (is == 0) { int indx1 = Indx[n]; s_t = t[indx1]; } float tempy[8], max = -9.9e+30f, mse1; double tempyD[8], sum; int k = 0, ix; while (ig < (n + 1)*SIZE) { tempy[k] = y[ig]; if (tempy[k]>max){ max = tempy[k]; ix = k; } k++; ig += BLOCKSIZE; } s[is] = max; indx[is] = is + BLOCKSIZE*ix; __syncthreads(); int i = BLOCKSIZE / 2; while (i > 0) { if (is < i && is + i < SIZE) { if (s[is + i] > s[is]) { s[is] = s[is + i]; indx[is] = indx[is + i]; } } __syncthreads(); i /= 2; } max = s[0]; //---------------------------------------------------------------------------------------------- ig = ig1; sum = 0.0; k = 0; float a = 1.0; while (ig < (n + 1)*SIZE) { if (max > 700){ a = (700 / max); tempy[k] *= a; } tempyD[k] = exp(double(tempy[k])); sum += tempyD[k]; k++; ig += BLOCKSIZE; } sD[is] = sum; __syncthreads(); i = BLOCKSIZE / 2; while (i > 0) { if (is < i && is + i < SIZE) sD[is] += sD[is + i]; __syncthreads(); i /= 2; } sum = sD[0]; //---------------------------------------------------------------------------------------------- ig = ig1; mse1 = 0.0f; k = 0; while (ig < (n + 1)*SIZE) { float temp = float(tempyD[k] / (sum + 2.0e-20)); if (is + k*BLOCKSIZE == s_t) temp -= 1.0f; y[ig] = a*temp; mse1 += temp * temp; k++; ig += BLOCKSIZE; } s[is] = mse1; __syncthreads(); i = BLOCKSIZE / 2; while (i > 0) { if (is < i && is + i < SIZE) s[is] += s[is + i]; __syncthreads(); i /= 2; } if (is == 0) { mse[n] += s[0]; if (indx[0] == s_t) atomicAdd(count, 1.0f); } } //============================================================================================================================================================ template < const int SIZE, const int BLOCKSIZE > __global__ void SoftmaxInference(float *ys, float *y, int *t, float *mse) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* This cuda kernel implements the softmax function used in the inference stage. This function is similar in structure to the Softmax cuda function used in the training phase. However, rather than propagating back the error signal, this function calculates and stores the whole output label for each test image in Ys which then can be analyzed to calculate the mean square error, the confusion matrix, and the classification rate of the test set. */ /**** Argument List ****/ /* t:- input buffer that contains the image labels. mse:- output variable to accumulate the mean square error of the test set. y:- input buffer where the inputs to the softmax function are stored. ys:- output buffer where this kernel stores the whole predicted labels of the test images. 
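The kernel runs one thread block per test image (n = blockIdx.x) with BLOCKSIZE threads; because each thread caches its class scores in the local arrays tempy[8] and tempyD[8], SIZE (the number of classes) must not exceed 8*BLOCKSIZE. An illustrative launch, where 1000 is an example class count and TestBatchSize is an assumed name for the number of test images in the batch: SoftmaxInference<1000, 128><<<TestBatchSize, 128>>>(ys, y, t, mse);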
*/ //------------------------------------------------------------------------------------------------------------------------------------------------- __shared__ float s[BLOCKSIZE]; __shared__ double sD[BLOCKSIZE]; __shared__ int s_t; int is = threadIdx.x; int n = blockIdx.x; int ig = n*SIZE + is, ig1 = ig; if (is == 0){ s_t = t[n]; } float tempy[8], max = -9.9e+30f, mse1; double tempyD[8], sum; int k = 0; while (ig < (n + 1)*SIZE) { tempy[k] = y[ig]; max = fmaxf(max, tempy[k]); k++; ig += BLOCKSIZE; } s[is] = max; __syncthreads(); int i = BLOCKSIZE / 2; while (i > 0) { if (is < i && is + i < SIZE) { s[is] = fmaxf(s[is], s[is + i]); } __syncthreads(); i /= 2; } max = s[0]; //---------------------------------------------------------------------------------------------- ig = ig1; sum = 0.0; k = 0; while (ig < (n + 1)*SIZE) { if (max > 700)tempy[k] *= 700 / max; tempyD[k] = exp(double(tempy[k])); sum += tempyD[k]; k++; ig += BLOCKSIZE; } sD[is] = sum; __syncthreads(); i = BLOCKSIZE / 2; while (i > 0) { if (is < i && is + i < SIZE) sD[is] += sD[is + i]; __syncthreads(); i /= 2; } sum = sD[0]; //---------------------------------------------------------------------------------------------- ig = ig1; mse1 = 0.0f; k = 0; while (ig < (n + 1)*SIZE) { float temp = float(tempyD[k] / (sum + 2.0e-20)); ys[ig] += temp; if (is + k*BLOCKSIZE == s_t) temp -= 1.0f; mse1 += temp * temp; k++; ig += BLOCKSIZE; } s[is] = mse1; __syncthreads(); i = BLOCKSIZE / 2; while (i > 0) { if (is < i && is + i < SIZE) s[is] += s[is + i]; __syncthreads(); i /= 2; } if (is == 0) { mse[n] += s[0]; } } //============================================================================================================================================================ template < const int SIZE, const int BLOCKSIZE > __global__ void LogSigmoid(float *y, int *t, int *Indx, float *mse, float *count) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* This cuda kernel implements the forward and backward passes of the log sigmoid function 1/(1+exp(-x)). */ /**** Argument List ****/ /* Indx:- input buffer that stores the indices or locations of the images in the current batch. t:- input buffer that contains the image labels. count:- output variable to store the total number of images that were correctly classified for the training set or validation set. mse:- output variable to accumulate the mean square error of the training set or validation set y:- input/output buffer where the inputs to the log sigmoid function are stored and where this kernel stores the error signal at the input side of the softmax stage. 
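As with Softmax, one thread block handles one image (n = blockIdx.x) and each thread caches at most 8 values locally, so SIZE must not exceed 8*BLOCKSIZE. An illustrative launch (the class count of 1000 is an example): LogSigmoid<1000, 128><<<BatchSize, 128>>>(y, t, Indx, mse, count);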
*/ //------------------------------------------------------------------------------------------------------------------------------------------------- __shared__ float s[BLOCKSIZE]; __shared__ int indx[BLOCKSIZE], s_t; int is = threadIdx.x; int n = blockIdx.x; int ig = n*SIZE + is, ig1 = ig; if (is == 0) { int indx1 = Indx[n]; s_t = t[indx1]; } float tempy[8], max = -9.9e+30f, mse1; int k = 0, ix; while (ig < (n + 1)*SIZE) { tempy[k] = y[ig]; if (tempy[k]>max){ max = tempy[k]; ix = k; } k++; ig += BLOCKSIZE; } s[is] = max; indx[is] = is + BLOCKSIZE*ix; __syncthreads(); int i = BLOCKSIZE / 2; while (i > 0) { if (is < i && is + i < SIZE) { if (s[is + i] > s[is]) { s[is] = s[is + i]; indx[is] = indx[is + i]; } } __syncthreads(); i /= 2; } //---------------------------------------------------------------------------------------------- ig = ig1; mse1 = 0.0f; k = 0; while (ig < (n + 1)*SIZE) { float temp = float(1.0f / (1.0f + expf(-tempy[k]))); if (is + k*BLOCKSIZE == s_t) temp -= 1.0f; y[ig] = temp; mse1 += temp * temp; k++; ig += BLOCKSIZE; } s[is] = mse1; __syncthreads(); i = BLOCKSIZE / 2; while (i > 0) { if (is < i && is + i < SIZE) s[is] += s[is + i]; __syncthreads(); i /= 2; } if (is == 0) { mse[n] += s[0]; if (indx[0] == s_t) atomicAdd(count, 1.0f); } } //============================================================================================================================================================ template < const int SIZE, const int BLOCKSIZE > __global__ void LogSigmoidInference(float *ys, float *y) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* This cuda kernel implements the log sigmoid function 1/(1+exp(-x)) used in the Inference stage. It stores the whole output label of each test image in ys. */ /**** Argument List ****/ /* y:- input buffer where the inputs to the softmax function are stored. ys:- output buffer where this kernel stores the whole predicted labels of the test images. */ //------------------------------------------------------------------------------------------------------------------------------------------------- int is = threadIdx.x; int n = blockIdx.x; int ig = n*SIZE + is; while (ig < (n + 1)*SIZE) { float temp = float(1.0f / (1.0f + expf(-y[ig]))); ys[ig] += temp; ig += BLOCKSIZE; } } //============================================================================================================================================================ template < int> __global__ void Add_Mtx(float *c, float *a, int SIZE) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* This cuda kernel is an auxiliary cuda function that adds two GPU matrices c = c + a. */ /**** Argument List ****/ /* a:- input matrix. c:- input/output matrix to store c = c + a.. SIZE:- size of the input/output matrices. 
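The body uses a grid-stride loop, so any 1-D launch covers the whole matrix; an illustrative call (the grid and block values are arbitrary examples): Add_Mtx<0><<<64, 256>>>(c, a, SIZE);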
*/ //------------------------------------------------------------------------------------------------------------------------------------------------- int i = blockIdx.x*blockDim.x + threadIdx.x; int stride = blockDim.x*gridDim.x; while (i < SIZE) { c[i] += a[i]; i += stride; } } //============================================================================================================================================================ template < int> __global__ void Update_SGD_WDecay(float *c, float *a, float lr, float lmda, int SIZE) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* This cuda kernel updates the parameters in matrix w (passed as argument c) with the derivatives in matrix dw (passed as argument a). The update equation implements steepest gradient descent with L2 regularization (weight decay). */ /**** Argument List ****/ /* lr:- learning rate. lmda:- weight decay parameter. a:- input buffer that contains the derivatives dw. c:- input/output buffer to store the updated values of the trainable parameters in matrix w. SIZE:- size of the input/output matrices. */ //------------------------------------------------------------------------------------------------------------------------------------------------- int i = blockIdx.x*blockDim.x + threadIdx.x; int stride = blockDim.x*gridDim.x; while (i < SIZE) { c[i] = (1 - lr*lmda)*c[i] - lr * a[i]; i += stride; } } //============================================================================================================================================================ template < int> __global__ void Update_RMSprop1(float *w, float *v, float *dw, float lr, float lmda, int SIZE, int iter) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* This cuda kernel updates the parameters in matrix w with the current derivatives in matrix dw and the running averages of the derivatives in matrix v. The update equation implements Root Mean Square Propagation (RMSprop) with L2 regularization (weight decay). The initialization of the running average of the derivatives based on the time step (iteration number) is borrowed from the Adam algorithm. */ /**** Argument List ****/ /* lr:- learning rate. lmda:- weight decay parameter. iter:- current training iteration. dw:- input buffer that contains the derivatives. v:- input/output buffer that maintains the running average of the squared derivative per parameter. w:- input/output buffer to store the updated values of the trainable parameters in matrix w. SIZE:- size of the input/output matrices.
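In equation form, the update implemented below is (with gamma = 0.999 and epsilon = 1e-8 hard-coded in the kernel): v = gamma*v + (1 - gamma)*dw*dw; m = v / (1 - gamma^iter); w = (1 - lr*lmda)*w - lr*dw / (sqrt(m) + epsilon). Like Add_Mtx, the kernel uses a grid-stride loop, so an illustrative launch is Update_RMSprop1<0><<<64, 256>>>(w, v, dw, lr, lmda, SIZE, iter);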
*/ //------------------------------------------------------------------------------------------------------------------------------------------------- int i = blockIdx.x*blockDim.x + threadIdx.x; int stride = blockDim.x*gridDim.x; while (i < SIZE) { float gamma = 0.999f; v[i] = gamma*v[i] + (1 - gamma)*dw[i] * dw[i]; float m = v[i] / (1 - powf(gamma, float(iter))); w[i] = (1 - lr*lmda)*w[i] - lr* dw[i] / (sqrtf(m) + 0.00000001); i += stride; } } //============================================================================================================================================================ template < int> __global__ void Update_RMSprop2(float *A, float *V, float *DA, float lr, float lmda, int SIZE, int iter) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* Update_RMSprop2 is similar to Update_RMSprop1, but it is used for smaller matrices. */ /**** Argument List ****/ /* lr:- learning rate. lmda:- weight decay parameter. iter:- current training iteration. dA:- input buffer that contains the derivatives. V:- input/output buffer that maintains the running average of the squared derivative per parameter. A:- input/output buffer to store the updated values of the trainable parameters in matrix w. SIZE:- size of the input/output matrices. */ //------------------------------------------------------------------------------------------------------------------------------------------------- int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < SIZE) { float gamma = 0.999f; V[i] = gamma*V[i] + (1 - gamma)*DA[i] * DA[i]; float m = V[i] / (1 - powf(gamma, float(iter))); A[i] = (1 - lr*lmda)*A[i] - lr* DA[i] / (sqrtf(m) + 0.00000001); } } //============================================================================================================================================================ template<int> __global__ void DataAugmentation(float *XCrop, unsigned char *Red, unsigned char *Green, unsigned char *Blue, unsigned int * Height, unsigned int *Width, size_t *Start, int *Indx, unsigned int *Crop, float *RGB, float *Cropf, int IR1) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* This cuda kernel implements data augmentations which is applied at the start of each training iteration. The augmentation is applied to a batch of images where the Red matrix stores the Red channels for all images, Green matrix stores the Green channels for all images, and the Blue matrix stores the Blue channels for all images. The Height and Width matrices store the height and width of each image in the batch, and the Start matrix stores the starting address of each image in the Red, Green, and Blue buffers. The Indx matrix stores the indices of the images in the current batch selected by the reshuffle algorithm. This function crops a random rectangular with size randomly selected to be between 8% and 100% of the image size and with aspect ratio randomly selected to be between 3/4 and 4/3. Then the cropped rectangular is fitted to the square window size of the network. Random horizontal flipping and color augmentation is also added. Bilinear interpolation is used for scaling. */ /**** Argument List ****/ /* Height:- input buffer that holds the height of all images in the batch. Width:- input buffer that holds the width of all images in the batch. 
Start:- input buffer that holds the starting position of all images in the batch. Red:- input buffer where the red input channels for all images in the batch are stored. Green:- input buffer where the green input channels for all images in the batch are stored. Blue:- input buffer where the blue input channels for all images in the batch are stored. Indx:- input buffer that contains the indices of the images in the current batch selected by the reshuffle algorithm. Crop:- input buffer that contains integer random values used to choose the cropping position for each image, and decide on horizontal flipping. RGB:- input buffer that contains 3 random values per image each added to one of the RGB channels for colour augmentation. Cropf:- input buffer that contains 2 random values per image, one decides the amount of scaling, and the other decides the amount of change to the aspect ratio. XCrop:- output buffer to store a batch of data augmented images. */ //------------------------------------------------------------------------------------------------------------------------------------------------- __shared__ unsigned int s_crop[3], s_height, s_width, s_indx; __shared__ float s_RGB[3], s_cropf[2]; __shared__ size_t s_start; int is = threadIdx.x; int ii = is + blockIdx.x*blockDim.x; int n = blockIdx.y; if (is < 3) { s_crop[is] = Crop[3 * n + is]; s_RGB[is] = RGB[3 * n + is]; if (is < 2) s_cropf[is] = Cropf[2 * n + is]; } int ISize1 = IR1*IR1; if (ii < ISize1) { int ix = ii % IR1; int iy = ii / IR1; if (is == 0) { s_indx = Indx[n]; s_height = Height[s_indx]; s_width = Width[s_indx]; s_start = Start[s_indx]; } __syncthreads(); int H = s_height; int W = s_width; int Hc, Wc; size_t start = s_start; float a = 0.08f + s_cropf[0] * (1.0f - 0.08f); //float a = 0.1914 + s_cropf[0] * (0.765625 - 0.1914);// float minHW = fminf(float(H), float(W)); float smax = fminf(1.3333333f, (W*W) / (minHW*minHW*a)); float smin = fmaxf(0.75f, (minHW*minHW*a) / (H*H)); float s = smin + s_cropf[1] * (smax - smin); Wc = int(minHW*sqrtf(a*s)); Hc = int(minHW*sqrtf(a / s)); float ScaleH = float(IR1 - 1) / float(Hc - 1); float ScaleW = float(IR1 - 1) / float(Wc - 1); int xd = s_crop[0] % (H + 1 - Hc); int yd = s_crop[1] % (W + 1 - Wc); int flip = s_crop[2] % 10; int ic = ix + IR1*iy + 3 * n*ISize1; float ixs, iys; int ixs1, ixs2, iys1, iys2; ixs = float(ix) / ScaleH + float(xd); if (flip < 5) iys = float(iy) / ScaleW + float(yd); else iys = float(IR1 - 1 - iy) / ScaleW + float(yd); ixs1 = floorf(ixs); ixs2 = ceilf(ixs); iys1 = floorf(iys); iys2 = ceilf(iys); if (iys1 < iys2) { if (ixs1 < ixs2) { int ia1 = ixs1 + iys1*H + start; int ia2 = ixs2 + iys1*H + start; int ia3 = ixs1 + iys2*H + start; int ia4 = ixs2 + iys2*H + start; float t1 = Red[ia1] * (ixs2 - ixs) + Red[ia2] * (ixs - ixs1); float t2 = Red[ia3] * (ixs2 - ixs) + Red[ia4] * (ixs - ixs1); XCrop[ic] = (s_RGB[0] + t1*(iys2 - iys) + t2*(iys - iys1)) / 255.0f - 0.5f; t1 = Green[ia1] * (ixs2 - ixs) + Green[ia2] * (ixs - ixs1); t2 = Green[ia3] * (ixs2 - ixs) + Green[ia4] * (ixs - ixs1); XCrop[ic + ISize1] = (s_RGB[1] + t1*(iys2 - iys) + t2*(iys - iys1)) / 255.0f - 0.5f; t1 = Blue[ia1] * (ixs2 - ixs) + Blue[ia2] * (ixs - ixs1); t2 = Blue[ia3] * (ixs2 - ixs) + Blue[ia4] * (ixs - ixs1); XCrop[ic + 2 * ISize1] = (s_RGB[2] + t1*(iys2 - iys) + t2*(iys - iys1)) / 255.0f - 0.5f; } else { int ia1 = ixs1 + iys1*H + start; int ia2 = ixs1 + iys2*H + start; XCrop[ic] = (s_RGB[0] + Red[ia1] * (iys2 - iys) + Red[ia2] * (iys - iys1)) / 255.0f - 0.5f; XCrop[ic + ISize1] = (s_RGB[1] + 
Green[ia1] * (iys2 - iys) + Green[ia2] * (iys - iys1)) / 255.0f - 0.5f;; XCrop[ic + 2 * ISize1] = (s_RGB[2] + Blue[ia1] * (iys2 - iys) + Blue[ia2] * (iys - iys1)) / 255.0f - 0.5f; } } else { if (ixs1 < ixs2) { int ia1 = ixs1 + iys1*H + start; int ia2 = ixs2 + iys1*H + start; XCrop[ic] = (s_RGB[0] + Red[ia1] * (ixs2 - ixs) + Red[ia2] * (ixs - ixs1)) / 255.0f - 0.5f; XCrop[ic + ISize1] = (s_RGB[1] + Green[ia1] * (ixs2 - ixs) + Green[ia2] * (ixs - ixs1)) / 255.0f - 0.5f; XCrop[ic + 2 * ISize1] = (s_RGB[2] + Blue[ia1] * (ixs2 - ixs) + Blue[ia2] * (ixs - ixs1)) / 255.0f - 0.5f; } else { int ia1 = ixs1 + iys1*H + start; XCrop[ic] = (s_RGB[0] + Red[ia1]) / 255.0f - 0.5f; XCrop[ic + ISize1] = (s_RGB[1] + Green[ia1]) / 255.0f - 0.5f; XCrop[ic + 2 * ISize1] = (s_RGB[2] + Blue[ia1]) / 255.0f - 0.5f; } } } } //============================================================================================================================================================ template<const int EpochT> __global__ void DataAugmentationInference(float *XCrop, unsigned char *Red, unsigned char *Green, unsigned char *Blue, unsigned int * Height, unsigned int *Width, size_t *Start, int *MTX, unsigned int *Flip, int epoch, int IR1) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* This cuda kernel implements data augmentations for test images in the inference stage. This function can do Single-Crop and Multi-Crop inference based on the EpochTs value which contains the number of crops per image. If EpochTs is equal to 1 then this function will do a single crop-crop inference. If EpochTs >1 this function will do a multi-crop inference. The cropping locations and scales for each test image are stored in the MTX matrix. Each crop will be horizontally flipped with 0.5 probability. The Prediction for each test image is equal to the average predictions of all the crops stored in the MTX matrix. EpochTs is a control variable stored in "ControlVariables.h". Bilinear interpolation is used for scaling. */ /**** Argument List ****/ /* Height:- input buffer that holds the height of all images in the batch. Width:- input buffer that holds the width of all images in the batch. Start:- input buffer that holds the starting position of all images in the batch. Red:- input buffer where the red input channels for all images in the batch are stored. Green:- input buffer where the green input channels for all images in the batch are stored. Blue:- input buffer where the blue input channels for all images in the batch are stored. MTX:- input buffer that contains the cropping positions and amount of scaling applied to all images in the batch. Flip:- input buffer that contains one random value per image that is used to decide on horizontal flipping. epoch:- represents the crop number in multi-crop inference. XCrop:- output buffer to store a test batch of augmented images. 
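A possible launch configuration (illustrative, inferred from the indexing below): one thread per output pixel with the image index in the grid y-dimension, e.g. dim3 grid((IR1*IR1 + 255) / 256, TestBatchSize); DataAugmentationInference<EpochT><<<grid, 256>>>(XCrop, Red, Green, Blue, Height, Width, Start, MTX, Flip, epoch, IR1); where TestBatchSize is an assumed name for the number of test images in the batch and epoch selects which of the EpochT stored crops is generated.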
*/ //------------------------------------------------------------------------------------------------------------------------------------------------- __shared__ unsigned int s_height, s_width, s_flip, s_mtx[3]; __shared__ size_t s_start; int is = threadIdx.x; int ii = is + blockIdx.x*blockDim.x; int n = blockIdx.y; if (is < 3) s_mtx[is] = MTX[n * 3 * EpochT + 3 * epoch + is]; int ISize1 = IR1*IR1; if (ii < ISize1) { int ix = ii % IR1; int iy = ii / IR1; if (is == 0) { s_height = Height[n]; s_width = Width[n]; s_start = Start[n]; s_flip = Flip[n]; } __syncthreads(); int H = s_height; int W = s_width; int Hs, Ws; float Hf = H, Wf = W; size_t start = s_start; if (H > W) { Ws = s_mtx[0]; Hs = Ws*(Hf / Wf); } else { Hs = s_mtx[0]; Ws = Hs*(Wf / Hf); } int xd = s_mtx[1]; int flip = s_flip % 10; int yd = s_mtx[2]; int ic = ix + IR1*iy + 3 * n*ISize1; float ixs, iys; int ixs1, ixs2, iys1, iys2; ixs = (ix + xd)*((Hf - 1) / (Hs - 1)); if (flip < 5) iys = (iy + yd)*((Wf - 1) / (Ws - 1)); else iys = (IR1 - 1 - iy + yd)*((Wf - 1) / (Ws - 1)); //else iys = (Ws - 1 - iy - yd)*((Wf - 1) / (Ws - 1)); ixs1 = floorf(ixs); ixs2 = ceilf(ixs); iys1 = floorf(iys); iys2 = ceilf(iys); if (iys1 < iys2) { if (ixs1 < ixs2) { int ia1 = ixs1 + iys1*H + start; int ia2 = ixs2 + iys1*H + start; int ia3 = ixs1 + iys2*H + start; int ia4 = ixs2 + iys2*H + start; float t1 = Red[ia1] * (ixs2 - ixs) + Red[ia2] * (ixs - ixs1); float t2 = Red[ia3] * (ixs2 - ixs) + Red[ia4] * (ixs - ixs1); XCrop[ic] = (t1*(iys2 - iys) + t2*(iys - iys1)) / 255.0f - 0.5f; t1 = Green[ia1] * (ixs2 - ixs) + Green[ia2] * (ixs - ixs1); t2 = Green[ia3] * (ixs2 - ixs) + Green[ia4] * (ixs - ixs1); XCrop[ic + ISize1] = (t1*(iys2 - iys) + t2*(iys - iys1)) / 255.0f - 0.5f; t1 = Blue[ia1] * (ixs2 - ixs) + Blue[ia2] * (ixs - ixs1); t2 = Blue[ia3] * (ixs2 - ixs) + Blue[ia4] * (ixs - ixs1); XCrop[ic + 2 * ISize1] = (t1*(iys2 - iys) + t2*(iys - iys1)) / 255.0f - 0.5f; } else { int ia1 = ixs1 + iys1*H + start; int ia2 = ixs1 + iys2*H + start; XCrop[ic] = (Red[ia1] * (iys2 - iys) + Red[ia2] * (iys - iys1)) / 255.0f - 0.5f; XCrop[ic + ISize1] = (Green[ia1] * (iys2 - iys) + Green[ia2] * (iys - iys1)) / 255.0f - 0.5f;; XCrop[ic + 2 * ISize1] = (Blue[ia1] * (iys2 - iys) + Blue[ia2] * (iys - iys1)) / 255.0f - 0.5f; } } else { if (ixs1 < ixs2) { int ia1 = ixs1 + iys1*H + start; int ia2 = ixs2 + iys1*H + start; XCrop[ic] = (Red[ia1] * (ixs2 - ixs) + Red[ia2] * (ixs - ixs1)) / 255.0f - 0.5f; XCrop[ic + ISize1] = (Green[ia1] * (ixs2 - ixs) + Green[ia2] * (ixs - ixs1)) / 255.0f - 0.5f; XCrop[ic + 2 * ISize1] = (Blue[ia1] * (ixs2 - ixs) + Blue[ia2] * (ixs - ixs1)) / 255.0f - 0.5f; } else { int ia1 = ixs1 + iys1*H + start; XCrop[ic] = Red[ia1] / 255.0f - 0.5f; XCrop[ic + ISize1] = Green[ia1] / 255.0f - 0.5f; XCrop[ic + 2 * ISize1] = Blue[ia1] / 255.0f - 0.5f; } } } } //============================================================================================================================================================ template<int> __global__ void DataAugmentationValidate(float *XCrop, unsigned char *Red, unsigned char *Green, unsigned char *Blue, unsigned int * Height, unsigned int *Width, size_t *Start, int *Indx, int IR1) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* This cuda kernel is a simplified version of DataAugmentation used with the validation images. 
A single central crop with size equal to 224/256 of the maximum square size in the image is used to calculate the validation error. Bilinear interpolation is used for scaling. */ /**** Argument List ****/ /* Height:- input buffer that holds the height of all images in the batch. Width:- input buffer that holds the width of all images in the batch. Start:- input buffer that holds the starting position of all images in the batch. Red:- input buffer where the red input channels for all images in the batch are stored. Green:- input buffer where the green input channels for all images in the batch are stored. Blue:- input buffer where the blue input channels for all images in the batch are stored. Indx:- input buffer that contains the indices of the images in the current batch selected by the reshuffle algorithm. XCrop:- output buffer to store a batch of data augmented images. */ //------------------------------------------------------------------------------------------------------------------------------------------------- __shared__ unsigned int s_height, s_width, s_indx; __shared__ size_t s_start; int is = threadIdx.x; int ii = is + blockIdx.x*blockDim.x; int ISize1 = IR1*IR1; if (ii < ISize1) { int ix = ii % IR1; int iy = ii / IR1; int n = blockIdx.y; if (is == 0) { s_indx = Indx[n]; s_height = Height[s_indx]; s_width = Width[s_indx]; s_start = Start[s_indx]; } __syncthreads(); int H = s_height; int W = s_width; int Hs, Ws; float Hf = H, Wf = W; size_t start = s_start; if (H > W) { Ws = IR1;//1.143f*IR1; // Hs = Ws*(Hf / Wf); } else { Hs = IR1;//1.143f*IR1; // Ws = Hs*(Wf / Hf); } int xd = (Hs - IR1) / 2; int yd = (Ws - IR1) / 2; int ic = ix + IR1*iy + 3 * n*ISize1; float ixs, iys; int ixs1, ixs2, iys1, iys2; ixs = (ix + xd)*((Hf - 1) / (Hs - 1)); iys = (iy + yd)*((Wf - 1) / (Ws - 1)); //if (flip == 0) iys = (iy + yd)*((Wf - 1) / (Ws - 1)); else iys = (IR1 - 1 - iy + yd)*((Wf - 1) / (Ws - 1)); //else iys = (Ws - 1 - iy - yd)*((Wf - 1) / (Ws - 1)); ixs1 = floorf(ixs); ixs2 = ceilf(ixs); iys1 = floorf(iys); iys2 = ceilf(iys); if (iys1 < iys2) { if (ixs1 < ixs2) { int ia1 = ixs1 + iys1*H + start; int ia2 = ixs2 + iys1*H + start; int ia3 = ixs1 + iys2*H + start; int ia4 = ixs2 + iys2*H + start; float t1 = Red[ia1] * (ixs2 - ixs) + Red[ia2] * (ixs - ixs1); float t2 = Red[ia3] * (ixs2 - ixs) + Red[ia4] * (ixs - ixs1); XCrop[ic] = (t1*(iys2 - iys) + t2*(iys - iys1)) / 255.0f - 0.5f; t1 = Green[ia1] * (ixs2 - ixs) + Green[ia2] * (ixs - ixs1); t2 = Green[ia3] * (ixs2 - ixs) + Green[ia4] * (ixs - ixs1); XCrop[ic + ISize1] = (t1*(iys2 - iys) + t2*(iys - iys1)) / 255.0f - 0.5f; t1 = Blue[ia1] * (ixs2 - ixs) + Blue[ia2] * (ixs - ixs1); t2 = Blue[ia3] * (ixs2 - ixs) + Blue[ia4] * (ixs - ixs1); XCrop[ic + 2 * ISize1] = (t1*(iys2 - iys) + t2*(iys - iys1)) / 255.0f - 0.5f; } else { int ia1 = ixs1 + iys1*H + start; int ia2 = ixs1 + iys2*H + start; XCrop[ic] = (Red[ia1] * (iys2 - iys) + Red[ia2] * (iys - iys1)) / 255.0f - 0.5f; XCrop[ic + ISize1] = (Green[ia1] * (iys2 - iys) + Green[ia2] * (iys - iys1)) / 255.0f - 0.5f;; XCrop[ic + 2 * ISize1] = (Blue[ia1] * (iys2 - iys) + Blue[ia2] * (iys - iys1)) / 255.0f - 0.5f; } } else { if (ixs1 < ixs2) { int ia1 = ixs1 + iys1*H + start; int ia2 = ixs2 + iys1*H + start; XCrop[ic] = (Red[ia1] * (ixs2 - ixs) + Red[ia2] * (ixs - ixs1)) / 255.0f - 0.5f; XCrop[ic + ISize1] = (Green[ia1] * (ixs2 - ixs) + Green[ia2] * (ixs - ixs1)) / 255.0f - 0.5f; XCrop[ic + 2 * ISize1] = (Blue[ia1] * (ixs2 - ixs) + Blue[ia2] * (ixs - ixs1)) / 255.0f - 0.5f; } else { int ia1 = ixs1 + iys1*H 
+ start; XCrop[ic] = Red[ia1] / 255.0f - 0.5f; XCrop[ic + ISize1] = Green[ia1] / 255.0f - 0.5f; XCrop[ic + 2 * ISize1] = Blue[ia1] / 255.0f - 0.5f; } } } } //============================================================================================================================================================ template<int BLOCKSIZE> __global__ void BatchNormForward1a(float *SMU, float *X, int NumCh, int ChSize) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* This cuda kernel calcualtes the mean and variance per thread block. */ /**** Argument list****/ /* NumCh:- Number of output channels per image. Total number of channels in a convolutional layer is NumCh*BatchSize. ChSize:- The size of each output channel. X :- input buffer that contains the activations of all output channels before applying BN. SMU:- output buffer where this function stores all means and variances calculated per thread block. */ //------------------------------------------------------------------------------------------------------------------------------------------------- __shared__ float s1[BLOCKSIZE], s2[BLOCKSIZE]; int is = threadIdx.x; int n = blockIdx.x + NumCh*blockIdx.y; int ig = is + n*ChSize; float temp, sum = 0, sum_sq = 0; while (ig < (n + 1)*ChSize) { temp = X[ig]; sum += temp; sum_sq += temp*temp; ig += BLOCKSIZE; } s1[is] = sum; s2[is] = sum_sq; __syncthreads(); int i = BLOCKSIZE / 2; while (i > 0) { if (is < i) { s1[is] += s1[is + i]; s2[is] += s2[is + i]; } __syncthreads(); i /= 2; } if (is == 0) { SMU[2 * n] = s1[0]; SMU[2 * n + 1] = s2[0]; } } //============================================================================================================================================================ template<int BLOCKSIZE> __global__ void BatchNormForward1b(float *SMU, int Ch, int TotalChSize) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* This cuda kernel accumulates the means and variances per thread block calculated by BatchNormForward1a to calculate the mean and variance per output channel. The reason for this two stage calculation of the means and variances is caused by the layout of the output channels in the GPU memory used by the convolutional layer implementation of the cudnn.lib library. The layout is CNHW where the order of the tensor inner dimensions is Width, Height, N for image index and Channel. If the layout was NCHW the calculations of the means and variances can easily and efficiently be implemented in a single stage. Anyway splitting the calculation into two consecutive stages adds minimal overhead. */ /**** Argument list****/ /* NumCh:- Number of output channels per image. Total number of channels in a convolutional layer is NumCh*BatchSize. TotalChSize:- The size of each output channel across all images in the batch TotalChSize = ChSize*BatchSize. SMU:- Output buffer where this function calculates and stores a total of NumCh mean-variance pairs for each of the NumCh output channels. 
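The two stages are expected to be launched back to back, for example (illustrative, not taken from the original training loop): BatchNormForward1a<256><<<dim3(NumCh, BatchSize), 256>>>(SMU, X, NumCh, ChSize); followed by BatchNormForward1b<256><<<(NumCh + 255) / 256, 256>>>(SMU, NumCh, ChSize*BatchSize); after which SMU holds one mean and one spread value per channel (note that the second entry stored per channel is sqrtf(variance + 0.0001), i.e. a smoothed standard deviation rather than the raw variance).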
*/ //------------------------------------------------------------------------------------------------------------------------------------------------- int is = blockIdx.x*blockDim.x + threadIdx.x; if (is < Ch) { int ix = 2 * is; int size = 2 * Ch*BatchSize; float sum = 0.0f, sum_sq = 0.0f; while (ix < size) { sum += SMU[ix]; sum_sq += SMU[ix + 1]; ix += 2 * Ch; } float temp = TotalChSize; sum /= temp; SMU[2 * is] = sum; temp = sum_sq / temp - sum*sum; SMU[2 * is + 1] = sqrtf(temp + 0.0001); } } //============================================================================================================================================================ template<int BLOCKSIZE> __global__ void BatchNormForward2(float *Y, float *X, float *SMU, float *Param, int NumCh, int ChSize) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* This cuda kernel uses the means and variances calculated by BatchNormForward1a and BatchNormForward1b to apply batch normalization to the output channels. */ /**** Argument list***/ /* NumCh:- Number of output channels per image. Total number of channels in a convolutional layer is NumCh*BatchSize ChSize:- The size of each output channel. Param:- A buffer that contains the BN trainable parameters beta and gamma. There is a total of NumCh beta-gamma pairs, one for each of the NumCh output channels. X :- input buffer that contains the activations of all output channels before applying BN. SMU:- input buffer that contains a means-variance pair per output channel, and each pair will be used to normalize the activations of the corresponding output channel. Y :- output buffer where this function stores the normalized activations of all output channels. */ //------------------------------------------------------------------------------------------------------------------------------------------------- __shared__ float s_param[2], s_smu[2]; int is = threadIdx.x; int ix = blockIdx.x*BLOCKSIZE + is; int n = blockIdx.y; int b = blockIdx.z; if (is < 2) { s_param[is] = Param[2 * n + is]; s_smu[is] = SMU[2 * n + is]; } __syncthreads(); if (ix < ChSize) { int ig = (NumCh*b + n)*ChSize + ix; float temp = (X[ig] - s_smu[0]) / s_smu[1]; temp = s_param[0] * temp + s_param[1]; Y[ig] = fmaxf(temp, 0); } } //============================================================================================================================================================ template<int BLOCKSIZE> __global__ void BatchNormBackward2(float *DParam, float *Derv, float *Param, float *SMU, float *DY, float *X, int NumCh, int ChSize) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* This cuda kernel is the first stage that propagates the error signal back through the batch normalization stage. DY contains the error signal at the output side of the BN stage. This kernel calculates the derivatives for the BN trainable parameters in DParam, and partially propagates the error signal back to the inputs of the BN stage and stores these intermediate values in Derv. */ /****Argument list****/ /* NumCh:- Number of output channels per image. Total number of channels in a convolutional layer is NumCh*BatchSize ChSize:- The size of each output channel. Param:- input buffer that contains the BN trainable parameters beta and gamma. There is a total of NumCh beta-gamma pairs, one for each of the NumCh output channels. 
X :- input buffer that contains the activations of all output channels before applying BN. SMU:- input buffer that contains a means-variance pair per output channel, and each pair will be used to normalize the activations of the corresponding output channel. DY :- input buffer that contains the error signal at the outputs of the BN stage. The derivatives of the lost function with respect to the outputs of BN. Derv:- output buffer where this function calculates and stores a total of NumCh pairs of intermediate values that will used by the next stage to propagate back the error signal to the inputs of the BN stage. DParam:- output buffer where this function calculates and stores the derivatives of beta and gamma, the trainable parameters of BN. */ //------------------------------------------------------------------------------------------------------------------------------------------------- __shared__ float s1[BLOCKSIZE], s2[BLOCKSIZE], s3[BLOCKSIZE]; __shared__ float s_smu[2], s_param[2]; int is = threadIdx.x; int n = blockIdx.x; int b = blockIdx.y; int n2 = n + NumCh*b; if (is < 2) { s_smu[is] = SMU[2 * n + is]; s_param[is] = Param[2 * n + is]; } __syncthreads(); float mu = s_smu[0], inv_sigma = 1.0f / s_smu[1], gamma = s_param[0], beta = s_param[1]; int ig = is + n2*ChSize; float temp1, temp2, temp, sum1 = 0.0f, sum2 = 0.0f, sum3 = 0.0f; float a = gamma*inv_sigma; float b1 = beta - a*mu; while (ig < (n2 + 1)*ChSize) { temp2 = X[ig]; temp = a*temp2 + b1; if (temp>0) { temp1 = DY[ig]; sum1 += temp1; sum2 += temp1*(temp2 - mu) * inv_sigma; sum3 += temp1*(temp2 - mu); } else { DY[ig] = 0; } ig += BLOCKSIZE; } s1[is] = sum1; s2[is] = sum2; s3[is] = sum3; __syncthreads(); int i = BLOCKSIZE / 2; while (i > 0) { if (is < i) { s1[is] += s1[is + i]; s2[is] += s2[is + i]; s3[is] += s3[is + i]; } __syncthreads(); i /= 2; } if (is == 0) { atomicAdd(DParam + 2 * n, s2[0]); atomicAdd(DParam + 2 * n + 1, s1[0]); atomicAdd(Derv + 2 * n, gamma*s1[0]); atomicAdd(Derv + 2 * n + 1, gamma*s3[0]); } } //============================================================================================================================================================ template<int BLOCKSIZE> __global__ void BatchNormBackward1(float *X, float *DY, float *Param, float *SMU, float *Derv, int NumCh, int ChSize) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* This cuda kernel completes the back propagation of the error signal through the BN stage. */ /****Argument list****/ /* NumCh:- Number of output channels per image. Total number of channels in a convolutional layer is NumCh*BatchSize ChSize:- The size of each output channel. Param:- input buffer that contains the BN trainable parameters beta and gamma. There is a total of NumCh beta-gamma pairs, one for each of the NumCh output channels. SMU:- input buffer that contains a means-variance pair per output channel, and each pair will be used to normalize the activations of the corresponding output channel. DY :- input buffer that contains the error signal at the outputs of the BN stage. The derivatives of the lost function with respect to the outputs of BN. Derv:- input buffer which contains a total of NumCh pairs of intermediate values that will used by the this function to propagate back ther error signal to the inputs (X) of the BN stage. X :- output buffer where this function calculates and stores the error signal with respect to the inputs of the BN stage. 
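Written out, the value computed below is dX = (1/sigma) * ( gamma*DY - Derv[2n]/m - (X - mu)*Derv[2n+1]/(m*sigma*sigma) ), where m = BatchSize*ChSize and Derv[2n], Derv[2n+1] are the gamma-scaled sums of DY and of DY*(X - mu) accumulated by BatchNormBackward2 (or BatchNormBackward22); this matches the standard batch-normalization backward formula with the ReLU mask already folded into DY. An illustrative launch, mirroring BatchNormForward2: BatchNormBackward1<256><<<dim3((ChSize + 255) / 256, NumCh, BatchSize), 256>>>(X, DY, Param, SMU, Derv, NumCh, ChSize);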
*/ //------------------------------------------------------------------------------------------------------------------------------------------------- __shared__ float s_smu[2], s_derv[2], s_gamma; int is = threadIdx.x; int ix = blockIdx.x*BLOCKSIZE + is; int n = blockIdx.y; int b = blockIdx.z; if (is < 2) { s_smu[is] = SMU[2 * n + is]; s_derv[is] = Derv[2 * n + is]; if (is == 0) { s_gamma = Param[2 * n]; } } __syncthreads(); if (ix < ChSize) { float temp; float mu = s_smu[0], inv_sigma = 1.0f / s_smu[1]; float derv1 = s_derv[0], derv2 = s_derv[1], inv_m = 1.0f / (BatchSize*ChSize); int ig = (NumCh*b + n)*ChSize + ix; temp = inv_sigma*(s_gamma*DY[ig] - derv1*inv_m - (X[ig] - mu)*derv2*inv_m*inv_sigma*inv_sigma); X[ig] = temp; } } //============================================================================================================================================================ template<int BLOCKSIZE> __global__ void BatchNormForward22(float *Y, float *X, float *Y0, bool *F, float *SMU, float *Param, int NumCh, int ChSize) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* BatchNormForward22 is similar to BatchNormForward2, but it has an additional input Y0, which is an input from a residual connection. Also this kernel stores the sign of the output in F to be used in the backward pass. Therefore, when the current stage (Layer) has an additional input coming from a previous stage through a residual connection, BatchNormForward22 is used instead of BatchNormForward2. */ /****Argument list****/ /* NumCh :- Number of output channels per image. Total number of channels in a convolutional layer is NumCh*BatchSize ChSize :- The size of each output channel. Param :- A buffer that contains the BN trainable parameters beta and gamma. There is a total of NumCh beta-gamma pairs, one for each of the NumCh output channels. X :- input buffer that contains the activations of all output channels before applying BN. Y0 :- input buffer that contains the activations of the jump-ahead residual connections. SMU :- input buffer that contains a means-variance pair per output channel, and each pair will be used to normalize the activations of the corresponding output channel. F :- output buffer that holds the signs of each output element in Y0 which will be used in the backward pass to propagate the error signal through the ReLUs. Y :- output buffer where this function stores the normalized activations of all output channels. */ //------------------------------------------------------------------------------------------------------------------------------------------------- __shared__ float s_param[2], s_smu[2]; int is = threadIdx.x; int ix = blockIdx.x*BLOCKSIZE + is; int n = blockIdx.y; int b = blockIdx.z; if (is < 2) { s_param[is] = Param[2 * n + is]; s_smu[is] = SMU[2 * n + is]; } __syncthreads(); if (ix < ChSize) { int ig = (NumCh*b + n)*ChSize + ix; float temp = (X[ig] - s_smu[0]) / s_smu[1]; temp = s_param[0] * temp + s_param[1] + Y0[ig]; temp = fmaxf(temp, 0); Y[ig] = temp; F[ig] = (temp>0) ? 
1 : 0; } } //============================================================================================================================================================ template<int BLOCKSIZE> __global__ void BatchNormBackward22(float *DParam, float *Derv, float *Param, float *SMU, float *DY, bool *F, float *X, int NumCh, int ChSize) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* BatchNormBackward22 is similar to BatchNormBackward2, but it has an additional input F, which is the sign of the BN output in the forward pass. Therefore, when the current stage (Layer) has an additional input coming from a previous stage through a residual connection, BatchNormBackward22 is used instead of BatchNormBackward2. */ /**** Argument list****/ /* NumCh:- Number of output channels per image. Total number of channels in a convolutional layer is NumCh*BatchSize. ChSize:- The size of each output channel. Param:- input buffer that contains the BN trainable parameters beta and gamma. There is a total of NumCh beta-gamma pairs, one for each of the NumCh output channels. X :- input buffer that contains the activations of all output channels before applying BN. F :- input buffer that is used to propagate the error signal back through the ReLU activation function. SMU:- input buffer that contains a mean-variance pair per output channel, and each pair will be used to normalize the activations of the corresponding output channel. DY :- input buffer that contains the error signal at the outputs of the BN stage, i.e. the derivatives of the loss function with respect to the outputs of BN. Derv:- output buffer where this function calculates and stores a total of NumCh pairs of intermediate values that will be used by the next stage to propagate back the error signal to the inputs of the BN stage. DParam:- output buffer where this function calculates and stores the derivatives of beta and gamma, the trainable parameters of BN.
*/ //------------------------------------------------------------------------------------------------------------------------------------------------- __shared__ float s1[BLOCKSIZE], s2[BLOCKSIZE], s3[BLOCKSIZE]; __shared__ float s_smu[2], s_param[2]; int is = threadIdx.x; int n = blockIdx.x; int b = blockIdx.y; int n2 = n + NumCh*b; if (is < 2) { s_smu[is] = SMU[2 * n + is]; s_param[is] = Param[2 * n + is]; } __syncthreads(); float mu = s_smu[0], inv_sigma = 1.0f / s_smu[1], gamma = s_param[0]; int ig = is + n2*ChSize; float temp1, temp2, sum1 = 0.0f, sum2 = 0.0f, sum3 = 0.0f; while (ig < (n2 + 1)*ChSize) { temp2 = X[ig]; if (F[ig]>0) { temp1 = DY[ig]; sum1 += temp1; sum2 += temp1*(temp2 - mu) * inv_sigma; sum3 += temp1*(temp2 - mu); } else { DY[ig] = 0; } ig += BLOCKSIZE; } s1[is] = sum1; s2[is] = sum2; s3[is] = sum3; __syncthreads(); int i = BLOCKSIZE / 2; while (i > 0) { if (is < i) { s1[is] += s1[is + i]; s2[is] += s2[is + i]; s3[is] += s3[is + i]; } __syncthreads(); i /= 2; } if (is == 0) { atomicAdd(DParam + 2 * n, s2[0]); atomicAdd(DParam + 2 * n + 1, s1[0]); atomicAdd(Derv + 2 * n, gamma*s1[0]); atomicAdd(Derv + 2 * n + 1, gamma*s3[0]); } } //============================================================================================================================================================ template<int BLOCKSIZE> __global__ void BatchNormForwardT1b(float *SMU, float *SMUs, int NumCh, int count) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* BatchNormForwardT1b is similar to BatchNormForward1b, but it has an extra output SMUs to accumulate the means and variances from all training images. This kernel will only be executed after the last training epoch. After training stops these accumulated values will averaged by AdjustFixedMeansStds. */ /**** Argument list****/ /* NumCh:- Number of output channels per image. Total number of channels in a convolutional layer is NumCh*BatchSize TotalChSize:- The size of each output channel across all images in the batch TotalChSize = ChSize*BatchSize. SMU:- input buffer that contains a means-variance pair per output channel, and each pair will be used to normalize the activations of the corresponding output channel. SMUs:- Output buffer where this function calculates and stores a total of NumCh fixed mean-variance pairs that will be used in the inference stage. 
*/ //------------------------------------------------------------------------------------------------------------------------------------------------- int is = blockIdx.x*blockDim.x + threadIdx.x; if (is < NumCh) { int ix = 2 * is; int size = 2 * NumCh*BatchSize; float sum = 0.0f, sum_sq = 0.0f; while (ix < size) { sum += SMU[ix]; sum_sq += SMU[ix + 1]; ix += 2 * NumCh; } float temp = count; sum /= temp; SMU[2 * is] = sum; SMUs[2 * is] += sum; temp = sum_sq / temp - sum*sum; SMU[2 * is + 1] = sqrtf(temp + 0.0001); SMUs[2 * is + 1] += temp; } } //============================================================================================================================================================ template<int> __global__ void AdjustFixedMeansStds(float *SMU, int NumCh) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* This cuda kernel uses the accumulated values of means and variances calculated by BatchNormForward1b, to calculated the fixed means and variances that will be used in the inference stage. */ /**** Argument list****/ /* NumCh:- Number of output channels. SMU:- input buffer that conatins the accumulated means and variances for all training data. */ //------------------------------------------------------------------------------------------------------------------------------------------------- int ig = threadIdx.x + blockIdx.x*blockDim.x; float temp = float(TrainSizeM / BatchSize); if (ig < NumCh) { float temp_value = SMU[ig] / temp; if (ig % 2 == 1) { temp_value = sqrtf(temp_value + 0.0001); } SMU[ig] = temp_value; } } //============================================================================================================================================================ template<int SIZE> __global__ void RGBrandPCA(float *RGBrand, float *rand1) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* This cuda kernel implements calculates a set of 3 stochastic values per image to be added to the 3 RGB channels for the purpose of colour augmentation. For each random variable in the input buffer rand1 this kernel will calculate a corresponding stochastic value in RGBrand based on PCA analysis of the RGB pixel values of all the training set. */ /****Argument List****/ /* rand1:- input buffer of random values drawn from a normal distribution with zero mean and unity variance. RGBrand:- output buffer to store the */ //------------------------------------------------------------------------------------------------------------------------------------------------- int is = threadIdx.x; int ig = is + blockIdx.x*blockDim.x; if (ig < SIZE) { float alpha1 = rand1[3 * ig] * 6.9514; float alpha2 = rand1[3 * ig + 1] * 17.3739; float alpha3 = rand1[3 * ig + 2] * 305.65817; float vr1 = -0.4000, vr2 = -0.7061, vr3 = 0.58426; float vg1 = 0.80526, vg2 = 0.0336, vg3 = 0.59196; float vb1 = -0.4376, vb2 = 0.7073, vb3 = 0.55517; RGBrand[3 * ig] = vr1*alpha1 + vr2*alpha2 + vr3*alpha3; RGBrand[3 * ig + 1] = vg1*alpha1 + vg2*alpha2 + vg3*alpha3; RGBrand[3 * ig + 2] = vb1*alpha1 + vb2*alpha2 + vb3*alpha3; } } //============================================================================================================================================================
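The index arithmetic in the kernels above implies a fixed grid layout: blockIdx.x tiles the elements of one channel, blockIdx.y selects the channel, and blockIdx.z selects the image in the batch. The launch sketch below is not part of the original source; the buffer names (d_Y, d_X, d_Y0, d_F, d_SMU, d_Param) and the BLOCK/BATCH values are placeholders, and it assumes BLOCK equals the BLOCKSIZE template argument and BATCH equals the BatchSize constant the kernels were compiled with.

// Hypothetical launch of BatchNormForward22 (illustration only, not from the original file).
const int BLOCK = 256;                     // block width; also the BLOCKSIZE template argument
const int BATCH = 128;                     // assumed batch size (must match the compiled BatchSize)
int numCh = 64, chSize = 32 * 32;          // assumed channel count and per-channel size

dim3 grid((chSize + BLOCK - 1) / BLOCK,    // blockIdx.x: tiles one channel's elements
          numCh,                           // blockIdx.y: channel index n
          BATCH);                          // blockIdx.z: image index b
BatchNormForward22<BLOCK><<<grid, BLOCK>>>(d_Y, d_X, d_Y0, d_F, d_SMU, d_Param, numCh, chSize);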
55ae6f90fbb4f984d86928756d3419520bf2619b.hip
// !!! This is a file automatically generated by hipify!!! #include "stdio.h" #include "hip/hip_runtime.h" #include "matAddcuda.h" /** * CUDA Kernel Device code * Computes the matrix addition of a and b into c. */ __global__ void matrixAdd(float *a, float *b, float *c, int n2) { int x = blockIdx.x; int y = blockIdx.y; int i = (n2*y) + x; c[i] = a[i] + b[i]; } /** * Host main routine */ /*int main(void) { // Allocate host vectors float h_a[ROW][COL]; float h_b[ROW][COL]; float h_c[ROW][COL]; float *d_a; float *d_b; float *d_c; printf("Host memory allocated \n"); // Allocate memory hipMalloc((void **) &d_a, ROW*COL*sizeof(float)); hipMalloc((void **) &d_b, ROW*COL*sizeof(float)); hipMalloc((void **) &d_c, ROW*COL*sizeof(float)); printf("Device memory allocated \n"); for (int i1 = 0; i1 < ROW; i1++) { for (int i2 = 0; i2 < COL; i2++ ) { //float den = 1/(i1+i2); h_a[i1][i2] = i1; h_b[i1][i2] = i2; h_c[i1][i2] = 0; } } hipMemcpy(d_a,h_a,ROW*COL*sizeof(float),hipMemcpyHostToDevice); hipMemcpy(d_b,h_b,ROW*COL*sizeof(float),hipMemcpyHostToDevice); printf("Host memory copied to device memory \n"); int numBlocks =1; dim3 threadsPerBlock(COL,ROW); matrixAdd<<<threadsPerBlock,numBlocks>>>(d_a,d_b,d_c); hipMemcpy(h_c,d_c,ROW*COL*sizeof(float),hipMemcpyDeviceToHost); printf("Device memory copied to host memory \n"); printf("Output matrix : \n"); for (int i1 = 0; i1 < ROW; i1++) { for (int i2 = 0; i2 < COL; i2++) { printf("%f + %f = %f \n",h_a[i1][i2],h_b[i1][i2],h_c[i1][i2]); } } hipFree(d_a); hipFree(d_b); hipFree(d_c); return 0; } */
55ae6f90fbb4f984d86928756d3419520bf2619b.cu
#include "stdio.h" #include "cuda_runtime.h" #include "matAddcuda.h" /** * CUDA Kernel Device code * Computes the matrix addition of a and b into c. */ __global__ void matrixAdd(float *a, float *b, float *c, int n2) { int x = blockIdx.x; int y = blockIdx.y; int i = (n2*y) + x; c[i] = a[i] + b[i]; } /** * Host main routine */ /*int main(void) { // Allocate host vectors float h_a[ROW][COL]; float h_b[ROW][COL]; float h_c[ROW][COL]; float *d_a; float *d_b; float *d_c; printf("Host memory allocated \n"); // Allocate memory cudaMalloc((void **) &d_a, ROW*COL*sizeof(float)); cudaMalloc((void **) &d_b, ROW*COL*sizeof(float)); cudaMalloc((void **) &d_c, ROW*COL*sizeof(float)); printf("Device memory allocated \n"); for (int i1 = 0; i1 < ROW; i1++) { for (int i2 = 0; i2 < COL; i2++ ) { //float den = 1/(i1+i2); h_a[i1][i2] = i1; h_b[i1][i2] = i2; h_c[i1][i2] = 0; } } cudaMemcpy(d_a,h_a,ROW*COL*sizeof(float),cudaMemcpyHostToDevice); cudaMemcpy(d_b,h_b,ROW*COL*sizeof(float),cudaMemcpyHostToDevice); printf("Host memory copied to device memory \n"); int numBlocks =1; dim3 threadsPerBlock(COL,ROW); matrixAdd<<<threadsPerBlock,numBlocks>>>(d_a,d_b,d_c); cudaMemcpy(h_c,d_c,ROW*COL*sizeof(float),cudaMemcpyDeviceToHost); printf("Device memory copied to host memory \n"); printf("Output matrix : \n"); for (int i1 = 0; i1 < ROW; i1++) { for (int i2 = 0; i2 < COL; i2++) { printf("%f + %f = %f \n",h_a[i1][i2],h_b[i1][i2],h_c[i1][i2]); } } cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); return 0; } */
4996d8a2a135345142f26bed2582fbe424c6f61c.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "adagradKernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int numberIterations = 1; int *parameterIndices = NULL; hipMalloc(&parameterIndices, XSIZE*YSIZE); int *counts = NULL; hipMalloc(&counts, XSIZE*YSIZE); int dimension = 1; float *parameters = NULL; hipMalloc(&parameters, XSIZE*YSIZE); float *gradient = NULL; hipMalloc(&gradient, XSIZE*YSIZE); float learningRate = 1; float *history = NULL; hipMalloc(&history, XSIZE*YSIZE); float epsilon = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( adagradKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, numberIterations,parameterIndices,counts,dimension,parameters,gradient,learningRate,history,epsilon); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( adagradKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, numberIterations,parameterIndices,counts,dimension,parameters,gradient,learningRate,history,epsilon); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( adagradKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, numberIterations,parameterIndices,counts,dimension,parameters,gradient,learningRate,history,epsilon); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
4996d8a2a135345142f26bed2582fbe424c6f61c.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "adagradKernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int numberIterations = 1; int *parameterIndices = NULL; cudaMalloc(&parameterIndices, XSIZE*YSIZE); int *counts = NULL; cudaMalloc(&counts, XSIZE*YSIZE); int dimension = 1; float *parameters = NULL; cudaMalloc(&parameters, XSIZE*YSIZE); float *gradient = NULL; cudaMalloc(&gradient, XSIZE*YSIZE); float learningRate = 1; float *history = NULL; cudaMalloc(&history, XSIZE*YSIZE); float epsilon = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); adagradKernel<<<gridBlock,threadBlock>>>(numberIterations,parameterIndices,counts,dimension,parameters,gradient,learningRate,history,epsilon); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { adagradKernel<<<gridBlock,threadBlock>>>(numberIterations,parameterIndices,counts,dimension,parameters,gradient,learningRate,history,epsilon); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { adagradKernel<<<gridBlock,threadBlock>>>(numberIterations,parameterIndices,counts,dimension,parameters,gradient,learningRate,history,epsilon); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
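The generated benchmark above rounds iXSIZE and iYSIZE up with a while loop until they are divisible by the block dimensions. The fragment below is an illustrative equivalent (reusing the same variable names, not part of the generated file) that computes the grid with the usual ceil-division in one step.

// Equivalent grid-size computation (illustration only).
dim3 gridBlock((XSIZE + BLOCKX - 1) / BLOCKX,   // ceil(XSIZE / BLOCKX)
               (YSIZE + BLOCKY - 1) / BLOCKY);  // ceil(YSIZE / BLOCKY)
dim3 threadBlock(BLOCKX, BLOCKY);
// gridBlock.x * BLOCKX >= XSIZE and gridBlock.y * BLOCKY >= YSIZE, exactly what the while loops ensure.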
e4b4e33fcb5fd6a5c59a22ce5f64a411493f38fb.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> int main() { /* * Device ID is required first to query the device. */ int deviceId; hipGetDevice(&deviceId); hipDeviceProp_t props; hipGetDeviceProperties(&props, deviceId); /* * `props` now contains several properties about the current device. */ int computeCapabilityMajor = props.major; int computeCapabilityMinor = props.minor; int multiProcessorCount = props.multiProcessorCount; int warpSize = props.warpSize; printf("Device ID: %d\nNumber of SMs: %d\nCompute Capability Major: %d\nCompute Capability Minor: %d\nWarp Size: %d\n", deviceId, multiProcessorCount, computeCapabilityMajor, computeCapabilityMinor, warpSize); }
e4b4e33fcb5fd6a5c59a22ce5f64a411493f38fb.cu
#include <stdio.h> int main() { /* * Device ID is required first to query the device. */ int deviceId; cudaGetDevice(&deviceId); cudaDeviceProp props; cudaGetDeviceProperties(&props, deviceId); /* * `props` now contains several properties about the current device. */ int computeCapabilityMajor = props.major; int computeCapabilityMinor = props.minor; int multiProcessorCount = props.multiProcessorCount; int warpSize = props.warpSize; printf("Device ID: %d\nNumber of SMs: %d\nCompute Capability Major: %d\nCompute Capability Minor: %d\nWarp Size: %d\n", deviceId, multiProcessorCount, computeCapabilityMajor, computeCapabilityMinor, warpSize); }
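A common next step after this query is to use the reported properties when choosing a launch configuration. The fragment below is a minimal sketch, not part of the original file; someKernel is a placeholder name for any __global__ function.

// Hypothetical use of the queried properties (illustration only).
int threadsPerBlock = 8 * warpSize;              // a multiple of the warp size, e.g. 256
int blocks = 32 * multiProcessorCount;           // several blocks per SM to keep them occupied
// someKernel<<<blocks, threadsPerBlock>>>(...);  // placeholder launch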
1050fff9493072298bec47244341a11a4fa5160c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void bcnn_op_cuda_ramp_kernel(int n, float *x, float *y) { int i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; if (i < n) { y[i] = x[i] * (x[i] > 0) + 0.1 * x[i]; } return; }
1050fff9493072298bec47244341a11a4fa5160c.cu
#include "includes.h" __global__ void bcnn_op_cuda_ramp_kernel(int n, float *x, float *y) { int i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; if (i < n) { y[i] = x[i] * (x[i] > 0) + 0.1 * x[i]; } return; }
c8eccbb69e1c6d0b41049442bab87deb738b9285.hip
// !!! This is a file automatically generated by hipify!!! /* 159.735 Semester 2, 2016. Ian Bond, 3/10/2016 Sequential version of the N-sphere counting problem for Assignment 5. Two alternative algorithms are presented. Note: a rethink will be needed when implementing a GPU version of this. You can't just cut and paste code. To compile: g++ -O3 -o nsphere nsphere.cpp (you will get slightly better performance with the O3 optimization flag) */ #include <cstdlib> #include <cmath> #include <iostream> #include <string> #include <vector> #include <hip/hip_runtime.h> #include <ctime> double diffclock(clock_t clock1,clock_t clock2) { double diffticks = clock1 - clock2; double diffms = (diffticks * 1000) / CLOCKS_PER_SEC; return diffms; // Time difference in milliseconds } long powlong(long n, long k) /* Evaluate n**k where both are long integers */ { long p = 1; for (long i = 0; i < k; ++i) p *= n; return p; } /*----------------------------------------------------------------------------*/ __global__ void count_in_v1_gpu (long ntotal , long base, long halfb, double rsquare, long ndim , unsigned long long int* count , unsigned long long int start_index) { long n = start_index + blockDim.x * blockIdx.x + threadIdx.x; if (n < ntotal) { long idx = 0; double rtestsq = 0; while (n != 0) { long rem = n % base; n = n / base; double xk = rem - halfb; rtestsq += xk * xk; ++idx; } for (long k = idx; k < ndim; ++k) { double xk = 0.0 - halfb; rtestsq += xk * xk; } if (rtestsq < rsquare) atomicAdd(count,1); } } void convert(long num, long base, std::vector<long>& index) /* Convert a decimal number into another base system - the individual digits in the new base are stored in the index array. */ { const long ndim = index.size(); for (long i = 0; i < ndim; ++i) index[i] = 0; long idx = 0; while (num != 0) { long rem = num % base; num = num / base; index[idx] = rem; ++idx; } } long count_in_v1(long ndim, double radius) /* Version 1 of the counting algorithm. Given: ndim -> number of dimensions of the hypersphere radius -> radius of the hypersphere count the number of integer points that lie wholly within the hypersphere, assuming it is centred on the origin. */ { const long halfb = static_cast<long>(floor(radius)); const long base = 2 * halfb + 1; const double rsquare = radius * radius; // This is the total number of points we will need to test. const long ntotal = powlong(base, ndim); long count = 0; // Indices in x,y,z,.... std::vector<long> index(ndim, 0); // Loop over the total number of points. For each visit of the loop, // we covert n to its equivalent in a number system of given "base". for (long n = 0; n < ntotal; ++n) { convert(n, base, index); double rtestsq = 0; for (long k = 0; k < ndim; ++k) { double xk = index[k] - halfb; rtestsq += xk * xk; } if (rtestsq < rsquare) ++count; } return count; } /*----------------------------------------------------------------------------*/ void addone(std::vector<long>& index, long base, long i) /* Add one to a digital counter of given base. When one digit hits maximum, it is necessary to carry one over into the next column. This is done recursively here. */ { long ndim = index.size(); long newv = index[i] + 1; if (newv >= base) { index[i] = 0; if (i < ndim - 1) addone(index, base, i+1); } else { index[i] = newv; } } long count_in_v2(long ndim, double radius) /* Version 2 of the counting algorithm. 
Given: ndim -> number of dimensions of the hypersphere radius -> radius of the hypersphere count the number of integer points that lie wholly within the hypersphere, assuming it is centred on the origin. */ { const long halfb = static_cast<long>(floor(radius)); const long base = 2 * halfb + 1; const double rsquare = radius * radius; const long ntotal = powlong(base, ndim); long count = 0; // This is the counter std::vector<long> index(ndim, 0); // Loop over the total number of points to test, ticking over the // counter as we go. for (long n = 0; n < ntotal; ++n) { double rtestsq = 0; for (long k = 0; k < ndim; ++k) { double xk = index[k] - halfb; rtestsq += xk * xk; } if (rtestsq < rsquare) ++count; addone(index, base, 0); } return count; } //Sequential version void seq_count_in_v1_v2(long nd , double r) { clock_t tstart = clock(); const long num1 = count_in_v1(nd, r); clock_t tend = clock(); double tms = diffclock(tend, tstart); std::cout << " CPU v1-> " << num1 << std::endl; std::cout << "# Time elapsed: " << tms << " ms " << std::endl; tstart = clock(); const long num2 = count_in_v2(nd, r); tend = clock(); tms = diffclock(tend, tstart); std::cout << " CPU v2-> " << num2 << std::endl; std::cout << "# Time elapsed: " << tms << " ms " << std::endl; } int main(int argc, char* argv[]) { const double r = atof(argv[1]); const long nd = atol(argv[2]); const long halfb = static_cast<long>(floor(r)); const long base = 2 * halfb + 1; const long ntotal = powlong(base, nd); const double rsquare = r * r; std::cout << "### " << " Radius " << r << " Dimension " << nd << " Total Points " << ntotal << std::endl; unsigned long long int *d_count; unsigned long long int count; unsigned long long int total_count = 0; unsigned long long int MAX_THREAD_SIZE = 2147483648;// 1073741824; 65536 x 65536 unsigned long long int threadsPerBlock = 1024; unsigned long long int blocksPerGrid = (ntotal + threadsPerBlock - 1) / threadsPerBlock ; int patition_num = (blocksPerGrid * threadsPerBlock )/ MAX_THREAD_SIZE; unsigned long long int rest_blocksPerGrid = ( (blocksPerGrid * threadsPerBlock ) % MAX_THREAD_SIZE + threadsPerBlock - 1) / threadsPerBlock ; std::cout << "total threads " << " " << threadsPerBlock * blocksPerGrid<< " " << std::endl; std::cout << "size " << " " << size << " " << std::endl; hipMalloc(&d_count, sizeof(unsigned long long int)); for (int i = 0 ; i <= patition_num ; i++){ unsigned long long int start_i = MAX_THREAD_SIZE * i; if (i != patition_num ) blocksPerGrid = MAX_THREAD_SIZE / threadsPerBlock; else blocksPerGrid = rest_blocksPerGrid; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); count=0 ; hipMemcpy(d_count, &count, sizeof(unsigned long long int), hipMemcpyHostToDevice); hipLaunchKernelGGL(( count_in_v1_gpu), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, ntotal, base, halfb, rsquare, nd, d_count ,start_i); hipEventRecord(stop, 0); hipEventSynchronize(stop); float time; // Must be a float hipEventElapsedTime(&time, start, stop); hipEventDestroy(start); hipEventDestroy(stop); hipMemcpy( &count, d_count, sizeof(unsigned long long int), hipMemcpyDeviceToHost); total_count += count; std::cout << "total threads " << " " << threadsPerBlock * blocksPerGrid<< " " << std::endl; std::cout << " GPU total_count " << i << " -> " << total_count << std::endl; std::cout << "Kernel took: " << time << " ms" << std::endl; } hipFree(d_count); std::cout << "---final result----" << std::endl; std::cout << " GPU total_count-> " << total_count << std::endl; 
//seq_count_in_v1_v2(nd, r); }
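The GPU kernel decodes each thread index into base-`base` digits and subtracts halfb to recover the lattice coordinates, padding missing digits with zero. The CPU fragment below is an illustrative restatement of that decoding (not part of the original file), useful for spot-checking single indices: with radius 2.0, halfb = 2 and base = 5, so n = 37 decodes to digits (2, 2, 1), i.e. the point (0, 0, -1), and 0 + 0 + 1 < 4 means it is counted.

// CPU sketch of the per-index test done in count_in_v1_gpu (illustration only).
bool point_inside(long n, long base, long halfb, long ndim, double rsquare)
{
  double rtestsq = 0.0;
  for (long k = 0; k < ndim; ++k) {      // digits beyond the last non-zero one are 0,
    long digit = n % base;               // exactly as the kernel's trailing loop assumes
    n /= base;
    double xk = digit - halfb;
    rtestsq += xk * xk;
  }
  return rtestsq < rsquare;
}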
c8eccbb69e1c6d0b41049442bab87deb738b9285.cu
/* 159.735 Semester 2, 2016. Ian Bond, 3/10/2016 Sequential version of the N-sphere counting problem for Assignment 5. Two alternative algorithms are presented. Note: a rethink will be needed when implementing a GPU version of this. You can't just cut and paste code. To compile: g++ -O3 -o nsphere nsphere.cpp (you will get slightly better performance with the O3 optimization flag) */ #include <cstdlib> #include <cmath> #include <iostream> #include <string> #include <vector> #include <cuda.h> #include <ctime> double diffclock(clock_t clock1,clock_t clock2) { double diffticks = clock1 - clock2; double diffms = (diffticks * 1000) / CLOCKS_PER_SEC; return diffms; // Time difference in milliseconds } long powlong(long n, long k) /* Evaluate n**k where both are long integers */ { long p = 1; for (long i = 0; i < k; ++i) p *= n; return p; } /*----------------------------------------------------------------------------*/ __global__ void count_in_v1_gpu (long ntotal , long base, long halfb, double rsquare, long ndim , unsigned long long int* count , unsigned long long int start_index) { long n = start_index + blockDim.x * blockIdx.x + threadIdx.x; if (n < ntotal) { long idx = 0; double rtestsq = 0; while (n != 0) { long rem = n % base; n = n / base; double xk = rem - halfb; rtestsq += xk * xk; ++idx; } for (long k = idx; k < ndim; ++k) { double xk = 0.0 - halfb; rtestsq += xk * xk; } if (rtestsq < rsquare) atomicAdd(count,1); } } void convert(long num, long base, std::vector<long>& index) /* Convert a decimal number into another base system - the individual digits in the new base are stored in the index array. */ { const long ndim = index.size(); for (long i = 0; i < ndim; ++i) index[i] = 0; long idx = 0; while (num != 0) { long rem = num % base; num = num / base; index[idx] = rem; ++idx; } } long count_in_v1(long ndim, double radius) /* Version 1 of the counting algorithm. Given: ndim -> number of dimensions of the hypersphere radius -> radius of the hypersphere count the number of integer points that lie wholly within the hypersphere, assuming it is centred on the origin. */ { const long halfb = static_cast<long>(floor(radius)); const long base = 2 * halfb + 1; const double rsquare = radius * radius; // This is the total number of points we will need to test. const long ntotal = powlong(base, ndim); long count = 0; // Indices in x,y,z,.... std::vector<long> index(ndim, 0); // Loop over the total number of points. For each visit of the loop, // we covert n to its equivalent in a number system of given "base". for (long n = 0; n < ntotal; ++n) { convert(n, base, index); double rtestsq = 0; for (long k = 0; k < ndim; ++k) { double xk = index[k] - halfb; rtestsq += xk * xk; } if (rtestsq < rsquare) ++count; } return count; } /*----------------------------------------------------------------------------*/ void addone(std::vector<long>& index, long base, long i) /* Add one to a digital counter of given base. When one digit hits maximum, it is necessary to carry one over into the next column. This is done recursively here. */ { long ndim = index.size(); long newv = index[i] + 1; if (newv >= base) { index[i] = 0; if (i < ndim - 1) addone(index, base, i+1); } else { index[i] = newv; } } long count_in_v2(long ndim, double radius) /* Version 2 of the counting algorithm. Given: ndim -> number of dimensions of the hypersphere radius -> radius of the hypersphere count the number of integer points that lie wholly within the hypersphere, assuming it is centred on the origin. 
*/ { const long halfb = static_cast<long>(floor(radius)); const long base = 2 * halfb + 1; const double rsquare = radius * radius; const long ntotal = powlong(base, ndim); long count = 0; // This is the counter std::vector<long> index(ndim, 0); // Loop over the total number of points to test, ticking over the // counter as we go. for (long n = 0; n < ntotal; ++n) { double rtestsq = 0; for (long k = 0; k < ndim; ++k) { double xk = index[k] - halfb; rtestsq += xk * xk; } if (rtestsq < rsquare) ++count; addone(index, base, 0); } return count; } //Sequential version void seq_count_in_v1_v2(long nd , double r) { clock_t tstart = clock(); const long num1 = count_in_v1(nd, r); clock_t tend = clock(); double tms = diffclock(tend, tstart); std::cout << " CPU v1-> " << num1 << std::endl; std::cout << "# Time elapsed: " << tms << " ms " << std::endl; tstart = clock(); const long num2 = count_in_v2(nd, r); tend = clock(); tms = diffclock(tend, tstart); std::cout << " CPU v2-> " << num2 << std::endl; std::cout << "# Time elapsed: " << tms << " ms " << std::endl; } int main(int argc, char* argv[]) { const double r = atof(argv[1]); const long nd = atol(argv[2]); const long halfb = static_cast<long>(floor(r)); const long base = 2 * halfb + 1; const long ntotal = powlong(base, nd); const double rsquare = r * r; std::cout << "### " << " Radius " << r << " Dimension " << nd << " Total Points " << ntotal << std::endl; unsigned long long int *d_count; unsigned long long int count; unsigned long long int total_count = 0; unsigned long long int MAX_THREAD_SIZE = 2147483648;// 1073741824; 65536 x 65536 unsigned long long int threadsPerBlock = 1024; unsigned long long int blocksPerGrid = (ntotal + threadsPerBlock - 1) / threadsPerBlock ; int patition_num = (blocksPerGrid * threadsPerBlock )/ MAX_THREAD_SIZE; unsigned long long int rest_blocksPerGrid = ( (blocksPerGrid * threadsPerBlock ) % MAX_THREAD_SIZE + threadsPerBlock - 1) / threadsPerBlock ; std::cout << "total threads " << " " << threadsPerBlock * blocksPerGrid<< " " << std::endl; std::cout << "size " << " " << size << " " << std::endl; cudaMalloc(&d_count, sizeof(unsigned long long int)); for (int i = 0 ; i <= patition_num ; i++){ unsigned long long int start_i = MAX_THREAD_SIZE * i; if (i != patition_num ) blocksPerGrid = MAX_THREAD_SIZE / threadsPerBlock; else blocksPerGrid = rest_blocksPerGrid; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); count=0 ; cudaMemcpy(d_count, &count, sizeof(unsigned long long int), cudaMemcpyHostToDevice); count_in_v1_gpu<<<blocksPerGrid, threadsPerBlock>>>( ntotal, base, halfb, rsquare, nd, d_count ,start_i); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); float time; // Must be a float cudaEventElapsedTime(&time, start, stop); cudaEventDestroy(start); cudaEventDestroy(stop); cudaMemcpy( &count, d_count, sizeof(unsigned long long int), cudaMemcpyDeviceToHost); total_count += count; std::cout << "total threads " << " " << threadsPerBlock * blocksPerGrid<< " " << std::endl; std::cout << " GPU total_count " << i << " -> " << total_count << std::endl; std::cout << "Kernel took: " << time << " ms" << std::endl; } cudaFree(d_count); std::cout << "---final result----" << std::endl; std::cout << " GPU total_count-> " << total_count << std::endl; //seq_count_in_v1_v2(nd, r); }
be99d4cf5623c5fe863fe84dd2b1a42b04b7c730.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" const int Nthreads = 1024, maxFR = 10000, NrankMax = 3, nt0max=81, NchanMax = 17; ////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////// __global__ void Conv1D(const double *Params, const float *data, const float *W, float *conv_sig){ volatile __shared__ float sW[81*NrankMax], sdata[Nthreads+81]; float x, y; int tid, tid0, bid, i, nid, Nrank, NT, nt0; tid = threadIdx.x; bid = blockIdx.x; NT = (int) Params[0]; nt0 = (int) Params[3]; Nrank = (int) Params[6]; if(tid<nt0*Nrank) sW[tid]= W[tid]; __syncthreads(); tid0 = 0; while (tid0<NT-Nthreads-nt0+1){ if (tid<nt0) sdata[tid] = data[tid0 + tid+ NT*bid]; sdata[tid + nt0] = data[tid0 + tid + nt0 + NT*bid]; __syncthreads(); x = 0.0f; for(nid=0;nid<Nrank;nid++){ y = 0.0f; #pragma unroll 4 for(i=0;i<nt0;i++) y += sW[i + nid*nt0] * sdata[i+tid]; x += y*y; } conv_sig[tid0 + tid + NT*bid] = sqrt(x); tid0+=Nthreads; __syncthreads(); } }
be99d4cf5623c5fe863fe84dd2b1a42b04b7c730.cu
#include "includes.h" const int Nthreads = 1024, maxFR = 10000, NrankMax = 3, nt0max=81, NchanMax = 17; ////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////// __global__ void Conv1D(const double *Params, const float *data, const float *W, float *conv_sig){ volatile __shared__ float sW[81*NrankMax], sdata[Nthreads+81]; float x, y; int tid, tid0, bid, i, nid, Nrank, NT, nt0; tid = threadIdx.x; bid = blockIdx.x; NT = (int) Params[0]; nt0 = (int) Params[3]; Nrank = (int) Params[6]; if(tid<nt0*Nrank) sW[tid]= W[tid]; __syncthreads(); tid0 = 0; while (tid0<NT-Nthreads-nt0+1){ if (tid<nt0) sdata[tid] = data[tid0 + tid+ NT*bid]; sdata[tid + nt0] = data[tid0 + tid + nt0 + NT*bid]; __syncthreads(); x = 0.0f; for(nid=0;nid<Nrank;nid++){ y = 0.0f; #pragma unroll 4 for(i=0;i<nt0;i++) y += sW[i + nid*nt0] * sdata[i+tid]; x += y*y; } conv_sig[tid0 + tid + NT*bid] = sqrt(x); tid0+=Nthreads; __syncthreads(); } }
321dee2823c90e3f8e3ac108064dae88a4abb783.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <glog/logging.h> #include "paddle/fluid/inference/tensorrt/plugin/elementwise_op_plugin.h" namespace paddle { namespace inference { namespace tensorrt { namespace plugin { namespace details { template <typename T> struct Add { __device__ T operator()(const T &a, const T &b) const { return a + b; } }; template <typename T> struct Mul { __device__ T operator()(const T &a, const T &b) const { return a * b; } }; template <typename T> struct Div { __device__ T operator()(const T &a, const T &b) const { return a / b; } }; template <typename T> struct Sub { __device__ T operator()(const T &a, const T &b) const { return a - b; } }; template <typename T> struct Pow { __device__ T operator()(const T &a, const T &b) const { return static_cast<T>(::powf(static_cast<float>(a), static_cast<float>(b))); } }; } // namespace details template <typename T, typename Operator> __global__ void elementwise_kernel(const size_t total, const T *x_data, const T *y_data, T *out_data, int pre, int n, int post, Operator op) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < total) { int idx = tid / post % n; #if __CUDA_ARCH__ >= 350 out_data[tid] = op(__ldg(x_data + tid), __ldg(y_data + idx)); #else out_data[tid] = op(x_data[tid], y_data[idx]); #endif } } nvinfer1::Dims ElementWisePlugin::getOutputDimensions( int index, const nvinfer1::Dims *input_dims, int num_inputs) TRT_NOEXCEPT { PADDLE_ENFORCE_EQ(index, 0, platform::errors::InvalidArgument( "There is only one output in TRT elementwise " "op plugin, but got output index: %d.", index)); PADDLE_ENFORCE_EQ( num_inputs, 2, platform::errors::InvalidArgument("There are 2 inputs in TRT elementwise " "op plugin, but got input number: %d.", num_inputs)); PADDLE_ENFORCE_NOT_NULL( input_dims, platform::errors::InvalidArgument( "The input dims of TRT elementwise op plugin should not be null.")); return input_dims[0]; } int ElementWisePlugin::initialize() TRT_NOEXCEPT { axis_ = (axis_ == -1) ? 
dims_x_.nbDims - dims_y_.nbDims : axis_; int trimed_nb_dims = dims_y_.nbDims; for (; trimed_nb_dims > 0; --trimed_nb_dims) { if (dims_y_.d[trimed_nb_dims - 1] != 1) { break; } } dims_y_.nbDims = trimed_nb_dims; PADDLE_ENFORCE_GE(dims_x_.nbDims, dims_y_.nbDims + axis_, platform::errors::InvalidArgument( "We expect [number of x dims] >= [number of y dims + " "axis] in TRT elementwise op plugin, but got [number " "of x dims] = %d, [number of y dims + axis] = %d.", dims_x_.nbDims, dims_y_.nbDims + axis_)); PADDLE_ENFORCE_LT( axis_, dims_x_.nbDims, platform::errors::InvalidArgument("We expect [axis] < [number of x dims] " "in TRT elementwise op plugin, but got " "[axis] = %d, [number of x dims] = %d.", axis_, dims_x_.nbDims)); prev_size_ = 1; midd_size_ = 1; post_size_ = 1; for (int i = 0; i < axis_; ++i) { prev_size_ *= dims_x_.d[i]; } for (int i = 0; i < dims_y_.nbDims; ++i) { PADDLE_ENFORCE_EQ(dims_x_.d[i + axis_], dims_y_.d[i], platform::errors::InvalidArgument( "Broadcast dimension mismatch. The dims of input Y " "should be a subsequence of X.")); midd_size_ *= dims_y_.d[i]; } for (int i = axis_ + dims_y_.nbDims; i < dims_x_.nbDims; ++i) { post_size_ *= dims_x_.d[i]; } return 0; } int ElementWisePlugin::enqueue(int batch_size, const void *const *inputs, #if IS_TRT_VERSION_LT(8000) void **outputs, void *workspace, #else void *const *outputs, void *workspace, #endif hipStream_t stream) TRT_NOEXCEPT { const float *x = reinterpret_cast<const float *>(inputs[0]); const float *y = reinterpret_cast<const float *>(inputs[1]); float *out = reinterpret_cast<float *>(outputs[0]); int num = batch_size * prev_size_ * midd_size_ * post_size_; int thread = 256; int block = (num + thread - 1) / thread; if (type_ == "add") { hipLaunchKernelGGL(( elementwise_kernel), dim3(block), dim3(thread), 0, stream, num, x, y, out, prev_size_, batch_size * midd_size_, post_size_, details::Add<float>()); } else if (type_ == "mul") { hipLaunchKernelGGL(( elementwise_kernel), dim3(block), dim3(thread), 0, stream, num, x, y, out, prev_size_, batch_size * midd_size_, post_size_, details::Mul<float>()); } else if (type_ == "div") { hipLaunchKernelGGL(( elementwise_kernel), dim3(block), dim3(thread), 0, stream, num, x, y, out, prev_size_, batch_size * midd_size_, post_size_, details::Div<float>()); } else if (type_ == "sub") { hipLaunchKernelGGL(( elementwise_kernel), dim3(block), dim3(thread), 0, stream, num, x, y, out, prev_size_, batch_size * midd_size_, post_size_, details::Sub<float>()); } else if (type_ == "pow") { hipLaunchKernelGGL(( elementwise_kernel), dim3(block), dim3(thread), 0, stream, num, x, y, out, prev_size_, batch_size * midd_size_, post_size_, details::Pow<float>()); } else { PADDLE_THROW(platform::errors::Fatal( "The %s type elementwise is not implemented in trt plugin.", type_)); } return hipGetLastError() != hipSuccess; } // Dynamic Plugin below. 
#if IS_TRT_VERSION_GE(6000) int ElementwisePluginDynamic::initialize() TRT_NOEXCEPT { return 0; } size_t ElementwisePluginDynamic::getSerializationSize() const TRT_NOEXCEPT { return SerializedSize(type_.c_str()) + SerializedSize(axis_); } void ElementwisePluginDynamic::serialize(void *buffer) const TRT_NOEXCEPT { SerializeValue(&buffer, type_.c_str()); SerializeValue(&buffer, axis_); } nvinfer1::DimsExprs ElementwisePluginDynamic::getOutputDimensions( int output_index, const nvinfer1::DimsExprs *inputs, int nb_inputs, nvinfer1::IExprBuilder &expr_builder) TRT_NOEXCEPT { return inputs[0]; } bool ElementwisePluginDynamic::supportsFormatCombination( int pos, const nvinfer1::PluginTensorDesc *in_out, int nb_inputs, int nb_outputs) TRT_NOEXCEPT { PADDLE_ENFORCE_NOT_NULL( in_out, platform::errors::InvalidArgument( "The input of swish plugin shoule not be nullptr.")); PADDLE_ENFORCE_LT( pos, nb_inputs + nb_outputs, platform::errors::InvalidArgument("The pos(%d) should be less than the " "num(%d) of the input and the output.", pos, nb_inputs + nb_outputs)); (in_out && pos < (nb_inputs + nb_outputs)); const nvinfer1::PluginTensorDesc &in = in_out[pos]; if (pos == 0) { return (in.type == nvinfer1::DataType::kFLOAT) && (in.format == nvinfer1::TensorFormat::kLINEAR); } const nvinfer1::PluginTensorDesc &prev = in_out[pos - 1]; // output return in.type == prev.type && in.format == prev.format; } nvinfer1::DataType ElementwisePluginDynamic::getOutputDataType( int index, const nvinfer1::DataType *input_types, int nb_inputs) const TRT_NOEXCEPT { PADDLE_ENFORCE_EQ(index, 0, platform::errors::InvalidArgument( "The Elementwise Plugin only has one input, so the " "index value should be 0, but get %d.", index)); return input_types[0]; } int ElementwisePluginDynamic::enqueue( const nvinfer1::PluginTensorDesc *input_desc, const nvinfer1::PluginTensorDesc *output_desc, const void *const *inputs, void *const *outputs, void *workspace, hipStream_t stream) TRT_NOEXCEPT { auto x_dims = input_desc[0].dims; auto y_dims = input_desc[1].dims; int axis = (axis_ == -1) ? 
x_dims.nbDims - y_dims.nbDims : axis_; int batch_size = x_dims.d[0]; int prev_size = 1; int midd_size = 1; int post_size = 1; for (int i = 0; i < axis; ++i) { prev_size *= x_dims.d[i]; } int trimed_nb_dims = y_dims.nbDims; for (; trimed_nb_dims > 0; --trimed_nb_dims) { if (y_dims.d[trimed_nb_dims - 1] != 1) { break; } } for (int i = 0; i < trimed_nb_dims; ++i) { PADDLE_ENFORCE_EQ(x_dims.d[i + axis], y_dims.d[i], platform::errors::InvalidArgument( "Broadcast dimension mismatch found in trt " "elementwise plugin's x and y input.")); midd_size *= y_dims.d[i]; } for (int i = axis + trimed_nb_dims; i < x_dims.nbDims; ++i) { post_size *= x_dims.d[i]; } const float *x = static_cast<const float *>(inputs[0]); const float *y = static_cast<const float *>(inputs[1]); float *out = static_cast<float *>(outputs[0]); int num = prev_size * midd_size * post_size; int thread = 256; int block = (num + thread - 1) / thread; if (type_ == "add") { hipLaunchKernelGGL(( elementwise_kernel), dim3(block), dim3(thread), 0, stream, num, x, y, out, prev_size, midd_size, post_size, details::Add<float>()); } else if (type_ == "mul") { hipLaunchKernelGGL(( elementwise_kernel), dim3(block), dim3(thread), 0, stream, num, x, y, out, prev_size, midd_size, post_size, details::Mul<float>()); } else if (type_ == "div") { hipLaunchKernelGGL(( elementwise_kernel), dim3(block), dim3(thread), 0, stream, num, x, y, out, prev_size, midd_size, post_size, details::Div<float>()); } else if (type_ == "sub") { hipLaunchKernelGGL(( elementwise_kernel), dim3(block), dim3(thread), 0, stream, num, x, y, out, prev_size, midd_size, post_size, details::Sub<float>()); } else if (type_ == "pow") { hipLaunchKernelGGL(( elementwise_kernel), dim3(block), dim3(thread), 0, stream, num, x, y, out, prev_size, midd_size, post_size, details::Pow<float>()); } else { PADDLE_THROW(platform::errors::Unimplemented( "Paddle-TRT only support elementwise " "operation: {add, mul, div, sub, pow} currently, " "but got %s.", type_)); } return hipGetLastError() != hipSuccess; } #endif } // namespace plugin } // namespace tensorrt } // namespace inference } // namespace paddle
321dee2823c90e3f8e3ac108064dae88a4abb783.cu
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <glog/logging.h> #include "paddle/fluid/inference/tensorrt/plugin/elementwise_op_plugin.h" namespace paddle { namespace inference { namespace tensorrt { namespace plugin { namespace details { template <typename T> struct Add { __device__ T operator()(const T &a, const T &b) const { return a + b; } }; template <typename T> struct Mul { __device__ T operator()(const T &a, const T &b) const { return a * b; } }; template <typename T> struct Div { __device__ T operator()(const T &a, const T &b) const { return a / b; } }; template <typename T> struct Sub { __device__ T operator()(const T &a, const T &b) const { return a - b; } }; template <typename T> struct Pow { __device__ T operator()(const T &a, const T &b) const { return static_cast<T>(::powf(static_cast<float>(a), static_cast<float>(b))); } }; } // namespace details template <typename T, typename Operator> __global__ void elementwise_kernel(const size_t total, const T *x_data, const T *y_data, T *out_data, int pre, int n, int post, Operator op) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < total) { int idx = tid / post % n; #if __CUDA_ARCH__ >= 350 out_data[tid] = op(__ldg(x_data + tid), __ldg(y_data + idx)); #else out_data[tid] = op(x_data[tid], y_data[idx]); #endif } } nvinfer1::Dims ElementWisePlugin::getOutputDimensions( int index, const nvinfer1::Dims *input_dims, int num_inputs) TRT_NOEXCEPT { PADDLE_ENFORCE_EQ(index, 0, platform::errors::InvalidArgument( "There is only one output in TRT elementwise " "op plugin, but got output index: %d.", index)); PADDLE_ENFORCE_EQ( num_inputs, 2, platform::errors::InvalidArgument("There are 2 inputs in TRT elementwise " "op plugin, but got input number: %d.", num_inputs)); PADDLE_ENFORCE_NOT_NULL( input_dims, platform::errors::InvalidArgument( "The input dims of TRT elementwise op plugin should not be null.")); return input_dims[0]; } int ElementWisePlugin::initialize() TRT_NOEXCEPT { axis_ = (axis_ == -1) ? dims_x_.nbDims - dims_y_.nbDims : axis_; int trimed_nb_dims = dims_y_.nbDims; for (; trimed_nb_dims > 0; --trimed_nb_dims) { if (dims_y_.d[trimed_nb_dims - 1] != 1) { break; } } dims_y_.nbDims = trimed_nb_dims; PADDLE_ENFORCE_GE(dims_x_.nbDims, dims_y_.nbDims + axis_, platform::errors::InvalidArgument( "We expect [number of x dims] >= [number of y dims + " "axis] in TRT elementwise op plugin, but got [number " "of x dims] = %d, [number of y dims + axis] = %d.", dims_x_.nbDims, dims_y_.nbDims + axis_)); PADDLE_ENFORCE_LT( axis_, dims_x_.nbDims, platform::errors::InvalidArgument("We expect [axis] < [number of x dims] " "in TRT elementwise op plugin, but got " "[axis] = %d, [number of x dims] = %d.", axis_, dims_x_.nbDims)); prev_size_ = 1; midd_size_ = 1; post_size_ = 1; for (int i = 0; i < axis_; ++i) { prev_size_ *= dims_x_.d[i]; } for (int i = 0; i < dims_y_.nbDims; ++i) { PADDLE_ENFORCE_EQ(dims_x_.d[i + axis_], dims_y_.d[i], platform::errors::InvalidArgument( "Broadcast dimension mismatch. 
The dims of input Y " "should be a subsequence of X.")); midd_size_ *= dims_y_.d[i]; } for (int i = axis_ + dims_y_.nbDims; i < dims_x_.nbDims; ++i) { post_size_ *= dims_x_.d[i]; } return 0; } int ElementWisePlugin::enqueue(int batch_size, const void *const *inputs, #if IS_TRT_VERSION_LT(8000) void **outputs, void *workspace, #else void *const *outputs, void *workspace, #endif cudaStream_t stream) TRT_NOEXCEPT { const float *x = reinterpret_cast<const float *>(inputs[0]); const float *y = reinterpret_cast<const float *>(inputs[1]); float *out = reinterpret_cast<float *>(outputs[0]); int num = batch_size * prev_size_ * midd_size_ * post_size_; int thread = 256; int block = (num + thread - 1) / thread; if (type_ == "add") { elementwise_kernel<<<block, thread, 0, stream>>>( num, x, y, out, prev_size_, batch_size * midd_size_, post_size_, details::Add<float>()); } else if (type_ == "mul") { elementwise_kernel<<<block, thread, 0, stream>>>( num, x, y, out, prev_size_, batch_size * midd_size_, post_size_, details::Mul<float>()); } else if (type_ == "div") { elementwise_kernel<<<block, thread, 0, stream>>>( num, x, y, out, prev_size_, batch_size * midd_size_, post_size_, details::Div<float>()); } else if (type_ == "sub") { elementwise_kernel<<<block, thread, 0, stream>>>( num, x, y, out, prev_size_, batch_size * midd_size_, post_size_, details::Sub<float>()); } else if (type_ == "pow") { elementwise_kernel<<<block, thread, 0, stream>>>( num, x, y, out, prev_size_, batch_size * midd_size_, post_size_, details::Pow<float>()); } else { PADDLE_THROW(platform::errors::Fatal( "The %s type elementwise is not implemented in trt plugin.", type_)); } return cudaGetLastError() != cudaSuccess; } // Dynamic Plugin below. #if IS_TRT_VERSION_GE(6000) int ElementwisePluginDynamic::initialize() TRT_NOEXCEPT { return 0; } size_t ElementwisePluginDynamic::getSerializationSize() const TRT_NOEXCEPT { return SerializedSize(type_.c_str()) + SerializedSize(axis_); } void ElementwisePluginDynamic::serialize(void *buffer) const TRT_NOEXCEPT { SerializeValue(&buffer, type_.c_str()); SerializeValue(&buffer, axis_); } nvinfer1::DimsExprs ElementwisePluginDynamic::getOutputDimensions( int output_index, const nvinfer1::DimsExprs *inputs, int nb_inputs, nvinfer1::IExprBuilder &expr_builder) TRT_NOEXCEPT { return inputs[0]; } bool ElementwisePluginDynamic::supportsFormatCombination( int pos, const nvinfer1::PluginTensorDesc *in_out, int nb_inputs, int nb_outputs) TRT_NOEXCEPT { PADDLE_ENFORCE_NOT_NULL( in_out, platform::errors::InvalidArgument( "The input of swish plugin shoule not be nullptr.")); PADDLE_ENFORCE_LT( pos, nb_inputs + nb_outputs, platform::errors::InvalidArgument("The pos(%d) should be less than the " "num(%d) of the input and the output.", pos, nb_inputs + nb_outputs)); (in_out && pos < (nb_inputs + nb_outputs)); const nvinfer1::PluginTensorDesc &in = in_out[pos]; if (pos == 0) { return (in.type == nvinfer1::DataType::kFLOAT) && (in.format == nvinfer1::TensorFormat::kLINEAR); } const nvinfer1::PluginTensorDesc &prev = in_out[pos - 1]; // output return in.type == prev.type && in.format == prev.format; } nvinfer1::DataType ElementwisePluginDynamic::getOutputDataType( int index, const nvinfer1::DataType *input_types, int nb_inputs) const TRT_NOEXCEPT { PADDLE_ENFORCE_EQ(index, 0, platform::errors::InvalidArgument( "The Elementwise Plugin only has one input, so the " "index value should be 0, but get %d.", index)); return input_types[0]; } int ElementwisePluginDynamic::enqueue( const nvinfer1::PluginTensorDesc 
*input_desc, const nvinfer1::PluginTensorDesc *output_desc, const void *const *inputs, void *const *outputs, void *workspace, cudaStream_t stream) TRT_NOEXCEPT { auto x_dims = input_desc[0].dims; auto y_dims = input_desc[1].dims; int axis = (axis_ == -1) ? x_dims.nbDims - y_dims.nbDims : axis_; int batch_size = x_dims.d[0]; int prev_size = 1; int midd_size = 1; int post_size = 1; for (int i = 0; i < axis; ++i) { prev_size *= x_dims.d[i]; } int trimed_nb_dims = y_dims.nbDims; for (; trimed_nb_dims > 0; --trimed_nb_dims) { if (y_dims.d[trimed_nb_dims - 1] != 1) { break; } } for (int i = 0; i < trimed_nb_dims; ++i) { PADDLE_ENFORCE_EQ(x_dims.d[i + axis], y_dims.d[i], platform::errors::InvalidArgument( "Broadcast dimension mismatch found in trt " "elementwise plugin's x and y input.")); midd_size *= y_dims.d[i]; } for (int i = axis + trimed_nb_dims; i < x_dims.nbDims; ++i) { post_size *= x_dims.d[i]; } const float *x = static_cast<const float *>(inputs[0]); const float *y = static_cast<const float *>(inputs[1]); float *out = static_cast<float *>(outputs[0]); int num = prev_size * midd_size * post_size; int thread = 256; int block = (num + thread - 1) / thread; if (type_ == "add") { elementwise_kernel<<<block, thread, 0, stream>>>( num, x, y, out, prev_size, midd_size, post_size, details::Add<float>()); } else if (type_ == "mul") { elementwise_kernel<<<block, thread, 0, stream>>>( num, x, y, out, prev_size, midd_size, post_size, details::Mul<float>()); } else if (type_ == "div") { elementwise_kernel<<<block, thread, 0, stream>>>( num, x, y, out, prev_size, midd_size, post_size, details::Div<float>()); } else if (type_ == "sub") { elementwise_kernel<<<block, thread, 0, stream>>>( num, x, y, out, prev_size, midd_size, post_size, details::Sub<float>()); } else if (type_ == "pow") { elementwise_kernel<<<block, thread, 0, stream>>>( num, x, y, out, prev_size, midd_size, post_size, details::Pow<float>()); } else { PADDLE_THROW(platform::errors::Unimplemented( "Paddle-TRT only support elementwise " "operation: {add, mul, div, sub, pow} currently, " "but got %s.", type_)); } return cudaGetLastError() != cudaSuccess; } #endif } // namespace plugin } // namespace tensorrt } // namespace inference } // namespace paddle
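The (prev, midd, post) decomposition above flattens broadcasting into three factors: the product of X's dimensions before axis, the product of Y's trimmed dimensions, and the product of X's remaining dimensions. The worked example below is illustrative only (not from the original source) and makes the indexing in elementwise_kernel concrete.

// Worked example of the (pre, n, post) decomposition (illustration only).
// X shape = (2, 3, 4, 5), Y shape = (3, 4), axis = 1:
//   pre  = 2              product of X dims before axis
//   n    = 3 * 4 = 12     product of Y's trimmed dims
//   post = 5              product of X dims after axis + rank(Y)
// The flattened X element at offset tid pairs with Y element (tid / post) % n;
// e.g. tid = 67 -> (67 / 5) % 12 = 1, which is Y[0][1] in the (3, 4) view.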
33744254c24685c47b87c381cfd720067aebf70e.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "common_hip.cuh"

__global__ void timetakingfunction(int *a){
	int s = 0;
	for(int i=0;i<1000;i++){
		s=s*19;
	}
	a[threadIdx.x] = s;
}

/* Code framework to test different kinds of kernel launches
 * with varying grid sizes, threads and blocks.
 */
int main(){
	int *a;
	check_error(hipMalloc(&a, 10000000 * sizeof(int)));
	for(int i=1;i<10;i=i+10){
		auto start_time = std::chrono::high_resolution_clock::now();
		hipLaunchKernelGGL(( timetakingfunction), dim3(100),dim3(128), 0, 0, a);
		check_error( hipPeekAtLastError() );
		hipDeviceSynchronize();
		auto end_time = std::chrono::high_resolution_clock::now();
		// dividing the duration by microseconds(1) yields a microsecond count, so label it as such
		cout << "Thread " << i << ":" << (end_time - start_time)/std::chrono::microseconds(1) <<"us \n";
	}
}
33744254c24685c47b87c381cfd720067aebf70e.cu
#include "common.cuh" __global__ void timetakingfunction(int *a){ int s = 0; for(int i=0;i<1000;i++){ s=s*19; } a[threadIdx.x] = s; } /* Code framework to test different kinds of kernel launches * with varying grid sizes threads and blocks. */ int main(){ int *a; check_error(cudaMalloc(&a, 10000000 * sizeof(int))); for(int i=1;i<10;i=i+10){ auto start_time = std::chrono::high_resolution_clock::now(); timetakingfunction<<<100,128>>>(a); check_error( cudaPeekAtLastError() ); cudaDeviceSynchronize(); auto end_time = std::chrono::high_resolution_clock::now(); cout << "Thread " << i << ":" << (end_time - start_time)/std::chrono::microseconds(1) <<"ms \n"; } }
b2477453b788eb773558dba310be63f3b374bea2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Note that in this model we do not check // the error codes and status of kernel call. #include <cstdio> #include <cmath> __global__ void set(int *A, int N) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < N) A[idx] = idx; } int main(void) { const int N = 128; int *d_A; int *h_A; h_A = (int*) malloc(N * sizeof(int)); hipMalloc((void**)&d_A, N * sizeof(int)); hipLaunchKernelGGL(( set), dim3(2), dim3(64), 0, 0, d_A, N); hipMemcpy(h_A, d_A, N * sizeof(int), hipMemcpyDeviceToHost); for (int i = 0; i < N; i++) printf("%i ", h_A[i]); printf("\n"); free(h_A); hipFree((void*)d_A); return 0; }
b2477453b788eb773558dba310be63f3b374bea2.cu
// Note that in this model we do not check // the error codes and status of kernel call. #include <cstdio> #include <cmath> __global__ void set(int *A, int N) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < N) A[idx] = idx; } int main(void) { const int N = 128; int *d_A; int *h_A; h_A = (int*) malloc(N * sizeof(int)); cudaMalloc((void**)&d_A, N * sizeof(int)); set<<<2, 64>>>(d_A, N); cudaMemcpy(h_A, d_A, N * sizeof(int), cudaMemcpyDeviceToHost); for (int i = 0; i < N; i++) printf("%i ", h_A[i]); printf("\n"); free(h_A); cudaFree((void*)d_A); return 0; }
db371867d673ba0f55a5701a5f6a6720e622566a.hip
// !!! This is a file automatically generated by hipify!!! #pragma once #include "hip/hip_runtime.h" //The following kernels are just a demo //for working with classes in CUDA/C++ __global__ void test(int * i) { Grid<int> * g = new Grid<int>(1, 1); g->set(0, 0, 1); *i = g->get(0, 0); delete g; } __global__ void change(Grid<int> * g, int row, int col, int v) { g->set(row, col, v); }
db371867d673ba0f55a5701a5f6a6720e622566a.cu
#pragma once #include "cuda_runtime.h" //The following kernels are just a demo //for working with classes in CUDA/C++ __global__ void test(int * i) { Grid<int> * g = new Grid<int>(1, 1); g->set(0, 0, 1); *i = g->get(0, 0); delete g; } __global__ void change(Grid<int> * g, int row, int col, int v) { g->set(row, col, v); }
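Since the file only defines the demo kernels, a host-side driver is needed to see the result of test. The sketch below is not part of the original file; it assumes the Grid<T> template is visible to device code and omits error checking.

// Hypothetical host driver for the demo kernels (illustration only).
int *d_result = nullptr;
cudaMalloc(&d_result, sizeof(int));
test<<<1, 1>>>(d_result);                 // device-side new/delete happens inside the kernel
int h_result = 0;
cudaMemcpy(&h_result, d_result, sizeof(int), cudaMemcpyDeviceToHost);
// h_result is now 1, the value written through g->set(0, 0, 1)
cudaFree(d_result);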
f0b6af8a916ab023f92ea1e04d05f78c8296ae28.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#define N 9999

__global__ void vector_addition (int *a, int *b, int *c, int n)
{
	for (int i=0; i<n; i++)
	{
		c[i] = a[i] + b[i];
	}
}

int main(){
	int *a, *b, *c;
	int *dev_a, *dev_b, *dev_c;

	// allocate memory
	a = (int*) malloc (sizeof(int) * N);
	b = (int*) malloc (sizeof(int) * N);
	c = (int*) malloc (sizeof(int) * N);

	// initialize array
	for (int i=0; i<N; i++)
	{
		a[i] = 1;
		b[i] = 2;
	}

	// Allocate device memory
	hipMalloc ( (void**)&dev_a, sizeof(int) * N);
	hipMalloc ( (void**)&dev_b, sizeof(int) * N);
	hipMalloc ( (void**)&dev_c, sizeof(int) * N);

	// transfer data from host to device memory
	hipMemcpy (dev_a, a, sizeof(int) * N, hipMemcpyHostToDevice);
	hipMemcpy (dev_b, b, sizeof(int) * N, hipMemcpyHostToDevice);

	// execute kernel
	hipLaunchKernelGGL(( vector_addition), dim3(1),dim3(1), 0, 0, dev_a,dev_b,dev_c,N);

	// transfer data back to host memory
	hipMemcpy (c, dev_c, sizeof(int) * N, hipMemcpyDeviceToHost);

	// display the result
	for (int i=0; i<N; i++)
	{
		printf("%d + %d = %d\n", a[i], b[i], c[i]);
	}

	// deallocate the device memory
	hipFree(dev_a);
	hipFree(dev_b);
	hipFree(dev_c);

	// deallocate host memory
	free(a);
	free(b);
	free(c);
}
f0b6af8a916ab023f92ea1e04d05f78c8296ae28.cu
#include <stdio.h>
#include <stdlib.h>
#define N 9999

__global__ void vector_addition (int *a, int *b, int *c, int n)
{
	for (int i=0; i<n; i++)
	{
		c[i] = a[i] + b[i];
	}
}

int main(){
	int *a, *b, *c;
	int *dev_a, *dev_b, *dev_c;

	// allocate memory
	a = (int*) malloc (sizeof(int) * N);
	b = (int*) malloc (sizeof(int) * N);
	c = (int*) malloc (sizeof(int) * N);

	// initialize array
	for (int i=0; i<N; i++)
	{
		a[i] = 1;
		b[i] = 2;
	}

	// Allocate device memory
	cudaMalloc ( (void**)&dev_a, sizeof(int) * N);
	cudaMalloc ( (void**)&dev_b, sizeof(int) * N);
	cudaMalloc ( (void**)&dev_c, sizeof(int) * N);

	// transfer data from host to device memory
	cudaMemcpy (dev_a, a, sizeof(int) * N, cudaMemcpyHostToDevice);
	cudaMemcpy (dev_b, b, sizeof(int) * N, cudaMemcpyHostToDevice);

	// execute kernel
	vector_addition<<<1,1>>>(dev_a,dev_b,dev_c,N);

	// transfer data back to host memory
	cudaMemcpy (c, dev_c, sizeof(int) * N, cudaMemcpyDeviceToHost);

	// display the result
	for (int i=0; i<N; i++)
	{
		printf("%d + %d = %d\n", a[i], b[i], c[i]);
	}

	// deallocate the device memory
	cudaFree(dev_a);
	cudaFree(dev_b);
	cudaFree(dev_c);

	// deallocate host memory
	free(a);
	free(b);
	free(c);
}
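The kernel above is launched with a single thread that loops over all N elements. The usual one-thread-per-element formulation is sketched below; the kernel name vector_addition_parallel is new and the sketch is not part of the original pair.

// Hypothetical per-element variant (illustration only).
__global__ void vector_addition_parallel(int *a, int *b, int *c, int n)
{
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	if (i < n) {
		c[i] = a[i] + b[i];
	}
}
// launched with enough blocks to cover N elements:
//   int threads = 256;
//   vector_addition_parallel<<<(N + threads - 1) / threads, threads>>>(dev_a, dev_b, dev_c, N);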
dabd5ad0f28f9ac0cf70ff84d1390cf53780316c.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include "common.h" #include "efficient.h" namespace StreamCompaction { namespace Efficient { using StreamCompaction::Common::PerformanceTimer; PerformanceTimer& timer() { static PerformanceTimer timer; return timer; } __global__ void kernUpSweep(int n, int d, int *data, int offset_1, int offset_2) { int k = threadIdx.x + (blockIdx.x * blockDim.x); if (k % offset_1 != 0) { return; } if (k > n) { return; } data[k + offset_1 - 1] += data[k + offset_2 - 1]; if (k == n - 1) { data[k] = 0; } } __global__ void kernDownSweep(int n, int d, int *data, int offset_1, int offset_2) { int k = threadIdx.x + (blockIdx.x * blockDim.x); if (k % offset_1 != 0) { return; } if (k > n) { return; } int t = data[k + offset_2 - 1]; data[k + offset_2 - 1] = data[k + offset_1 - 1]; data[k + offset_1 - 1] += t; } __global__ void kernZeroCorrect(int n, int *data) { int k = threadIdx.x + (blockIdx.x * blockDim.x); if (k > n) { return; } data[k] -= data[0]; } /** * Performs prefix-sum (aka scan) on idata, storing the result into odata. */ void scan(int n, int *odata, const int *idata) { int paddedSize = 1 << ilog2ceil(n); int *idataPadded = new int[paddedSize]; for (int i = 0; i < paddedSize; ++i) { idataPadded[i] = i < n ? idata[i] : 0; } int blockSize = 128; dim3 blocksPerGrid((paddedSize + blockSize - 1) / blockSize); dim3 threadsPerBlock(blockSize); int *dv_data; hipMalloc((void **) &dv_data, paddedSize * sizeof(int)); checkCUDAError("hipMalloc dv_data failed!"); hipMemcpy(dv_data, idataPadded, paddedSize * sizeof(int), hipMemcpyHostToDevice); checkCUDAError("hipMemcpy to dv_data failed!"); bool end = true; try { timer().startGpuTimer(); } catch (std::exception &) { end = false; } for (int i = 0; i < ilog2ceil(n); ++i) { kernUpSweep << <blocksPerGrid, threadsPerBlock >> > (paddedSize, i, dv_data, 1 << (i + 1), 1 << i); } // set root to 0 int z = 0; hipMemcpy(dv_data + n - 1, &z, sizeof(int), hipMemcpyHostToDevice); for (int i = ilog2ceil(n) - 1; i >= 0; i--) { kernDownSweep << <blocksPerGrid, threadsPerBlock >> > (paddedSize, i, dv_data, 1 << (i + 1), 1 << i); } if (end) { timer().endGpuTimer(); } kernZeroCorrect << <blocksPerGrid, threadsPerBlock >> > (paddedSize, dv_data); hipMemcpy(odata, dv_data, n * sizeof(int), hipMemcpyDeviceToHost); delete idataPadded; hipFree(dv_data); } __global__ void kernMapToBoolean(int n, int *odata, int *idata) { int idx = threadIdx.x + (blockIdx.x * blockDim.x); if (idx > n) { return; } odata[idx] = idata[idx] == 0 ? 0 : 1; } __global__ void kernScatter(int n, int *odata, int *bdata, int *scandata, int *idata) { int idx = threadIdx.x + (blockIdx.x * blockDim.x); if (idx > n) { return; } if (bdata[idx] == 1) { odata[scandata[idx]] = idata[idx]; } } /** * Performs stream compaction on idata, storing the result into odata. * All zeroes are discarded. * * @param n The number of elements in idata. * @param odata The array into which to store elements. * @param idata The array of elements to compact. * @returns The number of elements remaining after compaction. 
*/ int compact(int n, int *odata, const int *idata) { int blockSize = 128; dim3 blocksPerGrid((n + blockSize - 1) / blockSize); dim3 threadsPerBlock(blockSize); int *dv_bdata, *dv_scandata, *dv_idata, *dv_data; hipMalloc((void **) &dv_bdata, n * sizeof(int)); checkCUDAError("hipMalloc dv_bdata failed!"); hipMalloc((void **) &dv_scandata, n * sizeof(int)); checkCUDAError("hipMalloc dv_scandata failed!"); hipMalloc((void **) &dv_data, n * sizeof(int)); checkCUDAError("hipMalloc dv_data failed!"); hipMalloc((void **) &dv_idata, n * sizeof(int)); checkCUDAError("hipMalloc dv_idata failed!"); hipMemcpy(dv_idata, idata, n * sizeof(int), hipMemcpyHostToDevice); checkCUDAError("hipMemcpy to dv_idata failed!"); int *cpu_bdata, *cpu_scandata; cpu_bdata = new int[n]; cpu_scandata = new int[n]; timer().startGpuTimer(); kernMapToBoolean << <blocksPerGrid, threadsPerBlock >> > (n, dv_bdata, dv_idata); hipMemcpy(cpu_bdata, dv_bdata, n * sizeof(int), hipMemcpyDeviceToHost); int count = 0; for (int i = 0; i < n; ++i) { if (cpu_bdata[i] == 1) { count++; } } scan(n, cpu_scandata, cpu_bdata); hipMemcpy(dv_scandata, cpu_scandata, n * sizeof(int), hipMemcpyHostToDevice); hipLaunchKernelGGL(( kernScatter), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, n, dv_data, dv_bdata, dv_scandata, dv_idata); timer().endGpuTimer(); hipMemcpy(odata, dv_data, count * sizeof(int), hipMemcpyDeviceToHost); delete(cpu_bdata); delete(cpu_scandata); hipFree(dv_bdata); hipFree(dv_scandata); hipFree(dv_idata); hipFree(dv_data); return count; } } }
dabd5ad0f28f9ac0cf70ff84d1390cf53780316c.cu
#include <cuda.h> #include <cuda_runtime.h> #include "common.h" #include "efficient.h" namespace StreamCompaction { namespace Efficient { using StreamCompaction::Common::PerformanceTimer; PerformanceTimer& timer() { static PerformanceTimer timer; return timer; } __global__ void kernUpSweep(int n, int d, int *data, int offset_1, int offset_2) { int k = threadIdx.x + (blockIdx.x * blockDim.x); if (k % offset_1 != 0) { return; } if (k > n) { return; } data[k + offset_1 - 1] += data[k + offset_2 - 1]; if (k == n - 1) { data[k] = 0; } } __global__ void kernDownSweep(int n, int d, int *data, int offset_1, int offset_2) { int k = threadIdx.x + (blockIdx.x * blockDim.x); if (k % offset_1 != 0) { return; } if (k > n) { return; } int t = data[k + offset_2 - 1]; data[k + offset_2 - 1] = data[k + offset_1 - 1]; data[k + offset_1 - 1] += t; } __global__ void kernZeroCorrect(int n, int *data) { int k = threadIdx.x + (blockIdx.x * blockDim.x); if (k > n) { return; } data[k] -= data[0]; } /** * Performs prefix-sum (aka scan) on idata, storing the result into odata. */ void scan(int n, int *odata, const int *idata) { int paddedSize = 1 << ilog2ceil(n); int *idataPadded = new int[paddedSize]; for (int i = 0; i < paddedSize; ++i) { idataPadded[i] = i < n ? idata[i] : 0; } int blockSize = 128; dim3 blocksPerGrid((paddedSize + blockSize - 1) / blockSize); dim3 threadsPerBlock(blockSize); int *dv_data; cudaMalloc((void **) &dv_data, paddedSize * sizeof(int)); checkCUDAError("cudaMalloc dv_data failed!"); cudaMemcpy(dv_data, idataPadded, paddedSize * sizeof(int), cudaMemcpyHostToDevice); checkCUDAError("cudaMemcpy to dv_data failed!"); bool end = true; try { timer().startGpuTimer(); } catch (std::exception &) { end = false; } for (int i = 0; i < ilog2ceil(n); ++i) { kernUpSweep << <blocksPerGrid, threadsPerBlock >> > (paddedSize, i, dv_data, 1 << (i + 1), 1 << i); } // set root to 0 int z = 0; cudaMemcpy(dv_data + n - 1, &z, sizeof(int), cudaMemcpyHostToDevice); for (int i = ilog2ceil(n) - 1; i >= 0; i--) { kernDownSweep << <blocksPerGrid, threadsPerBlock >> > (paddedSize, i, dv_data, 1 << (i + 1), 1 << i); } if (end) { timer().endGpuTimer(); } kernZeroCorrect << <blocksPerGrid, threadsPerBlock >> > (paddedSize, dv_data); cudaMemcpy(odata, dv_data, n * sizeof(int), cudaMemcpyDeviceToHost); delete idataPadded; cudaFree(dv_data); } __global__ void kernMapToBoolean(int n, int *odata, int *idata) { int idx = threadIdx.x + (blockIdx.x * blockDim.x); if (idx > n) { return; } odata[idx] = idata[idx] == 0 ? 0 : 1; } __global__ void kernScatter(int n, int *odata, int *bdata, int *scandata, int *idata) { int idx = threadIdx.x + (blockIdx.x * blockDim.x); if (idx > n) { return; } if (bdata[idx] == 1) { odata[scandata[idx]] = idata[idx]; } } /** * Performs stream compaction on idata, storing the result into odata. * All zeroes are discarded. * * @param n The number of elements in idata. * @param odata The array into which to store elements. * @param idata The array of elements to compact. * @returns The number of elements remaining after compaction. 
*/ int compact(int n, int *odata, const int *idata) { int blockSize = 128; dim3 blocksPerGrid((n + blockSize - 1) / blockSize); dim3 threadsPerBlock(blockSize); int *dv_bdata, *dv_scandata, *dv_idata, *dv_data; cudaMalloc((void **) &dv_bdata, n * sizeof(int)); checkCUDAError("cudaMalloc dv_bdata failed!"); cudaMalloc((void **) &dv_scandata, n * sizeof(int)); checkCUDAError("cudaMalloc dv_scandata failed!"); cudaMalloc((void **) &dv_data, n * sizeof(int)); checkCUDAError("cudaMalloc dv_data failed!"); cudaMalloc((void **) &dv_idata, n * sizeof(int)); checkCUDAError("cudaMalloc dv_idata failed!"); cudaMemcpy(dv_idata, idata, n * sizeof(int), cudaMemcpyHostToDevice); checkCUDAError("cudaMemcpy to dv_idata failed!"); int *cpu_bdata, *cpu_scandata; cpu_bdata = new int[n]; cpu_scandata = new int[n]; timer().startGpuTimer(); kernMapToBoolean << <blocksPerGrid, threadsPerBlock >> > (n, dv_bdata, dv_idata); cudaMemcpy(cpu_bdata, dv_bdata, n * sizeof(int), cudaMemcpyDeviceToHost); int count = 0; for (int i = 0; i < n; ++i) { if (cpu_bdata[i] == 1) { count++; } } scan(n, cpu_scandata, cpu_bdata); cudaMemcpy(dv_scandata, cpu_scandata, n * sizeof(int), cudaMemcpyHostToDevice); kernScatter<<<blocksPerGrid, threadsPerBlock>>>(n, dv_data, dv_bdata, dv_scandata, dv_idata); timer().endGpuTimer(); cudaMemcpy(odata, dv_data, count * sizeof(int), cudaMemcpyDeviceToHost); delete(cpu_bdata); delete(cpu_scandata); cudaFree(dv_bdata); cudaFree(dv_scandata); cudaFree(dv_idata); cudaFree(dv_data); return count; } } }
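The kernUpSweep/kernDownSweep pair above implements a work-efficient exclusive scan on data padded to the next power of two. For reference only (this is a sketch, not part of the original project), a plain CPU exclusive scan that the device result can be checked against:

// Reference (CPU) exclusive scan: each output element is the sum of all earlier inputs.
#include <vector>

std::vector<int> exclusiveScan(const std::vector<int> &in)
{
    std::vector<int> out(in.size(), 0);
    int running = 0;
    for (int i = 0; i < (int)in.size(); ++i) {
        out[i] = running;     // prefix sum excluding the current element
        running += in[i];
    }
    return out;
}
// Example: in = {1, 0, 2, 2, 1}  ->  out = {0, 1, 1, 3, 5}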
3af18ae6669e96eda39405ebed1c0b1ec53ef100.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<iostream> using namespace std; __global__ void vecMat(int *a, int *b, int *c, int n) { int row = threadIdx.y + blockIdx.y * blockDim.y; int sum = 0; for (int j = 0; j < n; j++) { printf("\n%d", (row * n) + j); sum += a[row * n + j] * b[j]; } c[row] = sum; } int main() { int n; cin >> n; int *a = new int[n * n]; int *b = new int[n]; int *c = new int[n]; int size = n * sizeof(int); cout<<"Matrix A: "<<endl; for (int i = 0; i < n; i++) { for(int j = 0; j < n; j++) { cin >> a[i * n + j]; } } cout<<"Matrix A is: "<<endl; for(int i = 0; i < n; i++) { for(int j = 0; j < n; j++) { cout << "a[" << i * n + j << "] = " << a[i * n + j] << " "; } cout << endl; } cout<<"Vector B: "<<endl; for(int i = 0; i < n; i++) { cin >> b[i]; } cout<<"Vector B is: "<<endl; for(int i = 0; i < n; i++) { cout << "b[" << i << "] = " <<b[i] << " "; } cout<<endl; int *dev_a, *dev_b, *dev_c; hipMalloc(&dev_a, n * size); hipMalloc(&dev_b, size); hipMalloc(&dev_c, size); hipMemcpy(dev_a, a, n * size, hipMemcpyHostToDevice); hipMemcpy(dev_b, b, size, hipMemcpyHostToDevice); dim3 grid_dim(n, n, 1); hipLaunchKernelGGL(( vecMat) , dim3(grid_dim), dim3(1) , 0, 0, dev_a, dev_b, dev_c, n); hipMemcpy(c, dev_c, size, hipMemcpyDeviceToHost); cout << "Output: " << endl; for(int i = 0; i < n; i++) { cout<< "c[" << i << "] = " << c[i] <<" "; } }
3af18ae6669e96eda39405ebed1c0b1ec53ef100.cu
#include<iostream> using namespace std; __global__ void vecMat(int *a, int *b, int *c, int n) { int row = threadIdx.y + blockIdx.y * blockDim.y; int sum = 0; for (int j = 0; j < n; j++) { printf("\n%d", (row * n) + j); sum += a[row * n + j] * b[j]; } c[row] = sum; } int main() { int n; cin >> n; int *a = new int[n * n]; int *b = new int[n]; int *c = new int[n]; int size = n * sizeof(int); cout<<"Matrix A: "<<endl; for (int i = 0; i < n; i++) { for(int j = 0; j < n; j++) { cin >> a[i * n + j]; } } cout<<"Matrix A is: "<<endl; for(int i = 0; i < n; i++) { for(int j = 0; j < n; j++) { cout << "a[" << i * n + j << "] = " << a[i * n + j] << " "; } cout << endl; } cout<<"Vector B: "<<endl; for(int i = 0; i < n; i++) { cin >> b[i]; } cout<<"Vector B is: "<<endl; for(int i = 0; i < n; i++) { cout << "b[" << i << "] = " <<b[i] << " "; } cout<<endl; int *dev_a, *dev_b, *dev_c; cudaMalloc(&dev_a, n * size); cudaMalloc(&dev_b, size); cudaMalloc(&dev_c, size); cudaMemcpy(dev_a, a, n * size, cudaMemcpyHostToDevice); cudaMemcpy(dev_b, b, size, cudaMemcpyHostToDevice); dim3 grid_dim(n, n, 1); vecMat <<< grid_dim, 1 >>> (dev_a, dev_b, dev_c, n); cudaMemcpy(c, dev_c, size, cudaMemcpyDeviceToHost); cout << "Output: " << endl; for(int i = 0; i < n; i++) { cout<< "c[" << i << "] = " << c[i] <<" "; } }
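In the file above, the grid is n-by-n blocks of one thread each, but the kernel derives the row only from the y index, so every row ends up being recomputed once per blockIdx.x. A sketch of an equivalent launch that computes each row exactly once; the kernel name matVec and launch parameters are illustrative, not part of the original file.

// Sketch: one thread per output row, 1D launch, with a bounds check.
__global__ void matVec(const int *a, const int *b, int *c, int n)
{
    int row = blockIdx.x * blockDim.x + threadIdx.x;
    if (row < n) {
        int sum = 0;
        for (int j = 0; j < n; j++) {
            sum += a[row * n + j] * b[j];   // dot product of row `row` with vector b
        }
        c[row] = sum;
    }
}

// Host side:
//   int threads = 128;
//   int blocks  = (n + threads - 1) / threads;
//   matVec<<<blocks, threads>>>(dev_a, dev_b, dev_c, n);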
4928f40680d07da9fda00cc9b2367282651f2214.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2009-2018 The Regents of the University of Michigan // This file is part of the HOOMD-blue project, released under the BSD 3-Clause License. // Maintainer: jglaser #include "MolecularForceCompute.cuh" #include <thrust/iterator/permutation_iterator.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/device_vector.h> #include <thrust/binary_search.h> #include <thrust/scan.h> #include <thrust/extrema.h> #include <thrust/reduce.h> #include <thrust/device_ptr.h> #include <thrust/sort.h> #include <thrust/execution_policy.h> /*! \file MolecularForceCompute.cu \brief Contains GPU kernel code used by MolecularForceCompute */ //! Sort local molecules and assign local molecule indices to particles hipError_t gpu_sort_by_molecule(unsigned int nptl, const unsigned int *d_tag, const unsigned int *d_molecule_tag, unsigned int *d_local_molecule_tags, unsigned int *d_local_unique_molecule_tags, unsigned int *d_local_molecule_idx, unsigned int *d_sorted_by_tag, unsigned int *d_idx_sorted_by_tag, unsigned int *d_molecule_length, unsigned int &n_local_molecules, unsigned int &max_len, unsigned int &n_local_ptls_in_molecules, const CachedAllocator& alloc) { thrust::device_ptr<const unsigned int> tag(d_tag); thrust::device_ptr<const unsigned int> molecule_tag(d_molecule_tag); thrust::device_ptr<unsigned int> local_molecule_tags(d_local_molecule_tags); thrust::device_ptr<unsigned int> local_unique_molecule_tags(d_local_unique_molecule_tags); thrust::device_ptr<unsigned int> local_molecule_idx(d_local_molecule_idx); thrust::device_ptr<unsigned int> sorted_by_tag(d_sorted_by_tag); thrust::device_ptr<unsigned int> idx_sorted_by_tag(d_idx_sorted_by_tag); thrust::device_ptr<unsigned int> molecule_length(d_molecule_length); // sort local particles by tag thrust::copy(tag,tag+nptl,sorted_by_tag); auto iter = thrust::counting_iterator<unsigned int>(0); thrust::copy(iter, iter+nptl, idx_sorted_by_tag); thrust::sort_by_key(thrust::hip::par(alloc), sorted_by_tag, sorted_by_tag+nptl, idx_sorted_by_tag); auto molecule_tag_lookup = thrust::make_permutation_iterator(molecule_tag, tag); auto molecule_tag_lookup_sorted_by_tag = thrust::make_permutation_iterator(molecule_tag_lookup, idx_sorted_by_tag); thrust::copy(molecule_tag_lookup_sorted_by_tag, molecule_tag_lookup_sorted_by_tag+nptl, local_molecule_tags); // sort local particle indices by global molecule tag, keeping tag order thrust::stable_sort_by_key(thrust::hip::par(alloc), local_molecule_tags, local_molecule_tags + nptl, idx_sorted_by_tag); // find the end of the molecule list auto end = thrust::lower_bound(thrust::hip::par, local_molecule_tags, local_molecule_tags + nptl, NO_MOLECULE); n_local_ptls_in_molecules = end - local_molecule_tags; // gather unique molecule tags, and reduce their lengths by key thrust::constant_iterator<unsigned int> one(1); #if (CUDART_VERSION < 8000) // work around CUDA 7.5 bug // https://devtalk.nvidia.com/default/topic/900103/thrust-reduce_by_key-issues-with-maxwell-devices/ // allocate a temporary vector thrust::device_vector<unsigned int> local_molecule_tags_vec(nptl); thrust::copy(thrust::hip::par(alloc), local_molecule_tags, local_molecule_tags + nptl, local_molecule_tags_vec.begin()); auto new_end = thrust::reduce_by_key(thrust::hip::par(alloc), local_molecule_tags_vec.begin(), local_molecule_tags_vec.begin() + n_local_ptls_in_molecules, one, local_unique_molecule_tags, molecule_length ); #else auto new_end = 
thrust::reduce_by_key(thrust::hip::par(alloc), local_molecule_tags, end, one, local_unique_molecule_tags, molecule_length ); #endif n_local_molecules = new_end.first - local_unique_molecule_tags; // compute maximum molecule length thrust::device_ptr<unsigned int> max_ptr = thrust::max_element(molecule_length, molecule_length + n_local_molecules); hipMemcpy(&max_len, max_ptr.get(), sizeof(unsigned int), hipMemcpyDeviceToHost); // assign local molecule tags to particles thrust::fill(thrust::hip::par(alloc), local_molecule_idx, local_molecule_idx+nptl,NO_MOLECULE); auto idx_lookup = thrust::make_permutation_iterator(local_molecule_idx, idx_sorted_by_tag); thrust::lower_bound(thrust::hip::par(alloc), local_unique_molecule_tags, local_unique_molecule_tags + n_local_molecules, local_molecule_tags, end, idx_lookup); return hipSuccess; } __global__ void gpu_fill_molecule_table_kernel( unsigned int nptl, Index2D molecule_idx, const unsigned int *d_molecule_idx, unsigned int *d_molecule_list, unsigned int *d_molecule_order) { unsigned int idx = blockIdx.x*blockDim.x+threadIdx.x; if (idx >= nptl) return; unsigned int molidx = d_molecule_idx[idx]; if (molidx != NO_MOLECULE) d_molecule_list[molecule_idx(molidx, d_molecule_order[idx])] = idx; } hipError_t gpu_fill_molecule_table( unsigned int nptl, unsigned int n_local_ptls_in_molecules, Index2D molecule_idx, const unsigned int *d_molecule_idx, const unsigned int *d_local_molecule_tags, const unsigned int *d_idx_sorted_by_tag, unsigned int *d_molecule_list, unsigned int *d_molecule_order, unsigned int block_size, const CachedAllocator& alloc ) { thrust::device_ptr<unsigned int> molecule_order(d_molecule_order); thrust::device_ptr<const unsigned int> local_molecule_tags(d_local_molecule_tags); thrust::device_ptr<const unsigned int> idx_sorted_by_tag(d_idx_sorted_by_tag); auto idx_lookup = thrust::make_permutation_iterator(molecule_order, idx_sorted_by_tag); // generate ascending index for every molecule thrust::constant_iterator<unsigned int> one(1); thrust::exclusive_scan_by_key(thrust::hip::par(alloc), local_molecule_tags, local_molecule_tags+n_local_ptls_in_molecules, one, idx_lookup); // write out the table hipLaunchKernelGGL(( gpu_fill_molecule_table_kernel), dim3(nptl/block_size+1),dim3(block_size), 0, 0, nptl, molecule_idx, d_molecule_idx, d_molecule_list, d_molecule_order); return hipSuccess; }
4928f40680d07da9fda00cc9b2367282651f2214.cu
// Copyright (c) 2009-2018 The Regents of the University of Michigan // This file is part of the HOOMD-blue project, released under the BSD 3-Clause License. // Maintainer: jglaser #include "MolecularForceCompute.cuh" #include <thrust/iterator/permutation_iterator.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/device_vector.h> #include <thrust/binary_search.h> #include <thrust/scan.h> #include <thrust/extrema.h> #include <thrust/reduce.h> #include <thrust/device_ptr.h> #include <thrust/sort.h> #include <thrust/execution_policy.h> /*! \file MolecularForceCompute.cu \brief Contains GPU kernel code used by MolecularForceCompute */ //! Sort local molecules and assign local molecule indices to particles cudaError_t gpu_sort_by_molecule(unsigned int nptl, const unsigned int *d_tag, const unsigned int *d_molecule_tag, unsigned int *d_local_molecule_tags, unsigned int *d_local_unique_molecule_tags, unsigned int *d_local_molecule_idx, unsigned int *d_sorted_by_tag, unsigned int *d_idx_sorted_by_tag, unsigned int *d_molecule_length, unsigned int &n_local_molecules, unsigned int &max_len, unsigned int &n_local_ptls_in_molecules, const CachedAllocator& alloc) { thrust::device_ptr<const unsigned int> tag(d_tag); thrust::device_ptr<const unsigned int> molecule_tag(d_molecule_tag); thrust::device_ptr<unsigned int> local_molecule_tags(d_local_molecule_tags); thrust::device_ptr<unsigned int> local_unique_molecule_tags(d_local_unique_molecule_tags); thrust::device_ptr<unsigned int> local_molecule_idx(d_local_molecule_idx); thrust::device_ptr<unsigned int> sorted_by_tag(d_sorted_by_tag); thrust::device_ptr<unsigned int> idx_sorted_by_tag(d_idx_sorted_by_tag); thrust::device_ptr<unsigned int> molecule_length(d_molecule_length); // sort local particles by tag thrust::copy(tag,tag+nptl,sorted_by_tag); auto iter = thrust::counting_iterator<unsigned int>(0); thrust::copy(iter, iter+nptl, idx_sorted_by_tag); thrust::sort_by_key(thrust::cuda::par(alloc), sorted_by_tag, sorted_by_tag+nptl, idx_sorted_by_tag); auto molecule_tag_lookup = thrust::make_permutation_iterator(molecule_tag, tag); auto molecule_tag_lookup_sorted_by_tag = thrust::make_permutation_iterator(molecule_tag_lookup, idx_sorted_by_tag); thrust::copy(molecule_tag_lookup_sorted_by_tag, molecule_tag_lookup_sorted_by_tag+nptl, local_molecule_tags); // sort local particle indices by global molecule tag, keeping tag order thrust::stable_sort_by_key(thrust::cuda::par(alloc), local_molecule_tags, local_molecule_tags + nptl, idx_sorted_by_tag); // find the end of the molecule list auto end = thrust::lower_bound(thrust::cuda::par, local_molecule_tags, local_molecule_tags + nptl, NO_MOLECULE); n_local_ptls_in_molecules = end - local_molecule_tags; // gather unique molecule tags, and reduce their lengths by key thrust::constant_iterator<unsigned int> one(1); #if (CUDART_VERSION < 8000) // work around CUDA 7.5 bug // https://devtalk.nvidia.com/default/topic/900103/thrust-reduce_by_key-issues-with-maxwell-devices/ // allocate a temporary vector thrust::device_vector<unsigned int> local_molecule_tags_vec(nptl); thrust::copy(thrust::cuda::par(alloc), local_molecule_tags, local_molecule_tags + nptl, local_molecule_tags_vec.begin()); auto new_end = thrust::reduce_by_key(thrust::cuda::par(alloc), local_molecule_tags_vec.begin(), local_molecule_tags_vec.begin() + n_local_ptls_in_molecules, one, local_unique_molecule_tags, molecule_length ); #else auto new_end = thrust::reduce_by_key(thrust::cuda::par(alloc), local_molecule_tags, end, one, 
local_unique_molecule_tags, molecule_length ); #endif n_local_molecules = new_end.first - local_unique_molecule_tags; // compute maximum molecule length thrust::device_ptr<unsigned int> max_ptr = thrust::max_element(molecule_length, molecule_length + n_local_molecules); cudaMemcpy(&max_len, max_ptr.get(), sizeof(unsigned int), cudaMemcpyDeviceToHost); // assign local molecule tags to particles thrust::fill(thrust::cuda::par(alloc), local_molecule_idx, local_molecule_idx+nptl,NO_MOLECULE); auto idx_lookup = thrust::make_permutation_iterator(local_molecule_idx, idx_sorted_by_tag); thrust::lower_bound(thrust::cuda::par(alloc), local_unique_molecule_tags, local_unique_molecule_tags + n_local_molecules, local_molecule_tags, end, idx_lookup); return cudaSuccess; } __global__ void gpu_fill_molecule_table_kernel( unsigned int nptl, Index2D molecule_idx, const unsigned int *d_molecule_idx, unsigned int *d_molecule_list, unsigned int *d_molecule_order) { unsigned int idx = blockIdx.x*blockDim.x+threadIdx.x; if (idx >= nptl) return; unsigned int molidx = d_molecule_idx[idx]; if (molidx != NO_MOLECULE) d_molecule_list[molecule_idx(molidx, d_molecule_order[idx])] = idx; } cudaError_t gpu_fill_molecule_table( unsigned int nptl, unsigned int n_local_ptls_in_molecules, Index2D molecule_idx, const unsigned int *d_molecule_idx, const unsigned int *d_local_molecule_tags, const unsigned int *d_idx_sorted_by_tag, unsigned int *d_molecule_list, unsigned int *d_molecule_order, unsigned int block_size, const CachedAllocator& alloc ) { thrust::device_ptr<unsigned int> molecule_order(d_molecule_order); thrust::device_ptr<const unsigned int> local_molecule_tags(d_local_molecule_tags); thrust::device_ptr<const unsigned int> idx_sorted_by_tag(d_idx_sorted_by_tag); auto idx_lookup = thrust::make_permutation_iterator(molecule_order, idx_sorted_by_tag); // generate ascending index for every molecule thrust::constant_iterator<unsigned int> one(1); thrust::exclusive_scan_by_key(thrust::cuda::par(alloc), local_molecule_tags, local_molecule_tags+n_local_ptls_in_molecules, one, idx_lookup); // write out the table gpu_fill_molecule_table_kernel<<<nptl/block_size+1,block_size>>>( nptl, molecule_idx, d_molecule_idx, d_molecule_list, d_molecule_order); return cudaSuccess; }
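In gpu_sort_by_molecule above, thrust::reduce_by_key over a constant iterator of ones is what turns the sorted molecule tags into per-molecule particle counts. A standalone sketch of that idiom follows; the data and variable names are illustrative.

#include <cstdio>
#include <thrust/device_vector.h>
#include <thrust/reduce.h>
#include <thrust/iterator/constant_iterator.h>

int main()
{
    int h_keys[] = {3, 3, 3, 7, 7, 9};                     // already-sorted "molecule tags"
    thrust::device_vector<int> keys(h_keys, h_keys + 6);
    thrust::device_vector<int> unique_keys(6), lengths(6);

    // summing a 1 for every element of a key run yields the run length
    auto ends = thrust::reduce_by_key(keys.begin(), keys.end(),
                                      thrust::constant_iterator<int>(1),
                                      unique_keys.begin(), lengths.begin());

    int n_unique = ends.first - unique_keys.begin();
    std::printf("%d unique keys\n", n_unique);             // prints 3: keys {3,7,9}, lengths {3,2,1}
    return 0;
}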
54ccf129241458c45364b9622b091980be224eff.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* LICENSE: this code is subject to the license listed at http://www.amolf.nl/~vanmeel/mdgpu/download.html Among other restrictions, this code is released under the GNU Public License (GPL). Authors: A. Arnold (original) Kipton Barros (modifications) ---- Generate pseudo-random numbers using a linear congruential generator. The generated random numbers are identical to those produced by the lrand48() provided by the C standard library. Usage: // From host, allocate the Rand48 structure, pass it to CUDA, and release it. // The random sequence is persistent across CUDA kernel calls. void hostFunction() { rng = new Rand48(); rng->init(GRID_DIM*BLOCK_DIM, SEED); cudaFunction1 <<<GRID_DIM, BLOCK_DIM, sharedMem>>> (*rng); cudaFunction2 <<<GRID_DIM, BLOCK_DIM, sharedMem>>> (*rng); rng->destroy(); delete rng; } // From CUDA, load the random state from device memory into local registers, // generate random numbers, and finally store state back to device memory. // Note that the random state, rng, is stored in registers, and is being updated // with each device call. __global__ void cudaFunction1(Rand48 rng) { rand48_loadState(rng); ... rand48_nextInt(rng); ... rand48_storeState(rng); } */ struct Rand48 { // strided iteration constants (48-bit, distributed on 2x 24-bit) uint2 A, C; // CUDA array -- random numbers for all threads uint2 *state; // random number for a single thread (used by CUDA device functions only) uint2 state0; // magic constants for rand48 static const unsigned long long a = 0x5DEECE66DLL, c = 0xB; void init(int nThreads, int seed) { uint2* seeds = new uint2[ nThreads ]; hipMalloc((void**) &state, sizeof(uint2)*nThreads); // calculate strided iteration constants unsigned long long A, C; A = 1LL; C = 0LL; for (unsigned int i = 0; i < nThreads; ++i) { C += A*c; A *= a; } this->A.x = A & 0xFFFFFFLL; this->A.y = (A >> 24) & 0xFFFFFFLL; this->C.x = C & 0xFFFFFFLL; this->C.y = (C >> 24) & 0xFFFFFFLL; // prepare first nThreads random numbers from seed unsigned long long x = (((unsigned long long)seed) << 16) | 0x330E; for (unsigned int i = 0; i < nThreads; ++i) { x = a*x + c; seeds[i].x = x & 0xFFFFFFLL; seeds[i].y = (x >> 24) & 0xFFFFFFLL; } hipMemcpy(state, seeds, sizeof(uint2)*nThreads, hipMemcpyHostToDevice); delete[] seeds; } void destroy() { hipFree((void*) state); } }; __device__ inline void rand48_loadState(Rand48 &r) { int i = ((blockIdx.y*gridDim.x + blockIdx.x) * blockDim.y + threadIdx.y) * blockDim.x + threadIdx.x; r.state0 = r.state[i]; } __device__ inline void rand48_storeState(Rand48 &r) { int i = ((blockIdx.y*gridDim.x + blockIdx.x) * blockDim.y + threadIdx.y) * blockDim.x + threadIdx.x; r.state[i] = r.state0; } __device__ inline void rand48_iterate(Rand48 &r) { // state0 is 2x 24bit to handle overflows optimally, i.e. // in one operation. 
// the multiplication commands however give the low and hi 32 bit, // which have to be converted as follows: // 48bit in bytes = ABCD EF (space marks 32bit boundary) // R0 = ABC // R1 = D EF unsigned int R0, R1; // low 24-bit multiplication const unsigned int lo00 = __umul24(r.state0.x, r.A.x); const unsigned int hi00 = __umulhi(r.state0.x, r.A.x); // 24bit distribution of 32bit multiplication results R0 = (lo00 & 0xFFFFFF); R1 = (lo00 >> 24) | (hi00 << 8); R0 += r.C.x; R1 += r.C.y; // transfer overflows R1 += (R0 >> 24); R0 &= 0xFFFFFF; // cross-terms, low/hi 24-bit multiplication R1 += __umul24(r.state0.y, r.A.x); R1 += __umul24(r.state0.x, r.A.y); R1 &= 0xFFFFFF; r.state0 = make_uint2(R0, R1); } __device__ inline int rand48_nextInt(Rand48 &r) { // get upper 31 (!) bits of the 2x 24bits int res = ( r.state0.x >> 17 ) | ( r.state0.y << 7 ); rand48_iterate(r); return res; } // returns a float in the range [0, 1) __device__ inline float rand48_nextFloat(Rand48 &r) { // use only upper 24 bits since floating point has 24 bit significand // (ref: Java random documentation) float res = r.state0.y / (float)(1<<24); rand48_iterate(r); return res; }
54ccf129241458c45364b9622b091980be224eff.cu
/* LICENSE: this code is subject to the license listed at http://www.amolf.nl/~vanmeel/mdgpu/download.html Among other restrictions, this code is released under the GNU Public License (GPL). Authors: A. Arnold (original) Kipton Barros (modifications) ---- Generate pseudo-random numbers using a linear congruential generator. The generated random numbers are identical to those produced by the lrand48() provided by the C standard library. Usage: // From host, allocate the Rand48 structure, pass it to CUDA, and release it. // The random sequence is persistent across CUDA kernel calls. void hostFunction() { rng = new Rand48(); rng->init(GRID_DIM*BLOCK_DIM, SEED); cudaFunction1 <<<GRID_DIM, BLOCK_DIM, sharedMem>>> (*rng); cudaFunction2 <<<GRID_DIM, BLOCK_DIM, sharedMem>>> (*rng); rng->destroy(); delete rng; } // From CUDA, load the random state from device memory into local registers, // generate random numbers, and finally store state back to device memory. // Note that the random state, rng, is stored in registers, and is being updated // with each device call. __global__ void cudaFunction1(Rand48 rng) { rand48_loadState(rng); ... rand48_nextInt(rng); ... rand48_storeState(rng); } */ struct Rand48 { // strided iteration constants (48-bit, distributed on 2x 24-bit) uint2 A, C; // CUDA array -- random numbers for all threads uint2 *state; // random number for a single thread (used by CUDA device functions only) uint2 state0; // magic constants for rand48 static const unsigned long long a = 0x5DEECE66DLL, c = 0xB; void init(int nThreads, int seed) { uint2* seeds = new uint2[ nThreads ]; cudaMalloc((void**) &state, sizeof(uint2)*nThreads); // calculate strided iteration constants unsigned long long A, C; A = 1LL; C = 0LL; for (unsigned int i = 0; i < nThreads; ++i) { C += A*c; A *= a; } this->A.x = A & 0xFFFFFFLL; this->A.y = (A >> 24) & 0xFFFFFFLL; this->C.x = C & 0xFFFFFFLL; this->C.y = (C >> 24) & 0xFFFFFFLL; // prepare first nThreads random numbers from seed unsigned long long x = (((unsigned long long)seed) << 16) | 0x330E; for (unsigned int i = 0; i < nThreads; ++i) { x = a*x + c; seeds[i].x = x & 0xFFFFFFLL; seeds[i].y = (x >> 24) & 0xFFFFFFLL; } cudaMemcpy(state, seeds, sizeof(uint2)*nThreads, cudaMemcpyHostToDevice); delete[] seeds; } void destroy() { cudaFree((void*) state); } }; __device__ inline void rand48_loadState(Rand48 &r) { int i = ((blockIdx.y*gridDim.x + blockIdx.x) * blockDim.y + threadIdx.y) * blockDim.x + threadIdx.x; r.state0 = r.state[i]; } __device__ inline void rand48_storeState(Rand48 &r) { int i = ((blockIdx.y*gridDim.x + blockIdx.x) * blockDim.y + threadIdx.y) * blockDim.x + threadIdx.x; r.state[i] = r.state0; } __device__ inline void rand48_iterate(Rand48 &r) { // state0 is 2x 24bit to handle overflows optimally, i.e. // in one operation. 
// the multiplication commands however give the low and hi 32 bit, // which have to be converted as follows: // 48bit in bytes = ABCD EF (space marks 32bit boundary) // R0 = ABC // R1 = D EF unsigned int R0, R1; // low 24-bit multiplication const unsigned int lo00 = __umul24(r.state0.x, r.A.x); const unsigned int hi00 = __umulhi(r.state0.x, r.A.x); // 24bit distribution of 32bit multiplication results R0 = (lo00 & 0xFFFFFF); R1 = (lo00 >> 24) | (hi00 << 8); R0 += r.C.x; R1 += r.C.y; // transfer overflows R1 += (R0 >> 24); R0 &= 0xFFFFFF; // cross-terms, low/hi 24-bit multiplication R1 += __umul24(r.state0.y, r.A.x); R1 += __umul24(r.state0.x, r.A.y); R1 &= 0xFFFFFF; r.state0 = make_uint2(R0, R1); } __device__ inline int rand48_nextInt(Rand48 &r) { // get upper 31 (!) bits of the 2x 24bits int res = ( r.state0.x >> 17 ) | ( r.state0.y << 7 ); rand48_iterate(r); return res; } // returns a float in the range [0, 1) __device__ inline float rand48_nextFloat(Rand48 &r) { // use only upper 24 bits since floating point has 24 bit significand // (ref: Java random documentation) float res = r.state0.y / (float)(1<<24); rand48_iterate(r); return res; }
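Following the usage pattern documented in the header comment of the file above, here is a minimal sketch of a kernel that fills an array with uniform [0, 1) samples; the kernel name fillUniform, the output buffer, and the perThread count are illustrative additions, not part of the original file.

// Sketch: each thread draws perThread floats and writes them to its own slice of out.
__global__ void fillUniform(Rand48 rng, float *out, int perThread)
{
    rand48_loadState(rng);                        // move this thread's state into registers
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    for (int k = 0; k < perThread; k++) {
        out[tid * perThread + k] = rand48_nextFloat(rng);
    }
    rand48_storeState(rng);                       // persist the advanced state for later launches
}

// Host side, mirroring the documented init/destroy sequence:
//   Rand48 *rng = new Rand48();
//   rng->init(GRID_DIM * BLOCK_DIM, SEED);
//   fillUniform<<<GRID_DIM, BLOCK_DIM>>>(*rng, d_out, PER_THREAD);
//   rng->destroy();
//   delete rng;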
eb99ffd3893e9fbf83b48c508a09a49f5cb36d6e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include "gputimer.h" #include "utils.h" const int BLOCKSIZE = 128; //const int NUMBLOCKS = 1000; // set this to 1 or 2 for debugging const int NUMBLOCKS = 2; // set this to 1 or 2 for debugging const int N = BLOCKSIZE*NUMBLOCKS; /* * TODO: modify the foo and bar kernels to use tiling: * - copy the input data to shared memory * - perform the computation there * - copy the result back to global memory * - assume thread blocks of 128 threads * - handle intra-block boundaries correctly * You can ignore boundary conditions (we ignore the first 2 and last 2 elements) */ #if 0 __global__ void foo(float out[], float A[], float B[], float C[], float D[], float E[]){ int i = threadIdx.x + blockIdx.x*blockDim.x; out[i] = (A[i] + B[i] + C[i] + D[i] + E[i]) / 5.0f; } __global__ void bar(float out[], float in[]) { int i = threadIdx.x + blockIdx.x*blockDim.x; out[i] = (in[i-2] + in[i-1] + in[i] + in[i+1] + in[i+2]) / 5.0f; } void cpuFoo(float out[], float A[], float B[], float C[], float D[], float E[]) { for (int i=0; i<N; i++) { out[i] = (A[i] + B[i] + C[i] + D[i] + E[i]) / 5.0f; } } void cpuBar(float out[], float in[]) { // ignore the boundaries for (int i = 2; i<N - 2; i++) { out[i] = (in[i - 2] + in[i - 1] + in[i] + in[i + 1] + in[i + 2]) / 5.0f; } } #else __global__ void foo(float out[], float A[], float B[], float C[], float D[], float E[]) { __shared__ float tileA[BLOCKSIZE]; __shared__ float tileB[BLOCKSIZE]; __shared__ float tileC[BLOCKSIZE]; __shared__ float tileD[BLOCKSIZE]; __shared__ float tileE[BLOCKSIZE]; int i = threadIdx.x + blockIdx.x*blockDim.x; tileA[threadIdx.x] = A[i]; tileB[threadIdx.x] = B[i]; tileC[threadIdx.x] = C[i]; tileD[threadIdx.x] = D[i]; tileE[threadIdx.x] = E[i]; __syncthreads(); out[i] = (tileA[threadIdx.x] + tileB[threadIdx.x] + tileC[threadIdx.x] + tileD[threadIdx.x] + tileE[threadIdx.x]) / 5.0f; } __global__ void bar(float out[], float in[]) { #if 1 __shared__ float tile[BLOCKSIZE+4]; int myId = (blockIdx.x * blockDim.x) + threadIdx.x; tile[threadIdx.x+2] = in[myId]; if (threadIdx.x == 0) { tile[0] = in[myId - 2]; tile[1] = in[myId - 1]; } if (threadIdx.x == (blockDim.x - 1)) { tile[threadIdx.x + 3] = in[myId + 1]; tile[threadIdx.x + 4] = in[myId + 2]; } __syncthreads(); out[myId] = (tile[threadIdx.x] + tile[threadIdx.x + 1] + tile[threadIdx.x+2] + tile[threadIdx.x + 3] + tile[threadIdx.x + 4]) / 5.0f; #else int myId = (blockIdx.x * blockDim.x) + threadIdx.x; out[myId] = (in[myId - 2] + in[myId - 1] + in[myId] + in[myId + 1] + in[myId + 2]) / 5.0f; #endif } void cpuFoo(float out[], float A[], float B[], float C[], float D[], float E[]) { for (int i = 0; i<N; i++) { out[i] = (A[i] + B[i] + C[i] + D[i] + E[i]) / 5.0f; } } void cpuBar(float out[], float in[]) { // ignore the boundaries for (int i = 2; i<N - 2; i++) { out[i] = (in[i - 2] + in[i - 1] + in[i] + in[i + 1] + in[i + 2]) / 5.0f; } } #endif int main(int argc, char **argv) { // declare and fill input arrays for foo() and bar() float fooA[N], fooB[N], fooC[N], fooD[N], fooE[N], barIn[N]; for (int i=0; i<N; i++) { fooA[i] = i; fooB[i] = i+1; fooC[i] = i+2; fooD[i] = i+3; fooE[i] = i+4; barIn[i] = 2*i; } // device arrays int numBytes = N * sizeof(float); float *d_fooA; hipMalloc(&d_fooA, numBytes); float *d_fooB; hipMalloc(&d_fooB, numBytes); float *d_fooC; hipMalloc(&d_fooC, numBytes); float *d_fooD; hipMalloc(&d_fooD, numBytes); float *d_fooE; hipMalloc(&d_fooE, numBytes); float *d_barIn; 
hipMalloc(&d_barIn, numBytes); hipMemcpy(d_fooA, fooA, numBytes, hipMemcpyHostToDevice); hipMemcpy(d_fooB, fooB, numBytes, hipMemcpyHostToDevice); hipMemcpy(d_fooC, fooC, numBytes, hipMemcpyHostToDevice); hipMemcpy(d_fooD, fooD, numBytes, hipMemcpyHostToDevice); hipMemcpy(d_fooE, fooE, numBytes, hipMemcpyHostToDevice); hipMemcpy(d_barIn, barIn, numBytes, hipMemcpyHostToDevice); // output arrays for host and device float fooOut[N], barOut[N], *d_fooOut, *d_barOut; hipMalloc(&d_fooOut, numBytes); hipMalloc(&d_barOut, numBytes); // declare and compute reference solutions float ref_fooOut[N], ref_barOut[N]; cpuFoo(ref_fooOut, fooA, fooB, fooC, fooD, fooE); cpuBar(ref_barOut, barIn); // launch and time foo and bar GpuTimer fooTimer, barTimer; fooTimer.Start(); hipLaunchKernelGGL(( foo), dim3(N/BLOCKSIZE), dim3(BLOCKSIZE), 0, 0, d_fooOut, d_fooA, d_fooB, d_fooC, d_fooD, d_fooE); fooTimer.Stop(); barTimer.Start(); hipLaunchKernelGGL(( bar), dim3(N/BLOCKSIZE), dim3(BLOCKSIZE), 0, 0, d_barOut, d_barIn); barTimer.Stop(); hipMemcpy(fooOut, d_fooOut, numBytes, hipMemcpyDeviceToHost); hipMemcpy(barOut, d_barOut, numBytes, hipMemcpyDeviceToHost); printf("foo<<<>>>(): %g ms elapsed. Verifying solution...", fooTimer.Elapsed()); compareArrays(ref_fooOut, fooOut, N); printf("bar<<<>>>(): %g ms elapsed. Verifying solution...", barTimer.Elapsed()); compareArrays(ref_barOut, barOut, N); }
eb99ffd3893e9fbf83b48c508a09a49f5cb36d6e.cu
#include <stdio.h> #include "gputimer.h" #include "utils.h" const int BLOCKSIZE = 128; //const int NUMBLOCKS = 1000; // set this to 1 or 2 for debugging const int NUMBLOCKS = 2; // set this to 1 or 2 for debugging const int N = BLOCKSIZE*NUMBLOCKS; /* * TODO: modify the foo and bar kernels to use tiling: * - copy the input data to shared memory * - perform the computation there * - copy the result back to global memory * - assume thread blocks of 128 threads * - handle intra-block boundaries correctly * You can ignore boundary conditions (we ignore the first 2 and last 2 elements) */ #if 0 __global__ void foo(float out[], float A[], float B[], float C[], float D[], float E[]){ int i = threadIdx.x + blockIdx.x*blockDim.x; out[i] = (A[i] + B[i] + C[i] + D[i] + E[i]) / 5.0f; } __global__ void bar(float out[], float in[]) { int i = threadIdx.x + blockIdx.x*blockDim.x; out[i] = (in[i-2] + in[i-1] + in[i] + in[i+1] + in[i+2]) / 5.0f; } void cpuFoo(float out[], float A[], float B[], float C[], float D[], float E[]) { for (int i=0; i<N; i++) { out[i] = (A[i] + B[i] + C[i] + D[i] + E[i]) / 5.0f; } } void cpuBar(float out[], float in[]) { // ignore the boundaries for (int i = 2; i<N - 2; i++) { out[i] = (in[i - 2] + in[i - 1] + in[i] + in[i + 1] + in[i + 2]) / 5.0f; } } #else __global__ void foo(float out[], float A[], float B[], float C[], float D[], float E[]) { __shared__ float tileA[BLOCKSIZE]; __shared__ float tileB[BLOCKSIZE]; __shared__ float tileC[BLOCKSIZE]; __shared__ float tileD[BLOCKSIZE]; __shared__ float tileE[BLOCKSIZE]; int i = threadIdx.x + blockIdx.x*blockDim.x; tileA[threadIdx.x] = A[i]; tileB[threadIdx.x] = B[i]; tileC[threadIdx.x] = C[i]; tileD[threadIdx.x] = D[i]; tileE[threadIdx.x] = E[i]; __syncthreads(); out[i] = (tileA[threadIdx.x] + tileB[threadIdx.x] + tileC[threadIdx.x] + tileD[threadIdx.x] + tileE[threadIdx.x]) / 5.0f; } __global__ void bar(float out[], float in[]) { #if 1 __shared__ float tile[BLOCKSIZE+4]; int myId = (blockIdx.x * blockDim.x) + threadIdx.x; tile[threadIdx.x+2] = in[myId]; if (threadIdx.x == 0) { tile[0] = in[myId - 2]; tile[1] = in[myId - 1]; } if (threadIdx.x == (blockDim.x - 1)) { tile[threadIdx.x + 3] = in[myId + 1]; tile[threadIdx.x + 4] = in[myId + 2]; } __syncthreads(); out[myId] = (tile[threadIdx.x] + tile[threadIdx.x + 1] + tile[threadIdx.x+2] + tile[threadIdx.x + 3] + tile[threadIdx.x + 4]) / 5.0f; #else int myId = (blockIdx.x * blockDim.x) + threadIdx.x; out[myId] = (in[myId - 2] + in[myId - 1] + in[myId] + in[myId + 1] + in[myId + 2]) / 5.0f; #endif } void cpuFoo(float out[], float A[], float B[], float C[], float D[], float E[]) { for (int i = 0; i<N; i++) { out[i] = (A[i] + B[i] + C[i] + D[i] + E[i]) / 5.0f; } } void cpuBar(float out[], float in[]) { // ignore the boundaries for (int i = 2; i<N - 2; i++) { out[i] = (in[i - 2] + in[i - 1] + in[i] + in[i + 1] + in[i + 2]) / 5.0f; } } #endif int main(int argc, char **argv) { // declare and fill input arrays for foo() and bar() float fooA[N], fooB[N], fooC[N], fooD[N], fooE[N], barIn[N]; for (int i=0; i<N; i++) { fooA[i] = i; fooB[i] = i+1; fooC[i] = i+2; fooD[i] = i+3; fooE[i] = i+4; barIn[i] = 2*i; } // device arrays int numBytes = N * sizeof(float); float *d_fooA; cudaMalloc(&d_fooA, numBytes); float *d_fooB; cudaMalloc(&d_fooB, numBytes); float *d_fooC; cudaMalloc(&d_fooC, numBytes); float *d_fooD; cudaMalloc(&d_fooD, numBytes); float *d_fooE; cudaMalloc(&d_fooE, numBytes); float *d_barIn; cudaMalloc(&d_barIn, numBytes); cudaMemcpy(d_fooA, fooA, numBytes, cudaMemcpyHostToDevice); 
cudaMemcpy(d_fooB, fooB, numBytes, cudaMemcpyHostToDevice); cudaMemcpy(d_fooC, fooC, numBytes, cudaMemcpyHostToDevice); cudaMemcpy(d_fooD, fooD, numBytes, cudaMemcpyHostToDevice); cudaMemcpy(d_fooE, fooE, numBytes, cudaMemcpyHostToDevice); cudaMemcpy(d_barIn, barIn, numBytes, cudaMemcpyHostToDevice); // output arrays for host and device float fooOut[N], barOut[N], *d_fooOut, *d_barOut; cudaMalloc(&d_fooOut, numBytes); cudaMalloc(&d_barOut, numBytes); // declare and compute reference solutions float ref_fooOut[N], ref_barOut[N]; cpuFoo(ref_fooOut, fooA, fooB, fooC, fooD, fooE); cpuBar(ref_barOut, barIn); // launch and time foo and bar GpuTimer fooTimer, barTimer; fooTimer.Start(); foo<<<N/BLOCKSIZE, BLOCKSIZE>>>(d_fooOut, d_fooA, d_fooB, d_fooC, d_fooD, d_fooE); fooTimer.Stop(); barTimer.Start(); bar<<<N/BLOCKSIZE, BLOCKSIZE>>>(d_barOut, d_barIn); barTimer.Stop(); cudaMemcpy(fooOut, d_fooOut, numBytes, cudaMemcpyDeviceToHost); cudaMemcpy(barOut, d_barOut, numBytes, cudaMemcpyDeviceToHost); printf("foo<<<>>>(): %g ms elapsed. Verifying solution...", fooTimer.Elapsed()); compareArrays(ref_fooOut, fooOut, N); printf("bar<<<>>>(): %g ms elapsed. Verifying solution...", barTimer.Elapsed()); compareArrays(ref_barOut, barOut, N); }
54d02658e7fc483ae7ee1bb55f707385c8e0f8ff.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/RReLU.cu" #else #include <THHUNN/common.h> void THNN_(RReLU_updateOutput)( THCState *state, THCTensor *input, THCTensor *output, THCTensor *noise, double lower, double upper, bool train, bool inplace, void *generator) { THCUNN_assertSameGPU(state, 3, input, output, noise); struct hiprandStateMtgp32_t* gen_states = THCRandom_generatorStates(state); if (train) { input = THCTensor_(newContiguous)(state, input); THCTensor_(resizeAs)(state, noise, input); scalar_t *input_data = THCTensor_(data)(state, input); scalar_t *noise_data = THCTensor_(data)(state, noise); ptrdiff_t n = THCTensor_(nElement)(state, input); if (inplace) { hipLaunchKernelGGL(( rreluUpdateOutputTrain), dim3(NUM_BLOCKS(n)), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state), n, gen_states, input_data, noise_data, input_data, lower, upper); THCTensor_(set)(state, output, input); } else { THCTensor_(resizeAs)(state, output, input); scalar_t *output_data = THCTensor_(data)(state, output); hipLaunchKernelGGL(( rreluUpdateOutputTrain), dim3(NUM_BLOCKS(n)), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state), n, gen_states, input_data, noise_data, output_data, lower, upper); } THCudaCheck(hipGetLastError()); THCTensor_(free)(state, input); } else { const scalar_t negSlope = ScalarConvert<double, scalar_t>::to((lower + upper) / 2); if (inplace) { THC_pointwiseApply1<scalar_t>(state, input, RReLUUpdateOutputEvalIP_functor<scalar_t>(negSlope)); THCTensor_(set)(state, output, input); } else { THCTensor_(resizeAs)(state, output, input); THC_pointwiseApply2<scalar_t, scalar_t>(state, output, input, RReLUUpdateOutputEval_functor<scalar_t>(negSlope)); } } } void THNN_(RReLU_updateGradInput)( THCState *state, THCTensor *input, THCTensor *gradOutput, THCTensor *gradInput, THCTensor *noise, double lower, double upper, bool train, bool inplace) { THCUNN_check_nElement(state, input, gradOutput); THCUNN_assertSameGPU(state, 4, input, gradOutput, gradInput, noise); gradOutput = THCTensor_(newContiguous)(state, gradOutput); if (train && upper - lower > 1E-6) // e.g. if upper == lower, RReLU behaves like LeakyReLU { // multiply the gradient by the noise tensor if (inplace) { THCTensor_(cmul)(state, gradOutput, gradOutput, noise); THCTensor_(set)(state, gradInput, gradOutput); } else { THCTensor_(resizeAs)(state, gradInput, input); THCTensor_(cmul)(state, gradInput, gradOutput, noise); } } else { // use constant factor for negative input values const scalar_t negSlope = ScalarConvert<double, scalar_t>::to((lower + upper) / 2); if (inplace) { THC_pointwiseApply2<scalar_t, scalar_t>(state, gradOutput, input, RReLUupdateGradInputEvalIP_functor<scalar_t>(negSlope)); THCTensor_(set)(state, gradInput, gradOutput); } else { THCTensor_(resizeAs)(state, gradInput, input); THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, gradInput, gradOutput, input, RReLUupdateGradInputEval_functor<scalar_t>(negSlope)); } } THCTensor_(free)(state, gradOutput); } #endif
54d02658e7fc483ae7ee1bb55f707385c8e0f8ff.cu
#ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/RReLU.cu" #else #include <THCUNN/common.h> void THNN_(RReLU_updateOutput)( THCState *state, THCTensor *input, THCTensor *output, THCTensor *noise, double lower, double upper, bool train, bool inplace, void *generator) { THCUNN_assertSameGPU(state, 3, input, output, noise); struct curandStateMtgp32* gen_states = THCRandom_generatorStates(state); if (train) { input = THCTensor_(newContiguous)(state, input); THCTensor_(resizeAs)(state, noise, input); scalar_t *input_data = THCTensor_(data)(state, input); scalar_t *noise_data = THCTensor_(data)(state, noise); ptrdiff_t n = THCTensor_(nElement)(state, input); if (inplace) { rreluUpdateOutputTrain<<<NUM_BLOCKS(n), BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>( n, gen_states, input_data, noise_data, input_data, lower, upper); THCTensor_(set)(state, output, input); } else { THCTensor_(resizeAs)(state, output, input); scalar_t *output_data = THCTensor_(data)(state, output); rreluUpdateOutputTrain<<<NUM_BLOCKS(n), BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>( n, gen_states, input_data, noise_data, output_data, lower, upper); } THCudaCheck(cudaGetLastError()); THCTensor_(free)(state, input); } else { const scalar_t negSlope = ScalarConvert<double, scalar_t>::to((lower + upper) / 2); if (inplace) { THC_pointwiseApply1<scalar_t>(state, input, RReLUUpdateOutputEvalIP_functor<scalar_t>(negSlope)); THCTensor_(set)(state, output, input); } else { THCTensor_(resizeAs)(state, output, input); THC_pointwiseApply2<scalar_t, scalar_t>(state, output, input, RReLUUpdateOutputEval_functor<scalar_t>(negSlope)); } } } void THNN_(RReLU_updateGradInput)( THCState *state, THCTensor *input, THCTensor *gradOutput, THCTensor *gradInput, THCTensor *noise, double lower, double upper, bool train, bool inplace) { THCUNN_check_nElement(state, input, gradOutput); THCUNN_assertSameGPU(state, 4, input, gradOutput, gradInput, noise); gradOutput = THCTensor_(newContiguous)(state, gradOutput); if (train && upper - lower > 1E-6) // e.g. if upper == lower, RReLU behaves like LeakyReLU { // multiply the gradient by the noise tensor if (inplace) { THCTensor_(cmul)(state, gradOutput, gradOutput, noise); THCTensor_(set)(state, gradInput, gradOutput); } else { THCTensor_(resizeAs)(state, gradInput, input); THCTensor_(cmul)(state, gradInput, gradOutput, noise); } } else { // use constant factor for negative input values const scalar_t negSlope = ScalarConvert<double, scalar_t>::to((lower + upper) / 2); if (inplace) { THC_pointwiseApply2<scalar_t, scalar_t>(state, gradOutput, input, RReLUupdateGradInputEvalIP_functor<scalar_t>(negSlope)); THCTensor_(set)(state, gradInput, gradOutput); } else { THCTensor_(resizeAs)(state, gradInput, input); THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, gradInput, gradOutput, input, RReLUupdateGradInputEval_functor<scalar_t>(negSlope)); } } THCTensor_(free)(state, gradOutput); } #endif
66fbddb16f9c71c738508f061fb5cb0dade09638.hip
// !!! This is a file automatically generated by hipify!!! // // Created by root on 23/03/2020. // #include "Matrix_hip.cuh" Matrix::Matrix(int cols, int rows, double *v) { Columns = cols; Rows = rows; Value = v; } void Matrix::print(){ printf("-------------\n"); for(int i = 0; i<(this->Rows*this->Columns); i++){ printf("%lf ", Value[i]); if((i+1)%this->Columns == 0){ std::cout << std::endl; } } printf("-------------\n"); }
66fbddb16f9c71c738508f061fb5cb0dade09638.cu
// // Created by root on 23/03/2020. // #include "Matrix.cuh" Matrix::Matrix(int cols, int rows, double *v) { Columns = cols; Rows = rows; Value = v; } void Matrix::print(){ printf("-------------\n"); for(int i = 0; i<(this->Rows*this->Columns); i++){ printf("%lf ", Value[i]); if((i+1)%this->Columns == 0){ std::cout << std::endl; } } printf("-------------\n"); }
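A small host-side usage sketch for the Matrix class above; it assumes only the constructor and print() shown in the file, and the example values are arbitrary.

#include "Matrix.cuh"

int main()
{
    // flat row-major storage: 2 rows x 3 columns
    double v[6] = {1.0, 2.0, 3.0,
                   4.0, 5.0, 6.0};

    Matrix m(3, 2, v);   // (cols, rows, values), matching the constructor above
    m.print();           // prints two rows of three values each
    return 0;
}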
63c208f3afb47101ab02b7ecf001403682b05c7d.hip
// !!! This is a file automatically generated by hipify!!! #include "../common/common.h" #include <hip/hip_runtime.h> #include <stdio.h> /* * This code implements the interleaved and neighbor-paired approaches to * parallel reduction in CUDA. For this example, the sum operation is used. A * variety of optimizations on parallel reduction aimed at reducing divergence * are also demonstrated, such as unrolling. */ // Recursive Implementation of Interleaved Pair Approach int recursiveReduce(int *data, int const size) { // terminate check if (size == 1) return data[0]; // renew the stride int const stride = size / 2; // in-place reduction for (int i = 0; i < stride; i++) { data[i] += data[i + stride]; } // call recursively return recursiveReduce(data, stride); } // Neighbored Pair Implementation with divergence __global__ void reduceNeighbored (int *g_idata, int *g_odata, unsigned int n) { // set thread ID unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; // convert global data pointer to the local pointer of this block int *idata = g_idata + blockIdx.x * blockDim.x; // boundary check if (idx >= n) return; // in-place reduction in global memory for (int stride = 1; stride < blockDim.x; stride *= 2) { if ((tid % (2 * stride)) == 0) { idata[tid] += idata[tid + stride]; } // synchronize within threadblock __syncthreads(); } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = idata[0]; } // Neighbored Pair Implementation with less divergence __global__ void reduceNeighboredLess (int *g_idata, int *g_odata, unsigned int n) { // set thread ID unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; // convert global data pointer to the local pointer of this block int *idata = g_idata + blockIdx.x * blockDim.x; // boundary check if(idx >= n) return; // in-place reduction in global memory for (int stride = 1; stride < blockDim.x; stride *= 2) { // convert tid into local array index int index = 2 * stride * tid; if (index < blockDim.x) { idata[index] += idata[index + stride]; } // synchronize within threadblock __syncthreads(); } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = idata[0]; } // Interleaved Pair Implementation with less divergence __global__ void reduceInterleaved (int *g_idata, int *g_odata, unsigned int n) { // set thread ID unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; // convert global data pointer to the local pointer of this block int *idata = g_idata + blockIdx.x * blockDim.x; // boundary check if(idx >= n) return; // in-place reduction in global memory for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) { if (tid < stride) { idata[tid] += idata[tid + stride]; } __syncthreads(); } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = idata[0]; } __global__ void reduceUnrolling2 (int *g_idata, int *g_odata, unsigned int n) { // set thread ID unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x * 2 + threadIdx.x; // convert global data pointer to the local pointer of this block int *idata = g_idata + blockIdx.x * blockDim.x * 2; // unrolling 2 if (idx + blockDim.x < n) g_idata[idx] += g_idata[idx + blockDim.x]; __syncthreads(); // in-place reduction in global memory for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) { if (tid < stride) { idata[tid] += idata[tid + stride]; } // synchronize within threadblock __syncthreads(); } // write result for 
this block to global mem if (tid == 0) g_odata[blockIdx.x] = idata[0]; } __global__ void reduceUnrolling4 (int *g_idata, int *g_odata, unsigned int n) { // set thread ID unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x * 4 + threadIdx.x; // convert global data pointer to the local pointer of this block int *idata = g_idata + blockIdx.x * blockDim.x * 4; // unrolling 4 if (idx + 3 * blockDim.x < n) { int a1 = g_idata[idx]; int a2 = g_idata[idx + blockDim.x]; int a3 = g_idata[idx + 2 * blockDim.x]; int a4 = g_idata[idx + 3 * blockDim.x]; g_idata[idx] = a1 + a2 + a3 + a4; } __syncthreads(); // in-place reduction in global memory for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) { if (tid < stride) { idata[tid] += idata[tid + stride]; } // synchronize within threadblock __syncthreads(); } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = idata[0]; } __global__ void reduceUnrolling8 (int *g_idata, int *g_odata, unsigned int n) { // set thread ID unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x * 8 + threadIdx.x; // convert global data pointer to the local pointer of this block int *idata = g_idata + blockIdx.x * blockDim.x * 8; // unrolling 8 if (idx + 7 * blockDim.x < n) { int a1 = g_idata[idx]; int a2 = g_idata[idx + blockDim.x]; int a3 = g_idata[idx + 2 * blockDim.x]; int a4 = g_idata[idx + 3 * blockDim.x]; int b1 = g_idata[idx + 4 * blockDim.x]; int b2 = g_idata[idx + 5 * blockDim.x]; int b3 = g_idata[idx + 6 * blockDim.x]; int b4 = g_idata[idx + 7 * blockDim.x]; g_idata[idx] = a1 + a2 + a3 + a4 + b1 + b2 + b3 + b4; } __syncthreads(); // in-place reduction in global memory for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) { if (tid < stride) { idata[tid] += idata[tid + stride]; } // synchronize within threadblock __syncthreads(); } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = idata[0]; } __global__ void reduceUnrollWarps8 (int *g_idata, int *g_odata, unsigned int n) { // set thread ID unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x * 8 + threadIdx.x; // convert global data pointer to the local pointer of this block int *idata = g_idata + blockIdx.x * blockDim.x * 8; // unrolling 8 if (idx + 7 * blockDim.x < n) { int a1 = g_idata[idx]; int a2 = g_idata[idx + blockDim.x]; int a3 = g_idata[idx + 2 * blockDim.x]; int a4 = g_idata[idx + 3 * blockDim.x]; int b1 = g_idata[idx + 4 * blockDim.x]; int b2 = g_idata[idx + 5 * blockDim.x]; int b3 = g_idata[idx + 6 * blockDim.x]; int b4 = g_idata[idx + 7 * blockDim.x]; g_idata[idx] = a1 + a2 + a3 + a4 + b1 + b2 + b3 + b4; } __syncthreads(); // in-place reduction in global memory for (int stride = blockDim.x / 2; stride > 32; stride >>= 1) { if (tid < stride) { idata[tid] += idata[tid + stride]; } // synchronize within threadblock __syncthreads(); } // unrolling warp if (tid < 32) { volatile int *vmem = idata; vmem[tid] += vmem[tid + 32]; vmem[tid] += vmem[tid + 16]; vmem[tid] += vmem[tid + 8]; vmem[tid] += vmem[tid + 4]; vmem[tid] += vmem[tid + 2]; vmem[tid] += vmem[tid + 1]; } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = idata[0]; } __global__ void reduceCompleteUnrollWarps8 (int *g_idata, int *g_odata, unsigned int n) { // set thread ID unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x * 8 + threadIdx.x; // convert global data pointer to the local pointer of this block int *idata = g_idata + blockIdx.x * blockDim.x * 8; // 
unrolling 8 if (idx + 7 * blockDim.x < n) { int a1 = g_idata[idx]; int a2 = g_idata[idx + blockDim.x]; int a3 = g_idata[idx + 2 * blockDim.x]; int a4 = g_idata[idx + 3 * blockDim.x]; int b1 = g_idata[idx + 4 * blockDim.x]; int b2 = g_idata[idx + 5 * blockDim.x]; int b3 = g_idata[idx + 6 * blockDim.x]; int b4 = g_idata[idx + 7 * blockDim.x]; g_idata[idx] = a1 + a2 + a3 + a4 + b1 + b2 + b3 + b4; } __syncthreads(); // in-place reduction and complete unroll if (blockDim.x >= 1024 && tid < 512) idata[tid] += idata[tid + 512]; __syncthreads(); if (blockDim.x >= 512 && tid < 256) idata[tid] += idata[tid + 256]; __syncthreads(); if (blockDim.x >= 256 && tid < 128) idata[tid] += idata[tid + 128]; __syncthreads(); if (blockDim.x >= 128 && tid < 64) idata[tid] += idata[tid + 64]; __syncthreads(); // unrolling warp if (tid < 32) { volatile int *vsmem = idata; vsmem[tid] += vsmem[tid + 32]; vsmem[tid] += vsmem[tid + 16]; vsmem[tid] += vsmem[tid + 8]; vsmem[tid] += vsmem[tid + 4]; vsmem[tid] += vsmem[tid + 2]; vsmem[tid] += vsmem[tid + 1]; } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = idata[0]; } template <unsigned int iBlockSize> __global__ void reduceCompleteUnroll(int *g_idata, int *g_odata, unsigned int n) { // set thread ID unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x * 8 + threadIdx.x; // convert global data pointer to the local pointer of this block int *idata = g_idata + blockIdx.x * blockDim.x * 8; // unrolling 8 if (idx + 7 * blockDim.x < n) { int a1 = g_idata[idx]; int a2 = g_idata[idx + blockDim.x]; int a3 = g_idata[idx + 2 * blockDim.x]; int a4 = g_idata[idx + 3 * blockDim.x]; int b1 = g_idata[idx + 4 * blockDim.x]; int b2 = g_idata[idx + 5 * blockDim.x]; int b3 = g_idata[idx + 6 * blockDim.x]; int b4 = g_idata[idx + 7 * blockDim.x]; g_idata[idx] = a1 + a2 + a3 + a4 + b1 + b2 + b3 + b4; } __syncthreads(); // in-place reduction and complete unroll if (iBlockSize >= 1024 && tid < 512) idata[tid] += idata[tid + 512]; __syncthreads(); if (iBlockSize >= 512 && tid < 256) idata[tid] += idata[tid + 256]; __syncthreads(); if (iBlockSize >= 256 && tid < 128) idata[tid] += idata[tid + 128]; __syncthreads(); if (iBlockSize >= 128 && tid < 64) idata[tid] += idata[tid + 64]; __syncthreads(); // unrolling warp if (tid < 32) { volatile int *vsmem = idata; vsmem[tid] += vsmem[tid + 32]; vsmem[tid] += vsmem[tid + 16]; vsmem[tid] += vsmem[tid + 8]; vsmem[tid] += vsmem[tid + 4]; vsmem[tid] += vsmem[tid + 2]; vsmem[tid] += vsmem[tid + 1]; } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = idata[0]; } __global__ void reduceUnrollWarps (int *g_idata, int *g_odata, unsigned int n) { // set thread ID unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x * 2 + threadIdx.x; // convert global data pointer to the local pointer of this block int *idata = g_idata + blockIdx.x * blockDim.x * 2; // unrolling 2 if (idx + blockDim.x < n) g_idata[idx] += g_idata[idx + blockDim.x]; __syncthreads(); // in-place reduction in global memory for (int stride = blockDim.x / 2; stride > 32; stride >>= 1) { if (tid < stride) { idata[tid] += idata[tid + stride]; } // synchronize within threadblock __syncthreads(); } // unrolling last warp if (tid < 32) { volatile int *vsmem = idata; vsmem[tid] += vsmem[tid + 32]; vsmem[tid] += vsmem[tid + 16]; vsmem[tid] += vsmem[tid + 8]; vsmem[tid] += vsmem[tid + 4]; vsmem[tid] += vsmem[tid + 2]; vsmem[tid] += vsmem[tid + 1]; } if (tid == 0) g_odata[blockIdx.x] = idata[0]; } int 
main(int argc, char **argv) { // set up device int dev = 0; hipDeviceProp_t deviceProp; CHECK(hipGetDeviceProperties(&deviceProp, dev)); printf("%s starting reduction at ", argv[0]); printf("device %d: %s ", dev, deviceProp.name); CHECK(hipSetDevice(dev)); bool bResult = false; // initialization int size = 1 << 24; // total number of elements to reduce printf(" with array size %d ", size); // execution configuration int blocksize = 512; // initial block size if(argc > 1) { blocksize = atoi(argv[1]); // block size from command line argument } dim3 block (blocksize, 1); dim3 grid ((size + block.x - 1) / block.x, 1); printf("grid %d block %d\n", grid.x, block.x); // allocate host memory size_t bytes = size * sizeof(int); int *h_idata = (int *) malloc(bytes); int *h_odata = (int *) malloc(grid.x * sizeof(int)); int *tmp = (int *) malloc(bytes); // initialize the array for (int i = 0; i < size; i++) { // mask off high 2 bytes to force max number to 255 h_idata[i] = (int)( rand() & 0xFF ); //h_idata[i] = (int)( rand() & 0x1 ); } memcpy (tmp, h_idata, bytes); double iStart, iElaps; int gpu_sum = 0; // allocate device memory int *d_idata = NULL; int *d_odata = NULL; CHECK(hipMalloc((void **) &d_idata, bytes)); CHECK(hipMalloc((void **) &d_odata, grid.x * sizeof(int))); // cpu reduction iStart = seconds(); int cpu_sum = recursiveReduce (tmp, size); iElaps = seconds() - iStart; printf("cpu reduce elapsed %f sec cpu_sum: %d\n", iElaps, cpu_sum); // kernel 1: reduceNeighbored CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice)); CHECK(hipDeviceSynchronize()); iStart = seconds(); hipLaunchKernelGGL(( reduceNeighbored), dim3(grid), dim3(block), 0, 0, d_idata, d_odata, size); CHECK(hipDeviceSynchronize()); iElaps = seconds() - iStart; CHECK(hipMemcpy(h_odata, d_odata, grid.x * sizeof(int), hipMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i]; printf("gpu Neighbored elapsed %f sec gpu_sum: %d <<<grid %d block " "%d>>>\n", iElaps, gpu_sum, grid.x, block.x); // kernel 2: reduceNeighbored with less divergence CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice)); CHECK(hipDeviceSynchronize()); iStart = seconds(); hipLaunchKernelGGL(( reduceNeighboredLess), dim3(grid), dim3(block), 0, 0, d_idata, d_odata, size); CHECK(hipDeviceSynchronize()); iElaps = seconds() - iStart; CHECK(hipMemcpy(h_odata, d_odata, grid.x * sizeof(int), hipMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i]; printf("gpu Neighbored2 elapsed %f sec gpu_sum: %d <<<grid %d block " "%d>>>\n", iElaps, gpu_sum, grid.x, block.x); // kernel 3: reduceInterleaved CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice)); CHECK(hipDeviceSynchronize()); iStart = seconds(); hipLaunchKernelGGL(( reduceInterleaved), dim3(grid), dim3(block), 0, 0, d_idata, d_odata, size); CHECK(hipDeviceSynchronize()); iElaps = seconds() - iStart; CHECK(hipMemcpy(h_odata, d_odata, grid.x * sizeof(int), hipMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i]; printf("gpu Interleaved elapsed %f sec gpu_sum: %d <<<grid %d block " "%d>>>\n", iElaps, gpu_sum, grid.x, block.x); // kernel 4: reduceUnrolling2 CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice)); CHECK(hipDeviceSynchronize()); iStart = seconds(); hipLaunchKernelGGL(( reduceUnrolling2), dim3(grid.x / 2), dim3(block), 0, 0, d_idata, d_odata, size); CHECK(hipDeviceSynchronize()); iElaps = seconds() - iStart; CHECK(hipMemcpy(h_odata, d_odata, grid.x / 2 * 
sizeof(int), hipMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x / 2; i++) gpu_sum += h_odata[i]; printf("gpu Unrolling2 elapsed %f sec gpu_sum: %d <<<grid %d block " "%d>>>\n", iElaps, gpu_sum, grid.x / 2, block.x); // kernel 5: reduceUnrolling4 CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice)); CHECK(hipDeviceSynchronize()); iStart = seconds(); hipLaunchKernelGGL(( reduceUnrolling4), dim3(grid.x / 4), dim3(block), 0, 0, d_idata, d_odata, size); CHECK(hipDeviceSynchronize()); iElaps = seconds() - iStart; CHECK(hipMemcpy(h_odata, d_odata, grid.x / 4 * sizeof(int), hipMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x / 4; i++) gpu_sum += h_odata[i]; printf("gpu Unrolling4 elapsed %f sec gpu_sum: %d <<<grid %d block " "%d>>>\n", iElaps, gpu_sum, grid.x / 4, block.x); // kernel 6: reduceUnrolling8 CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice)); CHECK(hipDeviceSynchronize()); iStart = seconds(); hipLaunchKernelGGL(( reduceUnrolling8), dim3(grid.x / 8), dim3(block), 0, 0, d_idata, d_odata, size); CHECK(hipDeviceSynchronize()); iElaps = seconds() - iStart; CHECK(hipMemcpy(h_odata, d_odata, grid.x / 8 * sizeof(int), hipMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x / 8; i++) gpu_sum += h_odata[i]; printf("gpu Unrolling8 elapsed %f sec gpu_sum: %d <<<grid %d block " "%d>>>\n", iElaps, gpu_sum, grid.x / 8, block.x); for (int i = 0; i < grid.x / 16; i++) gpu_sum += h_odata[i]; // kernel 8: reduceUnrollWarps8 CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice)); CHECK(hipDeviceSynchronize()); iStart = seconds(); hipLaunchKernelGGL(( reduceUnrollWarps8), dim3(grid.x / 8), dim3(block), 0, 0, d_idata, d_odata, size); CHECK(hipDeviceSynchronize()); iElaps = seconds() - iStart; CHECK(hipMemcpy(h_odata, d_odata, grid.x / 8 * sizeof(int), hipMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x / 8; i++) gpu_sum += h_odata[i]; printf("gpu UnrollWarp8 elapsed %f sec gpu_sum: %d <<<grid %d block " "%d>>>\n", iElaps, gpu_sum, grid.x / 8, block.x); // kernel 9: reduceCompleteUnrollWarsp8 CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice)); CHECK(hipDeviceSynchronize()); iStart = seconds(); hipLaunchKernelGGL(( reduceCompleteUnrollWarps8), dim3(grid.x / 8), dim3(block), 0, 0, d_idata, d_odata, size); CHECK(hipDeviceSynchronize()); iElaps = seconds() - iStart; CHECK(hipMemcpy(h_odata, d_odata, grid.x / 8 * sizeof(int), hipMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x / 8; i++) gpu_sum += h_odata[i]; printf("gpu Cmptnroll8 elapsed %f sec gpu_sum: %d <<<grid %d block " "%d>>>\n", iElaps, gpu_sum, grid.x / 8, block.x); // kernel 9: reduceCompleteUnroll CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice)); CHECK(hipDeviceSynchronize()); iStart = seconds(); switch (blocksize) { case 1024: hipLaunchKernelGGL(( reduceCompleteUnroll<1024>), dim3(grid.x / 8), dim3(block), 0, 0, d_idata, d_odata, size); break; case 512: hipLaunchKernelGGL(( reduceCompleteUnroll<512>), dim3(grid.x / 8), dim3(block), 0, 0, d_idata, d_odata, size); break; case 256: hipLaunchKernelGGL(( reduceCompleteUnroll<256>), dim3(grid.x / 8), dim3(block), 0, 0, d_idata, d_odata, size); break; case 128: hipLaunchKernelGGL(( reduceCompleteUnroll<128>), dim3(grid.x / 8), dim3(block), 0, 0, d_idata, d_odata, size); break; case 64: hipLaunchKernelGGL(( reduceCompleteUnroll<64>), dim3(grid.x / 8), dim3(block), 0, 0, d_idata, d_odata, size); break; } CHECK(hipDeviceSynchronize()); iElaps = seconds() - iStart; 
CHECK(hipMemcpy(h_odata, d_odata, grid.x / 8 * sizeof(int), hipMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x / 8; i++) gpu_sum += h_odata[i]; printf("gpu Cmptnroll elapsed %f sec gpu_sum: %d <<<grid %d block " "%d>>>\n", iElaps, gpu_sum, grid.x / 8, block.x); // free host memory free(h_idata); free(h_odata); // free device memory CHECK(hipFree(d_idata)); CHECK(hipFree(d_odata)); // reset device CHECK(hipDeviceReset()); // check the results bResult = (gpu_sum == cpu_sum); if(!bResult) printf("Test failed!\n"); return EXIT_SUCCESS; }
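The hipify output above rewrites every triple-chevron kernel launch as a hipLaunchKernelGGL macro call. A minimal, self-contained sketch of that correspondence (the kernel and buffer names below are illustrative, not taken from the file above):

#include <hip/hip_runtime.h>
#include <stdio.h>

__global__ void scaleKernel(float *data, float factor, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) data[i] *= factor;
}

int main()
{
    const int n = 1 << 20;
    float *d_data = NULL;
    hipMalloc((void **)&d_data, n * sizeof(float));
    hipMemset(d_data, 0, n * sizeof(float));

    dim3 block(256);
    dim3 grid((n + block.x - 1) / block.x);

    // CUDA source form:  scaleKernel<<<grid, block>>>(d_data, 2.0f, n);
    // hipify macro form (grid, block, dynamic shared bytes, stream, kernel args...):
    hipLaunchKernelGGL(scaleKernel, grid, block, 0, 0, d_data, 2.0f, n);

    hipDeviceSynchronize();
    hipFree(d_data);
    return 0;
}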
63c208f3afb47101ab02b7ecf001403682b05c7d.cu
#include "../common/common.h" #include <cuda_runtime.h> #include <stdio.h> /* * This code implements the interleaved and neighbor-paired approaches to * parallel reduction in CUDA. For this example, the sum operation is used. A * variety of optimizations on parallel reduction aimed at reducing divergence * are also demonstrated, such as unrolling. */ // Recursive Implementation of Interleaved Pair Approach int recursiveReduce(int *data, int const size) { // terminate check if (size == 1) return data[0]; // renew the stride int const stride = size / 2; // in-place reduction for (int i = 0; i < stride; i++) { data[i] += data[i + stride]; } // call recursively return recursiveReduce(data, stride); } // Neighbored Pair Implementation with divergence __global__ void reduceNeighbored (int *g_idata, int *g_odata, unsigned int n) { // set thread ID unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; // convert global data pointer to the local pointer of this block int *idata = g_idata + blockIdx.x * blockDim.x; // boundary check if (idx >= n) return; // in-place reduction in global memory for (int stride = 1; stride < blockDim.x; stride *= 2) { if ((tid % (2 * stride)) == 0) { idata[tid] += idata[tid + stride]; } // synchronize within threadblock __syncthreads(); } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = idata[0]; } // Neighbored Pair Implementation with less divergence __global__ void reduceNeighboredLess (int *g_idata, int *g_odata, unsigned int n) { // set thread ID unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; // convert global data pointer to the local pointer of this block int *idata = g_idata + blockIdx.x * blockDim.x; // boundary check if(idx >= n) return; // in-place reduction in global memory for (int stride = 1; stride < blockDim.x; stride *= 2) { // convert tid into local array index int index = 2 * stride * tid; if (index < blockDim.x) { idata[index] += idata[index + stride]; } // synchronize within threadblock __syncthreads(); } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = idata[0]; } // Interleaved Pair Implementation with less divergence __global__ void reduceInterleaved (int *g_idata, int *g_odata, unsigned int n) { // set thread ID unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; // convert global data pointer to the local pointer of this block int *idata = g_idata + blockIdx.x * blockDim.x; // boundary check if(idx >= n) return; // in-place reduction in global memory for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) { if (tid < stride) { idata[tid] += idata[tid + stride]; } __syncthreads(); } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = idata[0]; } __global__ void reduceUnrolling2 (int *g_idata, int *g_odata, unsigned int n) { // set thread ID unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x * 2 + threadIdx.x; // convert global data pointer to the local pointer of this block int *idata = g_idata + blockIdx.x * blockDim.x * 2; // unrolling 2 if (idx + blockDim.x < n) g_idata[idx] += g_idata[idx + blockDim.x]; __syncthreads(); // in-place reduction in global memory for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) { if (tid < stride) { idata[tid] += idata[tid + stride]; } // synchronize within threadblock __syncthreads(); } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = 
idata[0]; } __global__ void reduceUnrolling4 (int *g_idata, int *g_odata, unsigned int n) { // set thread ID unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x * 4 + threadIdx.x; // convert global data pointer to the local pointer of this block int *idata = g_idata + blockIdx.x * blockDim.x * 4; // unrolling 4 if (idx + 3 * blockDim.x < n) { int a1 = g_idata[idx]; int a2 = g_idata[idx + blockDim.x]; int a3 = g_idata[idx + 2 * blockDim.x]; int a4 = g_idata[idx + 3 * blockDim.x]; g_idata[idx] = a1 + a2 + a3 + a4; } __syncthreads(); // in-place reduction in global memory for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) { if (tid < stride) { idata[tid] += idata[tid + stride]; } // synchronize within threadblock __syncthreads(); } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = idata[0]; } __global__ void reduceUnrolling8 (int *g_idata, int *g_odata, unsigned int n) { // set thread ID unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x * 8 + threadIdx.x; // convert global data pointer to the local pointer of this block int *idata = g_idata + blockIdx.x * blockDim.x * 8; // unrolling 8 if (idx + 7 * blockDim.x < n) { int a1 = g_idata[idx]; int a2 = g_idata[idx + blockDim.x]; int a3 = g_idata[idx + 2 * blockDim.x]; int a4 = g_idata[idx + 3 * blockDim.x]; int b1 = g_idata[idx + 4 * blockDim.x]; int b2 = g_idata[idx + 5 * blockDim.x]; int b3 = g_idata[idx + 6 * blockDim.x]; int b4 = g_idata[idx + 7 * blockDim.x]; g_idata[idx] = a1 + a2 + a3 + a4 + b1 + b2 + b3 + b4; } __syncthreads(); // in-place reduction in global memory for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) { if (tid < stride) { idata[tid] += idata[tid + stride]; } // synchronize within threadblock __syncthreads(); } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = idata[0]; } __global__ void reduceUnrollWarps8 (int *g_idata, int *g_odata, unsigned int n) { // set thread ID unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x * 8 + threadIdx.x; // convert global data pointer to the local pointer of this block int *idata = g_idata + blockIdx.x * blockDim.x * 8; // unrolling 8 if (idx + 7 * blockDim.x < n) { int a1 = g_idata[idx]; int a2 = g_idata[idx + blockDim.x]; int a3 = g_idata[idx + 2 * blockDim.x]; int a4 = g_idata[idx + 3 * blockDim.x]; int b1 = g_idata[idx + 4 * blockDim.x]; int b2 = g_idata[idx + 5 * blockDim.x]; int b3 = g_idata[idx + 6 * blockDim.x]; int b4 = g_idata[idx + 7 * blockDim.x]; g_idata[idx] = a1 + a2 + a3 + a4 + b1 + b2 + b3 + b4; } __syncthreads(); // in-place reduction in global memory for (int stride = blockDim.x / 2; stride > 32; stride >>= 1) { if (tid < stride) { idata[tid] += idata[tid + stride]; } // synchronize within threadblock __syncthreads(); } // unrolling warp if (tid < 32) { volatile int *vmem = idata; vmem[tid] += vmem[tid + 32]; vmem[tid] += vmem[tid + 16]; vmem[tid] += vmem[tid + 8]; vmem[tid] += vmem[tid + 4]; vmem[tid] += vmem[tid + 2]; vmem[tid] += vmem[tid + 1]; } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = idata[0]; } __global__ void reduceCompleteUnrollWarps8 (int *g_idata, int *g_odata, unsigned int n) { // set thread ID unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x * 8 + threadIdx.x; // convert global data pointer to the local pointer of this block int *idata = g_idata + blockIdx.x * blockDim.x * 8; // unrolling 8 if (idx + 7 * blockDim.x < n) { int a1 = g_idata[idx]; 
int a2 = g_idata[idx + blockDim.x]; int a3 = g_idata[idx + 2 * blockDim.x]; int a4 = g_idata[idx + 3 * blockDim.x]; int b1 = g_idata[idx + 4 * blockDim.x]; int b2 = g_idata[idx + 5 * blockDim.x]; int b3 = g_idata[idx + 6 * blockDim.x]; int b4 = g_idata[idx + 7 * blockDim.x]; g_idata[idx] = a1 + a2 + a3 + a4 + b1 + b2 + b3 + b4; } __syncthreads(); // in-place reduction and complete unroll if (blockDim.x >= 1024 && tid < 512) idata[tid] += idata[tid + 512]; __syncthreads(); if (blockDim.x >= 512 && tid < 256) idata[tid] += idata[tid + 256]; __syncthreads(); if (blockDim.x >= 256 && tid < 128) idata[tid] += idata[tid + 128]; __syncthreads(); if (blockDim.x >= 128 && tid < 64) idata[tid] += idata[tid + 64]; __syncthreads(); // unrolling warp if (tid < 32) { volatile int *vsmem = idata; vsmem[tid] += vsmem[tid + 32]; vsmem[tid] += vsmem[tid + 16]; vsmem[tid] += vsmem[tid + 8]; vsmem[tid] += vsmem[tid + 4]; vsmem[tid] += vsmem[tid + 2]; vsmem[tid] += vsmem[tid + 1]; } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = idata[0]; } template <unsigned int iBlockSize> __global__ void reduceCompleteUnroll(int *g_idata, int *g_odata, unsigned int n) { // set thread ID unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x * 8 + threadIdx.x; // convert global data pointer to the local pointer of this block int *idata = g_idata + blockIdx.x * blockDim.x * 8; // unrolling 8 if (idx + 7 * blockDim.x < n) { int a1 = g_idata[idx]; int a2 = g_idata[idx + blockDim.x]; int a3 = g_idata[idx + 2 * blockDim.x]; int a4 = g_idata[idx + 3 * blockDim.x]; int b1 = g_idata[idx + 4 * blockDim.x]; int b2 = g_idata[idx + 5 * blockDim.x]; int b3 = g_idata[idx + 6 * blockDim.x]; int b4 = g_idata[idx + 7 * blockDim.x]; g_idata[idx] = a1 + a2 + a3 + a4 + b1 + b2 + b3 + b4; } __syncthreads(); // in-place reduction and complete unroll if (iBlockSize >= 1024 && tid < 512) idata[tid] += idata[tid + 512]; __syncthreads(); if (iBlockSize >= 512 && tid < 256) idata[tid] += idata[tid + 256]; __syncthreads(); if (iBlockSize >= 256 && tid < 128) idata[tid] += idata[tid + 128]; __syncthreads(); if (iBlockSize >= 128 && tid < 64) idata[tid] += idata[tid + 64]; __syncthreads(); // unrolling warp if (tid < 32) { volatile int *vsmem = idata; vsmem[tid] += vsmem[tid + 32]; vsmem[tid] += vsmem[tid + 16]; vsmem[tid] += vsmem[tid + 8]; vsmem[tid] += vsmem[tid + 4]; vsmem[tid] += vsmem[tid + 2]; vsmem[tid] += vsmem[tid + 1]; } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = idata[0]; } __global__ void reduceUnrollWarps (int *g_idata, int *g_odata, unsigned int n) { // set thread ID unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x * 2 + threadIdx.x; // convert global data pointer to the local pointer of this block int *idata = g_idata + blockIdx.x * blockDim.x * 2; // unrolling 2 if (idx + blockDim.x < n) g_idata[idx] += g_idata[idx + blockDim.x]; __syncthreads(); // in-place reduction in global memory for (int stride = blockDim.x / 2; stride > 32; stride >>= 1) { if (tid < stride) { idata[tid] += idata[tid + stride]; } // synchronize within threadblock __syncthreads(); } // unrolling last warp if (tid < 32) { volatile int *vsmem = idata; vsmem[tid] += vsmem[tid + 32]; vsmem[tid] += vsmem[tid + 16]; vsmem[tid] += vsmem[tid + 8]; vsmem[tid] += vsmem[tid + 4]; vsmem[tid] += vsmem[tid + 2]; vsmem[tid] += vsmem[tid + 1]; } if (tid == 0) g_odata[blockIdx.x] = idata[0]; } int main(int argc, char **argv) { // set up device int dev = 0; 
cudaDeviceProp deviceProp; CHECK(cudaGetDeviceProperties(&deviceProp, dev)); printf("%s starting reduction at ", argv[0]); printf("device %d: %s ", dev, deviceProp.name); CHECK(cudaSetDevice(dev)); bool bResult = false; // initialization int size = 1 << 24; // total number of elements to reduce printf(" with array size %d ", size); // execution configuration int blocksize = 512; // initial block size if(argc > 1) { blocksize = atoi(argv[1]); // block size from command line argument } dim3 block (blocksize, 1); dim3 grid ((size + block.x - 1) / block.x, 1); printf("grid %d block %d\n", grid.x, block.x); // allocate host memory size_t bytes = size * sizeof(int); int *h_idata = (int *) malloc(bytes); int *h_odata = (int *) malloc(grid.x * sizeof(int)); int *tmp = (int *) malloc(bytes); // initialize the array for (int i = 0; i < size; i++) { // mask off high 2 bytes to force max number to 255 h_idata[i] = (int)( rand() & 0xFF ); //h_idata[i] = (int)( rand() & 0x1 ); } memcpy (tmp, h_idata, bytes); double iStart, iElaps; int gpu_sum = 0; // allocate device memory int *d_idata = NULL; int *d_odata = NULL; CHECK(cudaMalloc((void **) &d_idata, bytes)); CHECK(cudaMalloc((void **) &d_odata, grid.x * sizeof(int))); // cpu reduction iStart = seconds(); int cpu_sum = recursiveReduce (tmp, size); iElaps = seconds() - iStart; printf("cpu reduce elapsed %f sec cpu_sum: %d\n", iElaps, cpu_sum); // kernel 1: reduceNeighbored CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice)); CHECK(cudaDeviceSynchronize()); iStart = seconds(); reduceNeighbored<<<grid, block>>>(d_idata, d_odata, size); CHECK(cudaDeviceSynchronize()); iElaps = seconds() - iStart; CHECK(cudaMemcpy(h_odata, d_odata, grid.x * sizeof(int), cudaMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i]; printf("gpu Neighbored elapsed %f sec gpu_sum: %d <<<grid %d block " "%d>>>\n", iElaps, gpu_sum, grid.x, block.x); // kernel 2: reduceNeighbored with less divergence CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice)); CHECK(cudaDeviceSynchronize()); iStart = seconds(); reduceNeighboredLess<<<grid, block>>>(d_idata, d_odata, size); CHECK(cudaDeviceSynchronize()); iElaps = seconds() - iStart; CHECK(cudaMemcpy(h_odata, d_odata, grid.x * sizeof(int), cudaMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i]; printf("gpu Neighbored2 elapsed %f sec gpu_sum: %d <<<grid %d block " "%d>>>\n", iElaps, gpu_sum, grid.x, block.x); // kernel 3: reduceInterleaved CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice)); CHECK(cudaDeviceSynchronize()); iStart = seconds(); reduceInterleaved<<<grid, block>>>(d_idata, d_odata, size); CHECK(cudaDeviceSynchronize()); iElaps = seconds() - iStart; CHECK(cudaMemcpy(h_odata, d_odata, grid.x * sizeof(int), cudaMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i]; printf("gpu Interleaved elapsed %f sec gpu_sum: %d <<<grid %d block " "%d>>>\n", iElaps, gpu_sum, grid.x, block.x); // kernel 4: reduceUnrolling2 CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice)); CHECK(cudaDeviceSynchronize()); iStart = seconds(); reduceUnrolling2<<<grid.x / 2, block>>>(d_idata, d_odata, size); CHECK(cudaDeviceSynchronize()); iElaps = seconds() - iStart; CHECK(cudaMemcpy(h_odata, d_odata, grid.x / 2 * sizeof(int), cudaMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x / 2; i++) gpu_sum += h_odata[i]; printf("gpu Unrolling2 elapsed %f sec gpu_sum: %d <<<grid %d block " 
"%d>>>\n", iElaps, gpu_sum, grid.x / 2, block.x); // kernel 5: reduceUnrolling4 CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice)); CHECK(cudaDeviceSynchronize()); iStart = seconds(); reduceUnrolling4<<<grid.x / 4, block>>>(d_idata, d_odata, size); CHECK(cudaDeviceSynchronize()); iElaps = seconds() - iStart; CHECK(cudaMemcpy(h_odata, d_odata, grid.x / 4 * sizeof(int), cudaMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x / 4; i++) gpu_sum += h_odata[i]; printf("gpu Unrolling4 elapsed %f sec gpu_sum: %d <<<grid %d block " "%d>>>\n", iElaps, gpu_sum, grid.x / 4, block.x); // kernel 6: reduceUnrolling8 CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice)); CHECK(cudaDeviceSynchronize()); iStart = seconds(); reduceUnrolling8<<<grid.x / 8, block>>>(d_idata, d_odata, size); CHECK(cudaDeviceSynchronize()); iElaps = seconds() - iStart; CHECK(cudaMemcpy(h_odata, d_odata, grid.x / 8 * sizeof(int), cudaMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x / 8; i++) gpu_sum += h_odata[i]; printf("gpu Unrolling8 elapsed %f sec gpu_sum: %d <<<grid %d block " "%d>>>\n", iElaps, gpu_sum, grid.x / 8, block.x); for (int i = 0; i < grid.x / 16; i++) gpu_sum += h_odata[i]; // kernel 8: reduceUnrollWarps8 CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice)); CHECK(cudaDeviceSynchronize()); iStart = seconds(); reduceUnrollWarps8<<<grid.x / 8, block>>>(d_idata, d_odata, size); CHECK(cudaDeviceSynchronize()); iElaps = seconds() - iStart; CHECK(cudaMemcpy(h_odata, d_odata, grid.x / 8 * sizeof(int), cudaMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x / 8; i++) gpu_sum += h_odata[i]; printf("gpu UnrollWarp8 elapsed %f sec gpu_sum: %d <<<grid %d block " "%d>>>\n", iElaps, gpu_sum, grid.x / 8, block.x); // kernel 9: reduceCompleteUnrollWarsp8 CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice)); CHECK(cudaDeviceSynchronize()); iStart = seconds(); reduceCompleteUnrollWarps8<<<grid.x / 8, block>>>(d_idata, d_odata, size); CHECK(cudaDeviceSynchronize()); iElaps = seconds() - iStart; CHECK(cudaMemcpy(h_odata, d_odata, grid.x / 8 * sizeof(int), cudaMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x / 8; i++) gpu_sum += h_odata[i]; printf("gpu Cmptnroll8 elapsed %f sec gpu_sum: %d <<<grid %d block " "%d>>>\n", iElaps, gpu_sum, grid.x / 8, block.x); // kernel 9: reduceCompleteUnroll CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice)); CHECK(cudaDeviceSynchronize()); iStart = seconds(); switch (blocksize) { case 1024: reduceCompleteUnroll<1024><<<grid.x / 8, block>>>(d_idata, d_odata, size); break; case 512: reduceCompleteUnroll<512><<<grid.x / 8, block>>>(d_idata, d_odata, size); break; case 256: reduceCompleteUnroll<256><<<grid.x / 8, block>>>(d_idata, d_odata, size); break; case 128: reduceCompleteUnroll<128><<<grid.x / 8, block>>>(d_idata, d_odata, size); break; case 64: reduceCompleteUnroll<64><<<grid.x / 8, block>>>(d_idata, d_odata, size); break; } CHECK(cudaDeviceSynchronize()); iElaps = seconds() - iStart; CHECK(cudaMemcpy(h_odata, d_odata, grid.x / 8 * sizeof(int), cudaMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x / 8; i++) gpu_sum += h_odata[i]; printf("gpu Cmptnroll elapsed %f sec gpu_sum: %d <<<grid %d block " "%d>>>\n", iElaps, gpu_sum, grid.x / 8, block.x); // free host memory free(h_idata); free(h_odata); // free device memory CHECK(cudaFree(d_idata)); CHECK(cudaFree(d_odata)); // reset device CHECK(cudaDeviceReset()); // check the results bResult = (gpu_sum == cpu_sum); 
if(!bResult) printf("Test failed!\n"); return EXIT_SUCCESS; }
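The kernels above retire the last 32 elements with volatile loads and stores; a common alternative on devices with warp shuffle support keeps that tail entirely in registers. A minimal sketch, assuming blockDim.x is a multiple of 32 and at most 1024 (this kernel is not part of the file above):

__inline__ __device__ int warpReduceSum(int val)
{
    // each step halves the number of live lanes until lane 0 holds the warp sum
    for (int offset = 16; offset > 0; offset >>= 1)
        val += __shfl_down_sync(0xffffffff, val, offset);
    return val;
}

__global__ void reduceShfl(int *g_idata, int *g_odata, unsigned int n)
{
    __shared__ int warpSums[32];                    // one partial sum per warp
    unsigned int tid = threadIdx.x;
    unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;

    int sum = (idx < n) ? g_idata[idx] : 0;

    sum = warpReduceSum(sum);                       // reduce within each warp
    if ((tid & 31) == 0) warpSums[tid >> 5] = sum;  // lane 0 stores the warp result
    __syncthreads();

    if (tid < 32)                                   // first warp reduces the warp sums
    {
        sum = (tid < blockDim.x / 32) ? warpSums[tid] : 0;
        sum = warpReduceSum(sum);
        if (tid == 0) g_odata[blockIdx.x] = sum;
    }
}

Launched like reduceNeighbored above (one element per thread), it leaves one partial sum per block for the host to add up.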
df387bd7f56544b28480f7b86e37a602a1c5ecf8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <iostream> #define size 25 #define threads 32 using namespace std; __global__ void callOperation(int * a, int *b, int *res, int n) { int tid = blockDim.x * blockIdx.x + threadIdx.x; if (tid >= n) { return; } res[tid] = a[tid] - b[tid]; if (res[tid] < 0) { res[tid] = 0; } } __global__ void callOperationSharedStatic(int * a, int *b, int *res, int n) { int tid = blockDim.x * blockIdx.x + threadIdx.x; if (tid >= n) { return; } __shared__ int s_a[size], s_b[size], s_res[size]; s_a[tid] = a[tid]; s_b[tid] = b[tid]; s_res[tid] = s_a[tid] - s_b[tid]; if (s_res[tid] < 0) { s_res[tid] = 0; } res[tid] = s_res[tid]; } __global__ void callOperationSharedDynamic(int * a, int *b, int *res, int n) { int tid = blockDim.x * blockIdx.x + threadIdx.x; if (tid >= n) { return; } extern __shared__ int data[]; int *s_a = data; int *s_b = &s_a[size]; int *s_res = &s_b[size]; s_a[tid] = a[tid]; s_b[tid] = b[tid]; s_res[tid] = s_a[tid] - s_b[tid]; if (s_res[tid] < 0) { s_res[tid] = 0; } res[tid] = s_res[tid]; } int main() { int *a, *b, *res; int *d_a, *d_b, *d_res; a = (int*)malloc(size * sizeof(int)); b = (int*)malloc(size * sizeof(int)); res = (int*)malloc(size * sizeof(int)); for (int i = 0; i < size; i++) { a[i] = -i; b[i] = i; } cout << "\nNiz A:" << endl; for (int i = 0; i < size; i++) { cout << a[i] << "\t"; } cout << "\nNiz B:" << endl; for (int i = 0; i < size; i++) { cout << b[i] << "\t"; } hipMalloc((void**)&d_a, size * sizeof(int)); hipMalloc((void**)&d_b, size * sizeof(int)); hipMalloc((void**)&d_res, size * sizeof(int)); hipMemcpy(d_a, a, size * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_b, b, size * sizeof(int), hipMemcpyHostToDevice); dim3 numberOfBlocks(size / threads + 1, 1, 1); dim3 numberOfThreads(threads, 1, 1); //callOperation << < numberOfBlocks, numberOfThreads >> > (d_a, d_b, d_res, size); //callOperationSharedStatic << < numberOfBlocks, numberOfThreads >> > (d_a, d_b, d_res, size); callOperationSharedDynamic << < numberOfBlocks, numberOfThreads, size * sizeof(int) + size * sizeof(int) + size * sizeof(int) >> > (d_a, d_b, d_res, size); hipMemcpy(res, d_res, size * sizeof(int), hipMemcpyDeviceToHost); cout << "\nNiz RES:" << endl; for (int i = 0; i < size; i++) { cout << res[i] << "\t"; } hipFree(d_a); hipFree(d_b); hipFree(d_res); free(a); free(b); free(res); hipDeviceReset(); system("PAUSE"); return 0; }
df387bd7f56544b28480f7b86e37a602a1c5ecf8.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <iostream> #define size 25 #define threads 32 using namespace std; __global__ void callOperation(int * a, int *b, int *res, int n) { int tid = blockDim.x * blockIdx.x + threadIdx.x; if (tid >= n) { return; } res[tid] = a[tid] - b[tid]; if (res[tid] < 0) { res[tid] = 0; } } __global__ void callOperationSharedStatic(int * a, int *b, int *res, int n) { int tid = blockDim.x * blockIdx.x + threadIdx.x; if (tid >= n) { return; } __shared__ int s_a[size], s_b[size], s_res[size]; s_a[tid] = a[tid]; s_b[tid] = b[tid]; s_res[tid] = s_a[tid] - s_b[tid]; if (s_res[tid] < 0) { s_res[tid] = 0; } res[tid] = s_res[tid]; } __global__ void callOperationSharedDynamic(int * a, int *b, int *res, int n) { int tid = blockDim.x * blockIdx.x + threadIdx.x; if (tid >= n) { return; } extern __shared__ int data[]; int *s_a = data; int *s_b = &s_a[size]; int *s_res = &s_b[size]; s_a[tid] = a[tid]; s_b[tid] = b[tid]; s_res[tid] = s_a[tid] - s_b[tid]; if (s_res[tid] < 0) { s_res[tid] = 0; } res[tid] = s_res[tid]; } int main() { int *a, *b, *res; int *d_a, *d_b, *d_res; a = (int*)malloc(size * sizeof(int)); b = (int*)malloc(size * sizeof(int)); res = (int*)malloc(size * sizeof(int)); for (int i = 0; i < size; i++) { a[i] = -i; b[i] = i; } cout << "\nNiz A:" << endl; for (int i = 0; i < size; i++) { cout << a[i] << "\t"; } cout << "\nNiz B:" << endl; for (int i = 0; i < size; i++) { cout << b[i] << "\t"; } cudaMalloc((void**)&d_a, size * sizeof(int)); cudaMalloc((void**)&d_b, size * sizeof(int)); cudaMalloc((void**)&d_res, size * sizeof(int)); cudaMemcpy(d_a, a, size * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_b, b, size * sizeof(int), cudaMemcpyHostToDevice); dim3 numberOfBlocks(size / threads + 1, 1, 1); dim3 numberOfThreads(threads, 1, 1); //callOperation << < numberOfBlocks, numberOfThreads >> > (d_a, d_b, d_res, size); //callOperationSharedStatic << < numberOfBlocks, numberOfThreads >> > (d_a, d_b, d_res, size); callOperationSharedDynamic << < numberOfBlocks, numberOfThreads, size * sizeof(int) + size * sizeof(int) + size * sizeof(int) >> > (d_a, d_b, d_res, size); cudaMemcpy(res, d_res, size * sizeof(int), cudaMemcpyDeviceToHost); cout << "\nNiz RES:" << endl; for (int i = 0; i < size; i++) { cout << res[i] << "\t"; } cudaFree(d_a); cudaFree(d_b); cudaFree(d_res); free(a); free(b); free(res); cudaDeviceReset(); system("PAUSE"); return 0; }
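The dynamic-shared-memory variant above carves three arrays out of one extern allocation and indexes them by the global thread id, which only works because the whole problem fits in a single block. A minimal sketch of the same pattern sized and indexed per block, with ceiling-division for the grid and basic error checks (the helper and kernel names are illustrative):

#include <cuda_runtime.h>
#include <cstdio>

#define CUDA_OK(call)                                                     \
    do {                                                                  \
        cudaError_t err_ = (call);                                        \
        if (err_ != cudaSuccess) {                                        \
            fprintf(stderr, "CUDA error %s at %s:%d\n",                   \
                    cudaGetErrorString(err_), __FILE__, __LINE__);        \
            return 1;                                                     \
        }                                                                 \
    } while (0)

__global__ void saturatingSub(const int *a, const int *b, int *res, int n)
{
    extern __shared__ int buf[];              // 2 * blockDim.x ints, partitioned below
    int *s_a = buf;
    int *s_b = buf + blockDim.x;

    int gid = blockDim.x * blockIdx.x + threadIdx.x;
    if (gid >= n) return;

    s_a[threadIdx.x] = a[gid];                // stage inputs in shared memory
    s_b[threadIdx.x] = b[gid];
    int d = s_a[threadIdx.x] - s_b[threadIdx.x];
    res[gid] = d < 0 ? 0 : d;                 // clamp negative differences to zero, as above
}

int launchSaturatingSub(const int *d_a, const int *d_b, int *d_res, int n)
{
    int threadsPerBlock = 32;
    int blocks = (n + threadsPerBlock - 1) / threadsPerBlock;   // ceiling division
    size_t sharedBytes = 2 * threadsPerBlock * sizeof(int);
    saturatingSub<<<blocks, threadsPerBlock, sharedBytes>>>(d_a, d_b, d_res, n);
    CUDA_OK(cudaGetLastError());
    CUDA_OK(cudaDeviceSynchronize());
    return 0;
}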
c37fbbaef84ddf0200ba430cb82d8d0ef045ba4c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define OPERATOR_PLUS 1.0 #define OPERATOR_MINUS 2.0 #define OPERATOR_MULTIPLY 3.0 #define OPERATOR_DIVIDE 4.0 #define OPERATOR_SIN 5.0 #define OPERATOR_COS 6.0 __global__ void graph_compute(float* node, int* parent, int start, int end) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int threadNum = (end - start) / 2; if (tid < threadNum) { float left = node[start + tid * 2]; float right = node[start + tid * 2 + 1]; int parentId = parent[start / 2 + tid]; float oper = node[parentId]; float result; /* select the operation encoded as a floating-point constant in the parent node */ if (oper == OPERATOR_PLUS) { result = left + right; } else if (oper == OPERATOR_MINUS) { result = left - right; } else if (oper == OPERATOR_MULTIPLY) { result = left * right; } else if (oper == OPERATOR_DIVIDE) { result = left / right; } else if (oper == OPERATOR_SIN) { result = sinf(left); } else if (oper == OPERATOR_COS) { result = cosf(left); } else { result = 0; } node[parentId] = result; } }
c37fbbaef84ddf0200ba430cb82d8d0ef045ba4c.cu
#define OPERATOR_PLUS 1.0 #define OPERATOR_MINUS 2.0 #define OPERATOR_MULTIPLY 3.0 #define OPERATOR_DIVIDE 4.0 #define OPERATOR_SIN 5.0 #define OPERATOR_COS 6.0 __global__ void graph_compute(float* node, int* parent, int start, int end) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int threadNum = (end - start) / 2; if (tid < threadNum) { float left = node[start + tid * 2]; float right = node[start + tid * 2 + 1]; int parentId = parent[start / 2 + tid]; float oper = node[parentId]; float result; /* select the operation encoded as a floating-point constant in the parent node */ if (oper == OPERATOR_PLUS) { result = left + right; } else if (oper == OPERATOR_MINUS) { result = left - right; } else if (oper == OPERATOR_MULTIPLY) { result = left * right; } else if (oper == OPERATOR_DIVIDE) { result = left / right; } else if (oper == OPERATOR_SIN) { result = sinf(left); } else if (oper == OPERATOR_COS) { result = cosf(left); } else { result = 0; } node[parentId] = result; } }
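graph_compute evaluates one tree level per launch: each thread folds a pair of child values into their parent's slot. A host-side sketch that evaluates (3 + 4) * (10 - 8) with two launches; the flat layout (operator codes and operand values share the node array, and parent[i / 2] holds the parent of the child pair starting at node index i) is inferred from the kernel's indexing rather than documented in the original:

#include <cuda_runtime.h>
#include <cstdio>

int main()
{
    // node[0] = '*', node[1] = '+', node[2] = '-', node[3..6] = leaf operands
    float h_node[7]   = { 3.0f, 1.0f, 2.0f, 3.0f, 4.0f, 10.0f, 8.0f };
    int   h_parent[4] = { 0, 1, 2, 0 };   // h_parent[i / 2] = parent of the pair starting at node i

    float *d_node = NULL;
    int   *d_parent = NULL;
    cudaMalloc((void **)&d_node, sizeof(h_node));
    cudaMalloc((void **)&d_parent, sizeof(h_parent));
    cudaMemcpy(d_node, h_node, sizeof(h_node), cudaMemcpyHostToDevice);
    cudaMemcpy(d_parent, h_parent, sizeof(h_parent), cudaMemcpyHostToDevice);

    graph_compute<<<1, 32>>>(d_node, d_parent, 3, 7);   // fold leaves into '+' and '-'
    graph_compute<<<1, 32>>>(d_node, d_parent, 1, 3);   // fold those results into the root
    cudaDeviceSynchronize();

    float root = 0.0f;
    cudaMemcpy(&root, d_node, sizeof(float), cudaMemcpyDeviceToHost);
    printf("result = %g\n", root);                      // expected: 14

    cudaFree(d_node);
    cudaFree(d_parent);
    return 0;
}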
eee08bcdb62daeaae94cc7568da41371fa052350.hip
// !!! This is a file automatically generated by hipify!!! #include <fstream> #include <iostream> #include <vector> #include <algorithm> #include "models.cuh" #include "models_pq_fifo.cuh" #include "models_pq.cuh" #include "models_fifo.cuh" #include "util.cuh" #include "datastructure.hpp" using namespace std; bool cmp(KnapsackItem a, KnapsackItem b) { double r1 = (double)a.first / (double)a.second; double r2 = (double)b.first / (double)b.second; return r1 > r2; } int main(int argc, char *argv[]) { if (argc != 2) { cout << "./knapsack [dataset]\n"; /*cout << "./knapsack [dataset] [batchnum] [batchsize] [blocknum] [blocksize] [gc_threshold] [model] [has_maxbenefit] [switch_counter]\n";*/ return 1; } ifstream inputFile; /*int batchNum = atoi(argv[2]);*/ /*int batchSize = atoi(argv[3]);*/ /*int blockNum = atoi(argv[4]);*/ /*int blockSize = atoi(argv[5]);*/ /*int gc_threshold = atoi(argv[6]);*/ /*int model = atoi(argv[7]);*/ /*int has_maxbenefit = atoi(argv[8]);*/ /*int switch_counter = atoi(argv[9]);*/ int batchNum = 102400; int batchSize = 256; int blockNum = 4; int blockSize = 256; int gc_threshold = 150; int model = 0; int has_maxbenefit = 0; int switch_counter = 20; /*int endingBlockNum = atoi(argv[10]);*/ inputFile.open(argv[1]); int capacity, inputSize; inputFile >> inputSize >> capacity; int *weight = new int[inputSize]; int *benefit = new int[inputSize]; float *benefitPerWeight = new float[inputSize]; int max_benefit = 0; for (int i = 0; i < inputSize; i++) { inputFile >> benefit[i] >> weight[i]; benefitPerWeight[i] = (float)benefit[i] / (float)weight[i]; } inputFile.close(); if (has_maxbenefit == 1) { inputFile.open(strcat(argv[1], ".res")); inputFile >> max_benefit; inputFile.close(); cout << max_benefit << " "; } // Sort items by ppw KnapsackItem *items = new KnapsackItem[inputSize]; for (int i = 0; i < inputSize; i++){ items[i] = KnapsackItem(benefit[i], weight[i], 0, 0); } sort(items, items + inputSize, cmp); for (int i = 0; i < inputSize; i++){ benefit[i] = items[i].first; weight[i] = items[i].second; benefitPerWeight[i] = (float)(benefit[i]) / (float)(weight[i]); } delete[]items; int *d_weight, *d_benefit; float *d_benefitPerWeight; hipMalloc((void **)&d_weight, sizeof(int) * inputSize); hipMalloc((void **)&d_benefit, sizeof(int) * inputSize); hipMalloc((void **)&d_benefitPerWeight, sizeof(float) * inputSize); hipMemcpy(d_weight, weight, sizeof(int) * inputSize, hipMemcpyHostToDevice); hipMemcpy(d_benefit, benefit, sizeof(int) * inputSize, hipMemcpyHostToDevice); hipMemcpy(d_benefitPerWeight, benefitPerWeight, sizeof(float) * inputSize, hipMemcpyHostToDevice); int *d_max_benefit; hipMalloc((void **)&d_max_benefit, sizeof(int)); hipMemset(d_max_benefit, 0, sizeof(int)); printf("%s,knapsack,%s,",argv[0] == std::string("./knapsackT") ? 
"BGPQ_T" : "BGPQ_B",argv[1]); if (model == 0) /* heap */ { oneheap(d_weight, d_benefit, d_benefitPerWeight, d_max_benefit, capacity, inputSize, batchNum, batchSize, blockNum, blockSize, gc_threshold); } else if (model == 1) /* fifo queue */ { onebuffer(d_weight, d_benefit, d_benefitPerWeight, d_max_benefit, capacity, inputSize, batchNum, batchSize, blockNum, blockSize, gc_threshold); } else if (model == 2) /* heap + fifo queue */ { if (has_maxbenefit == 0) { oneheap(d_weight, d_benefit, d_benefitPerWeight, d_max_benefit, capacity, inputSize, batchNum, batchSize, blockNum, blockSize, gc_threshold); hipMemcpy(&max_benefit, d_max_benefit, sizeof(int), hipMemcpyDeviceToHost); hipMemset(d_max_benefit, 0, sizeof(int)); } oneheapearlystop(d_weight, d_benefit, d_benefitPerWeight, d_max_benefit, capacity, inputSize, batchNum, batchSize, blockNum, blockSize, gc_threshold, max_benefit); } else if (model == 3) /* heap + fifo switch */ { oneheapfifoswitch(d_weight, d_benefit, d_benefitPerWeight, d_max_benefit, capacity, inputSize, batchNum, batchSize, blockNum, blockSize, gc_threshold, switch_counter, max_benefit); } hipMemcpy(&max_benefit, d_max_benefit, sizeof(int), hipMemcpyDeviceToHost); /*cout << max_benefit << " ";*/ delete[] weight; weight = NULL; delete[] benefit; benefit = NULL; delete[] benefitPerWeight; benefitPerWeight = NULL; hipFree(d_weight); hipFree(d_benefit); hipFree(d_benefitPerWeight); hipFree(d_max_benefit); return 0; }
eee08bcdb62daeaae94cc7568da41371fa052350.cu
#include <fstream> #include <iostream> #include <vector> #include <algorithm> #include "models.cuh" #include "models_pq_fifo.cuh" #include "models_pq.cuh" #include "models_fifo.cuh" #include "util.cuh" #include "datastructure.hpp" using namespace std; bool cmp(KnapsackItem a, KnapsackItem b) { double r1 = (double)a.first / (double)a.second; double r2 = (double)b.first / (double)b.second; return r1 > r2; } int main(int argc, char *argv[]) { if (argc != 2) { cout << "./knapsack [dataset]\n"; /*cout << "./knapsack [dataset] [batchnum] [batchsize] [blocknum] [blocksize] [gc_threshold] [model] [has_maxbenefit] [switch_counter]\n";*/ return 1; } ifstream inputFile; /*int batchNum = atoi(argv[2]);*/ /*int batchSize = atoi(argv[3]);*/ /*int blockNum = atoi(argv[4]);*/ /*int blockSize = atoi(argv[5]);*/ /*int gc_threshold = atoi(argv[6]);*/ /*int model = atoi(argv[7]);*/ /*int has_maxbenefit = atoi(argv[8]);*/ /*int switch_counter = atoi(argv[9]);*/ int batchNum = 102400; int batchSize = 256; int blockNum = 4; int blockSize = 256; int gc_threshold = 150; int model = 0; int has_maxbenefit = 0; int switch_counter = 20; /*int endingBlockNum = atoi(argv[10]);*/ inputFile.open(argv[1]); int capacity, inputSize; inputFile >> inputSize >> capacity; int *weight = new int[inputSize]; int *benefit = new int[inputSize]; float *benefitPerWeight = new float[inputSize]; int max_benefit = 0; for (int i = 0; i < inputSize; i++) { inputFile >> benefit[i] >> weight[i]; benefitPerWeight[i] = (float)benefit[i] / (float)weight[i]; } inputFile.close(); if (has_maxbenefit == 1) { inputFile.open(strcat(argv[1], ".res")); inputFile >> max_benefit; inputFile.close(); cout << max_benefit << " "; } // Sort items by ppw KnapsackItem *items = new KnapsackItem[inputSize]; for (int i = 0; i < inputSize; i++){ items[i] = KnapsackItem(benefit[i], weight[i], 0, 0); } sort(items, items + inputSize, cmp); for (int i = 0; i < inputSize; i++){ benefit[i] = items[i].first; weight[i] = items[i].second; benefitPerWeight[i] = (float)(benefit[i]) / (float)(weight[i]); } delete[]items; int *d_weight, *d_benefit; float *d_benefitPerWeight; cudaMalloc((void **)&d_weight, sizeof(int) * inputSize); cudaMalloc((void **)&d_benefit, sizeof(int) * inputSize); cudaMalloc((void **)&d_benefitPerWeight, sizeof(float) * inputSize); cudaMemcpy(d_weight, weight, sizeof(int) * inputSize, cudaMemcpyHostToDevice); cudaMemcpy(d_benefit, benefit, sizeof(int) * inputSize, cudaMemcpyHostToDevice); cudaMemcpy(d_benefitPerWeight, benefitPerWeight, sizeof(float) * inputSize, cudaMemcpyHostToDevice); int *d_max_benefit; cudaMalloc((void **)&d_max_benefit, sizeof(int)); cudaMemset(d_max_benefit, 0, sizeof(int)); printf("%s,knapsack,%s,",argv[0] == std::string("./knapsackT") ? 
"BGPQ_T" : "BGPQ_B",argv[1]); if (model == 0) /* heap */ { oneheap(d_weight, d_benefit, d_benefitPerWeight, d_max_benefit, capacity, inputSize, batchNum, batchSize, blockNum, blockSize, gc_threshold); } else if (model == 1) /* fifo queue */ { onebuffer(d_weight, d_benefit, d_benefitPerWeight, d_max_benefit, capacity, inputSize, batchNum, batchSize, blockNum, blockSize, gc_threshold); } else if (model == 2) /* heap + fifo queue */ { if (has_maxbenefit == 0) { oneheap(d_weight, d_benefit, d_benefitPerWeight, d_max_benefit, capacity, inputSize, batchNum, batchSize, blockNum, blockSize, gc_threshold); cudaMemcpy(&max_benefit, d_max_benefit, sizeof(int), cudaMemcpyDeviceToHost); cudaMemset(d_max_benefit, 0, sizeof(int)); } oneheapearlystop(d_weight, d_benefit, d_benefitPerWeight, d_max_benefit, capacity, inputSize, batchNum, batchSize, blockNum, blockSize, gc_threshold, max_benefit); } else if (model == 3) /* heap + fifo switch */ { oneheapfifoswitch(d_weight, d_benefit, d_benefitPerWeight, d_max_benefit, capacity, inputSize, batchNum, batchSize, blockNum, blockSize, gc_threshold, switch_counter, max_benefit); } cudaMemcpy(&max_benefit, d_max_benefit, sizeof(int), cudaMemcpyDeviceToHost); /*cout << max_benefit << " ";*/ delete[] weight; weight = NULL; delete[] benefit; benefit = NULL; delete[] benefitPerWeight; benefitPerWeight = NULL; cudaFree(d_weight); cudaFree(d_benefit); cudaFree(d_benefitPerWeight); cudaFree(d_max_benefit); return 0; }
bdc74867db370ef58d83b5e1e8dfea23c165fe28.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" __device__ long long int mod(int base, int exponent, int den) { long long int ret; ret = 1; for (int i = 0; i < exponent; i++) { ret *= base; ret = ret % den; } return ret; } __device__ long long int mod_optimized(int base, int exponent, int den) { unsigned int a = (base % den) * (base % den); unsigned long long int ret = 1; float size = (float) exponent / 2; if (exponent == 0) { return base % den; } else { while (1) { if (size > 0.5) { ret = (ret * a) % den; size = size - 1.0; } else if (size == 0.5) { ret = (ret * (base % den)) % den; break; } else { break; } } return ret; } } __global__ void rsa(int * num, int *key, int *den, int * result) { int i = blockDim.x * blockIdx.x + threadIdx.x; int temp; temp = mod(num[i], *key, *den); //temp = mod_optimized(num[i], *key, *den); atomicExch(&result[i], temp); }
bdc74867db370ef58d83b5e1e8dfea23c165fe28.cu
__device__ long long int mod(int base, int exponent, int den) { long long int ret; ret = 1; for (int i = 0; i < exponent; i++) { ret *= base; ret = ret % den; } return ret; } __device__ long long int mod_optimized(int base, int exponent, int den) { unsigned int a = (base % den) * (base % den); unsigned long long int ret = 1; float size = (float) exponent / 2; if (exponent == 0) { return base % den; } else { while (1) { if (size > 0.5) { ret = (ret * a) % den; size = size - 1.0; } else if (size == 0.5) { ret = (ret * (base % den)) % den; break; } else { break; } } return ret; } } __global__ void rsa(int * num, int *key, int *den, int * result) { int i = blockDim.x * blockIdx.x + threadIdx.x; int temp; temp = mod(num[i], *key, *den); //temp = mod_optimized(num[i], *key, *den); atomicExch(&result[i], temp); }
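The device mod() above multiplies base exponent times; a host-side reference using binary (square-and-multiply) exponentiation is handy for checking the kernel's output and runs in O(log exponent) steps. A minimal sketch, assuming den fits in 32 bits so the 64-bit intermediates cannot overflow:

// Host reference: result[i] from the rsa kernel should equal powmod(num[i], key, den).
static long long powmod(long long base, long long exponent, long long den)
{
    long long result = 1;
    base %= den;
    while (exponent > 0) {
        if (exponent & 1)
            result = (result * base) % den;   // multiply in the current bit
        base = (base * base) % den;           // square for the next bit
        exponent >>= 1;
    }
    return result;
}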
6028495d730d6ef75af60de91647d8c2bb8bb70f.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <iostream> #include <hip/hip_runtime.h> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #define CUSTOMMERS 400 #define PEOPLE_PER_TABLE 10 #define WAITERS_PER_TABLE 2 #define TABLES CUSTOMMERS/PEOPLE_PER_TABLE using namespace std; // GPU function that simulates an sleep function __device__ void wait_gpu(float time) { int wait = 1000000 * time * 2.5; for (int i = 0; i < wait; i++) { } } // GPU function that simulates the serving dishes time __global__ void GPU_serving_dishes(int round_offset, bool* plates) { int gId = threadIdx.x + round_offset; wait_gpu(1); plates[gId] = true; } // GPU function that simulates the eating and picking plates up time __global__ void GPU_finish_eating_and_picking_up_plates(bool* plates, float* eating_times, clock_t* global_now) { int gId = threadIdx.x; wait_gpu(eating_times[gId]); wait_gpu(1); plates[gId] = true; } // Simulates how the catering service will behave in the event __host__ void CPU_serving_dishes(char food_course[], hipEvent_t s, hipEvent_t e) { printf("SERVICE: serving %s ...\n", food_course); // Determine how many rounds did the waiters will need to do, to serve a table int serving_rounds = CUSTOMMERS / (TABLES * WAITERS_PER_TABLE); // Declare host and device variables bool* host_customers_plate; bool* dev_customers_plate; // Reserve space for host and device variables host_customers_plate = (bool*)malloc(CUSTOMMERS * sizeof(bool)); hipMalloc((void**)&dev_customers_plate, CUSTOMMERS * sizeof(bool)); // Initialice host variables for (int i = 0; i < CUSTOMMERS; i++) { host_customers_plate[i] = false; } float totalTime = 0; // Start serving rounds for (int i = 0; i < serving_rounds; i++) { hipMemcpy(dev_customers_plate, host_customers_plate, CUSTOMMERS * sizeof(bool), hipMemcpyHostToDevice); dim3 block(CUSTOMMERS / serving_rounds); // Start kernel function and capture the time it spend in the execution hipEventCreate(&s); hipEventCreate(&e); hipEventRecord(s, 0); GPU_serving_dishes << <1, block >> > (i * CUSTOMMERS / serving_rounds, dev_customers_plate); hipEventRecord(e, 0); hipDeviceSynchronize(); float currElapsedTime; hipEventElapsedTime(&currElapsedTime, s, e); totalTime += currElapsedTime; hipEventDestroy(s); hipEventDestroy(e); hipMemcpy(host_customers_plate, dev_customers_plate, CUSTOMMERS * sizeof(bool), hipMemcpyDeviceToHost); // Calculate the percentage of people that have been served float percentage_served = 0; int persons_served = 0; for (int i = 0; i < CUSTOMMERS; i++) { if (host_customers_plate[i]) { persons_served++; } } percentage_served = 100 * persons_served / (float)CUSTOMMERS; printf("SERVICE: people served -> %.2f%%\n", percentage_served); } // Free space for host and device variables free(host_customers_plate); hipFree(dev_customers_plate); printf("SERVICE: %s served in %.3f ms\n", food_course, totalTime); } // Generates a random number from 0-10 __host__ float getRangeRandom() { return (rand() % 11 / 10.0) * 10; } // Fills a float list with random numbers __host__ void fillRandomNumbersList(float* randomNumbersList, int size) { for (int i = 0; i < size; i++) { randomNumbersList[i] = getRangeRandom()+1; } } // Simulates the eating and picking up of the plates when a costummer has finish __host__ void CPU_finish_eating_and_picking_up_plates(hipEvent_t s, hipEvent_t e) { printf("FOOD: people eating...\n"); int serving_rounds = CUSTOMMERS / (TABLES * WAITERS_PER_TABLE); // Declare host and device variables bool* 
host_customers_plate; bool* dev_customers_plate; float* host_random_eating_times; float* dev_random_eating_times; clock_t* host_global_clock; clock_t* dev_global_clock; // Reserve space for host and device variables host_customers_plate = (bool*)malloc(CUSTOMMERS * sizeof(bool)); host_random_eating_times = (float*)malloc(CUSTOMMERS * sizeof(float)); host_global_clock = (clock_t*)malloc(sizeof(clock_t)); hipMalloc((void**)&dev_customers_plate, CUSTOMMERS * sizeof(bool)); hipMalloc((void**)&dev_random_eating_times, CUSTOMMERS * sizeof(float)); hipMalloc((void**)&dev_global_clock, sizeof(clock_t)); // Initialice host variables for (int i = 0; i < CUSTOMMERS; i++) { host_customers_plate[i] = false; } fillRandomNumbersList(host_random_eating_times, CUSTOMMERS); hipMemcpy(dev_customers_plate, host_customers_plate, CUSTOMMERS * sizeof(bool), hipMemcpyHostToDevice); hipMemcpy(dev_random_eating_times, host_random_eating_times, CUSTOMMERS * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(dev_global_clock, host_global_clock, sizeof(clock_t), hipMemcpyHostToDevice); dim3 block(CUSTOMMERS); // Start kernel function and capture the time it spend in the execution float totalTime = 0; hipEventCreate(&s); hipEventCreate(&e); hipEventRecord(s, 0); GPU_finish_eating_and_picking_up_plates << < 1, block >> > (dev_customers_plate,dev_random_eating_times, dev_global_clock); hipEventRecord(e, 0); hipDeviceSynchronize(); float currElapsedTime; hipEventElapsedTime(&currElapsedTime, s, e); totalTime += currElapsedTime; hipEventDestroy(s); hipEventDestroy(e); hipMemcpy(host_customers_plate, dev_customers_plate, CUSTOMMERS * sizeof(bool), hipMemcpyDeviceToHost); hipMemcpy(host_random_eating_times, dev_random_eating_times, CUSTOMMERS * sizeof(float), hipMemcpyDeviceToHost); hipMemcpy(host_global_clock, dev_global_clock, sizeof(clock_t), hipMemcpyDeviceToHost); // Free space for host and device variables free(host_customers_plate); free(host_random_eating_times); free(host_global_clock); hipFree(dev_customers_plate); hipFree(dev_random_eating_times); hipFree(dev_global_clock); printf("FOOD: people finish eating and plates picked up in %.3f ms\n", currElapsedTime); } int main() { printf("START\n"); printf("SERVICE: Preparing food...\n"); _sleep(1000); printf("SERVICE: Food ready\n"); // Starting cuda events, these help us calculating the time spend in the kernel hipEvent_t start; hipEvent_t end; hipEventCreate(&start); hipEventCreate(&end); // Three course dinner CPU_serving_dishes("starter", start, end); CPU_finish_eating_and_picking_up_plates(start, end); CPU_serving_dishes("main course", start, end); CPU_finish_eating_and_picking_up_plates(start, end); CPU_serving_dishes("dessert", start, end); CPU_finish_eating_and_picking_up_plates(start, end); printf("END\n"); return 0; }
6028495d730d6ef75af60de91647d8c2bb8bb70f.cu
#include <stdio.h> #include <iostream> #include <cuda.h> #include "cuda_runtime.h" #include "device_launch_parameters.h" #define CUSTOMMERS 400 #define PEOPLE_PER_TABLE 10 #define WAITERS_PER_TABLE 2 #define TABLES CUSTOMMERS/PEOPLE_PER_TABLE using namespace std; // GPU function that simulates an sleep function __device__ void wait_gpu(float time) { int wait = 1000000 * time * 2.5; for (int i = 0; i < wait; i++) { } } // GPU function that simulates the serving dishes time __global__ void GPU_serving_dishes(int round_offset, bool* plates) { int gId = threadIdx.x + round_offset; wait_gpu(1); plates[gId] = true; } // GPU function that simulates the eating and picking plates up time __global__ void GPU_finish_eating_and_picking_up_plates(bool* plates, float* eating_times, clock_t* global_now) { int gId = threadIdx.x; wait_gpu(eating_times[gId]); wait_gpu(1); plates[gId] = true; } // Simulates how the catering service will behave in the event __host__ void CPU_serving_dishes(char food_course[], cudaEvent_t s, cudaEvent_t e) { printf("SERVICE: serving %s ...\n", food_course); // Determine how many rounds did the waiters will need to do, to serve a table int serving_rounds = CUSTOMMERS / (TABLES * WAITERS_PER_TABLE); // Declare host and device variables bool* host_customers_plate; bool* dev_customers_plate; // Reserve space for host and device variables host_customers_plate = (bool*)malloc(CUSTOMMERS * sizeof(bool)); cudaMalloc((void**)&dev_customers_plate, CUSTOMMERS * sizeof(bool)); // Initialice host variables for (int i = 0; i < CUSTOMMERS; i++) { host_customers_plate[i] = false; } float totalTime = 0; // Start serving rounds for (int i = 0; i < serving_rounds; i++) { cudaMemcpy(dev_customers_plate, host_customers_plate, CUSTOMMERS * sizeof(bool), cudaMemcpyHostToDevice); dim3 block(CUSTOMMERS / serving_rounds); // Start kernel function and capture the time it spend in the execution cudaEventCreate(&s); cudaEventCreate(&e); cudaEventRecord(s, 0); GPU_serving_dishes << <1, block >> > (i * CUSTOMMERS / serving_rounds, dev_customers_plate); cudaEventRecord(e, 0); cudaDeviceSynchronize(); float currElapsedTime; cudaEventElapsedTime(&currElapsedTime, s, e); totalTime += currElapsedTime; cudaEventDestroy(s); cudaEventDestroy(e); cudaMemcpy(host_customers_plate, dev_customers_plate, CUSTOMMERS * sizeof(bool), cudaMemcpyDeviceToHost); // Calculate the percentage of people that have been served float percentage_served = 0; int persons_served = 0; for (int i = 0; i < CUSTOMMERS; i++) { if (host_customers_plate[i]) { persons_served++; } } percentage_served = 100 * persons_served / (float)CUSTOMMERS; printf("SERVICE: people served -> %.2f%%\n", percentage_served); } // Free space for host and device variables free(host_customers_plate); cudaFree(dev_customers_plate); printf("SERVICE: %s served in %.3f ms\n", food_course, totalTime); } // Generates a random number from 0-10 __host__ float getRangeRandom() { return (rand() % 11 / 10.0) * 10; } // Fills a float list with random numbers __host__ void fillRandomNumbersList(float* randomNumbersList, int size) { for (int i = 0; i < size; i++) { randomNumbersList[i] = getRangeRandom()+1; } } // Simulates the eating and picking up of the plates when a costummer has finish __host__ void CPU_finish_eating_and_picking_up_plates(cudaEvent_t s, cudaEvent_t e) { printf("FOOD: people eating...\n"); int serving_rounds = CUSTOMMERS / (TABLES * WAITERS_PER_TABLE); // Declare host and device variables bool* host_customers_plate; bool* dev_customers_plate; float* 
host_random_eating_times; float* dev_random_eating_times; clock_t* host_global_clock; clock_t* dev_global_clock; // Reserve space for host and device variables host_customers_plate = (bool*)malloc(CUSTOMMERS * sizeof(bool)); host_random_eating_times = (float*)malloc(CUSTOMMERS * sizeof(float)); host_global_clock = (clock_t*)malloc(sizeof(clock_t)); cudaMalloc((void**)&dev_customers_plate, CUSTOMMERS * sizeof(bool)); cudaMalloc((void**)&dev_random_eating_times, CUSTOMMERS * sizeof(float)); cudaMalloc((void**)&dev_global_clock, sizeof(clock_t)); // Initialice host variables for (int i = 0; i < CUSTOMMERS; i++) { host_customers_plate[i] = false; } fillRandomNumbersList(host_random_eating_times, CUSTOMMERS); cudaMemcpy(dev_customers_plate, host_customers_plate, CUSTOMMERS * sizeof(bool), cudaMemcpyHostToDevice); cudaMemcpy(dev_random_eating_times, host_random_eating_times, CUSTOMMERS * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(dev_global_clock, host_global_clock, sizeof(clock_t), cudaMemcpyHostToDevice); dim3 block(CUSTOMMERS); // Start kernel function and capture the time it spend in the execution float totalTime = 0; cudaEventCreate(&s); cudaEventCreate(&e); cudaEventRecord(s, 0); GPU_finish_eating_and_picking_up_plates << < 1, block >> > (dev_customers_plate,dev_random_eating_times, dev_global_clock); cudaEventRecord(e, 0); cudaDeviceSynchronize(); float currElapsedTime; cudaEventElapsedTime(&currElapsedTime, s, e); totalTime += currElapsedTime; cudaEventDestroy(s); cudaEventDestroy(e); cudaMemcpy(host_customers_plate, dev_customers_plate, CUSTOMMERS * sizeof(bool), cudaMemcpyDeviceToHost); cudaMemcpy(host_random_eating_times, dev_random_eating_times, CUSTOMMERS * sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(host_global_clock, dev_global_clock, sizeof(clock_t), cudaMemcpyDeviceToHost); // Free space for host and device variables free(host_customers_plate); free(host_random_eating_times); free(host_global_clock); cudaFree(dev_customers_plate); cudaFree(dev_random_eating_times); cudaFree(dev_global_clock); printf("FOOD: people finish eating and plates picked up in %.3f ms\n", currElapsedTime); } int main() { printf("START\n"); printf("SERVICE: Preparing food...\n"); _sleep(1000); printf("SERVICE: Food ready\n"); // Starting cuda events, these help us calculating the time spend in the kernel cudaEvent_t start; cudaEvent_t end; cudaEventCreate(&start); cudaEventCreate(&end); // Three course dinner CPU_serving_dishes("starter", start, end); CPU_finish_eating_and_picking_up_plates(start, end); CPU_serving_dishes("main course", start, end); CPU_finish_eating_and_picking_up_plates(start, end); CPU_serving_dishes("dessert", start, end); CPU_finish_eating_and_picking_up_plates(start, end); printf("END\n"); return 0; }
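wait_gpu() above spins in a loop with an empty body, which an optimizing compiler is free to remove, so the measured serving and eating times may not scale as intended. A small sketch of a delay that cannot be elided because it reads the device cycle counter (the cycle count is an illustrative parameter, not taken from the original):

// Busy-waits for approximately `cycles` SM clock cycles.
__device__ void wait_cycles(long long cycles)
{
    long long start = clock64();
    while (clock64() - start < cycles) {
        /* spin: clock64() reads a hardware counter, so the loop has an
           observable effect and is not optimized away */
    }
}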
79cdb0cddc4f9ae2e02368c663ee33a564edcef3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <time.h> #include <assert.h> #ifdef RD_WG_SIZE_0_0 #define BLOCK_SIZE RD_WG_SIZE_0_0 #elif defined(RD_WG_SIZE_0) #define BLOCK_SIZE RD_WG_SIZE_0 #elif defined(RD_WG_SIZE) #define BLOCK_SIZE RD_WG_SIZE #else #define BLOCK_SIZE 16 #endif #define STR_SIZE 256 /* maximum power density possible (say 300W for a 10mm x 10mm chip) */ #define MAX_PD (3.0e6) /* required precision in degrees */ #define PRECISION 0.001 #define SPEC_HEAT_SI 1.75e6 #define K_SI 100 /* capacitance fitting factor */ #define FACTOR_CHIP 0.5 /* chip parameters */ float t_chip = 0.0005; float chip_height = 0.016; float chip_width = 0.016; /* ambient temperature, assuming no package at all */ float amb_temp = 80.0; void run(int argc, char** argv); /* define timer macros */ #define pin_stats_reset() startCycle() #define pin_stats_pause(cycles) stopCycle(cycles) #define pin_stats_dump(cycles) printf("timer: %Lu\n", cycles) void fatal(char *s) { fprintf(stderr, "error: %s\n", s); } void writeoutput(float *vect, int grid_rows, int grid_cols, char *file){ int i,j, index=0; FILE *fp; char str[STR_SIZE]; if( (fp = fopen(file, "w" )) == 0 ) printf( "The file was not opened\n" ); for (i=0; i < grid_rows; i++) for (j=0; j < grid_cols; j++) { sprintf(str, "%d\t%g\n", index, vect[i*grid_cols+j]); fputs(str,fp); index++; } fclose(fp); } void readinput(float *vect, int grid_rows, int grid_cols, char *file){ int i,j; FILE *fp; char str[STR_SIZE]; float val; if( (fp = fopen(file, "r" )) ==0 ) printf( "The file was not opened\n" ); for (i=0; i <= grid_rows-1; i++) for (j=0; j <= grid_cols-1; j++) { fgets(str, STR_SIZE, fp); if (feof(fp)) fatal("not enough lines in file"); //if ((sscanf(str, "%d%f", &index, &val) != 2) || (index != ((i-1)*(grid_cols-2)+j-1))) if ((sscanf(str, "%f", &val) != 1)) fatal("invalid file format"); vect[i*grid_cols+j] = val; } fclose(fp); } #define IN_RANGE(x, min, max) ((x)>=(min) && (x)<=(max)) #define CLAMP_RANGE(x, min, max) x = (x<(min)) ? min : ((x>(max)) ? max : x ) #define MIN(a, b) ((a)<=(b) ? (a) : (b)) __global__ void calculate_temp(int iteration, //number of iteration float *power, //power input float *temp_src, //temperature input/output float *temp_dst, //temperature input/output int grid_cols, //Col of grid int grid_rows, //Row of grid int border_cols, // border offset int border_rows, // border offset float Cap, //Capacitance float Rx, float Ry, float Rz, float step, float time_elapsed){ __shared__ float temp_on_cuda[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float power_on_cuda[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float temp_t[BLOCK_SIZE][BLOCK_SIZE]; // saving temparary temperature result float amb_temp = 80.0; float step_div_Cap; float Rx_1,Ry_1,Rz_1; int bx = blockIdx.x; int by = blockIdx.y; int tx=threadIdx.x; int ty=threadIdx.y; step_div_Cap=step/Cap; Rx_1=1/Rx; Ry_1=1/Ry; Rz_1=1/Rz; // each block finally computes result for a small block // after N iterations. 
// it is the non-overlapping small blocks that cover // all the input data // calculate the small block size int small_block_rows = BLOCK_SIZE-iteration*2;//EXPAND_RATE int small_block_cols = BLOCK_SIZE-iteration*2;//EXPAND_RATE // calculate the boundary for the block according to // the boundary of its small block int blkY = small_block_rows*by-border_rows; int blkX = small_block_cols*bx-border_cols; int blkYmax = blkY+BLOCK_SIZE-1; int blkXmax = blkX+BLOCK_SIZE-1; // calculate the global thread coordination int yidx = blkY+ty; int xidx = blkX+tx; // load data if it is within the valid input range int loadYidx=yidx, loadXidx=xidx; int index = grid_cols*loadYidx+loadXidx; if(IN_RANGE(loadYidx, 0, grid_rows-1) && IN_RANGE(loadXidx, 0, grid_cols-1)){ temp_on_cuda[ty][tx] = temp_src[index]; // Load the temperature data from global memory to shared memory power_on_cuda[ty][tx] = power[index];// Load the power data from global memory to shared memory } __syncthreads(); // effective range within this block that falls within // the valid range of the input data // used to rule out computation outside the boundary. int validYmin = (blkY < 0) ? -blkY : 0; int validYmax = (blkYmax > grid_rows-1) ? BLOCK_SIZE-1-(blkYmax-grid_rows+1) : BLOCK_SIZE-1; int validXmin = (blkX < 0) ? -blkX : 0; int validXmax = (blkXmax > grid_cols-1) ? BLOCK_SIZE-1-(blkXmax-grid_cols+1) : BLOCK_SIZE-1; int N = ty-1; int S = ty+1; int W = tx-1; int E = tx+1; N = (N < validYmin) ? validYmin : N; S = (S > validYmax) ? validYmax : S; W = (W < validXmin) ? validXmin : W; E = (E > validXmax) ? validXmax : E; float constant = 2.0; bool computed; for (int i=0; i<iteration ; i++){ computed = false; if( IN_RANGE(tx, i+1, BLOCK_SIZE-i-2) && \ IN_RANGE(ty, i+1, BLOCK_SIZE-i-2) && \ IN_RANGE(tx, validXmin, validXmax) && \ IN_RANGE(ty, validYmin, validYmax) ) { computed = true; temp_t[ty][tx] = temp_on_cuda[ty][tx] + step_div_Cap * (power_on_cuda[ty][tx] + (temp_on_cuda[S][tx] + temp_on_cuda[N][tx] - constant*temp_on_cuda[ty][tx]) * Ry_1 + (temp_on_cuda[ty][E] + temp_on_cuda[ty][W] - constant*temp_on_cuda[ty][tx]) * Rx_1 + (amb_temp - temp_on_cuda[ty][tx]) * Rz_1); } __syncthreads(); if(i==iteration-1) break; if(computed) //Assign the computation range temp_on_cuda[ty][tx]= temp_t[ty][tx]; __syncthreads(); } // update the global memory // after the last iteration, only threads coordinated within the // small block perform the calculation and switch on ``computed'' if (computed){ temp_dst[index]= temp_t[ty][tx]; } } /* compute N time steps */ int compute_tran_temp(float *MatrixPower,float *MatrixTemp[2], int col, int row, \ int total_iterations, int num_iterations, int blockCols, int blockRows, int borderCols, int borderRows) { dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 dimGrid(blockCols, blockRows); float grid_height = chip_height / row; float grid_width = chip_width / col; float Cap = FACTOR_CHIP * SPEC_HEAT_SI * t_chip * grid_width * grid_height; float Rx = grid_width / (2.0 * K_SI * t_chip * grid_height); float Ry = grid_height / (2.0 * K_SI * t_chip * grid_width); float Rz = t_chip / (K_SI * grid_height * grid_width); float max_slope = MAX_PD / (FACTOR_CHIP * t_chip * SPEC_HEAT_SI); float step = PRECISION / max_slope; float t; float time_elapsed; time_elapsed=0.001; int src = 1, dst = 0; for (t = 0; t < total_iterations; t+=num_iterations) { int temp = src; src = dst; dst = temp; hipLaunchKernelGGL(( calculate_temp), dim3(dimGrid), dim3(dimBlock), 0, 0, MIN(num_iterations, total_iterations-t), 
MatrixPower,MatrixTemp[src],MatrixTemp[dst],\ col,row,borderCols, borderRows, Cap,Rx,Ry,Rz,step,time_elapsed); } return dst; } void usage(int argc, char **argv) { fprintf(stderr, "Usage: %s <grid_rows/grid_cols> <pyramid_height> <sim_time> <temp_file> <power_file> <output_file>\n", argv[0]); fprintf(stderr, "\t<grid_rows/grid_cols> - number of rows/cols in the grid (positive integer)\n"); fprintf(stderr, "\t<pyramid_height> - pyramid heigh(positive integer)\n"); fprintf(stderr, "\t<sim_time> - number of iterations\n"); fprintf(stderr, "\t<temp_file> - name of the file containing the initial temperature values of each cell\n"); fprintf(stderr, "\t<power_file> - name of the file containing the dissipated power values of each cell\n"); fprintf(stderr, "\t<output_file> - name of the output file\n"); exit(1); } int main(int argc, char** argv) { printf("WG size of kernel = %d X %d\n", BLOCK_SIZE, BLOCK_SIZE); run(argc,argv); return EXIT_SUCCESS; } void run(int argc, char** argv) { int size; int grid_rows,grid_cols; float *FilesavingTemp,*FilesavingPower,*MatrixOut; char *tfile, *pfile, *ofile; int total_iterations = 60; int pyramid_height = 1; // number of iterations if (argc != 7) usage(argc, argv); if((grid_rows = atoi(argv[1]))<=0|| (grid_cols = atoi(argv[1]))<=0|| (pyramid_height = atoi(argv[2]))<=0|| (total_iterations = atoi(argv[3]))<=0) usage(argc, argv); tfile=argv[4]; pfile=argv[5]; ofile=argv[6]; size=grid_rows*grid_cols; /* --------------- pyramid parameters --------------- */ # define EXPAND_RATE 2// add one iteration will extend the pyramid base by 2 per each borderline int borderCols = (pyramid_height)*EXPAND_RATE/2; int borderRows = (pyramid_height)*EXPAND_RATE/2; int smallBlockCol = BLOCK_SIZE-(pyramid_height)*EXPAND_RATE; int smallBlockRow = BLOCK_SIZE-(pyramid_height)*EXPAND_RATE; int blockCols = grid_cols/smallBlockCol+((grid_cols%smallBlockCol==0)?0:1); int blockRows = grid_rows/smallBlockRow+((grid_rows%smallBlockRow==0)?0:1); FilesavingTemp = (float *) malloc(size*sizeof(float)); FilesavingPower = (float *) malloc(size*sizeof(float)); MatrixOut = (float *) calloc (size, sizeof(float)); if( !FilesavingPower || !FilesavingTemp || !MatrixOut) fatal("unable to allocate memory"); printf("pyramidHeight: %d\ngridSize: [%d, %d]\nborder:[%d, %d]\nblockGrid:[%d, %d]\ntargetBlock:[%d, %d]\n",\ pyramid_height, grid_cols, grid_rows, borderCols, borderRows, blockCols, blockRows, smallBlockCol, smallBlockRow); readinput(FilesavingTemp, grid_rows, grid_cols, tfile); readinput(FilesavingPower, grid_rows, grid_cols, pfile); float *MatrixTemp[2], *MatrixPower; hipMalloc((void**)&MatrixTemp[0], sizeof(float)*size); hipMalloc((void**)&MatrixTemp[1], sizeof(float)*size); hipMemcpy(MatrixTemp[0], FilesavingTemp, sizeof(float)*size, hipMemcpyHostToDevice); hipMalloc((void**)&MatrixPower, sizeof(float)*size); hipMemcpy(MatrixPower, FilesavingPower, sizeof(float)*size, hipMemcpyHostToDevice); printf("Start computing the transient temperature\n"); int ret = compute_tran_temp(MatrixPower,MatrixTemp,grid_cols,grid_rows, \ total_iterations,pyramid_height, blockCols, blockRows, borderCols, borderRows); printf("Ending simulation\n"); hipMemcpy(MatrixOut, MatrixTemp[ret], sizeof(float)*size, hipMemcpyDeviceToHost); writeoutput(MatrixOut,grid_rows, grid_cols, ofile); hipFree(MatrixPower); hipFree(MatrixTemp[0]); hipFree(MatrixTemp[1]); free(MatrixOut); }
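// ---------------------------------------------------------------------------
// Illustrative sketch (not taken from the benchmark above): the launch-geometry
// arithmetic behind hotspot's pyramid/temporal blocking. Each BLOCK_SIZE x
// BLOCK_SIZE thread block only produces a valid inner tile of
// BLOCK_SIZE - pyramid_height*EXPAND_RATE cells per launch, so the grid is
// sized against that smaller tile. All numeric values below are examples.
// ---------------------------------------------------------------------------
#include <cstdio>

int geometry_sketch() {
	const int BLOCK = 16;           // BLOCK_SIZE
	const int pyramid_height = 2;   // time steps fused per kernel launch
	const int EXPAND = 2;           // halo cells consumed per fused step
	const int grid_rows = 1024, grid_cols = 1024;

	int border     = pyramid_height * EXPAND / 2;                // halo on each side
	int smallBlock = BLOCK - pyramid_height * EXPAND;            // valid output tile edge
	int blockCols  = (grid_cols + smallBlock - 1) / smallBlock;  // ceil division
	int blockRows  = (grid_rows + smallBlock - 1) / smallBlock;

	printf("border=%d tile=%dx%d launch grid=%dx%d blocks\n",
	       border, smallBlock, smallBlock, blockCols, blockRows);
	return 0;
}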
79cdb0cddc4f9ae2e02368c663ee33a564edcef3.cu
#include <stdio.h> #include <stdlib.h> #include <time.h> #include <assert.h> #ifdef RD_WG_SIZE_0_0 #define BLOCK_SIZE RD_WG_SIZE_0_0 #elif defined(RD_WG_SIZE_0) #define BLOCK_SIZE RD_WG_SIZE_0 #elif defined(RD_WG_SIZE) #define BLOCK_SIZE RD_WG_SIZE #else #define BLOCK_SIZE 16 #endif #define STR_SIZE 256 /* maximum power density possible (say 300W for a 10mm x 10mm chip) */ #define MAX_PD (3.0e6) /* required precision in degrees */ #define PRECISION 0.001 #define SPEC_HEAT_SI 1.75e6 #define K_SI 100 /* capacitance fitting factor */ #define FACTOR_CHIP 0.5 /* chip parameters */ float t_chip = 0.0005; float chip_height = 0.016; float chip_width = 0.016; /* ambient temperature, assuming no package at all */ float amb_temp = 80.0; void run(int argc, char** argv); /* define timer macros */ #define pin_stats_reset() startCycle() #define pin_stats_pause(cycles) stopCycle(cycles) #define pin_stats_dump(cycles) printf("timer: %Lu\n", cycles) void fatal(char *s) { fprintf(stderr, "error: %s\n", s); } void writeoutput(float *vect, int grid_rows, int grid_cols, char *file){ int i,j, index=0; FILE *fp; char str[STR_SIZE]; if( (fp = fopen(file, "w" )) == 0 ) printf( "The file was not opened\n" ); for (i=0; i < grid_rows; i++) for (j=0; j < grid_cols; j++) { sprintf(str, "%d\t%g\n", index, vect[i*grid_cols+j]); fputs(str,fp); index++; } fclose(fp); } void readinput(float *vect, int grid_rows, int grid_cols, char *file){ int i,j; FILE *fp; char str[STR_SIZE]; float val; if( (fp = fopen(file, "r" )) ==0 ) printf( "The file was not opened\n" ); for (i=0; i <= grid_rows-1; i++) for (j=0; j <= grid_cols-1; j++) { fgets(str, STR_SIZE, fp); if (feof(fp)) fatal("not enough lines in file"); //if ((sscanf(str, "%d%f", &index, &val) != 2) || (index != ((i-1)*(grid_cols-2)+j-1))) if ((sscanf(str, "%f", &val) != 1)) fatal("invalid file format"); vect[i*grid_cols+j] = val; } fclose(fp); } #define IN_RANGE(x, min, max) ((x)>=(min) && (x)<=(max)) #define CLAMP_RANGE(x, min, max) x = (x<(min)) ? min : ((x>(max)) ? max : x ) #define MIN(a, b) ((a)<=(b) ? (a) : (b)) __global__ void calculate_temp(int iteration, //number of iteration float *power, //power input float *temp_src, //temperature input/output float *temp_dst, //temperature input/output int grid_cols, //Col of grid int grid_rows, //Row of grid int border_cols, // border offset int border_rows, // border offset float Cap, //Capacitance float Rx, float Ry, float Rz, float step, float time_elapsed){ __shared__ float temp_on_cuda[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float power_on_cuda[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float temp_t[BLOCK_SIZE][BLOCK_SIZE]; // saving temparary temperature result float amb_temp = 80.0; float step_div_Cap; float Rx_1,Ry_1,Rz_1; int bx = blockIdx.x; int by = blockIdx.y; int tx=threadIdx.x; int ty=threadIdx.y; step_div_Cap=step/Cap; Rx_1=1/Rx; Ry_1=1/Ry; Rz_1=1/Rz; // each block finally computes result for a small block // after N iterations. 
// it is the non-overlapping small blocks that cover // all the input data // calculate the small block size int small_block_rows = BLOCK_SIZE-iteration*2;//EXPAND_RATE int small_block_cols = BLOCK_SIZE-iteration*2;//EXPAND_RATE // calculate the boundary for the block according to // the boundary of its small block int blkY = small_block_rows*by-border_rows; int blkX = small_block_cols*bx-border_cols; int blkYmax = blkY+BLOCK_SIZE-1; int blkXmax = blkX+BLOCK_SIZE-1; // calculate the global thread coordination int yidx = blkY+ty; int xidx = blkX+tx; // load data if it is within the valid input range int loadYidx=yidx, loadXidx=xidx; int index = grid_cols*loadYidx+loadXidx; if(IN_RANGE(loadYidx, 0, grid_rows-1) && IN_RANGE(loadXidx, 0, grid_cols-1)){ temp_on_cuda[ty][tx] = temp_src[index]; // Load the temperature data from global memory to shared memory power_on_cuda[ty][tx] = power[index];// Load the power data from global memory to shared memory } __syncthreads(); // effective range within this block that falls within // the valid range of the input data // used to rule out computation outside the boundary. int validYmin = (blkY < 0) ? -blkY : 0; int validYmax = (blkYmax > grid_rows-1) ? BLOCK_SIZE-1-(blkYmax-grid_rows+1) : BLOCK_SIZE-1; int validXmin = (blkX < 0) ? -blkX : 0; int validXmax = (blkXmax > grid_cols-1) ? BLOCK_SIZE-1-(blkXmax-grid_cols+1) : BLOCK_SIZE-1; int N = ty-1; int S = ty+1; int W = tx-1; int E = tx+1; N = (N < validYmin) ? validYmin : N; S = (S > validYmax) ? validYmax : S; W = (W < validXmin) ? validXmin : W; E = (E > validXmax) ? validXmax : E; float constant = 2.0; bool computed; for (int i=0; i<iteration ; i++){ computed = false; if( IN_RANGE(tx, i+1, BLOCK_SIZE-i-2) && \ IN_RANGE(ty, i+1, BLOCK_SIZE-i-2) && \ IN_RANGE(tx, validXmin, validXmax) && \ IN_RANGE(ty, validYmin, validYmax) ) { computed = true; temp_t[ty][tx] = temp_on_cuda[ty][tx] + step_div_Cap * (power_on_cuda[ty][tx] + (temp_on_cuda[S][tx] + temp_on_cuda[N][tx] - constant*temp_on_cuda[ty][tx]) * Ry_1 + (temp_on_cuda[ty][E] + temp_on_cuda[ty][W] - constant*temp_on_cuda[ty][tx]) * Rx_1 + (amb_temp - temp_on_cuda[ty][tx]) * Rz_1); } __syncthreads(); if(i==iteration-1) break; if(computed) //Assign the computation range temp_on_cuda[ty][tx]= temp_t[ty][tx]; __syncthreads(); } // update the global memory // after the last iteration, only threads coordinated within the // small block perform the calculation and switch on ``computed'' if (computed){ temp_dst[index]= temp_t[ty][tx]; } } /* compute N time steps */ int compute_tran_temp(float *MatrixPower,float *MatrixTemp[2], int col, int row, \ int total_iterations, int num_iterations, int blockCols, int blockRows, int borderCols, int borderRows) { dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 dimGrid(blockCols, blockRows); float grid_height = chip_height / row; float grid_width = chip_width / col; float Cap = FACTOR_CHIP * SPEC_HEAT_SI * t_chip * grid_width * grid_height; float Rx = grid_width / (2.0 * K_SI * t_chip * grid_height); float Ry = grid_height / (2.0 * K_SI * t_chip * grid_width); float Rz = t_chip / (K_SI * grid_height * grid_width); float max_slope = MAX_PD / (FACTOR_CHIP * t_chip * SPEC_HEAT_SI); float step = PRECISION / max_slope; float t; float time_elapsed; time_elapsed=0.001; int src = 1, dst = 0; for (t = 0; t < total_iterations; t+=num_iterations) { int temp = src; src = dst; dst = temp; calculate_temp<<<dimGrid, dimBlock>>>(MIN(num_iterations, total_iterations-t), MatrixPower,MatrixTemp[src],MatrixTemp[dst],\ col,row,borderCols, 
borderRows, Cap,Rx,Ry,Rz,step,time_elapsed); } return dst; } void usage(int argc, char **argv) { fprintf(stderr, "Usage: %s <grid_rows/grid_cols> <pyramid_height> <sim_time> <temp_file> <power_file> <output_file>\n", argv[0]); fprintf(stderr, "\t<grid_rows/grid_cols> - number of rows/cols in the grid (positive integer)\n"); fprintf(stderr, "\t<pyramid_height> - pyramid heigh(positive integer)\n"); fprintf(stderr, "\t<sim_time> - number of iterations\n"); fprintf(stderr, "\t<temp_file> - name of the file containing the initial temperature values of each cell\n"); fprintf(stderr, "\t<power_file> - name of the file containing the dissipated power values of each cell\n"); fprintf(stderr, "\t<output_file> - name of the output file\n"); exit(1); } int main(int argc, char** argv) { printf("WG size of kernel = %d X %d\n", BLOCK_SIZE, BLOCK_SIZE); run(argc,argv); return EXIT_SUCCESS; } void run(int argc, char** argv) { int size; int grid_rows,grid_cols; float *FilesavingTemp,*FilesavingPower,*MatrixOut; char *tfile, *pfile, *ofile; int total_iterations = 60; int pyramid_height = 1; // number of iterations if (argc != 7) usage(argc, argv); if((grid_rows = atoi(argv[1]))<=0|| (grid_cols = atoi(argv[1]))<=0|| (pyramid_height = atoi(argv[2]))<=0|| (total_iterations = atoi(argv[3]))<=0) usage(argc, argv); tfile=argv[4]; pfile=argv[5]; ofile=argv[6]; size=grid_rows*grid_cols; /* --------------- pyramid parameters --------------- */ # define EXPAND_RATE 2// add one iteration will extend the pyramid base by 2 per each borderline int borderCols = (pyramid_height)*EXPAND_RATE/2; int borderRows = (pyramid_height)*EXPAND_RATE/2; int smallBlockCol = BLOCK_SIZE-(pyramid_height)*EXPAND_RATE; int smallBlockRow = BLOCK_SIZE-(pyramid_height)*EXPAND_RATE; int blockCols = grid_cols/smallBlockCol+((grid_cols%smallBlockCol==0)?0:1); int blockRows = grid_rows/smallBlockRow+((grid_rows%smallBlockRow==0)?0:1); FilesavingTemp = (float *) malloc(size*sizeof(float)); FilesavingPower = (float *) malloc(size*sizeof(float)); MatrixOut = (float *) calloc (size, sizeof(float)); if( !FilesavingPower || !FilesavingTemp || !MatrixOut) fatal("unable to allocate memory"); printf("pyramidHeight: %d\ngridSize: [%d, %d]\nborder:[%d, %d]\nblockGrid:[%d, %d]\ntargetBlock:[%d, %d]\n",\ pyramid_height, grid_cols, grid_rows, borderCols, borderRows, blockCols, blockRows, smallBlockCol, smallBlockRow); readinput(FilesavingTemp, grid_rows, grid_cols, tfile); readinput(FilesavingPower, grid_rows, grid_cols, pfile); float *MatrixTemp[2], *MatrixPower; cudaMalloc((void**)&MatrixTemp[0], sizeof(float)*size); cudaMalloc((void**)&MatrixTemp[1], sizeof(float)*size); cudaMemcpy(MatrixTemp[0], FilesavingTemp, sizeof(float)*size, cudaMemcpyHostToDevice); cudaMalloc((void**)&MatrixPower, sizeof(float)*size); cudaMemcpy(MatrixPower, FilesavingPower, sizeof(float)*size, cudaMemcpyHostToDevice); printf("Start computing the transient temperature\n"); int ret = compute_tran_temp(MatrixPower,MatrixTemp,grid_cols,grid_rows, \ total_iterations,pyramid_height, blockCols, blockRows, borderCols, borderRows); printf("Ending simulation\n"); cudaMemcpy(MatrixOut, MatrixTemp[ret], sizeof(float)*size, cudaMemcpyDeviceToHost); writeoutput(MatrixOut,grid_rows, grid_cols, ofile); cudaFree(MatrixPower); cudaFree(MatrixTemp[0]); cudaFree(MatrixTemp[1]); free(MatrixOut); }
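// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the benchmark): the only structural change
// between the .cu and .hip versions above is the kernel-launch syntax. hipify
// rewrites the CUDA triple-chevron launch into the hipLaunchKernelGGL macro
// (kernel, grid, block, dynamic shared-memory bytes, stream, kernel args...).
// The `scale` kernel below is made up for illustration.
// ---------------------------------------------------------------------------
__global__ void scale(float* x, float a, int n) {
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	if (i < n) x[i] *= a;
}

void launch_scale_cuda(float* d_x, float a, int n) {
	// CUDA syntax, as in the .cu file:
	scale<<<(n + 255) / 256, 256>>>(d_x, a, n);
	// After hipify, the .hip file expresses the same launch as:
	//   hipLaunchKernelGGL(scale, dim3((n + 255) / 256), dim3(256), 0, 0, d_x, a, n);
}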
80f80b1e254e9df6c3f47b5521fb1519745d33f6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2016, The Bifrost Authors. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of The Bifrost Authors nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <bifrost/fdmt.h> #include "assert.hpp" #include "utils.hpp" #include "workspace.hpp" #include "cuda.hpp" #include "trace.hpp" //#include <limits> #include <math_constants.h> // For CUDART_NAN_F #include <thrust/device_vector.h> #include <vector> #include <map> #include <string> #include <complex> // HACK TESTING #include <iostream> using std::cout; using std::endl; // Note: Can be tuned over block shape template<typename InType, typename OutType> __global__ void fdmt_init_kernel(int ntime, int nchan, int nbatch, bool reverse_band, bool reverse_time, int const* __restrict__ d_offsets, InType /*const* __restrict__*/ d_in, int istride, int ibatchstride, OutType* __restrict__ d_out, int ostride, int obatchstride) { int t0 = threadIdx.x + blockIdx.x*blockDim.x; int c0 = threadIdx.y + blockIdx.y*blockDim.y; int b0 = blockIdx.z; for( int b=b0; b<nbatch; b+=gridDim.z ) { for( int c=c0; c<nchan; c+=blockDim.y*gridDim.y ) { int offset = d_offsets[c]; int ndelay = d_offsets[c+1] - offset; for( int t=t0; t<ntime; t+=blockDim.x*gridDim.x ) { OutType tmp(0); for( int d=0; d<ndelay; ++d ) { // Note: This fills the unused elements with NaNs OutType outval(CUDART_NAN_F);//std::numeric_limits<OutType>::quiet_NaN()); if( t >= d ) { int c_ = reverse_band ? nchan-1 - c : c; int t_ = reverse_time ? 
ntime-1 - t : t; tmp += d_in[(t_-d) + istride*c_ + ibatchstride*b]; // TODO: Check effect of not-/using sqrt // The final paper has no sqrt (i.e., computation is just the mean) //outval = tmp * rsqrtf(d+1); outval = tmp * (1.f/(d+1)); } d_out[t + ostride*(offset+d) + obatchstride*b] = outval; } } } } } // Note: Can be tuned over block shape template<typename DType> __global__ void fdmt_exec_kernel(int ntime, int nrow, int nbatch, bool is_final_step, bool reverse_time, int const* __restrict__ d_delays, int2 const* __restrict__ d_srcrows, DType const* __restrict__ d_in, int istride, int ibatchstride, DType* __restrict__ d_out, int ostride, int obatchstride) { int t0 = threadIdx.x + blockIdx.x*blockDim.x; int r0 = threadIdx.y + blockIdx.y*blockDim.y; int b0 = blockIdx.z; for( int b=b0; b<nbatch; b+=gridDim.z ) { for( int r=r0; r<nrow; r+=blockDim.y*gridDim.y ) { int delay = d_delays[r]; int srcrow0 = d_srcrows[r].x; int srcrow1 = d_srcrows[r].y; for( int t=t0; t<ntime; t+=blockDim.x*gridDim.x ) { // Avoid elements that go unused due to diagonal reindexing if( is_final_step && t < r ) { //int ostride_ = ostride - reverse_time; //d_out[t + ostride_*r] = CUDART_NAN_F; continue; } // HACK TESTING ////if( ostride < ntime && t >= ntime-1 - r ) { //if( ostride != ntime && t < r ) { // int ostride_ = ostride - (ostride > ntime); // d_out[t + ostride_*r] = CUDART_NAN_F; // continue; //}// else if( ostride > ntime && t >= ntime - r ) { // //d_out[t - (ntime-1) + ostride*r] = CUDART_NAN_F; // continue; //} // Note: Non-existent rows are signified by -1 //if( t == 0 && r == 0 ) { // printf("t,srcrow0,srcrow1,istride = %i, %i, %i, %i\n", t, srcrow0, srcrow1, istride); //} //if( threadIdx.x == 63 && blockIdx.y == 4 ) { //printf("istride = %i, srcrow0 = %i, srcrow1 = %i, d_in = %p\n", istride, srcrow0, srcrow1, d_in); //} //if( t == 0 ) {// && r == 1 ) { // printf("istride = %i, srcrow0 = %i, srcrow1 = %i, d_in = %p\n", istride, srcrow0, srcrow1, d_in); //} DType outval = (srcrow0 != -1) ? d_in[ t + istride*srcrow0 + ibatchstride*b] : 0; if( t >= delay ) { outval += (srcrow1 != -1) ? d_in[(t-delay) + istride*srcrow1 + ibatchstride*b] : 0; } int t_ = (is_final_step && reverse_time) ? 
ntime-1 - t : t; d_out[t_ + ostride*r + obatchstride*b] = outval; } } } } template<typename InType, typename OutType> inline void launch_fdmt_init_kernel(int ntime, int nchan, int nbatch, bool reverse_band, bool reverse_time, //int const* d_ndelays, int const* d_offsets, InType /*const**/ d_in, int istride, int ibatchstride, OutType* d_out, int ostride, int obatchstride, hipStream_t stream=0) { dim3 block(256, 1); // TODO: Tune this dim3 grid(::min((ntime-1)/block.x+1, 65535u), ::min((nchan-1)/block.y+1, 65535u)); void* args[] = {&ntime, &nchan, &nbatch, &reverse_band, &reverse_time, &d_offsets, &d_in, &istride, &ibatchstride, &d_out, &ostride, &obatchstride}; BF_CHECK_CUDA_EXCEPTION( cudaLaunchKernel((void*)fdmt_init_kernel<InType,OutType>, grid, block, &args[0], 0, stream), BF_STATUS_INTERNAL_ERROR); } template<typename DType> inline void launch_fdmt_exec_kernel(int ntime, int nrow, int nbatch, bool is_final_step, bool reverse_time, int const* d_delays, int2 const* d_srcrows, DType const* d_in, int istride, int ibatchstride, DType* d_out, int ostride, int obatchstride, hipStream_t stream=0) { //cout << "LAUNCH " << d_in << ", " << d_out << endl; dim3 block(256, 1); // TODO: Tune this dim3 grid(::min((ntime-1)/block.x+1, 65535u), ::min((nrow -1)/block.y+1, 65535u)); void* args[] = {&ntime, &nrow, &nbatch, &is_final_step, &reverse_time, &d_delays, &d_srcrows, &d_in, &istride, &ibatchstride, &d_out, &ostride, &obatchstride}; BF_CHECK_CUDA_EXCEPTION( cudaLaunchKernel((void*)fdmt_exec_kernel<DType>, grid, block, &args[0], 0, stream), BF_STATUS_INTERNAL_ERROR); } /* **** 4096 **** 4096 **** 2048 **** 1066 **** 650 **** 475 **** 381 **** 337 **** 316 **** 302 **** 299 **** 295 **** 293 SB 3 delay 135 Step 10 prev: 58, 78 srcs: 57, 78 NROW_MAX = 4096 STEP 1 STEP 2 STEP 3 STEP 4 STEP 5 STEP 6 STEP 7 STEP 8 STEP 9 STEP 10 STEP 11 */ class BFfdmt_impl { typedef int IType; typedef double FType; typedef int2 IndexPair; public: // HACK WAR for what looks like a bug in the CUDA 7.0 compiler typedef float DType; private: IType _nchan; IType _max_delay; FType _f0; FType _df; FType _exponent; IType _nrow_max; IType _plan_stride; IType _buffer_stride; IType _batch_stride; std::vector<IType> _offsets; std::vector<std::vector<IndexPair> > _step_srcrows; std::vector<std::vector<IType> > _step_delays; IType* _d_offsets; IndexPair* _d_step_srcrows; IType* _d_step_delays; DType* _d_buffer_a; DType* _d_buffer_b; Workspace _plan_storage; Workspace _exec_storage; // TODO: Use something other than Thrust thrust::device_vector<char> _dv_plan_storage; thrust::device_vector<char> _dv_exec_storage; hipStream_t _stream; bool _reverse_band; FType cfreq(IType chan) { return _f0 + _df*chan; } FType rel_delay(FType flo, FType fhi, FType fmin, FType fmax) { FType g = _exponent; // Note: We use complex math in order to support negative frequencies // (the result is real regardless). 
std::complex<FType> c_flo=flo, c_fhi=fhi, c_fmin=fmin, c_fmax=fmax; std::complex<FType> numer = ::pow(c_flo, g) - ::pow(c_fhi, g); std::complex<FType> denom = ::pow(c_fmin, g) - ::pow(c_fmax, g); FType eps = std::numeric_limits<FType>::epsilon(); if( std::norm(denom) < eps*eps ) { // Note: The only time I've seen this fail is when nchan==1 BF_ASSERT_EXCEPTION(std::norm(numer) < eps*eps, BF_STATUS_INTERNAL_ERROR); return 0; } std::complex<FType> result = numer / denom; BF_ASSERT_EXCEPTION(std::abs(result.imag()) <= eps, BF_STATUS_INTERNAL_ERROR); return result.real(); } FType rel_delay(FType flo, FType fhi) { FType fmin = cfreq(0); FType fmax = cfreq(_nchan-1); //std::swap(fmin, fmax); //FType fmax = cfreq(_nchan); // HACK TESTING return rel_delay(flo, fhi, fmin, fmax); } IType subband_ndelay(FType f0, FType df) { FType fracdelay = rel_delay(f0, f0+df); FType fmaxdelay = fracdelay*(_max_delay-1); IType ndelay = IType(::ceil(fmaxdelay)) + 1; return ndelay; } public: BFfdmt_impl() : _nchan(0), _max_delay(0), _f0(0), _df(0), _exponent(0), _stream(g_cuda_stream) {} inline IType nchan() const { return _nchan; } inline IType max_delay() const { return _max_delay; } void init(IType nchan, IType max_delay, FType f0, FType df, FType exponent) { BF_TRACE(); if( df < 0. ) { _reverse_band = true; f0 += (nchan-1)*df; df *= -1; } else { _reverse_band = false; } if( nchan == _nchan && max_delay == _max_delay && f0 == _f0 && df == _df && exponent == _exponent ) { return; } _f0 = f0; _df = df; _nchan = nchan; _max_delay = max_delay; _exponent = exponent; // Note: Initialized with 1 entry as dummy for initialization step std::vector<std::vector<IndexPair> > step_subband_parents(1); IType nsubband = _nchan; while( nsubband > 1 ) { IType step = step_subband_parents.size(); step_subband_parents.push_back(std::vector<IndexPair>()); for( IType sb=0; sb<nsubband; sb+=2 ) { IType parent0 = sb; IType parent1 = sb+1; if( nsubband % 2 ) { // Note: Alternating left/right-biased merging scheme if( (step-1) % 2 ) { parent0 -= 1; // Note: First entry becomes -1 => non-existent parent1 -= 1; } else { // Note: Last entry becomes -1 => non-existent if( parent1 == nsubband ) parent1 = -1; } } //cout << step << ": " << parent0 << ", " << parent1 << endl; IndexPair parents = make_int2(parent0, parent1); step_subband_parents[step].push_back(parents); } nsubband = step_subband_parents[step].size(); } // Note: Includes initialization step IType nstep = step_subband_parents.size(); std::vector<std::vector<IType> > step_subband_nchans(nstep); step_subband_nchans[0].assign(_nchan, 1); for( IType step=1; step<nstep; ++step ) { IType nsubband = step_subband_parents[step].size(); step_subband_nchans[step].resize(nsubband); for( IType sb=0; sb<nsubband; ++sb ) { IndexPair parents = step_subband_parents[step][sb]; IType p0 = parents.x;//first; IType p1 = parents.y;//second; IType parent0_nchan = (p0!=-1) ? step_subband_nchans[step-1][p0] : 0; IType parent1_nchan = (p1!=-1) ? 
step_subband_nchans[step-1][p1] : 0; IType child_nchan = parent0_nchan + parent1_nchan; step_subband_nchans[step][sb] = child_nchan; } } std::vector<std::vector<IType> > step_subband_chan_offsets(nstep); std::vector<std::vector<IType> > step_subband_row_offsets(nstep); IType nrow_max = 0; for( IType step=0; step<nstep; ++step ) { IType nsubband = step_subband_nchans[step].size(); // Note: +1 to store the total in the last element // (The array will hold a complete exclusive scan) step_subband_chan_offsets[step].resize(nsubband+1); step_subband_row_offsets[step].resize(nsubband+1); IType chan0 = 0; IType row_offset = 0; for( IType sb=0; sb<nsubband; ++sb ) { IType nchan = step_subband_nchans[step][sb]; FType f0 = cfreq(chan0) - (step == 0 ? 0.5*_df : 0.); //FType f0 = cfreq(chan0); // HACK TESTING FType df = _df * (step == 0 ? 1 : nchan-1); //FType df = _df * nchan; // HACK TESTING //cout << "df = " << df << endl; IType ndelay = subband_ndelay(f0, df); //cout << "NDELAY = " << ndelay << endl; step_subband_chan_offsets[step][sb] = chan0; step_subband_row_offsets[step][sb] = row_offset; chan0 += nchan; row_offset += ndelay; } step_subband_chan_offsets[step][nsubband] = chan0; step_subband_row_offsets[step][nsubband] = row_offset; nrow_max = ::max(nrow_max, row_offset); //*cout << "**** Nrow: " << row_offset << endl; } // Save for use during initialization //plan->_init_subband_row_offsets = step_subband_row_offsets[0]; _offsets = step_subband_row_offsets[0]; _nrow_max = nrow_max; //cout << "**** " << _nrow_max << endl; // Note: First entry in these remains empty std::vector<std::vector<IndexPair> > step_srcrows(nstep); std::vector<std::vector<IType> > step_delays(nstep); for( IType step=1; step<nstep; ++step ) { IType nsubband = step_subband_nchans[step].size(); IType nrow = step_subband_row_offsets[step][nsubband]; //*cout << "nrow " << nrow << endl; step_srcrows[step].resize(nrow); step_delays[step].resize(nrow); for( IType sb=0; sb<nsubband; ++sb ) { IndexPair parents = step_subband_parents[step][sb]; IType p0 = parents.x;//first; IType p1 = parents.y;//second; // TODO: Setting these to 1 instead of 0 in the exceptional case fixed some indexing // issues, but should double-check that the results are good. IType p0_nchan = (p0!=-1) ? step_subband_nchans[step-1][p0] : 1; IType p1_nchan = (p1!=-1) ? step_subband_nchans[step-1][p1] : 1; // Note: If first parent doesn't exist, then it effectively starts where the second parent starts // If second parent doesn't exist, then it effectively starts where the first parent ends IType p0_chan0 = step_subband_chan_offsets[step-1][(p0!=-1) ? p0 : p1]; IType p1_chan0 = step_subband_chan_offsets[step-1][(p1!=-1) ? 
p1 : p0]; if( p1 == -1 ) { p1_chan0 += (p0_nchan-1); } FType flo = cfreq(p0_chan0); FType fmidlo = cfreq(p0_chan0 + (p0_nchan-1)); FType fmidhi = cfreq(p1_chan0); FType fhi = cfreq(p1_chan0 + (p1_nchan-1)); FType cmidlo = rel_delay(flo, fmidlo, flo, fhi); FType cmidhi = rel_delay(flo, fmidhi, flo, fhi); /* // HACK TESTING FType flo = cfreq(p0_chan0) - 0.5*_df; FType fmidlo = flo + (p0_nchan-1)*_df; FType fmidhi = flo + p0_nchan*_df; FType fhi = flo + (p0_nchan + p1_nchan - 1)*_df; FType cmidlo = rel_delay(fmidlo, flo, fhi, flo); FType cmidhi = rel_delay(fmidhi, flo, fhi, flo); */ //cout << p0 << ", " << p1 << endl; //cout << p0_chan0 << ", " << p0_nchan << "; " << p1_chan0 << ", " << p1_nchan << endl; //cout << cmidlo << ", " << cmidhi << endl; // TODO: See if should use same approach with these as in fdmt.py IType beg = step_subband_row_offsets[step][sb]; IType end = step_subband_row_offsets[step][sb+1]; IType ndelay = end - beg; for( IType delay=0; delay<ndelay; ++delay ) { IType dmidlo = (IType)::round(delay*cmidlo); IType dmidhi = (IType)::round(delay*cmidhi); IType drest = delay - dmidhi; assert( dmidlo <= delay ); assert( dmidhi <= delay ); IType prev_beg = (p0!=-1) ? step_subband_row_offsets[step-1][p0] : -1; IType prev_mid0 = (p0!=-1) ? step_subband_row_offsets[step-1][p0+1] : -1; IType prev_mid1 = (p1!=-1) ? step_subband_row_offsets[step-1][p1] : -1; IType prev_end = (p1!=-1) ? step_subband_row_offsets[step-1][p1+1] : -1; // HACK WAR for strange indexing error observed only when nchan=4096 if( p1 != -1 && drest >= prev_end - prev_mid1 ) { drest -= 1; } if( (p0 != -1 && dmidlo >= prev_mid0 - prev_beg) || (p1 != -1 && drest >= prev_end - prev_mid1) ) { cout << "FDMT DEBUGGING INFO" << endl; cout << "SB " << sb << endl; cout << "delay " << delay << endl; cout << "Step " << step << " prev: " << prev_mid0 - prev_beg << ", " << prev_end - prev_mid1 << endl; cout << " srcs: " << dmidlo << ", " << drest << endl; } assert( p0 == -1 || dmidlo < prev_mid0 - prev_beg ); assert( p1 == -1 || drest < prev_end - prev_mid1 ); IType dst_row = step_subband_row_offsets[step ][sb] + delay; IType src_row0 = (p0!=-1) ? step_subband_row_offsets[step-1][p0] + dmidlo : -1; IType src_row1 = (p1!=-1) ? 
step_subband_row_offsets[step-1][p1] + drest : -1; step_srcrows[step][dst_row].x = src_row0;//first = src_row0; //cout << "step " << step << ", dst_row = " << dst_row << ", delay = " << dmidhi << ", src_row0 = " << src_row0 << ", src_row1 = " << src_row1 << endl; step_srcrows[step][dst_row].y = src_row1;//second = src_row1; step_delays[step][dst_row] = dmidhi; //IType prev_nsubband = step_subband_nchans[step-1].size(); //IType prev_nrow = step_subband_row_offsets[step-1][prev_nsubband]; } } } // Save for use during execution _step_srcrows = step_srcrows; _step_delays = step_delays; } bool init_plan_storage(void* storage_ptr, BFsize* storage_size) { BF_TRACE(); BF_TRACE_STREAM(_stream); enum { ALIGNMENT_BYTES = 512, ALIGNMENT_ELMTS = ALIGNMENT_BYTES / sizeof(int) }; Workspace workspace(ALIGNMENT_BYTES); _plan_stride = round_up(_nrow_max, ALIGNMENT_ELMTS); //int nstep_execute = _step_delays.size() - 1; int nstep = _step_delays.size(); workspace.reserve(_nchan+1, &_d_offsets); workspace.reserve(nstep*_plan_stride, &_d_step_srcrows); workspace.reserve(nstep*_plan_stride, &_d_step_delays); if( storage_size ) { if( !storage_ptr ) { // Return required storage size *storage_size = workspace.size(); return false; } else { BF_ASSERT_EXCEPTION(*storage_size >= workspace.size(), BF_STATUS_INSUFFICIENT_STORAGE); } } else { // Auto-allocate storage BF_ASSERT_EXCEPTION(!storage_ptr, BF_STATUS_INVALID_ARGUMENT); _dv_plan_storage.resize(workspace.size()); storage_ptr = thrust::raw_pointer_cast(&_dv_plan_storage[0]); } //std::cout << "workspace.size() = " << workspace.size() << std::endl; //_d_offsets = (IType*)0x123; //std::cout << "_d_offsets = " << _d_offsets << std::endl; //std::cout << "storage_ptr = " << storage_ptr << std::endl; workspace.commit(storage_ptr); //std::cout << "_d_offsets = " << _d_offsets << std::endl; BF_CHECK_CUDA_EXCEPTION( hipMemcpyAsync(_d_offsets, &_offsets[0], sizeof(int )*_offsets.size(), hipMemcpyHostToDevice, _stream), BF_STATUS_MEM_OP_FAILED ); for( int step=0; step<nstep; ++step ) { BF_CHECK_CUDA_EXCEPTION( hipMemcpyAsync(_d_step_srcrows + step*_plan_stride, &_step_srcrows[step][0], sizeof(int2)*_step_srcrows[step].size(), hipMemcpyHostToDevice, _stream), BF_STATUS_MEM_OP_FAILED ); BF_CHECK_CUDA_EXCEPTION( hipMemcpyAsync(_d_step_delays + step*_plan_stride, &_step_delays[step][0], sizeof(int)*_step_delays[step].size(), hipMemcpyHostToDevice, _stream), BF_STATUS_MEM_OP_FAILED ); } BF_CHECK_CUDA_EXCEPTION( hipStreamSynchronize(_stream), BF_STATUS_DEVICE_ERROR ); return true; } bool init_exec_storage(void* storage_ptr, BFsize* storage_size, size_t ntime, size_t nbatch) { BF_TRACE(); enum { ALIGNMENT_BYTES = 512, ALIGNMENT_ELMTS = ALIGNMENT_BYTES / sizeof(DType) }; Workspace workspace(ALIGNMENT_BYTES); //std::cout << "ntime = " << ntime << std::endl; //std::cout << "_nrow_max = " << _nrow_max << std::endl; _buffer_stride = round_up(ntime, ALIGNMENT_ELMTS); _batch_stride = _nrow_max*_buffer_stride; //std::cout << "_buffer_stride = " << _buffer_stride << std::endl; // TODO: Check if truly safe to allocate smaller buffer_b workspace.reserve(nbatch*_batch_stride, &_d_buffer_a); workspace.reserve(nbatch*_batch_stride, &_d_buffer_b); if( storage_size ) { if( !storage_ptr ) { //cout << "++++ returning storage size" << endl; // Return required storage size *storage_size = workspace.size(); return false; } else { //cout << "++++ using user storage" << endl; BF_ASSERT_EXCEPTION(*storage_size >= workspace.size(), BF_STATUS_INSUFFICIENT_STORAGE); } } else { //cout << "++++ auto-allocating 
storage" << endl; // Auto-allocate storage BF_ASSERT_EXCEPTION(!storage_ptr, BF_STATUS_INVALID_ARGUMENT); _dv_exec_storage.resize(workspace.size()); storage_ptr = thrust::raw_pointer_cast(&_dv_exec_storage[0]); //std::cout << "*** exec storage_ptr = " << storage_ptr << std::endl; } //cout << "++++ committing" << endl; workspace.commit(storage_ptr); return true; } void execute(BFarray const* in, BFarray const* out, size_t ntime, size_t nbatch, bool negative_delays) { BF_TRACE(); BF_TRACE_STREAM(_stream); BF_ASSERT_EXCEPTION(out->dtype == BF_DTYPE_F32, BF_STATUS_UNSUPPORTED_DTYPE); BF_ASSERT_EXCEPTION( out->strides[in->ndim-1] == 4, BF_STATUS_UNSUPPORTED_STRIDE); int ndim = in->ndim; DType* d_ibuf = _d_buffer_b; DType* d_obuf = _d_buffer_a; BF_ASSERT_EXCEPTION(in->strides[ndim-2] % in->strides[ndim-1] == 0, BF_STATUS_UNSUPPORTED_STRIDE); BF_ASSERT_EXCEPTION(out->strides[ndim-2] % out->strides[ndim-1] == 0, BF_STATUS_UNSUPPORTED_STRIDE); size_t istride = in->strides[ndim-2] / in->strides[ndim-1]; size_t ostride = out->strides[ndim-2] / out->strides[ndim-1]; BF_ASSERT_EXCEPTION( in->strides[in->ndim-2] > 0, BF_STATUS_UNSUPPORTED_STRIDE); BF_ASSERT_EXCEPTION(out->strides[in->ndim-2] > 0, BF_STATUS_UNSUPPORTED_STRIDE); size_t ibatchstride = 0; size_t obatchstride = 0; if( in->ndim == 3 ) { BF_ASSERT_EXCEPTION(in->strides[ndim-3] % in->strides[ndim-1] == 0, BF_STATUS_UNSUPPORTED_STRIDE); BF_ASSERT_EXCEPTION(out->strides[ndim-3] % out->strides[ndim-1] == 0, BF_STATUS_UNSUPPORTED_STRIDE); ibatchstride = in->strides[ndim-3] / in->strides[ndim-1]; obatchstride = out->strides[ndim-3] / out->strides[ndim-1]; } BF_ASSERT_EXCEPTION(in->strides[ndim-1] == BF_DTYPE_NBYTE(in->dtype), BF_STATUS_UNSUPPORTED_STRIDE); //bool reverse_time = (in->strides[in->ndim-1] < 0); bool reverse_time = negative_delays; BF_CHECK_CUDA_EXCEPTION(hipGetLastError(), BF_STATUS_INTERNAL_ERROR); #define LAUNCH_FDMT_INIT_KERNEL(IterType) \ launch_fdmt_init_kernel(ntime, _nchan, nbatch, \ _reverse_band, reverse_time, \ _d_offsets, \ (IterType)in->data, istride, ibatchstride, \ d_obuf, _buffer_stride, _batch_stride, \ _stream) switch( in->dtype ) { // HACK testing disabled // TODO: Get NbitReader working //case BF_DTYPE_I1: LAUNCH_FDMT_INIT_KERNEL(NbitReader<1>); break; //case BF_DTYPE_I2: LAUNCH_FDMT_INIT_KERNEL(NbitReader<2>); break; //case BF_DTYPE_I4: LAUNCH_FDMT_INIT_KERNEL(NbitReader<4>); break; case BF_DTYPE_I8: LAUNCH_FDMT_INIT_KERNEL(const int8_t*); break; case BF_DTYPE_I16: LAUNCH_FDMT_INIT_KERNEL(const int16_t*); break; case BF_DTYPE_I32: LAUNCH_FDMT_INIT_KERNEL(const int32_t*); break; case BF_DTYPE_U8: LAUNCH_FDMT_INIT_KERNEL(const uint8_t*); break; case BF_DTYPE_U16: LAUNCH_FDMT_INIT_KERNEL(const uint16_t*); break; case BF_DTYPE_U32: LAUNCH_FDMT_INIT_KERNEL(const uint32_t*); break; case BF_DTYPE_F32: LAUNCH_FDMT_INIT_KERNEL(const float*); break; default: BF_ASSERT_EXCEPTION(false, BF_STATUS_UNSUPPORTED_DTYPE); } #undef LAUNCH_FDMT_INIT_KERNEL BF_CHECK_CUDA_EXCEPTION(hipGetLastError(), BF_STATUS_INTERNAL_ERROR); std::swap(d_ibuf, d_obuf); size_t ostride_cur = _buffer_stride; size_t obatchstride_cur = _batch_stride; IType nstep = _step_delays.size(); for( int step=1; step<nstep; ++step ) { //cout << "STEP " << step << endl; IType nrow = _step_srcrows[step].size(); //cout << "nrow " << nrow << endl; if( step == nstep-1 ) { d_obuf = (DType*)out->data; ostride_cur = ostride; // HACK TESTING diagonal reindexing to align output with TOA at highest freq ostride_cur += reverse_time ? 
+1 : -1; obatchstride_cur = obatchstride; } launch_fdmt_exec_kernel(ntime, nrow, nbatch, (step==nstep-1), reverse_time, _d_step_delays + step*_plan_stride, _d_step_srcrows + step*_plan_stride, d_ibuf, _buffer_stride, _batch_stride, d_obuf, ostride_cur, obatchstride_cur, _stream); std::swap(d_ibuf, d_obuf); } BF_CHECK_CUDA_EXCEPTION(hipGetLastError(), BF_STATUS_INTERNAL_ERROR); } void set_stream(hipStream_t stream) { _stream = stream; } }; BFstatus bfFdmtCreate(BFfdmt* plan_ptr) { BF_TRACE(); BF_ASSERT(plan_ptr, BF_STATUS_INVALID_POINTER); BF_TRY_RETURN_ELSE(*plan_ptr = new BFfdmt_impl(), *plan_ptr = 0); } // **TODO: Passing 'BFarray const* in' here could replace nchan, f0, df and space if BFarray included dimension scales // Also, could potentially set the output dimension scales (dm0, ddm) // OR, could just leave these to higher-level wrappers (e.g., Python) // This might be for the best in the short term BFstatus bfFdmtInit(BFfdmt plan, BFsize nchan, BFsize max_delay, double f0, double df, double exponent, BFspace space, void* plan_storage, BFsize* plan_storage_size) { BF_TRACE(); BF_ASSERT(plan, BF_STATUS_INVALID_HANDLE); // TODO: Is there any sensible/natural way to handle nchan==1? BF_ASSERT(nchan > 1, BF_STATUS_INVALID_ARGUMENT); BF_ASSERT(space_accessible_from(space, BF_SPACE_CUDA), BF_STATUS_UNSUPPORTED_SPACE); BF_TRY(plan->init(nchan, max_delay, f0, df, exponent)); BF_TRY_RETURN(plan->init_plan_storage(plan_storage, plan_storage_size)); } BFstatus bfFdmtSetStream(BFfdmt plan, void const* stream) { BF_TRACE(); BF_ASSERT(plan, BF_STATUS_INVALID_HANDLE); BF_ASSERT(stream, BF_STATUS_INVALID_POINTER); BF_TRY_RETURN(plan->set_stream(*(hipStream_t*)stream)); } BFstatus bfFdmtExecute(BFfdmt plan, BFarray const* in, BFarray const* out, BFbool negative_delays, void* exec_storage, BFsize* exec_storage_size) { BF_TRACE(); BF_ASSERT(plan, BF_STATUS_INVALID_HANDLE); BF_ASSERT(in, BF_STATUS_INVALID_POINTER); BF_ASSERT(out, BF_STATUS_INVALID_POINTER); BF_ASSERT(in->ndim == out->ndim, BF_STATUS_INVALID_SHAPE); BF_ASSERT( in->shape[ in->ndim-2] == plan->nchan(), BF_STATUS_INVALID_SHAPE); BF_ASSERT(out->shape[out->ndim-2] == plan->max_delay(), BF_STATUS_INVALID_SHAPE); BF_ASSERT( in->shape[in->ndim-1] == out->shape[out->ndim-1], BF_STATUS_INVALID_SHAPE); // TODO: BF_ASSERT(...); int ndim = in->ndim; size_t ntime = in->shape[in->ndim-1]; size_t nbatch = 1; BFarray out_flattened, in_flattened; // Handle batch dims if( ndim > 2 ) { // Keep the last 3 dims but attempt to flatten all others unsigned long keep_dims_mask = 0x7 << (ndim-3); keep_dims_mask |= padded_dims_mask(out); keep_dims_mask |= padded_dims_mask(in); flatten(out, &out_flattened, keep_dims_mask); flatten(in, &in_flattened, keep_dims_mask); out = &out_flattened; in = &in_flattened; BF_ASSERT(in_flattened.ndim == out_flattened.ndim, BF_STATUS_INTERNAL_ERROR); // TODO: Use streams to support multiple non-contiguous batch dims // (Like in linalg.cu) BF_ASSERT(in_flattened.ndim == 3, BF_STATUS_UNSUPPORTED_SHAPE); BF_ASSERT_EXCEPTION(in_flattened.shape[0] == out_flattened.shape[0], BF_STATUS_INVALID_SHAPE); nbatch = in->shape[0]; } bool ready; BF_TRY(ready = plan->init_exec_storage(exec_storage, exec_storage_size, ntime, nbatch)); if( !ready ) { // Just requesting exec_storage_size, not ready to execute yet return BF_STATUS_SUCCESS; } BF_ASSERT(space_accessible_from( in->space, BF_SPACE_CUDA), BF_STATUS_UNSUPPORTED_SPACE); BF_ASSERT(space_accessible_from(out->space, BF_SPACE_CUDA), BF_STATUS_UNSUPPORTED_SPACE); BF_TRY_RETURN(plan->execute(in, 
out, ntime, nbatch, negative_delays)); } BFstatus bfFdmtDestroy(BFfdmt plan) { BF_TRACE(); BF_ASSERT(plan, BF_STATUS_INVALID_HANDLE); delete plan; return BF_STATUS_SUCCESS; }
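// ---------------------------------------------------------------------------
// Illustrative sketch (not part of Bifrost): the frequency arithmetic behind
// BFfdmt_impl::rel_delay is the fraction of the whole-band dispersion delay
// contributed by a sub-band, (flo^g - fhi^g) / (fmin^g - fmax^g), where g is
// the exponent passed to bfFdmtInit (g = -2 for the usual cold-plasma
// dispersion law). The scalar version below assumes positive frequencies; the
// library uses complex pow so that negative frequencies are also handled.
// ---------------------------------------------------------------------------
#include <cmath>
#include <cstdio>

double rel_delay_sketch(double flo, double fhi, double fmin, double fmax, double g) {
	return (std::pow(flo, g) - std::pow(fhi, g)) /
	       (std::pow(fmin, g) - std::pow(fmax, g));
}

int rel_delay_demo() {
	const double fmin = 1200.0, fmax = 1500.0;  // example band edges in MHz
	const double g = -2.0;
	// Fraction of the full-band delay spanned by the lower half of the band:
	printf("%f\n", rel_delay_sketch(fmin, 1350.0, fmin, fmax, g));
	return 0;
}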
80f80b1e254e9df6c3f47b5521fb1519745d33f6.cu
/* * Copyright (c) 2016, The Bifrost Authors. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of The Bifrost Authors nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <bifrost/fdmt.h> #include "assert.hpp" #include "utils.hpp" #include "workspace.hpp" #include "cuda.hpp" #include "trace.hpp" //#include <limits> #include <math_constants.h> // For CUDART_NAN_F #include <thrust/device_vector.h> #include <vector> #include <map> #include <string> #include <complex> // HACK TESTING #include <iostream> using std::cout; using std::endl; // Note: Can be tuned over block shape template<typename InType, typename OutType> __global__ void fdmt_init_kernel(int ntime, int nchan, int nbatch, bool reverse_band, bool reverse_time, int const* __restrict__ d_offsets, InType /*const* __restrict__*/ d_in, int istride, int ibatchstride, OutType* __restrict__ d_out, int ostride, int obatchstride) { int t0 = threadIdx.x + blockIdx.x*blockDim.x; int c0 = threadIdx.y + blockIdx.y*blockDim.y; int b0 = blockIdx.z; for( int b=b0; b<nbatch; b+=gridDim.z ) { for( int c=c0; c<nchan; c+=blockDim.y*gridDim.y ) { int offset = d_offsets[c]; int ndelay = d_offsets[c+1] - offset; for( int t=t0; t<ntime; t+=blockDim.x*gridDim.x ) { OutType tmp(0); for( int d=0; d<ndelay; ++d ) { // Note: This fills the unused elements with NaNs OutType outval(CUDART_NAN_F);//std::numeric_limits<OutType>::quiet_NaN()); if( t >= d ) { int c_ = reverse_band ? nchan-1 - c : c; int t_ = reverse_time ? 
ntime-1 - t : t; tmp += d_in[(t_-d) + istride*c_ + ibatchstride*b]; // TODO: Check effect of not-/using sqrt // The final paper has no sqrt (i.e., computation is just the mean) //outval = tmp * rsqrtf(d+1); outval = tmp * (1.f/(d+1)); } d_out[t + ostride*(offset+d) + obatchstride*b] = outval; } } } } } // Note: Can be tuned over block shape template<typename DType> __global__ void fdmt_exec_kernel(int ntime, int nrow, int nbatch, bool is_final_step, bool reverse_time, int const* __restrict__ d_delays, int2 const* __restrict__ d_srcrows, DType const* __restrict__ d_in, int istride, int ibatchstride, DType* __restrict__ d_out, int ostride, int obatchstride) { int t0 = threadIdx.x + blockIdx.x*blockDim.x; int r0 = threadIdx.y + blockIdx.y*blockDim.y; int b0 = blockIdx.z; for( int b=b0; b<nbatch; b+=gridDim.z ) { for( int r=r0; r<nrow; r+=blockDim.y*gridDim.y ) { int delay = d_delays[r]; int srcrow0 = d_srcrows[r].x; int srcrow1 = d_srcrows[r].y; for( int t=t0; t<ntime; t+=blockDim.x*gridDim.x ) { // Avoid elements that go unused due to diagonal reindexing if( is_final_step && t < r ) { //int ostride_ = ostride - reverse_time; //d_out[t + ostride_*r] = CUDART_NAN_F; continue; } // HACK TESTING ////if( ostride < ntime && t >= ntime-1 - r ) { //if( ostride != ntime && t < r ) { // int ostride_ = ostride - (ostride > ntime); // d_out[t + ostride_*r] = CUDART_NAN_F; // continue; //}// else if( ostride > ntime && t >= ntime - r ) { // //d_out[t - (ntime-1) + ostride*r] = CUDART_NAN_F; // continue; //} // Note: Non-existent rows are signified by -1 //if( t == 0 && r == 0 ) { // printf("t,srcrow0,srcrow1,istride = %i, %i, %i, %i\n", t, srcrow0, srcrow1, istride); //} //if( threadIdx.x == 63 && blockIdx.y == 4 ) { //printf("istride = %i, srcrow0 = %i, srcrow1 = %i, d_in = %p\n", istride, srcrow0, srcrow1, d_in); //} //if( t == 0 ) {// && r == 1 ) { // printf("istride = %i, srcrow0 = %i, srcrow1 = %i, d_in = %p\n", istride, srcrow0, srcrow1, d_in); //} DType outval = (srcrow0 != -1) ? d_in[ t + istride*srcrow0 + ibatchstride*b] : 0; if( t >= delay ) { outval += (srcrow1 != -1) ? d_in[(t-delay) + istride*srcrow1 + ibatchstride*b] : 0; } int t_ = (is_final_step && reverse_time) ? 
ntime-1 - t : t; d_out[t_ + ostride*r + obatchstride*b] = outval; } } } } template<typename InType, typename OutType> inline void launch_fdmt_init_kernel(int ntime, int nchan, int nbatch, bool reverse_band, bool reverse_time, //int const* d_ndelays, int const* d_offsets, InType /*const**/ d_in, int istride, int ibatchstride, OutType* d_out, int ostride, int obatchstride, cudaStream_t stream=0) { dim3 block(256, 1); // TODO: Tune this dim3 grid(std::min((ntime-1)/block.x+1, 65535u), std::min((nchan-1)/block.y+1, 65535u)); void* args[] = {&ntime, &nchan, &nbatch, &reverse_band, &reverse_time, &d_offsets, &d_in, &istride, &ibatchstride, &d_out, &ostride, &obatchstride}; BF_CHECK_CUDA_EXCEPTION( cudaLaunchKernel((void*)fdmt_init_kernel<InType,OutType>, grid, block, &args[0], 0, stream), BF_STATUS_INTERNAL_ERROR); } template<typename DType> inline void launch_fdmt_exec_kernel(int ntime, int nrow, int nbatch, bool is_final_step, bool reverse_time, int const* d_delays, int2 const* d_srcrows, DType const* d_in, int istride, int ibatchstride, DType* d_out, int ostride, int obatchstride, cudaStream_t stream=0) { //cout << "LAUNCH " << d_in << ", " << d_out << endl; dim3 block(256, 1); // TODO: Tune this dim3 grid(std::min((ntime-1)/block.x+1, 65535u), std::min((nrow -1)/block.y+1, 65535u)); void* args[] = {&ntime, &nrow, &nbatch, &is_final_step, &reverse_time, &d_delays, &d_srcrows, &d_in, &istride, &ibatchstride, &d_out, &ostride, &obatchstride}; BF_CHECK_CUDA_EXCEPTION( cudaLaunchKernel((void*)fdmt_exec_kernel<DType>, grid, block, &args[0], 0, stream), BF_STATUS_INTERNAL_ERROR); } /* **** 4096 **** 4096 **** 2048 **** 1066 **** 650 **** 475 **** 381 **** 337 **** 316 **** 302 **** 299 **** 295 **** 293 SB 3 delay 135 Step 10 prev: 58, 78 srcs: 57, 78 NROW_MAX = 4096 STEP 1 STEP 2 STEP 3 STEP 4 STEP 5 STEP 6 STEP 7 STEP 8 STEP 9 STEP 10 STEP 11 */ class BFfdmt_impl { typedef int IType; typedef double FType; typedef int2 IndexPair; public: // HACK WAR for what looks like a bug in the CUDA 7.0 compiler typedef float DType; private: IType _nchan; IType _max_delay; FType _f0; FType _df; FType _exponent; IType _nrow_max; IType _plan_stride; IType _buffer_stride; IType _batch_stride; std::vector<IType> _offsets; std::vector<std::vector<IndexPair> > _step_srcrows; std::vector<std::vector<IType> > _step_delays; IType* _d_offsets; IndexPair* _d_step_srcrows; IType* _d_step_delays; DType* _d_buffer_a; DType* _d_buffer_b; Workspace _plan_storage; Workspace _exec_storage; // TODO: Use something other than Thrust thrust::device_vector<char> _dv_plan_storage; thrust::device_vector<char> _dv_exec_storage; cudaStream_t _stream; bool _reverse_band; FType cfreq(IType chan) { return _f0 + _df*chan; } FType rel_delay(FType flo, FType fhi, FType fmin, FType fmax) { FType g = _exponent; // Note: We use complex math in order to support negative frequencies // (the result is real regardless). 
std::complex<FType> c_flo=flo, c_fhi=fhi, c_fmin=fmin, c_fmax=fmax; std::complex<FType> numer = std::pow(c_flo, g) - std::pow(c_fhi, g); std::complex<FType> denom = std::pow(c_fmin, g) - std::pow(c_fmax, g); FType eps = std::numeric_limits<FType>::epsilon(); if( std::norm(denom) < eps*eps ) { // Note: The only time I've seen this fail is when nchan==1 BF_ASSERT_EXCEPTION(std::norm(numer) < eps*eps, BF_STATUS_INTERNAL_ERROR); return 0; } std::complex<FType> result = numer / denom; BF_ASSERT_EXCEPTION(std::abs(result.imag()) <= eps, BF_STATUS_INTERNAL_ERROR); return result.real(); } FType rel_delay(FType flo, FType fhi) { FType fmin = cfreq(0); FType fmax = cfreq(_nchan-1); //std::swap(fmin, fmax); //FType fmax = cfreq(_nchan); // HACK TESTING return rel_delay(flo, fhi, fmin, fmax); } IType subband_ndelay(FType f0, FType df) { FType fracdelay = rel_delay(f0, f0+df); FType fmaxdelay = fracdelay*(_max_delay-1); IType ndelay = IType(::ceil(fmaxdelay)) + 1; return ndelay; } public: BFfdmt_impl() : _nchan(0), _max_delay(0), _f0(0), _df(0), _exponent(0), _stream(g_cuda_stream) {} inline IType nchan() const { return _nchan; } inline IType max_delay() const { return _max_delay; } void init(IType nchan, IType max_delay, FType f0, FType df, FType exponent) { BF_TRACE(); if( df < 0. ) { _reverse_band = true; f0 += (nchan-1)*df; df *= -1; } else { _reverse_band = false; } if( nchan == _nchan && max_delay == _max_delay && f0 == _f0 && df == _df && exponent == _exponent ) { return; } _f0 = f0; _df = df; _nchan = nchan; _max_delay = max_delay; _exponent = exponent; // Note: Initialized with 1 entry as dummy for initialization step std::vector<std::vector<IndexPair> > step_subband_parents(1); IType nsubband = _nchan; while( nsubband > 1 ) { IType step = step_subband_parents.size(); step_subband_parents.push_back(std::vector<IndexPair>()); for( IType sb=0; sb<nsubband; sb+=2 ) { IType parent0 = sb; IType parent1 = sb+1; if( nsubband % 2 ) { // Note: Alternating left/right-biased merging scheme if( (step-1) % 2 ) { parent0 -= 1; // Note: First entry becomes -1 => non-existent parent1 -= 1; } else { // Note: Last entry becomes -1 => non-existent if( parent1 == nsubband ) parent1 = -1; } } //cout << step << ": " << parent0 << ", " << parent1 << endl; IndexPair parents = make_int2(parent0, parent1); step_subband_parents[step].push_back(parents); } nsubband = step_subband_parents[step].size(); } // Note: Includes initialization step IType nstep = step_subband_parents.size(); std::vector<std::vector<IType> > step_subband_nchans(nstep); step_subband_nchans[0].assign(_nchan, 1); for( IType step=1; step<nstep; ++step ) { IType nsubband = step_subband_parents[step].size(); step_subband_nchans[step].resize(nsubband); for( IType sb=0; sb<nsubband; ++sb ) { IndexPair parents = step_subband_parents[step][sb]; IType p0 = parents.x;//first; IType p1 = parents.y;//second; IType parent0_nchan = (p0!=-1) ? step_subband_nchans[step-1][p0] : 0; IType parent1_nchan = (p1!=-1) ? 
step_subband_nchans[step-1][p1] : 0; IType child_nchan = parent0_nchan + parent1_nchan; step_subband_nchans[step][sb] = child_nchan; } } std::vector<std::vector<IType> > step_subband_chan_offsets(nstep); std::vector<std::vector<IType> > step_subband_row_offsets(nstep); IType nrow_max = 0; for( IType step=0; step<nstep; ++step ) { IType nsubband = step_subband_nchans[step].size(); // Note: +1 to store the total in the last element // (The array will hold a complete exclusive scan) step_subband_chan_offsets[step].resize(nsubband+1); step_subband_row_offsets[step].resize(nsubband+1); IType chan0 = 0; IType row_offset = 0; for( IType sb=0; sb<nsubband; ++sb ) { IType nchan = step_subband_nchans[step][sb]; FType f0 = cfreq(chan0) - (step == 0 ? 0.5*_df : 0.); //FType f0 = cfreq(chan0); // HACK TESTING FType df = _df * (step == 0 ? 1 : nchan-1); //FType df = _df * nchan; // HACK TESTING //cout << "df = " << df << endl; IType ndelay = subband_ndelay(f0, df); //cout << "NDELAY = " << ndelay << endl; step_subband_chan_offsets[step][sb] = chan0; step_subband_row_offsets[step][sb] = row_offset; chan0 += nchan; row_offset += ndelay; } step_subband_chan_offsets[step][nsubband] = chan0; step_subband_row_offsets[step][nsubband] = row_offset; nrow_max = std::max(nrow_max, row_offset); //*cout << "**** Nrow: " << row_offset << endl; } // Save for use during initialization //plan->_init_subband_row_offsets = step_subband_row_offsets[0]; _offsets = step_subband_row_offsets[0]; _nrow_max = nrow_max; //cout << "**** " << _nrow_max << endl; // Note: First entry in these remains empty std::vector<std::vector<IndexPair> > step_srcrows(nstep); std::vector<std::vector<IType> > step_delays(nstep); for( IType step=1; step<nstep; ++step ) { IType nsubband = step_subband_nchans[step].size(); IType nrow = step_subband_row_offsets[step][nsubband]; //*cout << "nrow " << nrow << endl; step_srcrows[step].resize(nrow); step_delays[step].resize(nrow); for( IType sb=0; sb<nsubband; ++sb ) { IndexPair parents = step_subband_parents[step][sb]; IType p0 = parents.x;//first; IType p1 = parents.y;//second; // TODO: Setting these to 1 instead of 0 in the exceptional case fixed some indexing // issues, but should double-check that the results are good. IType p0_nchan = (p0!=-1) ? step_subband_nchans[step-1][p0] : 1; IType p1_nchan = (p1!=-1) ? step_subband_nchans[step-1][p1] : 1; // Note: If first parent doesn't exist, then it effectively starts where the second parent starts // If second parent doesn't exist, then it effectively starts where the first parent ends IType p0_chan0 = step_subband_chan_offsets[step-1][(p0!=-1) ? p0 : p1]; IType p1_chan0 = step_subband_chan_offsets[step-1][(p1!=-1) ? 
p1 : p0]; if( p1 == -1 ) { p1_chan0 += (p0_nchan-1); } FType flo = cfreq(p0_chan0); FType fmidlo = cfreq(p0_chan0 + (p0_nchan-1)); FType fmidhi = cfreq(p1_chan0); FType fhi = cfreq(p1_chan0 + (p1_nchan-1)); FType cmidlo = rel_delay(flo, fmidlo, flo, fhi); FType cmidhi = rel_delay(flo, fmidhi, flo, fhi); /* // HACK TESTING FType flo = cfreq(p0_chan0) - 0.5*_df; FType fmidlo = flo + (p0_nchan-1)*_df; FType fmidhi = flo + p0_nchan*_df; FType fhi = flo + (p0_nchan + p1_nchan - 1)*_df; FType cmidlo = rel_delay(fmidlo, flo, fhi, flo); FType cmidhi = rel_delay(fmidhi, flo, fhi, flo); */ //cout << p0 << ", " << p1 << endl; //cout << p0_chan0 << ", " << p0_nchan << "; " << p1_chan0 << ", " << p1_nchan << endl; //cout << cmidlo << ", " << cmidhi << endl; // TODO: See if should use same approach with these as in fdmt.py IType beg = step_subband_row_offsets[step][sb]; IType end = step_subband_row_offsets[step][sb+1]; IType ndelay = end - beg; for( IType delay=0; delay<ndelay; ++delay ) { IType dmidlo = (IType)::round(delay*cmidlo); IType dmidhi = (IType)::round(delay*cmidhi); IType drest = delay - dmidhi; assert( dmidlo <= delay ); assert( dmidhi <= delay ); IType prev_beg = (p0!=-1) ? step_subband_row_offsets[step-1][p0] : -1; IType prev_mid0 = (p0!=-1) ? step_subband_row_offsets[step-1][p0+1] : -1; IType prev_mid1 = (p1!=-1) ? step_subband_row_offsets[step-1][p1] : -1; IType prev_end = (p1!=-1) ? step_subband_row_offsets[step-1][p1+1] : -1; // HACK WAR for strange indexing error observed only when nchan=4096 if( p1 != -1 && drest >= prev_end - prev_mid1 ) { drest -= 1; } if( (p0 != -1 && dmidlo >= prev_mid0 - prev_beg) || (p1 != -1 && drest >= prev_end - prev_mid1) ) { cout << "FDMT DEBUGGING INFO" << endl; cout << "SB " << sb << endl; cout << "delay " << delay << endl; cout << "Step " << step << " prev: " << prev_mid0 - prev_beg << ", " << prev_end - prev_mid1 << endl; cout << " srcs: " << dmidlo << ", " << drest << endl; } assert( p0 == -1 || dmidlo < prev_mid0 - prev_beg ); assert( p1 == -1 || drest < prev_end - prev_mid1 ); IType dst_row = step_subband_row_offsets[step ][sb] + delay; IType src_row0 = (p0!=-1) ? step_subband_row_offsets[step-1][p0] + dmidlo : -1; IType src_row1 = (p1!=-1) ? 
step_subband_row_offsets[step-1][p1] + drest : -1; step_srcrows[step][dst_row].x = src_row0;//first = src_row0; //cout << "step " << step << ", dst_row = " << dst_row << ", delay = " << dmidhi << ", src_row0 = " << src_row0 << ", src_row1 = " << src_row1 << endl; step_srcrows[step][dst_row].y = src_row1;//second = src_row1; step_delays[step][dst_row] = dmidhi; //IType prev_nsubband = step_subband_nchans[step-1].size(); //IType prev_nrow = step_subband_row_offsets[step-1][prev_nsubband]; } } } // Save for use during execution _step_srcrows = step_srcrows; _step_delays = step_delays; } bool init_plan_storage(void* storage_ptr, BFsize* storage_size) { BF_TRACE(); BF_TRACE_STREAM(_stream); enum { ALIGNMENT_BYTES = 512, ALIGNMENT_ELMTS = ALIGNMENT_BYTES / sizeof(int) }; Workspace workspace(ALIGNMENT_BYTES); _plan_stride = round_up(_nrow_max, ALIGNMENT_ELMTS); //int nstep_execute = _step_delays.size() - 1; int nstep = _step_delays.size(); workspace.reserve(_nchan+1, &_d_offsets); workspace.reserve(nstep*_plan_stride, &_d_step_srcrows); workspace.reserve(nstep*_plan_stride, &_d_step_delays); if( storage_size ) { if( !storage_ptr ) { // Return required storage size *storage_size = workspace.size(); return false; } else { BF_ASSERT_EXCEPTION(*storage_size >= workspace.size(), BF_STATUS_INSUFFICIENT_STORAGE); } } else { // Auto-allocate storage BF_ASSERT_EXCEPTION(!storage_ptr, BF_STATUS_INVALID_ARGUMENT); _dv_plan_storage.resize(workspace.size()); storage_ptr = thrust::raw_pointer_cast(&_dv_plan_storage[0]); } //std::cout << "workspace.size() = " << workspace.size() << std::endl; //_d_offsets = (IType*)0x123; //std::cout << "_d_offsets = " << _d_offsets << std::endl; //std::cout << "storage_ptr = " << storage_ptr << std::endl; workspace.commit(storage_ptr); //std::cout << "_d_offsets = " << _d_offsets << std::endl; BF_CHECK_CUDA_EXCEPTION( cudaMemcpyAsync(_d_offsets, &_offsets[0], sizeof(int )*_offsets.size(), cudaMemcpyHostToDevice, _stream), BF_STATUS_MEM_OP_FAILED ); for( int step=0; step<nstep; ++step ) { BF_CHECK_CUDA_EXCEPTION( cudaMemcpyAsync(_d_step_srcrows + step*_plan_stride, &_step_srcrows[step][0], sizeof(int2)*_step_srcrows[step].size(), cudaMemcpyHostToDevice, _stream), BF_STATUS_MEM_OP_FAILED ); BF_CHECK_CUDA_EXCEPTION( cudaMemcpyAsync(_d_step_delays + step*_plan_stride, &_step_delays[step][0], sizeof(int)*_step_delays[step].size(), cudaMemcpyHostToDevice, _stream), BF_STATUS_MEM_OP_FAILED ); } BF_CHECK_CUDA_EXCEPTION( cudaStreamSynchronize(_stream), BF_STATUS_DEVICE_ERROR ); return true; } bool init_exec_storage(void* storage_ptr, BFsize* storage_size, size_t ntime, size_t nbatch) { BF_TRACE(); enum { ALIGNMENT_BYTES = 512, ALIGNMENT_ELMTS = ALIGNMENT_BYTES / sizeof(DType) }; Workspace workspace(ALIGNMENT_BYTES); //std::cout << "ntime = " << ntime << std::endl; //std::cout << "_nrow_max = " << _nrow_max << std::endl; _buffer_stride = round_up(ntime, ALIGNMENT_ELMTS); _batch_stride = _nrow_max*_buffer_stride; //std::cout << "_buffer_stride = " << _buffer_stride << std::endl; // TODO: Check if truly safe to allocate smaller buffer_b workspace.reserve(nbatch*_batch_stride, &_d_buffer_a); workspace.reserve(nbatch*_batch_stride, &_d_buffer_b); if( storage_size ) { if( !storage_ptr ) { //cout << "++++ returning storage size" << endl; // Return required storage size *storage_size = workspace.size(); return false; } else { //cout << "++++ using user storage" << endl; BF_ASSERT_EXCEPTION(*storage_size >= workspace.size(), BF_STATUS_INSUFFICIENT_STORAGE); } } else { //cout << "++++ 
auto-allocating storage" << endl; // Auto-allocate storage BF_ASSERT_EXCEPTION(!storage_ptr, BF_STATUS_INVALID_ARGUMENT); _dv_exec_storage.resize(workspace.size()); storage_ptr = thrust::raw_pointer_cast(&_dv_exec_storage[0]); //std::cout << "*** exec storage_ptr = " << storage_ptr << std::endl; } //cout << "++++ committing" << endl; workspace.commit(storage_ptr); return true; } void execute(BFarray const* in, BFarray const* out, size_t ntime, size_t nbatch, bool negative_delays) { BF_TRACE(); BF_TRACE_STREAM(_stream); BF_ASSERT_EXCEPTION(out->dtype == BF_DTYPE_F32, BF_STATUS_UNSUPPORTED_DTYPE); BF_ASSERT_EXCEPTION( out->strides[in->ndim-1] == 4, BF_STATUS_UNSUPPORTED_STRIDE); int ndim = in->ndim; DType* d_ibuf = _d_buffer_b; DType* d_obuf = _d_buffer_a; BF_ASSERT_EXCEPTION(in->strides[ndim-2] % in->strides[ndim-1] == 0, BF_STATUS_UNSUPPORTED_STRIDE); BF_ASSERT_EXCEPTION(out->strides[ndim-2] % out->strides[ndim-1] == 0, BF_STATUS_UNSUPPORTED_STRIDE); size_t istride = in->strides[ndim-2] / in->strides[ndim-1]; size_t ostride = out->strides[ndim-2] / out->strides[ndim-1]; BF_ASSERT_EXCEPTION( in->strides[in->ndim-2] > 0, BF_STATUS_UNSUPPORTED_STRIDE); BF_ASSERT_EXCEPTION(out->strides[in->ndim-2] > 0, BF_STATUS_UNSUPPORTED_STRIDE); size_t ibatchstride = 0; size_t obatchstride = 0; if( in->ndim == 3 ) { BF_ASSERT_EXCEPTION(in->strides[ndim-3] % in->strides[ndim-1] == 0, BF_STATUS_UNSUPPORTED_STRIDE); BF_ASSERT_EXCEPTION(out->strides[ndim-3] % out->strides[ndim-1] == 0, BF_STATUS_UNSUPPORTED_STRIDE); ibatchstride = in->strides[ndim-3] / in->strides[ndim-1]; obatchstride = out->strides[ndim-3] / out->strides[ndim-1]; } BF_ASSERT_EXCEPTION(in->strides[ndim-1] == BF_DTYPE_NBYTE(in->dtype), BF_STATUS_UNSUPPORTED_STRIDE); //bool reverse_time = (in->strides[in->ndim-1] < 0); bool reverse_time = negative_delays; BF_CHECK_CUDA_EXCEPTION(cudaGetLastError(), BF_STATUS_INTERNAL_ERROR); #define LAUNCH_FDMT_INIT_KERNEL(IterType) \ launch_fdmt_init_kernel(ntime, _nchan, nbatch, \ _reverse_band, reverse_time, \ _d_offsets, \ (IterType)in->data, istride, ibatchstride, \ d_obuf, _buffer_stride, _batch_stride, \ _stream) switch( in->dtype ) { // HACK testing disabled // TODO: Get NbitReader working //case BF_DTYPE_I1: LAUNCH_FDMT_INIT_KERNEL(NbitReader<1>); break; //case BF_DTYPE_I2: LAUNCH_FDMT_INIT_KERNEL(NbitReader<2>); break; //case BF_DTYPE_I4: LAUNCH_FDMT_INIT_KERNEL(NbitReader<4>); break; case BF_DTYPE_I8: LAUNCH_FDMT_INIT_KERNEL(const int8_t*); break; case BF_DTYPE_I16: LAUNCH_FDMT_INIT_KERNEL(const int16_t*); break; case BF_DTYPE_I32: LAUNCH_FDMT_INIT_KERNEL(const int32_t*); break; case BF_DTYPE_U8: LAUNCH_FDMT_INIT_KERNEL(const uint8_t*); break; case BF_DTYPE_U16: LAUNCH_FDMT_INIT_KERNEL(const uint16_t*); break; case BF_DTYPE_U32: LAUNCH_FDMT_INIT_KERNEL(const uint32_t*); break; case BF_DTYPE_F32: LAUNCH_FDMT_INIT_KERNEL(const float*); break; default: BF_ASSERT_EXCEPTION(false, BF_STATUS_UNSUPPORTED_DTYPE); } #undef LAUNCH_FDMT_INIT_KERNEL BF_CHECK_CUDA_EXCEPTION(cudaGetLastError(), BF_STATUS_INTERNAL_ERROR); std::swap(d_ibuf, d_obuf); size_t ostride_cur = _buffer_stride; size_t obatchstride_cur = _batch_stride; IType nstep = _step_delays.size(); for( int step=1; step<nstep; ++step ) { //cout << "STEP " << step << endl; IType nrow = _step_srcrows[step].size(); //cout << "nrow " << nrow << endl; if( step == nstep-1 ) { d_obuf = (DType*)out->data; ostride_cur = ostride; // HACK TESTING diagonal reindexing to align output with TOA at highest freq ostride_cur += reverse_time ? 
+1 : -1; obatchstride_cur = obatchstride; } launch_fdmt_exec_kernel(ntime, nrow, nbatch, (step==nstep-1), reverse_time, _d_step_delays + step*_plan_stride, _d_step_srcrows + step*_plan_stride, d_ibuf, _buffer_stride, _batch_stride, d_obuf, ostride_cur, obatchstride_cur, _stream); std::swap(d_ibuf, d_obuf); } BF_CHECK_CUDA_EXCEPTION(cudaGetLastError(), BF_STATUS_INTERNAL_ERROR); } void set_stream(cudaStream_t stream) { _stream = stream; } }; BFstatus bfFdmtCreate(BFfdmt* plan_ptr) { BF_TRACE(); BF_ASSERT(plan_ptr, BF_STATUS_INVALID_POINTER); BF_TRY_RETURN_ELSE(*plan_ptr = new BFfdmt_impl(), *plan_ptr = 0); } // **TODO: Passing 'BFarray const* in' here could replace nchan, f0, df and space if BFarray included dimension scales // Also, could potentially set the output dimension scales (dm0, ddm) // OR, could just leave these to higher-level wrappers (e.g., Python) // This might be for the best in the short term BFstatus bfFdmtInit(BFfdmt plan, BFsize nchan, BFsize max_delay, double f0, double df, double exponent, BFspace space, void* plan_storage, BFsize* plan_storage_size) { BF_TRACE(); BF_ASSERT(plan, BF_STATUS_INVALID_HANDLE); // TODO: Is there any sensible/natural way to handle nchan==1? BF_ASSERT(nchan > 1, BF_STATUS_INVALID_ARGUMENT); BF_ASSERT(space_accessible_from(space, BF_SPACE_CUDA), BF_STATUS_UNSUPPORTED_SPACE); BF_TRY(plan->init(nchan, max_delay, f0, df, exponent)); BF_TRY_RETURN(plan->init_plan_storage(plan_storage, plan_storage_size)); } BFstatus bfFdmtSetStream(BFfdmt plan, void const* stream) { BF_TRACE(); BF_ASSERT(plan, BF_STATUS_INVALID_HANDLE); BF_ASSERT(stream, BF_STATUS_INVALID_POINTER); BF_TRY_RETURN(plan->set_stream(*(cudaStream_t*)stream)); } BFstatus bfFdmtExecute(BFfdmt plan, BFarray const* in, BFarray const* out, BFbool negative_delays, void* exec_storage, BFsize* exec_storage_size) { BF_TRACE(); BF_ASSERT(plan, BF_STATUS_INVALID_HANDLE); BF_ASSERT(in, BF_STATUS_INVALID_POINTER); BF_ASSERT(out, BF_STATUS_INVALID_POINTER); BF_ASSERT(in->ndim == out->ndim, BF_STATUS_INVALID_SHAPE); BF_ASSERT( in->shape[ in->ndim-2] == plan->nchan(), BF_STATUS_INVALID_SHAPE); BF_ASSERT(out->shape[out->ndim-2] == plan->max_delay(), BF_STATUS_INVALID_SHAPE); BF_ASSERT( in->shape[in->ndim-1] == out->shape[out->ndim-1], BF_STATUS_INVALID_SHAPE); // TODO: BF_ASSERT(...); int ndim = in->ndim; size_t ntime = in->shape[in->ndim-1]; size_t nbatch = 1; BFarray out_flattened, in_flattened; // Handle batch dims if( ndim > 2 ) { // Keep the last 3 dims but attempt to flatten all others unsigned long keep_dims_mask = 0x7 << (ndim-3); keep_dims_mask |= padded_dims_mask(out); keep_dims_mask |= padded_dims_mask(in); flatten(out, &out_flattened, keep_dims_mask); flatten(in, &in_flattened, keep_dims_mask); out = &out_flattened; in = &in_flattened; BF_ASSERT(in_flattened.ndim == out_flattened.ndim, BF_STATUS_INTERNAL_ERROR); // TODO: Use streams to support multiple non-contiguous batch dims // (Like in linalg.cu) BF_ASSERT(in_flattened.ndim == 3, BF_STATUS_UNSUPPORTED_SHAPE); BF_ASSERT_EXCEPTION(in_flattened.shape[0] == out_flattened.shape[0], BF_STATUS_INVALID_SHAPE); nbatch = in->shape[0]; } bool ready; BF_TRY(ready = plan->init_exec_storage(exec_storage, exec_storage_size, ntime, nbatch)); if( !ready ) { // Just requesting exec_storage_size, not ready to execute yet return BF_STATUS_SUCCESS; } BF_ASSERT(space_accessible_from( in->space, BF_SPACE_CUDA), BF_STATUS_UNSUPPORTED_SPACE); BF_ASSERT(space_accessible_from(out->space, BF_SPACE_CUDA), BF_STATUS_UNSUPPORTED_SPACE); 
BF_TRY_RETURN(plan->execute(in, out, ntime, nbatch, negative_delays));
}

BFstatus bfFdmtDestroy(BFfdmt plan) {
	BF_TRACE();
	BF_ASSERT(plan, BF_STATUS_INVALID_HANDLE);
	delete plan;
	return BF_STATUS_SUCCESS;
}
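For reference, the delay ratio computed by rel_delay above reduces, for positive frequencies and a real exponent g, to (flo^g - fhi^g) / (fmin^g - fmax^g); the complex arithmetic in the plan code only guards degenerate bands. A minimal host-side sketch of that expression (the function name and example frequencies are invented for illustration and are not part of the Bifrost sources):

#include <cmath>
#include <cstdio>

// Fractional dispersion delay of the sub-band [flo, fhi] relative to the full
// band [fmin, fmax]: (flo^g - fhi^g) / (fmin^g - fmax^g).
// Assumes all frequencies are positive and g is real (e.g. g = -2 for
// cold-plasma dispersion), so no complex-valued guard is needed here.
static double rel_delay_real(double flo, double fhi,
                             double fmin, double fmax, double g) {
    return (std::pow(flo, g) - std::pow(fhi, g)) /
           (std::pow(fmin, g) - std::pow(fmax, g));
}

int main() {
    // Hypothetical 1.0--1.5 GHz band split at 1.25 GHz.
    double c = rel_delay_real(1.00e9, 1.25e9, 1.00e9, 1.50e9, -2.0);
    std::printf("lower half carries %.3f of the total dispersion delay\n", c);
    return 0;
}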
09de3fa5dae82524d1a84f0af7d8c9029e375d7f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> __host__ double** getWeightMatrix(int rows, int cols) { FILE *myFile; myFile = fopen("WMatrix.txt", "r"); double** mat; hipMallocManaged(&mat, rows * sizeof(double*)); for (int i = 0; i < rows; i++) { hipMallocManaged(&(mat[i]), cols * sizeof(double)); fscanf(myFile, "%lf,%lf,%lf,%lf,%lf,%lf,%lf,%lf,%lf,%lf,", &(mat[i][0]), &(mat[i][1]), &(mat[i][2]), &(mat[i][3]), &(mat[i][4]), &(mat[i][5]), &(mat[i][6]), &(mat[i][7]), &(mat[i][8]), &(mat[i][9]) ); } fclose(myFile); return mat; } __host__ double* getBVector(int cols) { FILE *myFile; myFile = fopen("bVector.txt", "r"); double* vector; hipMallocManaged(&vector, cols * sizeof(double)); fscanf(myFile, "%lf,%lf,%lf,%lf,%lf,%lf,%lf,%lf,%lf,%lf,", &(vector[0]), &(vector[1]), &(vector[2]), &(vector[3]), &(vector[4]), &(vector[5]), &(vector[6]), &(vector[7]), &(vector[8]), &(vector[9]) ); fclose(myFile); return vector; } __host__ void printMatrix(double **mat, int rows, int cols) { for (int i = 0; i < rows; i++) { printf("["); for (int j = 0; j < cols; j++) { printf("%0.10lf\t", mat[i][j]); } printf("]\n"); } printf("\n"); } __host__ void getMNISTTest(int indice, int* label, int* vector, int rows, int cols) { if (indice > 10000) { printf("Solo hay 10,000 datos de test !"); return; } FILE *myFile; myFile = fopen("mnist_test.csv", "r"); for (int i = 0; i < indice; i++) { fscanf(myFile, "%*[^\n]\n"); } fscanf(myFile, "%d", label); fscanf(myFile, ","); for (int j = 0; j < rows; j++) { fscanf(myFile, "%d,", &(vector[j])); } fclose(myFile); } __host__ void printImage(int label, int* vector, int rows) { for (int j = 0, x; j < rows; j++) { x = vector[j]; if (x == 0) { printf("--"); } else { printf("%d%d", label, label); } if ((j + 1) % 28 == 0) { printf("\n"); } } printf("\n"); } __host__ double getMaxIndex(double* y, int cols) { double max = y[0]; int maxIndex = -1; for (int i = 0; i < cols; i++) { if (y[i] >= max) { max = y[i]; maxIndex = i; } } return maxIndex; } __global__ void productMatrixVectorKernel(int* X, double** W, double* WX, int cols) { int j = threadIdx.y; double sum = 0; for (int k = 0; k < cols; k++) { sum += X[k] * W[k][j]; } WX[j] = sum; __syncthreads(); } __global__ void SumVectorVectorKernel(double* WX, double* b, double* y) { int j = threadIdx.y; y[j] = WX[j] + b[j]; __syncthreads(); } int main() { int rows = 784; int cols = 10; int indexTest = 44; double** W = getWeightMatrix(rows, cols); double* b = getBVector(cols); int label; int* X; hipMallocManaged(&X, rows * sizeof(int)); double* WX; hipMallocManaged(&WX, cols * sizeof(double)); double* y; hipMallocManaged(&y, cols * sizeof(double)); int prediction; while(1) { printf("Indice del Data Set: "); scanf("%d", &indexTest); if (indexTest > 10000) { printf("Solo hay 10,000 datos\n\n"); continue; } getMNISTTest(indexTest, &label, X, rows, cols); dim3 blocksPerGrid(1); dim3 threadsPerBlock(1, cols); hipLaunchKernelGGL(( productMatrixVectorKernel), dim3(blocksPerGrid), dim3(threadsPerBlock) , 0, 0, X, W, WX, rows); hipDeviceSynchronize(); hipLaunchKernelGGL(( SumVectorVectorKernel), dim3(blocksPerGrid), dim3(threadsPerBlock) , 0, 0, WX, b, y); hipDeviceSynchronize(); prediction = getMaxIndex(y, cols); printf("Original %d, Prediccion %d\n", label, prediction); printImage(label, X, rows); } }
09de3fa5dae82524d1a84f0af7d8c9029e375d7f.cu
#include <stdio.h> #include <stdlib.h> __host__ double** getWeightMatrix(int rows, int cols) { FILE *myFile; myFile = fopen("WMatrix.txt", "r"); double** mat; cudaMallocManaged(&mat, rows * sizeof(double*)); for (int i = 0; i < rows; i++) { cudaMallocManaged(&(mat[i]), cols * sizeof(double)); fscanf(myFile, "%lf,%lf,%lf,%lf,%lf,%lf,%lf,%lf,%lf,%lf,", &(mat[i][0]), &(mat[i][1]), &(mat[i][2]), &(mat[i][3]), &(mat[i][4]), &(mat[i][5]), &(mat[i][6]), &(mat[i][7]), &(mat[i][8]), &(mat[i][9]) ); } fclose(myFile); return mat; } __host__ double* getBVector(int cols) { FILE *myFile; myFile = fopen("bVector.txt", "r"); double* vector; cudaMallocManaged(&vector, cols * sizeof(double)); fscanf(myFile, "%lf,%lf,%lf,%lf,%lf,%lf,%lf,%lf,%lf,%lf,", &(vector[0]), &(vector[1]), &(vector[2]), &(vector[3]), &(vector[4]), &(vector[5]), &(vector[6]), &(vector[7]), &(vector[8]), &(vector[9]) ); fclose(myFile); return vector; } __host__ void printMatrix(double **mat, int rows, int cols) { for (int i = 0; i < rows; i++) { printf("["); for (int j = 0; j < cols; j++) { printf("%0.10lf\t", mat[i][j]); } printf("]\n"); } printf("\n"); } __host__ void getMNISTTest(int indice, int* label, int* vector, int rows, int cols) { if (indice > 10000) { printf("Solo hay 10,000 datos de test !"); return; } FILE *myFile; myFile = fopen("mnist_test.csv", "r"); for (int i = 0; i < indice; i++) { fscanf(myFile, "%*[^\n]\n"); } fscanf(myFile, "%d", label); fscanf(myFile, ","); for (int j = 0; j < rows; j++) { fscanf(myFile, "%d,", &(vector[j])); } fclose(myFile); } __host__ void printImage(int label, int* vector, int rows) { for (int j = 0, x; j < rows; j++) { x = vector[j]; if (x == 0) { printf("--"); } else { printf("%d%d", label, label); } if ((j + 1) % 28 == 0) { printf("\n"); } } printf("\n"); } __host__ double getMaxIndex(double* y, int cols) { double max = y[0]; int maxIndex = -1; for (int i = 0; i < cols; i++) { if (y[i] >= max) { max = y[i]; maxIndex = i; } } return maxIndex; } __global__ void productMatrixVectorKernel(int* X, double** W, double* WX, int cols) { int j = threadIdx.y; double sum = 0; for (int k = 0; k < cols; k++) { sum += X[k] * W[k][j]; } WX[j] = sum; __syncthreads(); } __global__ void SumVectorVectorKernel(double* WX, double* b, double* y) { int j = threadIdx.y; y[j] = WX[j] + b[j]; __syncthreads(); } int main() { int rows = 784; int cols = 10; int indexTest = 44; double** W = getWeightMatrix(rows, cols); double* b = getBVector(cols); int label; int* X; cudaMallocManaged(&X, rows * sizeof(int)); double* WX; cudaMallocManaged(&WX, cols * sizeof(double)); double* y; cudaMallocManaged(&y, cols * sizeof(double)); int prediction; while(1) { printf("Indice del Data Set: "); scanf("%d", &indexTest); if (indexTest > 10000) { printf("Solo hay 10,000 datos\n\n"); continue; } getMNISTTest(indexTest, &label, X, rows, cols); dim3 blocksPerGrid(1); dim3 threadsPerBlock(1, cols); productMatrixVectorKernel<<< blocksPerGrid, threadsPerBlock >>>(X, W, WX, rows); cudaDeviceSynchronize(); SumVectorVectorKernel<<< blocksPerGrid, threadsPerBlock >>>(WX, b, y); cudaDeviceSynchronize(); prediction = getMaxIndex(y, cols); printf("Original %d, Prediccion %d\n", label, prediction); printImage(label, X, rows); } }
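Aside from the hipify banner and the runtime header, the pair above differs only in launch syntax: the CUDA triple-chevron launch becomes hipLaunchKernelGGL with explicit shared-memory and stream arguments. A self-contained CUDA sketch of that mapping (kernel and variable names are invented):

#include <stdio.h>
#include <cuda_runtime.h>

__global__ void scaleKernel(double* v, double a, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) v[i] *= a;
}

int main() {
    const int n = 10;
    double* v = NULL;
    cudaMallocManaged(&v, n * sizeof(double));  // the call hipify maps to hipMallocManaged
    for (int i = 0; i < n; i++) v[i] = i;

    dim3 blocksPerGrid(1);
    dim3 threadsPerBlock(n);
    // CUDA launch syntax, as in the .cu file above:
    scaleKernel<<<blocksPerGrid, threadsPerBlock>>>(v, 2.0, n);
    // Equivalent HIP form emitted by hipify; the two extra 0 arguments are the
    // dynamic shared-memory size and the stream:
    //   hipLaunchKernelGGL(scaleKernel, blocksPerGrid, threadsPerBlock, 0, 0, v, 2.0, n);
    cudaDeviceSynchronize();

    printf("v[3] = %f\n", v[3]);  // expect 6.0
    cudaFree(v);
    return 0;
}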
651d914b63df143898d65d103168006a2b64f02f.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "psc_cuda.h"

#define BLOCKSIZE_X 1
#define BLOCKSIZE_Y 4
#define BLOCKSIZE_Z 4

#define DIM DIM_YZ
#define PFX(x) yz_a_ ## x
#include "constants.c"
#include "common.c"
#include "common_push.c"
#include "common_fld_cache.c"

__device__ static void push_part_yz_a_one(int n, particles_cuda_dev_t d_particles, real *d_flds)
{
  struct d_particle p;
  LOAD_PARTICLE(p, d_particles, n);

  real vxi[3];

  // x^n, p^n -> x^(n+0.5), p^n
  calc_vxi(vxi, p);
  push_xi(&p, vxi, .5f * d_consts.dt);
  STORE_PARTICLE_POS(p, d_particles, n);
  // STORE_PARTICLE_MOM(p, d_particles, n);
}

__global__ static void push_part_yz_a(int n_part, particles_cuda_dev_t d_part, float *d_flds, int stride)
{
  int n = threadIdx.x + blockDim.x * blockIdx.x;
  while (n < n_part) {
    push_part_yz_a_one(n, d_part, d_flds);
    n += stride;
  }
}

EXTERN_C void __cuda_push_part_yz_a(struct psc_particles *prts, struct psc_fields *pf)
{
  struct psc_fields_cuda *pfc = psc_fields_cuda(pf);
  struct psc_particles_cuda *cuda = psc_particles_cuda(prts);

  const int threadsPerBlock = 128;
  const int gridSize = 256;
  int dimBlock[2] = { threadsPerBlock, 1 };
  int dimGrid[2] = { gridSize, 1 };
  RUN_KERNEL(dimGrid, dimBlock, push_part_yz_a,
             (prts->n_part, cuda->d_part, pfc->d_flds, gridSize * threadsPerBlock));
}
651d914b63df143898d65d103168006a2b64f02f.cu
#include "psc_cuda.h" #define BLOCKSIZE_X 1 #define BLOCKSIZE_Y 4 #define BLOCKSIZE_Z 4 #define DIM DIM_YZ #define PFX(x) yz_a_ ## x #include "constants.c" #include "common.c" #include "common_push.c" #include "common_fld_cache.c" __device__ static void push_part_yz_a_one(int n, particles_cuda_dev_t d_particles, real *d_flds) { struct d_particle p; LOAD_PARTICLE(p, d_particles, n); real vxi[3]; // x^n, p^n -> x^(n+0.5), p^n calc_vxi(vxi, p); push_xi(&p, vxi, .5f * d_consts.dt); STORE_PARTICLE_POS(p, d_particles, n); // STORE_PARTICLE_MOM(p, d_particles, n); } __global__ static void push_part_yz_a(int n_part, particles_cuda_dev_t d_part, float *d_flds, int stride) { int n = threadIdx.x + blockDim.x * blockIdx.x; while (n < n_part) { push_part_yz_a_one(n, d_part, d_flds); n += stride; } } EXTERN_C void __cuda_push_part_yz_a(struct psc_particles *prts, struct psc_fields *pf) { struct psc_fields_cuda *pfc = psc_fields_cuda(pf); struct psc_particles_cuda *cuda = psc_particles_cuda(prts); const int threadsPerBlock = 128; const int gridSize = 256; int dimBlock[2] = { threadsPerBlock, 1 }; int dimGrid[2] = { gridSize, 1 }; RUN_KERNEL(dimGrid, dimBlock, push_part_yz_a, (prts->n_part, cuda->d_part, pfc->d_flds, gridSize * threadsPerBlock)); }
8c73553e9f5f13912b27c94b26a09a3c5df9bad2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include "CombinationIt.h" #include "PermutationIt.h" #include <list> #include "hip/device_functions.h" #define BLOCKS 709; #define THREADS 512; #define N 1000;//amount of results __global__ void addKernel(int * c); static void reportMemStatus(); hipError_t addWithCuda(int *c); __device__ int myPushBack(int* result,int *results) { if (results[0] >= 1000) { return; } int pointer= atomicAdd(&results[0],1); if (pointer >= 1000) { return; } for (int i = 0; i < 9; i++) { results[pointer * 9 + i + 1] = result[i]; } return 0; } __device__ void factorial(long * number) { int n = (int) *number; if (n == 3) { n = 3; } while (n-1 > 0) { n--; *number *= n; } return; } __device__ bool magicSquare(int *c) { int r1 = c[0] + c[1] + c[2]; int r2 = c[3] + c[4] + c[5]; int r3 = c[6] + c[7] + c[8]; int c1 = c[0] + c[3] + c[6]; int c2 = c[1] + c[4] + c[7]; int c3 = c[2] + c[5] + c[8]; int d1 = c[0] + c[4] + c[8]; int d2 = c[6] + c[4] + c[2]; bool temp = r1 == r2 && r2 == r3 && r1 == c1 && c1 == c2 && c2 == c3; temp = temp && (r1 == d1 || r1 == d2); return temp; } __device__ int getIth(int *c, int index) { int count = -1; int firstPositiveElement =-1; int i = -1; do { i++; if (c[i] != -1) { count++; } } while (i < 8 && count < index); /*for (; i < 9 && count < index; i++) { if (c[i] != -1) { count++; } }*/ int k = c[i]; c[i] = -1; return k; } /* give y and x calculate c such that c*y!<x, for the largest c possible */ __device__ int largestC(int y, int x,int * factorial) { int c = 0; while (x >= (c+1)*factorial[y]) { c++; } return c; } __device__ void reorderd(int* set, int* pos) { int *reordered = new int[9]; for (int i = 0; i < 9; i++) { int k = pos[i]; int j = set[k]; reordered[i] = j; } for (int i = 0; i < 9; i++) { set[i] = reordered[i]; } delete[] reordered; } __device__ void getPermutation(int *permu, int index, int* factorial) { int *temp = new int[9]; int *ordering = new int[9]; for (int i = 0; i < 9; i++) { temp[i] = i; } for (int i = 0; i < 9; i++) { int t = largestC(8 - i, index, factorial); index -= t * factorial[8 - i]; int val=getIth(temp, t); ordering[i] = val; } //order the actual array into the correct order reorderd(permu,ordering); delete [] temp; delete [] ordering; return; } __device__ void initialize(int c[]) { c[0] = 1; for (int i = 1; i < 9; i++) { c[i] = c[i - 1] * i; } return; } __global__ void addKernel(int *c, int * factorial, int* results, int* d_pos) { int index = blockDim.x*blockIdx.x + threadIdx.x; if (index > 362880) { return; } int *ccopy = new int[9]; for (int i = 0; i < 9; i++) { ccopy[i] = c[i]; } //getPermutation(ccopy, index, factorial); reorderd(ccopy, &d_pos[index]); index = 10; bool correct = magicSquare(ccopy); if (correct) { myPushBack(ccopy, results); } delete[] ccopy; return; } int main() { int c[9] = { 0,1,2,3,4,5,6,7,8 }; // Add vectors in parallel. reportMemStatus(); hipError_t cudaStatus = addWithCuda(c); if (cudaStatus != hipSuccess) { fprintf(stderr, "addWithCuda failed!"); return 1; } // hipDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. 
cudaStatus = hipDeviceReset(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceReset failed!"); return 1; } return 0; } static void reportMemStatus() { // show memory usage of GPU size_t free_byte; size_t total_byte; size_t malloc_byte; hipError_t cudaStatus = hipSetDevice(0); if (hipSuccess != cudaStatus) { printf("Error: hipMemGetInfo fails, %s \n", hipGetErrorString(cudaStatus)); return; } hipError_t cuda_status; cuda_status = hipMemGetInfo(&free_byte, &total_byte); if (hipSuccess != cuda_status) { printf("Error: hipMemGetInfo fails, %s \n", hipGetErrorString(cuda_status)); return; } cuda_status=hipDeviceSetLimit(hipLimitMallocHeapSize, 1024 * 1000 * 500); if (cuda_status != hipSuccess) { printf("fdskjfhdskajfdsafdsafs"); } cuda_status = hipDeviceGetLimit(&malloc_byte, hipLimitMallocHeapSize); if (hipSuccess != cuda_status) { printf("Error: hipDeviceGetLimit fails, %s \n", hipGetErrorString(cuda_status)); return; } double free_db = (double)free_byte; double total_db = (double)total_byte; double used_db = total_db - free_db; printf("GPU memory usage: used = %f, free = %f MB, total = %f MB, malloc limit = %f MB\n", used_db / 1024.0 / 1024.0, free_db / 1024.0 / 1024.0, total_db / 1024.0 / 1024.0, malloc_byte / 1024.0 / 1024.0); } hipError_t addWithCuda(int *c) { int t = BLOCKS; t *= THREADS; int *d_results =0; int *dev_c = 0; hipError_t cudaStatus; int *d_factorial=0; int factorial[9] = { 0 }; factorial[0] = 1; for (int i = 1; i < 9; i++) { factorial[i] = factorial[i - 1] * i; } // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = hipSetDevice(0); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } cudaStatus = hipDeviceSetLimit(hipLimitMallocHeapSize,1024*1000*500); if (cudaStatus != hipSuccess) { fprintf(stderr, "cuda limit malloc failed!"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output) . cudaStatus = hipMalloc((void**)&dev_c,9*sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output) . cudaStatus = hipMalloc((void**)&d_results, (1+1000 * 9) * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } hipMemset(d_results, 0,(1 + 1000 * 9) * sizeof(int)); // Allocate GPU buffers for three vectors(two input, one output) . cudaStatus = hipMalloc((void**)&d_factorial, 9 * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } // Allocate GPU buffers for three vectors(two input, one output) . cudaStatus = hipMemcpy(d_factorial,factorial, 9 * sizeof(int), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } int* d_pos = 0; int *h_pos = new int[362880 * 9]; cudaStatus=hipMalloc((void**)&d_pos, 362880 * 9 * sizeof(int)); int *arr = initPerm(); int i = 0; while (hasNextPerm()) { nextPerm(); for (int j = 0; j < 9; j++) { h_pos[j + i * 9] = arr[j]; } i++; } cudaStatus=hipMemcpy(d_pos, h_pos, 362880 * 9 * sizeof(int), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } init(12, 9, choose(12, 9)); int *sets = new int[9]; while (hasNext()) { sets = next(); // Allocate GPU buffers for three vectors(two input, one output) . 
cudaStatus = hipMemcpy(dev_c, sets, 9 * sizeof(int), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } // Launch a kernel on the GPU with one thread for each element. addKernel << <709, 512 >> > (dev_c, d_factorial, d_results,d_pos); // Check for any errors launching the kernel cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus)); goto Error; } // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } int *results = new int[(1 + 1000 * 9)]; // Copy output vector from GPU buffer to host memory. cudaStatus = hipMemcpy(results, d_results, (1 + 1000 * 9) * sizeof(int), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!last"); goto Error; } int nbresults = results[0]; for (int i = 0; i < nbresults; i++) { printf("magic square {%d,%d,%d,%d,%d,%d,%d,%d,%d}\n", results[1 + i * 9], results[2 + i * 9], results[3 + i * 9], results[4 + i * 9], results[5 + i * 9], results[6 + i * 9], results[7 + i * 9], results[8 + i * 9], results[9 + i * 9]); } } Error: hipFree(dev_c); hipFree(d_factorial); hipFree(d_results); hipFree(d_pos); return cudaStatus; }
8c73553e9f5f13912b27c94b26a09a3c5df9bad2.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include "CombinationIt.h" #include "PermutationIt.h" #include <list> #include "device_functions.h" #define BLOCKS 709; #define THREADS 512; #define N 1000;//amount of results __global__ void addKernel(int * c); static void reportMemStatus(); cudaError_t addWithCuda(int *c); __device__ int myPushBack(int* result,int *results) { if (results[0] >= 1000) { return; } int pointer= atomicAdd(&results[0],1); if (pointer >= 1000) { return; } for (int i = 0; i < 9; i++) { results[pointer * 9 + i + 1] = result[i]; } return 0; } __device__ void factorial(long * number) { int n = (int) *number; if (n == 3) { n = 3; } while (n-1 > 0) { n--; *number *= n; } return; } __device__ bool magicSquare(int *c) { int r1 = c[0] + c[1] + c[2]; int r2 = c[3] + c[4] + c[5]; int r3 = c[6] + c[7] + c[8]; int c1 = c[0] + c[3] + c[6]; int c2 = c[1] + c[4] + c[7]; int c3 = c[2] + c[5] + c[8]; int d1 = c[0] + c[4] + c[8]; int d2 = c[6] + c[4] + c[2]; bool temp = r1 == r2 && r2 == r3 && r1 == c1 && c1 == c2 && c2 == c3; temp = temp && (r1 == d1 || r1 == d2); return temp; } __device__ int getIth(int *c, int index) { int count = -1; int firstPositiveElement =-1; int i = -1; do { i++; if (c[i] != -1) { count++; } } while (i < 8 && count < index); /*for (; i < 9 && count < index; i++) { if (c[i] != -1) { count++; } }*/ int k = c[i]; c[i] = -1; return k; } /* give y and x calculate c such that c*y!<x, for the largest c possible */ __device__ int largestC(int y, int x,int * factorial) { int c = 0; while (x >= (c+1)*factorial[y]) { c++; } return c; } __device__ void reorderd(int* set, int* pos) { int *reordered = new int[9]; for (int i = 0; i < 9; i++) { int k = pos[i]; int j = set[k]; reordered[i] = j; } for (int i = 0; i < 9; i++) { set[i] = reordered[i]; } delete[] reordered; } __device__ void getPermutation(int *permu, int index, int* factorial) { int *temp = new int[9]; int *ordering = new int[9]; for (int i = 0; i < 9; i++) { temp[i] = i; } for (int i = 0; i < 9; i++) { int t = largestC(8 - i, index, factorial); index -= t * factorial[8 - i]; int val=getIth(temp, t); ordering[i] = val; } //order the actual array into the correct order reorderd(permu,ordering); delete [] temp; delete [] ordering; return; } __device__ void initialize(int c[]) { c[0] = 1; for (int i = 1; i < 9; i++) { c[i] = c[i - 1] * i; } return; } __global__ void addKernel(int *c, int * factorial, int* results, int* d_pos) { int index = blockDim.x*blockIdx.x + threadIdx.x; if (index > 362880) { return; } int *ccopy = new int[9]; for (int i = 0; i < 9; i++) { ccopy[i] = c[i]; } //getPermutation(ccopy, index, factorial); reorderd(ccopy, &d_pos[index]); index = 10; bool correct = magicSquare(ccopy); if (correct) { myPushBack(ccopy, results); } delete[] ccopy; return; } int main() { int c[9] = { 0,1,2,3,4,5,6,7,8 }; // Add vectors in parallel. reportMemStatus(); cudaError_t cudaStatus = addWithCuda(c); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addWithCuda failed!"); return 1; } // cudaDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. 
cudaStatus = cudaDeviceReset(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceReset failed!"); return 1; } return 0; } static void reportMemStatus() { // show memory usage of GPU size_t free_byte; size_t total_byte; size_t malloc_byte; cudaError_t cudaStatus = cudaSetDevice(0); if (cudaSuccess != cudaStatus) { printf("Error: cudaMemGetInfo fails, %s \n", cudaGetErrorString(cudaStatus)); return; } cudaError_t cuda_status; cuda_status = cudaMemGetInfo(&free_byte, &total_byte); if (cudaSuccess != cuda_status) { printf("Error: cudaMemGetInfo fails, %s \n", cudaGetErrorString(cuda_status)); return; } cuda_status=cudaDeviceSetLimit(cudaLimitMallocHeapSize, 1024 * 1000 * 500); if (cuda_status != cudaSuccess) { printf("fdskjfhdskajfdsafdsafs"); } cuda_status = cudaDeviceGetLimit(&malloc_byte, cudaLimitMallocHeapSize); if (cudaSuccess != cuda_status) { printf("Error: cudaDeviceGetLimit fails, %s \n", cudaGetErrorString(cuda_status)); return; } double free_db = (double)free_byte; double total_db = (double)total_byte; double used_db = total_db - free_db; printf("GPU memory usage: used = %f, free = %f MB, total = %f MB, malloc limit = %f MB\n", used_db / 1024.0 / 1024.0, free_db / 1024.0 / 1024.0, total_db / 1024.0 / 1024.0, malloc_byte / 1024.0 / 1024.0); } cudaError_t addWithCuda(int *c) { int t = BLOCKS; t *= THREADS; int *d_results =0; int *dev_c = 0; cudaError_t cudaStatus; int *d_factorial=0; int factorial[9] = { 0 }; factorial[0] = 1; for (int i = 1; i < 9; i++) { factorial[i] = factorial[i - 1] * i; } // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } cudaStatus = cudaDeviceSetLimit(cudaLimitMallocHeapSize,1024*1000*500); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cuda limit malloc failed!"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output) . cudaStatus = cudaMalloc((void**)&dev_c,9*sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output) . cudaStatus = cudaMalloc((void**)&d_results, (1+1000 * 9) * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaMemset(d_results, 0,(1 + 1000 * 9) * sizeof(int)); // Allocate GPU buffers for three vectors(two input, one output) . cudaStatus = cudaMalloc((void**)&d_factorial, 9 * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } // Allocate GPU buffers for three vectors(two input, one output) . cudaStatus = cudaMemcpy(d_factorial,factorial, 9 * sizeof(int), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } int* d_pos = 0; int *h_pos = new int[362880 * 9]; cudaStatus=cudaMalloc((void**)&d_pos, 362880 * 9 * sizeof(int)); int *arr = initPerm(); int i = 0; while (hasNextPerm()) { nextPerm(); for (int j = 0; j < 9; j++) { h_pos[j + i * 9] = arr[j]; } i++; } cudaStatus=cudaMemcpy(d_pos, h_pos, 362880 * 9 * sizeof(int), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } init(12, 9, choose(12, 9)); int *sets = new int[9]; while (hasNext()) { sets = next(); // Allocate GPU buffers for three vectors(two input, one output) . 
cudaStatus = cudaMemcpy(dev_c, sets, 9 * sizeof(int), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } // Launch a kernel on the GPU with one thread for each element. addKernel << <709, 512 >> > (dev_c, d_factorial, d_results,d_pos); // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; } // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } int *results = new int[(1 + 1000 * 9)]; // Copy output vector from GPU buffer to host memory. cudaStatus = cudaMemcpy(results, d_results, (1 + 1000 * 9) * sizeof(int), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!last"); goto Error; } int nbresults = results[0]; for (int i = 0; i < nbresults; i++) { printf("magic square {%d,%d,%d,%d,%d,%d,%d,%d,%d}\n", results[1 + i * 9], results[2 + i * 9], results[3 + i * 9], results[4 + i * 9], results[5 + i * 9], results[6 + i * 9], results[7 + i * 9], results[8 + i * 9], results[9 + i * 9]); } } Error: cudaFree(dev_c); cudaFree(d_factorial); cudaFree(d_results); cudaFree(d_pos); return cudaStatus; }
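The device predicate magicSquare above accepts a 3x3 arrangement when all row and column sums agree and at least one diagonal matches them. A plain host-side version of the same check, exercised on a known 3x3 magic square (the helper name is invented):

#include <stdio.h>

// Mirrors the device-side magicSquare() test: equal row and column sums,
// plus at least one diagonal with the same sum.
static bool is_magic_3x3(const int c[9]) {
    int r1 = c[0] + c[1] + c[2], r2 = c[3] + c[4] + c[5], r3 = c[6] + c[7] + c[8];
    int c1 = c[0] + c[3] + c[6], c2 = c[1] + c[4] + c[7], c3 = c[2] + c[5] + c[8];
    int d1 = c[0] + c[4] + c[8], d2 = c[6] + c[4] + c[2];
    bool ok = r1 == r2 && r2 == r3 && r1 == c1 && c1 == c2 && c2 == c3;
    return ok && (r1 == d1 || r1 == d2);
}

int main() {
    int square[9] = { 2, 7, 6, 9, 5, 1, 4, 3, 8 };  // classic 3x3 magic square, every line sums to 15
    printf("is_magic_3x3: %s\n", is_magic_3x3(square) ? "yes" : "no");
    return 0;
}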
cc524a89f89af4536a02b3c8942e061e3fa1e989.hip
// !!! This is a file automatically generated by hipify!!!
#include "JohnsonSUPdf.hh"

const fptype SQRT2PI = 2.506628;

EXEC_TARGET fptype device_JohnsonSU (fptype* evt, fptype* p, unsigned int* indices) {
  fptype _Jm = p[indices[1]];
  fptype _Js = p[indices[2]];
  fptype _Jg = p[indices[3]];
  fptype _Jd = p[indices[4]];

  fptype x = evt[indices[2 + indices[0]]];

  fptype px = (x -_Jm)/_Js;
  fptype px2 = px * px;
  fptype sqrt_arg = SQRT(1+px2);
  fptype inv_sinh = LOG(px + sqrt_arg);
  fptype gaus_arg = _Jg + _Jd * inv_sinh;
  //if ((gpuDebug & 1) && (0 == BLOCKIDX) && (0 == THREADIDX))
  //if (gpuDebug & 1)
  //printf("Johnson SU: %f %f %f %f | %f %f %i\n", _Jm, _Js, _Jg, _Jd, x, _Jd / (_Js * SQRT2PI * sqrt_arg) * EXP(-0.5 * gaus_arg * gaus_arg), indices[2 + indices[0]]);
  //printf("Johnson SU: %f %f %f %f | %f %f %f %f\n", _Jm, _Js, _Jg, _Jd, x, _Jd / (_Js * SQRT2PI * sqrt_arg) * EXP(-0.5 * gaus_arg * gaus_arg), hipArray[indices[1]], hipArray[indices[2]]);
  return _Jd / (_Js * SQRT2PI * sqrt_arg) * EXP(-0.5 * gaus_arg * gaus_arg );
}

MEM_DEVICE device_function_ptr ptr_to_JohnsonSU = device_JohnsonSU;

__host__ JohnsonSUPdf::JohnsonSUPdf (std::string n, Variable* _x, Variable* mean, Variable* sigma, Variable* gamma, Variable* delta)
  : GooPdf(_x, n)
{
  std::vector<unsigned int> pindices;
  pindices.push_back(registerParameter(mean));
  pindices.push_back(registerParameter(sigma));
  pindices.push_back(registerParameter(gamma));
  pindices.push_back(registerParameter(delta));
  GET_FUNCTION_ADDR(ptr_to_JohnsonSU);
  initialise(pindices);
}

__host__ fptype JohnsonSUPdf::integrate (fptype lo, fptype hi) const {
  return 1.0; // Analytic integral included in device function! (Correct for minus to plus inf.)
}
cc524a89f89af4536a02b3c8942e061e3fa1e989.cu
#include "JohnsonSUPdf.hh" const fptype SQRT2PI = 2.506628; EXEC_TARGET fptype device_JohnsonSU (fptype* evt, fptype* p, unsigned int* indices) { fptype _Jm = p[indices[1]]; fptype _Js = p[indices[2]]; fptype _Jg = p[indices[3]]; fptype _Jd = p[indices[4]]; fptype x = evt[indices[2 + indices[0]]]; fptype px = (x -_Jm)/_Js; fptype px2 = px * px; fptype sqrt_arg = SQRT(1+px2); fptype inv_sinh = LOG(px + sqrt_arg); fptype gaus_arg = _Jg + _Jd * inv_sinh; //if ((gpuDebug & 1) && (0 == BLOCKIDX) && (0 == THREADIDX)) //if (gpuDebug & 1) //printf("Johnson SU: %f %f %f %f | %f %f %i\n", _Jm, _Js, _Jg, _Jd, x, _Jd / (_Js * SQRT2PI * sqrt_arg) * EXP(-0.5 * gaus_arg * gaus_arg), indices[2 + indices[0]]); //printf("Johnson SU: %f %f %f %f | %f %f %f %f\n", _Jm, _Js, _Jg, _Jd, x, _Jd / (_Js * SQRT2PI * sqrt_arg) * EXP(-0.5 * gaus_arg * gaus_arg), cudaArray[indices[1]], cudaArray[indices[2]]); return _Jd / (_Js * SQRT2PI * sqrt_arg) * EXP(-0.5 * gaus_arg * gaus_arg ); } MEM_DEVICE device_function_ptr ptr_to_JohnsonSU = device_JohnsonSU; __host__ JohnsonSUPdf::JohnsonSUPdf (std::string n, Variable* _x, Variable* mean, Variable* sigma, Variable* gamma, Variable* delta) : GooPdf(_x, n) { std::vector<unsigned int> pindices; pindices.push_back(registerParameter(mean)); pindices.push_back(registerParameter(sigma)); pindices.push_back(registerParameter(gamma)); pindices.push_back(registerParameter(delta)); GET_FUNCTION_ADDR(ptr_to_JohnsonSU); initialise(pindices); } __host__ fptype JohnsonSUPdf::integrate (fptype lo, fptype hi) const { return 1.0; // Analytic integral included in device function! (Correct for minus to plus inf.) }
0a6dad64942b77de788a4cac6af36a304d971efe.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Udacity HW5 Histogramming for Speed The goal of this assignment is compute a histogram as fast as possible. We have simplified the problem as much as possible to allow you to focus solely on the histogramming algorithm. The input values that you need to histogram are already the exact bins that need to be updated. This is unlike in HW3 where you needed to compute the range of the data and then do: bin = (val - valMin) / valRange to determine the bin. Here the bin is just: bin = val so the serial histogram calculation looks like: for (i = 0; i < numElems; ++i) histo[val[i]]++; That's it! Your job is to make it run as fast as possible! The values are normally distributed - you may take advantage of this fact in your implementation. Evan Cummings Homework 6 The implementation here is derived from the cuda-programming blogspot (link below). It splits the data into blocks as a function of the number of processors and computes each block's histogram counts into a shared memory histogram using atomic adds for each thread within a block, then each block does a single atomic add to the output array. This method produces code that is readable, with a good amount of speedup obtained by reducing the number of atomic adds. the code is about 7x faster than the naive parallel approach. http://cuda-programming.blogspot.com/2013/03/computing-histogram-on-cuda-cuda-code_8.html */ #include "utils.h" #include "reference.cpp" __global__ void histogram_optimized(const unsigned int* const vals, unsigned int* const histo, const unsigned int numVals) { extern __shared__ unsigned int temp[]; temp[threadIdx.x] = 0; __syncthreads(); int i = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; // num threads in block while (i < numVals) { atomicAdd( &(temp[vals[i]]), 1 ); i += stride; } __syncthreads(); atomicAdd( &(histo[threadIdx.x]), temp[threadIdx.x] ); } __global__ void histogram(const unsigned int* const vals, unsigned int* const histo, int numVals) { int idx = threadIdx.x + blockIdx.x * blockDim.x; atomicAdd(&(histo[vals[idx]]), 1); } void computeHistogram(const unsigned int* const d_vals, //INPUT unsigned int* const d_histo, //OUTPUT const unsigned int numBins, const unsigned int numElems) { hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); hipDeviceProp_t prop; checkCudaErrors( hipGetDeviceProperties( &prop, 0 ) ); int blocks = prop.multiProcessorCount; int shared = numBins * sizeof(unsigned int); hipLaunchKernelGGL(( histogram_optimized) , dim3(blocks*8), dim3(numBins), shared, 0, d_vals, d_histo, numElems); }
0a6dad64942b77de788a4cac6af36a304d971efe.cu
/* Udacity HW5 Histogramming for Speed The goal of this assignment is compute a histogram as fast as possible. We have simplified the problem as much as possible to allow you to focus solely on the histogramming algorithm. The input values that you need to histogram are already the exact bins that need to be updated. This is unlike in HW3 where you needed to compute the range of the data and then do: bin = (val - valMin) / valRange to determine the bin. Here the bin is just: bin = val so the serial histogram calculation looks like: for (i = 0; i < numElems; ++i) histo[val[i]]++; That's it! Your job is to make it run as fast as possible! The values are normally distributed - you may take advantage of this fact in your implementation. Evan Cummings Homework 6 The implementation here is derived from the cuda-programming blogspot (link below). It splits the data into blocks as a function of the number of processors and computes each block's histogram counts into a shared memory histogram using atomic adds for each thread within a block, then each block does a single atomic add to the output array. This method produces code that is readable, with a good amount of speedup obtained by reducing the number of atomic adds. the code is about 7x faster than the naive parallel approach. http://cuda-programming.blogspot.com/2013/03/computing-histogram-on-cuda-cuda-code_8.html */ #include "utils.h" #include "reference.cpp" __global__ void histogram_optimized(const unsigned int* const vals, unsigned int* const histo, const unsigned int numVals) { extern __shared__ unsigned int temp[]; temp[threadIdx.x] = 0; __syncthreads(); int i = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; // num threads in block while (i < numVals) { atomicAdd( &(temp[vals[i]]), 1 ); i += stride; } __syncthreads(); atomicAdd( &(histo[threadIdx.x]), temp[threadIdx.x] ); } __global__ void histogram(const unsigned int* const vals, unsigned int* const histo, int numVals) { int idx = threadIdx.x + blockIdx.x * blockDim.x; atomicAdd(&(histo[vals[idx]]), 1); } void computeHistogram(const unsigned int* const d_vals, //INPUT unsigned int* const d_histo, //OUTPUT const unsigned int numBins, const unsigned int numElems) { cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); cudaDeviceProp prop; checkCudaErrors( cudaGetDeviceProperties( &prop, 0 ) ); int blocks = prop.multiProcessorCount; int shared = numBins * sizeof(unsigned int); histogram_optimized <<<blocks*8, numBins, shared>>> (d_vals, d_histo, numElems); }
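The optimized kernel above privatizes the histogram in shared memory, so each block issues only numBins global atomicAdds instead of one per input element. A self-contained CUDA sketch that exercises the same kernel shape end to end (the input data and block count are made up; the real benchmark derives its block count from cudaGetDeviceProperties):

#include <stdio.h>
#include <cuda_runtime.h>

// Same shape as histogram_optimized: one shared-memory bin per thread,
// grid-stride accumulation, then one global atomicAdd per bin per block.
// Must be launched with blockDim.x == numBins, as in the benchmark.
__global__ void histo_shared(const unsigned int* vals, unsigned int* histo,
                             unsigned int numVals) {
    extern __shared__ unsigned int temp[];
    temp[threadIdx.x] = 0;
    __syncthreads();

    unsigned int i = threadIdx.x + blockIdx.x * blockDim.x;
    unsigned int stride = blockDim.x * gridDim.x;
    while (i < numVals) {
        atomicAdd(&temp[vals[i]], 1u);
        i += stride;
    }
    __syncthreads();

    atomicAdd(&histo[threadIdx.x], temp[threadIdx.x]);
}

int main() {
    const unsigned int numBins = 256, numElems = 1 << 20;
    unsigned int *d_vals, *d_histo;
    cudaMallocManaged(&d_vals, numElems * sizeof(unsigned int));
    cudaMallocManaged(&d_histo, numBins * sizeof(unsigned int));
    for (unsigned int i = 0; i < numElems; i++) d_vals[i] = i % numBins;  // toy, uniform input
    cudaMemset(d_histo, 0, numBins * sizeof(unsigned int));

    int blocks = 64;  // stand-in for multiProcessorCount * 8
    histo_shared<<<blocks, numBins, numBins * sizeof(unsigned int)>>>(d_vals, d_histo, numElems);
    cudaDeviceSynchronize();

    printf("bin 0 count = %u (expect %u)\n", d_histo[0], numElems / numBins);
    cudaFree(d_vals); cudaFree(d_histo);
    return 0;
}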
b06f3bbb9dedc7f65c1c244689caddaf658d38df.hip
// !!! This is a file automatically generated by hipify!!! #include <cassert> #include <cfloat> #include <hip/hip_runtime_api.h> #include <hip/hip_runtime.h> #include <iostream> #include <list> #include <math.h> #include <stdlib.h> #include "cudacommon.h" #include "MD.h" #include "OptionParser.h" #include "ResultDatabase.h" #include "Utility.h" using namespace std; // Forward Declarations template <class T, class forceVecType, class posVecType> void runTest(const string& testName, ResultDatabase& resultDB, OptionParser& op); template <class T, class posVecType> inline T distance(const posVecType* position, const int i, const int j); template <class T> inline void insertInOrder(std::list<T>& currDist, std::list<int>& currList, const int j, const T distIJ, const int maxNeighbors); template <class T, class posVecType> inline int buildNeighborList(const int nAtom, const posVecType* position, int* neighborList); template <class T> inline int populateNeighborList(std::list<T>& currDist, std::list<int>& currList, const int j, const int nAtom, int* neighborList); // Texture caches for position info texture<float4, 1, hipReadModeElementType> posTexture; texture<int4, 1, hipReadModeElementType> posTexture_dp; struct texReader_sp { __device__ __forceinline__ float4 operator()(int idx) const { return tex1Dfetch(posTexture, idx); } }; // CUDA doesn't support double4 textures, so we have to do some conversion // here, resulting in a bit of overhead, but it's still faster than // an uncoalesced read struct texReader_dp { __device__ __forceinline__ double4 operator()(int idx) const { #if (__CUDA_ARCH__ < 130) // Devices before arch 130 don't support DP, and having the // __hiloint2double() intrinsic will cause compilation to fail. // This return statement added as a workaround -- it will compile, // but since the arch doesn't support DP, it will never be called return make_double4(0., 0., 0., 0.); #else int4 v = tex1Dfetch(posTexture_dp, idx*2); double2 a = make_double2(__hiloint2double(v.y, v.x), __hiloint2double(v.w, v.z)); v = tex1Dfetch(posTexture_dp, idx*2 + 1); double2 b = make_double2(__hiloint2double(v.y, v.x), __hiloint2double(v.w, v.z)); return make_double4(a.x, a.y, b.x, b.y); #endif } }; // **************************************************************************** // Function: compute_lj_force // // Purpose: // GPU kernel to calculate Lennard Jones force // // Arguments: // force3: array to store the calculated forces // position: positions of atoms // neighCount: number of neighbors for each atom to consider // neighList: atom neighbor list // cutsq: cutoff distance squared // lj1, lj2: LJ force constants // inum: total number of atoms // // Returns: nothing // // Programmer: Kyle Spafford // Creation: July 26, 2010 // // Modifications: // // **************************************************************************** template <class T, class forceVecType, class posVecType> __global__ void compute_lj_force(forceVecType* __restrict__ force3, const posVecType* __restrict__ position, const int neighCount, const int* __restrict__ neighList, const T cutsq, const T lj1, const T lj2, const int inum) { extern __shared__ char tmp_shr[]; // Global ID - one thread per atom int idx = blockIdx.x*blockDim.x + threadIdx.x; // Position of this thread's atom posVecType ipos = position[idx]; // Force accumulator forceVecType f = {0.0f, 0.0f, 0.0f}; int j = 0; while (j < neighCount) { int jidx = neighList[j*inum + idx]; posVecType jpos; jpos = position[jidx]; // Calculate distance T delx = ipos.x - jpos.x; T 
dely = ipos.y - jpos.y; T delz = ipos.z - jpos.z; T r2inv = delx*delx + dely*dely + delz*delz; // If distance is less than cutoff, calculate force // and add to accumulator if (r2inv < cutsq) { r2inv = 1.0f/r2inv; T r6inv = r2inv * r2inv * r2inv; T force = r2inv*r6inv*(lj1*r6inv - lj2); f.x += delx * force; f.y += dely * force; f.z += delz * force; } j++; } // store the results force3[idx] = f; } // **************************************************************************** // Function: checkResults // // Purpose: // Check device results against cpu results -- this is the CPU equivalent of // // Arguments: // d_force: forces calculated on the device // position: positions of atoms // neighList: atom neighbor list // nAtom: number of atoms // Returns: true if results match, false otherwise // // Programmer: Kyle Spafford // Creation: July 26, 2010 // // Modifications: // // **************************************************************************** template <class T, class forceVecType, class posVecType> bool checkResults(forceVecType* d_force, posVecType *position, int *neighList, int nAtom) { for (int i = 0; i < nAtom; i++) { posVecType ipos = position[i]; forceVecType f = {0.0f, 0.0f, 0.0f}; int j = 0; while (j < maxNeighbors) { int jidx = neighList[j*nAtom + i]; posVecType jpos = position[jidx]; // Calculate distance T delx = ipos.x - jpos.x; T dely = ipos.y - jpos.y; T delz = ipos.z - jpos.z; T r2inv = delx*delx + dely*dely + delz*delz; // If distance is less than cutoff, calculate force if (r2inv < cutsq) { r2inv = 1.0f/r2inv; T r6inv = r2inv * r2inv * r2inv; T force = r2inv*r6inv*(lj1*r6inv - lj2); f.x += delx * force; f.y += dely * force; f.z += delz * force; } j++; } // Check the results T diffx = (d_force[i].x - f.x) / d_force[i].x; T diffy = (d_force[i].y - f.y) / d_force[i].y; T diffz = (d_force[i].z - f.z) / d_force[i].z; T err = sqrt(diffx*diffx) + sqrt(diffy*diffy) + sqrt(diffz*diffz); if (err > (3.0 * EPSILON)) { cout << "Test Failed, idx: " << i << " diff: " << err << "\n"; cout << "f.x: " << f.x << " df.x: " << d_force[i].x << "\n"; cout << "f.y: " << f.y << " df.y: " << d_force[i].y << "\n"; cout << "f.z: " << f.z << " df.z: " << d_force[i].z << "\n"; cout << "Test FAILED\n"; return false; } } cout << "Test Passed\n"; return true; } // ******************************************************** // Function: addBenchmarkSpecOptions // // Purpose: // Add benchmark specific options parsing // // Arguments: // op: the options parser / parameter database // // Returns: nothing // // Programmer: Kyle Spafford // Creation: August 13, 2009 // // Modifications: // // ******************************************************** void addBenchmarkSpecOptions(OptionParser &op) { op.addOption("iterations", OPT_INT, "1", "specify MD kernel iterations", 'r'); } // ******************************************************** // Function: RunBenchmark // // Purpose: // Executes the md benchmark // // Arguments: // resultDB: results from the benchmark are stored in this db // op: the options parser / parameter database // // Returns: nothing // // Programmer: Kyle Spafford // Creation: August 13, 2009 // // Modifications: // // ******************************************************** void RunBenchmark(ResultDatabase &resultDB, OptionParser &op) { // Test to see if this device supports double precision int device; hipGetDevice(&device); hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, device); //cout << "Running single precision test" << endl; //runTest<float, float3, float4>("MD-LJ", 
resultDB, op); if ((deviceProp.major == 1 && deviceProp.minor >= 3) || (deviceProp.major >= 2)) { cout << "Running double precision test" << endl; runTest<double, double3, double4> ("MD-LJ-DP", resultDB, op); } else { cout << "Skipping double precision test" << endl; char atts[32] = "DP_Not_Supported"; // resultDB requires neg entry for every possible result int passes = op.getOptionInt("passes"); for (int i = 0; i < passes; i++) { resultDB.AddResult("MD-LJ-DP" , atts, "GB/s", FLT_MAX); resultDB.AddResult("MD-LJ-DP_PCIe" , atts, "GB/s", FLT_MAX); resultDB.AddResult("MD-LJ-DP-Bandwidth", atts, "GB/s", FLT_MAX); resultDB.AddResult("MD-LJ-DP-Bandwidth_PCIe", atts, "GB/s", FLT_MAX); resultDB.AddResult("MD-LJ-DP_Parity" , atts, "GB/s", FLT_MAX); } } } template <class T, class forceVecType, class posVecType> void runTest(const string& testName, ResultDatabase& resultDB, OptionParser& op) { // Problem Parameters const int probSizes[4] = { 12288, 24576, 36864, 73728 }; int sizeClass = op.getOptionInt("size"); assert(sizeClass >= 0 && sizeClass < 5); int nAtom = probSizes[sizeClass - 1]; // Allocate problem data on host posVecType* position; forceVecType* force; int* neighborList; CUDA_SAFE_CALL(hipHostMalloc((void**)&position, nAtom*sizeof(posVecType))); CUDA_SAFE_CALL(hipHostMalloc((void**)&force, nAtom*sizeof(forceVecType))); CUDA_SAFE_CALL(hipHostMalloc((void**)&neighborList, nAtom*maxNeighbors*sizeof(int))); // Allocate device memory for position and force forceVecType* d_force; posVecType* d_position; CUDA_SAFE_CALL(hipMalloc((void**)&d_force, nAtom*sizeof(forceVecType))); CUDA_SAFE_CALL(hipMalloc((void**)&d_position, nAtom*sizeof(posVecType))); // Allocate device memory for neighbor list int* d_neighborList; CUDA_SAFE_CALL(hipMalloc((void**)&d_neighborList, nAtom*maxNeighbors*sizeof(int))); cout << "Initializing test problem (this can take several " "minutes for large problems)\n"; // Seed random number generator srand48(8650341L); // Initialize positions -- random distribution in cubic domain // domainEdge constant specifies edge length for (int i = 0; i < nAtom; i++) { position[i].x = (T)(drand48() * domainEdge); position[i].y = (T)(drand48() * domainEdge); position[i].z = (T)(drand48() * domainEdge); } // Keep track of how many atoms are within the cutoff distance to // accurately calculate FLOPS later int totalPairs = buildNeighborList<T, posVecType>(nAtom, position, neighborList); cout << "Finished.\n"; cout << totalPairs << " of " << nAtom*maxNeighbors << " pairs within cutoff distance = " << 100.0 * ((double)totalPairs / (nAtom*maxNeighbors)) << " %" << endl; // Time the transfer of input data to the GPU hipEvent_t inputTransfer_start, inputTransfer_stop; hipEventCreate(&inputTransfer_start); hipEventCreate(&inputTransfer_stop); hipEventRecord(inputTransfer_start, 0); // Copy neighbor list data to GPU CUDA_SAFE_CALL(hipMemcpy(d_neighborList, neighborList, maxNeighbors*nAtom*sizeof(int), hipMemcpyHostToDevice)); // Copy position to GPU CUDA_SAFE_CALL(hipMemcpy(d_position, position, nAtom*sizeof(posVecType), hipMemcpyHostToDevice)); hipEventRecord(inputTransfer_stop, 0); CUDA_SAFE_CALL(hipEventSynchronize(inputTransfer_stop)); // Get elapsed time float inputTransfer_time = 0.0f; hipEventElapsedTime(&inputTransfer_time, inputTransfer_start, inputTransfer_stop); inputTransfer_time *= 1.e-3; int blockSize = 256; //int blockSize = 512; int gridSize = nAtom / blockSize; // Warm up the kernel and check correctness hipLaunchKernelGGL(( compute_lj_force<T, forceVecType, posVecType>) , 
dim3(gridSize), dim3(blockSize), blockSize *5 *4, 0, d_force, d_position, maxNeighbors, d_neighborList, cutsq, lj1, lj2, nAtom); CUDA_SAFE_CALL(hipDeviceSynchronize()); // Copy back forces hipEvent_t outputTransfer_start, outputTransfer_stop; hipEventCreate(&outputTransfer_start); hipEventCreate(&outputTransfer_stop); hipEventRecord(outputTransfer_start, 0); CUDA_SAFE_CALL(hipMemcpy(force, d_force, nAtom*sizeof(forceVecType), hipMemcpyDeviceToHost)); hipEventRecord(outputTransfer_stop, 0); CUDA_SAFE_CALL(hipEventSynchronize(outputTransfer_stop)); // Get elapsed time float outputTransfer_time = 0.0f; hipEventElapsedTime(&outputTransfer_time, outputTransfer_start, outputTransfer_stop); outputTransfer_time *= 1.e-3; // If results are incorrect, skip the performance tests cout << "Performing Correctness Check (can take several minutes)\n"; if (!checkResults<T, forceVecType, posVecType> (force, position, neighborList, nAtom)) { return; } // Begin performance tests hipEvent_t kernel_start, kernel_stop; hipEventCreate(&kernel_start); hipEventCreate(&kernel_stop); int passes = op.getOptionInt("passes"); int iter = op.getOptionInt("iterations"); for (int i = 0; i < passes; i++) { // Other kernels will be involved in true parallel versions hipEventRecord(kernel_start, 0); for (int j = 0; j < iter; j++) { hipLaunchKernelGGL(( compute_lj_force<T, forceVecType, posVecType>) , dim3(gridSize), dim3(blockSize), blockSize * 5 * 4, 0, d_force, d_position, maxNeighbors, d_neighborList, cutsq, lj1, lj2, nAtom); } hipEventRecord(kernel_stop, 0); CUDA_SAFE_CALL(hipEventSynchronize(kernel_stop)); // get elapsed time float kernel_time = 0.0f; hipEventElapsedTime(&kernel_time, kernel_start, kernel_stop); kernel_time /= (float)iter; kernel_time *= 1.e-3; // Convert to seconds // Total number of flops // Every pair of atoms compute distance - 8 flops // totalPairs with distance < cutsq perform an additional 13 // for force calculation double gflops = ((8 * nAtom * maxNeighbors) + (totalPairs * 13)) * 1e-9; char atts[64]; sprintf(atts, "%d_atoms", nAtom);; resultDB.AddResult(testName, atts, "GFLOPS", gflops / kernel_time); resultDB.AddResult(testName+"_PCIe", atts, "GFLOPS", gflops / (kernel_time+inputTransfer_time+outputTransfer_time)); int numPairs = nAtom * maxNeighbors; long int nbytes = (3 * sizeof(T) * (1+numPairs)) + // position data (3 * sizeof(T) * nAtom) + // force for each atom (sizeof(int) * numPairs); // neighbor list double gbytes = (double)nbytes / (1000. * 1000. 
            * 1000.);
        resultDB.AddResult(testName + "-Bandwidth", atts, "GB/s",
                gbytes / kernel_time);
        resultDB.AddResult(testName + "-Bandwidth_PCIe", atts, "GB/s",
                gbytes / (kernel_time+inputTransfer_time+outputTransfer_time));
        resultDB.AddResult(testName+"_Parity", atts, "N",
                (inputTransfer_time+outputTransfer_time) / kernel_time);
    }

    // Clean up
    // Host
    CUDA_SAFE_CALL(hipHostFree(position));
    CUDA_SAFE_CALL(hipHostFree(force));
    CUDA_SAFE_CALL(hipHostFree(neighborList));
    // Device
    CUDA_SAFE_CALL(hipUnbindTexture(posTexture));
    CUDA_SAFE_CALL(hipFree(d_position));
    CUDA_SAFE_CALL(hipFree(d_force));
    CUDA_SAFE_CALL(hipFree(d_neighborList));
    CUDA_SAFE_CALL(hipEventDestroy(inputTransfer_start));
    CUDA_SAFE_CALL(hipEventDestroy(inputTransfer_stop));
    CUDA_SAFE_CALL(hipEventDestroy(outputTransfer_start));
    CUDA_SAFE_CALL(hipEventDestroy(outputTransfer_stop));
    CUDA_SAFE_CALL(hipEventDestroy(kernel_start));
    CUDA_SAFE_CALL(hipEventDestroy(kernel_stop));
}

// ********************************************************
// Function: distance
//
// Purpose:
//   Calculates distance squared between two atoms
//
// Arguments:
//   position: atom position information
//   i, j: indexes of the two atoms
//
// Returns: the computed distance squared
//
// Programmer: Kyle Spafford
// Creation: July 26, 2010
//
// Modifications:
//
// ********************************************************
template <class T, class posVecType>
inline T distance(const posVecType* position, const int i, const int j)
{
    posVecType ipos = position[i];
    posVecType jpos = position[j];
    T delx = ipos.x - jpos.x;
    T dely = ipos.y - jpos.y;
    T delz = ipos.z - jpos.z;
    T r2inv = delx * delx + dely * dely + delz * delz;
    return r2inv;
}

// ********************************************************
// Function: insertInOrder
//
// Purpose:
//   Adds atom j to current neighbor list and distance list
//   if its distance is low enough.
//
// Arguments:
//   currDist: distance between current atom and each of its neighbors in the
//             current list, sorted in ascending order
//   currList: neighbor list for current atom, sorted by distance in asc.
order // j: atom to insert into neighbor list // distIJ: distance between current atom and atom J // maxNeighbors: max length of neighbor list // // Returns: nothing // // Programmer: Kyle Spafford // Creation: July 26, 2010 // // Modifications: // // ******************************************************** template <class T> inline void insertInOrder(list<T>& currDist, list<int>& currList, const int j, const T distIJ, const int maxNeighbors) { typename list<T>::iterator it; typename list<int>::iterator it2; it2 = currList.begin(); T currMax = currDist.back(); if (distIJ > currMax) return; for (it=currDist.begin(); it!=currDist.end(); it++) { if (distIJ < (*it)) { // Insert into appropriate place in list currDist.insert(it,distIJ); currList.insert(it2, j); // Trim end of list currList.resize(maxNeighbors); currDist.resize(maxNeighbors); return; } it2++; } } // ******************************************************** // Function: buildNeighborList // // Purpose: // Builds the neighbor list structure for all atoms for GPU coalesced reads // and counts the number of pairs within the cutoff distance, so // the benchmark gets an accurate FLOPS count // // Arguments: // nAtom: total number of atoms // position: pointer to the atom's position information // neighborList: pointer to neighbor list data structure // // Returns: number of pairs of atoms within cutoff distance // // Programmer: Kyle Spafford // Creation: July 26, 2010 // // Modifications: // Jeremy Meredith, Tue Oct 9 17:35:16 EDT 2012 // On some slow systems and without optimization, this // could take a while. Give users a rough completion // percentage so they don't give up. // // ******************************************************** template <class T, class posVecType> inline int buildNeighborList(const int nAtom, const posVecType* position, int* neighborList) { int totalPairs = 0; // Build Neighbor List // Find the nearest N atoms to each other atom, where N = maxNeighbors for (int i = 0; i < nAtom; i++) { // Print progress every 10% completion. if (int((i+1)/(nAtom/10)) > int(i/(nAtom/10))) cout << " " << 10*int((i+1)/(nAtom/10)) << "% done\n"; // Current neighbor list for atom i, initialized to -1 list<int> currList(maxNeighbors, -1); // Distance to those neighbors. We're populating this with the // closest neighbors, so initialize to FLT_MAX list<T> currDist(maxNeighbors, FLT_MAX); for (int j = 0; j < nAtom; j++) { if (i == j) continue; // An atom cannot be its own neighbor // Calculate distance and insert in order into the current lists T distIJ = distance<T, posVecType>(position, i, j); insertInOrder<T>(currDist, currList, j, distIJ, maxNeighbors); } // We should now have the closest maxNeighbors neighbors and their // distances to atom i. Populate the neighbor list data structure // for GPU coalesced reads. // The populate method returns how many of the maxNeighbors closest // neighbors are within the cutoff distance. This will be used to // calculate GFLOPS later. 
totalPairs += populateNeighborList<T>(currDist, currList, i, nAtom, neighborList); } return totalPairs; } // ******************************************************** // Function: populateNeighborList // // Purpose: // Populates the neighbor list structure for a *single* atom for // GPU coalesced reads and counts the number of pairs within the cutoff // distance, (for current atom) so the benchmark gets an accurate FLOPS count // // Arguments: // currDist: distance between current atom and each of its maxNeighbors // neighbors // currList: current list of neighbors // i: current atom // nAtom: total number of atoms // neighborList: pointer to neighbor list data structure // // Returns: number of pairs of atoms within cutoff distance // // Programmer: Kyle Spafford // Creation: July 26, 2010 // // Modifications: // // ******************************************************** template <class T> inline int populateNeighborList(list<T>& currDist, list<int>& currList, const int i, const int nAtom, int* neighborList) { int idx = 0; int validPairs = 0; // Pairs of atoms closer together than the cutoff // Iterate across distance and neighbor list typename list<T>::iterator distanceIter = currDist.begin(); for (list<int>::iterator neighborIter = currList.begin(); neighborIter != currList.end(); neighborIter++) { // Populate packed neighbor list neighborList[(idx * nAtom) + i] = *neighborIter; // If the distance is less than cutoff, increment valid counter if (*distanceIter < cutsq) validPairs++; // Increment idx and distance iterator idx++; distanceIter++; } return validPairs; }
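Both populateNeighborList() above and the compute_lj_force kernel depend on the same neighbor-major packing: slot j of atom i is stored at index j*nAtom + i, so threads with consecutive atom indices read consecutive words of the list. The short host-only sketch below is not part of either benchmark file; its sizes and neighbor ids are made up purely to demonstrate that indexing convention.

// Standalone sketch of the packed neighbor-list layout used above.
// Toy sizes and dummy neighbor ids; for illustration only.
#include <cstdio>
#include <vector>

int main()
{
    const int nAtom        = 8;   // toy atom count
    const int maxNeighbors = 3;   // toy neighbors per atom
    std::vector<int> neighborList(nAtom * maxNeighbors, -1);

    // Pack exactly as populateNeighborList() does: slot j of atom i -> j*nAtom + i
    for (int i = 0; i < nAtom; i++)
        for (int j = 0; j < maxNeighbors; j++)
            neighborList[j * nAtom + i] = (i + j + 1) % nAtom;  // dummy neighbor id

    // For a fixed slot j, atoms 0..nAtom-1 occupy one contiguous run of ints,
    // which is what makes the kernel's neighList[j*inum + idx] loads coalesced.
    for (int j = 0; j < maxNeighbors; j++)
        printf("slot %d -> indices [%d, %d)\n", j, j * nAtom, (j + 1) * nAtom);

    return 0;
}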
b06f3bbb9dedc7f65c1c244689caddaf658d38df.cu
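The .cu listing that follows appears to be the CUDA original of the same MD benchmark; diffing it against the hipified source above shows the mechanical substitutions (cudaMallocHost -> hipHostMalloc, cudaFreeHost -> hipHostFree, cudaThreadSynchronize -> hipDeviceSynchronize, the cudaEvent*/cudaMemcpy/cudaFree families -> their hip* counterparts, and the triple-chevron launch -> hipLaunchKernelGGL). The fragment below is a minimal, hypothetical HIP program, not taken from either file, that only sketches the launch-syntax translation.

// Hypothetical HIP example of the launch translation; the kernel and sizes
// here are illustrative and do not appear in the benchmark.
#include <hip/hip_runtime.h>
#include <cstdio>

__global__ void scale(float* x, float a, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] *= a;
}

int main()
{
    const int n = 1024;
    float* d_x = nullptr;
    hipMalloc((void**)&d_x, n * sizeof(float));
    hipMemset(d_x, 0, n * sizeof(float));

    // CUDA form of the same launch would be:
    //     scale<<<n / 256, 256, 0>>>(d_x, 2.0f, n);
    hipLaunchKernelGGL(scale, dim3(n / 256), dim3(256), 0, 0, d_x, 2.0f, n);
    hipDeviceSynchronize();

    hipFree(d_x);
    printf("done\n");
    return 0;
}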
#include <cassert> #include <cfloat> #include <cuda_runtime_api.h> #include <cuda.h> #include <iostream> #include <list> #include <math.h> #include <stdlib.h> #include "cudacommon.h" #include "MD.h" #include "OptionParser.h" #include "ResultDatabase.h" #include "Utility.h" using namespace std; // Forward Declarations template <class T, class forceVecType, class posVecType> void runTest(const string& testName, ResultDatabase& resultDB, OptionParser& op); template <class T, class posVecType> inline T distance(const posVecType* position, const int i, const int j); template <class T> inline void insertInOrder(std::list<T>& currDist, std::list<int>& currList, const int j, const T distIJ, const int maxNeighbors); template <class T, class posVecType> inline int buildNeighborList(const int nAtom, const posVecType* position, int* neighborList); template <class T> inline int populateNeighborList(std::list<T>& currDist, std::list<int>& currList, const int j, const int nAtom, int* neighborList); // Texture caches for position info texture<float4, 1, cudaReadModeElementType> posTexture; texture<int4, 1, cudaReadModeElementType> posTexture_dp; struct texReader_sp { __device__ __forceinline__ float4 operator()(int idx) const { return tex1Dfetch(posTexture, idx); } }; // CUDA doesn't support double4 textures, so we have to do some conversion // here, resulting in a bit of overhead, but it's still faster than // an uncoalesced read struct texReader_dp { __device__ __forceinline__ double4 operator()(int idx) const { #if (__CUDA_ARCH__ < 130) // Devices before arch 130 don't support DP, and having the // __hiloint2double() intrinsic will cause compilation to fail. // This return statement added as a workaround -- it will compile, // but since the arch doesn't support DP, it will never be called return make_double4(0., 0., 0., 0.); #else int4 v = tex1Dfetch(posTexture_dp, idx*2); double2 a = make_double2(__hiloint2double(v.y, v.x), __hiloint2double(v.w, v.z)); v = tex1Dfetch(posTexture_dp, idx*2 + 1); double2 b = make_double2(__hiloint2double(v.y, v.x), __hiloint2double(v.w, v.z)); return make_double4(a.x, a.y, b.x, b.y); #endif } }; // **************************************************************************** // Function: compute_lj_force // // Purpose: // GPU kernel to calculate Lennard Jones force // // Arguments: // force3: array to store the calculated forces // position: positions of atoms // neighCount: number of neighbors for each atom to consider // neighList: atom neighbor list // cutsq: cutoff distance squared // lj1, lj2: LJ force constants // inum: total number of atoms // // Returns: nothing // // Programmer: Kyle Spafford // Creation: July 26, 2010 // // Modifications: // // **************************************************************************** template <class T, class forceVecType, class posVecType> __global__ void compute_lj_force(forceVecType* __restrict__ force3, const posVecType* __restrict__ position, const int neighCount, const int* __restrict__ neighList, const T cutsq, const T lj1, const T lj2, const int inum) { extern __shared__ char tmp_shr[]; // Global ID - one thread per atom int idx = blockIdx.x*blockDim.x + threadIdx.x; // Position of this thread's atom posVecType ipos = position[idx]; // Force accumulator forceVecType f = {0.0f, 0.0f, 0.0f}; int j = 0; while (j < neighCount) { int jidx = neighList[j*inum + idx]; posVecType jpos; jpos = position[jidx]; // Calculate distance T delx = ipos.x - jpos.x; T dely = ipos.y - jpos.y; T delz = ipos.z - jpos.z; T r2inv = delx*delx + 
dely*dely + delz*delz; // If distance is less than cutoff, calculate force // and add to accumulator if (r2inv < cutsq) { r2inv = 1.0f/r2inv; T r6inv = r2inv * r2inv * r2inv; T force = r2inv*r6inv*(lj1*r6inv - lj2); f.x += delx * force; f.y += dely * force; f.z += delz * force; } j++; } // store the results force3[idx] = f; } // **************************************************************************** // Function: checkResults // // Purpose: // Check device results against cpu results -- this is the CPU equivalent of // // Arguments: // d_force: forces calculated on the device // position: positions of atoms // neighList: atom neighbor list // nAtom: number of atoms // Returns: true if results match, false otherwise // // Programmer: Kyle Spafford // Creation: July 26, 2010 // // Modifications: // // **************************************************************************** template <class T, class forceVecType, class posVecType> bool checkResults(forceVecType* d_force, posVecType *position, int *neighList, int nAtom) { for (int i = 0; i < nAtom; i++) { posVecType ipos = position[i]; forceVecType f = {0.0f, 0.0f, 0.0f}; int j = 0; while (j < maxNeighbors) { int jidx = neighList[j*nAtom + i]; posVecType jpos = position[jidx]; // Calculate distance T delx = ipos.x - jpos.x; T dely = ipos.y - jpos.y; T delz = ipos.z - jpos.z; T r2inv = delx*delx + dely*dely + delz*delz; // If distance is less than cutoff, calculate force if (r2inv < cutsq) { r2inv = 1.0f/r2inv; T r6inv = r2inv * r2inv * r2inv; T force = r2inv*r6inv*(lj1*r6inv - lj2); f.x += delx * force; f.y += dely * force; f.z += delz * force; } j++; } // Check the results T diffx = (d_force[i].x - f.x) / d_force[i].x; T diffy = (d_force[i].y - f.y) / d_force[i].y; T diffz = (d_force[i].z - f.z) / d_force[i].z; T err = sqrt(diffx*diffx) + sqrt(diffy*diffy) + sqrt(diffz*diffz); if (err > (3.0 * EPSILON)) { cout << "Test Failed, idx: " << i << " diff: " << err << "\n"; cout << "f.x: " << f.x << " df.x: " << d_force[i].x << "\n"; cout << "f.y: " << f.y << " df.y: " << d_force[i].y << "\n"; cout << "f.z: " << f.z << " df.z: " << d_force[i].z << "\n"; cout << "Test FAILED\n"; return false; } } cout << "Test Passed\n"; return true; } // ******************************************************** // Function: addBenchmarkSpecOptions // // Purpose: // Add benchmark specific options parsing // // Arguments: // op: the options parser / parameter database // // Returns: nothing // // Programmer: Kyle Spafford // Creation: August 13, 2009 // // Modifications: // // ******************************************************** void addBenchmarkSpecOptions(OptionParser &op) { op.addOption("iterations", OPT_INT, "1", "specify MD kernel iterations", 'r'); } // ******************************************************** // Function: RunBenchmark // // Purpose: // Executes the md benchmark // // Arguments: // resultDB: results from the benchmark are stored in this db // op: the options parser / parameter database // // Returns: nothing // // Programmer: Kyle Spafford // Creation: August 13, 2009 // // Modifications: // // ******************************************************** void RunBenchmark(ResultDatabase &resultDB, OptionParser &op) { // Test to see if this device supports double precision int device; cudaGetDevice(&device); cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, device); //cout << "Running single precision test" << endl; //runTest<float, float3, float4>("MD-LJ", resultDB, op); if ((deviceProp.major == 1 && deviceProp.minor >= 3) || 
(deviceProp.major >= 2)) { cout << "Running double precision test" << endl; runTest<double, double3, double4> ("MD-LJ-DP", resultDB, op); } else { cout << "Skipping double precision test" << endl; char atts[32] = "DP_Not_Supported"; // resultDB requires neg entry for every possible result int passes = op.getOptionInt("passes"); for (int i = 0; i < passes; i++) { resultDB.AddResult("MD-LJ-DP" , atts, "GB/s", FLT_MAX); resultDB.AddResult("MD-LJ-DP_PCIe" , atts, "GB/s", FLT_MAX); resultDB.AddResult("MD-LJ-DP-Bandwidth", atts, "GB/s", FLT_MAX); resultDB.AddResult("MD-LJ-DP-Bandwidth_PCIe", atts, "GB/s", FLT_MAX); resultDB.AddResult("MD-LJ-DP_Parity" , atts, "GB/s", FLT_MAX); } } } template <class T, class forceVecType, class posVecType> void runTest(const string& testName, ResultDatabase& resultDB, OptionParser& op) { // Problem Parameters const int probSizes[4] = { 12288, 24576, 36864, 73728 }; int sizeClass = op.getOptionInt("size"); assert(sizeClass >= 0 && sizeClass < 5); int nAtom = probSizes[sizeClass - 1]; // Allocate problem data on host posVecType* position; forceVecType* force; int* neighborList; CUDA_SAFE_CALL(cudaMallocHost((void**)&position, nAtom*sizeof(posVecType))); CUDA_SAFE_CALL(cudaMallocHost((void**)&force, nAtom*sizeof(forceVecType))); CUDA_SAFE_CALL(cudaMallocHost((void**)&neighborList, nAtom*maxNeighbors*sizeof(int))); // Allocate device memory for position and force forceVecType* d_force; posVecType* d_position; CUDA_SAFE_CALL(cudaMalloc((void**)&d_force, nAtom*sizeof(forceVecType))); CUDA_SAFE_CALL(cudaMalloc((void**)&d_position, nAtom*sizeof(posVecType))); // Allocate device memory for neighbor list int* d_neighborList; CUDA_SAFE_CALL(cudaMalloc((void**)&d_neighborList, nAtom*maxNeighbors*sizeof(int))); cout << "Initializing test problem (this can take several " "minutes for large problems)\n"; // Seed random number generator srand48(8650341L); // Initialize positions -- random distribution in cubic domain // domainEdge constant specifies edge length for (int i = 0; i < nAtom; i++) { position[i].x = (T)(drand48() * domainEdge); position[i].y = (T)(drand48() * domainEdge); position[i].z = (T)(drand48() * domainEdge); } // Keep track of how many atoms are within the cutoff distance to // accurately calculate FLOPS later int totalPairs = buildNeighborList<T, posVecType>(nAtom, position, neighborList); cout << "Finished.\n"; cout << totalPairs << " of " << nAtom*maxNeighbors << " pairs within cutoff distance = " << 100.0 * ((double)totalPairs / (nAtom*maxNeighbors)) << " %" << endl; // Time the transfer of input data to the GPU cudaEvent_t inputTransfer_start, inputTransfer_stop; cudaEventCreate(&inputTransfer_start); cudaEventCreate(&inputTransfer_stop); cudaEventRecord(inputTransfer_start, 0); // Copy neighbor list data to GPU CUDA_SAFE_CALL(cudaMemcpy(d_neighborList, neighborList, maxNeighbors*nAtom*sizeof(int), cudaMemcpyHostToDevice)); // Copy position to GPU CUDA_SAFE_CALL(cudaMemcpy(d_position, position, nAtom*sizeof(posVecType), cudaMemcpyHostToDevice)); cudaEventRecord(inputTransfer_stop, 0); CUDA_SAFE_CALL(cudaEventSynchronize(inputTransfer_stop)); // Get elapsed time float inputTransfer_time = 0.0f; cudaEventElapsedTime(&inputTransfer_time, inputTransfer_start, inputTransfer_stop); inputTransfer_time *= 1.e-3; int blockSize = 256; //int blockSize = 512; int gridSize = nAtom / blockSize; // Warm up the kernel and check correctness compute_lj_force<T, forceVecType, posVecType> <<<gridSize, blockSize, blockSize *5 *4>>> (d_force, d_position, maxNeighbors, 
d_neighborList, cutsq, lj1, lj2, nAtom); CUDA_SAFE_CALL(cudaThreadSynchronize()); // Copy back forces cudaEvent_t outputTransfer_start, outputTransfer_stop; cudaEventCreate(&outputTransfer_start); cudaEventCreate(&outputTransfer_stop); cudaEventRecord(outputTransfer_start, 0); CUDA_SAFE_CALL(cudaMemcpy(force, d_force, nAtom*sizeof(forceVecType), cudaMemcpyDeviceToHost)); cudaEventRecord(outputTransfer_stop, 0); CUDA_SAFE_CALL(cudaEventSynchronize(outputTransfer_stop)); // Get elapsed time float outputTransfer_time = 0.0f; cudaEventElapsedTime(&outputTransfer_time, outputTransfer_start, outputTransfer_stop); outputTransfer_time *= 1.e-3; // If results are incorrect, skip the performance tests cout << "Performing Correctness Check (can take several minutes)\n"; if (!checkResults<T, forceVecType, posVecType> (force, position, neighborList, nAtom)) { return; } // Begin performance tests cudaEvent_t kernel_start, kernel_stop; cudaEventCreate(&kernel_start); cudaEventCreate(&kernel_stop); int passes = op.getOptionInt("passes"); int iter = op.getOptionInt("iterations"); for (int i = 0; i < passes; i++) { // Other kernels will be involved in true parallel versions cudaEventRecord(kernel_start, 0); for (int j = 0; j < iter; j++) { compute_lj_force<T, forceVecType, posVecType> <<<gridSize, blockSize, blockSize * 5 * 4>>> (d_force, d_position, maxNeighbors, d_neighborList, cutsq, lj1, lj2, nAtom); } cudaEventRecord(kernel_stop, 0); CUDA_SAFE_CALL(cudaEventSynchronize(kernel_stop)); // get elapsed time float kernel_time = 0.0f; cudaEventElapsedTime(&kernel_time, kernel_start, kernel_stop); kernel_time /= (float)iter; kernel_time *= 1.e-3; // Convert to seconds // Total number of flops // Every pair of atoms compute distance - 8 flops // totalPairs with distance < cutsq perform an additional 13 // for force calculation double gflops = ((8 * nAtom * maxNeighbors) + (totalPairs * 13)) * 1e-9; char atts[64]; sprintf(atts, "%d_atoms", nAtom);; resultDB.AddResult(testName, atts, "GFLOPS", gflops / kernel_time); resultDB.AddResult(testName+"_PCIe", atts, "GFLOPS", gflops / (kernel_time+inputTransfer_time+outputTransfer_time)); int numPairs = nAtom * maxNeighbors; long int nbytes = (3 * sizeof(T) * (1+numPairs)) + // position data (3 * sizeof(T) * nAtom) + // force for each atom (sizeof(int) * numPairs); // neighbor list double gbytes = (double)nbytes / (1000. * 1000. 
            * 1000.);
        resultDB.AddResult(testName + "-Bandwidth", atts, "GB/s",
                gbytes / kernel_time);
        resultDB.AddResult(testName + "-Bandwidth_PCIe", atts, "GB/s",
                gbytes / (kernel_time+inputTransfer_time+outputTransfer_time));
        resultDB.AddResult(testName+"_Parity", atts, "N",
                (inputTransfer_time+outputTransfer_time) / kernel_time);
    }

    // Clean up
    // Host
    CUDA_SAFE_CALL(cudaFreeHost(position));
    CUDA_SAFE_CALL(cudaFreeHost(force));
    CUDA_SAFE_CALL(cudaFreeHost(neighborList));
    // Device
    CUDA_SAFE_CALL(cudaUnbindTexture(posTexture));
    CUDA_SAFE_CALL(cudaFree(d_position));
    CUDA_SAFE_CALL(cudaFree(d_force));
    CUDA_SAFE_CALL(cudaFree(d_neighborList));
    CUDA_SAFE_CALL(cudaEventDestroy(inputTransfer_start));
    CUDA_SAFE_CALL(cudaEventDestroy(inputTransfer_stop));
    CUDA_SAFE_CALL(cudaEventDestroy(outputTransfer_start));
    CUDA_SAFE_CALL(cudaEventDestroy(outputTransfer_stop));
    CUDA_SAFE_CALL(cudaEventDestroy(kernel_start));
    CUDA_SAFE_CALL(cudaEventDestroy(kernel_stop));
}

// ********************************************************
// Function: distance
//
// Purpose:
//   Calculates distance squared between two atoms
//
// Arguments:
//   position: atom position information
//   i, j: indexes of the two atoms
//
// Returns: the computed distance squared
//
// Programmer: Kyle Spafford
// Creation: July 26, 2010
//
// Modifications:
//
// ********************************************************
template <class T, class posVecType>
inline T distance(const posVecType* position, const int i, const int j)
{
    posVecType ipos = position[i];
    posVecType jpos = position[j];
    T delx = ipos.x - jpos.x;
    T dely = ipos.y - jpos.y;
    T delz = ipos.z - jpos.z;
    T r2inv = delx * delx + dely * dely + delz * delz;
    return r2inv;
}

// ********************************************************
// Function: insertInOrder
//
// Purpose:
//   Adds atom j to current neighbor list and distance list
//   if its distance is low enough.
//
// Arguments:
//   currDist: distance between current atom and each of its neighbors in the
//             current list, sorted in ascending order
//   currList: neighbor list for current atom, sorted by distance in asc.
order // j: atom to insert into neighbor list // distIJ: distance between current atom and atom J // maxNeighbors: max length of neighbor list // // Returns: nothing // // Programmer: Kyle Spafford // Creation: July 26, 2010 // // Modifications: // // ******************************************************** template <class T> inline void insertInOrder(list<T>& currDist, list<int>& currList, const int j, const T distIJ, const int maxNeighbors) { typename list<T>::iterator it; typename list<int>::iterator it2; it2 = currList.begin(); T currMax = currDist.back(); if (distIJ > currMax) return; for (it=currDist.begin(); it!=currDist.end(); it++) { if (distIJ < (*it)) { // Insert into appropriate place in list currDist.insert(it,distIJ); currList.insert(it2, j); // Trim end of list currList.resize(maxNeighbors); currDist.resize(maxNeighbors); return; } it2++; } } // ******************************************************** // Function: buildNeighborList // // Purpose: // Builds the neighbor list structure for all atoms for GPU coalesced reads // and counts the number of pairs within the cutoff distance, so // the benchmark gets an accurate FLOPS count // // Arguments: // nAtom: total number of atoms // position: pointer to the atom's position information // neighborList: pointer to neighbor list data structure // // Returns: number of pairs of atoms within cutoff distance // // Programmer: Kyle Spafford // Creation: July 26, 2010 // // Modifications: // Jeremy Meredith, Tue Oct 9 17:35:16 EDT 2012 // On some slow systems and without optimization, this // could take a while. Give users a rough completion // percentage so they don't give up. // // ******************************************************** template <class T, class posVecType> inline int buildNeighborList(const int nAtom, const posVecType* position, int* neighborList) { int totalPairs = 0; // Build Neighbor List // Find the nearest N atoms to each other atom, where N = maxNeighbors for (int i = 0; i < nAtom; i++) { // Print progress every 10% completion. if (int((i+1)/(nAtom/10)) > int(i/(nAtom/10))) cout << " " << 10*int((i+1)/(nAtom/10)) << "% done\n"; // Current neighbor list for atom i, initialized to -1 list<int> currList(maxNeighbors, -1); // Distance to those neighbors. We're populating this with the // closest neighbors, so initialize to FLT_MAX list<T> currDist(maxNeighbors, FLT_MAX); for (int j = 0; j < nAtom; j++) { if (i == j) continue; // An atom cannot be its own neighbor // Calculate distance and insert in order into the current lists T distIJ = distance<T, posVecType>(position, i, j); insertInOrder<T>(currDist, currList, j, distIJ, maxNeighbors); } // We should now have the closest maxNeighbors neighbors and their // distances to atom i. Populate the neighbor list data structure // for GPU coalesced reads. // The populate method returns how many of the maxNeighbors closest // neighbors are within the cutoff distance. This will be used to // calculate GFLOPS later. 
totalPairs += populateNeighborList<T>(currDist, currList, i, nAtom, neighborList); } return totalPairs; } // ******************************************************** // Function: populateNeighborList // // Purpose: // Populates the neighbor list structure for a *single* atom for // GPU coalesced reads and counts the number of pairs within the cutoff // distance, (for current atom) so the benchmark gets an accurate FLOPS count // // Arguments: // currDist: distance between current atom and each of its maxNeighbors // neighbors // currList: current list of neighbors // i: current atom // nAtom: total number of atoms // neighborList: pointer to neighbor list data structure // // Returns: number of pairs of atoms within cutoff distance // // Programmer: Kyle Spafford // Creation: July 26, 2010 // // Modifications: // // ******************************************************** template <class T> inline int populateNeighborList(list<T>& currDist, list<int>& currList, const int i, const int nAtom, int* neighborList) { int idx = 0; int validPairs = 0; // Pairs of atoms closer together than the cutoff // Iterate across distance and neighbor list typename list<T>::iterator distanceIter = currDist.begin(); for (list<int>::iterator neighborIter = currList.begin(); neighborIter != currList.end(); neighborIter++) { // Populate packed neighbor list neighborList[(idx * nAtom) + i] = *neighborIter; // If the distance is less than cutoff, increment valid counter if (*distanceIter < cutsq) validPairs++; // Increment idx and distance iterator idx++; distanceIter++; } return validPairs; }
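For reference, the performance figures reported by runTest() in both listings come from a small amount of host arithmetic: 8 flops are charged per neighbor-pair distance test, 13 more per pair that falls inside the cutoff, and the byte count covers position reads, force writes and the neighbor list. The standalone sketch below re-derives those numbers; nAtom matches the smallest problem size used above, while maxNeighbors (defined in MD.h, not shown here), totalPairs and the kernel time are assumed illustration values.

// Standalone sketch of the GFLOPS / bandwidth bookkeeping from runTest();
// the inputs marked "assumed" are illustration values, not measured data.
#include <cstdio>

int main()
{
    const int    nAtom        = 12288;   // smallest problem size above
    const int    maxNeighbors = 128;     // assumed value of the MD.h constant
    const long   totalPairs   = 500000;  // assumed count of pairs within cutoff
    const double kernelTimeS  = 1.0e-3;  // assumed kernel time in seconds
    const long   sizeofT      = 8;       // double-precision run

    long   numPairs = (long)nAtom * maxNeighbors;
    double gflop    = ((8.0 * nAtom * maxNeighbors) + (13.0 * totalPairs)) * 1e-9;
    long   nbytes   = (3 * sizeofT * (1 + numPairs))      // position data
                    + (3 * sizeofT * nAtom)               // force for each atom
                    + ((long)sizeof(int) * numPairs);     // neighbor list
    double gbytes   = (double)nbytes / (1000. * 1000. * 1000.);

    printf("GFLOPS : %.2f\n", gflop  / kernelTimeS);
    printf("GB/s   : %.2f\n", gbytes / kernelTimeS);
    return 0;
}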